From: "Mike Pagano" <mpagano@gentoo.org> To: gentoo-commits@lists.gentoo.org Subject: [gentoo-commits] proj/linux-patches:3.14 commit in: / Date: Thu, 14 Aug 2014 12:44:15 +0000 (UTC) [thread overview] Message-ID: <1408020254.c32d169fb51a74c530420b42f58b1c37285ac0d5.mpagano@gentoo> (raw) commit: c32d169fb51a74c530420b42f58b1c37285ac0d5 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org> AuthorDate: Thu Aug 14 12:44:14 2014 +0000 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org> CommitDate: Thu Aug 14 12:44:14 2014 +0000 URL: http://git.overlays.gentoo.org/gitweb/?p=proj/linux-patches.git;a=commit;h=c32d169f Linux patch 3.14.17 --- 0000_README | 4 + 1016_linux-3.14.17.patch | 1765 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 1769 insertions(+) diff --git a/0000_README b/0000_README index 75c60df..599e94c 100644 --- a/0000_README +++ b/0000_README @@ -106,6 +106,10 @@ Patch: 1015_linux-3.14.16.patch From: http://www.kernel.org Desc: Linux 3.14.16 +Patch: 1016_linux-3.14.17.patch +From: http://www.kernel.org +Desc: Linux 3.14.17 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1016_linux-3.14.17.patch b/1016_linux-3.14.17.patch new file mode 100644 index 0000000..19c6720 --- /dev/null +++ b/1016_linux-3.14.17.patch @@ -0,0 +1,1765 @@ +diff --git a/Makefile b/Makefile +index 8b22e24a2d8e..12aac0325888 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 14 +-SUBLEVEL = 16 ++SUBLEVEL = 17 + EXTRAVERSION = + NAME = Remembering Coco + +diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h +index 0f9e94537eee..1a49ffdf9da9 100644 +--- a/arch/sparc/include/asm/pgtable_64.h ++++ b/arch/sparc/include/asm/pgtable_64.h +@@ -24,7 +24,8 @@ + + /* The kernel image occupies 0x4000000 to 0x6000000 (4MB --> 96MB). + * The page copy blockops can use 0x6000000 to 0x8000000. +- * The TSB is mapped in the 0x8000000 to 0xa000000 range. ++ * The 8K TSB is mapped in the 0x8000000 to 0x8400000 range. ++ * The 4M TSB is mapped in the 0x8400000 to 0x8800000 range. + * The PROM resides in an area spanning 0xf0000000 to 0x100000000. + * The vmalloc area spans 0x100000000 to 0x200000000. + * Since modules need to be in the lowest 32-bits of the address space, +@@ -33,7 +34,8 @@ + * 0x400000000. + */ + #define TLBTEMP_BASE _AC(0x0000000006000000,UL) +-#define TSBMAP_BASE _AC(0x0000000008000000,UL) ++#define TSBMAP_8K_BASE _AC(0x0000000008000000,UL) ++#define TSBMAP_4M_BASE _AC(0x0000000008400000,UL) + #define MODULES_VADDR _AC(0x0000000010000000,UL) + #define MODULES_LEN _AC(0x00000000e0000000,UL) + #define MODULES_END _AC(0x00000000f0000000,UL) +@@ -71,6 +73,23 @@ + + #include <linux/sched.h> + ++extern unsigned long sparc64_valid_addr_bitmap[]; ++ ++/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */ ++static inline bool __kern_addr_valid(unsigned long paddr) ++{ ++ if ((paddr >> MAX_PHYS_ADDRESS_BITS) != 0UL) ++ return false; ++ return test_bit(paddr >> ILOG2_4MB, sparc64_valid_addr_bitmap); ++} ++ ++static inline bool kern_addr_valid(unsigned long addr) ++{ ++ unsigned long paddr = __pa(addr); ++ ++ return __kern_addr_valid(paddr); ++} ++ + /* Entries per page directory level. */ + #define PTRS_PER_PTE (1UL << (PAGE_SHIFT-3)) + #define PTRS_PER_PMD (1UL << PMD_BITS) +@@ -79,9 +98,12 @@ + /* Kernel has a separate 44bit address space. 
*/ + #define FIRST_USER_ADDRESS 0 + +-#define pte_ERROR(e) __builtin_trap() +-#define pmd_ERROR(e) __builtin_trap() +-#define pgd_ERROR(e) __builtin_trap() ++#define pmd_ERROR(e) \ ++ pr_err("%s:%d: bad pmd %p(%016lx) seen at (%pS)\n", \ ++ __FILE__, __LINE__, &(e), pmd_val(e), __builtin_return_address(0)) ++#define pgd_ERROR(e) \ ++ pr_err("%s:%d: bad pgd %p(%016lx) seen at (%pS)\n", \ ++ __FILE__, __LINE__, &(e), pgd_val(e), __builtin_return_address(0)) + + #endif /* !(__ASSEMBLY__) */ + +@@ -258,8 +280,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t prot) + { + unsigned long mask, tmp; + +- /* SUN4U: 0x600307ffffffecb8 (negated == 0x9ffcf80000001347) +- * SUN4V: 0x30ffffffffffee17 (negated == 0xcf000000000011e8) ++ /* SUN4U: 0x630107ffffffec38 (negated == 0x9cfef800000013c7) ++ * SUN4V: 0x33ffffffffffee07 (negated == 0xcc000000000011f8) + * + * Even if we use negation tricks the result is still a 6 + * instruction sequence, so don't try to play fancy and just +@@ -289,10 +311,10 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t prot) + " .previous\n" + : "=r" (mask), "=r" (tmp) + : "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U | +- _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U | _PAGE_PRESENT_4U | ++ _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U | + _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4U), + "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V | +- _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V | _PAGE_PRESENT_4V | ++ _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V | + _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V)); + + return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask)); +@@ -633,7 +655,7 @@ static inline unsigned long pmd_large(pmd_t pmd) + { + pte_t pte = __pte(pmd_val(pmd)); + +- return (pte_val(pte) & _PAGE_PMD_HUGE) && pte_present(pte); ++ return pte_val(pte) & _PAGE_PMD_HUGE; + } + + #ifdef CONFIG_TRANSPARENT_HUGEPAGE +@@ -719,20 +741,6 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd) + return __pmd(pte_val(pte)); + } + +-static inline pmd_t pmd_mknotpresent(pmd_t pmd) +-{ +- unsigned long mask; +- +- if (tlb_type == hypervisor) +- mask = _PAGE_PRESENT_4V; +- else +- mask = _PAGE_PRESENT_4U; +- +- pmd_val(pmd) &= ~mask; +- +- return pmd; +-} +- + static inline pmd_t pmd_mksplitting(pmd_t pmd) + { + pte_t pte = __pte(pmd_val(pmd)); +@@ -757,6 +765,20 @@ static inline int pmd_present(pmd_t pmd) + + #define pmd_none(pmd) (!pmd_val(pmd)) + ++/* pmd_bad() is only called on non-trans-huge PMDs. Our encoding is ++ * very simple, it's just the physical address. PTE tables are of ++ * size PAGE_SIZE so make sure the sub-PAGE_SIZE bits are clear and ++ * the top bits outside of the range of any physical address size we ++ * support are clear as well. We also validate the physical itself. 
++ */ ++#define pmd_bad(pmd) ((pmd_val(pmd) & ~PAGE_MASK) || \ ++ !__kern_addr_valid(pmd_val(pmd))) ++ ++#define pud_none(pud) (!pud_val(pud)) ++ ++#define pud_bad(pud) ((pud_val(pud) & ~PAGE_MASK) || \ ++ !__kern_addr_valid(pud_val(pud))) ++ + #ifdef CONFIG_TRANSPARENT_HUGEPAGE + extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, pmd_t pmd); +@@ -790,10 +812,7 @@ static inline unsigned long __pmd_page(pmd_t pmd) + #define pud_page_vaddr(pud) \ + ((unsigned long) __va(pud_val(pud))) + #define pud_page(pud) virt_to_page((void *)pud_page_vaddr(pud)) +-#define pmd_bad(pmd) (0) + #define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0UL) +-#define pud_none(pud) (!pud_val(pud)) +-#define pud_bad(pud) (0) + #define pud_present(pud) (pud_val(pud) != 0U) + #define pud_clear(pudp) (pud_val(*(pudp)) = 0UL) + +@@ -893,6 +912,10 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *); + extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr, + pmd_t *pmd); + ++#define __HAVE_ARCH_PMDP_INVALIDATE ++extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, ++ pmd_t *pmdp); ++ + #define __HAVE_ARCH_PGTABLE_DEPOSIT + extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, + pgtable_t pgtable); +@@ -919,18 +942,6 @@ extern unsigned long pte_file(pte_t); + extern pte_t pgoff_to_pte(unsigned long); + #define PTE_FILE_MAX_BITS (64UL - PAGE_SHIFT - 1UL) + +-extern unsigned long sparc64_valid_addr_bitmap[]; +- +-/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */ +-static inline bool kern_addr_valid(unsigned long addr) +-{ +- unsigned long paddr = __pa(addr); +- +- if ((paddr >> 41UL) != 0UL) +- return false; +- return test_bit(paddr >> 22, sparc64_valid_addr_bitmap); +-} +- + extern int page_in_phys_avail(unsigned long paddr); + + /* +diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h +index 3c3c89f52643..7f9bab26a499 100644 +--- a/arch/sparc/include/asm/tlbflush_64.h ++++ b/arch/sparc/include/asm/tlbflush_64.h +@@ -34,6 +34,8 @@ static inline void flush_tlb_range(struct vm_area_struct *vma, + { + } + ++void flush_tlb_kernel_range(unsigned long start, unsigned long end); ++ + #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE + + extern void flush_tlb_pending(void); +@@ -48,11 +50,6 @@ extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end); + + #ifndef CONFIG_SMP + +-#define flush_tlb_kernel_range(start,end) \ +-do { flush_tsb_kernel_range(start,end); \ +- __flush_tlb_kernel_range(start,end); \ +-} while (0) +- + static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr) + { + __flush_tlb_page(CTX_HWBITS(mm->context), vaddr); +@@ -63,11 +60,6 @@ static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vad + extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end); + extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr); + +-#define flush_tlb_kernel_range(start, end) \ +-do { flush_tsb_kernel_range(start,end); \ +- smp_flush_tlb_kernel_range(start, end); \ +-} while (0) +- + #define global_flush_tlb_page(mm, vaddr) \ + smp_flush_tlb_page(mm, vaddr) + +diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h +index 2230f80d9fe3..90916f955cac 100644 +--- a/arch/sparc/include/asm/tsb.h ++++ b/arch/sparc/include/asm/tsb.h +@@ -171,7 +171,8 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; + andcc REG1, 
REG2, %g0; \ + be,pt %xcc, 700f; \ + sethi %hi(4 * 1024 * 1024), REG2; \ +- andn REG1, REG2, REG1; \ ++ brgez,pn REG1, FAIL_LABEL; \ ++ andn REG1, REG2, REG1; \ + and VADDR, REG2, REG2; \ + brlz,pt REG1, PTE_LABEL; \ + or REG1, REG2, REG1; \ +diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S +index 26b706a1867d..452f04fe8da6 100644 +--- a/arch/sparc/kernel/head_64.S ++++ b/arch/sparc/kernel/head_64.S +@@ -282,8 +282,8 @@ sun4v_chip_type: + stx %l2, [%l4 + 0x0] + ldx [%sp + 2047 + 128 + 0x50], %l3 ! physaddr low + /* 4MB align */ +- srlx %l3, 22, %l3 +- sllx %l3, 22, %l3 ++ srlx %l3, ILOG2_4MB, %l3 ++ sllx %l3, ILOG2_4MB, %l3 + stx %l3, [%l4 + 0x8] + + /* Leave service as-is, "call-method" */ +diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S +index 542e96ac4d39..605d49204580 100644 +--- a/arch/sparc/kernel/ktlb.S ++++ b/arch/sparc/kernel/ktlb.S +@@ -277,7 +277,7 @@ kvmap_dtlb_load: + #ifdef CONFIG_SPARSEMEM_VMEMMAP + kvmap_vmemmap: + sub %g4, %g5, %g5 +- srlx %g5, 22, %g5 ++ srlx %g5, ILOG2_4MB, %g5 + sethi %hi(vmemmap_table), %g1 + sllx %g5, 3, %g5 + or %g1, %lo(vmemmap_table), %g1 +diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c +index e01d75d40329..66dacd56bb10 100644 +--- a/arch/sparc/kernel/ldc.c ++++ b/arch/sparc/kernel/ldc.c +@@ -1336,7 +1336,7 @@ int ldc_connect(struct ldc_channel *lp) + if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) || + !(lp->flags & LDC_FLAG_REGISTERED_QUEUES) || + lp->hs_state != LDC_HS_OPEN) +- err = -EINVAL; ++ err = ((lp->hs_state > LDC_HS_OPEN) ? 0 : -EINVAL); + else + err = start_handshake(lp); + +diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c +index b085311dcd0e..8416d7fadcce 100644 +--- a/arch/sparc/kernel/smp_64.c ++++ b/arch/sparc/kernel/smp_64.c +@@ -151,7 +151,7 @@ void cpu_panic(void) + #define NUM_ROUNDS 64 /* magic value */ + #define NUM_ITERS 5 /* likewise */ + +-static DEFINE_SPINLOCK(itc_sync_lock); ++static DEFINE_RAW_SPINLOCK(itc_sync_lock); + static unsigned long go[SLAVE + 1]; + + #define DEBUG_TICK_SYNC 0 +@@ -259,7 +259,7 @@ static void smp_synchronize_one_tick(int cpu) + go[MASTER] = 0; + membar_safe("#StoreLoad"); + +- spin_lock_irqsave(&itc_sync_lock, flags); ++ raw_spin_lock_irqsave(&itc_sync_lock, flags); + { + for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) { + while (!go[MASTER]) +@@ -270,7 +270,7 @@ static void smp_synchronize_one_tick(int cpu) + membar_safe("#StoreLoad"); + } + } +- spin_unlock_irqrestore(&itc_sync_lock, flags); ++ raw_spin_unlock_irqrestore(&itc_sync_lock, flags); + } + + #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU) +diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S +index f7c72b6efc27..d066eb18650c 100644 +--- a/arch/sparc/kernel/sys32.S ++++ b/arch/sparc/kernel/sys32.S +@@ -44,7 +44,7 @@ SIGN1(sys32_timer_settime, compat_sys_timer_settime, %o1) + SIGN1(sys32_io_submit, compat_sys_io_submit, %o1) + SIGN1(sys32_mq_open, compat_sys_mq_open, %o1) + SIGN1(sys32_select, compat_sys_select, %o0) +-SIGN3(sys32_futex, compat_sys_futex, %o1, %o2, %o5) ++SIGN1(sys32_futex, compat_sys_futex, %o1) + SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0) + SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0) + SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0) +diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c +index 3c1a7cb31579..35ab8b60d256 100644 +--- a/arch/sparc/kernel/unaligned_64.c ++++ b/arch/sparc/kernel/unaligned_64.c +@@ -166,17 +166,23 @@ static unsigned long *fetch_reg_addr(unsigned int reg, struct 
pt_regs *regs) + unsigned long compute_effective_address(struct pt_regs *regs, + unsigned int insn, unsigned int rd) + { ++ int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; + unsigned int rs1 = (insn >> 14) & 0x1f; + unsigned int rs2 = insn & 0x1f; +- int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; ++ unsigned long addr; + + if (insn & 0x2000) { + maybe_flush_windows(rs1, 0, rd, from_kernel); +- return (fetch_reg(rs1, regs) + sign_extend_imm13(insn)); ++ addr = (fetch_reg(rs1, regs) + sign_extend_imm13(insn)); + } else { + maybe_flush_windows(rs1, rs2, rd, from_kernel); +- return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs)); ++ addr = (fetch_reg(rs1, regs) + fetch_reg(rs2, regs)); + } ++ ++ if (!from_kernel && test_thread_flag(TIF_32BIT)) ++ addr &= 0xffffffff; ++ ++ return addr; + } + + /* This is just to make gcc think die_if_kernel does return... */ +diff --git a/arch/sparc/lib/NG2memcpy.S b/arch/sparc/lib/NG2memcpy.S +index 2c20ad63ddbf..30eee6e8a81b 100644 +--- a/arch/sparc/lib/NG2memcpy.S ++++ b/arch/sparc/lib/NG2memcpy.S +@@ -236,6 +236,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ + */ + VISEntryHalf + ++ membar #Sync + alignaddr %o1, %g0, %g0 + + add %o1, (64 - 1), %o4 +diff --git a/arch/sparc/math-emu/math_32.c b/arch/sparc/math-emu/math_32.c +index aa4d55b0bdf0..5ce8f2f64604 100644 +--- a/arch/sparc/math-emu/math_32.c ++++ b/arch/sparc/math-emu/math_32.c +@@ -499,7 +499,7 @@ static int do_one_mathemu(u32 insn, unsigned long *pfsr, unsigned long *fregs) + case 0: fsr = *pfsr; + if (IR == -1) IR = 2; + /* fcc is always fcc0 */ +- fsr &= ~0xc00; fsr |= (IR << 10); break; ++ fsr &= ~0xc00; fsr |= (IR << 10); + *pfsr = fsr; + break; + case 1: rd->s = IR; break; +diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c +index 69bb818fdd79..4ced3fc66130 100644 +--- a/arch/sparc/mm/fault_64.c ++++ b/arch/sparc/mm/fault_64.c +@@ -96,38 +96,51 @@ static unsigned int get_user_insn(unsigned long tpc) + pte_t *ptep, pte; + unsigned long pa; + u32 insn = 0; +- unsigned long pstate; + +- if (pgd_none(*pgdp)) +- goto outret; ++ if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp))) ++ goto out; + pudp = pud_offset(pgdp, tpc); +- if (pud_none(*pudp)) +- goto outret; +- pmdp = pmd_offset(pudp, tpc); +- if (pmd_none(*pmdp)) +- goto outret; ++ if (pud_none(*pudp) || unlikely(pud_bad(*pudp))) ++ goto out; + + /* This disables preemption for us as well. */ +- __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate)); +- __asm__ __volatile__("wrpr %0, %1, %%pstate" +- : : "r" (pstate), "i" (PSTATE_IE)); +- ptep = pte_offset_map(pmdp, tpc); +- pte = *ptep; +- if (!pte_present(pte)) +- goto out; ++ local_irq_disable(); ++ ++ pmdp = pmd_offset(pudp, tpc); ++ if (pmd_none(*pmdp) || unlikely(pmd_bad(*pmdp))) ++ goto out_irq_enable; + +- pa = (pte_pfn(pte) << PAGE_SHIFT); +- pa += (tpc & ~PAGE_MASK); ++#ifdef CONFIG_TRANSPARENT_HUGEPAGE ++ if (pmd_trans_huge(*pmdp)) { ++ if (pmd_trans_splitting(*pmdp)) ++ goto out_irq_enable; + +- /* Use phys bypass so we don't pollute dtlb/dcache. */ +- __asm__ __volatile__("lduwa [%1] %2, %0" +- : "=r" (insn) +- : "r" (pa), "i" (ASI_PHYS_USE_EC)); ++ pa = pmd_pfn(*pmdp) << PAGE_SHIFT; ++ pa += tpc & ~HPAGE_MASK; + ++ /* Use phys bypass so we don't pollute dtlb/dcache. 
*/ ++ __asm__ __volatile__("lduwa [%1] %2, %0" ++ : "=r" (insn) ++ : "r" (pa), "i" (ASI_PHYS_USE_EC)); ++ } else ++#endif ++ { ++ ptep = pte_offset_map(pmdp, tpc); ++ pte = *ptep; ++ if (pte_present(pte)) { ++ pa = (pte_pfn(pte) << PAGE_SHIFT); ++ pa += (tpc & ~PAGE_MASK); ++ ++ /* Use phys bypass so we don't pollute dtlb/dcache. */ ++ __asm__ __volatile__("lduwa [%1] %2, %0" ++ : "=r" (insn) ++ : "r" (pa), "i" (ASI_PHYS_USE_EC)); ++ } ++ pte_unmap(ptep); ++ } ++out_irq_enable: ++ local_irq_enable(); + out: +- pte_unmap(ptep); +- __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate)); +-outret: + return insn; + } + +@@ -153,7 +166,8 @@ show_signal_msg(struct pt_regs *regs, int sig, int code, + } + + static void do_fault_siginfo(int code, int sig, struct pt_regs *regs, +- unsigned int insn, int fault_code) ++ unsigned long fault_addr, unsigned int insn, ++ int fault_code) + { + unsigned long addr; + siginfo_t info; +@@ -161,10 +175,18 @@ static void do_fault_siginfo(int code, int sig, struct pt_regs *regs, + info.si_code = code; + info.si_signo = sig; + info.si_errno = 0; +- if (fault_code & FAULT_CODE_ITLB) ++ if (fault_code & FAULT_CODE_ITLB) { + addr = regs->tpc; +- else +- addr = compute_effective_address(regs, insn, 0); ++ } else { ++ /* If we were able to probe the faulting instruction, use it ++ * to compute a precise fault address. Otherwise use the fault ++ * time provided address which may only have page granularity. ++ */ ++ if (insn) ++ addr = compute_effective_address(regs, insn, 0); ++ else ++ addr = fault_addr; ++ } + info.si_addr = (void __user *) addr; + info.si_trapno = 0; + +@@ -239,7 +261,7 @@ static void __kprobes do_kernel_fault(struct pt_regs *regs, int si_code, + /* The si_code was set to make clear whether + * this was a SEGV_MAPERR or SEGV_ACCERR fault. + */ +- do_fault_siginfo(si_code, SIGSEGV, regs, insn, fault_code); ++ do_fault_siginfo(si_code, SIGSEGV, regs, address, insn, fault_code); + return; + } + +@@ -259,18 +281,6 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs) + show_regs(regs); + } + +-static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs, +- unsigned long addr) +-{ +- static int times; +- +- if (times++ < 10) +- printk(KERN_ERR "FAULT[%s:%d]: 32-bit process " +- "reports 64-bit fault address [%lx]\n", +- current->comm, current->pid, addr); +- show_regs(regs); +-} +- + asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) + { + enum ctx_state prev_state = exception_enter(); +@@ -300,10 +310,8 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) + goto intr_or_no_mm; + } + } +- if (unlikely((address >> 32) != 0)) { +- bogus_32bit_fault_address(regs, address); ++ if (unlikely((address >> 32) != 0)) + goto intr_or_no_mm; +- } + } + + if (regs->tstate & TSTATE_PRIV) { +@@ -525,7 +533,7 @@ do_sigbus: + * Send a sigbus, regardless of whether we were in kernel + * or user mode. + */ +- do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, insn, fault_code); ++ do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, address, insn, fault_code); + + /* Kernel mode? 
Handle exceptions or die */ + if (regs->tstate & TSTATE_PRIV) +diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c +index c4d3da68b800..1aed0432c64b 100644 +--- a/arch/sparc/mm/gup.c ++++ b/arch/sparc/mm/gup.c +@@ -73,7 +73,7 @@ static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr, + struct page *head, *page, *tail; + int refs; + +- if (!pmd_large(pmd)) ++ if (!(pmd_val(pmd) & _PAGE_VALID)) + return 0; + + if (write && !pmd_write(pmd)) +diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c +index eafbc65c9c47..96862241b342 100644 +--- a/arch/sparc/mm/init_64.c ++++ b/arch/sparc/mm/init_64.c +@@ -350,6 +350,10 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t * + + mm = vma->vm_mm; + ++ /* Don't insert a non-valid PTE into the TSB, we'll deadlock. */ ++ if (!pte_accessible(mm, pte)) ++ return; ++ + spin_lock_irqsave(&mm->context.lock, flags); + + #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) +@@ -588,7 +592,7 @@ static void __init remap_kernel(void) + int i, tlb_ent = sparc64_highest_locked_tlbent(); + + tte_vaddr = (unsigned long) KERNBASE; +- phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL; ++ phys_page = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB; + tte_data = kern_large_tte(phys_page); + + kern_locked_tte_data = tte_data; +@@ -1881,7 +1885,7 @@ void __init paging_init(void) + + BUILD_BUG_ON(NR_CPUS > 4096); + +- kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; ++ kern_base = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB; + kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; + + /* Invalidate both kernel TSBs. */ +@@ -1937,7 +1941,7 @@ void __init paging_init(void) + shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE); + + real_end = (unsigned long)_end; +- num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22); ++ num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << ILOG2_4MB); + printk("Kernel: Using %d locked TLB entries for main kernel image.\n", + num_kernel_image_mappings); + +@@ -2094,7 +2098,7 @@ static void __init setup_valid_addr_bitmap_from_pavail(unsigned long *bitmap) + + if (new_start <= old_start && + new_end >= (old_start + PAGE_SIZE)) { +- set_bit(old_start >> 22, bitmap); ++ set_bit(old_start >> ILOG2_4MB, bitmap); + goto do_next_page; + } + } +@@ -2143,7 +2147,7 @@ void __init mem_init(void) + addr = PAGE_OFFSET + kern_base; + last = PAGE_ALIGN(kern_size) + addr; + while (addr < last) { +- set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap); ++ set_bit(__pa(addr) >> ILOG2_4MB, sparc64_valid_addr_bitmap); + addr += PAGE_SIZE; + } + +@@ -2267,7 +2271,7 @@ int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend, + void *block; + + if (!(*vmem_pp & _PAGE_VALID)) { +- block = vmemmap_alloc_block(1UL << 22, node); ++ block = vmemmap_alloc_block(1UL << ILOG2_4MB, node); + if (!block) + return -ENOMEM; + +@@ -2614,6 +2618,10 @@ void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr, + + pte = pmd_val(entry); + ++ /* Don't insert a non-valid PMD into the TSB, we'll deadlock. */ ++ if (!(pte & _PAGE_VALID)) ++ return; ++ + /* We are fabricating 8MB pages using 4MB real hw pages. 
*/ + pte |= (addr & (1UL << REAL_HPAGE_SHIFT)); + +@@ -2694,3 +2702,26 @@ void hugetlb_setup(struct pt_regs *regs) + } + } + #endif ++ ++#ifdef CONFIG_SMP ++#define do_flush_tlb_kernel_range smp_flush_tlb_kernel_range ++#else ++#define do_flush_tlb_kernel_range __flush_tlb_kernel_range ++#endif ++ ++void flush_tlb_kernel_range(unsigned long start, unsigned long end) ++{ ++ if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) { ++ if (start < LOW_OBP_ADDRESS) { ++ flush_tsb_kernel_range(start, LOW_OBP_ADDRESS); ++ do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS); ++ } ++ if (end > HI_OBP_ADDRESS) { ++ flush_tsb_kernel_range(end, HI_OBP_ADDRESS); ++ do_flush_tlb_kernel_range(end, HI_OBP_ADDRESS); ++ } ++ } else { ++ flush_tsb_kernel_range(start, end); ++ do_flush_tlb_kernel_range(start, end); ++ } ++} +diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c +index b12cb5e72812..b89aba217e3b 100644 +--- a/arch/sparc/mm/tlb.c ++++ b/arch/sparc/mm/tlb.c +@@ -134,7 +134,7 @@ no_cache_flush: + + #ifdef CONFIG_TRANSPARENT_HUGEPAGE + static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr, +- pmd_t pmd, bool exec) ++ pmd_t pmd) + { + unsigned long end; + pte_t *pte; +@@ -142,8 +142,11 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr, + pte = pte_offset_map(&pmd, vaddr); + end = vaddr + HPAGE_SIZE; + while (vaddr < end) { +- if (pte_val(*pte) & _PAGE_VALID) ++ if (pte_val(*pte) & _PAGE_VALID) { ++ bool exec = pte_exec(*pte); ++ + tlb_batch_add_one(mm, vaddr, exec); ++ } + pte++; + vaddr += PAGE_SIZE; + } +@@ -177,19 +180,30 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr, + } + + if (!pmd_none(orig)) { +- pte_t orig_pte = __pte(pmd_val(orig)); +- bool exec = pte_exec(orig_pte); +- + addr &= HPAGE_MASK; + if (pmd_trans_huge(orig)) { ++ pte_t orig_pte = __pte(pmd_val(orig)); ++ bool exec = pte_exec(orig_pte); ++ + tlb_batch_add_one(mm, addr, exec); + tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec); + } else { +- tlb_batch_pmd_scan(mm, addr, orig, exec); ++ tlb_batch_pmd_scan(mm, addr, orig); + } + } + } + ++void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, ++ pmd_t *pmdp) ++{ ++ pmd_t entry = *pmdp; ++ ++ pmd_val(entry) &= ~_PAGE_VALID; ++ ++ set_pmd_at(vma->vm_mm, address, pmdp, entry); ++ flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); ++} ++ + void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, + pgtable_t pgtable) + { +diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c +index f5d506fdddad..fe19b81acc09 100644 +--- a/arch/sparc/mm/tsb.c ++++ b/arch/sparc/mm/tsb.c +@@ -133,7 +133,19 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign + mm->context.tsb_block[tsb_idx].tsb_nentries = + tsb_bytes / sizeof(struct tsb); + +- base = TSBMAP_BASE; ++ switch (tsb_idx) { ++ case MM_TSB_BASE: ++ base = TSBMAP_8K_BASE; ++ break; ++#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) ++ case MM_TSB_HUGE: ++ base = TSBMAP_4M_BASE; ++ break; ++#endif ++ default: ++ BUG(); ++ } ++ + tte = pgprot_val(PAGE_KERNEL_LOCKED); + tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb); + BUG_ON(tsb_paddr & (tsb_bytes - 1UL)); +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +index 391f29ef6d2e..1fbeaa9dd202 100644 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +@@ -337,6 +337,7 @@ struct sw_tx_bd { + u8 flags; + /* Set on the first BD descriptor when there is 
a split BD */ + #define BNX2X_TSO_SPLIT_BD (1<<0) ++#define BNX2X_HAS_SECOND_PBD (1<<1) + }; + + struct sw_rx_page { +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +index 5ed512473b12..afa4a1f63270 100644 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +@@ -223,6 +223,12 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, + --nbd; + bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); + ++ if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) { ++ /* Skip second parse bd... */ ++ --nbd; ++ bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); ++ } ++ + /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */ + if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) { + tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd; +@@ -3868,6 +3874,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) + /* set encapsulation flag in start BD */ + SET_FLAG(tx_start_bd->general_data, + ETH_TX_START_BD_TUNNEL_EXIST, 1); ++ ++ tx_buf->flags |= BNX2X_HAS_SECOND_PBD; ++ + nbd++; + } else if (xmit_type & XMIT_CSUM) { + /* Set PBD in checksum offload case w/o encapsulation */ +diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c +index 4ad1187e82fb..669eeb4eb247 100644 +--- a/drivers/net/ethernet/brocade/bna/bnad.c ++++ b/drivers/net/ethernet/brocade/bna/bnad.c +@@ -600,9 +600,9 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget) + prefetch(bnad->netdev); + + cq = ccb->sw_q; +- cmpl = &cq[ccb->producer_index]; + + while (packets < budget) { ++ cmpl = &cq[ccb->producer_index]; + if (!cmpl->valid) + break; + /* The 'valid' field is set by the adapter, only after writing +diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c +index 5adecc5f52b7..7f1abb7c18f2 100644 +--- a/drivers/net/macvlan.c ++++ b/drivers/net/macvlan.c +@@ -548,6 +548,7 @@ static int macvlan_init(struct net_device *dev) + (lowerdev->state & MACVLAN_STATE_MASK); + dev->features = lowerdev->features & MACVLAN_FEATURES; + dev->features |= ALWAYS_ON_FEATURES; ++ dev->vlan_features = lowerdev->vlan_features & MACVLAN_FEATURES; + dev->gso_max_size = lowerdev->gso_max_size; + dev->iflink = lowerdev->ifindex; + dev->hard_header_len = lowerdev->hard_header_len; +diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c +index 365375408904..25f74191a788 100644 +--- a/drivers/net/phy/phy_device.c ++++ b/drivers/net/phy/phy_device.c +@@ -353,7 +353,7 @@ int phy_device_register(struct phy_device *phydev) + phydev->bus->phy_map[phydev->addr] = phydev; + + /* Run all of the fixups for this PHY */ +- err = phy_init_hw(phydev); ++ err = phy_scan_fixups(phydev); + if (err) { + pr_err("PHY %d failed to initialize\n", phydev->addr); + goto out; +diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c +index 01805319e1e0..1aff970be33e 100644 +--- a/drivers/net/ppp/pptp.c ++++ b/drivers/net/ppp/pptp.c +@@ -281,7 +281,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb) + nf_reset(skb); + + skb->ip_summed = CHECKSUM_NONE; +- ip_select_ident(skb, &rt->dst, NULL); ++ ip_select_ident(skb, NULL); + ip_send_check(iph); + + ip_local_out(skb); +diff --git a/drivers/sbus/char/bbc_envctrl.c b/drivers/sbus/char/bbc_envctrl.c +index 160e7510aca6..0787b9756165 100644 +--- a/drivers/sbus/char/bbc_envctrl.c ++++ b/drivers/sbus/char/bbc_envctrl.c +@@ -452,6 +452,9 @@ static void attach_one_temp(struct bbc_i2c_bus *bp, struct platform_device 
*op, + if (!tp) + return; + ++ INIT_LIST_HEAD(&tp->bp_list); ++ INIT_LIST_HEAD(&tp->glob_list); ++ + tp->client = bbc_i2c_attach(bp, op); + if (!tp->client) { + kfree(tp); +@@ -497,6 +500,9 @@ static void attach_one_fan(struct bbc_i2c_bus *bp, struct platform_device *op, + if (!fp) + return; + ++ INIT_LIST_HEAD(&fp->bp_list); ++ INIT_LIST_HEAD(&fp->glob_list); ++ + fp->client = bbc_i2c_attach(bp, op); + if (!fp->client) { + kfree(fp); +diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c +index c7763e482eb2..812b5f0361b6 100644 +--- a/drivers/sbus/char/bbc_i2c.c ++++ b/drivers/sbus/char/bbc_i2c.c +@@ -300,13 +300,18 @@ static struct bbc_i2c_bus * attach_one_i2c(struct platform_device *op, int index + if (!bp) + return NULL; + ++ INIT_LIST_HEAD(&bp->temps); ++ INIT_LIST_HEAD(&bp->fans); ++ + bp->i2c_control_regs = of_ioremap(&op->resource[0], 0, 0x2, "bbc_i2c_regs"); + if (!bp->i2c_control_regs) + goto fail; + +- bp->i2c_bussel_reg = of_ioremap(&op->resource[1], 0, 0x1, "bbc_i2c_bussel"); +- if (!bp->i2c_bussel_reg) +- goto fail; ++ if (op->num_resources == 2) { ++ bp->i2c_bussel_reg = of_ioremap(&op->resource[1], 0, 0x1, "bbc_i2c_bussel"); ++ if (!bp->i2c_bussel_reg) ++ goto fail; ++ } + + bp->waiting = 0; + init_waitqueue_head(&bp->wq); +diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c +index 80a58eca785b..e8f77606561b 100644 +--- a/drivers/tty/serial/sunsab.c ++++ b/drivers/tty/serial/sunsab.c +@@ -157,6 +157,15 @@ receive_chars(struct uart_sunsab_port *up, + (up->port.line == up->port.cons->index)) + saw_console_brk = 1; + ++ if (count == 0) { ++ if (unlikely(stat->sreg.isr1 & SAB82532_ISR1_BRK)) { ++ stat->sreg.isr0 &= ~(SAB82532_ISR0_PERR | ++ SAB82532_ISR0_FERR); ++ up->port.icount.brk++; ++ uart_handle_break(&up->port); ++ } ++ } ++ + for (i = 0; i < count; i++) { + unsigned char ch = buf[i], flag; + +diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h +index b0f4ef77fa70..bf9781e9fd92 100644 +--- a/fs/xfs/xfs_log.h ++++ b/fs/xfs/xfs_log.h +@@ -24,7 +24,8 @@ struct xfs_log_vec { + struct xfs_log_iovec *lv_iovecp; /* iovec array */ + struct xfs_log_item *lv_item; /* owner */ + char *lv_buf; /* formatted buffer */ +- int lv_buf_len; /* size of formatted buffer */ ++ int lv_bytes; /* accounted space in buffer */ ++ int lv_buf_len; /* aligned size of buffer */ + int lv_size; /* size of allocated lv */ + }; + +@@ -52,15 +53,21 @@ xlog_prepare_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp, + return vec->i_addr; + } + ++/* ++ * We need to make sure the next buffer is naturally aligned for the biggest ++ * basic data type we put into it. We already accounted for this padding when ++ * sizing the buffer. ++ * ++ * However, this padding does not get written into the log, and hence we have to ++ * track the space used by the log vectors separately to prevent log space hangs ++ * due to inaccurate accounting (i.e. a leak) of the used log space through the ++ * CIL context ticket. ++ */ + static inline void + xlog_finish_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec *vec, int len) + { +- /* +- * We need to make sure the next buffer is naturally aligned for the +- * biggest basic data type we put into it. We already accounted for +- * this when sizing the buffer. 
+- */ + lv->lv_buf_len += round_up(len, sizeof(uint64_t)); ++ lv->lv_bytes += len; + vec->i_len = len; + } + +diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c +index 4ef6fdbced78..bcfbaae4702c 100644 +--- a/fs/xfs/xfs_log_cil.c ++++ b/fs/xfs/xfs_log_cil.c +@@ -97,7 +97,7 @@ xfs_cil_prepare_item( + { + /* Account for the new LV being passed in */ + if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) { +- *diff_len += lv->lv_buf_len; ++ *diff_len += lv->lv_bytes; + *diff_iovecs += lv->lv_niovecs; + } + +@@ -111,7 +111,7 @@ xfs_cil_prepare_item( + else if (old_lv != lv) { + ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED); + +- *diff_len -= old_lv->lv_buf_len; ++ *diff_len -= old_lv->lv_bytes; + *diff_iovecs -= old_lv->lv_niovecs; + kmem_free(old_lv); + } +@@ -239,7 +239,7 @@ xlog_cil_insert_format_items( + * that the space reservation accounting is correct. + */ + *diff_iovecs -= lv->lv_niovecs; +- *diff_len -= lv->lv_buf_len; ++ *diff_len -= lv->lv_bytes; + } else { + /* allocate new data chunk */ + lv = kmem_zalloc(buf_size, KM_SLEEP|KM_NOFS); +@@ -259,6 +259,7 @@ xlog_cil_insert_format_items( + + /* The allocated data region lies beyond the iovec region */ + lv->lv_buf_len = 0; ++ lv->lv_bytes = 0; + lv->lv_buf = (char *)lv + buf_size - nbytes; + ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t))); + +diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h +index 058271bde27a..823ec7bb9c67 100644 +--- a/include/net/inetpeer.h ++++ b/include/net/inetpeer.h +@@ -41,14 +41,13 @@ struct inet_peer { + struct rcu_head gc_rcu; + }; + /* +- * Once inet_peer is queued for deletion (refcnt == -1), following fields +- * are not available: rid, ip_id_count ++ * Once inet_peer is queued for deletion (refcnt == -1), following field ++ * is not available: rid + * We can share memory with rcu_head to help keep inet_peer small. 
+ */ + union { + struct { + atomic_t rid; /* Frag reception counter */ +- atomic_t ip_id_count; /* IP ID for the next packet */ + }; + struct rcu_head rcu; + struct inet_peer *gc_next; +@@ -165,7 +164,7 @@ bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout); + void inetpeer_invalidate_tree(struct inet_peer_base *); + + /* +- * temporary check to make sure we dont access rid, ip_id_count, tcp_ts, ++ * temporary check to make sure we dont access rid, tcp_ts, + * tcp_ts_stamp if no refcount is taken on inet_peer + */ + static inline void inet_peer_refcheck(const struct inet_peer *p) +@@ -173,13 +172,4 @@ static inline void inet_peer_refcheck(const struct inet_peer *p) + WARN_ON_ONCE(atomic_read(&p->refcnt) <= 0); + } + +- +-/* can be called with or without local BH being disabled */ +-static inline int inet_getid(struct inet_peer *p, int more) +-{ +- more++; +- inet_peer_refcheck(p); +- return atomic_add_return(more, &p->ip_id_count) - more; +-} +- + #endif /* _NET_INETPEER_H */ +diff --git a/include/net/ip.h b/include/net/ip.h +index 23be0fd37937..937f19681426 100644 +--- a/include/net/ip.h ++++ b/include/net/ip.h +@@ -297,9 +297,10 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb) + } + } + +-void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more); ++u32 ip_idents_reserve(u32 hash, int segs); ++void __ip_select_ident(struct iphdr *iph, int segs); + +-static inline void ip_select_ident(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk) ++static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs) + { + struct iphdr *iph = ip_hdr(skb); + +@@ -309,24 +310,20 @@ static inline void ip_select_ident(struct sk_buff *skb, struct dst_entry *dst, s + * does not change, they drop every other packet in + * a TCP stream using header compression. + */ +- iph->id = (sk && inet_sk(sk)->inet_daddr) ? 
+- htons(inet_sk(sk)->inet_id++) : 0; +- } else +- __ip_select_ident(iph, dst, 0); +-} +- +-static inline void ip_select_ident_more(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk, int more) +-{ +- struct iphdr *iph = ip_hdr(skb); +- +- if ((iph->frag_off & htons(IP_DF)) && !skb->local_df) { + if (sk && inet_sk(sk)->inet_daddr) { + iph->id = htons(inet_sk(sk)->inet_id); +- inet_sk(sk)->inet_id += 1 + more; +- } else ++ inet_sk(sk)->inet_id += segs; ++ } else { + iph->id = 0; +- } else +- __ip_select_ident(iph, dst, more); ++ } ++ } else { ++ __ip_select_ident(iph, segs); ++ } ++} ++ ++static inline void ip_select_ident(struct sk_buff *skb, struct sock *sk) ++{ ++ ip_select_ident_segs(skb, sk, 1); + } + + /* +diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h +index e77c10405d51..7b9ec5837496 100644 +--- a/include/net/ip_tunnels.h ++++ b/include/net/ip_tunnels.h +@@ -40,6 +40,7 @@ struct ip_tunnel_prl_entry { + + struct ip_tunnel_dst { + struct dst_entry __rcu *dst; ++ __be32 saddr; + }; + + struct ip_tunnel { +diff --git a/include/net/ipv6.h b/include/net/ipv6.h +index 4f541f11ce63..9ac65781d44b 100644 +--- a/include/net/ipv6.h ++++ b/include/net/ipv6.h +@@ -660,8 +660,6 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add + return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr)); + } + +-void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt); +- + int ip6_dst_hoplimit(struct dst_entry *dst); + + /* +diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h +index f257486f17be..3f36d45b714a 100644 +--- a/include/net/secure_seq.h ++++ b/include/net/secure_seq.h +@@ -3,8 +3,6 @@ + + #include <linux/types.h> + +-__u32 secure_ip_id(__be32 daddr); +-__u32 secure_ipv6_id(const __be32 daddr[4]); + u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport); + u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, + __be16 dport); +diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c +index cc1cfd60c094..c46387a46535 100644 +--- a/net/batman-adv/fragmentation.c ++++ b/net/batman-adv/fragmentation.c +@@ -128,6 +128,7 @@ static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node, + { + struct batadv_frag_table_entry *chain; + struct batadv_frag_list_entry *frag_entry_new = NULL, *frag_entry_curr; ++ struct batadv_frag_list_entry *frag_entry_last = NULL; + struct batadv_frag_packet *frag_packet; + uint8_t bucket; + uint16_t seqno, hdr_size = sizeof(struct batadv_frag_packet); +@@ -180,11 +181,14 @@ static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node, + ret = true; + goto out; + } ++ ++ /* store current entry because it could be the last in list */ ++ frag_entry_last = frag_entry_curr; + } + +- /* Reached the end of the list, so insert after 'frag_entry_curr'. */ +- if (likely(frag_entry_curr)) { +- hlist_add_after(&frag_entry_curr->list, &frag_entry_new->list); ++ /* Reached the end of the list, so insert after 'frag_entry_last'. 
*/ ++ if (likely(frag_entry_last)) { ++ hlist_add_after(&frag_entry_last->list, &frag_entry_new->list); + chain->size += skb->len - hdr_size; + chain->timestamp = jiffies; + ret = true; +diff --git a/net/compat.c b/net/compat.c +index f50161fb812e..cbc1a2a26587 100644 +--- a/net/compat.c ++++ b/net/compat.c +@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov, + { + int tot_len; + +- if (kern_msg->msg_namelen) { ++ if (kern_msg->msg_name && kern_msg->msg_namelen) { + if (mode == VERIFY_READ) { + int err = move_addr_to_kernel(kern_msg->msg_name, + kern_msg->msg_namelen, +@@ -93,10 +93,11 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov, + if (err < 0) + return err; + } +- if (kern_msg->msg_name) +- kern_msg->msg_name = kern_address; +- } else ++ kern_msg->msg_name = kern_address; ++ } else { + kern_msg->msg_name = NULL; ++ kern_msg->msg_namelen = 0; ++ } + + tot_len = iov_from_user_compat_to_kern(kern_iov, + (struct compat_iovec __user *)kern_msg->msg_iov, +diff --git a/net/core/iovec.c b/net/core/iovec.c +index b61869429f4c..26dc0062652f 100644 +--- a/net/core/iovec.c ++++ b/net/core/iovec.c +@@ -39,7 +39,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a + { + int size, ct, err; + +- if (m->msg_namelen) { ++ if (m->msg_name && m->msg_namelen) { + if (mode == VERIFY_READ) { + void __user *namep; + namep = (void __user __force *) m->msg_name; +@@ -48,10 +48,10 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a + if (err < 0) + return err; + } +- if (m->msg_name) +- m->msg_name = address; ++ m->msg_name = address; + } else { + m->msg_name = NULL; ++ m->msg_namelen = 0; + } + + size = m->msg_iovlen * sizeof(struct iovec); +@@ -107,6 +107,10 @@ EXPORT_SYMBOL(memcpy_toiovecend); + int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov, + int offset, int len) + { ++ /* No data? Done! 
*/ ++ if (len == 0) ++ return 0; ++ + /* Skip over the finished iovecs */ + while (offset >= iov->iov_len) { + offset -= iov->iov_len; +diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c +index 897da56f3aff..ba71212f0251 100644 +--- a/net/core/secure_seq.c ++++ b/net/core/secure_seq.c +@@ -85,31 +85,6 @@ EXPORT_SYMBOL(secure_ipv6_port_ephemeral); + #endif + + #ifdef CONFIG_INET +-__u32 secure_ip_id(__be32 daddr) +-{ +- u32 hash[MD5_DIGEST_WORDS]; +- +- net_secret_init(); +- hash[0] = (__force __u32) daddr; +- hash[1] = net_secret[13]; +- hash[2] = net_secret[14]; +- hash[3] = net_secret[15]; +- +- md5_transform(hash, net_secret); +- +- return hash[0]; +-} +- +-__u32 secure_ipv6_id(const __be32 daddr[4]) +-{ +- __u32 hash[4]; +- +- net_secret_init(); +- memcpy(hash, daddr, 16); +- md5_transform(hash, net_secret); +- +- return hash[0]; +-} + + __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr, + __be16 sport, __be16 dport) +diff --git a/net/core/skbuff.c b/net/core/skbuff.c +index 7f2e1fce706e..8f6391bbf509 100644 +--- a/net/core/skbuff.c ++++ b/net/core/skbuff.c +@@ -2968,9 +2968,9 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb, + tail = nskb; + + __copy_skb_header(nskb, head_skb); +- nskb->mac_len = head_skb->mac_len; + + skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom); ++ skb_reset_mac_len(nskb); + + skb_copy_from_linear_data_offset(head_skb, -tnl_hlen, + nskb->data - tnl_hlen, +diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c +index 9db3b877fcaf..0ffcd4d64e0a 100644 +--- a/net/ipv4/igmp.c ++++ b/net/ipv4/igmp.c +@@ -369,7 +369,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) + pip->saddr = fl4.saddr; + pip->protocol = IPPROTO_IGMP; + pip->tot_len = 0; /* filled in later */ +- ip_select_ident(skb, &rt->dst, NULL); ++ ip_select_ident(skb, NULL); + ((u8 *)&pip[1])[0] = IPOPT_RA; + ((u8 *)&pip[1])[1] = 4; + ((u8 *)&pip[1])[2] = 0; +@@ -714,7 +714,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc, + iph->daddr = dst; + iph->saddr = fl4.saddr; + iph->protocol = IPPROTO_IGMP; +- ip_select_ident(skb, &rt->dst, NULL); ++ ip_select_ident(skb, NULL); + ((u8 *)&iph[1])[0] = IPOPT_RA; + ((u8 *)&iph[1])[1] = 4; + ((u8 *)&iph[1])[2] = 0; +diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c +index 48f424465112..bf2cb4a4714b 100644 +--- a/net/ipv4/inetpeer.c ++++ b/net/ipv4/inetpeer.c +@@ -26,20 +26,7 @@ + * Theory of operations. + * We keep one entry for each peer IP address. The nodes contains long-living + * information about the peer which doesn't depend on routes. +- * At this moment this information consists only of ID field for the next +- * outgoing IP packet. This field is incremented with each packet as encoded +- * in inet_getid() function (include/net/inetpeer.h). +- * At the moment of writing this notes identifier of IP packets is generated +- * to be unpredictable using this code only for packets subjected +- * (actually or potentially) to defragmentation. I.e. DF packets less than +- * PMTU in size when local fragmentation is disabled use a constant ID and do +- * not use this code (see ip_select_ident() in include/net/ip.h). + * +- * Route cache entries hold references to our nodes. +- * New cache entries get references via lookup by destination IP address in +- * the avl tree. The reference is grabbed only when it's needed i.e. only +- * when we try to output IP packet which needs an unpredictable ID (see +- * __ip_select_ident() in net/ipv4/route.c). 
+ * Nodes are removed only when reference counter goes to 0. + * When it's happened the node may be removed when a sufficient amount of + * time has been passed since its last use. The less-recently-used entry can +@@ -62,7 +49,6 @@ + * refcnt: atomically against modifications on other CPU; + * usually under some other lock to prevent node disappearing + * daddr: unchangeable +- * ip_id_count: atomic value (no lock needed) + */ + + static struct kmem_cache *peer_cachep __read_mostly; +@@ -497,10 +483,6 @@ relookup: + p->daddr = *daddr; + atomic_set(&p->refcnt, 1); + atomic_set(&p->rid, 0); +- atomic_set(&p->ip_id_count, +- (daddr->family == AF_INET) ? +- secure_ip_id(daddr->addr.a4) : +- secure_ipv6_id(daddr->addr.a6)); + p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW; + p->rate_tokens = 0; + /* 60*HZ is arbitrary, but chosen enough high so that the first +diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c +index 73c6b63bba74..ed88d781248f 100644 +--- a/net/ipv4/ip_output.c ++++ b/net/ipv4/ip_output.c +@@ -148,7 +148,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk, + iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr); + iph->saddr = saddr; + iph->protocol = sk->sk_protocol; +- ip_select_ident(skb, &rt->dst, sk); ++ ip_select_ident(skb, sk); + + if (opt && opt->opt.optlen) { + iph->ihl += opt->opt.optlen>>2; +@@ -386,8 +386,7 @@ packet_routed: + ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0); + } + +- ip_select_ident_more(skb, &rt->dst, sk, +- (skb_shinfo(skb)->gso_segs ?: 1) - 1); ++ ip_select_ident_segs(skb, sk, skb_shinfo(skb)->gso_segs ?: 1); + + skb->priority = sk->sk_priority; + skb->mark = sk->sk_mark; +@@ -1338,7 +1337,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk, + iph->ttl = ttl; + iph->protocol = sk->sk_protocol; + ip_copy_addrs(iph, fl4); +- ip_select_ident(skb, &rt->dst, sk); ++ ip_select_ident(skb, sk); + + if (opt) { + iph->ihl += opt->optlen>>2; +diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c +index 62cd9e0ae35b..0a4af0920af3 100644 +--- a/net/ipv4/ip_tunnel.c ++++ b/net/ipv4/ip_tunnel.c +@@ -69,23 +69,25 @@ static unsigned int ip_tunnel_hash(__be32 key, __be32 remote) + } + + static void __tunnel_dst_set(struct ip_tunnel_dst *idst, +- struct dst_entry *dst) ++ struct dst_entry *dst, __be32 saddr) + { + struct dst_entry *old_dst; + + dst_clone(dst); + old_dst = xchg((__force struct dst_entry **)&idst->dst, dst); + dst_release(old_dst); ++ idst->saddr = saddr; + } + +-static void tunnel_dst_set(struct ip_tunnel *t, struct dst_entry *dst) ++static void tunnel_dst_set(struct ip_tunnel *t, ++ struct dst_entry *dst, __be32 saddr) + { +- __tunnel_dst_set(this_cpu_ptr(t->dst_cache), dst); ++ __tunnel_dst_set(this_cpu_ptr(t->dst_cache), dst, saddr); + } + + static void tunnel_dst_reset(struct ip_tunnel *t) + { +- tunnel_dst_set(t, NULL); ++ tunnel_dst_set(t, NULL, 0); + } + + void ip_tunnel_dst_reset_all(struct ip_tunnel *t) +@@ -93,20 +95,25 @@ void ip_tunnel_dst_reset_all(struct ip_tunnel *t) + int i; + + for_each_possible_cpu(i) +- __tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL); ++ __tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL, 0); + } + EXPORT_SYMBOL(ip_tunnel_dst_reset_all); + +-static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, u32 cookie) ++static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, ++ u32 cookie, __be32 *saddr) + { ++ struct ip_tunnel_dst *idst; + struct dst_entry *dst; + + rcu_read_lock(); +- dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst); ++ idst = 
this_cpu_ptr(t->dst_cache); ++ dst = rcu_dereference(idst->dst); + if (dst && !atomic_inc_not_zero(&dst->__refcnt)) + dst = NULL; + if (dst) { +- if (dst->obsolete && dst->ops->check(dst, cookie) == NULL) { ++ if (!dst->obsolete || dst->ops->check(dst, cookie)) { ++ *saddr = idst->saddr; ++ } else { + tunnel_dst_reset(t); + dst_release(dst); + dst = NULL; +@@ -362,7 +369,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev) + + if (!IS_ERR(rt)) { + tdev = rt->dst.dev; +- tunnel_dst_set(tunnel, &rt->dst); ++ tunnel_dst_set(tunnel, &rt->dst, fl4.saddr); + ip_rt_put(rt); + } + if (dev->type != ARPHRD_ETHER) +@@ -606,7 +613,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, + init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr, + tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link); + +- rt = connected ? tunnel_rtable_get(tunnel, 0) : NULL; ++ rt = connected ? tunnel_rtable_get(tunnel, 0, &fl4.saddr) : NULL; + + if (!rt) { + rt = ip_route_output_key(tunnel->net, &fl4); +@@ -616,7 +623,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, + goto tx_error; + } + if (connected) +- tunnel_dst_set(tunnel, &rt->dst); ++ tunnel_dst_set(tunnel, &rt->dst, fl4.saddr); + } + + if (rt->dst.dev == dev) { +diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c +index 8d69626f2206..65b664d30fa1 100644 +--- a/net/ipv4/ip_tunnel_core.c ++++ b/net/ipv4/ip_tunnel_core.c +@@ -74,7 +74,7 @@ int iptunnel_xmit(struct rtable *rt, struct sk_buff *skb, + iph->daddr = dst; + iph->saddr = src; + iph->ttl = ttl; +- __ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1); ++ __ip_select_ident(iph, skb_shinfo(skb)->gso_segs ?: 1); + + err = ip_local_out(skb); + if (unlikely(net_xmit_eval(err))) +diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c +index 28863570dd60..1149fc2290e2 100644 +--- a/net/ipv4/ipmr.c ++++ b/net/ipv4/ipmr.c +@@ -1663,7 +1663,7 @@ static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr) + iph->protocol = IPPROTO_IPIP; + iph->ihl = 5; + iph->tot_len = htons(skb->len); +- ip_select_ident(skb, skb_dst(skb), NULL); ++ ip_select_ident(skb, NULL); + ip_send_check(iph); + + memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); +diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c +index c04518f4850a..11c8d81fdc59 100644 +--- a/net/ipv4/raw.c ++++ b/net/ipv4/raw.c +@@ -389,7 +389,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4, + iph->check = 0; + iph->tot_len = htons(length); + if (!iph->id) +- ip_select_ident(skb, &rt->dst, NULL); ++ ip_select_ident(skb, NULL); + + iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); + } +diff --git a/net/ipv4/route.c b/net/ipv4/route.c +index 031553f8a306..ca5a01ed8ed6 100644 +--- a/net/ipv4/route.c ++++ b/net/ipv4/route.c +@@ -89,6 +89,7 @@ + #include <linux/rcupdate.h> + #include <linux/times.h> + #include <linux/slab.h> ++#include <linux/jhash.h> + #include <net/dst.h> + #include <net/net_namespace.h> + #include <net/protocol.h> +@@ -462,39 +463,45 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, + return neigh_create(&arp_tbl, pkey, dev); + } + +-/* +- * Peer allocation may fail only in serious out-of-memory conditions. However +- * we still can generate some output. +- * Random ID selection looks a bit dangerous because we have no chances to +- * select ID being unique in a reasonable period of time. +- * But broken packet identifier may be better than no packet at all. 
++#define IP_IDENTS_SZ 2048u ++struct ip_ident_bucket { ++ atomic_t id; ++ u32 stamp32; ++}; ++ ++static struct ip_ident_bucket *ip_idents __read_mostly; ++ ++/* In order to protect privacy, we add a perturbation to identifiers ++ * if one generator is seldom used. This makes hard for an attacker ++ * to infer how many packets were sent between two points in time. + */ +-static void ip_select_fb_ident(struct iphdr *iph) ++u32 ip_idents_reserve(u32 hash, int segs) + { +- static DEFINE_SPINLOCK(ip_fb_id_lock); +- static u32 ip_fallback_id; +- u32 salt; ++ struct ip_ident_bucket *bucket = ip_idents + hash % IP_IDENTS_SZ; ++ u32 old = ACCESS_ONCE(bucket->stamp32); ++ u32 now = (u32)jiffies; ++ u32 delta = 0; ++ ++ if (old != now && cmpxchg(&bucket->stamp32, old, now) == old) ++ delta = prandom_u32_max(now - old); + +- spin_lock_bh(&ip_fb_id_lock); +- salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr); +- iph->id = htons(salt & 0xFFFF); +- ip_fallback_id = salt; +- spin_unlock_bh(&ip_fb_id_lock); ++ return atomic_add_return(segs + delta, &bucket->id) - segs; + } ++EXPORT_SYMBOL(ip_idents_reserve); + +-void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more) ++void __ip_select_ident(struct iphdr *iph, int segs) + { +- struct net *net = dev_net(dst->dev); +- struct inet_peer *peer; ++ static u32 ip_idents_hashrnd __read_mostly; ++ u32 hash, id; + +- peer = inet_getpeer_v4(net->ipv4.peers, iph->daddr, 1); +- if (peer) { +- iph->id = htons(inet_getid(peer, more)); +- inet_putpeer(peer); +- return; +- } ++ net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd)); + +- ip_select_fb_ident(iph); ++ hash = jhash_3words((__force u32)iph->daddr, ++ (__force u32)iph->saddr, ++ iph->protocol, ++ ip_idents_hashrnd); ++ id = ip_idents_reserve(hash, segs); ++ iph->id = htons(id); + } + EXPORT_SYMBOL(__ip_select_ident); + +@@ -2718,6 +2725,12 @@ int __init ip_rt_init(void) + { + int rc = 0; + ++ ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL); ++ if (!ip_idents) ++ panic("IP: failed to allocate ip_idents\n"); ++ ++ prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents)); ++ + #ifdef CONFIG_IP_ROUTE_CLASSID + ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct)); + if (!ip_rt_acct) +diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c +index 06cae62bf208..6b1a5fd60598 100644 +--- a/net/ipv4/tcp_vegas.c ++++ b/net/ipv4/tcp_vegas.c +@@ -219,7 +219,8 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked, + * This is: + * (actual rate in segments) * baseRTT + */ +- target_cwnd = tp->snd_cwnd * vegas->baseRTT / rtt; ++ target_cwnd = (u64)tp->snd_cwnd * vegas->baseRTT; ++ do_div(target_cwnd, rtt); + + /* Calculate the difference between the window we had, + * and the window we would like to have. 
This quantity +diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c +index 326475a94865..603ad498e18a 100644 +--- a/net/ipv4/tcp_veno.c ++++ b/net/ipv4/tcp_veno.c +@@ -145,7 +145,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked, + + rtt = veno->minrtt; + +- target_cwnd = (tp->snd_cwnd * veno->basertt); ++ target_cwnd = (u64)tp->snd_cwnd * veno->basertt; + target_cwnd <<= V_PARAM_SHIFT; + do_div(target_cwnd, rtt); + +diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c +index 31b18152528f..1f564a1487a3 100644 +--- a/net/ipv4/xfrm4_mode_tunnel.c ++++ b/net/ipv4/xfrm4_mode_tunnel.c +@@ -117,12 +117,12 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) + + top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ? + 0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF)); +- ip_select_ident(skb, dst->child, NULL); + + top_iph->ttl = ip4_dst_hoplimit(dst->child); + + top_iph->saddr = x->props.saddr.a4; + top_iph->daddr = x->id.daddr.a4; ++ ip_select_ident(skb, NULL); + + return 0; + } +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c +index a62b610307ec..073e5a6fc631 100644 +--- a/net/ipv6/ip6_output.c ++++ b/net/ipv6/ip6_output.c +@@ -537,6 +537,20 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from) + skb_copy_secmark(to, from); + } + ++static void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt) ++{ ++ static u32 ip6_idents_hashrnd __read_mostly; ++ u32 hash, id; ++ ++ net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd)); ++ ++ hash = __ipv6_addr_jhash(&rt->rt6i_dst.addr, ip6_idents_hashrnd); ++ hash = __ipv6_addr_jhash(&rt->rt6i_src.addr, hash); ++ ++ id = ip_idents_reserve(hash, 1); ++ fhdr->identification = htonl(id); ++} ++ + int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) + { + struct sk_buff *frag; +diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c +index b31a01263185..798eb0f79078 100644 +--- a/net/ipv6/output_core.c ++++ b/net/ipv6/output_core.c +@@ -7,29 +7,6 @@ + #include <net/ip6_fib.h> + #include <net/addrconf.h> + +-void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt) +-{ +- static atomic_t ipv6_fragmentation_id; +- int ident; +- +-#if IS_ENABLED(CONFIG_IPV6) +- if (rt && !(rt->dst.flags & DST_NOPEER)) { +- struct inet_peer *peer; +- struct net *net; +- +- net = dev_net(rt->dst.dev); +- peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1); +- if (peer) { +- fhdr->identification = htonl(inet_getid(peer, 0)); +- inet_putpeer(peer); +- return; +- } +- } +-#endif +- ident = atomic_inc_return(&ipv6_fragmentation_id); +- fhdr->identification = htonl(ident); +-} +-EXPORT_SYMBOL(ipv6_select_ident); + + int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) + { +diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c +index c47444e4cf8c..7f0e1cf2d7e8 100644 +--- a/net/netfilter/ipvs/ip_vs_xmit.c ++++ b/net/netfilter/ipvs/ip_vs_xmit.c +@@ -883,7 +883,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, + iph->daddr = cp->daddr.ip; + iph->saddr = saddr; + iph->ttl = old_iph->ttl; +- ip_select_ident(skb, &rt->dst, NULL); ++ ip_select_ident(skb, NULL); + + /* Another hack: avoid icmp_send in ip_fragment */ + skb->local_df = 1; +diff --git a/net/sctp/associola.c b/net/sctp/associola.c +index a4d570126f5d..5d97d8fe4be7 100644 +--- a/net/sctp/associola.c ++++ b/net/sctp/associola.c +@@ -1151,6 +1151,7 @@ void sctp_assoc_update(struct sctp_association *asoc, + asoc->c = 
new->c; + asoc->peer.rwnd = new->peer.rwnd; + asoc->peer.sack_needed = new->peer.sack_needed; ++ asoc->peer.auth_capable = new->peer.auth_capable; + asoc->peer.i = new->peer.i; + sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL, + asoc->peer.i.initial_tsn, GFP_ATOMIC); +diff --git a/net/sctp/output.c b/net/sctp/output.c +index 0f4d15fc2627..8267b06c3646 100644 +--- a/net/sctp/output.c ++++ b/net/sctp/output.c +@@ -599,7 +599,7 @@ out: + return err; + no_route: + kfree_skb(nskb); +- IP_INC_STATS_BH(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES); ++ IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES); + + /* FIXME: Returning the 'err' will effect all the associations + * associated with a socket, although only one of the paths of the +diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c +index b10d04fa3933..3bea4ddc699d 100644 +--- a/net/xfrm/xfrm_user.c ++++ b/net/xfrm/xfrm_user.c +@@ -176,9 +176,7 @@ static int verify_newsa_info(struct xfrm_usersa_info *p, + attrs[XFRMA_ALG_AEAD] || + attrs[XFRMA_ALG_CRYPT] || + attrs[XFRMA_ALG_COMP] || +- attrs[XFRMA_TFCPAD] || +- (ntohl(p->id.spi) >= 0x10000)) +- ++ attrs[XFRMA_TFCPAD]) + goto out; + break; + +@@ -206,7 +204,8 @@ static int verify_newsa_info(struct xfrm_usersa_info *p, + attrs[XFRMA_ALG_AUTH] || + attrs[XFRMA_ALG_AUTH_TRUNC] || + attrs[XFRMA_ALG_CRYPT] || +- attrs[XFRMA_TFCPAD]) ++ attrs[XFRMA_TFCPAD] || ++ (ntohl(p->id.spi) >= 0x10000)) + goto out; + break; +
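
Note: the identifier rework in this patch replaces the per-peer inet_getid() counters with a shared array of hashed buckets. The following userspace sketch is an illustration only, not the kernel source: time() stands in for jiffies, rand() for prandom_u32_max(), and C11 atomics for the kernel's atomic_t. It mirrors the reservation scheme the patch adds to net/ipv4/route.c.

#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>
#include <time.h>

#define IP_IDENTS_SZ 2048u

struct ip_ident_bucket {
	_Atomic uint32_t id;      /* running identifier counter */
	_Atomic uint32_t stamp32; /* coarse timestamp of last reservation */
};

static struct ip_ident_bucket ip_idents[IP_IDENTS_SZ];

/* Reserve 'segs' consecutive IDs from the bucket picked by 'hash' and
 * return the first one. A bucket that has been idle first jumps ahead
 * by a random amount, so an observer probing two IDs cannot infer how
 * many packets were sent between the probes. */
static uint32_t ip_idents_reserve(uint32_t hash, uint32_t segs)
{
	struct ip_ident_bucket *bucket = &ip_idents[hash % IP_IDENTS_SZ];
	uint32_t old = atomic_load(&bucket->stamp32);
	uint32_t now = (uint32_t)time(NULL);   /* stands in for jiffies */
	uint32_t delta = 0;

	if (old != now &&
	    atomic_compare_exchange_strong(&bucket->stamp32, &old, now))
		delta = (uint32_t)rand() % (now - old); /* ~prandom_u32_max() */

	/* Same result as atomic_add_return(segs + delta, &id) - segs:
	 * the caller gets old_id + delta and owns the next 'segs' values. */
	return atomic_fetch_add(&bucket->id, segs + delta) + delta;
}

The IPv4 path feeds jhash_3words(daddr, saddr, protocol, seed) into this reservoir, with segs > 1 for GSO bursts; the new static ipv6_select_ident() in net/ipv6/ip6_output.c reuses the same reservoir with a hash of the route's source and destination addresses, replacing the predictable global counter deleted from net/ipv6/output_core.c.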
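
Note: the tcp_vegas and tcp_veno hunks both widen the snd_cwnd * baseRTT product to 64 bits before the do_div(). A toy calculation (hypothetical values, chosen only to force the wrap) shows why the old 32-bit expression could overflow once the product exceeds 2^32, a limit the V_PARAM_SHIFT left-shift in veno makes even easier to reach.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t snd_cwnd = 90000; /* segments, hypothetical */
	uint32_t basertt  = 60000; /* usecs, hypothetical */

	/* 90000 * 60000 = 5.4e9 > UINT32_MAX, so this wraps */
	uint32_t wrapped = snd_cwnd * basertt;
	/* the patched form: widen one operand before multiplying */
	uint64_t widened = (uint64_t)snd_cwnd * basertt;

	printf("wrapped=%u widened=%llu\n",
	       (unsigned)wrapped, (unsigned long long)widened);
	return 0;
}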
new->c; + asoc->peer.rwnd = new->peer.rwnd; + asoc->peer.sack_needed = new->peer.sack_needed; ++ asoc->peer.auth_capable = new->peer.auth_capable; + asoc->peer.i = new->peer.i; + sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL, + asoc->peer.i.initial_tsn, GFP_ATOMIC); +diff --git a/net/sctp/output.c b/net/sctp/output.c +index 0f4d15fc2627..8267b06c3646 100644 +--- a/net/sctp/output.c ++++ b/net/sctp/output.c +@@ -599,7 +599,7 @@ out: + return err; + no_route: + kfree_skb(nskb); +- IP_INC_STATS_BH(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES); ++ IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES); + + /* FIXME: Returning the 'err' will effect all the associations + * associated with a socket, although only one of the paths of the +diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c +index b10d04fa3933..3bea4ddc699d 100644 +--- a/net/xfrm/xfrm_user.c ++++ b/net/xfrm/xfrm_user.c +@@ -176,9 +176,7 @@ static int verify_newsa_info(struct xfrm_usersa_info *p, + attrs[XFRMA_ALG_AEAD] || + attrs[XFRMA_ALG_CRYPT] || + attrs[XFRMA_ALG_COMP] || +- attrs[XFRMA_TFCPAD] || +- (ntohl(p->id.spi) >= 0x10000)) +- ++ attrs[XFRMA_TFCPAD]) + goto out; + break; + +@@ -206,7 +204,8 @@ static int verify_newsa_info(struct xfrm_usersa_info *p, + attrs[XFRMA_ALG_AUTH] || + attrs[XFRMA_ALG_AUTH_TRUNC] || + attrs[XFRMA_ALG_CRYPT] || +- attrs[XFRMA_TFCPAD]) ++ attrs[XFRMA_TFCPAD] || ++ (ntohl(p->id.spi) >= 0x10000)) + goto out; + break; +
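
The largest change in this patch replaces the inet_peer-based IP identifier
generator in net/ipv4/route.c with a fixed array of hashed, perturbed
counters. What follows is a minimal userspace sketch of that scheme, not the
kernel code: IDENTS_SZ, struct ident_bucket, now_ticks() and the rand()-based
perturbation are illustrative stand-ins for IP_IDENTS_SZ, struct
ip_ident_bucket, jiffies and prandom_u32_max().

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define IDENTS_SZ 2048u

struct ident_bucket {
	atomic_uint id;		/* running IP-ID counter */
	atomic_uint stamp;	/* last tick this bucket was used */
};

static struct ident_bucket idents[IDENTS_SZ];

static unsigned int now_ticks(void)
{
	return (unsigned int)time(NULL);	/* stand-in for jiffies */
}

/* Reserve 'segs' consecutive IDs from the bucket picked by 'hash'.
 * If the bucket has been idle, skip ahead by a random amount bounded
 * by the idle time before handing out the range.
 */
static uint32_t idents_reserve(uint32_t hash, unsigned int segs)
{
	struct ident_bucket *b = &idents[hash % IDENTS_SZ];
	unsigned int old = atomic_load(&b->stamp);
	unsigned int now = now_ticks();
	unsigned int delta = 0;

	if (old != now &&
	    atomic_compare_exchange_strong(&b->stamp, &old, now))
		delta = (unsigned int)rand() % (now - old);

	/* return the first ID of the range; the counter ends past it */
	return atomic_fetch_add(&b->id, segs + delta) + delta;
}

int main(void)
{
	/* 0x12345678 stands in for jhash_3words(daddr, saddr, proto, rnd) */
	uint32_t hash = 0x12345678u;

	srand((unsigned int)time(NULL));
	printf("single packet: id=%u\n", (unsigned)idents_reserve(hash, 1));
	printf("3-segment GSO: id=%u\n", (unsigned)idents_reserve(hash, 3));
	return 0;
}

The perturbation is the privacy point of the hunk: when a bucket has sat
idle, its counter jumps forward by a random amount bounded by the idle time,
so sampling the ID field of two packets no longer tells an observer how many
packets the host sent in between.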
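
The tcp_vegas.c and tcp_veno.c hunks fix the same class of bug: a 32-bit
multiplication of snd_cwnd by a base RTT that can wrap before the division
ever happens. A compact standalone demonstration follows; the values are made
up purely to force the overflow, and the kernel performs the division with
do_div() because plain 64-bit division is not available in 32-bit kernel
code.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t snd_cwnd = 10000;	/* segments (illustrative) */
	uint32_t base_rtt = 500000;	/* usec (illustrative) */
	uint32_t rtt      = 520000;	/* usec */

	/* old code: the 32-bit product 10000 * 500000 wraps past 2^32 */
	uint32_t wrapped = snd_cwnd * base_rtt / rtt;

	/* new code: widen first, divide after (the kernel uses do_div) */
	uint64_t widened = (uint64_t)snd_cwnd * base_rtt;
	widened /= rtt;

	printf("wrapped: %u  correct: %llu\n",
	       (unsigned)wrapped, (unsigned long long)widened);
	return 0;
}

With these inputs the 32-bit product wraps to roughly a fifth of its true
value, so the old code would compute a target window far smaller than
intended, which is exactly the miscalculation the two hunks eliminate.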
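
On the IPv6 side, ipv6_select_ident() moves out of net/ipv6/output_core.c and
is rebuilt as a static helper in ip6_output.c on top of the same reservation
pool, chaining the source-address hash into the destination-address hash. The
sketch below is meant to be appended to the first sketch above (it reuses its
idents_reserve()); ipv6_addr_jhash() is an illustrative stand-in for the
kernel's __ipv6_addr_jhash(), which mixes with jhash2().

static uint32_t ipv6_addr_jhash(const uint8_t addr[16], uint32_t initval)
{
	uint32_t h = initval;

	for (int i = 0; i < 16; i++)	/* toy mixing; kernel uses jhash2() */
		h = h * 31 + addr[i];
	return h;
}

static uint32_t ipv6_frag_id(const uint8_t dst[16], const uint8_t src[16],
			     uint32_t hashrnd)
{
	uint32_t hash = ipv6_addr_jhash(dst, hashrnd);

	hash = ipv6_addr_jhash(src, hash);	/* chain src into dst hash */
	return idents_reserve(hash, 1);		/* one ID per fragment chain */
}

In the patch itself the random seed is initialized once via
net_get_random_once(), which is why the kernel helper keeps ip6_idents_hashrnd
as a function-local static.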