* [gentoo-commits] proj/hardened-patchset:master commit in: 3.14.20/, 3.16.3/, 3.16.4/, 3.14.19/, 3.2.63/
@ 2014-10-07 20:30 Anthony G. Basile
From: Anthony G. Basile @ 2014-10-07 20:30 UTC (permalink / raw)
To: gentoo-commits
commit: 42f60911eb39ea29cc7fd044f75a7a53c39cb744
Author: Anthony G. Basile <blueness <AT> gentoo <DOT> org>
AuthorDate: Tue Oct 7 20:30:36 2014 +0000
Commit: Anthony G. Basile <blueness <AT> gentoo <DOT> org>
CommitDate: Tue Oct 7 20:30:36 2014 +0000
URL: http://sources.gentoo.org/gitweb/?p=proj/hardened-patchset.git;a=commit;h=42f60911
Grsec/PaX: 3.0-{3.2.63,3.14.20,3.16.4}-201410062041
---
{3.14.19 => 3.14.20}/0000_README | 2 +-
.../4420_grsecurity-3.0-3.14.20-201410062037.patch | 1077 ++++-------
.../4425_grsec_remove_EI_PAX.patch | 0
.../4427_force_XATTR_PAX_tmpfs.patch | 0
.../4430_grsec-remove-localversion-grsec.patch | 0
.../4435_grsec-mute-warnings.patch | 0
.../4440_grsec-remove-protected-paths.patch | 0
.../4450_grsec-kconfig-default-gids.patch | 0
.../4465_selinux-avc_audit-log-curr_ip.patch | 0
.../4470_disable-compat_vdso.patch | 0
.../4475_emutramp_default_on.patch | 0
{3.16.3 => 3.16.4}/0000_README | 2 +-
.../4420_grsecurity-3.0-3.16.4-201410062041.patch | 1883 ++++++--------------
{3.16.3 => 3.16.4}/4425_grsec_remove_EI_PAX.patch | 0
.../4427_force_XATTR_PAX_tmpfs.patch | 0
.../4430_grsec-remove-localversion-grsec.patch | 0
{3.16.3 => 3.16.4}/4435_grsec-mute-warnings.patch | 0
.../4440_grsec-remove-protected-paths.patch | 0
.../4450_grsec-kconfig-default-gids.patch | 0
.../4465_selinux-avc_audit-log-curr_ip.patch | 0
{3.16.3 => 3.16.4}/4470_disable-compat_vdso.patch | 0
{3.16.3 => 3.16.4}/4475_emutramp_default_on.patch | 0
3.2.63/0000_README | 2 +-
... 4420_grsecurity-3.0-3.2.63-201410062032.patch} | 142 +-
24 files changed, 964 insertions(+), 2144 deletions(-)
diff --git a/3.14.19/0000_README b/3.14.20/0000_README
similarity index 96%
rename from 3.14.19/0000_README
rename to 3.14.20/0000_README
index 56f5a2f..ed0f1e3 100644
--- a/3.14.19/0000_README
+++ b/3.14.20/0000_README
@@ -2,7 +2,7 @@ README
-----------------------------------------------------------------------------
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 4420_grsecurity-3.0-3.14.19-201409282024.patch
+Patch: 4420_grsecurity-3.0-3.14.20-201410062037.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.14.19/4420_grsecurity-3.0-3.14.19-201409282024.patch b/3.14.20/4420_grsecurity-3.0-3.14.20-201410062037.patch
similarity index 99%
rename from 3.14.19/4420_grsecurity-3.0-3.14.19-201409282024.patch
rename to 3.14.20/4420_grsecurity-3.0-3.14.20-201410062037.patch
index 6d97454..07a0783 100644
--- a/3.14.19/4420_grsecurity-3.0-3.14.19-201409282024.patch
+++ b/3.14.20/4420_grsecurity-3.0-3.14.20-201410062037.patch
@@ -287,7 +287,7 @@ index 7116fda..d8ed6e8 100644
pcd. [PARIDE]
diff --git a/Makefile b/Makefile
-index b1746b4..35b5438 100644
+index beb7e6f..70db31f 100644
--- a/Makefile
+++ b/Makefile
@@ -244,8 +244,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -2167,94 +2167,27 @@ index 71a06b2..8bb9ae1 100644
/*
* Change these and you break ASM code in entry-common.S
diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
-index 83259b8..8c7e01d 100644
+index 5f833f7..76e6644 100644
--- a/arch/arm/include/asm/tls.h
+++ b/arch/arm/include/asm/tls.h
-@@ -1,6 +1,9 @@
- #ifndef __ASMARM_TLS_H
- #define __ASMARM_TLS_H
+@@ -3,6 +3,7 @@
+
+ #include <linux/compiler.h>
+ #include <asm/thread_info.h>
++#include <asm/pgtable.h>
-+#include <linux/compiler.h>
-+#include <asm/thread_info.h>
-+
#ifdef __ASSEMBLY__
#include <asm/asm-offsets.h>
- .macro switch_tls_none, base, tp, tpuser, tmp1, tmp2
-@@ -50,6 +53,50 @@
- #endif
-
- #ifndef __ASSEMBLY__
-+#include <asm/pgtable.h>
-+
-+static inline void set_tls(unsigned long val)
-+{
-+ struct thread_info *thread;
-+
-+ thread = current_thread_info();
-+
-+ thread->tp_value[0] = val;
-+
-+ /*
-+ * This code runs with preemption enabled and therefore must
-+ * be reentrant with respect to switch_tls.
-+ *
-+ * We need to ensure ordering between the shadow state and the
-+ * hardware state, so that we don't corrupt the hardware state
-+ * with a stale shadow state during context switch.
-+ *
-+ * If we're preempted here, switch_tls will load TPIDRURO from
-+ * thread_info upon resuming execution and the following mcr
-+ * is merely redundant.
-+ */
-+ barrier();
-+
-+ if (!tls_emu) {
-+ if (has_tls_reg) {
-+ asm("mcr p15, 0, %0, c13, c0, 3"
-+ : : "r" (val));
-+ } else {
-+ /*
-+ * User space must never try to access this
-+ * directly. Expect your app to break
-+ * eventually if you do so. The user helper
-+ * at 0xffff0fe0 must be used instead. (see
-+ * entry-armv.S for details)
-+ */
+@@ -89,7 +90,9 @@ static inline void set_tls(unsigned long val)
+ * at 0xffff0fe0 must be used instead. (see
+ * entry-armv.S for details)
+ */
+ pax_open_kernel();
-+ *((unsigned int *)0xffff0ff0) = val;
+ *((unsigned int *)0xffff0ff0) = val;
+ pax_close_kernel();
-+ }
-+
-+ }
-+}
-+
- static inline unsigned long get_tpuser(void)
- {
- unsigned long reg = 0;
-@@ -59,5 +106,23 @@ static inline unsigned long get_tpuser(void)
-
- return reg;
- }
-+
-+static inline void set_tpuser(unsigned long val)
-+{
-+ /* Since TPIDRURW is fully context-switched (unlike TPIDRURO),
-+ * we need not update thread_info.
-+ */
-+ if (has_tls_reg && !tls_emu) {
-+ asm("mcr p15, 0, %0, c13, c0, 2"
-+ : : "r" (val));
-+ }
-+}
-+
-+static inline void flush_tls(void)
-+{
-+ set_tls(0);
-+ set_tpuser(0);
-+}
-+
#endif
- #endif /* __ASMARM_TLS_H */
+ }
+
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 7f3f3cc..bdf0665 100644
--- a/arch/arm/include/asm/uaccess.h
@@ -2930,7 +2863,7 @@ index 07314af..c46655c 100644
flush_icache_range((uintptr_t)(addr),
(uintptr_t)(addr) + size);
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
-index 92f7b15..b5e6630 100644
+index 5f6e650..b5e6630 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -217,6 +217,7 @@ void machine_power_off(void)
@@ -2961,16 +2894,7 @@ index 92f7b15..b5e6630 100644
printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
"sp : %08lx ip : %08lx fp : %08lx\n",
regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
-@@ -334,6 +335,8 @@ void flush_thread(void)
- memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
- memset(&thread->fpstate, 0, sizeof(union fp_state));
-
-+ flush_tls();
-+
- thread_notify(THREAD_NOTIFY_FLUSH, thread);
- }
-
-@@ -425,12 +428,6 @@ unsigned long get_wchan(struct task_struct *p)
+@@ -427,12 +428,6 @@ unsigned long get_wchan(struct task_struct *p)
return 0;
}
@@ -2983,7 +2907,7 @@ index 92f7b15..b5e6630 100644
#ifdef CONFIG_MMU
#ifdef CONFIG_KUSER_HELPERS
/*
-@@ -446,7 +443,7 @@ static struct vm_area_struct gate_vma = {
+@@ -448,7 +443,7 @@ static struct vm_area_struct gate_vma = {
static int __init gate_vma_init(void)
{
@@ -2992,7 +2916,7 @@ index 92f7b15..b5e6630 100644
return 0;
}
arch_initcall(gate_vma_init);
-@@ -472,41 +469,16 @@ int in_gate_area_no_mm(unsigned long addr)
+@@ -474,41 +469,16 @@ int in_gate_area_no_mm(unsigned long addr)
const char *arch_vma_name(struct vm_area_struct *vma)
{
@@ -3237,21 +3161,8 @@ index 7a3be1d..b00c7de 100644
pr_debug("CPU ITCM: copied code from %p - %p\n",
start, end);
itcm_present = true;
-diff --git a/arch/arm/kernel/thumbee.c b/arch/arm/kernel/thumbee.c
-index 7b8403b..80f0d69 100644
---- a/arch/arm/kernel/thumbee.c
-+++ b/arch/arm/kernel/thumbee.c
-@@ -45,7 +45,7 @@ static int thumbee_notifier(struct notifier_block *self, unsigned long cmd, void
-
- switch (cmd) {
- case THREAD_NOTIFY_FLUSH:
-- thread->thumbee_state = 0;
-+ teehbr_write(0);
- break;
- case THREAD_NOTIFY_SWITCH:
- current_thread_info()->thumbee_state = teehbr_read();
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
-index 172ee18..381ce44 100644
+index 9265b8b..381ce44 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -62,7 +62,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
@@ -3282,38 +3193,7 @@ index 172ee18..381ce44 100644
if (signr)
do_exit(signr);
}
-@@ -578,7 +583,6 @@ do_cache_op(unsigned long start, unsigned long end, int flags)
- #define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
- asmlinkage int arm_syscall(int no, struct pt_regs *regs)
- {
-- struct thread_info *thread = current_thread_info();
- siginfo_t info;
-
- if ((no >> 16) != (__ARM_NR_BASE>> 16))
-@@ -629,21 +633,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
- return regs->ARM_r0;
-
- case NR(set_tls):
-- thread->tp_value[0] = regs->ARM_r0;
-- if (tls_emu)
-- return 0;
-- if (has_tls_reg) {
-- asm ("mcr p15, 0, %0, c13, c0, 3"
-- : : "r" (regs->ARM_r0));
-- } else {
-- /*
-- * User space must never try to access this directly.
-- * Expect your app to break eventually if you do so.
-- * The user helper at 0xffff0fe0 must be used instead.
-- * (see entry-armv.S for details)
-- */
-- *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
-- }
-+ set_tls(regs->ARM_r0);
- return 0;
-
- #ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG
-@@ -899,7 +889,11 @@ void __init early_trap_init(void *vectors_base)
+@@ -884,7 +889,11 @@ void __init early_trap_init(void *vectors_base)
kuser_init(vectors_base);
flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
@@ -3778,7 +3658,7 @@ index 78c02b3..c94109a 100644
struct omap_device *omap_device_alloc(struct platform_device *pdev,
struct omap_hwmod **ohs, int oh_cnt);
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
-index c914b00..8a653a7 100644
+index 4551efd..d487c24 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -194,10 +194,10 @@ struct omap_hwmod_soc_ops {
@@ -3910,10 +3790,10 @@ index ca8ecde..58ba893 100644
If all of the binaries and libraries which run on your platform
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
-index 9240364..a2b8cf3 100644
+index d301662..a6ef72c 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
-@@ -212,10 +212,12 @@ union offset_union {
+@@ -213,10 +213,12 @@ union offset_union {
#define __get16_unaligned_check(ins,val,addr) \
do { \
unsigned int err = 0, v, a = addr; \
@@ -3926,7 +3806,7 @@ index 9240364..a2b8cf3 100644
if (err) \
goto fault; \
} while (0)
-@@ -229,6 +231,7 @@ union offset_union {
+@@ -230,6 +232,7 @@ union offset_union {
#define __get32_unaligned_check(ins,val,addr) \
do { \
unsigned int err = 0, v, a = addr; \
@@ -3934,7 +3814,7 @@ index 9240364..a2b8cf3 100644
__get8_unaligned_check(ins,v,a,err); \
val = v << ((BE) ? 24 : 0); \
__get8_unaligned_check(ins,v,a,err); \
-@@ -237,6 +240,7 @@ union offset_union {
+@@ -238,6 +241,7 @@ union offset_union {
val |= v << ((BE) ? 8 : 16); \
__get8_unaligned_check(ins,v,a,err); \
val |= v << ((BE) ? 0 : 24); \
@@ -3942,7 +3822,7 @@ index 9240364..a2b8cf3 100644
if (err) \
goto fault; \
} while (0)
-@@ -250,6 +254,7 @@ union offset_union {
+@@ -251,6 +255,7 @@ union offset_union {
#define __put16_unaligned_check(ins,val,addr) \
do { \
unsigned int err = 0, v = val, a = addr; \
@@ -3950,7 +3830,7 @@ index 9240364..a2b8cf3 100644
__asm__( FIRST_BYTE_16 \
ARM( "1: "ins" %1, [%2], #1\n" ) \
THUMB( "1: "ins" %1, [%2]\n" ) \
-@@ -269,6 +274,7 @@ union offset_union {
+@@ -270,6 +275,7 @@ union offset_union {
" .popsection\n" \
: "=r" (err), "=&r" (v), "=&r" (a) \
: "0" (err), "1" (v), "2" (a)); \
@@ -3958,7 +3838,7 @@ index 9240364..a2b8cf3 100644
if (err) \
goto fault; \
} while (0)
-@@ -282,6 +288,7 @@ union offset_union {
+@@ -283,6 +289,7 @@ union offset_union {
#define __put32_unaligned_check(ins,val,addr) \
do { \
unsigned int err = 0, v = val, a = addr; \
@@ -3966,7 +3846,7 @@ index 9240364..a2b8cf3 100644
__asm__( FIRST_BYTE_32 \
ARM( "1: "ins" %1, [%2], #1\n" ) \
THUMB( "1: "ins" %1, [%2]\n" ) \
-@@ -311,6 +318,7 @@ union offset_union {
+@@ -312,6 +319,7 @@ union offset_union {
" .popsection\n" \
: "=r" (err), "=&r" (v), "=&r" (a) \
: "0" (err), "1" (v), "2" (a)); \
@@ -17848,7 +17728,7 @@ index 81bb91b..9392125 100644
/*
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
-index bbc8b12..f228861 100644
+index bbc8b12..a614983 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -45,6 +45,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
@@ -17859,7 +17739,7 @@ index bbc8b12..f228861 100644
#define pgd_clear(pgd) native_pgd_clear(pgd)
#endif
-@@ -82,12 +83,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
+@@ -82,12 +83,53 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
#define arch_end_context_switch(prev) do {} while(0)
@@ -17880,6 +17760,7 @@ index bbc8b12..f228861 100644
+ cr0 = read_cr0() ^ X86_CR0_WP;
+ BUG_ON(cr0 & X86_CR0_WP);
+ write_cr0(cr0);
++ barrier();
+ return cr0 ^ X86_CR0_WP;
+}
+
@@ -17887,6 +17768,7 @@ index bbc8b12..f228861 100644
+{
+ unsigned long cr0;
+
++ barrier();
+ cr0 = read_cr0() ^ X86_CR0_WP;
+ BUG_ON(!(cr0 & X86_CR0_WP));
+ write_cr0(cr0);
@@ -17911,7 +17793,7 @@ index bbc8b12..f228861 100644
static inline int pte_dirty(pte_t pte)
{
return pte_flags(pte) & _PAGE_DIRTY;
-@@ -148,6 +188,11 @@ static inline unsigned long pud_pfn(pud_t pud)
+@@ -148,6 +190,11 @@ static inline unsigned long pud_pfn(pud_t pud)
return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
}
@@ -17923,7 +17805,7 @@ index bbc8b12..f228861 100644
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
static inline int pmd_large(pmd_t pte)
-@@ -201,9 +246,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
+@@ -201,9 +248,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
return pte_clear_flags(pte, _PAGE_RW);
}
@@ -17954,7 +17836,7 @@ index bbc8b12..f228861 100644
}
static inline pte_t pte_mkdirty(pte_t pte)
-@@ -430,6 +495,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
+@@ -430,6 +497,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
#endif
#ifndef __ASSEMBLY__
@@ -17971,7 +17853,7 @@ index bbc8b12..f228861 100644
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>
-@@ -570,7 +645,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
+@@ -570,7 +647,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
* Currently stuck as a macro due to indirect forward reference to
* linux/mmzone.h's __section_mem_map_addr() definition:
*/
@@ -17980,7 +17862,7 @@ index bbc8b12..f228861 100644
/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
-@@ -610,7 +685,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
+@@ -610,7 +687,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
* Currently stuck as a macro due to indirect forward reference to
* linux/mmzone.h's __section_mem_map_addr() definition:
*/
@@ -17989,7 +17871,7 @@ index bbc8b12..f228861 100644
/* to find an entry in a page-table-directory. */
static inline unsigned long pud_index(unsigned long address)
-@@ -625,7 +700,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
+@@ -625,7 +702,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
static inline int pgd_bad(pgd_t pgd)
{
@@ -17998,7 +17880,7 @@ index bbc8b12..f228861 100644
}
static inline int pgd_none(pgd_t pgd)
-@@ -648,7 +723,12 @@ static inline int pgd_none(pgd_t pgd)
+@@ -648,7 +725,12 @@ static inline int pgd_none(pgd_t pgd)
* pgd_offset() returns a (pgd_t *)
* pgd_index() is used get the offset into the pgd page's array of pgd_t's;
*/
@@ -18012,7 +17894,7 @@ index bbc8b12..f228861 100644
/*
* a shortcut which implies the use of the kernel's pgd, instead
* of a process's
-@@ -659,6 +739,23 @@ static inline int pgd_none(pgd_t pgd)
+@@ -659,6 +741,23 @@ static inline int pgd_none(pgd_t pgd)
#define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
@@ -18036,7 +17918,7 @@ index bbc8b12..f228861 100644
#ifndef __ASSEMBLY__
extern int direct_gbpages;
-@@ -825,11 +922,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+@@ -825,11 +924,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
* dst and src can be on the same page, but the range must not overlap,
* and must not cross a page boundary.
*/
@@ -18142,10 +18024,10 @@ index ed5903b..c7fe163 100644
#define MODULES_END VMALLOC_END
#define MODULES_LEN (MODULES_VADDR - MODULES_END)
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
-index e22c1db..82f2923 100644
+index d869931..82f2923 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
-@@ -16,10 +16,15 @@
+@@ -16,11 +16,15 @@
extern pud_t level3_kernel_pgt[512];
extern pud_t level3_ident_pgt[512];
@@ -18156,14 +18038,14 @@ index e22c1db..82f2923 100644
extern pmd_t level2_kernel_pgt[512];
extern pmd_t level2_fixmap_pgt[512];
-extern pmd_t level2_ident_pgt[512];
--extern pgd_t init_level4_pgt[];
+extern pmd_t level2_ident_pgt[512*2];
-+extern pte_t level1_fixmap_pgt[512];
+ extern pte_t level1_fixmap_pgt[512];
+-extern pgd_t init_level4_pgt[];
+extern pgd_t init_level4_pgt[512];
#define swapper_pg_dir init_level4_pgt
-@@ -61,7 +66,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
+@@ -62,7 +66,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
@@ -18173,7 +18055,7 @@ index e22c1db..82f2923 100644
}
static inline void native_pmd_clear(pmd_t *pmd)
-@@ -97,7 +104,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
+@@ -98,7 +104,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
@@ -18183,7 +18065,7 @@ index e22c1db..82f2923 100644
}
static inline void native_pud_clear(pud_t *pud)
-@@ -107,6 +116,13 @@ static inline void native_pud_clear(pud_t *pud)
+@@ -108,6 +116,13 @@ static inline void native_pud_clear(pud_t *pud)
static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
@@ -21792,7 +21674,7 @@ index 1340ebf..fc6d5c9 100644
intel_ds_init();
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
-index 5ad35ad..e0a3960 100644
+index 95700e5..19779f8 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
@@ -425,7 +425,7 @@ static struct attribute *rapl_events_cln_attr[] = {
@@ -27326,7 +27208,7 @@ index 7c3a5a6..f0a8961 100644
.smp_prepare_cpus = native_smp_prepare_cpus,
.smp_cpus_done = native_smp_cpus_done,
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
-index 395be6d..11665af 100644
+index 68287653..3597685 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -229,14 +229,17 @@ static void notrace start_secondary(void *unused)
@@ -35998,7 +35880,7 @@ index 201d09a..e4723e5 100644
#ifdef CONFIG_ACPI_NUMA
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
-index 2423ef0..a5f0379 100644
+index c83da6f..a5f0379 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -379,7 +379,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
@@ -36010,63 +35892,17 @@ index 2423ef0..a5f0379 100644
{
if (val & _PAGE_PRESENT) {
unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
-@@ -1866,12 +1866,11 @@ static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
- *
- * We can construct this by grafting the Xen provided pagetable into
- * head_64.S's preconstructed pagetables. We copy the Xen L2's into
-- * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt. This
-- * means that only the kernel has a physical mapping to start with -
-- * but that's enough to get __va working. We need to fill in the rest
-- * of the physical mapping once some sort of allocator has been set
-- * up.
-- * NOTE: for PVH, the page tables are native.
-+ * level2_ident_pgt, and level2_kernel_pgt. This means that only the
-+ * kernel has a physical mapping to start with - but that's enough to
-+ * get __va working. We need to fill in the rest of the physical
-+ * mapping once some sort of allocator has been set up. NOTE: for
-+ * PVH, the page tables are native.
- */
- void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
- {
-@@ -1902,8 +1901,14 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
- /* L3_i[0] -> level2_ident_pgt */
- convert_pfn_mfn(level3_ident_pgt);
+@@ -1903,6 +1903,9 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
/* L3_k[510] -> level2_kernel_pgt
-- * L3_i[511] -> level2_fixmap_pgt */
-+ * L3_k[511] -> level2_fixmap_pgt */
+ * L3_k[511] -> level2_fixmap_pgt */
convert_pfn_mfn(level3_kernel_pgt);
+ convert_pfn_mfn(level3_vmalloc_start_pgt);
+ convert_pfn_mfn(level3_vmalloc_end_pgt);
+ convert_pfn_mfn(level3_vmemmap_pgt);
-+
-+ /* L3_k[511][506] -> level1_fixmap_pgt */
-+ convert_pfn_mfn(level2_fixmap_pgt);
- }
- /* We get [511][511] and have Xen's version of level2_kernel_pgt */
- l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
-@@ -1913,30 +1918,29 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
- addr[1] = (unsigned long)l3;
- addr[2] = (unsigned long)l2;
- /* Graft it onto L4[272][0]. Note that we creating an aliasing problem:
-- * Both L4[272][0] and L4[511][511] have entries that point to the same
-+ * Both L4[272][0] and L4[511][510] have entries that point to the same
- * L2 (PMD) tables. Meaning that if you modify it in __va space
- * it will be also modified in the __ka space! (But if you just
- * modify the PMD table to point to other PTE's or none, then you
- * are OK - which is what cleanup_highmap does) */
- copy_page(level2_ident_pgt, l2);
-- /* Graft it onto L4[511][511] */
-+ /* Graft it onto L4[511][510] */
- copy_page(level2_kernel_pgt, l2);
-
-- /* Get [511][510] and graft that in level2_fixmap_pgt */
-- l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
-- l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
-- copy_page(level2_fixmap_pgt, l2);
-- /* Note that we don't do anything with level1_fixmap_pgt which
-- * we don't need. */
- if (!xen_feature(XENFEAT_auto_translated_physmap)) {
- /* Make pagetable pieces RO */
+
+ /* L3_k[511][506] -> level1_fixmap_pgt */
+ convert_pfn_mfn(level2_fixmap_pgt);
+@@ -1929,8 +1932,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
@@ -36078,11 +35914,8 @@ index 2423ef0..a5f0379 100644
+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
-+ set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
-
- /* Pin down new L4 */
- pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
-@@ -2123,6 +2127,7 @@ static void __init xen_post_allocator_init(void)
+ set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
+@@ -2120,6 +2127,7 @@ static void __init xen_post_allocator_init(void)
pv_mmu_ops.set_pud = xen_set_pud;
#if PAGETABLE_LEVELS == 4
pv_mmu_ops.set_pgd = xen_set_pgd;
@@ -36090,7 +35923,7 @@ index 2423ef0..a5f0379 100644
#endif
/* This will work as long as patching hasn't happened yet
-@@ -2201,6 +2206,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
+@@ -2198,6 +2206,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
.pud_val = PV_CALLEE_SAVE(xen_pud_val),
.make_pud = PV_CALLEE_SAVE(xen_make_pud),
.set_pgd = xen_set_pgd_hyper,
@@ -36437,10 +36270,10 @@ index a0926a6..b2b14b2 100644
err = -EFAULT;
goto out;
diff --git a/block/genhd.c b/block/genhd.c
-index 791f419..89f21c4 100644
+index e6723bd..703e4ac 100644
--- a/block/genhd.c
+++ b/block/genhd.c
-@@ -467,21 +467,24 @@ static char *bdevt_str(dev_t devt, char *buf)
+@@ -469,21 +469,24 @@ static char *bdevt_str(dev_t devt, char *buf)
/*
* Register device numbers dev..(dev+range-1)
@@ -36748,7 +36581,7 @@ index 36605ab..6ef6d4b 100644
unsigned long timeout_msec)
{
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
-index f761603..3042d5c 100644
+index 538574f..4344396 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -98,7 +98,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
@@ -39568,10 +39401,10 @@ index 18448a7..d5fad43 100644
/* Force all MSRs to the same value */
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
-index 153f4b9..d47054a 100644
+index 4159236..b850472 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
-@@ -1972,7 +1972,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
+@@ -1974,7 +1974,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
#endif
mutex_lock(&cpufreq_governor_mutex);
@@ -39580,7 +39413,7 @@ index 153f4b9..d47054a 100644
mutex_unlock(&cpufreq_governor_mutex);
return;
}
-@@ -2202,7 +2202,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
+@@ -2204,7 +2204,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
@@ -39589,7 +39422,7 @@ index 153f4b9..d47054a 100644
.notifier_call = cpufreq_cpu_callback,
};
-@@ -2242,13 +2242,17 @@ int cpufreq_boost_trigger_state(int state)
+@@ -2244,13 +2244,17 @@ int cpufreq_boost_trigger_state(int state)
return 0;
write_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -39609,7 +39442,7 @@ index 153f4b9..d47054a 100644
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
pr_err("%s: Cannot %s BOOST\n", __func__,
-@@ -2302,8 +2306,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
+@@ -2304,8 +2308,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
pr_debug("trying to register driver %s\n", driver_data->name);
@@ -39623,7 +39456,7 @@ index 153f4b9..d47054a 100644
write_lock_irqsave(&cpufreq_driver_lock, flags);
if (cpufreq_driver) {
-@@ -2318,8 +2325,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
+@@ -2320,8 +2327,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
* Check if driver provides function to enable boost -
* if not, use cpufreq_boost_set_sw as default
*/
@@ -41246,7 +41079,7 @@ index d45d50d..72a5dd2 100644
int
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
-index 471347e..5adc6b9d 100644
+index a92fb01..35e0602 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -67,7 +67,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
@@ -41608,7 +41441,7 @@ index 4a85bb6..aaea819 100644
if (regcomp
(&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
-index 0bf6f4a..18e2437 100644
+index e39026c..b32e98e 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1128,7 +1128,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
@@ -41828,22 +41661,6 @@ index dbc2def..0a9f710 100644
if (unlikely(ret != 0)) {
kobject_put(&zone->kobj);
return ret;
-diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
-index 863bef9..cba15cf 100644
---- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
-+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
-@@ -391,9 +391,9 @@ out:
- static unsigned long
- ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
- {
-- static atomic_t start_pool = ATOMIC_INIT(0);
-+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
- unsigned i;
-- unsigned pool_offset = atomic_add_return(1, &start_pool);
-+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
- struct ttm_page_pool *pool;
- int shrink_pages = sc->nr_to_scan;
- unsigned long freed = 0;
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index dbadd49..1b7457b 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
@@ -41966,7 +41783,7 @@ index 0783155..b29e18e 100644
wait_queue_head_t fifo_queue;
int fence_queue_waiters; /* Protected by hw_mutex */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
-index 6ccd993..618d592 100644
+index 6eae14d..aa311b3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -154,7 +154,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
@@ -41978,7 +41795,7 @@ index 6ccd993..618d592 100644
iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
vmw_marker_queue_init(&fifo->marker_queue);
return vmw_fifo_send_fence(dev_priv, &dummy);
-@@ -372,7 +372,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
+@@ -373,7 +373,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
if (reserveable)
iowrite32(bytes, fifo_mem +
SVGA_FIFO_RESERVED);
@@ -41987,7 +41804,7 @@ index 6ccd993..618d592 100644
} else {
need_bounce = true;
}
-@@ -492,7 +492,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
+@@ -493,7 +493,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
fm = vmw_fifo_reserve(dev_priv, bytes);
if (unlikely(fm == NULL)) {
@@ -41996,7 +41813,7 @@ index 6ccd993..618d592 100644
ret = -ENOMEM;
(void)vmw_fallback_wait(dev_priv, false, true, *seqno,
false, 3*HZ);
-@@ -500,7 +500,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
+@@ -501,7 +501,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
}
do {
@@ -42082,7 +41899,7 @@ index 8a8725c2..afed796 100644
marker = list_first_entry(&queue->head,
struct vmw_marker, head);
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
-index 6866448..2ad2b34 100644
+index 37ac7b5..d52a5c9 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -644,7 +644,7 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
@@ -42094,7 +41911,7 @@ index 6866448..2ad2b34 100644
{
/* copy over all the bus versions */
if (dev->bus && dev->bus->pm) {
-@@ -689,7 +689,7 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
+@@ -695,7 +695,7 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
return ret;
}
@@ -42125,51 +41942,6 @@ index 7cd42ea..a367c48 100644
hid_debug_register(hdev, dev_name(&hdev->dev));
ret = device_add(&hdev->dev);
-diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
-index 3b43d1c..991ba79 100644
---- a/drivers/hid/hid-magicmouse.c
-+++ b/drivers/hid/hid-magicmouse.c
-@@ -290,6 +290,11 @@ static int magicmouse_raw_event(struct hid_device *hdev,
- if (size < 4 || ((size - 4) % 9) != 0)
- return 0;
- npoints = (size - 4) / 9;
-+ if (npoints > 15) {
-+ hid_warn(hdev, "invalid size value (%d) for TRACKPAD_REPORT_ID\n",
-+ size);
-+ return 0;
-+ }
- msc->ntouches = 0;
- for (ii = 0; ii < npoints; ii++)
- magicmouse_emit_touch(msc, ii, data + ii * 9 + 4);
-@@ -307,6 +312,11 @@ static int magicmouse_raw_event(struct hid_device *hdev,
- if (size < 6 || ((size - 6) % 8) != 0)
- return 0;
- npoints = (size - 6) / 8;
-+ if (npoints > 15) {
-+ hid_warn(hdev, "invalid size value (%d) for MOUSE_REPORT_ID\n",
-+ size);
-+ return 0;
-+ }
- msc->ntouches = 0;
- for (ii = 0; ii < npoints; ii++)
- magicmouse_emit_touch(msc, ii, data + ii * 8 + 6);
-diff --git a/drivers/hid/hid-picolcd_core.c b/drivers/hid/hid-picolcd_core.c
-index acbb0210..020df3c 100644
---- a/drivers/hid/hid-picolcd_core.c
-+++ b/drivers/hid/hid-picolcd_core.c
-@@ -350,6 +350,12 @@ static int picolcd_raw_event(struct hid_device *hdev,
- if (!data)
- return 1;
-
-+ if (size > 64) {
-+ hid_warn(hdev, "invalid size value (%d) for picolcd raw event\n",
-+ size);
-+ return 0;
-+ }
-+
- if (report->id == REPORT_KEY_STATE) {
- if (data->input_keys)
- ret = picolcd_raw_keypad(data, report, raw_data+1, size-1);
diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
index c13fb5b..55a3802 100644
--- a/drivers/hid/hid-wiimote-debug.c
@@ -44879,10 +44651,10 @@ index 3e6d115..ffecdeb 100644
/*----------------------------------------------------------------*/
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
-index d7690f8..3db9ef1 100644
+index 55de4f6..b1c57fe 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
-@@ -1931,7 +1931,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
+@@ -1936,7 +1936,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
if (r1_sync_page_io(rdev, sect, s,
bio->bi_io_vec[idx].bv_page,
READ) != 0)
@@ -44891,8 +44663,8 @@ index d7690f8..3db9ef1 100644
}
sectors -= s;
sect += s;
-@@ -2165,7 +2165,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
- test_bit(In_sync, &rdev->flags)) {
+@@ -2170,7 +2170,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
+ !test_bit(Faulty, &rdev->flags)) {
if (r1_sync_page_io(rdev, sect, s,
conf->tmppage, READ)) {
- atomic_add(s, &rdev->corrected_errors);
@@ -50511,32 +50283,6 @@ index 1b3a094..068e683 100644
}
}
EXPORT_SYMBOL(fc_exch_update_stats);
-diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
-index 4046241..4549986 100644
---- a/drivers/scsi/libiscsi.c
-+++ b/drivers/scsi/libiscsi.c
-@@ -717,11 +717,21 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
- return NULL;
- }
-
-+ if (data_size > ISCSI_DEF_MAX_RECV_SEG_LEN) {
-+ iscsi_conn_printk(KERN_ERR, conn, "Invalid buffer len of %u for login task. Max len is %u\n", data_size, ISCSI_DEF_MAX_RECV_SEG_LEN);
-+ return NULL;
-+ }
-+
- task = conn->login_task;
- } else {
- if (session->state != ISCSI_STATE_LOGGED_IN)
- return NULL;
-
-+ if (data_size != 0) {
-+ iscsi_conn_printk(KERN_ERR, conn, "Can not send data buffer of len %u for op 0x%x\n", data_size, opcode);
-+ return NULL;
-+ }
-+
- BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
- BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
-
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index d289583..b745eec 100644
--- a/drivers/scsi/libsas/sas_ata.c
@@ -53439,7 +53185,7 @@ index 2518c32..1c201bb 100644
wake_up(&usb_kill_urb_queue);
usb_put_urb(urb);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
-index 6650df7..3a94427 100644
+index 263612c..dbc0f3d 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -27,6 +27,7 @@
@@ -57453,10 +57199,10 @@ index ce25d75..dc09eeb 100644
&data);
if (!inode) {
diff --git a/fs/aio.c b/fs/aio.c
-index 6d68e01..6bc8e9a 100644
+index f45ddaa..0160abc 100644
--- a/fs/aio.c
+++ b/fs/aio.c
-@@ -380,7 +380,7 @@ static int aio_setup_ring(struct kioctx *ctx)
+@@ -381,7 +381,7 @@ static int aio_setup_ring(struct kioctx *ctx)
size += sizeof(struct io_event) * nr_events;
nr_pages = PFN_UP(size);
@@ -57465,19 +57211,6 @@ index 6d68e01..6bc8e9a 100644
return -EINVAL;
file = aio_private_file(ctx, nr_pages);
-@@ -1065,6 +1065,12 @@ static long aio_read_events_ring(struct kioctx *ctx,
- tail = ring->tail;
- kunmap_atomic(ring);
-
-+ /*
-+ * Ensure that once we've read the current tail pointer, that
-+ * we also see the events that were stored up to the tail.
-+ */
-+ smp_rmb();
-+
- pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);
-
- if (head == tail)
diff --git a/fs/attr.c b/fs/attr.c
index 6530ced..4a827e2 100644
--- a/fs/attr.c
@@ -58782,30 +58515,10 @@ index ff286f3..8153a14 100644
.attrs = attrs,
};
diff --git a/fs/buffer.c b/fs/buffer.c
-index 27265a8..8673b7b 100644
+index 71e2d0e..8673b7b 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
-@@ -1029,7 +1029,8 @@ grow_dev_page(struct block_device *bdev, sector_t block,
- bh = page_buffers(page);
- if (bh->b_size == size) {
- end_block = init_page_buffers(page, bdev,
-- index << sizebits, size);
-+ (sector_t)index << sizebits,
-+ size);
- goto done;
- }
- if (!try_to_free_buffers(page))
-@@ -1050,7 +1051,8 @@ grow_dev_page(struct block_device *bdev, sector_t block,
- */
- spin_lock(&inode->i_mapping->private_lock);
- link_dev_buffers(page, bh);
-- end_block = init_page_buffers(page, bdev, index << sizebits, size);
-+ end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits,
-+ size);
- spin_unlock(&inode->i_mapping->private_lock);
- done:
- ret = (block < end_block) ? 1 : -ENXIO;
-@@ -3428,7 +3430,7 @@ void __init buffer_init(void)
+@@ -3430,7 +3430,7 @@ void __init buffer_init(void)
bh_cachep = kmem_cache_create("buffer_head",
sizeof(struct buffer_head), 0,
(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
@@ -59232,9 +58945,18 @@ index 3b0c62e..f7d090c 100644
}
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
-index d1fdfa8..94558f8 100644
+index d1fdfa8..186defc 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
+@@ -586,7 +586,7 @@ cifs_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
+ tmprc = CIFS_open(xid, &oparms, &oplock, NULL);
+ if (tmprc == -EOPNOTSUPP)
+ *symlink = true;
+- else
++ else if (tmprc == 0)
+ CIFSSMBClose(xid, tcon, fid.netfid);
+ }
+
@@ -626,27 +626,27 @@ static void
cifs_clear_stats(struct cifs_tcon *tcon)
{
@@ -59340,6 +59062,19 @@ index d1fdfa8..94558f8 100644
#endif
}
+diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
+index e31a9df..1007867 100644
+--- a/fs/cifs/smb2maperror.c
++++ b/fs/cifs/smb2maperror.c
+@@ -256,6 +256,8 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
+ {STATUS_DLL_MIGHT_BE_INCOMPATIBLE, -EIO,
+ "STATUS_DLL_MIGHT_BE_INCOMPATIBLE"},
+ {STATUS_STOPPED_ON_SYMLINK, -EOPNOTSUPP, "STATUS_STOPPED_ON_SYMLINK"},
++ {STATUS_IO_REPARSE_TAG_NOT_HANDLED, -EOPNOTSUPP,
++ "STATUS_REPARSE_NOT_HANDLED"},
+ {STATUS_DEVICE_REQUIRES_CLEANING, -EIO,
+ "STATUS_DEVICE_REQUIRES_CLEANING"},
+ {STATUS_DEVICE_DOOR_OPEN, -EIO, "STATUS_DEVICE_DOOR_OPEN"},
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index f8977b2..bb38079 100644
--- a/fs/cifs/smb2ops.c
@@ -63141,7 +62876,7 @@ index b29e42f..5ea7fdf 100644
#define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
diff --git a/fs/namei.c b/fs/namei.c
-index d5a4fae..27e6c48 100644
+index dd2f2c5..27e6c48 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -331,17 +331,34 @@ int generic_permission(struct inode *inode, int mask)
@@ -63197,42 +62932,7 @@ index d5a4fae..27e6c48 100644
return -EACCES;
}
-@@ -642,24 +651,22 @@ static int complete_walk(struct nameidata *nd)
-
- static __always_inline void set_root(struct nameidata *nd)
- {
-- if (!nd->root.mnt)
-- get_fs_root(current->fs, &nd->root);
-+ get_fs_root(current->fs, &nd->root);
- }
-
- static int link_path_walk(const char *, struct nameidata *);
-
--static __always_inline void set_root_rcu(struct nameidata *nd)
-+static __always_inline unsigned set_root_rcu(struct nameidata *nd)
- {
-- if (!nd->root.mnt) {
-- struct fs_struct *fs = current->fs;
-- unsigned seq;
-+ struct fs_struct *fs = current->fs;
-+ unsigned seq, res;
-
-- do {
-- seq = read_seqcount_begin(&fs->seq);
-- nd->root = fs->root;
-- nd->seq = __read_seqcount_begin(&nd->root.dentry->d_seq);
-- } while (read_seqcount_retry(&fs->seq, seq));
-- }
-+ do {
-+ seq = read_seqcount_begin(&fs->seq);
-+ nd->root = fs->root;
-+ res = __read_seqcount_begin(&nd->root.dentry->d_seq);
-+ } while (read_seqcount_retry(&fs->seq, seq));
-+ return res;
- }
-
- static void path_put_conditional(struct path *path, struct nameidata *nd)
-@@ -823,7 +830,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
+@@ -821,7 +830,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
{
struct dentry *dentry = link->dentry;
int error;
@@ -63241,7 +62941,7 @@ index d5a4fae..27e6c48 100644
BUG_ON(nd->flags & LOOKUP_RCU);
-@@ -844,6 +851,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
+@@ -842,6 +851,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
if (error)
goto out_put_nd_path;
@@ -63254,27 +62954,14 @@ index d5a4fae..27e6c48 100644
nd->last_type = LAST_BIND;
*p = dentry->d_inode->i_op->follow_link(dentry, nd);
error = PTR_ERR(*p);
-@@ -859,7 +872,8 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
- return PTR_ERR(s);
- }
- if (*s == '/') {
-- set_root(nd);
-+ if (!nd->root.mnt)
-+ set_root(nd);
- path_put(&nd->path);
- nd->path = nd->root;
- path_get(&nd->root);
-@@ -1132,7 +1146,9 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
+@@ -1131,6 +1146,7 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
static int follow_dotdot_rcu(struct nameidata *nd)
{
-- set_root_rcu(nd);
+ struct inode *inode = nd->inode;
-+ if (!nd->root.mnt)
-+ set_root_rcu(nd);
+ if (!nd->root.mnt)
+ set_root_rcu(nd);
- while (1) {
- if (nd->path.dentry == nd->root.dentry &&
@@ -1144,6 +1160,7 @@ static int follow_dotdot_rcu(struct nameidata *nd)
struct dentry *parent = old->d_parent;
unsigned seq;
@@ -63305,17 +62992,7 @@ index d5a4fae..27e6c48 100644
return 0;
failed:
-@@ -1244,7 +1263,8 @@ static void follow_mount(struct path *path)
-
- static void follow_dotdot(struct nameidata *nd)
- {
-- set_root(nd);
-+ if (!nd->root.mnt)
-+ set_root(nd);
-
- while(1) {
- struct dentry *old = nd->path.dentry;
-@@ -1592,6 +1612,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
+@@ -1593,6 +1612,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
if (res)
break;
res = walk_component(nd, path, LOOKUP_FOLLOW);
@@ -63324,7 +63001,7 @@ index d5a4fae..27e6c48 100644
put_link(nd, &link, cookie);
} while (res > 0);
-@@ -1664,7 +1686,7 @@ EXPORT_SYMBOL(full_name_hash);
+@@ -1665,7 +1686,7 @@ EXPORT_SYMBOL(full_name_hash);
static inline unsigned long hash_name(const char *name, unsigned int *hashp)
{
unsigned long a, b, adata, bdata, mask, hash, len;
@@ -63333,16 +63010,7 @@ index d5a4fae..27e6c48 100644
hash = a = 0;
len = -sizeof(unsigned long);
-@@ -1842,7 +1864,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
- if (*name=='/') {
- if (flags & LOOKUP_RCU) {
- rcu_read_lock();
-- set_root_rcu(nd);
-+ nd->seq = set_root_rcu(nd);
- } else {
- set_root(nd);
- path_get(&nd->root);
-@@ -1893,7 +1915,14 @@ static int path_init(int dfd, const char *name, unsigned int flags,
+@@ -1894,7 +1915,14 @@ static int path_init(int dfd, const char *name, unsigned int flags,
}
nd->inode = nd->path.dentry->d_inode;
@@ -63358,7 +63026,7 @@ index d5a4fae..27e6c48 100644
}
static inline int lookup_last(struct nameidata *nd, struct path *path)
-@@ -1948,6 +1977,8 @@ static int path_lookupat(int dfd, const char *name,
+@@ -1949,6 +1977,8 @@ static int path_lookupat(int dfd, const char *name,
if (err)
break;
err = lookup_last(nd, &path);
@@ -63367,7 +63035,7 @@ index d5a4fae..27e6c48 100644
put_link(nd, &link, cookie);
}
}
-@@ -1955,6 +1986,13 @@ static int path_lookupat(int dfd, const char *name,
+@@ -1956,6 +1986,13 @@ static int path_lookupat(int dfd, const char *name,
if (!err)
err = complete_walk(nd);
@@ -63381,7 +63049,7 @@ index d5a4fae..27e6c48 100644
if (!err && nd->flags & LOOKUP_DIRECTORY) {
if (!d_can_lookup(nd->path.dentry)) {
path_put(&nd->path);
-@@ -1982,8 +2020,15 @@ static int filename_lookup(int dfd, struct filename *name,
+@@ -1983,8 +2020,15 @@ static int filename_lookup(int dfd, struct filename *name,
retval = path_lookupat(dfd, name->name,
flags | LOOKUP_REVAL, nd);
@@ -63398,7 +63066,7 @@ index d5a4fae..27e6c48 100644
return retval;
}
-@@ -2558,6 +2603,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
+@@ -2559,6 +2603,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
if (flag & O_NOATIME && !inode_owner_or_capable(inode))
return -EPERM;
@@ -63412,7 +63080,7 @@ index d5a4fae..27e6c48 100644
return 0;
}
-@@ -2789,7 +2841,7 @@ looked_up:
+@@ -2790,7 +2841,7 @@ looked_up:
* cleared otherwise prior to returning.
*/
static int lookup_open(struct nameidata *nd, struct path *path,
@@ -63421,7 +63089,7 @@ index d5a4fae..27e6c48 100644
const struct open_flags *op,
bool got_write, int *opened)
{
-@@ -2824,6 +2876,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
+@@ -2825,6 +2876,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
/* Negative dentry, just create the file */
if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
umode_t mode = op->mode;
@@ -63439,7 +63107,7 @@ index d5a4fae..27e6c48 100644
if (!IS_POSIXACL(dir->d_inode))
mode &= ~current_umask();
/*
-@@ -2845,6 +2908,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
+@@ -2846,6 +2908,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
nd->flags & LOOKUP_EXCL);
if (error)
goto out_dput;
@@ -63448,7 +63116,7 @@ index d5a4fae..27e6c48 100644
}
out_no_open:
path->dentry = dentry;
-@@ -2859,7 +2924,7 @@ out_dput:
+@@ -2860,7 +2924,7 @@ out_dput:
/*
* Handle the last step of open()
*/
@@ -63457,7 +63125,7 @@ index d5a4fae..27e6c48 100644
struct file *file, const struct open_flags *op,
int *opened, struct filename *name)
{
-@@ -2909,6 +2974,15 @@ static int do_last(struct nameidata *nd, struct path *path,
+@@ -2910,6 +2974,15 @@ static int do_last(struct nameidata *nd, struct path *path,
if (error)
return error;
@@ -63473,7 +63141,7 @@ index d5a4fae..27e6c48 100644
audit_inode(name, dir, LOOKUP_PARENT);
error = -EISDIR;
/* trailing slashes? */
-@@ -2928,7 +3002,7 @@ retry_lookup:
+@@ -2929,7 +3002,7 @@ retry_lookup:
*/
}
mutex_lock(&dir->d_inode->i_mutex);
@@ -63482,7 +63150,7 @@ index d5a4fae..27e6c48 100644
mutex_unlock(&dir->d_inode->i_mutex);
if (error <= 0) {
-@@ -2952,11 +3026,28 @@ retry_lookup:
+@@ -2953,11 +3026,28 @@ retry_lookup:
goto finish_open_created;
}
@@ -63512,7 +63180,7 @@ index d5a4fae..27e6c48 100644
/*
* If atomic_open() acquired write access it is dropped now due to
-@@ -2997,6 +3088,11 @@ finish_lookup:
+@@ -2998,6 +3088,11 @@ finish_lookup:
}
}
BUG_ON(inode != path->dentry->d_inode);
@@ -63524,7 +63192,7 @@ index d5a4fae..27e6c48 100644
return 1;
}
-@@ -3006,7 +3102,6 @@ finish_lookup:
+@@ -3007,7 +3102,6 @@ finish_lookup:
save_parent.dentry = nd->path.dentry;
save_parent.mnt = mntget(path->mnt);
nd->path.dentry = path->dentry;
@@ -63532,7 +63200,7 @@ index d5a4fae..27e6c48 100644
}
nd->inode = inode;
/* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
-@@ -3016,7 +3111,18 @@ finish_open:
+@@ -3017,7 +3111,18 @@ finish_open:
path_put(&save_parent);
return error;
}
@@ -63551,7 +63219,7 @@ index d5a4fae..27e6c48 100644
error = -EISDIR;
if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
goto out;
-@@ -3179,7 +3285,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
+@@ -3180,7 +3285,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
if (unlikely(error))
goto out;
@@ -63560,7 +63228,7 @@ index d5a4fae..27e6c48 100644
while (unlikely(error > 0)) { /* trailing symlink */
struct path link = path;
void *cookie;
-@@ -3197,7 +3303,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
+@@ -3198,7 +3303,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
error = follow_link(&link, nd, &cookie);
if (unlikely(error))
break;
@@ -63569,7 +63237,7 @@ index d5a4fae..27e6c48 100644
put_link(nd, &link, cookie);
}
out:
-@@ -3297,9 +3403,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
+@@ -3298,9 +3403,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
goto unlock;
error = -EEXIST;
@@ -63583,7 +63251,7 @@ index d5a4fae..27e6c48 100644
/*
* Special case - lookup gave negative, but... we had foo/bar/
* From the vfs_mknod() POV we just have a negative dentry -
-@@ -3351,6 +3459,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
+@@ -3352,6 +3459,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
}
EXPORT_SYMBOL(user_path_create);
@@ -63604,7 +63272,7 @@ index d5a4fae..27e6c48 100644
int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
int error = may_create(dir, dentry);
-@@ -3413,6 +3535,17 @@ retry:
+@@ -3414,6 +3535,17 @@ retry:
if (!IS_POSIXACL(path.dentry->d_inode))
mode &= ~current_umask();
@@ -63622,7 +63290,7 @@ index d5a4fae..27e6c48 100644
error = security_path_mknod(&path, dentry, mode, dev);
if (error)
goto out;
-@@ -3429,6 +3562,8 @@ retry:
+@@ -3430,6 +3562,8 @@ retry:
break;
}
out:
@@ -63631,7 +63299,7 @@ index d5a4fae..27e6c48 100644
done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
-@@ -3481,9 +3616,16 @@ retry:
+@@ -3482,9 +3616,16 @@ retry:
if (!IS_POSIXACL(path.dentry->d_inode))
mode &= ~current_umask();
@@ -63648,7 +63316,7 @@ index d5a4fae..27e6c48 100644
done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
-@@ -3564,6 +3706,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
+@@ -3565,6 +3706,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
struct filename *name;
struct dentry *dentry;
struct nameidata nd;
@@ -63657,7 +63325,7 @@ index d5a4fae..27e6c48 100644
unsigned int lookup_flags = 0;
retry:
name = user_path_parent(dfd, pathname, &nd, lookup_flags);
-@@ -3596,10 +3740,21 @@ retry:
+@@ -3597,10 +3740,21 @@ retry:
error = -ENOENT;
goto exit3;
}
@@ -63679,7 +63347,7 @@ index d5a4fae..27e6c48 100644
exit3:
dput(dentry);
exit2:
-@@ -3689,6 +3844,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
+@@ -3690,6 +3844,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
struct nameidata nd;
struct inode *inode = NULL;
struct inode *delegated_inode = NULL;
@@ -63688,7 +63356,7 @@ index d5a4fae..27e6c48 100644
unsigned int lookup_flags = 0;
retry:
name = user_path_parent(dfd, pathname, &nd, lookup_flags);
-@@ -3715,10 +3872,22 @@ retry_deleg:
+@@ -3716,10 +3872,22 @@ retry_deleg:
if (d_is_negative(dentry))
goto slashes;
ihold(inode);
@@ -63711,7 +63379,7 @@ index d5a4fae..27e6c48 100644
exit2:
dput(dentry);
}
-@@ -3806,9 +3975,17 @@ retry:
+@@ -3807,9 +3975,17 @@ retry:
if (IS_ERR(dentry))
goto out_putname;
@@ -63729,7 +63397,7 @@ index d5a4fae..27e6c48 100644
done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
-@@ -3911,6 +4088,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
+@@ -3912,6 +4088,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
struct dentry *new_dentry;
struct path old_path, new_path;
struct inode *delegated_inode = NULL;
@@ -63737,7 +63405,7 @@ index d5a4fae..27e6c48 100644
int how = 0;
int error;
-@@ -3934,7 +4112,7 @@ retry:
+@@ -3935,7 +4112,7 @@ retry:
if (error)
return error;
@@ -63746,7 +63414,7 @@ index d5a4fae..27e6c48 100644
(how & LOOKUP_REVAL));
error = PTR_ERR(new_dentry);
if (IS_ERR(new_dentry))
-@@ -3946,11 +4124,28 @@ retry:
+@@ -3947,11 +4124,28 @@ retry:
error = may_linkat(&old_path);
if (unlikely(error))
goto out_dput;
@@ -63775,7 +63443,7 @@ index d5a4fae..27e6c48 100644
done_path_create(&new_path, new_dentry);
if (delegated_inode) {
error = break_deleg_wait(&delegated_inode);
-@@ -4237,6 +4432,12 @@ retry_deleg:
+@@ -4238,6 +4432,12 @@ retry_deleg:
if (new_dentry == trap)
goto exit5;
@@ -63788,7 +63456,7 @@ index d5a4fae..27e6c48 100644
error = security_path_rename(&oldnd.path, old_dentry,
&newnd.path, new_dentry);
if (error)
-@@ -4244,6 +4445,9 @@ retry_deleg:
+@@ -4245,6 +4445,9 @@ retry_deleg:
error = vfs_rename(old_dir->d_inode, old_dentry,
new_dir->d_inode, new_dentry,
&delegated_inode);
@@ -63798,7 +63466,7 @@ index d5a4fae..27e6c48 100644
exit5:
dput(new_dentry);
exit4:
-@@ -4280,6 +4484,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
+@@ -4281,6 +4484,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
{
@@ -63807,7 +63475,7 @@ index d5a4fae..27e6c48 100644
int len;
len = PTR_ERR(link);
-@@ -4289,7 +4495,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
+@@ -4290,7 +4495,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
len = strlen(link);
if (len > (unsigned) buflen)
len = buflen;
@@ -64208,28 +63876,6 @@ index 287a22c..4e56e4e 100644
group->fanotify_data.f_flags = event_f_flags;
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
oevent->response = 0;
-diff --git a/fs/notify/fdinfo.c b/fs/notify/fdinfo.c
-index 238a593..9d7e2b9 100644
---- a/fs/notify/fdinfo.c
-+++ b/fs/notify/fdinfo.c
-@@ -42,7 +42,7 @@ static int show_mark_fhandle(struct seq_file *m, struct inode *inode)
- {
- struct {
- struct file_handle handle;
-- u8 pad[64];
-+ u8 pad[MAX_HANDLE_SZ];
- } f;
- int size, ret, i;
-
-@@ -50,7 +50,7 @@ static int show_mark_fhandle(struct seq_file *m, struct inode *inode)
- size = f.handle.handle_bytes >> 2;
-
- ret = exportfs_encode_inode_fh(inode, (struct fid *)f.handle.f_handle, &size, 0);
-- if ((ret == 255) || (ret == -ENOSPC)) {
-+ if ((ret == FILEID_INVALID) || (ret < 0)) {
- WARN_ONCE(1, "Can't encode file handler for inotify: %d\n", ret);
- return 0;
- }
diff --git a/fs/notify/notification.c b/fs/notify/notification.c
index 1e58402..bb2d6f4 100644
--- a/fs/notify/notification.c
@@ -85084,27 +84730,29 @@ index 6f8fbcf..4efc177 100644
+ MODULE_GRSEC MODULE_RANDSTRUCT_PLUGIN
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
-index 502073a..a7de024 100644
+index b483abd..af305ad 100644
--- a/include/linux/vga_switcheroo.h
+++ b/include/linux/vga_switcheroo.h
-@@ -63,8 +63,8 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
+@@ -63,9 +63,9 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
--int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain);
+ void vga_switcheroo_fini_domain_pm_ops(struct device *dev);
+-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain);
#else
static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
-@@ -81,8 +81,8 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
+@@ -82,9 +82,9 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
-static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
--static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
+static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
+ static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {}
+-static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
+static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
#endif
@@ -86492,7 +86140,7 @@ index fe94bb9..c9e51c2 100644
} __attribute__ ((packed));
diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
-index c38355c..17a57bc 100644
+index 1590c49..5eab462 100644
--- a/include/uapi/linux/xattr.h
+++ b/include/uapi/linux/xattr.h
@@ -73,5 +73,9 @@
@@ -87537,10 +87185,10 @@ index 00adb21..d5954a8 100644
+}
+EXPORT_SYMBOL(capable_wrt_inode_uidgid_nolog);
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
-index 0c753dd..3ce8cca 100644
+index 550e205..b0a7f7d 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
-@@ -5190,6 +5190,14 @@ static void cgroup_release_agent(struct work_struct *work)
+@@ -5189,6 +5189,14 @@ static void cgroup_release_agent(struct work_struct *work)
release_list);
list_del_init(&cgrp->release_list);
raw_spin_unlock(&release_list_lock);
@@ -87555,7 +87203,7 @@ index 0c753dd..3ce8cca 100644
pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!pathbuf)
goto continue_free;
-@@ -5372,7 +5380,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
+@@ -5371,7 +5379,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
struct css_set *cset = link->cset;
struct task_struct *task;
int count = 0;
@@ -87981,7 +87629,7 @@ index 0b097c8..11dd5c5 100644
#ifdef CONFIG_MODULE_UNLOAD
{
diff --git a/kernel/events/core.c b/kernel/events/core.c
-index f774e93..c602612 100644
+index 3a140ca..6624485 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -158,8 +158,15 @@ static struct srcu_struct pmus_srcu;
@@ -88019,7 +87667,7 @@ index f774e93..c602612 100644
static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
enum event_type_t event_type);
-@@ -3000,7 +3007,7 @@ static void __perf_event_read(void *info)
+@@ -3010,7 +3017,7 @@ static void __perf_event_read(void *info)
static inline u64 perf_event_count(struct perf_event *event)
{
@@ -88028,7 +87676,7 @@ index f774e93..c602612 100644
}
static u64 perf_event_read(struct perf_event *event)
-@@ -3365,9 +3372,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
+@@ -3375,9 +3382,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
mutex_lock(&event->child_mutex);
total += perf_event_read(event);
*enabled += event->total_time_enabled +
@@ -88040,7 +87688,7 @@ index f774e93..c602612 100644
list_for_each_entry(child, &event->child_list, child_list) {
total += perf_event_read(child);
-@@ -3796,10 +3803,10 @@ void perf_event_update_userpage(struct perf_event *event)
+@@ -3806,10 +3813,10 @@ void perf_event_update_userpage(struct perf_event *event)
userpg->offset -= local64_read(&event->hw.prev_count);
userpg->time_enabled = enabled +
@@ -88053,7 +87701,7 @@ index f774e93..c602612 100644
arch_perf_update_userpage(userpg, now);
-@@ -4350,7 +4357,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
+@@ -4360,7 +4367,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
/* Data. */
sp = perf_user_stack_pointer(regs);
@@ -88062,7 +87710,7 @@ index f774e93..c602612 100644
dyn_size = dump_size - rem;
perf_output_skip(handle, rem);
-@@ -4441,11 +4448,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
+@@ -4451,11 +4458,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
values[n++] = perf_event_count(event);
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
values[n++] = enabled +
@@ -88076,7 +87724,7 @@ index f774e93..c602612 100644
}
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(event);
-@@ -6724,7 +6731,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
+@@ -6734,7 +6741,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
event->parent = parent_event;
event->ns = get_pid_ns(task_active_pid_ns(current));
@@ -88085,7 +87733,7 @@ index f774e93..c602612 100644
event->state = PERF_EVENT_STATE_INACTIVE;
-@@ -7024,6 +7031,11 @@ SYSCALL_DEFINE5(perf_event_open,
+@@ -7034,6 +7041,11 @@ SYSCALL_DEFINE5(perf_event_open,
if (flags & ~PERF_FLAG_ALL)
return -EINVAL;
@@ -88097,7 +87745,7 @@ index f774e93..c602612 100644
err = perf_copy_attr(attr_uptr, &attr);
if (err)
return err;
-@@ -7362,10 +7374,10 @@ static void sync_child_event(struct perf_event *child_event,
+@@ -7372,10 +7384,10 @@ static void sync_child_event(struct perf_event *child_event,
/*
* Add back the child's count to the parent's count:
*/
@@ -88111,6 +87759,18 @@ index f774e93..c602612 100644
&parent_event->child_total_time_running);
/*
+@@ -7836,8 +7848,10 @@ int perf_event_init_task(struct task_struct *child)
+
+ for_each_task_context_nr(ctxn) {
+ ret = perf_event_init_context(child, ctxn);
+- if (ret)
++ if (ret) {
++ perf_event_free_task(child);
+ return ret;
++ }
+ }
+
+ return 0;
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index 569b2187..19940d9 100644
--- a/kernel/events/internal.h
@@ -88229,7 +87889,7 @@ index 81b3d67..ef189a4 100644
{
struct signal_struct *sig = current->signal;
diff --git a/kernel/fork.c b/kernel/fork.c
-index c44bff8..a3c5876 100644
+index c44bff8..7361260 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -180,6 +180,48 @@ void thread_info_cache_init(void)
@@ -88599,6 +88259,15 @@ index c44bff8..a3c5876 100644
if (atomic_read(&p->real_cred->user->processes) >=
task_rlimit(p, RLIMIT_NPROC)) {
if (p->real_cred->user != INIT_USER &&
+@@ -1323,7 +1428,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+ goto bad_fork_cleanup_policy;
+ retval = audit_alloc(p);
+ if (retval)
+- goto bad_fork_cleanup_policy;
++ goto bad_fork_cleanup_perf;
+ /* copy all the process information */
+ retval = copy_semundo(clone_flags, p);
+ if (retval)
@@ -1449,6 +1554,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
goto bad_fork_free_pid;
}
@@ -88611,7 +88280,18 @@ index c44bff8..a3c5876 100644
if (likely(p->pid)) {
ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
-@@ -1539,6 +1649,8 @@ bad_fork_cleanup_count:
+@@ -1522,8 +1632,9 @@ bad_fork_cleanup_semundo:
+ exit_sem(p);
+ bad_fork_cleanup_audit:
+ audit_free(p);
+-bad_fork_cleanup_policy:
++bad_fork_cleanup_perf:
+ perf_event_free_task(p);
++bad_fork_cleanup_policy:
+ #ifdef CONFIG_NUMA
+ mpol_put(p->mempolicy);
+ bad_fork_cleanup_cgroup:
+@@ -1539,6 +1650,8 @@ bad_fork_cleanup_count:
bad_fork_free:
free_task(p);
fork_out:
@@ -88620,7 +88300,7 @@ index c44bff8..a3c5876 100644
return ERR_PTR(retval);
}
-@@ -1600,6 +1712,7 @@ long do_fork(unsigned long clone_flags,
+@@ -1600,6 +1713,7 @@ long do_fork(unsigned long clone_flags,
p = copy_process(clone_flags, stack_start, stack_size,
child_tidptr, NULL, trace);
@@ -88628,7 +88308,7 @@ index c44bff8..a3c5876 100644
/*
* Do this prior waking up the new thread - the thread pointer
* might get invalid after that point, if the thread exits quickly.
-@@ -1616,6 +1729,8 @@ long do_fork(unsigned long clone_flags,
+@@ -1616,6 +1730,8 @@ long do_fork(unsigned long clone_flags,
if (clone_flags & CLONE_PARENT_SETTID)
put_user(nr, parent_tidptr);
@@ -88637,7 +88317,7 @@ index c44bff8..a3c5876 100644
if (clone_flags & CLONE_VFORK) {
p->vfork_done = &vfork;
init_completion(&vfork);
-@@ -1734,7 +1849,7 @@ void __init proc_caches_init(void)
+@@ -1734,7 +1850,7 @@ void __init proc_caches_init(void)
mm_cachep = kmem_cache_create("mm_struct",
sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
@@ -88646,7 +88326,7 @@ index c44bff8..a3c5876 100644
mmap_init();
nsproxy_cache_init();
}
-@@ -1774,7 +1889,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
+@@ -1774,7 +1890,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
return 0;
/* don't need lock here; in the worst case we'll do useless copy */
@@ -88655,7 +88335,7 @@ index c44bff8..a3c5876 100644
return 0;
*new_fsp = copy_fs_struct(fs);
-@@ -1881,7 +1996,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
+@@ -1881,7 +1997,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
fs = current->fs;
spin_lock(&fs->lock);
current->fs = new_fs;
@@ -88666,7 +88346,7 @@ index c44bff8..a3c5876 100644
else
new_fs = fs;
diff --git a/kernel/futex.c b/kernel/futex.c
-index e3087af..4730710 100644
+index 0b0dc02..4730710 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -54,6 +54,7 @@
@@ -88716,15 +88396,7 @@ index e3087af..4730710 100644
pagefault_disable();
ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
-@@ -2614,6 +2620,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
- * shared futexes. We need to compare the keys:
- */
- if (match_futex(&q.key, &key2)) {
-+ queue_unlock(hb);
- ret = -EINVAL;
- goto out_put_keys;
- }
-@@ -3019,6 +3026,7 @@ static void __init futex_detect_cmpxchg(void)
+@@ -3020,6 +3026,7 @@ static void __init futex_detect_cmpxchg(void)
{
#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
u32 curval;
@@ -88732,7 +88404,7 @@ index e3087af..4730710 100644
/*
* This will fail and we want it. Some arch implementations do
-@@ -3030,8 +3038,11 @@ static void __init futex_detect_cmpxchg(void)
+@@ -3031,8 +3038,11 @@ static void __init futex_detect_cmpxchg(void)
* implementation, the non-functional ones will return
* -ENOSYS.
*/
@@ -88966,26 +88638,10 @@ index 3127ad5..159d880 100644
return -ENOMEM;
reset_iter(iter, 0);
diff --git a/kernel/kcmp.c b/kernel/kcmp.c
-index e30ac0f..a7fcafb 100644
+index 0aa69ea..a7fcafb 100644
--- a/kernel/kcmp.c
+++ b/kernel/kcmp.c
-@@ -44,11 +44,12 @@ static long kptr_obfuscate(long v, int type)
- */
- static int kcmp_ptr(void *v1, void *v2, enum kcmp_type type)
- {
-- long ret;
-+ long t1, t2;
-
-- ret = kptr_obfuscate((long)v1, type) - kptr_obfuscate((long)v2, type);
-+ t1 = kptr_obfuscate((long)v1, type);
-+ t2 = kptr_obfuscate((long)v2, type);
-
-- return (ret < 0) | ((ret > 0) << 1);
-+ return (t1 < t2) | ((t1 > t2) << 1);
- }
-
- /* The caller must have pinned the task */
-@@ -99,6 +100,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
+@@ -100,6 +100,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
struct task_struct *task1, *task2;
int ret;
@@ -92674,71 +92330,10 @@ index 7c7964c..2a0d412 100644
update_vsyscall_tz();
if (firsttime) {
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
-index fe75444..b8a1463 100644
+index cd45a07..b8a1463 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
-@@ -464,18 +464,26 @@ static enum alarmtimer_type clock2alarm(clockid_t clockid)
- static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm,
- ktime_t now)
- {
-+ unsigned long flags;
- struct k_itimer *ptr = container_of(alarm, struct k_itimer,
- it.alarm.alarmtimer);
-- if (posix_timer_event(ptr, 0) != 0)
-- ptr->it_overrun++;
-+ enum alarmtimer_restart result = ALARMTIMER_NORESTART;
-+
-+ spin_lock_irqsave(&ptr->it_lock, flags);
-+ if ((ptr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) {
-+ if (posix_timer_event(ptr, 0) != 0)
-+ ptr->it_overrun++;
-+ }
-
- /* Re-add periodic timers */
- if (ptr->it.alarm.interval.tv64) {
- ptr->it_overrun += alarm_forward(alarm, now,
- ptr->it.alarm.interval);
-- return ALARMTIMER_RESTART;
-+ result = ALARMTIMER_RESTART;
- }
-- return ALARMTIMER_NORESTART;
-+ spin_unlock_irqrestore(&ptr->it_lock, flags);
-+
-+ return result;
- }
-
- /**
-@@ -541,18 +549,22 @@ static int alarm_timer_create(struct k_itimer *new_timer)
- * @new_timer: k_itimer pointer
- * @cur_setting: itimerspec data to fill
- *
-- * Copies the itimerspec data out from the k_itimer
-+ * Copies out the current itimerspec data
- */
- static void alarm_timer_get(struct k_itimer *timr,
- struct itimerspec *cur_setting)
- {
-- memset(cur_setting, 0, sizeof(struct itimerspec));
-+ ktime_t relative_expiry_time =
-+ alarm_expires_remaining(&(timr->it.alarm.alarmtimer));
-
-- cur_setting->it_interval =
-- ktime_to_timespec(timr->it.alarm.interval);
-- cur_setting->it_value =
-- ktime_to_timespec(timr->it.alarm.alarmtimer.node.expires);
-- return;
-+ if (ktime_to_ns(relative_expiry_time) > 0) {
-+ cur_setting->it_value = ktime_to_timespec(relative_expiry_time);
-+ } else {
-+ cur_setting->it_value.tv_sec = 0;
-+ cur_setting->it_value.tv_nsec = 0;
-+ }
-+
-+ cur_setting->it_interval = ktime_to_timespec(timr->it.alarm.interval);
- }
-
- /**
-@@ -811,7 +823,7 @@ static int __init alarmtimer_init(void)
+@@ -823,7 +823,7 @@ static int __init alarmtimer_init(void)
struct platform_device *pdev;
int error = 0;
int i;
@@ -93002,7 +92597,7 @@ index e3be87e..7480b36 100644
ftrace_graph_active++;
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
-index a53f1bb..0e70660 100644
+index 773aba8..0e70660 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -352,9 +352,9 @@ struct buffer_data_page {
@@ -93028,31 +92623,7 @@ index a53f1bb..0e70660 100644
local_t dropped_events;
local_t committing;
local_t commits;
-@@ -626,8 +626,22 @@ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
- work = &cpu_buffer->irq_work;
- }
-
-- work->waiters_pending = true;
- poll_wait(filp, &work->waiters, poll_table);
-+ work->waiters_pending = true;
-+ /*
-+ * There's a tight race between setting the waiters_pending and
-+ * checking if the ring buffer is empty. Once the waiters_pending bit
-+ * is set, the next event will wake the task up, but we can get stuck
-+ * if there's only a single event in.
-+ *
-+ * FIXME: Ideally, we need a memory barrier on the writer side as well,
-+ * but adding a memory barrier to all events will cause too much of a
-+ * performance hit in the fast path. We only need a memory barrier when
-+ * the buffer goes from empty to having content. But as this race is
-+ * extremely small, and it's not a problem if another event comes in, we
-+ * will fix it later.
-+ */
-+ smp_mb();
-
- if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
- (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
-@@ -991,8 +1005,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -1005,8 +1005,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
*
* We add a counter to the write field to denote this.
*/
@@ -93063,7 +92634,7 @@ index a53f1bb..0e70660 100644
/*
* Just make sure we have seen our old_write and synchronize
-@@ -1020,8 +1034,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -1034,8 +1034,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
* cmpxchg to only update if an interrupt did not already
* do it for us. If the cmpxchg fails, we don't care.
*/
@@ -93074,7 +92645,7 @@ index a53f1bb..0e70660 100644
/*
* No need to worry about races with clearing out the commit.
-@@ -1385,12 +1399,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
+@@ -1399,12 +1399,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
static inline unsigned long rb_page_entries(struct buffer_page *bpage)
{
@@ -93089,7 +92660,7 @@ index a53f1bb..0e70660 100644
}
static int
-@@ -1485,7 +1499,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
+@@ -1499,7 +1499,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
* bytes consumed in ring buffer from here.
* Increment overrun to account for the lost events.
*/
@@ -93098,7 +92669,7 @@ index a53f1bb..0e70660 100644
local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
}
-@@ -2063,7 +2077,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2077,7 +2077,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
* it is our responsibility to update
* the counters.
*/
@@ -93107,7 +92678,7 @@ index a53f1bb..0e70660 100644
local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
/*
-@@ -2213,7 +2227,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2227,7 +2227,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
if (tail == BUF_PAGE_SIZE)
tail_page->real_end = 0;
@@ -93116,7 +92687,7 @@ index a53f1bb..0e70660 100644
return;
}
-@@ -2248,7 +2262,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2262,7 +2262,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
rb_event_set_padding(event);
/* Set the write back to the previous setting */
@@ -93125,7 +92696,7 @@ index a53f1bb..0e70660 100644
return;
}
-@@ -2260,7 +2274,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2274,7 +2274,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
/* Set write to end of buffer */
length = (tail + length) - BUF_PAGE_SIZE;
@@ -93134,7 +92705,7 @@ index a53f1bb..0e70660 100644
}
/*
-@@ -2286,7 +2300,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2300,7 +2300,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
* about it.
*/
if (unlikely(next_page == commit_page)) {
@@ -93143,7 +92714,7 @@ index a53f1bb..0e70660 100644
goto out_reset;
}
-@@ -2342,7 +2356,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2356,7 +2356,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
cpu_buffer->tail_page) &&
(cpu_buffer->commit_page ==
cpu_buffer->reader_page))) {
@@ -93152,7 +92723,7 @@ index a53f1bb..0e70660 100644
goto out_reset;
}
}
-@@ -2390,7 +2404,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2404,7 +2404,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
length += RB_LEN_TIME_EXTEND;
tail_page = cpu_buffer->tail_page;
@@ -93161,7 +92732,7 @@ index a53f1bb..0e70660 100644
/* set write to only the index of the write */
write &= RB_WRITE_MASK;
-@@ -2414,7 +2428,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2428,7 +2428,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
kmemcheck_annotate_bitfield(event, bitfield);
rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
@@ -93170,7 +92741,7 @@ index a53f1bb..0e70660 100644
/*
* If this is the first commit on the page, then update
-@@ -2447,7 +2461,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2461,7 +2461,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
unsigned long write_mask =
@@ -93179,7 +92750,7 @@ index a53f1bb..0e70660 100644
unsigned long event_length = rb_event_length(event);
/*
* This is on the tail page. It is possible that
-@@ -2457,7 +2471,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2471,7 +2471,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
*/
old_index += write_mask;
new_index += write_mask;
@@ -93188,7 +92759,7 @@ index a53f1bb..0e70660 100644
if (index == old_index) {
/* update counters */
local_sub(event_length, &cpu_buffer->entries_bytes);
-@@ -2849,7 +2863,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2863,7 +2863,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
/* Do the likely case first */
if (likely(bpage->page == (void *)addr)) {
@@ -93197,7 +92768,7 @@ index a53f1bb..0e70660 100644
return;
}
-@@ -2861,7 +2875,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2875,7 +2875,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
start = bpage;
do {
if (bpage->page == (void *)addr) {
@@ -93206,7 +92777,7 @@ index a53f1bb..0e70660 100644
return;
}
rb_inc_page(cpu_buffer, &bpage);
-@@ -3145,7 +3159,7 @@ static inline unsigned long
+@@ -3159,7 +3159,7 @@ static inline unsigned long
rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
{
return local_read(&cpu_buffer->entries) -
@@ -93215,7 +92786,7 @@ index a53f1bb..0e70660 100644
}
/**
-@@ -3234,7 +3248,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
+@@ -3248,7 +3248,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
return 0;
cpu_buffer = buffer->buffers[cpu];
@@ -93224,7 +92795,7 @@ index a53f1bb..0e70660 100644
return ret;
}
-@@ -3257,7 +3271,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
+@@ -3271,7 +3271,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
return 0;
cpu_buffer = buffer->buffers[cpu];
@@ -93233,7 +92804,7 @@ index a53f1bb..0e70660 100644
return ret;
}
-@@ -3342,7 +3356,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
+@@ -3356,7 +3356,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
/* if you care about this being correct, lock the buffer */
for_each_buffer_cpu(buffer, cpu) {
cpu_buffer = buffer->buffers[cpu];
@@ -93242,7 +92813,7 @@ index a53f1bb..0e70660 100644
}
return overruns;
-@@ -3513,8 +3527,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
+@@ -3527,8 +3527,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
/*
* Reset the reader page to size zero.
*/
@@ -93253,7 +92824,7 @@ index a53f1bb..0e70660 100644
local_set(&cpu_buffer->reader_page->page->commit, 0);
cpu_buffer->reader_page->real_end = 0;
-@@ -3548,7 +3562,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
+@@ -3562,7 +3562,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
* want to compare with the last_overrun.
*/
smp_mb();
@@ -93262,7 +92833,7 @@ index a53f1bb..0e70660 100644
/*
* Here's the tricky part.
-@@ -4120,8 +4134,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
+@@ -4134,8 +4134,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer->head_page
= list_entry(cpu_buffer->pages, struct buffer_page, list);
@@ -93273,7 +92844,7 @@ index a53f1bb..0e70660 100644
local_set(&cpu_buffer->head_page->page->commit, 0);
cpu_buffer->head_page->read = 0;
-@@ -4131,14 +4145,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
+@@ -4145,14 +4145,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
INIT_LIST_HEAD(&cpu_buffer->new_pages);
@@ -93292,7 +92863,7 @@ index a53f1bb..0e70660 100644
local_set(&cpu_buffer->dropped_events, 0);
local_set(&cpu_buffer->entries, 0);
local_set(&cpu_buffer->committing, 0);
-@@ -4543,8 +4557,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
+@@ -4557,8 +4557,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
rb_init_page(bpage);
bpage = reader->page;
reader->page = *data_page;
@@ -94597,6 +94168,31 @@ index b32b70c..e512eb0 100644
pkmap_count[last_pkmap_nr] = 1;
set_page_address(page, (void *)vaddr);
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 1c42d0c..2a99426 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1824,6 +1824,11 @@ static int __split_huge_page_map(struct page *page,
+ for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
+ pte_t *pte, entry;
+ BUG_ON(PageCompound(page+i));
++ /*
++ * Note that pmd_numa is not transferred deliberately
++ * to avoid any possibility that pte_numa leaks to
++ * a PROT_NONE VMA by accident.
++ */
+ entry = mk_pte(page + i, vma->vm_page_prot);
+ entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+ if (!pmd_write(*pmd))
+@@ -1832,8 +1837,6 @@ static int __split_huge_page_map(struct page *page,
+ BUG_ON(page_mapcount(page) != 1);
+ if (!pmd_young(*pmd))
+ entry = pte_mkold(entry);
+- if (pmd_numa(*pmd))
+- entry = pte_mknuma(entry);
+ pte = pte_offset_map(&_pmd, haddr);
+ BUG_ON(!pte_none(*pte));
+ set_pte_at(mm, haddr, pte, entry);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 923f38e..74e159a 100644
--- a/mm/hugetlb.c
@@ -95008,7 +94604,7 @@ index 33365e9..2234ef9 100644
}
unset_migratetype_isolate(page, MIGRATE_MOVABLE);
diff --git a/mm/memory.c b/mm/memory.c
-index 2121d8b8..fa1095a 100644
+index 492e36f..3771c0a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -403,6 +403,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
@@ -95779,10 +95375,23 @@ index 15a8ea0..cb50389 100644
capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
diff --git a/mm/migrate.c b/mm/migrate.c
-index bed4880..a493f67 100644
+index bed4880..95c4b9f 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
-@@ -1485,8 +1485,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
+@@ -148,8 +148,11 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
+ pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
+ if (pte_swp_soft_dirty(*ptep))
+ pte = pte_mksoft_dirty(pte);
++
++ /* Recheck VMA as permissions can change since migration started */
+ if (is_write_migration_entry(entry))
+- pte = pte_mkwrite(pte);
++ pte = maybe_mkwrite(pte, vma);
++
+ #ifdef CONFIG_HUGETLB_PAGE
+ if (PageHuge(new)) {
+ pte = pte_mkhuge(pte);
+@@ -1485,8 +1488,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
*/
tcred = __task_cred(task);
if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
@@ -97664,7 +97273,7 @@ index 7c59ef6..1358905 100644
};
diff --git a/mm/percpu.c b/mm/percpu.c
-index a2a54a8..43ecb68 100644
+index 8cd4308..ab22f17 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
@@ -97844,7 +97453,7 @@ index cdbd312..2e1e0b9 100644
/*
diff --git a/mm/shmem.c b/mm/shmem.c
-index ff85863..7037c25 100644
+index f0d698b..7037c25 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -33,7 +33,7 @@
@@ -97865,19 +97474,7 @@ index ff85863..7037c25 100644
/*
* shmem_fallocate communicates with shmem_fault or shmem_writepage via
-@@ -2143,8 +2143,10 @@ static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct
-
- if (new_dentry->d_inode) {
- (void) shmem_unlink(new_dir, new_dentry);
-- if (they_are_dirs)
-+ if (they_are_dirs) {
-+ drop_nlink(new_dentry->d_inode);
- drop_nlink(old_dir);
-+ }
- } else if (they_are_dirs) {
- drop_nlink(old_dir);
- inc_nlink(new_dir);
-@@ -2298,6 +2300,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
+@@ -2300,6 +2300,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
static int shmem_xattr_validate(const char *name)
{
struct { const char *prefix; size_t len; } arr[] = {
@@ -97889,7 +97486,7 @@ index ff85863..7037c25 100644
{ XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
{ XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
};
-@@ -2353,6 +2360,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
+@@ -2355,6 +2360,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
if (err)
return err;
@@ -97905,7 +97502,7 @@ index ff85863..7037c25 100644
return simple_xattr_set(&info->xattrs, name, value, size, flags);
}
-@@ -2665,8 +2681,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
+@@ -2667,8 +2681,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
int err = -ENOMEM;
/* Round up to L1_CACHE_BYTES to resist false sharing */
@@ -97916,7 +97513,7 @@ index ff85863..7037c25 100644
return -ENOMEM;
diff --git a/mm/slab.c b/mm/slab.c
-index 6dd8d5f..673c763 100644
+index ea854eb..673c763 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -300,10 +300,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
@@ -97969,32 +97566,7 @@ index 6dd8d5f..673c763 100644
slab_early_init = 0;
-@@ -2189,7 +2193,8 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
- int
- __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
- {
-- size_t left_over, freelist_size, ralign;
-+ size_t left_over, freelist_size;
-+ size_t ralign = BYTES_PER_WORD;
- gfp_t gfp;
- int err;
- size_t size = cachep->size;
-@@ -2222,14 +2227,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
- size &= ~(BYTES_PER_WORD - 1);
- }
-
-- /*
-- * Redzoning and user store require word alignment or possibly larger.
-- * Note this will be overridden by architecture or caller mandated
-- * alignment if either is greater than BYTES_PER_WORD.
-- */
-- if (flags & SLAB_STORE_USER)
-- ralign = BYTES_PER_WORD;
--
- if (flags & SLAB_RED_ZONE) {
- ralign = REDZONE_ALIGN;
- /* If redzoning, ensure that the second redzone is suitably
-@@ -3484,6 +3481,21 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
+@@ -3477,6 +3481,21 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
struct array_cache *ac = cpu_cache_get(cachep);
check_irq_off();
@@ -98016,7 +97588,7 @@ index 6dd8d5f..673c763 100644
kmemleak_free_recursive(objp, cachep->flags);
objp = cache_free_debugcheck(cachep, objp, caller);
-@@ -3712,6 +3724,7 @@ void kfree(const void *objp)
+@@ -3705,6 +3724,7 @@ void kfree(const void *objp)
if (unlikely(ZERO_OR_NULL_PTR(objp)))
return;
@@ -98024,7 +97596,7 @@ index 6dd8d5f..673c763 100644
local_irq_save(flags);
kfree_debugcheck(objp);
c = virt_to_cache(objp);
-@@ -4153,14 +4166,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
+@@ -4146,14 +4166,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
}
/* cpu stats */
{
@@ -98051,7 +97623,7 @@ index 6dd8d5f..673c763 100644
#endif
}
-@@ -4381,13 +4402,69 @@ static const struct file_operations proc_slabstats_operations = {
+@@ -4374,13 +4402,69 @@ static const struct file_operations proc_slabstats_operations = {
static int __init slab_proc_init(void)
{
#ifdef CONFIG_DEBUG_SLAB_LEAK
@@ -103434,7 +103006,7 @@ index de770ec..3fc49d2 100644
.get_optmin = SO_IP_SET,
.get_optmax = SO_IP_SET + 1,
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
-index a8eb0a8..86f2de4 100644
+index 610e19c..08d0c3f 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
@@ -103446,7 +103018,7 @@ index a8eb0a8..86f2de4 100644
if (cp->protocol != IPPROTO_UDP)
conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
flags = cp->flags;
-@@ -900,7 +900,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
+@@ -899,7 +899,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
cp->control = NULL;
atomic_set(&cp->n_control, 0);
@@ -103455,7 +103027,7 @@ index a8eb0a8..86f2de4 100644
cp->packet_xmit = NULL;
cp->app = NULL;
-@@ -1188,7 +1188,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
+@@ -1187,7 +1187,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
/* Don't drop the entry if its number of incoming packets is not
located in [0, 8] */
@@ -103465,7 +103037,7 @@ index a8eb0a8..86f2de4 100644
if (!todrop_rate[i]) return 0;
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
-index 3d2d2c8..c87e4d3 100644
+index 27d3f40..f95d8d0 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -567,7 +567,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
@@ -103620,7 +103192,7 @@ index db80126..ef7110e 100644
cp->old_state = cp->state;
/*
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
-index 7f0e1cf..e9a86e6 100644
+index 1692e75..0d7c8e3 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -1102,7 +1102,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
@@ -103918,10 +103490,10 @@ index 0000000..c566332
+MODULE_ALIAS("ipt_gradm");
+MODULE_ALIAS("ip6t_gradm");
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
-index a3910fc..2d2ba14 100644
+index 47dc683..2e0d52c 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
-@@ -870,11 +870,11 @@ static int __net_init hashlimit_proc_net_init(struct net *net)
+@@ -871,11 +871,11 @@ static int __net_init hashlimit_proc_net_init(struct net *net)
{
struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
@@ -104303,6 +103875,43 @@ index 48f8ffc..0ef3eec 100644
struct rds_sock {
struct sock rs_sk;
+diff --git a/net/rds/send.c b/net/rds/send.c
+index a82fb66..1ea9251 100644
+--- a/net/rds/send.c
++++ b/net/rds/send.c
+@@ -593,8 +593,11 @@ static void rds_send_remove_from_sock(struct list_head *messages, int status)
+ sock_put(rds_rs_to_sk(rs));
+ }
+ rs = rm->m_rs;
+- sock_hold(rds_rs_to_sk(rs));
++ if (rs)
++ sock_hold(rds_rs_to_sk(rs));
+ }
++ if (!rs)
++ goto unlock_and_drop;
+ spin_lock(&rs->rs_lock);
+
+ if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
+@@ -638,9 +641,6 @@ unlock_and_drop:
+ * queue. This means that in the TCP case, the message may not have been
+ * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
+ * checks the RDS_MSG_HAS_ACK_SEQ bit.
+- *
+- * XXX It's not clear to me how this is safely serialized with socket
+- * destruction. Maybe it should bail if it sees SOCK_DEAD.
+ */
+ void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
+ is_acked_func is_acked)
+@@ -711,6 +711,9 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
+ */
+ if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
+ spin_unlock_irqrestore(&conn->c_lock, flags);
++ spin_lock_irqsave(&rm->m_rs_lock, flags);
++ rm->m_rs = NULL;
++ spin_unlock_irqrestore(&rm->m_rs_lock, flags);
+ continue;
+ }
+ list_del_init(&rm->m_conn_item);
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index edac9ef..16bcb98 100644
--- a/net/rds/tcp.c
@@ -116489,10 +116098,10 @@ index 0000000..4378111
+}
diff --git a/tools/gcc/size_overflow_plugin/size_overflow_hash.data b/tools/gcc/size_overflow_plugin/size_overflow_hash.data
new file mode 100644
-index 0000000..38b3d62
+index 0000000..e4b26fe
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/size_overflow_hash.data
-@@ -0,0 +1,5989 @@
+@@ -0,0 +1,5991 @@
+intel_fake_agp_alloc_by_type_1 intel_fake_agp_alloc_by_type 1 1 NULL
+ocfs2_get_refcount_tree_3 ocfs2_get_refcount_tree 0 3 NULL
+storvsc_connect_to_vsp_22 storvsc_connect_to_vsp 2 22 NULL
@@ -117837,7 +117446,8 @@ index 0000000..38b3d62
+sta_dev_read_14782 sta_dev_read 3 14782 NULL
+keys_proc_write_14792 keys_proc_write 3 14792 NULL
+ext4_kvmalloc_14796 ext4_kvmalloc 1 14796 NULL
-+__kfifo_in_14797 __kfifo_in 3-0 14797 NULL
++__kfifo_in_14797 __kfifo_in 3-0 14797 NULL nohasharray
++ttm_page_pool_free_14797 ttm_page_pool_free 2 14797 &__kfifo_in_14797
+hpet_readl_14801 hpet_readl 0 14801 NULL nohasharray
+snd_als300_gcr_read_14801 snd_als300_gcr_read 0 14801 &hpet_readl_14801
+security_inode_rename_14805 security_inode_rename 0 14805 NULL
@@ -118260,6 +117870,7 @@ index 0000000..38b3d62
+kstrtoll_from_user_19500 kstrtoll_from_user 2 19500 NULL
+ext4_add_new_descs_19509 ext4_add_new_descs 3 19509 NULL
+batadv_tvlv_container_register_19520 batadv_tvlv_container_register 5 19520 NULL
++ttm_dma_page_pool_free_19527 ttm_dma_page_pool_free 2 19527 NULL
+apei_exec_pre_map_gars_19529 apei_exec_pre_map_gars 0 19529 NULL nohasharray
+cfc_write_array_to_buffer_19529 cfc_write_array_to_buffer 3 19529 &apei_exec_pre_map_gars_19529
+nfc_llcp_build_tlv_19536 nfc_llcp_build_tlv 3 19536 NULL
diff --git a/3.14.19/4425_grsec_remove_EI_PAX.patch b/3.14.20/4425_grsec_remove_EI_PAX.patch
similarity index 100%
rename from 3.14.19/4425_grsec_remove_EI_PAX.patch
rename to 3.14.20/4425_grsec_remove_EI_PAX.patch
diff --git a/3.14.19/4427_force_XATTR_PAX_tmpfs.patch b/3.14.20/4427_force_XATTR_PAX_tmpfs.patch
similarity index 100%
rename from 3.14.19/4427_force_XATTR_PAX_tmpfs.patch
rename to 3.14.20/4427_force_XATTR_PAX_tmpfs.patch
diff --git a/3.14.19/4430_grsec-remove-localversion-grsec.patch b/3.14.20/4430_grsec-remove-localversion-grsec.patch
similarity index 100%
rename from 3.14.19/4430_grsec-remove-localversion-grsec.patch
rename to 3.14.20/4430_grsec-remove-localversion-grsec.patch
diff --git a/3.14.19/4435_grsec-mute-warnings.patch b/3.14.20/4435_grsec-mute-warnings.patch
similarity index 100%
rename from 3.14.19/4435_grsec-mute-warnings.patch
rename to 3.14.20/4435_grsec-mute-warnings.patch
diff --git a/3.14.19/4440_grsec-remove-protected-paths.patch b/3.14.20/4440_grsec-remove-protected-paths.patch
similarity index 100%
rename from 3.14.19/4440_grsec-remove-protected-paths.patch
rename to 3.14.20/4440_grsec-remove-protected-paths.patch
diff --git a/3.14.19/4450_grsec-kconfig-default-gids.patch b/3.14.20/4450_grsec-kconfig-default-gids.patch
similarity index 100%
rename from 3.14.19/4450_grsec-kconfig-default-gids.patch
rename to 3.14.20/4450_grsec-kconfig-default-gids.patch
diff --git a/3.14.19/4465_selinux-avc_audit-log-curr_ip.patch b/3.14.20/4465_selinux-avc_audit-log-curr_ip.patch
similarity index 100%
rename from 3.14.19/4465_selinux-avc_audit-log-curr_ip.patch
rename to 3.14.20/4465_selinux-avc_audit-log-curr_ip.patch
diff --git a/3.14.19/4470_disable-compat_vdso.patch b/3.14.20/4470_disable-compat_vdso.patch
similarity index 100%
rename from 3.14.19/4470_disable-compat_vdso.patch
rename to 3.14.20/4470_disable-compat_vdso.patch
diff --git a/3.14.19/4475_emutramp_default_on.patch b/3.14.20/4475_emutramp_default_on.patch
similarity index 100%
rename from 3.14.19/4475_emutramp_default_on.patch
rename to 3.14.20/4475_emutramp_default_on.patch
diff --git a/3.16.3/0000_README b/3.16.4/0000_README
similarity index 96%
rename from 3.16.3/0000_README
rename to 3.16.4/0000_README
index 47d2ef9..bf10499 100644
--- a/3.16.3/0000_README
+++ b/3.16.4/0000_README
@@ -2,7 +2,7 @@ README
-----------------------------------------------------------------------------
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 4420_grsecurity-3.0-3.16.3-201409282025.patch
+Patch: 4420_grsecurity-3.0-3.16.4-201410062041.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.16.3/4420_grsecurity-3.0-3.16.3-201409282025.patch b/3.16.4/4420_grsecurity-3.0-3.16.4-201410062041.patch
similarity index 98%
rename from 3.16.3/4420_grsecurity-3.0-3.16.3-201409282025.patch
rename to 3.16.4/4420_grsecurity-3.0-3.16.4-201410062041.patch
index 9207cde..5883945 100644
--- a/3.16.3/4420_grsecurity-3.0-3.16.3-201409282025.patch
+++ b/3.16.4/4420_grsecurity-3.0-3.16.4-201410062041.patch
@@ -249,7 +249,7 @@ index a1d0d7a..61d65cc 100644
This function is only used if DCACHE_MANAGE_TRANSIT is set on the
dentry being transited from.
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
-index b7fa2f5..90cd9f8 100644
+index f896f68..817e3ea 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1138,6 +1138,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
@@ -799,7 +799,7 @@ index ee78eba..a06b48d 100644
Daniel Borkmann <dborkman@redhat.com>
-Alexei Starovoitov <ast@plumgrid.com>
diff --git a/Makefile b/Makefile
-index 9b25a83..e77c38a 100644
+index e75c75f..ebe05e8 100644
--- a/Makefile
+++ b/Makefile
@@ -303,8 +303,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -2107,7 +2107,7 @@ index 75fe66b..ba3dee4 100644
#endif
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
-index fd43f7f..a817f5a 100644
+index 79ecb4f..6b0bbdd 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -116,7 +116,7 @@ struct cpu_cache_fns {
@@ -2715,94 +2715,27 @@ index e4e4208..086684a 100644
/*
* Change these and you break ASM code in entry-common.S
diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
-index 83259b8..8c7e01d 100644
+index 5f833f7..76e6644 100644
--- a/arch/arm/include/asm/tls.h
+++ b/arch/arm/include/asm/tls.h
-@@ -1,6 +1,9 @@
- #ifndef __ASMARM_TLS_H
- #define __ASMARM_TLS_H
+@@ -3,6 +3,7 @@
+
+ #include <linux/compiler.h>
+ #include <asm/thread_info.h>
++#include <asm/pgtable.h>
-+#include <linux/compiler.h>
-+#include <asm/thread_info.h>
-+
#ifdef __ASSEMBLY__
#include <asm/asm-offsets.h>
- .macro switch_tls_none, base, tp, tpuser, tmp1, tmp2
-@@ -50,6 +53,50 @@
- #endif
-
- #ifndef __ASSEMBLY__
-+#include <asm/pgtable.h>
-+
-+static inline void set_tls(unsigned long val)
-+{
-+ struct thread_info *thread;
-+
-+ thread = current_thread_info();
-+
-+ thread->tp_value[0] = val;
-+
-+ /*
-+ * This code runs with preemption enabled and therefore must
-+ * be reentrant with respect to switch_tls.
-+ *
-+ * We need to ensure ordering between the shadow state and the
-+ * hardware state, so that we don't corrupt the hardware state
-+ * with a stale shadow state during context switch.
-+ *
-+ * If we're preempted here, switch_tls will load TPIDRURO from
-+ * thread_info upon resuming execution and the following mcr
-+ * is merely redundant.
-+ */
-+ barrier();
-+
-+ if (!tls_emu) {
-+ if (has_tls_reg) {
-+ asm("mcr p15, 0, %0, c13, c0, 3"
-+ : : "r" (val));
-+ } else {
-+ /*
-+ * User space must never try to access this
-+ * directly. Expect your app to break
-+ * eventually if you do so. The user helper
-+ * at 0xffff0fe0 must be used instead. (see
-+ * entry-armv.S for details)
-+ */
+@@ -89,7 +90,9 @@ static inline void set_tls(unsigned long val)
+ * at 0xffff0fe0 must be used instead. (see
+ * entry-armv.S for details)
+ */
+ pax_open_kernel();
-+ *((unsigned int *)0xffff0ff0) = val;
+ *((unsigned int *)0xffff0ff0) = val;
+ pax_close_kernel();
-+ }
-+
-+ }
-+}
-+
- static inline unsigned long get_tpuser(void)
- {
- unsigned long reg = 0;
-@@ -59,5 +106,23 @@ static inline unsigned long get_tpuser(void)
-
- return reg;
- }
-+
-+static inline void set_tpuser(unsigned long val)
-+{
-+ /* Since TPIDRURW is fully context-switched (unlike TPIDRURO),
-+ * we need not update thread_info.
-+ */
-+ if (has_tls_reg && !tls_emu) {
-+ asm("mcr p15, 0, %0, c13, c0, 2"
-+ : : "r" (val));
-+ }
-+}
-+
-+static inline void flush_tls(void)
-+{
-+ set_tls(0);
-+ set_tpuser(0);
-+}
-+
#endif
- #endif /* __ASMARM_TLS_H */
+ }
+
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 75d9579..b5b40e4 100644
--- a/arch/arm/include/asm/uaccess.h
@@ -3299,7 +3232,7 @@ index 7139d4a..feaf37f 100644
#if defined(CONFIG_OABI_COMPAT)
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
-index 5d702f8..f5fc51a 100644
+index 0325dbf..e8e47ff 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -188,6 +188,60 @@
@@ -3371,9 +3304,9 @@ index 5d702f8..f5fc51a 100644
+ pax_exit_kernel
+
msr spsr_cxsf, \rpsr
- #if defined(CONFIG_CPU_V6)
- ldr r0, [sp]
-@@ -265,6 +322,9 @@
+ #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
+ @ We must avoid clrex due to Cortex-A15 erratum #830321
+@@ -260,6 +317,9 @@
blne trace_hardirqs_off
#endif
.endif
@@ -3382,7 +3315,7 @@ index 5d702f8..f5fc51a 100644
+
ldr lr, [sp, #S_SP] @ top of the stack
ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
- clrex @ clear the exclusive monitor
+
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index 918875d..cd5fa27 100644
--- a/arch/arm/kernel/fiq.c
@@ -3478,7 +3411,7 @@ index 07314af..c46655c 100644
flush_icache_range((uintptr_t)(addr),
(uintptr_t)(addr) + size);
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
-index 81ef686..7af43a0 100644
+index a35f6eb..7af43a0 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -212,6 +212,7 @@ void machine_power_off(void)
@@ -3509,16 +3442,7 @@ index 81ef686..7af43a0 100644
printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
"sp : %08lx ip : %08lx fp : %08lx\n",
regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
-@@ -334,6 +335,8 @@ void flush_thread(void)
- memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
- memset(&thread->fpstate, 0, sizeof(union fp_state));
-
-+ flush_tls();
-+
- thread_notify(THREAD_NOTIFY_FLUSH, thread);
- }
-
-@@ -425,12 +428,6 @@ unsigned long get_wchan(struct task_struct *p)
+@@ -427,12 +428,6 @@ unsigned long get_wchan(struct task_struct *p)
return 0;
}
@@ -3531,7 +3455,7 @@ index 81ef686..7af43a0 100644
#ifdef CONFIG_MMU
#ifdef CONFIG_KUSER_HELPERS
/*
-@@ -446,7 +443,7 @@ static struct vm_area_struct gate_vma = {
+@@ -448,7 +443,7 @@ static struct vm_area_struct gate_vma = {
static int __init gate_vma_init(void)
{
@@ -3540,7 +3464,7 @@ index 81ef686..7af43a0 100644
return 0;
}
arch_initcall(gate_vma_init);
-@@ -472,41 +469,16 @@ int in_gate_area_no_mm(unsigned long addr)
+@@ -474,41 +469,16 @@ int in_gate_area_no_mm(unsigned long addr)
const char *arch_vma_name(struct vm_area_struct *vma)
{
@@ -3764,21 +3688,8 @@ index 7a3be1d..b00c7de 100644
pr_debug("CPU ITCM: copied code from %p - %p\n",
start, end);
itcm_present = true;
-diff --git a/arch/arm/kernel/thumbee.c b/arch/arm/kernel/thumbee.c
-index 7b8403b..80f0d69 100644
---- a/arch/arm/kernel/thumbee.c
-+++ b/arch/arm/kernel/thumbee.c
-@@ -45,7 +45,7 @@ static int thumbee_notifier(struct notifier_block *self, unsigned long cmd, void
-
- switch (cmd) {
- case THREAD_NOTIFY_FLUSH:
-- thread->thumbee_state = 0;
-+ teehbr_write(0);
- break;
- case THREAD_NOTIFY_SWITCH:
- current_thread_info()->thumbee_state = teehbr_read();
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
-index abd2fc0..1e2696e 100644
+index da11b28..1e2696e 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -62,7 +62,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
@@ -3809,38 +3720,7 @@ index abd2fc0..1e2696e 100644
if (signr)
do_exit(signr);
}
-@@ -579,7 +584,6 @@ do_cache_op(unsigned long start, unsigned long end, int flags)
- #define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
- asmlinkage int arm_syscall(int no, struct pt_regs *regs)
- {
-- struct thread_info *thread = current_thread_info();
- siginfo_t info;
-
- if ((no >> 16) != (__ARM_NR_BASE>> 16))
-@@ -630,21 +634,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
- return regs->ARM_r0;
-
- case NR(set_tls):
-- thread->tp_value[0] = regs->ARM_r0;
-- if (tls_emu)
-- return 0;
-- if (has_tls_reg) {
-- asm ("mcr p15, 0, %0, c13, c0, 3"
-- : : "r" (regs->ARM_r0));
-- } else {
-- /*
-- * User space must never try to access this directly.
-- * Expect your app to break eventually if you do so.
-- * The user helper at 0xffff0fe0 must be used instead.
-- * (see entry-armv.S for details)
-- */
-- *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
-- }
-+ set_tls(regs->ARM_r0);
- return 0;
-
- #ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG
-@@ -900,7 +890,11 @@ void __init early_trap_init(void *vectors_base)
+@@ -885,7 +890,11 @@ void __init early_trap_init(void *vectors_base)
kuser_init(vectors_base);
flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
@@ -4327,7 +4207,7 @@ index 78c02b3..c94109a 100644
struct omap_device *omap_device_alloc(struct platform_device *pdev,
struct omap_hwmod **ohs, int oh_cnt);
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
-index da1b256..ab2a327 100644
+index 8fd87a3..099ed60 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -194,10 +194,10 @@ struct omap_hwmod_soc_ops {
@@ -4459,10 +4339,10 @@ index c348eae..456a1a4 100644
If all of the binaries and libraries which run on your platform
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
-index b8cb1a2..6a5624a 100644
+index 33ca980..6b23b44 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
-@@ -214,10 +214,12 @@ union offset_union {
+@@ -215,10 +215,12 @@ union offset_union {
#define __get16_unaligned_check(ins,val,addr) \
do { \
unsigned int err = 0, v, a = addr; \
@@ -4475,7 +4355,7 @@ index b8cb1a2..6a5624a 100644
if (err) \
goto fault; \
} while (0)
-@@ -231,6 +233,7 @@ union offset_union {
+@@ -232,6 +234,7 @@ union offset_union {
#define __get32_unaligned_check(ins,val,addr) \
do { \
unsigned int err = 0, v, a = addr; \
@@ -4483,7 +4363,7 @@ index b8cb1a2..6a5624a 100644
__get8_unaligned_check(ins,v,a,err); \
val = v << ((BE) ? 24 : 0); \
__get8_unaligned_check(ins,v,a,err); \
-@@ -239,6 +242,7 @@ union offset_union {
+@@ -240,6 +243,7 @@ union offset_union {
val |= v << ((BE) ? 8 : 16); \
__get8_unaligned_check(ins,v,a,err); \
val |= v << ((BE) ? 0 : 24); \
@@ -4491,7 +4371,7 @@ index b8cb1a2..6a5624a 100644
if (err) \
goto fault; \
} while (0)
-@@ -252,6 +256,7 @@ union offset_union {
+@@ -253,6 +257,7 @@ union offset_union {
#define __put16_unaligned_check(ins,val,addr) \
do { \
unsigned int err = 0, v = val, a = addr; \
@@ -4499,7 +4379,7 @@ index b8cb1a2..6a5624a 100644
__asm__( FIRST_BYTE_16 \
ARM( "1: "ins" %1, [%2], #1\n" ) \
THUMB( "1: "ins" %1, [%2]\n" ) \
-@@ -271,6 +276,7 @@ union offset_union {
+@@ -272,6 +277,7 @@ union offset_union {
" .popsection\n" \
: "=r" (err), "=&r" (v), "=&r" (a) \
: "0" (err), "1" (v), "2" (a)); \
@@ -4507,7 +4387,7 @@ index b8cb1a2..6a5624a 100644
if (err) \
goto fault; \
} while (0)
-@@ -284,6 +290,7 @@ union offset_union {
+@@ -285,6 +291,7 @@ union offset_union {
#define __put32_unaligned_check(ins,val,addr) \
do { \
unsigned int err = 0, v = val, a = addr; \
@@ -4515,7 +4395,7 @@ index b8cb1a2..6a5624a 100644
__asm__( FIRST_BYTE_32 \
ARM( "1: "ins" %1, [%2], #1\n" ) \
THUMB( "1: "ins" %1, [%2]\n" ) \
-@@ -313,6 +320,7 @@ union offset_union {
+@@ -314,6 +321,7 @@ union offset_union {
" .popsection\n" \
: "=r" (err), "=&r" (v), "=&r" (a) \
: "0" (err), "1" (v), "2" (a)); \
@@ -14990,7 +14870,7 @@ index d21ff89..6da8e6e 100644
set_fs(KERNEL_DS);
has_dumped = 1;
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
-index f9e181a..b0df8b3 100644
+index f9e181a..300544c 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -218,7 +218,7 @@ asmlinkage long sys32_sigreturn(void)
@@ -15011,9 +14891,14 @@ index f9e181a..b0df8b3 100644
return (void __user *) sp;
}
-@@ -386,7 +386,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
- restorer = current->mm->context.vdso +
- selected_vdso32->sym___kernel_sigreturn;
+@@ -383,10 +383,10 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
+ } else {
+ /* Return stub is in 32bit vsyscall page */
+ if (current->mm->context.vdso)
+- restorer = current->mm->context.vdso +
+- selected_vdso32->sym___kernel_sigreturn;
++ restorer = (void __force_user *)(current->mm->context.vdso +
++ selected_vdso32->sym___kernel_sigreturn);
else
- restorer = &frame->retcode;
+ restorer = frame->retcode;
@@ -15042,12 +14927,13 @@ index f9e181a..b0df8b3 100644
if (ksig->ka.sa.sa_flags & SA_RESTORER)
restorer = ksig->ka.sa.sa_restorer;
-- else
+ else if (current->mm->context.vdso)
+ /* Return stub is in 32bit vsyscall page */
- restorer = current->mm->context.vdso +
- selected_vdso32->sym___kernel_rt_sigreturn;
-+ else
++ restorer = (void __force_user *)(current->mm->context.vdso +
++ selected_vdso32->sym___kernel_rt_sigreturn);
+ else
+- restorer = current->mm->context.vdso +
+- selected_vdso32->sym___kernel_rt_sigreturn;
+ restorer = frame->retcode;
put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
@@ -18351,7 +18237,7 @@ index 81bb91b..9392125 100644
/*
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
-index aa97a07..f169e5b 100644
+index aa97a07..5c53c32 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -46,6 +46,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
@@ -18362,7 +18248,7 @@ index aa97a07..f169e5b 100644
#define pgd_clear(pgd) native_pgd_clear(pgd)
#endif
-@@ -83,12 +84,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
+@@ -83,12 +84,53 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
#define arch_end_context_switch(prev) do {} while(0)
@@ -18383,6 +18269,7 @@ index aa97a07..f169e5b 100644
+ cr0 = read_cr0() ^ X86_CR0_WP;
+ BUG_ON(cr0 & X86_CR0_WP);
+ write_cr0(cr0);
++ barrier();
+ return cr0 ^ X86_CR0_WP;
+}
+
@@ -18390,6 +18277,7 @@ index aa97a07..f169e5b 100644
+{
+ unsigned long cr0;
+
++ barrier();
+ cr0 = read_cr0() ^ X86_CR0_WP;
+ BUG_ON(!(cr0 & X86_CR0_WP));
+ write_cr0(cr0);
@@ -18414,7 +18302,7 @@ index aa97a07..f169e5b 100644
static inline int pte_dirty(pte_t pte)
{
return pte_flags(pte) & _PAGE_DIRTY;
-@@ -155,6 +195,11 @@ static inline unsigned long pud_pfn(pud_t pud)
+@@ -155,6 +197,11 @@ static inline unsigned long pud_pfn(pud_t pud)
return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
}
@@ -18426,7 +18314,7 @@ index aa97a07..f169e5b 100644
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
static inline int pmd_large(pmd_t pte)
-@@ -208,9 +253,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
+@@ -208,9 +255,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
return pte_clear_flags(pte, _PAGE_RW);
}
@@ -18457,7 +18345,7 @@ index aa97a07..f169e5b 100644
}
static inline pte_t pte_mkdirty(pte_t pte)
-@@ -440,6 +505,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
+@@ -440,6 +507,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
#endif
#ifndef __ASSEMBLY__
@@ -18474,7 +18362,7 @@ index aa97a07..f169e5b 100644
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>
-@@ -586,7 +661,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
+@@ -586,7 +663,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
* Currently stuck as a macro due to indirect forward reference to
* linux/mmzone.h's __section_mem_map_addr() definition:
*/
@@ -18483,7 +18371,7 @@ index aa97a07..f169e5b 100644
/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
-@@ -626,7 +701,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
+@@ -626,7 +703,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
* Currently stuck as a macro due to indirect forward reference to
* linux/mmzone.h's __section_mem_map_addr() definition:
*/
@@ -18492,7 +18380,7 @@ index aa97a07..f169e5b 100644
/* to find an entry in a page-table-directory. */
static inline unsigned long pud_index(unsigned long address)
-@@ -641,7 +716,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
+@@ -641,7 +718,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
static inline int pgd_bad(pgd_t pgd)
{
@@ -18501,7 +18389,7 @@ index aa97a07..f169e5b 100644
}
static inline int pgd_none(pgd_t pgd)
-@@ -664,7 +739,12 @@ static inline int pgd_none(pgd_t pgd)
+@@ -664,7 +741,12 @@ static inline int pgd_none(pgd_t pgd)
* pgd_offset() returns a (pgd_t *)
* pgd_index() is used get the offset into the pgd page's array of pgd_t's;
*/
@@ -18515,7 +18403,7 @@ index aa97a07..f169e5b 100644
/*
* a shortcut which implies the use of the kernel's pgd, instead
* of a process's
-@@ -675,6 +755,23 @@ static inline int pgd_none(pgd_t pgd)
+@@ -675,6 +757,23 @@ static inline int pgd_none(pgd_t pgd)
#define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
@@ -18539,7 +18427,7 @@ index aa97a07..f169e5b 100644
#ifndef __ASSEMBLY__
extern int direct_gbpages;
-@@ -841,11 +938,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+@@ -841,11 +940,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
* dst and src can be on the same page, but the range must not overlap,
* and must not cross a page boundary.
*/
@@ -18645,10 +18533,10 @@ index ed5903b..c7fe163 100644
#define MODULES_END VMALLOC_END
#define MODULES_LEN (MODULES_VADDR - MODULES_END)
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
-index 5be9063..0c42843 100644
+index 3874693..0c42843 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
-@@ -16,10 +16,15 @@
+@@ -16,11 +16,15 @@
extern pud_t level3_kernel_pgt[512];
extern pud_t level3_ident_pgt[512];
@@ -18659,14 +18547,14 @@ index 5be9063..0c42843 100644
extern pmd_t level2_kernel_pgt[512];
extern pmd_t level2_fixmap_pgt[512];
-extern pmd_t level2_ident_pgt[512];
--extern pgd_t init_level4_pgt[];
+extern pmd_t level2_ident_pgt[512*2];
-+extern pte_t level1_fixmap_pgt[512];
+ extern pte_t level1_fixmap_pgt[512];
+-extern pgd_t init_level4_pgt[];
+extern pgd_t init_level4_pgt[512];
#define swapper_pg_dir init_level4_pgt
-@@ -61,7 +66,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
+@@ -62,7 +66,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
@@ -18676,7 +18564,7 @@ index 5be9063..0c42843 100644
}
static inline void native_pmd_clear(pmd_t *pmd)
-@@ -97,7 +104,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
+@@ -98,7 +104,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
@@ -18686,7 +18574,7 @@ index 5be9063..0c42843 100644
}
static inline void native_pud_clear(pud_t *pud)
-@@ -107,6 +116,13 @@ static inline void native_pud_clear(pud_t *pud)
+@@ -108,6 +116,13 @@ static inline void native_pud_clear(pud_t *pud)
static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
@@ -27723,7 +27611,7 @@ index be8e1bd..a3d93fa 100644
.smp_prepare_cpus = native_smp_prepare_cpus,
.smp_cpus_done = native_smp_cpus_done,
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
-index 5492798..a3bd4f2 100644
+index 215815b..9a814fd 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -230,14 +230,17 @@ static void notrace start_secondary(void *unused)
@@ -37763,7 +37651,7 @@ index ffb101e..98c0ecf 100644
#ifdef CONFIG_ACPI_NUMA
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
-index e8a1201..e1fb520 100644
+index 16fb009..e1fb520 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -379,7 +379,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
@@ -37775,63 +37663,17 @@ index e8a1201..e1fb520 100644
{
if (val & _PAGE_PRESENT) {
unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
-@@ -1866,12 +1866,11 @@ static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
- *
- * We can construct this by grafting the Xen provided pagetable into
- * head_64.S's preconstructed pagetables. We copy the Xen L2's into
-- * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt. This
-- * means that only the kernel has a physical mapping to start with -
-- * but that's enough to get __va working. We need to fill in the rest
-- * of the physical mapping once some sort of allocator has been set
-- * up.
-- * NOTE: for PVH, the page tables are native.
-+ * level2_ident_pgt, and level2_kernel_pgt. This means that only the
-+ * kernel has a physical mapping to start with - but that's enough to
-+ * get __va working. We need to fill in the rest of the physical
-+ * mapping once some sort of allocator has been set up. NOTE: for
-+ * PVH, the page tables are native.
- */
- void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
- {
-@@ -1902,8 +1901,14 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
- /* L3_i[0] -> level2_ident_pgt */
- convert_pfn_mfn(level3_ident_pgt);
+@@ -1903,6 +1903,9 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
/* L3_k[510] -> level2_kernel_pgt
-- * L3_i[511] -> level2_fixmap_pgt */
-+ * L3_k[511] -> level2_fixmap_pgt */
+ * L3_k[511] -> level2_fixmap_pgt */
convert_pfn_mfn(level3_kernel_pgt);
+ convert_pfn_mfn(level3_vmalloc_start_pgt);
+ convert_pfn_mfn(level3_vmalloc_end_pgt);
+ convert_pfn_mfn(level3_vmemmap_pgt);
-+
-+ /* L3_k[511][506] -> level1_fixmap_pgt */
-+ convert_pfn_mfn(level2_fixmap_pgt);
- }
- /* We get [511][511] and have Xen's version of level2_kernel_pgt */
- l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
-@@ -1913,30 +1918,29 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
- addr[1] = (unsigned long)l3;
- addr[2] = (unsigned long)l2;
- /* Graft it onto L4[272][0]. Note that we creating an aliasing problem:
-- * Both L4[272][0] and L4[511][511] have entries that point to the same
-+ * Both L4[272][0] and L4[511][510] have entries that point to the same
- * L2 (PMD) tables. Meaning that if you modify it in __va space
- * it will be also modified in the __ka space! (But if you just
- * modify the PMD table to point to other PTE's or none, then you
- * are OK - which is what cleanup_highmap does) */
- copy_page(level2_ident_pgt, l2);
-- /* Graft it onto L4[511][511] */
-+ /* Graft it onto L4[511][510] */
- copy_page(level2_kernel_pgt, l2);
-
-- /* Get [511][510] and graft that in level2_fixmap_pgt */
-- l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
-- l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
-- copy_page(level2_fixmap_pgt, l2);
-- /* Note that we don't do anything with level1_fixmap_pgt which
-- * we don't need. */
- if (!xen_feature(XENFEAT_auto_translated_physmap)) {
- /* Make pagetable pieces RO */
+
+ /* L3_k[511][506] -> level1_fixmap_pgt */
+ convert_pfn_mfn(level2_fixmap_pgt);
+@@ -1929,8 +1932,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
@@ -37843,11 +37685,8 @@ index e8a1201..e1fb520 100644
+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
-+ set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
-
- /* Pin down new L4 */
- pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
-@@ -2120,6 +2124,7 @@ static void __init xen_post_allocator_init(void)
+ set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
+@@ -2117,6 +2124,7 @@ static void __init xen_post_allocator_init(void)
pv_mmu_ops.set_pud = xen_set_pud;
#if PAGETABLE_LEVELS == 4
pv_mmu_ops.set_pgd = xen_set_pgd;
@@ -37855,7 +37694,7 @@ index e8a1201..e1fb520 100644
#endif
/* This will work as long as patching hasn't happened yet
-@@ -2198,6 +2203,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
+@@ -2195,6 +2203,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
.pud_val = PV_CALLEE_SAVE(xen_pud_val),
.make_pud = PV_CALLEE_SAVE(xen_make_pud),
.set_pgd = xen_set_pgd_hyper,
@@ -38134,25 +37973,9 @@ index f890d43..97b0482 100644
bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
else
diff --git a/block/blk-mq.c b/block/blk-mq.c
-index ad69ef6..034c0ff 100644
+index 06ac59f..034c0ff 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
-@@ -219,7 +219,6 @@ __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
- if (tag != BLK_MQ_TAG_FAIL) {
- rq = data->hctx->tags->rqs[tag];
-
-- rq->cmd_flags = 0;
- if (blk_mq_tag_busy(data->hctx)) {
- rq->cmd_flags = REQ_MQ_INFLIGHT;
- atomic_inc(&data->hctx->nr_active);
-@@ -274,6 +273,7 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
-
- if (rq->cmd_flags & REQ_MQ_INFLIGHT)
- atomic_dec(&hctx->nr_active);
-+ rq->cmd_flags = 0;
-
- clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
- blk_mq_put_tag(hctx, tag, &ctx->last_tag);
@@ -973,14 +973,9 @@ void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
hctx = q->mq_ops->map_queue(q, ctx->cpu);
@@ -38171,15 +37994,6 @@ index ad69ef6..034c0ff 100644
if (run_queue)
blk_mq_run_hw_queue(hctx, async);
-@@ -1411,6 +1406,8 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
- left -= to_do * rq_size;
- for (j = 0; j < to_do; j++) {
- tags->rqs[i] = p;
-+ tags->rqs[i]->atomic_flags = 0;
-+ tags->rqs[i]->cmd_flags = 0;
- if (set->ops->init_request) {
- if (set->ops->init_request(set->driver_data,
- tags->rqs[i], hctx_idx, i,
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 53b1737..08177d2e 100644
--- a/block/blk-softirq.c
@@ -38247,10 +38061,10 @@ index a0926a6..b2b14b2 100644
err = -EFAULT;
goto out;
diff --git a/block/genhd.c b/block/genhd.c
-index 791f419..89f21c4 100644
+index e6723bd..703e4ac 100644
--- a/block/genhd.c
+++ b/block/genhd.c
-@@ -467,21 +467,24 @@ static char *bdevt_str(dev_t devt, char *buf)
+@@ -469,21 +469,24 @@ static char *bdevt_str(dev_t devt, char *buf)
/*
* Register device numbers dev..(dev+range-1)
@@ -38558,7 +38372,7 @@ index d72ce04..d6ab3c2 100644
unsigned long timeout_msec)
{
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
-index 677c0c1..354b89b 100644
+index e7f30b5..a8cc9cd 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -98,7 +98,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
@@ -41376,10 +41190,10 @@ index b0c18ed..1713a80 100644
cpu_notifier_register_begin();
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
-index 6f02485..13684ae 100644
+index 21ab8bc..90ee9f8 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
-@@ -2100,7 +2100,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
+@@ -2103,7 +2103,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
}
mutex_lock(&cpufreq_governor_mutex);
@@ -41388,7 +41202,7 @@ index 6f02485..13684ae 100644
mutex_unlock(&cpufreq_governor_mutex);
return;
}
-@@ -2316,7 +2316,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
+@@ -2319,7 +2319,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
@@ -41397,7 +41211,7 @@ index 6f02485..13684ae 100644
.notifier_call = cpufreq_cpu_callback,
};
-@@ -2356,13 +2356,17 @@ int cpufreq_boost_trigger_state(int state)
+@@ -2359,13 +2359,17 @@ int cpufreq_boost_trigger_state(int state)
return 0;
write_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -41417,7 +41231,7 @@ index 6f02485..13684ae 100644
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
pr_err("%s: Cannot %s BOOST\n",
-@@ -2419,8 +2423,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
+@@ -2422,8 +2426,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
pr_debug("trying to register driver %s\n", driver_data->name);
@@ -41431,7 +41245,7 @@ index 6f02485..13684ae 100644
write_lock_irqsave(&cpufreq_driver_lock, flags);
if (cpufreq_driver) {
-@@ -2435,8 +2442,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
+@@ -2438,8 +2445,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
* Check if driver provides function to enable boost -
* if not, use cpufreq_boost_set_sw as default
*/
@@ -42308,7 +42122,7 @@ index 66cbcc1..0c5e622 100644
return -EINVAL;
}
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
-index 2ebc907..01bdd6e 100644
+index 810c84f..2c9310d 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1482,8 +1482,10 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
@@ -42688,10 +42502,10 @@ index 2e0613e..a8b94d9 100644
return ret;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
-index f0be855..94e82d9 100644
+index ffaf8be..155f1bb 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
-@@ -11604,13 +11604,13 @@ struct intel_quirk {
+@@ -11623,13 +11623,13 @@ struct intel_quirk {
int subsystem_vendor;
int subsystem_device;
void (*hook)(struct drm_device *dev);
@@ -42707,7 +42521,7 @@ index f0be855..94e82d9 100644
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
-@@ -11618,18 +11618,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
+@@ -11637,18 +11637,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
return 1;
}
@@ -42868,10 +42682,10 @@ index 462679a..88e32a7 100644
if (nr < DRM_COMMAND_BASE)
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
-index ab0228f..20b756b 100644
+index 7e185c1..8f74e5a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
-@@ -130,11 +130,11 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
+@@ -131,11 +131,11 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
}
const struct ttm_mem_type_manager_func nouveau_vram_manager = {
@@ -42888,7 +42702,7 @@ index ab0228f..20b756b 100644
};
static int
-@@ -199,11 +199,11 @@ nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
+@@ -201,11 +201,11 @@ nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
}
const struct ttm_mem_type_manager_func nouveau_gart_manager = {
@@ -42905,7 +42719,7 @@ index ab0228f..20b756b 100644
};
#include <core/subdev/vm/nv04.h>
-@@ -271,11 +271,11 @@ nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
+@@ -274,11 +274,11 @@ nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
}
const struct ttm_mem_type_manager_func nv04_gart_manager = {
@@ -42923,7 +42737,7 @@ index ab0228f..20b756b 100644
int
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
-index 4f4c3fe..2cce716 100644
+index c110b2c..f237b7a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -70,7 +70,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
@@ -43285,7 +43099,7 @@ index 4a85bb6..aaea819 100644
if (regcomp
(&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
-index 697add2..9860f5b 100644
+index 52a0cfd..0a63ced 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1169,7 +1169,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
@@ -43425,7 +43239,7 @@ index c8a8a51..219dacc 100644
vma->vm_ops = &radeon_ttm_vm_ops;
return 0;
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
-index ef40381..347463e 100644
+index 48c3bc4..e72d5a5 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -1173,7 +1173,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
@@ -43438,7 +43252,7 @@ index ef40381..347463e 100644
err = drm_debugfs_create_files(dc->debugfs_files,
ARRAY_SIZE(debugfs_files),
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
-index bd56f2a..255af4b 100644
+index 97c409f..51e0de0 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -41,7 +41,7 @@ struct tegra_dsi {
@@ -43451,7 +43265,7 @@ index bd56f2a..255af4b 100644
struct dentry *debugfs;
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
-index ba067bb..23afbbd 100644
+index ffe2654..03c7b1c 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -60,7 +60,7 @@ struct tegra_hdmi {
@@ -43464,10 +43278,10 @@ index ba067bb..23afbbd 100644
struct dentry *debugfs;
};
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
-index bd850c9..d9f3573 100644
+index 9e103a48..0e117f3 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
-@@ -146,10 +146,10 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
+@@ -147,10 +147,10 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
}
const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
@@ -43505,22 +43319,6 @@ index dbc2def..0a9f710 100644
if (unlikely(ret != 0)) {
kobject_put(&zone->kobj);
return ret;
-diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
-index 863bef9..cba15cf 100644
---- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
-+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
-@@ -391,9 +391,9 @@ out:
- static unsigned long
- ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
- {
-- static atomic_t start_pool = ATOMIC_INIT(0);
-+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
- unsigned i;
-- unsigned pool_offset = atomic_add_return(1, &start_pool);
-+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
- struct ttm_page_pool *pool;
- int shrink_pages = sc->nr_to_scan;
- unsigned long freed = 0;
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 3771763..883f206 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
@@ -43643,7 +43441,7 @@ index 6b252a8..5975dfe 100644
wait_queue_head_t fifo_queue;
int fence_queue_waiters; /* Protected by hw_mutex */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
-index 6ccd993..618d592 100644
+index 6eae14d..aa311b3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -154,7 +154,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
@@ -43655,7 +43453,7 @@ index 6ccd993..618d592 100644
iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
vmw_marker_queue_init(&fifo->marker_queue);
return vmw_fifo_send_fence(dev_priv, &dummy);
-@@ -372,7 +372,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
+@@ -373,7 +373,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
if (reserveable)
iowrite32(bytes, fifo_mem +
SVGA_FIFO_RESERVED);
@@ -43664,7 +43462,7 @@ index 6ccd993..618d592 100644
} else {
need_bounce = true;
}
-@@ -492,7 +492,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
+@@ -493,7 +493,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
fm = vmw_fifo_reserve(dev_priv, bytes);
if (unlikely(fm == NULL)) {
@@ -43673,7 +43471,7 @@ index 6ccd993..618d592 100644
ret = -ENOMEM;
(void)vmw_fallback_wait(dev_priv, false, true, *seqno,
false, 3*HZ);
-@@ -500,7 +500,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
+@@ -501,7 +501,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
}
do {
@@ -43683,10 +43481,10 @@ index 6ccd993..618d592 100644
if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
-index b1273e8..9c274fd 100644
+index 26f8bdd..90a0008 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
-@@ -164,9 +164,9 @@ static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
+@@ -165,9 +165,9 @@ static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
}
const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
@@ -43759,7 +43557,7 @@ index 8a8725c2..afed796 100644
marker = list_first_entry(&queue->head,
struct vmw_marker, head);
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
-index 6866448..2ad2b34 100644
+index 37ac7b5..d52a5c9 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -644,7 +644,7 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
@@ -43771,7 +43569,7 @@ index 6866448..2ad2b34 100644
{
/* copy over all the bus versions */
if (dev->bus && dev->bus->pm) {
-@@ -689,7 +689,7 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
+@@ -695,7 +695,7 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
return ret;
}
@@ -43802,51 +43600,6 @@ index 8ed66fd..38ff772 100644
hid_debug_register(hdev, dev_name(&hdev->dev));
ret = device_add(&hdev->dev);
-diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
-index ecc2cbf..29a74c1 100644
---- a/drivers/hid/hid-magicmouse.c
-+++ b/drivers/hid/hid-magicmouse.c
-@@ -290,6 +290,11 @@ static int magicmouse_raw_event(struct hid_device *hdev,
- if (size < 4 || ((size - 4) % 9) != 0)
- return 0;
- npoints = (size - 4) / 9;
-+ if (npoints > 15) {
-+ hid_warn(hdev, "invalid size value (%d) for TRACKPAD_REPORT_ID\n",
-+ size);
-+ return 0;
-+ }
- msc->ntouches = 0;
- for (ii = 0; ii < npoints; ii++)
- magicmouse_emit_touch(msc, ii, data + ii * 9 + 4);
-@@ -307,6 +312,11 @@ static int magicmouse_raw_event(struct hid_device *hdev,
- if (size < 6 || ((size - 6) % 8) != 0)
- return 0;
- npoints = (size - 6) / 8;
-+ if (npoints > 15) {
-+ hid_warn(hdev, "invalid size value (%d) for MOUSE_REPORT_ID\n",
-+ size);
-+ return 0;
-+ }
- msc->ntouches = 0;
- for (ii = 0; ii < npoints; ii++)
- magicmouse_emit_touch(msc, ii, data + ii * 8 + 6);
-diff --git a/drivers/hid/hid-picolcd_core.c b/drivers/hid/hid-picolcd_core.c
-index acbb0210..020df3c 100644
---- a/drivers/hid/hid-picolcd_core.c
-+++ b/drivers/hid/hid-picolcd_core.c
-@@ -350,6 +350,12 @@ static int picolcd_raw_event(struct hid_device *hdev,
- if (!data)
- return 1;
-
-+ if (size > 64) {
-+ hid_warn(hdev, "invalid size value (%d) for picolcd raw event\n",
-+ size);
-+ return 0;
-+ }
-+
- if (report->id == REPORT_KEY_STATE) {
- if (data->input_keys)
- ret = picolcd_raw_keypad(data, report, raw_data+1, size-1);
diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
index c13fb5b..55a3802 100644
--- a/drivers/hid/hid-wiimote-debug.c
@@ -44335,6 +44088,19 @@ index b170bdf..3c76427 100644
/* Wrapper access functions for multiplexed SMBus */
static DEFINE_MUTEX(nforce2_lock);
+diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
+index 93cfc83..b38b052 100644
+--- a/drivers/i2c/busses/i2c-rk3x.c
++++ b/drivers/i2c/busses/i2c-rk3x.c
+@@ -238,7 +238,7 @@ static void rk3x_i2c_fill_transmit_buf(struct rk3x_i2c *i2c)
+ for (i = 0; i < 8; ++i) {
+ val = 0;
+ for (j = 0; j < 4; ++j) {
+- if (i2c->processed == i2c->msg->len)
++ if ((i2c->processed == i2c->msg->len) && (cnt != 0))
+ break;
+
+ if (i2c->processed == 0 && cnt == 0)
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 80b47e8..1a6040d9 100644
--- a/drivers/i2c/i2c-dev.c
@@ -46664,10 +46430,10 @@ index 3e6d115..ffecdeb 100644
/*----------------------------------------------------------------*/
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
-index d7690f8..3db9ef1 100644
+index 55de4f6..b1c57fe 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
-@@ -1931,7 +1931,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
+@@ -1936,7 +1936,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
if (r1_sync_page_io(rdev, sect, s,
bio->bi_io_vec[idx].bv_page,
READ) != 0)
@@ -46676,8 +46442,8 @@ index d7690f8..3db9ef1 100644
}
sectors -= s;
sect += s;
-@@ -2165,7 +2165,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
- test_bit(In_sync, &rdev->flags)) {
+@@ -2170,7 +2170,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
+ !test_bit(Faulty, &rdev->flags)) {
if (r1_sync_page_io(rdev, sect, s,
conf->tmppage, READ)) {
- atomic_add(s, &rdev->corrected_errors);
@@ -48872,10 +48638,10 @@ index 738fa24..1568451 100644
+} __do_const;
#endif /* _DW_MMC_H_ */
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
-index 249ab80..9314ce1 100644
+index d3f05ad..ba7684b 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
-@@ -1507,7 +1507,9 @@ static int mmci_probe(struct amba_device *dev,
+@@ -1515,7 +1515,9 @@ static int mmci_probe(struct amba_device *dev,
mmc->caps |= MMC_CAP_CMD23;
if (variant->busy_detect) {
@@ -50112,6 +49878,71 @@ index 6cc37c1..fdd9d77 100644
spinlock_t request_lock;
struct list_head req_list;
+diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
+index d97d5f3..7edf976 100644
+--- a/drivers/net/hyperv/netvsc.c
++++ b/drivers/net/hyperv/netvsc.c
+@@ -708,6 +708,7 @@ int netvsc_send(struct hv_device *device,
+ unsigned int section_index = NETVSC_INVALID_INDEX;
+ u32 msg_size = 0;
+ struct sk_buff *skb;
++ u16 q_idx = packet->q_idx;
+
+
+ net_device = get_outbound_net_device(device);
+@@ -772,24 +773,24 @@ int netvsc_send(struct hv_device *device,
+
+ if (ret == 0) {
+ atomic_inc(&net_device->num_outstanding_sends);
+- atomic_inc(&net_device->queue_sends[packet->q_idx]);
++ atomic_inc(&net_device->queue_sends[q_idx]);
+
+ if (hv_ringbuf_avail_percent(&out_channel->outbound) <
+ RING_AVAIL_PERCENT_LOWATER) {
+ netif_tx_stop_queue(netdev_get_tx_queue(
+- ndev, packet->q_idx));
++ ndev, q_idx));
+
+ if (atomic_read(&net_device->
+- queue_sends[packet->q_idx]) < 1)
++ queue_sends[q_idx]) < 1)
+ netif_tx_wake_queue(netdev_get_tx_queue(
+- ndev, packet->q_idx));
++ ndev, q_idx));
+ }
+ } else if (ret == -EAGAIN) {
+ netif_tx_stop_queue(netdev_get_tx_queue(
+- ndev, packet->q_idx));
+- if (atomic_read(&net_device->queue_sends[packet->q_idx]) < 1) {
++ ndev, q_idx));
++ if (atomic_read(&net_device->queue_sends[q_idx]) < 1) {
+ netif_tx_wake_queue(netdev_get_tx_queue(
+- ndev, packet->q_idx));
++ ndev, q_idx));
+ ret = -ENOSPC;
+ }
+ } else {
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index 4fd71b7..f152972 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -387,6 +387,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
+ int hdr_offset;
+ u32 net_trans_info;
+ u32 hash;
++ u32 skb_length = skb->len;
+
+
+ /* We will atmost need two pages to describe the rndis
+@@ -562,7 +563,7 @@ do_send:
+
+ drop:
+ if (ret == 0) {
+- net->stats.tx_bytes += skb->len;
++ net->stats.tx_bytes += skb_length;
+ net->stats.tx_packets++;
+ } else {
+ kfree(packet);
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 99c527a..6a2ce38 100644
--- a/drivers/net/hyperv/rndis_filter.c
@@ -51897,7 +51728,7 @@ index e1e7026..d28dd33 100644
#define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
-index e3cf8a2..be1baf0 100644
+index 4170113..7cc5339 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -176,7 +176,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
@@ -53220,32 +53051,6 @@ index 1b3a094..068e683 100644
}
}
EXPORT_SYMBOL(fc_exch_update_stats);
-diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
-index 3d1bc67..874bc95 100644
---- a/drivers/scsi/libiscsi.c
-+++ b/drivers/scsi/libiscsi.c
-@@ -717,11 +717,21 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
- return NULL;
- }
-
-+ if (data_size > ISCSI_DEF_MAX_RECV_SEG_LEN) {
-+ iscsi_conn_printk(KERN_ERR, conn, "Invalid buffer len of %u for login task. Max len is %u\n", data_size, ISCSI_DEF_MAX_RECV_SEG_LEN);
-+ return NULL;
-+ }
-+
- task = conn->login_task;
- } else {
- if (session->state != ISCSI_STATE_LOGGED_IN)
- return NULL;
-
-+ if (data_size != 0) {
-+ iscsi_conn_printk(KERN_ERR, conn, "Can not send data buffer of len %u for op 0x%x\n", data_size, opcode);
-+ return NULL;
-+ }
-+
- BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
- BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
-
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 766098a..1c6c971 100644
--- a/drivers/scsi/libsas/sas_ata.c
@@ -56082,7 +55887,7 @@ index bec31e2..b8091cd 100644
wake_up(&usb_kill_urb_queue);
usb_put_urb(urb);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
-index 27f2171..e3dfc22 100644
+index 50e8545..d85ec5b 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -27,6 +27,7 @@
@@ -56162,7 +55967,7 @@ index 4d11449..f4ccabf 100644
INIT_LIST_HEAD(&dev->ep0.urb_list);
dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
-index dab7927..6f53afc 100644
+index f5b352a..fbe1785 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -615,8 +615,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
@@ -60068,18 +59873,10 @@ index 2946712..f737435 100644
&data);
if (!inode) {
diff --git a/fs/aio.c b/fs/aio.c
-index 1c9c5f0..c935d6e 100644
+index d72588a..aa2fb30 100644
--- a/fs/aio.c
+++ b/fs/aio.c
-@@ -141,6 +141,7 @@ struct kioctx {
-
- struct {
- unsigned tail;
-+ unsigned completed_events;
- spinlock_t completion_lock;
- } ____cacheline_aligned_in_smp;
-
-@@ -380,7 +381,7 @@ static int aio_setup_ring(struct kioctx *ctx)
+@@ -381,7 +381,7 @@ static int aio_setup_ring(struct kioctx *ctx)
size += sizeof(struct io_event) * nr_events;
nr_pages = PFN_UP(size);
@@ -60088,135 +59885,6 @@ index 1c9c5f0..c935d6e 100644
return -EINVAL;
file = aio_private_file(ctx, nr_pages);
-@@ -880,6 +881,68 @@ out:
- return ret;
- }
-
-+/* refill_reqs_available
-+ * Updates the reqs_available reference counts used for tracking the
-+ * number of free slots in the completion ring. This can be called
-+ * from aio_complete() (to optimistically update reqs_available) or
-+ * from aio_get_req() (the we're out of events case). It must be
-+ * called holding ctx->completion_lock.
-+ */
-+static void refill_reqs_available(struct kioctx *ctx, unsigned head,
-+ unsigned tail)
-+{
-+ unsigned events_in_ring, completed;
-+
-+ /* Clamp head since userland can write to it. */
-+ head %= ctx->nr_events;
-+ if (head <= tail)
-+ events_in_ring = tail - head;
-+ else
-+ events_in_ring = ctx->nr_events - (head - tail);
-+
-+ completed = ctx->completed_events;
-+ if (events_in_ring < completed)
-+ completed -= events_in_ring;
-+ else
-+ completed = 0;
-+
-+ if (!completed)
-+ return;
-+
-+ ctx->completed_events -= completed;
-+ put_reqs_available(ctx, completed);
-+}
-+
-+/* user_refill_reqs_available
-+ * Called to refill reqs_available when aio_get_req() encounters an
-+ * out of space in the completion ring.
-+ */
-+static void user_refill_reqs_available(struct kioctx *ctx)
-+{
-+ spin_lock_irq(&ctx->completion_lock);
-+ if (ctx->completed_events) {
-+ struct aio_ring *ring;
-+ unsigned head;
-+
-+ /* Access of ring->head may race with aio_read_events_ring()
-+ * here, but that's okay since whether we read the old version
-+ * or the new version, and either will be valid. The important
-+ * part is that head cannot pass tail since we prevent
-+ * aio_complete() from updating tail by holding
-+ * ctx->completion_lock. Even if head is invalid, the check
-+ * against ctx->completed_events below will make sure we do the
-+ * safe/right thing.
-+ */
-+ ring = kmap_atomic(ctx->ring_pages[0]);
-+ head = ring->head;
-+ kunmap_atomic(ring);
-+
-+ refill_reqs_available(ctx, head, ctx->tail);
-+ }
-+
-+ spin_unlock_irq(&ctx->completion_lock);
-+}
-+
- /* aio_get_req
- * Allocate a slot for an aio request.
- * Returns NULL if no requests are free.
-@@ -888,8 +951,11 @@ static inline struct kiocb *aio_get_req(struct kioctx *ctx)
- {
- struct kiocb *req;
-
-- if (!get_reqs_available(ctx))
-- return NULL;
-+ if (!get_reqs_available(ctx)) {
-+ user_refill_reqs_available(ctx);
-+ if (!get_reqs_available(ctx))
-+ return NULL;
-+ }
-
- req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO);
- if (unlikely(!req))
-@@ -948,8 +1014,8 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
- struct kioctx *ctx = iocb->ki_ctx;
- struct aio_ring *ring;
- struct io_event *ev_page, *event;
-+ unsigned tail, pos, head;
- unsigned long flags;
-- unsigned tail, pos;
-
- /*
- * Special case handling for sync iocbs:
-@@ -1010,10 +1076,14 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
- ctx->tail = tail;
-
- ring = kmap_atomic(ctx->ring_pages[0]);
-+ head = ring->head;
- ring->tail = tail;
- kunmap_atomic(ring);
- flush_dcache_page(ctx->ring_pages[0]);
-
-+ ctx->completed_events++;
-+ if (ctx->completed_events > 1)
-+ refill_reqs_available(ctx, head, tail);
- spin_unlock_irqrestore(&ctx->completion_lock, flags);
-
- pr_debug("added to ring %p at [%u]\n", iocb, tail);
-@@ -1028,7 +1098,6 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
-
- /* everything turned out well, dispose of the aiocb. */
- kiocb_free(iocb);
-- put_reqs_available(ctx, 1);
-
- /*
- * We have to order our ring_info tail store above and test
-@@ -1065,6 +1134,12 @@ static long aio_read_events_ring(struct kioctx *ctx,
- tail = ring->tail;
- kunmap_atomic(ring);
-
-+ /*
-+ * Ensure that once we've read the current tail pointer, that
-+ * we also see the events that were stored up to the tail.
-+ */
-+ smp_rmb();
-+
- pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);
-
- if (head == tail)
diff --git a/fs/attr.c b/fs/attr.c
index 6530ced..4a827e2 100644
--- a/fs/attr.c
@@ -61491,30 +61159,10 @@ index 7f5b41b..e589c13 100644
static inline int btrfs_need_log_full_commit(struct btrfs_fs_info *fs_info,
diff --git a/fs/buffer.c b/fs/buffer.c
-index eba6e4f..8d8230c 100644
+index 36fdceb..8d8230c 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
-@@ -1029,7 +1029,8 @@ grow_dev_page(struct block_device *bdev, sector_t block,
- bh = page_buffers(page);
- if (bh->b_size == size) {
- end_block = init_page_buffers(page, bdev,
-- index << sizebits, size);
-+ (sector_t)index << sizebits,
-+ size);
- goto done;
- }
- if (!try_to_free_buffers(page))
-@@ -1050,7 +1051,8 @@ grow_dev_page(struct block_device *bdev, sector_t block,
- */
- spin_lock(&inode->i_mapping->private_lock);
- link_dev_buffers(page, bh);
-- end_block = init_page_buffers(page, bdev, index << sizebits, size);
-+ end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits,
-+ size);
- spin_unlock(&inode->i_mapping->private_lock);
- done:
- ret = (block < end_block) ? 1 : -ENXIO;
-@@ -3429,7 +3431,7 @@ void __init buffer_init(void)
+@@ -3431,7 +3431,7 @@ void __init buffer_init(void)
bh_cachep = kmem_cache_create("buffer_head",
sizeof(struct buffer_head), 0,
(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
@@ -61524,10 +61172,10 @@ index eba6e4f..8d8230c 100644
/*
diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
-index d749731..0fda764 100644
+index fbb08e9..0fda764 100644
--- a/fs/cachefiles/bind.c
+++ b/fs/cachefiles/bind.c
-@@ -39,29 +39,27 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
+@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
args);
/* start by checking things over */
@@ -61543,36 +61191,8 @@ index d749731..0fda764 100644
cache->bcull_percent < cache->brun_percent &&
cache->brun_percent < 100);
- if (*args) {
-- pr_err("'bind' command doesn't take an argument");
-+ pr_err("'bind' command doesn't take an argument\n");
- return -EINVAL;
- }
-
- if (!cache->rootdirname) {
-- pr_err("No cache directory specified");
-+ pr_err("No cache directory specified\n");
- return -EINVAL;
- }
-
- /* don't permit already bound caches to be re-bound */
- if (test_bit(CACHEFILES_READY, &cache->flags)) {
-- pr_err("Cache already bound");
-+ pr_err("Cache already bound\n");
- return -EBUSY;
- }
-
-@@ -248,7 +246,7 @@ error_open_root:
- kmem_cache_free(cachefiles_object_jar, fsdef);
- error_root_object:
- cachefiles_end_secure(cache, saved_cred);
-- pr_err("Failed to register: %d", ret);
-+ pr_err("Failed to register: %d\n", ret);
- return ret;
- }
-
diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
-index b078d30..4a6852c 100644
+index ce1b115..4a6852c 100644
--- a/fs/cachefiles/daemon.c
+++ b/fs/cachefiles/daemon.c
@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
@@ -61593,15 +61213,6 @@ index b078d30..4a6852c 100644
return -EOPNOTSUPP;
/* drag the command string into the kernel so we can parse it */
-@@ -315,7 +315,7 @@ static unsigned int cachefiles_daemon_poll(struct file *file,
- static int cachefiles_daemon_range_error(struct cachefiles_cache *cache,
- char *args)
- {
-- pr_err("Free space limits must be in range 0%%<=stop<cull<run<100%%");
-+ pr_err("Free space limits must be in range 0%%<=stop<cull<run<100%%\n");
-
- return -EINVAL;
- }
@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
if (args[0] != '%' || args[1] != '\0')
return -EINVAL;
@@ -61620,114 +61231,8 @@ index b078d30..4a6852c 100644
return cachefiles_daemon_range_error(cache, args);
cache->bstop_percent = bstop;
-@@ -475,12 +475,12 @@ static int cachefiles_daemon_dir(struct cachefiles_cache *cache, char *args)
- _enter(",%s", args);
-
- if (!*args) {
-- pr_err("Empty directory specified");
-+ pr_err("Empty directory specified\n");
- return -EINVAL;
- }
-
- if (cache->rootdirname) {
-- pr_err("Second cache directory specified");
-+ pr_err("Second cache directory specified\n");
- return -EEXIST;
- }
-
-@@ -503,12 +503,12 @@ static int cachefiles_daemon_secctx(struct cachefiles_cache *cache, char *args)
- _enter(",%s", args);
-
- if (!*args) {
-- pr_err("Empty security context specified");
-+ pr_err("Empty security context specified\n");
- return -EINVAL;
- }
-
- if (cache->secctx) {
-- pr_err("Second security context specified");
-+ pr_err("Second security context specified\n");
- return -EINVAL;
- }
-
-@@ -531,7 +531,7 @@ static int cachefiles_daemon_tag(struct cachefiles_cache *cache, char *args)
- _enter(",%s", args);
-
- if (!*args) {
-- pr_err("Empty tag specified");
-+ pr_err("Empty tag specified\n");
- return -EINVAL;
- }
-
-@@ -562,12 +562,12 @@ static int cachefiles_daemon_cull(struct cachefiles_cache *cache, char *args)
- goto inval;
-
- if (!test_bit(CACHEFILES_READY, &cache->flags)) {
-- pr_err("cull applied to unready cache");
-+ pr_err("cull applied to unready cache\n");
- return -EIO;
- }
-
- if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
-- pr_err("cull applied to dead cache");
-+ pr_err("cull applied to dead cache\n");
- return -EIO;
- }
-
-@@ -587,11 +587,11 @@ static int cachefiles_daemon_cull(struct cachefiles_cache *cache, char *args)
-
- notdir:
- path_put(&path);
-- pr_err("cull command requires dirfd to be a directory");
-+ pr_err("cull command requires dirfd to be a directory\n");
- return -ENOTDIR;
-
- inval:
-- pr_err("cull command requires dirfd and filename");
-+ pr_err("cull command requires dirfd and filename\n");
- return -EINVAL;
- }
-
-@@ -614,7 +614,7 @@ static int cachefiles_daemon_debug(struct cachefiles_cache *cache, char *args)
- return 0;
-
- inval:
-- pr_err("debug command requires mask");
-+ pr_err("debug command requires mask\n");
- return -EINVAL;
- }
-
-@@ -634,12 +634,12 @@ static int cachefiles_daemon_inuse(struct cachefiles_cache *cache, char *args)
- goto inval;
-
- if (!test_bit(CACHEFILES_READY, &cache->flags)) {
-- pr_err("inuse applied to unready cache");
-+ pr_err("inuse applied to unready cache\n");
- return -EIO;
- }
-
- if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
-- pr_err("inuse applied to dead cache");
-+ pr_err("inuse applied to dead cache\n");
- return -EIO;
- }
-
-@@ -659,11 +659,11 @@ static int cachefiles_daemon_inuse(struct cachefiles_cache *cache, char *args)
-
- notdir:
- path_put(&path);
-- pr_err("inuse command requires dirfd to be a directory");
-+ pr_err("inuse command requires dirfd to be a directory\n");
- return -ENOTDIR;
-
- inval:
-- pr_err("inuse command requires dirfd and filename");
-+ pr_err("inuse command requires dirfd and filename\n");
- return -EINVAL;
- }
-
diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
-index 3d50998..c4e3a69 100644
+index 8c52472..c4e3a69 100644
--- a/fs/cachefiles/internal.h
+++ b/fs/cachefiles/internal.h
@@ -66,7 +66,7 @@ struct cachefiles_cache {
@@ -61764,30 +61269,8 @@ index 3d50998..c4e3a69 100644
}
#else
-@@ -255,7 +255,7 @@ extern int cachefiles_remove_object_xattr(struct cachefiles_cache *cache,
-
- #define cachefiles_io_error(___cache, FMT, ...) \
- do { \
-- pr_err("I/O Error: " FMT, ##__VA_ARGS__); \
-+ pr_err("I/O Error: " FMT"\n", ##__VA_ARGS__); \
- fscache_io_error(&(___cache)->cache); \
- set_bit(CACHEFILES_DEAD, &(___cache)->flags); \
- } while (0)
-diff --git a/fs/cachefiles/main.c b/fs/cachefiles/main.c
-index 180edfb..711f13d 100644
---- a/fs/cachefiles/main.c
-+++ b/fs/cachefiles/main.c
-@@ -84,7 +84,7 @@ error_proc:
- error_object_jar:
- misc_deregister(&cachefiles_dev);
- error_dev:
-- pr_err("failed to register: %d", ret);
-+ pr_err("failed to register: %d\n", ret);
- return ret;
- }
-
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
-index 5bf2b41..81051b4 100644
+index 55c0acb..81051b4 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -312,7 +312,7 @@ try_again:
@@ -61799,67 +61282,6 @@ index 5bf2b41..81051b4 100644
/* do the multiway lock magic */
trap = lock_rename(cache->graveyard, dir);
-@@ -543,7 +543,7 @@ lookup_again:
- next, next->d_inode, next->d_inode->i_ino);
-
- } else if (!S_ISDIR(next->d_inode->i_mode)) {
-- pr_err("inode %lu is not a directory",
-+ pr_err("inode %lu is not a directory\n",
- next->d_inode->i_ino);
- ret = -ENOBUFS;
- goto error;
-@@ -574,7 +574,7 @@ lookup_again:
- } else if (!S_ISDIR(next->d_inode->i_mode) &&
- !S_ISREG(next->d_inode->i_mode)
- ) {
-- pr_err("inode %lu is not a file or directory",
-+ pr_err("inode %lu is not a file or directory\n",
- next->d_inode->i_ino);
- ret = -ENOBUFS;
- goto error;
-@@ -768,7 +768,7 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
- ASSERT(subdir->d_inode);
-
- if (!S_ISDIR(subdir->d_inode->i_mode)) {
-- pr_err("%s is not a directory", dirname);
-+ pr_err("%s is not a directory\n", dirname);
- ret = -EIO;
- goto check_error;
- }
-@@ -795,13 +795,13 @@ check_error:
- mkdir_error:
- mutex_unlock(&dir->d_inode->i_mutex);
- dput(subdir);
-- pr_err("mkdir %s failed with error %d", dirname, ret);
-+ pr_err("mkdir %s failed with error %d\n", dirname, ret);
- return ERR_PTR(ret);
-
- lookup_error:
- mutex_unlock(&dir->d_inode->i_mutex);
- ret = PTR_ERR(subdir);
-- pr_err("Lookup %s failed with error %d", dirname, ret);
-+ pr_err("Lookup %s failed with error %d\n", dirname, ret);
- return ERR_PTR(ret);
-
- nomem_d_alloc:
-@@ -891,7 +891,7 @@ lookup_error:
- if (ret == -EIO) {
- cachefiles_io_error(cache, "Lookup failed");
- } else if (ret != -ENOMEM) {
-- pr_err("Internal error: %d", ret);
-+ pr_err("Internal error: %d\n", ret);
- ret = -EIO;
- }
-
-@@ -950,7 +950,7 @@ error:
- }
-
- if (ret != -ENOMEM) {
-- pr_err("Internal error: %d", ret);
-+ pr_err("Internal error: %d\n", ret);
- ret = -EIO;
- }
-
diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
index eccd339..4c1d995 100644
--- a/fs/cachefiles/proc.c
@@ -61903,54 +61325,6 @@ index 4b1fb5c..0d2a699 100644
set_fs(old_fs);
kunmap(page);
file_end_write(file);
-diff --git a/fs/cachefiles/xattr.c b/fs/cachefiles/xattr.c
-index 1ad51ffb..acbc1f0 100644
---- a/fs/cachefiles/xattr.c
-+++ b/fs/cachefiles/xattr.c
-@@ -51,7 +51,7 @@ int cachefiles_check_object_type(struct cachefiles_object *object)
- }
-
- if (ret != -EEXIST) {
-- pr_err("Can't set xattr on %*.*s [%lu] (err %d)",
-+ pr_err("Can't set xattr on %*.*s [%lu] (err %d)\n",
- dentry->d_name.len, dentry->d_name.len,
- dentry->d_name.name, dentry->d_inode->i_ino,
- -ret);
-@@ -64,7 +64,7 @@ int cachefiles_check_object_type(struct cachefiles_object *object)
- if (ret == -ERANGE)
- goto bad_type_length;
-
-- pr_err("Can't read xattr on %*.*s [%lu] (err %d)",
-+ pr_err("Can't read xattr on %*.*s [%lu] (err %d)\n",
- dentry->d_name.len, dentry->d_name.len,
- dentry->d_name.name, dentry->d_inode->i_ino,
- -ret);
-@@ -85,14 +85,14 @@ error:
- return ret;
-
- bad_type_length:
-- pr_err("Cache object %lu type xattr length incorrect",
-+ pr_err("Cache object %lu type xattr length incorrect\n",
- dentry->d_inode->i_ino);
- ret = -EIO;
- goto error;
-
- bad_type:
- xtype[2] = 0;
-- pr_err("Cache object %*.*s [%lu] type %s not %s",
-+ pr_err("Cache object %*.*s [%lu] type %s not %s\n",
- dentry->d_name.len, dentry->d_name.len,
- dentry->d_name.name, dentry->d_inode->i_ino,
- xtype, type);
-@@ -293,7 +293,7 @@ error:
- return ret;
-
- bad_type_length:
-- pr_err("Cache object %lu xattr length incorrect",
-+ pr_err("Cache object %lu xattr length incorrect\n",
- dentry->d_inode->i_ino);
- ret = -EIO;
- goto error;
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index c29d6ae..719b9bb 100644
--- a/fs/ceph/dir.c
@@ -62215,9 +61589,18 @@ index 3b0c62e..f7d090c 100644
}
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
-index 84ca0a4..add8cba 100644
+index 84ca0a4..6395e45 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
+@@ -586,7 +586,7 @@ cifs_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
+ tmprc = CIFS_open(xid, &oparms, &oplock, NULL);
+ if (tmprc == -EOPNOTSUPP)
+ *symlink = true;
+- else
++ else if (tmprc == 0)
+ CIFSSMBClose(xid, tcon, fid.netfid);
+ }
+
@@ -626,27 +626,27 @@ static void
cifs_clear_stats(struct cifs_tcon *tcon)
{
@@ -62323,6 +61706,19 @@ index 84ca0a4..add8cba 100644
#endif
}
+diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
+index a689514..a491814 100644
+--- a/fs/cifs/smb2maperror.c
++++ b/fs/cifs/smb2maperror.c
+@@ -256,6 +256,8 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
+ {STATUS_DLL_MIGHT_BE_INCOMPATIBLE, -EIO,
+ "STATUS_DLL_MIGHT_BE_INCOMPATIBLE"},
+ {STATUS_STOPPED_ON_SYMLINK, -EOPNOTSUPP, "STATUS_STOPPED_ON_SYMLINK"},
++ {STATUS_IO_REPARSE_TAG_NOT_HANDLED, -EOPNOTSUPP,
++ "STATUS_REPARSE_NOT_HANDLED"},
+ {STATUS_DEVICE_REQUIRES_CLEANING, -EIO,
+ "STATUS_DEVICE_REQUIRES_CLEANING"},
+ {STATUS_DEVICE_DOOR_OPEN, -EIO, "STATUS_DEVICE_DOOR_OPEN"},
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index f325c59..6bba517 100644
--- a/fs/cifs/smb2ops.c
@@ -63992,7 +63388,7 @@ index fca3820..e1ea241 100644
if (free_clusters >= (nclusters + dirty_clusters +
resv_clusters))
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
-index 1bbe7c3..6f404a5c 100644
+index b687440..6f404a5c 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1276,19 +1276,19 @@ struct ext4_sb_info {
@@ -64025,15 +63421,6 @@ index 1bbe7c3..6f404a5c 100644
atomic_t s_lock_busy;
/* locality groups */
-@@ -1826,7 +1826,7 @@ ext4_group_first_block_no(struct super_block *sb, ext4_group_t group_no)
- /*
- * Special error return code only used by dx_probe() and its callers.
- */
--#define ERR_BAD_DX_DIR -75000
-+#define ERR_BAD_DX_DIR (-(MAX_ERRNO - 1))
-
- /*
- * Timeout and state flag for lazy initialization inode thread.
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index c3e7418..f62cab3 100644
--- a/fs/ext4/mballoc.c
@@ -64164,151 +63551,6 @@ index 32bce84..112d969 100644
__ext4_warning(sb, function, line,
"MMP failure info: last update time: %llu, last update "
"node: %s, last update device: %s\n",
-diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
-index 9e6eced..5e127be 100644
---- a/fs/ext4/namei.c
-+++ b/fs/ext4/namei.c
-@@ -1227,7 +1227,7 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
- buffer */
- int num = 0;
- ext4_lblk_t nblocks;
-- int i, err;
-+ int i, err = 0;
- int namelen;
-
- *res_dir = NULL;
-@@ -1264,7 +1264,11 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
- * return. Otherwise, fall back to doing a search the
- * old fashioned way.
- */
-- if (bh || (err != ERR_BAD_DX_DIR))
-+ if (err == -ENOENT)
-+ return NULL;
-+ if (err && err != ERR_BAD_DX_DIR)
-+ return ERR_PTR(err);
-+ if (bh)
- return bh;
- dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
- "falling back\n"));
-@@ -1295,6 +1299,11 @@ restart:
- }
- num++;
- bh = ext4_getblk(NULL, dir, b++, 0, &err);
-+ if (unlikely(err)) {
-+ if (ra_max == 0)
-+ return ERR_PTR(err);
-+ break;
-+ }
- bh_use[ra_max] = bh;
- if (bh)
- ll_rw_block(READ | REQ_META | REQ_PRIO,
-@@ -1417,6 +1426,8 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
- return ERR_PTR(-ENAMETOOLONG);
-
- bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
-+ if (IS_ERR(bh))
-+ return (struct dentry *) bh;
- inode = NULL;
- if (bh) {
- __u32 ino = le32_to_cpu(de->inode);
-@@ -1450,6 +1461,8 @@ struct dentry *ext4_get_parent(struct dentry *child)
- struct buffer_head *bh;
-
- bh = ext4_find_entry(child->d_inode, &dotdot, &de, NULL);
-+ if (IS_ERR(bh))
-+ return (struct dentry *) bh;
- if (!bh)
- return ERR_PTR(-ENOENT);
- ino = le32_to_cpu(de->inode);
-@@ -2727,6 +2740,8 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
-
- retval = -ENOENT;
- bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
-+ if (IS_ERR(bh))
-+ return PTR_ERR(bh);
- if (!bh)
- goto end_rmdir;
-
-@@ -2794,6 +2809,8 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
-
- retval = -ENOENT;
- bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
-+ if (IS_ERR(bh))
-+ return PTR_ERR(bh);
- if (!bh)
- goto end_unlink;
-
-@@ -3121,6 +3138,8 @@ static int ext4_find_delete_entry(handle_t *handle, struct inode *dir,
- struct ext4_dir_entry_2 *de;
-
- bh = ext4_find_entry(dir, d_name, &de, NULL);
-+ if (IS_ERR(bh))
-+ return PTR_ERR(bh);
- if (bh) {
- retval = ext4_delete_entry(handle, dir, de, bh);
- brelse(bh);
-@@ -3205,6 +3224,8 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
- dquot_initialize(new.inode);
-
- old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL);
-+ if (IS_ERR(old.bh))
-+ return PTR_ERR(old.bh);
- /*
- * Check for inode number is _not_ due to possible IO errors.
- * We might rmdir the source, keep it as pwd of some process
-@@ -3217,6 +3238,11 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
-
- new.bh = ext4_find_entry(new.dir, &new.dentry->d_name,
- &new.de, &new.inlined);
-+ if (IS_ERR(new.bh)) {
-+ retval = PTR_ERR(new.bh);
-+ new.bh = NULL;
-+ goto end_rename;
-+ }
- if (new.bh) {
- if (!new.inode) {
- brelse(new.bh);
-@@ -3345,6 +3371,8 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
-
- old.bh = ext4_find_entry(old.dir, &old.dentry->d_name,
- &old.de, &old.inlined);
-+ if (IS_ERR(old.bh))
-+ return PTR_ERR(old.bh);
- /*
- * Check for inode number is _not_ due to possible IO errors.
- * We might rmdir the source, keep it as pwd of some process
-@@ -3357,6 +3385,11 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
-
- new.bh = ext4_find_entry(new.dir, &new.dentry->d_name,
- &new.de, &new.inlined);
-+ if (IS_ERR(new.bh)) {
-+ retval = PTR_ERR(new.bh);
-+ new.bh = NULL;
-+ goto end_rename;
-+ }
-
- /* RENAME_EXCHANGE case: old *and* new must both exist */
- if (!new.bh || le32_to_cpu(new.de->inode) != new.inode->i_ino)
-diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
-index bb0e80f..1e43b90 100644
---- a/fs/ext4/resize.c
-+++ b/fs/ext4/resize.c
-@@ -575,6 +575,7 @@ handle_bb:
- bh = bclean(handle, sb, block);
- if (IS_ERR(bh)) {
- err = PTR_ERR(bh);
-+ bh = NULL;
- goto out;
- }
- overhead = ext4_group_overhead_blocks(sb, group);
-@@ -603,6 +604,7 @@ handle_ib:
- bh = bclean(handle, sb, block);
- if (IS_ERR(bh)) {
- err = PTR_ERR(bh);
-+ bh = NULL;
- goto out;
- }
-
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index beeb5c4..998c28d 100644
--- a/fs/ext4/super.c
@@ -66282,7 +65524,7 @@ index acd3947..1f896e2 100644
memcpy(c->data, &cookie, 4);
c->len=4;
diff --git a/fs/locks.c b/fs/locks.c
-index 717fbc4..74628c3 100644
+index be530f9..99a4ea2 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -2327,7 +2327,7 @@ void locks_remove_file(struct file *filp)
@@ -66330,7 +65572,7 @@ index d55297f..f5b28c5 100644
#define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
diff --git a/fs/namei.c b/fs/namei.c
-index 17ca8b8..d023ae5 100644
+index d4ca420..d023ae5 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -331,17 +331,32 @@ int generic_permission(struct inode *inode, int mask)
@@ -66385,42 +65627,7 @@ index 17ca8b8..d023ae5 100644
return -EACCES;
}
EXPORT_SYMBOL(generic_permission);
-@@ -644,24 +651,22 @@ static int complete_walk(struct nameidata *nd)
-
- static __always_inline void set_root(struct nameidata *nd)
- {
-- if (!nd->root.mnt)
-- get_fs_root(current->fs, &nd->root);
-+ get_fs_root(current->fs, &nd->root);
- }
-
- static int link_path_walk(const char *, struct nameidata *);
-
--static __always_inline void set_root_rcu(struct nameidata *nd)
-+static __always_inline unsigned set_root_rcu(struct nameidata *nd)
- {
-- if (!nd->root.mnt) {
-- struct fs_struct *fs = current->fs;
-- unsigned seq;
-+ struct fs_struct *fs = current->fs;
-+ unsigned seq, res;
-
-- do {
-- seq = read_seqcount_begin(&fs->seq);
-- nd->root = fs->root;
-- nd->seq = __read_seqcount_begin(&nd->root.dentry->d_seq);
-- } while (read_seqcount_retry(&fs->seq, seq));
-- }
-+ do {
-+ seq = read_seqcount_begin(&fs->seq);
-+ nd->root = fs->root;
-+ res = __read_seqcount_begin(&nd->root.dentry->d_seq);
-+ } while (read_seqcount_retry(&fs->seq, seq));
-+ return res;
- }
-
- static void path_put_conditional(struct path *path, struct nameidata *nd)
-@@ -825,7 +830,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
+@@ -823,7 +830,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
{
struct dentry *dentry = link->dentry;
int error;
@@ -66429,7 +65636,7 @@ index 17ca8b8..d023ae5 100644
BUG_ON(nd->flags & LOOKUP_RCU);
-@@ -846,6 +851,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
+@@ -844,6 +851,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
if (error)
goto out_put_nd_path;
@@ -66442,17 +65649,7 @@ index 17ca8b8..d023ae5 100644
nd->last_type = LAST_BIND;
*p = dentry->d_inode->i_op->follow_link(dentry, nd);
error = PTR_ERR(*p);
-@@ -861,7 +872,8 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
- return PTR_ERR(s);
- }
- if (*s == '/') {
-- set_root(nd);
-+ if (!nd->root.mnt)
-+ set_root(nd);
- path_put(&nd->path);
- nd->path = nd->root;
- path_get(&nd->root);
-@@ -1092,10 +1104,10 @@ int follow_down_one(struct path *path)
+@@ -1091,10 +1104,10 @@ int follow_down_one(struct path *path)
}
EXPORT_SYMBOL(follow_down_one);
@@ -66466,7 +65663,7 @@ index 17ca8b8..d023ae5 100644
}
/*
-@@ -1111,11 +1123,18 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
+@@ -1110,11 +1123,18 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
* Don't forget we might have a non-mountpoint managed dentry
* that wants to block transit.
*/
@@ -66488,7 +65685,7 @@ index 17ca8b8..d023ae5 100644
mounted = __lookup_mnt(path->mnt, path->dentry);
if (!mounted)
-@@ -1131,12 +1150,15 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
+@@ -1130,11 +1150,13 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
*/
*inode = path->dentry->d_inode;
}
@@ -66499,13 +65696,10 @@ index 17ca8b8..d023ae5 100644
static int follow_dotdot_rcu(struct nameidata *nd)
{
-- set_root_rcu(nd);
+ struct inode *inode = nd->inode;
-+ if (!nd->root.mnt)
-+ set_root_rcu(nd);
+ if (!nd->root.mnt)
+ set_root_rcu(nd);
- while (1) {
- if (nd->path.dentry == nd->root.dentry &&
@@ -1148,6 +1170,7 @@ static int follow_dotdot_rcu(struct nameidata *nd)
struct dentry *parent = old->d_parent;
unsigned seq;
@@ -66537,17 +65731,7 @@ index 17ca8b8..d023ae5 100644
return 0;
failed:
-@@ -1249,7 +1274,8 @@ static void follow_mount(struct path *path)
-
- static void follow_dotdot(struct nameidata *nd)
- {
-- set_root(nd);
-+ if (!nd->root.mnt)
-+ set_root(nd);
-
- while(1) {
- struct dentry *old = nd->path.dentry;
-@@ -1403,11 +1429,8 @@ static int lookup_fast(struct nameidata *nd,
+@@ -1404,11 +1429,8 @@ static int lookup_fast(struct nameidata *nd,
}
path->mnt = mnt;
path->dentry = dentry;
@@ -66561,7 +65745,7 @@ index 17ca8b8..d023ae5 100644
unlazy:
if (unlazy_walk(nd, dentry))
return -ECHILD;
-@@ -1597,6 +1620,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
+@@ -1598,6 +1620,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
if (res)
break;
res = walk_component(nd, path, LOOKUP_FOLLOW);
@@ -66570,7 +65754,7 @@ index 17ca8b8..d023ae5 100644
put_link(nd, &link, cookie);
} while (res > 0);
-@@ -1669,7 +1694,7 @@ EXPORT_SYMBOL(full_name_hash);
+@@ -1670,7 +1694,7 @@ EXPORT_SYMBOL(full_name_hash);
static inline unsigned long hash_name(const char *name, unsigned int *hashp)
{
unsigned long a, b, adata, bdata, mask, hash, len;
@@ -66579,16 +65763,7 @@ index 17ca8b8..d023ae5 100644
hash = a = 0;
len = -sizeof(unsigned long);
-@@ -1847,7 +1872,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
- if (*name=='/') {
- if (flags & LOOKUP_RCU) {
- rcu_read_lock();
-- set_root_rcu(nd);
-+ nd->seq = set_root_rcu(nd);
- } else {
- set_root(nd);
- path_get(&nd->root);
-@@ -1898,7 +1923,14 @@ static int path_init(int dfd, const char *name, unsigned int flags,
+@@ -1899,7 +1923,14 @@ static int path_init(int dfd, const char *name, unsigned int flags,
}
nd->inode = nd->path.dentry->d_inode;
@@ -66604,7 +65779,7 @@ index 17ca8b8..d023ae5 100644
}
static inline int lookup_last(struct nameidata *nd, struct path *path)
-@@ -1953,6 +1985,8 @@ static int path_lookupat(int dfd, const char *name,
+@@ -1954,6 +1985,8 @@ static int path_lookupat(int dfd, const char *name,
if (err)
break;
err = lookup_last(nd, &path);
@@ -66613,7 +65788,7 @@ index 17ca8b8..d023ae5 100644
put_link(nd, &link, cookie);
}
}
-@@ -1960,6 +1994,13 @@ static int path_lookupat(int dfd, const char *name,
+@@ -1961,6 +1994,13 @@ static int path_lookupat(int dfd, const char *name,
if (!err)
err = complete_walk(nd);
@@ -66627,7 +65802,7 @@ index 17ca8b8..d023ae5 100644
if (!err && nd->flags & LOOKUP_DIRECTORY) {
if (!d_can_lookup(nd->path.dentry)) {
path_put(&nd->path);
-@@ -1987,8 +2028,15 @@ static int filename_lookup(int dfd, struct filename *name,
+@@ -1988,8 +2028,15 @@ static int filename_lookup(int dfd, struct filename *name,
retval = path_lookupat(dfd, name->name,
flags | LOOKUP_REVAL, nd);
@@ -66644,7 +65819,7 @@ index 17ca8b8..d023ae5 100644
return retval;
}
-@@ -2570,6 +2618,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
+@@ -2571,6 +2618,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
if (flag & O_NOATIME && !inode_owner_or_capable(inode))
return -EPERM;
@@ -66658,7 +65833,7 @@ index 17ca8b8..d023ae5 100644
return 0;
}
-@@ -2801,7 +2856,7 @@ looked_up:
+@@ -2802,7 +2856,7 @@ looked_up:
* cleared otherwise prior to returning.
*/
static int lookup_open(struct nameidata *nd, struct path *path,
@@ -66667,7 +65842,7 @@ index 17ca8b8..d023ae5 100644
const struct open_flags *op,
bool got_write, int *opened)
{
-@@ -2836,6 +2891,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
+@@ -2837,6 +2891,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
/* Negative dentry, just create the file */
if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
umode_t mode = op->mode;
@@ -66685,7 +65860,7 @@ index 17ca8b8..d023ae5 100644
if (!IS_POSIXACL(dir->d_inode))
mode &= ~current_umask();
/*
-@@ -2857,6 +2923,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
+@@ -2858,6 +2923,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
nd->flags & LOOKUP_EXCL);
if (error)
goto out_dput;
@@ -66694,7 +65869,7 @@ index 17ca8b8..d023ae5 100644
}
out_no_open:
path->dentry = dentry;
-@@ -2871,7 +2939,7 @@ out_dput:
+@@ -2872,7 +2939,7 @@ out_dput:
/*
* Handle the last step of open()
*/
@@ -66703,7 +65878,7 @@ index 17ca8b8..d023ae5 100644
struct file *file, const struct open_flags *op,
int *opened, struct filename *name)
{
-@@ -2921,6 +2989,15 @@ static int do_last(struct nameidata *nd, struct path *path,
+@@ -2922,6 +2989,15 @@ static int do_last(struct nameidata *nd, struct path *path,
if (error)
return error;
@@ -66719,7 +65894,7 @@ index 17ca8b8..d023ae5 100644
audit_inode(name, dir, LOOKUP_PARENT);
error = -EISDIR;
/* trailing slashes? */
-@@ -2940,7 +3017,7 @@ retry_lookup:
+@@ -2941,7 +3017,7 @@ retry_lookup:
*/
}
mutex_lock(&dir->d_inode->i_mutex);
@@ -66728,7 +65903,7 @@ index 17ca8b8..d023ae5 100644
mutex_unlock(&dir->d_inode->i_mutex);
if (error <= 0) {
-@@ -2964,11 +3041,28 @@ retry_lookup:
+@@ -2965,11 +3041,28 @@ retry_lookup:
goto finish_open_created;
}
@@ -66758,7 +65933,7 @@ index 17ca8b8..d023ae5 100644
/*
* If atomic_open() acquired write access it is dropped now due to
-@@ -3009,6 +3103,11 @@ finish_lookup:
+@@ -3010,6 +3103,11 @@ finish_lookup:
}
}
BUG_ON(inode != path->dentry->d_inode);
@@ -66770,7 +65945,7 @@ index 17ca8b8..d023ae5 100644
return 1;
}
-@@ -3018,7 +3117,6 @@ finish_lookup:
+@@ -3019,7 +3117,6 @@ finish_lookup:
save_parent.dentry = nd->path.dentry;
save_parent.mnt = mntget(path->mnt);
nd->path.dentry = path->dentry;
@@ -66778,7 +65953,7 @@ index 17ca8b8..d023ae5 100644
}
nd->inode = inode;
/* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
-@@ -3028,7 +3126,18 @@ finish_open:
+@@ -3029,7 +3126,18 @@ finish_open:
path_put(&save_parent);
return error;
}
@@ -66797,7 +65972,7 @@ index 17ca8b8..d023ae5 100644
error = -EISDIR;
if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
goto out;
-@@ -3191,7 +3300,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
+@@ -3192,7 +3300,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
if (unlikely(error))
goto out;
@@ -66806,7 +65981,7 @@ index 17ca8b8..d023ae5 100644
while (unlikely(error > 0)) { /* trailing symlink */
struct path link = path;
void *cookie;
-@@ -3209,7 +3318,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
+@@ -3210,7 +3318,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
error = follow_link(&link, nd, &cookie);
if (unlikely(error))
break;
@@ -66815,7 +65990,7 @@ index 17ca8b8..d023ae5 100644
put_link(nd, &link, cookie);
}
out:
-@@ -3309,9 +3418,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
+@@ -3310,9 +3418,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
goto unlock;
error = -EEXIST;
@@ -66829,7 +66004,7 @@ index 17ca8b8..d023ae5 100644
/*
* Special case - lookup gave negative, but... we had foo/bar/
* From the vfs_mknod() POV we just have a negative dentry -
-@@ -3363,6 +3474,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
+@@ -3364,6 +3474,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
}
EXPORT_SYMBOL(user_path_create);
@@ -66850,7 +66025,7 @@ index 17ca8b8..d023ae5 100644
int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
int error = may_create(dir, dentry);
-@@ -3426,6 +3551,17 @@ retry:
+@@ -3427,6 +3551,17 @@ retry:
if (!IS_POSIXACL(path.dentry->d_inode))
mode &= ~current_umask();
@@ -66868,7 +66043,7 @@ index 17ca8b8..d023ae5 100644
error = security_path_mknod(&path, dentry, mode, dev);
if (error)
goto out;
-@@ -3441,6 +3577,8 @@ retry:
+@@ -3442,6 +3577,8 @@ retry:
error = vfs_mknod(path.dentry->d_inode,dentry,mode,0);
break;
}
@@ -66877,7 +66052,7 @@ index 17ca8b8..d023ae5 100644
out:
done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
-@@ -3495,9 +3633,16 @@ retry:
+@@ -3496,9 +3633,16 @@ retry:
if (!IS_POSIXACL(path.dentry->d_inode))
mode &= ~current_umask();
@@ -66894,7 +66069,7 @@ index 17ca8b8..d023ae5 100644
done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
-@@ -3580,6 +3725,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
+@@ -3581,6 +3725,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
struct filename *name;
struct dentry *dentry;
struct nameidata nd;
@@ -66903,7 +66078,7 @@ index 17ca8b8..d023ae5 100644
unsigned int lookup_flags = 0;
retry:
name = user_path_parent(dfd, pathname, &nd, lookup_flags);
-@@ -3612,10 +3759,21 @@ retry:
+@@ -3613,10 +3759,21 @@ retry:
error = -ENOENT;
goto exit3;
}
@@ -66925,7 +66100,7 @@ index 17ca8b8..d023ae5 100644
exit3:
dput(dentry);
exit2:
-@@ -3706,6 +3864,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
+@@ -3707,6 +3864,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
struct nameidata nd;
struct inode *inode = NULL;
struct inode *delegated_inode = NULL;
@@ -66934,7 +66109,7 @@ index 17ca8b8..d023ae5 100644
unsigned int lookup_flags = 0;
retry:
name = user_path_parent(dfd, pathname, &nd, lookup_flags);
-@@ -3732,10 +3892,22 @@ retry_deleg:
+@@ -3733,10 +3892,22 @@ retry_deleg:
if (d_is_negative(dentry))
goto slashes;
ihold(inode);
@@ -66957,7 +66132,7 @@ index 17ca8b8..d023ae5 100644
exit2:
dput(dentry);
}
-@@ -3824,9 +3996,17 @@ retry:
+@@ -3825,9 +3996,17 @@ retry:
if (IS_ERR(dentry))
goto out_putname;
@@ -66975,7 +66150,7 @@ index 17ca8b8..d023ae5 100644
done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
lookup_flags |= LOOKUP_REVAL;
-@@ -3930,6 +4110,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
+@@ -3931,6 +4110,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
struct dentry *new_dentry;
struct path old_path, new_path;
struct inode *delegated_inode = NULL;
@@ -66983,7 +66158,7 @@ index 17ca8b8..d023ae5 100644
int how = 0;
int error;
-@@ -3953,7 +4134,7 @@ retry:
+@@ -3954,7 +4134,7 @@ retry:
if (error)
return error;
@@ -66992,7 +66167,7 @@ index 17ca8b8..d023ae5 100644
(how & LOOKUP_REVAL));
error = PTR_ERR(new_dentry);
if (IS_ERR(new_dentry))
-@@ -3965,11 +4146,28 @@ retry:
+@@ -3966,11 +4146,28 @@ retry:
error = may_linkat(&old_path);
if (unlikely(error))
goto out_dput;
@@ -67021,7 +66196,7 @@ index 17ca8b8..d023ae5 100644
done_path_create(&new_path, new_dentry);
if (delegated_inode) {
error = break_deleg_wait(&delegated_inode);
-@@ -4279,6 +4477,12 @@ retry_deleg:
+@@ -4280,6 +4477,12 @@ retry_deleg:
if (new_dentry == trap)
goto exit5;
@@ -67034,7 +66209,7 @@ index 17ca8b8..d023ae5 100644
error = security_path_rename(&oldnd.path, old_dentry,
&newnd.path, new_dentry, flags);
if (error)
-@@ -4286,6 +4490,9 @@ retry_deleg:
+@@ -4287,6 +4490,9 @@ retry_deleg:
error = vfs_rename(old_dir->d_inode, old_dentry,
new_dir->d_inode, new_dentry,
&delegated_inode, flags);
@@ -67044,7 +66219,7 @@ index 17ca8b8..d023ae5 100644
exit5:
dput(new_dentry);
exit4:
-@@ -4328,14 +4535,24 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
+@@ -4329,14 +4535,24 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
int readlink_copy(char __user *buffer, int buflen, const char *link)
{
@@ -67249,7 +66424,7 @@ index 8f029db..3688b84 100644
static struct nfsd4_operation nfsd4_ops[];
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
-index 944275c..6fc40a7 100644
+index 1d5103d..7e18dd9 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1539,7 +1539,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
@@ -67450,28 +66625,6 @@ index 2685bc9..f3462c7 100644
goto out_close_fd;
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
-diff --git a/fs/notify/fdinfo.c b/fs/notify/fdinfo.c
-index 238a593..9d7e2b9 100644
---- a/fs/notify/fdinfo.c
-+++ b/fs/notify/fdinfo.c
-@@ -42,7 +42,7 @@ static int show_mark_fhandle(struct seq_file *m, struct inode *inode)
- {
- struct {
- struct file_handle handle;
-- u8 pad[64];
-+ u8 pad[MAX_HANDLE_SZ];
- } f;
- int size, ret, i;
-
-@@ -50,7 +50,7 @@ static int show_mark_fhandle(struct seq_file *m, struct inode *inode)
- size = f.handle.handle_bytes >> 2;
-
- ret = exportfs_encode_inode_fh(inode, (struct fid *)f.handle.f_handle, &size, 0);
-- if ((ret == 255) || (ret == -ENOSPC)) {
-+ if ((ret == FILEID_INVALID) || (ret < 0)) {
- WARN_ONCE(1, "Can't encode file handler for inotify: %d\n", ret);
- return 0;
- }
diff --git a/fs/notify/notification.c b/fs/notify/notification.c
index 25a07c7..4f1163c 100644
--- a/fs/notify/notification.c
@@ -89064,27 +88217,29 @@ index 6f8fbcf..4efc177 100644
+ MODULE_GRSEC MODULE_RANDSTRUCT_PLUGIN
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
-index 502073a..a7de024 100644
+index b483abd..af305ad 100644
--- a/include/linux/vga_switcheroo.h
+++ b/include/linux/vga_switcheroo.h
-@@ -63,8 +63,8 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
+@@ -63,9 +63,9 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
--int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain);
+ void vga_switcheroo_fini_domain_pm_ops(struct device *dev);
+-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain);
#else
static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
-@@ -81,8 +81,8 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
+@@ -82,9 +82,9 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
-static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
--static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
+static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
+ static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {}
+-static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
+static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
#endif
@@ -90499,7 +89654,7 @@ index 168ff50..a921df2 100644
} __attribute__ ((packed));
diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
-index c38355c..17a57bc 100644
+index 1590c49..5eab462 100644
--- a/include/uapi/linux/xattr.h
+++ b/include/uapi/linux/xattr.h
@@ -73,5 +73,9 @@
@@ -90544,7 +89699,7 @@ index 30f5362..8ed8ac9 100644
void *pmi_pal;
u8 *vbe_state_orig; /*
diff --git a/init/Kconfig b/init/Kconfig
-index 9d76b99..d378b1e 100644
+index 9d76b99..f8e6d37 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1105,6 +1105,7 @@ endif # CGROUPS
@@ -90555,7 +89710,15 @@ index 9d76b99..d378b1e 100644
default n
help
Enables additional kernel features in a sake of checkpoint/restore.
-@@ -1589,7 +1590,7 @@ config SLUB_DEBUG
+@@ -1432,6 +1433,7 @@ config FUTEX
+
+ config HAVE_FUTEX_CMPXCHG
+ bool
++ depends on FUTEX
+ help
+ Architectures should select this if futex_atomic_cmpxchg_inatomic()
+ is implemented and always working. This removes a couple of runtime
+@@ -1589,7 +1591,7 @@ config SLUB_DEBUG
config COMPAT_BRK
bool "Disable heap randomization"
@@ -90564,7 +89727,7 @@ index 9d76b99..d378b1e 100644
help
Randomizing heap placement makes heap exploits harder, but it
also breaks ancient binaries (including anything libc5 based).
-@@ -1877,7 +1878,7 @@ config INIT_ALL_POSSIBLE
+@@ -1877,7 +1879,7 @@ config INIT_ALL_POSSIBLE
config STOP_MACHINE
bool
default y
@@ -91468,10 +90631,10 @@ index 989f5bf..d317ca0 100644
+}
+EXPORT_SYMBOL(capable_wrt_inode_uidgid_nolog);
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
-index 70776ae..09c4988 100644
+index 0a46b2a..53174d4 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
-@@ -5146,6 +5146,14 @@ static void cgroup_release_agent(struct work_struct *work)
+@@ -5155,6 +5155,14 @@ static void cgroup_release_agent(struct work_struct *work)
release_list);
list_del_init(&cgrp->release_list);
raw_spin_unlock(&release_list_lock);
@@ -91486,7 +90649,7 @@ index 70776ae..09c4988 100644
pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
if (!pathbuf)
goto continue_free;
-@@ -5336,7 +5344,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
+@@ -5345,7 +5353,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
struct task_struct *task;
int count = 0;
@@ -91906,7 +91069,7 @@ index 2f7c760..95b6a66 100644
#ifdef CONFIG_MODULE_UNLOAD
{
diff --git a/kernel/events/core.c b/kernel/events/core.c
-index 6b17ac1..00fd505 100644
+index f626c9f..5486cad 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -160,8 +160,15 @@ static struct srcu_struct pmus_srcu;
@@ -91944,7 +91107,7 @@ index 6b17ac1..00fd505 100644
static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
enum event_type_t event_type);
-@@ -3023,7 +3030,7 @@ static void __perf_event_read(void *info)
+@@ -3033,7 +3040,7 @@ static void __perf_event_read(void *info)
static inline u64 perf_event_count(struct perf_event *event)
{
@@ -91953,7 +91116,7 @@ index 6b17ac1..00fd505 100644
}
static u64 perf_event_read(struct perf_event *event)
-@@ -3399,9 +3406,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
+@@ -3409,9 +3416,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
mutex_lock(&event->child_mutex);
total += perf_event_read(event);
*enabled += event->total_time_enabled +
@@ -91965,7 +91128,7 @@ index 6b17ac1..00fd505 100644
list_for_each_entry(child, &event->child_list, child_list) {
total += perf_event_read(child);
-@@ -3830,10 +3837,10 @@ void perf_event_update_userpage(struct perf_event *event)
+@@ -3840,10 +3847,10 @@ void perf_event_update_userpage(struct perf_event *event)
userpg->offset -= local64_read(&event->hw.prev_count);
userpg->time_enabled = enabled +
@@ -91978,7 +91141,7 @@ index 6b17ac1..00fd505 100644
arch_perf_update_userpage(userpg, now);
-@@ -4397,7 +4404,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
+@@ -4407,7 +4414,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
/* Data. */
sp = perf_user_stack_pointer(regs);
@@ -91987,7 +91150,7 @@ index 6b17ac1..00fd505 100644
dyn_size = dump_size - rem;
perf_output_skip(handle, rem);
-@@ -4488,11 +4495,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
+@@ -4498,11 +4505,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
values[n++] = perf_event_count(event);
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
values[n++] = enabled +
@@ -92001,7 +91164,7 @@ index 6b17ac1..00fd505 100644
}
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(event);
-@@ -6801,7 +6808,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
+@@ -6811,7 +6818,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
event->parent = parent_event;
event->ns = get_pid_ns(task_active_pid_ns(current));
@@ -92010,7 +91173,7 @@ index 6b17ac1..00fd505 100644
event->state = PERF_EVENT_STATE_INACTIVE;
-@@ -7080,6 +7087,11 @@ SYSCALL_DEFINE5(perf_event_open,
+@@ -7090,6 +7097,11 @@ SYSCALL_DEFINE5(perf_event_open,
if (flags & ~PERF_FLAG_ALL)
return -EINVAL;
@@ -92022,7 +91185,7 @@ index 6b17ac1..00fd505 100644
err = perf_copy_attr(attr_uptr, &attr);
if (err)
return err;
-@@ -7432,10 +7444,10 @@ static void sync_child_event(struct perf_event *child_event,
+@@ -7442,10 +7454,10 @@ static void sync_child_event(struct perf_event *child_event,
/*
* Add back the child's count to the parent's count:
*/
@@ -92036,6 +91199,18 @@ index 6b17ac1..00fd505 100644
&parent_event->child_total_time_running);
/*
+@@ -7921,8 +7933,10 @@ int perf_event_init_task(struct task_struct *child)
+
+ for_each_task_context_nr(ctxn) {
+ ret = perf_event_init_context(child, ctxn);
+- if (ret)
++ if (ret) {
++ perf_event_free_task(child);
+ return ret;
++ }
+ }
+
+ return 0;
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index 569b2187..19940d9 100644
--- a/kernel/events/internal.h
@@ -92145,7 +91320,7 @@ index e5c4668..592d2e5 100644
{
struct signal_struct *sig = current->signal;
diff --git a/kernel/fork.c b/kernel/fork.c
-index 6a13c46..a623c8e 100644
+index 6a13c46..461e9c2 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -183,6 +183,48 @@ void thread_info_cache_init(void)
@@ -92515,6 +91690,15 @@ index 6a13c46..a623c8e 100644
if (atomic_read(&p->real_cred->user->processes) >=
task_rlimit(p, RLIMIT_NPROC)) {
if (p->real_cred->user != INIT_USER &&
+@@ -1326,7 +1431,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+ goto bad_fork_cleanup_policy;
+ retval = audit_alloc(p);
+ if (retval)
+- goto bad_fork_cleanup_policy;
++ goto bad_fork_cleanup_perf;
+ /* copy all the process information */
+ retval = copy_semundo(clone_flags, p);
+ if (retval)
@@ -1452,6 +1557,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
goto bad_fork_free_pid;
}
@@ -92527,7 +91711,18 @@ index 6a13c46..a623c8e 100644
if (likely(p->pid)) {
ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
-@@ -1541,6 +1651,8 @@ bad_fork_cleanup_count:
+@@ -1525,8 +1635,9 @@ bad_fork_cleanup_semundo:
+ exit_sem(p);
+ bad_fork_cleanup_audit:
+ audit_free(p);
+-bad_fork_cleanup_policy:
++bad_fork_cleanup_perf:
+ perf_event_free_task(p);
++bad_fork_cleanup_policy:
+ #ifdef CONFIG_NUMA
+ mpol_put(p->mempolicy);
+ bad_fork_cleanup_threadgroup_lock:
+@@ -1541,6 +1652,8 @@ bad_fork_cleanup_count:
bad_fork_free:
free_task(p);
fork_out:
@@ -92536,7 +91731,7 @@ index 6a13c46..a623c8e 100644
return ERR_PTR(retval);
}
-@@ -1602,6 +1714,7 @@ long do_fork(unsigned long clone_flags,
+@@ -1602,6 +1715,7 @@ long do_fork(unsigned long clone_flags,
p = copy_process(clone_flags, stack_start, stack_size,
child_tidptr, NULL, trace);
@@ -92544,7 +91739,7 @@ index 6a13c46..a623c8e 100644
/*
* Do this prior waking up the new thread - the thread pointer
* might get invalid after that point, if the thread exits quickly.
-@@ -1618,6 +1731,8 @@ long do_fork(unsigned long clone_flags,
+@@ -1618,6 +1732,8 @@ long do_fork(unsigned long clone_flags,
if (clone_flags & CLONE_PARENT_SETTID)
put_user(nr, parent_tidptr);
@@ -92553,7 +91748,7 @@ index 6a13c46..a623c8e 100644
if (clone_flags & CLONE_VFORK) {
p->vfork_done = &vfork;
init_completion(&vfork);
-@@ -1736,7 +1851,7 @@ void __init proc_caches_init(void)
+@@ -1736,7 +1852,7 @@ void __init proc_caches_init(void)
mm_cachep = kmem_cache_create("mm_struct",
sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
@@ -92562,7 +91757,7 @@ index 6a13c46..a623c8e 100644
mmap_init();
nsproxy_cache_init();
}
-@@ -1776,7 +1891,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
+@@ -1776,7 +1892,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
return 0;
/* don't need lock here; in the worst case we'll do useless copy */
@@ -92571,7 +91766,7 @@ index 6a13c46..a623c8e 100644
return 0;
*new_fsp = copy_fs_struct(fs);
-@@ -1883,7 +1998,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
+@@ -1883,7 +1999,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
fs = current->fs;
spin_lock(&fs->lock);
current->fs = new_fs;
@@ -92582,7 +91777,7 @@ index 6a13c46..a623c8e 100644
else
new_fs = fs;
diff --git a/kernel/futex.c b/kernel/futex.c
-index b632b5f..0aa434d 100644
+index c20fb39..0aa434d 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -202,7 +202,7 @@ struct futex_pi_state {
@@ -92624,15 +91819,7 @@ index b632b5f..0aa434d 100644
pagefault_disable();
ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
-@@ -2628,6 +2633,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
- * shared futexes. We need to compare the keys:
- */
- if (match_futex(&q.key, &key2)) {
-+ queue_unlock(hb);
- ret = -EINVAL;
- goto out_put_keys;
- }
-@@ -3033,6 +3039,7 @@ static void __init futex_detect_cmpxchg(void)
+@@ -3034,6 +3039,7 @@ static void __init futex_detect_cmpxchg(void)
{
#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
u32 curval;
@@ -92640,7 +91827,7 @@ index b632b5f..0aa434d 100644
/*
* This will fail and we want it. Some arch implementations do
-@@ -3044,8 +3051,11 @@ static void __init futex_detect_cmpxchg(void)
+@@ -3045,8 +3051,11 @@ static void __init futex_detect_cmpxchg(void)
* implementation, the non-functional ones will return
* -ENOSYS.
*/
@@ -92874,26 +92061,10 @@ index cb0cf37..b69e161 100644
return -ENOMEM;
reset_iter(iter, 0);
diff --git a/kernel/kcmp.c b/kernel/kcmp.c
-index e30ac0f..a7fcafb 100644
+index 0aa69ea..a7fcafb 100644
--- a/kernel/kcmp.c
+++ b/kernel/kcmp.c
-@@ -44,11 +44,12 @@ static long kptr_obfuscate(long v, int type)
- */
- static int kcmp_ptr(void *v1, void *v2, enum kcmp_type type)
- {
-- long ret;
-+ long t1, t2;
-
-- ret = kptr_obfuscate((long)v1, type) - kptr_obfuscate((long)v2, type);
-+ t1 = kptr_obfuscate((long)v1, type);
-+ t2 = kptr_obfuscate((long)v2, type);
-
-- return (ret < 0) | ((ret > 0) << 1);
-+ return (t1 < t2) | ((t1 > t2) << 1);
- }
-
- /* The caller must have pinned the task */
-@@ -99,6 +100,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
+@@ -100,6 +100,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
struct task_struct *task1, *task2;
int ret;
@@ -93494,7 +92665,7 @@ index 1d96dd0..994ff19 100644
default:
diff --git a/kernel/module.c b/kernel/module.c
-index 81e727c..a8ea6f9 100644
+index 673aeb0..40e276d 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -61,6 +61,7 @@
@@ -94229,7 +93400,7 @@ index 81e727c..a8ea6f9 100644
dynamic_debug_setup(info->debug, info->num_debug);
/* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
-@@ -3311,11 +3468,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
+@@ -3316,11 +3473,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
ddebug_cleanup:
dynamic_debug_remove(info->debug);
synchronize_sched();
@@ -94242,7 +93413,7 @@ index 81e727c..a8ea6f9 100644
free_unload:
module_unload_free(mod);
unlink_mod:
-@@ -3398,10 +3554,16 @@ static const char *get_ksymbol(struct module *mod,
+@@ -3403,10 +3559,16 @@ static const char *get_ksymbol(struct module *mod,
unsigned long nextval;
/* At worse, next value is at end of module */
@@ -94262,7 +93433,7 @@ index 81e727c..a8ea6f9 100644
/* Scan for closest preceding symbol, and next symbol. (ELF
starts real symbols at 1). */
-@@ -3652,7 +3814,7 @@ static int m_show(struct seq_file *m, void *p)
+@@ -3657,7 +3819,7 @@ static int m_show(struct seq_file *m, void *p)
return 0;
seq_printf(m, "%s %u",
@@ -94271,7 +93442,7 @@ index 81e727c..a8ea6f9 100644
print_unload_info(m, mod);
/* Informative for users. */
-@@ -3661,7 +3823,7 @@ static int m_show(struct seq_file *m, void *p)
+@@ -3666,7 +3828,7 @@ static int m_show(struct seq_file *m, void *p)
mod->state == MODULE_STATE_COMING ? "Loading":
"Live");
/* Used by oprofile and other similar tools. */
@@ -94280,7 +93451,7 @@ index 81e727c..a8ea6f9 100644
/* Taints info */
if (mod->taints)
-@@ -3697,7 +3859,17 @@ static const struct file_operations proc_modules_operations = {
+@@ -3702,7 +3864,17 @@ static const struct file_operations proc_modules_operations = {
static int __init proc_modules_init(void)
{
@@ -94298,7 +93469,7 @@ index 81e727c..a8ea6f9 100644
return 0;
}
module_init(proc_modules_init);
-@@ -3758,14 +3930,14 @@ struct module *__module_address(unsigned long addr)
+@@ -3763,14 +3935,14 @@ struct module *__module_address(unsigned long addr)
{
struct module *mod;
@@ -94316,7 +93487,7 @@ index 81e727c..a8ea6f9 100644
return mod;
}
return NULL;
-@@ -3800,11 +3972,20 @@ bool is_module_text_address(unsigned long addr)
+@@ -3805,11 +3977,20 @@ bool is_module_text_address(unsigned long addr)
*/
struct module *__module_text_address(unsigned long addr)
{
@@ -94713,7 +93884,7 @@ index 4ee194e..925778f 100644
if (pm_wakeup_pending()) {
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
-index 13e839d..8a71f12 100644
+index 971285d..553e02a 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -480,6 +480,11 @@ static int check_syslog_permissions(int type, bool from_file)
@@ -97098,71 +96269,10 @@ index 7c7964c..2a0d412 100644
update_vsyscall_tz();
if (firsttime) {
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
-index fe75444..b8a1463 100644
+index cd45a07..b8a1463 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
-@@ -464,18 +464,26 @@ static enum alarmtimer_type clock2alarm(clockid_t clockid)
- static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm,
- ktime_t now)
- {
-+ unsigned long flags;
- struct k_itimer *ptr = container_of(alarm, struct k_itimer,
- it.alarm.alarmtimer);
-- if (posix_timer_event(ptr, 0) != 0)
-- ptr->it_overrun++;
-+ enum alarmtimer_restart result = ALARMTIMER_NORESTART;
-+
-+ spin_lock_irqsave(&ptr->it_lock, flags);
-+ if ((ptr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) {
-+ if (posix_timer_event(ptr, 0) != 0)
-+ ptr->it_overrun++;
-+ }
-
- /* Re-add periodic timers */
- if (ptr->it.alarm.interval.tv64) {
- ptr->it_overrun += alarm_forward(alarm, now,
- ptr->it.alarm.interval);
-- return ALARMTIMER_RESTART;
-+ result = ALARMTIMER_RESTART;
- }
-- return ALARMTIMER_NORESTART;
-+ spin_unlock_irqrestore(&ptr->it_lock, flags);
-+
-+ return result;
- }
-
- /**
-@@ -541,18 +549,22 @@ static int alarm_timer_create(struct k_itimer *new_timer)
- * @new_timer: k_itimer pointer
- * @cur_setting: itimerspec data to fill
- *
-- * Copies the itimerspec data out from the k_itimer
-+ * Copies out the current itimerspec data
- */
- static void alarm_timer_get(struct k_itimer *timr,
- struct itimerspec *cur_setting)
- {
-- memset(cur_setting, 0, sizeof(struct itimerspec));
-+ ktime_t relative_expiry_time =
-+ alarm_expires_remaining(&(timr->it.alarm.alarmtimer));
-
-- cur_setting->it_interval =
-- ktime_to_timespec(timr->it.alarm.interval);
-- cur_setting->it_value =
-- ktime_to_timespec(timr->it.alarm.alarmtimer.node.expires);
-- return;
-+ if (ktime_to_ns(relative_expiry_time) > 0) {
-+ cur_setting->it_value = ktime_to_timespec(relative_expiry_time);
-+ } else {
-+ cur_setting->it_value.tv_sec = 0;
-+ cur_setting->it_value.tv_nsec = 0;
-+ }
-+
-+ cur_setting->it_interval = ktime_to_timespec(timr->it.alarm.interval);
- }
-
- /**
-@@ -811,7 +823,7 @@ static int __init alarmtimer_init(void)
+@@ -823,7 +823,7 @@ static int __init alarmtimer_init(void)
struct platform_device *pdev;
int error = 0;
int i;
@@ -97403,10 +96513,10 @@ index c1bd4ad..4b861dc 100644
ret = -EIO;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
-index ac9d1da..ce98b35 100644
+index ca167e6..6cf8f83 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
-@@ -1920,12 +1920,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
+@@ -1964,12 +1964,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
if (unlikely(ftrace_disabled))
return 0;
@@ -97426,7 +96536,7 @@ index ac9d1da..ce98b35 100644
}
/*
-@@ -4126,8 +4131,10 @@ static int ftrace_process_locs(struct module *mod,
+@@ -4170,8 +4175,10 @@ static int ftrace_process_locs(struct module *mod,
if (!count)
return 0;
@@ -97438,7 +96548,7 @@ index ac9d1da..ce98b35 100644
start_pg = ftrace_allocate_pages(count);
if (!start_pg)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
-index b95381e..af2fddd 100644
+index 2ff0580..af2fddd 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -352,9 +352,9 @@ struct buffer_data_page {
@@ -97464,31 +96574,7 @@ index b95381e..af2fddd 100644
local_t dropped_events;
local_t committing;
local_t commits;
-@@ -626,8 +626,22 @@ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
- work = &cpu_buffer->irq_work;
- }
-
-- work->waiters_pending = true;
- poll_wait(filp, &work->waiters, poll_table);
-+ work->waiters_pending = true;
-+ /*
-+ * There's a tight race between setting the waiters_pending and
-+ * checking if the ring buffer is empty. Once the waiters_pending bit
-+ * is set, the next event will wake the task up, but we can get stuck
-+ * if there's only a single event in.
-+ *
-+ * FIXME: Ideally, we need a memory barrier on the writer side as well,
-+ * but adding a memory barrier to all events will cause too much of a
-+ * performance hit in the fast path. We only need a memory barrier when
-+ * the buffer goes from empty to having content. But as this race is
-+ * extremely small, and it's not a problem if another event comes in, we
-+ * will fix it later.
-+ */
-+ smp_mb();
-
- if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
- (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
-@@ -991,8 +1005,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -1005,8 +1005,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
*
* We add a counter to the write field to denote this.
*/
@@ -97499,7 +96585,7 @@ index b95381e..af2fddd 100644
/*
* Just make sure we have seen our old_write and synchronize
-@@ -1020,8 +1034,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -1034,8 +1034,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
* cmpxchg to only update if an interrupt did not already
* do it for us. If the cmpxchg fails, we don't care.
*/
@@ -97510,7 +96596,7 @@ index b95381e..af2fddd 100644
/*
* No need to worry about races with clearing out the commit.
-@@ -1388,12 +1402,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
+@@ -1402,12 +1402,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
static inline unsigned long rb_page_entries(struct buffer_page *bpage)
{
@@ -97525,7 +96611,7 @@ index b95381e..af2fddd 100644
}
static int
-@@ -1488,7 +1502,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
+@@ -1502,7 +1502,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
* bytes consumed in ring buffer from here.
* Increment overrun to account for the lost events.
*/
@@ -97534,7 +96620,7 @@ index b95381e..af2fddd 100644
local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
}
-@@ -2066,7 +2080,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2080,7 +2080,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
* it is our responsibility to update
* the counters.
*/
@@ -97543,7 +96629,7 @@ index b95381e..af2fddd 100644
local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
/*
-@@ -2216,7 +2230,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2230,7 +2230,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
if (tail == BUF_PAGE_SIZE)
tail_page->real_end = 0;
@@ -97552,7 +96638,7 @@ index b95381e..af2fddd 100644
return;
}
-@@ -2251,7 +2265,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2265,7 +2265,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
rb_event_set_padding(event);
/* Set the write back to the previous setting */
@@ -97561,7 +96647,7 @@ index b95381e..af2fddd 100644
return;
}
-@@ -2263,7 +2277,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2277,7 +2277,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
/* Set write to end of buffer */
length = (tail + length) - BUF_PAGE_SIZE;
@@ -97570,7 +96656,7 @@ index b95381e..af2fddd 100644
}
/*
-@@ -2289,7 +2303,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2303,7 +2303,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
* about it.
*/
if (unlikely(next_page == commit_page)) {
@@ -97579,7 +96665,7 @@ index b95381e..af2fddd 100644
goto out_reset;
}
-@@ -2345,7 +2359,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2359,7 +2359,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
cpu_buffer->tail_page) &&
(cpu_buffer->commit_page ==
cpu_buffer->reader_page))) {
@@ -97588,7 +96674,7 @@ index b95381e..af2fddd 100644
goto out_reset;
}
}
-@@ -2393,7 +2407,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2407,7 +2407,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
length += RB_LEN_TIME_EXTEND;
tail_page = cpu_buffer->tail_page;
@@ -97597,7 +96683,7 @@ index b95381e..af2fddd 100644
/* set write to only the index of the write */
write &= RB_WRITE_MASK;
-@@ -2417,7 +2431,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2431,7 +2431,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
kmemcheck_annotate_bitfield(event, bitfield);
rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
@@ -97606,7 +96692,7 @@ index b95381e..af2fddd 100644
/*
* If this is the first commit on the page, then update
-@@ -2450,7 +2464,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2464,7 +2464,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
unsigned long write_mask =
@@ -97615,7 +96701,7 @@ index b95381e..af2fddd 100644
unsigned long event_length = rb_event_length(event);
/*
* This is on the tail page. It is possible that
-@@ -2460,7 +2474,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2474,7 +2474,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
*/
old_index += write_mask;
new_index += write_mask;
@@ -97624,7 +96710,7 @@ index b95381e..af2fddd 100644
if (index == old_index) {
/* update counters */
local_sub(event_length, &cpu_buffer->entries_bytes);
-@@ -2852,7 +2866,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2866,7 +2866,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
/* Do the likely case first */
if (likely(bpage->page == (void *)addr)) {
@@ -97633,7 +96719,7 @@ index b95381e..af2fddd 100644
return;
}
-@@ -2864,7 +2878,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2878,7 +2878,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
start = bpage;
do {
if (bpage->page == (void *)addr) {
@@ -97642,7 +96728,7 @@ index b95381e..af2fddd 100644
return;
}
rb_inc_page(cpu_buffer, &bpage);
-@@ -3148,7 +3162,7 @@ static inline unsigned long
+@@ -3162,7 +3162,7 @@ static inline unsigned long
rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
{
return local_read(&cpu_buffer->entries) -
@@ -97651,7 +96737,7 @@ index b95381e..af2fddd 100644
}
/**
-@@ -3237,7 +3251,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
+@@ -3251,7 +3251,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
return 0;
cpu_buffer = buffer->buffers[cpu];
@@ -97660,7 +96746,7 @@ index b95381e..af2fddd 100644
return ret;
}
-@@ -3260,7 +3274,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
+@@ -3274,7 +3274,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
return 0;
cpu_buffer = buffer->buffers[cpu];
@@ -97669,7 +96755,7 @@ index b95381e..af2fddd 100644
return ret;
}
-@@ -3345,7 +3359,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
+@@ -3359,7 +3359,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
/* if you care about this being correct, lock the buffer */
for_each_buffer_cpu(buffer, cpu) {
cpu_buffer = buffer->buffers[cpu];
@@ -97678,7 +96764,7 @@ index b95381e..af2fddd 100644
}
return overruns;
-@@ -3516,8 +3530,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
+@@ -3530,8 +3530,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
/*
* Reset the reader page to size zero.
*/
@@ -97689,7 +96775,7 @@ index b95381e..af2fddd 100644
local_set(&cpu_buffer->reader_page->page->commit, 0);
cpu_buffer->reader_page->real_end = 0;
-@@ -3551,7 +3565,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
+@@ -3565,7 +3565,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
* want to compare with the last_overrun.
*/
smp_mb();
@@ -97698,7 +96784,7 @@ index b95381e..af2fddd 100644
/*
* Here's the tricky part.
-@@ -4123,8 +4137,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
+@@ -4137,8 +4137,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer->head_page
= list_entry(cpu_buffer->pages, struct buffer_page, list);
@@ -97709,7 +96795,7 @@ index b95381e..af2fddd 100644
local_set(&cpu_buffer->head_page->page->commit, 0);
cpu_buffer->head_page->read = 0;
-@@ -4134,14 +4148,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
+@@ -4148,14 +4148,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
INIT_LIST_HEAD(&cpu_buffer->new_pages);
@@ -97728,7 +96814,7 @@ index b95381e..af2fddd 100644
local_set(&cpu_buffer->dropped_events, 0);
local_set(&cpu_buffer->entries, 0);
local_set(&cpu_buffer->committing, 0);
-@@ -4546,8 +4560,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
+@@ -4560,8 +4560,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
rb_init_page(bpage);
bpage = reader->page;
reader->page = *data_page;
@@ -100854,19 +99940,6 @@ index 1706cbb..f89dbca 100644
if (err) {
bdi_destroy(bdi);
return err;
-diff --git a/mm/dmapool.c b/mm/dmapool.c
-index 306baa5..ba8019b 100644
---- a/mm/dmapool.c
-+++ b/mm/dmapool.c
-@@ -176,7 +176,7 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
- if (list_empty(&dev->dma_pools) &&
- device_create_file(dev, &dev_attr_pools)) {
- kfree(retval);
-- return NULL;
-+ retval = NULL;
- } else
- list_add(&retval->pools, &dev->dma_pools);
- mutex_unlock(&pools_lock);
diff --git a/mm/filemap.c b/mm/filemap.c
index 8163e04..191cb97 100644
--- a/mm/filemap.c
@@ -100983,6 +100056,31 @@ index b32b70c..e512eb0 100644
pkmap_count[last_pkmap_nr] = 1;
set_page_address(page, (void *)vaddr);
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 33514d8..03e5063 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1780,6 +1780,11 @@ static int __split_huge_page_map(struct page *page,
+ for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
+ pte_t *pte, entry;
+ BUG_ON(PageCompound(page+i));
++ /*
++ * Note that pmd_numa is not transferred deliberately
++ * to avoid any possibility that pte_numa leaks to
++ * a PROT_NONE VMA by accident.
++ */
+ entry = mk_pte(page + i, vma->vm_page_prot);
+ entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+ if (!pmd_write(*pmd))
+@@ -1788,8 +1793,6 @@ static int __split_huge_page_map(struct page *page,
+ BUG_ON(page_mapcount(page) != 1);
+ if (!pmd_young(*pmd))
+ entry = pte_mkold(entry);
+- if (pmd_numa(*pmd))
+- entry = pte_mknuma(entry);
+ pte = pte_offset_map(&_pmd, haddr);
+ BUG_ON(!pte_none(*pte));
+ set_pte_at(mm, haddr, pte, entry);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7ae5444..aea22b2 100644
--- a/mm/hugetlb.c
@@ -101427,7 +100525,7 @@ index a013bc9..a897a14 100644
}
unset_migratetype_isolate(page, MIGRATE_MOVABLE);
diff --git a/mm/memory.c b/mm/memory.c
-index 0a21f3d..babeaec 100644
+index 533023d..32da202 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -413,6 +413,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
@@ -102157,10 +101255,23 @@ index 8f5330d..b41914b 100644
capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
diff --git a/mm/migrate.c b/mm/migrate.c
-index be6dbf9..febb8ec 100644
+index be6dbf9..75c0f45 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
-@@ -1506,8 +1506,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
+@@ -146,8 +146,11 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
+ pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
+ if (pte_swp_soft_dirty(*ptep))
+ pte = pte_mksoft_dirty(pte);
++
++ /* Recheck VMA as permissions can change since migration started */
+ if (is_write_migration_entry(entry))
+- pte = pte_mkwrite(pte);
++ pte = maybe_mkwrite(pte, vma);
++
+ #ifdef CONFIG_HUGETLB_PAGE
+ if (PageHuge(new)) {
+ pte = pte_mkhuge(pte);
+@@ -1506,8 +1509,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
*/
tcred = __task_cred(task);
if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
@@ -104023,7 +103134,7 @@ index ef44ad7..1056bc7 100644
}
diff --git a/mm/percpu.c b/mm/percpu.c
-index 2ddf9a9..f8fc075 100644
+index 492f601..a32872d 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -123,7 +123,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
@@ -104203,7 +103314,7 @@ index 22a4a76..9551288 100644
/*
diff --git a/mm/shmem.c b/mm/shmem.c
-index af68b15..f7f853d 100644
+index e53ab3a..f7f853d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -33,7 +33,7 @@
@@ -104224,19 +103335,7 @@ index af68b15..f7f853d 100644
/*
* shmem_fallocate communicates with shmem_fault or shmem_writepage via
-@@ -2064,8 +2064,10 @@ static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct
-
- if (new_dentry->d_inode) {
- (void) shmem_unlink(new_dir, new_dentry);
-- if (they_are_dirs)
-+ if (they_are_dirs) {
-+ drop_nlink(new_dentry->d_inode);
- drop_nlink(old_dir);
-+ }
- } else if (they_are_dirs) {
- drop_nlink(old_dir);
- inc_nlink(new_dir);
-@@ -2219,6 +2221,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
+@@ -2221,6 +2221,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
static int shmem_xattr_validate(const char *name)
{
struct { const char *prefix; size_t len; } arr[] = {
@@ -104248,7 +103347,7 @@ index af68b15..f7f853d 100644
{ XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
{ XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
};
-@@ -2274,6 +2281,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
+@@ -2276,6 +2281,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
if (err)
return err;
@@ -104264,7 +103363,7 @@ index af68b15..f7f853d 100644
return simple_xattr_set(&info->xattrs, name, value, size, flags);
}
-@@ -2586,8 +2602,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
+@@ -2588,8 +2602,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
int err = -ENOMEM;
/* Round up to L1_CACHE_BYTES to resist false sharing */
@@ -104275,7 +103374,7 @@ index af68b15..f7f853d 100644
return -ENOMEM;
diff --git a/mm/slab.c b/mm/slab.c
-index 3070b92..6596d86 100644
+index c9103e4..6596d86 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -311,10 +311,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
@@ -104328,32 +103427,7 @@ index 3070b92..6596d86 100644
slab_early_init = 0;
-@@ -2224,7 +2228,8 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
- int
- __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
- {
-- size_t left_over, freelist_size, ralign;
-+ size_t left_over, freelist_size;
-+ size_t ralign = BYTES_PER_WORD;
- gfp_t gfp;
- int err;
- size_t size = cachep->size;
-@@ -2257,14 +2262,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
- size &= ~(BYTES_PER_WORD - 1);
- }
-
-- /*
-- * Redzoning and user store require word alignment or possibly larger.
-- * Note this will be overridden by architecture or caller mandated
-- * alignment if either is greater than BYTES_PER_WORD.
-- */
-- if (flags & SLAB_STORE_USER)
-- ralign = BYTES_PER_WORD;
--
- if (flags & SLAB_RED_ZONE) {
- ralign = REDZONE_ALIGN;
- /* If redzoning, ensure that the second redzone is suitably
-@@ -3512,6 +3509,21 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
+@@ -3505,6 +3509,21 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
struct array_cache *ac = cpu_cache_get(cachep);
check_irq_off();
@@ -104375,7 +103449,7 @@ index 3070b92..6596d86 100644
kmemleak_free_recursive(objp, cachep->flags);
objp = cache_free_debugcheck(cachep, objp, caller);
-@@ -3735,6 +3747,7 @@ void kfree(const void *objp)
+@@ -3728,6 +3747,7 @@ void kfree(const void *objp)
if (unlikely(ZERO_OR_NULL_PTR(objp)))
return;
@@ -104383,7 +103457,7 @@ index 3070b92..6596d86 100644
local_irq_save(flags);
kfree_debugcheck(objp);
c = virt_to_cache(objp);
-@@ -4176,14 +4189,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
+@@ -4169,14 +4189,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
}
/* cpu stats */
{
@@ -104410,7 +103484,7 @@ index 3070b92..6596d86 100644
#endif
}
-@@ -4404,13 +4425,69 @@ static const struct file_operations proc_slabstats_operations = {
+@@ -4397,13 +4425,69 @@ static const struct file_operations proc_slabstats_operations = {
static int __init slab_proc_init(void)
{
#ifdef CONFIG_DEBUG_SLAB_LEAK
@@ -112245,7 +111319,7 @@ index 610e19c..08d0c3f 100644
if (!todrop_rate[i]) return 0;
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
-index e683675..67cb16b 100644
+index 5c34e8d..0d8eb7f 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -567,7 +567,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
@@ -112391,7 +111465,7 @@ index db80126..ef7110e 100644
cp->old_state = cp->state;
/*
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
-index 73ba1cc..1adfc7a 100644
+index 6f70bdd..fb96a71 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -1102,7 +1102,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
@@ -112692,10 +111766,10 @@ index 0000000..c566332
+MODULE_ALIAS("ipt_gradm");
+MODULE_ALIAS("ip6t_gradm");
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
-index a3910fc..2d2ba14 100644
+index 47dc683..2e0d52c 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
-@@ -870,11 +870,11 @@ static int __net_init hashlimit_proc_net_init(struct net *net)
+@@ -871,11 +871,11 @@ static int __net_init hashlimit_proc_net_init(struct net *net)
{
struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
@@ -113074,6 +112148,43 @@ index 48f8ffc..0ef3eec 100644
struct rds_sock {
struct sock rs_sk;
+diff --git a/net/rds/send.c b/net/rds/send.c
+index 2371816..0a64541 100644
+--- a/net/rds/send.c
++++ b/net/rds/send.c
+@@ -593,8 +593,11 @@ static void rds_send_remove_from_sock(struct list_head *messages, int status)
+ sock_put(rds_rs_to_sk(rs));
+ }
+ rs = rm->m_rs;
+- sock_hold(rds_rs_to_sk(rs));
++ if (rs)
++ sock_hold(rds_rs_to_sk(rs));
+ }
++ if (!rs)
++ goto unlock_and_drop;
+ spin_lock(&rs->rs_lock);
+
+ if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
+@@ -638,9 +641,6 @@ unlock_and_drop:
+ * queue. This means that in the TCP case, the message may not have been
+ * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
+ * checks the RDS_MSG_HAS_ACK_SEQ bit.
+- *
+- * XXX It's not clear to me how this is safely serialized with socket
+- * destruction. Maybe it should bail if it sees SOCK_DEAD.
+ */
+ void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
+ is_acked_func is_acked)
+@@ -711,6 +711,9 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
+ */
+ if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
+ spin_unlock_irqrestore(&conn->c_lock, flags);
++ spin_lock_irqsave(&rm->m_rs_lock, flags);
++ rm->m_rs = NULL;
++ spin_unlock_irqrestore(&rm->m_rs_lock, flags);
+ continue;
+ }
+ list_del_init(&rm->m_conn_item);
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index edac9ef..16bcb98 100644
--- a/net/rds/tcp.c
@@ -117204,10 +116315,10 @@ index 78ccfa4..7a0857b 100644
struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
};
diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
-index f96bf4c..f3bd1e0 100644
+index 95fc2eaf..ea5d6a7 100644
--- a/sound/firewire/amdtp.c
+++ b/sound/firewire/amdtp.c
-@@ -512,7 +512,7 @@ static void update_pcm_pointers(struct amdtp_stream *s,
+@@ -521,7 +521,7 @@ static void update_pcm_pointers(struct amdtp_stream *s,
ptr = s->pcm_buffer_pointer + frames;
if (ptr >= pcm->runtime->buffer_size)
ptr -= pcm->runtime->buffer_size;
@@ -117216,7 +116327,7 @@ index f96bf4c..f3bd1e0 100644
s->pcm_period_pointer += frames;
if (s->pcm_period_pointer >= pcm->runtime->period_size) {
-@@ -952,7 +952,7 @@ EXPORT_SYMBOL(amdtp_stream_pcm_pointer);
+@@ -961,7 +961,7 @@ EXPORT_SYMBOL(amdtp_stream_pcm_pointer);
*/
void amdtp_stream_update(struct amdtp_stream *s)
{
@@ -117226,10 +116337,10 @@ index f96bf4c..f3bd1e0 100644
}
EXPORT_SYMBOL(amdtp_stream_update);
diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
-index d8ee7b0..79dfa2a 100644
+index 4823c08..47a5f7c 100644
--- a/sound/firewire/amdtp.h
+++ b/sound/firewire/amdtp.h
-@@ -211,7 +211,7 @@ static inline bool amdtp_stream_pcm_running(struct amdtp_stream *s)
+@@ -212,7 +212,7 @@ static inline bool amdtp_stream_pcm_running(struct amdtp_stream *s)
static inline void amdtp_stream_pcm_trigger(struct amdtp_stream *s,
struct snd_pcm_substream *pcm)
{
@@ -117238,7 +116349,7 @@ index d8ee7b0..79dfa2a 100644
}
/**
-@@ -229,7 +229,7 @@ static inline void amdtp_stream_midi_trigger(struct amdtp_stream *s,
+@@ -230,7 +230,7 @@ static inline void amdtp_stream_midi_trigger(struct amdtp_stream *s,
struct snd_rawmidi_substream *midi)
{
if (port < s->midi_ports)
@@ -125304,10 +124415,10 @@ index 0000000..4378111
+}
diff --git a/tools/gcc/size_overflow_plugin/size_overflow_hash.data b/tools/gcc/size_overflow_plugin/size_overflow_hash.data
new file mode 100644
-index 0000000..84ee907
+index 0000000..3350a9f
--- /dev/null
+++ b/tools/gcc/size_overflow_plugin/size_overflow_hash.data
-@@ -0,0 +1,5716 @@
+@@ -0,0 +1,5718 @@
+intel_fake_agp_alloc_by_type_1 intel_fake_agp_alloc_by_type 1 1 NULL
+storvsc_connect_to_vsp_22 storvsc_connect_to_vsp 2 22 NULL
+compat_sock_setsockopt_23 compat_sock_setsockopt 5 23 NULL
@@ -126595,7 +125706,8 @@ index 0000000..84ee907
+sta_dev_read_14782 sta_dev_read 3 14782 NULL
+keys_proc_write_14792 keys_proc_write 3 14792 NULL
+ext4_kvmalloc_14796 ext4_kvmalloc 1 14796 NULL
-+__kfifo_in_14797 __kfifo_in 3-0 14797 NULL
++__kfifo_in_14797 __kfifo_in 3-0 14797 NULL nohasharray
++ttm_page_pool_free_14797 ttm_page_pool_free 2 14797 &__kfifo_in_14797
+hpet_readl_14801 hpet_readl 0 14801 NULL nohasharray
+snd_als300_gcr_read_14801 snd_als300_gcr_read 0 14801 &hpet_readl_14801
+mrp_attr_create_14853 mrp_attr_create 3 14853 NULL
@@ -126996,6 +126108,7 @@ index 0000000..84ee907
+kstrtoll_from_user_19500 kstrtoll_from_user 2 19500 NULL
+ext4_add_new_descs_19509 ext4_add_new_descs 3 19509 NULL
+batadv_tvlv_container_register_19520 batadv_tvlv_container_register 5 19520 NULL
++ttm_dma_page_pool_free_19527 ttm_dma_page_pool_free 2 19527 NULL
+nfc_llcp_build_tlv_19536 nfc_llcp_build_tlv 3 19536 NULL
+gfn_to_index_19558 gfn_to_index 0-1-3-2 19558 NULL
+ocfs2_control_message_19564 ocfs2_control_message 3 19564 NULL
diff --git a/3.16.3/4425_grsec_remove_EI_PAX.patch b/3.16.4/4425_grsec_remove_EI_PAX.patch
similarity index 100%
rename from 3.16.3/4425_grsec_remove_EI_PAX.patch
rename to 3.16.4/4425_grsec_remove_EI_PAX.patch
diff --git a/3.16.3/4427_force_XATTR_PAX_tmpfs.patch b/3.16.4/4427_force_XATTR_PAX_tmpfs.patch
similarity index 100%
rename from 3.16.3/4427_force_XATTR_PAX_tmpfs.patch
rename to 3.16.4/4427_force_XATTR_PAX_tmpfs.patch
diff --git a/3.16.3/4430_grsec-remove-localversion-grsec.patch b/3.16.4/4430_grsec-remove-localversion-grsec.patch
similarity index 100%
rename from 3.16.3/4430_grsec-remove-localversion-grsec.patch
rename to 3.16.4/4430_grsec-remove-localversion-grsec.patch
diff --git a/3.16.3/4435_grsec-mute-warnings.patch b/3.16.4/4435_grsec-mute-warnings.patch
similarity index 100%
rename from 3.16.3/4435_grsec-mute-warnings.patch
rename to 3.16.4/4435_grsec-mute-warnings.patch
diff --git a/3.16.3/4440_grsec-remove-protected-paths.patch b/3.16.4/4440_grsec-remove-protected-paths.patch
similarity index 100%
rename from 3.16.3/4440_grsec-remove-protected-paths.patch
rename to 3.16.4/4440_grsec-remove-protected-paths.patch
diff --git a/3.16.3/4450_grsec-kconfig-default-gids.patch b/3.16.4/4450_grsec-kconfig-default-gids.patch
similarity index 100%
rename from 3.16.3/4450_grsec-kconfig-default-gids.patch
rename to 3.16.4/4450_grsec-kconfig-default-gids.patch
diff --git a/3.16.3/4465_selinux-avc_audit-log-curr_ip.patch b/3.16.4/4465_selinux-avc_audit-log-curr_ip.patch
similarity index 100%
rename from 3.16.3/4465_selinux-avc_audit-log-curr_ip.patch
rename to 3.16.4/4465_selinux-avc_audit-log-curr_ip.patch
diff --git a/3.16.3/4470_disable-compat_vdso.patch b/3.16.4/4470_disable-compat_vdso.patch
similarity index 100%
rename from 3.16.3/4470_disable-compat_vdso.patch
rename to 3.16.4/4470_disable-compat_vdso.patch
diff --git a/3.16.3/4475_emutramp_default_on.patch b/3.16.4/4475_emutramp_default_on.patch
similarity index 100%
rename from 3.16.3/4475_emutramp_default_on.patch
rename to 3.16.4/4475_emutramp_default_on.patch
diff --git a/3.2.63/0000_README b/3.2.63/0000_README
index 90399c4..c849374 100644
--- a/3.2.63/0000_README
+++ b/3.2.63/0000_README
@@ -170,7 +170,7 @@ Patch: 1062_linux-3.2.63.patch
From: http://www.kernel.org
Desc: Linux 3.2.63
-Patch: 4420_grsecurity-3.0-3.2.63-201409282020.patch
+Patch: 4420_grsecurity-3.0-3.2.63-201410062032.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/3.2.63/4420_grsecurity-3.0-3.2.63-201409282020.patch b/3.2.63/4420_grsecurity-3.0-3.2.63-201410062032.patch
similarity index 99%
rename from 3.2.63/4420_grsecurity-3.0-3.2.63-201409282020.patch
rename to 3.2.63/4420_grsecurity-3.0-3.2.63-201410062032.patch
index e277955..bb64ee2 100644
--- a/3.2.63/4420_grsecurity-3.0-3.2.63-201409282020.patch
+++ b/3.2.63/4420_grsecurity-3.0-3.2.63-201410062032.patch
@@ -14559,7 +14559,7 @@ index cb00ccc..17e9054 100644
/*
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
-index 6be9909..ee359f2 100644
+index 6be9909..5c476fc 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
@@ -14570,7 +14570,7 @@ index 6be9909..ee359f2 100644
#define pgd_clear(pgd) native_pgd_clear(pgd)
#endif
-@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
+@@ -81,12 +82,53 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
#define arch_end_context_switch(prev) do {} while(0)
@@ -14591,6 +14591,7 @@ index 6be9909..ee359f2 100644
+ cr0 = read_cr0() ^ X86_CR0_WP;
+ BUG_ON(cr0 & X86_CR0_WP);
+ write_cr0(cr0);
++ barrier();
+ return cr0 ^ X86_CR0_WP;
+}
+
@@ -14598,6 +14599,7 @@ index 6be9909..ee359f2 100644
+{
+ unsigned long cr0;
+
++ barrier();
+ cr0 = read_cr0() ^ X86_CR0_WP;
+ BUG_ON(!(cr0 & X86_CR0_WP));
+ write_cr0(cr0);
@@ -14622,7 +14624,7 @@ index 6be9909..ee359f2 100644
static inline int pte_dirty(pte_t pte)
{
return pte_flags(pte) & _PAGE_DIRTY;
-@@ -147,6 +187,11 @@ static inline unsigned long pud_pfn(pud_t pud)
+@@ -147,6 +189,11 @@ static inline unsigned long pud_pfn(pud_t pud)
return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
}
@@ -14634,7 +14636,7 @@ index 6be9909..ee359f2 100644
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
static inline int pmd_large(pmd_t pte)
-@@ -200,9 +245,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
+@@ -200,9 +247,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
return pte_clear_flags(pte, _PAGE_RW);
}
@@ -14665,7 +14667,7 @@ index 6be9909..ee359f2 100644
}
static inline pte_t pte_mkdirty(pte_t pte)
-@@ -394,6 +459,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
+@@ -394,6 +461,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
#endif
#ifndef __ASSEMBLY__
@@ -14681,7 +14683,7 @@ index 6be9909..ee359f2 100644
#include <linux/mm_types.h>
static inline int pte_none(pte_t pte)
-@@ -515,7 +589,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
+@@ -515,7 +591,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
* Currently stuck as a macro due to indirect forward reference to
* linux/mmzone.h's __section_mem_map_addr() definition:
*/
@@ -14690,7 +14692,7 @@ index 6be9909..ee359f2 100644
/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
-@@ -555,7 +629,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
+@@ -555,7 +631,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
* Currently stuck as a macro due to indirect forward reference to
* linux/mmzone.h's __section_mem_map_addr() definition:
*/
@@ -14699,7 +14701,7 @@ index 6be9909..ee359f2 100644
/* to find an entry in a page-table-directory. */
static inline unsigned long pud_index(unsigned long address)
-@@ -570,7 +644,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
+@@ -570,7 +646,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
static inline int pgd_bad(pgd_t pgd)
{
@@ -14708,7 +14710,7 @@ index 6be9909..ee359f2 100644
}
static inline int pgd_none(pgd_t pgd)
-@@ -593,7 +667,12 @@ static inline int pgd_none(pgd_t pgd)
+@@ -593,7 +669,12 @@ static inline int pgd_none(pgd_t pgd)
* pgd_offset() returns a (pgd_t *)
* pgd_index() is used get the offset into the pgd page's array of pgd_t's;
*/
@@ -14722,7 +14724,7 @@ index 6be9909..ee359f2 100644
/*
* a shortcut which implies the use of the kernel's pgd, instead
* of a process's
-@@ -604,6 +683,22 @@ static inline int pgd_none(pgd_t pgd)
+@@ -604,6 +685,22 @@ static inline int pgd_none(pgd_t pgd)
#define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
@@ -14745,7 +14747,7 @@ index 6be9909..ee359f2 100644
#ifndef __ASSEMBLY__
extern int direct_gbpages;
-@@ -768,11 +863,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+@@ -768,11 +865,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
* dst and src can be on the same page, but the range must not overlap,
* and must not cross a page boundary.
*/
@@ -87205,7 +87207,7 @@ index 63786e7..0780cac 100644
#ifdef CONFIG_MODULE_UNLOAD
{
diff --git a/kernel/events/core.c b/kernel/events/core.c
-index 14c111c..6e273e7 100644
+index 14c111c..98d977c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -145,8 +145,15 @@ static struct srcu_struct pmus_srcu;
@@ -87332,6 +87334,18 @@ index 14c111c..6e273e7 100644
&parent_event->child_total_time_running);
/*
+@@ -7071,8 +7083,10 @@ int perf_event_init_task(struct task_struct *child)
+
+ for_each_task_context_nr(ctxn) {
+ ret = perf_event_init_context(child, ctxn);
+- if (ret)
++ if (ret) {
++ perf_event_free_task(child);
+ return ret;
++ }
+ }
+
+ return 0;
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index a2101bb..f2e0354 100644
--- a/kernel/events/internal.h
@@ -87463,7 +87477,7 @@ index fde15f9..99f1b97 100644
{
struct signal_struct *sig = current->signal;
diff --git a/kernel/fork.c b/kernel/fork.c
-index 13bba30..6557764 100644
+index 13bba30..ee14dbd 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -34,6 +34,7 @@
@@ -87905,6 +87919,15 @@ index 13bba30..6557764 100644
goto bad_fork_free;
}
current->flags &= ~PF_NPROC_EXCEEDED;
+@@ -1221,7 +1345,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
+ goto bad_fork_cleanup_policy;
+ retval = audit_alloc(p);
+ if (retval)
+- goto bad_fork_cleanup_policy;
++ goto bad_fork_cleanup_perf;
+ /* copy all the process information */
+ retval = copy_semundo(clone_flags, p);
+ if (retval)
@@ -1341,6 +1465,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
goto bad_fork_free_pid;
}
@@ -87917,7 +87940,18 @@ index 13bba30..6557764 100644
if (clone_flags & CLONE_THREAD) {
current->signal->nr_threads++;
atomic_inc(&current->signal->live);
-@@ -1423,6 +1552,8 @@ bad_fork_cleanup_count:
+@@ -1406,8 +1535,9 @@ bad_fork_cleanup_semundo:
+ exit_sem(p);
+ bad_fork_cleanup_audit:
+ audit_free(p);
+-bad_fork_cleanup_policy:
++bad_fork_cleanup_perf:
+ perf_event_free_task(p);
++bad_fork_cleanup_policy:
+ #ifdef CONFIG_NUMA
+ mpol_put(p->mempolicy);
+ bad_fork_cleanup_cgroup:
+@@ -1423,6 +1553,8 @@ bad_fork_cleanup_count:
bad_fork_free:
free_task(p);
fork_out:
@@ -87926,7 +87960,7 @@ index 13bba30..6557764 100644
return ERR_PTR(retval);
}
-@@ -1509,6 +1640,7 @@ long do_fork(unsigned long clone_flags,
+@@ -1509,6 +1641,7 @@ long do_fork(unsigned long clone_flags,
p = copy_process(clone_flags, stack_start, regs, stack_size,
child_tidptr, NULL, trace);
@@ -87934,7 +87968,7 @@ index 13bba30..6557764 100644
/*
* Do this prior waking up the new thread - the thread pointer
* might get invalid after that point, if the thread exits quickly.
-@@ -1525,6 +1657,8 @@ long do_fork(unsigned long clone_flags,
+@@ -1525,6 +1658,8 @@ long do_fork(unsigned long clone_flags,
if (clone_flags & CLONE_PARENT_SETTID)
put_user(nr, parent_tidptr);
@@ -87943,7 +87977,7 @@ index 13bba30..6557764 100644
if (clone_flags & CLONE_VFORK) {
p->vfork_done = &vfork;
init_completion(&vfork);
-@@ -1597,7 +1731,7 @@ void __init proc_caches_init(void)
+@@ -1597,7 +1732,7 @@ void __init proc_caches_init(void)
mm_cachep = kmem_cache_create("mm_struct",
sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
@@ -87952,7 +87986,7 @@ index 13bba30..6557764 100644
mmap_init();
nsproxy_cache_init();
}
-@@ -1636,7 +1770,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
+@@ -1636,7 +1771,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
return 0;
/* don't need lock here; in the worst case we'll do useless copy */
@@ -87961,7 +87995,7 @@ index 13bba30..6557764 100644
return 0;
*new_fsp = copy_fs_struct(fs);
-@@ -1725,7 +1859,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
+@@ -1725,7 +1860,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
fs = current->fs;
spin_lock(&fs->lock);
current->fs = new_fs;
@@ -94783,7 +94817,7 @@ index 09fc744..3936897 100644
set_page_address(page, (void *)vaddr);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
-index ed0ed8a..cc835b97 100644
+index ed0ed8a..d629a89 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -704,7 +704,7 @@ out:
@@ -94795,6 +94829,18 @@ index ed0ed8a..cc835b97 100644
return VM_FAULT_OOM;
/* if an huge pmd materialized from under us just retry later */
if (unlikely(pmd_trans_huge(*pmd)))
+@@ -1347,6 +1347,11 @@ static int __split_huge_page_map(struct page *page,
+ i++, haddr += PAGE_SIZE) {
+ pte_t *pte, entry;
+ BUG_ON(PageCompound(page+i));
++ /*
++ * Note that pmd_numa is not transferred deliberately
++ * to avoid any possibility that pte_numa leaks to
++ * a PROT_NONE VMA by accident.
++ */
+ entry = mk_pte(page + i, vma->vm_page_prot);
+ entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+ if (!pmd_write(*pmd))
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index d2c43a2..2213df3 100644
--- a/mm/hugetlb.c
@@ -95959,10 +96005,23 @@ index a72fa33..0b12a09 100644
err = -EPERM;
goto out;
diff --git a/mm/migrate.c b/mm/migrate.c
-index 09d6a9d..c514c22 100644
+index 09d6a9d..e2941874 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
-@@ -1389,6 +1389,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
+@@ -141,8 +141,11 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
+
+ get_page(new);
+ pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
++
++ /* Recheck VMA as permissions can change since migration started */
+ if (is_write_migration_entry(entry))
+- pte = pte_mkwrite(pte);
++ pte = maybe_mkwrite(pte, vma);
++
+ #ifdef CONFIG_HUGETLB_PAGE
+ if (PageHuge(new))
+ pte = pte_mkhuge(pte);
+@@ -1389,6 +1392,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
if (!mm)
return -EINVAL;
@@ -95977,7 +96036,7 @@ index 09d6a9d..c514c22 100644
/*
* Check if this process has the right to modify the specified
* process. The right exists if the process has administrative
-@@ -1398,8 +1406,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
+@@ -1398,8 +1409,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
rcu_read_lock();
tcred = __task_cred(task);
if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
@@ -106065,6 +106124,43 @@ index 7eaba18..a3c303f 100644
struct rds_sock {
struct sock rs_sk;
+diff --git a/net/rds/send.c b/net/rds/send.c
+index 88eace5..b5947e7 100644
+--- a/net/rds/send.c
++++ b/net/rds/send.c
+@@ -593,8 +593,11 @@ static void rds_send_remove_from_sock(struct list_head *messages, int status)
+ sock_put(rds_rs_to_sk(rs));
+ }
+ rs = rm->m_rs;
+- sock_hold(rds_rs_to_sk(rs));
++ if (rs)
++ sock_hold(rds_rs_to_sk(rs));
+ }
++ if (!rs)
++ goto unlock_and_drop;
+ spin_lock(&rs->rs_lock);
+
+ if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
+@@ -638,9 +641,6 @@ unlock_and_drop:
+ * queue. This means that in the TCP case, the message may not have been
+ * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
+ * checks the RDS_MSG_HAS_ACK_SEQ bit.
+- *
+- * XXX It's not clear to me how this is safely serialized with socket
+- * destruction. Maybe it should bail if it sees SOCK_DEAD.
+ */
+ void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
+ is_acked_func is_acked)
+@@ -711,6 +711,9 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
+ */
+ if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
+ spin_unlock_irqrestore(&conn->c_lock, flags);
++ spin_lock_irqsave(&rm->m_rs_lock, flags);
++ rm->m_rs = NULL;
++ spin_unlock_irqrestore(&rm->m_rs_lock, flags);
+ continue;
+ }
+ list_del_init(&rm->m_conn_item);
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index edac9ef..16bcb98 100644
--- a/net/rds/tcp.c