public inbox for gentoo-commits@lists.gentoo.org
* [gentoo-commits] proj/hardened-patchset:master commit in: 3.17.1/, 3.14.22/, 3.2.63/
@ 2014-10-26 23:36 Anthony G. Basile
From: Anthony G. Basile @ 2014-10-26 23:36 UTC
  To: gentoo-commits

commit:     8801439b591b20c32fe51a572be45604841c2ac6
Author:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
AuthorDate: Sun Oct 26 23:37:32 2014 +0000
Commit:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
CommitDate: Sun Oct 26 23:37:32 2014 +0000
URL:        http://sources.gentoo.org/gitweb/?p=proj/hardened-patchset.git;a=commit;h=8801439b

Grsec/PaX: 3.0-{3.2.63,3.14.22,3.17.1}-201410250027

---
 3.14.22/0000_README                                |    2 +-
 ...4420_grsecurity-3.0-3.14.22-201410250026.patch} |  788 +++++++++-
 3.17.1/0000_README                                 |    2 +-
 ... 4420_grsecurity-3.0-3.17.1-201410250027.patch} | 1634 +++++++++++++++++++-
 3.2.63/0000_README                                 |    2 +-
 ... 4420_grsecurity-3.0-3.2.63-201410250023.patch} |  653 +++++++-
 6 files changed, 2992 insertions(+), 89 deletions(-)

diff --git a/3.14.22/0000_README b/3.14.22/0000_README
index 9652232..de2e1c4 100644
--- a/3.14.22/0000_README
+++ b/3.14.22/0000_README
@@ -2,7 +2,7 @@ README
 -----------------------------------------------------------------------------
 Individual Patch Descriptions:
 -----------------------------------------------------------------------------
-Patch:	4420_grsecurity-3.0-3.14.22-201410192047.patch
+Patch:	4420_grsecurity-3.0-3.14.22-201410250026.patch
 From:	http://www.grsecurity.net
 Desc:	hardened-sources base patch from upstream grsecurity
 

diff --git a/3.14.22/4420_grsecurity-3.0-3.14.22-201410192047.patch b/3.14.22/4420_grsecurity-3.0-3.14.22-201410250026.patch
similarity index 99%
rename from 3.14.22/4420_grsecurity-3.0-3.14.22-201410192047.patch
rename to 3.14.22/4420_grsecurity-3.0-3.14.22-201410250026.patch
index 8d0df77..9bb50c5 100644
--- a/3.14.22/4420_grsecurity-3.0-3.14.22-201410192047.patch
+++ b/3.14.22/4420_grsecurity-3.0-3.14.22-201410250026.patch
@@ -854,6 +854,22 @@ index 98838a0..b304fb4 100644
  	} else if (!cause) {
  		/* Allow reads even for write-only mappings */
  		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
+diff --git a/arch/arc/kernel/kgdb.c b/arch/arc/kernel/kgdb.c
+index a2ff5c5..ecf6a78 100644
+--- a/arch/arc/kernel/kgdb.c
++++ b/arch/arc/kernel/kgdb.c
+@@ -158,11 +158,6 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
+ 	return -1;
+ }
+ 
+-unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
+-{
+-	return instruction_pointer(regs);
+-}
+-
+ int kgdb_arch_init(void)
+ {
+ 	single_step_data.armed = 0;
 diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
 index 4733d32..b142a40 100644
 --- a/arch/arm/Kconfig
@@ -9846,6 +9862,20 @@ index 370ca1e..d4f4a98 100644
  extern unsigned long sparc64_elf_hwcap;
  #define ELF_HWCAP	sparc64_elf_hwcap
  
+diff --git a/arch/sparc/include/asm/oplib_64.h b/arch/sparc/include/asm/oplib_64.h
+index a12dbe3..0337e85 100644
+--- a/arch/sparc/include/asm/oplib_64.h
++++ b/arch/sparc/include/asm/oplib_64.h
+@@ -62,7 +62,8 @@ struct linux_mem_p1275 {
+ /* You must call prom_init() before using any of the library services,
+  * preferably as early as possible.  Pass it the romvec pointer.
+  */
+-extern void prom_init(void *cif_handler, void *cif_stack);
++void prom_init(void *cif_handler);
++void prom_init_report(void);
+ 
+ /* Boot argument acquisition, returns the boot command line string. */
+ extern char *prom_getbootargs(void);
 diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
 index 9b1c36d..209298b 100644
 --- a/arch/sparc/include/asm/pgalloc_32.h
@@ -9938,6 +9968,21 @@ index 79da178..c2eede8 100644
  #define SRMMU_PAGE_KERNEL  __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
  				    SRMMU_DIRTY | SRMMU_REF)
  
+diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
+index 5e35e05..b1a29e9 100644
+--- a/arch/sparc/include/asm/setup.h
++++ b/arch/sparc/include/asm/setup.h
+@@ -24,6 +24,10 @@ static inline int con_is_present(void)
+ }
+ #endif
+ 
++#ifdef CONFIG_SPARC64
++void __init start_early_boot(void);
++#endif
++
+ extern void sun_do_break(void);
+ extern int stop_a_enabled;
+ extern int scons_pwroff;
 diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
 index 9689176..63c18ea 100644
 --- a/arch/sparc/include/asm/spinlock_64.h
@@ -10227,6 +10272,108 @@ index d15cc17..d0ae796 100644
  
  extra-y     := head_$(BITS).o
  
+diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h
+index 140966f..620009d 100644
+--- a/arch/sparc/kernel/entry.h
++++ b/arch/sparc/kernel/entry.h
+@@ -66,13 +66,10 @@ struct pause_patch_entry {
+ extern struct pause_patch_entry __pause_3insn_patch,
+ 	__pause_3insn_patch_end;
+ 
+-extern void __init per_cpu_patch(void);
+-extern void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
+-				    struct sun4v_1insn_patch_entry *);
+-extern void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *,
+-				    struct sun4v_2insn_patch_entry *);
+-extern void __init sun4v_patch(void);
+-extern void __init boot_cpu_id_too_large(int cpu);
++void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
++			     struct sun4v_1insn_patch_entry *);
++void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *,
++			     struct sun4v_2insn_patch_entry *);
+ extern unsigned int dcache_parity_tl1_occurred;
+ extern unsigned int icache_parity_tl1_occurred;
+ 
+diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
+index 452f04fe..fbea0ac 100644
+--- a/arch/sparc/kernel/head_64.S
++++ b/arch/sparc/kernel/head_64.S
+@@ -660,14 +660,12 @@ tlb_fixup_done:
+ 	sethi	%hi(init_thread_union), %g6
+ 	or	%g6, %lo(init_thread_union), %g6
+ 	ldx	[%g6 + TI_TASK], %g4
+-	mov	%sp, %l6
+ 
+ 	wr	%g0, ASI_P, %asi
+ 	mov	1, %g1
+ 	sllx	%g1, THREAD_SHIFT, %g1
+ 	sub	%g1, (STACKFRAME_SZ + STACK_BIAS), %g1
+ 	add	%g6, %g1, %sp
+-	mov	0, %fp
+ 
+ 	/* Set per-cpu pointer initially to zero, this makes
+ 	 * the boot-cpu use the in-kernel-image per-cpu areas
+@@ -694,44 +692,14 @@ tlb_fixup_done:
+ 	 nop
+ #endif
+ 
+-	mov	%l6, %o1			! OpenPROM stack
+ 	call	prom_init
+ 	 mov	%l7, %o0			! OpenPROM cif handler
+ 
+-	/* Initialize current_thread_info()->cpu as early as possible.
+-	 * In order to do that accurately we have to patch up the get_cpuid()
+-	 * assembler sequences.  And that, in turn, requires that we know
+-	 * if we are on a Starfire box or not.  While we're here, patch up
+-	 * the sun4v sequences as well.
++	/* To create a one-register-window buffer between the kernel's
++	 * initial stack and the last stack frame we use from the firmware,
++	 * do the rest of the boot from a C helper function.
+ 	 */
+-	call	check_if_starfire
+-	 nop
+-	call	per_cpu_patch
+-	 nop
+-	call	sun4v_patch
+-	 nop
+-
+-#ifdef CONFIG_SMP
+-	call	hard_smp_processor_id
+-	 nop
+-	cmp	%o0, NR_CPUS
+-	blu,pt	%xcc, 1f
+-	 nop
+-	call	boot_cpu_id_too_large
+-	 nop
+-	/* Not reached... */
+-
+-1:
+-#else
+-	mov	0, %o0
+-#endif
+-	sth	%o0, [%g6 + TI_CPU]
+-
+-	call	prom_init_report
+-	 nop
+-
+-	/* Off we go.... */
+-	call	start_kernel
++	call	start_early_boot
+ 	 nop
+ 	/* Not reached... */
+ 
+diff --git a/arch/sparc/kernel/hvtramp.S b/arch/sparc/kernel/hvtramp.S
+index b7ddcdd..cdbfec2 100644
+--- a/arch/sparc/kernel/hvtramp.S
++++ b/arch/sparc/kernel/hvtramp.S
+@@ -109,7 +109,6 @@ hv_cpu_startup:
+ 	sllx		%g5, THREAD_SHIFT, %g5
+ 	sub		%g5, (STACKFRAME_SZ + STACK_BIAS), %g5
+ 	add		%g6, %g5, %sp
+-	mov		0, %fp
+ 
+ 	call		init_irqwork_curcpu
+ 	 nop
 diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
 index 510baec..9ff2607 100644
 --- a/arch/sparc/kernel/process_32.c
@@ -10349,6 +10496,68 @@ index c13c9f2..d572c34 100644
  	audit_syscall_exit(regs);
  
  	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
+index 3fdb455..949f773 100644
+--- a/arch/sparc/kernel/setup_64.c
++++ b/arch/sparc/kernel/setup_64.c
+@@ -30,6 +30,7 @@
+ #include <linux/cpu.h>
+ #include <linux/initrd.h>
+ #include <linux/module.h>
++#include <linux/start_kernel.h>
+ 
+ #include <asm/io.h>
+ #include <asm/processor.h>
+@@ -174,7 +175,7 @@ char reboot_command[COMMAND_LINE_SIZE];
+ 
+ static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };
+ 
+-void __init per_cpu_patch(void)
++static void __init per_cpu_patch(void)
+ {
+ 	struct cpuid_patch_entry *p;
+ 	unsigned long ver;
+@@ -266,7 +267,7 @@ void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
+ 	}
+ }
+ 
+-void __init sun4v_patch(void)
++static void __init sun4v_patch(void)
+ {
+ 	extern void sun4v_hvapi_init(void);
+ 
+@@ -335,14 +336,25 @@ static void __init pause_patch(void)
+ 	}
+ }
+ 
+-#ifdef CONFIG_SMP
+-void __init boot_cpu_id_too_large(int cpu)
++void __init start_early_boot(void)
+ {
+-	prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
+-		    cpu, NR_CPUS);
+-	prom_halt();
++	int cpu;
++
++	check_if_starfire();
++	per_cpu_patch();
++	sun4v_patch();
++
++	cpu = hard_smp_processor_id();
++	if (cpu >= NR_CPUS) {
++		prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
++			    cpu, NR_CPUS);
++		prom_halt();
++	}
++	current_thread_info()->cpu = cpu;
++
++	prom_init_report();
++	start_kernel();
+ }
+-#endif
+ 
+ /* On Ultra, we support all of the v8 capabilities. */
+ unsigned long sparc64_elf_hwcap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR |
 diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
 index 8416d7f..f83823c 100644
 --- a/arch/sparc/kernel/smp_64.c
@@ -10626,6 +10835,36 @@ index 33a17e7..d87fb1f 100644
  	ldx	[%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
  
  2:
+diff --git a/arch/sparc/kernel/trampoline_64.S b/arch/sparc/kernel/trampoline_64.S
+index 737f8cb..88ede1d 100644
+--- a/arch/sparc/kernel/trampoline_64.S
++++ b/arch/sparc/kernel/trampoline_64.S
+@@ -109,10 +109,13 @@ startup_continue:
+ 	brnz,pn		%g1, 1b
+ 	 nop
+ 
+-	sethi		%hi(p1275buf), %g2
+-	or		%g2, %lo(p1275buf), %g2
+-	ldx		[%g2 + 0x10], %l2
+-	add		%l2, -(192 + 128), %sp
++	/* Get onto temporary stack which will be in the locked
++	 * kernel image.
++	 */
++	sethi		%hi(tramp_stack), %g1
++	or		%g1, %lo(tramp_stack), %g1
++	add		%g1, TRAMP_STACK_SIZE, %g1
++	sub		%g1, STACKFRAME_SZ + STACK_BIAS + 256, %sp
+ 	flushw
+ 
+ 	/* Setup the loop variables:
+@@ -394,7 +397,6 @@ after_lock_tlb:
+ 	sllx		%g5, THREAD_SHIFT, %g5
+ 	sub		%g5, (STACKFRAME_SZ + STACK_BIAS), %g5
+ 	add		%g6, %g5, %sp
+-	mov		0, %fp
+ 
+ 	rdpr		%pstate, %o1
+ 	or		%o1, PSTATE_IE, %o1
 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
 index 6629829..036032d 100644
 --- a/arch/sparc/kernel/traps_32.c
@@ -11962,6 +12201,47 @@ index 4ced3fc..234f1e4 100644
  	/* Pure DTLB misses do not tell us whether the fault causing
  	 * load/store/atomic was a write or not, it only says that there
  	 * was no match.  So in such a case we (carefully) read the
+diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
+index 1aed043..ae6ce38 100644
+--- a/arch/sparc/mm/gup.c
++++ b/arch/sparc/mm/gup.c
+@@ -160,6 +160,36 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
+ 	return 1;
+ }
+ 
++int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
++			  struct page **pages)
++{
++	struct mm_struct *mm = current->mm;
++	unsigned long addr, len, end;
++	unsigned long next, flags;
++	pgd_t *pgdp;
++	int nr = 0;
++
++	start &= PAGE_MASK;
++	addr = start;
++	len = (unsigned long) nr_pages << PAGE_SHIFT;
++	end = start + len;
++
++	local_irq_save(flags);
++	pgdp = pgd_offset(mm, addr);
++	do {
++		pgd_t pgd = *pgdp;
++
++		next = pgd_addr_end(addr, end);
++		if (pgd_none(pgd))
++			break;
++		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
++			break;
++	} while (pgdp++, addr = next, addr != end);
++	local_irq_restore(flags);
++
++	return nr;
++}
++
+ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ 			struct page **pages)
+ {
 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
 index d329537..2c3746a 100644
 --- a/arch/sparc/mm/hugetlbpage.c
@@ -12106,6 +12386,63 @@ index 9686224..dfbdb10 100644
  #endif /* CONFIG_SMP */
  #endif /* CONFIG_DEBUG_DCFLUSH */
  }
+diff --git a/arch/sparc/prom/cif.S b/arch/sparc/prom/cif.S
+index 9c86b4b..8050f38 100644
+--- a/arch/sparc/prom/cif.S
++++ b/arch/sparc/prom/cif.S
+@@ -11,11 +11,10 @@
+ 	.text
+ 	.globl	prom_cif_direct
+ prom_cif_direct:
++	save	%sp, -192, %sp
+ 	sethi	%hi(p1275buf), %o1
+ 	or	%o1, %lo(p1275buf), %o1
+-	ldx	[%o1 + 0x0010], %o2	! prom_cif_stack
+-	save	%o2, -192, %sp
+-	ldx	[%i1 + 0x0008], %l2	! prom_cif_handler
++	ldx	[%o1 + 0x0008], %l2	! prom_cif_handler
+ 	mov	%g4, %l0
+ 	mov	%g5, %l1
+ 	mov	%g6, %l3
+diff --git a/arch/sparc/prom/init_64.c b/arch/sparc/prom/init_64.c
+index d95db75..110b0d7 100644
+--- a/arch/sparc/prom/init_64.c
++++ b/arch/sparc/prom/init_64.c
+@@ -26,13 +26,13 @@ phandle prom_chosen_node;
+  * It gets passed the pointer to the PROM vector.
+  */
+ 
+-extern void prom_cif_init(void *, void *);
++extern void prom_cif_init(void *);
+ 
+-void __init prom_init(void *cif_handler, void *cif_stack)
++void __init prom_init(void *cif_handler)
+ {
+ 	phandle node;
+ 
+-	prom_cif_init(cif_handler, cif_stack);
++	prom_cif_init(cif_handler);
+ 
+ 	prom_chosen_node = prom_finddevice(prom_chosen_path);
+ 	if (!prom_chosen_node || (s32)prom_chosen_node == -1)
+diff --git a/arch/sparc/prom/p1275.c b/arch/sparc/prom/p1275.c
+index e58b817..c27c30e4 100644
+--- a/arch/sparc/prom/p1275.c
++++ b/arch/sparc/prom/p1275.c
+@@ -19,7 +19,6 @@
+ struct {
+ 	long prom_callback;			/* 0x00 */
+ 	void (*prom_cif_handler)(long *);	/* 0x08 */
+-	unsigned long prom_cif_stack;		/* 0x10 */
+ } p1275buf;
+ 
+ extern void prom_world(int);
+@@ -51,5 +50,4 @@ void p1275_cmd_direct(unsigned long *args)
+ void prom_cif_init(void *cif_handler, void *cif_stack)
+ {
+ 	p1275buf.prom_cif_handler = (void (*)(long *))cif_handler;
+-	p1275buf.prom_cif_stack = (unsigned long)cif_stack;
+ }
 diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
 index b3692ce..e4517c9 100644
 --- a/arch/tile/Kconfig
@@ -33139,7 +33476,7 @@ index f35c66c..84b95ef 100644
  	if (vma == &gate_vma)
  		return "[vsyscall]";
 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
-index 7b179b4..6bd17777 100644
+index 7b179b49..6bd17777 100644
 --- a/arch/x86/mm/iomap_32.c
 +++ b/arch/x86/mm/iomap_32.c
 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
@@ -35205,6 +35542,56 @@ index 1bbedc4..eb795b5 100644
  }
  
  static unsigned long __init intel_mid_calibrate_tsc(void)
+diff --git a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
+index 46aa25c..59a68ed 100644
+--- a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
++++ b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
+@@ -10,10 +10,9 @@
+  */
+ 
+ 
+-/* __attribute__((weak)) makes these declarations overridable */
+ /* For every CPU addition a new get_<cpuname>_ops interface needs
+  * to be added.
+  */
+-extern void *get_penwell_ops(void) __attribute__((weak));
+-extern void *get_cloverview_ops(void) __attribute__((weak));
+-extern void *get_tangier_ops(void) __attribute__((weak));
++extern const void *get_penwell_ops(void);
++extern const void *get_cloverview_ops(void);
++extern const void *get_tangier_ops(void);
+diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c
+index 23381d2..8ddc10e 100644
+--- a/arch/x86/platform/intel-mid/mfld.c
++++ b/arch/x86/platform/intel-mid/mfld.c
+@@ -64,12 +64,12 @@ static void __init penwell_arch_setup(void)
+ 	pm_power_off = mfld_power_off;
+ }
+ 
+-void *get_penwell_ops(void)
++const void *get_penwell_ops(void)
+ {
+ 	return &penwell_ops;
+ }
+ 
+-void *get_cloverview_ops(void)
++const void *get_cloverview_ops(void)
+ {
+ 	return &penwell_ops;
+ }
+diff --git a/arch/x86/platform/intel-mid/mrfl.c b/arch/x86/platform/intel-mid/mrfl.c
+index aaca917..66eadbc 100644
+--- a/arch/x86/platform/intel-mid/mrfl.c
++++ b/arch/x86/platform/intel-mid/mrfl.c
+@@ -97,7 +97,7 @@ static struct intel_mid_ops tangier_ops = {
+ 	.arch_setup = tangier_arch_setup,
+ };
+ 
+-void *get_tangier_ops(void)
++const void *get_tangier_ops(void)
+ {
+ 	return &tangier_ops;
+ }
 diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
 index d6ee929..3637cb5 100644
 --- a/arch/x86/platform/olpc/olpc_dt.c
@@ -36473,6 +36860,20 @@ index 7bdd61b..afec999 100644
  
  static void cryptd_queue_worker(struct work_struct *work);
  
+diff --git a/crypto/cts.c b/crypto/cts.c
+index 042223f..133f087 100644
+--- a/crypto/cts.c
++++ b/crypto/cts.c
+@@ -202,7 +202,8 @@ static int cts_cbc_decrypt(struct crypto_cts_ctx *ctx,
+ 	/* 5. Append the tail (BB - Ln) bytes of Xn (tmp) to Cn to create En */
+ 	memcpy(s + bsize + lastn, tmp + lastn, bsize - lastn);
+ 	/* 6. Decrypt En to create Pn-1 */
+-	memset(iv, 0, sizeof(iv));
++	memzero_explicit(iv, sizeof(iv));
++
+ 	sg_set_buf(&sgsrc[0], s + bsize, bsize);
+ 	sg_set_buf(&sgdst[0], d, bsize);
+ 	err = crypto_blkcipher_decrypt_iv(&lcldesc, sgdst, sgsrc, bsize);
 diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
 index 309d345..1632720 100644
 --- a/crypto/pcrypt.c
@@ -36486,6 +36887,118 @@ index 309d345..1632720 100644
  	if (!ret)
  		kobject_uevent(&pinst->kobj, KOBJ_ADD);
  
+diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
+index 4279480..7bb0474 100644
+--- a/crypto/sha1_generic.c
++++ b/crypto/sha1_generic.c
+@@ -64,7 +64,7 @@ int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
+ 			src = data + done;
+ 		} while (done + SHA1_BLOCK_SIZE <= len);
+ 
+-		memset(temp, 0, sizeof(temp));
++		memzero_explicit(temp, sizeof(temp));
+ 		partial = 0;
+ 	}
+ 	memcpy(sctx->buffer + partial, src, len - done);
+diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
+index 5433667..32c5e5e 100644
+--- a/crypto/sha256_generic.c
++++ b/crypto/sha256_generic.c
+@@ -210,10 +210,9 @@ static void sha256_transform(u32 *state, const u8 *input)
+ 
+ 	/* clear any sensitive info... */
+ 	a = b = c = d = e = f = g = h = t1 = t2 = 0;
+-	memset(W, 0, 64 * sizeof(u32));
++	memzero_explicit(W, 64 * sizeof(u32));
+ }
+ 
+-
+ static int sha224_init(struct shash_desc *desc)
+ {
+ 	struct sha256_state *sctx = shash_desc_ctx(desc);
+@@ -316,7 +315,7 @@ static int sha224_final(struct shash_desc *desc, u8 *hash)
+ 	sha256_final(desc, D);
+ 
+ 	memcpy(hash, D, SHA224_DIGEST_SIZE);
+-	memset(D, 0, SHA256_DIGEST_SIZE);
++	memzero_explicit(D, SHA256_DIGEST_SIZE);
+ 
+ 	return 0;
+ }
+diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
+index 6ed124f..04d295a 100644
+--- a/crypto/sha512_generic.c
++++ b/crypto/sha512_generic.c
+@@ -238,7 +238,7 @@ static int sha384_final(struct shash_desc *desc, u8 *hash)
+ 	sha512_final(desc, D);
+ 
+ 	memcpy(hash, D, 48);
+-	memset(D, 0, 64);
++	memzero_explicit(D, 64);
+ 
+ 	return 0;
+ }
+diff --git a/crypto/tgr192.c b/crypto/tgr192.c
+index 8740355..3c7af0d 100644
+--- a/crypto/tgr192.c
++++ b/crypto/tgr192.c
+@@ -612,7 +612,7 @@ static int tgr160_final(struct shash_desc *desc, u8 * out)
+ 
+ 	tgr192_final(desc, D);
+ 	memcpy(out, D, TGR160_DIGEST_SIZE);
+-	memset(D, 0, TGR192_DIGEST_SIZE);
++	memzero_explicit(D, TGR192_DIGEST_SIZE);
+ 
+ 	return 0;
+ }
+@@ -623,7 +623,7 @@ static int tgr128_final(struct shash_desc *desc, u8 * out)
+ 
+ 	tgr192_final(desc, D);
+ 	memcpy(out, D, TGR128_DIGEST_SIZE);
+-	memset(D, 0, TGR192_DIGEST_SIZE);
++	memzero_explicit(D, TGR192_DIGEST_SIZE);
+ 
+ 	return 0;
+ }
+diff --git a/crypto/vmac.c b/crypto/vmac.c
+index 2eb11a3..d84c24b 100644
+--- a/crypto/vmac.c
++++ b/crypto/vmac.c
+@@ -613,7 +613,7 @@ static int vmac_final(struct shash_desc *pdesc, u8 *out)
+ 	}
+ 	mac = vmac(ctx->partial, ctx->partial_size, nonce, NULL, ctx);
+ 	memcpy(out, &mac, sizeof(vmac_t));
+-	memset(&mac, 0, sizeof(vmac_t));
++	memzero_explicit(&mac, sizeof(vmac_t));
+ 	memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
+ 	ctx->partial_size = 0;
+ 	return 0;
+diff --git a/crypto/wp512.c b/crypto/wp512.c
+index 180f1d6..ec64e77 100644
+--- a/crypto/wp512.c
++++ b/crypto/wp512.c
+@@ -1102,8 +1102,8 @@ static int wp384_final(struct shash_desc *desc, u8 *out)
+ 	u8 D[64];
+ 
+ 	wp512_final(desc, D);
+-	memcpy (out, D, WP384_DIGEST_SIZE);
+-	memset (D, 0, WP512_DIGEST_SIZE);
++	memcpy(out, D, WP384_DIGEST_SIZE);
++	memzero_explicit(D, WP512_DIGEST_SIZE);
+ 
+ 	return 0;
+ }
+@@ -1113,8 +1113,8 @@ static int wp256_final(struct shash_desc *desc, u8 *out)
+ 	u8 D[64];
+ 
+ 	wp512_final(desc, D);
+-	memcpy (out, D, WP256_DIGEST_SIZE);
+-	memset (D, 0, WP512_DIGEST_SIZE);
++	memcpy(out, D, WP256_DIGEST_SIZE);
++	memzero_explicit(D, WP512_DIGEST_SIZE);
+ 
+ 	return 0;
+ }
 diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
 index 15dddc1..b61cf0c 100644
 --- a/drivers/acpi/acpica/hwxfsleep.c
@@ -39176,7 +39689,7 @@ index 8320abd..ec48108 100644
  
  	if (cmd != SIOCWANDEV)
 diff --git a/drivers/char/random.c b/drivers/char/random.c
-index 429b75b..de805d0 100644
+index 429b75b..58488cc 100644
 --- a/drivers/char/random.c
 +++ b/drivers/char/random.c
 @@ -284,9 +284,6 @@
@@ -39222,6 +39735,35 @@ index 429b75b..de805d0 100644
  			unsigned int add =
  				((pool_size - entropy_count)*anfrac*3) >> s;
  
+@@ -1063,8 +1060,8 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
+ 	 * pool while mixing, and hash one final time.
+ 	 */
+ 	sha_transform(hash.w, extract, workspace);
+-	memset(extract, 0, sizeof(extract));
+-	memset(workspace, 0, sizeof(workspace));
++	memzero_explicit(extract, sizeof(extract));
++	memzero_explicit(workspace, sizeof(workspace));
+ 
+ 	/*
+ 	 * In case the hash function has some recognizable output
+@@ -1076,7 +1073,7 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
+ 	hash.w[2] ^= rol32(hash.w[2], 16);
+ 
+ 	memcpy(out, &hash, EXTRACT_SIZE);
+-	memset(&hash, 0, sizeof(hash));
++	memzero_explicit(&hash, sizeof(hash));
+ }
+ 
+ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+@@ -1124,7 +1121,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+ 	}
+ 
+ 	/* Wipe data just returned from memory */
+-	memset(tmp, 0, sizeof(tmp));
++	memzero_explicit(tmp, sizeof(tmp));
+ 
+ 	return ret;
+ }
 @@ -1151,7 +1148,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
  
  		extract_buf(r, tmp);
@@ -39231,6 +39773,15 @@ index 429b75b..de805d0 100644
  			ret = -EFAULT;
  			break;
  		}
+@@ -1162,7 +1159,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
+ 	}
+ 
+ 	/* Wipe data just returned from memory */
+-	memset(tmp, 0, sizeof(tmp));
++	memzero_explicit(tmp, sizeof(tmp));
+ 
+ 	return ret;
+ }
 @@ -1507,7 +1504,7 @@ EXPORT_SYMBOL(generate_random_uuid);
  #include <linux/sysctl.h>
  
@@ -64708,7 +65259,7 @@ index 2183fcf..3c32a98 100644
   	help
  	  Various /proc files exist to monitor process memory utilization:
 diff --git a/fs/proc/array.c b/fs/proc/array.c
-index baf3464..6873520 100644
+index baf3464..5b394ec 100644
 --- a/fs/proc/array.c
 +++ b/fs/proc/array.c
 @@ -60,6 +60,7 @@
@@ -64846,14 +65397,22 @@ index baf3464..6873520 100644
  	if (mm) {
  		size = task_statm(mm, &shared, &text, &data, &resident);
  		mmput(mm);
-@@ -581,6 +649,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
+@@ -581,6 +649,21 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
  	return 0;
  }
  
 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
 +{
-+	return sprintf(buffer, "%pI4\n", &task->signal->curr_ip);
++	unsigned long flags;
++	u32 curr_ip = 0;
++
++	if (lock_task_sighand(task, &flags)) {
++		curr_ip = task->signal->curr_ip;
++		unlock_task_sighand(task, &flags);
++	}
++
++	return sprintf(buffer, "%pI4\n", &curr_ip);
 +}
 +#endif
 +
@@ -77402,7 +77961,7 @@ index 0000000..3860c7e
 +}
 diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
 new file mode 100644
-index 0000000..c0aef3a
+index 0000000..e3650b6
 --- /dev/null
 +++ b/grsecurity/grsec_sock.c
 @@ -0,0 +1,244 @@
@@ -77529,10 +78088,10 @@ index 0000000..c0aef3a
 +
 +#endif
 +
-+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
++void gr_update_task_in_ip_table(const struct inet_sock *inet)
 +{
 +#ifdef CONFIG_GRKERNSEC
-+	struct signal_struct *sig = task->signal;
++	struct signal_struct *sig = current->signal;
 +	struct conn_table_entry *newent;
 +
 +	newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
@@ -79383,6 +79942,19 @@ index 939533d..cf0a57c 100644
  
  /**
   * struct clk_init_data - holds init data that's common to all clocks and is
+diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
+index 67301a4..879065d 100644
+--- a/include/linux/clocksource.h
++++ b/include/linux/clocksource.h
+@@ -289,7 +289,7 @@ extern struct clocksource* clocksource_get_next(void);
+ extern void clocksource_change_rating(struct clocksource *cs, int rating);
+ extern void clocksource_suspend(void);
+ extern void clocksource_resume(void);
+-extern struct clocksource * __init __weak clocksource_default_clock(void);
++extern struct clocksource * __init clocksource_default_clock(void);
+ extern void clocksource_mark_unstable(struct clocksource *cs);
+ 
+ extern u64
 diff --git a/include/linux/compat.h b/include/linux/compat.h
 index 3f448c6..df3ce1d 100644
 --- a/include/linux/compat.h
@@ -79717,6 +80289,32 @@ index d08e4d2..95fad61 100644
  int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
  
  /**
+diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
+index 7032518..60023e5 100644
+--- a/include/linux/crash_dump.h
++++ b/include/linux/crash_dump.h
+@@ -14,14 +14,13 @@
+ extern unsigned long long elfcorehdr_addr;
+ extern unsigned long long elfcorehdr_size;
+ 
+-extern int __weak elfcorehdr_alloc(unsigned long long *addr,
+-				   unsigned long long *size);
+-extern void __weak elfcorehdr_free(unsigned long long addr);
+-extern ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos);
+-extern ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos);
+-extern int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
+-					 unsigned long from, unsigned long pfn,
+-					 unsigned long size, pgprot_t prot);
++extern int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size);
++extern void elfcorehdr_free(unsigned long long addr);
++extern ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos);
++extern ssize_t elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos);
++extern int remap_oldmem_pfn_range(struct vm_area_struct *vma,
++				  unsigned long from, unsigned long pfn,
++				  unsigned long size, pgprot_t prot);
+ 
+ extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
+ 						unsigned long, int);
 diff --git a/include/linux/cred.h b/include/linux/cred.h
 index 04421e8..a85afd4 100644
 --- a/include/linux/cred.h
@@ -81917,7 +82515,7 @@ index a74c3a8..28d3f21 100644
  extern struct key_type key_type_keyring;
  
 diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
-index 6b06d37..c134867 100644
+index 6b06d37..19f605f 100644
 --- a/include/linux/kgdb.h
 +++ b/include/linux/kgdb.h
 @@ -52,7 +52,7 @@ extern int kgdb_connected;
@@ -81938,7 +82536,7 @@ index 6b06d37..c134867 100644
  
  /**
   * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
-@@ -279,7 +279,7 @@ struct kgdb_io {
+@@ -279,11 +279,11 @@ struct kgdb_io {
  	void			(*pre_exception) (void);
  	void			(*post_exception) (void);
  	int			is_console;
@@ -81947,6 +82545,11 @@ index 6b06d37..c134867 100644
  
  extern struct kgdb_arch		arch_kgdb_ops;
  
+-extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
++extern unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs);
+ 
+ #ifdef CONFIG_SERIAL_KGDB_NMI
+ extern int kgdb_register_nmi_console(void);
 diff --git a/include/linux/kmod.h b/include/linux/kmod.h
 index 0555cc6..40116ce 100644
 --- a/include/linux/kmod.h
@@ -82196,6 +82799,19 @@ index c45c089..298841c 100644
  {
  	u32 remainder;
  	return div_u64_rem(dividend, divisor, &remainder);
+diff --git a/include/linux/memory.h b/include/linux/memory.h
+index bb7384e..8b8d8d1 100644
+--- a/include/linux/memory.h
++++ b/include/linux/memory.h
+@@ -35,7 +35,7 @@ struct memory_block {
+ };
+ 
+ int arch_get_memory_phys_device(unsigned long start_pfn);
+-unsigned long __weak memory_block_size_bytes(void);
++unsigned long memory_block_size_bytes(void);
+ 
+ /* These states are exposed to userspace as text strings in sysfs */
+ #define	MEM_ONLINE		(1<<0) /* exposed to userspace */
 diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
 index 5bba088..7ad4ae7 100644
 --- a/include/linux/mempolicy.h
@@ -84304,6 +84920,29 @@ index 680f9a3..f13aeb0 100644
  	__SONET_ITEMS
  #undef __HANDLE_ITEM
  };
+diff --git a/include/linux/string.h b/include/linux/string.h
+index ac889c5..0ed878d 100644
+--- a/include/linux/string.h
++++ b/include/linux/string.h
+@@ -129,7 +129,7 @@ int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4);
+ #endif
+ 
+ extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
+-			const void *from, size_t available);
++				       const void *from, size_t available);
+ 
+ /**
+  * strstarts - does @str start with @prefix?
+@@ -141,7 +141,8 @@ static inline bool strstarts(const char *str, const char *prefix)
+ 	return strncmp(str, prefix, strlen(prefix)) == 0;
+ }
+ 
+-extern size_t memweight(const void *ptr, size_t bytes);
++size_t memweight(const void *ptr, size_t bytes);
++void memzero_explicit(void *s, size_t count);
+ 
+ /**
+  * kbasename - return the last part of a pathname.
 diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
 index 07d8e53..dc934c9 100644
 --- a/include/linux/sunrpc/addr.h
@@ -93941,10 +94580,33 @@ index 0922579..9d7adb9 100644
  #endif
  }
 diff --git a/lib/string.c b/lib/string.c
-index e5878de..315fad2 100644
+index e5878de..64941b2 100644
 --- a/lib/string.c
 +++ b/lib/string.c
-@@ -789,9 +789,9 @@ void *memchr_inv(const void *start, int c, size_t bytes)
+@@ -586,6 +586,22 @@ void *memset(void *s, int c, size_t count)
+ EXPORT_SYMBOL(memset);
+ #endif
+ 
++/**
++ * memzero_explicit - Fill a region of memory (e.g. sensitive
++ *		      keying data) with 0s.
++ * @s: Pointer to the start of the area.
++ * @count: The size of the area.
++ *
++ * memzero_explicit() doesn't need an arch-specific version as
++ * it just invokes the one of memset() implicitly.
++ */
++void memzero_explicit(void *s, size_t count)
++{
++	memset(s, 0, count);
++	OPTIMIZER_HIDE_VAR(s);
++}
++EXPORT_SYMBOL(memzero_explicit);
++
+ #ifndef __HAVE_ARCH_MEMCPY
+ /**
+  * memcpy - Copy one area of memory to another
+@@ -789,9 +805,9 @@ void *memchr_inv(const void *start, int c, size_t bytes)
  		return check_bytes8(start, value, bytes);
  
  	value64 = value;
@@ -94697,7 +95359,7 @@ index 33365e9..2234ef9 100644
  	}
  	unset_migratetype_isolate(page, MIGRATE_MOVABLE);
 diff --git a/mm/memory.c b/mm/memory.c
-index 492e36f..3771c0a 100644
+index 492e36f..55613ed 100644
 --- a/mm/memory.c
 +++ b/mm/memory.c
 @@ -403,6 +403,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
@@ -94734,6 +95396,19 @@ index 492e36f..3771c0a 100644
  }
  
  /*
+@@ -679,10 +685,10 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
+ 	 * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
+ 	 */
+ 	if (vma->vm_ops)
+-		printk(KERN_ALERT "vma->vm_ops->fault: %pSR\n",
++		printk(KERN_ALERT "vma->vm_ops->fault: %pAR\n",
+ 		       vma->vm_ops->fault);
+ 	if (vma->vm_file)
+-		printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pSR\n",
++		printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pAR\n",
+ 		       vma->vm_file->f_op->mmap);
+ 	dump_stack();
+ 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 @@ -1636,12 +1642,6 @@ no_page_table:
  	return page;
  }
@@ -100952,7 +101627,7 @@ index 0d1e2cb..4501a2c 100644
  
  void inet_get_local_port_range(struct net *net, int *low, int *high)
 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
-index 8b9cf27..0d8d592 100644
+index 8b9cf27..9c17cab 100644
 --- a/net/ipv4/inet_hashtables.c
 +++ b/net/ipv4/inet_hashtables.c
 @@ -18,6 +18,7 @@
@@ -100967,7 +101642,7 @@ index 8b9cf27..0d8d592 100644
  	return inet_ehashfn(net, laddr, lport, faddr, fport);
  }
  
-+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
++extern void gr_update_task_in_ip_table(const struct inet_sock *inet);
 +
  /*
   * Allocate and initialize a new local port bind bucket.
@@ -100976,7 +101651,7 @@ index 8b9cf27..0d8d592 100644
  			twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
  		spin_unlock(&head->lock);
  
-+		gr_update_task_in_ip_table(current, inet_sk(sk));
++		gr_update_task_in_ip_table(inet_sk(sk));
 +
  		if (tw) {
  			inet_twsk_deschedule(tw, death_row);
@@ -102786,10 +103461,45 @@ index 20b63d2..31a777d 100644
  
  	kfree_skb(skb);
 diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
-index 5f8e128..865d38e 100644
+index 5f8e128..d32ac8c 100644
 --- a/net/ipv6/xfrm6_policy.c
 +++ b/net/ipv6/xfrm6_policy.c
-@@ -212,11 +212,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
+@@ -170,8 +170,10 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
+ 		case IPPROTO_DCCP:
+ 			if (!onlyproto && (nh + offset + 4 < skb->data ||
+ 			     pskb_may_pull(skb, nh + offset + 4 - skb->data))) {
+-				__be16 *ports = (__be16 *)exthdr;
++				__be16 *ports;
+ 
++				nh = skb_network_header(skb);
++				ports = (__be16 *)(nh + offset);
+ 				fl6->fl6_sport = ports[!!reverse];
+ 				fl6->fl6_dport = ports[!reverse];
+ 			}
+@@ -180,8 +182,10 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
+ 
+ 		case IPPROTO_ICMPV6:
+ 			if (!onlyproto && pskb_may_pull(skb, nh + offset + 2 - skb->data)) {
+-				u8 *icmp = (u8 *)exthdr;
++				u8 *icmp;
+ 
++				nh = skb_network_header(skb);
++				icmp = (u8 *)(nh + offset);
+ 				fl6->fl6_icmp_type = icmp[0];
+ 				fl6->fl6_icmp_code = icmp[1];
+ 			}
+@@ -192,8 +196,9 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
+ 		case IPPROTO_MH:
+ 			if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) {
+ 				struct ip6_mh *mh;
+-				mh = (struct ip6_mh *)exthdr;
+ 
++				nh = skb_network_header(skb);
++				mh = (struct ip6_mh *)(nh + offset);
+ 				fl6->fl6_mh_type = mh->ip6mh_type;
+ 			}
+ 			fl6->flowi6_proto = nexthdr;
+@@ -212,11 +217,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
  	}
  }
  
@@ -102803,7 +103513,7 @@ index 5f8e128..865d38e 100644
  	return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
  }
  
-@@ -329,19 +329,19 @@ static struct ctl_table xfrm6_policy_table[] = {
+@@ -329,19 +334,19 @@ static struct ctl_table xfrm6_policy_table[] = {
  
  static int __net_init xfrm6_net_init(struct net *net)
  {
@@ -102828,7 +103538,7 @@ index 5f8e128..865d38e 100644
  	if (!hdr)
  		goto err_reg;
  
-@@ -349,8 +349,7 @@ static int __net_init xfrm6_net_init(struct net *net)
+@@ -349,8 +354,7 @@ static int __net_init xfrm6_net_init(struct net *net)
  	return 0;
  
  err_reg:
@@ -123835,6 +124545,44 @@ index 0a578fe..b81f62d 100644
  	0;							\
  })
  
+diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
+index 714b949..1f0dc1e 100644
+--- a/virt/kvm/iommu.c
++++ b/virt/kvm/iommu.c
+@@ -43,13 +43,13 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
+ 				gfn_t base_gfn, unsigned long npages);
+ 
+ static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
+-			   unsigned long size)
++			   unsigned long npages)
+ {
+ 	gfn_t end_gfn;
+ 	pfn_t pfn;
+ 
+ 	pfn     = gfn_to_pfn_memslot(slot, gfn);
+-	end_gfn = gfn + (size >> PAGE_SHIFT);
++	end_gfn = gfn + npages;
+ 	gfn    += 1;
+ 
+ 	if (is_error_noslot_pfn(pfn))
+@@ -119,7 +119,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
+ 		 * Pin all pages we are about to map in memory. This is
+ 		 * important because we unmap and unpin in 4kb steps later.
+ 		 */
+-		pfn = kvm_pin_pages(slot, gfn, page_size);
++		pfn = kvm_pin_pages(slot, gfn, page_size >> PAGE_SHIFT);
+ 		if (is_error_noslot_pfn(pfn)) {
+ 			gfn += 1;
+ 			continue;
+@@ -131,7 +131,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
+ 		if (r) {
+ 			printk(KERN_ERR "kvm_iommu_map_address:"
+ 			       "iommu failed to map pfn=%llx\n", pfn);
+-			kvm_unpin_pages(kvm, pfn, page_size);
++			kvm_unpin_pages(kvm, pfn, page_size >> PAGE_SHIFT);
+ 			goto unmap_pages;
+ 		}
+ 
 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
 index 03a0381..8b31923 100644
 --- a/virt/kvm/kvm_main.c

diff --git a/3.17.1/0000_README b/3.17.1/0000_README
index 8290db0..8ff44c0 100644
--- a/3.17.1/0000_README
+++ b/3.17.1/0000_README
@@ -2,7 +2,7 @@ README
 -----------------------------------------------------------------------------
 Individual Patch Descriptions:
 -----------------------------------------------------------------------------
-Patch:	4420_grsecurity-3.0-3.17.1-201410192051.patch
+Patch:	4420_grsecurity-3.0-3.17.1-201410250027.patch
 From:	http://www.grsecurity.net
 Desc:	hardened-sources base patch from upstream grsecurity
 

diff --git a/3.17.1/4420_grsecurity-3.0-3.17.1-201410192051.patch b/3.17.1/4420_grsecurity-3.0-3.17.1-201410250027.patch
similarity index 98%
rename from 3.17.1/4420_grsecurity-3.0-3.17.1-201410192051.patch
rename to 3.17.1/4420_grsecurity-3.0-3.17.1-201410250027.patch
index 77eea49..9f9c8f5 100644
--- a/3.17.1/4420_grsecurity-3.0-3.17.1-201410192051.patch
+++ b/3.17.1/4420_grsecurity-3.0-3.17.1-201410250027.patch
@@ -936,6 +936,22 @@ index 98838a0..b304fb4 100644
  	} else if (!cause) {
  		/* Allow reads even for write-only mappings */
  		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
+diff --git a/arch/arc/kernel/kgdb.c b/arch/arc/kernel/kgdb.c
+index a2ff5c5..ecf6a78 100644
+--- a/arch/arc/kernel/kgdb.c
++++ b/arch/arc/kernel/kgdb.c
+@@ -158,11 +158,6 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
+ 	return -1;
+ }
+ 
+-unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
+-{
+-	return instruction_pointer(regs);
+-}
+-
+ int kgdb_arch_init(void)
+ {
+ 	single_step_data.armed = 0;
 diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
 index 32cbbd5..c102df9 100644
 --- a/arch/arm/Kconfig
@@ -9934,6 +9950,20 @@ index 370ca1e..d4f4a98 100644
  extern unsigned long sparc64_elf_hwcap;
  #define ELF_HWCAP	sparc64_elf_hwcap
  
+diff --git a/arch/sparc/include/asm/oplib_64.h b/arch/sparc/include/asm/oplib_64.h
+index f346824..2e3a4ad 100644
+--- a/arch/sparc/include/asm/oplib_64.h
++++ b/arch/sparc/include/asm/oplib_64.h
+@@ -62,7 +62,8 @@ struct linux_mem_p1275 {
+ /* You must call prom_init() before using any of the library services,
+  * preferably as early as possible.  Pass it the romvec pointer.
+  */
+-void prom_init(void *cif_handler, void *cif_stack);
++void prom_init(void *cif_handler);
++void prom_init_report(void);
+ 
+ /* Boot argument acquisition, returns the boot command line string. */
+ char *prom_getbootargs(void);
 diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
 index a3890da..f6a408e 100644
 --- a/arch/sparc/include/asm/pgalloc_32.h
@@ -10027,10 +10057,17 @@ index 79da178..c2eede8 100644
  				    SRMMU_DIRTY | SRMMU_REF)
  
 diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
-index f5fffd8..a0669f0 100644
+index f5fffd8..4272fe8 100644
 --- a/arch/sparc/include/asm/setup.h
 +++ b/arch/sparc/include/asm/setup.h
-@@ -53,8 +53,8 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs);
+@@ -48,13 +48,15 @@ unsigned long safe_compute_effective_address(struct pt_regs *, unsigned int);
+ #endif
+ 
+ #ifdef CONFIG_SPARC64
++void __init start_early_boot(void);
++
+ /* unaligned_64.c */
+ int handle_ldf_stq(u32 insn, struct pt_regs *regs);
  void handle_ld_nf(u32 insn, struct pt_regs *regs);
  
  /* init_64.c */
@@ -10330,6 +10367,104 @@ index 7cf9c6e..6206648 100644
  
  extra-y     := head_$(BITS).o
  
+diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h
+index ebaba61..88d322b 100644
+--- a/arch/sparc/kernel/entry.h
++++ b/arch/sparc/kernel/entry.h
+@@ -65,13 +65,10 @@ struct pause_patch_entry {
+ extern struct pause_patch_entry __pause_3insn_patch,
+ 	__pause_3insn_patch_end;
+ 
+-void __init per_cpu_patch(void);
+ void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
+ 			     struct sun4v_1insn_patch_entry *);
+ void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *,
+ 			     struct sun4v_2insn_patch_entry *);
+-void __init sun4v_patch(void);
+-void __init boot_cpu_id_too_large(int cpu);
+ extern unsigned int dcache_parity_tl1_occurred;
+ extern unsigned int icache_parity_tl1_occurred;
+ 
+diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
+index 452f04fe..fbea0ac 100644
+--- a/arch/sparc/kernel/head_64.S
++++ b/arch/sparc/kernel/head_64.S
+@@ -660,14 +660,12 @@ tlb_fixup_done:
+ 	sethi	%hi(init_thread_union), %g6
+ 	or	%g6, %lo(init_thread_union), %g6
+ 	ldx	[%g6 + TI_TASK], %g4
+-	mov	%sp, %l6
+ 
+ 	wr	%g0, ASI_P, %asi
+ 	mov	1, %g1
+ 	sllx	%g1, THREAD_SHIFT, %g1
+ 	sub	%g1, (STACKFRAME_SZ + STACK_BIAS), %g1
+ 	add	%g6, %g1, %sp
+-	mov	0, %fp
+ 
+ 	/* Set per-cpu pointer initially to zero, this makes
+ 	 * the boot-cpu use the in-kernel-image per-cpu areas
+@@ -694,44 +692,14 @@ tlb_fixup_done:
+ 	 nop
+ #endif
+ 
+-	mov	%l6, %o1			! OpenPROM stack
+ 	call	prom_init
+ 	 mov	%l7, %o0			! OpenPROM cif handler
+ 
+-	/* Initialize current_thread_info()->cpu as early as possible.
+-	 * In order to do that accurately we have to patch up the get_cpuid()
+-	 * assembler sequences.  And that, in turn, requires that we know
+-	 * if we are on a Starfire box or not.  While we're here, patch up
+-	 * the sun4v sequences as well.
++	/* To create a one-register-window buffer between the kernel's
++	 * initial stack and the last stack frame we use from the firmware,
++	 * do the rest of the boot from a C helper function.
+ 	 */
+-	call	check_if_starfire
+-	 nop
+-	call	per_cpu_patch
+-	 nop
+-	call	sun4v_patch
+-	 nop
+-
+-#ifdef CONFIG_SMP
+-	call	hard_smp_processor_id
+-	 nop
+-	cmp	%o0, NR_CPUS
+-	blu,pt	%xcc, 1f
+-	 nop
+-	call	boot_cpu_id_too_large
+-	 nop
+-	/* Not reached... */
+-
+-1:
+-#else
+-	mov	0, %o0
+-#endif
+-	sth	%o0, [%g6 + TI_CPU]
+-
+-	call	prom_init_report
+-	 nop
+-
+-	/* Off we go.... */
+-	call	start_kernel
++	call	start_early_boot
+ 	 nop
+ 	/* Not reached... */
+ 
+diff --git a/arch/sparc/kernel/hvtramp.S b/arch/sparc/kernel/hvtramp.S
+index b7ddcdd..cdbfec2 100644
+--- a/arch/sparc/kernel/hvtramp.S
++++ b/arch/sparc/kernel/hvtramp.S
+@@ -109,7 +109,6 @@ hv_cpu_startup:
+ 	sllx		%g5, THREAD_SHIFT, %g5
+ 	sub		%g5, (STACKFRAME_SZ + STACK_BIAS), %g5
+ 	add		%g6, %g5, %sp
+-	mov		0, %fp
+ 
+ 	call		init_irqwork_curcpu
+ 	 nop
 diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
 index 50e7b62..79fae35 100644
 --- a/arch/sparc/kernel/process_32.c
@@ -10452,6 +10587,68 @@ index c13c9f2..d572c34 100644
  	audit_syscall_exit(regs);
  
  	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
+index 3fdb455..949f773 100644
+--- a/arch/sparc/kernel/setup_64.c
++++ b/arch/sparc/kernel/setup_64.c
+@@ -30,6 +30,7 @@
+ #include <linux/cpu.h>
+ #include <linux/initrd.h>
+ #include <linux/module.h>
++#include <linux/start_kernel.h>
+ 
+ #include <asm/io.h>
+ #include <asm/processor.h>
+@@ -174,7 +175,7 @@ char reboot_command[COMMAND_LINE_SIZE];
+ 
+ static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };
+ 
+-void __init per_cpu_patch(void)
++static void __init per_cpu_patch(void)
+ {
+ 	struct cpuid_patch_entry *p;
+ 	unsigned long ver;
+@@ -266,7 +267,7 @@ void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
+ 	}
+ }
+ 
+-void __init sun4v_patch(void)
++static void __init sun4v_patch(void)
+ {
+ 	extern void sun4v_hvapi_init(void);
+ 
+@@ -335,14 +336,25 @@ static void __init pause_patch(void)
+ 	}
+ }
+ 
+-#ifdef CONFIG_SMP
+-void __init boot_cpu_id_too_large(int cpu)
++void __init start_early_boot(void)
+ {
+-	prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
+-		    cpu, NR_CPUS);
+-	prom_halt();
++	int cpu;
++
++	check_if_starfire();
++	per_cpu_patch();
++	sun4v_patch();
++
++	cpu = hard_smp_processor_id();
++	if (cpu >= NR_CPUS) {
++		prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
++			    cpu, NR_CPUS);
++		prom_halt();
++	}
++	current_thread_info()->cpu = cpu;
++
++	prom_init_report();
++	start_kernel();
+ }
+-#endif
+ 
+ /* On Ultra, we support all of the v8 capabilities. */
+ unsigned long sparc64_elf_hwcap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR |
 diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
 index f7ba875..b65677e 100644
 --- a/arch/sparc/kernel/smp_64.c
@@ -10718,6 +10915,36 @@ index 33a17e7..d87fb1f 100644
  	ldx	[%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
  
  2:
+diff --git a/arch/sparc/kernel/trampoline_64.S b/arch/sparc/kernel/trampoline_64.S
+index 737f8cb..88ede1d 100644
+--- a/arch/sparc/kernel/trampoline_64.S
++++ b/arch/sparc/kernel/trampoline_64.S
+@@ -109,10 +109,13 @@ startup_continue:
+ 	brnz,pn		%g1, 1b
+ 	 nop
+ 
+-	sethi		%hi(p1275buf), %g2
+-	or		%g2, %lo(p1275buf), %g2
+-	ldx		[%g2 + 0x10], %l2
+-	add		%l2, -(192 + 128), %sp
++	/* Get onto temporary stack which will be in the locked
++	 * kernel image.
++	 */
++	sethi		%hi(tramp_stack), %g1
++	or		%g1, %lo(tramp_stack), %g1
++	add		%g1, TRAMP_STACK_SIZE, %g1
++	sub		%g1, STACKFRAME_SZ + STACK_BIAS + 256, %sp
+ 	flushw
+ 
+ 	/* Setup the loop variables:
+@@ -394,7 +397,6 @@ after_lock_tlb:
+ 	sllx		%g5, THREAD_SHIFT, %g5
+ 	sub		%g5, (STACKFRAME_SZ + STACK_BIAS), %g5
+ 	add		%g6, %g5, %sp
+-	mov		0, %fp
+ 
+ 	rdpr		%pstate, %o1
+ 	or		%o1, PSTATE_IE, %o1
 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
 index 6fd386c5..6907d81 100644
 --- a/arch/sparc/kernel/traps_32.c
@@ -12054,6 +12281,47 @@ index 587cd05..fbdf17a 100644
  	/* Pure DTLB misses do not tell us whether the fault causing
  	 * load/store/atomic was a write or not, it only says that there
  	 * was no match.  So in such a case we (carefully) read the
+diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
+index 1aed043..ae6ce38 100644
+--- a/arch/sparc/mm/gup.c
++++ b/arch/sparc/mm/gup.c
+@@ -160,6 +160,36 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
+ 	return 1;
+ }
+ 
++int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
++			  struct page **pages)
++{
++	struct mm_struct *mm = current->mm;
++	unsigned long addr, len, end;
++	unsigned long next, flags;
++	pgd_t *pgdp;
++	int nr = 0;
++
++	start &= PAGE_MASK;
++	addr = start;
++	len = (unsigned long) nr_pages << PAGE_SHIFT;
++	end = start + len;
++
++	local_irq_save(flags);
++	pgdp = pgd_offset(mm, addr);
++	do {
++		pgd_t pgd = *pgdp;
++
++		next = pgd_addr_end(addr, end);
++		if (pgd_none(pgd))
++			break;
++		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
++			break;
++	} while (pgdp++, addr = next, addr != end);
++	local_irq_restore(flags);
++
++	return nr;
++}
++
+ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ 			struct page **pages)
+ {
 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
 index d329537..2c3746a 100644
 --- a/arch/sparc/mm/hugetlbpage.c
@@ -12210,6 +12478,63 @@ index ece4af0..f04b862 100644
 +
 +	bpf_prog_unlock_free(fp);
  }
+diff --git a/arch/sparc/prom/cif.S b/arch/sparc/prom/cif.S
+index 9c86b4b..8050f38 100644
+--- a/arch/sparc/prom/cif.S
++++ b/arch/sparc/prom/cif.S
+@@ -11,11 +11,10 @@
+ 	.text
+ 	.globl	prom_cif_direct
+ prom_cif_direct:
++	save	%sp, -192, %sp
+ 	sethi	%hi(p1275buf), %o1
+ 	or	%o1, %lo(p1275buf), %o1
+-	ldx	[%o1 + 0x0010], %o2	! prom_cif_stack
+-	save	%o2, -192, %sp
+-	ldx	[%i1 + 0x0008], %l2	! prom_cif_handler
++	ldx	[%o1 + 0x0008], %l2	! prom_cif_handler
+ 	mov	%g4, %l0
+ 	mov	%g5, %l1
+ 	mov	%g6, %l3
+diff --git a/arch/sparc/prom/init_64.c b/arch/sparc/prom/init_64.c
+index d95db75..110b0d7 100644
+--- a/arch/sparc/prom/init_64.c
++++ b/arch/sparc/prom/init_64.c
+@@ -26,13 +26,13 @@ phandle prom_chosen_node;
+  * It gets passed the pointer to the PROM vector.
+  */
+ 
+-extern void prom_cif_init(void *, void *);
++extern void prom_cif_init(void *);
+ 
+-void __init prom_init(void *cif_handler, void *cif_stack)
++void __init prom_init(void *cif_handler)
+ {
+ 	phandle node;
+ 
+-	prom_cif_init(cif_handler, cif_stack);
++	prom_cif_init(cif_handler);
+ 
+ 	prom_chosen_node = prom_finddevice(prom_chosen_path);
+ 	if (!prom_chosen_node || (s32)prom_chosen_node == -1)
+diff --git a/arch/sparc/prom/p1275.c b/arch/sparc/prom/p1275.c
+index e58b817..c27c30e4 100644
+--- a/arch/sparc/prom/p1275.c
++++ b/arch/sparc/prom/p1275.c
+@@ -19,7 +19,6 @@
+ struct {
+ 	long prom_callback;			/* 0x00 */
+ 	void (*prom_cif_handler)(long *);	/* 0x08 */
+-	unsigned long prom_cif_stack;		/* 0x10 */
+ } p1275buf;
+ 
+ extern void prom_world(int);
+@@ -51,5 +50,4 @@ void p1275_cmd_direct(unsigned long *args)
+ void prom_cif_init(void *cif_handler, void *cif_stack)
+ {
+ 	p1275buf.prom_cif_handler = (void (*)(long *))cif_handler;
+-	p1275buf.prom_cif_stack = (unsigned long)cif_stack;
+ }
 diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
 index 7fcd492..1311074 100644
 --- a/arch/tile/Kconfig
@@ -17012,6 +17337,40 @@ index 53cdfb2..d1369e6 100644
  
  #define flush_insn_slot(p)	do { } while (0)
  
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 7c492ed..d16311f 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -990,6 +990,20 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
+ 	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
+ }
+ 
++static inline u64 get_canonical(u64 la)
++{
++	return ((int64_t)la << 16) >> 16;
++}
++
++static inline bool is_noncanonical_address(u64 la)
++{
++#ifdef CONFIG_X86_64
++	return get_canonical(la) != la;
++#else
++	return false;
++#endif
++}
++
+ #define TSS_IOPB_BASE_OFFSET 0x66
+ #define TSS_BASE_SIZE 0x68
+ #define TSS_IOPB_SIZE (65536 / 8)
+@@ -1048,7 +1062,7 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
+ void kvm_vcpu_reset(struct kvm_vcpu *vcpu);
+ 
+ void kvm_define_shared_msr(unsigned index, u32 msr);
+-void kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
++int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
+ 
+ bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
+ 
 diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
 index 4ad6560..75c7bdd 100644
 --- a/arch/x86/include/asm/local.h
@@ -20495,6 +20854,26 @@ index 7b0a55a..ad115bf 100644
  #endif /* __ASSEMBLY__ */
  
  /* top of stack page */
+diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
+index 0e79420..990a2fe 100644
+--- a/arch/x86/include/uapi/asm/vmx.h
++++ b/arch/x86/include/uapi/asm/vmx.h
+@@ -67,6 +67,7 @@
+ #define EXIT_REASON_EPT_MISCONFIG       49
+ #define EXIT_REASON_INVEPT              50
+ #define EXIT_REASON_PREEMPTION_TIMER    52
++#define EXIT_REASON_INVVPID             53
+ #define EXIT_REASON_WBINVD              54
+ #define EXIT_REASON_XSETBV              55
+ #define EXIT_REASON_APIC_WRITE          56
+@@ -114,6 +115,7 @@
+ 	{ EXIT_REASON_EOI_INDUCED,           "EOI_INDUCED" }, \
+ 	{ EXIT_REASON_INVALID_STATE,         "INVALID_STATE" }, \
+ 	{ EXIT_REASON_INVD,                  "INVD" }, \
++	{ EXIT_REASON_INVVPID,               "INVVPID" }, \
+ 	{ EXIT_REASON_INVPCID,               "INVPCID" }
+ 
+ #endif /* _UAPIVMX_H */
 diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
 index ada2e2d..ca69e16 100644
 --- a/arch/x86/kernel/Makefile
@@ -28485,6 +28864,543 @@ index 38a0afe..94421a9 100644
  	return 0;
  
  out:
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 03954f7..48daa1a 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -504,11 +504,6 @@ static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
+ 	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
+ }
+ 
+-static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
+-{
+-	register_address_increment(ctxt, &ctxt->_eip, rel);
+-}
+-
+ static u32 desc_limit_scaled(struct desc_struct *desc)
+ {
+ 	u32 limit = get_desc_limit(desc);
+@@ -568,6 +563,38 @@ static int emulate_nm(struct x86_emulate_ctxt *ctxt)
+ 	return emulate_exception(ctxt, NM_VECTOR, 0, false);
+ }
+ 
++static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
++			       int cs_l)
++{
++	switch (ctxt->op_bytes) {
++	case 2:
++		ctxt->_eip = (u16)dst;
++		break;
++	case 4:
++		ctxt->_eip = (u32)dst;
++		break;
++	case 8:
++		if ((cs_l && is_noncanonical_address(dst)) ||
++		    (!cs_l && (dst & ~(u32)-1)))
++			return emulate_gp(ctxt, 0);
++		ctxt->_eip = dst;
++		break;
++	default:
++		WARN(1, "unsupported eip assignment size\n");
++	}
++	return X86EMUL_CONTINUE;
++}
++
++static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
++{
++	return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);
++}
++
++static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
++{
++	return assign_eip_near(ctxt, ctxt->_eip + rel);
++}
++
+ static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
+ {
+ 	u16 selector;
+@@ -750,8 +777,10 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
+ static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
+ 					       unsigned size)
+ {
+-	if (unlikely(ctxt->fetch.end - ctxt->fetch.ptr < size))
+-		return __do_insn_fetch_bytes(ctxt, size);
++	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
++
++	if (unlikely(done_size < size))
++		return __do_insn_fetch_bytes(ctxt, size - done_size);
+ 	else
+ 		return X86EMUL_CONTINUE;
+ }
+@@ -1415,7 +1444,9 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+ 
+ /* Does not support long mode */
+ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+-				     u16 selector, int seg, u8 cpl, bool in_task_switch)
++				     u16 selector, int seg, u8 cpl,
++				     bool in_task_switch,
++				     struct desc_struct *desc)
+ {
+ 	struct desc_struct seg_desc, old_desc;
+ 	u8 dpl, rpl;
+@@ -1547,6 +1578,8 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+ 	}
+ load:
+ 	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
++	if (desc)
++		*desc = seg_desc;
+ 	return X86EMUL_CONTINUE;
+ exception:
+ 	emulate_exception(ctxt, err_vec, err_code, true);
+@@ -1557,7 +1590,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+ 				   u16 selector, int seg)
+ {
+ 	u8 cpl = ctxt->ops->cpl(ctxt);
+-	return __load_segment_descriptor(ctxt, selector, seg, cpl, false);
++	return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL);
+ }
+ 
+ static void write_register_operand(struct operand *op)
+@@ -1951,17 +1984,31 @@ static int em_iret(struct x86_emulate_ctxt *ctxt)
+ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
+ {
+ 	int rc;
+-	unsigned short sel;
++	unsigned short sel, old_sel;
++	struct desc_struct old_desc, new_desc;
++	const struct x86_emulate_ops *ops = ctxt->ops;
++	u8 cpl = ctxt->ops->cpl(ctxt);
++
++	/* Assignment of RIP may only fail in 64-bit mode */
++	if (ctxt->mode == X86EMUL_MODE_PROT64)
++		ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
++				 VCPU_SREG_CS);
+ 
+ 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
+ 
+-	rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS);
++	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
++				       &new_desc);
+ 	if (rc != X86EMUL_CONTINUE)
+ 		return rc;
+ 
+-	ctxt->_eip = 0;
+-	memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
+-	return X86EMUL_CONTINUE;
++	rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
++	if (rc != X86EMUL_CONTINUE) {
++		WARN_ON(!ctxt->mode != X86EMUL_MODE_PROT64);
++		/* assigning eip failed; restore the old cs */
++		ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
++		return rc;
++	}
++	return rc;
+ }
+ 
+ static int em_grp45(struct x86_emulate_ctxt *ctxt)
+@@ -1972,13 +2019,15 @@ static int em_grp45(struct x86_emulate_ctxt *ctxt)
+ 	case 2: /* call near abs */ {
+ 		long int old_eip;
+ 		old_eip = ctxt->_eip;
+-		ctxt->_eip = ctxt->src.val;
++		rc = assign_eip_near(ctxt, ctxt->src.val);
++		if (rc != X86EMUL_CONTINUE)
++			break;
+ 		ctxt->src.val = old_eip;
+ 		rc = em_push(ctxt);
+ 		break;
+ 	}
+ 	case 4: /* jmp abs */
+-		ctxt->_eip = ctxt->src.val;
++		rc = assign_eip_near(ctxt, ctxt->src.val);
+ 		break;
+ 	case 5: /* jmp far */
+ 		rc = em_jmp_far(ctxt);
+@@ -2013,30 +2062,47 @@ static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
+ 
+ static int em_ret(struct x86_emulate_ctxt *ctxt)
+ {
+-	ctxt->dst.type = OP_REG;
+-	ctxt->dst.addr.reg = &ctxt->_eip;
+-	ctxt->dst.bytes = ctxt->op_bytes;
+-	return em_pop(ctxt);
++	int rc;
++	unsigned long eip;
++
++	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
++	if (rc != X86EMUL_CONTINUE)
++		return rc;
++
++	return assign_eip_near(ctxt, eip);
+ }
+ 
+ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
+ {
+ 	int rc;
+-	unsigned long cs;
++	unsigned long eip, cs;
++	u16 old_cs;
+ 	int cpl = ctxt->ops->cpl(ctxt);
++	struct desc_struct old_desc, new_desc;
++	const struct x86_emulate_ops *ops = ctxt->ops;
+ 
+-	rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes);
++	if (ctxt->mode == X86EMUL_MODE_PROT64)
++		ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
++				 VCPU_SREG_CS);
++
++	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
+ 	if (rc != X86EMUL_CONTINUE)
+ 		return rc;
+-	if (ctxt->op_bytes == 4)
+-		ctxt->_eip = (u32)ctxt->_eip;
+ 	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
+ 	if (rc != X86EMUL_CONTINUE)
+ 		return rc;
+ 	/* Outer-privilege level return is not implemented */
+ 	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
+ 		return X86EMUL_UNHANDLEABLE;
+-	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
++	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0, false,
++				       &new_desc);
++	if (rc != X86EMUL_CONTINUE)
++		return rc;
++	rc = assign_eip_far(ctxt, eip, new_desc.l);
++	if (rc != X86EMUL_CONTINUE) {
++		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
++		ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
++	}
+ 	return rc;
+ }
+ 
+@@ -2297,7 +2363,7 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
+ {
+ 	const struct x86_emulate_ops *ops = ctxt->ops;
+ 	struct desc_struct cs, ss;
+-	u64 msr_data;
++	u64 msr_data, rcx, rdx;
+ 	int usermode;
+ 	u16 cs_sel = 0, ss_sel = 0;
+ 
+@@ -2313,6 +2379,9 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
+ 	else
+ 		usermode = X86EMUL_MODE_PROT32;
+ 
++	rcx = reg_read(ctxt, VCPU_REGS_RCX);
++	rdx = reg_read(ctxt, VCPU_REGS_RDX);
++
+ 	cs.dpl = 3;
+ 	ss.dpl = 3;
+ 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
+@@ -2330,6 +2399,9 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
+ 		ss_sel = cs_sel + 8;
+ 		cs.d = 0;
+ 		cs.l = 1;
++		if (is_noncanonical_address(rcx) ||
++		    is_noncanonical_address(rdx))
++			return emulate_gp(ctxt, 0);
+ 		break;
+ 	}
+ 	cs_sel |= SELECTOR_RPL_MASK;
+@@ -2338,8 +2410,8 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
+ 	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
+ 	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
+ 
+-	ctxt->_eip = reg_read(ctxt, VCPU_REGS_RDX);
+-	*reg_write(ctxt, VCPU_REGS_RSP) = reg_read(ctxt, VCPU_REGS_RCX);
++	ctxt->_eip = rdx;
++	*reg_write(ctxt, VCPU_REGS_RSP) = rcx;
+ 
+ 	return X86EMUL_CONTINUE;
+ }
+@@ -2457,19 +2529,24 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
+ 	 * Now load segment descriptors. If fault happens at this stage
+ 	 * it is handled in a context of new task
+ 	 */
+-	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl, true);
++	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
++					true, NULL);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+-	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true);
++	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
++					true, NULL);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+-	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true);
++	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
++					true, NULL);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+-	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true);
++	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
++					true, NULL);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+-	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true);
++	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
++					true, NULL);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+ 
+@@ -2594,25 +2671,32 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
+ 	 * Now load segment descriptors. If fault happenes at this stage
+ 	 * it is handled in a context of new task
+ 	 */
+-	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR, cpl, true);
++	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
++					cpl, true, NULL);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+-	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true);
++	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
++					true, NULL);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+-	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true);
++	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
++					true, NULL);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+-	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true);
++	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
++					true, NULL);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+-	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true);
++	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
++					true, NULL);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+-	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl, true);
++	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
++					true, NULL);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+-	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl, true);
++	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
++					true, NULL);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+ 
+@@ -2880,10 +2964,13 @@ static int em_aad(struct x86_emulate_ctxt *ctxt)
+ 
+ static int em_call(struct x86_emulate_ctxt *ctxt)
+ {
++	int rc;
+ 	long rel = ctxt->src.val;
+ 
+ 	ctxt->src.val = (unsigned long)ctxt->_eip;
+-	jmp_rel(ctxt, rel);
++	rc = jmp_rel(ctxt, rel);
++	if (rc != X86EMUL_CONTINUE)
++		return rc;
+ 	return em_push(ctxt);
+ }
+ 
+@@ -2892,34 +2979,50 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt)
+ 	u16 sel, old_cs;
+ 	ulong old_eip;
+ 	int rc;
++	struct desc_struct old_desc, new_desc;
++	const struct x86_emulate_ops *ops = ctxt->ops;
++	int cpl = ctxt->ops->cpl(ctxt);
+ 
+-	old_cs = get_segment_selector(ctxt, VCPU_SREG_CS);
+ 	old_eip = ctxt->_eip;
++	ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
+ 
+ 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
+-	if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS))
++	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
++				       &new_desc);
++	if (rc != X86EMUL_CONTINUE)
+ 		return X86EMUL_CONTINUE;
+ 
+-	ctxt->_eip = 0;
+-	memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
++	rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
++	if (rc != X86EMUL_CONTINUE)
++		goto fail;
+ 
+ 	ctxt->src.val = old_cs;
+ 	rc = em_push(ctxt);
+ 	if (rc != X86EMUL_CONTINUE)
+-		return rc;
++		goto fail;
+ 
+ 	ctxt->src.val = old_eip;
+-	return em_push(ctxt);
++	rc = em_push(ctxt);
++	/* If we failed, we tainted the memory, but at the very least we
++	   should restore cs */
++	if (rc != X86EMUL_CONTINUE)
++		goto fail;
++	return rc;
++fail:
++	ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
++	return rc;
++
+ }
+ 
+ static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
+ {
+ 	int rc;
++	unsigned long eip;
+ 
+-	ctxt->dst.type = OP_REG;
+-	ctxt->dst.addr.reg = &ctxt->_eip;
+-	ctxt->dst.bytes = ctxt->op_bytes;
+-	rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
++	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
++	if (rc != X86EMUL_CONTINUE)
++		return rc;
++	rc = assign_eip_near(ctxt, eip);
+ 	if (rc != X86EMUL_CONTINUE)
+ 		return rc;
+ 	rsp_increment(ctxt, ctxt->src.val);
+@@ -3250,20 +3353,24 @@ static int em_lmsw(struct x86_emulate_ctxt *ctxt)
+ 
+ static int em_loop(struct x86_emulate_ctxt *ctxt)
+ {
++	int rc = X86EMUL_CONTINUE;
++
+ 	register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
+ 	if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
+ 	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
+-		jmp_rel(ctxt, ctxt->src.val);
++		rc = jmp_rel(ctxt, ctxt->src.val);
+ 
+-	return X86EMUL_CONTINUE;
++	return rc;
+ }
+ 
+ static int em_jcxz(struct x86_emulate_ctxt *ctxt)
+ {
++	int rc = X86EMUL_CONTINUE;
++
+ 	if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
+-		jmp_rel(ctxt, ctxt->src.val);
++		rc = jmp_rel(ctxt, ctxt->src.val);
+ 
+-	return X86EMUL_CONTINUE;
++	return rc;
+ }
+ 
+ static int em_in(struct x86_emulate_ctxt *ctxt)
+@@ -3351,6 +3458,12 @@ static int em_bswap(struct x86_emulate_ctxt *ctxt)
+ 	return X86EMUL_CONTINUE;
+ }
+ 
++static int em_clflush(struct x86_emulate_ctxt *ctxt)
++{
++	/* emulating clflush regardless of cpuid */
++	return X86EMUL_CONTINUE;
++}
++
+ static bool valid_cr(int nr)
+ {
+ 	switch (nr) {
+@@ -3683,6 +3796,16 @@ static const struct opcode group11[] = {
+ 	X7(D(Undefined)),
+ };
+ 
++static const struct gprefix pfx_0f_ae_7 = {
++	I(SrcMem | ByteOp, em_clflush), N, N, N,
++};
++
++static const struct group_dual group15 = { {
++	N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
++}, {
++	N, N, N, N, N, N, N, N,
++} };
++
+ static const struct gprefix pfx_0f_6f_0f_7f = {
+ 	I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
+ };
+@@ -3887,10 +4010,11 @@ static const struct opcode twobyte_table[256] = {
+ 	N, I(ImplicitOps | EmulateOnUD, em_syscall),
+ 	II(ImplicitOps | Priv, em_clts, clts), N,
+ 	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
+-	N, D(ImplicitOps | ModRM), N, N,
++	N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
+ 	/* 0x10 - 0x1F */
+ 	N, N, N, N, N, N, N, N,
+-	D(ImplicitOps | ModRM), N, N, N, N, N, N, D(ImplicitOps | ModRM),
++	D(ImplicitOps | ModRM | SrcMem | NoAccess),
++	N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
+ 	/* 0x20 - 0x2F */
+ 	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
+ 	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
+@@ -3942,7 +4066,7 @@ static const struct opcode twobyte_table[256] = {
+ 	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
+ 	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
+ 	F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
+-	D(ModRM), F(DstReg | SrcMem | ModRM, em_imul),
++	GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
+ 	/* 0xB0 - 0xB7 */
+ 	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
+ 	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
+@@ -4458,10 +4582,10 @@ done_prefixes:
+ 	/* Decode and fetch the destination operand: register or memory. */
+ 	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
+ 
+-done:
+ 	if (ctxt->rip_relative)
+ 		ctxt->memopp->addr.mem.ea += ctxt->_eip;
+ 
++done:
+ 	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
+ }
+ 
+@@ -4711,7 +4835,7 @@ special_insn:
+ 		break;
+ 	case 0x70 ... 0x7f: /* jcc (short) */
+ 		if (test_cc(ctxt->b, ctxt->eflags))
+-			jmp_rel(ctxt, ctxt->src.val);
++			rc = jmp_rel(ctxt, ctxt->src.val);
+ 		break;
+ 	case 0x8d: /* lea r16/r32, m */
+ 		ctxt->dst.val = ctxt->src.addr.mem.ea;
+@@ -4741,7 +4865,7 @@ special_insn:
+ 		break;
+ 	case 0xe9: /* jmp rel */
+ 	case 0xeb: /* jmp rel short */
+-		jmp_rel(ctxt, ctxt->src.val);
++		rc = jmp_rel(ctxt, ctxt->src.val);
+ 		ctxt->dst.type = OP_NONE; /* Disable writeback. */
+ 		break;
+ 	case 0xf4:              /* hlt */
+@@ -4864,13 +4988,11 @@ twobyte_insn:
+ 		break;
+ 	case 0x80 ... 0x8f: /* jnz rel, etc*/
+ 		if (test_cc(ctxt->b, ctxt->eflags))
+-			jmp_rel(ctxt, ctxt->src.val);
++			rc = jmp_rel(ctxt, ctxt->src.val);
+ 		break;
+ 	case 0x90 ... 0x9f:     /* setcc r/m8 */
+ 		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
+ 		break;
+-	case 0xae:              /* clflush */
+-		break;
+ 	case 0xb6 ... 0xb7:	/* movzx */
+ 		ctxt->dst.bytes = ctxt->op_bytes;
+ 		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
+diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
+index 518d864..298781d 100644
+--- a/arch/x86/kvm/i8254.c
++++ b/arch/x86/kvm/i8254.c
+@@ -262,8 +262,10 @@ void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
+ 		return;
+ 
+ 	timer = &pit->pit_state.timer;
++	mutex_lock(&pit->pit_state.lock);
+ 	if (hrtimer_cancel(timer))
+ 		hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
++	mutex_unlock(&pit->pit_state.lock);
+ }
+ 
+ static void destroy_pit_timer(struct kvm_pit *pit)
 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
 index 08e8a89..0e9183e 100644
 --- a/arch/x86/kvm/lapic.c
@@ -28512,9 +29428,31 @@ index 4107765..d9eb358 100644
  			goto error;
  		walker->ptep_user[walker->level - 1] = ptep_user;
 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
-index ddf7427..e3b93f9 100644
+index ddf7427..fd84599 100644
 --- a/arch/x86/kvm/svm.c
 +++ b/arch/x86/kvm/svm.c
+@@ -3234,7 +3234,7 @@ static int wrmsr_interception(struct vcpu_svm *svm)
+ 	msr.host_initiated = false;
+ 
+ 	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
+-	if (svm_set_msr(&svm->vcpu, &msr)) {
++	if (kvm_set_msr(&svm->vcpu, &msr)) {
+ 		trace_kvm_msr_write_ex(ecx, data);
+ 		kvm_inject_gp(&svm->vcpu, 0);
+ 	} else {
+@@ -3534,9 +3534,9 @@ static int handle_exit(struct kvm_vcpu *vcpu)
+ 
+ 	if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
+ 	    || !svm_exit_handlers[exit_code]) {
+-		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
+-		kvm_run->hw.hardware_exit_reason = exit_code;
+-		return 0;
++		WARN_ONCE(1, "svm: unexpected exit reason 0x%x\n", exit_code);
++		kvm_queue_exception(vcpu, UD_VECTOR);
++		return 1;
+ 	}
+ 
+ 	return svm_exit_handlers[exit_code](svm);
 @@ -3547,7 +3547,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
  	int cpu = raw_smp_processor_id();
  
@@ -28539,7 +29477,7 @@ index ddf7427..e3b93f9 100644
  
  	local_irq_disable();
 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
-index bfe11cf..d567dc0 100644
+index bfe11cf..deb3959 100644
 --- a/arch/x86/kvm/vmx.c
 +++ b/arch/x86/kvm/vmx.c
 @@ -453,6 +453,7 @@ struct vcpu_vmx {
@@ -28597,7 +29535,25 @@ index bfe11cf..d567dc0 100644
  {
  	u64 host_tsc, tsc_offset;
  
-@@ -3110,8 +3119,11 @@ static __init int hardware_setup(void)
+@@ -2631,12 +2640,15 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 	default:
+ 		msr = find_msr_entry(vmx, msr_index);
+ 		if (msr) {
++			u64 old_msr_data = msr->data;
+ 			msr->data = data;
+ 			if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
+ 				preempt_disable();
+-				kvm_set_shared_msr(msr->index, msr->data,
+-						   msr->mask);
++				ret = kvm_set_shared_msr(msr->index, msr->data,
++							 msr->mask);
+ 				preempt_enable();
++				if (ret)
++					msr->data = old_msr_data;
+ 			}
+ 			break;
+ 		}
+@@ -3110,8 +3122,11 @@ static __init int hardware_setup(void)
  	if (!cpu_has_vmx_flexpriority())
  		flexpriority_enabled = 0;
  
@@ -28611,7 +29567,7 @@ index bfe11cf..d567dc0 100644
  
  	if (enable_ept && !cpu_has_vmx_ept_2m_page())
  		kvm_disable_largepages();
-@@ -3122,13 +3134,15 @@ static __init int hardware_setup(void)
+@@ -3122,13 +3137,15 @@ static __init int hardware_setup(void)
  	if (!cpu_has_vmx_apicv())
  		enable_apicv = 0;
  
@@ -28631,7 +29587,7 @@ index bfe11cf..d567dc0 100644
  
  	if (nested)
  		nested_vmx_setup_ctls_msrs();
-@@ -4235,10 +4249,17 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
+@@ -4235,10 +4252,17 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
  	u32 low32, high32;
  	unsigned long tmpl;
  	struct desc_ptr dt;
@@ -28650,7 +29606,7 @@ index bfe11cf..d567dc0 100644
  
  	vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
  #ifdef CONFIG_X86_64
-@@ -4260,7 +4281,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
+@@ -4260,7 +4284,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
  	vmcs_writel(HOST_IDTR_BASE, dt.address);   /* 22.2.4 */
  	vmx->host_idt_base = dt.address;
  
@@ -28659,7 +29615,60 @@ index bfe11cf..d567dc0 100644
  
  	rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
  	vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
-@@ -7376,7 +7397,7 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
+@@ -5257,7 +5281,7 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu)
+ 	msr.data = data;
+ 	msr.index = ecx;
+ 	msr.host_initiated = false;
+-	if (vmx_set_msr(vcpu, &msr) != 0) {
++	if (kvm_set_msr(vcpu, &msr) != 0) {
+ 		trace_kvm_msr_write_ex(ecx, data);
+ 		kvm_inject_gp(vcpu, 0);
+ 		return 1;
+@@ -6630,6 +6654,12 @@ static int handle_invept(struct kvm_vcpu *vcpu)
+ 	return 1;
+ }
+ 
++static int handle_invvpid(struct kvm_vcpu *vcpu)
++{
++	kvm_queue_exception(vcpu, UD_VECTOR);
++	return 1;
++}
++
+ /*
+  * The exit handlers return 1 if the exit was handled fully and guest execution
+  * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
+@@ -6675,6 +6705,7 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
+ 	[EXIT_REASON_MWAIT_INSTRUCTION]	      = handle_mwait,
+ 	[EXIT_REASON_MONITOR_INSTRUCTION]     = handle_monitor,
+ 	[EXIT_REASON_INVEPT]                  = handle_invept,
++	[EXIT_REASON_INVVPID]                 = handle_invvpid,
+ };
+ 
+ static const int kvm_vmx_max_exit_handlers =
+@@ -6908,7 +6939,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
+ 	case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
+ 	case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE:
+ 	case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
+-	case EXIT_REASON_INVEPT:
++	case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
+ 		/*
+ 		 * VMX instructions trap unconditionally. This allows L1 to
+ 		 * emulate them for its L2 guest, i.e., allows 3-level nesting!
+@@ -7049,10 +7080,10 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
+ 	    && kvm_vmx_exit_handlers[exit_reason])
+ 		return kvm_vmx_exit_handlers[exit_reason](vcpu);
+ 	else {
+-		vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
+-		vcpu->run->hw.hardware_exit_reason = exit_reason;
++		WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_reason);
++		kvm_queue_exception(vcpu, UD_VECTOR);
++		return 1;
+ 	}
+-	return 0;
+ }
+ 
+ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
+@@ -7376,7 +7407,7 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
  static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
  {
  	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -28668,7 +29677,7 @@ index bfe11cf..d567dc0 100644
  
  	/* Record the guest's net vcpu time for enforced NMI injections. */
  	if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
-@@ -7397,6 +7418,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -7397,6 +7428,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
  	if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
  		vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
  
@@ -28681,7 +29690,7 @@ index bfe11cf..d567dc0 100644
  	/* When single-stepping over STI and MOV SS, we must clear the
  	 * corresponding interruptibility bits in the guest state. Otherwise
  	 * vmentry fails as it then expects bit 14 (BS) in pending debug
-@@ -7453,6 +7480,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -7453,6 +7490,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
  		"jmp 2f \n\t"
  		"1: " __ex(ASM_VMX_VMRESUME) "\n\t"
  		"2: "
@@ -28694,7 +29703,7 @@ index bfe11cf..d567dc0 100644
  		/* Save guest registers, load host registers, keep flags */
  		"mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
  		"pop %0 \n\t"
-@@ -7505,6 +7538,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -7505,6 +7548,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
  #endif
  		[cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
  		[wordsize]"i"(sizeof(ulong))
@@ -28706,7 +29715,7 @@ index bfe11cf..d567dc0 100644
  	      : "cc", "memory"
  #ifdef CONFIG_X86_64
  		, "rax", "rbx", "rdi", "rsi"
-@@ -7518,7 +7556,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -7518,7 +7566,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
  	if (debugctlmsr)
  		update_debugctlmsr(debugctlmsr);
  
@@ -28715,7 +29724,7 @@ index bfe11cf..d567dc0 100644
  	/*
  	 * The sysexit path does not restore ds/es, so we must set them to
  	 * a reasonable value ourselves.
-@@ -7527,8 +7565,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -7527,8 +7575,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
  	 * may be executed in interrupt context, which saves and restore segments
  	 * around it, nullifying its effect.
  	 */
@@ -28737,10 +29746,82 @@ index bfe11cf..d567dc0 100644
  
  	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index 8f1e22d..f6eee20 100644
+index 8f1e22d..c23d3c5 100644
 --- a/arch/x86/kvm/x86.c
 +++ b/arch/x86/kvm/x86.c
-@@ -1827,8 +1827,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
+@@ -229,20 +229,25 @@ static void kvm_shared_msr_cpu_online(void)
+ 		shared_msr_update(i, shared_msrs_global.msrs[i]);
+ }
+ 
+-void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
++int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
+ {
+ 	unsigned int cpu = smp_processor_id();
+ 	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
++	int err;
+ 
+ 	if (((value ^ smsr->values[slot].curr) & mask) == 0)
+-		return;
++		return 0;
+ 	smsr->values[slot].curr = value;
+-	wrmsrl(shared_msrs_global.msrs[slot], value);
++	err = wrmsrl_safe(shared_msrs_global.msrs[slot], value);
++	if (err)
++		return 1;
++
+ 	if (!smsr->registered) {
+ 		smsr->urn.on_user_return = kvm_on_user_return;
+ 		user_return_notifier_register(&smsr->urn);
+ 		smsr->registered = true;
+ 	}
++	return 0;
+ }
+ EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
+ 
+@@ -984,7 +989,6 @@ void kvm_enable_efer_bits(u64 mask)
+ }
+ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
+ 
+-
+ /*
+  * Writes msr value into into the appropriate "register".
+  * Returns 0 on success, non-0 otherwise.
+@@ -992,8 +996,34 @@ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
+  */
+ int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
+ {
++	switch (msr->index) {
++	case MSR_FS_BASE:
++	case MSR_GS_BASE:
++	case MSR_KERNEL_GS_BASE:
++	case MSR_CSTAR:
++	case MSR_LSTAR:
++		if (is_noncanonical_address(msr->data))
++			return 1;
++		break;
++	case MSR_IA32_SYSENTER_EIP:
++	case MSR_IA32_SYSENTER_ESP:
++		/*
++		 * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
++		 * non-canonical address is written on Intel but not on
++		 * AMD (which ignores the top 32-bits, because it does
++		 * not implement 64-bit SYSENTER).
++		 *
++		 * 64-bit code should hence be able to write a non-canonical
++		 * value on AMD.  Making the address canonical ensures that
++		 * vmentry does not fail on Intel after writing a non-canonical
++		 * value, and that something deterministic happens if the guest
++		 * invokes 64-bit SYSENTER.
++		 */
++		msr->data = get_canonical(msr->data);
++	}
+ 	return kvm_x86_ops->set_msr(vcpu, msr);
+ }
++EXPORT_SYMBOL_GPL(kvm_set_msr);
+ 
+ /*
+  * Adapt set_msr() to msr_io()'s calling convention
+@@ -1827,8 +1857,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
  {
  	struct kvm *kvm = vcpu->kvm;
  	int lm = is_long_mode(vcpu);
@@ -28751,7 +29832,7 @@ index 8f1e22d..f6eee20 100644
  	u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
  		: kvm->arch.xen_hvm_config.blob_size_32;
  	u32 page_num = data & ~PAGE_MASK;
-@@ -2749,6 +2749,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
+@@ -2749,6 +2779,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
  		if (n < msr_list.nmsrs)
  			goto out;
  		r = -EFAULT;
@@ -28760,7 +29841,7 @@ index 8f1e22d..f6eee20 100644
  		if (copy_to_user(user_msr_list->indices, &msrs_to_save,
  				 num_msrs_to_save * sizeof(u32)))
  			goto out;
-@@ -5609,7 +5611,7 @@ static struct notifier_block pvclock_gtod_notifier = {
+@@ -5609,7 +5641,7 @@ static struct notifier_block pvclock_gtod_notifier = {
  };
  #endif
  
@@ -33121,7 +34202,7 @@ index 5621c47..5e17b7390 100644
  };
  
 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
-index 7b179b4..6bd17777 100644
+index 7b179b49..6bd17777 100644
 --- a/arch/x86/mm/iomap_32.c
 +++ b/arch/x86/mm/iomap_32.c
 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
@@ -34941,19 +36022,23 @@ index 1bbedc4..eb795b5 100644
  
  static unsigned long __init intel_mid_calibrate_tsc(void)
 diff --git a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
-index 46aa25c..7208aeb 100644
+index 46aa25c..59a68ed 100644
 --- a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
 +++ b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
-@@ -14,6 +14,6 @@
+@@ -10,10 +10,9 @@
+  */
+ 
+ 
+-/* __attribute__((weak)) makes these declarations overridable */
  /* For every CPU addition a new get_<cpuname>_ops interface needs
   * to be added.
   */
 -extern void *get_penwell_ops(void) __attribute__((weak));
 -extern void *get_cloverview_ops(void) __attribute__((weak));
 -extern void *get_tangier_ops(void) __attribute__((weak));
-+extern const void *get_penwell_ops(void) __attribute__((weak));
-+extern const void *get_cloverview_ops(void) __attribute__((weak));
-+extern const void *get_tangier_ops(void) __attribute__((weak));
++extern const void *get_penwell_ops(void);
++extern const void *get_cloverview_ops(void);
++extern const void *get_tangier_ops(void);
 diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c
 index 23381d2..8ddc10e 100644
 --- a/arch/x86/platform/intel-mid/mfld.c
@@ -36215,6 +37300,20 @@ index e592c90..c566114 100644
  
  static void cryptd_queue_worker(struct work_struct *work);
  
+diff --git a/crypto/cts.c b/crypto/cts.c
+index 042223f..133f087 100644
+--- a/crypto/cts.c
++++ b/crypto/cts.c
+@@ -202,7 +202,8 @@ static int cts_cbc_decrypt(struct crypto_cts_ctx *ctx,
+ 	/* 5. Append the tail (BB - Ln) bytes of Xn (tmp) to Cn to create En */
+ 	memcpy(s + bsize + lastn, tmp + lastn, bsize - lastn);
+ 	/* 6. Decrypt En to create Pn-1 */
+-	memset(iv, 0, sizeof(iv));
++	memzero_explicit(iv, sizeof(iv));
++
+ 	sg_set_buf(&sgsrc[0], s + bsize, bsize);
+ 	sg_set_buf(&sgdst[0], d, bsize);
+ 	err = crypto_blkcipher_decrypt_iv(&lcldesc, sgdst, sgsrc, bsize);
 diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
 index 309d345..1632720 100644
 --- a/crypto/pcrypt.c
@@ -36228,6 +37327,118 @@ index 309d345..1632720 100644
  	if (!ret)
  		kobject_uevent(&pinst->kobj, KOBJ_ADD);
  
+diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
+index 4279480..7bb0474 100644
+--- a/crypto/sha1_generic.c
++++ b/crypto/sha1_generic.c
+@@ -64,7 +64,7 @@ int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
+ 			src = data + done;
+ 		} while (done + SHA1_BLOCK_SIZE <= len);
+ 
+-		memset(temp, 0, sizeof(temp));
++		memzero_explicit(temp, sizeof(temp));
+ 		partial = 0;
+ 	}
+ 	memcpy(sctx->buffer + partial, src, len - done);
+diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
+index 5433667..32c5e5e 100644
+--- a/crypto/sha256_generic.c
++++ b/crypto/sha256_generic.c
+@@ -210,10 +210,9 @@ static void sha256_transform(u32 *state, const u8 *input)
+ 
+ 	/* clear any sensitive info... */
+ 	a = b = c = d = e = f = g = h = t1 = t2 = 0;
+-	memset(W, 0, 64 * sizeof(u32));
++	memzero_explicit(W, 64 * sizeof(u32));
+ }
+ 
+-
+ static int sha224_init(struct shash_desc *desc)
+ {
+ 	struct sha256_state *sctx = shash_desc_ctx(desc);
+@@ -316,7 +315,7 @@ static int sha224_final(struct shash_desc *desc, u8 *hash)
+ 	sha256_final(desc, D);
+ 
+ 	memcpy(hash, D, SHA224_DIGEST_SIZE);
+-	memset(D, 0, SHA256_DIGEST_SIZE);
++	memzero_explicit(D, SHA256_DIGEST_SIZE);
+ 
+ 	return 0;
+ }
+diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
+index 6ed124f..04d295a 100644
+--- a/crypto/sha512_generic.c
++++ b/crypto/sha512_generic.c
+@@ -238,7 +238,7 @@ static int sha384_final(struct shash_desc *desc, u8 *hash)
+ 	sha512_final(desc, D);
+ 
+ 	memcpy(hash, D, 48);
+-	memset(D, 0, 64);
++	memzero_explicit(D, 64);
+ 
+ 	return 0;
+ }
+diff --git a/crypto/tgr192.c b/crypto/tgr192.c
+index 8740355..3c7af0d 100644
+--- a/crypto/tgr192.c
++++ b/crypto/tgr192.c
+@@ -612,7 +612,7 @@ static int tgr160_final(struct shash_desc *desc, u8 * out)
+ 
+ 	tgr192_final(desc, D);
+ 	memcpy(out, D, TGR160_DIGEST_SIZE);
+-	memset(D, 0, TGR192_DIGEST_SIZE);
++	memzero_explicit(D, TGR192_DIGEST_SIZE);
+ 
+ 	return 0;
+ }
+@@ -623,7 +623,7 @@ static int tgr128_final(struct shash_desc *desc, u8 * out)
+ 
+ 	tgr192_final(desc, D);
+ 	memcpy(out, D, TGR128_DIGEST_SIZE);
+-	memset(D, 0, TGR192_DIGEST_SIZE);
++	memzero_explicit(D, TGR192_DIGEST_SIZE);
+ 
+ 	return 0;
+ }
+diff --git a/crypto/vmac.c b/crypto/vmac.c
+index 2eb11a3..d84c24b 100644
+--- a/crypto/vmac.c
++++ b/crypto/vmac.c
+@@ -613,7 +613,7 @@ static int vmac_final(struct shash_desc *pdesc, u8 *out)
+ 	}
+ 	mac = vmac(ctx->partial, ctx->partial_size, nonce, NULL, ctx);
+ 	memcpy(out, &mac, sizeof(vmac_t));
+-	memset(&mac, 0, sizeof(vmac_t));
++	memzero_explicit(&mac, sizeof(vmac_t));
+ 	memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
+ 	ctx->partial_size = 0;
+ 	return 0;
+diff --git a/crypto/wp512.c b/crypto/wp512.c
+index 180f1d6..ec64e77 100644
+--- a/crypto/wp512.c
++++ b/crypto/wp512.c
+@@ -1102,8 +1102,8 @@ static int wp384_final(struct shash_desc *desc, u8 *out)
+ 	u8 D[64];
+ 
+ 	wp512_final(desc, D);
+-	memcpy (out, D, WP384_DIGEST_SIZE);
+-	memset (D, 0, WP512_DIGEST_SIZE);
++	memcpy(out, D, WP384_DIGEST_SIZE);
++	memzero_explicit(D, WP512_DIGEST_SIZE);
+ 
+ 	return 0;
+ }
+@@ -1113,8 +1113,8 @@ static int wp256_final(struct shash_desc *desc, u8 *out)
+ 	u8 D[64];
+ 
+ 	wp512_final(desc, D);
+-	memcpy (out, D, WP256_DIGEST_SIZE);
+-	memset (D, 0, WP512_DIGEST_SIZE);
++	memcpy(out, D, WP256_DIGEST_SIZE);
++	memzero_explicit(D, WP512_DIGEST_SIZE);
+ 
+ 	return 0;
+ }
 diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
 index 6921c7f..78e1af7 100644
 --- a/drivers/acpi/acpica/hwxfsleep.c
@@ -38886,7 +40097,7 @@ index 0ea9986..e7b07e4 100644
  
  	if (cmd != SIOCWANDEV)
 diff --git a/drivers/char/random.c b/drivers/char/random.c
-index c18d41d..a39afb7 100644
+index c18d41d..7c499f3 100644
 --- a/drivers/char/random.c
 +++ b/drivers/char/random.c
 @@ -289,9 +289,6 @@
@@ -38921,6 +40132,33 @@ index c18d41d..a39afb7 100644
  			unsigned int add =
  				((pool_size - entropy_count)*anfrac*3) >> s;
  
+@@ -1106,7 +1103,7 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
+ 	__mix_pool_bytes(r, hash.w, sizeof(hash.w));
+ 	spin_unlock_irqrestore(&r->lock, flags);
+ 
+-	memset(workspace, 0, sizeof(workspace));
++	memzero_explicit(workspace, sizeof(workspace));
+ 
+ 	/*
+ 	 * In case the hash function has some recognizable output
+@@ -1118,7 +1115,7 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
+ 	hash.w[2] ^= rol32(hash.w[2], 16);
+ 
+ 	memcpy(out, &hash, EXTRACT_SIZE);
+-	memset(&hash, 0, sizeof(hash));
++	memzero_explicit(&hash, sizeof(hash));
+ }
+ 
+ /*
+@@ -1175,7 +1172,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+ 	}
+ 
+ 	/* Wipe data just returned from memory */
+-	memset(tmp, 0, sizeof(tmp));
++	memzero_explicit(tmp, sizeof(tmp));
+ 
+ 	return ret;
+ }
 @@ -1207,7 +1204,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
  
  		extract_buf(r, tmp);
@@ -38930,6 +40168,15 @@ index c18d41d..a39afb7 100644
  			ret = -EFAULT;
  			break;
  		}
+@@ -1218,7 +1215,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
+ 	}
+ 
+ 	/* Wipe data just returned from memory */
+-	memset(tmp, 0, sizeof(tmp));
++	memzero_explicit(tmp, sizeof(tmp));
+ 
+ 	return ret;
+ }
 @@ -1590,7 +1587,7 @@ static char sysctl_bootid[16];
  static int proc_do_uuid(struct ctl_table *table, int write,
  			void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -47713,6 +48960,18 @@ index d5e07de..e3bf20a 100644
  
  	spinlock_t request_lock;
  	struct list_head req_list;
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index 0fcb5e7..148fda3 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -556,6 +556,7 @@ do_lso:
+ do_send:
+ 	/* Start filling in the page buffers with the rndis hdr */
+ 	rndis_msg->msg_len += rndis_msg_size;
++	packet->total_data_buflen = rndis_msg->msg_len;
+ 	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
+ 					skb, &packet->page_buf[0]);
+ 
 diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
 index 2b86f0b..ecc996f 100644
 --- a/drivers/net/hyperv/rndis_filter.c
@@ -64696,7 +65955,7 @@ index 2183fcf..3c32a98 100644
   	help
  	  Various /proc files exist to monitor process memory utilization:
 diff --git a/fs/proc/array.c b/fs/proc/array.c
-index cd3653e..25c14e8 100644
+index cd3653e..9b9b79a 100644
 --- a/fs/proc/array.c
 +++ b/fs/proc/array.c
 @@ -60,6 +60,7 @@
@@ -64834,14 +66093,21 @@ index cd3653e..25c14e8 100644
  	if (mm) {
  		size = task_statm(mm, &shared, &text, &data, &resident);
  		mmput(mm);
-@@ -576,6 +644,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
+@@ -576,6 +644,20 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
  	return 0;
  }
  
 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
 +int proc_pid_ipaddr(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task)
 +{
-+	return seq_printf(m, "%pI4\n", &task->signal->curr_ip);
++	unsigned long flags;
++	u32 curr_ip = 0;
++
++	if (lock_task_sighand(task, &flags)) {
++		curr_ip = task->signal->curr_ip;
++		unlock_task_sighand(task, &flags);
++	}
++	return seq_printf(m, "%pI4\n", &curr_ip);
 +}
 +#endif
 +
@@ -77316,7 +78582,7 @@ index 0000000..3860c7e
 +}
 diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
 new file mode 100644
-index 0000000..c0aef3a
+index 0000000..e3650b6
 --- /dev/null
 +++ b/grsecurity/grsec_sock.c
 @@ -0,0 +1,244 @@
@@ -77443,10 +78709,10 @@ index 0000000..c0aef3a
 +
 +#endif
 +
-+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
++void gr_update_task_in_ip_table(const struct inet_sock *inet)
 +{
 +#ifdef CONFIG_GRKERNSEC
-+	struct signal_struct *sig = task->signal;
++	struct signal_struct *sig = current->signal;
 +	struct conn_table_entry *newent;
 +
 +	newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
@@ -79142,9 +80408,18 @@ index c1da539..1dcec55 100644
  struct atmphy_ops {
  	int (*start)(struct atm_dev *dev);
 diff --git a/include/linux/audit.h b/include/linux/audit.h
-index 22cfddb..ab759e8 100644
+index 22cfddb..1514eef 100644
 --- a/include/linux/audit.h
 +++ b/include/linux/audit.h
+@@ -86,7 +86,7 @@ extern unsigned compat_dir_class[];
+ extern unsigned compat_chattr_class[];
+ extern unsigned compat_signal_class[];
+ 
+-extern int __weak audit_classify_compat_syscall(int abi, unsigned syscall);
++extern int audit_classify_compat_syscall(int abi, unsigned syscall);
+ 
+ /* audit_names->type values */
+ #define	AUDIT_TYPE_UNKNOWN	0	/* we don't know yet */
 @@ -210,7 +210,7 @@ static inline void audit_ptrace(struct task_struct *t)
  extern unsigned int audit_serial(void);
  extern int auditsc_get_stamp(struct audit_context *ctx,
@@ -79311,6 +80586,19 @@ index 411dd7e..ee38878 100644
  
  /**
   * struct clk_init_data - holds init data that's common to all clocks and is
+diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
+index 653f0e2..abcafaa 100644
+--- a/include/linux/clocksource.h
++++ b/include/linux/clocksource.h
+@@ -287,7 +287,7 @@ extern struct clocksource* clocksource_get_next(void);
+ extern void clocksource_change_rating(struct clocksource *cs, int rating);
+ extern void clocksource_suspend(void);
+ extern void clocksource_resume(void);
+-extern struct clocksource * __init __weak clocksource_default_clock(void);
++extern struct clocksource * __init clocksource_default_clock(void);
+ extern void clocksource_mark_unstable(struct clocksource *cs);
+ 
+ extern u64
 diff --git a/include/linux/compat.h b/include/linux/compat.h
 index e649426..a74047b 100644
 --- a/include/linux/compat.h
@@ -79645,6 +80933,32 @@ index 2997af6..424ddc1 100644
  int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
  int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp);
  
+diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
+index 72ab536..3849fce 100644
+--- a/include/linux/crash_dump.h
++++ b/include/linux/crash_dump.h
+@@ -14,14 +14,13 @@
+ extern unsigned long long elfcorehdr_addr;
+ extern unsigned long long elfcorehdr_size;
+ 
+-extern int __weak elfcorehdr_alloc(unsigned long long *addr,
+-				   unsigned long long *size);
+-extern void __weak elfcorehdr_free(unsigned long long addr);
+-extern ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos);
+-extern ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos);
+-extern int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
+-					 unsigned long from, unsigned long pfn,
+-					 unsigned long size, pgprot_t prot);
++extern int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size);
++extern void elfcorehdr_free(unsigned long long addr);
++extern ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos);
++extern ssize_t elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos);
++extern int remap_oldmem_pfn_range(struct vm_area_struct *vma,
++				  unsigned long from, unsigned long pfn,
++				  unsigned long size, pgprot_t prot);
+ 
+ extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
+ 						unsigned long, int);
 diff --git a/include/linux/cred.h b/include/linux/cred.h
 index b2d0820..2ecafd3 100644
 --- a/include/linux/cred.h
@@ -81925,7 +83239,7 @@ index 44792ee..6172f2a 100644
  extern struct key_type key_type_keyring;
  
 diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
-index 6b06d37..c134867 100644
+index 6b06d37..19f605f 100644
 --- a/include/linux/kgdb.h
 +++ b/include/linux/kgdb.h
 @@ -52,7 +52,7 @@ extern int kgdb_connected;
@@ -81946,7 +83260,7 @@ index 6b06d37..c134867 100644
  
  /**
   * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
-@@ -279,7 +279,7 @@ struct kgdb_io {
+@@ -279,11 +279,11 @@ struct kgdb_io {
  	void			(*pre_exception) (void);
  	void			(*post_exception) (void);
  	int			is_console;
@@ -81955,6 +83269,11 @@ index 6b06d37..c134867 100644
  
  extern struct kgdb_arch		arch_kgdb_ops;
  
+-extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
++extern unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs);
+ 
+ #ifdef CONFIG_SERIAL_KGDB_NMI
+ extern int kgdb_register_nmi_console(void);
 diff --git a/include/linux/kmod.h b/include/linux/kmod.h
 index 0555cc6..40116ce 100644
 --- a/include/linux/kmod.h
@@ -82204,6 +83523,19 @@ index c45c089..298841c 100644
  {
  	u32 remainder;
  	return div_u64_rem(dividend, divisor, &remainder);
+diff --git a/include/linux/memory.h b/include/linux/memory.h
+index bb7384e..8b8d8d1 100644
+--- a/include/linux/memory.h
++++ b/include/linux/memory.h
+@@ -35,7 +35,7 @@ struct memory_block {
+ };
+ 
+ int arch_get_memory_phys_device(unsigned long start_pfn);
+-unsigned long __weak memory_block_size_bytes(void);
++unsigned long memory_block_size_bytes(void);
+ 
+ /* These states are exposed to userspace as text strings in sysfs */
+ #define	MEM_ONLINE		(1<<0) /* exposed to userspace */
 diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
 index f230a97..714c006 100644
 --- a/include/linux/mempolicy.h
@@ -84334,6 +85666,29 @@ index 680f9a3..f13aeb0 100644
  	__SONET_ITEMS
  #undef __HANDLE_ITEM
  };
+diff --git a/include/linux/string.h b/include/linux/string.h
+index d36977e..3b42b37 100644
+--- a/include/linux/string.h
++++ b/include/linux/string.h
+@@ -132,7 +132,7 @@ int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4);
+ #endif
+ 
+ extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
+-			const void *from, size_t available);
++				       const void *from, size_t available);
+ 
+ /**
+  * strstarts - does @str start with @prefix?
+@@ -144,7 +144,8 @@ static inline bool strstarts(const char *str, const char *prefix)
+ 	return strncmp(str, prefix, strlen(prefix)) == 0;
+ }
+ 
+-extern size_t memweight(const void *ptr, size_t bytes);
++size_t memweight(const void *ptr, size_t bytes);
++void memzero_explicit(void *s, size_t count);
+ 
+ /**
+  * kbasename - return the last part of a pathname.
 diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
 index 07d8e53..dc934c9 100644
 --- a/include/linux/sunrpc/addr.h
@@ -84787,6 +86142,38 @@ index 99c1b4d..562e6f3 100644
  }
  
  static inline void put_unaligned_le16(u16 val, void *p)
+diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
+index 4f844c6..60beb5d 100644
+--- a/include/linux/uprobes.h
++++ b/include/linux/uprobes.h
+@@ -98,11 +98,11 @@ struct uprobes_state {
+ 	struct xol_area		*xol_area;
+ };
+ 
+-extern int __weak set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
+-extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
+-extern bool __weak is_swbp_insn(uprobe_opcode_t *insn);
+-extern bool __weak is_trap_insn(uprobe_opcode_t *insn);
+-extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs);
++extern int set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
++extern int set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
++extern bool is_swbp_insn(uprobe_opcode_t *insn);
++extern bool is_trap_insn(uprobe_opcode_t *insn);
++extern unsigned long uprobe_get_swbp_addr(struct pt_regs *regs);
+ extern unsigned long uprobe_get_trap_addr(struct pt_regs *regs);
+ extern int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t);
+ extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
+@@ -128,8 +128,8 @@ extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
+ extern int  arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
+ extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
+ extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs);
+-extern bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs);
+-extern void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
++extern bool arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs);
++extern void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
+ 					 void *src, unsigned long len);
+ #else /* !CONFIG_UPROBES */
+ struct uprobes_state {
 diff --git a/include/linux/usb.h b/include/linux/usb.h
 index d2465bc..5256de4 100644
 --- a/include/linux/usb.h
@@ -94050,6 +95437,33 @@ index 0922579..9d7adb9 100644
 +	printk("%lu pages hwpoisoned\n", atomic_long_read_unchecked(&num_poisoned_pages));
  #endif
  }
+diff --git a/lib/string.c b/lib/string.c
+index f3c6ff5..70db57a 100644
+--- a/lib/string.c
++++ b/lib/string.c
+@@ -604,6 +604,22 @@ void *memset(void *s, int c, size_t count)
+ EXPORT_SYMBOL(memset);
+ #endif
+ 
++/**
++ * memzero_explicit - Fill a region of memory (e.g. sensitive
++ *		      keying data) with 0s.
++ * @s: Pointer to the start of the area.
++ * @count: The size of the area.
++ *
++ * memzero_explicit() doesn't need an arch-specific version as
++ * it just invokes the one of memset() implicitly.
++ */
++void memzero_explicit(void *s, size_t count)
++{
++	memset(s, 0, count);
++	OPTIMIZER_HIDE_VAR(s);
++}
++EXPORT_SYMBOL(memzero_explicit);
++
+ #ifndef __HAVE_ARCH_MEMCPY
+ /**
+  * memcpy - Copy one area of memory to another
 diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
 index bb2b201..46abaf9 100644
 --- a/lib/strncpy_from_user.c
@@ -94865,7 +96279,7 @@ index 44c6bd2..60369dc3 100644
  	}
  	unset_migratetype_isolate(page, MIGRATE_MOVABLE);
 diff --git a/mm/memory.c b/mm/memory.c
-index e229970..2917c98 100644
+index e229970..a3eb2ce 100644
 --- a/mm/memory.c
 +++ b/mm/memory.c
 @@ -415,6 +415,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
@@ -94902,6 +96316,19 @@ index e229970..2917c98 100644
  }
  
  /*
+@@ -691,10 +697,10 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
+ 	 * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
+ 	 */
+ 	if (vma->vm_ops)
+-		printk(KERN_ALERT "vma->vm_ops->fault: %pSR\n",
++		printk(KERN_ALERT "vma->vm_ops->fault: %pAR\n",
+ 		       vma->vm_ops->fault);
+ 	if (vma->vm_file)
+-		printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pSR\n",
++		printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pAR\n",
+ 		       vma->vm_file->f_op->mmap);
+ 	dump_stack();
+ 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 @@ -1500,6 +1506,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
  	page_add_file_rmap(page);
  	set_pte_at(mm, addr, pte, mk_pte(page, prot));
@@ -101098,7 +102525,7 @@ index 6556263..db77807 100644
  	skb_reset_mac_header(skb);
  	skb_set_network_header(skb, skb_inner_network_offset(skb));
 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
-index 43116e8..e3e6159 100644
+index 43116e8..ba0916a8 100644
 --- a/net/ipv4/inet_hashtables.c
 +++ b/net/ipv4/inet_hashtables.c
 @@ -18,6 +18,7 @@
@@ -101113,7 +102540,7 @@ index 43116e8..e3e6159 100644
  	return inet_ehashfn(net, laddr, lport, faddr, fport);
  }
  
-+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
++extern void gr_update_task_in_ip_table(const struct inet_sock *inet);
 +
  /*
   * Allocate and initialize a new local port bind bucket.
@@ -101122,7 +102549,7 @@ index 43116e8..e3e6159 100644
  			twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
  		spin_unlock(&head->lock);
  
-+		gr_update_task_in_ip_table(current, inet_sk(sk));
++		gr_update_task_in_ip_table(inet_sk(sk));
 +
  		if (tw) {
  			inet_twsk_deschedule(tw, death_row);
@@ -101248,9 +102675,18 @@ index 3d4da2c..40f9c29 100644
  						  ICMP_PROT_UNREACH, 0);
  				}
 diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
-index 215af2b..c43a1e2 100644
+index 215af2b..73cbbe1 100644
 --- a/net/ipv4/ip_output.c
 +++ b/net/ipv4/ip_output.c
+@@ -231,7 +231,7 @@ static int ip_finish_output_gso(struct sk_buff *skb)
+ 	 */
+ 	features = netif_skb_features(skb);
+ 	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+-	if (IS_ERR(segs)) {
++	if (IS_ERR_OR_NULL(segs)) {
+ 		kfree_skb(skb);
+ 		return -ENOMEM;
+ 	}
 @@ -1533,6 +1533,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
  	struct sk_buff *nskb;
  	struct sock *sk;
@@ -102885,10 +104321,45 @@ index 4836af8..0e52bbd 100644
  
  	kfree_skb(skb);
 diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
-index 2a0bbda..d75ca57 100644
+index 2a0bbda..442240d 100644
 --- a/net/ipv6/xfrm6_policy.c
 +++ b/net/ipv6/xfrm6_policy.c
-@@ -212,11 +212,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
+@@ -170,8 +170,10 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
+ 		case IPPROTO_DCCP:
+ 			if (!onlyproto && (nh + offset + 4 < skb->data ||
+ 			     pskb_may_pull(skb, nh + offset + 4 - skb->data))) {
+-				__be16 *ports = (__be16 *)exthdr;
++				__be16 *ports;
+ 
++				nh = skb_network_header(skb);
++				ports = (__be16 *)(nh + offset);
+ 				fl6->fl6_sport = ports[!!reverse];
+ 				fl6->fl6_dport = ports[!reverse];
+ 			}
+@@ -180,8 +182,10 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
+ 
+ 		case IPPROTO_ICMPV6:
+ 			if (!onlyproto && pskb_may_pull(skb, nh + offset + 2 - skb->data)) {
+-				u8 *icmp = (u8 *)exthdr;
++				u8 *icmp;
+ 
++				nh = skb_network_header(skb);
++				icmp = (u8 *)(nh + offset);
+ 				fl6->fl6_icmp_type = icmp[0];
+ 				fl6->fl6_icmp_code = icmp[1];
+ 			}
+@@ -192,8 +196,9 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
+ 		case IPPROTO_MH:
+ 			if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) {
+ 				struct ip6_mh *mh;
+-				mh = (struct ip6_mh *)exthdr;
+ 
++				nh = skb_network_header(skb);
++				mh = (struct ip6_mh *)(nh + offset);
+ 				fl6->fl6_mh_type = mh->ip6mh_type;
+ 			}
+ 			fl6->flowi6_proto = nexthdr;
+@@ -212,11 +217,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
  	}
  }
  
@@ -102902,7 +104373,7 @@ index 2a0bbda..d75ca57 100644
  	return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
  }
  
-@@ -329,19 +329,19 @@ static struct ctl_table xfrm6_policy_table[] = {
+@@ -329,19 +334,19 @@ static struct ctl_table xfrm6_policy_table[] = {
  
  static int __net_init xfrm6_net_init(struct net *net)
  {
@@ -102927,7 +104398,7 @@ index 2a0bbda..d75ca57 100644
  	if (!hdr)
  		goto err_reg;
  
-@@ -349,8 +349,7 @@ static int __net_init xfrm6_net_init(struct net *net)
+@@ -349,8 +354,7 @@ static int __net_init xfrm6_net_init(struct net *net)
  	return 0;
  
  err_reg:
@@ -103809,6 +105280,19 @@ index a11c5ff..aa413a7 100644
  		goto nla_put_failure;
  
  	if (data_len) {
+diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
+index 108120f..5b169db 100644
+--- a/net/netfilter/nfnetlink_queue_core.c
++++ b/net/netfilter/nfnetlink_queue_core.c
+@@ -665,7 +665,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
+ 	 * returned by nf_queue.  For instance, callers rely on -ECANCELED to
+ 	 * mean 'ignore this hook'.
+ 	 */
+-	if (IS_ERR(segs))
++	if (IS_ERR_OR_NULL(segs))
+ 		goto out_err;
+ 	queued = 0;
+ 	err = 0;
 diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
 index 1840989..6895744 100644
 --- a/net/netfilter/nft_compat.c
@@ -103984,6 +105468,19 @@ index c416725..bd22eea 100644
  			   sock_i_ino(s)
  			);
  
+diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
+index 64dc864..7a9e2a4 100644
+--- a/net/openvswitch/datapath.c
++++ b/net/openvswitch/datapath.c
+@@ -332,6 +332,8 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
+ 	segs = __skb_gso_segment(skb, NETIF_F_SG, false);
+ 	if (IS_ERR(segs))
+ 		return PTR_ERR(segs);
++	if (segs == NULL)
++		return -EINVAL;
+ 
+ 	/* Queue all of the segments. */
+ 	skb = segs;
 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
 index 93896d2..b701c88 100644
 --- a/net/packet/af_packet.c
@@ -105689,6 +107186,19 @@ index 0917f04..f4e3d8c 100644
  		return -ENOMEM;
  
  	if (!proc_create("x25/route", S_IRUGO, init_net.proc_net,
+diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
+index c51e8f7b..e44f360 100644
+--- a/net/xfrm/xfrm_output.c
++++ b/net/xfrm/xfrm_output.c
+@@ -157,6 +157,8 @@ static int xfrm_output_gso(struct sk_buff *skb)
+ 	kfree_skb(skb);
+ 	if (IS_ERR(segs))
+ 		return PTR_ERR(segs);
++	if (segs == NULL)
++		return -EINVAL;
+ 
+ 	do {
+ 		struct sk_buff *nskb = segs->next;
 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
 index fdde51f..4839450 100644
 --- a/net/xfrm/xfrm_policy.c
@@ -123865,6 +125375,44 @@ index 0a578fe..b81f62d 100644
  	0;							\
  })
  
+diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
+index 714b949..1f0dc1e 100644
+--- a/virt/kvm/iommu.c
++++ b/virt/kvm/iommu.c
+@@ -43,13 +43,13 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
+ 				gfn_t base_gfn, unsigned long npages);
+ 
+ static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
+-			   unsigned long size)
++			   unsigned long npages)
+ {
+ 	gfn_t end_gfn;
+ 	pfn_t pfn;
+ 
+ 	pfn     = gfn_to_pfn_memslot(slot, gfn);
+-	end_gfn = gfn + (size >> PAGE_SHIFT);
++	end_gfn = gfn + npages;
+ 	gfn    += 1;
+ 
+ 	if (is_error_noslot_pfn(pfn))
+@@ -119,7 +119,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
+ 		 * Pin all pages we are about to map in memory. This is
+ 		 * important because we unmap and unpin in 4kb steps later.
+ 		 */
+-		pfn = kvm_pin_pages(slot, gfn, page_size);
++		pfn = kvm_pin_pages(slot, gfn, page_size >> PAGE_SHIFT);
+ 		if (is_error_noslot_pfn(pfn)) {
+ 			gfn += 1;
+ 			continue;
+@@ -131,7 +131,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
+ 		if (r) {
+ 			printk(KERN_ERR "kvm_iommu_map_address:"
+ 			       "iommu failed to map pfn=%llx\n", pfn);
+-			kvm_unpin_pages(kvm, pfn, page_size);
++			kvm_unpin_pages(kvm, pfn, page_size >> PAGE_SHIFT);
+ 			goto unmap_pages;
+ 		}
+ 
 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
 index 95519bc..43f5d42 100644
 --- a/virt/kvm/kvm_main.c

diff --git a/3.2.63/0000_README b/3.2.63/0000_README
index 860a604..5a21a10 100644
--- a/3.2.63/0000_README
+++ b/3.2.63/0000_README
@@ -170,7 +170,7 @@ Patch:	1062_linux-3.2.63.patch
 From:	http://www.kernel.org
 Desc:	Linux 3.2.63
 
-Patch:	4420_grsecurity-3.0-3.2.63-201410201736.patch
+Patch:	4420_grsecurity-3.0-3.2.63-201410250023.patch
 From:	http://www.grsecurity.net
 Desc:	hardened-sources base patch from upstream grsecurity
 

diff --git a/3.2.63/4420_grsecurity-3.0-3.2.63-201410201736.patch b/3.2.63/4420_grsecurity-3.0-3.2.63-201410250023.patch
similarity index 99%
rename from 3.2.63/4420_grsecurity-3.0-3.2.63-201410201736.patch
rename to 3.2.63/4420_grsecurity-3.0-3.2.63-201410250023.patch
index 045f3ce..02b9ab1 100644
--- a/3.2.63/4420_grsecurity-3.0-3.2.63-201410201736.patch
+++ b/3.2.63/4420_grsecurity-3.0-3.2.63-201410250023.patch
@@ -7226,6 +7226,20 @@ index 7df8b7f..4946269 100644
  extern unsigned long sparc64_elf_hwcap;
  #define ELF_HWCAP	sparc64_elf_hwcap
  
+diff --git a/arch/sparc/include/asm/oplib_64.h b/arch/sparc/include/asm/oplib_64.h
+index 97a9047..290b0cd 100644
+--- a/arch/sparc/include/asm/oplib_64.h
++++ b/arch/sparc/include/asm/oplib_64.h
+@@ -62,7 +62,8 @@ struct linux_mem_p1275 {
+ /* You must call prom_init() before using any of the library services,
+  * preferably as early as possible.  Pass it the romvec pointer.
+  */
+-extern void prom_init(void *cif_handler, void *cif_stack);
++void prom_init(void *cif_handler);
++void prom_init_report(void);
+ 
+ /* Boot argument acquisition, returns the boot command line string. */
+ extern char *prom_getbootargs(void);
 diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h
 index 156707b..aefa786 100644
 --- a/arch/sparc/include/asm/page_32.h
@@ -7329,6 +7343,21 @@ index f6ae2b2..b03ffc7 100644
  #define SRMMU_PAGE_KERNEL  __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
  				    SRMMU_DIRTY | SRMMU_REF)
  
+diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
+index 64718ba..a7e4178 100644
+--- a/arch/sparc/include/asm/setup.h
++++ b/arch/sparc/include/asm/setup.h
+@@ -21,6 +21,10 @@ extern unsigned char boot_cpu_id;
+ extern unsigned char boot_cpu_id4;
+ #endif
+ 
++#ifdef CONFIG_SPARC64
++void __init start_early_boot(void);
++#endif
++
+ #endif /* __KERNEL__ */
+ 
+ #endif /* _SPARC_SETUP_H */
 diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
 index 9689176..63c18ea 100644
 --- a/arch/sparc/include/asm/spinlock_64.h
@@ -7655,6 +7684,108 @@ index 27728e1..0010e923 100644
  		memset(&pkt, 0, sizeof(pkt));
  		pkt.header.data.tag.type = DS_DATA;
  		pkt.header.data.handle = cp->handle;
+diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h
+index 0c218e4..f8125bc 100644
+--- a/arch/sparc/kernel/entry.h
++++ b/arch/sparc/kernel/entry.h
+@@ -59,13 +59,10 @@ struct popc_6insn_patch_entry {
+ extern struct popc_6insn_patch_entry __popc_6insn_patch,
+ 	__popc_6insn_patch_end;
+ 
+-extern void __init per_cpu_patch(void);
+-extern void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
+-				    struct sun4v_1insn_patch_entry *);
+-extern void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *,
+-				    struct sun4v_2insn_patch_entry *);
+-extern void __init sun4v_patch(void);
+-extern void __init boot_cpu_id_too_large(int cpu);
++void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
++			     struct sun4v_1insn_patch_entry *);
++void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *,
++			     struct sun4v_2insn_patch_entry *);
+ extern unsigned int dcache_parity_tl1_occurred;
+ extern unsigned int icache_parity_tl1_occurred;
+ 
+diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
+index 0d810c2..fec9fd6 100644
+--- a/arch/sparc/kernel/head_64.S
++++ b/arch/sparc/kernel/head_64.S
+@@ -629,14 +629,12 @@ tlb_fixup_done:
+ 	sethi	%hi(init_thread_union), %g6
+ 	or	%g6, %lo(init_thread_union), %g6
+ 	ldx	[%g6 + TI_TASK], %g4
+-	mov	%sp, %l6
+ 
+ 	wr	%g0, ASI_P, %asi
+ 	mov	1, %g1
+ 	sllx	%g1, THREAD_SHIFT, %g1
+ 	sub	%g1, (STACKFRAME_SZ + STACK_BIAS), %g1
+ 	add	%g6, %g1, %sp
+-	mov	0, %fp
+ 
+ 	/* Set per-cpu pointer initially to zero, this makes
+ 	 * the boot-cpu use the in-kernel-image per-cpu areas
+@@ -663,44 +661,14 @@ tlb_fixup_done:
+ 	 nop
+ #endif
+ 
+-	mov	%l6, %o1			! OpenPROM stack
+ 	call	prom_init
+ 	 mov	%l7, %o0			! OpenPROM cif handler
+ 
+-	/* Initialize current_thread_info()->cpu as early as possible.
+-	 * In order to do that accurately we have to patch up the get_cpuid()
+-	 * assembler sequences.  And that, in turn, requires that we know
+-	 * if we are on a Starfire box or not.  While we're here, patch up
+-	 * the sun4v sequences as well.
++	/* To create a one-register-window buffer between the kernel's
++	 * initial stack and the last stack frame we use from the firmware,
++	 * do the rest of the boot from a C helper function.
+ 	 */
+-	call	check_if_starfire
+-	 nop
+-	call	per_cpu_patch
+-	 nop
+-	call	sun4v_patch
+-	 nop
+-
+-#ifdef CONFIG_SMP
+-	call	hard_smp_processor_id
+-	 nop
+-	cmp	%o0, NR_CPUS
+-	blu,pt	%xcc, 1f
+-	 nop
+-	call	boot_cpu_id_too_large
+-	 nop
+-	/* Not reached... */
+-
+-1:
+-#else
+-	mov	0, %o0
+-#endif
+-	sth	%o0, [%g6 + TI_CPU]
+-
+-	call	prom_init_report
+-	 nop
+-
+-	/* Off we go.... */
+-	call	start_kernel
++	call	start_early_boot
+ 	 nop
+ 	/* Not reached... */
+ 
+diff --git a/arch/sparc/kernel/hvtramp.S b/arch/sparc/kernel/hvtramp.S
+index 9365432..b69d224 100644
+--- a/arch/sparc/kernel/hvtramp.S
++++ b/arch/sparc/kernel/hvtramp.S
+@@ -111,7 +111,6 @@ hv_cpu_startup:
+ 	sllx		%g5, THREAD_SHIFT, %g5
+ 	sub		%g5, (STACKFRAME_SZ + STACK_BIAS), %g5
+ 	add		%g6, %g5, %sp
+-	mov		0, %fp
+ 
+ 	call		init_irqwork_curcpu
+ 	 nop
 diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
 index a19c8a0..d04a60b 100644
 --- a/arch/sparc/kernel/leon_kernel.c
@@ -7805,6 +7936,68 @@ index 96ee50a..68ce124 100644
  #ifdef CONFIG_AUDITSYSCALL
  	if (unlikely(current->audit_context)) {
  		unsigned long tstate = regs->tstate;
+diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
+index a854a1c..52488a5 100644
+--- a/arch/sparc/kernel/setup_64.c
++++ b/arch/sparc/kernel/setup_64.c
+@@ -30,6 +30,7 @@
+ #include <linux/cpu.h>
+ #include <linux/initrd.h>
+ #include <linux/module.h>
++#include <linux/start_kernel.h>
+ 
+ #include <asm/system.h>
+ #include <asm/io.h>
+@@ -174,7 +175,7 @@ char reboot_command[COMMAND_LINE_SIZE];
+ 
+ static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };
+ 
+-void __init per_cpu_patch(void)
++static void __init per_cpu_patch(void)
+ {
+ 	struct cpuid_patch_entry *p;
+ 	unsigned long ver;
+@@ -266,7 +267,7 @@ void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
+ 	}
+ }
+ 
+-void __init sun4v_patch(void)
++static void __init sun4v_patch(void)
+ {
+ 	extern void sun4v_hvapi_init(void);
+ 
+@@ -316,14 +317,25 @@ static void __init popc_patch(void)
+ 	}
+ }
+ 
+-#ifdef CONFIG_SMP
+-void __init boot_cpu_id_too_large(int cpu)
++void __init start_early_boot(void)
+ {
+-	prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
+-		    cpu, NR_CPUS);
+-	prom_halt();
++	int cpu;
++
++	check_if_starfire();
++	per_cpu_patch();
++	sun4v_patch();
++
++	cpu = hard_smp_processor_id();
++	if (cpu >= NR_CPUS) {
++		prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
++			    cpu, NR_CPUS);
++		prom_halt();
++	}
++	current_thread_info()->cpu = cpu;
++
++	prom_init_report();
++	start_kernel();
+ }
+-#endif
+ 
+ /* On Ultra, we support all of the v8 capabilities. */
+ unsigned long sparc64_elf_hwcap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR |
 diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
 index ffd1245..948b0b7 100644
 --- a/arch/sparc/kernel/smp_64.c
@@ -8134,6 +8327,36 @@ index 7408201..b349841 100644
  	.notifier_call	= sysfs_cpu_notify,
  };
  
+diff --git a/arch/sparc/kernel/trampoline_64.S b/arch/sparc/kernel/trampoline_64.S
+index 8fa84a3..3fc8ad5 100644
+--- a/arch/sparc/kernel/trampoline_64.S
++++ b/arch/sparc/kernel/trampoline_64.S
+@@ -112,10 +112,13 @@ startup_continue:
+ 	brnz,pn		%g1, 1b
+ 	 nop
+ 
+-	sethi		%hi(p1275buf), %g2
+-	or		%g2, %lo(p1275buf), %g2
+-	ldx		[%g2 + 0x10], %l2
+-	add		%l2, -(192 + 128), %sp
++	/* Get onto temporary stack which will be in the locked
++	 * kernel image.
++	 */
++	sethi		%hi(tramp_stack), %g1
++	or		%g1, %lo(tramp_stack), %g1
++	add		%g1, TRAMP_STACK_SIZE, %g1
++	sub		%g1, STACKFRAME_SZ + STACK_BIAS + 256, %sp
+ 	flushw
+ 
+ 	/* Setup the loop variables:
+@@ -397,7 +400,6 @@ after_lock_tlb:
+ 	sllx		%g5, THREAD_SHIFT, %g5
+ 	sub		%g5, (STACKFRAME_SZ + STACK_BIAS), %g5
+ 	add		%g6, %g5, %sp
+-	mov		0, %fp
+ 
+ 	rdpr		%pstate, %o1
+ 	or		%o1, PSTATE_IE, %o1
 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
 index 591f20c..0f1b925 100644
 --- a/arch/sparc/kernel/traps_32.c
@@ -9603,6 +9826,47 @@ index 2c0b966..00bf94e 100644
  	/* Pure DTLB misses do not tell us whether the fault causing
  	 * load/store/atomic was a write or not, it only says that there
  	 * was no match.  So in such a case we (carefully) read the
+diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
+index 42c55df..20da942 100644
+--- a/arch/sparc/mm/gup.c
++++ b/arch/sparc/mm/gup.c
+@@ -106,6 +106,36 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
+ 	return 1;
+ }
+ 
++int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
++			  struct page **pages)
++{
++	struct mm_struct *mm = current->mm;
++	unsigned long addr, len, end;
++	unsigned long next, flags;
++	pgd_t *pgdp;
++	int nr = 0;
++
++	start &= PAGE_MASK;
++	addr = start;
++	len = (unsigned long) nr_pages << PAGE_SHIFT;
++	end = start + len;
++
++	local_irq_save(flags);
++	pgdp = pgd_offset(mm, addr);
++	do {
++		pgd_t pgd = *pgdp;
++
++		next = pgd_addr_end(addr, end);
++		if (pgd_none(pgd))
++			break;
++		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
++			break;
++	} while (pgdp++, addr = next, addr != end);
++	local_irq_restore(flags);
++
++	return nr;
++}
++
+ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ 			struct page **pages)
+ {
 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
 index 07e1453..ae6e02e 100644
 --- a/arch/sparc/mm/hugetlbpage.c
@@ -9803,6 +10067,63 @@ index cbef74e..c38fead 100644
  	BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
  	page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
  
+diff --git a/arch/sparc/prom/cif.S b/arch/sparc/prom/cif.S
+index 9c86b4b..8050f38 100644
+--- a/arch/sparc/prom/cif.S
++++ b/arch/sparc/prom/cif.S
+@@ -11,11 +11,10 @@
+ 	.text
+ 	.globl	prom_cif_direct
+ prom_cif_direct:
++	save	%sp, -192, %sp
+ 	sethi	%hi(p1275buf), %o1
+ 	or	%o1, %lo(p1275buf), %o1
+-	ldx	[%o1 + 0x0010], %o2	! prom_cif_stack
+-	save	%o2, -192, %sp
+-	ldx	[%i1 + 0x0008], %l2	! prom_cif_handler
++	ldx	[%o1 + 0x0008], %l2	! prom_cif_handler
+ 	mov	%g4, %l0
+ 	mov	%g5, %l1
+ 	mov	%g6, %l3
+diff --git a/arch/sparc/prom/init_64.c b/arch/sparc/prom/init_64.c
+index 5016c5e..ffb1cc5 100644
+--- a/arch/sparc/prom/init_64.c
++++ b/arch/sparc/prom/init_64.c
+@@ -26,13 +26,13 @@ phandle prom_chosen_node;
+  * failure.  It gets passed the pointer to the PROM vector.
+  */
+ 
+-extern void prom_cif_init(void *, void *);
++extern void prom_cif_init(void *);
+ 
+-void __init prom_init(void *cif_handler, void *cif_stack)
++void __init prom_init(void *cif_handler)
+ {
+ 	phandle node;
+ 
+-	prom_cif_init(cif_handler, cif_stack);
++	prom_cif_init(cif_handler);
+ 
+ 	prom_chosen_node = prom_finddevice(prom_chosen_path);
+ 	if (!prom_chosen_node || (s32)prom_chosen_node == -1)
+diff --git a/arch/sparc/prom/p1275.c b/arch/sparc/prom/p1275.c
+index d9850c2..5bbbc23 100644
+--- a/arch/sparc/prom/p1275.c
++++ b/arch/sparc/prom/p1275.c
+@@ -21,7 +21,6 @@
+ struct {
+ 	long prom_callback;			/* 0x00 */
+ 	void (*prom_cif_handler)(long *);	/* 0x08 */
+-	unsigned long prom_cif_stack;		/* 0x10 */
+ } p1275buf;
+ 
+ extern void prom_world(int);
+@@ -53,5 +52,4 @@ void p1275_cmd_direct(unsigned long *args)
+ void prom_cif_init(void *cif_handler, void *cif_stack)
+ {
+ 	p1275buf.prom_cif_handler = (void (*)(long *))cif_handler;
+-	p1275buf.prom_cif_stack = (unsigned long)cif_stack;
+ }
 diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
 index 6cb8319..ee12bac 100644
 --- a/arch/tile/Kconfig
@@ -29346,7 +29667,7 @@ index 44b93da..5a0b3ee 100644
  	if (vma == &gate_vma)
  		return "[vsyscall]";
 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
-index 7b179b4..6bd17777 100644
+index 7b179b49..6bd17777 100644
 --- a/arch/x86/mm/iomap_32.c
 +++ b/arch/x86/mm/iomap_32.c
 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
@@ -32205,6 +32526,18 @@ index 1366a89..88178fe 100644
  	.notifier_call	= blk_cpu_notify,
  };
  
+diff --git a/block/blk-throttle.c b/block/blk-throttle.c
+index 5eed6a7..0e8abe9 100644
+--- a/block/blk-throttle.c
++++ b/block/blk-throttle.c
+@@ -361,6 +361,7 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
+ 	/* Group allocation failed. Account the IO to root group */
+ 	if (!tg) {
+ 		tg = td->root_tg;
++		rcu_read_unlock();
+ 		return tg;
+ 	}
+ 
 diff --git a/block/bsg.c b/block/bsg.c
 index c0ab25c..9d49f8f 100644
 --- a/block/bsg.c
@@ -32491,6 +32824,26 @@ index 5d41894..22021e4 100644
  }
  EXPORT_SYMBOL_GPL(cper_next_record_id);
  
+diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c
+index cfc0cc1..61fdbaa 100644
+--- a/drivers/acpi/atomicio.c
++++ b/drivers/acpi/atomicio.c
+@@ -286,6 +286,7 @@ static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width)
+ 		break;
+ #endif
+ 	default:
++		rcu_read_unlock();
+ 		return -EINVAL;
+ 	}
+ 	rcu_read_unlock();
+@@ -315,6 +316,7 @@ static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width)
+ 		break;
+ #endif
+ 	default:
++		rcu_read_unlock();
+ 		return -EINVAL;
+ 	}
+ 	rcu_read_unlock();
 diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
 index cb96296..b81293b 100644
 --- a/drivers/acpi/blacklist.c
@@ -35152,7 +35505,7 @@ index da3cfee..a5a6606 100644
  
  	*ppos = i;
 diff --git a/drivers/char/random.c b/drivers/char/random.c
-index c244f0e..a86bc96 100644
+index c244f0e..2b94e16 100644
 --- a/drivers/char/random.c
 +++ b/drivers/char/random.c
 @@ -255,10 +255,8 @@
@@ -36081,6 +36434,17 @@ index c244f0e..a86bc96 100644
  	spin_lock_irqsave(&r->lock, flags);
  	for (i = 0; i < r->poolinfo->poolwords; i += 16)
  		sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);
+@@ -954,8 +1056,8 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
+ 	 * pool while mixing, and hash one final time.
+ 	 */
+ 	sha_transform(hash.w, extract, workspace);
+-	memset(extract, 0, sizeof(extract));
+-	memset(workspace, 0, sizeof(workspace));
++	memzero_explicit(extract, sizeof(extract));
++	memzero_explicit(workspace, sizeof(workspace));
+ 
+ 	/*
+ 	 * In case the hash function has some recognizable output
 @@ -966,27 +1068,43 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
  	hash.w[1] ^= hash.w[4];
  	hash.w[2] ^= rol32(hash.w[2], 16);
@@ -36097,7 +36461,8 @@ index c244f0e..a86bc96 100644
 -	}
 -
  	memcpy(out, &hash, EXTRACT_SIZE);
- 	memset(&hash, 0, sizeof(hash));
+-	memset(&hash, 0, sizeof(hash));
++	memzero_explicit(&hash, sizeof(hash));
  }
  
 +/*
@@ -36145,7 +36510,13 @@ index c244f0e..a86bc96 100644
  			spin_lock_irqsave(&r->lock, flags);
  			if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
  				panic("Hardware RNG duplicated output!\n");
-@@ -1015,12 +1131,17 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+@@ -1010,17 +1126,22 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+ 	}
+ 
+ 	/* Wipe data just returned from memory */
+-	memset(tmp, 0, sizeof(tmp));
++	memzero_explicit(tmp, sizeof(tmp));
+ 
  	return ret;
  }
  
@@ -36172,6 +36543,15 @@ index c244f0e..a86bc96 100644
  			ret = -EFAULT;
  			break;
  		}
+@@ -1047,7 +1168,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
+ 	}
+ 
+ 	/* Wipe data just returned from memory */
+-	memset(tmp, 0, sizeof(tmp));
++	memzero_explicit(tmp, sizeof(tmp));
+ 
+ 	return ret;
+ }
 @@ -1055,11 +1176,20 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
  /*
   * This function is the exported kernel interface.  It returns some
@@ -62880,7 +63260,7 @@ index 15af622..0e9f4467 100644
   	help
  	  Various /proc files exist to monitor process memory utilization:
 diff --git a/fs/proc/array.c b/fs/proc/array.c
-index 439b5a1..61db155 100644
+index 439b5a1..5dec96d 100644
 --- a/fs/proc/array.c
 +++ b/fs/proc/array.c
 @@ -60,6 +60,7 @@
@@ -63005,7 +63385,7 @@ index 439b5a1..61db155 100644
  	if (mm) {
  		size = task_statm(mm, &shared, &text, &data, &resident);
  		mmput(mm);
-@@ -544,3 +608,10 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
+@@ -544,3 +608,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
  
  	return 0;
  }
@@ -63013,7 +63393,15 @@ index 439b5a1..61db155 100644
 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
 +{
-+	return sprintf(buffer, "%pI4\n", &task->signal->curr_ip);
++	unsigned long flags;
++	u32 curr_ip = 0;
++
++	if (lock_task_sighand(task, &flags)) {
++		curr_ip = task->signal->curr_ip;
++		unlock_task_sighand(task, &flags);
++	}
++
++	return sprintf(buffer, "%pI4\n", &curr_ip);
 +}
 +#endif
 diff --git a/fs/proc/base.c b/fs/proc/base.c
@@ -76102,7 +76490,7 @@ index 0000000..c6a07aa
 +}
 diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
 new file mode 100644
-index 0000000..c0aef3a
+index 0000000..e3650b6
 --- /dev/null
 +++ b/grsecurity/grsec_sock.c
 @@ -0,0 +1,244 @@
@@ -76229,10 +76617,10 @@ index 0000000..c0aef3a
 +
 +#endif
 +
-+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
++void gr_update_task_in_ip_table(const struct inet_sock *inet)
 +{
 +#ifdef CONFIG_GRKERNSEC
-+	struct signal_struct *sig = task->signal;
++	struct signal_struct *sig = current->signal;
 +	struct conn_table_entry *newent;
 +
 +	newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
@@ -78223,6 +78611,19 @@ index 04ffb2e..6799180 100644
  
  extern struct cleancache_ops
  	cleancache_register_ops(struct cleancache_ops *ops);
+diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
+index 081147d..da89543 100644
+--- a/include/linux/clocksource.h
++++ b/include/linux/clocksource.h
+@@ -284,7 +284,7 @@ extern struct clocksource* clocksource_get_next(void);
+ extern void clocksource_change_rating(struct clocksource *cs, int rating);
+ extern void clocksource_suspend(void);
+ extern void clocksource_resume(void);
+-extern struct clocksource * __init __weak clocksource_default_clock(void);
++extern struct clocksource * __init clocksource_default_clock(void);
+ extern void clocksource_mark_unstable(struct clocksource *cs);
+ 
+ extern void
 diff --git a/include/linux/compat.h b/include/linux/compat.h
 index d42bd48..554dcd5 100644
 --- a/include/linux/compat.h
@@ -78245,6 +78646,20 @@ index d42bd48..554dcd5 100644
  
  /*
   * epoll (fs/eventpoll.c) compat bits follow ...
+diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
+index 5633053..9ac1a7a 100644
+--- a/include/linux/compiler-gcc.h
++++ b/include/linux/compiler-gcc.h
+@@ -37,6 +37,9 @@
+     __asm__ ("" : "=r"(__ptr) : "0"(ptr));		\
+     (typeof(ptr)) (__ptr + (off)); })
+ 
++/* Make the optimizer believe the variable can be manipulated arbitrarily. */
++#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var))
++
+ #ifdef __CHECKER__
+ #define __must_be_array(arr) 0
+ #else
 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
 index e2a360a..1d61efb 100644
 --- a/include/linux/compiler-gcc4.h
@@ -78288,8 +78703,33 @@ index e2a360a..1d61efb 100644
  #endif
  
  #if __GNUC_MINOR__ > 0
+diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
+index cba9593..1a97cac 100644
+--- a/include/linux/compiler-intel.h
++++ b/include/linux/compiler-intel.h
+@@ -15,6 +15,7 @@
+  */
+ #undef barrier
+ #undef RELOC_HIDE
++#undef OPTIMIZER_HIDE_VAR
+ 
+ #define barrier() __memory_barrier()
+ 
+@@ -23,6 +24,12 @@
+      __ptr = (unsigned long) (ptr);				\
+     (typeof(ptr)) (__ptr + (off)); })
+ 
++/* This should act as an optimization barrier on var.
++ * Given that this compiler does not have inline assembly, a compiler barrier
++ * is the best we can do.
++ */
++#define OPTIMIZER_HIDE_VAR(var) barrier()
++
+ /* Intel ECC compiler doesn't support __builtin_types_compatible_p() */
+ #define __must_be_array(a) 0
+ 
 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
-index 320d6c9..f4c24bf 100644
+index 320d6c9..92ea3ae 100644
 --- a/include/linux/compiler.h
 +++ b/include/linux/compiler.h
 @@ -5,31 +5,51 @@
@@ -78356,7 +78796,18 @@ index 320d6c9..f4c24bf 100644
  #endif
  
  #ifdef __KERNEL__
-@@ -264,6 +286,30 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+@@ -164,6 +186,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+     (typeof(ptr)) (__ptr + (off)); })
+ #endif
+ 
++#ifndef OPTIMIZER_HIDE_VAR
++#define OPTIMIZER_HIDE_VAR(var) barrier()
++#endif
++
+ #endif /* __KERNEL__ */
+ 
+ #endif /* __ASSEMBLY__ */
+@@ -264,6 +290,30 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
  # define __attribute_const__	/* unimplemented */
  #endif
  
@@ -78387,7 +78838,7 @@ index 320d6c9..f4c24bf 100644
  /*
   * Tell gcc if a function is cold. The compiler will assume any path
   * directly leading to the call is unlikely.
-@@ -273,6 +319,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+@@ -273,6 +323,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
  #define __cold
  #endif
  
@@ -78410,7 +78861,7 @@ index 320d6c9..f4c24bf 100644
  /* Simple shorthand for a section definition */
  #ifndef __section
  # define __section(S) __attribute__ ((__section__(#S)))
-@@ -292,6 +354,18 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+@@ -292,6 +358,18 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
  #endif
  #ifndef __compiletime_error
  # define __compiletime_error(message)
@@ -78429,7 +78880,7 @@ index 320d6c9..f4c24bf 100644
  #endif
  
  /*
-@@ -306,6 +380,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+@@ -306,6 +384,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
   * use is to mediate communication between process-level code and irq/NMI
   * handlers, all running on the same CPU.
   */
@@ -80860,7 +81311,7 @@ index 9efd081..19f989c 100644
  extern struct key_type key_type_keyring;
  
 diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
-index c4d2fc1..5df9c19 100644
+index c4d2fc1..ef36389 100644
 --- a/include/linux/kgdb.h
 +++ b/include/linux/kgdb.h
 @@ -53,7 +53,7 @@ extern int kgdb_connected;
@@ -80881,7 +81332,7 @@ index c4d2fc1..5df9c19 100644
  
  /**
   * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
-@@ -277,7 +277,7 @@ struct kgdb_io {
+@@ -277,11 +277,11 @@ struct kgdb_io {
  	void			(*pre_exception) (void);
  	void			(*post_exception) (void);
  	int			is_console;
@@ -80890,6 +81341,11 @@ index c4d2fc1..5df9c19 100644
  
  extern struct kgdb_arch		arch_kgdb_ops;
  
+-extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
++extern unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs);
+ 
+ extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
+ extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
 diff --git a/include/linux/kmod.h b/include/linux/kmod.h
 index f8d4b27..8560882 100644
 --- a/include/linux/kmod.h
@@ -83504,6 +83960,29 @@ index 6a40c76..1747b67 100644
  
  enum {
  	false	= 0,
+diff --git a/include/linux/string.h b/include/linux/string.h
+index e033564..e43a65c 100644
+--- a/include/linux/string.h
++++ b/include/linux/string.h
+@@ -133,7 +133,7 @@ int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4);
+ #endif
+ 
+ extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
+-			const void *from, size_t available);
++				       const void *from, size_t available);
+ 
+ /**
+  * strstarts - does @str start with @prefix?
+@@ -144,5 +144,9 @@ static inline bool strstarts(const char *str, const char *prefix)
+ {
+ 	return strncmp(str, prefix, strlen(prefix)) == 0;
+ }
++
++size_t memweight(const void *ptr, size_t bytes);
++void memzero_explicit(void *s, size_t count);
++
+ #endif
+ #endif /* _LINUX_STRING_H_ */
 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
 index 3d8f9c4..349a695 100644
 --- a/include/linux/sunrpc/clnt.h
@@ -94076,7 +94555,7 @@ index b8029a5..2b120e1 100644
 +}
 +EXPORT_SYMBOL(pax_list_del_rcu);
 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
-index d9df745..e73c2fe 100644
+index d9df745..a541641b 100644
 --- a/lib/radix-tree.c
 +++ b/lib/radix-tree.c
 @@ -80,7 +80,7 @@ struct radix_tree_preload {
@@ -94088,6 +94567,18 @@ index d9df745..e73c2fe 100644
  
  static inline void *ptr_to_indirect(void *ptr)
  {
+@@ -1273,8 +1273,10 @@ unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item)
+ 
+ 		node = indirect_to_ptr(node);
+ 		max_index = radix_tree_maxindex(node->height);
+-		if (cur_index > max_index)
++		if (cur_index > max_index) {
++			rcu_read_unlock();
+ 			break;
++		}
+ 
+ 		cur_index = __locate(node, item, cur_index, &found_index);
+ 		rcu_read_unlock();
 diff --git a/lib/random32.c b/lib/random32.c
 index 1f44bdc..009bfe8 100644
 --- a/lib/random32.c
@@ -94589,6 +95080,33 @@ index 1f44bdc..009bfe8 100644
 +		pr_info("prandom: %d self tests passed\n", runs);
 +}
 +#endif
+diff --git a/lib/string.c b/lib/string.c
+index dc4a863..40136f6 100644
+--- a/lib/string.c
++++ b/lib/string.c
+@@ -583,6 +583,22 @@ void *memset(void *s, int c, size_t count)
+ EXPORT_SYMBOL(memset);
+ #endif
+ 
++/**
++ * memzero_explicit - Fill a region of memory (e.g. sensitive
++ *		      keying data) with 0s.
++ * @s: Pointer to the start of the area.
++ * @count: The size of the area.
++ *
++ * memzero_explicit() doesn't need an arch-specific version as
++ * it just invokes the one of memset() implicitly.
++ */
++void memzero_explicit(void *s, size_t count)
++{
++	memset(s, 0, count);
++	OPTIMIZER_HIDE_VAR(s);
++}
++EXPORT_SYMBOL(memzero_explicit);
++
+ #ifndef __HAVE_ARCH_MEMCPY
+ /**
+  * memcpy - Copy one area of memory to another
 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
 index ae02e42..4ffc938 100644
 --- a/lib/vsprintf.c
@@ -102760,7 +103278,7 @@ index 6be5e8e..22df23e 100644
  	tmo = req->expires - jiffies;
  	if (tmo < 0)
 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
-index 4afcf31..392d206 100644
+index 4afcf31..a15c188 100644
 --- a/net/ipv4/inet_hashtables.c
 +++ b/net/ipv4/inet_hashtables.c
 @@ -18,12 +18,15 @@
@@ -102774,7 +103292,7 @@ index 4afcf31..392d206 100644
  #include <net/secure_seq.h>
  #include <net/ip.h>
  
-+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
++extern void gr_update_task_in_ip_table(const struct inet_sock *inet);
 +
  /*
   * Allocate and initialize a new local port bind bucket.
@@ -102783,7 +103301,7 @@ index 4afcf31..392d206 100644
  			twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
  		spin_unlock(&head->lock);
  
-+		gr_update_task_in_ip_table(current, inet_sk(sk));
++		gr_update_task_in_ip_table(inet_sk(sk));
 +
  		if (tw) {
  			inet_twsk_deschedule(tw, death_row);
@@ -104590,10 +105108,45 @@ index f8bec1e..8628321 100644
  
  int udp6_seq_show(struct seq_file *seq, void *v)
 diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
-index db78e7d..c88f974 100644
+index db78e7d..df6de01 100644
 --- a/net/ipv6/xfrm6_policy.c
 +++ b/net/ipv6/xfrm6_policy.c
-@@ -202,11 +202,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
+@@ -160,8 +160,10 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
+ 		case IPPROTO_DCCP:
+ 			if (!onlyproto && (nh + offset + 4 < skb->data ||
+ 			     pskb_may_pull(skb, nh + offset + 4 - skb->data))) {
+-				__be16 *ports = (__be16 *)exthdr;
++				__be16 *ports;
+ 
++				nh = skb_network_header(skb);
++				ports = (__be16 *)(nh + offset);
+ 				fl6->fl6_sport = ports[!!reverse];
+ 				fl6->fl6_dport = ports[!reverse];
+ 			}
+@@ -170,8 +172,10 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
+ 
+ 		case IPPROTO_ICMPV6:
+ 			if (!onlyproto && pskb_may_pull(skb, nh + offset + 2 - skb->data)) {
+-				u8 *icmp = (u8 *)exthdr;
++				u8 *icmp;
+ 
++				nh = skb_network_header(skb);
++				icmp = (u8 *)(nh + offset);
+ 				fl6->fl6_icmp_type = icmp[0];
+ 				fl6->fl6_icmp_code = icmp[1];
+ 			}
+@@ -182,8 +186,9 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
+ 		case IPPROTO_MH:
+ 			if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) {
+ 				struct ip6_mh *mh;
+-				mh = (struct ip6_mh *)exthdr;
+ 
++				nh = skb_network_header(skb);
++				mh = (struct ip6_mh *)(nh + offset);
+ 				fl6->fl6_mh_type = mh->ip6mh_type;
+ 			}
+ 			fl6->flowi6_proto = nexthdr;
+@@ -202,11 +207,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
  	}
  }
  
@@ -105558,6 +106111,22 @@ index f042ae5..30ea486 100644
  	mutex_unlock(&nf_sockopt_mutex);
  }
  EXPORT_SYMBOL(nf_unregister_sockopt);
+diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
+index b4f8d84..4ffd251 100644
+--- a/net/netfilter/nfnetlink.c
++++ b/net/netfilter/nfnetlink.c
+@@ -170,8 +170,10 @@ replay:
+ 
+ 		err = nla_parse(cda, ss->cb[cb_id].attr_count,
+ 				attr, attrlen, ss->cb[cb_id].policy);
+-		if (err < 0)
++		if (err < 0) {
++			rcu_read_unlock();
+ 			return err;
++		}
+ 
+ 		if (nc->call_rcu) {
+ 			err = nc->call_rcu(net->nfnl, skb, nlh,
 diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
 index 66b2c54..4ea49be 100644
 --- a/net/netfilter/nfnetlink_log.c
@@ -126703,6 +127272,44 @@ index 547628e..74de9f2 100644
 +#endif
 +
  #endif
+diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
+index c946700..e32c93c 100644
+--- a/virt/kvm/iommu.c
++++ b/virt/kvm/iommu.c
+@@ -43,13 +43,13 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
+ 				gfn_t base_gfn, unsigned long npages);
+ 
+ static pfn_t kvm_pin_pages(struct kvm *kvm, struct kvm_memory_slot *slot,
+-			   gfn_t gfn, unsigned long size)
++			   gfn_t gfn, unsigned long npages)
+ {
+ 	gfn_t end_gfn;
+ 	pfn_t pfn;
+ 
+ 	pfn     = gfn_to_pfn_memslot(kvm, slot, gfn);
+-	end_gfn = gfn + (size >> PAGE_SHIFT);
++	end_gfn = gfn + npages;
+ 	gfn    += 1;
+ 
+ 	if (is_error_pfn(pfn))
+@@ -117,7 +117,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
+ 		 * Pin all pages we are about to map in memory. This is
+ 		 * important because we unmap and unpin in 4kb steps later.
+ 		 */
+-		pfn = kvm_pin_pages(kvm, slot, gfn, page_size);
++		pfn = kvm_pin_pages(kvm, slot, gfn, page_size >> PAGE_SHIFT);
+ 		if (is_error_pfn(pfn)) {
+ 			gfn += 1;
+ 			continue;
+@@ -129,7 +129,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
+ 		if (r) {
+ 			printk(KERN_ERR "kvm_iommu_map_address:"
+ 			       "iommu failed to map pfn=%llx\n", pfn);
+-			kvm_unpin_pages(kvm, pfn, page_size);
++			kvm_unpin_pages(kvm, pfn, page_size >> PAGE_SHIFT);
+ 			goto unmap_pages;
+ 		}
+ 
 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
 index d83aa5e..52970b8 100644
 --- a/virt/kvm/kvm_main.c

