public inbox for gentoo-commits@lists.gentoo.org
 help / color / mirror / Atom feed
* [gentoo-commits] proj/hardened-patchset:master commit in: 2.6.32/, 3.7.4/, 3.7.3/, 3.2.37/
@ 2013-01-24 15:10 Anthony G. Basile
  0 siblings, 0 replies; only message in thread
From: Anthony G. Basile @ 2013-01-24 15:10 UTC (permalink / raw
  To: gentoo-commits

commit:     bf7531966c1a3c5fb4745899008d64279c16777c
Author:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
AuthorDate: Thu Jan 24 15:09:54 2013 +0000
Commit:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
CommitDate: Thu Jan 24 15:09:54 2013 +0000
URL:        http://git.overlays.gentoo.org/gitweb/?p=proj/hardened-patchset.git;a=commit;h=bf753196

Grsec/PaX: 2.9.1-{2.6.32.60,3.2.37,3.7.4}-201301230048

---
 2.6.32/0000_README                                 |    2 +-
 ..._grsecurity-2.9.1-2.6.32.60-201301230046.patch} | 3843 ++++++++++++--------
 2.6.32/4450_grsec-kconfig-default-gids.patch       |   12 +-
 2.6.32/4465_selinux-avc_audit-log-curr_ip.patch    |    2 +-
 3.2.37/0000_README                                 |    2 +-
 ...420_grsecurity-2.9.1-3.2.37-201301230047.patch} |  878 ++++--
 3.2.37/4450_grsec-kconfig-default-gids.patch       |   12 +-
 3.2.37/4465_selinux-avc_audit-log-curr_ip.patch    |    2 +-
 {3.7.3 => 3.7.4}/0000_README                       |    6 +-
 3.7.4/1003_linux-3.7.4.patch                       | 1266 +++++++
 .../4420_grsecurity-2.9.1-3.7.4-201301230048.patch |  883 ++++--
 {3.7.3 => 3.7.4}/4425_grsec_remove_EI_PAX.patch    |    0
 .../4430_grsec-remove-localversion-grsec.patch     |    0
 {3.7.3 => 3.7.4}/4435_grsec-mute-warnings.patch    |    0
 .../4440_grsec-remove-protected-paths.patch        |    0
 .../4450_grsec-kconfig-default-gids.patch          |   12 +-
 .../4465_selinux-avc_audit-log-curr_ip.patch       |    2 +-
 {3.7.3 => 3.7.4}/4470_disable-compat_vdso.patch    |    0
 18 files changed, 4947 insertions(+), 1975 deletions(-)

diff --git a/2.6.32/0000_README b/2.6.32/0000_README
index bb6d062..e95f139 100644
--- a/2.6.32/0000_README
+++ b/2.6.32/0000_README
@@ -34,7 +34,7 @@ Patch:	1059_linux-2.6.32.60.patch
 From:	http://www.kernel.org
 Desc:	Linux 2.6.32.59
 
-Patch:	4420_grsecurity-2.9.1-2.6.32.60-201301181517.patch
+Patch:	4420_grsecurity-2.9.1-2.6.32.60-201301230046.patch
 From:	http://www.grsecurity.net
 Desc:	hardened-sources base patch from upstream grsecurity
 

diff --git a/2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201301181517.patch b/2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201301230046.patch
similarity index 97%
rename from 2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201301181517.patch
rename to 2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201301230046.patch
index 1eea97a..5cfce60 100644
--- a/2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201301181517.patch
+++ b/2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201301230046.patch
@@ -624,19 +624,31 @@ index ebc3c89..20cfa63 100644
  
  	for (i = 0; i < n; i++) {
 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
-index a94e49c..d71dd44 100644
+index a94e49c..ad84d0e 100644
 --- a/arch/alpha/kernel/osf_sys.c
 +++ b/arch/alpha/kernel/osf_sys.c
-@@ -1172,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
+@@ -1163,16 +1163,16 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
+    generic version except that we know how to honor ADDR_LIMIT_32BIT.  */
+ 
+ static unsigned long
+-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
+-		         unsigned long limit)
++arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
++		         unsigned long limit, unsigned long flags)
+ {
+ 	struct vm_area_struct *vma = find_vma(current->mm, addr);
+-
++	unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+ 	while (1) {
  		/* At this point:  (!vma || addr < vma->vm_end). */
  		if (limit - len < addr)
  			return -ENOMEM;
 -		if (!vma || addr + len <= vma->vm_start)
-+		if (check_heap_stack_gap(vma, addr, len))
++		if (check_heap_stack_gap(vma, addr, len, offset))
  			return addr;
  		addr = vma->vm_end;
  		vma = vma->vm_next;
-@@ -1208,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -1208,20 +1208,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
  	   merely specific addresses, but regions of memory -- perhaps
  	   this feature should be incorporated into all ports?  */
  
@@ -645,19 +657,26 @@ index a94e49c..d71dd44 100644
 +#endif
 +
  	if (addr) {
- 		addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
+-		addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
++		addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
  		if (addr != (unsigned long) -ENOMEM)
-@@ -1215,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ 			return addr;
  	}
  
  	/* Next, try allocating at TASK_UNMAPPED_BASE.  */
 -	addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
 -					 len, limit);
-+	addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
++	addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
 +
  	if (addr != (unsigned long) -ENOMEM)
  		return addr;
  
+ 	/* Finally, try allocating in low memory.  */
+-	addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
++	addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
+ 
+ 	return addr;
+ }
 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
 index 00a31de..2ded0f2 100644
 --- a/arch/alpha/mm/fault.c
@@ -1196,18 +1215,19 @@ index 3a32af4..c8def8a 100644
  
  #ifdef MULTI_USER
  extern struct cpu_user_fns cpu_user;
-diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
-index b12cc98..cdb5c51 100644
---- a/arch/arm/include/asm/pgalloc.h
-+++ b/arch/arm/include/asm/pgalloc.h
-@@ -29,6 +29,7 @@
- #define pmd_alloc_one(mm,addr)		({ BUG(); ((pmd_t *)2); })
- #define pmd_free(mm, pmd)		do { } while (0)
- #define pgd_populate(mm,pmd,pte)	BUG()
-+#define pgd_populate_kernel(mm,pmd,pte)	BUG()
+diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
+index 1df6457..c806a73 100644
+--- a/arch/arm/include/asm/ptrace.h
++++ b/arch/arm/include/asm/ptrace.h
+@@ -69,7 +69,7 @@
+ /*
+  * ARMv7 groups of APSR bits
+  */
+-#define PSR_ISET_MASK	0x01000010	/* ISA state (J, T) mask */
++#define PSR_ISET_MASK	0x01000020	/* ISA state (J, T) mask */
+ #define PSR_IT_MASK	0x0600fc00	/* If-Then execution state mask */
+ #define PSR_ENDIAN_MASK	0x00000200	/* Endianness state mask */
  
- extern pgd_t *get_pgd_slow(struct mm_struct *mm);
- extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);
 diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
 index d65b2f5..9d87555 100644
 --- a/arch/arm/include/asm/system.h
@@ -1368,6 +1388,21 @@ index a6c66f5..bfdad39 100644
  	bne	__sys_trace
  
  	cmp	scno, #NR_syscalls		@ check upper syscall limit
+diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
+index 38ccbe1..ca979b0 100644
+--- a/arch/arm/kernel/head.S
++++ b/arch/arm/kernel/head.S
+@@ -45,7 +45,9 @@
+ 	.equ	swapper_pg_dir, KERNEL_RAM_VADDR - 0x4000
+ 
+ 	.macro	pgtbl, rd
+-	ldr	\rd, =(KERNEL_RAM_PADDR - 0x4000)
++	mov	\rd, #KERNEL_RAM_PADDR
++	sub	\rd, #0x4000
++	add	\rd, \rd, \phys
+ 	.endm
+ 
+ #ifdef CONFIG_XIP_KERNEL
 diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
 index ba8ccfe..2dc34dc 100644
 --- a/arch/arm/kernel/kgdb.c
@@ -1444,7 +1479,7 @@ index a2ea385..4783488 100644
  		return scno;
  	if (!(current->ptrace & PT_PTRACED))
 diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
-index c6c57b6..0c3b29e 100644
+index c6c57b6..8ec5c3f 100644
 --- a/arch/arm/kernel/setup.c
 +++ b/arch/arm/kernel/setup.c
 @@ -92,16 +92,16 @@ EXPORT_SYMBOL(elf_hwcap);
@@ -1452,19 +1487,19 @@ index c6c57b6..0c3b29e 100644
  #endif
  #ifdef MULTI_TLB
 -struct cpu_tlb_fns cpu_tlb;
-+struct cpu_tlb_fns cpu_tlb __read_only;
++struct cpu_tlb_fns cpu_tlb __read_mostly;
  #endif
  #ifdef MULTI_USER
 -struct cpu_user_fns cpu_user;
-+struct cpu_user_fns cpu_user __read_only;
++struct cpu_user_fns cpu_user __read_mostly;
  #endif
  #ifdef MULTI_CACHE
 -struct cpu_cache_fns cpu_cache;
-+struct cpu_cache_fns cpu_cache __read_only;
++struct cpu_cache_fns cpu_cache __read_mostly;
  #endif
  #ifdef CONFIG_OUTER_CACHE
 -struct outer_cache_fns outer_cache;
-+struct outer_cache_fns outer_cache __read_only;
++struct outer_cache_fns outer_cache __read_mostly;
  #endif
  
  struct stack {
@@ -1754,58 +1789,10 @@ index c83fdc8..ab9fc44 100644
  	.valid		= suspend_valid_only_mem,
  };
 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
-index 3191cd6..c322981 100644
+index 3191cd6..68bd2d7 100644
 --- a/arch/arm/mm/fault.c
 +++ b/arch/arm/mm/fault.c
-@@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
- 	}
- #endif
- 
-+#ifdef CONFIG_PAX_PAGEEXEC
-+	if (fsr & FSR_LNX_PF) {
-+		pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
-+		do_group_exit(SIGKILL);
-+	}
-+#endif
-+
- 	tsk->thread.address = addr;
- 	tsk->thread.error_code = fsr;
- 	tsk->thread.trap_no = 14;
-@@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
- }
- #endif					/* CONFIG_MMU */
- 
-+#ifdef CONFIG_PAX_PAGEEXEC
-+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
-+{
-+	long i;
-+
-+	printk(KERN_ERR "PAX: bytes at PC: ");
-+	for (i = 0; i < 20; i++) {
-+		unsigned char c;
-+		if (get_user(c, (__force unsigned char __user *)pc+i))
-+			printk(KERN_CONT "?? ");
-+		else
-+			printk(KERN_CONT "%02x ", c);
-+	}
-+	printk("\n");
-+
-+	printk(KERN_ERR "PAX: bytes at SP-4: ");
-+	for (i = -1; i < 20; i++) {
-+		unsigned long c;
-+		if (get_user(c, (__force unsigned long __user *)sp+i))
-+			printk(KERN_CONT "???????? ");
-+		else
-+			printk(KERN_CONT "%08lx ", c);
-+	}
-+	printk("\n");
-+}
-+#endif
-+
- /*
-  * First Level Translation Fault Handler
-  *
-@@ -569,6 +603,20 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
+@@ -569,6 +569,20 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
  	const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
  	struct siginfo info;
  
@@ -1827,10 +1814,18 @@ index 3191cd6..c322981 100644
  		return;
  
 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
-index f5abc51..7ec524c 100644
+index f5abc51..5f5262a 100644
 --- a/arch/arm/mm/mmap.c
 +++ b/arch/arm/mm/mmap.c
-@@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -30,6 +30,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ 	struct mm_struct *mm = current->mm;
+ 	struct vm_area_struct *vma;
+ 	unsigned long start_addr;
++	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ #ifdef CONFIG_CPU_V6
+ 	unsigned int cache_type;
+ 	int do_align = 0, aliasing = 0;
+@@ -63,6 +64,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
  	if (len > TASK_SIZE)
  		return -ENOMEM;
  
@@ -1841,13 +1836,13 @@ index f5abc51..7ec524c 100644
  	if (addr) {
  		if (do_align)
  			addr = COLOUR_ALIGN(addr, pgoff);
-@@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -70,15 +75,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
  			addr = PAGE_ALIGN(addr);
  
  		vma = find_vma(mm, addr);
 -		if (TASK_SIZE - len >= addr &&
 -		    (!vma || addr + len <= vma->vm_start))
-+		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
++		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
  			return addr;
  	}
  	if (len > mm->cached_hole_size) {
@@ -1861,7 +1856,7 @@ index f5abc51..7ec524c 100644
  	}
  
  full_search:
-@@ -94,14 +97,14 @@ full_search:
+@@ -94,14 +98,14 @@ full_search:
  			 * Start a new search - just in case we missed
  			 * some holes.
  			 */
@@ -1875,10 +1870,18 @@ index f5abc51..7ec524c 100644
  			return -ENOMEM;
  		}
 -		if (!vma || addr + len <= vma->vm_start) {
-+		if (check_heap_stack_gap(vma, addr, len)) {
++		if (check_heap_stack_gap(vma, addr, len, offset)) {
  			/*
  			 * Remember the place where we stopped the search:
  			 */
+@@ -116,7 +120,6 @@ full_search:
+ 	}
+ }
+ 
+-
+ /*
+  * You really shouldn't be using read() or write() on /dev/mem.  This
+  * might go away in the future.
 diff --git a/arch/arm/plat-s3c/pm.c b/arch/arm/plat-s3c/pm.c
 index 8d97db2..b66cfa5 100644
 --- a/arch/arm/plat-s3c/pm.c
@@ -2130,34 +2133,42 @@ index f8e16b2..c73ff79 100644
  };
  
 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
-index 385fd30..6c3d97e 100644
+index 385fd30..3aaf4fe 100644
 --- a/arch/frv/mm/elf-fdpic.c
 +++ b/arch/frv/mm/elf-fdpic.c
-@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ {
+ 	struct vm_area_struct *vma;
+ 	unsigned long limit;
++	unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+ 
+ 	if (len > TASK_SIZE)
+ 		return -ENOMEM;
+@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
  	if (addr) {
  		addr = PAGE_ALIGN(addr);
  		vma = find_vma(current->mm, addr);
 -		if (TASK_SIZE - len >= addr &&
 -		    (!vma || addr + len <= vma->vm_start))
-+		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
++		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
  			goto success;
  	}
  
-@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+@@ -89,7 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
  			for (; vma; vma = vma->vm_next) {
  				if (addr > limit)
  					break;
 -				if (addr + len <= vma->vm_start)
-+				if (check_heap_stack_gap(vma, addr, len))
++				if (check_heap_stack_gap(vma, addr, len, offset))
  					goto success;
  				addr = vma->vm_end;
  			}
-@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+@@ -104,7 +104,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
  		for (; vma; vma = vma->vm_next) {
  			if (addr > limit)
  				break;
 -			if (addr + len <= vma->vm_start)
-+			if (check_heap_stack_gap(vma, addr, len))
++			if (check_heap_stack_gap(vma, addr, len, offset))
  				goto success;
  			addr = vma->vm_end;
  		}
@@ -2463,27 +2474,76 @@ index 239ecdc..f94170e 100644
  
  static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
-index 449c8c0..432a3d2 100644
+index 449c8c0..50cdf87 100644
 --- a/arch/ia64/include/asm/uaccess.h
 +++ b/arch/ia64/include/asm/uaccess.h
-@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
+@@ -42,6 +42,8 @@
+ #include <asm/pgtable.h>
+ #include <asm/io.h>
+ 
++extern void check_object_size(const void *ptr, unsigned long n, bool to);
++
+ /*
+  * For historical reasons, the following macros are grossly misnamed:
+  */
+@@ -240,12 +242,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
+ static inline unsigned long
+ __copy_to_user (void __user *to, const void *from, unsigned long count)
+ {
++	if (count > INT_MAX)
++		return count;
++
++	if (!__builtin_constant_p(count))
++		check_object_size(from, count, true);
++
+ 	return __copy_user(to, (__force void __user *) from, count);
+ }
+ 
+ static inline unsigned long
+ __copy_from_user (void *to, const void __user *from, unsigned long count)
+ {
++	if (count > INT_MAX)
++		return count;
++
++	if (!__builtin_constant_p(count))
++		check_object_size(to, count, false);
++
+ 	return __copy_user((__force void __user *) to, from, count);
+ }
+ 
+@@ -255,10 +269,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
+ ({											\
+ 	void __user *__cu_to = (to);							\
  	const void *__cu_from = (from);							\
- 	long __cu_len = (n);								\
+-	long __cu_len = (n);								\
++	unsigned long __cu_len = (n);							\
  											\
 -	if (__access_ok(__cu_to, __cu_len, get_fs()))					\
-+	if (__cu_len > 0  && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs()))			\
++	if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) {		\
++		if (!__builtin_constant_p(n))						\
++			check_object_size(__cu_from, __cu_len, true);			\
  		__cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len);	\
++	}										\
  	__cu_len;									\
  })
-@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
- 	long __cu_len = (n);								\
+ 
+@@ -266,11 +283,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
+ ({											\
+ 	void *__cu_to = (to);								\
+ 	const void __user *__cu_from = (from);						\
+-	long __cu_len = (n);								\
++	unsigned long __cu_len = (n);							\
  											\
  	__chk_user_ptr(__cu_from);							\
 -	if (__access_ok(__cu_from, __cu_len, get_fs()))					\
-+	if (__cu_len > 0 && __cu_len <= INT_MAX  && __access_ok(__cu_from, __cu_len, get_fs()))			\
++	if (__cu_len <= INT_MAX  && __access_ok(__cu_from, __cu_len, get_fs())) {	\
++		if (!__builtin_constant_p(n))						\
++			check_object_size(__cu_to, __cu_len, false);			\
  		__cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len);	\
++	}										\
  	__cu_len;									\
  })
+ 
 diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
 index f2c1600..969398a 100644
 --- a/arch/ia64/kernel/dma-mapping.c
@@ -2667,10 +2727,18 @@ index 285aae8..61dbab6 100644
  	.free_coherent = swiotlb_free_coherent,
  	.map_page = swiotlb_map_page,
 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
-index 609d500..7dde2a8 100644
+index 609d500..acd0429 100644
 --- a/arch/ia64/kernel/sys_ia64.c
 +++ b/arch/ia64/kernel/sys_ia64.c
-@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
+@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
+ 	unsigned long start_addr, align_mask = PAGE_SIZE - 1;
+ 	struct mm_struct *mm = current->mm;
+ 	struct vm_area_struct *vma;
++	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ 
+ 	if (len > RGN_MAP_LIMIT)
+ 		return -ENOMEM;
+@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
  	if (REGION_NUMBER(addr) == RGN_HPAGE)
  		addr = 0;
  #endif
@@ -2684,7 +2752,7 @@ index 609d500..7dde2a8 100644
  	if (!addr)
  		addr = mm->free_area_cache;
  
-@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
+@@ -61,14 +69,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
  	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
  		/* At this point:  (!vma || addr < vma->vm_end). */
  		if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
@@ -2698,7 +2766,7 @@ index 609d500..7dde2a8 100644
  			return -ENOMEM;
  		}
 -		if (!vma || addr + len <= vma->vm_start) {
-+		if (check_heap_stack_gap(vma, addr, len)) {
++		if (check_heap_stack_gap(vma, addr, len, offset)) {
  			/* Remember the address where we stopped this search:  */
  			mm->free_area_cache = addr + len;
  			return addr;
@@ -2782,15 +2850,23 @@ index 19261a9..1611b7a 100644
  	/*
  	 * If for any reason at all we couldn't handle the fault, make
 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
-index b0f6157..a082bbc 100644
+index b0f6157..f83c84f 100644
 --- a/arch/ia64/mm/hugetlbpage.c
 +++ b/arch/ia64/mm/hugetlbpage.c
-@@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
+@@ -150,6 +150,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
+ 		unsigned long pgoff, unsigned long flags)
+ {
+ 	struct vm_area_struct *vmm;
++	unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
+ 
+ 	if (len > RGN_MAP_LIMIT)
+ 		return -ENOMEM;
+@@ -172,7 +173,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
  		/* At this point:  (!vmm || addr < vmm->vm_end). */
  		if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
  			return -ENOMEM;
 -		if (!vmm || (addr + len) <= vmm->vm_start)
-+		if (check_heap_stack_gap(vmm, addr, len))
++		if (check_heap_stack_gap(vmm, addr, len, offset))
  			return addr;
  		addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
  	}
@@ -3615,28 +3691,56 @@ index fb59852..32d43e7 100644
  	if ((r = copy_from_user(&s, up, sz)) == 0) {
  		sigset_32to64(set, &s);
 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
-index 9147391..f3d949a 100644
+index 9147391..d09f456 100644
 --- a/arch/parisc/kernel/sys_parisc.c
 +++ b/arch/parisc/kernel/sys_parisc.c
-@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
+@@ -33,9 +33,11 @@
+ #include <linux/utsname.h>
+ #include <linux/personality.h>
+ 
+-static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
++static unsigned long get_unshared_area(struct file *filp, unsigned long addr, unsigned long len,
++				       unsigned long flags)
+ {
+ 	struct vm_area_struct *vma;
++	unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+ 
+ 	addr = PAGE_ALIGN(addr);
+ 
+@@ -43,7 +45,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
  		/* At this point:  (!vma || addr < vma->vm_end). */
  		if (TASK_SIZE - len < addr)
  			return -ENOMEM;
 -		if (!vma || addr + len <= vma->vm_start)
-+		if (check_heap_stack_gap(vma, addr, len))
++		if (check_heap_stack_gap(vma, addr, len, offset))
  			return addr;
  		addr = vma->vm_end;
  	}
-@@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
+@@ -67,11 +69,12 @@ static int get_offset(struct address_space *mapping)
+ 	return offset & 0x3FF000;
+ }
+ 
+-static unsigned long get_shared_area(struct address_space *mapping,
+-		unsigned long addr, unsigned long len, unsigned long pgoff)
++static unsigned long get_shared_area(struct file *filp, struct address_space *mapping,
++		unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
+ {
+ 	struct vm_area_struct *vma;
+ 	int offset = mapping ? get_offset(mapping) : 0;
++	unsigned long rand_offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+ 
+ 	addr = DCACHE_ALIGN(addr - offset) + offset;
+ 
+@@ -79,7 +82,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
  		/* At this point:  (!vma || addr < vma->vm_end). */
  		if (TASK_SIZE - len < addr)
  			return -ENOMEM;
 -		if (!vma || addr + len <= vma->vm_start)
-+		if (check_heap_stack_gap(vma, addr, len))
++		if (check_heap_stack_gap(vma, addr, len, rand_offset))
  			return addr;
  		addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
  		if (addr < vma->vm_end) /* handle wraparound */
-@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -98,14 +101,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
  	if (flags & MAP_FIXED)
  		return addr;
  	if (!addr)
@@ -3644,7 +3748,17 @@ index 9147391..f3d949a 100644
 +		addr = current->mm->mmap_base;
  
  	if (filp) {
- 		addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
+-		addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
++		addr = get_shared_area(filp, filp->f_mapping, addr, len, pgoff, flags);
+ 	} else if(flags & MAP_SHARED) {
+-		addr = get_shared_area(NULL, addr, len, pgoff);
++		addr = get_shared_area(filp, NULL, addr, len, pgoff, flags);
+ 	} else {
+-		addr = get_unshared_area(addr, len);
++		addr = get_unshared_area(filp, addr, len, flags);
+ 	}
+ 	return addr;
+ }
 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
 index 8b58bf0..7afff03 100644
 --- a/arch/parisc/kernel/traps.c
@@ -5122,7 +5236,7 @@ index 0d957a4..26d968f 100644
  		mm->unmap_area = arch_unmap_area_topdown;
  	}
 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
-index ba51948..23009d9 100644
+index ba51948..0e45275 100644
 --- a/arch/powerpc/mm/slice.c
 +++ b/arch/powerpc/mm/slice.c
 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
@@ -5130,7 +5244,7 @@ index ba51948..23009d9 100644
  		return 0;
  	vma = find_vma(mm, addr);
 -	return (!vma || (addr + len) <= vma->vm_start);
-+	return check_heap_stack_gap(vma, addr, len);
++	return check_heap_stack_gap(vma, addr, len, 0);
  }
  
  static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
@@ -5139,7 +5253,7 @@ index ba51948..23009d9 100644
  			continue;
  		}
 -		if (!vma || addr + len <= vma->vm_start) {
-+		if (check_heap_stack_gap(vma, addr, len)) {
++		if (check_heap_stack_gap(vma, addr, len, 0)) {
  			/*
  			 * Remember the place where we stopped the search:
  			 */
@@ -5166,7 +5280,7 @@ index ba51948..23009d9 100644
  		 */
  		vma = find_vma(mm, addr);
 -		if (!vma || (addr + len) <= vma->vm_start) {
-+		if (check_heap_stack_gap(vma, addr, len)) {
++		if (check_heap_stack_gap(vma, addr, len, 0)) {
  			/* remember the address as a hint for next time */
  			if (use_cache)
  				mm->free_area_cache = addr;
@@ -5175,7 +5289,7 @@ index ba51948..23009d9 100644
  
  		/* try just below the current vma->vm_start */
 -		addr = vma->vm_start;
-+		addr = skip_heap_stack_gap(vma, len);
++		addr = skip_heap_stack_gap(vma, len, 0);
  	}
  
  	/*
@@ -5733,48 +5847,64 @@ index 3e532d0..9faa306 100644
  #ifdef CONFIG_CPU_LITTLE_ENDIAN
  	.gdb_bpt_instr		= { 0x3c, 0xc3 },
 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
-index afeb710..d1d1289 100644
+index afeb710..e8366ef 100644
 --- a/arch/sh/mm/mmap.c
 +++ b/arch/sh/mm/mmap.c
-@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -49,6 +49,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ 	struct vm_area_struct *vma;
+ 	unsigned long start_addr;
+ 	int do_colour_align;
++	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ 
+ 	if (flags & MAP_FIXED) {
+ 		/* We do not accept a shared mapping if it would violate
+@@ -74,8 +75,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
  			addr = PAGE_ALIGN(addr);
  
  		vma = find_vma(mm, addr);
 -		if (TASK_SIZE - len >= addr &&
 -		    (!vma || addr + len <= vma->vm_start))
-+		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
++		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
  			return addr;
  	}
  
-@@ -106,7 +105,7 @@ full_search:
+@@ -106,7 +106,7 @@ full_search:
  			}
  			return -ENOMEM;
  		}
 -		if (likely(!vma || addr + len <= vma->vm_start)) {
-+		if (likely(check_heap_stack_gap(vma, addr, len))) {
++		if (likely(check_heap_stack_gap(vma, addr, len, offset))) {
  			/*
  			 * Remember the place where we stopped the search:
  			 */
-@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -131,6 +131,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ 	struct mm_struct *mm = current->mm;
+ 	unsigned long addr = addr0;
+ 	int do_colour_align;
++	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ 
+ 	if (flags & MAP_FIXED) {
+ 		/* We do not accept a shared mapping if it would violate
+@@ -157,8 +158,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  			addr = PAGE_ALIGN(addr);
  
  		vma = find_vma(mm, addr);
 -		if (TASK_SIZE - len >= addr &&
 -		    (!vma || addr + len <= vma->vm_start))
-+		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
++		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
  			return addr;
  	}
  
-@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -179,7 +179,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  	/* make sure it can fit in the remaining address space */
  	if (likely(addr > len)) {
  		vma = find_vma(mm, addr-len);
 -		if (!vma || addr <= vma->vm_start) {
-+		if (check_heap_stack_gap(vma, addr - len, len)) {
++		if (check_heap_stack_gap(vma, addr - len, len, offset)) {
  			/* remember the address as a hint for next time */
  			return (mm->free_area_cache = addr-len);
  		}
-@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -188,18 +188,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  	if (unlikely(mm->mmap_base < len))
  		goto bottomup;
  
@@ -5793,11 +5923,11 @@ index afeb710..d1d1289 100644
  		 */
  		vma = find_vma(mm, addr);
 -		if (likely(!vma || addr+len <= vma->vm_start)) {
-+		if (likely(check_heap_stack_gap(vma, addr, len))) {
++		if (likely(check_heap_stack_gap(vma, addr, len, offset))) {
  			/* remember the address as a hint for next time */
  			return (mm->free_area_cache = addr);
  		}
-@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -209,10 +209,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  		        mm->cached_hole_size = vma->vm_start - addr;
  
  		/* try just below the current vma->vm_start */
@@ -5805,7 +5935,7 @@ index afeb710..d1d1289 100644
 -		if (do_colour_align)
 -			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
 -	} while (likely(len < vma->vm_start));
-+		addr = skip_heap_stack_gap(vma, len);
++		addr = skip_heap_stack_gap(vma, len, offset);
 +	} while (!IS_ERR_VALUE(addr));
  
  bottomup:
@@ -6729,10 +6859,18 @@ index 6edc4e5..06a69b4 100644
  #include <asm/sigcontext.h>
  #include <asm/fpumacro.h>
 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
-index 3a82e65..ce0a53a 100644
+index 3a82e65..ad9761e 100644
 --- a/arch/sparc/kernel/sys_sparc_32.c
 +++ b/arch/sparc/kernel/sys_sparc_32.c
-@@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+@@ -40,6 +40,7 @@ asmlinkage unsigned long sys_getpagesize(void)
+ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
+ {
+ 	struct vm_area_struct * vmm;
++	unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+ 
+ 	if (flags & MAP_FIXED) {
+ 		/* We do not accept a shared mapping if it would violate
+@@ -57,7 +58,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
  	if (ARCH_SUN4C && len > 0x20000000)
  		return -ENOMEM;
  	if (!addr)
@@ -6741,20 +6879,26 @@ index 3a82e65..ce0a53a 100644
  
  	if (flags & MAP_SHARED)
  		addr = COLOUR_ALIGN(addr);
-@@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+@@ -72,7 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
  		}
  		if (TASK_SIZE - PAGE_SIZE - len < addr)
  			return -ENOMEM;
 -		if (!vmm || addr + len <= vmm->vm_start)
-+		if (check_heap_stack_gap(vmm, addr, len))
++		if (check_heap_stack_gap(vmm, addr, len, offset))
  			return addr;
  		addr = vmm->vm_end;
  		if (flags & MAP_SHARED)
 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
-index cfa0e19..98972ac 100644
+index cfa0e19..6a250b0 100644
 --- a/arch/sparc/kernel/sys_sparc_64.c
 +++ b/arch/sparc/kernel/sys_sparc_64.c
-@@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+@@ -120,12 +120,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ 	unsigned long task_size = TASK_SIZE;
+ 	unsigned long start_addr;
+ 	int do_color_align;
++	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ 
+ 	if (flags & MAP_FIXED) {
  		/* We do not accept a shared mapping if it would violate
  		 * cache aliasing constraints.
  		 */
@@ -6763,7 +6907,7 @@ index cfa0e19..98972ac 100644
  		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
  			return -EINVAL;
  		return addr;
-@@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+@@ -140,6 +141,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
  	if (filp || (flags & MAP_SHARED))
  		do_color_align = 1;
  
@@ -6774,13 +6918,13 @@ index cfa0e19..98972ac 100644
  	if (addr) {
  		if (do_color_align)
  			addr = COLOUR_ALIGN(addr, pgoff);
-@@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+@@ -147,15 +152,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
  			addr = PAGE_ALIGN(addr);
  
  		vma = find_vma(mm, addr);
 -		if (task_size - len >= addr &&
 -		    (!vma || addr + len <= vma->vm_start))
-+		if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
++		if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
  			return addr;
  	}
  
@@ -6793,7 +6937,7 @@ index cfa0e19..98972ac 100644
  	        mm->cached_hole_size = 0;
  	}
  
-@@ -175,14 +178,14 @@ full_search:
+@@ -175,14 +179,14 @@ full_search:
  			vma = find_vma(mm, VA_EXCLUDE_END);
  		}
  		if (unlikely(task_size < addr)) {
@@ -6807,11 +6951,19 @@ index cfa0e19..98972ac 100644
  			return -ENOMEM;
  		}
 -		if (likely(!vma || addr + len <= vma->vm_start)) {
-+		if (likely(check_heap_stack_gap(vma, addr, len))) {
++		if (likely(check_heap_stack_gap(vma, addr, len, offset))) {
  			/*
  			 * Remember the place where we stopped the search:
  			 */
-@@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -208,6 +212,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ 	unsigned long task_size = STACK_TOP32;
+ 	unsigned long addr = addr0;
+ 	int do_color_align;
++	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ 
+ 	/* This should only ever run for 32-bit processes.  */
+ 	BUG_ON(!test_thread_flag(TIF_32BIT));
+@@ -216,7 +221,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  		/* We do not accept a shared mapping if it would violate
  		 * cache aliasing constraints.
  		 */
@@ -6820,26 +6972,26 @@ index cfa0e19..98972ac 100644
  		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
  			return -EINVAL;
  		return addr;
-@@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -237,8 +242,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  			addr = PAGE_ALIGN(addr);
  
  		vma = find_vma(mm, addr);
 -		if (task_size - len >= addr &&
 -		    (!vma || addr + len <= vma->vm_start))
-+		if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
++		if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
  			return addr;
  	}
  
-@@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -259,7 +263,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  	/* make sure it can fit in the remaining address space */
  	if (likely(addr > len)) {
  		vma = find_vma(mm, addr-len);
 -		if (!vma || addr <= vma->vm_start) {
-+		if (check_heap_stack_gap(vma, addr - len, len)) {
++		if (check_heap_stack_gap(vma, addr - len, len, offset)) {
  			/* remember the address as a hint for next time */
  			return (mm->free_area_cache = addr-len);
  		}
-@@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -268,18 +272,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  	if (unlikely(mm->mmap_base < len))
  		goto bottomup;
  
@@ -6858,11 +7010,11 @@ index cfa0e19..98972ac 100644
  		 */
  		vma = find_vma(mm, addr);
 -		if (likely(!vma || addr+len <= vma->vm_start)) {
-+		if (likely(check_heap_stack_gap(vma, addr, len))) {
++		if (likely(check_heap_stack_gap(vma, addr, len, offset))) {
  			/* remember the address as a hint for next time */
  			return (mm->free_area_cache = addr);
  		}
-@@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -289,10 +293,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
   		        mm->cached_hole_size = vma->vm_start - addr;
  
  		/* try just below the current vma->vm_start */
@@ -6870,12 +7022,12 @@ index cfa0e19..98972ac 100644
 -		if (do_color_align)
 -			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
 -	} while (likely(len < vma->vm_start));
-+		addr = skip_heap_stack_gap(vma, len);
++		addr = skip_heap_stack_gap(vma, len, offset);
 +	} while (!IS_ERR_VALUE(addr));
  
  bottomup:
  	/*
-@@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+@@ -384,6 +386,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
  	    current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
  	    sysctl_legacy_va_layout) {
  		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
@@ -6888,7 +7040,7 @@ index cfa0e19..98972ac 100644
  		mm->get_unmapped_area = arch_get_unmapped_area;
  		mm->unmap_area = arch_unmap_area;
  	} else {
-@@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+@@ -398,6 +406,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
  			gap = (task_size / 6 * 5);
  
  		mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
@@ -8369,28 +8521,44 @@ index 43b0da9..f9f9985 100644
  	 * load/store/atomic was a write or not, it only says that there
  	 * was no match.  So in such a case we (carefully) read the
 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
-index f27d103..1b06377 100644
+index f27d103..d4fd7ba 100644
 --- a/arch/sparc/mm/hugetlbpage.c
 +++ b/arch/sparc/mm/hugetlbpage.c
-@@ -69,7 +69,7 @@ full_search:
+@@ -36,6 +36,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
+ 	struct vm_area_struct * vma;
+ 	unsigned long task_size = TASK_SIZE;
+ 	unsigned long start_addr;
++	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ 
+ 	if (test_thread_flag(TIF_32BIT))
+ 		task_size = STACK_TOP32;
+@@ -69,7 +70,7 @@ full_search:
  			}
  			return -ENOMEM;
  		}
 -		if (likely(!vma || addr + len <= vma->vm_start)) {
-+		if (likely(check_heap_stack_gap(vma, addr, len))) {
++		if (likely(check_heap_stack_gap(vma, addr, len, offset))) {
  			/*
  			 * Remember the place where we stopped the search:
  			 */
-@@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -92,6 +93,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ 	struct vm_area_struct *vma;
+ 	struct mm_struct *mm = current->mm;
+ 	unsigned long addr = addr0;
++	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ 
+ 	/* This should only ever run for 32-bit processes.  */
+ 	BUG_ON(!test_thread_flag(TIF_32BIT));
+@@ -108,7 +110,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  	/* make sure it can fit in the remaining address space */
  	if (likely(addr > len)) {
  		vma = find_vma(mm, addr-len);
 -		if (!vma || addr <= vma->vm_start) {
-+		if (check_heap_stack_gap(vma, addr - len, len)) {
++		if (check_heap_stack_gap(vma, addr - len, len, offset)) {
  			/* remember the address as a hint for next time */
  			return (mm->free_area_cache = addr-len);
  		}
-@@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -117,16 +119,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  	if (unlikely(mm->mmap_base < len))
  		goto bottomup;
  
@@ -8406,28 +8574,36 @@ index f27d103..1b06377 100644
  		 */
  		vma = find_vma(mm, addr);
 -		if (likely(!vma || addr+len <= vma->vm_start)) {
-+		if (likely(check_heap_stack_gap(vma, addr, len))) {
++		if (likely(check_heap_stack_gap(vma, addr, len, offset))) {
  			/* remember the address as a hint for next time */
  			return (mm->free_area_cache = addr);
  		}
-@@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -136,8 +139,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
   		        mm->cached_hole_size = vma->vm_start - addr;
  
  		/* try just below the current vma->vm_start */
 -		addr = (vma->vm_start-len) & HPAGE_MASK;
 -	} while (likely(len < vma->vm_start));
-+		addr = skip_heap_stack_gap(vma, len);
++		addr = skip_heap_stack_gap(vma, len, offset);
 +	} while (!IS_ERR_VALUE(addr));
  
  bottomup:
  	/*
-@@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+@@ -165,6 +168,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ 	struct mm_struct *mm = current->mm;
+ 	struct vm_area_struct *vma;
+ 	unsigned long task_size = TASK_SIZE;
++	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ 
+ 	if (test_thread_flag(TIF_32BIT))
+ 		task_size = STACK_TOP32;
+@@ -183,8 +187,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
  	if (addr) {
  		addr = ALIGN(addr, HPAGE_SIZE);
  		vma = find_vma(mm, addr);
 -		if (task_size - len >= addr &&
 -		    (!vma || addr + len <= vma->vm_start))
-+		if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
++		if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
  			return addr;
  	}
  	if (mm->get_unmapped_area == arch_get_unmapped_area)
@@ -12166,6 +12342,19 @@ index 0b20bbb..f06479b 100644
  	else
  		clts();
  }
+diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
+index 6a63b86..b6a731c 100644
+--- a/arch/x86/include/asm/io.h
++++ b/arch/x86/include/asm/io.h
+@@ -170,7 +170,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
+ 	return ioremap_nocache(offset, size);
+ }
+ 
+-extern void iounmap(volatile void __iomem *addr);
++extern void iounmap(const volatile void __iomem *addr);
+ 
+ extern void set_iounmap_nonlazy(void);
+ 
 diff --git a/arch/x86/include/asm/io_32.h b/arch/x86/include/asm/io_32.h
 index a299900..15c5410 100644
 --- a/arch/x86/include/asm/io_32.h
@@ -12879,50 +13068,9 @@ index efb3899..ef30687 100644
  
  #endif /* __ASSEMBLY__ */
 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
-index 9357473..aeb2de5 100644
+index 9357473..04fa525 100644
 --- a/arch/x86/include/asm/paravirt_types.h
 +++ b/arch/x86/include/asm/paravirt_types.h
-@@ -78,19 +78,19 @@ struct pv_init_ops {
- 	 */
- 	unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
- 			  unsigned long addr, unsigned len);
--};
-+} __no_const;
- 
- 
- struct pv_lazy_ops {
- 	/* Set deferred update mode, used for batching operations. */
- 	void (*enter)(void);
- 	void (*leave)(void);
--};
-+} __no_const;
- 
- struct pv_time_ops {
- 	unsigned long long (*sched_clock)(void);
- 	unsigned long (*get_tsc_khz)(void);
--};
-+} __no_const;
- 
- struct pv_cpu_ops {
- 	/* hooks for various privileged instructions */
-@@ -186,7 +186,7 @@ struct pv_cpu_ops {
- 
- 	void (*start_context_switch)(struct task_struct *prev);
- 	void (*end_context_switch)(struct task_struct *next);
--};
-+} __no_const;
- 
- struct pv_irq_ops {
- 	/*
-@@ -217,7 +217,7 @@ struct pv_apic_ops {
- 				 unsigned long start_eip,
- 				 unsigned long start_esp);
- #endif
--};
-+} __no_const;
- 
- struct pv_mmu_ops {
- 	unsigned long (*read_cr2)(void);
 @@ -301,6 +301,7 @@ struct pv_mmu_ops {
  	struct paravirt_callee_save make_pud;
  
@@ -12944,15 +13092,6 @@ index 9357473..aeb2de5 100644
  };
  
  struct raw_spinlock;
-@@ -326,7 +333,7 @@ struct pv_lock_ops {
- 	void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
- 	int (*spin_trylock)(struct raw_spinlock *lock);
- 	void (*spin_unlock)(struct raw_spinlock *lock);
--};
-+} __no_const;
- 
- /* This contains all the paravirt structures: we get a convenient
-  * number for each function using the offset which we use to indicate
 diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
 index b399988..3f47c38 100644
 --- a/arch/x86/include/asm/pci_x86.h
@@ -13682,7 +13821,7 @@ index 0f0d908..f2e3da2 100644
  }
  
 diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
-index 562d4fd..1e42a5b 100644
+index 562d4fd..6e655f2 100644
 --- a/arch/x86/include/asm/reboot.h
 +++ b/arch/x86/include/asm/reboot.h
 @@ -6,19 +6,19 @@
@@ -13698,9 +13837,8 @@ index 562d4fd..1e42a5b 100644
  	void (*shutdown)(void);
  	void (*crash_shutdown)(struct pt_regs *);
 -	void (*emergency_restart)(void);
--};
 +	void (* __noreturn emergency_restart)(void);
-+} __no_const;
+ };
  
  extern struct machine_ops machine_ops;
  
@@ -13919,7 +14057,7 @@ index 14e0ed8..7f7dd5e 100644
  #define __USER_DS     (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
  #define __USER_CS     (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
-index 4c2f63c..5685db2 100644
+index 4c2f63c..57ac225 100644
 --- a/arch/x86/include/asm/smp.h
 +++ b/arch/x86/include/asm/smp.h
 @@ -24,7 +24,7 @@ extern unsigned int num_processors;
@@ -13943,15 +14081,6 @@ index 4c2f63c..5685db2 100644
  
  struct smp_ops {
  	void (*smp_prepare_boot_cpu)(void);
-@@ -60,7 +57,7 @@ struct smp_ops {
- 
- 	void (*send_call_func_ipi)(const struct cpumask *mask);
- 	void (*send_call_func_single_ipi)(int cpu);
--};
-+} __no_const;
- 
- /* Globals due to paravirt */
- extern void set_cpu_sibling_map(int cpu);
 @@ -175,14 +172,8 @@ extern unsigned disabled_cpus __cpuinitdata;
  extern int safe_smp_processor_id(void);
  
@@ -15160,91 +15289,6 @@ index d0983d2..1f7c9e9 100644
  #endif /* __KERNEL__ */
  
  #endif /* _ASM_X86_VSYSCALL_H */
-diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
-index 2c756fd..3377e37 100644
---- a/arch/x86/include/asm/x86_init.h
-+++ b/arch/x86/include/asm/x86_init.h
-@@ -28,7 +28,7 @@ struct x86_init_mpparse {
- 	void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
- 	void (*find_smp_config)(unsigned int reserve);
- 	void (*get_smp_config)(unsigned int early);
--};
-+} __no_const;
- 
- /**
-  * struct x86_init_resources - platform specific resource related ops
-@@ -42,7 +42,7 @@ struct x86_init_resources {
- 	void (*probe_roms)(void);
- 	void (*reserve_resources)(void);
- 	char *(*memory_setup)(void);
--};
-+} __no_const;
- 
- /**
-  * struct x86_init_irqs - platform specific interrupt setup
-@@ -55,7 +55,7 @@ struct x86_init_irqs {
- 	void (*pre_vector_init)(void);
- 	void (*intr_init)(void);
- 	void (*trap_init)(void);
--};
-+} __no_const;
- 
- /**
-  * struct x86_init_oem - oem platform specific customizing functions
-@@ -65,7 +65,7 @@ struct x86_init_irqs {
- struct x86_init_oem {
- 	void (*arch_setup)(void);
- 	void (*banner)(void);
--};
-+} __no_const;
- 
- /**
-  * struct x86_init_paging - platform specific paging functions
-@@ -75,7 +75,7 @@ struct x86_init_oem {
- struct x86_init_paging {
- 	void (*pagetable_setup_start)(pgd_t *base);
- 	void (*pagetable_setup_done)(pgd_t *base);
--};
-+} __no_const;
- 
- /**
-  * struct x86_init_timers - platform specific timer setup
-@@ -88,7 +88,7 @@ struct x86_init_timers {
- 	void (*setup_percpu_clockev)(void);
- 	void (*tsc_pre_init)(void);
- 	void (*timer_init)(void);
--};
-+} __no_const;
- 
- /**
-  * struct x86_init_ops - functions for platform specific setup
-@@ -101,7 +101,7 @@ struct x86_init_ops {
- 	struct x86_init_oem		oem;
- 	struct x86_init_paging		paging;
- 	struct x86_init_timers		timers;
--};
-+} __no_const;
- 
- /**
-  * struct x86_cpuinit_ops - platform specific cpu hotplug setups
-@@ -109,7 +109,7 @@ struct x86_init_ops {
-  */
- struct x86_cpuinit_ops {
- 	void (*setup_percpu_clockev)(void);
--};
-+} __no_const;
- 
- /**
-  * struct x86_platform_ops - platform specific runtime functions
-@@ -121,7 +121,7 @@ struct x86_platform_ops {
- 	unsigned long (*calibrate_tsc)(void);
- 	unsigned long (*get_wallclock)(void);
- 	int (*set_wallclock)(unsigned long nowtime);
--};
-+} __no_const;
- 
- extern struct x86_init_ops x86_init;
- extern struct x86_cpuinit_ops x86_cpuinit;
 diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
 index 727acc1..52c9e4c 100644
 --- a/arch/x86/include/asm/xsave.h
@@ -15582,6 +15626,31 @@ index 8928d97..f799cea 100644
  		spin_lock(&ioapic_lock);
  		__mask_and_edge_IO_APIC_irq(cfg);
  		__unmask_and_level_IO_APIC_irq(cfg);
+diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
+index efa00e2..63f96f5 100644
+--- a/arch/x86/kernel/apic/numaq_32.c
++++ b/arch/x86/kernel/apic/numaq_32.c
+@@ -275,13 +275,13 @@ static __init void early_check_numaq(void)
+ 		early_get_smp_config();
+ 
+ 	if (found_numaq) {
+-		x86_init.mpparse.mpc_record = numaq_mpc_record;
+-		x86_init.mpparse.setup_ioapic_ids = x86_init_noop;
+-		x86_init.mpparse.mpc_apic_id = mpc_apic_id;
+-		x86_init.mpparse.smp_read_mpc_oem = smp_read_mpc_oem;
+-		x86_init.mpparse.mpc_oem_pci_bus = mpc_oem_pci_bus;
+-		x86_init.mpparse.mpc_oem_bus_info = mpc_oem_bus_info;
+-		x86_init.timers.tsc_pre_init = numaq_tsc_init;
++		*(void **)&x86_init.mpparse.mpc_record = numaq_mpc_record;
++		*(void **)&x86_init.mpparse.setup_ioapic_ids = x86_init_noop;
++		*(void **)&x86_init.mpparse.mpc_apic_id = mpc_apic_id;
++		*(void **)&x86_init.mpparse.smp_read_mpc_oem = smp_read_mpc_oem;
++		*(void **)&x86_init.mpparse.mpc_oem_pci_bus = mpc_oem_pci_bus;
++		*(void **)&x86_init.mpparse.mpc_oem_bus_info = mpc_oem_bus_info;
++		*(void **)&x86_init.timers.tsc_pre_init = numaq_tsc_init;
+ 	}
+ }
+ 
 diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
 index 151ace6..f317474 100644
 --- a/arch/x86/kernel/apm_32.c
@@ -15945,7 +16014,7 @@ index 6a77cca..4f4fca0 100644
  }
  #endif
 diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
-index 417990f..96dc36b 100644
+index 417990f..8c489b8 100644
 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c
 +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
 @@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
@@ -15957,6 +16026,43 @@ index 417990f..96dc36b 100644
  	.show   = show,
  	.store  = store,
  };
+@@ -931,6 +931,11 @@ static struct kobj_type ktype_cache = {
+ 	.default_attrs	= default_attrs,
+ };
+ 
++static struct kobj_type ktype_l3_cache = {
++	.sysfs_ops	= &sysfs_ops,
++	.default_attrs	= default_l3_attrs,
++};
++
+ static struct kobj_type ktype_percpu_entry = {
+ 	.sysfs_ops	= &sysfs_ops,
+ };
+@@ -997,6 +1002,8 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
+ 	}
+ 
+ 	for (i = 0; i < num_cache_leaves; i++) {
++		struct kobj_type *ktype;
++
+ 		this_object = INDEX_KOBJECT_PTR(cpu, i);
+ 		this_object->cpu = cpu;
+ 		this_object->index = i;
+@@ -1004,12 +1011,12 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
+ 		this_leaf = CPUID4_INFO_IDX(cpu, i);
+ 
+ 		if (this_leaf->can_disable)
+-			ktype_cache.default_attrs = default_l3_attrs;
++			ktype = &ktype_l3_cache;
+ 		else
+-			ktype_cache.default_attrs = default_attrs;
++			ktype = &ktype_cache;
+ 
+ 		retval = kobject_init_and_add(&(this_object->kobj),
+-					      &ktype_cache,
++					      ktype,
+ 					      per_cpu(cache_kobject, cpu),
+ 					      "index%1lu", i);
+ 		if (unlikely(retval)) {
 diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
 index 472763d..9831e11 100644
 --- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -15973,7 +16079,7 @@ index 472763d..9831e11 100644
  	return 0;
  }
 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
-index 0f16a2b..21740f5 100644
+index 0f16a2b..a4a4382 100644
 --- a/arch/x86/kernel/cpu/mcheck/mce.c
 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
 @@ -43,6 +43,7 @@
@@ -16087,6 +16193,15 @@ index 0f16a2b..21740f5 100644
  	open_exclu = 0;
  
  	spin_unlock(&mce_state_lock);
+@@ -2007,7 +2010,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+ 	return NOTIFY_OK;
+ }
+ 
+-static struct notifier_block mce_cpu_notifier __cpuinitdata = {
++static struct notifier_block mce_cpu_notifier __cpuinitconst = {
+ 	.notifier_call = mce_cpu_callback,
+ };
+ 
 @@ -2082,7 +2085,7 @@ struct dentry *mce_get_debugfs_dir(void)
  static void mce_reset(void)
  {
@@ -16262,7 +16377,7 @@ index a501dee..816c719 100644
  #define is_cpu(vnd)	(mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
  #define use_intel()	(mtrr_if && mtrr_if->use_intel_if == 1)
 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
-index 0ff02ca..fc49a60 100644
+index 0ff02ca..9994c9d 100644
 --- a/arch/x86/kernel/cpu/perf_event.c
 +++ b/arch/x86/kernel/cpu/perf_event.c
 @@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event *event,
@@ -16296,6 +16411,15 @@ index 0ff02ca..fc49a60 100644
  
  	err = checking_wrmsrl(hwc->event_base + idx,
  			     (u64)(-left) & x86_pmu.event_mask);
+@@ -1940,7 +1940,7 @@ perf_event_nmi_handler(struct notifier_block *self,
+ 	return NOTIFY_STOP;
+ }
+ 
+-static __read_mostly struct notifier_block perf_event_nmi_notifier = {
++static struct notifier_block perf_event_nmi_notifier = {
+ 	.notifier_call		= perf_event_nmi_handler,
+ 	.next			= NULL,
+ 	.priority		= 1
 @@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
  			break;
  
@@ -16342,6 +16466,19 @@ index 898df97..9e82503 100644
  static struct wd_ops intel_arch_wd_ops __read_mostly = {
  	.reserve	= single_msr_reserve,
  	.unreserve	= single_msr_unreserve,
+diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
+index 9580152..bca82d1 100644
+--- a/arch/x86/kernel/cpu/vmware.c
++++ b/arch/x86/kernel/cpu/vmware.c
+@@ -79,7 +79,7 @@ void __init vmware_platform_setup(void)
+ 	VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);
+ 
+ 	if (ebx != UINT_MAX)
+-		x86_platform.calibrate_tsc = vmware_get_tsc_khz;
++		*(void **)&x86_platform.calibrate_tsc = vmware_get_tsc_khz;
+ 	else
+ 		printk(KERN_WARNING
+ 		       "Failed to get TSC freq from the hypervisor\n");
 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
 index ff95824..2ffdcb5 100644
 --- a/arch/x86/kernel/crash.c
@@ -16766,6 +16903,21 @@ index b9c830c..1e41a96 100644
  	va_start(ap, fmt);
  	n = vscnprintf(buf, sizeof(buf), fmt, ap);
  	early_console->write(early_console, buf, n);
+diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c
+index cdcfb12..15589d9 100644
+--- a/arch/x86/kernel/efi.c
++++ b/arch/x86/kernel/efi.c
+@@ -455,8 +455,8 @@ void __init efi_init(void)
+ 		do_add_efi_memmap();
+ 
+ #ifdef CONFIG_X86_32
+-	x86_platform.get_wallclock = efi_get_time;
+-	x86_platform.set_wallclock = efi_set_rtc_mmss;
++	*(void **)&x86_platform.get_wallclock = efi_get_time;
++	*(void **)&x86_platform.set_wallclock = efi_set_rtc_mmss;
+ #endif
+ 
+ 	/* Setup for EFI runtime service */
 diff --git a/arch/x86/kernel/efi_32.c b/arch/x86/kernel/efi_32.c
 index 5cab48e..b025f9b 100644
 --- a/arch/x86/kernel/efi_32.c
@@ -18989,10 +19141,10 @@ index 9dbb527..9fe4f21 100644
  		return -EFAULT;
  
 diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
-index 4f8e250..df24706 100644
+index 4f8e250..87a66e9 100644
 --- a/arch/x86/kernel/head32.c
 +++ b/arch/x86/kernel/head32.c
-@@ -16,6 +16,7 @@
+@@ -16,13 +16,14 @@
  #include <asm/apic.h>
  #include <asm/io_apic.h>
  #include <asm/bios_ebda.h>
@@ -19000,6 +19152,16 @@ index 4f8e250..df24706 100644
  
  static void __init i386_default_early_setup(void)
  {
+ 	/* Initilize 32bit specific setup functions */
+-	x86_init.resources.probe_roms = probe_roms;
+-	x86_init.resources.reserve_resources = i386_reserve_resources;
+-	x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc;
++	*(void **)&x86_init.resources.probe_roms = probe_roms;
++	*(void **)&x86_init.resources.reserve_resources = i386_reserve_resources;
++	*(void **)&x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc;
+ 
+ 	reserve_ebda_region();
+ }
 @@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
  {
  	reserve_trampoline_memory();
@@ -20332,9 +20494,18 @@ index 7a67820..17c9752 100644
  
  	switch (val) {
 diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
-index 63b0ec8..6d92227 100644
+index 63b0ec8..4211963 100644
 --- a/arch/x86/kernel/kvm.c
 +++ b/arch/x86/kernel/kvm.c
+@@ -201,7 +201,7 @@ static void __init paravirt_ops_setup(void)
+ 	pv_info.paravirt_enabled = 1;
+ 
+ 	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
+-		pv_cpu_ops.io_delay = kvm_io_delay;
++		*(void **)&pv_cpu_ops.io_delay = kvm_io_delay;
+ 
+ 	if (kvm_para_has_feature(KVM_FEATURE_MMU_OP)) {
+ 		pv_mmu_ops.set_pte = kvm_set_pte;
 @@ -216,6 +216,7 @@ static void __init paravirt_ops_setup(void)
  		pv_mmu_ops.set_pud = kvm_set_pud;
  #if PAGETABLE_LEVELS == 4
@@ -20343,6 +20514,39 @@ index 63b0ec8..6d92227 100644
  #endif
  #endif
  		pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
+diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
+index feaeb0d..9c62757 100644
+--- a/arch/x86/kernel/kvmclock.c
++++ b/arch/x86/kernel/kvmclock.c
+@@ -184,20 +184,20 @@ void __init kvmclock_init(void)
+ 	if (kvmclock && kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) {
+ 		if (kvm_register_clock("boot clock"))
+ 			return;
+-		pv_time_ops.sched_clock = kvm_clock_read;
+-		x86_platform.calibrate_tsc = kvm_get_tsc_khz;
+-		x86_platform.get_wallclock = kvm_get_wallclock;
+-		x86_platform.set_wallclock = kvm_set_wallclock;
++		*(void **)&pv_time_ops.sched_clock = kvm_clock_read;
++		*(void **)&x86_platform.calibrate_tsc = kvm_get_tsc_khz;
++		*(void **)&x86_platform.get_wallclock = kvm_get_wallclock;
++		*(void **)&x86_platform.set_wallclock = kvm_set_wallclock;
+ #ifdef CONFIG_X86_LOCAL_APIC
+-		x86_cpuinit.setup_percpu_clockev =
++		*(void **)&x86_cpuinit.setup_percpu_clockev =
+ 			kvm_setup_secondary_clock;
+ #endif
+ #ifdef CONFIG_SMP
+-		smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
++		*(void **)&smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
+ #endif
+-		machine_ops.shutdown  = kvm_shutdown;
++		*(void **)&machine_ops.shutdown  = kvm_shutdown;
+ #ifdef CONFIG_KEXEC
+-		machine_ops.crash_shutdown  = kvm_crash_shutdown;
++		*(void **)&machine_ops.crash_shutdown  = kvm_crash_shutdown;
+ #endif
+ 		kvm_get_preset_lpj();
+ 		clocksource_register(&kvm_clock);
 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
 index ec6ef60..ab2c824 100644
 --- a/arch/x86/kernel/ldt.c
@@ -20659,6 +20863,19 @@ index 89f386f..9028f51 100644
  #if 0
  			if ((s64)val != *(s32 *)loc)
  				goto overflow;
+diff --git a/arch/x86/kernel/mrst.c b/arch/x86/kernel/mrst.c
+index 3b7078a..7367929 100644
+--- a/arch/x86/kernel/mrst.c
++++ b/arch/x86/kernel/mrst.c
+@@ -19,6 +19,6 @@
+  */
+ void __init x86_mrst_early_setup(void)
+ {
+-	x86_init.resources.probe_roms = x86_init_noop;
+-	x86_init.resources.reserve_resources = x86_init_noop;
++	*(void **)&x86_init.resources.probe_roms = x86_init_noop;
++	*(void **)&x86_init.resources.reserve_resources = x86_init_noop;
+ }
 diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
 index 3a7c5a4..9191528 100644
 --- a/arch/x86/kernel/paravirt-spinlocks.c
@@ -20673,7 +20890,7 @@ index 3a7c5a4..9191528 100644
  	.spin_is_locked = __ticket_spin_is_locked,
  	.spin_is_contended = __ticket_spin_is_contended,
 diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
-index 1b1739d..dea6077 100644
+index 1b1739d..e39fa7f 100644
 --- a/arch/x86/kernel/paravirt.c
 +++ b/arch/x86/kernel/paravirt.c
 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
@@ -20736,7 +20953,7 @@ index 1b1739d..dea6077 100644
  
  	return insn_len;
  }
-@@ -294,22 +303,22 @@ void arch_flush_lazy_mmu_mode(void)
+@@ -294,7 +303,7 @@ void arch_flush_lazy_mmu_mode(void)
  	preempt_enable();
  }
  
@@ -20745,16 +20962,7 @@ index 1b1739d..dea6077 100644
  	.name = "bare hardware",
  	.paravirt_enabled = 0,
  	.kernel_rpl = 0,
- 	.shared_kernel_pmd = 1,	/* Only used when CONFIG_X86_PAE is set */
- };
- 
--struct pv_init_ops pv_init_ops = {
-+struct pv_init_ops pv_init_ops __read_only = {
- 	.patch = native_patch,
- };
- 
--struct pv_time_ops pv_time_ops = {
-+struct pv_time_ops pv_time_ops __read_only = {
+@@ -309,7 +318,7 @@ struct pv_time_ops pv_time_ops = {
  	.sched_clock = native_sched_clock,
  };
  
@@ -20763,23 +20971,7 @@ index 1b1739d..dea6077 100644
  	.save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
  	.restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
  	.irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
-@@ -321,7 +330,7 @@ struct pv_irq_ops pv_irq_ops = {
- #endif
- };
- 
--struct pv_cpu_ops pv_cpu_ops = {
-+struct pv_cpu_ops pv_cpu_ops __read_only = {
- 	.cpuid = native_cpuid,
- 	.get_debugreg = native_get_debugreg,
- 	.set_debugreg = native_set_debugreg,
-@@ -382,21 +391,26 @@ struct pv_cpu_ops pv_cpu_ops = {
- 	.end_context_switch = paravirt_nop,
- };
- 
--struct pv_apic_ops pv_apic_ops = {
-+struct pv_apic_ops pv_apic_ops __read_only = {
- #ifdef CONFIG_X86_LOCAL_APIC
- 	.startup_ipi_hook = paravirt_nop,
+@@ -388,15 +397,20 @@ struct pv_apic_ops pv_apic_ops = {
  #endif
  };
  
@@ -21765,7 +21957,7 @@ index 3149032..14f1053 100644
  				return 0;
  			/* 64-bit mode: REX prefix */
 diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
-index dee1ff7..a397f7f 100644
+index dee1ff7..d0e3ef7 100644
 --- a/arch/x86/kernel/sys_i386_32.c
 +++ b/arch/x86/kernel/sys_i386_32.c
 @@ -24,6 +24,21 @@
@@ -21790,7 +21982,7 @@ index dee1ff7..a397f7f 100644
  /*
   * Perform the select(nd, in, out, ex, tv) and mmap() system
   * calls. Linux/i386 didn't use to be able to handle more than
-@@ -58,6 +73,212 @@ out:
+@@ -58,6 +73,214 @@ out:
  	return err;
  }
  
@@ -21801,6 +21993,7 @@ index dee1ff7..a397f7f 100644
 +	struct mm_struct *mm = current->mm;
 +	struct vm_area_struct *vma;
 +	unsigned long start_addr, pax_task_size = TASK_SIZE;
++	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
 +
 +#ifdef CONFIG_PAX_SEGMEXEC
 +	if (mm->pax_flags & MF_PAX_SEGMEXEC)
@@ -21823,7 +22016,7 @@ index dee1ff7..a397f7f 100644
 +		addr = PAGE_ALIGN(addr);
 +		if (pax_task_size - len >= addr) {
 +			vma = find_vma(mm, addr);
-+			if (check_heap_stack_gap(vma, addr, len))
++			if (check_heap_stack_gap(vma, addr, len, offset))
 +				return addr;
 +		}
 +	}
@@ -21865,7 +22058,7 @@ index dee1ff7..a397f7f 100644
 +			}
 +			return -ENOMEM;
 +		}
-+		if (check_heap_stack_gap(vma, addr, len))
++		if (check_heap_stack_gap(vma, addr, len, offset))
 +			break;
 +		if (addr + mm->cached_hole_size < vma->vm_start)
 +			mm->cached_hole_size = vma->vm_start - addr;
@@ -21892,6 +22085,7 @@ index dee1ff7..a397f7f 100644
 +	struct vm_area_struct *vma;
 +	struct mm_struct *mm = current->mm;
 +	unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
++	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
 +
 +#ifdef CONFIG_PAX_SEGMEXEC
 +	if (mm->pax_flags & MF_PAX_SEGMEXEC)
@@ -21921,7 +22115,7 @@ index dee1ff7..a397f7f 100644
 +		addr = PAGE_ALIGN(addr);
 +		if (pax_task_size - len >= addr) {
 +			vma = find_vma(mm, addr);
-+			if (check_heap_stack_gap(vma, addr, len))
++			if (check_heap_stack_gap(vma, addr, len, offset))
 +				return addr;
 +		}
 +	}
@@ -21938,7 +22132,7 @@ index dee1ff7..a397f7f 100644
 +	/* make sure it can fit in the remaining address space */
 +	if (addr > len) {
 +		vma = find_vma(mm, addr-len);
-+		if (check_heap_stack_gap(vma, addr - len, len))
++		if (check_heap_stack_gap(vma, addr - len, len, offset))
 +			/* remember the address as a hint for next time */
 +			return (mm->free_area_cache = addr-len);
 +	}
@@ -21955,7 +22149,7 @@ index dee1ff7..a397f7f 100644
 +		 * return with success:
 +		 */
 +		vma = find_vma(mm, addr);
-+		if (check_heap_stack_gap(vma, addr, len))
++		if (check_heap_stack_gap(vma, addr, len, offset))
 +			/* remember the address as a hint for next time */
 +			return (mm->free_area_cache = addr);
 +
@@ -21964,7 +22158,7 @@ index dee1ff7..a397f7f 100644
 +			mm->cached_hole_size = vma->vm_start - addr;
 +
 +		/* try just below the current vma->vm_start */
-+		addr = skip_heap_stack_gap(vma, len);
++		addr = skip_heap_stack_gap(vma, len, offset);
 +	} while (!IS_ERR_VALUE(addr));
 +
 +bottomup:
@@ -22003,7 +22197,7 @@ index dee1ff7..a397f7f 100644
  
  struct sel_arg_struct {
  	unsigned long n;
-@@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
+@@ -93,7 +316,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
  		return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
  	case SEMTIMEDOP:
  		return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
@@ -22012,7 +22206,7 @@ index dee1ff7..a397f7f 100644
  
  	case SEMGET:
  		return sys_semget(first, second, third);
-@@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
+@@ -140,7 +363,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
  			ret = do_shmat(first, (char __user *) ptr, second, &raddr);
  			if (ret)
  				return ret;
@@ -22021,7 +22215,7 @@ index dee1ff7..a397f7f 100644
  		}
  		case 1:	/* iBCS2 emulator entry point */
  			if (!segment_eq(get_fs(), get_ds()))
-@@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldold_utsname __user *name)
+@@ -207,17 +430,3 @@ asmlinkage int sys_olduname(struct oldold_utsname __user *name)
  
  	return error;
  }
@@ -22040,7 +22234,7 @@ index dee1ff7..a397f7f 100644
 -	return __res;
 -}
 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
-index 8aa2057..b604bc1 100644
+index 8aa2057..4db7318 100644
 --- a/arch/x86/kernel/sys_x86_64.c
 +++ b/arch/x86/kernel/sys_x86_64.c
 @@ -32,8 +32,8 @@ out:
@@ -22063,7 +22257,12 @@ index 8aa2057..b604bc1 100644
  		*end = TASK_SIZE;
  	}
  }
-@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -65,20 +65,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ 	struct vm_area_struct *vma;
+ 	unsigned long start_addr;
+ 	unsigned long begin, end;
++	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ 
  	if (flags & MAP_FIXED)
  		return addr;
  
@@ -22082,29 +22281,30 @@ index 8aa2057..b604bc1 100644
  		vma = find_vma(mm, addr);
 -		if (end - len >= addr &&
 -		    (!vma || addr + len <= vma->vm_start))
-+		if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
++		if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
  			return addr;
  	}
  	if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
-@@ -106,7 +109,7 @@ full_search:
+@@ -106,7 +110,7 @@ full_search:
  			}
  			return -ENOMEM;
  		}
 -		if (!vma || addr + len <= vma->vm_start) {
-+		if (check_heap_stack_gap(vma, addr, len)) {
++		if (check_heap_stack_gap(vma, addr, len, offset)) {
  			/*
  			 * Remember the place where we stopped the search:
  			 */
-@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -128,7 +132,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  {
  	struct vm_area_struct *vma;
  	struct mm_struct *mm = current->mm;
 -	unsigned long addr = addr0;
 +	unsigned long base = mm->mmap_base, addr = addr0;
++	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
  
  	/* requested length too big for entire address space */
  	if (len > TASK_SIZE)
-@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -141,13 +146,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  	if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
  		goto bottomup;
  
@@ -22121,42 +22321,42 @@ index 8aa2057..b604bc1 100644
 -			return addr;
 +		if (TASK_SIZE - len >= addr) {
 +			vma = find_vma(mm, addr);
-+			if (check_heap_stack_gap(vma, addr, len))
++			if (check_heap_stack_gap(vma, addr, len, offset))
 +				return addr;
 +		}
  	}
  
  	/* check if free_area_cache is useful for us */
-@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -162,7 +172,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  	/* make sure it can fit in the remaining address space */
  	if (addr > len) {
  		vma = find_vma(mm, addr-len);
 -		if (!vma || addr <= vma->vm_start)
-+		if (check_heap_stack_gap(vma, addr - len, len))
++		if (check_heap_stack_gap(vma, addr - len, len, offset))
  			/* remember the address as a hint for next time */
  			return mm->free_area_cache = addr-len;
  	}
-@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -179,7 +189,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  		 * return with success:
  		 */
  		vma = find_vma(mm, addr);
 -		if (!vma || addr+len <= vma->vm_start)
-+		if (check_heap_stack_gap(vma, addr, len))
++		if (check_heap_stack_gap(vma, addr, len, offset))
  			/* remember the address as a hint for next time */
  			return mm->free_area_cache = addr;
  
-@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -188,8 +198,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  			mm->cached_hole_size = vma->vm_start - addr;
  
  		/* try just below the current vma->vm_start */
 -		addr = vma->vm_start-len;
 -	} while (len < vma->vm_start);
-+		addr = skip_heap_stack_gap(vma, len);
++		addr = skip_heap_stack_gap(vma, len, offset);
 +	} while (!IS_ERR_VALUE(addr));
  
  bottomup:
  	/*
-@@ -198,13 +206,21 @@ bottomup:
+@@ -198,13 +208,21 @@ bottomup:
  	 * can happen with large stack limits and large mmap()
  	 * allocations.
  	 */
@@ -22881,7 +23081,7 @@ index 9c4e625..c992817 100644
  		goto cannot_handle;
  	if ((segoffs >> 16) == BIOSSEG)
 diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
-index d430e4c..831f817 100644
+index d430e4c..9efc93d 100644
 --- a/arch/x86/kernel/vmi_32.c
 +++ b/arch/x86/kernel/vmi_32.c
 @@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
@@ -23004,6 +23204,79 @@ index d430e4c..831f817 100644
  			return 1;
  		}
  	}
+@@ -604,9 +626,9 @@ do {								\
+ 	reloc = call_vrom_long_func(vmi_rom, get_reloc,		\
+ 				    VMI_CALL_##vmicall);	\
+ 	if (rel->type == VMI_RELOCATION_CALL_REL) 		\
+-		opname = (void *)rel->eip;			\
++		*(void **)&opname = (void *)rel->eip;		\
+ 	else if (rel->type == VMI_RELOCATION_NOP) 		\
+-		opname = (void *)vmi_nop;			\
++		*(void **)&opname = (void *)vmi_nop;		\
+ 	else if (rel->type != VMI_RELOCATION_NONE)		\
+ 		printk(KERN_WARNING "VMI: Unknown relocation "	\
+ 				    "type %d for " #vmicall"\n",\
+@@ -626,7 +648,7 @@ do {								\
+ 				    VMI_CALL_##vmicall);	\
+ 	BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL);		\
+ 	if (rel->type == VMI_RELOCATION_CALL_REL) {		\
+-		opname = wrapper;				\
++		*(void **)&opname = wrapper;			\
+ 		vmi_ops.cache = (void *)rel->eip;		\
+ 	}							\
+ } while (0)
+@@ -650,7 +672,7 @@ static inline int __init activate_vmi(void)
+ 	pv_info.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK;
+ 	pv_info.name = "vmi [deprecated]";
+ 
+-	pv_init_ops.patch = vmi_patch;
++	*(void **)&pv_init_ops.patch = vmi_patch;
+ 
+ 	/*
+ 	 * Many of these operations are ABI compatible with VMI.
+@@ -706,7 +728,7 @@ static inline int __init activate_vmi(void)
+ 	para_fill(pv_cpu_ops.store_gdt, GetGDT);
+ 	para_fill(pv_cpu_ops.store_idt, GetIDT);
+ 	para_fill(pv_cpu_ops.store_tr, GetTR);
+-	pv_cpu_ops.load_tls = vmi_load_tls;
++	*(void **)&pv_cpu_ops.load_tls = vmi_load_tls;
+ 	para_wrap(pv_cpu_ops.write_ldt_entry, vmi_write_ldt_entry,
+ 		  write_ldt_entry, WriteLDTEntry);
+ 	para_wrap(pv_cpu_ops.write_gdt_entry, vmi_write_gdt_entry,
+@@ -790,8 +812,8 @@ static inline int __init activate_vmi(void)
+ 	 * the backend.  They are performance critical anyway, so requiring
+ 	 * a patch is not a big problem.
+ 	 */
+-	pv_cpu_ops.irq_enable_sysexit = (void *)0xfeedbab0;
+-	pv_cpu_ops.iret = (void *)0xbadbab0;
++	*(void **)&pv_cpu_ops.irq_enable_sysexit = (void *)0xfeedbab0;
++	*(void **)&pv_cpu_ops.iret = (void *)0xbadbab0;
+ 
+ #ifdef CONFIG_SMP
+ 	para_wrap(pv_apic_ops.startup_ipi_hook, vmi_startup_ipi_hook, set_initial_ap_state, SetInitialAPState);
+@@ -817,15 +839,15 @@ static inline int __init activate_vmi(void)
+ 		vmi_timer_ops.set_alarm = vmi_get_function(VMI_CALL_SetAlarm);
+ 		vmi_timer_ops.cancel_alarm =
+ 			 vmi_get_function(VMI_CALL_CancelAlarm);
+-		x86_init.timers.timer_init = vmi_time_init;
++		*(void **)&x86_init.timers.timer_init = vmi_time_init;
+ #ifdef CONFIG_X86_LOCAL_APIC
+-		x86_init.timers.setup_percpu_clockev = vmi_time_bsp_init;
+-		x86_cpuinit.setup_percpu_clockev = vmi_time_ap_init;
++		*(void **)&x86_init.timers.setup_percpu_clockev = vmi_time_bsp_init;
++		*(void **)&x86_cpuinit.setup_percpu_clockev = vmi_time_ap_init;
+ #endif
+-		pv_time_ops.sched_clock = vmi_sched_clock;
+-		x86_platform.calibrate_tsc = vmi_tsc_khz;
+-		x86_platform.get_wallclock = vmi_get_wallclock;
+-		x86_platform.set_wallclock = vmi_set_wallclock;
++		*(void **)&pv_time_ops.sched_clock = vmi_sched_clock;
++		*(void **)&x86_platform.calibrate_tsc = vmi_tsc_khz;
++		*(void **)&x86_platform.get_wallclock = vmi_get_wallclock;
++		*(void **)&x86_platform.set_wallclock = vmi_set_wallclock;
+ 
+ 		/* We have true wallclock functions; disable CMOS clock sync */
+ 		no_sync_cmos_clock = 1;
 @@ -836,6 +858,11 @@ static inline int __init activate_vmi(void)
  
  	para_fill(pv_irq_ops.safe_halt, Halt);
@@ -23343,6 +23616,19 @@ index 3c68fe2..7a8c35b 100644
  	   "kernel image bigger than KERNEL_IMAGE_SIZE");
  
  #ifdef CONFIG_SMP
+diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
+index a1d804b..1ab845e 100644
+--- a/arch/x86/kernel/vsmp_64.c
++++ b/arch/x86/kernel/vsmp_64.c
+@@ -98,7 +98,7 @@ static void __init set_vsmp_pv_ops(void)
+ 		pv_irq_ops.irq_enable  = PV_CALLEE_SAVE(vsmp_irq_enable);
+ 		pv_irq_ops.save_fl  = PV_CALLEE_SAVE(vsmp_save_fl);
+ 		pv_irq_ops.restore_fl  = PV_CALLEE_SAVE(vsmp_restore_fl);
+-		pv_init_ops.patch = vsmp_patch;
++		*(void **)&pv_init_ops.patch = vsmp_patch;
+ 
+ 		ctl &= ~(1 << 4);
+ 		writel(ctl, address + 4);
 diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
 index 62f39d7..3bc46a1 100644
 --- a/arch/x86/kernel/vsyscall_64.c
@@ -23708,7 +23994,7 @@ index 271fddf..ea708b4 100644
  	if (kvm_x86_ops) {
  		printk(KERN_ERR "kvm: already loaded the other module\n");
 diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
-index 7e59dc1..80b8a7b 100644
+index 7e59dc1..6673211 100644
 --- a/arch/x86/lguest/boot.c
 +++ b/arch/x86/lguest/boot.c
 @@ -1088,12 +1088,12 @@ static u32 lguest_apic_safe_wait_icr_idle(void)
@@ -23742,6 +24028,81 @@ index 7e59dc1..80b8a7b 100644
  }
  
  /*G:050
+@@ -1264,28 +1265,28 @@ __init void lguest_init(void)
+ 	pv_irq_ops.safe_halt = lguest_safe_halt;
+ 
+ 	/* Setup operations */
+-	pv_init_ops.patch = lguest_patch;
++	*(void **)&pv_init_ops.patch = lguest_patch;
+ 
+ 	/* Intercepts of various CPU instructions */
+-	pv_cpu_ops.load_gdt = lguest_load_gdt;
+-	pv_cpu_ops.cpuid = lguest_cpuid;
+-	pv_cpu_ops.load_idt = lguest_load_idt;
+-	pv_cpu_ops.iret = lguest_iret;
+-	pv_cpu_ops.load_sp0 = lguest_load_sp0;
+-	pv_cpu_ops.load_tr_desc = lguest_load_tr_desc;
+-	pv_cpu_ops.set_ldt = lguest_set_ldt;
+-	pv_cpu_ops.load_tls = lguest_load_tls;
+-	pv_cpu_ops.set_debugreg = lguest_set_debugreg;
+-	pv_cpu_ops.clts = lguest_clts;
+-	pv_cpu_ops.read_cr0 = lguest_read_cr0;
+-	pv_cpu_ops.write_cr0 = lguest_write_cr0;
+-	pv_cpu_ops.read_cr4 = lguest_read_cr4;
+-	pv_cpu_ops.write_cr4 = lguest_write_cr4;
+-	pv_cpu_ops.write_gdt_entry = lguest_write_gdt_entry;
+-	pv_cpu_ops.write_idt_entry = lguest_write_idt_entry;
+-	pv_cpu_ops.wbinvd = lguest_wbinvd;
+-	pv_cpu_ops.start_context_switch = paravirt_start_context_switch;
+-	pv_cpu_ops.end_context_switch = lguest_end_context_switch;
++	*(void **)&pv_cpu_ops.load_gdt = lguest_load_gdt;
++	*(void **)&pv_cpu_ops.cpuid = lguest_cpuid;
++	*(void **)&pv_cpu_ops.load_idt = lguest_load_idt;
++	*(void **)&pv_cpu_ops.iret = lguest_iret;
++	*(void **)&pv_cpu_ops.load_sp0 = lguest_load_sp0;
++	*(void **)&pv_cpu_ops.load_tr_desc = lguest_load_tr_desc;
++	*(void **)&pv_cpu_ops.set_ldt = lguest_set_ldt;
++	*(void **)&pv_cpu_ops.load_tls = lguest_load_tls;
++	*(void **)&pv_cpu_ops.set_debugreg = lguest_set_debugreg;
++	*(void **)&pv_cpu_ops.clts = lguest_clts;
++	*(void **)&pv_cpu_ops.read_cr0 = lguest_read_cr0;
++	*(void **)&pv_cpu_ops.write_cr0 = lguest_write_cr0;
++	*(void **)&pv_cpu_ops.read_cr4 = lguest_read_cr4;
++	*(void **)&pv_cpu_ops.write_cr4 = lguest_write_cr4;
++	*(void **)&pv_cpu_ops.write_gdt_entry = lguest_write_gdt_entry;
++	*(void **)&pv_cpu_ops.write_idt_entry = lguest_write_idt_entry;
++	*(void **)&pv_cpu_ops.wbinvd = lguest_wbinvd;
++	*(void **)&pv_cpu_ops.start_context_switch = paravirt_start_context_switch;
++	*(void **)&pv_cpu_ops.end_context_switch = lguest_end_context_switch;
+ 
+ 	/* Pagetable management */
+ 	pv_mmu_ops.write_cr3 = lguest_write_cr3;
+@@ -1313,11 +1314,11 @@ __init void lguest_init(void)
+ 	set_lguest_basic_apic_ops();
+ #endif
+ 
+-	x86_init.resources.memory_setup = lguest_memory_setup;
+-	x86_init.irqs.intr_init = lguest_init_IRQ;
+-	x86_init.timers.timer_init = lguest_time_init;
+-	x86_platform.calibrate_tsc = lguest_tsc_khz;
+-	x86_platform.get_wallclock =  lguest_get_wallclock;
++	*(void **)&x86_init.resources.memory_setup = lguest_memory_setup;
++	*(void **)&x86_init.irqs.intr_init = lguest_init_IRQ;
++	*(void **)&x86_init.timers.timer_init = lguest_time_init;
++	*(void **)&x86_platform.calibrate_tsc = lguest_tsc_khz;
++	*(void **)&x86_platform.get_wallclock =  lguest_get_wallclock;
+ 
+ 	/*
+ 	 * Now is a good time to look at the implementations of these functions
+@@ -1410,7 +1411,7 @@ __init void lguest_init(void)
+ 	 * routine.
+ 	 */
+ 	pm_power_off = lguest_power_off;
+-	machine_ops.restart = lguest_restart;
++	*(void **)&machine_ops.restart = lguest_restart;
+ 
+ 	/*
+ 	 * Now we're set up, call i386_start_kernel() in head32.c and we proceed
 diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
 index 824fa0b..c619e96 100644
 --- a/arch/x86/lib/atomic64_32.c
@@ -27049,15 +27410,16 @@ index 63a6ba6..79abd7a 100644
  	return (void *)vaddr;
  }
 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
-index f46c3407..f7e72b0 100644
+index f46c3407..c56a9d1 100644
 --- a/arch/x86/mm/hugetlbpage.c
 +++ b/arch/x86/mm/hugetlbpage.c
-@@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
+@@ -267,13 +267,21 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
  	struct hstate *h = hstate_file(file);
  	struct mm_struct *mm = current->mm;
  	struct vm_area_struct *vma;
 -	unsigned long start_addr;
 +	unsigned long start_addr, pax_task_size = TASK_SIZE;
++	unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
 +
 +#ifdef CONFIG_PAX_SEGMEXEC
 +	if (mm->pax_flags & MF_PAX_SEGMEXEC)
@@ -27077,7 +27439,7 @@ index f46c3407..f7e72b0 100644
  	}
  
  full_search:
-@@ -281,26 +288,27 @@ full_search:
+@@ -281,26 +289,27 @@ full_search:
  
  	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
  		/* At this point:  (!vma || addr < vma->vm_end). */
@@ -27100,7 +27462,7 @@ index f46c3407..f7e72b0 100644
 -			mm->free_area_cache = addr + len;
 -			return addr;
 -		}
-+		if (check_heap_stack_gap(vma, addr, len))
++		if (check_heap_stack_gap(vma, addr, len, offset))
 +			break;
  		if (addr + mm->cached_hole_size < vma->vm_start)
  		        mm->cached_hole_size = vma->vm_start - addr;
@@ -27112,7 +27474,7 @@ index f46c3407..f7e72b0 100644
  }
  
  static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
-@@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
+@@ -309,10 +318,10 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
  {
  	struct hstate *h = hstate_file(file);
  	struct mm_struct *mm = current->mm;
@@ -27122,10 +27484,11 @@ index f46c3407..f7e72b0 100644
 +	unsigned long base = mm->mmap_base, addr;
  	unsigned long largest_hole = mm->cached_hole_size;
 -	int first_time = 1;
++	unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
  
  	/* don't allow allocations above current base */
  	if (mm->free_area_cache > base)
-@@ -322,64 +329,68 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
+@@ -322,64 +331,68 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
  	        largest_hole = 0;
  		mm->free_area_cache  = base;
  	}
@@ -27155,7 +27518,7 @@ index f46c3407..f7e72b0 100644
  		 */
 -		if (addr + len <= vma->vm_start &&
 -		            (!prev_vma || (addr >= prev_vma->vm_end))) {
-+		if (check_heap_stack_gap(vma, addr, len)) {
++		if (check_heap_stack_gap(vma, addr, len, offset)) {
  			/* remember the address as a hint for next time */
 -		        mm->cached_hole_size = largest_hole;
 -		        return (mm->free_area_cache = addr);
@@ -27182,7 +27545,7 @@ index f46c3407..f7e72b0 100644
  		/* try just below the current vma->vm_start */
 -		addr = (vma->vm_start - len) & huge_page_mask(h);
 -	} while (len <= vma->vm_start);
-+		addr = skip_heap_stack_gap(vma, len);
++		addr = skip_heap_stack_gap(vma, len, offset);
 +	} while (!IS_ERR_VALUE(addr));
  
  fail:
@@ -27221,7 +27584,7 @@ index f46c3407..f7e72b0 100644
  	mm->cached_hole_size = ~0UL;
  	addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
  			len, pgoff, flags);
-@@ -387,6 +398,7 @@ fail:
+@@ -387,6 +400,7 @@ fail:
  	/*
  	 * Restore the topdown base:
  	 */
@@ -27229,11 +27592,12 @@ index f46c3407..f7e72b0 100644
  	mm->free_area_cache = base;
  	mm->cached_hole_size = ~0UL;
  
-@@ -400,10 +412,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+@@ -400,10 +414,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
  	struct hstate *h = hstate_file(file);
  	struct mm_struct *mm = current->mm;
  	struct vm_area_struct *vma;
 +	unsigned long pax_task_size = TASK_SIZE;
++	unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
  
  	if (len & ~huge_page_mask(h))
  		return -EINVAL;
@@ -27250,13 +27614,13 @@ index f46c3407..f7e72b0 100644
  		return -ENOMEM;
  
  	if (flags & MAP_FIXED) {
-@@ -415,8 +436,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+@@ -415,8 +439,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
  	if (addr) {
  		addr = ALIGN(addr, huge_page_size(h));
  		vma = find_vma(mm, addr);
 -		if (TASK_SIZE - len >= addr &&
 -		    (!vma || addr + len <= vma->vm_start))
-+		if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
++		if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
  			return addr;
  	}
  	if (mm->get_unmapped_area == arch_get_unmapped_area)
@@ -27880,7 +28244,7 @@ index 84e236c..69bd3f6 100644
  
  	return (void *)vaddr;
 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
-index 2feb9bd..ab91e7b 100644
+index 2feb9bd..4d4be88 100644
 --- a/arch/x86/mm/ioremap.c
 +++ b/arch/x86/mm/ioremap.c
 @@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
@@ -27910,6 +28274,15 @@ index 2feb9bd..ab91e7b 100644
  			return NULL;
  		WARN_ON_ONCE(is_ram);
  	}
+@@ -319,7 +316,7 @@ EXPORT_SYMBOL(ioremap_prot);
+  *
+  * Caller must ensure there is only one unmapping for the same pointer.
+  */
+-void iounmap(volatile void __iomem *addr)
++void iounmap(const volatile void __iomem *addr)
+ {
+ 	struct vm_struct *p, *o;
+ 
 @@ -378,6 +375,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
  
  	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
@@ -28041,7 +28414,7 @@ index c9e57af..07a321b 100644
  		mm->unmap_area = arch_unmap_area_topdown;
  	}
 diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
-index 132772a..b961f11 100644
+index 132772a..3c81fd2 100644
 --- a/arch/x86/mm/mmio-mod.c
 +++ b/arch/x86/mm/mmio-mod.c
 @@ -193,7 +193,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
@@ -28071,6 +28444,24 @@ index 132772a..b961f11 100644
  	};
  	map.map_id = trace->id;
  
+@@ -289,7 +289,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
+ 	ioremap_trace_core(offset, size, addr);
+ }
+ 
+-static void iounmap_trace_core(volatile void __iomem *addr)
++static void iounmap_trace_core(const volatile void __iomem *addr)
+ {
+ 	struct mmiotrace_map map = {
+ 		.phys = 0,
+@@ -327,7 +327,7 @@ not_enabled:
+ 	}
+ }
+ 
+-void mmiotrace_iounmap(volatile void __iomem *addr)
++void mmiotrace_iounmap(const volatile void __iomem *addr)
+ {
+ 	might_sleep();
+ 	if (is_enabled()) /* recheck and proper locking in *_core() */
 diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
 index d253006..e56dd6a 100644
 --- a/arch/x86/mm/numa_32.c
@@ -29440,7 +29831,7 @@ index 21e1aeb..2c0b3c4 100644
 -}
 -__setup("vdso=", vdso_setup);
 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
-index d52f895..ba03036 100644
+index d52f895..499ef73 100644
 --- a/arch/x86/xen/enlighten.c
 +++ b/arch/x86/xen/enlighten.c
 @@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
@@ -29452,7 +29843,27 @@ index d52f895..ba03036 100644
  /*
   * Point at some empty memory to start with. We map the real shared_info
   * page as soon as fixmap is up and running.
-@@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
+@@ -336,8 +334,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
+ {
+ 	unsigned long va = dtr->address;
+ 	unsigned int size = dtr->size + 1;
+-	unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+-	unsigned long frames[pages];
++	unsigned long frames[65536 / PAGE_SIZE];
+ 	int f;
+ 
+ 	/*
+@@ -385,8 +382,7 @@ static __init void xen_load_gdt_boot(const struct desc_ptr *dtr)
+ {
+ 	unsigned long va = dtr->address;
+ 	unsigned int size = dtr->size + 1;
+-	unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+-	unsigned long frames[pages];
++	unsigned long frames[65536 / PAGE_SIZE];
+ 	int f;
+ 
+ 	/*
+@@ -548,7 +544,7 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
  
  	preempt_disable();
  
@@ -29461,7 +29872,7 @@ index d52f895..ba03036 100644
  	end = start + __get_cpu_var(idt_desc).size + 1;
  
  	xen_mc_flush();
-@@ -718,12 +716,12 @@ static u32 xen_safe_apic_wait_icr_idle(void)
+@@ -718,12 +714,12 @@ static u32 xen_safe_apic_wait_icr_idle(void)
  
  static void set_xen_basic_apic_ops(void)
  {
@@ -29480,7 +29891,7 @@ index d52f895..ba03036 100644
  }
  
  #endif
-@@ -996,7 +994,7 @@ static const struct pv_apic_ops xen_apic_ops __initdata = {
+@@ -996,7 +992,7 @@ static const struct pv_apic_ops xen_apic_ops __initdata = {
  #endif
  };
  
@@ -29489,7 +29900,7 @@ index d52f895..ba03036 100644
  {
  	struct sched_shutdown r = { .reason = reason };
  
-@@ -1004,17 +1002,17 @@ static void xen_reboot(int reason)
+@@ -1004,17 +1000,17 @@ static void xen_reboot(int reason)
  		BUG();
  }
  
@@ -29510,7 +29921,62 @@ index d52f895..ba03036 100644
  {
  	xen_reboot(SHUTDOWN_poweroff);
  }
-@@ -1098,9 +1096,20 @@ asmlinkage void __init xen_start_kernel(void)
+@@ -1040,14 +1036,14 @@ static const struct machine_ops __initdata xen_machine_ops = {
+  */
+ static void __init xen_setup_stackprotector(void)
+ {
+-	pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
+-	pv_cpu_ops.load_gdt = xen_load_gdt_boot;
++	*(void **)&pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
++	*(void **)&pv_cpu_ops.load_gdt = xen_load_gdt_boot;
+ 
+ 	setup_stack_canary_segment(0);
+ 	switch_to_new_gdt(0);
+ 
+-	pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
+-	pv_cpu_ops.load_gdt = xen_load_gdt;
++	*(void **)&pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
++	*(void **)&pv_cpu_ops.load_gdt = xen_load_gdt;
+ }
+ 
+ /* First C function to be called on Xen boot */
+@@ -1062,22 +1058,22 @@ asmlinkage void __init xen_start_kernel(void)
+ 
+ 	/* Install Xen paravirt ops */
+ 	pv_info = xen_info;
+-	pv_init_ops = xen_init_ops;
+-	pv_time_ops = xen_time_ops;
+-	pv_cpu_ops = xen_cpu_ops;
+-	pv_apic_ops = xen_apic_ops;
++	memcpy((void *)&pv_init_ops, &xen_init_ops, sizeof pv_init_ops);
++	memcpy((void *)&pv_time_ops, &xen_time_ops, sizeof pv_time_ops);
++	memcpy((void *)&pv_cpu_ops, &xen_cpu_ops, sizeof pv_cpu_ops);
++	memcpy((void *)&pv_apic_ops, &xen_apic_ops, sizeof pv_apic_ops);
+ 
+-	x86_init.resources.memory_setup = xen_memory_setup;
+-	x86_init.oem.arch_setup = xen_arch_setup;
+-	x86_init.oem.banner = xen_banner;
++	*(void **)&x86_init.resources.memory_setup = xen_memory_setup;
++	*(void **)&x86_init.oem.arch_setup = xen_arch_setup;
++	*(void **)&x86_init.oem.banner = xen_banner;
+ 
+-	x86_init.timers.timer_init = xen_time_init;
+-	x86_init.timers.setup_percpu_clockev = x86_init_noop;
+-	x86_cpuinit.setup_percpu_clockev = x86_init_noop;
++	*(void **)&x86_init.timers.timer_init = xen_time_init;
++	*(void **)&x86_init.timers.setup_percpu_clockev = x86_init_noop;
++	*(void **)&x86_cpuinit.setup_percpu_clockev = x86_init_noop;
+ 
+-	x86_platform.calibrate_tsc = xen_tsc_khz;
+-	x86_platform.get_wallclock = xen_get_wallclock;
+-	x86_platform.set_wallclock = xen_set_wallclock;
++	*(void **)&x86_platform.calibrate_tsc = xen_tsc_khz;
++	*(void **)&x86_platform.get_wallclock = xen_get_wallclock;
++	*(void **)&x86_platform.set_wallclock = xen_set_wallclock;
+ 
+ 	/*
+ 	 * Set up some pagetable state before starting to set any ptes.
+@@ -1098,9 +1094,20 @@ asmlinkage void __init xen_start_kernel(void)
  	 */
  	__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
  
@@ -29533,22 +29999,35 @@ index d52f895..ba03036 100644
  #endif
  
  	xen_setup_features();
-@@ -1132,13 +1141,6 @@ asmlinkage void __init xen_start_kernel(void)
- 
- 	machine_ops = xen_machine_ops;
+@@ -1130,14 +1137,7 @@ asmlinkage void __init xen_start_kernel(void)
+ 		pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
+ 	}
  
+-	machine_ops = xen_machine_ops;
+-
 -	/*
 -	 * The only reliable way to retain the initial address of the
 -	 * percpu gdt_page is to remember it here, so we can go and
 -	 * mark it RW later, when the initial percpu area is freed.
 -	 */
 -	xen_initial_gdt = &per_cpu(gdt_page, 0);
--
++	memcpy((void *)&machine_ops, &xen_machine_ops, sizeof machine_ops);
+ 
  	xen_smp_init();
  
- 	pgd = (pgd_t *)xen_start_info->pt_base;
+diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
+index 9d30105..6774081 100644
+--- a/arch/x86/xen/irq.c
++++ b/arch/x86/xen/irq.c
+@@ -129,5 +129,5 @@ static const struct pv_irq_ops xen_irq_ops __initdata = {
+ void __init xen_init_irq_ops()
+ {
+ 	pv_irq_ops = xen_irq_ops;
+-	x86_init.irqs.intr_init = xen_init_IRQ;
++	*(void **)&x86_init.irqs.intr_init = xen_init_IRQ;
+ }
 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
-index 8f4452c..38873e5 100644
+index 8f4452c..3dd48c4 100644
 --- a/arch/x86/xen/mmu.c
 +++ b/arch/x86/xen/mmu.c
 @@ -1717,6 +1717,9 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
@@ -29589,6 +30068,17 @@ index 8f4452c..38873e5 100644
  
  	.alloc_pud = xen_alloc_pmd_init,
  	.release_pud = xen_release_pmd_init,
+@@ -1963,8 +1972,8 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
+ 
+ void __init xen_init_mmu_ops(void)
+ {
+-	x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start;
+-	x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
++	*(void **)&x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start;
++	*(void **)&x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
+ 	pv_mmu_ops = xen_mmu_ops;
+ }
+ 
 diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
 index ad0047f..cfec0c4 100644
 --- a/arch/x86/xen/setup.c
@@ -29610,7 +30100,7 @@ index ad0047f..cfec0c4 100644
 +#endif
  }
 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
-index a96204a..fca9b8e 100644
+index a96204a..4d2ebba 100644
 --- a/arch/x86/xen/smp.c
 +++ b/arch/x86/xen/smp.c
 @@ -168,11 +168,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
@@ -29657,6 +30147,38 @@ index a96204a..fca9b8e 100644
  #endif
  	xen_setup_runstate_info(cpu);
  	xen_setup_timer(cpu);
+@@ -485,7 +479,7 @@ static const struct smp_ops xen_smp_ops __initdata = {
+ 
+ void __init xen_smp_init(void)
+ {
+-	smp_ops = xen_smp_ops;
++	memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
+ 	xen_fill_possible_map();
+ 	xen_init_spinlocks();
+ }
+diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
+index 36a5141..4ef9c78 100644
+--- a/arch/x86/xen/spinlock.c
++++ b/arch/x86/xen/spinlock.c
+@@ -372,12 +372,12 @@ void xen_uninit_lock_cpu(int cpu)
+ 
+ void __init xen_init_spinlocks(void)
+ {
+-	pv_lock_ops.spin_is_locked = xen_spin_is_locked;
+-	pv_lock_ops.spin_is_contended = xen_spin_is_contended;
+-	pv_lock_ops.spin_lock = xen_spin_lock;
+-	pv_lock_ops.spin_lock_flags = xen_spin_lock_flags;
+-	pv_lock_ops.spin_trylock = xen_spin_trylock;
+-	pv_lock_ops.spin_unlock = xen_spin_unlock;
++	*(void **)&pv_lock_ops.spin_is_locked = xen_spin_is_locked;
++	*(void **)&pv_lock_ops.spin_is_contended = xen_spin_is_contended;
++	*(void **)&pv_lock_ops.spin_lock = xen_spin_lock;
++	*(void **)&pv_lock_ops.spin_lock_flags = xen_spin_lock_flags;
++	*(void **)&pv_lock_ops.spin_trylock = xen_spin_trylock;
++	*(void **)&pv_lock_ops.spin_unlock = xen_spin_unlock;
+ }
+ 
+ #ifdef CONFIG_XEN_DEBUG_FS
 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
 index 9a95a9c..4f39e774 100644
 --- a/arch/x86/xen/xen-asm_32.S
@@ -41810,6 +42332,52 @@ index a5d585d..d087be3 100644
  	.show = kobj_pkt_show,
  	.store = kobj_pkt_store
  };
+diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
+index a4592ec..432659a 100644
+--- a/drivers/cdrom/cdrom.c
++++ b/drivers/cdrom/cdrom.c
+@@ -410,7 +410,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
+ 	ENSURE(reset, CDC_RESET);
+ 	ENSURE(generic_packet, CDC_GENERIC_PACKET);
+ 	cdi->mc_flags = 0;
+-	cdo->n_minors = 0;
+         cdi->options = CDO_USE_FFLAGS;
+ 	
+ 	if (autoclose==1 && CDROM_CAN(CDC_CLOSE_TRAY))
+@@ -430,8 +429,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
+ 	else
+ 		cdi->cdda_method = CDDA_OLD;
+ 
+-	if (!cdo->generic_packet)
+-		cdo->generic_packet = cdrom_dummy_generic_packet;
++	if (!cdo->generic_packet) {
++		pax_open_kernel();
++		*(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
++		pax_close_kernel();
++	}
+ 
+ 	cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
+ 	mutex_lock(&cdrom_mutex);
+@@ -452,7 +454,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
+ 	if (cdi->exit)
+ 		cdi->exit(cdi);
+ 
+-	cdi->ops->n_minors--;
+ 	cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
+ }
+ 
+diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
+index a762283..57cb232 100644
+--- a/drivers/cdrom/gdrom.c
++++ b/drivers/cdrom/gdrom.c
+@@ -487,7 +487,6 @@ static struct cdrom_device_ops gdrom_ops = {
+ 	.audio_ioctl		= gdrom_audio_ioctl,
+ 	.capability		= CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
+ 				  CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
+-	.n_minors		= 1,
+ };
+ 
+ static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
 index 6aad99e..89cd142 100644
 --- a/drivers/char/Kconfig
@@ -42652,7 +43220,7 @@ index 62f282e..e45c45c 100644
  	cdev_init(&ptmx_cdev, &ptmx_fops);
  	if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
 diff --git a/drivers/char/random.c b/drivers/char/random.c
-index 446b20a..d0e60f5 100644
+index 446b20a..710568a 100644
 --- a/drivers/char/random.c
 +++ b/drivers/char/random.c
 @@ -269,8 +269,13 @@
@@ -42706,25 +43274,7 @@ index 446b20a..d0e60f5 100644
  	smp_wmb();
  
  	if (out)
-@@ -784,6 +797,17 @@ void add_disk_randomness(struct gendisk *disk)
- }
- #endif
- 
-+#ifdef CONFIG_PAX_LATENT_ENTROPY
-+u64 latent_entropy;
-+
-+__init void transfer_latent_entropy(void)
-+{
-+	mix_pool_bytes(&input_pool, &latent_entropy, sizeof(latent_entropy), NULL);
-+	mix_pool_bytes(&nonblocking_pool, &latent_entropy, sizeof(latent_entropy), NULL);
-+//	printk(KERN_INFO "PAX: transferring latent entropy: %16llx\n", latent_entropy);
-+}
-+#endif
-+
- /*********************************************************************
-  *
-  * Entropy extraction routines
-@@ -942,6 +966,10 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+@@ -942,6 +955,10 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
  	ssize_t ret = 0, i;
  	__u8 tmp[EXTRACT_SIZE];
  
@@ -42735,7 +43285,7 @@ index 446b20a..d0e60f5 100644
  	xfer_secondary_pool(r, nbytes);
  	nbytes = account(r, nbytes, min, reserved);
  
-@@ -951,6 +979,17 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+@@ -951,6 +968,17 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
  		if (fips_enabled) {
  			unsigned long flags;
  
@@ -42753,7 +43303,7 @@ index 446b20a..d0e60f5 100644
  			spin_lock_irqsave(&r->lock, flags);
  			if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
  				panic("Hardware RNG duplicated output!\n");
-@@ -1015,7 +1054,21 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
+@@ -1015,7 +1043,21 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
   */
  void get_random_bytes(void *buf, int nbytes)
  {
@@ -42776,7 +43326,7 @@ index 446b20a..d0e60f5 100644
  }
  EXPORT_SYMBOL(get_random_bytes);
  
-@@ -1068,6 +1121,7 @@ static void init_std_data(struct entropy_store *r)
+@@ -1068,6 +1110,7 @@ static void init_std_data(struct entropy_store *r)
  
  	r->entropy_count = 0;
  	r->entropy_total = 0;
@@ -42784,7 +43334,7 @@ index 446b20a..d0e60f5 100644
  	mix_pool_bytes(r, &now, sizeof(now), NULL);
  	for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof(rv)) {
  		if (!arch_get_random_long(&rv))
-@@ -1322,7 +1376,7 @@ EXPORT_SYMBOL(generate_random_uuid);
+@@ -1322,7 +1365,7 @@ EXPORT_SYMBOL(generate_random_uuid);
  #include <linux/sysctl.h>
  
  static int min_read_thresh = 8, min_write_thresh;
@@ -42793,7 +43343,7 @@ index 446b20a..d0e60f5 100644
  static int max_write_thresh = INPUT_POOL_WORDS * 32;
  static char sysctl_bootid[16];
  
-@@ -1397,6 +1451,7 @@ static int uuid_strategy(ctl_table *table,
+@@ -1397,6 +1440,7 @@ static int uuid_strategy(ctl_table *table,
  }
  
  static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
@@ -42801,7 +43351,7 @@ index 446b20a..d0e60f5 100644
  ctl_table random_table[] = {
  	{
  		.ctl_name 	= RANDOM_POOLSIZE,
-@@ -1472,7 +1527,7 @@ late_initcall(random_int_secret_init);
+@@ -1472,7 +1516,7 @@ late_initcall(random_int_secret_init);
   * value is not cryptographically secure but for several uses the cost of
   * depleting entropy is too high
   */
@@ -44386,18 +44936,9 @@ index 5449239..7e4f68d 100644
  	.open = drm_gem_vm_open,
  	.close = drm_gem_vm_close,
 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
-index 97163f7..c24c7c7 100644
+index 97163f7..65574ff 100644
 --- a/drivers/gpu/drm/i915/i915_drv.h
 +++ b/drivers/gpu/drm/i915/i915_drv.h
-@@ -168,7 +168,7 @@ struct drm_i915_display_funcs {
- 	/* display clock increase/decrease */
- 	/* pll clock increase/decrease */
- 	/* clock gating init */
--};
-+} __no_const;
- 
- typedef struct drm_i915_private {
- 	struct drm_device *dev;
 @@ -197,7 +197,7 @@ typedef struct drm_i915_private {
  	int page_flipping;
  
@@ -44669,8 +45210,42 @@ index 0d79577..efaa7a5 100644
  
  	if (regcomp
  	    (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
+diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
+index a2ae151..0017d7b 100644
+--- a/drivers/gpu/drm/radeon/r100.c
++++ b/drivers/gpu/drm/radeon/r100.c
+@@ -89,8 +89,10 @@ int r100_pci_gart_init(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
+-	rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
+-	rdev->asic->gart_set_page = &r100_pci_gart_set_page;
++	pax_open_kernel();
++	*(void **)&rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
++	*(void **)&rdev->asic->gart_set_page = &r100_pci_gart_set_page;
++	pax_close_kernel();
+ 	return radeon_gart_table_ram_alloc(rdev);
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
+index d8c4f72..20936ed 100644
+--- a/drivers/gpu/drm/radeon/r300.c
++++ b/drivers/gpu/drm/radeon/r300.c
+@@ -91,8 +91,10 @@ int rv370_pcie_gart_init(struct radeon_device *rdev)
+ 	if (r)
+ 		DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
+ 	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
+-	rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
+-	rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
++	pax_open_kernel();
++	*(void **)&rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
++	*(void **)&rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
++	pax_close_kernel();
+ 	return radeon_gart_table_vram_alloc(rdev);
+ }
+ 
 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
-index 6735213..38c2c67 100644
+index 6735213..c6ca8e6 100644
 --- a/drivers/gpu/drm/radeon/radeon.h
 +++ b/drivers/gpu/drm/radeon/radeon.h
 @@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device *rdev);
@@ -44682,15 +45257,6 @@ index 6735213..38c2c67 100644
  	uint32_t			last_seq;
  	unsigned long			count_timeout;
  	wait_queue_head_t		queue;
-@@ -640,7 +640,7 @@ struct radeon_asic {
- 			       uint32_t offset, uint32_t obj_size);
- 	int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
- 	void (*bandwidth_update)(struct radeon_device *rdev);
--};
-+} __no_const;
- 
- /*
-  * Asic structures
 diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
 index 4e928b9..d8b6008 100644
 --- a/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -44728,6 +45294,45 @@ index 4e928b9..d8b6008 100644
  
  	atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
  
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index 3db54e9..ccfa400 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -322,8 +322,10 @@ int radeon_asic_init(struct radeon_device *rdev)
+ 	case CHIP_RV380:
+ 		rdev->asic = &r300_asic;
+ 		if (rdev->flags & RADEON_IS_PCIE) {
+-			rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
+-			rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
++			pax_open_kernel();
++			*(void **)&rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
++			*(void **)&rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
++			pax_close_kernel();
+ 		}
+ 		break;
+ 	case CHIP_R420:
+@@ -506,13 +508,17 @@ void radeon_agp_disable(struct radeon_device *rdev)
+ 			rdev->family == CHIP_R423) {
+ 		DRM_INFO("Forcing AGP to PCIE mode\n");
+ 		rdev->flags |= RADEON_IS_PCIE;
+-		rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
+-		rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
++		pax_open_kernel();
++		*(void **)&rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
++		*(void **)&rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
++		pax_close_kernel();
+ 	} else {
+ 		DRM_INFO("Forcing AGP to PCI mode\n");
+ 		rdev->flags |= RADEON_IS_PCI;
+-		rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
+-		rdev->asic->gart_set_page = &r100_pci_gart_set_page;
++		pax_open_kernel();
++		*(void **)&rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
++		*(void **)&rdev->asic->gart_set_page = &r100_pci_gart_set_page;
++		pax_close_kernel();
+ 	}
+ }
+ 
 diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
 index 083a181..ccccae0 100644
 --- a/drivers/gpu/drm/radeon/radeon_display.c
@@ -46879,19 +47484,6 @@ index d36a4c0..11e7d1a 100644
  	DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
  
  	for (x = 0; x < MAX_DESCRIPTORS; x++) {
-diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
-index 85784a7..a19ca98 100644
---- a/drivers/isdn/hardware/eicon/divasync.h
-+++ b/drivers/isdn/hardware/eicon/divasync.h
-@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
- } diva_didd_add_adapter_t;
- typedef struct _diva_didd_remove_adapter {
-  IDI_CALL p_request;
--} diva_didd_remove_adapter_t;
-+} __no_const diva_didd_remove_adapter_t;
- typedef struct _diva_didd_read_adapter_array {
-  void   * buffer;
-  dword length;
 diff --git a/drivers/isdn/hardware/eicon/idifunc.c b/drivers/isdn/hardware/eicon/idifunc.c
 index db87d51..7d09acf 100644
 --- a/drivers/isdn/hardware/eicon/idifunc.c
@@ -46958,19 +47550,6 @@ index a564b75..f3cf8b5 100644
  	DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
  
  	for (x = 0; x < MAX_DESCRIPTORS; x++) {
-diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
-index a3bd163..8956575 100644
---- a/drivers/isdn/hardware/eicon/xdi_adapter.h
-+++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
-@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
- typedef struct _diva_os_idi_adapter_interface {
- 	diva_init_card_proc_t cleanup_adapter_proc;
- 	diva_cmd_card_proc_t cmd_proc;
--} diva_os_idi_adapter_interface_t;
-+} __no_const diva_os_idi_adapter_interface_t;
- 
- typedef struct _diva_os_xdi_adapter {
- 	struct list_head link;
 diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
 index adb1e8c..21b590b 100644
 --- a/drivers/isdn/i4l/isdn_common.c
@@ -47782,19 +48361,6 @@ index cb22da5..82b686e 100644
  	dprintk("%s\n", __func__);
  
  	/* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
-diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
-index 2fe05d0..a3289c4 100644
---- a/drivers/media/dvb/dvb-core/dvb_demux.h
-+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
-@@ -71,7 +71,7 @@ struct dvb_demux_feed {
- 	union {
- 		dmx_ts_cb ts;
- 		dmx_section_cb sec;
--	} cb;
-+	} __no_const cb;
- 
- 	struct dvb_demux *demux;
- 	void *priv;
 diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
 index 94159b9..376bd8e 100644
 --- a/drivers/media/dvb/dvb-core/dvbdev.c
@@ -47969,19 +48535,6 @@ index 299afa4..eb47459 100644
  	memset(&tvdata,0,sizeof(tvdata));
  
  	eeprom = pvr2_eeprom_fetch(hdw);
-diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
-index 5b152ff..3320638 100644
---- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
-+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
-@@ -195,7 +195,7 @@ struct pvr2_hdw {
- 
- 	/* I2C stuff */
- 	struct i2c_adapter i2c_adap;
--	struct i2c_algorithm i2c_algo;
-+	i2c_algorithm_no_const i2c_algo;
- 	pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
- 	int i2c_cx25840_hack_state;
- 	int i2c_linked;
 diff --git a/drivers/media/video/saa7134/saa6752hs.c b/drivers/media/video/saa7134/saa6752hs.c
 index 1eabff6..8e2313a 100644
 --- a/drivers/media/video/saa7134/saa6752hs.c
@@ -61731,19 +62284,67 @@ index cde8ecd..db944d3 100644
 +
  #endif				/* __DL2K_H__ */
 diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
-index d1e0563..b9e129c 100644
+index d1e0563..77f3794 100644
 --- a/drivers/net/e1000e/82571.c
 +++ b/drivers/net/e1000e/82571.c
-@@ -212,7 +212,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
+@@ -212,7 +212,6 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
  {
  	struct e1000_hw *hw = &adapter->hw;
  	struct e1000_mac_info *mac = &hw->mac;
 -	struct e1000_mac_operations *func = &mac->ops;
-+	e1000_mac_operations_no_const *func = &mac->ops;
  	u32 swsm = 0;
  	u32 swsm2 = 0;
  	bool force_clear_smbi = false;
-@@ -1656,7 +1656,7 @@ static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
+@@ -245,22 +244,22 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
+ 	/* check for link */
+ 	switch (hw->phy.media_type) {
+ 	case e1000_media_type_copper:
+-		func->setup_physical_interface = e1000_setup_copper_link_82571;
+-		func->check_for_link = e1000e_check_for_copper_link;
+-		func->get_link_up_info = e1000e_get_speed_and_duplex_copper;
++		mac->ops.setup_physical_interface = e1000_setup_copper_link_82571;
++		mac->ops.check_for_link = e1000e_check_for_copper_link;
++		mac->ops.get_link_up_info = e1000e_get_speed_and_duplex_copper;
+ 		break;
+ 	case e1000_media_type_fiber:
+-		func->setup_physical_interface =
++		mac->ops.setup_physical_interface =
+ 			e1000_setup_fiber_serdes_link_82571;
+-		func->check_for_link = e1000e_check_for_fiber_link;
+-		func->get_link_up_info =
++		mac->ops.check_for_link = e1000e_check_for_fiber_link;
++		mac->ops.get_link_up_info =
+ 			e1000e_get_speed_and_duplex_fiber_serdes;
+ 		break;
+ 	case e1000_media_type_internal_serdes:
+-		func->setup_physical_interface =
++		mac->ops.setup_physical_interface =
+ 			e1000_setup_fiber_serdes_link_82571;
+-		func->check_for_link = e1000_check_for_serdes_link_82571;
+-		func->get_link_up_info =
++		mac->ops.check_for_link = e1000_check_for_serdes_link_82571;
++		mac->ops.get_link_up_info =
+ 			e1000e_get_speed_and_duplex_fiber_serdes;
+ 		break;
+ 	default:
+@@ -271,12 +270,12 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
+ 	switch (hw->mac.type) {
+ 	case e1000_82574:
+ 	case e1000_82583:
+-		func->check_mng_mode = e1000_check_mng_mode_82574;
+-		func->led_on = e1000_led_on_82574;
++		mac->ops.check_mng_mode = e1000_check_mng_mode_82574;
++		mac->ops.led_on = e1000_led_on_82574;
+ 		break;
+ 	default:
+-		func->check_mng_mode = e1000e_check_mng_mode_generic;
+-		func->led_on = e1000e_led_on_generic;
++		mac->ops.check_mng_mode = e1000e_check_mng_mode_generic;
++		mac->ops.led_on = e1000e_led_on_generic;
+ 		break;
+ 	}
+ 
+@@ -1656,7 +1655,7 @@ static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
  	temp = er32(ICRXDMTC);
  }
  
@@ -61752,7 +62353,7 @@ index d1e0563..b9e129c 100644
  	/* .check_mng_mode: mac type dependent */
  	/* .check_for_link: media type dependent */
  	.id_led_init		= e1000e_id_led_init,
-@@ -1674,7 +1674,7 @@ static struct e1000_mac_operations e82571_mac_ops = {
+@@ -1674,7 +1673,7 @@ static struct e1000_mac_operations e82571_mac_ops = {
  	.setup_led		= e1000e_setup_led_generic,
  };
  
@@ -61761,7 +62362,7 @@ index d1e0563..b9e129c 100644
  	.acquire_phy		= e1000_get_hw_semaphore_82571,
  	.check_reset_block	= e1000e_check_reset_block_generic,
  	.commit_phy		= NULL,
-@@ -1691,7 +1691,7 @@ static struct e1000_phy_operations e82_phy_ops_igp = {
+@@ -1691,7 +1690,7 @@ static struct e1000_phy_operations e82_phy_ops_igp = {
  	.cfg_on_link_up      	= NULL,
  };
  
@@ -61770,7 +62371,7 @@ index d1e0563..b9e129c 100644
  	.acquire_phy		= e1000_get_hw_semaphore_82571,
  	.check_reset_block	= e1000e_check_reset_block_generic,
  	.commit_phy		= e1000e_phy_sw_reset,
-@@ -1708,7 +1708,7 @@ static struct e1000_phy_operations e82_phy_ops_m88 = {
+@@ -1708,7 +1707,7 @@ static struct e1000_phy_operations e82_phy_ops_m88 = {
  	.cfg_on_link_up      	= NULL,
  };
  
@@ -61779,7 +62380,7 @@ index d1e0563..b9e129c 100644
  	.acquire_phy		= e1000_get_hw_semaphore_82571,
  	.check_reset_block	= e1000e_check_reset_block_generic,
  	.commit_phy		= e1000e_phy_sw_reset,
-@@ -1725,7 +1725,7 @@ static struct e1000_phy_operations e82_phy_ops_bm = {
+@@ -1725,7 +1724,7 @@ static struct e1000_phy_operations e82_phy_ops_bm = {
  	.cfg_on_link_up      	= NULL,
  };
  
@@ -61806,19 +62407,41 @@ index 47db9bd..fa58ccd 100644
  
  /* hardware capability, feature, and workaround flags */
 diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
-index ae5d736..e9a93a1 100644
+index ae5d736..afc88d6 100644
 --- a/drivers/net/e1000e/es2lan.c
 +++ b/drivers/net/e1000e/es2lan.c
-@@ -207,7 +207,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
+@@ -207,7 +207,6 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
  {
  	struct e1000_hw *hw = &adapter->hw;
  	struct e1000_mac_info *mac = &hw->mac;
 -	struct e1000_mac_operations *func = &mac->ops;
-+	e1000_mac_operations_no_const *func = &mac->ops;
  
  	/* Set media type */
  	switch (adapter->pdev->device) {
-@@ -1365,7 +1365,7 @@ static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
+@@ -229,16 +228,16 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
+ 	/* check for link */
+ 	switch (hw->phy.media_type) {
+ 	case e1000_media_type_copper:
+-		func->setup_physical_interface = e1000_setup_copper_link_80003es2lan;
+-		func->check_for_link = e1000e_check_for_copper_link;
++		mac->ops.setup_physical_interface = e1000_setup_copper_link_80003es2lan;
++		mac->ops.check_for_link = e1000e_check_for_copper_link;
+ 		break;
+ 	case e1000_media_type_fiber:
+-		func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
+-		func->check_for_link = e1000e_check_for_fiber_link;
++		mac->ops.setup_physical_interface = e1000e_setup_fiber_serdes_link;
++		mac->ops.check_for_link = e1000e_check_for_fiber_link;
+ 		break;
+ 	case e1000_media_type_internal_serdes:
+-		func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
+-		func->check_for_link = e1000e_check_for_serdes_link;
++		mac->ops.setup_physical_interface = e1000e_setup_fiber_serdes_link;
++		mac->ops.check_for_link = e1000e_check_for_serdes_link;
+ 		break;
+ 	default:
+ 		return -E1000_ERR_CONFIG;
+@@ -1365,7 +1364,7 @@ static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
  	temp = er32(ICRXDMTC);
  }
  
@@ -61827,7 +62450,7 @@ index ae5d736..e9a93a1 100644
  	.id_led_init		= e1000e_id_led_init,
  	.check_mng_mode		= e1000e_check_mng_mode_generic,
  	/* check_for_link dependent on media type */
-@@ -1383,7 +1383,7 @@ static struct e1000_mac_operations es2_mac_ops = {
+@@ -1383,7 +1382,7 @@ static struct e1000_mac_operations es2_mac_ops = {
  	.setup_led		= e1000e_setup_led_generic,
  };
  
@@ -61836,7 +62459,7 @@ index ae5d736..e9a93a1 100644
  	.acquire_phy		= e1000_acquire_phy_80003es2lan,
  	.check_reset_block	= e1000e_check_reset_block_generic,
  	.commit_phy	 	= e1000e_phy_sw_reset,
-@@ -1400,7 +1400,7 @@ static struct e1000_phy_operations es2_phy_ops = {
+@@ -1400,7 +1399,7 @@ static struct e1000_phy_operations es2_phy_ops = {
  	.cfg_on_link_up      	= e1000_cfg_on_link_up_80003es2lan,
  };
  
@@ -61845,56 +62468,6 @@ index ae5d736..e9a93a1 100644
  	.acquire_nvm		= e1000_acquire_nvm_80003es2lan,
  	.read_nvm		= e1000e_read_nvm_eerd,
  	.release_nvm		= e1000_release_nvm_80003es2lan,
-diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
-index 11f3b7c..6381887 100644
---- a/drivers/net/e1000e/hw.h
-+++ b/drivers/net/e1000e/hw.h
-@@ -753,6 +753,7 @@ struct e1000_mac_operations {
- 	s32  (*setup_physical_interface)(struct e1000_hw *);
- 	s32  (*setup_led)(struct e1000_hw *);
- };
-+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
- 
- /* Function pointers for the PHY. */
- struct e1000_phy_operations {
-@@ -774,6 +775,7 @@ struct e1000_phy_operations {
- 	s32  (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
- 	s32  (*cfg_on_link_up)(struct e1000_hw *);
- };
-+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
- 
- /* Function pointers for the NVM. */
- struct e1000_nvm_operations {
-@@ -785,9 +787,10 @@ struct e1000_nvm_operations {
- 	s32  (*validate_nvm)(struct e1000_hw *);
- 	s32  (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
- };
-+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
- 
- struct e1000_mac_info {
--	struct e1000_mac_operations ops;
-+	e1000_mac_operations_no_const ops;
- 
- 	u8 addr[6];
- 	u8 perm_addr[6];
-@@ -823,7 +826,7 @@ struct e1000_mac_info {
- };
- 
- struct e1000_phy_info {
--	struct e1000_phy_operations ops;
-+	e1000_phy_operations_no_const ops;
- 
- 	enum e1000_phy_type type;
- 
-@@ -857,7 +860,7 @@ struct e1000_phy_info {
- };
- 
- struct e1000_nvm_info {
--	struct e1000_nvm_operations ops;
-+	e1000_nvm_operations_no_const ops;
- 
- 	enum e1000_nvm_type type;
- 	enum e1000_nvm_override override;
 diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
 index de39f9a..e28d3e0 100644
 --- a/drivers/net/e1000e/ich8lan.c
@@ -61994,111 +62567,6 @@ index d617f2d..57b5309 100644
  	.acquire              = igb_acquire_nvm_82575,
  	.read                 = igb_read_nvm_eerd,
  	.release              = igb_release_nvm_82575,
-diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
-index 72081df..d855cf5 100644
---- a/drivers/net/igb/e1000_hw.h
-+++ b/drivers/net/igb/e1000_hw.h
-@@ -288,6 +288,7 @@ struct e1000_mac_operations {
- 	s32  (*read_mac_addr)(struct e1000_hw *);
- 	s32  (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
- };
-+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
- 
- struct e1000_phy_operations {
- 	s32  (*acquire)(struct e1000_hw *);
-@@ -303,6 +304,7 @@ struct e1000_phy_operations {
- 	s32  (*set_d3_lplu_state)(struct e1000_hw *, bool);
- 	s32  (*write_reg)(struct e1000_hw *, u32, u16);
- };
-+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
- 
- struct e1000_nvm_operations {
- 	s32  (*acquire)(struct e1000_hw *);
-@@ -310,6 +312,7 @@ struct e1000_nvm_operations {
- 	void (*release)(struct e1000_hw *);
- 	s32  (*write)(struct e1000_hw *, u16, u16, u16 *);
- };
-+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
- 
- struct e1000_info {
- 	s32 (*get_invariants)(struct e1000_hw *);
-@@ -321,7 +324,7 @@ struct e1000_info {
- extern const struct e1000_info e1000_82575_info;
- 
- struct e1000_mac_info {
--	struct e1000_mac_operations ops;
-+	e1000_mac_operations_no_const ops;
- 
- 	u8 addr[6];
- 	u8 perm_addr[6];
-@@ -365,7 +368,7 @@ struct e1000_mac_info {
- };
- 
- struct e1000_phy_info {
--	struct e1000_phy_operations ops;
-+	e1000_phy_operations_no_const ops;
- 
- 	enum e1000_phy_type type;
- 
-@@ -400,7 +403,7 @@ struct e1000_phy_info {
- };
- 
- struct e1000_nvm_info {
--	struct e1000_nvm_operations ops;
-+	e1000_nvm_operations_no_const ops;
- 
- 	enum e1000_nvm_type type;
- 	enum e1000_nvm_override override;
-@@ -446,6 +449,7 @@ struct e1000_mbx_operations {
- 	s32 (*check_for_ack)(struct e1000_hw *, u16);
- 	s32 (*check_for_rst)(struct e1000_hw *, u16);
- };
-+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
- 
- struct e1000_mbx_stats {
- 	u32 msgs_tx;
-@@ -457,7 +461,7 @@ struct e1000_mbx_stats {
- };
- 
- struct e1000_mbx_info {
--	struct e1000_mbx_operations ops;
-+	e1000_mbx_operations_no_const ops;
- 	struct e1000_mbx_stats stats;
- 	u32 timeout;
- 	u32 usec_delay;
-diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h
-index 1e8ce37..549c453 100644
---- a/drivers/net/igbvf/vf.h
-+++ b/drivers/net/igbvf/vf.h
-@@ -187,9 +187,10 @@ struct e1000_mac_operations {
- 	s32  (*read_mac_addr)(struct e1000_hw *);
- 	s32  (*set_vfta)(struct e1000_hw *, u16, bool);
- };
-+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
- 
- struct e1000_mac_info {
--	struct e1000_mac_operations ops;
-+	e1000_mac_operations_no_const ops;
- 	u8 addr[6];
- 	u8 perm_addr[6];
- 
-@@ -211,6 +212,7 @@ struct e1000_mbx_operations {
- 	s32 (*check_for_ack)(struct e1000_hw *);
- 	s32 (*check_for_rst)(struct e1000_hw *);
- };
-+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
- 
- struct e1000_mbx_stats {
- 	u32 msgs_tx;
-@@ -222,7 +224,7 @@ struct e1000_mbx_stats {
- };
- 
- struct e1000_mbx_info {
--	struct e1000_mbx_operations ops;
-+	e1000_mbx_operations_no_const ops;
- 	struct e1000_mbx_stats stats;
- 	u32 timeout;
- 	u32 usec_delay;
 diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
 index aa7286b..a61394f 100644
 --- a/drivers/net/iseries_veth.c
@@ -62148,56 +62616,19 @@ index af35e1d..8781785 100644
  	if (bd >= IXGB_MAX_NIC) {
  		printk(KERN_NOTICE
  			   "Warning: no configuration for board #%i\n", bd);
-diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
-index b17aa73..ed74540 100644
---- a/drivers/net/ixgbe/ixgbe_type.h
-+++ b/drivers/net/ixgbe/ixgbe_type.h
-@@ -2327,6 +2327,7 @@ struct ixgbe_eeprom_operations {
- 	s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
- 	s32 (*update_checksum)(struct ixgbe_hw *);
- };
-+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
- 
- struct ixgbe_mac_operations {
- 	s32 (*init_hw)(struct ixgbe_hw *);
-@@ -2376,6 +2377,7 @@ struct ixgbe_mac_operations {
- 	/* Flow Control */
- 	s32 (*fc_enable)(struct ixgbe_hw *, s32);
- };
-+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
- 
- struct ixgbe_phy_operations {
- 	s32 (*identify)(struct ixgbe_hw *);
-@@ -2394,9 +2396,10 @@ struct ixgbe_phy_operations {
- 	s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
- 	s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
- };
-+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
- 
- struct ixgbe_eeprom_info {
--	struct ixgbe_eeprom_operations  ops;
-+	ixgbe_eeprom_operations_no_const ops;
- 	enum ixgbe_eeprom_type          type;
- 	u32                             semaphore_delay;
- 	u16                             word_size;
-@@ -2404,7 +2407,7 @@ struct ixgbe_eeprom_info {
- };
- 
- struct ixgbe_mac_info {
--	struct ixgbe_mac_operations     ops;
-+	ixgbe_mac_operations_no_const   ops;
- 	enum ixgbe_mac_type             type;
- 	u8                              addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
- 	u8                              perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
-@@ -2423,7 +2426,7 @@ struct ixgbe_mac_info {
- };
- 
- struct ixgbe_phy_info {
--	struct ixgbe_phy_operations     ops;
-+	ixgbe_phy_operations_no_const   ops;
- 	struct mdio_if_info		mdio;
- 	enum ixgbe_phy_type             type;
- 	u32                             id;
+diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
+index 2490aa3..185d647 100644
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -568,7 +568,7 @@ static void macvlan_dellink(struct net_device *dev)
+ 		macvlan_port_destroy(port->dev);
+ }
+ 
+-static struct rtnl_link_ops macvlan_link_ops __read_mostly = {
++static struct rtnl_link_ops macvlan_link_ops = {
+ 	.kind		= "macvlan",
+ 	.priv_size	= sizeof(struct macvlan_dev),
+ 	.get_tx_queues  = macvlan_get_tx_queues,
 diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
 index 291a505..2543756 100644
 --- a/drivers/net/mlx4/main.c
@@ -63180,19 +63611,50 @@ index f450bc9..2b747c8 100644
  				result =
  				    hso_start_serial_device(serial_table[i], GFP_NOIO);
  				hso_kick_transmit(dev2ser(serial_table[i]));
-diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
-index 3e94f0c..ffdd926 100644
---- a/drivers/net/vxge/vxge-config.h
-+++ b/drivers/net/vxge/vxge-config.h
-@@ -474,7 +474,7 @@ struct vxge_hw_uld_cbs {
- 	void (*link_down)(struct __vxge_hw_device *devh);
- 	void (*crit_err)(struct __vxge_hw_device *devh,
- 			enum vxge_hw_event type, u64 ext_data);
--};
-+} __no_const;
+diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
+index 9e94c4b..316ee65 100644
+--- a/drivers/net/vxge/vxge-config.c
++++ b/drivers/net/vxge/vxge-config.c
+@@ -1467,7 +1467,9 @@ __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
+ 	struct vxge_hw_ring_config *config;
+ 	struct __vxge_hw_device *hldev;
+ 	u32 vp_id;
+-	struct vxge_hw_mempool_cbs ring_mp_callback;
++	static struct vxge_hw_mempool_cbs ring_mp_callback = {
++		.item_func_alloc = __vxge_hw_ring_mempool_item_alloc,
++	};
  
- /*
-  * struct __vxge_hw_blockpool_entry - Block private data structure
+ 	if ((vp == NULL) || (attr == NULL)) {
+ 		status = VXGE_HW_FAIL;
+@@ -1521,7 +1523,6 @@ __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
+ 
+ 	/* calculate actual RxD block private size */
+ 	ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
+-	ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
+ 	ring->mempool = __vxge_hw_mempool_create(hldev,
+ 				VXGE_HW_BLOCK_SIZE,
+ 				VXGE_HW_BLOCK_SIZE,
+@@ -2509,7 +2510,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
+ 	struct __vxge_hw_fifo *fifo;
+ 	struct vxge_hw_fifo_config *config;
+ 	u32 txdl_size, txdl_per_memblock;
+-	struct vxge_hw_mempool_cbs fifo_mp_callback;
++	static struct vxge_hw_mempool_cbs fifo_mp_callback = {
++		.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
++	};
++
+ 	struct __vxge_hw_virtualpath *vpath;
+ 
+ 	if ((vp == NULL) || (attr == NULL)) {
+@@ -2590,8 +2594,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
+ 		goto exit;
+ 	}
+ 
+-	fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
+-
+ 	fifo->mempool =
+ 		__vxge_hw_mempool_create(vpath->hldev,
+ 			fifo->config->memblock_size,
 diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
 index 068d7a9..35293de 100644
 --- a/drivers/net/vxge/vxge-main.c
@@ -63215,19 +63677,6 @@ index 068d7a9..35293de 100644
  	/*
  	 * Filling
  	 * 	- itable with bucket numbers
-diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
-index 461742b..81be42e 100644
---- a/drivers/net/vxge/vxge-traffic.h
-+++ b/drivers/net/vxge/vxge-traffic.h
-@@ -2123,7 +2123,7 @@ struct vxge_hw_mempool_cbs {
- 			struct vxge_hw_mempool_dma	*dma_object,
- 			u32			index,
- 			u32			is_last);
--};
-+} __no_const;
- 
- void
- __vxge_hw_mempool_destroy(
 diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
 index cd8cb95..4153b79 100644
 --- a/drivers/net/wan/cycx_x25.c
@@ -63873,19 +64322,82 @@ index 73e7d8e..c80f3d2 100644
  	.handler = handle_hotplug_event_func,
  };
  
-diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
-index 9fff878..ad0ad53 100644
---- a/drivers/pci/hotplug/cpci_hotplug.h
-+++ b/drivers/pci/hotplug/cpci_hotplug.h
-@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
- 	int (*hardware_test) (struct slot* slot, u32 value);
- 	u8  (*get_power) (struct slot* slot);
- 	int (*set_power) (struct slot* slot, int value);
--};
-+} __no_const;
+diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
+index 148fb46..6ff9455 100644
+--- a/drivers/pci/hotplug/cpcihp_generic.c
++++ b/drivers/pci/hotplug/cpcihp_generic.c
+@@ -73,7 +73,6 @@ static u16 port;
+ static unsigned int enum_bit;
+ static u8 enum_mask;
  
- struct cpci_hp_controller {
- 	unsigned int irq;
+-static struct cpci_hp_controller_ops generic_hpc_ops;
+ static struct cpci_hp_controller generic_hpc;
+ 
+ static int __init validate_parameters(void)
+@@ -139,6 +138,10 @@ static int query_enum(void)
+ 	return ((value & enum_mask) == enum_mask);
+ }
+ 
++static struct cpci_hp_controller_ops generic_hpc_ops = {
++	.query_enum = query_enum,
++};
++
+ static int __init cpcihp_generic_init(void)
+ {
+ 	int status;
+@@ -168,7 +171,6 @@ static int __init cpcihp_generic_init(void)
+ 	pci_dev_put(dev);
+ 
+ 	memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
+-	generic_hpc_ops.query_enum = query_enum;
+ 	generic_hpc.ops = &generic_hpc_ops;
+ 
+ 	status = cpci_hp_register_controller(&generic_hpc);
+diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
+index 41f6a8d..da73050 100644
+--- a/drivers/pci/hotplug/cpcihp_zt5550.c
++++ b/drivers/pci/hotplug/cpcihp_zt5550.c
+@@ -59,7 +59,6 @@
+ /* local variables */
+ static int debug;
+ static int poll;
+-static struct cpci_hp_controller_ops zt5550_hpc_ops;
+ static struct cpci_hp_controller zt5550_hpc;
+ 
+ /* Primary cPCI bus bridge device */
+@@ -205,6 +204,10 @@ static int zt5550_hc_disable_irq(void)
+ 	return 0;
+ }
+ 
++static struct cpci_hp_controller_ops zt5550_hpc_ops = {
++	.query_enum = zt5550_hc_query_enum,
++};
++
+ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+ {
+ 	int status;
+@@ -216,16 +219,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
+ 	dbg("returned from zt5550_hc_config");
+ 
+ 	memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
+-	zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
+ 	zt5550_hpc.ops = &zt5550_hpc_ops;
+ 	if(!poll) {
+ 		zt5550_hpc.irq = hc_dev->irq;
+ 		zt5550_hpc.irq_flags = IRQF_SHARED;
+ 		zt5550_hpc.dev_id = hc_dev;
+ 
+-		zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
+-		zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
+-		zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
++		pax_open_kernel();
++		*(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
++		*(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
++		*(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
++		pax_open_kernel();
+ 	} else {
+ 		info("using ENUM# polling mode");
+ 	}
 diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
 index 76ba8a1..20ca857 100644
 --- a/drivers/pci/hotplug/cpqphp_nvram.c
@@ -64323,18 +64835,73 @@ index ba97654..66b99d4 100644
  
  	/* check if the resource is reserved */
 diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
-index 62bb981..24a2dc9 100644
+index 62bb981..428f9a2 100644
 --- a/drivers/power/bq27x00_battery.c
 +++ b/drivers/power/bq27x00_battery.c
-@@ -44,7 +44,7 @@ struct bq27x00_device_info;
- struct bq27x00_access_methods {
- 	int (*read)(u8 reg, int *rt_value, int b_single,
- 		struct bq27x00_device_info *di);
--};
-+} __no_const;
+@@ -53,7 +53,7 @@ struct bq27x00_device_info {
+ 	int			current_uA;
+ 	int			temp_C;
+ 	int			charge_rsoc;
+-	struct bq27x00_access_methods	*bus;
++	struct bq27x00_access_methods	bus;
+ 	struct power_supply	bat;
+ 
+ 	struct i2c_client	*client;
+@@ -76,7 +76,7 @@ static int bq27x00_read(u8 reg, int *rt_value, int b_single,
+ {
+ 	int ret;
+ 
+-	ret = di->bus->read(reg, rt_value, b_single, di);
++	ret = di->bus.read(reg, rt_value, b_single, di);
+ 	*rt_value = be16_to_cpu(*rt_value);
+ 
+ 	return ret;
+@@ -253,7 +253,6 @@ static int bq27200_battery_probe(struct i2c_client *client,
+ {
+ 	char *name;
+ 	struct bq27x00_device_info *di;
+-	struct bq27x00_access_methods *bus;
+ 	int num;
+ 	int retval = 0;
  
- struct bq27x00_device_info {
- 	struct device 		*dev;
+@@ -282,19 +281,10 @@ static int bq27200_battery_probe(struct i2c_client *client,
+ 	}
+ 	di->id = num;
+ 
+-	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
+-	if (!bus) {
+-		dev_err(&client->dev, "failed to allocate access method "
+-					"data\n");
+-		retval = -ENOMEM;
+-		goto batt_failed_3;
+-	}
+-
+ 	i2c_set_clientdata(client, di);
+ 	di->dev = &client->dev;
+ 	di->bat.name = name;
+-	bus->read = &bq27200_read;
+-	di->bus = bus;
++	di->bus.read = &bq27200_read;
+ 	di->client = client;
+ 
+ 	bq27x00_powersupply_init(di);
+@@ -302,15 +292,13 @@ static int bq27200_battery_probe(struct i2c_client *client,
+ 	retval = power_supply_register(&client->dev, &di->bat);
+ 	if (retval) {
+ 		dev_err(&client->dev, "failed to register battery\n");
+-		goto batt_failed_4;
++		goto batt_failed_3;
+ 	}
+ 
+ 	dev_info(&client->dev, "support ver. %s enabled\n", DRIVER_VERSION);
+ 
+ 	return 0;
+ 
+-batt_failed_4:
+-	kfree(bus);
+ batt_failed_3:
+ 	kfree(di);
+ batt_failed_2:
 diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
 index 62227cd..b5b538b 100644
 --- a/drivers/rtc/rtc-dev.c
@@ -66922,19 +67489,6 @@ index 3ad61db..c938975 100644
  
  obj-$(CONFIG_ARM)		+= arm/
  
-diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
-index cdbdec9..b7d560b 100644
---- a/drivers/scsi/aacraid/aacraid.h
-+++ b/drivers/scsi/aacraid/aacraid.h
-@@ -471,7 +471,7 @@ struct adapter_ops
- 	int  (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
- 	/* Administrative operations */
- 	int  (*adapter_comm)(struct aac_dev * dev, int comm);
--};
-+} __no_const;
- 
- /*
-  *	Define which interrupt handler needs to be installed
 diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
 index a5b8e7b..a6a0e43 100644
 --- a/drivers/scsi/aacraid/commctrl.c
@@ -67168,19 +67722,6 @@ index a601159..55e19d2 100644
  	.phy_reset = ipr_ata_phy_reset,
  	.hardreset = ipr_sata_reset,
  	.post_internal_cmd = ipr_ata_post_internal,
-diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
-index 4e49fbc..97907ff 100644
---- a/drivers/scsi/ips.h
-+++ b/drivers/scsi/ips.h
-@@ -1027,7 +1027,7 @@ typedef struct {
-    int       (*intr)(struct ips_ha *);
-    void      (*enableint)(struct ips_ha *);
-    uint32_t (*statupd)(struct ips_ha *);
--} ips_hw_func_t;
-+} __no_const ips_hw_func_t;
- 
- typedef struct ips_ha {
-    uint8_t            ha_id[IPS_MAX_CHANNELS+1];
 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
 index c1c1574..a9c9348 100644
 --- a/drivers/scsi/libfc/fc_exch.c
@@ -67620,19 +68161,23 @@ index 3441b3f..6cbe8f7 100644
  
  	/* To indicate add/delete/modify during CCN */
  	u8 change_detected;
-diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
-index 2150618..7034215 100644
---- a/drivers/scsi/qla2xxx/qla_def.h
-+++ b/drivers/scsi/qla2xxx/qla_def.h
-@@ -2089,7 +2089,7 @@ struct isp_operations {
- 
- 	int (*get_flash_version) (struct scsi_qla_host *, void *);
- 	int (*start_scsi) (srb_t *);
--};
-+} __no_const;
- 
- /* MSI-X Support *************************************************************/
- 
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 06bbe0d..4b5dc65 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -1274,8 +1274,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
+ 		    !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
+ 			/* Ok, a 64bit DMA mask is applicable. */
+ 			ha->flags.enable_64bit_addressing = 1;
+-			ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
+-			ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
++			pax_open_kernel();
++			*(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
++			*(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
++			pax_close_kernel();
+ 			return;
+ 		}
+ 	}
 diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
 index 81b5f29..2ae1fad 100644
 --- a/drivers/scsi/qla4xxx/ql4_def.h
@@ -70379,19 +70924,6 @@ index f890a16..509ece8 100644
  	.owner = THIS_MODULE,
  	.ioctl = sep_ioctl,
  	.poll = sep_poll,
-diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
-index 5e16bc3..7655b10 100644
---- a/drivers/staging/usbip/usbip_common.h
-+++ b/drivers/staging/usbip/usbip_common.h
-@@ -374,7 +374,7 @@ struct usbip_device {
- 		void (*shutdown)(struct usbip_device *);
- 		void (*reset)(struct usbip_device *);
- 		void (*unusable)(struct usbip_device *);
--	} eh_ops;
-+	} __no_const eh_ops;
- };
- 
- 
 diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
 index 57f7946..d9df23d 100644
 --- a/drivers/staging/usbip/vhci.h
@@ -70488,19 +71020,6 @@ index 0c8267a..db1f363 100644
  		.ndo_start_xmit         = pDevice->tx_80211,
  	};
  
-diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
-index 925678b..da7f5ed 100644
---- a/drivers/staging/wlan-ng/hfa384x_usb.c
-+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
-@@ -205,7 +205,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
- 
- struct usbctlx_completor {
- 	int (*complete) (struct usbctlx_completor *);
--};
-+} __no_const;
- typedef struct usbctlx_completor usbctlx_completor_t;
- 
- static int
 diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
 index 40de151..924f268 100644
 --- a/drivers/telephony/ixj.c
@@ -74350,6 +74869,47 @@ index bd37ee1..cb827e8 100644
  	if (info->screen_base)
  		iounmap(info->screen_base);
  	framebuffer_release(info);
+diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
+index 1f6bb28..bd773f4 100644
+--- a/drivers/watchdog/hpwdt.c
++++ b/drivers/watchdog/hpwdt.c
+@@ -725,8 +725,11 @@ static int __devinit hpwdt_init_one(struct pci_dev *dev,
+ 	 * die notify list to handle a critical NMI. The default is to
+ 	 * be last so other users of the NMI signal can function.
+ 	 */
+-	if (priority)
+-		die_notifier.priority = 0x7FFFFFFF;
++	if (priority) {
++		pax_open_kernel();
++		*(void **)&die_notifier.priority = 0x7FFFFFFF;
++		pax_close_kernel();
++	}
+ 
+ 	retval = register_die_notifier(&die_notifier);
+ 	if (retval != 0) {
+diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
+index 4204336..d2f0824 100644
+--- a/drivers/xen/balloon.c
++++ b/drivers/xen/balloon.c
+@@ -395,7 +395,9 @@ static int balloon_init_watcher(struct notifier_block *notifier,
+ 	return NOTIFY_DONE;
+ }
+ 
+-static struct notifier_block xenstore_notifier;
++static struct notifier_block xenstore_notifier = {
++	.notifier_call = balloon_init_watcher,
++};
+ 
+ static int __init balloon_init(void)
+ {
+@@ -427,7 +429,6 @@ static int __init balloon_init(void)
+ 	}
+ 
+ 	target_watch.callback = watch_target;
+-	xenstore_notifier.notifier_call = balloon_init_watcher;
+ 
+ 	register_xenstore_notifier(&xenstore_notifier);
+ 
 diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c
 index 88a60e0..6783cc2 100644
 --- a/drivers/xen/sys-hypervisor.c
@@ -77003,7 +77563,7 @@ index ff57421..f65f88a 100644
  
  out_free_fd:
 diff --git a/fs/exec.c b/fs/exec.c
-index 86fafc6..9154c823 100644
+index 86fafc6..509ab19 100644
 --- a/fs/exec.c
 +++ b/fs/exec.c
 @@ -56,12 +56,33 @@
@@ -77721,7 +78281,7 @@ index 86fafc6..9154c823 100644
 +	if (!n)
 +		return;
 +
-+	type = check_heap_object(ptr, n, to);
++	type = check_heap_object(ptr, n);
 +	if (!type) {
 +		if (check_stack_object(ptr, n) != -1)
 +			return;
@@ -83336,7 +83896,7 @@ index fd38ce2..f5381b8 100644
  		return -EINVAL;
  
 diff --git a/fs/seq_file.c b/fs/seq_file.c
-index eae7d9d..c6bba46 100644
+index eae7d9d..69ac0fd 100644
 --- a/fs/seq_file.c
 +++ b/fs/seq_file.c
 @@ -9,6 +9,7 @@
@@ -83410,7 +83970,7 @@ index eae7d9d..c6bba46 100644
  		void *data)
  {
 -	struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
-+	seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
++	seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
  	int res = -ENOMEM;
  
  	if (op) {
@@ -83633,6 +84193,44 @@ index bb92b7c5..5aa72b0 100644
  		ret = -EAGAIN;
  
  	pipe_unlock(ipipe);
+diff --git a/fs/stat.c b/fs/stat.c
+index c4ecd52..a8fca7d 100644
+--- a/fs/stat.c
++++ b/fs/stat.c
+@@ -27,8 +27,14 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
+ 	stat->uid = inode->i_uid;
+ 	stat->gid = inode->i_gid;
+ 	stat->rdev = inode->i_rdev;
+-	stat->atime = inode->i_atime;
+-	stat->mtime = inode->i_mtime;
++	stat->size = i_size_read(inode);
++	if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
++		stat->atime = inode->i_ctime;
++		stat->mtime = inode->i_ctime;
++	} else {
++		stat->atime = inode->i_atime;
++		stat->mtime = inode->i_mtime;
++	}
+ 	stat->ctime = inode->i_ctime;
+ 	stat->size = i_size_read(inode);
+ 	stat->blocks = inode->i_blocks;
+@@ -46,8 +52,14 @@ int vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
+ 	if (retval)
+ 		return retval;
+ 
+-	if (inode->i_op->getattr)
+-		return inode->i_op->getattr(mnt, dentry, stat);
++	if (inode->i_op->getattr) {
++		retval = inode->i_op->getattr(mnt, dentry, stat);
++		if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
++			stat->atime = stat->ctime;
++			stat->mtime = stat->ctime;
++		}
++		return retval;
++	}
+ 
+ 	generic_fillattr(inode, stat);
+ 	return 0;
 diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
 index e020183..18d64b4 100644
 --- a/fs/sysfs/dir.c
@@ -84034,10 +84632,10 @@ index e89734e..5e84d8d 100644
  			return 0;
 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
 new file mode 100644
-index 0000000..7cf22bd
+index 0000000..5e175a6
 --- /dev/null
 +++ b/grsecurity/Kconfig
-@@ -0,0 +1,964 @@
+@@ -0,0 +1,997 @@
 +#
 +# grecurity configuration
 +#
@@ -84100,6 +84698,26 @@ index 0000000..7cf22bd
 +	  IF YOU USE XFree86.  If you use XFree86 and you still want to 
 +	  protect your kernel against modification, use the RBAC system.
 +
++config GRKERNSEC_RAND_THREADSTACK
++	bool "Insert random gaps between thread stacks"
++	default y if GRKERNSEC_CONFIG_AUTO
++	depends on PAX_RANDMMAP && !PPC
++	help
++	  If you say Y here, a random-sized gap will be enforced between allocated
++	  thread stacks.  Glibc's NPTL and other threading libraries that
++	  pass MAP_STACK to the kernel for thread stack allocation are supported.
++	  The implementation currently provides 8 bits of entropy for the gap.
++
++	  Many distributions do not compile threaded remote services with the
++	  -fstack-check argument to GCC, causing the variable-sized stack-based
++	  allocator, alloca(), to not probe the stack on allocation.  This
++	  permits an unbounded alloca() to skip over any guard page and potentially
++	  modify another thread's stack reliably.  An enforced random gap
++	  reduces the reliability of such an attack and increases the chance
++	  that such a read/write to another thread's stack instead lands in
++	  an unmapped area, causing a crash and triggering grsecurity's
++	  anti-bruteforcing logic.
++
 +config GRKERNSEC_PROC_MEMMAP
 +	bool "Harden ASLR against information leaks and entropy reduction"
 +	default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
@@ -84381,6 +84999,19 @@ index 0000000..7cf22bd
 +	  behavior if this option is enabled in an init script on a read-only
 +	  filesystem.  This feature is mainly intended for secure embedded systems.
 +
++config GRKERNSEC_DEVICE_SIDECHANNEL
++	bool "Eliminate stat/notify-based device sidechannels"
++	default y if GRKERNSEC_CONFIG_AUTO
++	help
++	  If you say Y here, timing analyses on block or character
++	  devices like /dev/ptmx using stat or inotify/dnotify/fanotify
++	  will be thwarted for unprivileged users.  If a process without
++	  CAP_MKNOD stats such a device, the last access and last modify times
++	  will match the device's create time.  No access or modify events
++	  will be triggered through inotify/dnotify/fanotify for such devices.
++	  This feature will prevent attacks that may at a minimum
++	  allow an attacker to determine the administrator's password length.
++
 +config GRKERNSEC_CHROOT
 +	bool "Chroot jail restrictions"
 +	default y if GRKERNSEC_CONFIG_AUTO
@@ -85048,7 +85679,7 @@ index 0000000..1b9afa9
 +endif
 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
 new file mode 100644
-index 0000000..5b45eec
+index 0000000..5aba5a8
 --- /dev/null
 +++ b/grsecurity/gracl.c
 @@ -0,0 +1,4197 @@
@@ -85432,7 +86063,7 @@ index 0000000..5b45eec
 +struct acl_subject_label *
 +lookup_subject_map(const struct acl_subject_label *userp)
 +{
-+	unsigned int index = shash(userp, subj_map_set.s_size);
++	unsigned int index = gr_shash(userp, subj_map_set.s_size);
 +	struct subject_map *match;
 +
 +	match = subj_map_set.s_hash[index];
@@ -85449,7 +86080,7 @@ index 0000000..5b45eec
 +static void
 +insert_subj_map_entry(struct subject_map *subjmap)
 +{
-+	unsigned int index = shash(subjmap->user, subj_map_set.s_size);
++	unsigned int index = gr_shash(subjmap->user, subj_map_set.s_size);
 +	struct subject_map **curr;
 +
 +	subjmap->prev = NULL;
@@ -85468,7 +86099,7 @@ index 0000000..5b45eec
 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
 +		      const gid_t gid)
 +{
-+	unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
++	unsigned int index = gr_rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
 +	struct acl_role_label *match;
 +	struct role_allowed_ip *ipp;
 +	unsigned int x;
@@ -85491,7 +86122,7 @@ index 0000000..5b45eec
 +found:
 +	if (match == NULL) {
 +	      try_group:
-+		index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
++		index = gr_rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
 +		match = acl_role_set.r_hash[index];
 +
 +		while (match) {
@@ -85537,7 +86168,7 @@ index 0000000..5b45eec
 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
 +		      const struct acl_role_label *role)
 +{
-+	unsigned int index = fhash(ino, dev, role->subj_hash_size);
++	unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
 +	struct acl_subject_label *match;
 +
 +	match = role->subj_hash[index];
@@ -85557,7 +86188,7 @@ index 0000000..5b45eec
 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
 +			  const struct acl_role_label *role)
 +{
-+	unsigned int index = fhash(ino, dev, role->subj_hash_size);
++	unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
 +	struct acl_subject_label *match;
 +
 +	match = role->subj_hash[index];
@@ -85577,7 +86208,7 @@ index 0000000..5b45eec
 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
 +		     const struct acl_subject_label *subj)
 +{
-+	unsigned int index = fhash(ino, dev, subj->obj_hash_size);
++	unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
 +	struct acl_object_label *match;
 +
 +	match = subj->obj_hash[index];
@@ -85597,7 +86228,7 @@ index 0000000..5b45eec
 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
 +		     const struct acl_subject_label *subj)
 +{
-+	unsigned int index = fhash(ino, dev, subj->obj_hash_size);
++	unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
 +	struct acl_object_label *match;
 +
 +	match = subj->obj_hash[index];
@@ -85671,7 +86302,7 @@ index 0000000..5b45eec
 +static struct inodev_entry *
 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
 +{
-+	unsigned int index = fhash(ino, dev, inodev_set.i_size);
++	unsigned int index = gr_fhash(ino, dev, inodev_set.i_size);
 +	struct inodev_entry *match;
 +
 +	match = inodev_set.i_hash[index];
@@ -85685,7 +86316,7 @@ index 0000000..5b45eec
 +static void
 +insert_inodev_entry(struct inodev_entry *entry)
 +{
-+	unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
++	unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
 +				    inodev_set.i_size);
 +	struct inodev_entry **curr;
 +
@@ -85705,7 +86336,7 @@ index 0000000..5b45eec
 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
 +{
 +	unsigned int index =
-+	    rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
++	    gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
 +	struct acl_role_label **curr;
 +	struct acl_role_label *tmp, *tmp2;
 +
@@ -85838,7 +86469,7 @@ index 0000000..5b45eec
 +		     struct acl_subject_label *subj)
 +{
 +	unsigned int index =
-+	    fhash(obj->inode, obj->device, subj->obj_hash_size);
++	    gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
 +	struct acl_object_label **curr;
 +
 +	
@@ -85858,7 +86489,7 @@ index 0000000..5b45eec
 +insert_acl_subj_label(struct acl_subject_label *obj,
 +		      struct acl_role_label *role)
 +{
-+	unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
++	unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
 +	struct acl_subject_label **curr;
 +
 +	obj->prev = NULL;
@@ -87704,7 +88335,7 @@ index 0000000..5b45eec
 +		     const ino_t newinode, const dev_t newdevice,
 +		     struct acl_subject_label *subj)
 +{
-+	unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
++	unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
 +	struct acl_object_label *match;
 +
 +	match = subj->obj_hash[index];
@@ -87743,7 +88374,7 @@ index 0000000..5b45eec
 +		      const ino_t newinode, const dev_t newdevice,
 +		      struct acl_role_label *role)
 +{
-+	unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
++	unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
 +	struct acl_subject_label *match;
 +
 +	match = role->subj_hash[index];
@@ -87781,7 +88412,7 @@ index 0000000..5b45eec
 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
 +		    const ino_t newinode, const dev_t newdevice)
 +{
-+	unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
++	unsigned int index = gr_fhash(oldinode, olddevice, inodev_set.i_size);
 +	struct inodev_entry *match;
 +
 +	match = inodev_set.i_hash[index];
@@ -94123,19 +94754,6 @@ index 0000000..9f7b1ac
 +
 +	return retval;
 +}
-diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
-index 3cd9ccd..fe16d47 100644
---- a/include/acpi/acpi_bus.h
-+++ b/include/acpi/acpi_bus.h
-@@ -107,7 +107,7 @@ struct acpi_device_ops {
- 	acpi_op_bind bind;
- 	acpi_op_unbind unbind;
- 	acpi_op_notify notify;
--};
-+} __no_const;
- 
- #define ACPI_DRIVER_ALL_NOTIFY_EVENTS	0x1	/* system AND device events */
- 
 diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
 index f4906f6..71feb73 100644
 --- a/include/acpi/acpi_drivers.h
@@ -94914,6 +95532,19 @@ index b6e818f..21aa58a 100644
  
  /**
   * PERCPU - define output section for percpu area, simple version
+diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
+index 1ffb53f..b156927 100644
+--- a/include/crypto/algapi.h
++++ b/include/crypto/algapi.h
+@@ -32,7 +32,7 @@ struct crypto_type {
+ 	unsigned int maskclear;
+ 	unsigned int maskset;
+ 	unsigned int tfmsize;
+-};
++} __do_const;
+ 
+ struct crypto_instance {
+ 	struct crypto_alg alg;
 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
 index ebab6a6..351dba1 100644
 --- a/include/drm/drmP.h
@@ -94978,18 +95609,9 @@ index ebab6a6..351dba1 100644
  	uint32_t invalidate_domains;    /* domains pending invalidation */
  	uint32_t flush_domains;         /* domains pending flush */
 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
-index b29e201..3413cc9 100644
+index b29e201..c4825f4 100644
 --- a/include/drm/drm_crtc_helper.h
 +++ b/include/drm/drm_crtc_helper.h
-@@ -64,7 +64,7 @@ struct drm_crtc_helper_funcs {
- 
- 	/* reload the current crtc LUT */
- 	void (*load_lut)(struct drm_crtc *crtc);
--};
-+} __no_const;
- 
- struct drm_encoder_helper_funcs {
- 	void (*dpms)(struct drm_encoder *encoder, int mode);
 @@ -85,7 +85,7 @@ struct drm_encoder_helper_funcs {
  					    struct drm_connector *connector);
  	/* disable encoder when not in use - more explicit than dpms off */
@@ -95570,6 +96192,18 @@ index cb57c30..48078c3 100644
  #endif /* __KERNEL__ */
 -#endif  
 +#endif
+diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
+index 78e9047..ff39f6b 100644
+--- a/include/linux/cdrom.h
++++ b/include/linux/cdrom.h
+@@ -981,7 +981,6 @@ struct cdrom_device_ops {
+ 
+ /* driver specifications */
+ 	const int capability;   /* capability flags */
+-	int n_minors;           /* number of active minor devices */
+ 	/* handle uniform packets for scsi type devices (scsi,atapi) */
+ 	int (*generic_packet) (struct cdrom_device_info *,
+ 			       struct packet_command *);
 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
 index 450fa59..16b904d 100644
 --- a/include/linux/compiler-gcc4.h
@@ -95979,7 +96613,7 @@ index 90a4ed0..d652617 100644
  #endif
  
 diff --git a/include/linux/fs.h b/include/linux/fs.h
-index 860cb6d..9236f46 100644
+index 860cb6d..67c3325 100644
 --- a/include/linux/fs.h
 +++ b/include/linux/fs.h
 @@ -568,41 +568,41 @@ typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
@@ -96149,6 +96783,22 @@ index 860cb6d..9236f46 100644
  extern void bd_forget(struct inode *inode);
  extern void bdput(struct block_device *);
  extern struct block_device *open_by_devnum(dev_t, fmode_t);
+@@ -2489,5 +2491,15 @@ int proc_nr_files(struct ctl_table *table, int write,
+ 
+ int __init get_filesystem_list(char *buf);
+ 
++static inline bool is_sidechannel_device(const struct inode *inode)
++{
++#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
++	umode_t mode = inode->i_mode;
++	return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
++#else
++	return false;
++#endif
++}
++
+ #endif /* __KERNEL__ */
+ #endif /* _LINUX_FS_H */
 diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
 index 78a05bf..2a7d3e1 100644
 --- a/include/linux/fs_struct.h
@@ -96184,18 +96834,30 @@ index 7be0c6f..2f63a2b 100644
  	op->release = release;
  	INIT_LIST_HEAD(&op->pend_link);
  	fscache_set_op_state(op, "Init");
-diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
-index 4d6f47b..00bcedb 100644
---- a/include/linux/fsnotify_backend.h
-+++ b/include/linux/fsnotify_backend.h
-@@ -86,6 +86,7 @@ struct fsnotify_ops {
- 	void (*freeing_mark)(struct fsnotify_mark_entry *entry, struct fsnotify_group *group);
- 	void (*free_event_priv)(struct fsnotify_event_private_data *priv);
- };
-+typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
+diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
+index 936f9aa..d0bd57d 100644
+--- a/include/linux/fsnotify.h
++++ b/include/linux/fsnotify.h
+@@ -188,6 +188,9 @@ static inline void fsnotify_access(struct dentry *dentry)
+ 	struct inode *inode = dentry->d_inode;
+ 	__u32 mask = FS_ACCESS;
+ 
++	if (is_sidechannel_device(inode))
++		return;
++
+ 	if (S_ISDIR(inode->i_mode))
+ 		mask |= FS_IN_ISDIR;
+ 
+@@ -205,6 +208,9 @@ static inline void fsnotify_modify(struct dentry *dentry)
+ 	struct inode *inode = dentry->d_inode;
+ 	__u32 mask = FS_MODIFY;
+ 
++	if (is_sidechannel_device(inode))
++		return;
++
+ 	if (S_ISDIR(inode->i_mode))
+ 		mask |= FS_IN_ISDIR;
  
- /*
-  * A group is a "thing" that wants to receive notification about filesystem
 diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
 index 4ec5e67..42f1eb9 100644
 --- a/include/linux/ftrace_event.h
@@ -96259,7 +96921,7 @@ index 557bdad..b5e8c98 100644
  {
 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
 new file mode 100644
-index 0000000..fc80ba3
+index 0000000..5f646cf
 --- /dev/null
 +++ b/include/linux/gracl.h
 @@ -0,0 +1,320 @@
@@ -96524,25 +97186,25 @@ index 0000000..fc80ba3
 +   Shift/add algorithm with modulus of table size and an XOR*/
 +
 +static __inline__ unsigned int
-+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
++gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
 +{
 +	return ((((uid + type) << (16 + type)) ^ uid) % sz);
 +}
 +
 + static __inline__ unsigned int
-+shash(const struct acl_subject_label *userp, const unsigned int sz)
++gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
 +{
 +	return ((const unsigned long)userp % sz);
 +}
 +
 +static __inline__ unsigned int
-+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
++gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
 +{
 +	return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
 +}
 +
 +static __inline__ unsigned int
-+nhash(const char *name, const __u16 len, const unsigned int sz)
++gr_nhash(const char *name, const __u16 len, const unsigned int sz)
 +{
 +	return full_name_hash((const unsigned char *)name, len) % sz;
 +}
@@ -97381,6 +98043,19 @@ index 4c4e57d..f3c5303 100644
  	struct list_head context_list;	/* list of context id's
  					   and pointers */
  #endif
+diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
+index 90b5fae..a11a48e 100644
+--- a/include/linux/if_pppox.h
++++ b/include/linux/if_pppox.h
+@@ -175,7 +175,7 @@ struct pppox_proto {
+ 	int		(*ioctl)(struct socket *sock, unsigned int cmd,
+ 				 unsigned long arg);
+ 	struct module	*owner;
+-};
++} __do_const;
+ 
+ extern int register_pppox_proto(int proto_num, struct pppox_proto *pp);
+ extern void unregister_pppox_proto(int proto_num);
 diff --git a/include/linux/init.h b/include/linux/init.h
 index ff8bde5..ed08ca7 100644
 --- a/include/linux/init.h
@@ -97488,19 +98163,6 @@ index 21a6f5d..7c7d19f 100644
  	.fs		= &init_fs,					\
  	.files		= &init_files,					\
  	.signal		= &init_signals,				\
-diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
-index 4f0a72a..a849599 100644
---- a/include/linux/intel-iommu.h
-+++ b/include/linux/intel-iommu.h
-@@ -296,7 +296,7 @@ struct iommu_flush {
- 			      u8 fm, u64 type);
- 	void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
- 			    unsigned int size_order, u64 type);
--};
-+} __no_const;
- 
- enum {
- 	SR_DMAR_FECTL_REG,
 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
 index c739150..be577b5 100644
 --- a/include/linux/interrupt.h
@@ -97650,18 +98312,21 @@ index 0546fe7..2a22bc1 100644
  #define request_module_nowait(mod...) __request_module(false, mod)
  #define try_then_request_module(x, mod...) \
 diff --git a/include/linux/kobject.h b/include/linux/kobject.h
-index 58ae8e0..3950d3c 100644
+index 58ae8e0..8ce9617 100644
 --- a/include/linux/kobject.h
 +++ b/include/linux/kobject.h
-@@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
+@@ -106,9 +106,9 @@ extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
  
  struct kobj_type {
  	void (*release)(struct kobject *kobj);
 -	struct sysfs_ops *sysfs_ops;
 +	const struct sysfs_ops *sysfs_ops;
  	struct attribute **default_attrs;
- };
+-};
++} __do_const;
  
+ struct kobj_uevent_env {
+ 	char *envp[UEVENT_NUM_ENVP];
 @@ -118,9 +118,9 @@ struct kobj_uevent_env {
  };
  
@@ -97786,6 +98451,20 @@ index a069916..223edde 100644
  extern int ata_scsi_detect(struct scsi_host_template *sht);
  extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
  extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
+diff --git a/include/linux/list.h b/include/linux/list.h
+index 969f6e9..0327c33 100644
+--- a/include/linux/list.h
++++ b/include/linux/list.h
+@@ -110,6 +110,9 @@ static inline void list_del(struct list_head *entry)
+ extern void list_del(struct list_head *entry);
+ #endif
+ 
++extern void pax_list_add_tail(struct list_head *new, struct list_head *head);
++extern void pax_list_del(struct list_head *entry);
++
+ /**
+  * list_replace - replace old entry by new one
+  * @old : the element to be replaced
 diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
 index fbc48f8..0886e57 100644
 --- a/include/linux/lockd/bind.h
@@ -97850,19 +98529,6 @@ index 3797270..7765ede 100644
  
  struct mca_bus {
  	u64			default_dma_mask;
-diff --git a/include/linux/memory.h b/include/linux/memory.h
-index 37fa19b..b597c85 100644
---- a/include/linux/memory.h
-+++ b/include/linux/memory.h
-@@ -108,7 +108,7 @@ struct memory_accessor {
- 			size_t count);
- 	ssize_t (*write)(struct memory_accessor *, const char *buf,
- 			 off_t offset, size_t count);
--};
-+} __no_const;
- 
- /*
-  * Kernel text modification mutex, used for code patching. Users of this lock
 diff --git a/include/linux/mm.h b/include/linux/mm.h
 index 11e5be6..8ff8c91 100644
 --- a/include/linux/mm.h
@@ -98049,6 +98715,28 @@ index 9d12ed5..9d9dab3 100644
  };
  
  /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
+diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
+index 97491f7..2265523 100644
+--- a/include/linux/mmiotrace.h
++++ b/include/linux/mmiotrace.h
+@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
+ /* Called from ioremap.c */
+ extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
+ 							void __iomem *addr);
+-extern void mmiotrace_iounmap(volatile void __iomem *addr);
++extern void mmiotrace_iounmap(const volatile void __iomem *addr);
+ 
+ /* For anyone to insert markers. Remember trailing newline. */
+ extern int mmiotrace_printk(const char *fmt, ...)
+@@ -67,7 +67,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
+ {
+ }
+ 
+-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
++static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
+ {
+ }
+ 
 diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
 index 4e02ee2..afb159e 100644
 --- a/include/linux/mmu_notifier.h
@@ -98105,10 +98793,10 @@ index f58e9d83..3503935 100644
  struct hid_device_id {
  	__u16 bus;
 diff --git a/include/linux/module.h b/include/linux/module.h
-index 482efc8..642032b 100644
+index 482efc8..4ba6781 100644
 --- a/include/linux/module.h
 +++ b/include/linux/module.h
-@@ -16,6 +16,7 @@
+@@ -16,9 +16,11 @@
  #include <linux/kobject.h>
  #include <linux/moduleparam.h>
  #include <linux/tracepoint.h>
@@ -98116,7 +98804,11 @@ index 482efc8..642032b 100644
  
  #include <asm/local.h>
  #include <asm/module.h>
-@@ -287,16 +288,16 @@ struct module
++#include <asm/pgtable.h>
+ 
+ #include <trace/events/module.h>
+ 
+@@ -287,16 +289,16 @@ struct module
  	int (*init)(void);
  
  	/* If this is non-NULL, vfree after init() returns */
@@ -98137,7 +98829,7 @@ index 482efc8..642032b 100644
  
  	/* Arch-specific module values */
  	struct mod_arch_specific arch;
-@@ -345,6 +346,10 @@ struct module
+@@ -345,6 +347,10 @@ struct module
  #ifdef CONFIG_EVENT_TRACING
  	struct ftrace_event_call *trace_events;
  	unsigned int num_trace_events;
@@ -98148,7 +98840,7 @@ index 482efc8..642032b 100644
  #endif
  #ifdef CONFIG_FTRACE_MCOUNT_RECORD
  	unsigned long *ftrace_callsites;
-@@ -393,16 +398,46 @@ struct module *__module_address(unsigned long addr);
+@@ -393,16 +399,46 @@ struct module *__module_address(unsigned long addr);
  bool is_module_address(unsigned long addr);
  bool is_module_text_address(unsigned long addr);
  
@@ -98309,6 +99001,19 @@ index 9d7e8f7..04428c5 100644
  
  /*
   *	The DEVICE structure.
+diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
+index 9f00da2..8304ebc 100644
+--- a/include/linux/netfilter/nfnetlink.h
++++ b/include/linux/netfilter/nfnetlink.h
+@@ -62,7 +62,7 @@ struct nfnl_callback
+ 		    const struct nlattr * const cda[]);
+ 	const struct nla_policy *policy;	/* netlink attribute policy */
+ 	const u_int16_t attr_count;		/* number of nlattr's */
+-};
++} __do_const;
+ 
+ struct nfnetlink_subsystem
+ {
 diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
 new file mode 100644
 index 0000000..33f4af8
@@ -98344,6 +99049,20 @@ index b359c4a..c08b334 100644
  })
  
  #define num_online_nodes()	num_node_state(N_ONLINE)
+diff --git a/include/linux/notifier.h b/include/linux/notifier.h
+index 5ecdb50..7b98d41 100644
+--- a/include/linux/notifier.h
++++ b/include/linux/notifier.h
+@@ -51,7 +51,8 @@ struct notifier_block {
+ 	int (*notifier_call)(struct notifier_block *, unsigned long, void *);
+ 	struct notifier_block *next;
+ 	int priority;
+-};
++} __do_const;
++typedef struct notifier_block __no_const notifier_block_no_const;
+ 
+ struct atomic_notifier_head {
+ 	spinlock_t lock;
 diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
 index 5171639..7cf4235 100644
 --- a/include/linux/oprofile.h
@@ -98493,19 +99212,6 @@ index 4f71bf4..cd2f68e 100644
  
  void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock);
  
-diff --git a/include/linux/preempt.h b/include/linux/preempt.h
-index 72b1a10..13303a9 100644
---- a/include/linux/preempt.h
-+++ b/include/linux/preempt.h
-@@ -110,7 +110,7 @@ struct preempt_ops {
- 	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
- 	void (*sched_out)(struct preempt_notifier *notifier,
- 			  struct task_struct *next);
--};
-+} __no_const;
- 
- /**
-  * preempt_notifier - key for installing preemption notifiers
 diff --git a/include/linux/prefetch.h b/include/linux/prefetch.h
 index af7c36a..a93005c 100644
 --- a/include/linux/prefetch.h
@@ -98519,7 +99225,7 @@ index af7c36a..a93005c 100644
  #include <asm/cache.h>
  
 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
-index 379eaed..1bf73e3 100644
+index 379eaed..3471a57 100644
 --- a/include/linux/proc_fs.h
 +++ b/include/linux/proc_fs.h
 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
@@ -98542,15 +99248,6 @@ index 379eaed..1bf73e3 100644
  static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
  	mode_t mode, struct proc_dir_entry *base, 
  	read_proc_t *read_proc, void * data)
-@@ -256,7 +269,7 @@ union proc_op {
- 	int (*proc_show)(struct seq_file *m,
- 		struct pid_namespace *ns, struct pid *pid,
- 		struct task_struct *task);
--};
-+} __no_const;
- 
- struct ctl_table_header;
- struct ctl_table;
 diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
 index 7456d7d..6c1cfc9 100644
 --- a/include/linux/ptrace.h
@@ -98569,21 +99266,10 @@ index 7456d7d..6c1cfc9 100644
  static inline int ptrace_reparented(struct task_struct *child)
  {
 diff --git a/include/linux/random.h b/include/linux/random.h
-index 1864957..b2ba8bf 100644
+index 1864957..491144d 100644
 --- a/include/linux/random.h
 +++ b/include/linux/random.h
-@@ -49,6 +49,10 @@ extern void add_input_randomness(unsigned int type, unsigned int code,
- 				 unsigned int value);
- extern void add_interrupt_randomness(int irq, int irq_flags);
- 
-+#ifdef CONFIG_PAX_LATENT_ENTROPY
-+extern void transfer_latent_entropy(void);
-+#endif
-+
- extern void get_random_bytes(void *buf, int nbytes);
- extern void get_random_bytes_arch(void *buf, int nbytes);
- void generate_random_uuid(unsigned char uuid_out[16]);
-@@ -76,6 +80,11 @@ static inline int arch_get_random_int(unsigned int *v)
+@@ -76,6 +76,11 @@ static inline int arch_get_random_int(unsigned int *v)
  }
  #endif
  
@@ -98634,6 +99320,19 @@ index 988e55f..17cb4ef 100644
  #include <asm/emergency-restart.h>
  
  #endif
+diff --git a/include/linux/regset.h b/include/linux/regset.h
+index 686f373..a72ed78 100644
+--- a/include/linux/regset.h
++++ b/include/linux/regset.h
+@@ -160,7 +160,7 @@ struct user_regset {
+ 	unsigned int 			align;
+ 	unsigned int 			bias;
+ 	unsigned int 			core_note_type;
+-};
++} __do_const;
+ 
+ /**
+  * struct user_regset_view - available regsets
 diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
 index dd31e7b..5b03c5c 100644
 --- a/include/linux/reiserfs_fs.h
@@ -98710,20 +99409,8 @@ index 14a86bc..17d0700 100644
  
  /*
   * CONFIG_RELAY kernel API, kernel/relay.c
-diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
-index 3392c59..a746428 100644
---- a/include/linux/rfkill.h
-+++ b/include/linux/rfkill.h
-@@ -144,6 +144,7 @@ struct rfkill_ops {
- 	void	(*query)(struct rfkill *rfkill, void *data);
- 	int	(*set_block)(void *data, bool blocked);
- };
-+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
- 
- #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
- /**
 diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 71849bf..42936d2 100644
+index 71849bf..fb7ea50 100644
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
 @@ -101,6 +101,7 @@ struct bio;
@@ -98743,7 +99430,7 @@ index 71849bf..42936d2 100644
  
  struct nsproxy;
  struct user_namespace;
-@@ -371,9 +372,12 @@ struct user_namespace;
+@@ -371,9 +372,21 @@ struct user_namespace;
  #define DEFAULT_MAX_MAP_COUNT	(USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
  
  extern int sysctl_max_map_count;
@@ -98751,12 +99438,21 @@ index 71849bf..42936d2 100644
  
  #include <linux/aio.h>
  
-+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
-+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
++#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
++extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
++#else
++static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
++{
++	return 0;
++}
++#endif
++
++extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
++extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
  extern unsigned long
  arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
  		       unsigned long, unsigned long);
-@@ -666,7 +670,20 @@ struct signal_struct {
+@@ -666,7 +679,20 @@ struct signal_struct {
  	struct tty_audit_buf *tty_audit_buf;
  #endif
  
@@ -98777,7 +99473,7 @@ index 71849bf..42936d2 100644
  };
  
  /* Context switch must be unlocked if interrupts are to be enabled */
-@@ -723,6 +740,11 @@ struct user_struct {
+@@ -723,6 +749,11 @@ struct user_struct {
  	struct key *session_keyring;	/* UID's default session keyring */
  #endif
  
@@ -98789,7 +99485,7 @@ index 71849bf..42936d2 100644
  	/* Hash table maintenance information */
  	struct hlist_node uidhash_node;
  	uid_t uid;
-@@ -1328,8 +1350,8 @@ struct task_struct {
+@@ -1328,8 +1359,8 @@ struct task_struct {
  	struct list_head thread_group;
  
  	struct completion *vfork_done;		/* for vfork() */
@@ -98800,7 +99496,7 @@ index 71849bf..42936d2 100644
  
  	cputime_t utime, stime, utimescaled, stimescaled;
  	cputime_t gtime;
-@@ -1343,16 +1365,6 @@ struct task_struct {
+@@ -1343,16 +1374,6 @@ struct task_struct {
  	struct task_cputime cputime_expires;
  	struct list_head cpu_timers[3];
  
@@ -98817,7 +99513,7 @@ index 71849bf..42936d2 100644
  	char comm[TASK_COMM_LEN]; /* executable name excluding path
  				     - access with [gs]et_task_comm (which lock
  				       it with task_lock())
-@@ -1369,6 +1381,10 @@ struct task_struct {
+@@ -1369,6 +1390,10 @@ struct task_struct {
  #endif
  /* CPU-specific state of this task */
  	struct thread_struct thread;
@@ -98828,7 +99524,7 @@ index 71849bf..42936d2 100644
  /* filesystem information */
  	struct fs_struct *fs;
  /* open file information */
-@@ -1436,6 +1452,12 @@ struct task_struct {
+@@ -1436,6 +1461,12 @@ struct task_struct {
  	int hardirq_context;
  	int softirq_context;
  #endif
@@ -98841,7 +99537,7 @@ index 71849bf..42936d2 100644
  #ifdef CONFIG_LOCKDEP
  # define MAX_LOCK_DEPTH 48UL
  	u64 curr_chain_key;
-@@ -1456,6 +1478,9 @@ struct task_struct {
+@@ -1456,6 +1487,9 @@ struct task_struct {
  
  	struct backing_dev_info *backing_dev_info;
  
@@ -98851,7 +99547,7 @@ index 71849bf..42936d2 100644
  	struct io_context *io_context;
  
  	unsigned long ptrace_message;
-@@ -1519,6 +1544,28 @@ struct task_struct {
+@@ -1519,6 +1553,28 @@ struct task_struct {
  	unsigned long default_timer_slack_ns;
  
  	struct list_head	*scm_work_list;
@@ -98880,7 +99576,7 @@ index 71849bf..42936d2 100644
  #ifdef CONFIG_FUNCTION_GRAPH_TRACER
  	/* Index of current stored adress in ret_stack */
  	int curr_ret_stack;
-@@ -1542,6 +1589,57 @@ struct task_struct {
+@@ -1542,6 +1598,57 @@ struct task_struct {
  #endif /* CONFIG_TRACING */
  };
  
@@ -98938,7 +99634,7 @@ index 71849bf..42936d2 100644
  /* Future-safe accessor for struct task_struct's cpus_allowed. */
  #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
  
-@@ -1740,7 +1838,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
+@@ -1740,7 +1847,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
  #define PF_DUMPCORE	0x00000200	/* dumped core */
  #define PF_SIGNALED	0x00000400	/* killed by a signal */
  #define PF_MEMALLOC	0x00000800	/* Allocating memory */
@@ -98947,7 +99643,7 @@ index 71849bf..42936d2 100644
  #define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
  #define PF_FREEZING	0x00004000	/* freeze in progress. do not account to load */
  #define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
-@@ -1978,7 +2076,9 @@ void yield(void);
+@@ -1978,7 +2085,9 @@ void yield(void);
  extern struct exec_domain	default_exec_domain;
  
  union thread_union {
@@ -98957,7 +99653,7 @@ index 71849bf..42936d2 100644
  	unsigned long stack[THREAD_SIZE/sizeof(long)];
  };
  
-@@ -2011,6 +2111,7 @@ extern struct pid_namespace init_pid_ns;
+@@ -2011,6 +2120,7 @@ extern struct pid_namespace init_pid_ns;
   */
  
  extern struct task_struct *find_task_by_vpid(pid_t nr);
@@ -98965,7 +99661,7 @@ index 71849bf..42936d2 100644
  extern struct task_struct *find_task_by_pid_ns(pid_t nr,
  		struct pid_namespace *ns);
  
-@@ -2155,7 +2256,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
+@@ -2155,7 +2265,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
  extern void exit_itimers(struct signal_struct *);
  extern void flush_itimer_signals(void);
  
@@ -98974,7 +99670,7 @@ index 71849bf..42936d2 100644
  
  extern void daemonize(const char *, ...);
  extern int allow_signal(int);
-@@ -2284,9 +2385,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
+@@ -2284,9 +2394,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
  
  #endif
  
@@ -98986,7 +99682,7 @@ index 71849bf..42936d2 100644
  
  	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
  }
-@@ -2616,6 +2717,23 @@ static inline unsigned long rlimit_max(unsigned int limit)
+@@ -2616,6 +2726,23 @@ static inline unsigned long rlimit_max(unsigned int limit)
  	return task_rlimit_max(current, limit);
  }
  
@@ -99210,7 +99906,7 @@ index 4e647bb..23b3911 100644
  					       int size);
  extern int	       skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
 diff --git a/include/linux/slab.h b/include/linux/slab.h
-index 2da8372..a462292 100644
+index 2da8372..45c2b89 100644
 --- a/include/linux/slab.h
 +++ b/include/linux/slab.h
 @@ -11,12 +11,20 @@
@@ -99255,7 +99951,7 @@ index 2da8372..a462292 100644
  void kfree(const void *);
  void kzfree(const void *);
  size_t ksize(const void *);
-+const char *check_heap_object(const void *ptr, unsigned long n, bool to);
++const char *check_heap_object(const void *ptr, unsigned long n);
 +bool is_usercopy_object(const void *ptr);
  
  /*
@@ -99753,6 +100449,19 @@ index 0000000..3891139
 +int do_syslog(int type, char __user *buf, int count, bool from_file);
 +
 +#endif /* _LINUX_SYSLOG_H */
+diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
+index 99adcdc..09207eb 100644
+--- a/include/linux/sysrq.h
++++ b/include/linux/sysrq.h
+@@ -35,7 +35,7 @@ struct sysrq_key_op {
+ 	char *help_msg;
+ 	char *action_msg;
+ 	int enable_mask;
+-};
++} __do_const;
+ 
+ #ifdef CONFIG_MAGIC_SYSRQ
+ 
 diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
 index a8cc4e1..98d3b85 100644
 --- a/include/linux/thread_info.h
@@ -99825,6 +100534,19 @@ index e9c57e9..ee6d489 100644
  /* n_tty.c */
  extern struct tty_ldisc_ops tty_ldisc_N_TTY;
  
+diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
+index db2d227..ea28ae8 100644
+--- a/include/linux/tty_driver.h
++++ b/include/linux/tty_driver.h
+@@ -283,7 +283,7 @@ struct tty_operations {
+ 	void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
+ #endif
+ 	const struct file_operations *proc_fops;
+-};
++} __do_const;
+ 
+ struct tty_driver {
+ 	int	magic;		/* magic number for this structure */
 diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
 index 0c4ee9b..9f7c426 100644
 --- a/include/linux/tty_ldisc.h
@@ -100112,21 +100834,8 @@ index 5c84af8..1a3b6e2 100644
  struct inode;
  struct dentry;
  
-diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
-index eed5fcc..5080d24 100644
---- a/include/media/saa7146_vv.h
-+++ b/include/media/saa7146_vv.h
-@@ -167,7 +167,7 @@ struct saa7146_ext_vv
- 	int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
- 
- 	/* the extension can override this */
--	struct v4l2_ioctl_ops ops;
-+	v4l2_ioctl_ops_no_const ops;
- 	/* pointer to the saa7146 core ops */
- 	const struct v4l2_ioctl_ops *core_ops;
- 
 diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
-index 73c9867..2da8837 100644
+index 73c9867..14cc2cd 100644
 --- a/include/media/v4l2-dev.h
 +++ b/include/media/v4l2-dev.h
 @@ -34,7 +34,7 @@ struct v4l2_device;
@@ -100138,14 +100847,6 @@ index 73c9867..2da8837 100644
  	ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
  	ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
  	unsigned int (*poll) (struct file *, struct poll_table_struct *);
-@@ -46,6 +46,7 @@ struct v4l2_file_operations {
- 	int (*open) (struct file *);
- 	int (*release) (struct file *);
- };
-+typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
- 
- /*
-  * Newer version of video_device, handled by videodev2.c
 diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
 index 5d5d550..f559ef1 100644
 --- a/include/media/v4l2-device.h
@@ -100159,18 +100860,6 @@ index 5d5d550..f559ef1 100644
  
  /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
     Since the parent disappears this ensures that v4l2_dev doesn't have an
-diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
-index 7a4529d..7244290 100644
---- a/include/media/v4l2-ioctl.h
-+++ b/include/media/v4l2-ioctl.h
-@@ -243,6 +243,7 @@ struct v4l2_ioctl_ops {
- 	long (*vidioc_default)	       (struct file *file, void *fh,
- 					int cmd, void *arg);
- };
-+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
- 
- 
- /* v4l debugging and diagnostics */
 diff --git a/include/net/flow.h b/include/net/flow.h
 index 809970b..c3df4f3 100644
 --- a/include/net/flow.h
@@ -100184,6 +100873,19 @@ index 809970b..c3df4f3 100644
  
  static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
  {
+diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
+index 696d6e4..9259a0f 100644
+--- a/include/net/inet_connection_sock.h
++++ b/include/net/inet_connection_sock.h
+@@ -62,7 +62,7 @@ struct inet_connection_sock_af_ops {
+ 	void	    (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
+ 	int	    (*bind_conflict)(const struct sock *sk,
+ 				     const struct inet_bind_bucket *tb);
+-};
++} __do_const;
+ 
+ /** inet_connection_sock - INET connection oriented sock
+  *
 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
 index 15e1f8fe..668837c 100644
 --- a/include/net/inetpeer.h
@@ -100219,19 +100921,6 @@ index 98978e7..2243a3d 100644
  	atomic_t		weight;		/* server weight */
  
  	atomic_t		refcnt;		/* reference counter */
-diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
-index 69b610a..fe3962c 100644
---- a/include/net/irda/ircomm_core.h
-+++ b/include/net/irda/ircomm_core.h
-@@ -51,7 +51,7 @@ typedef struct {
- 	int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
- 	int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *, 
- 				  struct ircomm_info *);	
--} call_t;
-+} __no_const call_t;
- 
- struct ircomm_cb {
- 	irda_queue_t queue;
 diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
 index eea2e61..08c692d 100644
 --- a/include/net/irda/ircomm_tty.h
@@ -100307,6 +100996,19 @@ index 8ba8ce2..99b7fff 100644
  		struct sk_buff *skb, int offset, struct iovec *to,
  		size_t len, struct dma_pinned_list *pinned_list);
  
+diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
+index 252fd10..aa1421f 100644
+--- a/include/net/netfilter/nf_queue.h
++++ b/include/net/netfilter/nf_queue.h
+@@ -22,7 +22,7 @@ struct nf_queue_handler {
+ 	int			(*outfn)(struct nf_queue_entry *entry,
+ 					 unsigned int queuenum);
+ 	char			*name;
+-};
++} __do_const;
+ 
+ extern int nf_register_queue_handler(u_int8_t pf,
+ 				     const struct nf_queue_handler *qh);
 diff --git a/include/net/netlink.h b/include/net/netlink.h
 index c344646..4778c71 100644
 --- a/include/net/netlink.h
@@ -100342,6 +101044,28 @@ index 9a4b8b7..e49e077 100644
  
  #ifdef CONFIG_IP_MROUTE
  	struct sock		*mroute_sk;
+diff --git a/include/net/protocol.h b/include/net/protocol.h
+index 60249e5..f9c3724 100644
+--- a/include/net/protocol.h
++++ b/include/net/protocol.h
+@@ -44,7 +44,7 @@ struct net_protocol {
+ 	int			(*gro_complete)(struct sk_buff *skb);
+ 	unsigned int		no_policy:1,
+ 				netns_ok:1;
+-};
++} __do_const;
+ 
+ #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+ struct inet6_protocol 
+@@ -64,7 +64,7 @@ struct inet6_protocol
+ 	int	(*gro_complete)(struct sk_buff *skb);
+ 
+ 	unsigned int	flags;	/* INET6_PROTO_xxx */
+-};
++} __do_const;
+ 
+ #define INET6_PROTO_NOPOLICY	0x1
+ #define INET6_PROTO_FINAL	0x2
 diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
 index 8a6d529..171f401 100644
 --- a/include/net/sctp/sctp.h
@@ -100357,6 +101081,28 @@ index 8a6d529..171f401 100644
  #define SCTP_ENABLE_DEBUG
  #define SCTP_DISABLE_DEBUG
  #define SCTP_ASSERT(expr, str, func)
+diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
+index 88daa54..9013116 100644
+--- a/include/net/sctp/structs.h
++++ b/include/net/sctp/structs.h
+@@ -608,7 +608,7 @@ struct sctp_af {
+ 	int		sockaddr_len;
+ 	sa_family_t	sa_family;
+ 	struct list_head list;
+-};
++} __do_const;
+ 
+ struct sctp_af *sctp_get_af_specific(sa_family_t);
+ int sctp_register_af(struct sctp_af *);
+@@ -628,7 +628,7 @@ struct sctp_pf {
+ 					  struct sctp_association *asoc);
+ 	void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
+ 	struct sctp_af *af;
+-};
++} __do_const;
+ 
+ 
+ /* Structure to track chunk fragments that have been acked, but peer
 diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
 index d97f689..f3b90ab 100644
 --- a/include/net/secure_seq.h
@@ -100412,7 +101158,7 @@ index 78adf52..32bb160 100644
  static inline struct page *sk_stream_alloc_page(struct sock *sk)
  {
 diff --git a/include/net/tcp.h b/include/net/tcp.h
-index 6cfe18b..24c05d6 100644
+index 6cfe18b..5f08ff0 100644
 --- a/include/net/tcp.h
 +++ b/include/net/tcp.h
 @@ -237,6 +237,7 @@ extern int sysctl_tcp_base_mss;
@@ -100452,32 +101198,19 @@ index 6cfe18b..24c05d6 100644
  };
  
  #define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
-@@ -1444,8 +1445,8 @@ enum tcp_seq_states {
- struct tcp_seq_afinfo {
- 	char			*name;
- 	sa_family_t		family;
--	struct file_operations	seq_fops;
--	struct seq_operations	seq_ops;
-+	file_operations_no_const	seq_fops;
-+	seq_operations_no_const	seq_ops;
- };
- 
- struct tcp_iter_state {
-diff --git a/include/net/udp.h b/include/net/udp.h
-index f98abd2..b4b042f 100644
---- a/include/net/udp.h
-+++ b/include/net/udp.h
-@@ -187,8 +187,8 @@ struct udp_seq_afinfo {
- 	char			*name;
- 	sa_family_t		family;
- 	struct udp_table	*udp_table;
--	struct file_operations	seq_fops;
--	struct seq_operations	seq_ops;
-+	file_operations_no_const	seq_fops;
-+	seq_operations_no_const	seq_ops;
- };
+diff --git a/include/net/xfrm.h b/include/net/xfrm.h
+index 223e90a..d652d35 100644
+--- a/include/net/xfrm.h
++++ b/include/net/xfrm.h
+@@ -389,7 +389,7 @@ struct xfrm_mode {
+ 	struct module *owner;
+ 	unsigned int encap;
+ 	int flags;
+-};
++} __do_const;
  
- struct udp_iter_state {
+ /* Flags for xfrm_mode. */
+ enum {
 diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
 index cbb822e..e9c1cbe 100644
 --- a/include/rdma/iw_cm.h
@@ -100595,70 +101328,6 @@ index 3dae3f7..8440d6f 100644
  	void *private_data;
  	void (*private_free) (struct snd_ac97 *ac97);
  	/* --- */
-diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
-index 891cf1a..a94ba2b 100644
---- a/include/sound/ak4xxx-adda.h
-+++ b/include/sound/ak4xxx-adda.h
-@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
- 	void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
- 		      unsigned char val);
- 	void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
--};
-+} __no_const;
- 
- #define AK4XXX_IMAGE_SIZE	(AK4XXX_MAX_CHIPS * 16)	/* 64 bytes */
- 
-diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
-index 8c05e47..2b5df97 100644
---- a/include/sound/hwdep.h
-+++ b/include/sound/hwdep.h
-@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
- 			  struct snd_hwdep_dsp_status *status);
- 	int (*dsp_load)(struct snd_hwdep *hw,
- 			struct snd_hwdep_dsp_image *image);
--};
-+} __no_const;
- 
- struct snd_hwdep {
- 	struct snd_card *card;
-diff --git a/include/sound/info.h b/include/sound/info.h
-index 112e894..6fda5b5 100644
---- a/include/sound/info.h
-+++ b/include/sound/info.h
-@@ -44,7 +44,7 @@ struct snd_info_entry_text {
- 		     struct snd_info_buffer *buffer);
- 	void (*write)(struct snd_info_entry *entry,
- 		      struct snd_info_buffer *buffer);
--};
-+} __no_const;
- 
- struct snd_info_entry_ops {
- 	int (*open)(struct snd_info_entry *entry,
-diff --git a/include/sound/pcm.h b/include/sound/pcm.h
-index de6d981..590a550 100644
---- a/include/sound/pcm.h
-+++ b/include/sound/pcm.h
-@@ -80,6 +80,7 @@ struct snd_pcm_ops {
- 	int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
- 	int (*ack)(struct snd_pcm_substream *substream);
- };
-+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
- 
- /*
-  *
-diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
-index 736eac7..fe8a80f 100644
---- a/include/sound/sb16_csp.h
-+++ b/include/sound/sb16_csp.h
-@@ -139,7 +139,7 @@ struct snd_sb_csp_ops {
- 	int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
- 	int (*csp_stop) (struct snd_sb_csp * p);
- 	int (*csp_qsound_transfer) (struct snd_sb_csp * p);
--};
-+} __no_const;
- 
- /*
-  * CSP private data
 diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
 index 444cd6b..3327cc5 100644
 --- a/include/sound/ymfpci.h
@@ -101117,7 +101786,7 @@ index 1fd59b8..a01b079 100644
  	next_state = Reset;
  	return 0;
 diff --git a/init/main.c b/init/main.c
-index 1eb4bd5..a2cb48e 100644
+index 1eb4bd5..24fe9c2 100644
 --- a/init/main.c
 +++ b/init/main.c
 @@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void) { }
@@ -101190,7 +101859,7 @@ index 1eb4bd5..a2cb48e 100644
  static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
  char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
  static const char *panic_later, *panic_param;
-@@ -705,52 +761,53 @@ int initcall_debug;
+@@ -705,64 +761,75 @@ int initcall_debug;
  core_param(initcall_debug, initcall_debug, bool, 0644);
  
  static char msgbuf[64];
@@ -101260,7 +101929,13 @@ index 1eb4bd5..a2cb48e 100644
  }
  
  
-@@ -760,9 +817,15 @@ static void __init do_initcalls(void)
+ extern initcall_t __initcall_start[], __initcall_end[], __early_initcall_end[];
+ 
++#ifdef CONFIG_PAX_LATENT_ENTROPY
++u64 latent_entropy;
++#endif
++
+ static void __init do_initcalls(void)
  {
  	initcall_t *call;
  
@@ -101269,7 +101944,7 @@ index 1eb4bd5..a2cb48e 100644
  		do_one_initcall(*call);
  
 +#ifdef CONFIG_PAX_LATENT_ENTROPY
-+		transfer_latent_entropy();
++		add_device_randomness(&latent_entropy, sizeof(latent_entropy));
 +#endif
 +
 +	}
@@ -101277,7 +101952,7 @@ index 1eb4bd5..a2cb48e 100644
  	/* Make sure there is no pending stuff from the initcall sequence */
  	flush_scheduled_work();
  }
-@@ -790,8 +853,14 @@ static void __init do_pre_smp_initcalls(void)
+@@ -790,8 +857,14 @@ static void __init do_pre_smp_initcalls(void)
  {
  	initcall_t *call;
  
@@ -101286,14 +101961,14 @@ index 1eb4bd5..a2cb48e 100644
  		do_one_initcall(*call);
 +
 +#ifdef CONFIG_PAX_LATENT_ENTROPY
-+		transfer_latent_entropy();
++		add_device_randomness(&latent_entropy, sizeof(latent_entropy));
 +#endif
 +
 +	}
  }
  
  static void run_init_process(char *init_filename)
-@@ -893,11 +962,13 @@ static int __init kernel_init(void * unused)
+@@ -893,11 +966,13 @@ static int __init kernel_init(void * unused)
  	if (!ramdisk_execute_command)
  		ramdisk_execute_command = "/init";
  
@@ -104160,6 +104835,62 @@ index 67578ca..4115fbf 100644
  }
  
  static inline void mutex_clear_owner(struct mutex *lock)
+diff --git a/kernel/notifier.c b/kernel/notifier.c
+index 61d5aa5..451d460 100644
+--- a/kernel/notifier.c
++++ b/kernel/notifier.c
+@@ -5,6 +5,7 @@
+ #include <linux/rcupdate.h>
+ #include <linux/vmalloc.h>
+ #include <linux/reboot.h>
++#include <linux/mm.h>
+ 
+ /*
+  *	Notifier list for kernel code which wants to be called
+@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
+ 	while ((*nl) != NULL) {
+ 		if (n->priority > (*nl)->priority)
+ 			break;
+-		nl = &((*nl)->next);
++		nl = (struct notifier_block **)&((*nl)->next);
+ 	}
+-	n->next = *nl;
++	pax_open_kernel();
++	*(const void **)&n->next = *nl;
+ 	rcu_assign_pointer(*nl, n);
++	pax_close_kernel();
+ 	return 0;
+ }
+ 
+@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
+ 			return 0;
+ 		if (n->priority > (*nl)->priority)
+ 			break;
+-		nl = &((*nl)->next);
++		nl = (struct notifier_block **)&((*nl)->next);
+ 	}
+-	n->next = *nl;
++	pax_open_kernel();
++	*(const void **)&n->next = *nl;
+ 	rcu_assign_pointer(*nl, n);
++	pax_close_kernel();
+ 	return 0;
+ }
+ 
+@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
+ {
+ 	while ((*nl) != NULL) {
+ 		if ((*nl) == n) {
++			pax_open_kernel();
+ 			rcu_assign_pointer(*nl, n->next);
++			pax_close_kernel();
+ 			return 0;
+ 		}
+-		nl = &((*nl)->next);
++		nl = (struct notifier_block **)&((*nl)->next);
+ 	}
+ 	return -ENOENT;
+ }
 diff --git a/kernel/panic.c b/kernel/panic.c
 index 96b45d0..98fb1c3 100644
 --- a/kernel/panic.c
@@ -106489,7 +107220,7 @@ index ee5681f..862e921 100644
  		return -ENOMEM;
  	return 0;
 diff --git a/kernel/timer.c b/kernel/timer.c
-index cb3c1f1..8bf5526 100644
+index cb3c1f1..e643008 100644
 --- a/kernel/timer.c
 +++ b/kernel/timer.c
 @@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
@@ -106501,6 +107232,15 @@ index cb3c1f1..8bf5526 100644
  {
  	struct tvec_base *base = __get_cpu_var(tvec_bases);
  
+@@ -1645,7 +1645,7 @@ static int __cpuinit timer_cpu_notify(struct notifier_block *self,
+ 	return NOTIFY_OK;
+ }
+ 
+-static struct notifier_block __cpuinitdata timers_nb = {
++static struct notifier_block __cpuinitconst timers_nb = {
+ 	.notifier_call	= timer_cpu_notify,
+ };
+ 
 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
 index d9d6206..f19467e 100644
 --- a/kernel/trace/blktrace.c
@@ -106533,7 +107273,7 @@ index d9d6206..f19467e 100644
  	ret = -EIO;
  	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
-index 4872937..c794d40 100644
+index 4872937..ec96a13 100644
 --- a/kernel/trace/ftrace.c
 +++ b/kernel/trace/ftrace.c
 @@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
@@ -106557,6 +107297,34 @@ index 4872937..c794d40 100644
  }
  
  /*
+@@ -3068,8 +3073,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ 
+ static int ftrace_graph_active;
+-static struct notifier_block ftrace_suspend_notifier;
+-
+ int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
+ {
+ 	return 0;
+@@ -3213,6 +3216,10 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
+ 	return NOTIFY_DONE;
+ }
+ 
++static struct notifier_block ftrace_suspend_notifier = {
++	.notifier_call = ftrace_suspend_notifier_call
++};
++
+ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+ 			trace_func_graph_ent_t entryfunc)
+ {
+@@ -3226,7 +3233,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+ 		goto out;
+ 	}
+ 
+-	ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
+ 	register_pm_notifier(&ftrace_suspend_notifier);
+ 
+ 	ftrace_graph_active++;
 diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
 index e749a05..029a15b 100644
 --- a/kernel/trace/ring_buffer.c
@@ -106978,7 +107746,7 @@ index 0acd834..b800b56 100644
  	}
  	entry	= ring_buffer_event_data(event);
 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
-index b6c12c6..41fdc53 100644
+index b6c12c6..e61376e 100644
 --- a/kernel/trace/trace_output.c
 +++ b/kernel/trace/trace_output.c
 @@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
@@ -106990,6 +107758,27 @@ index b6c12c6..41fdc53 100644
  		if (p) {
  			s->len = p - s->buffer;
  			return 1;
+@@ -711,14 +711,16 @@ int register_ftrace_event(struct trace_event *event)
+ 			goto out;
+ 	}
+ 
++	pax_open_kernel();
+ 	if (event->trace == NULL)
+-		event->trace = trace_nop_print;
++		*(void **)&event->trace = trace_nop_print;
+ 	if (event->raw == NULL)
+-		event->raw = trace_nop_print;
++		*(void **)&event->raw = trace_nop_print;
+ 	if (event->hex == NULL)
+-		event->hex = trace_nop_print;
++		*(void **)&event->hex = trace_nop_print;
+ 	if (event->binary == NULL)
+-		event->binary = trace_nop_print;
++		*(void **)&event->binary = trace_nop_print;
++	pax_close_kernel();
+ 
+ 	key = event->type & (EVENT_HASHSIZE - 1);
+ 
 diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
 index 8504ac7..ecf0adb 100644
 --- a/kernel/trace/trace_stack.c
@@ -107059,6 +107848,19 @@ index 234ceb1..ad74049 100644
  	help
  	  Enable this option if you want to use the LatencyTOP tool
  	  to find out which userspace is blocking on what kernel operations.
+diff --git a/lib/Makefile b/lib/Makefile
+index 5ecf2ba..dcd31ba 100644
+--- a/lib/Makefile
++++ b/lib/Makefile
+@@ -42,7 +42,7 @@ obj-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o
+ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
+ obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
+ obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
+-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
++obj-y += list_debug.o
+ obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
+ 
+ ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
 diff --git a/lib/bitmap.c b/lib/bitmap.c
 index 7025658..8d14cab 100644
 --- a/lib/bitmap.c
@@ -107203,9 +108005,18 @@ index a111eb8..5b49191 100644
  
  /*
 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
-index 084e879..0674448 100644
+index 084e879..4f85cf6 100644
 --- a/lib/dma-debug.c
 +++ b/lib/dma-debug.c
+@@ -696,7 +696,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
+ 
+ void dma_debug_add_bus(struct bus_type *bus)
+ {
+-	struct notifier_block *nb;
++	notifier_block_no_const *nb;
+ 
+ 	if (global_disable)
+ 		return;
 @@ -861,7 +861,7 @@ out:
  
  static void check_for_stack(struct device *dev, void *addr)
@@ -107335,12 +108146,29 @@ index 9ecd6e8..12c94c1 100644
  
  	if (atomic_dec_and_test(&kref->refcount)) {
 diff --git a/lib/list_debug.c b/lib/list_debug.c
-index 1a39f4e..745720b 100644
+index 1a39f4e..bdc3153 100644
 --- a/lib/list_debug.c
 +++ b/lib/list_debug.c
-@@ -20,14 +20,18 @@ void __list_add(struct list_head *new,
- 			      struct list_head *prev,
- 			      struct list_head *next)
+@@ -8,7 +8,9 @@
+ 
+ #include <linux/module.h>
+ #include <linux/list.h>
++#include <linux/mm.h>
+ 
++#ifdef CONFIG_DEBUG_LIST
+ /*
+  * Insert a new entry between two known consecutive entries.
+  *
+@@ -16,18 +18,31 @@
+  * the prev/next entries already!
+  */
+ 
+-void __list_add(struct list_head *new,
+-			      struct list_head *prev,
+-			      struct list_head *next)
++static bool __list_add_debug(struct list_head *new,
++			     struct list_head *prev,
++			     struct list_head *next)
  {
 -	WARN(next->prev != prev,
 +	if (WARN(next->prev != prev,
@@ -107357,13 +108185,25 @@ index 1a39f4e..745720b 100644
 +	    WARN(new == prev || new == next,
 +		"list_add double add: new=%p, prev=%p, next=%p.\n",
 +		new, prev, next))
++		return false;
++	return true;
++}
++
++void __list_add(struct list_head *new,
++		struct list_head *prev,
++		struct list_head *next)
++{
++	if (!__list_add_debug(new, prev, next))
 +		return;
  	next->prev = new;
  	new->next = next;
  	new->prev = prev;
-@@ -43,12 +47,13 @@ EXPORT_SYMBOL(__list_add);
+@@ -41,16 +56,61 @@ EXPORT_SYMBOL(__list_add);
+  * Note: list_empty on entry does not return true after this, the entry is
+  * in an undefined state.
   */
- void list_del(struct list_head *entry)
+-void list_del(struct list_head *entry)
++static bool list_del_debug(struct list_head *entry)
  {
 -	WARN(entry->prev->next != entry,
 +	if (WARN(entry->prev->next != entry,
@@ -107375,10 +108215,56 @@ index 1a39f4e..745720b 100644
  		"list_del corruption. next->prev should be %p, "
 -		"but was %p\n", entry, entry->next->prev);
 +		"but was %p\n", entry, entry->next->prev))
++		return false;
++	return true;
++}
++
++void list_del(struct list_head *entry)
++{
++	if (!list_del_debug(entry))
 +		return;
  	__list_del(entry->prev, entry->next);
  	entry->next = LIST_POISON1;
  	entry->prev = LIST_POISON2;
+ }
+ EXPORT_SYMBOL(list_del);
++#endif
++
++void pax_list_add_tail(struct list_head *new, struct list_head *head)
++{
++	struct list_head *prev, *next;
++
++	prev = head->prev;
++	next = head;
++
++#ifdef CONFIG_DEBUG_LIST
++	if (!__list_add_debug(new, prev, next))
++		return;
++#endif
++
++	pax_open_kernel();
++	next->prev = new;
++	new->next = next;
++	new->prev = prev;
++	prev->next = new;
++	pax_close_kernel();
++}
++EXPORT_SYMBOL(pax_list_add_tail);
++
++void pax_list_del(struct list_head *entry)
++{
++#ifdef CONFIG_DEBUG_LIST
++	if (!list_del_debug(entry))
++		return;
++#endif
++
++	pax_open_kernel();
++	__list_del(entry->prev, entry->next);
++	entry->next = LIST_POISON1;
++	entry->prev = LIST_POISON2;
++	pax_close_kernel();
++}
++EXPORT_SYMBOL(pax_list_del);
 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
 index 92cdd99..a8149d7 100644
 --- a/lib/radix-tree.c
@@ -108871,10 +109757,18 @@ index 2d846cf..8d5cdd8 100644
  	    capable(CAP_IPC_LOCK))
  		ret = do_mlockall(flags);
 diff --git a/mm/mmap.c b/mm/mmap.c
-index 4b80cbf..f1145be 100644
+index 4b80cbf..5645eab 100644
 --- a/mm/mmap.c
 +++ b/mm/mmap.c
-@@ -45,6 +45,16 @@
+@@ -29,6 +29,7 @@
+ #include <linux/rmap.h>
+ #include <linux/mmu_notifier.h>
+ #include <linux/perf_event.h>
++#include <linux/random.h>
+ 
+ #include <asm/uaccess.h>
+ #include <asm/cacheflush.h>
+@@ -45,6 +46,16 @@
  #define arch_rebalance_pgtables(addr, len)		(addr)
  #endif
  
@@ -108891,7 +109785,7 @@ index 4b80cbf..f1145be 100644
  static void unmap_region(struct mm_struct *mm,
  		struct vm_area_struct *vma, struct vm_area_struct *prev,
  		unsigned long start, unsigned long end);
-@@ -70,22 +80,32 @@ static void unmap_region(struct mm_struct *mm,
+@@ -70,22 +81,32 @@ static void unmap_region(struct mm_struct *mm,
   *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
   *
   */
@@ -108926,7 +109820,7 @@ index 4b80cbf..f1145be 100644
  struct percpu_counter vm_committed_as;
  
  /*
-@@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
+@@ -231,6 +252,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
  	struct vm_area_struct *next = vma->vm_next;
  
  	might_sleep();
@@ -108934,7 +109828,7 @@ index 4b80cbf..f1145be 100644
  	if (vma->vm_ops && vma->vm_ops->close)
  		vma->vm_ops->close(vma);
  	if (vma->vm_file) {
-@@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
+@@ -267,6 +289,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
  	 * not page aligned -Ram Gupta
  	 */
  	rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
@@ -108942,7 +109836,7 @@ index 4b80cbf..f1145be 100644
  	if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
  			(mm->end_data - mm->start_data) > rlim)
  		goto out;
-@@ -704,6 +726,12 @@ static int
+@@ -704,6 +727,12 @@ static int
  can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
  	struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
  {
@@ -108955,7 +109849,7 @@ index 4b80cbf..f1145be 100644
  	if (is_mergeable_vma(vma, file, vm_flags) &&
  	    is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
  		if (vma->vm_pgoff == vm_pgoff)
-@@ -723,6 +751,12 @@ static int
+@@ -723,6 +752,12 @@ static int
  can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
  	struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
  {
@@ -108968,7 +109862,7 @@ index 4b80cbf..f1145be 100644
  	if (is_mergeable_vma(vma, file, vm_flags) &&
  	    is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
  		pgoff_t vm_pglen;
-@@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
+@@ -765,12 +800,19 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
  struct vm_area_struct *vma_merge(struct mm_struct *mm,
  			struct vm_area_struct *prev, unsigned long addr,
  			unsigned long end, unsigned long vm_flags,
@@ -108989,7 +109883,7 @@ index 4b80cbf..f1145be 100644
  	/*
  	 * We later require that vma->vm_flags == vm_flags,
  	 * so this tests vma->vm_flags & VM_SPECIAL, too.
-@@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+@@ -786,6 +828,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
  	if (next && next->vm_end == end)		/* cases 6, 7, 8 */
  		next = next->vm_next;
  
@@ -109005,7 +109899,7 @@ index 4b80cbf..f1145be 100644
  	/*
  	 * Can it merge with the predecessor?
  	 */
-@@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+@@ -805,9 +856,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
  							/* cases 1, 6 */
  			vma_adjust(prev, prev->vm_start,
  				next->vm_end, prev->vm_pgoff, NULL);
@@ -109031,7 +109925,7 @@ index 4b80cbf..f1145be 100644
  		return prev;
  	}
  
-@@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+@@ -818,12 +884,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
   			mpol_equal(policy, vma_policy(next)) &&
  			can_vma_merge_before(next, vm_flags,
  					anon_vma, file, pgoff+pglen)) {
@@ -109061,7 +109955,7 @@ index 4b80cbf..f1145be 100644
  		return area;
  	}
  
-@@ -898,14 +978,11 @@ none:
+@@ -898,14 +979,11 @@ none:
  void vm_stat_account(struct mm_struct *mm, unsigned long flags,
  						struct file *file, long pages)
  {
@@ -109077,7 +109971,7 @@ index 4b80cbf..f1145be 100644
  		mm->stack_vm += pages;
  	if (flags & (VM_RESERVED|VM_IO))
  		mm->reserved_vm += pages;
-@@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -932,7 +1010,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
  	 * (the exception is when the underlying filesystem is noexec
  	 *  mounted, in which case we dont add PROT_EXEC.)
  	 */
@@ -109086,7 +109980,7 @@ index 4b80cbf..f1145be 100644
  		if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
  			prot |= PROT_EXEC;
  
-@@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -958,7 +1036,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
  	/* Obtain the address to map to. we verify (or select) it and ensure
  	 * that it represents a valid section of the address space.
  	 */
@@ -109095,7 +109989,7 @@ index 4b80cbf..f1145be 100644
  	if (addr & ~PAGE_MASK)
  		return addr;
  
-@@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -969,6 +1047,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
  	vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
  			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
  
@@ -109132,7 +110026,7 @@ index 4b80cbf..f1145be 100644
  	if (flags & MAP_LOCKED)
  		if (!can_do_mlock())
  			return -EPERM;
-@@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -980,6 +1088,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
  		locked += mm->locked_vm;
  		lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
  		lock_limit >>= PAGE_SHIFT;
@@ -109140,7 +110034,7 @@ index 4b80cbf..f1145be 100644
  		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
  			return -EAGAIN;
  	}
-@@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -1053,6 +1162,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
  	if (error)
  		return error;
  
@@ -109150,7 +110044,7 @@ index 4b80cbf..f1145be 100644
  	return mmap_region(file, addr, len, flags, vm_flags, pgoff);
  }
  EXPORT_SYMBOL(do_mmap_pgoff);
-@@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
+@@ -1065,10 +1177,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
   */
  int vma_wants_writenotify(struct vm_area_struct *vma)
  {
@@ -109163,7 +110057,7 @@ index 4b80cbf..f1145be 100644
  		return 0;
  
  	/* The backer wishes to know when pages are first written to? */
-@@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
+@@ -1117,14 +1229,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
  	unsigned long charged = 0;
  	struct inode *inode =  file ? file->f_path.dentry->d_inode : NULL;
  
@@ -109190,7 +110084,7 @@ index 4b80cbf..f1145be 100644
  	}
  
  	/* Check against address space limit. */
-@@ -1173,6 +1294,16 @@ munmap_back:
+@@ -1173,6 +1295,16 @@ munmap_back:
  		goto unacct_error;
  	}
  
@@ -109207,7 +110101,7 @@ index 4b80cbf..f1145be 100644
  	vma->vm_mm = mm;
  	vma->vm_start = addr;
  	vma->vm_end = addr + len;
-@@ -1180,8 +1311,9 @@ munmap_back:
+@@ -1180,8 +1312,9 @@ munmap_back:
  	vma->vm_page_prot = vm_get_page_prot(vm_flags);
  	vma->vm_pgoff = pgoff;
  
@@ -109218,7 +110112,7 @@ index 4b80cbf..f1145be 100644
  		if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
  			goto free_vma;
  		if (vm_flags & VM_DENYWRITE) {
-@@ -1195,6 +1327,19 @@ munmap_back:
+@@ -1195,6 +1328,19 @@ munmap_back:
  		error = file->f_op->mmap(file, vma);
  		if (error)
  			goto unmap_and_free_vma;
@@ -109238,7 +110132,7 @@ index 4b80cbf..f1145be 100644
  		if (vm_flags & VM_EXECUTABLE)
  			added_exe_file_vma(mm);
  
-@@ -1207,6 +1352,8 @@ munmap_back:
+@@ -1207,6 +1353,8 @@ munmap_back:
  		pgoff = vma->vm_pgoff;
  		vm_flags = vma->vm_flags;
  	} else if (vm_flags & VM_SHARED) {
@@ -109247,7 +110141,7 @@ index 4b80cbf..f1145be 100644
  		error = shmem_zero_setup(vma);
  		if (error)
  			goto free_vma;
-@@ -1218,6 +1365,11 @@ munmap_back:
+@@ -1218,6 +1366,11 @@ munmap_back:
  	vma_link(mm, vma, prev, rb_link, rb_parent);
  	file = vma->vm_file;
  
@@ -109259,7 +110153,7 @@ index 4b80cbf..f1145be 100644
  	/* Once vma denies write, undo our temporary denial count */
  	if (correct_wcount)
  		atomic_inc(&inode->i_writecount);
-@@ -1226,6 +1378,7 @@ out:
+@@ -1226,6 +1379,7 @@ out:
  
  	mm->total_vm += len >> PAGE_SHIFT;
  	vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
@@ -109267,7 +110161,7 @@ index 4b80cbf..f1145be 100644
  	if (vm_flags & VM_LOCKED) {
  		/*
  		 * makes pages present; downgrades, drops, reacquires mmap_sem
-@@ -1248,6 +1401,12 @@ unmap_and_free_vma:
+@@ -1248,6 +1402,12 @@ unmap_and_free_vma:
  	unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
  	charged = 0;
  free_vma:
@@ -109280,11 +110174,21 @@ index 4b80cbf..f1145be 100644
  	kmem_cache_free(vm_area_cachep, vma);
  unacct_error:
  	if (charged)
-@@ -1255,6 +1414,44 @@ unacct_error:
+@@ -1255,6 +1415,62 @@ unacct_error:
  	return error;
  }
  
-+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
++#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
++unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
++{
++	if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
++		return (random32() & 0xFF) << PAGE_SHIFT;
++
++	return 0;
++}
++#endif
++
++bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
 +{
 +	if (!vma) {
 +#ifdef CONFIG_STACK_GROWSUP
@@ -109307,16 +110211,24 @@ index 4b80cbf..f1145be 100644
 +	else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
 +		return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
 +#endif
++	else if (offset)
++		return offset <= vma->vm_start - addr - len;
 +
 +	return true;
 +}
 +
-+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
++unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
 +{
 +	if (vma->vm_start < len)
 +		return -ENOMEM;
-+	if (!(vma->vm_flags & VM_GROWSDOWN))
-+		return vma->vm_start - len;
++
++	if (!(vma->vm_flags & VM_GROWSDOWN)) {
++		if (offset <= vma->vm_start - len)
++			return vma->vm_start - len - offset;
++		else
++			return -ENOMEM;
++	}
++
 +	if (sysctl_heap_stack_gap <= vma->vm_start - len)
 +		return vma->vm_start - len - sysctl_heap_stack_gap;
 +	return -ENOMEM;
@@ -109325,7 +110237,7 @@ index 4b80cbf..f1145be 100644
  /* Get an address range which is currently unmapped.
   * For shmat() with addr=0.
   *
-@@ -1281,18 +1478,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -1281,18 +1497,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
  	if (flags & MAP_FIXED)
  		return addr;
  
@@ -109356,7 +110268,7 @@ index 4b80cbf..f1145be 100644
  	}
  
  full_search:
-@@ -1303,34 +1505,40 @@ full_search:
+@@ -1303,34 +1524,40 @@ full_search:
  			 * Start a new search - just in case we missed
  			 * some holes.
  			 */
@@ -109408,7 +110320,7 @@ index 4b80cbf..f1145be 100644
  		mm->free_area_cache = addr;
  		mm->cached_hole_size = ~0UL;
  	}
-@@ -1348,7 +1556,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1348,7 +1575,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  {
  	struct vm_area_struct *vma;
  	struct mm_struct *mm = current->mm;
@@ -109417,7 +110329,7 @@ index 4b80cbf..f1145be 100644
  
  	/* requested length too big for entire address space */
  	if (len > TASK_SIZE)
-@@ -1357,13 +1565,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1357,13 +1584,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  	if (flags & MAP_FIXED)
  		return addr;
  
@@ -109440,7 +110352,7 @@ index 4b80cbf..f1145be 100644
  	}
  
  	/* check if free_area_cache is useful for us */
-@@ -1378,7 +1591,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1378,7 +1610,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  	/* make sure it can fit in the remaining address space */
  	if (addr > len) {
  		vma = find_vma(mm, addr-len);
@@ -109449,7 +110361,7 @@ index 4b80cbf..f1145be 100644
  			/* remember the address as a hint for next time */
  			return (mm->free_area_cache = addr-len);
  	}
-@@ -1395,7 +1608,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1395,7 +1627,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  		 * return with success:
  		 */
  		vma = find_vma(mm, addr);
@@ -109458,7 +110370,7 @@ index 4b80cbf..f1145be 100644
  			/* remember the address as a hint for next time */
  			return (mm->free_area_cache = addr);
  
-@@ -1404,8 +1617,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1404,8 +1636,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
   		        mm->cached_hole_size = vma->vm_start - addr;
  
  		/* try just below the current vma->vm_start */
@@ -109469,7 +110381,7 @@ index 4b80cbf..f1145be 100644
  
  bottomup:
  	/*
-@@ -1414,13 +1627,21 @@ bottomup:
+@@ -1414,13 +1646,21 @@ bottomup:
  	 * can happen with large stack limits and large mmap()
  	 * allocations.
  	 */
@@ -109493,7 +110405,7 @@ index 4b80cbf..f1145be 100644
  	mm->cached_hole_size = ~0UL;
  
  	return addr;
-@@ -1429,6 +1650,12 @@ bottomup:
+@@ -1429,6 +1669,12 @@ bottomup:
  
  void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
  {
@@ -109506,7 +110418,7 @@ index 4b80cbf..f1145be 100644
  	/*
  	 * Is this a new hole at the highest possible address?
  	 */
-@@ -1436,8 +1663,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
+@@ -1436,8 +1682,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
  		mm->free_area_cache = addr;
  
  	/* dont allow allocations above current base */
@@ -109518,7 +110430,7 @@ index 4b80cbf..f1145be 100644
  }
  
  unsigned long
-@@ -1510,40 +1739,49 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
+@@ -1510,40 +1758,49 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
  
  EXPORT_SYMBOL(find_vma);
  
@@ -109593,7 +110505,7 @@ index 4b80cbf..f1145be 100644
  
  /*
   * Verify that the stack growth is acceptable and
-@@ -1561,6 +1799,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -1561,6 +1818,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
  		return -ENOMEM;
  
  	/* Stack limit test */
@@ -109601,7 +110513,7 @@ index 4b80cbf..f1145be 100644
  	if (size > rlim[RLIMIT_STACK].rlim_cur)
  		return -ENOMEM;
  
-@@ -1570,6 +1809,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -1570,6 +1828,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
  		unsigned long limit;
  		locked = mm->locked_vm + grow;
  		limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
@@ -109609,7 +110521,7 @@ index 4b80cbf..f1145be 100644
  		if (locked > limit && !capable(CAP_IPC_LOCK))
  			return -ENOMEM;
  	}
-@@ -1600,37 +1840,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -1600,37 +1859,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
   * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
   * vma is the last one with address > vma->vm_end.  Have to extend vma.
   */
@@ -109667,7 +110579,7 @@ index 4b80cbf..f1145be 100644
  		unsigned long size, grow;
  
  		size = address - vma->vm_start;
-@@ -1643,6 +1894,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+@@ -1643,6 +1913,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
  				vma->vm_end = address;
  		}
  	}
@@ -109676,7 +110588,7 @@ index 4b80cbf..f1145be 100644
  	anon_vma_unlock(vma);
  	return error;
  }
-@@ -1655,6 +1908,8 @@ static int expand_downwards(struct vm_area_struct *vma,
+@@ -1655,6 +1927,8 @@ static int expand_downwards(struct vm_area_struct *vma,
  				   unsigned long address)
  {
  	int error;
@@ -109685,7 +110597,7 @@ index 4b80cbf..f1145be 100644
  
  	/*
  	 * We must make sure the anon_vma is allocated
-@@ -1668,6 +1923,15 @@ static int expand_downwards(struct vm_area_struct *vma,
+@@ -1668,6 +1942,15 @@ static int expand_downwards(struct vm_area_struct *vma,
  	if (error)
  		return error;
  
@@ -109701,7 +110613,7 @@ index 4b80cbf..f1145be 100644
  	anon_vma_lock(vma);
  
  	/*
-@@ -1677,9 +1941,17 @@ static int expand_downwards(struct vm_area_struct *vma,
+@@ -1677,9 +1960,17 @@ static int expand_downwards(struct vm_area_struct *vma,
  	 */
  
  	/* Somebody else might have raced and expanded it already */
@@ -109720,7 +110632,7 @@ index 4b80cbf..f1145be 100644
  		size = vma->vm_end - address;
  		grow = (vma->vm_start - address) >> PAGE_SHIFT;
  
-@@ -1689,10 +1961,22 @@ static int expand_downwards(struct vm_area_struct *vma,
+@@ -1689,10 +1980,22 @@ static int expand_downwards(struct vm_area_struct *vma,
  			if (!error) {
  				vma->vm_start = address;
  				vma->vm_pgoff -= grow;
@@ -109743,7 +110655,7 @@ index 4b80cbf..f1145be 100644
  	return error;
  }
  
-@@ -1768,6 +2052,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
+@@ -1768,6 +2071,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
  	do {
  		long nrpages = vma_pages(vma);
  
@@ -109757,7 +110669,7 @@ index 4b80cbf..f1145be 100644
  		mm->total_vm -= nrpages;
  		vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
  		vma = remove_vma(vma);
-@@ -1813,6 +2104,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -1813,6 +2123,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
  	insertion_point = (prev ? &prev->vm_next : &mm->mmap);
  	vma->vm_prev = NULL;
  	do {
@@ -109774,7 +110686,7 @@ index 4b80cbf..f1145be 100644
  		rb_erase(&vma->vm_rb, &mm->mm_rb);
  		mm->map_count--;
  		tail_vma = vma;
-@@ -1840,10 +2141,25 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -1840,10 +2160,25 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
  	struct mempolicy *pol;
  	struct vm_area_struct *new;
  
@@ -109800,7 +110712,7 @@ index 4b80cbf..f1145be 100644
  	if (mm->map_count >= sysctl_max_map_count)
  		return -ENOMEM;
  
-@@ -1851,6 +2167,16 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -1851,6 +2186,16 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
  	if (!new)
  		return -ENOMEM;
  
@@ -109817,7 +110729,7 @@ index 4b80cbf..f1145be 100644
  	/* most fields are the same, copy all, and then fixup */
  	*new = *vma;
  
-@@ -1861,8 +2187,29 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -1861,8 +2206,29 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
  		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
  	}
  
@@ -109847,7 +110759,7 @@ index 4b80cbf..f1145be 100644
  		kmem_cache_free(vm_area_cachep, new);
  		return PTR_ERR(pol);
  	}
-@@ -1883,6 +2230,28 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -1883,6 +2249,28 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
  	else
  		vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
  
@@ -109876,7 +110788,7 @@ index 4b80cbf..f1145be 100644
  	return 0;
  }
  
-@@ -1891,11 +2260,30 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -1891,11 +2279,30 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
   * work.  This now handles partial unmappings.
   * Jeremy Fitzhardinge <jeremy@goop.org>
   */
@@ -109907,7 +110819,7 @@ index 4b80cbf..f1145be 100644
  	if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
  		return -EINVAL;
  
-@@ -1959,6 +2347,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
+@@ -1959,6 +2366,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
  	/* Fix up all other VM information */
  	remove_vma_list(mm, vma);
  
@@ -109916,7 +110828,7 @@ index 4b80cbf..f1145be 100644
  	return 0;
  }
  
-@@ -1971,22 +2361,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
+@@ -1971,22 +2380,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
  
  	profile_munmap(addr);
  
@@ -109945,7 +110857,7 @@ index 4b80cbf..f1145be 100644
  /*
   *  this is really a simplified "do_mmap".  it only handles
   *  anonymous maps.  eventually we may be able to do some
-@@ -2000,6 +2386,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2000,6 +2405,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
  	struct rb_node ** rb_link, * rb_parent;
  	pgoff_t pgoff = addr >> PAGE_SHIFT;
  	int error;
@@ -109953,7 +110865,7 @@ index 4b80cbf..f1145be 100644
  
  	len = PAGE_ALIGN(len);
  	if (!len)
-@@ -2011,16 +2398,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2011,16 +2417,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
  
  	flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
  
@@ -109985,7 +110897,7 @@ index 4b80cbf..f1145be 100644
  		locked += mm->locked_vm;
  		lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
  		lock_limit >>= PAGE_SHIFT;
-@@ -2037,22 +2438,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2037,22 +2457,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
  	/*
  	 * Clear old maps.  this also does some error checking for us
  	 */
@@ -110012,7 +110924,7 @@ index 4b80cbf..f1145be 100644
  		return -ENOMEM;
  
  	/* Can we just expand an old private anonymous mapping? */
-@@ -2066,7 +2467,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2066,7 +2486,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
  	 */
  	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
  	if (!vma) {
@@ -110021,7 +110933,7 @@ index 4b80cbf..f1145be 100644
  		return -ENOMEM;
  	}
  
-@@ -2078,11 +2479,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2078,11 +2498,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
  	vma->vm_page_prot = vm_get_page_prot(flags);
  	vma_link(mm, vma, prev, rb_link, rb_parent);
  out:
@@ -110036,7 +110948,7 @@ index 4b80cbf..f1145be 100644
  	return addr;
  }
  
-@@ -2129,8 +2531,10 @@ void exit_mmap(struct mm_struct *mm)
+@@ -2129,8 +2550,10 @@ void exit_mmap(struct mm_struct *mm)
  	 * Walk the list again, actually closing and freeing it,
  	 * with preemption enabled, without holding any MM locks.
  	 */
@@ -110048,7 +110960,7 @@ index 4b80cbf..f1145be 100644
  
  	BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
  }
-@@ -2144,6 +2548,10 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
+@@ -2144,6 +2567,10 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
  	struct vm_area_struct * __vma, * prev;
  	struct rb_node ** rb_link, * rb_parent;
  
@@ -110059,7 +110971,7 @@ index 4b80cbf..f1145be 100644
  	/*
  	 * The vm_pgoff of a purely anonymous vma should be irrelevant
  	 * until its first write fault, when page's anon_vma and index
-@@ -2166,7 +2574,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
+@@ -2166,7 +2593,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
  	if ((vma->vm_flags & VM_ACCOUNT) &&
  	     security_vm_enough_memory_mm(mm, vma_pages(vma)))
  		return -ENOMEM;
@@ -110082,7 +110994,7 @@ index 4b80cbf..f1145be 100644
  	return 0;
  }
  
-@@ -2184,6 +2607,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+@@ -2184,6 +2626,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
  	struct rb_node **rb_link, *rb_parent;
  	struct mempolicy *pol;
  
@@ -110091,7 +111003,7 @@ index 4b80cbf..f1145be 100644
  	/*
  	 * If anonymous vma has not yet been faulted, update new pgoff
  	 * to match new location, to increase its chance of merging.
-@@ -2227,6 +2652,35 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+@@ -2227,6 +2671,35 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
  	return new_vma;
  }
  
@@ -110127,7 +111039,7 @@ index 4b80cbf..f1145be 100644
  /*
   * Return true if the calling process may expand its vm space by the passed
   * number of pages
-@@ -2238,6 +2692,12 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
+@@ -2238,6 +2711,12 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
  
  	lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
  
@@ -110140,7 +111052,7 @@ index 4b80cbf..f1145be 100644
  	if (cur + npages > lim)
  		return 0;
  	return 1;
-@@ -2307,6 +2767,22 @@ int install_special_mapping(struct mm_struct *mm,
+@@ -2307,6 +2786,22 @@ int install_special_mapping(struct mm_struct *mm,
  	vma->vm_start = addr;
  	vma->vm_end = addr + len;
  
@@ -110689,7 +111601,7 @@ index 3e0005b..1d659a8 100644
  		return -ENOMEM;
  
 diff --git a/mm/slab.c b/mm/slab.c
-index c8d466a..1ff8750 100644
+index c8d466a..a2cb875 100644
 --- a/mm/slab.c
 +++ b/mm/slab.c
 @@ -174,7 +174,7 @@
@@ -110873,7 +111785,7 @@ index c8d466a..1ff8750 100644
 +}
 +
 +#ifdef CONFIG_PAX_USERCOPY
-+const char *check_heap_object(const void *ptr, unsigned long n, bool to)
++const char *check_heap_object(const void *ptr, unsigned long n)
 +{
 +	struct page *page;
 +	struct kmem_cache *cachep;
@@ -110911,7 +111823,7 @@ index c8d466a..1ff8750 100644
   * ksize - get the actual amount of memory allocated for a given object
   * @objp: Pointer to the object
 diff --git a/mm/slob.c b/mm/slob.c
-index 837ebd6..1f9a479 100644
+index 837ebd6..1ef41ba 100644
 --- a/mm/slob.c
 +++ b/mm/slob.c
 @@ -29,7 +29,7 @@
@@ -111091,7 +112003,7 @@ index 837ebd6..1f9a479 100644
 +}
 +
 +#ifdef CONFIG_PAX_USERCOPY
-+const char *check_heap_object(const void *ptr, unsigned long n, bool to)
++const char *check_heap_object(const void *ptr, unsigned long n)
 +{
 +	struct slob_page *sp;
 +	const slob_t *free;
@@ -111264,7 +112176,7 @@ index 837ebd6..1f9a479 100644
  EXPORT_SYMBOL(kmem_cache_free);
  
 diff --git a/mm/slub.c b/mm/slub.c
-index 4996fc7..238bc88 100644
+index 4996fc7..2ecab0a 100644
 --- a/mm/slub.c
 +++ b/mm/slub.c
 @@ -201,7 +201,7 @@ struct track {
@@ -111390,7 +112302,7 @@ index 4996fc7..238bc88 100644
 +}
 +
 +#ifdef CONFIG_PAX_USERCOPY
-+const char *check_heap_object(const void *ptr, unsigned long n, bool to)
++const char *check_heap_object(const void *ptr, unsigned long n)
 +{
 +	struct page *page;
 +	struct kmem_cache *s;
@@ -111934,9 +112846,18 @@ index 42d76c6..5643dc4 100644
  	return 0;
  }
 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
-index a29c5ab..6143f20 100644
+index a29c5ab..d841655 100644
 --- a/net/8021q/vlan.c
 +++ b/net/8021q/vlan.c
+@@ -547,7 +547,7 @@ out:
+ 	return NOTIFY_DONE;
+ }
+ 
+-static struct notifier_block vlan_notifier_block __read_mostly = {
++static struct notifier_block vlan_notifier_block = {
+ 	.notifier_call = vlan_device_event,
+ };
+ 
 @@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
  		err = -EPERM;
  		if (!capable(CAP_NET_ADMIN))
@@ -112025,19 +112946,6 @@ index 9d14d19..5c145f3 100644
  
  /*
   * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
-diff --git a/net/atm/mpc.h b/net/atm/mpc.h
-index 0919a88..a23d54e 100644
---- a/net/atm/mpc.h
-+++ b/net/atm/mpc.h
-@@ -33,7 +33,7 @@ struct mpoa_client {
- 	struct mpc_parameters parameters;  /* parameters for this client    */
- 
- 	const struct net_device_ops *old_ops;
--	struct net_device_ops new_ops;
-+	net_device_ops_no_const new_ops;
- };
- 
- 
 diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
 index 4504a4b..1733f1e 100644
 --- a/net/atm/mpoa_caches.c
@@ -112237,6 +113145,19 @@ index d73d47f..72df42a 100644
  	if (cmd == EBT_SO_GET_ENTRIES) {
  		entries_size = t->private->entries_size;
  		nentries = t->private->nentries;
+diff --git a/net/can/af_can.c b/net/can/af_can.c
+index 6068321..a460dd0 100644
+--- a/net/can/af_can.c
++++ b/net/can/af_can.c
+@@ -849,7 +849,7 @@ static struct net_proto_family can_family_ops __read_mostly = {
+ };
+ 
+ /* notifier block for netdevice event */
+-static struct notifier_block can_netdev_notifier __read_mostly = {
++static struct notifier_block can_netdev_notifier = {
+ 	.notifier_call = can_notifier,
+ };
+ 
 diff --git a/net/can/bcm.c b/net/can/bcm.c
 index 2ffd2e0..e002f92 100644
 --- a/net/can/bcm.c
@@ -112504,18 +113425,38 @@ index 9601587..8c4824e 100644
  			if (!fle->object || fle->genid == genid)
  				continue;
 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
-index 9d70042..9adcdc5 100644
+index 9d70042..ef608bd 100644
 --- a/net/core/rtnetlink.c
 +++ b/net/core/rtnetlink.c
-@@ -57,7 +57,7 @@ struct rtnl_link
- {
- 	rtnl_doit_func		doit;
- 	rtnl_dumpit_func	dumpit;
--};
-+} __no_const;
+@@ -160,11 +160,13 @@ int __rtnl_register(int protocol, int msgtype,
+ 		rtnl_msg_handlers[protocol] = tab;
+ 	}
  
- static DEFINE_MUTEX(rtnl_mutex);
++	pax_open_kernel();
+ 	if (doit)
+-		tab[msgindex].doit = doit;
++		*(void **)&tab[msgindex].doit = doit;
+ 
+ 	if (dumpit)
+-		tab[msgindex].dumpit = dumpit;
++		*(void **)&tab[msgindex].dumpit = dumpit;
++	pax_close_kernel();
+ 
+ 	return 0;
+ }
+@@ -208,8 +210,10 @@ int rtnl_unregister(int protocol, int msgtype)
+ 	if (rtnl_msg_handlers[protocol] == NULL)
+ 		return -ENOENT;
+ 
+-	rtnl_msg_handlers[protocol][msgindex].doit = NULL;
+-	rtnl_msg_handlers[protocol][msgindex].dumpit = NULL;
++	pax_open_kernel();
++	*(void **)&rtnl_msg_handlers[protocol][msgindex].doit = NULL;
++	*(void **)&rtnl_msg_handlers[protocol][msgindex].dumpit = NULL;
++	pax_close_kernel();
  
+ 	return 0;
+ }
 diff --git a/net/core/scm.c b/net/core/scm.c
 index d98eafc..1a190a9 100644
 --- a/net/core/scm.c
@@ -115749,6 +116690,19 @@ index acf7c4d..b29621d 100644
  errout:
  	list_for_each_safe(pos, temp, &msg->chunks) {
  		list_del_init(pos);
+diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
+index bb280e6..747720f 100644
+--- a/net/sctp/ipv6.c
++++ b/net/sctp/ipv6.c
+@@ -1013,7 +1013,7 @@ void sctp_v6_pf_init(void)
+ 
+ void sctp_v6_pf_exit(void)
+ {
+-	list_del(&sctp_af_inet6.list);
++	pax_list_del((struct list_head *)&sctp_af_inet6.list);
+ }
+ 
+ /* Initialize IPv6 support and register with socket layer.  */
 diff --git a/net/sctp/proc.c b/net/sctp/proc.c
 index d093cbf..9fc36fc 100644
 --- a/net/sctp/proc.c
@@ -115781,6 +116735,32 @@ index d093cbf..9fc36fc 100644
  			   assoc->state, hash,
  			   assoc->assoc_id,
  			   assoc->sndbuf_used,
+diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
+index 619f965..bed845a 100644
+--- a/net/sctp/protocol.c
++++ b/net/sctp/protocol.c
+@@ -727,8 +727,10 @@ int sctp_register_af(struct sctp_af *af)
+ 		return 0;
+ 	}
+ 
+-	INIT_LIST_HEAD(&af->list);
+-	list_add_tail(&af->list, &sctp_address_families);
++	pax_open_kernel();
++	INIT_LIST_HEAD((struct list_head *)&af->list);
++	pax_close_kernel();
++	pax_list_add_tail((struct list_head *)&af->list, &sctp_address_families);
+ 	return 1;
+ }
+ 
+@@ -1015,7 +1017,7 @@ static void sctp_v4_pf_init(void)
+ 
+ static void sctp_v4_pf_exit(void)
+ {
+-	list_del(&sctp_af_inet.list);
++	pax_list_del((struct list_head *)&sctp_af_inet.list);
+ }
+ 
+ static int sctp_v4_protosw_init(void)
 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
 index 1f9843e..9cd0edd 100644
 --- a/net/sctp/socket.c
@@ -116225,6 +117205,39 @@ index 0b15d72..7934fbb 100644
  		int mode = (table->mode >> 6) & 7;
  		return (mode << 6) | (mode << 3) | mode;
  	}
+diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
+index 524ba56..8f2f836 100644
+--- a/net/tipc/eth_media.c
++++ b/net/tipc/eth_media.c
+@@ -60,7 +60,6 @@ struct eth_bearer {
+ 
+ static struct eth_bearer eth_bearers[MAX_ETH_BEARERS];
+ static int eth_started = 0;
+-static struct notifier_block notifier;
+ 
+ /**
+  * send_msg - send a TIPC message out over an Ethernet interface
+@@ -258,6 +257,11 @@ static char *eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size
+  * with OS for notifications about device state changes.
+  */
+ 
++static struct notifier_block notifier = {
++	.notifier_call = &recv_notification,
++	.priority = 0,
++};
++
+ int tipc_eth_media_start(void)
+ {
+ 	struct tipc_media_addr bcast_addr;
+@@ -278,8 +282,6 @@ int tipc_eth_media_start(void)
+ 	if (res)
+ 		return res;
+ 
+-	notifier.notifier_call = &recv_notification;
+-	notifier.priority = 0;
+ 	res = register_netdevice_notifier(&notifier);
+ 	if (!res)
+ 		eth_started = 1;
 diff --git a/net/tipc/link.c b/net/tipc/link.c
 index dd4c18b..f40d38d 100644
 --- a/net/tipc/link.c
@@ -116322,19 +117335,6 @@ index db8d51a..608692d 100644
  			atomic_read(&s->sk_refcnt),
  			0,
  			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
-diff --git a/net/wireless/core.h b/net/wireless/core.h
-index 376798f..109a61f 100644
---- a/net/wireless/core.h
-+++ b/net/wireless/core.h
-@@ -27,7 +27,7 @@ struct cfg80211_registered_device {
- 	struct mutex mtx;
- 
- 	/* rfkill support */
--	struct rfkill_ops rfkill_ops;
-+	rfkill_ops_no_const rfkill_ops;
- 	struct rfkill *rfkill;
- 	struct work_struct rfkill_sync;
- 
 diff --git a/net/wireless/wext.c b/net/wireless/wext.c
 index a2e4c60..0979cbe 100644
 --- a/net/wireless/wext.c
@@ -116467,10 +117467,21 @@ index cb81ca3..e15d49a 100644
  					goto restart;
  				}
 diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
-index f2f7c63..bc36b3d 100644
+index f2f7c63..9e0e8cf 100644
 --- a/net/xfrm/xfrm_state.c
 +++ b/net/xfrm/xfrm_state.c
-@@ -2040,8 +2040,10 @@ int xfrm_init_state(struct xfrm_state *x)
+@@ -281,7 +281,9 @@ int xfrm_register_mode(struct xfrm_mode *mode, int family)
+ 	if (!try_module_get(afinfo->owner))
+ 		goto out;
+ 
+-	mode->afinfo = afinfo;
++	pax_open_kernel();
++	*(void **)&mode->afinfo = afinfo;
++	pax_close_kernel();
+ 	modemap[mode->encap] = mode;
+ 	err = 0;
+ 
+@@ -2040,8 +2042,10 @@ int xfrm_init_state(struct xfrm_state *x)
  		goto error;
  
  	x->outer_mode = xfrm_get_mode(x->props.mode, family);
@@ -116998,7 +118009,7 @@ index d52f7a0..b66cdd9 100755
  		rm -f tags
  		xtags ctags
 diff --git a/security/Kconfig b/security/Kconfig
-index fb363cd..cce5e7f 100644
+index fb363cd..4cf6d28 100644
 --- a/security/Kconfig
 +++ b/security/Kconfig
 @@ -4,6 +4,890 @@
@@ -117820,7 +118831,7 @@ index fb363cd..cce5e7f 100644
 +config PAX_USERCOPY
 +	bool "Harden heap object copies between kernel and userland"
 +	default y if GRKERNSEC_CONFIG_AUTO
-+	depends on X86 || PPC || SPARC || ARM
++	depends on ARM || IA64 || PPC || SPARC || X86
 +	depends on GRKERNSEC && (SLAB || SLUB || SLOB)
 +	select PAX_USERCOPY_SLABS
 +	help
@@ -118568,19 +119579,6 @@ index 60158e2..0a0cc1a 100644
  	int mode[PORTMAN_NUM_INPUT_PORTS];
  	struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
  };
-diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
-index 02f79d2..8691d43 100644
---- a/sound/isa/cmi8330.c
-+++ b/sound/isa/cmi8330.c
-@@ -173,7 +173,7 @@ struct snd_cmi8330 {
- 
- 	struct snd_pcm *pcm;
- 	struct snd_cmi8330_stream {
--		struct snd_pcm_ops ops;
-+		snd_pcm_ops_no_const ops;
- 		snd_pcm_open_callback_t open;
- 		void *private_data; /* sb or wss */
- 	} streams[2];
 diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
 index 733b014..56ce96f 100644
 --- a/sound/oss/sb_audio.c
@@ -118937,45 +119935,6 @@ index eeb2e23..82bf625 100644
  	.build_specific	= patch_ucb1400_specific,
  };
  
-diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
-index 99552fb..4dcc2c5 100644
---- a/sound/pci/hda/hda_codec.h
-+++ b/sound/pci/hda/hda_codec.h
-@@ -580,7 +580,7 @@ struct hda_bus_ops {
- 	/* notify power-up/down from codec to controller */
- 	void (*pm_notify)(struct hda_bus *bus);
- #endif
--};
-+} __no_const;
- 
- /* template to pass to the bus constructor */
- struct hda_bus_template {
-@@ -675,6 +675,7 @@ struct hda_codec_ops {
- 	int (*check_power_status)(struct hda_codec *codec, hda_nid_t nid);
- #endif
- };
-+typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
- 
- /* record for amp information cache */
- struct hda_cache_head {
-@@ -705,7 +706,7 @@ struct hda_pcm_ops {
- 		       struct snd_pcm_substream *substream);
- 	int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
- 		       struct snd_pcm_substream *substream);
--};
-+} __no_const;
- 
- /* PCM information for each substream */
- struct hda_pcm_stream {
-@@ -760,7 +761,7 @@ struct hda_codec {
- 	const char *modelname;	/* model name for preset */
- 
- 	/* set by patch */
--	struct hda_codec_ops patch_ops;
-+	hda_codec_ops_no_const patch_ops;
- 
- 	/* PCM to create, set by patch_ops.build_pcms callback */
- 	unsigned int num_pcms;
 diff --git a/sound/pci/hda/patch_atihdmi.c b/sound/pci/hda/patch_atihdmi.c
 index fb684f0..2b11cea 100644
 --- a/sound/pci/hda/patch_atihdmi.c
@@ -119061,28 +120020,6 @@ index 2fcd70d..a143eaf 100644
  	spec->num_pins = STAC92HD71BXX_NUM_PINS;
  	switch (codec->vendor_id) {
  	case 0x111d76b6:
-diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
-index d063149..01599a4 100644
---- a/sound/pci/ice1712/ice1712.h
-+++ b/sound/pci/ice1712/ice1712.h
-@@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
- 	unsigned int mask_flags;	/* total mask bits */
- 	struct snd_akm4xxx_ops {
- 		void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
--	} ops;
-+	} __no_const ops;
- };
- 
- struct snd_ice1712_spdif {
-@@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
- 		int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
- 		void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
- 		int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
--	} ops;
-+	} __no_const ops;
- };
- 
- 
 diff --git a/sound/pci/intel8x0m.c b/sound/pci/intel8x0m.c
 index 9e7d12e..3e3bc64 100644
 --- a/sound/pci/intel8x0m.c
@@ -119141,18 +120078,32 @@ index 5518371..45cf7ac 100644
  	chip->pci = pci;
  	chip->irq = -1;
 diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
-index 0a1b2f6..776bb19 100644
+index 0a1b2f6..d999b21 100644
 --- a/sound/soc/soc-core.c
 +++ b/sound/soc/soc-core.c
-@@ -609,7 +609,7 @@ static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
- }
+@@ -1107,13 +1107,15 @@ static int soc_new_pcm(struct snd_soc_device *socdev,
+ 
+ 	dai_link->pcm = pcm;
+ 	pcm->private_data = rtd;
+-	soc_pcm_ops.mmap = platform->pcm_ops->mmap;
+-	soc_pcm_ops.pointer = platform->pcm_ops->pointer;
+-	soc_pcm_ops.ioctl = platform->pcm_ops->ioctl;
+-	soc_pcm_ops.copy = platform->pcm_ops->copy;
+-	soc_pcm_ops.silence = platform->pcm_ops->silence;
+-	soc_pcm_ops.ack = platform->pcm_ops->ack;
+-	soc_pcm_ops.page = platform->pcm_ops->page;
++	pax_open_kernel();
++	*(void **)&soc_pcm_ops.mmap = platform->pcm_ops->mmap;
++	*(void **)&soc_pcm_ops.pointer = platform->pcm_ops->pointer;
++	*(void **)&soc_pcm_ops.ioctl = platform->pcm_ops->ioctl;
++	*(void **)&soc_pcm_ops.copy = platform->pcm_ops->copy;
++	*(void **)&soc_pcm_ops.silence = platform->pcm_ops->silence;
++	*(void **)&soc_pcm_ops.ack = platform->pcm_ops->ack;
++	*(void **)&soc_pcm_ops.page = platform->pcm_ops->page;
++	pax_close_kernel();
  
- /* ASoC PCM operations */
--static struct snd_pcm_ops soc_pcm_ops = {
-+static snd_pcm_ops_no_const soc_pcm_ops = {
- 	.open		= soc_pcm_open,
- 	.close		= soc_codec_close,
- 	.hw_params	= soc_pcm_hw_params,
+ 	if (playback)
+ 		snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &soc_pcm_ops);
 diff --git a/sound/usb/usbaudio.c b/sound/usb/usbaudio.c
 index 79633ea..9732e90 100644
 --- a/sound/usb/usbaudio.c
@@ -119607,13 +120558,13 @@ index 0000000..846aeb0
 +}
 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
 new file mode 100644
-index 0000000..92ed719
+index 0000000..1742271
 --- /dev/null
 +++ b/tools/gcc/constify_plugin.c
-@@ -0,0 +1,331 @@
+@@ -0,0 +1,349 @@
 +/*
 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
-+ * Copyright 2011 by PaX Team <pageexec@freemail.hu>
++ * Copyright 2011-2013 by PaX Team <pageexec@freemail.hu>
 + * Licensed under the GPL v2, or (at your option) v3
 + *
 + * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
@@ -119651,10 +120602,16 @@ index 0000000..92ed719
 +int plugin_is_GPL_compatible;
 +
 +static struct plugin_info const_plugin_info = {
-+	.version	= "201205300030",
++	.version	= "201301150230",
 +	.help		= "no-constify\tturn off constification\n",
 +};
 +
++static tree get_field_type(tree field)
++{
++	return strip_array_types(TREE_TYPE(field));
++}
++
++static bool walk_struct(tree node);
 +static void deconstify_tree(tree node);
 +
 +static void deconstify_type(tree type)
@@ -119662,14 +120619,17 @@ index 0000000..92ed719
 +	tree field;
 +
 +	for (field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field)) {
-+		tree type = TREE_TYPE(field);
++		tree fieldtype = get_field_type(field);
 +
-+		if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
++		if (TREE_CODE(fieldtype) != RECORD_TYPE && TREE_CODE(fieldtype) != UNION_TYPE)
 +			continue;
-+		if (!TYPE_READONLY(type))
++		if (!TYPE_READONLY(fieldtype))
++			continue;
++		if (!walk_struct(fieldtype))
 +			continue;
 +
 +		deconstify_tree(field);
++		TREE_READONLY(field) = 0;
 +	}
 +	TYPE_READONLY(type) = 0;
 +	C_TYPE_FIELDS_READONLY(type) = 0;
@@ -119679,8 +120639,14 @@ index 0000000..92ed719
 +{
 +	tree old_type, new_type, field;
 +
++//	TREE_READONLY(node) = 0;
 +	old_type = TREE_TYPE(node);
++	while (TREE_CODE(old_type) == ARRAY_TYPE && TREE_CODE(TREE_TYPE(old_type)) != ARRAY_TYPE) {
++		node = old_type;
++		old_type = TREE_TYPE(old_type);
++	}
 +
++	gcc_assert(TREE_CODE(old_type) == RECORD_TYPE || TREE_CODE(old_type) == UNION_TYPE);
 +	gcc_assert(TYPE_READONLY(old_type) && (TYPE_QUALS(old_type) & TYPE_QUAL_CONST));
 +
 +	new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
@@ -119690,7 +120656,6 @@ index 0000000..92ed719
 +
 +	deconstify_type(new_type);
 +
-+	TREE_READONLY(node) = 0;
 +	TREE_TYPE(node) = new_type;
 +}
 +
@@ -119800,7 +120765,7 @@ index 0000000..92ed719
 +
 +static bool is_fptr(tree field)
 +{
-+	tree ptr = TREE_TYPE(field);
++	tree ptr = get_field_type(field);
 +
 +	if (TREE_CODE(ptr) != POINTER_TYPE)
 +		return false;
@@ -119815,6 +120780,9 @@ index 0000000..92ed719
 +	if (TYPE_FIELDS(node) == NULL_TREE)
 +		return false;
 +
++	if (lookup_attribute("do_const", TYPE_ATTRIBUTES(node)))
++		return true;
++
 +	if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node))) {
 +		gcc_assert(!TYPE_READONLY(node));
 +		deconstify_type(node);
@@ -119822,7 +120790,7 @@ index 0000000..92ed719
 +	}
 +
 +	for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
-+		tree type = TREE_TYPE(field);
++		tree type = get_field_type(field);
 +		enum tree_code code = TREE_CODE(type);
 +
 +		if (node == type)
@@ -119848,30 +120816,13 @@ index 0000000..92ed719
 +
 +	if (walk_struct(type))
 +		constify_type(type);
++	else
++		deconstify_type(type);
 +}
 +
-+static unsigned int check_local_variables(void);
-+
-+struct gimple_opt_pass pass_local_variable = {
-+	{
-+		.type			= GIMPLE_PASS,
-+		.name			= "check_local_variables",
-+		.gate			= NULL,
-+		.execute		= check_local_variables,
-+		.sub			= NULL,
-+		.next			= NULL,
-+		.static_pass_number	= 0,
-+		.tv_id			= TV_NONE,
-+		.properties_required	= 0,
-+		.properties_provided	= 0,
-+		.properties_destroyed	= 0,
-+		.todo_flags_start	= 0,
-+		.todo_flags_finish	= 0
-+	}
-+};
-+
 +static unsigned int check_local_variables(void)
 +{
++	unsigned int ret = 0;
 +	tree var;
 +	referenced_var_iterator rvi;
 +
@@ -119899,12 +120850,30 @@ index 0000000..92ed719
 +
 +		if (walk_struct(type)) {
 +			error_at(DECL_SOURCE_LOCATION(var), "constified variable %qE cannot be local", var);
-+			return 1;
++			ret = 1;
 +		}
 +	}
-+	return 0;
++	return ret;
 +}
 +
++struct gimple_opt_pass pass_local_variable = {
++	{
++		.type			= GIMPLE_PASS,
++		.name			= "check_local_variables",
++		.gate			= NULL,
++		.execute		= check_local_variables,
++		.sub			= NULL,
++		.next			= NULL,
++		.static_pass_number	= 0,
++		.tv_id			= TV_NONE,
++		.properties_required	= 0,
++		.properties_provided	= 0,
++		.properties_destroyed	= 0,
++		.todo_flags_start	= 0,
++		.todo_flags_finish	= 0
++	}
++};
++
 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
 +{
 +	const char * const plugin_name = plugin_info->base_name;
@@ -119915,9 +120884,9 @@ index 0000000..92ed719
 +
 +	struct register_pass_info local_variable_pass_info = {
 +		.pass				= &pass_local_variable.pass,
-+		.reference_pass_name		= "*referenced_vars",
++		.reference_pass_name		= "ssa",
 +		.ref_pass_instance_number	= 1,
-+		.pos_op				= PASS_POS_INSERT_AFTER
++		.pos_op				= PASS_POS_INSERT_BEFORE
 +	};
 +
 +	if (!plugin_default_version_check(version, &gcc_version)) {
@@ -123527,10 +124496,10 @@ index 0000000..7cfdcc5
 +atyfb_setup_generic_49151 atyfb_setup_generic 3 49151 NULL
 diff --git a/tools/gcc/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin.c
 new file mode 100644
-index 0000000..6387ddc
+index 0000000..792ee60
 --- /dev/null
 +++ b/tools/gcc/size_overflow_plugin.c
-@@ -0,0 +1,1918 @@
+@@ -0,0 +1,1930 @@
 +/*
 + * Copyright 2011, 2012 by Emese Revfy <re.emese@gmail.com>
 + * Licensed under the GPL v2, or (at your option) v3
@@ -123583,6 +124552,8 @@ index 0000000..6387ddc
 +	MARKED_NO, MARKED_YES, MARKED_NOT_INTENTIONAL
 +};
 +
++static unsigned int call_count = 0;
++
 +#define __unused __attribute__((__unused__))
 +#define NAME(node) IDENTIFIER_POINTER(DECL_NAME(node))
 +#define NAME_LEN(node) IDENTIFIER_LENGTH(DECL_NAME(node))
@@ -123610,10 +124581,8 @@ index 0000000..6387ddc
 +static tree get_size_overflow_type(gimple stmt, const_tree node);
 +static tree dup_assign(struct pointer_set_t *visited, gimple oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3);
 +
-+static unsigned int call_count=0;
-+
 +static struct plugin_info size_overflow_plugin_info = {
-+	.version	= "20121212beta",
++	.version	= "20130109beta",
 +	.help		= "no-size-overflow\tturn off size overflow checking\n",
 +};
 +
@@ -123900,7 +124869,7 @@ index 0000000..6387ddc
 +		  (code == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == INTEGER_TYPE));
 +}
 +
-+static int find_arg_number(const_tree arg, tree func)
++static unsigned int find_arg_number(const_tree arg, tree func)
 +{
 +	tree var;
 +	unsigned int argnum = 1;
@@ -124163,8 +125132,8 @@ index 0000000..6387ddc
 +	basic_block first_bb;
 +
 +	first_bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
-+	if (dom_info_available_p(CDI_DOMINATORS))
-+		set_immediate_dominator(CDI_DOMINATORS, first_bb, ENTRY_BLOCK_PTR);
++	gcc_assert(dom_info_available_p(CDI_DOMINATORS));
++	set_immediate_dominator(CDI_DOMINATORS, first_bb, ENTRY_BLOCK_PTR);
 +	return first_bb;
 +}
 +
@@ -124310,6 +125279,10 @@ index 0000000..6387ddc
 +	if (rhs_mode == SImode && lhs_mode == DImode && (TYPE_UNSIGNED(rhs_type) || !TYPE_UNSIGNED(lhs_type)))
 +		return false;
 +
++	// skip lhs check on signed SI -> HI cast or signed SI -> QI cast
++	if (rhs_mode == SImode && !TYPE_UNSIGNED(rhs_type) && (lhs_mode == HImode || lhs_mode == QImode))
++		return false;
++
 +	return true;
 +}
 +
@@ -124354,21 +125327,37 @@ index 0000000..6387ddc
 +	return true;
 +}
 +
++static tree create_cast_assign(struct pointer_set_t *visited, gimple stmt)
++{
++	tree rhs1 = gimple_assign_rhs1(stmt);
++	tree lhs = gimple_get_lhs(stmt);
++	const_tree rhs1_type = TREE_TYPE(rhs1);
++	const_tree lhs_type = TREE_TYPE(lhs);
++
++	if (TYPE_UNSIGNED(rhs1_type) == TYPE_UNSIGNED(lhs_type))
++		return create_assign(visited, stmt, lhs, AFTER_STMT);
++
++	return create_assign(visited, stmt, rhs1, AFTER_STMT);
++}
++
 +static tree handle_unary_rhs(struct pointer_set_t *visited, gimple stmt)
 +{
-+	gimple def_stmt;
 +	tree size_overflow_type, lhs = gimple_get_lhs(stmt);
-+	tree new_rhs1, rhs1 = gimple_assign_rhs1(stmt);
++	tree new_rhs1 = NULL_TREE;
++	tree rhs1 = gimple_assign_rhs1(stmt);
 +	const_tree rhs1_type = TREE_TYPE(rhs1);
 +	const_tree lhs_type = TREE_TYPE(lhs);
 +
-+	new_rhs1 = expand(visited, rhs1);
++	if (gimple_plf(stmt, MY_STMT))
++		return lhs;
 +
-+	if (new_rhs1 == NULL_TREE || TREE_CODE(rhs1_type) == POINTER_TYPE)
++	if (TREE_CODE(rhs1_type) == POINTER_TYPE)
 +		return create_assign(visited, stmt, lhs, AFTER_STMT);
 +
-+	if (gimple_plf(stmt, MY_STMT))
-+		return lhs;
++	new_rhs1 = expand(visited, rhs1);
++
++	if (new_rhs1 == NULL_TREE)
++		return create_cast_assign(visited, stmt);
 +
 +	if (gimple_plf(stmt, NO_CAST_CHECK))
 +		return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
@@ -124383,18 +125372,14 @@ index 0000000..6387ddc
 +	if (!gimple_assign_cast_p(stmt) || check_undefined_integer_operation(stmt))
 +		return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
 +
++	if (TYPE_UNSIGNED(rhs1_type) != TYPE_UNSIGNED(lhs_type))
++		return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
++
 +	size_overflow_type = get_size_overflow_type(stmt, rhs1);
 +	new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
 +
 +	check_size_overflow(stmt, size_overflow_type, new_rhs1, rhs1, BEFORE_STMT);
 +
-+	rhs1 = gimple_assign_rhs1(stmt);
-+	rhs1_type = TREE_TYPE(rhs1);
-+	if (TYPE_UNSIGNED(rhs1_type) != TYPE_UNSIGNED(lhs_type)) {
-+		def_stmt = get_def_stmt(new_rhs1);
-+		rhs1 = gimple_assign_rhs1(def_stmt);
-+		return create_assign(visited, stmt, rhs1, AFTER_STMT);
-+	}
 +	change_rhs1(stmt, new_rhs1);
 +
 +	if (!check_mode_type(stmt))
@@ -124543,10 +125528,9 @@ index 0000000..6387ddc
 +	make_edge(cond_bb, join_bb, EDGE_FALSE_VALUE);
 +	make_edge(bb_true, join_bb, EDGE_FALLTHRU);
 +
-+	if (dom_info_available_p(CDI_DOMINATORS)) {
-+		set_immediate_dominator(CDI_DOMINATORS, bb_true, cond_bb);
-+		set_immediate_dominator(CDI_DOMINATORS, join_bb, cond_bb);
-+	}
++	gcc_assert(dom_info_available_p(CDI_DOMINATORS));
++	set_immediate_dominator(CDI_DOMINATORS, bb_true, cond_bb);
++	set_immediate_dominator(CDI_DOMINATORS, join_bb, cond_bb);
 +
 +	if (current_loops != NULL) {
 +		gcc_assert(cond_bb->loop_father == join_bb->loop_father);
@@ -124571,9 +125555,11 @@ index 0000000..6387ddc
 +	gcc_assert(TREE_CODE(rhs_type) == INTEGER_TYPE || TREE_CODE(rhs_type) == BOOLEAN_TYPE || TREE_CODE(rhs_type) == ENUMERAL_TYPE);
 +
 +	type_max = cast_a_tree(size_overflow_type, TYPE_MAX_VALUE(rhs_type));
-+	type_min = cast_a_tree(size_overflow_type, TYPE_MIN_VALUE(rhs_type));
++	// typemax (-1) < typemin (0)
++	if (TREE_OVERFLOW(type_max))
++		return;
 +
-+	gcc_assert(!TREE_OVERFLOW(type_max));
++	type_min = cast_a_tree(size_overflow_type, TYPE_MIN_VALUE(rhs_type));
 +
 +	cast_rhs_type = TREE_TYPE(cast_rhs);
 +	type_max_type = TREE_TYPE(type_max);
@@ -124673,7 +125659,6 @@ index 0000000..6387ddc
 +
 +	rhs1 = gimple_assign_rhs1(def_stmt);
 +	rhs1_def_stmt = get_def_stmt(rhs1);
-+	gcc_assert(gimple_code(rhs1_def_stmt) != GIMPLE_NOP);
 +	if (!gimple_assign_cast_p(rhs1_def_stmt))
 +		return rhs1;
 +
@@ -124867,19 +125852,19 @@ index 0000000..6387ddc
 +
 +	switch (TYPE_MODE(type)) {
 +	case QImode:
-+		new_type = (TYPE_UNSIGNED(type)) ? unsigned_intHI_type_node : intHI_type_node;
++		new_type = intHI_type_node;
 +		break;
 +	case HImode:
-+		new_type = (TYPE_UNSIGNED(type)) ? unsigned_intSI_type_node : intSI_type_node;
++		new_type = intSI_type_node;
 +		break;
 +	case SImode:
-+		new_type = (TYPE_UNSIGNED(type)) ? unsigned_intDI_type_node : intDI_type_node;
++		new_type = intDI_type_node;
 +		break;
 +	case DImode:
 +		if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode))
-+			new_type = (TYPE_UNSIGNED(type)) ? unsigned_intDI_type_node : intDI_type_node;
++			new_type = intDI_type_node;
 +		else
-+			new_type = (TYPE_UNSIGNED(type)) ? unsigned_intTI_type_node : intTI_type_node;
++			new_type = intTI_type_node;
 +		break;
 +	default:
 +		debug_tree((tree)node);
@@ -124938,10 +125923,6 @@ index 0000000..6387ddc
 +	if (gimple_plf(def_stmt, MY_STMT))
 +		return lhs;
 +
-+	// skip char type, except PHI (FIXME: only kernel)
-+	if (TYPE_MODE(TREE_TYPE(lhs)) == QImode && gimple_code(def_stmt) != GIMPLE_PHI)
-+		return create_assign(visited, def_stmt, lhs, AFTER_STMT);
-+
 +	if (pointer_set_contains(visited, def_stmt))
 +		return expand_visited(def_stmt);
 +
@@ -125795,10 +126776,24 @@ index 83b3dde..835bee7 100644
                         break;
         }
 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
-index 82b6fdc..c807237 100644
+index 82b6fdc..3a1bf69 100644
 --- a/virt/kvm/kvm_main.c
 +++ b/virt/kvm/kvm_main.c
-@@ -2573,7 +2573,7 @@ asmlinkage void kvm_handle_fault_on_reboot(void)
+@@ -81,7 +81,12 @@ static cpumask_var_t cpus_hardware_enabled;
+ struct kmem_cache *kvm_vcpu_cache;
+ EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
+ 
+-static __read_mostly struct preempt_ops kvm_preempt_ops;
++static void kvm_sched_in(struct preempt_notifier *pn, int cpu);
++static void kvm_sched_out(struct preempt_notifier *pn, struct task_struct *next);
++static struct preempt_ops kvm_preempt_ops = {
++	.sched_in = kvm_sched_in,
++	.sched_out = kvm_sched_out,
++};
+ 
+ struct dentry *kvm_debugfs_dir;
+ 
+@@ -2573,7 +2578,7 @@ asmlinkage void kvm_handle_fault_on_reboot(void)
  	if (kvm_rebooting)
  		/* spin while reset goes on */
  		while (true)
@@ -125807,7 +126802,7 @@ index 82b6fdc..c807237 100644
  	/* Fault while not rebooting.  We want the trace. */
  	BUG();
  }
-@@ -2793,7 +2793,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
+@@ -2793,7 +2798,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
  	kvm_arch_vcpu_put(vcpu);
  }
  
@@ -125816,7 +126811,7 @@ index 82b6fdc..c807237 100644
  		  struct module *module)
  {
  	int r;
-@@ -2846,15 +2846,17 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
+@@ -2846,15 +2851,17 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
  	/* A kmem cache lets us meet the alignment requirements of fx_save. */
  	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
  					   __alignof__(struct kvm_vcpu),
@@ -125838,3 +126833,13 @@ index 82b6fdc..c807237 100644
  
  	r = misc_register(&kvm_dev);
  	if (r) {
+@@ -2862,9 +2869,6 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
+ 		goto out_free;
+ 	}
+ 
+-	kvm_preempt_ops.sched_in = kvm_sched_in;
+-	kvm_preempt_ops.sched_out = kvm_sched_out;
+-
+ 	kvm_init_debug();
+ 
+ 	return 0;

diff --git a/2.6.32/4450_grsec-kconfig-default-gids.patch b/2.6.32/4450_grsec-kconfig-default-gids.patch
index 7d4f60c..a8c8fed 100644
--- a/2.6.32/4450_grsec-kconfig-default-gids.patch
+++ b/2.6.32/4450_grsec-kconfig-default-gids.patch
@@ -16,7 +16,7 @@ from shooting themselves in the foot.
 diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
 --- a/grsecurity/Kconfig	2012-10-13 09:51:35.000000000 -0400
 +++ b/grsecurity/Kconfig	2012-10-13 09:52:32.000000000 -0400
-@@ -521,7 +521,7 @@
+@@ -554,7 +554,7 @@
  config GRKERNSEC_AUDIT_GID
  	int "GID for auditing"
  	depends on GRKERNSEC_AUDIT_GROUP
@@ -25,7 +25,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
  
  config GRKERNSEC_EXECLOG
  	bool "Exec logging"
-@@ -741,7 +741,7 @@
+@@ -774,7 +774,7 @@
  config GRKERNSEC_TPE_UNTRUSTED_GID
  	int "GID for TPE-untrusted users"
  	depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
@@ -34,7 +34,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
  	help
  	  Setting this GID determines what group TPE restrictions will be
  	  *enabled* for.  If the sysctl option is enabled, a sysctl option
-@@ -750,7 +750,7 @@
+@@ -783,7 +783,7 @@
  config GRKERNSEC_TPE_TRUSTED_GID
  	int "GID for TPE-trusted users"
  	depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
@@ -43,7 +43,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
  	help
  	  Setting this GID determines what group TPE restrictions will be
  	  *disabled* for.  If the sysctl option is enabled, a sysctl option
-@@ -843,7 +843,7 @@
+@@ -876,7 +876,7 @@
  config GRKERNSEC_SOCKET_ALL_GID
  	int "GID to deny all sockets for"
  	depends on GRKERNSEC_SOCKET_ALL
@@ -52,7 +52,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
  	help
  	  Here you can choose the GID to disable socket access for. Remember to
  	  add the users you want socket access disabled for to the GID
-@@ -864,7 +864,7 @@
+@@ -897,7 +897,7 @@
  config GRKERNSEC_SOCKET_CLIENT_GID
  	int "GID to deny client sockets for"
  	depends on GRKERNSEC_SOCKET_CLIENT
@@ -61,7 +61,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
  	help
  	  Here you can choose the GID to disable client socket access for.
  	  Remember to add the users you want client socket access disabled for to
-@@ -882,7 +882,7 @@
+@@ -915,7 +915,7 @@
  config GRKERNSEC_SOCKET_SERVER_GID
  	int "GID to deny server sockets for"
  	depends on GRKERNSEC_SOCKET_SERVER

diff --git a/2.6.32/4465_selinux-avc_audit-log-curr_ip.patch b/2.6.32/4465_selinux-avc_audit-log-curr_ip.patch
index 43ed69a..583259e 100644
--- a/2.6.32/4465_selinux-avc_audit-log-curr_ip.patch
+++ b/2.6.32/4465_selinux-avc_audit-log-curr_ip.patch
@@ -28,7 +28,7 @@ Signed-off-by: Lorenzo Hernandez Garcia-Hierro <lorenzo@gnu.org>
 diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
 --- a/grsecurity/Kconfig	2011-04-17 18:47:02.000000000 -0400
 +++ b/grsecurity/Kconfig	2011-04-17 18:51:15.000000000 -0400
-@@ -941,6 +941,27 @@
+@@ -974,6 +974,27 @@
  menu "Logging Options"
  depends on GRKERNSEC
  

diff --git a/3.2.37/0000_README b/3.2.37/0000_README
index e9758d7..4df42aa 100644
--- a/3.2.37/0000_README
+++ b/3.2.37/0000_README
@@ -66,7 +66,7 @@ Patch:	1036_linux-3.2.37.patch
 From:	http://www.kernel.org
 Desc:	Linux 3.2.37
 
-Patch:	4420_grsecurity-2.9.1-3.2.37-201301181518.patch
+Patch:	4420_grsecurity-2.9.1-3.2.37-201301230047.patch
 From:	http://www.grsecurity.net
 Desc:	hardened-sources base patch from upstream grsecurity
 

diff --git a/3.2.37/4420_grsecurity-2.9.1-3.2.37-201301181518.patch b/3.2.37/4420_grsecurity-2.9.1-3.2.37-201301230047.patch
similarity index 99%
rename from 3.2.37/4420_grsecurity-2.9.1-3.2.37-201301181518.patch
rename to 3.2.37/4420_grsecurity-2.9.1-3.2.37-201301230047.patch
index 0666a7e..b33e963 100644
--- a/3.2.37/4420_grsecurity-2.9.1-3.2.37-201301181518.patch
+++ b/3.2.37/4420_grsecurity-2.9.1-3.2.37-201301230047.patch
@@ -583,19 +583,31 @@ index 2fd00b7..cfd5069 100644
  
  	for (i = 0; i < n; i++) {
 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
-index 01e8715..be0e80f 100644
+index 01e8715..05ce5f1 100644
 --- a/arch/alpha/kernel/osf_sys.c
 +++ b/arch/alpha/kernel/osf_sys.c
-@@ -1147,7 +1147,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
+@@ -1138,16 +1138,16 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
+    generic version except that we know how to honor ADDR_LIMIT_32BIT.  */
+ 
+ static unsigned long
+-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
+-		         unsigned long limit)
++arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
++		         unsigned long limit, unsigned long flags)
+ {
+ 	struct vm_area_struct *vma = find_vma(current->mm, addr);
+-
++	unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+ 	while (1) {
  		/* At this point:  (!vma || addr < vma->vm_end). */
  		if (limit - len < addr)
  			return -ENOMEM;
 -		if (!vma || addr + len <= vma->vm_start)
-+		if (check_heap_stack_gap(vma, addr, len))
++		if (check_heap_stack_gap(vma, addr, len, offset))
  			return addr;
  		addr = vma->vm_end;
  		vma = vma->vm_next;
-@@ -1183,6 +1183,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -1183,20 +1183,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
  	   merely specific addresses, but regions of memory -- perhaps
  	   this feature should be incorporated into all ports?  */
  
@@ -604,19 +616,26 @@ index 01e8715..be0e80f 100644
 +#endif
 +
  	if (addr) {
- 		addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
+-		addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
++		addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
  		if (addr != (unsigned long) -ENOMEM)
-@@ -1190,8 +1194,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ 			return addr;
  	}
  
  	/* Next, try allocating at TASK_UNMAPPED_BASE.  */
 -	addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
 -					 len, limit);
-+	addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
++	addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
 +
  	if (addr != (unsigned long) -ENOMEM)
  		return addr;
  
+ 	/* Finally, try allocating in low memory.  */
+-	addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
++	addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
+ 
+ 	return addr;
+ }
 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
 index fadd5f8..904e73a 100644
 --- a/arch/alpha/mm/fault.c
@@ -1494,6 +1513,19 @@ index ca94653..6ac0d56 100644
  
  #ifdef MULTI_USER
  extern struct cpu_user_fns cpu_user;
+diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
+index 96187ff..7a9b049 100644
+--- a/arch/arm/include/asm/ptrace.h
++++ b/arch/arm/include/asm/ptrace.h
+@@ -72,7 +72,7 @@
+  * ARMv7 groups of PSR bits
+  */
+ #define APSR_MASK	0xf80f0000	/* N, Z, C, V, Q and GE flags */
+-#define PSR_ISET_MASK	0x01000010	/* ISA state (J, T) mask */
++#define PSR_ISET_MASK	0x01000020	/* ISA state (J, T) mask */
+ #define PSR_IT_MASK	0x0600fc00	/* If-Then execution state mask */
+ #define PSR_ENDIAN_MASK	0x00000200	/* Endianness state mask */
+ 
 diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
 index 984014b..a6d914f 100644
 --- a/arch/arm/include/asm/system.h
@@ -1968,10 +2000,18 @@ index 4b0bc37..e405631 100644
  		return;
  
 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
-index 44b628e..623ee2a 100644
+index 44b628e..af78415 100644
 --- a/arch/arm/mm/mmap.c
 +++ b/arch/arm/mm/mmap.c
-@@ -54,6 +54,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -33,6 +33,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ 	unsigned long start_addr;
+ 	int do_align = 0;
+ 	int aliasing = cache_is_vipt_aliasing();
++	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ 
+ 	/*
+ 	 * We only need to do colour alignment if either the I or D
+@@ -54,6 +55,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
  	if (len > TASK_SIZE)
  		return -ENOMEM;
  
@@ -1982,13 +2022,13 @@ index 44b628e..623ee2a 100644
  	if (addr) {
  		if (do_align)
  			addr = COLOUR_ALIGN(addr, pgoff);
-@@ -61,15 +65,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -61,15 +66,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
  			addr = PAGE_ALIGN(addr);
  
  		vma = find_vma(mm, addr);
 -		if (TASK_SIZE - len >= addr &&
 -		    (!vma || addr + len <= vma->vm_start))
-+		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
++		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
  			return addr;
  	}
  	if (len > mm->cached_hole_size) {
@@ -2002,7 +2042,7 @@ index 44b628e..623ee2a 100644
  	}
  	/* 8 bits of randomness in 20 address space bits */
  	if ((current->flags & PF_RANDOMIZE) &&
-@@ -89,14 +92,14 @@ full_search:
+@@ -89,14 +93,14 @@ full_search:
  			 * Start a new search - just in case we missed
  			 * some holes.
  			 */
@@ -2016,10 +2056,18 @@ index 44b628e..623ee2a 100644
  			return -ENOMEM;
  		}
 -		if (!vma || addr + len <= vma->vm_start) {
-+		if (check_heap_stack_gap(vma, addr, len)) {
++		if (check_heap_stack_gap(vma, addr, len, offset)) {
  			/*
  			 * Remember the place where we stopped the search:
  			 */
+@@ -111,7 +115,6 @@ full_search:
+ 	}
+ }
+ 
+-
+ /*
+  * You really shouldn't be using read() or write() on /dev/mem.  This
+  * might go away in the future.
 diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
 index 4c1a363..df311d0 100644
 --- a/arch/arm/plat-samsung/include/plat/dma-ops.h
@@ -2246,34 +2294,42 @@ index f8e16b2..c73ff79 100644
  };
  
 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
-index 385fd30..6c3d97e 100644
+index 385fd30..3aaf4fe 100644
 --- a/arch/frv/mm/elf-fdpic.c
 +++ b/arch/frv/mm/elf-fdpic.c
-@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ {
+ 	struct vm_area_struct *vma;
+ 	unsigned long limit;
++	unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+ 
+ 	if (len > TASK_SIZE)
+ 		return -ENOMEM;
+@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
  	if (addr) {
  		addr = PAGE_ALIGN(addr);
  		vma = find_vma(current->mm, addr);
 -		if (TASK_SIZE - len >= addr &&
 -		    (!vma || addr + len <= vma->vm_start))
-+		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
++		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
  			goto success;
  	}
  
-@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+@@ -89,7 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
  			for (; vma; vma = vma->vm_next) {
  				if (addr > limit)
  					break;
 -				if (addr + len <= vma->vm_start)
-+				if (check_heap_stack_gap(vma, addr, len))
++				if (check_heap_stack_gap(vma, addr, len, offset))
  					goto success;
  				addr = vma->vm_end;
  			}
-@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+@@ -104,7 +104,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
  		for (; vma; vma = vma->vm_next) {
  			if (addr > limit)
  				break;
 -			if (addr + len <= vma->vm_start)
-+			if (check_heap_stack_gap(vma, addr, len))
++			if (check_heap_stack_gap(vma, addr, len, offset))
  				goto success;
  			addr = vma->vm_end;
  		}
@@ -2609,10 +2665,18 @@ index 24603be..948052d 100644
  		DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
  	}
 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
-index 609d500..7dde2a8 100644
+index 609d500..acd0429 100644
 --- a/arch/ia64/kernel/sys_ia64.c
 +++ b/arch/ia64/kernel/sys_ia64.c
-@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
+@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
+ 	unsigned long start_addr, align_mask = PAGE_SIZE - 1;
+ 	struct mm_struct *mm = current->mm;
+ 	struct vm_area_struct *vma;
++	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ 
+ 	if (len > RGN_MAP_LIMIT)
+ 		return -ENOMEM;
+@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
  	if (REGION_NUMBER(addr) == RGN_HPAGE)
  		addr = 0;
  #endif
@@ -2626,7 +2690,7 @@ index 609d500..7dde2a8 100644
  	if (!addr)
  		addr = mm->free_area_cache;
  
-@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
+@@ -61,14 +69,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
  	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
  		/* At this point:  (!vma || addr < vma->vm_end). */
  		if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
@@ -2640,7 +2704,7 @@ index 609d500..7dde2a8 100644
  			return -ENOMEM;
  		}
 -		if (!vma || addr + len <= vma->vm_start) {
-+		if (check_heap_stack_gap(vma, addr, len)) {
++		if (check_heap_stack_gap(vma, addr, len, offset)) {
  			/* Remember the address where we stopped this search:  */
  			mm->free_area_cache = addr + len;
  			return addr;
@@ -2711,15 +2775,23 @@ index 20b3593..1ce77f0 100644
  	 * If for any reason at all we couldn't handle the fault, make
  	 * sure we exit gracefully rather than endlessly redo the
 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
-index 5ca674b..e0e1b70 100644
+index 5ca674b..127c3cb 100644
 --- a/arch/ia64/mm/hugetlbpage.c
 +++ b/arch/ia64/mm/hugetlbpage.c
-@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
+@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
+ 		unsigned long pgoff, unsigned long flags)
+ {
+ 	struct vm_area_struct *vmm;
++	unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
+ 
+ 	if (len > RGN_MAP_LIMIT)
+ 		return -ENOMEM;
+@@ -171,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
  		/* At this point:  (!vmm || addr < vmm->vm_end). */
  		if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
  			return -ENOMEM;
 -		if (!vmm || (addr + len) <= vmm->vm_start)
-+		if (check_heap_stack_gap(vmm, addr, len))
++		if (check_heap_stack_gap(vmm, addr, len, offset))
  			return addr;
  		addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
  	}
@@ -3135,10 +3207,18 @@ index 937cf33..adb39bb 100644
   * This routine handles page faults.  It determines the address,
   * and the problem, and then passes it off to one of the appropriate
 diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
-index 302d779..7d35bf8 100644
+index 302d779..ad1772c 100644
 --- a/arch/mips/mm/mmap.c
 +++ b/arch/mips/mm/mmap.c
-@@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
+@@ -71,6 +71,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
+ 	struct vm_area_struct *vma;
+ 	unsigned long addr = addr0;
+ 	int do_color_align;
++	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ 
+ 	if (unlikely(len > TASK_SIZE))
+ 		return -ENOMEM;
+@@ -95,6 +96,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
  		do_color_align = 1;
  
  	/* requesting a specific address */
@@ -3150,44 +3230,44 @@ index 302d779..7d35bf8 100644
  	if (addr) {
  		if (do_color_align)
  			addr = COLOUR_ALIGN(addr, pgoff);
-@@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
+@@ -102,8 +108,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
  			addr = PAGE_ALIGN(addr);
  
  		vma = find_vma(mm, addr);
 -		if (TASK_SIZE - len >= addr &&
 -		    (!vma || addr + len <= vma->vm_start))
-+		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
++		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
  			return addr;
  	}
  
-@@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
+@@ -118,7 +123,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
  			/* At this point:  (!vma || addr < vma->vm_end). */
  			if (TASK_SIZE - len < addr)
  				return -ENOMEM;
 -			if (!vma || addr + len <= vma->vm_start)
-+			if (check_heap_stack_gap(vmm, addr, len))
++			if (check_heap_stack_gap(vma, addr, len, offset))
  				return addr;
  			addr = vma->vm_end;
  			if (do_color_align)
-@@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
+@@ -145,7 +150,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
  		/* make sure it can fit in the remaining address space */
  		if (likely(addr > len)) {
  			vma = find_vma(mm, addr - len);
 -			if (!vma || addr <= vma->vm_start) {
-+			if (check_heap_stack_gap(vmm, addr - len, len))
++			if (check_heap_stack_gap(vma, addr - len, len, offset)) {
  				/* cache the address as a hint for next time */
  				return mm->free_area_cache = addr - len;
  			}
-@@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
+@@ -165,7 +170,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
  			 * return with success:
  			 */
  			vma = find_vma(mm, addr);
 -			if (likely(!vma || addr + len <= vma->vm_start)) {
-+			if (check_heap_stack_gap(vmm, addr, len)) {
++			if (check_heap_stack_gap(vma, addr, len, offset)) {
  				/* cache the address as a hint for next time */
  				return mm->free_area_cache = addr;
  			}
-@@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+@@ -242,30 +247,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
  		mm->unmap_area = arch_unmap_area_topdown;
  	}
  }
@@ -3512,28 +3592,56 @@ index 5e34ccf..672bc9c 100644
  	DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
  	       me->arch.unwind_section, table, end, gp);
 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
-index 7ea75d1..7b64ef5 100644
+index 7ea75d1..38ca97d 100644
 --- a/arch/parisc/kernel/sys_parisc.c
 +++ b/arch/parisc/kernel/sys_parisc.c
-@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
+@@ -33,9 +33,11 @@
+ #include <linux/utsname.h>
+ #include <linux/personality.h>
+ 
+-static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
++static unsigned long get_unshared_area(struct file *filp, unsigned long addr, unsigned long len,
++				       unsigned long flags)
+ {
+ 	struct vm_area_struct *vma;
++	unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+ 
+ 	addr = PAGE_ALIGN(addr);
+ 
+@@ -43,7 +45,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
  		/* At this point:  (!vma || addr < vma->vm_end). */
  		if (TASK_SIZE - len < addr)
  			return -ENOMEM;
 -		if (!vma || addr + len <= vma->vm_start)
-+		if (check_heap_stack_gap(vma, addr, len))
++		if (check_heap_stack_gap(vma, addr, len, offset))
  			return addr;
  		addr = vma->vm_end;
  	}
-@@ -81,7 +81,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
+@@ -67,11 +69,12 @@ static int get_offset(struct address_space *mapping)
+ 	return offset & 0x3FF000;
+ }
+ 
+-static unsigned long get_shared_area(struct address_space *mapping,
+-		unsigned long addr, unsigned long len, unsigned long pgoff)
++static unsigned long get_shared_area(struct file *filp, struct address_space *mapping,
++		unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
+ {
+ 	struct vm_area_struct *vma;
+ 	int offset = mapping ? get_offset(mapping) : 0;
++	unsigned long rand_offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+ 
+ 	offset = (offset + (pgoff << PAGE_SHIFT)) & 0x3FF000;
+ 
+@@ -81,7 +84,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
  		/* At this point:  (!vma || addr < vma->vm_end). */
  		if (TASK_SIZE - len < addr)
  			return -ENOMEM;
 -		if (!vma || addr + len <= vma->vm_start)
-+		if (check_heap_stack_gap(vma, addr, len))
++		if (check_heap_stack_gap(vma, addr, len, rand_offset))
  			return addr;
  		addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
  		if (addr < vma->vm_end) /* handle wraparound */
-@@ -100,7 +100,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -100,14 +103,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
  	if (flags & MAP_FIXED)
  		return addr;
  	if (!addr)
@@ -3541,7 +3649,17 @@ index 7ea75d1..7b64ef5 100644
 +		addr = current->mm->mmap_base;
  
  	if (filp) {
- 		addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
+-		addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
++		addr = get_shared_area(filp, filp->f_mapping, addr, len, pgoff, flags);
+ 	} else if(flags & MAP_SHARED) {
+-		addr = get_shared_area(NULL, addr, len, pgoff);
++		addr = get_shared_area(filp, NULL, addr, len, pgoff, flags);
+ 	} else {
+-		addr = get_unshared_area(addr, len);
++		addr = get_unshared_area(filp, addr, len, flags);
+ 	}
+ 	return addr;
+ }
 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
 index f19e660..414fe24 100644
 --- a/arch/parisc/kernel/traps.c
@@ -4684,7 +4802,7 @@ index 5a783d8..c23e14b 100644
  		mm->unmap_area = arch_unmap_area_topdown;
  	}
 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
-index 73709f7..6b90313 100644
+index 73709f7..63db0f7 100644
 --- a/arch/powerpc/mm/slice.c
 +++ b/arch/powerpc/mm/slice.c
 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
@@ -4692,7 +4810,7 @@ index 73709f7..6b90313 100644
  		return 0;
  	vma = find_vma(mm, addr);
 -	return (!vma || (addr + len) <= vma->vm_start);
-+	return check_heap_stack_gap(vma, addr, len);
++	return check_heap_stack_gap(vma, addr, len, 0);
  }
  
  static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
@@ -4701,7 +4819,7 @@ index 73709f7..6b90313 100644
  			continue;
  		}
 -		if (!vma || addr + len <= vma->vm_start) {
-+		if (check_heap_stack_gap(vma, addr, len)) {
++		if (check_heap_stack_gap(vma, addr, len, 0)) {
  			/*
  			 * Remember the place where we stopped the search:
  			 */
@@ -4728,7 +4846,7 @@ index 73709f7..6b90313 100644
  		 */
  		vma = find_vma(mm, addr);
 -		if (!vma || (addr + len) <= vma->vm_start) {
-+		if (check_heap_stack_gap(vma, addr, len)) {
++		if (check_heap_stack_gap(vma, addr, len, 0)) {
  			/* remember the address as a hint for next time */
  			if (use_cache)
  				mm->free_area_cache = addr;
@@ -4737,7 +4855,7 @@ index 73709f7..6b90313 100644
  
  		/* try just below the current vma->vm_start */
 -		addr = vma->vm_start;
-+		addr = skip_heap_stack_gap(vma, len);
++		addr = skip_heap_stack_gap(vma, len, 0);
  	}
  
  	/*
@@ -5112,48 +5230,64 @@ index ef9e555..331bd29 100644
  #define __read_mostly __attribute__((__section__(".data..read_mostly")))
  
 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
-index afeb710..d1d1289 100644
+index afeb710..e8366ef 100644
 --- a/arch/sh/mm/mmap.c
 +++ b/arch/sh/mm/mmap.c
-@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -49,6 +49,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ 	struct vm_area_struct *vma;
+ 	unsigned long start_addr;
+ 	int do_colour_align;
++	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ 
+ 	if (flags & MAP_FIXED) {
+ 		/* We do not accept a shared mapping if it would violate
+@@ -74,8 +75,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
  			addr = PAGE_ALIGN(addr);
  
  		vma = find_vma(mm, addr);
 -		if (TASK_SIZE - len >= addr &&
 -		    (!vma || addr + len <= vma->vm_start))
-+		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
++		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
  			return addr;
  	}
  
-@@ -106,7 +105,7 @@ full_search:
+@@ -106,7 +106,7 @@ full_search:
  			}
  			return -ENOMEM;
  		}
 -		if (likely(!vma || addr + len <= vma->vm_start)) {
-+		if (likely(check_heap_stack_gap(vma, addr, len))) {
++		if (likely(check_heap_stack_gap(vma, addr, len, offset))) {
  			/*
  			 * Remember the place where we stopped the search:
  			 */
-@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -131,6 +131,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ 	struct mm_struct *mm = current->mm;
+ 	unsigned long addr = addr0;
+ 	int do_colour_align;
++	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ 
+ 	if (flags & MAP_FIXED) {
+ 		/* We do not accept a shared mapping if it would violate
+@@ -157,8 +158,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  			addr = PAGE_ALIGN(addr);
  
  		vma = find_vma(mm, addr);
 -		if (TASK_SIZE - len >= addr &&
 -		    (!vma || addr + len <= vma->vm_start))
-+		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
++		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
  			return addr;
  	}
  
-@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -179,7 +179,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  	/* make sure it can fit in the remaining address space */
  	if (likely(addr > len)) {
  		vma = find_vma(mm, addr-len);
 -		if (!vma || addr <= vma->vm_start) {
-+		if (check_heap_stack_gap(vma, addr - len, len)) {
++		if (check_heap_stack_gap(vma, addr - len, len, offset)) {
  			/* remember the address as a hint for next time */
  			return (mm->free_area_cache = addr-len);
  		}
-@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -188,18 +188,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  	if (unlikely(mm->mmap_base < len))
  		goto bottomup;
  
@@ -5172,11 +5306,11 @@ index afeb710..d1d1289 100644
  		 */
  		vma = find_vma(mm, addr);
 -		if (likely(!vma || addr+len <= vma->vm_start)) {
-+		if (likely(check_heap_stack_gap(vma, addr, len))) {
++		if (likely(check_heap_stack_gap(vma, addr, len, offset))) {
  			/* remember the address as a hint for next time */
  			return (mm->free_area_cache = addr);
  		}
-@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -209,10 +209,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  		        mm->cached_hole_size = vma->vm_start - addr;
  
  		/* try just below the current vma->vm_start */
@@ -5184,7 +5318,7 @@ index afeb710..d1d1289 100644
 -		if (do_colour_align)
 -			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
 -	} while (likely(len < vma->vm_start));
-+		addr = skip_heap_stack_gap(vma, len);
++		addr = skip_heap_stack_gap(vma, len, offset);
 +	} while (!IS_ERR_VALUE(addr));
  
  bottomup:
@@ -5990,10 +6124,18 @@ index 96ee50a..68ce124 100644
  	if (unlikely(current->audit_context)) {
  		unsigned long tstate = regs->tstate;
 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
-index 42b282f..28ce9f2 100644
+index 42b282f..89c1f61 100644
 --- a/arch/sparc/kernel/sys_sparc_32.c
 +++ b/arch/sparc/kernel/sys_sparc_32.c
-@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+@@ -39,6 +39,7 @@ asmlinkage unsigned long sys_getpagesize(void)
+ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
+ {
+ 	struct vm_area_struct * vmm;
++	unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+ 
+ 	if (flags & MAP_FIXED) {
+ 		/* We do not accept a shared mapping if it would violate
+@@ -56,7 +57,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
  	if (ARCH_SUN4C && len > 0x20000000)
  		return -ENOMEM;
  	if (!addr)
@@ -6002,20 +6144,26 @@ index 42b282f..28ce9f2 100644
  
  	if (flags & MAP_SHARED)
  		addr = COLOUR_ALIGN(addr);
-@@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+@@ -71,7 +72,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
  		}
  		if (TASK_SIZE - PAGE_SIZE - len < addr)
  			return -ENOMEM;
 -		if (!vmm || addr + len <= vmm->vm_start)
-+		if (check_heap_stack_gap(vmm, addr, len))
++		if (check_heap_stack_gap(vmm, addr, len, offset))
  			return addr;
  		addr = vmm->vm_end;
  		if (flags & MAP_SHARED)
 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
-index 5e4252b..cbc22e8 100644
+index 5e4252b..dd8882a 100644
 --- a/arch/sparc/kernel/sys_sparc_64.c
 +++ b/arch/sparc/kernel/sys_sparc_64.c
-@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+@@ -119,12 +119,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ 	unsigned long task_size = TASK_SIZE;
+ 	unsigned long start_addr;
+ 	int do_color_align;
++	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ 
+ 	if (flags & MAP_FIXED) {
  		/* We do not accept a shared mapping if it would violate
  		 * cache aliasing constraints.
  		 */
@@ -6024,7 +6172,7 @@ index 5e4252b..cbc22e8 100644
  		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
  			return -EINVAL;
  		return addr;
-@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+@@ -139,6 +140,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
  	if (filp || (flags & MAP_SHARED))
  		do_color_align = 1;
  
@@ -6035,13 +6183,13 @@ index 5e4252b..cbc22e8 100644
  	if (addr) {
  		if (do_color_align)
  			addr = COLOUR_ALIGN(addr, pgoff);
-@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+@@ -146,15 +151,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
  			addr = PAGE_ALIGN(addr);
  
  		vma = find_vma(mm, addr);
 -		if (task_size - len >= addr &&
 -		    (!vma || addr + len <= vma->vm_start))
-+		if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
++		if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
  			return addr;
  	}
  
@@ -6054,7 +6202,7 @@ index 5e4252b..cbc22e8 100644
  	        mm->cached_hole_size = 0;
  	}
  
-@@ -174,14 +177,14 @@ full_search:
+@@ -174,14 +178,14 @@ full_search:
  			vma = find_vma(mm, VA_EXCLUDE_END);
  		}
  		if (unlikely(task_size < addr)) {
@@ -6068,11 +6216,19 @@ index 5e4252b..cbc22e8 100644
  			return -ENOMEM;
  		}
 -		if (likely(!vma || addr + len <= vma->vm_start)) {
-+		if (likely(check_heap_stack_gap(vma, addr, len))) {
++		if (likely(check_heap_stack_gap(vma, addr, len, offset))) {
  			/*
  			 * Remember the place where we stopped the search:
  			 */
-@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -207,6 +211,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ 	unsigned long task_size = STACK_TOP32;
+ 	unsigned long addr = addr0;
+ 	int do_color_align;
++	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ 
+ 	/* This should only ever run for 32-bit processes.  */
+ 	BUG_ON(!test_thread_flag(TIF_32BIT));
+@@ -215,7 +220,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  		/* We do not accept a shared mapping if it would violate
  		 * cache aliasing constraints.
  		 */
@@ -6081,26 +6237,26 @@ index 5e4252b..cbc22e8 100644
  		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
  			return -EINVAL;
  		return addr;
-@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -236,8 +241,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  			addr = PAGE_ALIGN(addr);
  
  		vma = find_vma(mm, addr);
 -		if (task_size - len >= addr &&
 -		    (!vma || addr + len <= vma->vm_start))
-+		if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
++		if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
  			return addr;
  	}
  
-@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -258,7 +262,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  	/* make sure it can fit in the remaining address space */
  	if (likely(addr > len)) {
  		vma = find_vma(mm, addr-len);
 -		if (!vma || addr <= vma->vm_start) {
-+		if (check_heap_stack_gap(vma, addr - len, len)) {
++		if (check_heap_stack_gap(vma, addr - len, len, offset)) {
  			/* remember the address as a hint for next time */
  			return (mm->free_area_cache = addr-len);
  		}
-@@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -267,18 +271,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  	if (unlikely(mm->mmap_base < len))
  		goto bottomup;
  
@@ -6119,11 +6275,11 @@ index 5e4252b..cbc22e8 100644
  		 */
  		vma = find_vma(mm, addr);
 -		if (likely(!vma || addr+len <= vma->vm_start)) {
-+		if (likely(check_heap_stack_gap(vma, addr, len))) {
++		if (likely(check_heap_stack_gap(vma, addr, len, offset))) {
  			/* remember the address as a hint for next time */
  			return (mm->free_area_cache = addr);
  		}
-@@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -288,10 +292,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
   		        mm->cached_hole_size = vma->vm_start - addr;
  
  		/* try just below the current vma->vm_start */
@@ -6131,12 +6287,12 @@ index 5e4252b..cbc22e8 100644
 -		if (do_color_align)
 -			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
 -	} while (likely(len < vma->vm_start));
-+		addr = skip_heap_stack_gap(vma, len);
++		addr = skip_heap_stack_gap(vma, len, offset);
 +	} while (!IS_ERR_VALUE(addr));
  
  bottomup:
  	/*
-@@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+@@ -390,6 +392,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
  	    gap == RLIM_INFINITY ||
  	    sysctl_legacy_va_layout) {
  		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
@@ -6149,7 +6305,7 @@ index 5e4252b..cbc22e8 100644
  		mm->get_unmapped_area = arch_get_unmapped_area;
  		mm->unmap_area = arch_unmap_area;
  	} else {
-@@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+@@ -402,6 +410,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
  			gap = (task_size / 6 * 5);
  
  		mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
@@ -7514,28 +7670,44 @@ index 504c062..a383267 100644
  	 * load/store/atomic was a write or not, it only says that there
  	 * was no match.  So in such a case we (carefully) read the
 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
-index 07e1453..0a7d9e9 100644
+index 07e1453..6364e54 100644
 --- a/arch/sparc/mm/hugetlbpage.c
 +++ b/arch/sparc/mm/hugetlbpage.c
-@@ -67,7 +67,7 @@ full_search:
+@@ -34,6 +34,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
+ 	struct vm_area_struct * vma;
+ 	unsigned long task_size = TASK_SIZE;
+ 	unsigned long start_addr;
++	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ 
+ 	if (test_thread_flag(TIF_32BIT))
+ 		task_size = STACK_TOP32;
+@@ -67,7 +68,7 @@ full_search:
  			}
  			return -ENOMEM;
  		}
 -		if (likely(!vma || addr + len <= vma->vm_start)) {
-+		if (likely(check_heap_stack_gap(vma, addr, len))) {
++		if (likely(check_heap_stack_gap(vma, addr, len, offset))) {
  			/*
  			 * Remember the place where we stopped the search:
  			 */
-@@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -90,6 +91,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ 	struct vm_area_struct *vma;
+ 	struct mm_struct *mm = current->mm;
+ 	unsigned long addr = addr0;
++	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ 
+ 	/* This should only ever run for 32-bit processes.  */
+ 	BUG_ON(!test_thread_flag(TIF_32BIT));
+@@ -106,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  	/* make sure it can fit in the remaining address space */
  	if (likely(addr > len)) {
  		vma = find_vma(mm, addr-len);
 -		if (!vma || addr <= vma->vm_start) {
-+		if (check_heap_stack_gap(vma, addr - len, len)) {
++		if (check_heap_stack_gap(vma, addr - len, len, offset)) {
  			/* remember the address as a hint for next time */
  			return (mm->free_area_cache = addr-len);
  		}
-@@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -115,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  	if (unlikely(mm->mmap_base < len))
  		goto bottomup;
  
@@ -7551,28 +7723,36 @@ index 07e1453..0a7d9e9 100644
  		 */
  		vma = find_vma(mm, addr);
 -		if (likely(!vma || addr+len <= vma->vm_start)) {
-+		if (likely(check_heap_stack_gap(vma, addr, len))) {
++		if (likely(check_heap_stack_gap(vma, addr, len, offset))) {
  			/* remember the address as a hint for next time */
  			return (mm->free_area_cache = addr);
  		}
-@@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -134,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
   		        mm->cached_hole_size = vma->vm_start - addr;
  
  		/* try just below the current vma->vm_start */
 -		addr = (vma->vm_start-len) & HPAGE_MASK;
 -	} while (likely(len < vma->vm_start));
-+		addr = skip_heap_stack_gap(vma, len);
++		addr = skip_heap_stack_gap(vma, len, offset);
 +	} while (!IS_ERR_VALUE(addr));
  
  bottomup:
  	/*
-@@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+@@ -163,6 +166,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ 	struct mm_struct *mm = current->mm;
+ 	struct vm_area_struct *vma;
+ 	unsigned long task_size = TASK_SIZE;
++	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ 
+ 	if (test_thread_flag(TIF_32BIT))
+ 		task_size = STACK_TOP32;
+@@ -181,8 +185,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
  	if (addr) {
  		addr = ALIGN(addr, HPAGE_SIZE);
  		vma = find_vma(mm, addr);
 -		if (task_size - len >= addr &&
 -		    (!vma || addr + len <= vma->vm_start))
-+		if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
++		if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
  			return addr;
  	}
  	if (mm->get_unmapped_area == arch_get_unmapped_area)
@@ -19737,10 +19917,10 @@ index c346d11..d43b163 100644
  	for (i = 0; i < copied; i++) {
  		switch (opcode[i]) {
 diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
-index 0b0cb5f..db6b9ed 100644
+index 0b0cb5f..26bb1af 100644
 --- a/arch/x86/kernel/sys_i386_32.c
 +++ b/arch/x86/kernel/sys_i386_32.c
-@@ -24,17 +24,224 @@
+@@ -24,17 +24,226 @@
  
  #include <asm/syscalls.h>
  
@@ -19778,6 +19958,7 @@ index 0b0cb5f..db6b9ed 100644
 +	struct mm_struct *mm = current->mm;
 +	struct vm_area_struct *vma;
 +	unsigned long start_addr, pax_task_size = TASK_SIZE;
++	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
 +
 +#ifdef CONFIG_PAX_SEGMEXEC
 +	if (mm->pax_flags & MF_PAX_SEGMEXEC)
@@ -19800,7 +19981,7 @@ index 0b0cb5f..db6b9ed 100644
 +		addr = PAGE_ALIGN(addr);
 +		if (pax_task_size - len >= addr) {
 +			vma = find_vma(mm, addr);
-+			if (check_heap_stack_gap(vma, addr, len))
++			if (check_heap_stack_gap(vma, addr, len, offset))
 +				return addr;
 +		}
 +	}
@@ -19842,7 +20023,7 @@ index 0b0cb5f..db6b9ed 100644
 +			}
 +			return -ENOMEM;
 +		}
-+		if (check_heap_stack_gap(vma, addr, len))
++		if (check_heap_stack_gap(vma, addr, len, offset))
 +			break;
 +		if (addr + mm->cached_hole_size < vma->vm_start)
 +			mm->cached_hole_size = vma->vm_start - addr;
@@ -19869,6 +20050,7 @@ index 0b0cb5f..db6b9ed 100644
 +	struct vm_area_struct *vma;
 +	struct mm_struct *mm = current->mm;
 +	unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
++	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
 +
 +#ifdef CONFIG_PAX_SEGMEXEC
 +	if (mm->pax_flags & MF_PAX_SEGMEXEC)
@@ -19898,7 +20080,7 @@ index 0b0cb5f..db6b9ed 100644
 +		addr = PAGE_ALIGN(addr);
 +		if (pax_task_size - len >= addr) {
 +			vma = find_vma(mm, addr);
-+			if (check_heap_stack_gap(vma, addr, len))
++			if (check_heap_stack_gap(vma, addr, len, offset))
 +				return addr;
 +		}
 +	}
@@ -19915,7 +20097,7 @@ index 0b0cb5f..db6b9ed 100644
 +	/* make sure it can fit in the remaining address space */
 +	if (addr > len) {
 +		vma = find_vma(mm, addr-len);
-+		if (check_heap_stack_gap(vma, addr - len, len))
++		if (check_heap_stack_gap(vma, addr - len, len, offset))
 +			/* remember the address as a hint for next time */
 +			return (mm->free_area_cache = addr-len);
 +	}
@@ -19932,7 +20114,7 @@ index 0b0cb5f..db6b9ed 100644
 +		 * return with success:
 +		 */
 +		vma = find_vma(mm, addr);
-+		if (check_heap_stack_gap(vma, addr, len))
++		if (check_heap_stack_gap(vma, addr, len, offset))
 +			/* remember the address as a hint for next time */
 +			return (mm->free_area_cache = addr);
 +
@@ -19941,7 +20123,7 @@ index 0b0cb5f..db6b9ed 100644
 +			mm->cached_hole_size = vma->vm_start - addr;
 +
 +		/* try just below the current vma->vm_start */
-+		addr = skip_heap_stack_gap(vma, len);
++		addr = skip_heap_stack_gap(vma, len, offset);
 +	} while (!IS_ERR_VALUE(addr));
 +
 +bottomup:
@@ -19978,7 +20160,7 @@ index 0b0cb5f..db6b9ed 100644
 +	return addr;
  }
 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
-index 0514890..3dbebce 100644
+index 0514890..37204bc 100644
 --- a/arch/x86/kernel/sys_x86_64.c
 +++ b/arch/x86/kernel/sys_x86_64.c
 @@ -95,8 +95,8 @@ out:
@@ -20001,7 +20183,12 @@ index 0514890..3dbebce 100644
  		*end = TASK_SIZE;
  	}
  }
-@@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -128,20 +128,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ 	struct vm_area_struct *vma;
+ 	unsigned long start_addr;
+ 	unsigned long begin, end;
++	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ 
  	if (flags & MAP_FIXED)
  		return addr;
  
@@ -20020,29 +20207,30 @@ index 0514890..3dbebce 100644
  		vma = find_vma(mm, addr);
 -		if (end - len >= addr &&
 -		    (!vma || addr + len <= vma->vm_start))
-+		if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
++		if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
  			return addr;
  	}
  	if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
-@@ -172,7 +175,7 @@ full_search:
+@@ -172,7 +176,7 @@ full_search:
  			}
  			return -ENOMEM;
  		}
 -		if (!vma || addr + len <= vma->vm_start) {
-+		if (check_heap_stack_gap(vma, addr, len)) {
++		if (check_heap_stack_gap(vma, addr, len, offset)) {
  			/*
  			 * Remember the place where we stopped the search:
  			 */
-@@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -195,7 +199,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  {
  	struct vm_area_struct *vma;
  	struct mm_struct *mm = current->mm;
 -	unsigned long addr = addr0;
 +	unsigned long base = mm->mmap_base, addr = addr0;
++	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
  
  	/* requested length too big for entire address space */
  	if (len > TASK_SIZE)
-@@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -208,13 +213,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  	if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
  		goto bottomup;
  
@@ -20059,42 +20247,42 @@ index 0514890..3dbebce 100644
 -			return addr;
 +		if (TASK_SIZE - len >= addr) {
 +			vma = find_vma(mm, addr);
-+			if (check_heap_stack_gap(vma, addr, len))
++			if (check_heap_stack_gap(vma, addr, len, offset))
 +				return addr;
 +		}
  	}
  
  	/* check if free_area_cache is useful for us */
-@@ -232,7 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -232,7 +242,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  						    ALIGN_TOPDOWN);
  
  		vma = find_vma(mm, tmp_addr);
 -		if (!vma || tmp_addr + len <= vma->vm_start)
-+		if (check_heap_stack_gap(vma, tmp_addr, len))
++		if (check_heap_stack_gap(vma, tmp_addr, len, offset))
  			/* remember the address as a hint for next time */
  			return mm->free_area_cache = tmp_addr;
  	}
-@@ -251,7 +259,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -251,7 +261,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  		 * return with success:
  		 */
  		vma = find_vma(mm, addr);
 -		if (!vma || addr+len <= vma->vm_start)
-+		if (check_heap_stack_gap(vma, addr, len))
++		if (check_heap_stack_gap(vma, addr, len, offset))
  			/* remember the address as a hint for next time */
  			return mm->free_area_cache = addr;
  
-@@ -260,8 +268,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -260,8 +270,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  			mm->cached_hole_size = vma->vm_start - addr;
  
  		/* try just below the current vma->vm_start */
 -		addr = vma->vm_start-len;
 -	} while (len < vma->vm_start);
-+		addr = skip_heap_stack_gap(vma, len);
++		addr = skip_heap_stack_gap(vma, len, offset);
 +	} while (!IS_ERR_VALUE(addr));
  
  bottomup:
  	/*
-@@ -270,13 +278,21 @@ bottomup:
+@@ -270,13 +280,21 @@ bottomup:
  	 * can happen with large stack limits and large mmap()
  	 * allocations.
  	 */
@@ -25059,15 +25247,16 @@ index f4f29b1..5cac4fb 100644
  
  	return (void *)vaddr;
 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
-index df7d12c..abafe9e 100644
+index df7d12c..6258d46 100644
 --- a/arch/x86/mm/hugetlbpage.c
 +++ b/arch/x86/mm/hugetlbpage.c
-@@ -277,13 +277,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
+@@ -277,13 +277,21 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
  	struct hstate *h = hstate_file(file);
  	struct mm_struct *mm = current->mm;
  	struct vm_area_struct *vma;
 -	unsigned long start_addr;
 +	unsigned long start_addr, pax_task_size = TASK_SIZE;
++	unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
 +
 +#ifdef CONFIG_PAX_SEGMEXEC
 +	if (mm->pax_flags & MF_PAX_SEGMEXEC)
@@ -25087,7 +25276,7 @@ index df7d12c..abafe9e 100644
  	}
  
  full_search:
-@@ -291,26 +298,27 @@ full_search:
+@@ -291,26 +299,27 @@ full_search:
  
  	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
  		/* At this point:  (!vma || addr < vma->vm_end). */
@@ -25110,7 +25299,7 @@ index df7d12c..abafe9e 100644
 -			mm->free_area_cache = addr + len;
 -			return addr;
 -		}
-+		if (check_heap_stack_gap(vma, addr, len))
++		if (check_heap_stack_gap(vma, addr, len, offset))
 +			break;
  		if (addr + mm->cached_hole_size < vma->vm_start)
  		        mm->cached_hole_size = vma->vm_start - addr;
@@ -25122,7 +25311,7 @@ index df7d12c..abafe9e 100644
  }
  
  static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
-@@ -319,10 +327,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
+@@ -319,10 +328,10 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
  {
  	struct hstate *h = hstate_file(file);
  	struct mm_struct *mm = current->mm;
@@ -25132,10 +25321,11 @@ index df7d12c..abafe9e 100644
 +	unsigned long base = mm->mmap_base, addr;
  	unsigned long largest_hole = mm->cached_hole_size;
 -	int first_time = 1;
++	unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
  
  	/* don't allow allocations above current base */
  	if (mm->free_area_cache > base)
-@@ -332,64 +339,68 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
+@@ -332,64 +341,68 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
  	        largest_hole = 0;
  		mm->free_area_cache  = base;
  	}
@@ -25165,7 +25355,7 @@ index df7d12c..abafe9e 100644
  		 */
 -		if (addr + len <= vma->vm_start &&
 -		            (!prev_vma || (addr >= prev_vma->vm_end))) {
-+		if (check_heap_stack_gap(vma, addr, len)) {
++		if (check_heap_stack_gap(vma, addr, len, offset)) {
  			/* remember the address as a hint for next time */
 -		        mm->cached_hole_size = largest_hole;
 -		        return (mm->free_area_cache = addr);
@@ -25192,7 +25382,7 @@ index df7d12c..abafe9e 100644
  		/* try just below the current vma->vm_start */
 -		addr = (vma->vm_start - len) & huge_page_mask(h);
 -	} while (len <= vma->vm_start);
-+		addr = skip_heap_stack_gap(vma, len);
++		addr = skip_heap_stack_gap(vma, len, offset);
 +	} while (!IS_ERR_VALUE(addr));
  
  fail:
@@ -25231,7 +25421,7 @@ index df7d12c..abafe9e 100644
  	mm->cached_hole_size = ~0UL;
  	addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
  			len, pgoff, flags);
-@@ -397,6 +408,7 @@ fail:
+@@ -397,6 +410,7 @@ fail:
  	/*
  	 * Restore the topdown base:
  	 */
@@ -25239,11 +25429,12 @@ index df7d12c..abafe9e 100644
  	mm->free_area_cache = base;
  	mm->cached_hole_size = ~0UL;
  
-@@ -410,10 +422,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+@@ -410,10 +424,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
  	struct hstate *h = hstate_file(file);
  	struct mm_struct *mm = current->mm;
  	struct vm_area_struct *vma;
 +	unsigned long pax_task_size = TASK_SIZE;
++	unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
  
  	if (len & ~huge_page_mask(h))
  		return -EINVAL;
@@ -25260,13 +25451,13 @@ index df7d12c..abafe9e 100644
  		return -ENOMEM;
  
  	if (flags & MAP_FIXED) {
-@@ -425,8 +446,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+@@ -425,8 +449,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
  	if (addr) {
  		addr = ALIGN(addr, huge_page_size(h));
  		vma = find_vma(mm, addr);
 -		if (TASK_SIZE - len >= addr &&
 -		    (!vma || addr + len <= vma->vm_start))
-+		if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
++		if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
  			return addr;
  	}
  	if (mm->get_unmapped_area == arch_get_unmapped_area)
@@ -28220,7 +28411,7 @@ index 153407c..611cba9 100644
 -}
 -__setup("vdso=", vdso_setup);
 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
-index 69b9ef6..8b8429e 100644
+index 69b9ef6..e07ccfc 100644
 --- a/arch/x86/xen/enlighten.c
 +++ b/arch/x86/xen/enlighten.c
 @@ -86,8 +86,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
@@ -28232,7 +28423,27 @@ index 69b9ef6..8b8429e 100644
  RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
  __read_mostly int xen_have_vector_callback;
  EXPORT_SYMBOL_GPL(xen_have_vector_callback);
-@@ -761,12 +759,12 @@ static u32 xen_safe_apic_wait_icr_idle(void)
+@@ -367,8 +365,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
+ {
+ 	unsigned long va = dtr->address;
+ 	unsigned int size = dtr->size + 1;
+-	unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+-	unsigned long frames[pages];
++	unsigned long frames[65536 / PAGE_SIZE];
+ 	int f;
+ 
+ 	/*
+@@ -416,8 +413,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
+ {
+ 	unsigned long va = dtr->address;
+ 	unsigned int size = dtr->size + 1;
+-	unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+-	unsigned long frames[pages];
++	unsigned long frames[65536 / PAGE_SIZE];
+ 	int f;
+ 
+ 	/*
+@@ -761,12 +757,12 @@ static u32 xen_safe_apic_wait_icr_idle(void)
  
  static void set_xen_basic_apic_ops(void)
  {
@@ -28251,7 +28462,7 @@ index 69b9ef6..8b8429e 100644
  }
  
  #endif
-@@ -1057,7 +1055,7 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
+@@ -1057,7 +1053,7 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
  #endif
  };
  
@@ -28260,7 +28471,7 @@ index 69b9ef6..8b8429e 100644
  {
  	struct sched_shutdown r = { .reason = reason };
  
-@@ -1065,17 +1063,17 @@ static void xen_reboot(int reason)
+@@ -1065,17 +1061,17 @@ static void xen_reboot(int reason)
  		BUG();
  }
  
@@ -28281,7 +28492,7 @@ index 69b9ef6..8b8429e 100644
  {
  	xen_reboot(SHUTDOWN_poweroff);
  }
-@@ -1125,14 +1123,14 @@ static const struct machine_ops xen_machine_ops __initconst = {
+@@ -1125,14 +1121,14 @@ static const struct machine_ops xen_machine_ops __initconst = {
   */
  static void __init xen_setup_stackprotector(void)
  {
@@ -28300,7 +28511,7 @@ index 69b9ef6..8b8429e 100644
  }
  
  /* First C function to be called on Xen boot */
-@@ -1151,13 +1149,13 @@ asmlinkage void __init xen_start_kernel(void)
+@@ -1151,13 +1147,13 @@ asmlinkage void __init xen_start_kernel(void)
  
  	/* Install Xen paravirt ops */
  	pv_info = xen_info;
@@ -28320,7 +28531,7 @@ index 69b9ef6..8b8429e 100644
  
  	xen_init_time_ops();
  
-@@ -1181,7 +1179,17 @@ asmlinkage void __init xen_start_kernel(void)
+@@ -1181,7 +1177,17 @@ asmlinkage void __init xen_start_kernel(void)
  	__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
  
  	/* Work out if we support NX */
@@ -28339,7 +28550,7 @@ index 69b9ef6..8b8429e 100644
  
  	xen_setup_features();
  
-@@ -1210,14 +1218,7 @@ asmlinkage void __init xen_start_kernel(void)
+@@ -1210,14 +1216,7 @@ asmlinkage void __init xen_start_kernel(void)
  		pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
  	}
  
@@ -28355,7 +28566,7 @@ index 69b9ef6..8b8429e 100644
  
  	xen_smp_init();
  
-@@ -1293,7 +1294,7 @@ asmlinkage void __init xen_start_kernel(void)
+@@ -1293,7 +1292,7 @@ asmlinkage void __init xen_start_kernel(void)
  		add_preferred_console("tty", 0, NULL);
  		add_preferred_console("hvc", 0, NULL);
  		if (pci_xen)
@@ -28364,7 +28575,7 @@ index 69b9ef6..8b8429e 100644
  	} else {
  		const struct dom0_vga_console_info *info =
  			(void *)((char *)xen_start_info +
-@@ -1307,8 +1308,8 @@ asmlinkage void __init xen_start_kernel(void)
+@@ -1307,8 +1306,8 @@ asmlinkage void __init xen_start_kernel(void)
  		pci_request_acs();
  
  		/* Avoid searching for BIOS MP tables */
@@ -28375,7 +28586,7 @@ index 69b9ef6..8b8429e 100644
  	}
  #ifdef CONFIG_PCI
  	/* PCI BIOS service won't work from a PV guest. */
-@@ -1420,7 +1421,7 @@ static void __init xen_hvm_guest_init(void)
+@@ -1420,7 +1419,7 @@ static void __init xen_hvm_guest_init(void)
  	xen_hvm_smp_init();
  	register_cpu_notifier(&xen_hvm_cpu_notifier);
  	xen_unplug_emulated_devices();
@@ -52174,6 +52385,43 @@ index 014fcb4..980206f 100644
  		ret = -EAGAIN;
  
  	pipe_unlock(ipipe);
+diff --git a/fs/stat.c b/fs/stat.c
+index 7b21801..ee8fe9b 100644
+--- a/fs/stat.c
++++ b/fs/stat.c
+@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
+ 	stat->gid = inode->i_gid;
+ 	stat->rdev = inode->i_rdev;
+ 	stat->size = i_size_read(inode);
+-	stat->atime = inode->i_atime;
+-	stat->mtime = inode->i_mtime;
++	if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
++		stat->atime = inode->i_ctime;
++		stat->mtime = inode->i_ctime;
++	} else {
++		stat->atime = inode->i_atime;
++		stat->mtime = inode->i_mtime;
++	}
+ 	stat->ctime = inode->i_ctime;
+ 	stat->blksize = (1 << inode->i_blkbits);
+ 	stat->blocks = inode->i_blocks;
+@@ -46,8 +51,14 @@ int vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
+ 	if (retval)
+ 		return retval;
+ 
+-	if (inode->i_op->getattr)
+-		return inode->i_op->getattr(mnt, dentry, stat);
++	if (inode->i_op->getattr) {
++		retval = inode->i_op->getattr(mnt, dentry, stat);
++		if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
++			stat->atime = stat->ctime;
++			stat->mtime = stat->ctime;
++		}
++		return retval;
++	}
+ 
+ 	generic_fillattr(inode, stat);
+ 	return 0;
 diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
 index fabbb81..91a12e3 100644
 --- a/fs/sysfs/dir.c
@@ -52465,10 +52713,10 @@ index 87323f1..dab9d00 100644
  	ip = issum ? mp->m_rsumip : mp->m_rbmip;
 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
 new file mode 100644
-index 0000000..de63c25
+index 0000000..511310f
 --- /dev/null
 +++ b/grsecurity/Kconfig
-@@ -0,0 +1,982 @@
+@@ -0,0 +1,1015 @@
 +#
 +# grecurity configuration
 +#
@@ -52548,6 +52796,26 @@ index 0000000..de63c25
 +	  If you're using KERNEXEC, it's recommended that you enable this option
 +	  to supplement the hardening of the kernel.
 +  
++config GRKERNSEC_RAND_THREADSTACK
++	bool "Insert random gaps between thread stacks"
++	default y if GRKERNSEC_CONFIG_AUTO
++	depends on PAX_RANDMMAP && !PPC
++	help
++	  If you say Y here, a random-sized gap will be enforced between allocated
++	  thread stacks.  Glibc's NPTL and other threading libraries that
++	  pass MAP_STACK to the kernel for thread stack allocation are supported.
++	  The implementation currently provides 8 bits of entropy for the gap.
++
++	  Many distributions do not compile threaded remote services with the
++	  -fstack-check argument to GCC, causing the variable-sized stack-based
++	  allocator, alloca(), to not probe the stack on allocation.  This
++	  permits an unbounded alloca() to skip over any guard page and potentially
++	  modify another thread's stack reliably.  An enforced random gap
++	  reduces the reliability of such an attack and increases the chance
++	  that such a read/write to another thread's stack instead lands in
++	  an unmapped area, causing a crash and triggering grsecurity's
++	  anti-bruteforcing logic.
++
 +config GRKERNSEC_PROC_MEMMAP
 +	bool "Harden ASLR against information leaks and entropy reduction"
 +	default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
@@ -52830,6 +53098,19 @@ index 0000000..de63c25
 +	  behavior if this option is enabled in an init script on a read-only
 +	  filesystem.  This feature is mainly intended for secure embedded systems.
 +
++config GRKERNSEC_DEVICE_SIDECHANNEL
++	bool "Eliminate stat/notify-based device sidechannels"
++	default y if GRKERNSEC_CONFIG_AUTO
++	help
++	  If you say Y here, timing analyses on block or character
++	  devices like /dev/ptmx using stat or inotify/dnotify/fanotify
++	  will be thwarted for unprivileged users.  If a process without
++	  CAP_MKNOD stats such a device, the last access and last modify times
++	  will match the device's create time.  No access or modify events
++	  will be triggered through inotify/dnotify/fanotify for such devices.
++	  This feature will prevent attacks that may at a minimum
++	  allow an attacker to determine the administrator's password length.
++
 +config GRKERNSEC_CHROOT
 +	bool "Chroot jail restrictions"
 +	default y if GRKERNSEC_CONFIG_AUTO
@@ -53497,7 +53778,7 @@ index 0000000..1b9afa9
 +endif
 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
 new file mode 100644
-index 0000000..24d469f
+index 0000000..740ce0b
 --- /dev/null
 +++ b/grsecurity/gracl.c
 @@ -0,0 +1,4212 @@
@@ -53886,7 +54167,7 @@ index 0000000..24d469f
 +struct acl_subject_label *
 +lookup_subject_map(const struct acl_subject_label *userp)
 +{
-+	unsigned int index = shash(userp, subj_map_set.s_size);
++	unsigned int index = gr_shash(userp, subj_map_set.s_size);
 +	struct subject_map *match;
 +
 +	match = subj_map_set.s_hash[index];
@@ -53903,7 +54184,7 @@ index 0000000..24d469f
 +static void
 +insert_subj_map_entry(struct subject_map *subjmap)
 +{
-+	unsigned int index = shash(subjmap->user, subj_map_set.s_size);
++	unsigned int index = gr_shash(subjmap->user, subj_map_set.s_size);
 +	struct subject_map **curr;
 +
 +	subjmap->prev = NULL;
@@ -53922,7 +54203,7 @@ index 0000000..24d469f
 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
 +		      const gid_t gid)
 +{
-+	unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
++	unsigned int index = gr_rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
 +	struct acl_role_label *match;
 +	struct role_allowed_ip *ipp;
 +	unsigned int x;
@@ -53945,7 +54226,7 @@ index 0000000..24d469f
 +found:
 +	if (match == NULL) {
 +	      try_group:
-+		index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
++		index = gr_rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
 +		match = acl_role_set.r_hash[index];
 +
 +		while (match) {
@@ -53991,7 +54272,7 @@ index 0000000..24d469f
 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
 +		      const struct acl_role_label *role)
 +{
-+	unsigned int index = fhash(ino, dev, role->subj_hash_size);
++	unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
 +	struct acl_subject_label *match;
 +
 +	match = role->subj_hash[index];
@@ -54011,7 +54292,7 @@ index 0000000..24d469f
 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
 +			  const struct acl_role_label *role)
 +{
-+	unsigned int index = fhash(ino, dev, role->subj_hash_size);
++	unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
 +	struct acl_subject_label *match;
 +
 +	match = role->subj_hash[index];
@@ -54031,7 +54312,7 @@ index 0000000..24d469f
 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
 +		     const struct acl_subject_label *subj)
 +{
-+	unsigned int index = fhash(ino, dev, subj->obj_hash_size);
++	unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
 +	struct acl_object_label *match;
 +
 +	match = subj->obj_hash[index];
@@ -54051,7 +54332,7 @@ index 0000000..24d469f
 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
 +		     const struct acl_subject_label *subj)
 +{
-+	unsigned int index = fhash(ino, dev, subj->obj_hash_size);
++	unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
 +	struct acl_object_label *match;
 +
 +	match = subj->obj_hash[index];
@@ -54125,7 +54406,7 @@ index 0000000..24d469f
 +static struct inodev_entry *
 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
 +{
-+	unsigned int index = fhash(ino, dev, inodev_set.i_size);
++	unsigned int index = gr_fhash(ino, dev, inodev_set.i_size);
 +	struct inodev_entry *match;
 +
 +	match = inodev_set.i_hash[index];
@@ -54139,7 +54420,7 @@ index 0000000..24d469f
 +static void
 +insert_inodev_entry(struct inodev_entry *entry)
 +{
-+	unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
++	unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
 +				    inodev_set.i_size);
 +	struct inodev_entry **curr;
 +
@@ -54159,7 +54440,7 @@ index 0000000..24d469f
 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
 +{
 +	unsigned int index =
-+	    rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
++	    gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
 +	struct acl_role_label **curr;
 +	struct acl_role_label *tmp, *tmp2;
 +
@@ -54292,7 +54573,7 @@ index 0000000..24d469f
 +		     struct acl_subject_label *subj)
 +{
 +	unsigned int index =
-+	    fhash(obj->inode, obj->device, subj->obj_hash_size);
++	    gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
 +	struct acl_object_label **curr;
 +
 +	
@@ -54312,7 +54593,7 @@ index 0000000..24d469f
 +insert_acl_subj_label(struct acl_subject_label *obj,
 +		      struct acl_role_label *role)
 +{
-+	unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
++	unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
 +	struct acl_subject_label **curr;
 +
 +	obj->prev = NULL;
@@ -56169,7 +56450,7 @@ index 0000000..24d469f
 +		     const ino_t newinode, const dev_t newdevice,
 +		     struct acl_subject_label *subj)
 +{
-+	unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
++	unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
 +	struct acl_object_label *match;
 +
 +	match = subj->obj_hash[index];
@@ -56208,7 +56489,7 @@ index 0000000..24d469f
 +		      const ino_t newinode, const dev_t newdevice,
 +		      struct acl_role_label *role)
 +{
-+	unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
++	unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
 +	struct acl_subject_label *match;
 +
 +	match = role->subj_hash[index];
@@ -56246,7 +56527,7 @@ index 0000000..24d469f
 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
 +		    const ino_t newinode, const dev_t newdevice)
 +{
-+	unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
++	unsigned int index = gr_fhash(oldinode, olddevice, inodev_set.i_size);
 +	struct inodev_entry *match;
 +
 +	match = inodev_set.i_hash[index];
@@ -63717,7 +63998,7 @@ index 8eeb205..d59bfa2 100644
  	struct sock_filter     	insns[0];
  };
 diff --git a/include/linux/fs.h b/include/linux/fs.h
-index 29b6353..295eed1 100644
+index 29b6353..b41fa04 100644
 --- a/include/linux/fs.h
 +++ b/include/linux/fs.h
 @@ -1618,7 +1618,8 @@ struct file_operations {
@@ -63730,6 +64011,22 @@ index 29b6353..295eed1 100644
  
  struct inode_operations {
  	struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
+@@ -2714,5 +2715,15 @@ static inline void inode_has_no_xattr(struct inode *inode)
+ 		inode->i_flags |= S_NOSEC;
+ }
+ 
++static inline bool is_sidechannel_device(const struct inode *inode)
++{
++#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
++	umode_t mode = inode->i_mode;
++	return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
++#else
++	return false;
++#endif
++}
++
+ #endif /* __KERNEL__ */
+ #endif /* _LINUX_FS_H */
 diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
 index 003dc0f..3c4ea97 100644
 --- a/include/linux/fs_struct.h
@@ -63766,10 +64063,30 @@ index ce31408..b1ad003 100644
  	op->release = release;
  	INIT_LIST_HEAD(&op->pend_link);
 diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
-index 2a53f10..0187fdf 100644
+index 2a53f10..e3cd3e0 100644
 --- a/include/linux/fsnotify.h
 +++ b/include/linux/fsnotify.h
-@@ -314,7 +314,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
+@@ -194,6 +194,9 @@ static inline void fsnotify_access(struct file *file)
+ 	struct inode *inode = path->dentry->d_inode;
+ 	__u32 mask = FS_ACCESS;
+ 
++	if (is_sidechannel_device(inode))
++		return;
++
+ 	if (S_ISDIR(inode->i_mode))
+ 		mask |= FS_ISDIR;
+ 
+@@ -212,6 +215,9 @@ static inline void fsnotify_modify(struct file *file)
+ 	struct inode *inode = path->dentry->d_inode;
+ 	__u32 mask = FS_MODIFY;
+ 
++	if (is_sidechannel_device(inode))
++		return;
++
+ 	if (S_ISDIR(inode->i_mode))
+ 		mask |= FS_ISDIR;
+ 
+@@ -314,7 +320,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
   */
  static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
  {
@@ -63849,7 +64166,7 @@ index 3a76faf..c0592c7 100644
  {
 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
 new file mode 100644
-index 0000000..c938b1f
+index 0000000..ebe6d72
 --- /dev/null
 +++ b/include/linux/gracl.h
 @@ -0,0 +1,319 @@
@@ -64113,25 +64430,25 @@ index 0000000..c938b1f
 +   Shift/add algorithm with modulus of table size and an XOR*/
 +
 +static __inline__ unsigned int
-+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
++gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
 +{
 +	return ((((uid + type) << (16 + type)) ^ uid) % sz);
 +}
 +
 + static __inline__ unsigned int
-+shash(const struct acl_subject_label *userp, const unsigned int sz)
++gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
 +{
 +	return ((const unsigned long)userp % sz);
 +}
 +
 +static __inline__ unsigned int
-+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
++gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
 +{
 +	return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
 +}
 +
 +static __inline__ unsigned int
-+nhash(const char *name, const __u16 len, const unsigned int sz)
++gr_nhash(const char *name, const __u16 len, const unsigned int sz)
 +{
 +	return full_name_hash((const unsigned char *)name, len) % sz;
 +}
@@ -66219,7 +66536,7 @@ index 2148b12..519b820 100644
  
  static inline void anon_vma_merge(struct vm_area_struct *vma,
 diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 1e86bb4..ab37e2e 100644
+index 1e86bb4..31f75c9 100644
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
 @@ -101,6 +101,7 @@ struct bio_list;
@@ -66230,7 +66547,7 @@ index 1e86bb4..ab37e2e 100644
  
  /*
   * List of flags we want to share for kernel threads,
-@@ -381,10 +382,13 @@ struct user_namespace;
+@@ -381,10 +382,23 @@ struct user_namespace;
  #define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
  
  extern int sysctl_max_map_count;
@@ -66239,12 +66556,22 @@ index 1e86bb4..ab37e2e 100644
  #include <linux/aio.h>
  
  #ifdef CONFIG_MMU
-+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
-+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
++
++#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
++extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
++#else
++static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
++{
++	return 0;
++}
++#endif
++
++extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
++extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
  extern void arch_pick_mmap_layout(struct mm_struct *mm);
  extern unsigned long
  arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
-@@ -403,6 +407,11 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
+@@ -403,6 +417,11 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
  extern void set_dumpable(struct mm_struct *mm, int value);
  extern int get_dumpable(struct mm_struct *mm);
  
@@ -66256,7 +66583,7 @@ index 1e86bb4..ab37e2e 100644
  /* mm flags */
  /* dumpable bits */
  #define MMF_DUMPABLE      0  /* core dump is permitted */
-@@ -630,6 +639,17 @@ struct signal_struct {
+@@ -630,6 +649,17 @@ struct signal_struct {
  #ifdef CONFIG_TASKSTATS
  	struct taskstats *stats;
  #endif
@@ -66274,7 +66601,7 @@ index 1e86bb4..ab37e2e 100644
  #ifdef CONFIG_AUDIT
  	unsigned audit_tty;
  	struct tty_audit_buf *tty_audit_buf;
-@@ -711,6 +731,11 @@ struct user_struct {
+@@ -711,6 +741,11 @@ struct user_struct {
  	struct key *session_keyring;	/* UID's default session keyring */
  #endif
  
@@ -66286,7 +66613,7 @@ index 1e86bb4..ab37e2e 100644
  	/* Hash table maintenance information */
  	struct hlist_node uidhash_node;
  	uid_t uid;
-@@ -1341,8 +1366,8 @@ struct task_struct {
+@@ -1341,8 +1376,8 @@ struct task_struct {
  	struct list_head thread_group;
  
  	struct completion *vfork_done;		/* for vfork() */
@@ -66297,7 +66624,7 @@ index 1e86bb4..ab37e2e 100644
  
  	cputime_t utime, stime, utimescaled, stimescaled;
  	cputime_t gtime;
-@@ -1358,13 +1383,6 @@ struct task_struct {
+@@ -1358,13 +1393,6 @@ struct task_struct {
  	struct task_cputime cputime_expires;
  	struct list_head cpu_timers[3];
  
@@ -66311,7 +66638,7 @@ index 1e86bb4..ab37e2e 100644
  	char comm[TASK_COMM_LEN]; /* executable name excluding path
  				     - access with [gs]et_task_comm (which lock
  				       it with task_lock())
-@@ -1381,8 +1399,16 @@ struct task_struct {
+@@ -1381,8 +1409,16 @@ struct task_struct {
  #endif
  /* CPU-specific state of this task */
  	struct thread_struct thread;
@@ -66328,7 +66655,7 @@ index 1e86bb4..ab37e2e 100644
  /* open file information */
  	struct files_struct *files;
  /* namespaces */
-@@ -1429,6 +1455,11 @@ struct task_struct {
+@@ -1429,6 +1465,11 @@ struct task_struct {
  	struct rt_mutex_waiter *pi_blocked_on;
  #endif
  
@@ -66340,7 +66667,7 @@ index 1e86bb4..ab37e2e 100644
  #ifdef CONFIG_DEBUG_MUTEXES
  	/* mutex deadlock detection */
  	struct mutex_waiter *blocked_on;
-@@ -1544,6 +1575,28 @@ struct task_struct {
+@@ -1544,6 +1585,28 @@ struct task_struct {
  	unsigned long default_timer_slack_ns;
  
  	struct list_head	*scm_work_list;
@@ -66369,7 +66696,7 @@ index 1e86bb4..ab37e2e 100644
  #ifdef CONFIG_FUNCTION_GRAPH_TRACER
  	/* Index of current stored address in ret_stack */
  	int curr_ret_stack;
-@@ -1578,6 +1631,51 @@ struct task_struct {
+@@ -1578,6 +1641,51 @@ struct task_struct {
  #endif
  };
  
@@ -66421,7 +66748,7 @@ index 1e86bb4..ab37e2e 100644
  /* Future-safe accessor for struct task_struct's cpus_allowed. */
  #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
  
-@@ -2093,7 +2191,9 @@ void yield(void);
+@@ -2093,7 +2201,9 @@ void yield(void);
  extern struct exec_domain	default_exec_domain;
  
  union thread_union {
@@ -66431,7 +66758,7 @@ index 1e86bb4..ab37e2e 100644
  	unsigned long stack[THREAD_SIZE/sizeof(long)];
  };
  
-@@ -2126,6 +2226,7 @@ extern struct pid_namespace init_pid_ns;
+@@ -2126,6 +2236,7 @@ extern struct pid_namespace init_pid_ns;
   */
  
  extern struct task_struct *find_task_by_vpid(pid_t nr);
@@ -66439,7 +66766,7 @@ index 1e86bb4..ab37e2e 100644
  extern struct task_struct *find_task_by_pid_ns(pid_t nr,
  		struct pid_namespace *ns);
  
-@@ -2247,6 +2348,12 @@ static inline void mmdrop(struct mm_struct * mm)
+@@ -2247,6 +2358,12 @@ static inline void mmdrop(struct mm_struct * mm)
  extern void mmput(struct mm_struct *);
  /* Grab a reference to a task's mm, if it is not already going away */
  extern struct mm_struct *get_task_mm(struct task_struct *task);
@@ -66452,7 +66779,7 @@ index 1e86bb4..ab37e2e 100644
  /* Remove the current tasks stale references to the old mm_struct */
  extern void mm_release(struct task_struct *, struct mm_struct *);
  /* Allocate a new mm structure and copy contents from tsk->mm */
-@@ -2263,7 +2370,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
+@@ -2263,7 +2380,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
  extern void exit_itimers(struct signal_struct *);
  extern void flush_itimer_signals(void);
  
@@ -66461,7 +66788,7 @@ index 1e86bb4..ab37e2e 100644
  
  extern void daemonize(const char *, ...);
  extern int allow_signal(int);
-@@ -2428,9 +2535,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
+@@ -2428,9 +2545,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
  
  #endif
  
@@ -73784,7 +74111,7 @@ index fd3c8aa..5f324a6 100644
  	}
  	entry	= ring_buffer_event_data(event);
 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
-index 1dcf253..f84c561 100644
+index 1dcf253..e1568b3 100644
 --- a/kernel/trace/trace_output.c
 +++ b/kernel/trace/trace_output.c
 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
@@ -73796,9 +74123,11 @@ index 1dcf253..f84c561 100644
  		if (p) {
  			s->len = p - s->buffer;
  			return 1;
-@@ -811,13 +811,13 @@ int register_ftrace_event(struct trace_event *event)
+@@ -810,14 +810,16 @@ int register_ftrace_event(struct trace_event *event)
+ 			goto out;
  	}
  
++	pax_open_kernel();
  	if (event->funcs->trace == NULL)
 -		event->funcs->trace = trace_nop_print;
 +		*(void **)&event->funcs->trace = trace_nop_print;
@@ -73811,6 +74140,7 @@ index 1dcf253..f84c561 100644
  	if (event->funcs->binary == NULL)
 -		event->funcs->binary = trace_nop_print;
 +		*(void **)&event->funcs->binary = trace_nop_print;
++	pax_close_kernel();
  
  	key = event->type & (EVENT_HASHSIZE - 1);
  
@@ -75687,10 +76017,18 @@ index 4f4f53b..de8e432 100644
  	    capable(CAP_IPC_LOCK))
  		ret = do_mlockall(flags);
 diff --git a/mm/mmap.c b/mm/mmap.c
-index eae90af..67b94e0 100644
+index eae90af..145f1d4 100644
 --- a/mm/mmap.c
 +++ b/mm/mmap.c
-@@ -46,6 +46,16 @@
+@@ -30,6 +30,7 @@
+ #include <linux/perf_event.h>
+ #include <linux/audit.h>
+ #include <linux/khugepaged.h>
++#include <linux/random.h>
+ 
+ #include <asm/uaccess.h>
+ #include <asm/cacheflush.h>
+@@ -46,6 +47,16 @@
  #define arch_rebalance_pgtables(addr, len)		(addr)
  #endif
  
@@ -75707,7 +76045,7 @@ index eae90af..67b94e0 100644
  static void unmap_region(struct mm_struct *mm,
  		struct vm_area_struct *vma, struct vm_area_struct *prev,
  		unsigned long start, unsigned long end);
-@@ -71,22 +81,32 @@ static void unmap_region(struct mm_struct *mm,
+@@ -71,22 +82,32 @@ static void unmap_region(struct mm_struct *mm,
   *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
   *
   */
@@ -75743,7 +76081,7 @@ index eae90af..67b94e0 100644
  /*
   * Make sure vm_committed_as in one cacheline and not cacheline shared with
   * other variables. It can be updated by several CPUs frequently.
-@@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
+@@ -228,6 +249,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
  	struct vm_area_struct *next = vma->vm_next;
  
  	might_sleep();
@@ -75751,7 +76089,7 @@ index eae90af..67b94e0 100644
  	if (vma->vm_ops && vma->vm_ops->close)
  		vma->vm_ops->close(vma);
  	if (vma->vm_file) {
-@@ -272,6 +293,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
+@@ -272,6 +294,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
  	 * not page aligned -Ram Gupta
  	 */
  	rlim = rlimit(RLIMIT_DATA);
@@ -75759,7 +76097,7 @@ index eae90af..67b94e0 100644
  	if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
  			(mm->end_data - mm->start_data) > rlim)
  		goto out;
-@@ -689,6 +711,12 @@ static int
+@@ -689,6 +712,12 @@ static int
  can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
  	struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
  {
@@ -75772,7 +76110,7 @@ index eae90af..67b94e0 100644
  	if (is_mergeable_vma(vma, file, vm_flags) &&
  	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
  		if (vma->vm_pgoff == vm_pgoff)
-@@ -708,6 +736,12 @@ static int
+@@ -708,6 +737,12 @@ static int
  can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
  	struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
  {
@@ -75785,7 +76123,7 @@ index eae90af..67b94e0 100644
  	if (is_mergeable_vma(vma, file, vm_flags) &&
  	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
  		pgoff_t vm_pglen;
-@@ -750,13 +784,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
+@@ -750,13 +785,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
  struct vm_area_struct *vma_merge(struct mm_struct *mm,
  			struct vm_area_struct *prev, unsigned long addr,
  			unsigned long end, unsigned long vm_flags,
@@ -75807,7 +76145,7 @@ index eae90af..67b94e0 100644
  	/*
  	 * We later require that vma->vm_flags == vm_flags,
  	 * so this tests vma->vm_flags & VM_SPECIAL, too.
-@@ -772,6 +813,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+@@ -772,6 +814,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
  	if (next && next->vm_end == end)		/* cases 6, 7, 8 */
  		next = next->vm_next;
  
@@ -75823,7 +76161,7 @@ index eae90af..67b94e0 100644
  	/*
  	 * Can it merge with the predecessor?
  	 */
-@@ -791,9 +841,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+@@ -791,9 +842,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
  							/* cases 1, 6 */
  			err = vma_adjust(prev, prev->vm_start,
  				next->vm_end, prev->vm_pgoff, NULL);
@@ -75849,7 +76187,7 @@ index eae90af..67b94e0 100644
  		if (err)
  			return NULL;
  		khugepaged_enter_vma_merge(prev);
-@@ -807,12 +872,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+@@ -807,12 +873,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
   			mpol_equal(policy, vma_policy(next)) &&
  			can_vma_merge_before(next, vm_flags,
  					anon_vma, file, pgoff+pglen)) {
@@ -75879,7 +76217,7 @@ index eae90af..67b94e0 100644
  		if (err)
  			return NULL;
  		khugepaged_enter_vma_merge(area);
-@@ -921,14 +1001,11 @@ none:
+@@ -921,14 +1002,11 @@ none:
  void vm_stat_account(struct mm_struct *mm, unsigned long flags,
  						struct file *file, long pages)
  {
@@ -75895,7 +76233,7 @@ index eae90af..67b94e0 100644
  		mm->stack_vm += pages;
  	if (flags & (VM_RESERVED|VM_IO))
  		mm->reserved_vm += pages;
-@@ -955,7 +1032,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -955,7 +1033,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
  	 * (the exception is when the underlying filesystem is noexec
  	 *  mounted, in which case we dont add PROT_EXEC.)
  	 */
@@ -75904,7 +76242,7 @@ index eae90af..67b94e0 100644
  		if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
  			prot |= PROT_EXEC;
  
-@@ -981,7 +1058,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -981,7 +1059,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
  	/* Obtain the address to map to. we verify (or select) it and ensure
  	 * that it represents a valid section of the address space.
  	 */
@@ -75913,7 +76251,7 @@ index eae90af..67b94e0 100644
  	if (addr & ~PAGE_MASK)
  		return addr;
  
-@@ -992,6 +1069,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -992,6 +1070,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
  	vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
  			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
  
@@ -75950,7 +76288,7 @@ index eae90af..67b94e0 100644
  	if (flags & MAP_LOCKED)
  		if (!can_do_mlock())
  			return -EPERM;
-@@ -1003,6 +1110,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -1003,6 +1111,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
  		locked += mm->locked_vm;
  		lock_limit = rlimit(RLIMIT_MEMLOCK);
  		lock_limit >>= PAGE_SHIFT;
@@ -75958,7 +76296,7 @@ index eae90af..67b94e0 100644
  		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
  			return -EAGAIN;
  	}
-@@ -1073,6 +1181,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -1073,6 +1182,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
  	if (error)
  		return error;
  
@@ -75968,7 +76306,7 @@ index eae90af..67b94e0 100644
  	return mmap_region(file, addr, len, flags, vm_flags, pgoff);
  }
  EXPORT_SYMBOL(do_mmap_pgoff);
-@@ -1153,7 +1264,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
+@@ -1153,7 +1265,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
  	vm_flags_t vm_flags = vma->vm_flags;
  
  	/* If it was private or non-writable, the write bit is already clear */
@@ -75977,7 +76315,7 @@ index eae90af..67b94e0 100644
  		return 0;
  
  	/* The backer wishes to know when pages are first written to? */
-@@ -1202,14 +1313,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
+@@ -1202,14 +1314,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
  	unsigned long charged = 0;
  	struct inode *inode =  file ? file->f_path.dentry->d_inode : NULL;
  
@@ -76004,7 +76342,7 @@ index eae90af..67b94e0 100644
  	}
  
  	/* Check against address space limit. */
-@@ -1258,6 +1379,16 @@ munmap_back:
+@@ -1258,6 +1380,16 @@ munmap_back:
  		goto unacct_error;
  	}
  
@@ -76021,7 +76359,7 @@ index eae90af..67b94e0 100644
  	vma->vm_mm = mm;
  	vma->vm_start = addr;
  	vma->vm_end = addr + len;
-@@ -1266,8 +1397,9 @@ munmap_back:
+@@ -1266,8 +1398,9 @@ munmap_back:
  	vma->vm_pgoff = pgoff;
  	INIT_LIST_HEAD(&vma->anon_vma_chain);
  
@@ -76032,7 +76370,7 @@ index eae90af..67b94e0 100644
  		if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
  			goto free_vma;
  		if (vm_flags & VM_DENYWRITE) {
-@@ -1281,6 +1413,19 @@ munmap_back:
+@@ -1281,6 +1414,19 @@ munmap_back:
  		error = file->f_op->mmap(file, vma);
  		if (error)
  			goto unmap_and_free_vma;
@@ -76052,7 +76390,7 @@ index eae90af..67b94e0 100644
  		if (vm_flags & VM_EXECUTABLE)
  			added_exe_file_vma(mm);
  
-@@ -1293,6 +1438,8 @@ munmap_back:
+@@ -1293,6 +1439,8 @@ munmap_back:
  		pgoff = vma->vm_pgoff;
  		vm_flags = vma->vm_flags;
  	} else if (vm_flags & VM_SHARED) {
@@ -76061,7 +76399,7 @@ index eae90af..67b94e0 100644
  		error = shmem_zero_setup(vma);
  		if (error)
  			goto free_vma;
-@@ -1316,6 +1463,11 @@ munmap_back:
+@@ -1316,6 +1464,11 @@ munmap_back:
  	vma_link(mm, vma, prev, rb_link, rb_parent);
  	file = vma->vm_file;
  
@@ -76073,7 +76411,7 @@ index eae90af..67b94e0 100644
  	/* Once vma denies write, undo our temporary denial count */
  	if (correct_wcount)
  		atomic_inc(&inode->i_writecount);
-@@ -1324,6 +1476,7 @@ out:
+@@ -1324,6 +1477,7 @@ out:
  
  	mm->total_vm += len >> PAGE_SHIFT;
  	vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
@@ -76081,7 +76419,7 @@ index eae90af..67b94e0 100644
  	if (vm_flags & VM_LOCKED) {
  		if (!mlock_vma_pages_range(vma, addr, addr + len))
  			mm->locked_vm += (len >> PAGE_SHIFT);
-@@ -1341,6 +1494,12 @@ unmap_and_free_vma:
+@@ -1341,6 +1495,12 @@ unmap_and_free_vma:
  	unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
  	charged = 0;
  free_vma:
@@ -76094,11 +76432,21 @@ index eae90af..67b94e0 100644
  	kmem_cache_free(vm_area_cachep, vma);
  unacct_error:
  	if (charged)
-@@ -1348,6 +1507,44 @@ unacct_error:
+@@ -1348,6 +1508,62 @@ unacct_error:
  	return error;
  }
  
-+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
++#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
++unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
++{
++	if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
++		return (random32() & 0xFF) << PAGE_SHIFT;
++
++	return 0;
++}
++#endif
++
++bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
 +{
 +	if (!vma) {
 +#ifdef CONFIG_STACK_GROWSUP
@@ -76121,16 +76469,24 @@ index eae90af..67b94e0 100644
 +	else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
 +		return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
 +#endif
++	else if (offset)
++		return offset <= vma->vm_start - addr - len;
 +
 +	return true;
 +}
 +
-+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
++unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
 +{
 +	if (vma->vm_start < len)
 +		return -ENOMEM;
-+	if (!(vma->vm_flags & VM_GROWSDOWN))
-+		return vma->vm_start - len;
++
++	if (!(vma->vm_flags & VM_GROWSDOWN)) {
++		if (offset <= vma->vm_start - len)
++			return vma->vm_start - len - offset;
++		else
++			return -ENOMEM;
++	}
++
 +	if (sysctl_heap_stack_gap <= vma->vm_start - len)
 +		return vma->vm_start - len - sysctl_heap_stack_gap;
 +	return -ENOMEM;
@@ -76139,7 +76495,7 @@ index eae90af..67b94e0 100644
  /* Get an address range which is currently unmapped.
   * For shmat() with addr=0.
   *
-@@ -1374,18 +1571,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -1374,18 +1590,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
  	if (flags & MAP_FIXED)
  		return addr;
  
@@ -76170,7 +76526,7 @@ index eae90af..67b94e0 100644
  	}
  
  full_search:
-@@ -1396,34 +1598,40 @@ full_search:
+@@ -1396,34 +1617,40 @@ full_search:
  			 * Start a new search - just in case we missed
  			 * some holes.
  			 */
@@ -76222,7 +76578,7 @@ index eae90af..67b94e0 100644
  		mm->free_area_cache = addr;
  		mm->cached_hole_size = ~0UL;
  	}
-@@ -1441,7 +1649,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1441,7 +1668,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  {
  	struct vm_area_struct *vma;
  	struct mm_struct *mm = current->mm;
@@ -76231,7 +76587,7 @@ index eae90af..67b94e0 100644
  
  	/* requested length too big for entire address space */
  	if (len > TASK_SIZE)
-@@ -1450,13 +1658,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1450,13 +1677,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  	if (flags & MAP_FIXED)
  		return addr;
  
@@ -76254,7 +76610,7 @@ index eae90af..67b94e0 100644
  	}
  
  	/* check if free_area_cache is useful for us */
-@@ -1471,7 +1684,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1471,7 +1703,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  	/* make sure it can fit in the remaining address space */
  	if (addr > len) {
  		vma = find_vma(mm, addr-len);
@@ -76263,7 +76619,7 @@ index eae90af..67b94e0 100644
  			/* remember the address as a hint for next time */
  			return (mm->free_area_cache = addr-len);
  	}
-@@ -1488,7 +1701,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1488,7 +1720,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  		 * return with success:
  		 */
  		vma = find_vma(mm, addr);
@@ -76272,7 +76628,7 @@ index eae90af..67b94e0 100644
  			/* remember the address as a hint for next time */
  			return (mm->free_area_cache = addr);
  
-@@ -1497,8 +1710,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1497,8 +1729,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
   		        mm->cached_hole_size = vma->vm_start - addr;
  
  		/* try just below the current vma->vm_start */
@@ -76283,7 +76639,7 @@ index eae90af..67b94e0 100644
  
  bottomup:
  	/*
-@@ -1507,13 +1720,21 @@ bottomup:
+@@ -1507,13 +1739,21 @@ bottomup:
  	 * can happen with large stack limits and large mmap()
  	 * allocations.
  	 */
@@ -76307,7 +76663,7 @@ index eae90af..67b94e0 100644
  	mm->cached_hole_size = ~0UL;
  
  	return addr;
-@@ -1522,6 +1743,12 @@ bottomup:
+@@ -1522,6 +1762,12 @@ bottomup:
  
  void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
  {
@@ -76320,7 +76676,7 @@ index eae90af..67b94e0 100644
  	/*
  	 * Is this a new hole at the highest possible address?
  	 */
-@@ -1529,8 +1756,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
+@@ -1529,8 +1775,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
  		mm->free_area_cache = addr;
  
  	/* dont allow allocations above current base */
@@ -76332,7 +76688,7 @@ index eae90af..67b94e0 100644
  }
  
  unsigned long
-@@ -1603,40 +1832,50 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
+@@ -1603,40 +1851,50 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
  
  EXPORT_SYMBOL(find_vma);
  
@@ -76408,7 +76764,7 @@ index eae90af..67b94e0 100644
  
  /*
   * Verify that the stack growth is acceptable and
-@@ -1654,6 +1893,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -1654,6 +1912,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
  		return -ENOMEM;
  
  	/* Stack limit test */
@@ -76416,7 +76772,7 @@ index eae90af..67b94e0 100644
  	if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
  		return -ENOMEM;
  
-@@ -1664,6 +1904,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -1664,6 +1923,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
  		locked = mm->locked_vm + grow;
  		limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
  		limit >>= PAGE_SHIFT;
@@ -76424,7 +76780,7 @@ index eae90af..67b94e0 100644
  		if (locked > limit && !capable(CAP_IPC_LOCK))
  			return -ENOMEM;
  	}
-@@ -1694,37 +1935,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -1694,37 +1954,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
   * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
   * vma is the last one with address > vma->vm_end.  Have to extend vma.
   */
@@ -76482,7 +76838,7 @@ index eae90af..67b94e0 100644
  		unsigned long size, grow;
  
  		size = address - vma->vm_start;
-@@ -1739,6 +1991,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+@@ -1739,6 +2010,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
  			}
  		}
  	}
@@ -76491,7 +76847,7 @@ index eae90af..67b94e0 100644
  	vma_unlock_anon_vma(vma);
  	khugepaged_enter_vma_merge(vma);
  	return error;
-@@ -1752,6 +2006,8 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -1752,6 +2025,8 @@ int expand_downwards(struct vm_area_struct *vma,
  				   unsigned long address)
  {
  	int error;
@@ -76500,7 +76856,7 @@ index eae90af..67b94e0 100644
  
  	/*
  	 * We must make sure the anon_vma is allocated
-@@ -1765,6 +2021,15 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -1765,6 +2040,15 @@ int expand_downwards(struct vm_area_struct *vma,
  	if (error)
  		return error;
  
@@ -76516,7 +76872,7 @@ index eae90af..67b94e0 100644
  	vma_lock_anon_vma(vma);
  
  	/*
-@@ -1774,9 +2039,17 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -1774,9 +2058,17 @@ int expand_downwards(struct vm_area_struct *vma,
  	 */
  
  	/* Somebody else might have raced and expanded it already */
@@ -76535,7 +76891,7 @@ index eae90af..67b94e0 100644
  		size = vma->vm_end - address;
  		grow = (vma->vm_start - address) >> PAGE_SHIFT;
  
-@@ -1786,11 +2059,22 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -1786,11 +2078,22 @@ int expand_downwards(struct vm_area_struct *vma,
  			if (!error) {
  				vma->vm_start = address;
  				vma->vm_pgoff -= grow;
@@ -76558,7 +76914,7 @@ index eae90af..67b94e0 100644
  	khugepaged_enter_vma_merge(vma);
  	return error;
  }
-@@ -1860,6 +2144,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
+@@ -1860,6 +2163,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
  	do {
  		long nrpages = vma_pages(vma);
  
@@ -76572,7 +76928,7 @@ index eae90af..67b94e0 100644
  		mm->total_vm -= nrpages;
  		vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
  		vma = remove_vma(vma);
-@@ -1905,6 +2196,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -1905,6 +2215,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
  	insertion_point = (prev ? &prev->vm_next : &mm->mmap);
  	vma->vm_prev = NULL;
  	do {
@@ -76589,7 +76945,7 @@ index eae90af..67b94e0 100644
  		rb_erase(&vma->vm_rb, &mm->mm_rb);
  		mm->map_count--;
  		tail_vma = vma;
-@@ -1933,14 +2234,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -1933,14 +2253,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
  	struct vm_area_struct *new;
  	int err = -ENOMEM;
  
@@ -76623,7 +76979,7 @@ index eae90af..67b94e0 100644
  	/* most fields are the same, copy all, and then fixup */
  	*new = *vma;
  
-@@ -1953,6 +2273,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -1953,6 +2292,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
  		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
  	}
  
@@ -76646,7 +77002,7 @@ index eae90af..67b94e0 100644
  	pol = mpol_dup(vma_policy(vma));
  	if (IS_ERR(pol)) {
  		err = PTR_ERR(pol);
-@@ -1978,6 +2314,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -1978,6 +2333,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
  	else
  		err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
  
@@ -76689,7 +77045,7 @@ index eae90af..67b94e0 100644
  	/* Success. */
  	if (!err)
  		return 0;
-@@ -1990,10 +2362,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -1990,10 +2381,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
  			removed_exe_file_vma(mm);
  		fput(new->vm_file);
  	}
@@ -76709,7 +77065,7 @@ index eae90af..67b94e0 100644
  	kmem_cache_free(vm_area_cachep, new);
   out_err:
  	return err;
-@@ -2006,6 +2386,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2006,6 +2405,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
  int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
  	      unsigned long addr, int new_below)
  {
@@ -76725,7 +77081,7 @@ index eae90af..67b94e0 100644
  	if (mm->map_count >= sysctl_max_map_count)
  		return -ENOMEM;
  
-@@ -2017,11 +2406,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2017,11 +2425,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
   * work.  This now handles partial unmappings.
   * Jeremy Fitzhardinge <jeremy@goop.org>
   */
@@ -76756,7 +77112,7 @@ index eae90af..67b94e0 100644
  	if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
  		return -EINVAL;
  
-@@ -2096,6 +2504,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
+@@ -2096,6 +2523,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
  	/* Fix up all other VM information */
  	remove_vma_list(mm, vma);
  
@@ -76765,7 +77121,7 @@ index eae90af..67b94e0 100644
  	return 0;
  }
  
-@@ -2108,22 +2518,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
+@@ -2108,22 +2537,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
  
  	profile_munmap(addr);
  
@@ -76794,7 +77150,7 @@ index eae90af..67b94e0 100644
  /*
   *  this is really a simplified "do_mmap".  it only handles
   *  anonymous maps.  eventually we may be able to do some
-@@ -2137,6 +2543,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2137,6 +2562,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
  	struct rb_node ** rb_link, * rb_parent;
  	pgoff_t pgoff = addr >> PAGE_SHIFT;
  	int error;
@@ -76802,7 +77158,7 @@ index eae90af..67b94e0 100644
  
  	len = PAGE_ALIGN(len);
  	if (!len)
-@@ -2148,16 +2555,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2148,16 +2574,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
  
  	flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
  
@@ -76834,7 +77190,7 @@ index eae90af..67b94e0 100644
  		locked += mm->locked_vm;
  		lock_limit = rlimit(RLIMIT_MEMLOCK);
  		lock_limit >>= PAGE_SHIFT;
-@@ -2174,22 +2595,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2174,22 +2614,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
  	/*
  	 * Clear old maps.  this also does some error checking for us
  	 */
@@ -76861,7 +77217,7 @@ index eae90af..67b94e0 100644
  		return -ENOMEM;
  
  	/* Can we just expand an old private anonymous mapping? */
-@@ -2203,7 +2624,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2203,7 +2643,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
  	 */
  	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
  	if (!vma) {
@@ -76870,7 +77226,7 @@ index eae90af..67b94e0 100644
  		return -ENOMEM;
  	}
  
-@@ -2217,11 +2638,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2217,11 +2657,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
  	vma_link(mm, vma, prev, rb_link, rb_parent);
  out:
  	perf_event_mmap(vma);
@@ -76885,7 +77241,7 @@ index eae90af..67b94e0 100644
  	return addr;
  }
  
-@@ -2268,8 +2690,10 @@ void exit_mmap(struct mm_struct *mm)
+@@ -2268,8 +2709,10 @@ void exit_mmap(struct mm_struct *mm)
  	 * Walk the list again, actually closing and freeing it,
  	 * with preemption enabled, without holding any MM locks.
  	 */
@@ -76897,7 +77253,7 @@ index eae90af..67b94e0 100644
  
  	BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
  }
-@@ -2283,6 +2707,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
+@@ -2283,6 +2726,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
  	struct vm_area_struct * __vma, * prev;
  	struct rb_node ** rb_link, * rb_parent;
  
@@ -76911,7 +77267,7 @@ index eae90af..67b94e0 100644
  	/*
  	 * The vm_pgoff of a purely anonymous vma should be irrelevant
  	 * until its first write fault, when page's anon_vma and index
-@@ -2305,7 +2736,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
+@@ -2305,7 +2755,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
  	if ((vma->vm_flags & VM_ACCOUNT) &&
  	     security_vm_enough_memory_mm(mm, vma_pages(vma)))
  		return -ENOMEM;
@@ -76934,7 +77290,7 @@ index eae90af..67b94e0 100644
  	return 0;
  }
  
-@@ -2323,6 +2769,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+@@ -2323,6 +2788,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
  	struct rb_node **rb_link, *rb_parent;
  	struct mempolicy *pol;
  
@@ -76943,7 +77299,7 @@ index eae90af..67b94e0 100644
  	/*
  	 * If anonymous vma has not yet been faulted, update new pgoff
  	 * to match new location, to increase its chance of merging.
-@@ -2373,6 +2821,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+@@ -2373,6 +2840,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
  	return NULL;
  }
  
@@ -76983,7 +77339,7 @@ index eae90af..67b94e0 100644
  /*
   * Return true if the calling process may expand its vm space by the passed
   * number of pages
-@@ -2384,6 +2865,12 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
+@@ -2384,6 +2884,12 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
  
  	lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
  
@@ -76996,7 +77352,7 @@ index eae90af..67b94e0 100644
  	if (cur + npages > lim)
  		return 0;
  	return 1;
-@@ -2454,6 +2941,22 @@ int install_special_mapping(struct mm_struct *mm,
+@@ -2454,6 +2960,22 @@ int install_special_mapping(struct mm_struct *mm,
  	vma->vm_start = addr;
  	vma->vm_end = addr + len;
  

diff --git a/3.2.37/4450_grsec-kconfig-default-gids.patch b/3.2.37/4450_grsec-kconfig-default-gids.patch
index e5d7e60..3dfdc8f 100644
--- a/3.2.37/4450_grsec-kconfig-default-gids.patch
+++ b/3.2.37/4450_grsec-kconfig-default-gids.patch
@@ -16,7 +16,7 @@ from shooting themselves in the foot.
 diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
 --- a/grsecurity/Kconfig	2012-10-13 09:51:35.000000000 -0400
 +++ b/grsecurity/Kconfig	2012-10-13 09:52:32.000000000 -0400
-@@ -539,7 +539,7 @@
+@@ -572,7 +572,7 @@
  config GRKERNSEC_AUDIT_GID
  	int "GID for auditing"
  	depends on GRKERNSEC_AUDIT_GROUP
@@ -25,7 +25,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
  
  config GRKERNSEC_EXECLOG
  	bool "Exec logging"
-@@ -759,7 +759,7 @@
+@@ -792,7 +792,7 @@
  config GRKERNSEC_TPE_UNTRUSTED_GID
  	int "GID for TPE-untrusted users"
  	depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
@@ -34,7 +34,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
  	help
  	  Setting this GID determines what group TPE restrictions will be
  	  *enabled* for.  If the sysctl option is enabled, a sysctl option
-@@ -768,7 +768,7 @@
+@@ -801,7 +801,7 @@
  config GRKERNSEC_TPE_TRUSTED_GID
  	int "GID for TPE-trusted users"
  	depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
@@ -43,7 +43,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
  	help
  	  Setting this GID determines what group TPE restrictions will be
  	  *disabled* for.  If the sysctl option is enabled, a sysctl option
-@@ -861,7 +861,7 @@
+@@ -894,7 +894,7 @@
  config GRKERNSEC_SOCKET_ALL_GID
  	int "GID to deny all sockets for"
  	depends on GRKERNSEC_SOCKET_ALL
@@ -52,7 +52,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
  	help
  	  Here you can choose the GID to disable socket access for. Remember to
  	  add the users you want socket access disabled for to the GID
-@@ -882,7 +882,7 @@
+@@ -915,7 +915,7 @@
  config GRKERNSEC_SOCKET_CLIENT_GID
  	int "GID to deny client sockets for"
  	depends on GRKERNSEC_SOCKET_CLIENT
@@ -61,7 +61,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
  	help
  	  Here you can choose the GID to disable client socket access for.
  	  Remember to add the users you want client socket access disabled for to
-@@ -900,7 +900,7 @@
+@@ -933,7 +933,7 @@
  config GRKERNSEC_SOCKET_SERVER_GID
  	int "GID to deny server sockets for"
  	depends on GRKERNSEC_SOCKET_SERVER

diff --git a/3.2.37/4465_selinux-avc_audit-log-curr_ip.patch b/3.2.37/4465_selinux-avc_audit-log-curr_ip.patch
index 169fdf1..a7cc9cd 100644
--- a/3.2.37/4465_selinux-avc_audit-log-curr_ip.patch
+++ b/3.2.37/4465_selinux-avc_audit-log-curr_ip.patch
@@ -28,7 +28,7 @@ Signed-off-by: Lorenzo Hernandez Garcia-Hierro <lorenzo@gnu.org>
 diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
 --- a/grsecurity/Kconfig	2011-04-17 19:25:54.000000000 -0400
 +++ b/grsecurity/Kconfig	2011-04-17 19:32:53.000000000 -0400
-@@ -959,6 +959,27 @@
+@@ -992,6 +992,27 @@
  menu "Logging Options"
  depends on GRKERNSEC
  

diff --git a/3.7.3/0000_README b/3.7.4/0000_README
similarity index 91%
rename from 3.7.3/0000_README
rename to 3.7.4/0000_README
index 9eea012..b3b6291 100644
--- a/3.7.3/0000_README
+++ b/3.7.4/0000_README
@@ -2,7 +2,11 @@ README
 -----------------------------------------------------------------------------
 Individual Patch Descriptions:
 -----------------------------------------------------------------------------
-Patch:	4420_grsecurity-2.9.1-3.7.3-201301181518.patch
+Patch:	1003_linux-3.7.4.patch
+From:	http://www.kernel.org
+Desc:	Linux 3.7.4
+
+Patch:	4420_grsecurity-2.9.1-3.7.4-201301230048.patch
 From:	http://www.grsecurity.net
 Desc:	hardened-sources base patch from upstream grsecurity
 

diff --git a/3.7.4/1003_linux-3.7.4.patch b/3.7.4/1003_linux-3.7.4.patch
new file mode 100644
index 0000000..a0c6ff7
--- /dev/null
+++ b/3.7.4/1003_linux-3.7.4.patch
@@ -0,0 +1,1266 @@
+diff --git a/Makefile b/Makefile
+index 51a9bda..f9196bc 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 7
+-SUBLEVEL = 3
++SUBLEVEL = 4
+ EXTRAVERSION =
+ NAME = Terrified Chipmunk
+ 
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index 64b1339..7adf414 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -132,9 +132,8 @@ extern struct page *empty_zero_page;
+ #define pte_write(pte)		(!(pte_val(pte) & PTE_RDONLY))
+ #define pte_exec(pte)		(!(pte_val(pte) & PTE_UXN))
+ 
+-#define pte_present_exec_user(pte) \
+-	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == \
+-	 (PTE_VALID | PTE_USER))
++#define pte_present_user(pte) \
++	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
+ 
+ #define PTE_BIT_FUNC(fn,op) \
+ static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
+@@ -157,10 +156,13 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
+ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+ 			      pte_t *ptep, pte_t pte)
+ {
+-	if (pte_present_exec_user(pte))
+-		__sync_icache_dcache(pte, addr);
+-	if (!pte_dirty(pte))
+-		pte = pte_wrprotect(pte);
++	if (pte_present_user(pte)) {
++		if (pte_exec(pte))
++			__sync_icache_dcache(pte, addr);
++		if (!pte_dirty(pte))
++			pte = pte_wrprotect(pte);
++	}
++
+ 	set_pte(ptep, pte);
+ }
+ 
+diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
+index fba4d66..4c060bb 100644
+--- a/arch/s390/include/asm/timex.h
++++ b/arch/s390/include/asm/timex.h
+@@ -128,4 +128,32 @@ static inline unsigned long long get_clock_monotonic(void)
+ 	return get_clock_xt() - sched_clock_base_cc;
+ }
+ 
++/**
++ * tod_to_ns - convert a TOD format value to nanoseconds
++ * @todval: to be converted TOD format value
++ * Returns: number of nanoseconds that correspond to the TOD format value
++ *
++ * Converting a 64 Bit TOD format value to nanoseconds means that the value
++ * must be divided by 4.096. In order to achieve that we multiply with 125
++ * and divide by 512:
++ *
++ *    ns = (todval * 125) >> 9;
++ *
++ * In order to avoid an overflow with the multiplication we can rewrite this.
++ * With a split todval == 2^32 * th + tl (th upper 32 bits, tl lower 32 bits)
++ * we end up with
++ *
++ *    ns = ((2^32 * th + tl) * 125 ) >> 9;
++ * -> ns = (2^23 * th * 125) + ((tl * 125) >> 9);
++ *
++ */
++static inline unsigned long long tod_to_ns(unsigned long long todval)
++{
++	unsigned long long ns;
++
++	ns = ((todval >> 32) << 23) * 125;
++	ns += ((todval & 0xffffffff) * 125) >> 9;
++	return ns;
++}
++
+ #endif
+diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
+index 7fcd690..b5d8a18 100644
+--- a/arch/s390/kernel/time.c
++++ b/arch/s390/kernel/time.c
+@@ -63,7 +63,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators);
+  */
+ unsigned long long notrace __kprobes sched_clock(void)
+ {
+-	return (get_clock_monotonic() * 125) >> 9;
++	return tod_to_ns(get_clock_monotonic());
+ }
+ 
+ /*
+diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
+index ff1e2f8..d533389 100644
+--- a/arch/s390/kvm/interrupt.c
++++ b/arch/s390/kvm/interrupt.c
+@@ -408,7 +408,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
+ 		return 0;
+ 	}
+ 
+-	sltime = ((vcpu->arch.sie_block->ckc - now)*125)>>9;
++	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
+ 
+ 	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
+ 	VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
+diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h
+index 37924af..bf9f44f 100644
+--- a/arch/sh/include/asm/elf.h
++++ b/arch/sh/include/asm/elf.h
+@@ -203,9 +203,9 @@ extern void __kernel_vsyscall;
+ 	if (vdso_enabled)					\
+ 		NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE);	\
+ 	else							\
+-		NEW_AUX_ENT(AT_IGNORE, 0);
++		NEW_AUX_ENT(AT_IGNORE, 0)
+ #else
+-#define VSYSCALL_AUX_ENT
++#define VSYSCALL_AUX_ENT	NEW_AUX_ENT(AT_IGNORE, 0)
+ #endif /* CONFIG_VSYSCALL */
+ 
+ #ifdef CONFIG_SH_FPU
+diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
+index 88b725a..cf8639b 100644
+--- a/arch/x86/kernel/entry_32.S
++++ b/arch/x86/kernel/entry_32.S
+@@ -1084,7 +1084,6 @@ ENTRY(xen_failsafe_callback)
+ 	lea 16(%esp),%esp
+ 	CFI_ADJUST_CFA_OFFSET -16
+ 	jz 5f
+-	addl $16,%esp
+ 	jmp iret_exc
+ 5:	pushl_cfi $-1 /* orig_ax = -1 => not a system call */
+ 	SAVE_ALL
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index ca45696..86c524c 100644
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -614,6 +614,81 @@ static __init void reserve_ibft_region(void)
+ 
+ static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10;
+ 
++static bool __init snb_gfx_workaround_needed(void)
++{
++	int i;
++	u16 vendor, devid;
++	static const u16 snb_ids[] = {
++		0x0102,
++		0x0112,
++		0x0122,
++		0x0106,
++		0x0116,
++		0x0126,
++		0x010a,
++	};
++
++	/* Assume no if something weird is going on with PCI */
++	if (!early_pci_allowed())
++		return false;
++
++	vendor = read_pci_config_16(0, 2, 0, PCI_VENDOR_ID);
++	if (vendor != 0x8086)
++		return false;
++
++	devid = read_pci_config_16(0, 2, 0, PCI_DEVICE_ID);
++	for (i = 0; i < ARRAY_SIZE(snb_ids); i++)
++		if (devid == snb_ids[i])
++			return true;
++
++	return false;
++}
++
++/*
++ * Sandy Bridge graphics has trouble with certain ranges, exclude
++ * them from allocation.
++ */
++static void __init trim_snb_memory(void)
++{
++	static const unsigned long bad_pages[] = {
++		0x20050000,
++		0x20110000,
++		0x20130000,
++		0x20138000,
++		0x40004000,
++	};
++	int i;
++
++	if (!snb_gfx_workaround_needed())
++		return;
++
++	printk(KERN_DEBUG "reserving inaccessible SNB gfx pages\n");
++
++	/*
++	 * Reserve all memory below the 1 MB mark that has not
++	 * already been reserved.
++	 */
++	memblock_reserve(0, 1<<20);
++
++	for (i = 0; i < ARRAY_SIZE(bad_pages); i++) {
++		if (memblock_reserve(bad_pages[i], PAGE_SIZE))
++			printk(KERN_WARNING "failed to reserve 0x%08lx\n",
++			       bad_pages[i]);
++	}
++}
++
++/*
++ * Here we put platform-specific memory range workarounds, i.e.
++ * memory known to be corrupt or otherwise in need to be reserved on
++ * specific platforms.
++ *
++ * If this gets used more widely it could use a real dispatch mechanism.
++ */
++static void __init trim_platform_memory_ranges(void)
++{
++	trim_snb_memory();
++}
++
+ static void __init trim_bios_range(void)
+ {
+ 	/*
+@@ -634,6 +709,7 @@ static void __init trim_bios_range(void)
+ 	 * take them out.
+ 	 */
+ 	e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
++
+ 	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+ }
+ 
+@@ -912,6 +988,8 @@ void __init setup_arch(char **cmdline_p)
+ 
+ 	setup_real_mode();
+ 
++	trim_platform_memory_ranges();
++
+ 	init_gbpages();
+ 
+ 	/* max_pfn_mapped is updated here */
+diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
+index be5f7aa..3724891 100644
+--- a/drivers/base/firmware_class.c
++++ b/drivers/base/firmware_class.c
+@@ -295,7 +295,7 @@ static bool fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf
+ 	char *buf;
+ 
+ 	size = fw_file_size(file);
+-	if (size < 0)
++	if (size <= 0)
+ 		return false;
+ 	buf = vmalloc(size);
+ 	if (!buf)
+diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
+index 211c402..1a8594b 100644
+--- a/drivers/gpu/drm/radeon/r600_cs.c
++++ b/drivers/gpu/drm/radeon/r600_cs.c
+@@ -2429,8 +2429,10 @@ static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
+ 	kfree(parser->relocs);
+ 	for (i = 0; i < parser->nchunks; i++) {
+ 		kfree(parser->chunks[i].kdata);
+-		kfree(parser->chunks[i].kpage[0]);
+-		kfree(parser->chunks[i].kpage[1]);
++		if (parser->rdev && (parser->rdev->flags & RADEON_IS_AGP)) {
++			kfree(parser->chunks[i].kpage[0]);
++			kfree(parser->chunks[i].kpage[1]);
++		}
+ 	}
+ 	kfree(parser->chunks);
+ 	kfree(parser->chunks_array);
+diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
+index 41672cc..dc8d15a 100644
+--- a/drivers/gpu/drm/radeon/radeon_cs.c
++++ b/drivers/gpu/drm/radeon/radeon_cs.c
+@@ -266,7 +266,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
+ 				  p->chunks[p->chunk_ib_idx].length_dw);
+ 			return -EINVAL;
+ 		}
+-		if ((p->rdev->flags & RADEON_IS_AGP)) {
++		if (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) {
+ 			p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ 			p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ 			if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
+@@ -570,7 +570,8 @@ static int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
+ 	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
+ 	int i;
+ 	int size = PAGE_SIZE;
+-	bool copy1 = (p->rdev->flags & RADEON_IS_AGP) ? false : true;
++	bool copy1 = (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) ?
++		false : true;
+ 
+ 	for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
+ 		if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 9476c1b..c2c07a4 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -2327,8 +2327,39 @@ static int domain_add_dev_info(struct dmar_domain *domain,
+ 	return 0;
+ }
+ 
++static bool device_has_rmrr(struct pci_dev *dev)
++{
++	struct dmar_rmrr_unit *rmrr;
++	int i;
++
++	for_each_rmrr_units(rmrr) {
++		for (i = 0; i < rmrr->devices_cnt; i++) {
++			/*
++			 * Return TRUE if this RMRR contains the device that
++			 * is passed in.
++			 */
++			if (rmrr->devices[i] == dev)
++				return true;
++		}
++	}
++	return false;
++}
++
+ static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
+ {
++
++	/*
++	 * We want to prevent any device associated with an RMRR from
++	 * getting placed into the SI Domain. This is done because
++	 * problems exist when devices are moved in and out of domains
++	 * and their respective RMRR info is lost. We exempt USB devices
++	 * from this process due to their usage of RMRRs that are known
++	 * to not be needed after BIOS hand-off to OS.
++	 */
++	if (device_has_rmrr(pdev) &&
++	    (pdev->class >> 8) != PCI_CLASS_SERIAL_USB)
++		return 0;
++
+ 	if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
+ 		return 1;
+ 
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index e1ceb37..9b178a3 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -909,17 +909,18 @@ static int igb_request_msix(struct igb_adapter *adapter)
+ {
+ 	struct net_device *netdev = adapter->netdev;
+ 	struct e1000_hw *hw = &adapter->hw;
+-	int i, err = 0, vector = 0;
++	int i, err = 0, vector = 0, free_vector = 0;
+ 
+ 	err = request_irq(adapter->msix_entries[vector].vector,
+ 	                  igb_msix_other, 0, netdev->name, adapter);
+ 	if (err)
+-		goto out;
+-	vector++;
++		goto err_out;
+ 
+ 	for (i = 0; i < adapter->num_q_vectors; i++) {
+ 		struct igb_q_vector *q_vector = adapter->q_vector[i];
+ 
++		vector++;
++
+ 		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);
+ 
+ 		if (q_vector->rx.ring && q_vector->tx.ring)
+@@ -938,13 +939,22 @@ static int igb_request_msix(struct igb_adapter *adapter)
+ 		                  igb_msix_ring, 0, q_vector->name,
+ 		                  q_vector);
+ 		if (err)
+-			goto out;
+-		vector++;
++			goto err_free;
+ 	}
+ 
+ 	igb_configure_msix(adapter);
+ 	return 0;
+-out:
++
++err_free:
++	/* free already assigned IRQs */
++	free_irq(adapter->msix_entries[free_vector++].vector, adapter);
++
++	vector--;
++	for (i = 0; i < vector; i++) {
++		free_irq(adapter->msix_entries[free_vector++].vector,
++			 adapter->q_vector[i]);
++	}
++err_out:
+ 	return err;
+ }
+ 
+diff --git a/drivers/staging/vt6656/bssdb.h b/drivers/staging/vt6656/bssdb.h
+index 6b2ec39..806cbf7 100644
+--- a/drivers/staging/vt6656/bssdb.h
++++ b/drivers/staging/vt6656/bssdb.h
+@@ -90,7 +90,6 @@ typedef struct tagSRSNCapObject {
+ } SRSNCapObject, *PSRSNCapObject;
+ 
+ // BSS info(AP)
+-#pragma pack(1)
+ typedef struct tagKnownBSS {
+     // BSS info
+     BOOL            bActive;
+diff --git a/drivers/staging/vt6656/int.h b/drivers/staging/vt6656/int.h
+index 3734e2c..91ceb77 100644
+--- a/drivers/staging/vt6656/int.h
++++ b/drivers/staging/vt6656/int.h
+@@ -34,7 +34,6 @@
+ #include "device.h"
+ 
+ /*---------------------  Export Definitions -------------------------*/
+-#pragma pack(1)
+ typedef struct tagSINTData {
+ 	BYTE byTSR0;
+ 	BYTE byPkt0;
+diff --git a/drivers/staging/vt6656/iocmd.h b/drivers/staging/vt6656/iocmd.h
+index 22710ce..ae6e2d2 100644
+--- a/drivers/staging/vt6656/iocmd.h
++++ b/drivers/staging/vt6656/iocmd.h
+@@ -95,13 +95,12 @@ typedef enum tagWZONETYPE {
+ // Ioctl interface structure
+ // Command structure
+ //
+-#pragma pack(1)
+ typedef struct tagSCmdRequest {
+ 	u8 name[16];
+ 	void	*data;
+ 	u16	    wResult;
+ 	u16     wCmdCode;
+-} SCmdRequest, *PSCmdRequest;
++} __packed SCmdRequest, *PSCmdRequest;
+ 
+ //
+ // Scan
+@@ -111,7 +110,7 @@ typedef struct tagSCmdScan {
+ 
+     u8	    ssid[SSID_MAXLEN + 2];
+ 
+-} SCmdScan, *PSCmdScan;
++} __packed SCmdScan, *PSCmdScan;
+ 
+ //
+ // BSS Join
+@@ -126,7 +125,7 @@ typedef struct tagSCmdBSSJoin {
+     BOOL    bPSEnable;
+     BOOL    bShareKeyAuth;
+ 
+-} SCmdBSSJoin, *PSCmdBSSJoin;
++} __packed SCmdBSSJoin, *PSCmdBSSJoin;
+ 
+ //
+ // Zonetype Setting
+@@ -137,7 +136,7 @@ typedef struct tagSCmdZoneTypeSet {
+  BOOL       bWrite;
+  WZONETYPE  ZoneType;
+ 
+-} SCmdZoneTypeSet, *PSCmdZoneTypeSet;
++} __packed SCmdZoneTypeSet, *PSCmdZoneTypeSet;
+ 
+ typedef struct tagSWPAResult {
+          char	ifname[100];
+@@ -145,7 +144,7 @@ typedef struct tagSWPAResult {
+ 	u8 key_mgmt;
+ 	u8 eap_type;
+          BOOL authenticated;
+-} SWPAResult, *PSWPAResult;
++} __packed SWPAResult, *PSWPAResult;
+ 
+ typedef struct tagSCmdStartAP {
+ 
+@@ -157,7 +156,7 @@ typedef struct tagSCmdStartAP {
+     BOOL    bShareKeyAuth;
+     u8      byBasicRate;
+ 
+-} SCmdStartAP, *PSCmdStartAP;
++} __packed SCmdStartAP, *PSCmdStartAP;
+ 
+ typedef struct tagSCmdSetWEP {
+ 
+@@ -167,7 +166,7 @@ typedef struct tagSCmdSetWEP {
+     BOOL    bWepKeyAvailable[WEP_NKEYS];
+     u32     auWepKeyLength[WEP_NKEYS];
+ 
+-} SCmdSetWEP, *PSCmdSetWEP;
++} __packed SCmdSetWEP, *PSCmdSetWEP;
+ 
+ typedef struct tagSBSSIDItem {
+ 
+@@ -180,14 +179,14 @@ typedef struct tagSBSSIDItem {
+     BOOL    bWEPOn;
+     u32     uRSSI;
+ 
+-} SBSSIDItem;
++} __packed SBSSIDItem;
+ 
+ 
+ typedef struct tagSBSSIDList {
+ 
+ 	u32		    uItem;
+ 	SBSSIDItem	sBSSIDList[0];
+-} SBSSIDList, *PSBSSIDList;
++} __packed SBSSIDList, *PSBSSIDList;
+ 
+ 
+ typedef struct tagSNodeItem {
+@@ -208,7 +207,7 @@ typedef struct tagSNodeItem {
+     u32            uTxAttempts;
+     u16            wFailureRatio;
+ 
+-} SNodeItem;
++} __packed SNodeItem;
+ 
+ 
+ typedef struct tagSNodeList {
+@@ -216,7 +215,7 @@ typedef struct tagSNodeList {
+ 	u32		    uItem;
+ 	SNodeItem	sNodeList[0];
+ 
+-} SNodeList, *PSNodeList;
++} __packed SNodeList, *PSNodeList;
+ 
+ 
+ typedef struct tagSCmdLinkStatus {
+@@ -229,7 +228,7 @@ typedef struct tagSCmdLinkStatus {
+     u32     uChannel;
+     u32     uLinkRate;
+ 
+-} SCmdLinkStatus, *PSCmdLinkStatus;
++} __packed SCmdLinkStatus, *PSCmdLinkStatus;
+ 
+ //
+ // 802.11 counter
+@@ -247,7 +246,7 @@ typedef struct tagSDot11MIBCount {
+     u32 ReceivedFragmentCount;
+     u32 MulticastReceivedFrameCount;
+     u32 FCSErrorCount;
+-} SDot11MIBCount, *PSDot11MIBCount;
++} __packed SDot11MIBCount, *PSDot11MIBCount;
+ 
+ 
+ 
+@@ -355,13 +354,13 @@ typedef struct tagSStatMIBCount {
+     u32   ullTxBroadcastBytes[2];
+     u32   ullTxMulticastBytes[2];
+     u32   ullTxDirectedBytes[2];
+-} SStatMIBCount, *PSStatMIBCount;
++} __packed SStatMIBCount, *PSStatMIBCount;
+ 
+ typedef struct tagSCmdValue {
+ 
+     u32     dwValue;
+ 
+-} SCmdValue,  *PSCmdValue;
++} __packed SCmdValue,  *PSCmdValue;
+ 
+ //
+ // hostapd & viawget ioctl related
+@@ -431,7 +430,7 @@ struct viawget_hostapd_param {
+ 			u8 ssid[32];
+ 		} scan_req;
+ 	} u;
+-};
++} __packed;
+ 
+ /*---------------------  Export Classes  ----------------------------*/
+ 
+diff --git a/drivers/staging/vt6656/iowpa.h b/drivers/staging/vt6656/iowpa.h
+index 959c886..2522dde 100644
+--- a/drivers/staging/vt6656/iowpa.h
++++ b/drivers/staging/vt6656/iowpa.h
+@@ -67,12 +67,11 @@ enum {
+ 
+ 
+ 
+-#pragma pack(1)
+ typedef struct viawget_wpa_header {
+ 	u8 type;
+ 	u16 req_ie_len;
+ 	u16 resp_ie_len;
+-} viawget_wpa_header;
++} __packed viawget_wpa_header;
+ 
+ struct viawget_wpa_param {
+ 	u32 cmd;
+@@ -113,9 +112,8 @@ struct viawget_wpa_param {
+ 			u8 *buf;
+ 		} scan_results;
+ 	} u;
+-};
++} __packed;
+ 
+-#pragma pack(1)
+ struct viawget_scan_result {
+ 	u8 bssid[6];
+ 	u8 ssid[32];
+@@ -130,7 +128,7 @@ struct viawget_scan_result {
+ 	int noise;
+ 	int level;
+ 	int maxrate;
+-};
++} __packed;
+ 
+ /*---------------------  Export Classes  ----------------------------*/
+ 
+diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c
+index 4efa9bc..89bfd85 100644
+--- a/drivers/staging/wlan-ng/prism2mgmt.c
++++ b/drivers/staging/wlan-ng/prism2mgmt.c
+@@ -406,7 +406,7 @@ int prism2mgmt_scan_results(wlandevice_t *wlandev, void *msgp)
+ 	/* SSID */
+ 	req->ssid.status = P80211ENUM_msgitem_status_data_ok;
+ 	req->ssid.data.len = le16_to_cpu(item->ssid.len);
+-	req->ssid.data.len = min_t(u16, req->ssid.data.len, WLAN_BSSID_LEN);
++	req->ssid.data.len = min_t(u16, req->ssid.data.len, WLAN_SSID_MAXLEN);
+ 	memcpy(req->ssid.data.data, item->ssid.data, req->ssid.data.len);
+ 
+ 	/* supported rates */
+diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
+index 9abef9f..0943ff0 100644
+--- a/drivers/target/target_core_device.c
++++ b/drivers/target/target_core_device.c
+@@ -1624,6 +1624,7 @@ int core_dev_setup_virtual_lun0(void)
+ 		ret = PTR_ERR(dev);
+ 		goto out;
+ 	}
++	dev->dev_link_magic = SE_DEV_LINK_MAGIC;
+ 	se_dev->se_dev_ptr = dev;
+ 	g_lun0_dev = dev;
+ 
+diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
+index bca737b..a55f91a 100644
+--- a/drivers/target/target_core_fabric_configfs.c
++++ b/drivers/target/target_core_fabric_configfs.c
+@@ -71,6 +71,12 @@ static int target_fabric_mappedlun_link(
+ 	struct se_portal_group *se_tpg;
+ 	struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s;
+ 	int ret = 0, lun_access;
++
++	if (lun->lun_link_magic != SE_LUN_LINK_MAGIC) {
++		pr_err("Bad lun->lun_link_magic, not a valid lun_ci pointer:"
++			" %p to struct lun: %p\n", lun_ci, lun);
++		return -EFAULT;
++	}
+ 	/*
+ 	 * Ensure that the source port exists
+ 	 */
+@@ -745,6 +751,12 @@ static int target_fabric_port_link(
+ 	struct target_fabric_configfs *tf;
+ 	int ret;
+ 
++	if (dev->dev_link_magic != SE_DEV_LINK_MAGIC) {
++		pr_err("Bad dev->dev_link_magic, not a valid se_dev_ci pointer:"
++			" %p to struct se_device: %p\n", se_dev_ci, dev);
++		return -EFAULT;
++	}
++
+ 	tpg_ci = &lun_ci->ci_parent->ci_group->cg_item;
+ 	se_tpg = container_of(to_config_group(tpg_ci),
+ 				struct se_portal_group, tpg_group);
+diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
+index a531fe2..4c34665 100644
+--- a/drivers/target/target_core_tpg.c
++++ b/drivers/target/target_core_tpg.c
+@@ -672,6 +672,7 @@ int core_tpg_register(
+ 	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+ 		lun = se_tpg->tpg_lun_list[i];
+ 		lun->unpacked_lun = i;
++		lun->lun_link_magic = SE_LUN_LINK_MAGIC;
+ 		lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
+ 		atomic_set(&lun->lun_acl_count, 0);
+ 		init_completion(&lun->lun_shutdown_comp);
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index dcecbfb..13fe16c 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -545,9 +545,6 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
+ 
+ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
+ {
+-	if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
+-		transport_lun_remove_cmd(cmd);
+-
+ 	if (transport_cmd_check_stop_to_fabric(cmd))
+ 		return;
+ 	if (remove)
+@@ -1074,6 +1071,7 @@ struct se_device *transport_add_device_to_core_hba(
+ 	dev->se_hba		= hba;
+ 	dev->se_sub_dev		= se_dev;
+ 	dev->transport		= transport;
++	dev->dev_link_magic	= SE_DEV_LINK_MAGIC;
+ 	INIT_LIST_HEAD(&dev->dev_list);
+ 	INIT_LIST_HEAD(&dev->dev_sep_list);
+ 	INIT_LIST_HEAD(&dev->dev_tmr_list);
+@@ -1616,6 +1614,8 @@ static void target_complete_tmr_failure(struct work_struct *work)
+ 
+ 	se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
+ 	se_cmd->se_tfo->queue_tm_rsp(se_cmd);
++
++	transport_cmd_check_stop_to_fabric(se_cmd);
+ }
+ 
+ /**
+@@ -1853,6 +1853,7 @@ void target_execute_cmd(struct se_cmd *cmd)
+ 	}
+ 
+ 	cmd->t_state = TRANSPORT_PROCESSING;
++	cmd->transport_state |= CMD_T_ACTIVE;
+ 	spin_unlock_irq(&cmd->t_state_lock);
+ 
+ 	if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
+@@ -3024,7 +3025,7 @@ int transport_send_check_condition_and_sense(
+ 		/* ILLEGAL REQUEST */
+ 		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ 		/* LOGICAL UNIT COMMUNICATION FAILURE */
+-		buffer[SPC_ASC_KEY_OFFSET] = 0x80;
++		buffer[SPC_ASC_KEY_OFFSET] = 0x08;
+ 		break;
+ 	}
+ 	/*
+@@ -3089,6 +3090,8 @@ void transport_send_task_abort(struct se_cmd *cmd)
+ 	}
+ 	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
+ 
++	transport_lun_remove_cmd(cmd);
++
+ 	pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
+ 		" ITT: 0x%08x\n", cmd->t_task_cdb[0],
+ 		cmd->se_tfo->get_task_tag(cmd));
+diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
+index 12d6fa2..6659dd3 100644
+--- a/drivers/target/tcm_fc/tfc_sess.c
++++ b/drivers/target/tcm_fc/tfc_sess.c
+@@ -355,11 +355,11 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
+ 
+ 	tport = ft_tport_create(rdata->local_port);
+ 	if (!tport)
+-		return 0;	/* not a target for this local port */
++		goto not_target;	/* not a target for this local port */
+ 
+ 	acl = ft_acl_get(tport->tpg, rdata);
+ 	if (!acl)
+-		return 0;
++		goto not_target;	/* no target for this remote */
+ 
+ 	if (!rspp)
+ 		goto fill;
+@@ -396,12 +396,18 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
+ 
+ 	/*
+ 	 * OR in our service parameters with other provider (initiator), if any.
+-	 * TBD XXX - indicate RETRY capability?
+ 	 */
+ fill:
+ 	fcp_parm = ntohl(spp->spp_params);
++	fcp_parm &= ~FCP_SPPF_RETRY;
+ 	spp->spp_params = htonl(fcp_parm | FCP_SPPF_TARG_FCN);
+ 	return FC_SPP_RESP_ACK;
++
++not_target:
++	fcp_parm = ntohl(spp->spp_params);
++	fcp_parm &= ~FCP_SPPF_TARG_FCN;
++	spp->spp_params = htonl(fcp_parm);
++	return 0;
+ }
+ 
+ /**
+diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
+index a82b399..8cf8d0a 100644
+--- a/drivers/tty/pty.c
++++ b/drivers/tty/pty.c
+@@ -395,6 +395,8 @@ static int pty_bsd_ioctl(struct tty_struct *tty,
+ 		return pty_set_lock(tty, (int __user *) arg);
+ 	case TIOCSIG:    /* Send signal to other side of pty */
+ 		return pty_signal(tty, (int) arg);
++	case TIOCGPTN: /* TTY returns ENOTTY, but glibc expects EINVAL here */
++		return -EINVAL;
+ 	}
+ 	return -ENOIOCTLCMD;
+ }
+diff --git a/drivers/tty/serial/8250/8250.c b/drivers/tty/serial/8250/8250.c
+index 3ba4234..2c09e64 100644
+--- a/drivers/tty/serial/8250/8250.c
++++ b/drivers/tty/serial/8250/8250.c
+@@ -290,6 +290,12 @@ static const struct serial8250_config uart_config[] = {
+ 				  UART_FCR_R_TRIG_00 | UART_FCR_T_TRIG_00,
+ 		.flags		= UART_CAP_FIFO,
+ 	},
++	[PORT_BRCM_TRUMANAGE] = {
++		.name		= "TruManage",
++		.fifo_size	= 1,
++		.tx_loadsz	= 1024,
++		.flags		= UART_CAP_HFIFO,
++	},
+ 	[PORT_8250_CIR] = {
+ 		.name		= "CIR port"
+ 	}
+@@ -1441,6 +1447,11 @@ void serial8250_tx_chars(struct uart_8250_port *up)
+ 		port->icount.tx++;
+ 		if (uart_circ_empty(xmit))
+ 			break;
++		if (up->capabilities & UART_CAP_HFIFO) {
++			if ((serial_port_in(port, UART_LSR) & BOTH_EMPTY) !=
++			    BOTH_EMPTY)
++				break;
++		}
+ 	} while (--count > 0);
+ 
+ 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
+index 5a76f9c..c0be2fa 100644
+--- a/drivers/tty/serial/8250/8250.h
++++ b/drivers/tty/serial/8250/8250.h
+@@ -40,6 +40,7 @@ struct serial8250_config {
+ #define UART_CAP_AFE	(1 << 11)	/* MCR-based hw flow control */
+ #define UART_CAP_UUE	(1 << 12)	/* UART needs IER bit 6 set (Xscale) */
+ #define UART_CAP_RTOIE	(1 << 13)	/* UART needs IER bit 4 set (Xscale, Tegra) */
++#define UART_CAP_HFIFO	(1 << 14)	/* UART has a "hidden" FIFO */
+ 
+ #define UART_BUG_QUOT	(1 << 0)	/* UART has buggy quot LSB */
+ #define UART_BUG_TXEN	(1 << 1)	/* UART has buggy TX IIR status */
+diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
+index c3b2ec0..71ce540 100644
+--- a/drivers/tty/serial/8250/8250_dw.c
++++ b/drivers/tty/serial/8250/8250_dw.c
+@@ -79,7 +79,7 @@ static int dw8250_handle_irq(struct uart_port *p)
+ 	} else if ((iir & UART_IIR_BUSY) == UART_IIR_BUSY) {
+ 		/* Clear the USR and write the LCR again. */
+ 		(void)p->serial_in(p, UART_USR);
+-		p->serial_out(p, d->last_lcr, UART_LCR);
++		p->serial_out(p, UART_LCR, d->last_lcr);
+ 
+ 		return 1;
+ 	}
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index 17b7d26..a29df69 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -1085,6 +1085,18 @@ pci_omegapci_setup(struct serial_private *priv,
+ 	return setup_port(priv, port, 2, idx * 8, 0);
+ }
+ 
++static int
++pci_brcm_trumanage_setup(struct serial_private *priv,
++			 const struct pciserial_board *board,
++			 struct uart_8250_port *port, int idx)
++{
++	int ret = pci_default_setup(priv, board, port, idx);
++
++	port->port.type = PORT_BRCM_TRUMANAGE;
++	port->port.flags = (port->port.flags | UPF_FIXED_PORT | UPF_FIXED_TYPE);
++	return ret;
++}
++
+ static int skip_tx_en_setup(struct serial_private *priv,
+ 			const struct pciserial_board *board,
+ 			struct uart_8250_port *port, int idx)
+@@ -1213,6 +1225,7 @@ pci_wch_ch353_setup(struct serial_private *priv,
+ #define PCI_VENDOR_ID_AGESTAR		0x5372
+ #define PCI_DEVICE_ID_AGESTAR_9375	0x6872
+ #define PCI_VENDOR_ID_ASIX		0x9710
++#define PCI_DEVICE_ID_BROADCOM_TRUMANAGE 0x160a
+ 
+ /* Unknown vendors/cards - this should not be in linux/pci_ids.h */
+ #define PCI_SUBDEVICE_ID_UNKNOWN_0x1584	0x1584
+@@ -1788,6 +1801,17 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
+ 		.setup		= pci_asix_setup,
+ 	},
+ 	/*
++	 * Broadcom TruManage (NetXtreme)
++	 */
++	{
++		.vendor		= PCI_VENDOR_ID_BROADCOM,
++		.device		= PCI_DEVICE_ID_BROADCOM_TRUMANAGE,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.setup		= pci_brcm_trumanage_setup,
++	},
++
++	/*
+ 	 * Default "match everything" terminator entry
+ 	 */
+ 	{
+@@ -1975,6 +1999,7 @@ enum pci_board_num_t {
+ 	pbn_ce4100_1_115200,
+ 	pbn_omegapci,
+ 	pbn_NETMOS9900_2s_115200,
++	pbn_brcm_trumanage,
+ };
+ 
+ /*
+@@ -2674,6 +2699,12 @@ static struct pciserial_board pci_boards[] __devinitdata = {
+ 		.num_ports	= 2,
+ 		.base_baud	= 115200,
+ 	},
++	[pbn_brcm_trumanage] = {
++		.flags		= FL_BASE0,
++		.num_ports	= 1,
++		.reg_shift	= 2,
++		.base_baud	= 115200,
++	},
+ };
+ 
+ static const struct pci_device_id blacklist[] = {
+@@ -4238,6 +4269,13 @@ static struct pci_device_id serial_pci_tbl[] = {
+ 		pbn_omegapci },
+ 
+ 	/*
++	 * Broadcom TruManage
++	 */
++	{	PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BROADCOM_TRUMANAGE,
++		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++		pbn_brcm_trumanage },
++
++	/*
+ 	 * AgeStar as-prs2-009
+ 	 */
+ 	{	PCI_VENDOR_ID_AGESTAR, PCI_DEVICE_ID_AGESTAR_9375,
+diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
+index 5b9bc19..f5e9666 100644
+--- a/drivers/tty/serial/ifx6x60.c
++++ b/drivers/tty/serial/ifx6x60.c
+@@ -552,6 +552,7 @@ static void ifx_port_shutdown(struct tty_port *port)
+ 		container_of(port, struct ifx_spi_device, tty_port);
+ 
+ 	mrdy_set_low(ifx_dev);
++	del_timer(&ifx_dev->spi_timer);
+ 	clear_bit(IFX_SPI_STATE_TIMER_PENDING, &ifx_dev->flags);
+ 	tasklet_kill(&ifx_dev->io_work_tasklet);
+ }
+diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
+index 6db3baa..ea513c9 100644
+--- a/drivers/tty/serial/mxs-auart.c
++++ b/drivers/tty/serial/mxs-auart.c
+@@ -260,10 +260,12 @@ static void mxs_auart_set_mctrl(struct uart_port *u, unsigned mctrl)
+ 
+ 	u32 ctrl = readl(u->membase + AUART_CTRL2);
+ 
+-	ctrl &= ~AUART_CTRL2_RTSEN;
++	ctrl &= ~(AUART_CTRL2_RTSEN | AUART_CTRL2_RTS);
+ 	if (mctrl & TIOCM_RTS) {
+ 		if (tty_port_cts_enabled(&u->state->port))
+ 			ctrl |= AUART_CTRL2_RTSEN;
++		else
++			ctrl |= AUART_CTRL2_RTS;
+ 	}
+ 
+ 	s->ctrl = mctrl;
+diff --git a/drivers/tty/serial/vt8500_serial.c b/drivers/tty/serial/vt8500_serial.c
+index 205d4cf..f528cc2 100644
+--- a/drivers/tty/serial/vt8500_serial.c
++++ b/drivers/tty/serial/vt8500_serial.c
+@@ -604,7 +604,7 @@ static int __devinit vt8500_serial_probe(struct platform_device *pdev)
+ 	vt8500_port->uart.flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF;
+ 
+ 	vt8500_port->clk = of_clk_get(pdev->dev.of_node, 0);
+-	if (vt8500_port->clk) {
++	if (!IS_ERR(vt8500_port->clk)) {
+ 		vt8500_port->uart.uartclk = clk_get_rate(vt8500_port->clk);
+ 	} else {
+ 		/* use the default of 24Mhz if not specified and warn */
+diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
+index ebff9f4..7c212f5 100644
+--- a/drivers/usb/chipidea/host.c
++++ b/drivers/usb/chipidea/host.c
+@@ -129,6 +129,9 @@ static int host_start(struct ci13xxx *ci)
+ 	else
+ 		ci->hcd = hcd;
+ 
++	if (ci->platdata->flags & CI13XXX_DISABLE_STREAMING)
++		hw_write(ci, OP_USBMODE, USBMODE_CI_SDIS, USBMODE_CI_SDIS);
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
+index 60023c2..ed83e7a 100644
+--- a/drivers/usb/serial/io_ti.c
++++ b/drivers/usb/serial/io_ti.c
+@@ -534,6 +534,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
+ 	wait_queue_t wait;
+ 	unsigned long flags;
+ 
++	if (!tty)
++		return;
++
+ 	if (!timeout)
+ 		timeout = (HZ * EDGE_CLOSING_WAIT)/100;
+ 
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index fd47369..f2727e4 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -450,6 +450,10 @@ static void option_instat_callback(struct urb *urb);
+ #define PETATEL_VENDOR_ID			0x1ff4
+ #define PETATEL_PRODUCT_NP10T			0x600e
+ 
++/* TP-LINK Incorporated products */
++#define TPLINK_VENDOR_ID			0x2357
++#define TPLINK_PRODUCT_MA180			0x0201
++
+ /* some devices interfaces need special handling due to a number of reasons */
+ enum option_blacklist_reason {
+ 		OPTION_BLACKLIST_NONE = 0,
+@@ -931,7 +935,8 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0254, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */
+ 	  .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff), /* ONDA MT8205 */
++	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff), /* ZTE MF880 */
+ 	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0317, 0xff, 0xff, 0xff) },
+@@ -1312,6 +1317,8 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) },
+ 	{ USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
+ 	{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) },
++	{ USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
++	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ 	{ } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
+index b91f14e..95ce9d0 100644
+--- a/drivers/xen/grant-table.c
++++ b/drivers/xen/grant-table.c
+@@ -56,10 +56,6 @@
+ /* External tools reserve first few grant table entries. */
+ #define NR_RESERVED_ENTRIES 8
+ #define GNTTAB_LIST_END 0xffffffff
+-#define GREFS_PER_GRANT_FRAME \
+-(grant_table_version == 1 ?                      \
+-(PAGE_SIZE / sizeof(struct grant_entry_v1)) :   \
+-(PAGE_SIZE / sizeof(union grant_entry_v2)))
+ 
+ static grant_ref_t **gnttab_list;
+ static unsigned int nr_grant_frames;
+@@ -154,6 +150,7 @@ static struct gnttab_ops *gnttab_interface;
+ static grant_status_t *grstatus;
+ 
+ static int grant_table_version;
++static int grefs_per_grant_frame;
+ 
+ static struct gnttab_free_callback *gnttab_free_callback_list;
+ 
+@@ -767,12 +764,14 @@ static int grow_gnttab_list(unsigned int more_frames)
+ 	unsigned int new_nr_grant_frames, extra_entries, i;
+ 	unsigned int nr_glist_frames, new_nr_glist_frames;
+ 
++	BUG_ON(grefs_per_grant_frame == 0);
++
+ 	new_nr_grant_frames = nr_grant_frames + more_frames;
+-	extra_entries       = more_frames * GREFS_PER_GRANT_FRAME;
++	extra_entries       = more_frames * grefs_per_grant_frame;
+ 
+-	nr_glist_frames = (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP;
++	nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
+ 	new_nr_glist_frames =
+-		(new_nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP;
++		(new_nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
+ 	for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
+ 		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
+ 		if (!gnttab_list[i])
+@@ -780,12 +779,12 @@ static int grow_gnttab_list(unsigned int more_frames)
+ 	}
+ 
+ 
+-	for (i = GREFS_PER_GRANT_FRAME * nr_grant_frames;
+-	     i < GREFS_PER_GRANT_FRAME * new_nr_grant_frames - 1; i++)
++	for (i = grefs_per_grant_frame * nr_grant_frames;
++	     i < grefs_per_grant_frame * new_nr_grant_frames - 1; i++)
+ 		gnttab_entry(i) = i + 1;
+ 
+ 	gnttab_entry(i) = gnttab_free_head;
+-	gnttab_free_head = GREFS_PER_GRANT_FRAME * nr_grant_frames;
++	gnttab_free_head = grefs_per_grant_frame * nr_grant_frames;
+ 	gnttab_free_count += extra_entries;
+ 
+ 	nr_grant_frames = new_nr_grant_frames;
+@@ -957,7 +956,8 @@ EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
+ 
+ static unsigned nr_status_frames(unsigned nr_grant_frames)
+ {
+-	return (nr_grant_frames * GREFS_PER_GRANT_FRAME + SPP - 1) / SPP;
++	BUG_ON(grefs_per_grant_frame == 0);
++	return (nr_grant_frames * grefs_per_grant_frame + SPP - 1) / SPP;
+ }
+ 
+ static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
+@@ -1115,6 +1115,7 @@ static void gnttab_request_version(void)
+ 	rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
+ 	if (rc == 0 && gsv.version == 2) {
+ 		grant_table_version = 2;
++		grefs_per_grant_frame = PAGE_SIZE / sizeof(union grant_entry_v2);
+ 		gnttab_interface = &gnttab_v2_ops;
+ 	} else if (grant_table_version == 2) {
+ 		/*
+@@ -1127,17 +1128,17 @@ static void gnttab_request_version(void)
+ 		panic("we need grant tables version 2, but only version 1 is available");
+ 	} else {
+ 		grant_table_version = 1;
++		grefs_per_grant_frame = PAGE_SIZE / sizeof(struct grant_entry_v1);
+ 		gnttab_interface = &gnttab_v1_ops;
+ 	}
+ 	printk(KERN_INFO "Grant tables using version %d layout.\n",
+ 		grant_table_version);
+ }
+ 
+-int gnttab_resume(void)
++static int gnttab_setup(void)
+ {
+ 	unsigned int max_nr_gframes;
+ 
+-	gnttab_request_version();
+ 	max_nr_gframes = gnttab_max_grant_frames();
+ 	if (max_nr_gframes < nr_grant_frames)
+ 		return -ENOSYS;
+@@ -1160,6 +1161,12 @@ int gnttab_resume(void)
+ 	return 0;
+ }
+ 
++int gnttab_resume(void)
++{
++	gnttab_request_version();
++	return gnttab_setup();
++}
++
+ int gnttab_suspend(void)
+ {
+ 	gnttab_interface->unmap_frames();
+@@ -1171,9 +1178,10 @@ static int gnttab_expand(unsigned int req_entries)
+ 	int rc;
+ 	unsigned int cur, extra;
+ 
++	BUG_ON(grefs_per_grant_frame == 0);
+ 	cur = nr_grant_frames;
+-	extra = ((req_entries + (GREFS_PER_GRANT_FRAME-1)) /
+-		 GREFS_PER_GRANT_FRAME);
++	extra = ((req_entries + (grefs_per_grant_frame-1)) /
++		 grefs_per_grant_frame);
+ 	if (cur + extra > gnttab_max_grant_frames())
+ 		return -ENOSPC;
+ 
+@@ -1191,21 +1199,23 @@ int gnttab_init(void)
+ 	unsigned int nr_init_grefs;
+ 	int ret;
+ 
++	gnttab_request_version();
+ 	nr_grant_frames = 1;
+ 	boot_max_nr_grant_frames = __max_nr_grant_frames();
+ 
+ 	/* Determine the maximum number of frames required for the
+ 	 * grant reference free list on the current hypervisor.
+ 	 */
++	BUG_ON(grefs_per_grant_frame == 0);
+ 	max_nr_glist_frames = (boot_max_nr_grant_frames *
+-			       GREFS_PER_GRANT_FRAME / RPP);
++			       grefs_per_grant_frame / RPP);
+ 
+ 	gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
+ 			      GFP_KERNEL);
+ 	if (gnttab_list == NULL)
+ 		return -ENOMEM;
+ 
+-	nr_glist_frames = (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP;
++	nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
+ 	for (i = 0; i < nr_glist_frames; i++) {
+ 		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
+ 		if (gnttab_list[i] == NULL) {
+@@ -1214,12 +1224,12 @@ int gnttab_init(void)
+ 		}
+ 	}
+ 
+-	if (gnttab_resume() < 0) {
++	if (gnttab_setup() < 0) {
+ 		ret = -ENODEV;
+ 		goto ini_nomem;
+ 	}
+ 
+-	nr_init_grefs = nr_grant_frames * GREFS_PER_GRANT_FRAME;
++	nr_init_grefs = nr_grant_frames * grefs_per_grant_frame;
+ 
+ 	for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
+ 		gnttab_entry(i) = i + 1;
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index b3c243b..f89c0e5 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -1503,6 +1503,8 @@ static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd)
+ 
+ 	index = mpd->first_page;
+ 	end   = mpd->next_page - 1;
++
++	pagevec_init(&pvec, 0);
+ 	while (index <= end) {
+ 		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
+ 		if (nr_pages == 0)
+diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
+index 5be8937..fca8bbe 100644
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -734,6 +734,8 @@ struct se_subsystem_dev {
+ };
+ 
+ struct se_device {
++#define SE_DEV_LINK_MAGIC			0xfeeddeef
++	u32			dev_link_magic;
+ 	/* RELATIVE TARGET PORT IDENTIFER Counter */
+ 	u16			dev_rpti_counter;
+ 	/* Used for SAM Task Attribute ordering */
+@@ -820,6 +822,8 @@ struct se_port_stat_grps {
+ };
+ 
+ struct se_lun {
++#define SE_LUN_LINK_MAGIC			0xffff7771
++	u32			lun_link_magic;
+ 	/* See transport_lun_status_table */
+ 	enum transport_lun_status_table lun_status;
+ 	u32			lun_access;
+diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h
+index 7e1ab20..dbb95db 100644
+--- a/include/uapi/linux/serial_core.h
++++ b/include/uapi/linux/serial_core.h
+@@ -49,7 +49,9 @@
+ #define PORT_XR17D15X	21	/* Exar XR17D15x UART */
+ #define PORT_LPC3220	22	/* NXP LPC32xx SoC "Standard" UART */
+ #define PORT_8250_CIR	23	/* CIR infrared port, has its own driver */
+-#define PORT_MAX_8250	23	/* max port ID */
++#define PORT_XR17V35X	24	/* Exar XR17V35x UARTs */
++#define PORT_BRCM_TRUMANAGE	24
++#define PORT_MAX_8250	25	/* max port ID */
+ 
+ /*
+  * ARM specific type numbers.  These are not currently guaranteed
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 4642c68..a95e198 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -1499,7 +1499,7 @@ static int hdmi_chmap_ctl_put(struct snd_kcontrol *kcontrol,
+ 	ctl_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
+ 	substream = snd_pcm_chmap_substream(info, ctl_idx);
+ 	if (!substream || !substream->runtime)
+-		return -EBADFD;
++		return 0; /* just for avoiding error from alsactl restore */
+ 	switch (substream->runtime->status->state) {
+ 	case SNDRV_PCM_STATE_OPEN:
+ 	case SNDRV_PCM_STATE_SETUP:
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 0f58b4b..b8d1ad1 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -387,11 +387,13 @@ static int snd_usb_fasttrackpro_boot_quirk(struct usb_device *dev)
+ 		 * rules
+ 		 */
+ 		err = usb_driver_set_configuration(dev, 2);
+-		if (err < 0) {
++		if (err < 0)
+ 			snd_printdd("error usb_driver_set_configuration: %d\n",
+ 				    err);
+-			return -ENODEV;
+-		}
++		/* Always return an error, so that we stop creating a device
++		   that will just be destroyed and recreated with a new
++		   configuration */
++		return -ENODEV;
+ 	} else
+ 		snd_printk(KERN_INFO "usb-audio: Fast Track Pro config OK\n");
+ 

diff --git a/3.7.3/4420_grsecurity-2.9.1-3.7.3-201301181518.patch b/3.7.4/4420_grsecurity-2.9.1-3.7.4-201301230048.patch
similarity index 99%
rename from 3.7.3/4420_grsecurity-2.9.1-3.7.3-201301181518.patch
rename to 3.7.4/4420_grsecurity-2.9.1-3.7.4-201301230048.patch
index 1b79126..3577167 100644
--- a/3.7.3/4420_grsecurity-2.9.1-3.7.3-201301181518.patch
+++ b/3.7.4/4420_grsecurity-2.9.1-3.7.4-201301230048.patch
@@ -251,7 +251,7 @@ index 9776f06..18b1856 100644
  
  	pcd.		[PARIDE]
 diff --git a/Makefile b/Makefile
-index 51a9bda..3097345 100644
+index f9196bc..63b33e4 100644
 --- a/Makefile
 +++ b/Makefile
 @@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -579,19 +579,31 @@ index 2fd00b7..cfd5069 100644
  
  	for (i = 0; i < n; i++) {
 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
-index 14db93e..65de923 100644
+index 14db93e..47bed62 100644
 --- a/arch/alpha/kernel/osf_sys.c
 +++ b/arch/alpha/kernel/osf_sys.c
-@@ -1304,7 +1304,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
+@@ -1295,16 +1295,16 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
+    generic version except that we know how to honor ADDR_LIMIT_32BIT.  */
+ 
+ static unsigned long
+-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
+-		         unsigned long limit)
++arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
++		         unsigned long limit, unsigned long flags)
+ {
+ 	struct vm_area_struct *vma = find_vma(current->mm, addr);
+-
++	unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+ 	while (1) {
  		/* At this point:  (!vma || addr < vma->vm_end). */
  		if (limit - len < addr)
  			return -ENOMEM;
 -		if (!vma || addr + len <= vma->vm_start)
-+		if (check_heap_stack_gap(vma, addr, len))
++		if (check_heap_stack_gap(vma, addr, len, offset))
  			return addr;
  		addr = vma->vm_end;
  		vma = vma->vm_next;
-@@ -1340,6 +1340,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -1340,20 +1340,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
  	   merely specific addresses, but regions of memory -- perhaps
  	   this feature should be incorporated into all ports?  */
  
@@ -600,19 +612,26 @@ index 14db93e..65de923 100644
 +#endif
 +
  	if (addr) {
- 		addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
+-		addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
++		addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
  		if (addr != (unsigned long) -ENOMEM)
-@@ -1347,8 +1351,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ 			return addr;
  	}
  
  	/* Next, try allocating at TASK_UNMAPPED_BASE.  */
 -	addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
 -					 len, limit);
-+	addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
++	addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
 +
  	if (addr != (unsigned long) -ENOMEM)
  		return addr;
  
+ 	/* Finally, try allocating in low memory.  */
+-	addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
++	addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
+ 
+ 	return addr;
+ }
 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
 index 0c4132d..88f0d53 100644
 --- a/arch/alpha/mm/fault.c
@@ -1882,6 +1901,19 @@ index 7e1f760..1af891c 100644
  	if (access_ok(VERIFY_WRITE, to, n))
  		n = __copy_to_user(to, from, n);
  	return n;
+diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
+index 96ee092..37f1844 100644
+--- a/arch/arm/include/uapi/asm/ptrace.h
++++ b/arch/arm/include/uapi/asm/ptrace.h
+@@ -73,7 +73,7 @@
+  * ARMv7 groups of PSR bits
+  */
+ #define APSR_MASK	0xf80f0000	/* N, Z, C, V, Q and GE flags */
+-#define PSR_ISET_MASK	0x01000010	/* ISA state (J, T) mask */
++#define PSR_ISET_MASK	0x01000020	/* ISA state (J, T) mask */
+ #define PSR_IT_MASK	0x0600fc00	/* If-Then execution state mask */
+ #define PSR_ENDIAN_MASK	0x00000200	/* Endianness state mask */
+ 
 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
 index 60d3b73..9168db0 100644
 --- a/arch/arm/kernel/armksyms.c
@@ -2019,7 +2051,7 @@ index 739db3a..7f4a272 100644
  		return scno;
  
 diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
-index da1d1aa..0a606e7 100644
+index da1d1aa..5aaf182 100644
 --- a/arch/arm/kernel/setup.c
 +++ b/arch/arm/kernel/setup.c
 @@ -99,19 +99,19 @@ EXPORT_SYMBOL(elf_hwcap);
@@ -2047,6 +2079,15 @@ index da1d1aa..0a606e7 100644
  EXPORT_SYMBOL(outer_cache);
  #endif
  
+@@ -455,7 +455,7 @@ static void __init setup_processor(void)
+ 	__cpu_architecture = __get_cpu_architecture();
+ 
+ #ifdef MULTI_CPU
+-	processor = *list->proc;
++	memcpy((void *)&processor, list->proc, sizeof processor);
+ #endif
+ #ifdef MULTI_TLB
+ 	cpu_tlb = *list->tlb;
 diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
 index fbc8b26..000ded0 100644
 --- a/arch/arm/kernel/smp.c
@@ -2521,10 +2562,18 @@ index ad722f1..46b670e 100644
  	totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
  				    __phys_to_pfn(__pa(&__tcm_end)),
 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
-index ce8cb19..3e96d5f 100644
+index ce8cb19..b9fe4d7 100644
 --- a/arch/arm/mm/mmap.c
 +++ b/arch/arm/mm/mmap.c
-@@ -93,6 +93,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -72,6 +72,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ 	unsigned long start_addr;
+ 	int do_align = 0;
+ 	int aliasing = cache_is_vipt_aliasing();
++	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ 
+ 	/*
+ 	 * We only need to do colour alignment if either the I or D
+@@ -93,6 +94,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
  	if (len > TASK_SIZE)
  		return -ENOMEM;
  
@@ -2535,13 +2584,13 @@ index ce8cb19..3e96d5f 100644
  	if (addr) {
  		if (do_align)
  			addr = COLOUR_ALIGN(addr, pgoff);
-@@ -100,15 +104,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -100,15 +105,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
  			addr = PAGE_ALIGN(addr);
  
  		vma = find_vma(mm, addr);
 -		if (TASK_SIZE - len >= addr &&
 -		    (!vma || addr + len <= vma->vm_start))
-+		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
++		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
  			return addr;
  	}
  	if (len > mm->cached_hole_size) {
@@ -2555,7 +2604,7 @@ index ce8cb19..3e96d5f 100644
  	}
  
  full_search:
-@@ -124,14 +127,14 @@ full_search:
+@@ -124,14 +128,14 @@ full_search:
  			 * Start a new search - just in case we missed
  			 * some holes.
  			 */
@@ -2569,11 +2618,19 @@ index ce8cb19..3e96d5f 100644
  			return -ENOMEM;
  		}
 -		if (!vma || addr + len <= vma->vm_start) {
-+		if (check_heap_stack_gap(vma, addr, len)) {
++		if (check_heap_stack_gap(vma, addr, len, offset)) {
  			/*
  			 * Remember the place where we stopped the search:
  			 */
-@@ -175,6 +178,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -156,6 +160,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ 	unsigned long addr = addr0;
+ 	int do_align = 0;
+ 	int aliasing = cache_is_vipt_aliasing();
++	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ 
+ 	/*
+ 	 * We only need to do colour alignment if either the I or D
+@@ -175,6 +180,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  		return addr;
  	}
  
@@ -2584,26 +2641,26 @@ index ce8cb19..3e96d5f 100644
  	/* requesting a specific address */
  	if (addr) {
  		if (do_align)
-@@ -182,8 +189,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -182,8 +191,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  		else
  			addr = PAGE_ALIGN(addr);
  		vma = find_vma(mm, addr);
 -		if (TASK_SIZE - len >= addr &&
 -				(!vma || addr + len <= vma->vm_start))
-+		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
++		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
  			return addr;
  	}
  
-@@ -203,7 +209,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -203,7 +211,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  	/* make sure it can fit in the remaining address space */
  	if (addr > len) {
  		vma = find_vma(mm, addr-len);
 -		if (!vma || addr <= vma->vm_start)
-+		if (check_heap_stack_gap(vma, addr - len, len))
++		if (check_heap_stack_gap(vma, addr - len, len, offset))
  			/* remember the address as a hint for next time */
  			return (mm->free_area_cache = addr-len);
  	}
-@@ -212,17 +218,17 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -212,17 +220,17 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  		goto bottomup;
  
  	addr = mm->mmap_base - len;
@@ -2620,11 +2677,11 @@ index ce8cb19..3e96d5f 100644
  		 */
  		vma = find_vma(mm, addr);
 -		if (!vma || addr+len <= vma->vm_start)
-+		if (check_heap_stack_gap(vma, addr, len))
++		if (check_heap_stack_gap(vma, addr, len, offset))
  			/* remember the address as a hint for next time */
  			return (mm->free_area_cache = addr);
  
-@@ -231,10 +237,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -231,10 +239,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  			mm->cached_hole_size = vma->vm_start - addr;
  
  		/* try just below the current vma->vm_start */
@@ -2632,12 +2689,12 @@ index ce8cb19..3e96d5f 100644
 -		if (do_align)
 -			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
 -	} while (len < vma->vm_start);
-+		addr = skip_heap_stack_gap(vma, len);
++		addr = skip_heap_stack_gap(vma, len, offset);
 +	} while (!IS_ERR_VALUE(addr));
  
  bottomup:
  	/*
-@@ -266,10 +270,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+@@ -266,10 +272,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
  
  	if (mmap_is_legacy()) {
  		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
@@ -3023,34 +3080,42 @@ index 43901f2..0d8b865 100644
  
  #endif
 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
-index 385fd30..6c3d97e 100644
+index 385fd30..3aaf4fe 100644
 --- a/arch/frv/mm/elf-fdpic.c
 +++ b/arch/frv/mm/elf-fdpic.c
-@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ {
+ 	struct vm_area_struct *vma;
+ 	unsigned long limit;
++	unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+ 
+ 	if (len > TASK_SIZE)
+ 		return -ENOMEM;
+@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
  	if (addr) {
  		addr = PAGE_ALIGN(addr);
  		vma = find_vma(current->mm, addr);
 -		if (TASK_SIZE - len >= addr &&
 -		    (!vma || addr + len <= vma->vm_start))
-+		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
++		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
  			goto success;
  	}
  
-@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+@@ -89,7 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
  			for (; vma; vma = vma->vm_next) {
  				if (addr > limit)
  					break;
 -				if (addr + len <= vma->vm_start)
-+				if (check_heap_stack_gap(vma, addr, len))
++				if (check_heap_stack_gap(vma, addr, len, offset))
  					goto success;
  				addr = vma->vm_end;
  			}
-@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+@@ -104,7 +104,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
  		for (; vma; vma = vma->vm_next) {
  			if (addr > limit)
  				break;
 -			if (addr + len <= vma->vm_start)
-+			if (check_heap_stack_gap(vma, addr, len))
++			if (check_heap_stack_gap(vma, addr, len, offset))
  				goto success;
  			addr = vma->vm_end;
  		}
@@ -3370,10 +3435,18 @@ index 24603be..948052d 100644
  		DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
  	}
 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
-index d9439ef..b9a4303 100644
+index d9439ef..d0cac6b 100644
 --- a/arch/ia64/kernel/sys_ia64.c
 +++ b/arch/ia64/kernel/sys_ia64.c
-@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
+@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
+ 	unsigned long start_addr, align_mask = PAGE_SIZE - 1;
+ 	struct mm_struct *mm = current->mm;
+ 	struct vm_area_struct *vma;
++	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ 
+ 	if (len > RGN_MAP_LIMIT)
+ 		return -ENOMEM;
+@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
  	if (REGION_NUMBER(addr) == RGN_HPAGE)
  		addr = 0;
  #endif
@@ -3387,7 +3460,7 @@ index d9439ef..b9a4303 100644
  	if (!addr)
  		addr = mm->free_area_cache;
  
-@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
+@@ -61,14 +69,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
  	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
  		/* At this point:  (!vma || addr < vma->vm_end). */
  		if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
@@ -3401,7 +3474,7 @@ index d9439ef..b9a4303 100644
  			return -ENOMEM;
  		}
 -		if (!vma || addr + len <= vma->vm_start) {
-+		if (check_heap_stack_gap(vma, addr, len)) {
++		if (check_heap_stack_gap(vma, addr, len, offset)) {
  			/* Remember the address where we stopped this search:  */
  			mm->free_area_cache = addr + len;
  			return addr;
@@ -3470,15 +3543,23 @@ index 6cf0341..d352594 100644
  	/*
  	 * If for any reason at all we couldn't handle the fault, make
 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
-index 5ca674b..e0e1b70 100644
+index 5ca674b..127c3cb 100644
 --- a/arch/ia64/mm/hugetlbpage.c
 +++ b/arch/ia64/mm/hugetlbpage.c
-@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
+@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
+ 		unsigned long pgoff, unsigned long flags)
+ {
+ 	struct vm_area_struct *vmm;
++	unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
+ 
+ 	if (len > RGN_MAP_LIMIT)
+ 		return -ENOMEM;
+@@ -171,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
  		/* At this point:  (!vmm || addr < vmm->vm_end). */
  		if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
  			return -ENOMEM;
 -		if (!vmm || (addr + len) <= vmm->vm_start)
-+		if (check_heap_stack_gap(vmm, addr, len))
++		if (check_heap_stack_gap(vmm, addr, len, offset))
  			return addr;
  		addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
  	}
@@ -3894,10 +3975,18 @@ index ddcec1e..c7f983e 100644
   * This routine handles page faults.  It determines the address,
   * and the problem, and then passes it off to one of the appropriate
 diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
-index 302d779..7d35bf8 100644
+index 302d779..ad1772c 100644
 --- a/arch/mips/mm/mmap.c
 +++ b/arch/mips/mm/mmap.c
-@@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
+@@ -71,6 +71,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
+ 	struct vm_area_struct *vma;
+ 	unsigned long addr = addr0;
+ 	int do_color_align;
++	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ 
+ 	if (unlikely(len > TASK_SIZE))
+ 		return -ENOMEM;
+@@ -95,6 +96,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
  		do_color_align = 1;
  
  	/* requesting a specific address */
@@ -3909,44 +3998,44 @@ index 302d779..7d35bf8 100644
  	if (addr) {
  		if (do_color_align)
  			addr = COLOUR_ALIGN(addr, pgoff);
-@@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
+@@ -102,8 +108,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
  			addr = PAGE_ALIGN(addr);
  
  		vma = find_vma(mm, addr);
 -		if (TASK_SIZE - len >= addr &&
 -		    (!vma || addr + len <= vma->vm_start))
-+		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
++		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
  			return addr;
  	}
  
-@@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
+@@ -118,7 +123,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
  			/* At this point:  (!vma || addr < vma->vm_end). */
  			if (TASK_SIZE - len < addr)
  				return -ENOMEM;
 -			if (!vma || addr + len <= vma->vm_start)
-+			if (check_heap_stack_gap(vmm, addr, len))
++			if (check_heap_stack_gap(vma, addr, len, offset))
  				return addr;
  			addr = vma->vm_end;
  			if (do_color_align)
-@@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
+@@ -145,7 +150,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
  		/* make sure it can fit in the remaining address space */
  		if (likely(addr > len)) {
  			vma = find_vma(mm, addr - len);
 -			if (!vma || addr <= vma->vm_start) {
-+			if (check_heap_stack_gap(vmm, addr - len, len))
++			if (check_heap_stack_gap(vma, addr - len, len, offset)) {
  				/* cache the address as a hint for next time */
  				return mm->free_area_cache = addr - len;
  			}
-@@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
+@@ -165,7 +170,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
  			 * return with success:
  			 */
  			vma = find_vma(mm, addr);
 -			if (likely(!vma || addr + len <= vma->vm_start)) {
-+			if (check_heap_stack_gap(vmm, addr, len)) {
++			if (check_heap_stack_gap(vma, addr, len, offset)) {
  				/* cache the address as a hint for next time */
  				return mm->free_area_cache = addr;
  			}
-@@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+@@ -242,30 +247,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
  		mm->unmap_area = arch_unmap_area_topdown;
  	}
  }
@@ -4271,28 +4360,56 @@ index 5e34ccf..672bc9c 100644
  	DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
  	       me->arch.unwind_section, table, end, gp);
 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
-index f76c108..8117482 100644
+index f76c108..92bad82 100644
 --- a/arch/parisc/kernel/sys_parisc.c
 +++ b/arch/parisc/kernel/sys_parisc.c
-@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
+@@ -33,9 +33,11 @@
+ #include <linux/utsname.h>
+ #include <linux/personality.h>
+ 
+-static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
++static unsigned long get_unshared_area(struct file *filp, unsigned long addr, unsigned long len,
++				       unsigned long flags)
+ {
+ 	struct vm_area_struct *vma;
++	unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+ 
+ 	addr = PAGE_ALIGN(addr);
+ 
+@@ -43,7 +45,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
  		/* At this point:  (!vma || addr < vma->vm_end). */
  		if (TASK_SIZE - len < addr)
  			return -ENOMEM;
 -		if (!vma || addr + len <= vma->vm_start)
-+		if (check_heap_stack_gap(vma, addr, len))
++		if (check_heap_stack_gap(vma, addr, len, offset))
  			return addr;
  		addr = vma->vm_end;
  	}
-@@ -81,7 +81,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
+@@ -67,11 +69,12 @@ static int get_offset(struct address_space *mapping)
+ 	return offset & 0x3FF000;
+ }
+ 
+-static unsigned long get_shared_area(struct address_space *mapping,
+-		unsigned long addr, unsigned long len, unsigned long pgoff)
++static unsigned long get_shared_area(struct file *filp, struct address_space *mapping,
++		unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
+ {
+ 	struct vm_area_struct *vma;
+ 	int offset = mapping ? get_offset(mapping) : 0;
++	unsigned long rand_offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+ 
+ 	offset = (offset + (pgoff << PAGE_SHIFT)) & 0x3FF000;
+ 
+@@ -81,7 +84,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
  		/* At this point:  (!vma || addr < vma->vm_end). */
  		if (TASK_SIZE - len < addr)
  			return -ENOMEM;
 -		if (!vma || addr + len <= vma->vm_start)
-+		if (check_heap_stack_gap(vma, addr, len))
++		if (check_heap_stack_gap(vma, addr, len, rand_offset))
  			return addr;
  		addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
  		if (addr < vma->vm_end) /* handle wraparound */
-@@ -100,7 +100,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -100,14 +103,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
  	if (flags & MAP_FIXED)
  		return addr;
  	if (!addr)
@@ -4300,7 +4417,17 @@ index f76c108..8117482 100644
 +		addr = current->mm->mmap_base;
  
  	if (filp) {
- 		addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
+-		addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
++		addr = get_shared_area(filp, filp->f_mapping, addr, len, pgoff, flags);
+ 	} else if(flags & MAP_SHARED) {
+-		addr = get_shared_area(NULL, addr, len, pgoff);
++		addr = get_shared_area(filp, NULL, addr, len, pgoff, flags);
+ 	} else {
+-		addr = get_unshared_area(addr, len);
++		addr = get_unshared_area(filp, addr, len, flags);
+ 	}
+ 	return addr;
+ }
 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
 index 45ba99f..8e22c33 100644
 --- a/arch/parisc/kernel/traps.c
@@ -5388,7 +5515,7 @@ index 67a42ed..1c7210c 100644
  		mm->unmap_area = arch_unmap_area_topdown;
  	}
 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
-index 5829d2a..b64ed2e 100644
+index 5829d2a..af84242 100644
 --- a/arch/powerpc/mm/slice.c
 +++ b/arch/powerpc/mm/slice.c
 @@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
@@ -5396,7 +5523,7 @@ index 5829d2a..b64ed2e 100644
  		return 0;
  	vma = find_vma(mm, addr);
 -	return (!vma || (addr + len) <= vma->vm_start);
-+	return check_heap_stack_gap(vma, addr, len);
++	return check_heap_stack_gap(vma, addr, len, 0);
  }
  
  static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
@@ -5405,7 +5532,7 @@ index 5829d2a..b64ed2e 100644
  			continue;
  		}
 -		if (!vma || addr + len <= vma->vm_start) {
-+		if (check_heap_stack_gap(vma, addr, len)) {
++		if (check_heap_stack_gap(vma, addr, len, 0)) {
  			/*
  			 * Remember the place where we stopped the search:
  			 */
@@ -5432,7 +5559,7 @@ index 5829d2a..b64ed2e 100644
  		 */
  		vma = find_vma(mm, addr);
 -		if (!vma || (addr + len) <= vma->vm_start) {
-+		if (check_heap_stack_gap(vma, addr, len)) {
++		if (check_heap_stack_gap(vma, addr, len, 0)) {
  			/* remember the address as a hint for next time */
  			if (use_cache)
  				mm->free_area_cache = addr;
@@ -5441,7 +5568,7 @@ index 5829d2a..b64ed2e 100644
  
  		/* try just below the current vma->vm_start */
 -		addr = vma->vm_start;
-+		addr = skip_heap_stack_gap(vma, len);
++		addr = skip_heap_stack_gap(vma, len, 0);
  	}
  
  	/*
@@ -5806,48 +5933,64 @@ index ef9e555..331bd29 100644
  #define __read_mostly __attribute__((__section__(".data..read_mostly")))
  
 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
-index afeb710..d1d1289 100644
+index afeb710..e8366ef 100644
 --- a/arch/sh/mm/mmap.c
 +++ b/arch/sh/mm/mmap.c
-@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -49,6 +49,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ 	struct vm_area_struct *vma;
+ 	unsigned long start_addr;
+ 	int do_colour_align;
++	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ 
+ 	if (flags & MAP_FIXED) {
+ 		/* We do not accept a shared mapping if it would violate
+@@ -74,8 +75,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
  			addr = PAGE_ALIGN(addr);
  
  		vma = find_vma(mm, addr);
 -		if (TASK_SIZE - len >= addr &&
 -		    (!vma || addr + len <= vma->vm_start))
-+		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
++		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
  			return addr;
  	}
  
-@@ -106,7 +105,7 @@ full_search:
+@@ -106,7 +106,7 @@ full_search:
  			}
  			return -ENOMEM;
  		}
 -		if (likely(!vma || addr + len <= vma->vm_start)) {
-+		if (likely(check_heap_stack_gap(vma, addr, len))) {
++		if (likely(check_heap_stack_gap(vma, addr, len, offset))) {
  			/*
  			 * Remember the place where we stopped the search:
  			 */
-@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -131,6 +131,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ 	struct mm_struct *mm = current->mm;
+ 	unsigned long addr = addr0;
+ 	int do_colour_align;
++	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ 
+ 	if (flags & MAP_FIXED) {
+ 		/* We do not accept a shared mapping if it would violate
+@@ -157,8 +158,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  			addr = PAGE_ALIGN(addr);
  
  		vma = find_vma(mm, addr);
 -		if (TASK_SIZE - len >= addr &&
 -		    (!vma || addr + len <= vma->vm_start))
-+		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
++		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
  			return addr;
  	}
  
-@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -179,7 +179,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  	/* make sure it can fit in the remaining address space */
  	if (likely(addr > len)) {
  		vma = find_vma(mm, addr-len);
 -		if (!vma || addr <= vma->vm_start) {
-+		if (check_heap_stack_gap(vma, addr - len, len)) {
++		if (check_heap_stack_gap(vma, addr - len, len, offset)) {
  			/* remember the address as a hint for next time */
  			return (mm->free_area_cache = addr-len);
  		}
-@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -188,18 +188,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  	if (unlikely(mm->mmap_base < len))
  		goto bottomup;
  
@@ -5866,11 +6009,11 @@ index afeb710..d1d1289 100644
  		 */
  		vma = find_vma(mm, addr);
 -		if (likely(!vma || addr+len <= vma->vm_start)) {
-+		if (likely(check_heap_stack_gap(vma, addr, len))) {
++		if (likely(check_heap_stack_gap(vma, addr, len, offset))) {
  			/* remember the address as a hint for next time */
  			return (mm->free_area_cache = addr);
  		}
-@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -209,10 +209,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  		        mm->cached_hole_size = vma->vm_start - addr;
  
  		/* try just below the current vma->vm_start */
@@ -5878,7 +6021,7 @@ index afeb710..d1d1289 100644
 -		if (do_colour_align)
 -			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
 -	} while (likely(len < vma->vm_start));
-+		addr = skip_heap_stack_gap(vma, len);
++		addr = skip_heap_stack_gap(vma, len, offset);
 +	} while (!IS_ERR_VALUE(addr));
  
  bottomup:
@@ -6606,10 +6749,18 @@ index 7ff45e4..a58f271 100644
  
  	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
-index 0c9b31b..7cb7aee 100644
+index 0c9b31b..55a8ba6 100644
 --- a/arch/sparc/kernel/sys_sparc_32.c
 +++ b/arch/sparc/kernel/sys_sparc_32.c
-@@ -54,7 +54,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+@@ -39,6 +39,7 @@ asmlinkage unsigned long sys_getpagesize(void)
+ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
+ {
+ 	struct vm_area_struct * vmm;
++	unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+ 
+ 	if (flags & MAP_FIXED) {
+ 		/* We do not accept a shared mapping if it would violate
+@@ -54,7 +55,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
  	if (len > TASK_SIZE - PAGE_SIZE)
  		return -ENOMEM;
  	if (!addr)
@@ -6618,20 +6769,26 @@ index 0c9b31b..7cb7aee 100644
  
  	if (flags & MAP_SHARED)
  		addr = COLOUR_ALIGN(addr);
-@@ -65,7 +65,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+@@ -65,7 +66,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
  		/* At this point:  (!vmm || addr < vmm->vm_end). */
  		if (TASK_SIZE - PAGE_SIZE - len < addr)
  			return -ENOMEM;
 -		if (!vmm || addr + len <= vmm->vm_start)
-+		if (check_heap_stack_gap(vmm, addr, len))
++		if (check_heap_stack_gap(vmm, addr, len, offset))
  			return addr;
  		addr = vmm->vm_end;
  		if (flags & MAP_SHARED)
 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
-index 878ef3d..8742f10 100644
+index 878ef3d..609e41f 100644
 --- a/arch/sparc/kernel/sys_sparc_64.c
 +++ b/arch/sparc/kernel/sys_sparc_64.c
-@@ -107,7 +107,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+@@ -102,12 +102,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ 	unsigned long task_size = TASK_SIZE;
+ 	unsigned long start_addr;
+ 	int do_color_align;
++	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ 
+ 	if (flags & MAP_FIXED) {
  		/* We do not accept a shared mapping if it would violate
  		 * cache aliasing constraints.
  		 */
@@ -6640,7 +6797,7 @@ index 878ef3d..8742f10 100644
  		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
  			return -EINVAL;
  		return addr;
-@@ -122,6 +122,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+@@ -122,6 +123,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
  	if (filp || (flags & MAP_SHARED))
  		do_color_align = 1;
  
@@ -6651,13 +6808,13 @@ index 878ef3d..8742f10 100644
  	if (addr) {
  		if (do_color_align)
  			addr = COLOUR_ALIGN(addr, pgoff);
-@@ -129,15 +133,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+@@ -129,15 +134,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
  			addr = PAGE_ALIGN(addr);
  
  		vma = find_vma(mm, addr);
 -		if (task_size - len >= addr &&
 -		    (!vma || addr + len <= vma->vm_start))
-+		if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
++		if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
  			return addr;
  	}
  
@@ -6670,7 +6827,7 @@ index 878ef3d..8742f10 100644
  	        mm->cached_hole_size = 0;
  	}
  
-@@ -157,14 +160,14 @@ full_search:
+@@ -157,14 +161,14 @@ full_search:
  			vma = find_vma(mm, VA_EXCLUDE_END);
  		}
  		if (unlikely(task_size < addr)) {
@@ -6684,11 +6841,19 @@ index 878ef3d..8742f10 100644
  			return -ENOMEM;
  		}
 -		if (likely(!vma || addr + len <= vma->vm_start)) {
-+		if (likely(check_heap_stack_gap(vma, addr, len))) {
++		if (likely(check_heap_stack_gap(vma, addr, len, offset))) {
  			/*
  			 * Remember the place where we stopped the search:
  			 */
-@@ -198,7 +201,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -190,6 +194,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ 	unsigned long task_size = STACK_TOP32;
+ 	unsigned long addr = addr0;
+ 	int do_color_align;
++	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ 
+ 	/* This should only ever run for 32-bit processes.  */
+ 	BUG_ON(!test_thread_flag(TIF_32BIT));
+@@ -198,7 +203,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  		/* We do not accept a shared mapping if it would violate
  		 * cache aliasing constraints.
  		 */
@@ -6697,26 +6862,26 @@ index 878ef3d..8742f10 100644
  		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
  			return -EINVAL;
  		return addr;
-@@ -219,8 +222,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -219,8 +224,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  			addr = PAGE_ALIGN(addr);
  
  		vma = find_vma(mm, addr);
 -		if (task_size - len >= addr &&
 -		    (!vma || addr + len <= vma->vm_start))
-+		if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
++		if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
  			return addr;
  	}
  
-@@ -241,7 +243,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -241,7 +245,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  	/* make sure it can fit in the remaining address space */
  	if (likely(addr > len)) {
  		vma = find_vma(mm, addr-len);
 -		if (!vma || addr <= vma->vm_start) {
-+		if (check_heap_stack_gap(vma, addr - len, len)) {
++		if (check_heap_stack_gap(vma, addr - len, len, offset)) {
  			/* remember the address as a hint for next time */
  			return (mm->free_area_cache = addr-len);
  		}
-@@ -250,18 +252,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -250,18 +254,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  	if (unlikely(mm->mmap_base < len))
  		goto bottomup;
  
@@ -6735,11 +6900,11 @@ index 878ef3d..8742f10 100644
  		 */
  		vma = find_vma(mm, addr);
 -		if (likely(!vma || addr+len <= vma->vm_start)) {
-+		if (likely(check_heap_stack_gap(vma, addr, len))) {
++		if (likely(check_heap_stack_gap(vma, addr, len, offset))) {
  			/* remember the address as a hint for next time */
  			return (mm->free_area_cache = addr);
  		}
-@@ -271,10 +273,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -271,10 +275,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
   		        mm->cached_hole_size = vma->vm_start - addr;
  
  		/* try just below the current vma->vm_start */
@@ -6747,12 +6912,12 @@ index 878ef3d..8742f10 100644
 -		if (do_color_align)
 -			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
 -	} while (likely(len < vma->vm_start));
-+		addr = skip_heap_stack_gap(vma, len);
++		addr = skip_heap_stack_gap(vma, len, offset);
 +	} while (!IS_ERR_VALUE(addr));
  
  bottomup:
  	/*
-@@ -373,6 +373,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+@@ -373,6 +375,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
  	    gap == RLIM_INFINITY ||
  	    sysctl_legacy_va_layout) {
  		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
@@ -6765,7 +6930,7 @@ index 878ef3d..8742f10 100644
  		mm->get_unmapped_area = arch_get_unmapped_area;
  		mm->unmap_area = arch_unmap_area;
  	} else {
-@@ -385,6 +391,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+@@ -385,6 +393,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
  			gap = (task_size / 6 * 5);
  
  		mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
@@ -8106,28 +8271,44 @@ index 097aee7..5ca6697 100644
  	 * load/store/atomic was a write or not, it only says that there
  	 * was no match.  So in such a case we (carefully) read the
 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
-index f76f83d..0f28457 100644
+index f76f83d..ee0d859 100644
 --- a/arch/sparc/mm/hugetlbpage.c
 +++ b/arch/sparc/mm/hugetlbpage.c
-@@ -67,7 +67,7 @@ full_search:
+@@ -34,6 +34,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
+ 	struct vm_area_struct * vma;
+ 	unsigned long task_size = TASK_SIZE;
+ 	unsigned long start_addr;
++	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ 
+ 	if (test_thread_flag(TIF_32BIT))
+ 		task_size = STACK_TOP32;
+@@ -67,7 +68,7 @@ full_search:
  			}
  			return -ENOMEM;
  		}
 -		if (likely(!vma || addr + len <= vma->vm_start)) {
-+		if (likely(check_heap_stack_gap(vma, addr, len))) {
++		if (likely(check_heap_stack_gap(vma, addr, len, offset))) {
  			/*
  			 * Remember the place where we stopped the search:
  			 */
-@@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -90,6 +91,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ 	struct vm_area_struct *vma;
+ 	struct mm_struct *mm = current->mm;
+ 	unsigned long addr = addr0;
++	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ 
+ 	/* This should only ever run for 32-bit processes.  */
+ 	BUG_ON(!test_thread_flag(TIF_32BIT));
+@@ -106,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  	/* make sure it can fit in the remaining address space */
  	if (likely(addr > len)) {
  		vma = find_vma(mm, addr-len);
 -		if (!vma || addr <= vma->vm_start) {
-+		if (check_heap_stack_gap(vma, addr - len, len)) {
++		if (check_heap_stack_gap(vma, addr - len, len, offset)) {
  			/* remember the address as a hint for next time */
  			return (mm->free_area_cache = addr-len);
  		}
-@@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -115,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  	if (unlikely(mm->mmap_base < len))
  		goto bottomup;
  
@@ -8143,28 +8324,36 @@ index f76f83d..0f28457 100644
  		 */
  		vma = find_vma(mm, addr);
 -		if (likely(!vma || addr+len <= vma->vm_start)) {
-+		if (likely(check_heap_stack_gap(vma, addr, len))) {
++		if (likely(check_heap_stack_gap(vma, addr, len, offset))) {
  			/* remember the address as a hint for next time */
  			return (mm->free_area_cache = addr);
  		}
-@@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -134,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
   		        mm->cached_hole_size = vma->vm_start - addr;
  
  		/* try just below the current vma->vm_start */
 -		addr = (vma->vm_start-len) & HPAGE_MASK;
 -	} while (likely(len < vma->vm_start));
-+		addr = skip_heap_stack_gap(vma, len);
++		addr = skip_heap_stack_gap(vma, len, offset);
 +	} while (!IS_ERR_VALUE(addr));
  
  bottomup:
  	/*
-@@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+@@ -163,6 +166,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ 	struct mm_struct *mm = current->mm;
+ 	struct vm_area_struct *vma;
+ 	unsigned long task_size = TASK_SIZE;
++	unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
+ 
+ 	if (test_thread_flag(TIF_32BIT))
+ 		task_size = STACK_TOP32;
+@@ -181,8 +185,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
  	if (addr) {
  		addr = ALIGN(addr, HPAGE_SIZE);
  		vma = find_vma(mm, addr);
 -		if (task_size - len >= addr &&
 -		    (!vma || addr + len <= vma->vm_start))
-+		if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
++		if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
  			return addr;
  	}
  	if (mm->get_unmapped_area == arch_get_unmapped_area)
@@ -15872,7 +16061,7 @@ index 9b9f18b..9fcaa04 100644
  #include <asm/processor.h>
  #include <asm/fcntl.h>
 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
-index 88b725a..3efabf6 100644
+index cf8639b..98fcee6 100644
 --- a/arch/x86/kernel/entry_32.S
 +++ b/arch/x86/kernel/entry_32.S
 @@ -177,13 +177,153 @@
@@ -16549,7 +16738,7 @@ index 88b725a..3efabf6 100644
  /*
   * End of kprobes section
   */
-@@ -1121,7 +1390,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
+@@ -1120,7 +1389,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
  
  ENTRY(mcount)
  	ret
@@ -16558,7 +16747,7 @@ index 88b725a..3efabf6 100644
  
  ENTRY(ftrace_caller)
  	cmpl $0, function_trace_stop
-@@ -1154,7 +1423,7 @@ ftrace_graph_call:
+@@ -1153,7 +1422,7 @@ ftrace_graph_call:
  .globl ftrace_stub
  ftrace_stub:
  	ret
@@ -16567,7 +16756,7 @@ index 88b725a..3efabf6 100644
  
  ENTRY(ftrace_regs_caller)
  	pushf	/* push flags before compare (in cs location) */
-@@ -1255,7 +1524,7 @@ trace:
+@@ -1254,7 +1523,7 @@ trace:
  	popl %ecx
  	popl %eax
  	jmp ftrace_stub
@@ -16576,7 +16765,7 @@ index 88b725a..3efabf6 100644
  #endif /* CONFIG_DYNAMIC_FTRACE */
  #endif /* CONFIG_FUNCTION_TRACER */
  
-@@ -1273,7 +1542,7 @@ ENTRY(ftrace_graph_caller)
+@@ -1272,7 +1541,7 @@ ENTRY(ftrace_graph_caller)
  	popl %ecx
  	popl %eax
  	ret
@@ -16585,7 +16774,7 @@ index 88b725a..3efabf6 100644
  
  .globl return_to_handler
  return_to_handler:
-@@ -1329,15 +1598,18 @@ error_code:
+@@ -1328,15 +1597,18 @@ error_code:
  	movl $-1, PT_ORIG_EAX(%esp)	# no syscall to restart
  	REG_TO_PTGS %ecx
  	SET_KERNEL_GS %ecx
@@ -16606,7 +16795,7 @@ index 88b725a..3efabf6 100644
  
  /*
   * Debug traps and NMI can happen at the one SYSENTER instruction
-@@ -1380,7 +1652,7 @@ debug_stack_correct:
+@@ -1379,7 +1651,7 @@ debug_stack_correct:
  	call do_debug
  	jmp ret_from_exception
  	CFI_ENDPROC
@@ -16615,7 +16804,7 @@ index 88b725a..3efabf6 100644
  
  /*
   * NMI is doubly nasty. It can happen _while_ we're handling
-@@ -1418,6 +1690,9 @@ nmi_stack_correct:
+@@ -1417,6 +1689,9 @@ nmi_stack_correct:
  	xorl %edx,%edx		# zero error code
  	movl %esp,%eax		# pt_regs pointer
  	call do_nmi
@@ -16625,7 +16814,7 @@ index 88b725a..3efabf6 100644
  	jmp restore_all_notrace
  	CFI_ENDPROC
  
-@@ -1454,12 +1729,15 @@ nmi_espfix_stack:
+@@ -1453,12 +1728,15 @@ nmi_espfix_stack:
  	FIXUP_ESPFIX_STACK		# %eax == %esp
  	xorl %edx,%edx			# zero error code
  	call do_nmi
@@ -16642,7 +16831,7 @@ index 88b725a..3efabf6 100644
  
  ENTRY(int3)
  	RING0_INT_FRAME
-@@ -1472,14 +1750,14 @@ ENTRY(int3)
+@@ -1471,14 +1749,14 @@ ENTRY(int3)
  	call do_int3
  	jmp ret_from_exception
  	CFI_ENDPROC
@@ -16659,7 +16848,7 @@ index 88b725a..3efabf6 100644
  
  #ifdef CONFIG_KVM_GUEST
  ENTRY(async_page_fault)
-@@ -1488,7 +1766,7 @@ ENTRY(async_page_fault)
+@@ -1487,7 +1765,7 @@ ENTRY(async_page_fault)
  	pushl_cfi $do_async_page_fault
  	jmp error_code
  	CFI_ENDPROC
@@ -20118,7 +20307,7 @@ index 7a6f3b3..bed145d7 100644
  
  1:
 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
-index ca45696..6384a09 100644
+index 86c524c..bae70c5 100644
 --- a/arch/x86/kernel/setup.c
 +++ b/arch/x86/kernel/setup.c
 @@ -441,7 +441,7 @@ static void __init parse_setup_data(void)
@@ -20130,16 +20319,16 @@ index ca45696..6384a09 100644
  			break;
  		case SETUP_DTB:
  			add_dtb(pa_data);
-@@ -633,7 +633,7 @@ static void __init trim_bios_range(void)
+@@ -708,7 +708,7 @@ static void __init trim_bios_range(void)
  	 * area (640->1Mb) as ram even though it is not.
  	 * take them out.
  	 */
 -	e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
 +	e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
+ 
  	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
  }
- 
-@@ -756,14 +756,14 @@ void __init setup_arch(char **cmdline_p)
+@@ -832,14 +832,14 @@ void __init setup_arch(char **cmdline_p)
  
  	if (!boot_params.hdr.root_flags)
  		root_mountflags &= ~MS_RDONLY;
@@ -20365,10 +20554,10 @@ index cd3b243..4ba27a4 100644
  		switch (opcode[i]) {
 diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
 new file mode 100644
-index 0000000..db6b9ed
+index 0000000..26bb1af
 --- /dev/null
 +++ b/arch/x86/kernel/sys_i386_32.c
-@@ -0,0 +1,247 @@
+@@ -0,0 +1,249 @@
 +/*
 + * This file contains various random system calls that
 + * have a non-standard calling sequence on the Linux/i386
@@ -20417,6 +20606,7 @@ index 0000000..db6b9ed
 +	struct mm_struct *mm = current->mm;
 +	struct vm_area_struct *vma;
 +	unsigned long start_addr, pax_task_size = TASK_SIZE;
++	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
 +
 +#ifdef CONFIG_PAX_SEGMEXEC
 +	if (mm->pax_flags & MF_PAX_SEGMEXEC)
@@ -20439,7 +20629,7 @@ index 0000000..db6b9ed
 +		addr = PAGE_ALIGN(addr);
 +		if (pax_task_size - len >= addr) {
 +			vma = find_vma(mm, addr);
-+			if (check_heap_stack_gap(vma, addr, len))
++			if (check_heap_stack_gap(vma, addr, len, offset))
 +				return addr;
 +		}
 +	}
@@ -20481,7 +20671,7 @@ index 0000000..db6b9ed
 +			}
 +			return -ENOMEM;
 +		}
-+		if (check_heap_stack_gap(vma, addr, len))
++		if (check_heap_stack_gap(vma, addr, len, offset))
 +			break;
 +		if (addr + mm->cached_hole_size < vma->vm_start)
 +			mm->cached_hole_size = vma->vm_start - addr;
@@ -20508,6 +20698,7 @@ index 0000000..db6b9ed
 +	struct vm_area_struct *vma;
 +	struct mm_struct *mm = current->mm;
 +	unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
++	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
 +
 +#ifdef CONFIG_PAX_SEGMEXEC
 +	if (mm->pax_flags & MF_PAX_SEGMEXEC)
@@ -20537,7 +20728,7 @@ index 0000000..db6b9ed
 +		addr = PAGE_ALIGN(addr);
 +		if (pax_task_size - len >= addr) {
 +			vma = find_vma(mm, addr);
-+			if (check_heap_stack_gap(vma, addr, len))
++			if (check_heap_stack_gap(vma, addr, len, offset))
 +				return addr;
 +		}
 +	}
@@ -20554,7 +20745,7 @@ index 0000000..db6b9ed
 +	/* make sure it can fit in the remaining address space */
 +	if (addr > len) {
 +		vma = find_vma(mm, addr-len);
-+		if (check_heap_stack_gap(vma, addr - len, len))
++		if (check_heap_stack_gap(vma, addr - len, len, offset))
 +			/* remember the address as a hint for next time */
 +			return (mm->free_area_cache = addr-len);
 +	}
@@ -20571,7 +20762,7 @@ index 0000000..db6b9ed
 +		 * return with success:
 +		 */
 +		vma = find_vma(mm, addr);
-+		if (check_heap_stack_gap(vma, addr, len))
++		if (check_heap_stack_gap(vma, addr, len, offset))
 +			/* remember the address as a hint for next time */
 +			return (mm->free_area_cache = addr);
 +
@@ -20580,7 +20771,7 @@ index 0000000..db6b9ed
 +			mm->cached_hole_size = vma->vm_start - addr;
 +
 +		/* try just below the current vma->vm_start */
-+		addr = skip_heap_stack_gap(vma, len);
++		addr = skip_heap_stack_gap(vma, len, offset);
 +	} while (!IS_ERR_VALUE(addr));
 +
 +bottomup:
@@ -20617,7 +20808,7 @@ index 0000000..db6b9ed
 +	return addr;
 +}
 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
-index b4d3c39..82bb73b 100644
+index b4d3c39..d699d77 100644
 --- a/arch/x86/kernel/sys_x86_64.c
 +++ b/arch/x86/kernel/sys_x86_64.c
 @@ -95,8 +95,8 @@ out:
@@ -20640,7 +20831,12 @@ index b4d3c39..82bb73b 100644
  		*end = TASK_SIZE;
  	}
  }
-@@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -128,20 +128,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ 	struct vm_area_struct *vma;
+ 	unsigned long start_addr;
+ 	unsigned long begin, end;
++	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ 
  	if (flags & MAP_FIXED)
  		return addr;
  
@@ -20659,29 +20855,30 @@ index b4d3c39..82bb73b 100644
  		vma = find_vma(mm, addr);
 -		if (end - len >= addr &&
 -		    (!vma || addr + len <= vma->vm_start))
-+		if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
++		if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
  			return addr;
  	}
  	if (((flags & MAP_32BIT) || test_thread_flag(TIF_ADDR32))
-@@ -172,7 +175,7 @@ full_search:
+@@ -172,7 +176,7 @@ full_search:
  			}
  			return -ENOMEM;
  		}
 -		if (!vma || addr + len <= vma->vm_start) {
-+		if (check_heap_stack_gap(vma, addr, len)) {
++		if (check_heap_stack_gap(vma, addr, len, offset)) {
  			/*
  			 * Remember the place where we stopped the search:
  			 */
-@@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -195,7 +199,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  {
  	struct vm_area_struct *vma;
  	struct mm_struct *mm = current->mm;
 -	unsigned long addr = addr0, start_addr;
 +	unsigned long base = mm->mmap_base, addr = addr0, start_addr;
++	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
  
  	/* requested length too big for entire address space */
  	if (len > TASK_SIZE)
-@@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -208,13 +213,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  	if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
  		goto bottomup;
  
@@ -20698,33 +20895,33 @@ index b4d3c39..82bb73b 100644
 -			return addr;
 +		if (TASK_SIZE - len >= addr) {
 +			vma = find_vma(mm, addr);
-+			if (check_heap_stack_gap(vma, addr, len))
++			if (check_heap_stack_gap(vma, addr, len, offset))
 +				return addr;
 +		}
  	}
  
  	/* check if free_area_cache is useful for us */
-@@ -240,7 +248,7 @@ try_again:
+@@ -240,7 +250,7 @@ try_again:
  		 * return with success:
  		 */
  		vma = find_vma(mm, addr);
 -		if (!vma || addr+len <= vma->vm_start)
-+		if (check_heap_stack_gap(vma, addr, len))
++		if (check_heap_stack_gap(vma, addr, len, offset))
  			/* remember the address as a hint for next time */
  			return mm->free_area_cache = addr;
  
-@@ -249,8 +257,8 @@ try_again:
+@@ -249,8 +259,8 @@ try_again:
  			mm->cached_hole_size = vma->vm_start - addr;
  
  		/* try just below the current vma->vm_start */
 -		addr = vma->vm_start-len;
 -	} while (len < vma->vm_start);
-+		addr = skip_heap_stack_gap(vma, len);
++		addr = skip_heap_stack_gap(vma, len, offset);
 +	} while (!IS_ERR_VALUE(addr));
  
  fail:
  	/*
-@@ -270,13 +278,21 @@ bottomup:
+@@ -270,13 +280,21 @@ bottomup:
  	 * can happen with large stack limits and large mmap()
  	 * allocations.
  	 */
@@ -25581,15 +25778,16 @@ index 6f31ee5..8ee4164 100644
  
  	return (void *)vaddr;
 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
-index 937bff5..d777418 100644
+index 937bff5..a354c44 100644
 --- a/arch/x86/mm/hugetlbpage.c
 +++ b/arch/x86/mm/hugetlbpage.c
-@@ -276,13 +276,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
+@@ -276,13 +276,21 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
  	struct hstate *h = hstate_file(file);
  	struct mm_struct *mm = current->mm;
  	struct vm_area_struct *vma;
 -	unsigned long start_addr;
 +	unsigned long start_addr, pax_task_size = TASK_SIZE;
++	unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
 +
 +#ifdef CONFIG_PAX_SEGMEXEC
 +	if (mm->pax_flags & MF_PAX_SEGMEXEC)
@@ -25609,7 +25807,7 @@ index 937bff5..d777418 100644
  	}
  
  full_search:
-@@ -290,26 +297,27 @@ full_search:
+@@ -290,26 +298,27 @@ full_search:
  
  	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
  		/* At this point:  (!vma || addr < vma->vm_end). */
@@ -25632,7 +25830,7 @@ index 937bff5..d777418 100644
 -			mm->free_area_cache = addr + len;
 -			return addr;
 -		}
-+		if (check_heap_stack_gap(vma, addr, len))
++		if (check_heap_stack_gap(vma, addr, len, offset))
 +			break;
  		if (addr + mm->cached_hole_size < vma->vm_start)
  		        mm->cached_hole_size = vma->vm_start - addr;
@@ -25644,7 +25842,7 @@ index 937bff5..d777418 100644
  }
  
  static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
-@@ -320,9 +328,8 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
+@@ -320,9 +329,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
  	struct mm_struct *mm = current->mm;
  	struct vm_area_struct *vma;
  	unsigned long base = mm->mmap_base;
@@ -25652,10 +25850,11 @@ index 937bff5..d777418 100644
 +	unsigned long addr;
  	unsigned long largest_hole = mm->cached_hole_size;
 -	unsigned long start_addr;
++	unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
  
  	/* don't allow allocations above current base */
  	if (mm->free_area_cache > base)
-@@ -332,16 +339,15 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
+@@ -332,16 +341,15 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
  	        largest_hole = 0;
  		mm->free_area_cache  = base;
  	}
@@ -25674,12 +25873,12 @@ index 937bff5..d777418 100644
  		/*
  		 * Lookup failure means no vma is above this address,
  		 * i.e. return with success:
-@@ -350,10 +356,10 @@ try_again:
+@@ -350,10 +358,10 @@ try_again:
  		if (!vma)
  			return addr;
  
 -		if (addr + len <= vma->vm_start) {
-+		if (check_heap_stack_gap(vma, addr, len)) {
++		if (check_heap_stack_gap(vma, addr, len, offset)) {
  			/* remember the address as a hint for next time */
 -		        mm->cached_hole_size = largest_hole;
 -		        return (mm->free_area_cache = addr);
@@ -25688,7 +25887,7 @@ index 937bff5..d777418 100644
  		} else if (mm->free_area_cache == vma->vm_end) {
  			/* pull free_area_cache down to the first hole */
  			mm->free_area_cache = vma->vm_start;
-@@ -362,29 +368,34 @@ try_again:
+@@ -362,29 +370,34 @@ try_again:
  
  		/* remember the largest hole we saw so far */
  		if (addr + largest_hole < vma->vm_start)
@@ -25698,7 +25897,7 @@ index 937bff5..d777418 100644
  		/* try just below the current vma->vm_start */
 -		addr = (vma->vm_start - len) & huge_page_mask(h);
 -	} while (len <= vma->vm_start);
-+		addr = skip_heap_stack_gap(vma, len);
++		addr = skip_heap_stack_gap(vma, len, offset);
 +	} while (!IS_ERR_VALUE(addr));
  
  fail:
@@ -25736,7 +25935,7 @@ index 937bff5..d777418 100644
  	mm->cached_hole_size = ~0UL;
  	addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
  			len, pgoff, flags);
-@@ -392,6 +403,7 @@ fail:
+@@ -392,6 +405,7 @@ fail:
  	/*
  	 * Restore the topdown base:
  	 */
@@ -25744,11 +25943,12 @@ index 937bff5..d777418 100644
  	mm->free_area_cache = base;
  	mm->cached_hole_size = ~0UL;
  
-@@ -405,10 +417,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+@@ -405,10 +419,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
  	struct hstate *h = hstate_file(file);
  	struct mm_struct *mm = current->mm;
  	struct vm_area_struct *vma;
 +	unsigned long pax_task_size = TASK_SIZE;
++	unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
  
  	if (len & ~huge_page_mask(h))
  		return -EINVAL;
@@ -25765,13 +25965,13 @@ index 937bff5..d777418 100644
  		return -ENOMEM;
  
  	if (flags & MAP_FIXED) {
-@@ -420,8 +441,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+@@ -420,8 +444,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
  	if (addr) {
  		addr = ALIGN(addr, huge_page_size(h));
  		vma = find_vma(mm, addr);
 -		if (TASK_SIZE - len >= addr &&
 -		    (!vma || addr + len <= vma->vm_start))
-+		if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
++		if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
  			return addr;
  	}
  	if (mm->get_unmapped_area == arch_get_unmapped_area)
@@ -28896,7 +29096,7 @@ index 7005ced..530d6eb 100644
 +	*(void **)&x86_io_apic_ops.read = xen_io_apic_read;
  }
 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
-index 586d838..a973e1c 100644
+index 586d838..7082fc8 100644
 --- a/arch/x86/xen/enlighten.c
 +++ b/arch/x86/xen/enlighten.c
 @@ -99,8 +99,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
@@ -28908,6 +29108,15 @@ index 586d838..a973e1c 100644
  RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
  __read_mostly int xen_have_vector_callback;
  EXPORT_SYMBOL_GPL(xen_have_vector_callback);
+@@ -523,7 +521,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
+ 	unsigned long va = dtr->address;
+ 	unsigned int size = dtr->size + 1;
+ 	unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+-	unsigned long frames[pages];
++	unsigned long frames[65536 / PAGE_SIZE];
+ 	int f;
+ 
+ 	/*
 @@ -918,21 +916,21 @@ static u32 xen_safe_apic_wait_icr_idle(void)
  
  static void set_xen_basic_apic_ops(void)
@@ -39062,10 +39271,10 @@ index 0d4aa82..f7832d4 100644
  
  /* core tmem accessor functions */
 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
-index dcecbfb..4fad7d5 100644
+index 13fe16c..cbdc39a 100644
 --- a/drivers/target/target_core_transport.c
 +++ b/drivers/target/target_core_transport.c
-@@ -1087,7 +1087,7 @@ struct se_device *transport_add_device_to_core_hba(
+@@ -1085,7 +1085,7 @@ struct se_device *transport_add_device_to_core_hba(
  	spin_lock_init(&dev->se_port_lock);
  	spin_lock_init(&dev->se_tmr_lock);
  	spin_lock_init(&dev->qf_cmd_lock);
@@ -39074,7 +39283,7 @@ index dcecbfb..4fad7d5 100644
  
  	se_dev_set_default_attribs(dev, dev_limits);
  
-@@ -1277,7 +1277,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
+@@ -1275,7 +1275,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
  	 * Used to determine when ORDERED commands should go from
  	 * Dormant to Active status.
  	 */
@@ -39430,10 +39639,10 @@ index 8c0b7b4..e88f052 100644
  }
  EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
 diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
-index a82b399..331a220 100644
+index 8cf8d0a..4ef9ed0 100644
 --- a/drivers/tty/pty.c
 +++ b/drivers/tty/pty.c
-@@ -728,8 +728,10 @@ static void __init unix98_pty_init(void)
+@@ -730,8 +730,10 @@ static void __init unix98_pty_init(void)
  		panic("Couldn't register Unix98 pts driver");
  
  	/* Now create the /dev/ptmx special device */
@@ -46917,18 +47126,6 @@ index 3c20de1..6ff2460 100644
  	atomic_t s_lock_busy;
  
  	/* locality groups */
-diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
-index b3c243b..772c318 100644
---- a/fs/ext4/inode.c
-+++ b/fs/ext4/inode.c
-@@ -1503,6 +1503,7 @@ static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd)
- 
- 	index = mpd->first_page;
- 	end   = mpd->next_page - 1;
-+	pagevec_init(&pvec, 0);
- 	while (index <= end) {
- 		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
- 		if (nr_pages == 0)
 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
 index 526e553..3f2de85 100644
 --- a/fs/ext4/mballoc.c
@@ -51615,6 +51812,43 @@ index 48c7bd1..d0740e4 100644
  		ret = -EAGAIN;
  
  	pipe_unlock(ipipe);
+diff --git a/fs/stat.c b/fs/stat.c
+index eae4946..6198f55 100644
+--- a/fs/stat.c
++++ b/fs/stat.c
+@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
+ 	stat->gid = inode->i_gid;
+ 	stat->rdev = inode->i_rdev;
+ 	stat->size = i_size_read(inode);
+-	stat->atime = inode->i_atime;
+-	stat->mtime = inode->i_mtime;
++	if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
++		stat->atime = inode->i_ctime;
++		stat->mtime = inode->i_ctime;
++	} else {
++		stat->atime = inode->i_atime;
++		stat->mtime = inode->i_mtime;
++	}
+ 	stat->ctime = inode->i_ctime;
+ 	stat->blksize = (1 << inode->i_blkbits);
+ 	stat->blocks = inode->i_blocks;
+@@ -46,8 +51,14 @@ int vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
+ 	if (retval)
+ 		return retval;
+ 
+-	if (inode->i_op->getattr)
+-		return inode->i_op->getattr(mnt, dentry, stat);
++	if (inode->i_op->getattr) {
++		retval = inode->i_op->getattr(mnt, dentry, stat);
++		if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
++			stat->atime = stat->ctime;
++			stat->mtime = stat->ctime;
++		}
++		return retval;
++	}
+ 
+ 	generic_fillattr(inode, stat);
+ 	return 0;
 diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
 index 2fbdff6..5530a61 100644
 --- a/fs/sysfs/dir.c
@@ -51879,10 +52113,10 @@ index 4e00cf0..3374374 100644
  		kfree(s);
 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
 new file mode 100644
-index 0000000..ba93743
+index 0000000..5ce8347
 --- /dev/null
 +++ b/grsecurity/Kconfig
-@@ -0,0 +1,982 @@
+@@ -0,0 +1,1015 @@
 +#
 +# grecurity configuration
 +#
@@ -51962,6 +52196,26 @@ index 0000000..ba93743
 +	  If you're using KERNEXEC, it's recommended that you enable this option
 +	  to supplement the hardening of the kernel.
 +  
++config GRKERNSEC_RAND_THREADSTACK
++	bool "Insert random gaps between thread stacks"
++	default y if GRKERNSEC_CONFIG_AUTO
++	depends on PAX_RANDMMAP && !PPC
++	help
++	  If you say Y here, a random-sized gap will be enforced between allocated
++	  thread stacks.  Glibc's NPTL and other threading libraries that
++	  pass MAP_STACK to the kernel for thread stack allocation are supported.
++	  The implementation currently provides 8 bits of entropy for the gap.
++
++	  Many distributions do not compile threaded remote services with the
++	  -fstack-check argument to GCC, causing the variable-sized stack-based
++	  allocator, alloca(), to not probe the stack on allocation.  This
++	  permits an unbounded alloca() to skip over any guard page and potentially
++	  modify another thread's stack reliably.  An enforced random gap
++	  reduces the reliability of such an attack and increases the chance
++	  that such a read/write to another thread's stack instead lands in
++	  an unmapped area, causing a crash and triggering grsecurity's
++	  anti-bruteforcing logic.
++
 +config GRKERNSEC_PROC_MEMMAP
 +	bool "Harden ASLR against information leaks and entropy reduction"
 +	default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
@@ -52244,6 +52498,19 @@ index 0000000..ba93743
 +	  behavior if this option is enabled in an init script on a read-only
 +	  filesystem.  This feature is mainly intended for secure embedded systems.
 +
++config GRKERNSEC_DEVICE_SIDECHANNEL
++	bool "Eliminate stat/notify-based device sidechannels"
++	default y if GRKERNSEC_CONFIG_AUTO
++	help
++	  If you say Y here, timing analyses on block or character
++	  devices like /dev/ptmx using stat or inotify/dnotify/fanotify
++	  will be thwarted for unprivileged users.  If a process without
++	  CAP_MKNOD stats such a device, the last access and last modify times
++	  will match the device's create time.  No access or modify events
++	  will be triggered through inotify/dnotify/fanotify for such devices.
++	  This feature will prevent attacks that may at a minimum
++	  allow an attacker to determine the administrator's password length.
++
 +config GRKERNSEC_CHROOT
 +	bool "Chroot jail restrictions"
 +	default y if GRKERNSEC_CONFIG_AUTO
@@ -62841,7 +63108,7 @@ index 3044254..9767f41 100644
  extern bool frontswap_enabled;
  extern struct frontswap_ops
 diff --git a/include/linux/fs.h b/include/linux/fs.h
-index 75fe9a1..72a4a6b 100644
+index 75fe9a1..8417cac 100644
 --- a/include/linux/fs.h
 +++ b/include/linux/fs.h
 @@ -1543,7 +1543,8 @@ struct file_operations {
@@ -62854,6 +63121,21 @@ index 75fe9a1..72a4a6b 100644
  
  struct inode_operations {
  	struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
+@@ -2667,4 +2668,14 @@ static inline void inode_has_no_xattr(struct inode *inode)
+ 		inode->i_flags |= S_NOSEC;
+ }
+ 
++static inline bool is_sidechannel_device(const struct inode *inode)
++{
++#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
++	umode_t mode = inode->i_mode;
++	return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
++#else
++	return false;
++#endif
++}
++
+ #endif /* _LINUX_FS_H */
 diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
 index 003dc0f..3c4ea97 100644
 --- a/include/linux/fs_struct.h
@@ -62890,10 +63172,30 @@ index ce31408..b1ad003 100644
  	op->release = release;
  	INIT_LIST_HEAD(&op->pend_link);
 diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
-index 0fbfb46..52a6556 100644
+index 0fbfb46..508eb0d 100644
 --- a/include/linux/fsnotify.h
 +++ b/include/linux/fsnotify.h
-@@ -315,7 +315,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
+@@ -195,6 +195,9 @@ static inline void fsnotify_access(struct file *file)
+ 	struct inode *inode = path->dentry->d_inode;
+ 	__u32 mask = FS_ACCESS;
+ 
++	if (is_sidechannel_device(inode))
++		return;
++
+ 	if (S_ISDIR(inode->i_mode))
+ 		mask |= FS_ISDIR;
+ 
+@@ -213,6 +216,9 @@ static inline void fsnotify_modify(struct file *file)
+ 	struct inode *inode = path->dentry->d_inode;
+ 	__u32 mask = FS_MODIFY;
+ 
++	if (is_sidechannel_device(inode))
++		return;
++
+ 	if (S_ISDIR(inode->i_mode))
+ 		mask |= FS_ISDIR;
+ 
+@@ -315,7 +321,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
   */
  static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
  {
@@ -65241,7 +65543,7 @@ index bfe1f47..6a33ee3 100644
  static inline void anon_vma_merge(struct vm_area_struct *vma,
  				  struct vm_area_struct *next)
 diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 0dd42a0..cc9bffb 100644
+index 0dd42a0..f5dc099 100644
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
 @@ -61,6 +61,7 @@ struct bio_list;
@@ -65252,7 +65554,7 @@ index 0dd42a0..cc9bffb 100644
  
  /*
   * List of flags we want to share for kernel threads,
-@@ -344,10 +345,13 @@ struct user_namespace;
+@@ -344,10 +345,23 @@ struct user_namespace;
  #define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
  
  extern int sysctl_max_map_count;
@@ -65261,12 +65563,22 @@ index 0dd42a0..cc9bffb 100644
  #include <linux/aio.h>
  
  #ifdef CONFIG_MMU
-+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
-+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
++
++#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
++extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
++#else
++static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
++{
++	return 0;
++}
++#endif
++
++extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
++extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
  extern void arch_pick_mmap_layout(struct mm_struct *mm);
  extern unsigned long
  arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
-@@ -614,6 +618,17 @@ struct signal_struct {
+@@ -614,6 +628,17 @@ struct signal_struct {
  #ifdef CONFIG_TASKSTATS
  	struct taskstats *stats;
  #endif
@@ -65284,7 +65596,7 @@ index 0dd42a0..cc9bffb 100644
  #ifdef CONFIG_AUDIT
  	unsigned audit_tty;
  	struct tty_audit_buf *tty_audit_buf;
-@@ -691,6 +706,11 @@ struct user_struct {
+@@ -691,6 +716,11 @@ struct user_struct {
  	struct key *session_keyring;	/* UID's default session keyring */
  #endif
  
@@ -65296,7 +65608,7 @@ index 0dd42a0..cc9bffb 100644
  	/* Hash table maintenance information */
  	struct hlist_node uidhash_node;
  	kuid_t uid;
-@@ -1312,8 +1332,8 @@ struct task_struct {
+@@ -1312,8 +1342,8 @@ struct task_struct {
  	struct list_head thread_group;
  
  	struct completion *vfork_done;		/* for vfork() */
@@ -65307,7 +65619,7 @@ index 0dd42a0..cc9bffb 100644
  
  	cputime_t utime, stime, utimescaled, stimescaled;
  	cputime_t gtime;
-@@ -1329,11 +1349,6 @@ struct task_struct {
+@@ -1329,11 +1359,6 @@ struct task_struct {
  	struct task_cputime cputime_expires;
  	struct list_head cpu_timers[3];
  
@@ -65319,7 +65631,7 @@ index 0dd42a0..cc9bffb 100644
  	char comm[TASK_COMM_LEN]; /* executable name excluding path
  				     - access with [gs]et_task_comm (which lock
  				       it with task_lock())
-@@ -1350,6 +1365,10 @@ struct task_struct {
+@@ -1350,6 +1375,10 @@ struct task_struct {
  #endif
  /* CPU-specific state of this task */
  	struct thread_struct thread;
@@ -65330,7 +65642,7 @@ index 0dd42a0..cc9bffb 100644
  /* filesystem information */
  	struct fs_struct *fs;
  /* open file information */
-@@ -1423,6 +1442,10 @@ struct task_struct {
+@@ -1423,6 +1452,10 @@ struct task_struct {
  	gfp_t lockdep_reclaim_gfp;
  #endif
  
@@ -65341,7 +65653,7 @@ index 0dd42a0..cc9bffb 100644
  /* journalling filesystem info */
  	void *journal_info;
  
-@@ -1461,6 +1484,10 @@ struct task_struct {
+@@ -1461,6 +1494,10 @@ struct task_struct {
  	/* cg_list protected by css_set_lock and tsk->alloc_lock */
  	struct list_head cg_list;
  #endif
@@ -65352,7 +65664,7 @@ index 0dd42a0..cc9bffb 100644
  #ifdef CONFIG_FUTEX
  	struct robust_list_head __user *robust_list;
  #ifdef CONFIG_COMPAT
-@@ -1548,8 +1575,75 @@ struct task_struct {
+@@ -1548,8 +1585,75 @@ struct task_struct {
  #ifdef CONFIG_UPROBES
  	struct uprobe_task *utask;
  #endif
@@ -65428,7 +65740,7 @@ index 0dd42a0..cc9bffb 100644
  /* Future-safe accessor for struct task_struct's cpus_allowed. */
  #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
  
-@@ -2092,7 +2186,9 @@ void yield(void);
+@@ -2092,7 +2196,9 @@ void yield(void);
  extern struct exec_domain	default_exec_domain;
  
  union thread_union {
@@ -65438,7 +65750,7 @@ index 0dd42a0..cc9bffb 100644
  	unsigned long stack[THREAD_SIZE/sizeof(long)];
  };
  
-@@ -2125,6 +2221,7 @@ extern struct pid_namespace init_pid_ns;
+@@ -2125,6 +2231,7 @@ extern struct pid_namespace init_pid_ns;
   */
  
  extern struct task_struct *find_task_by_vpid(pid_t nr);
@@ -65446,7 +65758,7 @@ index 0dd42a0..cc9bffb 100644
  extern struct task_struct *find_task_by_pid_ns(pid_t nr,
  		struct pid_namespace *ns);
  
-@@ -2281,7 +2378,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
+@@ -2281,7 +2388,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
  extern void exit_itimers(struct signal_struct *);
  extern void flush_itimer_signals(void);
  
@@ -65455,7 +65767,7 @@ index 0dd42a0..cc9bffb 100644
  
  extern void daemonize(const char *, ...);
  extern int allow_signal(int);
-@@ -2485,9 +2582,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
+@@ -2485,9 +2592,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
  
  #endif
  
@@ -66723,10 +67035,10 @@ index 91244a0..89ca1a7 100644
  struct snd_soc_platform {
  	const char *name;
 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
-index 5be8937..cefbdd5 100644
+index fca8bbe..c0242ea 100644
 --- a/include/target/target_core_base.h
 +++ b/include/target/target_core_base.h
-@@ -758,7 +758,7 @@ struct se_device {
+@@ -760,7 +760,7 @@ struct se_device {
  	spinlock_t		stats_lock;
  	/* Active commands on this virtual SE device */
  	atomic_t		simple_cmds;
@@ -72754,7 +73066,7 @@ index fd3c8aa..5f324a6 100644
  	}
  	entry	= ring_buffer_event_data(event);
 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
-index 123b189..4383774 100644
+index 123b189..1e9e2a6 100644
 --- a/kernel/trace/trace_output.c
 +++ b/kernel/trace/trace_output.c
 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
@@ -72766,9 +73078,11 @@ index 123b189..4383774 100644
  		if (p) {
  			s->len = p - s->buffer;
  			return 1;
-@@ -825,13 +825,13 @@ int register_ftrace_event(struct trace_event *event)
+@@ -824,14 +824,16 @@ int register_ftrace_event(struct trace_event *event)
+ 			goto out;
  	}
  
++	pax_open_kernel();
  	if (event->funcs->trace == NULL)
 -		event->funcs->trace = trace_nop_print;
 +		*(void **)&event->funcs->trace = trace_nop_print;
@@ -72781,6 +73095,7 @@ index 123b189..4383774 100644
  	if (event->funcs->binary == NULL)
 -		event->funcs->binary = trace_nop_print;
 +		*(void **)&event->funcs->binary = trace_nop_print;
++	pax_close_kernel();
  
  	key = event->type & (EVENT_HASHSIZE - 1);
  
@@ -74558,10 +74873,18 @@ index f0b9ce5..da8d069 100644
  	    capable(CAP_IPC_LOCK))
  		ret = do_mlockall(flags);
 diff --git a/mm/mmap.c b/mm/mmap.c
-index 9a796c4..4fba820 100644
+index 9a796c4..21f8e50 100644
 --- a/mm/mmap.c
 +++ b/mm/mmap.c
-@@ -47,6 +47,16 @@
+@@ -31,6 +31,7 @@
+ #include <linux/audit.h>
+ #include <linux/khugepaged.h>
+ #include <linux/uprobes.h>
++#include <linux/random.h>
+ 
+ #include <asm/uaccess.h>
+ #include <asm/cacheflush.h>
+@@ -47,6 +48,16 @@
  #define arch_rebalance_pgtables(addr, len)		(addr)
  #endif
  
@@ -74578,7 +74901,7 @@ index 9a796c4..4fba820 100644
  static void unmap_region(struct mm_struct *mm,
  		struct vm_area_struct *vma, struct vm_area_struct *prev,
  		unsigned long start, unsigned long end);
-@@ -66,22 +76,32 @@ static void unmap_region(struct mm_struct *mm,
+@@ -66,22 +77,32 @@ static void unmap_region(struct mm_struct *mm,
   *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
   *
   */
@@ -74614,7 +74937,7 @@ index 9a796c4..4fba820 100644
  /*
   * Make sure vm_committed_as in one cacheline and not cacheline shared with
   * other variables. It can be updated by several CPUs frequently.
-@@ -223,6 +243,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
+@@ -223,6 +244,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
  	struct vm_area_struct *next = vma->vm_next;
  
  	might_sleep();
@@ -74622,7 +74945,7 @@ index 9a796c4..4fba820 100644
  	if (vma->vm_ops && vma->vm_ops->close)
  		vma->vm_ops->close(vma);
  	if (vma->vm_file)
-@@ -266,6 +287,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
+@@ -266,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
  	 * not page aligned -Ram Gupta
  	 */
  	rlim = rlimit(RLIMIT_DATA);
@@ -74630,7 +74953,7 @@ index 9a796c4..4fba820 100644
  	if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
  			(mm->end_data - mm->start_data) > rlim)
  		goto out;
-@@ -736,6 +758,12 @@ static int
+@@ -736,6 +759,12 @@ static int
  can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
  	struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
  {
@@ -74643,7 +74966,7 @@ index 9a796c4..4fba820 100644
  	if (is_mergeable_vma(vma, file, vm_flags) &&
  	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
  		if (vma->vm_pgoff == vm_pgoff)
-@@ -755,6 +783,12 @@ static int
+@@ -755,6 +784,12 @@ static int
  can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
  	struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
  {
@@ -74656,7 +74979,7 @@ index 9a796c4..4fba820 100644
  	if (is_mergeable_vma(vma, file, vm_flags) &&
  	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
  		pgoff_t vm_pglen;
-@@ -797,13 +831,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
+@@ -797,13 +832,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
  struct vm_area_struct *vma_merge(struct mm_struct *mm,
  			struct vm_area_struct *prev, unsigned long addr,
  			unsigned long end, unsigned long vm_flags,
@@ -74678,7 +75001,7 @@ index 9a796c4..4fba820 100644
  	/*
  	 * We later require that vma->vm_flags == vm_flags,
  	 * so this tests vma->vm_flags & VM_SPECIAL, too.
-@@ -819,6 +860,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+@@ -819,6 +861,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
  	if (next && next->vm_end == end)		/* cases 6, 7, 8 */
  		next = next->vm_next;
  
@@ -74694,7 +75017,7 @@ index 9a796c4..4fba820 100644
  	/*
  	 * Can it merge with the predecessor?
  	 */
-@@ -838,9 +888,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+@@ -838,9 +889,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
  							/* cases 1, 6 */
  			err = vma_adjust(prev, prev->vm_start,
  				next->vm_end, prev->vm_pgoff, NULL);
@@ -74720,7 +75043,7 @@ index 9a796c4..4fba820 100644
  		if (err)
  			return NULL;
  		khugepaged_enter_vma_merge(prev);
-@@ -854,12 +919,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
+@@ -854,12 +920,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
   			mpol_equal(policy, vma_policy(next)) &&
  			can_vma_merge_before(next, vm_flags,
  					anon_vma, file, pgoff+pglen)) {
@@ -74750,7 +75073,7 @@ index 9a796c4..4fba820 100644
  		if (err)
  			return NULL;
  		khugepaged_enter_vma_merge(area);
-@@ -968,16 +1048,13 @@ none:
+@@ -968,16 +1049,13 @@ none:
  void vm_stat_account(struct mm_struct *mm, unsigned long flags,
  						struct file *file, long pages)
  {
@@ -74768,7 +75091,7 @@ index 9a796c4..4fba820 100644
  		mm->stack_vm += pages;
  }
  #endif /* CONFIG_PROC_FS */
-@@ -1013,7 +1090,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -1013,7 +1091,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
  	 * (the exception is when the underlying filesystem is noexec
  	 *  mounted, in which case we dont add PROT_EXEC.)
  	 */
@@ -74777,7 +75100,7 @@ index 9a796c4..4fba820 100644
  		if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
  			prot |= PROT_EXEC;
  
-@@ -1039,7 +1116,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -1039,7 +1117,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
  	/* Obtain the address to map to. we verify (or select) it and ensure
  	 * that it represents a valid section of the address space.
  	 */
@@ -74786,7 +75109,7 @@ index 9a796c4..4fba820 100644
  	if (addr & ~PAGE_MASK)
  		return addr;
  
-@@ -1050,6 +1127,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -1050,6 +1128,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
  	vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
  			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
  
@@ -74823,7 +75146,7 @@ index 9a796c4..4fba820 100644
  	if (flags & MAP_LOCKED)
  		if (!can_do_mlock())
  			return -EPERM;
-@@ -1061,6 +1168,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -1061,6 +1169,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
  		locked += mm->locked_vm;
  		lock_limit = rlimit(RLIMIT_MEMLOCK);
  		lock_limit >>= PAGE_SHIFT;
@@ -74831,7 +75154,7 @@ index 9a796c4..4fba820 100644
  		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
  			return -EAGAIN;
  	}
-@@ -1127,6 +1235,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -1127,6 +1236,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
  		}
  	}
  
@@ -74841,7 +75164,7 @@ index 9a796c4..4fba820 100644
  	return mmap_region(file, addr, len, flags, vm_flags, pgoff);
  }
  
-@@ -1203,7 +1314,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
+@@ -1203,7 +1315,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
  	vm_flags_t vm_flags = vma->vm_flags;
  
  	/* If it was private or non-writable, the write bit is already clear */
@@ -74850,7 +75173,7 @@ index 9a796c4..4fba820 100644
  		return 0;
  
  	/* The backer wishes to know when pages are first written to? */
-@@ -1252,13 +1363,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
+@@ -1252,13 +1364,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
  	unsigned long charged = 0;
  	struct inode *inode =  file ? file->f_path.dentry->d_inode : NULL;
  
@@ -74875,7 +75198,7 @@ index 9a796c4..4fba820 100644
  	}
  
  	/* Check against address space limit. */
-@@ -1307,6 +1427,16 @@ munmap_back:
+@@ -1307,6 +1428,16 @@ munmap_back:
  		goto unacct_error;
  	}
  
@@ -74892,7 +75215,7 @@ index 9a796c4..4fba820 100644
  	vma->vm_mm = mm;
  	vma->vm_start = addr;
  	vma->vm_end = addr + len;
-@@ -1331,6 +1461,13 @@ munmap_back:
+@@ -1331,6 +1462,13 @@ munmap_back:
  		if (error)
  			goto unmap_and_free_vma;
  
@@ -74906,7 +75229,7 @@ index 9a796c4..4fba820 100644
  		/* Can addr have changed??
  		 *
  		 * Answer: Yes, several device drivers can do it in their
-@@ -1365,6 +1502,11 @@ munmap_back:
+@@ -1365,6 +1503,11 @@ munmap_back:
  	vma_link(mm, vma, prev, rb_link, rb_parent);
  	file = vma->vm_file;
  
@@ -74918,7 +75241,7 @@ index 9a796c4..4fba820 100644
  	/* Once vma denies write, undo our temporary denial count */
  	if (correct_wcount)
  		atomic_inc(&inode->i_writecount);
-@@ -1372,6 +1514,7 @@ out:
+@@ -1372,6 +1515,7 @@ out:
  	perf_event_mmap(vma);
  
  	vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
@@ -74926,7 +75249,7 @@ index 9a796c4..4fba820 100644
  	if (vm_flags & VM_LOCKED) {
  		if (!mlock_vma_pages_range(vma, addr, addr + len))
  			mm->locked_vm += (len >> PAGE_SHIFT);
-@@ -1393,6 +1536,12 @@ unmap_and_free_vma:
+@@ -1393,6 +1537,12 @@ unmap_and_free_vma:
  	unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
  	charged = 0;
  free_vma:
@@ -74939,11 +75262,21 @@ index 9a796c4..4fba820 100644
  	kmem_cache_free(vm_area_cachep, vma);
  unacct_error:
  	if (charged)
-@@ -1400,6 +1549,44 @@ unacct_error:
+@@ -1400,6 +1550,62 @@ unacct_error:
  	return error;
  }
  
-+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
++#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
++unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
++{
++	if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
++		return (random32() & 0xFF) << PAGE_SHIFT;
++
++	return 0;
++}
++#endif
++
++bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
 +{
 +	if (!vma) {
 +#ifdef CONFIG_STACK_GROWSUP
@@ -74966,16 +75299,24 @@ index 9a796c4..4fba820 100644
 +	else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
 +		return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
 +#endif
++	else if (offset)
++		return offset <= vma->vm_start - addr - len;
 +
 +	return true;
 +}
 +
-+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
++unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
 +{
 +	if (vma->vm_start < len)
 +		return -ENOMEM;
-+	if (!(vma->vm_flags & VM_GROWSDOWN))
-+		return vma->vm_start - len;
++
++	if (!(vma->vm_flags & VM_GROWSDOWN)) {
++		if (offset <= vma->vm_start - len)
++			return vma->vm_start - len - offset;
++		else
++			return -ENOMEM;
++	}
++
 +	if (sysctl_heap_stack_gap <= vma->vm_start - len)
 +		return vma->vm_start - len - sysctl_heap_stack_gap;
 +	return -ENOMEM;
@@ -74984,7 +75325,7 @@ index 9a796c4..4fba820 100644
  /* Get an address range which is currently unmapped.
   * For shmat() with addr=0.
   *
-@@ -1426,18 +1613,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -1426,18 +1632,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
  	if (flags & MAP_FIXED)
  		return addr;
  
@@ -75015,7 +75356,7 @@ index 9a796c4..4fba820 100644
  	}
  
  full_search:
-@@ -1448,34 +1640,40 @@ full_search:
+@@ -1448,34 +1659,40 @@ full_search:
  			 * Start a new search - just in case we missed
  			 * some holes.
  			 */
@@ -75067,7 +75408,7 @@ index 9a796c4..4fba820 100644
  		mm->free_area_cache = addr;
  }
  
-@@ -1491,7 +1689,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1491,7 +1708,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  {
  	struct vm_area_struct *vma;
  	struct mm_struct *mm = current->mm;
@@ -75076,7 +75417,7 @@ index 9a796c4..4fba820 100644
  
  	/* requested length too big for entire address space */
  	if (len > TASK_SIZE)
-@@ -1500,13 +1698,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1500,13 +1717,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  	if (flags & MAP_FIXED)
  		return addr;
  
@@ -75099,7 +75440,7 @@ index 9a796c4..4fba820 100644
  	}
  
  	/* check if free_area_cache is useful for us */
-@@ -1530,7 +1733,7 @@ try_again:
+@@ -1530,7 +1752,7 @@ try_again:
  		 * return with success:
  		 */
  		vma = find_vma(mm, addr);
@@ -75108,7 +75449,7 @@ index 9a796c4..4fba820 100644
  			/* remember the address as a hint for next time */
  			return (mm->free_area_cache = addr);
  
-@@ -1539,8 +1742,8 @@ try_again:
+@@ -1539,8 +1761,8 @@ try_again:
   		        mm->cached_hole_size = vma->vm_start - addr;
  
  		/* try just below the current vma->vm_start */
@@ -75119,7 +75460,7 @@ index 9a796c4..4fba820 100644
  
  fail:
  	/*
-@@ -1563,13 +1766,21 @@ fail:
+@@ -1563,13 +1785,21 @@ fail:
  	 * can happen with large stack limits and large mmap()
  	 * allocations.
  	 */
@@ -75143,7 +75484,7 @@ index 9a796c4..4fba820 100644
  	mm->cached_hole_size = ~0UL;
  
  	return addr;
-@@ -1578,6 +1789,12 @@ fail:
+@@ -1578,6 +1808,12 @@ fail:
  
  void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
  {
@@ -75156,7 +75497,7 @@ index 9a796c4..4fba820 100644
  	/*
  	 * Is this a new hole at the highest possible address?
  	 */
-@@ -1585,8 +1802,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
+@@ -1585,8 +1821,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
  		mm->free_area_cache = addr;
  
  	/* dont allow allocations above current base */
@@ -75168,7 +75509,7 @@ index 9a796c4..4fba820 100644
  }
  
  unsigned long
-@@ -1685,6 +1904,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
+@@ -1685,6 +1923,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
  	return vma;
  }
  
@@ -75197,7 +75538,7 @@ index 9a796c4..4fba820 100644
  /*
   * Verify that the stack growth is acceptable and
   * update accounting. This is shared with both the
-@@ -1701,6 +1942,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -1701,6 +1961,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
  		return -ENOMEM;
  
  	/* Stack limit test */
@@ -75205,7 +75546,7 @@ index 9a796c4..4fba820 100644
  	if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
  		return -ENOMEM;
  
-@@ -1711,6 +1953,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -1711,6 +1972,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
  		locked = mm->locked_vm + grow;
  		limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
  		limit >>= PAGE_SHIFT;
@@ -75213,7 +75554,7 @@ index 9a796c4..4fba820 100644
  		if (locked > limit && !capable(CAP_IPC_LOCK))
  			return -ENOMEM;
  	}
-@@ -1740,37 +1983,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -1740,37 +2002,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
   * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
   * vma is the last one with address > vma->vm_end.  Have to extend vma.
   */
@@ -75271,7 +75612,7 @@ index 9a796c4..4fba820 100644
  		unsigned long size, grow;
  
  		size = address - vma->vm_start;
-@@ -1787,6 +2041,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+@@ -1787,6 +2060,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
  			}
  		}
  	}
@@ -75280,7 +75621,7 @@ index 9a796c4..4fba820 100644
  	vma_unlock_anon_vma(vma);
  	khugepaged_enter_vma_merge(vma);
  	validate_mm(vma->vm_mm);
-@@ -1801,6 +2057,8 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -1801,6 +2076,8 @@ int expand_downwards(struct vm_area_struct *vma,
  				   unsigned long address)
  {
  	int error;
@@ -75289,7 +75630,7 @@ index 9a796c4..4fba820 100644
  
  	/*
  	 * We must make sure the anon_vma is allocated
-@@ -1814,6 +2072,15 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -1814,6 +2091,15 @@ int expand_downwards(struct vm_area_struct *vma,
  	if (error)
  		return error;
  
@@ -75305,7 +75646,7 @@ index 9a796c4..4fba820 100644
  	vma_lock_anon_vma(vma);
  
  	/*
-@@ -1823,9 +2090,17 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -1823,9 +2109,17 @@ int expand_downwards(struct vm_area_struct *vma,
  	 */
  
  	/* Somebody else might have raced and expanded it already */
@@ -75324,7 +75665,7 @@ index 9a796c4..4fba820 100644
  		size = vma->vm_end - address;
  		grow = (vma->vm_start - address) >> PAGE_SHIFT;
  
-@@ -1837,6 +2112,17 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -1837,6 +2131,17 @@ int expand_downwards(struct vm_area_struct *vma,
  				vma->vm_start = address;
  				vma->vm_pgoff -= grow;
  				anon_vma_interval_tree_post_update_vma(vma);
@@ -75342,7 +75683,7 @@ index 9a796c4..4fba820 100644
  				perf_event_mmap(vma);
  			}
  		}
-@@ -1914,6 +2200,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
+@@ -1914,6 +2219,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
  	do {
  		long nrpages = vma_pages(vma);
  
@@ -75356,7 +75697,7 @@ index 9a796c4..4fba820 100644
  		if (vma->vm_flags & VM_ACCOUNT)
  			nr_accounted += nrpages;
  		vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
-@@ -1959,6 +2252,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -1959,6 +2271,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
  	insertion_point = (prev ? &prev->vm_next : &mm->mmap);
  	vma->vm_prev = NULL;
  	do {
@@ -75373,7 +75714,7 @@ index 9a796c4..4fba820 100644
  		rb_erase(&vma->vm_rb, &mm->mm_rb);
  		mm->map_count--;
  		tail_vma = vma;
-@@ -1987,14 +2290,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -1987,14 +2309,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
  	struct vm_area_struct *new;
  	int err = -ENOMEM;
  
@@ -75407,7 +75748,7 @@ index 9a796c4..4fba820 100644
  	/* most fields are the same, copy all, and then fixup */
  	*new = *vma;
  
-@@ -2007,6 +2329,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2007,6 +2348,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
  		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
  	}
  
@@ -75430,7 +75771,7 @@ index 9a796c4..4fba820 100644
  	pol = mpol_dup(vma_policy(vma));
  	if (IS_ERR(pol)) {
  		err = PTR_ERR(pol);
-@@ -2029,6 +2367,36 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2029,6 +2386,36 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
  	else
  		err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
  
@@ -75467,7 +75808,7 @@ index 9a796c4..4fba820 100644
  	/* Success. */
  	if (!err)
  		return 0;
-@@ -2038,10 +2406,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2038,10 +2425,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
  		new->vm_ops->close(new);
  	if (new->vm_file)
  		fput(new->vm_file);
@@ -75487,7 +75828,7 @@ index 9a796c4..4fba820 100644
  	kmem_cache_free(vm_area_cachep, new);
   out_err:
  	return err;
-@@ -2054,6 +2430,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2054,6 +2449,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
  int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
  	      unsigned long addr, int new_below)
  {
@@ -75503,7 +75844,7 @@ index 9a796c4..4fba820 100644
  	if (mm->map_count >= sysctl_max_map_count)
  		return -ENOMEM;
  
-@@ -2065,11 +2450,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2065,11 +2469,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
   * work.  This now handles partial unmappings.
   * Jeremy Fitzhardinge <jeremy@goop.org>
   */
@@ -75534,7 +75875,7 @@ index 9a796c4..4fba820 100644
  	if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
  		return -EINVAL;
  
-@@ -2144,6 +2548,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
+@@ -2144,6 +2567,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
  	/* Fix up all other VM information */
  	remove_vma_list(mm, vma);
  
@@ -75543,7 +75884,7 @@ index 9a796c4..4fba820 100644
  	return 0;
  }
  
-@@ -2152,6 +2558,13 @@ int vm_munmap(unsigned long start, size_t len)
+@@ -2152,6 +2577,13 @@ int vm_munmap(unsigned long start, size_t len)
  	int ret;
  	struct mm_struct *mm = current->mm;
  
@@ -75557,7 +75898,7 @@ index 9a796c4..4fba820 100644
  	down_write(&mm->mmap_sem);
  	ret = do_munmap(mm, start, len);
  	up_write(&mm->mmap_sem);
-@@ -2165,16 +2578,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
+@@ -2165,16 +2597,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
  	return vm_munmap(addr, len);
  }
  
@@ -75574,7 +75915,7 @@ index 9a796c4..4fba820 100644
  /*
   *  this is really a simplified "do_mmap".  it only handles
   *  anonymous maps.  eventually we may be able to do some
-@@ -2188,6 +2591,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2188,6 +2610,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
  	struct rb_node ** rb_link, * rb_parent;
  	pgoff_t pgoff = addr >> PAGE_SHIFT;
  	int error;
@@ -75582,7 +75923,7 @@ index 9a796c4..4fba820 100644
  
  	len = PAGE_ALIGN(len);
  	if (!len)
-@@ -2195,16 +2599,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2195,16 +2618,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
  
  	flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
  
@@ -75614,7 +75955,7 @@ index 9a796c4..4fba820 100644
  		locked += mm->locked_vm;
  		lock_limit = rlimit(RLIMIT_MEMLOCK);
  		lock_limit >>= PAGE_SHIFT;
-@@ -2221,21 +2639,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2221,21 +2658,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
  	/*
  	 * Clear old maps.  this also does some error checking for us
  	 */
@@ -75639,7 +75980,7 @@ index 9a796c4..4fba820 100644
  		return -ENOMEM;
  
  	/* Can we just expand an old private anonymous mapping? */
-@@ -2249,7 +2666,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2249,7 +2685,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
  	 */
  	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
  	if (!vma) {
@@ -75648,7 +75989,7 @@ index 9a796c4..4fba820 100644
  		return -ENOMEM;
  	}
  
-@@ -2263,11 +2680,12 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2263,11 +2699,12 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
  	vma_link(mm, vma, prev, rb_link, rb_parent);
  out:
  	perf_event_mmap(vma);
@@ -75663,7 +76004,7 @@ index 9a796c4..4fba820 100644
  	return addr;
  }
  
-@@ -2325,6 +2743,7 @@ void exit_mmap(struct mm_struct *mm)
+@@ -2325,6 +2762,7 @@ void exit_mmap(struct mm_struct *mm)
  	while (vma) {
  		if (vma->vm_flags & VM_ACCOUNT)
  			nr_accounted += vma_pages(vma);
@@ -75671,7 +76012,7 @@ index 9a796c4..4fba820 100644
  		vma = remove_vma(vma);
  	}
  	vm_unacct_memory(nr_accounted);
-@@ -2341,6 +2760,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
+@@ -2341,6 +2779,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
  	struct vm_area_struct *prev;
  	struct rb_node **rb_link, *rb_parent;
  
@@ -75685,7 +76026,7 @@ index 9a796c4..4fba820 100644
  	/*
  	 * The vm_pgoff of a purely anonymous vma should be irrelevant
  	 * until its first write fault, when page's anon_vma and index
-@@ -2364,7 +2790,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
+@@ -2364,7 +2809,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
  	     security_vm_enough_memory_mm(mm, vma_pages(vma)))
  		return -ENOMEM;
  
@@ -75707,7 +76048,7 @@ index 9a796c4..4fba820 100644
  	return 0;
  }
  
-@@ -2384,6 +2824,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+@@ -2384,6 +2843,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
  	struct mempolicy *pol;
  	bool faulted_in_anon_vma = true;
  
@@ -75716,7 +76057,7 @@ index 9a796c4..4fba820 100644
  	/*
  	 * If anonymous vma has not yet been faulted, update new pgoff
  	 * to match new location, to increase its chance of merging.
-@@ -2450,6 +2892,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+@@ -2450,6 +2911,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
  	return NULL;
  }
  
@@ -75756,7 +76097,7 @@ index 9a796c4..4fba820 100644
  /*
   * Return true if the calling process may expand its vm space by the passed
   * number of pages
-@@ -2461,6 +2936,12 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
+@@ -2461,6 +2955,12 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
  
  	lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
  
@@ -75769,7 +76110,7 @@ index 9a796c4..4fba820 100644
  	if (cur + npages > lim)
  		return 0;
  	return 1;
-@@ -2531,6 +3012,22 @@ int install_special_mapping(struct mm_struct *mm,
+@@ -2531,6 +3031,22 @@ int install_special_mapping(struct mm_struct *mm,
  	vma->vm_start = addr;
  	vma->vm_end = addr + len;
  

diff --git a/3.7.3/4425_grsec_remove_EI_PAX.patch b/3.7.4/4425_grsec_remove_EI_PAX.patch
similarity index 100%
rename from 3.7.3/4425_grsec_remove_EI_PAX.patch
rename to 3.7.4/4425_grsec_remove_EI_PAX.patch

diff --git a/3.7.3/4430_grsec-remove-localversion-grsec.patch b/3.7.4/4430_grsec-remove-localversion-grsec.patch
similarity index 100%
rename from 3.7.3/4430_grsec-remove-localversion-grsec.patch
rename to 3.7.4/4430_grsec-remove-localversion-grsec.patch

diff --git a/3.7.3/4435_grsec-mute-warnings.patch b/3.7.4/4435_grsec-mute-warnings.patch
similarity index 100%
rename from 3.7.3/4435_grsec-mute-warnings.patch
rename to 3.7.4/4435_grsec-mute-warnings.patch

diff --git a/3.7.3/4440_grsec-remove-protected-paths.patch b/3.7.4/4440_grsec-remove-protected-paths.patch
similarity index 100%
rename from 3.7.3/4440_grsec-remove-protected-paths.patch
rename to 3.7.4/4440_grsec-remove-protected-paths.patch

diff --git a/3.7.3/4450_grsec-kconfig-default-gids.patch b/3.7.4/4450_grsec-kconfig-default-gids.patch
similarity index 97%
rename from 3.7.3/4450_grsec-kconfig-default-gids.patch
rename to 3.7.4/4450_grsec-kconfig-default-gids.patch
index e5d7e60..3dfdc8f 100644
--- a/3.7.3/4450_grsec-kconfig-default-gids.patch
+++ b/3.7.4/4450_grsec-kconfig-default-gids.patch
@@ -16,7 +16,7 @@ from shooting themselves in the foot.
 diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
 --- a/grsecurity/Kconfig	2012-10-13 09:51:35.000000000 -0400
 +++ b/grsecurity/Kconfig	2012-10-13 09:52:32.000000000 -0400
-@@ -539,7 +539,7 @@
+@@ -572,7 +572,7 @@
  config GRKERNSEC_AUDIT_GID
  	int "GID for auditing"
  	depends on GRKERNSEC_AUDIT_GROUP
@@ -25,7 +25,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
  
  config GRKERNSEC_EXECLOG
  	bool "Exec logging"
-@@ -759,7 +759,7 @@
+@@ -792,7 +792,7 @@
  config GRKERNSEC_TPE_UNTRUSTED_GID
  	int "GID for TPE-untrusted users"
  	depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
@@ -34,7 +34,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
  	help
  	  Setting this GID determines what group TPE restrictions will be
  	  *enabled* for.  If the sysctl option is enabled, a sysctl option
-@@ -768,7 +768,7 @@
+@@ -801,7 +801,7 @@
  config GRKERNSEC_TPE_TRUSTED_GID
  	int "GID for TPE-trusted users"
  	depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
@@ -43,7 +43,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
  	help
  	  Setting this GID determines what group TPE restrictions will be
  	  *disabled* for.  If the sysctl option is enabled, a sysctl option
-@@ -861,7 +861,7 @@
+@@ -894,7 +894,7 @@
  config GRKERNSEC_SOCKET_ALL_GID
  	int "GID to deny all sockets for"
  	depends on GRKERNSEC_SOCKET_ALL
@@ -52,7 +52,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
  	help
  	  Here you can choose the GID to disable socket access for. Remember to
  	  add the users you want socket access disabled for to the GID
-@@ -882,7 +882,7 @@
+@@ -915,7 +915,7 @@
  config GRKERNSEC_SOCKET_CLIENT_GID
  	int "GID to deny client sockets for"
  	depends on GRKERNSEC_SOCKET_CLIENT
@@ -61,7 +61,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
  	help
  	  Here you can choose the GID to disable client socket access for.
  	  Remember to add the users you want client socket access disabled for to
-@@ -900,7 +900,7 @@
+@@ -933,7 +933,7 @@
  config GRKERNSEC_SOCKET_SERVER_GID
  	int "GID to deny server sockets for"
  	depends on GRKERNSEC_SOCKET_SERVER

diff --git a/3.7.3/4465_selinux-avc_audit-log-curr_ip.patch b/3.7.4/4465_selinux-avc_audit-log-curr_ip.patch
similarity index 99%
rename from 3.7.3/4465_selinux-avc_audit-log-curr_ip.patch
rename to 3.7.4/4465_selinux-avc_audit-log-curr_ip.patch
index 7670223..5b614b1 100644
--- a/3.7.3/4465_selinux-avc_audit-log-curr_ip.patch
+++ b/3.7.4/4465_selinux-avc_audit-log-curr_ip.patch
@@ -28,7 +28,7 @@ Signed-off-by: Lorenzo Hernandez Garcia-Hierro <lorenzo@gnu.org>
 diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
 --- a/grsecurity/Kconfig	2011-04-17 19:25:54.000000000 -0400
 +++ b/grsecurity/Kconfig	2011-04-17 19:32:53.000000000 -0400
-@@ -959,6 +959,27 @@
+@@ -992,6 +992,27 @@
  menu "Logging Options"
  depends on GRKERNSEC
  

diff --git a/3.7.3/4470_disable-compat_vdso.patch b/3.7.4/4470_disable-compat_vdso.patch
similarity index 100%
rename from 3.7.3/4470_disable-compat_vdso.patch
rename to 3.7.4/4470_disable-compat_vdso.patch


^ permalink raw reply related	[flat|nested] only message in thread

only message in thread, other threads:[~2013-01-24 15:10 UTC | newest]

Thread overview: (only message) (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2013-01-24 15:10 [gentoo-commits] proj/hardened-patchset:master commit in: 2.6.32/, 3.7.4/, 3.7.3/, 3.2.37/ Anthony G. Basile

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox