public inbox for gentoo-commits@lists.gentoo.org
* [gentoo-commits] proj/hardened-patchset:master commit in: 2.6.32/, 3.10.4/, 3.10.5/, 3.2.50/
@ 2013-08-06 15:01 Anthony G. Basile
From: Anthony G. Basile @ 2013-08-06 15:01 UTC
  To: gentoo-commits

commit:     a7ea37ca51d54b42ca3293bc5cb1522d0b9081d1
Author:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
AuthorDate: Tue Aug  6 15:04:23 2013 +0000
Commit:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
CommitDate: Tue Aug  6 15:04:23 2013 +0000
URL:        http://git.overlays.gentoo.org/gitweb/?p=proj/hardened-patchset.git;a=commit;h=a7ea37ca

Grsec/PaX: 2.9.1-{2.6.32.61,3.2.50,3.10.5}-201308052154

---
 2.6.32/0000_README                                 |   2 +-
 ..._grsecurity-2.9.1-2.6.32.61-201308052140.patch} |  20 +-
 {3.10.4 => 3.10.5}/0000_README                     |   2 +-
 ...4420_grsecurity-2.9.1-3.10.5-201308052154.patch | 487 ++++++++++-----------
 {3.10.4 => 3.10.5}/4425_grsec_remove_EI_PAX.patch  |   0
 .../4427_force_XATTR_PAX_tmpfs.patch               |   0
 .../4430_grsec-remove-localversion-grsec.patch     |   0
 {3.10.4 => 3.10.5}/4435_grsec-mute-warnings.patch  |   0
 .../4440_grsec-remove-protected-paths.patch        |   0
 .../4450_grsec-kconfig-default-gids.patch          |   0
 .../4465_selinux-avc_audit-log-curr_ip.patch       |   0
 {3.10.4 => 3.10.5}/4470_disable-compat_vdso.patch  |   0
 {3.10.4 => 3.10.5}/4475_emutramp_default_on.patch  |   0
 3.2.50/0000_README                                 |   2 +-
 ...420_grsecurity-2.9.1-3.2.50-201308052151.patch} | 392 ++++++++++-------
 15 files changed, 474 insertions(+), 431 deletions(-)

diff --git a/2.6.32/0000_README b/2.6.32/0000_README
index a0fb57e..53f88d5 100644
--- a/2.6.32/0000_README
+++ b/2.6.32/0000_README
@@ -38,7 +38,7 @@ Patch:	1060_linux-2.6.32.61.patch
 From:	http://www.kernel.org
 Desc:	Linux 2.6.32.61
 
-Patch:	4420_grsecurity-2.9.1-2.6.32.61-201308030029.patch
+Patch:	4420_grsecurity-2.9.1-2.6.32.61-201308052140.patch
 From:	http://www.grsecurity.net
 Desc:	hardened-sources base patch from upstream grsecurity
 

diff --git a/2.6.32/4420_grsecurity-2.9.1-2.6.32.61-201308030029.patch b/2.6.32/4420_grsecurity-2.9.1-2.6.32.61-201308052140.patch
similarity index 99%
rename from 2.6.32/4420_grsecurity-2.9.1-2.6.32.61-201308030029.patch
rename to 2.6.32/4420_grsecurity-2.9.1-2.6.32.61-201308052140.patch
index d228405..7620046 100644
--- a/2.6.32/4420_grsecurity-2.9.1-2.6.32.61-201308030029.patch
+++ b/2.6.32/4420_grsecurity-2.9.1-2.6.32.61-201308052140.patch
@@ -86564,7 +86564,7 @@ index 0000000..36845aa
 +endif
 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
 new file mode 100644
-index 0000000..38b465b
+index 0000000..1276b13
 --- /dev/null
 +++ b/grsecurity/gracl.c
 @@ -0,0 +1,4309 @@
@@ -89754,7 +89754,7 @@ index 0000000..38b465b
 +	unsigned char *sprole_sum = NULL;
 +	int error = 0;
 +	int error2 = 0;
-+	size_t req_count;
++	size_t req_count = 0;
 +
 +	mutex_lock(&gr_dev_mutex);
 +
@@ -118626,6 +118626,18 @@ index 713ac59..306f6ae 100644
  	_proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
  
  	ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
+diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
+index ab82f14..b022c59 100644
+--- a/net/sched/sch_atm.c
++++ b/net/sched/sch_atm.c
+@@ -628,6 +628,7 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
+ 		struct sockaddr_atmpvc pvc;
+ 		int state;
+ 
++		memset(&pvc, 0, sizeof(pvc));
+ 		pvc.sap_family = AF_ATMPVC;
+ 		pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1;
+ 		pvc.sap_addr.vpi = flow->vcc->vpi;
 diff --git a/net/sctp/auth.c b/net/sctp/auth.c
 index 7363b9f..1b055b5 100644
 --- a/net/sctp/auth.c
@@ -120087,7 +120099,7 @@ index d52f7a0..b66cdd9 100755
  		rm -f tags
  		xtags ctags
 diff --git a/security/Kconfig b/security/Kconfig
-index fb363cd..55a557a 100644
+index fb363cd..a869a1d 100644
 --- a/security/Kconfig
 +++ b/security/Kconfig
 @@ -4,6 +4,896 @@
@@ -120764,7 +120776,7 @@ index fb363cd..55a557a 100644
 +
 +config PAX_RANDKSTACK
 +	bool "Randomize kernel stack base"
-+	default y if GRKERNSEC_CONFIG_AUTO
++	default y if GRKERNSEC_CONFIG_AUTO && !(GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_VIRTUALBOX)
 +	depends on X86_TSC && X86
 +	help
 +	  By saying Y here the kernel will randomize every task's kernel

diff --git a/3.10.4/0000_README b/3.10.5/0000_README
similarity index 96%
rename from 3.10.4/0000_README
rename to 3.10.5/0000_README
index 6952dd0..17ea8cb 100644
--- a/3.10.4/0000_README
+++ b/3.10.5/0000_README
@@ -2,7 +2,7 @@ README
 -----------------------------------------------------------------------------
 Individual Patch Descriptions:
 -----------------------------------------------------------------------------
-Patch:	4420_grsecurity-2.9.1-3.10.4-201308030031.patch
+Patch:	4420_grsecurity-2.9.1-3.10.5-201308052154.patch
 From:	http://www.grsecurity.net
 Desc:	hardened-sources base patch from upstream grsecurity
 

diff --git a/3.10.4/4420_grsecurity-2.9.1-3.10.4-201308030031.patch b/3.10.5/4420_grsecurity-2.9.1-3.10.5-201308052154.patch
similarity index 99%
rename from 3.10.4/4420_grsecurity-2.9.1-3.10.4-201308030031.patch
rename to 3.10.5/4420_grsecurity-2.9.1-3.10.5-201308052154.patch
index 9cf4026..f2633c1 100644
--- a/3.10.4/4420_grsecurity-2.9.1-3.10.4-201308030031.patch
+++ b/3.10.5/4420_grsecurity-2.9.1-3.10.5-201308052154.patch
@@ -267,7 +267,7 @@ index 2fe6e76..df58221 100644
  
  	pcd.		[PARIDE]
 diff --git a/Makefile b/Makefile
-index b4df9b2..256e7cc 100644
+index f8349d0..563a504 100644
 --- a/Makefile
 +++ b/Makefile
 @@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -16647,10 +16647,10 @@ index 230c8ea..f915130 100644
  	 * HP laptops which use a DSDT reporting as HP/SB400/10000,
  	 * which includes some code which overrides all temperature
 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
-index b44577b..27d8443 100644
+index ec94e11..7fbbec0 100644
 --- a/arch/x86/kernel/acpi/sleep.c
 +++ b/arch/x86/kernel/acpi/sleep.c
-@@ -74,8 +74,12 @@ int acpi_suspend_lowlevel(void)
+@@ -88,8 +88,12 @@ int acpi_suspend_lowlevel(void)
  #else /* CONFIG_64BIT */
  #ifdef CONFIG_SMP
  	stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
@@ -17650,7 +17650,7 @@ index e9a701a..35317d6 100644
  	wmb();
  
 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
-index 726bf96..81f0526 100644
+index ca22b73..9987afe 100644
 --- a/arch/x86/kernel/cpu/mtrr/main.c
 +++ b/arch/x86/kernel/cpu/mtrr/main.c
 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
@@ -20620,7 +20620,7 @@ index 73afd11..d1670f5 100644
 +	.fill PAGE_SIZE_asm - GDT_SIZE,1,0
 +	.endr
 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
-index 321d65e..863089b 100644
+index a836860..bdeb7a5 100644
 --- a/arch/x86/kernel/head_64.S
 +++ b/arch/x86/kernel/head_64.S
 @@ -20,6 +20,8 @@
@@ -20862,25 +20862,23 @@ index 321d65e..863089b 100644
  #include "../../x86/xen/xen-head.S"
 -	
 -	.section .bss, "aw", @nobits
--	.align L1_CACHE_BYTES
--ENTRY(idt_table)
++
++	.section .rodata,"a",@progbits
++NEXT_PAGE(empty_zero_page)
++	.skip PAGE_SIZE
++
+ 	.align PAGE_SIZE
+ ENTRY(idt_table)
 -	.skip IDT_ENTRIES * 16
++	.fill 512,8,0
  
--	.align L1_CACHE_BYTES
--ENTRY(nmi_idt_table)
+ 	.align L1_CACHE_BYTES
+ ENTRY(nmi_idt_table)
 -	.skip IDT_ENTRIES * 16
 -
 -	__PAGE_ALIGNED_BSS
-+	.section .rodata,"a",@progbits
- NEXT_PAGE(empty_zero_page)
- 	.skip PAGE_SIZE
-+
-+	.align L1_CACHE_BYTES
-+ENTRY(idt_table)
-+	.fill 512,8,0
-+
-+	.align L1_CACHE_BYTES
-+ENTRY(nmi_idt_table)
+-NEXT_PAGE(empty_zero_page)
+-	.skip PAGE_SIZE
 +	.fill 512,8,0
 diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
 index 0fa6912..37fce70 100644
@@ -35636,7 +35634,7 @@ index e913d32..4d9b351 100644
  		if (IS_GEN6(dev) || IS_GEN7(dev)) {
  			seq_printf(m,
 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
-index 3b315ba..aac280f 100644
+index f968590..19115e35 100644
 --- a/drivers/gpu/drm/i915/i915_dma.c
 +++ b/drivers/gpu/drm/i915/i915_dma.c
 @@ -1259,7 +1259,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
@@ -35649,10 +35647,10 @@ index 3b315ba..aac280f 100644
  	return can_switch;
  }
 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
-index 9669a0b..bb65176 100644
+index 47d8b68..52f5d8d 100644
 --- a/drivers/gpu/drm/i915/i915_drv.h
 +++ b/drivers/gpu/drm/i915/i915_drv.h
-@@ -915,7 +915,7 @@ typedef struct drm_i915_private {
+@@ -916,7 +916,7 @@ typedef struct drm_i915_private {
  	drm_dma_handle_t *status_page_dmah;
  	struct resource mch_res;
  
@@ -35661,7 +35659,7 @@ index 9669a0b..bb65176 100644
  
  	/* protects the irq masks */
  	spinlock_t irq_lock;
-@@ -1811,7 +1811,7 @@ extern struct i2c_adapter *intel_gmbus_get_adapter(
+@@ -1813,7 +1813,7 @@ extern struct i2c_adapter *intel_gmbus_get_adapter(
  		struct drm_i915_private *dev_priv, unsigned port);
  extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
  extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
@@ -35825,10 +35823,10 @@ index e5e32869..1678f36 100644
  	iir = I915_READ(IIR);
  
 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
-index 56746dc..b5a214f 100644
+index e1f4e6e..c94a4b3 100644
 --- a/drivers/gpu/drm/i915/intel_display.c
 +++ b/drivers/gpu/drm/i915/intel_display.c
-@@ -8919,13 +8919,13 @@ struct intel_quirk {
+@@ -8933,13 +8933,13 @@ struct intel_quirk {
  	int subsystem_vendor;
  	int subsystem_device;
  	void (*hook)(struct drm_device *dev);
@@ -35844,7 +35842,7 @@ index 56746dc..b5a214f 100644
  
  static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
  {
-@@ -8933,18 +8933,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
+@@ -8947,18 +8947,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
  	return 1;
  }
  
@@ -38401,10 +38399,10 @@ index 5a2c754..0fa55db 100644
  
  	seq_printf(seq, "\n");
 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
-index aa04f02..2a1309e 100644
+index 81a79b7..87a0f73 100644
 --- a/drivers/md/dm-ioctl.c
 +++ b/drivers/md/dm-ioctl.c
-@@ -1694,7 +1694,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
+@@ -1697,7 +1697,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
  	    cmd == DM_LIST_VERSIONS_CMD)
  		return 0;
  
@@ -38567,7 +38565,7 @@ index 60bce43..9b997d0 100644
  	pmd->bl_info.value_type.inc = data_block_inc;
  	pmd->bl_info.value_type.dec = data_block_dec;
 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
-index d5370a9..8761bbc 100644
+index 33f2010..23fb84c 100644
 --- a/drivers/md/dm.c
 +++ b/drivers/md/dm.c
 @@ -169,9 +169,9 @@ struct mapped_device {
@@ -38582,7 +38580,7 @@ index d5370a9..8761bbc 100644
  	struct list_head uevent_list;
  	spinlock_t uevent_lock; /* Protect access to uevent_list */
  
-@@ -1877,8 +1877,8 @@ static struct mapped_device *alloc_dev(int minor)
+@@ -1884,8 +1884,8 @@ static struct mapped_device *alloc_dev(int minor)
  	rwlock_init(&md->map_lock);
  	atomic_set(&md->holders, 1);
  	atomic_set(&md->open_count, 0);
@@ -38593,7 +38591,7 @@ index d5370a9..8761bbc 100644
  	INIT_LIST_HEAD(&md->uevent_list);
  	spin_lock_init(&md->uevent_lock);
  
-@@ -2026,7 +2026,7 @@ static void event_callback(void *context)
+@@ -2033,7 +2033,7 @@ static void event_callback(void *context)
  
  	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
  
@@ -38602,7 +38600,7 @@ index d5370a9..8761bbc 100644
  	wake_up(&md->eventq);
  }
  
-@@ -2683,18 +2683,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
+@@ -2690,18 +2690,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
  
  uint32_t dm_next_uevent_seq(struct mapped_device *md)
  {
@@ -38625,7 +38623,7 @@ index d5370a9..8761bbc 100644
  
  void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
 diff --git a/drivers/md/md.c b/drivers/md/md.c
-index 9b82377..6b6922d 100644
+index 51f0345..c77810e 100644
 --- a/drivers/md/md.c
 +++ b/drivers/md/md.c
 @@ -234,10 +234,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
@@ -38775,7 +38773,7 @@ index 3e6d115..ffecdeb 100644
  /*----------------------------------------------------------------*/
  
 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
-index 6e17f81..140f717 100644
+index 6f48244..7d29145 100644
 --- a/drivers/md/raid1.c
 +++ b/drivers/md/raid1.c
 @@ -1822,7 +1822,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
@@ -38787,7 +38785,7 @@ index 6e17f81..140f717 100644
  		}
  		sectors -= s;
  		sect += s;
-@@ -2042,7 +2042,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
+@@ -2049,7 +2049,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
  			    test_bit(In_sync, &rdev->flags)) {
  				if (r1_sync_page_io(rdev, sect, s,
  						    conf->tmppage, READ)) {
@@ -38797,7 +38795,7 @@ index 6e17f81..140f717 100644
  					       "md/raid1:%s: read error corrected "
  					       "(%d sectors at %llu on %s)\n",
 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
-index d61eb7e..adfd00a 100644
+index 081bb33..3c4b287 100644
 --- a/drivers/md/raid10.c
 +++ b/drivers/md/raid10.c
 @@ -1940,7 +1940,7 @@ static void end_sync_read(struct bio *bio, int error)
@@ -38809,7 +38807,7 @@ index d61eb7e..adfd00a 100644
  			   &conf->mirrors[d].rdev->corrected_errors);
  
  	/* for reconstruct, we always reschedule after a read.
-@@ -2292,7 +2292,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
+@@ -2298,7 +2298,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
  {
  	struct timespec cur_time_mon;
  	unsigned long hours_since_last;
@@ -38818,7 +38816,7 @@ index d61eb7e..adfd00a 100644
  
  	ktime_get_ts(&cur_time_mon);
  
-@@ -2314,9 +2314,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
+@@ -2320,9 +2320,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
  	 * overflowing the shift of read_errors by hours_since_last.
  	 */
  	if (hours_since_last >= 8 * sizeof(read_errors))
@@ -38830,7 +38828,7 @@ index d61eb7e..adfd00a 100644
  }
  
  static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
-@@ -2370,8 +2370,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
+@@ -2376,8 +2376,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
  		return;
  
  	check_decay_read_errors(mddev, rdev);
@@ -38841,7 +38839,7 @@ index d61eb7e..adfd00a 100644
  		char b[BDEVNAME_SIZE];
  		bdevname(rdev->bdev, b);
  
-@@ -2379,7 +2379,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
+@@ -2385,7 +2385,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
  		       "md/raid10:%s: %s: Raid device exceeded "
  		       "read_error threshold [cur %d:max %d]\n",
  		       mdname(mddev), b,
@@ -38850,7 +38848,7 @@ index d61eb7e..adfd00a 100644
  		printk(KERN_NOTICE
  		       "md/raid10:%s: %s: Failing raid device\n",
  		       mdname(mddev), b);
-@@ -2534,7 +2534,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
+@@ -2540,7 +2540,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
  					       sect +
  					       choose_data_offset(r10_bio, rdev)),
  				       bdevname(rdev->bdev, b));
@@ -38860,7 +38858,7 @@ index d61eb7e..adfd00a 100644
  
  			rdev_dec_pending(rdev, mddev);
 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
-index 05e4a10..48fbe37 100644
+index a35b846..e295c6d 100644
 --- a/drivers/md/raid5.c
 +++ b/drivers/md/raid5.c
 @@ -1764,21 +1764,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
@@ -40348,6 +40346,37 @@ index b0c3de9..fc5857e 100644
  	} else {
  		return -EIO;
  	}
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+index 6acf82b..14b097e 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+@@ -206,10 +206,10 @@ int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter)
+ 	if (err) {
+ 		dev_info(&adapter->pdev->dev,
+ 			 "Failed to set driver version in firmware\n");
+-		return -EIO;
++		err = -EIO;
+ 	}
+-
+-	return 0;
++	qlcnic_free_mbx_args(&cmd);
++	return err;
+ }
+ 
+ int
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+index d3f8797..82a03d3 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+@@ -262,7 +262,7 @@ void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
+ 
+ 	mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
+ 	mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
+-	memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
++	memcpy(mac_req->mac_addr, uaddr, ETH_ALEN);
+ 
+ 	vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
+ 	vlan_req->vlan_id = cpu_to_le16(vlan_id);
 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
 index 393f961..d343034 100644
 --- a/drivers/net/ethernet/realtek/r8169.c
@@ -43062,10 +43091,10 @@ index f379c7f..e8fc69c 100644
  
  	transport_setup_device(&rport->dev);
 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
-index 1b1125e..31a2019 100644
+index 610417e..1544fa9 100644
 --- a/drivers/scsi/sd.c
 +++ b/drivers/scsi/sd.c
-@@ -2936,7 +2936,7 @@ static int sd_probe(struct device *dev)
+@@ -2928,7 +2928,7 @@ static int sd_probe(struct device *dev)
  	sdkp->disk = gd;
  	sdkp->index = index;
  	atomic_set(&sdkp->openers, 0);
@@ -44412,7 +44441,7 @@ index 1afe192..73d2c20 100644
  	kfree(ld);
  	raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
 diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
-index 121aeb9..0d2c4b9 100644
+index f597e88..b7f68ed 100644
 --- a/drivers/tty/tty_port.c
 +++ b/drivers/tty/tty_port.c
 @@ -232,7 +232,7 @@ void tty_port_hangup(struct tty_port *port)
@@ -44424,7 +44453,7 @@ index 121aeb9..0d2c4b9 100644
  	port->flags &= ~ASYNC_NORMAL_ACTIVE;
  	tty = port->tty;
  	if (tty)
-@@ -391,7 +391,7 @@ int tty_port_block_til_ready(struct tty_port *port,
+@@ -390,7 +390,7 @@ int tty_port_block_til_ready(struct tty_port *port,
  	/* The port lock protects the port counts */
  	spin_lock_irqsave(&port->lock, flags);
  	if (!tty_hung_up_p(filp))
@@ -44433,7 +44462,7 @@ index 121aeb9..0d2c4b9 100644
  	port->blocked_open++;
  	spin_unlock_irqrestore(&port->lock, flags);
  
-@@ -433,7 +433,7 @@ int tty_port_block_til_ready(struct tty_port *port,
+@@ -432,7 +432,7 @@ int tty_port_block_til_ready(struct tty_port *port,
  	   we must not mess that up further */
  	spin_lock_irqsave(&port->lock, flags);
  	if (!tty_hung_up_p(filp))
@@ -44442,7 +44471,7 @@ index 121aeb9..0d2c4b9 100644
  	port->blocked_open--;
  	if (retval == 0)
  		port->flags |= ASYNC_NORMAL_ACTIVE;
-@@ -467,19 +467,19 @@ int tty_port_close_start(struct tty_port *port,
+@@ -466,19 +466,19 @@ int tty_port_close_start(struct tty_port *port,
  		return 0;
  	}
  
@@ -44469,7 +44498,7 @@ index 121aeb9..0d2c4b9 100644
  		spin_unlock_irqrestore(&port->lock, flags);
  		if (port->ops->drop)
  			port->ops->drop(port);
-@@ -565,7 +565,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
+@@ -564,7 +564,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
  {
  	spin_lock_irq(&port->lock);
  	if (!tty_hung_up_p(filp))
@@ -48683,7 +48712,7 @@ index bce8769..7fc7544 100644
  				fd_offset + ex.a_text);
  		if (error != N_DATADDR(ex)) {
 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
-index f8a0b0e..8c841c3 100644
+index f8a0b0e..6f036ed 100644
 --- a/fs/binfmt_elf.c
 +++ b/fs/binfmt_elf.c
 @@ -34,6 +34,7 @@
@@ -49497,7 +49526,7 @@ index f8a0b0e..8c841c3 100644
 +	unsigned long oldflags;
 +	bool is_textrel_rw, is_textrel_rx, is_relro;
 +
-+	if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
++	if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT) || !vma->vm_file)
 +		return;
 +
 +	oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
@@ -49505,15 +49534,15 @@ index f8a0b0e..8c841c3 100644
 +
 +#ifdef CONFIG_PAX_ELFRELOCS
 +	/* possible TEXTREL */
-+	is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
-+	is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
++	is_textrel_rw = !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
++	is_textrel_rx = vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
 +#else
 +	is_textrel_rw = false;
 +	is_textrel_rx = false;
 +#endif
 +
 +	/* possible RELRO */
-+	is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
++	is_relro = vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
 +
 +	if (!is_textrel_rw && !is_textrel_rx && !is_relro)
 +		return;
@@ -54516,10 +54545,10 @@ index e76244e..9fe8f2f1 100644
  	/* Don't cache excessive amounts of data and XDR failures */
  	if (!statp || len > (256 >> 2)) {
 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
-index 84ce601..633d226 100644
+index baf149a..76b86ad 100644
 --- a/fs/nfsd/vfs.c
 +++ b/fs/nfsd/vfs.c
-@@ -939,7 +939,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
+@@ -940,7 +940,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
  	} else {
  		oldfs = get_fs();
  		set_fs(KERNEL_DS);
@@ -54528,7 +54557,7 @@ index 84ce601..633d226 100644
  		set_fs(oldfs);
  	}
  
-@@ -1026,7 +1026,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
+@@ -1027,7 +1027,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
  
  	/* Write the data. */
  	oldfs = get_fs(); set_fs(KERNEL_DS);
@@ -54537,7 +54566,7 @@ index 84ce601..633d226 100644
  	set_fs(oldfs);
  	if (host_err < 0)
  		goto out_nfserr;
-@@ -1572,7 +1572,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
+@@ -1573,7 +1573,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
  	 */
  
  	oldfs = get_fs(); set_fs(KERNEL_DS);
@@ -56975,63 +57004,6 @@ index 04ce1ac..a13dd1e 100644
  
  	generic_fillattr(inode, stat);
  	return 0;
-diff --git a/fs/super.c b/fs/super.c
-index 7465d43..68307c0 100644
---- a/fs/super.c
-+++ b/fs/super.c
-@@ -336,19 +336,19 @@ EXPORT_SYMBOL(deactivate_super);
-  *	and want to turn it into a full-blown active reference.  grab_super()
-  *	is called with sb_lock held and drops it.  Returns 1 in case of
-  *	success, 0 if we had failed (superblock contents was already dead or
-- *	dying when grab_super() had been called).
-+ *	dying when grab_super() had been called).  Note that this is only
-+ *	called for superblocks not in rundown mode (== ones still on ->fs_supers
-+ *	of their type), so increment of ->s_count is OK here.
-  */
- static int grab_super(struct super_block *s) __releases(sb_lock)
- {
--	if (atomic_inc_not_zero(&s->s_active)) {
--		spin_unlock(&sb_lock);
--		return 1;
--	}
--	/* it's going away */
- 	s->s_count++;
- 	spin_unlock(&sb_lock);
--	/* wait for it to die */
- 	down_write(&s->s_umount);
-+	if ((s->s_flags & MS_BORN) && atomic_inc_not_zero(&s->s_active)) {
-+		put_super(s);
-+		return 1;
-+	}
- 	up_write(&s->s_umount);
- 	put_super(s);
- 	return 0;
-@@ -463,11 +463,6 @@ retry:
- 				destroy_super(s);
- 				s = NULL;
- 			}
--			down_write(&old->s_umount);
--			if (unlikely(!(old->s_flags & MS_BORN))) {
--				deactivate_locked_super(old);
--				goto retry;
--			}
- 			return old;
- 		}
- 	}
-@@ -660,10 +655,10 @@ restart:
- 		if (hlist_unhashed(&sb->s_instances))
- 			continue;
- 		if (sb->s_bdev == bdev) {
--			if (grab_super(sb)) /* drops sb_lock */
--				return sb;
--			else
-+			if (!grab_super(sb))
- 				goto restart;
-+			up_write(&sb->s_umount);
-+			return sb;
- 		}
- 	}
- 	spin_unlock(&sb_lock);
 diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
 index 15c68f9..36a8b3e 100644
 --- a/fs/sysfs/bin.c
@@ -58499,7 +58471,7 @@ index 0000000..36845aa
 +endif
 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
 new file mode 100644
-index 0000000..6907918
+index 0000000..c0793fd
 --- /dev/null
 +++ b/grsecurity/gracl.c
 @@ -0,0 +1,4178 @@
@@ -61757,7 +61729,7 @@ index 0000000..6907918
 +	unsigned char *sprole_sum = NULL;
 +	int error = 0;
 +	int error2 = 0;
-+	size_t req_count;
++	size_t req_count = 0;
 +
 +	mutex_lock(&gr_dev_mutex);
 +
@@ -76398,7 +76370,7 @@ index 7bb73f9..d7978ed 100644
  {
  	struct signal_struct *sig = current->signal;
 diff --git a/kernel/fork.c b/kernel/fork.c
-index 987b28a..e0102b2 100644
+index 987b28a..11ee8a5 100644
 --- a/kernel/fork.c
 +++ b/kernel/fork.c
 @@ -319,7 +319,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
@@ -76684,31 +76656,7 @@ index 987b28a..e0102b2 100644
  	return ERR_PTR(retval);
  }
  
-@@ -1579,6 +1639,23 @@ long do_fork(unsigned long clone_flags,
- 			return -EINVAL;
- 	}
- 
-+#ifdef CONFIG_GRKERNSEC
-+	if (clone_flags & CLONE_NEWUSER) {
-+		/* 
-+		 * This doesn't really inspire confidence:
-+		 * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
-+		 * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
-+		 * Increases kernel attack surface in areas developers
-+		 * previously cared little about ("low importance due
-+		 * to requiring "root" capability")
-+		 * To be removed when this code receives *proper* review
-+		 */
-+		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
-+				!capable(CAP_SETGID))
-+			return -EPERM;
-+	}
-+#endif
-+
- 	/*
- 	 * Determine whether and which event to report to ptracer.  When
- 	 * called from kernel_thread or CLONE_UNTRACED is explicitly
-@@ -1613,6 +1690,8 @@ long do_fork(unsigned long clone_flags,
+@@ -1613,6 +1673,8 @@ long do_fork(unsigned long clone_flags,
  		if (clone_flags & CLONE_PARENT_SETTID)
  			put_user(nr, parent_tidptr);
  
@@ -76717,7 +76665,7 @@ index 987b28a..e0102b2 100644
  		if (clone_flags & CLONE_VFORK) {
  			p->vfork_done = &vfork;
  			init_completion(&vfork);
-@@ -1723,7 +1802,7 @@ void __init proc_caches_init(void)
+@@ -1723,7 +1785,7 @@ void __init proc_caches_init(void)
  	mm_cachep = kmem_cache_create("mm_struct",
  			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
  			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
@@ -76726,7 +76674,7 @@ index 987b28a..e0102b2 100644
  	mmap_init();
  	nsproxy_cache_init();
  }
-@@ -1763,7 +1842,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
+@@ -1763,7 +1825,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
  		return 0;
  
  	/* don't need lock here; in the worst case we'll do useless copy */
@@ -76735,7 +76683,7 @@ index 987b28a..e0102b2 100644
  		return 0;
  
  	*new_fsp = copy_fs_struct(fs);
-@@ -1875,7 +1954,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
+@@ -1875,7 +1937,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
  			fs = current->fs;
  			spin_lock(&fs->lock);
  			current->fs = new_fs;
@@ -81051,10 +80999,10 @@ index e444ff8..438b8f4 100644
  		*data_page = bpage;
  
 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
-index 0b936d8..306a7eb 100644
+index f7bc3ce..b8ef9b5 100644
 --- a/kernel/trace/trace.c
 +++ b/kernel/trace/trace.c
-@@ -3302,7 +3302,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
+@@ -3303,7 +3303,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
  	return 0;
  }
  
@@ -81077,10 +81025,10 @@ index 51b4448..7be601f 100644
  /*
   * Normal trace_printk() and friends allocates special buffers
 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
-index 6dfd48b..a6d88d0 100644
+index 6953263..2004e16 100644
 --- a/kernel/trace/trace_events.c
 +++ b/kernel/trace/trace_events.c
-@@ -1731,10 +1731,6 @@ static LIST_HEAD(ftrace_module_file_list);
+@@ -1748,10 +1748,6 @@ static LIST_HEAD(ftrace_module_file_list);
  struct ftrace_module_file_ops {
  	struct list_head		list;
  	struct module			*mod;
@@ -81091,7 +81039,7 @@ index 6dfd48b..a6d88d0 100644
  };
  
  static struct ftrace_module_file_ops *
-@@ -1775,17 +1771,12 @@ trace_create_file_ops(struct module *mod)
+@@ -1792,17 +1788,12 @@ trace_create_file_ops(struct module *mod)
  
  	file_ops->mod = mod;
  
@@ -81115,7 +81063,7 @@ index 6dfd48b..a6d88d0 100644
  
  	list_add(&file_ops->list, &ftrace_module_file_list);
  
-@@ -1878,8 +1869,8 @@ __trace_add_new_mod_event(struct ftrace_event_call *call,
+@@ -1895,8 +1886,8 @@ __trace_add_new_mod_event(struct ftrace_event_call *call,
  			  struct ftrace_module_file_ops *file_ops)
  {
  	return __trace_add_new_event(call, tr,
@@ -81214,10 +81162,55 @@ index b20428c..4845a10 100644
  
  	local_irq_save(flags);
 diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
-index d8c30db..e065e89 100644
+index d8c30db..f2f6af5 100644
 --- a/kernel/user_namespace.c
 +++ b/kernel/user_namespace.c
-@@ -853,7 +853,7 @@ static int userns_install(struct nsproxy *nsproxy, void *ns)
+@@ -79,6 +79,21 @@ int create_user_ns(struct cred *new)
+ 	    !kgid_has_mapping(parent_ns, group))
+ 		return -EPERM;
+ 
++#ifdef CONFIG_GRKERNSEC
++	/*
++	 * This doesn't really inspire confidence:
++	 * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
++	 * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
++	 * Increases kernel attack surface in areas developers
++	 * previously cared little about ("low importance due
++	 * to requiring "root" capability")
++	 * To be removed when this code receives *proper* review
++	 */
++	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
++			!capable(CAP_SETGID))
++		return -EPERM;
++#endif
++
+ 	ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
+ 	if (!ns)
+ 		return -ENOMEM;
+@@ -105,6 +120,7 @@ int create_user_ns(struct cred *new)
+ int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
+ {
+ 	struct cred *cred;
++	int err;
+ 
+ 	if (!(unshare_flags & CLONE_NEWUSER))
+ 		return 0;
+@@ -113,8 +129,12 @@ int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
+ 	if (!cred)
+ 		return -ENOMEM;
+ 
+-	*new_cred = cred;
+-	return create_user_ns(cred);
++	err = create_user_ns(cred);
++	if (err)
++		put_cred(cred);
++	else
++		*new_cred = cred;
++	return err;
+ }
+ 
+ void free_user_ns(struct user_namespace *ns)
+@@ -853,7 +873,7 @@ static int userns_install(struct nsproxy *nsproxy, void *ns)
  	if (atomic_read(&current->mm->mm_users) > 1)
  		return -EINVAL;
  
@@ -82419,7 +82412,7 @@ index ceb0c7f..b2b8e94 100644
  	} else {
  		pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
 diff --git a/mm/memory.c b/mm/memory.c
-index 61a262b..77a94d1 100644
+index 5e50800..c47ba9a 100644
 --- a/mm/memory.c
 +++ b/mm/memory.c
 @@ -429,6 +429,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
@@ -82456,7 +82449,7 @@ index 61a262b..77a94d1 100644
  }
  
  /*
-@@ -1635,12 +1641,6 @@ no_page_table:
+@@ -1638,12 +1644,6 @@ no_page_table:
  	return page;
  }
  
@@ -82469,7 +82462,7 @@ index 61a262b..77a94d1 100644
  /**
   * __get_user_pages() - pin user pages in memory
   * @tsk:	task_struct of target task
-@@ -1727,10 +1727,10 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+@@ -1730,10 +1730,10 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
  
  	i = 0;
  
@@ -82482,7 +82475,7 @@ index 61a262b..77a94d1 100644
  		if (!vma && in_gate_area(mm, start)) {
  			unsigned long pg = start & PAGE_MASK;
  			pgd_t *pgd;
-@@ -1779,7 +1779,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+@@ -1782,7 +1782,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
  			goto next_page;
  		}
  
@@ -82491,7 +82484,7 @@ index 61a262b..77a94d1 100644
  		    (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
  		    !(vm_flags & vma->vm_flags))
  			return i ? : -EFAULT;
-@@ -1808,11 +1808,6 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+@@ -1811,11 +1811,6 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
  				int ret;
  				unsigned int fault_flags = 0;
  
@@ -82503,7 +82496,7 @@ index 61a262b..77a94d1 100644
  				if (foll_flags & FOLL_WRITE)
  					fault_flags |= FAULT_FLAG_WRITE;
  				if (nonblocking)
-@@ -1892,7 +1887,7 @@ next_page:
+@@ -1895,7 +1890,7 @@ next_page:
  			start += page_increm * PAGE_SIZE;
  			nr_pages -= page_increm;
  		} while (nr_pages && start < vma->vm_end);
@@ -82512,7 +82505,7 @@ index 61a262b..77a94d1 100644
  	return i;
  }
  EXPORT_SYMBOL(__get_user_pages);
-@@ -2099,6 +2094,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
+@@ -2102,6 +2097,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
  	page_add_file_rmap(page);
  	set_pte_at(mm, addr, pte, mk_pte(page, prot));
  
@@ -82523,7 +82516,7 @@ index 61a262b..77a94d1 100644
  	retval = 0;
  	pte_unmap_unlock(pte, ptl);
  	return retval;
-@@ -2143,9 +2142,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
+@@ -2146,9 +2145,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
  	if (!page_count(page))
  		return -EINVAL;
  	if (!(vma->vm_flags & VM_MIXEDMAP)) {
@@ -82545,7 +82538,7 @@ index 61a262b..77a94d1 100644
  	}
  	return insert_page(vma, addr, page, vma->vm_page_prot);
  }
-@@ -2228,6 +2239,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+@@ -2231,6 +2242,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
  			unsigned long pfn)
  {
  	BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
@@ -82553,7 +82546,7 @@ index 61a262b..77a94d1 100644
  
  	if (addr < vma->vm_start || addr >= vma->vm_end)
  		return -EFAULT;
-@@ -2475,7 +2487,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
+@@ -2478,7 +2490,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
  
  	BUG_ON(pud_huge(*pud));
  
@@ -82564,7 +82557,7 @@ index 61a262b..77a94d1 100644
  	if (!pmd)
  		return -ENOMEM;
  	do {
-@@ -2495,7 +2509,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
+@@ -2498,7 +2512,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
  	unsigned long next;
  	int err;
  
@@ -82575,7 +82568,7 @@ index 61a262b..77a94d1 100644
  	if (!pud)
  		return -ENOMEM;
  	do {
-@@ -2583,6 +2599,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
+@@ -2586,6 +2602,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
  		copy_user_highpage(dst, src, va, vma);
  }
  
@@ -82762,7 +82755,7 @@ index 61a262b..77a94d1 100644
  /*
   * This routine handles present pages, when users try to write
   * to a shared page. It is done by copying the page to a new address
-@@ -2799,6 +2995,12 @@ gotten:
+@@ -2802,6 +2998,12 @@ gotten:
  	 */
  	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
  	if (likely(pte_same(*page_table, orig_pte))) {
@@ -82775,7 +82768,7 @@ index 61a262b..77a94d1 100644
  		if (old_page) {
  			if (!PageAnon(old_page)) {
  				dec_mm_counter_fast(mm, MM_FILEPAGES);
-@@ -2850,6 +3052,10 @@ gotten:
+@@ -2853,6 +3055,10 @@ gotten:
  			page_remove_rmap(old_page);
  		}
  
@@ -82786,7 +82779,7 @@ index 61a262b..77a94d1 100644
  		/* Free the old page.. */
  		new_page = old_page;
  		ret |= VM_FAULT_WRITE;
-@@ -3125,6 +3331,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3128,6 +3334,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
  	swap_free(entry);
  	if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
  		try_to_free_swap(page);
@@ -82798,7 +82791,7 @@ index 61a262b..77a94d1 100644
  	unlock_page(page);
  	if (page != swapcache) {
  		/*
-@@ -3148,6 +3359,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3151,6 +3362,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
  
  	/* No need to invalidate - it was non-present before */
  	update_mmu_cache(vma, address, page_table);
@@ -82810,7 +82803,7 @@ index 61a262b..77a94d1 100644
  unlock:
  	pte_unmap_unlock(page_table, ptl);
  out:
-@@ -3167,40 +3383,6 @@ out_release:
+@@ -3170,40 +3386,6 @@ out_release:
  }
  
  /*
@@ -82851,7 +82844,7 @@ index 61a262b..77a94d1 100644
   * We enter with non-exclusive mmap_sem (to exclude vma changes,
   * but allow concurrent faults), and pte mapped but not yet locked.
   * We return with mmap_sem still held, but pte unmapped and unlocked.
-@@ -3209,27 +3391,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3212,27 +3394,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
  		unsigned long address, pte_t *page_table, pmd_t *pmd,
  		unsigned int flags)
  {
@@ -82884,7 +82877,7 @@ index 61a262b..77a94d1 100644
  	if (unlikely(anon_vma_prepare(vma)))
  		goto oom;
  	page = alloc_zeroed_user_highpage_movable(vma, address);
-@@ -3253,6 +3431,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3256,6 +3434,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
  	if (!pte_none(*page_table))
  		goto release;
  
@@ -82896,7 +82889,7 @@ index 61a262b..77a94d1 100644
  	inc_mm_counter_fast(mm, MM_ANONPAGES);
  	page_add_new_anon_rmap(page, vma, address);
  setpte:
-@@ -3260,6 +3443,12 @@ setpte:
+@@ -3263,6 +3446,12 @@ setpte:
  
  	/* No need to invalidate - it was non-present before */
  	update_mmu_cache(vma, address, page_table);
@@ -82909,7 +82902,7 @@ index 61a262b..77a94d1 100644
  unlock:
  	pte_unmap_unlock(page_table, ptl);
  	return 0;
-@@ -3403,6 +3592,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3406,6 +3595,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  	 */
  	/* Only go through if we didn't race with anybody else... */
  	if (likely(pte_same(*page_table, orig_pte))) {
@@ -82922,7 +82915,7 @@ index 61a262b..77a94d1 100644
  		flush_icache_page(vma, page);
  		entry = mk_pte(page, vma->vm_page_prot);
  		if (flags & FAULT_FLAG_WRITE)
-@@ -3422,6 +3617,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3425,6 +3620,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  
  		/* no need to invalidate: a not-present page won't be cached */
  		update_mmu_cache(vma, address, page_table);
@@ -82937,7 +82930,7 @@ index 61a262b..77a94d1 100644
  	} else {
  		if (cow_page)
  			mem_cgroup_uncharge_page(cow_page);
-@@ -3743,6 +3946,12 @@ int handle_pte_fault(struct mm_struct *mm,
+@@ -3746,6 +3949,12 @@ int handle_pte_fault(struct mm_struct *mm,
  		if (flags & FAULT_FLAG_WRITE)
  			flush_tlb_fix_spurious_fault(vma, address);
  	}
@@ -82950,7 +82943,7 @@ index 61a262b..77a94d1 100644
  unlock:
  	pte_unmap_unlock(pte, ptl);
  	return 0;
-@@ -3759,6 +3968,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3762,6 +3971,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  	pmd_t *pmd;
  	pte_t *pte;
  
@@ -82961,7 +82954,7 @@ index 61a262b..77a94d1 100644
  	__set_current_state(TASK_RUNNING);
  
  	count_vm_event(PGFAULT);
-@@ -3770,6 +3983,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -3773,6 +3986,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  	if (unlikely(is_vm_hugetlb_page(vma)))
  		return hugetlb_fault(mm, vma, address, flags);
  
@@ -82996,7 +82989,7 @@ index 61a262b..77a94d1 100644
  retry:
  	pgd = pgd_offset(mm, address);
  	pud = pud_alloc(mm, pgd, address);
-@@ -3868,6 +4109,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+@@ -3871,6 +4112,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
  	spin_unlock(&mm->page_table_lock);
  	return 0;
  }
@@ -83020,7 +83013,7 @@ index 61a262b..77a94d1 100644
  #endif /* __PAGETABLE_PUD_FOLDED */
  
  #ifndef __PAGETABLE_PMD_FOLDED
-@@ -3898,6 +4156,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
+@@ -3901,6 +4159,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
  	spin_unlock(&mm->page_table_lock);
  	return 0;
  }
@@ -83051,7 +83044,7 @@ index 61a262b..77a94d1 100644
  #endif /* __PAGETABLE_PMD_FOLDED */
  
  #if !defined(__HAVE_ARCH_GATE_AREA)
-@@ -3911,7 +4193,7 @@ static int __init gate_vma_init(void)
+@@ -3914,7 +4196,7 @@ static int __init gate_vma_init(void)
  	gate_vma.vm_start = FIXADDR_USER_START;
  	gate_vma.vm_end = FIXADDR_USER_END;
  	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
@@ -83060,7 +83053,7 @@ index 61a262b..77a94d1 100644
  
  	return 0;
  }
-@@ -4045,8 +4327,8 @@ out:
+@@ -4048,8 +4330,8 @@ out:
  	return ret;
  }
  
@@ -83071,7 +83064,7 @@ index 61a262b..77a94d1 100644
  {
  	resource_size_t phys_addr;
  	unsigned long prot = 0;
-@@ -4071,8 +4353,8 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
+@@ -4074,8 +4356,8 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
   * Access another process' address space as given in mm.  If non-NULL, use the
   * given task for page fault accounting.
   */
@@ -83082,7 +83075,7 @@ index 61a262b..77a94d1 100644
  {
  	struct vm_area_struct *vma;
  	void *old_buf = buf;
-@@ -4080,7 +4362,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+@@ -4083,7 +4365,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
  	down_read(&mm->mmap_sem);
  	/* ignore errors, just check how much was successfully transferred */
  	while (len) {
@@ -83091,7 +83084,7 @@ index 61a262b..77a94d1 100644
  		void *maddr;
  		struct page *page = NULL;
  
-@@ -4139,8 +4421,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+@@ -4142,8 +4424,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
   *
   * The caller must hold a reference on @mm.
   */
@@ -83102,7 +83095,7 @@ index 61a262b..77a94d1 100644
  {
  	return __access_remote_vm(NULL, mm, addr, buf, len, write);
  }
-@@ -4150,11 +4432,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+@@ -4153,11 +4435,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
   * Source/target buffer must be kernel space,
   * Do not walk the page table directly, use get_user_pages
   */
@@ -83118,7 +83111,7 @@ index 61a262b..77a94d1 100644
  	mm = get_task_mm(tsk);
  	if (!mm)
 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
-index 7431001..0f8344e 100644
+index 4baf12e..5497066 100644
 --- a/mm/mempolicy.c
 +++ b/mm/mempolicy.c
 @@ -708,6 +708,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
@@ -83132,11 +83125,7 @@ index 7431001..0f8344e 100644
  	vma = find_vma(mm, start);
  	if (!vma || vma->vm_start > start)
  		return -EFAULT;
-@@ -744,9 +748,20 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
- 			if (err)
- 				goto out;
- 		}
-+
+@@ -751,6 +755,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
  		err = vma_replace_policy(vma, new_pol);
  		if (err)
  			goto out;
@@ -83153,7 +83142,7 @@ index 7431001..0f8344e 100644
  	}
  
   out:
-@@ -1202,6 +1217,17 @@ static long do_mbind(unsigned long start, unsigned long len,
+@@ -1206,6 +1220,17 @@ static long do_mbind(unsigned long start, unsigned long len,
  
  	if (end < start)
  		return -EINVAL;
@@ -83171,7 +83160,7 @@ index 7431001..0f8344e 100644
  	if (end == start)
  		return 0;
  
-@@ -1430,8 +1456,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
+@@ -1434,8 +1459,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
  	 */
  	tcred = __task_cred(task);
  	if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
@@ -83181,7 +83170,7 @@ index 7431001..0f8344e 100644
  		rcu_read_unlock();
  		err = -EPERM;
  		goto out_put;
-@@ -1462,6 +1487,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
+@@ -1466,6 +1490,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
  		goto out;
  	}
  
@@ -83283,7 +83272,7 @@ index 79b7cf7..9944291 100644
  	    capable(CAP_IPC_LOCK))
  		ret = do_mlockall(flags);
 diff --git a/mm/mmap.c b/mm/mmap.c
-index f681e18..623110e 100644
+index 7dbe397..e84c411 100644
 --- a/mm/mmap.c
 +++ b/mm/mmap.c
 @@ -36,6 +36,7 @@
@@ -83532,7 +83521,7 @@ index f681e18..623110e 100644
 +	if (mm->pax_flags & MF_PAX_MPROTECT) {
 +
 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
-+		if (file && (vm_flags & VM_EXEC) && mm->binfmt &&
++		if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
 +		    mm->binfmt->handle_mmap)
 +			mm->binfmt->handle_mmap(file);
 +#endif
@@ -88145,10 +88134,28 @@ index 2e7f194..0fa4d6d 100644
  
  		ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
 diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
-index dfc39d4..0b82c4d 100644
+index dfc39d4..0d4fa52 100644
 --- a/net/ipv4/devinet.c
 +++ b/net/ipv4/devinet.c
-@@ -1529,7 +1529,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
+@@ -771,7 +771,7 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
+ 		ci = nla_data(tb[IFA_CACHEINFO]);
+ 		if (!ci->ifa_valid || ci->ifa_prefered > ci->ifa_valid) {
+ 			err = -EINVAL;
+-			goto errout;
++			goto errout_free;
+ 		}
+ 		*pvalid_lft = ci->ifa_valid;
+ 		*pprefered_lft = ci->ifa_prefered;
+@@ -779,6 +779,8 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
+ 
+ 	return ifa;
+ 
++errout_free:
++	inet_free_ifa(ifa);
+ errout:
+ 	return ERR_PTR(err);
+ }
+@@ -1529,7 +1531,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
  		idx = 0;
  		head = &net->dev_index_head[h];
  		rcu_read_lock();
@@ -88157,7 +88164,7 @@ index dfc39d4..0b82c4d 100644
  			  net->dev_base_seq;
  		hlist_for_each_entry_rcu(dev, head, index_hlist) {
  			if (idx < s_idx)
-@@ -1840,7 +1840,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
+@@ -1840,7 +1842,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
  		idx = 0;
  		head = &net->dev_index_head[h];
  		rcu_read_lock();
@@ -88166,7 +88173,7 @@ index dfc39d4..0b82c4d 100644
  			  net->dev_base_seq;
  		hlist_for_each_entry_rcu(dev, head, index_hlist) {
  			if (idx < s_idx)
-@@ -2065,7 +2065,7 @@ static int ipv4_doint_and_flush(ctl_table *ctl, int write,
+@@ -2065,7 +2067,7 @@ static int ipv4_doint_and_flush(ctl_table *ctl, int write,
  #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
  	DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
  
@@ -88175,7 +88182,7 @@ index dfc39d4..0b82c4d 100644
  	struct ctl_table_header *sysctl_header;
  	struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
  } devinet_sysctl = {
-@@ -2183,7 +2183,7 @@ static __net_init int devinet_init_net(struct net *net)
+@@ -2183,7 +2185,7 @@ static __net_init int devinet_init_net(struct net *net)
  	int err;
  	struct ipv4_devconf *all, *dflt;
  #ifdef CONFIG_SYSCTL
@@ -88184,7 +88191,7 @@ index dfc39d4..0b82c4d 100644
  	struct ctl_table_header *forw_hdr;
  #endif
  
-@@ -2201,7 +2201,7 @@ static __net_init int devinet_init_net(struct net *net)
+@@ -2201,7 +2203,7 @@ static __net_init int devinet_init_net(struct net *net)
  			goto err_alloc_dflt;
  
  #ifdef CONFIG_SYSCTL
@@ -88193,7 +88200,7 @@ index dfc39d4..0b82c4d 100644
  		if (tbl == NULL)
  			goto err_alloc_ctl;
  
-@@ -2221,7 +2221,10 @@ static __net_init int devinet_init_net(struct net *net)
+@@ -2221,7 +2223,10 @@ static __net_init int devinet_init_net(struct net *net)
  		goto err_reg_dflt;
  
  	err = -ENOMEM;
@@ -88205,7 +88212,7 @@ index dfc39d4..0b82c4d 100644
  	if (forw_hdr == NULL)
  		goto err_reg_ctl;
  	net->ipv4.forw_hdr = forw_hdr;
-@@ -2237,8 +2240,7 @@ err_reg_ctl:
+@@ -2237,8 +2242,7 @@ err_reg_ctl:
  err_reg_dflt:
  	__devinet_sysctl_unregister(all);
  err_reg_all:
@@ -91289,6 +91296,18 @@ index f226709..0e735a8 100644
  	_proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
  
  	ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
+diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
+index ca8e0a5..1f9c314 100644
+--- a/net/sched/sch_atm.c
++++ b/net/sched/sch_atm.c
+@@ -605,6 +605,7 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
+ 		struct sockaddr_atmpvc pvc;
+ 		int state;
+ 
++		memset(&pvc, 0, sizeof(pvc));
+ 		pvc.sap_family = AF_ATMPVC;
+ 		pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1;
+ 		pvc.sap_addr.vpi = flow->vcc->vpi;
 diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
 index 391a245..296b3d7 100644
 --- a/net/sctp/ipv6.c
@@ -91906,58 +91925,6 @@ index 8343737..677025e 100644
  		.mode		= 0644,
  		.proc_handler	= read_reset_stat,
  	},
-diff --git a/net/sunrpc/xprtrdma/svc_rdma_marshal.c b/net/sunrpc/xprtrdma/svc_rdma_marshal.c
-index 8d2eddd..65b1462 100644
---- a/net/sunrpc/xprtrdma/svc_rdma_marshal.c
-+++ b/net/sunrpc/xprtrdma/svc_rdma_marshal.c
-@@ -98,6 +98,7 @@ void svc_rdma_rcl_chunk_counts(struct rpcrdma_read_chunk *ch,
-  */
- static u32 *decode_write_list(u32 *va, u32 *vaend)
- {
-+	unsigned long start, end;
- 	int nchunks;
- 
- 	struct rpcrdma_write_array *ary =
-@@ -113,9 +114,12 @@ static u32 *decode_write_list(u32 *va, u32 *vaend)
- 		return NULL;
- 	}
- 	nchunks = ntohl(ary->wc_nchunks);
--	if (((unsigned long)&ary->wc_array[0] +
--	     (sizeof(struct rpcrdma_write_chunk) * nchunks)) >
--	    (unsigned long)vaend) {
-+
-+	start = (unsigned long)&ary->wc_array[0];
-+	end = (unsigned long)vaend;
-+	if (nchunks < 0 ||
-+	    nchunks > (SIZE_MAX - start) / sizeof(struct rpcrdma_write_chunk) ||
-+	    (start + (sizeof(struct rpcrdma_write_chunk) * nchunks)) > end) {
- 		dprintk("svcrdma: ary=%p, wc_nchunks=%d, vaend=%p\n",
- 			ary, nchunks, vaend);
- 		return NULL;
-@@ -129,6 +133,7 @@ static u32 *decode_write_list(u32 *va, u32 *vaend)
- 
- static u32 *decode_reply_array(u32 *va, u32 *vaend)
- {
-+	unsigned long start, end;
- 	int nchunks;
- 	struct rpcrdma_write_array *ary =
- 		(struct rpcrdma_write_array *)va;
-@@ -143,9 +148,12 @@ static u32 *decode_reply_array(u32 *va, u32 *vaend)
- 		return NULL;
- 	}
- 	nchunks = ntohl(ary->wc_nchunks);
--	if (((unsigned long)&ary->wc_array[0] +
--	     (sizeof(struct rpcrdma_write_chunk) * nchunks)) >
--	    (unsigned long)vaend) {
-+
-+	start = (unsigned long)&ary->wc_array[0];
-+	end = (unsigned long)vaend;
-+	if (nchunks < 0 ||
-+	    nchunks > (SIZE_MAX - start) / sizeof(struct rpcrdma_write_chunk) ||
-+	    (start + (sizeof(struct rpcrdma_write_chunk) * nchunks)) > end) {
- 		dprintk("svcrdma: ary=%p, wc_nchunks=%d, vaend=%p\n",
- 			ary, nchunks, vaend);
- 		return NULL;
 diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
 index 0ce7552..d074459 100644
 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -92894,7 +92861,7 @@ index f5eb43d..1814de8 100644
  	shdr = (Elf_Shdr *)((char *)ehdr + _r(&ehdr->e_shoff));
  	shstrtab_sec = shdr + r2(&ehdr->e_shstrndx);
 diff --git a/security/Kconfig b/security/Kconfig
-index e9c6ac7..0d298ea 100644
+index e9c6ac7..a4d558d 100644
 --- a/security/Kconfig
 +++ b/security/Kconfig
 @@ -4,6 +4,956 @@
@@ -93577,7 +93544,7 @@ index e9c6ac7..0d298ea 100644
 +
 +config PAX_RANDKSTACK
 +	bool "Randomize kernel stack base"
-+	default y if GRKERNSEC_CONFIG_AUTO
++	default y if GRKERNSEC_CONFIG_AUTO && !(GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_VIRTUALBOX)
 +	depends on X86_TSC && X86
 +	help
 +	  By saying Y here the kernel will randomize every task's kernel

diff --git a/3.10.4/4425_grsec_remove_EI_PAX.patch b/3.10.5/4425_grsec_remove_EI_PAX.patch
similarity index 100%
rename from 3.10.4/4425_grsec_remove_EI_PAX.patch
rename to 3.10.5/4425_grsec_remove_EI_PAX.patch

diff --git a/3.10.4/4427_force_XATTR_PAX_tmpfs.patch b/3.10.5/4427_force_XATTR_PAX_tmpfs.patch
similarity index 100%
rename from 3.10.4/4427_force_XATTR_PAX_tmpfs.patch
rename to 3.10.5/4427_force_XATTR_PAX_tmpfs.patch

diff --git a/3.10.4/4430_grsec-remove-localversion-grsec.patch b/3.10.5/4430_grsec-remove-localversion-grsec.patch
similarity index 100%
rename from 3.10.4/4430_grsec-remove-localversion-grsec.patch
rename to 3.10.5/4430_grsec-remove-localversion-grsec.patch

diff --git a/3.10.4/4435_grsec-mute-warnings.patch b/3.10.5/4435_grsec-mute-warnings.patch
similarity index 100%
rename from 3.10.4/4435_grsec-mute-warnings.patch
rename to 3.10.5/4435_grsec-mute-warnings.patch

diff --git a/3.10.4/4440_grsec-remove-protected-paths.patch b/3.10.5/4440_grsec-remove-protected-paths.patch
similarity index 100%
rename from 3.10.4/4440_grsec-remove-protected-paths.patch
rename to 3.10.5/4440_grsec-remove-protected-paths.patch

diff --git a/3.10.4/4450_grsec-kconfig-default-gids.patch b/3.10.5/4450_grsec-kconfig-default-gids.patch
similarity index 100%
rename from 3.10.4/4450_grsec-kconfig-default-gids.patch
rename to 3.10.5/4450_grsec-kconfig-default-gids.patch

diff --git a/3.10.4/4465_selinux-avc_audit-log-curr_ip.patch b/3.10.5/4465_selinux-avc_audit-log-curr_ip.patch
similarity index 100%
rename from 3.10.4/4465_selinux-avc_audit-log-curr_ip.patch
rename to 3.10.5/4465_selinux-avc_audit-log-curr_ip.patch

diff --git a/3.10.4/4470_disable-compat_vdso.patch b/3.10.5/4470_disable-compat_vdso.patch
similarity index 100%
rename from 3.10.4/4470_disable-compat_vdso.patch
rename to 3.10.5/4470_disable-compat_vdso.patch

diff --git a/3.10.4/4475_emutramp_default_on.patch b/3.10.5/4475_emutramp_default_on.patch
similarity index 100%
rename from 3.10.4/4475_emutramp_default_on.patch
rename to 3.10.5/4475_emutramp_default_on.patch

diff --git a/3.2.50/0000_README b/3.2.50/0000_README
index 56552a3..7f6cb30 100644
--- a/3.2.50/0000_README
+++ b/3.2.50/0000_README
@@ -118,7 +118,7 @@ Patch:	1049_linux-3.2.50.patch
 From:	http://www.kernel.org
 Desc:	Linux 3.2.50
 
-Patch:	4420_grsecurity-2.9.1-3.2.50-201308030030.patch
+Patch:	4420_grsecurity-2.9.1-3.2.50-201308052151.patch
 From:	http://www.grsecurity.net
 Desc:	hardened-sources base patch from upstream grsecurity
 

diff --git a/3.2.50/4420_grsecurity-2.9.1-3.2.50-201308030030.patch b/3.2.50/4420_grsecurity-2.9.1-3.2.50-201308052151.patch
similarity index 99%
rename from 3.2.50/4420_grsecurity-2.9.1-3.2.50-201308030030.patch
rename to 3.2.50/4420_grsecurity-2.9.1-3.2.50-201308052151.patch
index cb05b47..bf119a8 100644
--- a/3.2.50/4420_grsecurity-2.9.1-3.2.50-201308030030.patch
+++ b/3.2.50/4420_grsecurity-2.9.1-3.2.50-201308052151.patch
@@ -48141,7 +48141,7 @@ index a6395bd..f1e376a 100644
  		(unsigned long) create_aout_tables((char __user *) bprm->p, bprm);
  #ifdef __alpha__
 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
-index 8dd615c..ff7ac04 100644
+index 8dd615c..f3bbb60 100644
 --- a/fs/binfmt_elf.c
 +++ b/fs/binfmt_elf.c
 @@ -32,6 +32,7 @@
@@ -48152,7 +48152,7 @@ index 8dd615c..ff7ac04 100644
  #include <asm/uaccess.h>
  #include <asm/param.h>
  #include <asm/page.h>
-@@ -51,6 +52,10 @@ static int elf_core_dump(struct coredump_params *cprm);
+@@ -51,6 +52,14 @@ static int elf_core_dump(struct coredump_params *cprm);
  #define elf_core_dump	NULL
  #endif
  
@@ -48160,10 +48160,14 @@ index 8dd615c..ff7ac04 100644
 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
 +#endif
 +
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++static void elf_handle_mmap(struct file *file);
++#endif
++
  #if ELF_EXEC_PAGESIZE > PAGE_SIZE
  #define ELF_MIN_ALIGN	ELF_EXEC_PAGESIZE
  #else
-@@ -70,6 +75,11 @@ static struct linux_binfmt elf_format = {
+@@ -70,6 +79,15 @@ static struct linux_binfmt elf_format = {
  	.load_binary	= load_elf_binary,
  	.load_shlib	= load_elf_library,
  	.core_dump	= elf_core_dump,
@@ -48172,10 +48176,14 @@ index 8dd615c..ff7ac04 100644
 +	.handle_mprotect= elf_handle_mprotect,
 +#endif
 +
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++	.handle_mmap	= elf_handle_mmap,
++#endif
++
  	.min_coredump	= ELF_EXEC_PAGESIZE,
  };
  
-@@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
+@@ -77,6 +95,8 @@ static struct linux_binfmt elf_format = {
  
  static int set_brk(unsigned long start, unsigned long end)
  {
@@ -48184,7 +48192,7 @@ index 8dd615c..ff7ac04 100644
  	start = ELF_PAGEALIGN(start);
  	end = ELF_PAGEALIGN(end);
  	if (end > start) {
-@@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
+@@ -87,7 +107,7 @@ static int set_brk(unsigned long start, unsigned long end)
  		if (BAD_ADDR(addr))
  			return addr;
  	}
@@ -48193,7 +48201,7 @@ index 8dd615c..ff7ac04 100644
  	return 0;
  }
  
-@@ -148,12 +160,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
+@@ -148,12 +168,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
  	elf_addr_t __user *u_rand_bytes;
  	const char *k_platform = ELF_PLATFORM;
  	const char *k_base_platform = ELF_BASE_PLATFORM;
@@ -48208,7 +48216,7 @@ index 8dd615c..ff7ac04 100644
  
  	/*
  	 * In some cases (e.g. Hyper-Threading), we want to avoid L1
-@@ -195,8 +208,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
+@@ -195,8 +216,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
  	 * Generate 16 random bytes for userspace PRNG seeding.
  	 */
  	get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
@@ -48223,7 +48231,7 @@ index 8dd615c..ff7ac04 100644
  	if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
  		return -EFAULT;
  
-@@ -308,9 +325,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
+@@ -308,9 +333,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
  		return -EFAULT;
  	current->mm->env_end = p;
  
@@ -48236,7 +48244,7 @@ index 8dd615c..ff7ac04 100644
  		return -EFAULT;
  	return 0;
  }
-@@ -376,15 +395,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
+@@ -376,15 +403,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
     an ELF header */
  
  static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
@@ -48255,7 +48263,7 @@ index 8dd615c..ff7ac04 100644
  	unsigned long total_size;
  	int retval, i, size;
  
-@@ -430,6 +448,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
+@@ -430,6 +456,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
  		goto out_close;
  	}
  
@@ -48267,7 +48275,7 @@ index 8dd615c..ff7ac04 100644
  	eppnt = elf_phdata;
  	for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
  		if (eppnt->p_type == PT_LOAD) {
-@@ -453,8 +476,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
+@@ -453,8 +484,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
  			map_addr = elf_map(interpreter, load_addr + vaddr,
  					eppnt, elf_prot, elf_type, total_size);
  			total_size = 0;
@@ -48276,7 +48284,7 @@ index 8dd615c..ff7ac04 100644
  			error = map_addr;
  			if (BAD_ADDR(map_addr))
  				goto out_close;
-@@ -473,8 +494,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
+@@ -473,8 +502,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
  			k = load_addr + eppnt->p_vaddr;
  			if (BAD_ADDR(k) ||
  			    eppnt->p_filesz > eppnt->p_memsz ||
@@ -48287,7 +48295,7 @@ index 8dd615c..ff7ac04 100644
  				error = -ENOMEM;
  				goto out_close;
  			}
-@@ -528,6 +549,315 @@ out:
+@@ -528,6 +557,315 @@ out:
  	return error;
  }
  
@@ -48603,7 +48611,7 @@ index 8dd615c..ff7ac04 100644
  /*
   * These are the functions used to load ELF style executables and shared
   * libraries.  There is no binary dependent code anywhere else.
-@@ -544,6 +874,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
+@@ -544,6 +882,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
  {
  	unsigned int random_variable = 0;
  
@@ -48615,7 +48623,7 @@ index 8dd615c..ff7ac04 100644
  	if ((current->flags & PF_RANDOMIZE) &&
  		!(current->personality & ADDR_NO_RANDOMIZE)) {
  		random_variable = get_random_int() & STACK_RND_MASK;
-@@ -562,7 +897,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -562,7 +905,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
   	unsigned long load_addr = 0, load_bias = 0;
  	int load_addr_set = 0;
  	char * elf_interpreter = NULL;
@@ -48624,7 +48632,7 @@ index 8dd615c..ff7ac04 100644
  	struct elf_phdr *elf_ppnt, *elf_phdata;
  	unsigned long elf_bss, elf_brk;
  	int retval, i;
-@@ -572,11 +907,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -572,11 +915,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
  	unsigned long start_code, end_code, start_data, end_data;
  	unsigned long reloc_func_desc __maybe_unused = 0;
  	int executable_stack = EXSTACK_DEFAULT;
@@ -48637,7 +48645,7 @@ index 8dd615c..ff7ac04 100644
  
  	loc = kmalloc(sizeof(*loc), GFP_KERNEL);
  	if (!loc) {
-@@ -713,11 +1048,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -713,11 +1056,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
  
  	/* OK, This is the point of no return */
  	current->flags &= ~PF_FORKNOEXEC;
@@ -48720,7 +48728,7 @@ index 8dd615c..ff7ac04 100644
  	if (elf_read_implies_exec(loc->elf_ex, executable_stack))
  		current->personality |= READ_IMPLIES_EXEC;
  
-@@ -808,6 +1213,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -808,6 +1221,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
  #else
  			load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
  #endif
@@ -48741,7 +48749,7 @@ index 8dd615c..ff7ac04 100644
  		}
  
  		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
-@@ -840,9 +1259,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -840,9 +1267,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
  		 * allowed task size. Note that p_filesz must always be
  		 * <= p_memsz so it is only necessary to check p_memsz.
  		 */
@@ -48754,7 +48762,7 @@ index 8dd615c..ff7ac04 100644
  			/* set_brk can never work. Avoid overflows. */
  			send_sig(SIGKILL, current, 0);
  			retval = -EINVAL;
-@@ -881,17 +1300,44 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+@@ -881,17 +1308,44 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
  		goto out_free_dentry;
  	}
  	if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
@@ -48805,7 +48813,7 @@ index 8dd615c..ff7ac04 100644
  					    load_bias);
  		if (!IS_ERR((void *)elf_entry)) {
  			/*
-@@ -1098,7 +1544,7 @@ out:
+@@ -1098,7 +1552,7 @@ out:
   * Decide what to dump of a segment, part, all or none.
   */
  static unsigned long vma_dump_size(struct vm_area_struct *vma,
@@ -48814,7 +48822,7 @@ index 8dd615c..ff7ac04 100644
  {
  #define FILTER(type)	(mm_flags & (1UL << MMF_DUMP_##type))
  
-@@ -1132,7 +1578,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
+@@ -1132,7 +1586,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
  	if (vma->vm_file == NULL)
  		return 0;
  
@@ -48823,7 +48831,7 @@ index 8dd615c..ff7ac04 100644
  		goto whole;
  
  	/*
-@@ -1354,9 +1800,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
+@@ -1354,9 +1808,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
  {
  	elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
  	int i = 0;
@@ -48835,7 +48843,7 @@ index 8dd615c..ff7ac04 100644
  	fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
  }
  
-@@ -1851,14 +2297,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
+@@ -1851,14 +2305,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
  }
  
  static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
@@ -48852,7 +48860,7 @@ index 8dd615c..ff7ac04 100644
  	return size;
  }
  
-@@ -1952,7 +2398,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -1952,7 +2406,7 @@ static int elf_core_dump(struct coredump_params *cprm)
  
  	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
  
@@ -48861,7 +48869,7 @@ index 8dd615c..ff7ac04 100644
  	offset += elf_core_extra_data_size();
  	e_shoff = offset;
  
-@@ -1966,10 +2412,12 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -1966,10 +2420,12 @@ static int elf_core_dump(struct coredump_params *cprm)
  	offset = dataoff;
  
  	size += sizeof(*elf);
@@ -48874,7 +48882,7 @@ index 8dd615c..ff7ac04 100644
  	if (size > cprm->limit
  	    || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
  		goto end_coredump;
-@@ -1983,7 +2431,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -1983,7 +2439,7 @@ static int elf_core_dump(struct coredump_params *cprm)
  		phdr.p_offset = offset;
  		phdr.p_vaddr = vma->vm_start;
  		phdr.p_paddr = 0;
@@ -48883,7 +48891,7 @@ index 8dd615c..ff7ac04 100644
  		phdr.p_memsz = vma->vm_end - vma->vm_start;
  		offset += phdr.p_filesz;
  		phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
-@@ -1994,6 +2442,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -1994,6 +2450,7 @@ static int elf_core_dump(struct coredump_params *cprm)
  		phdr.p_align = ELF_EXEC_PAGESIZE;
  
  		size += sizeof(phdr);
@@ -48891,7 +48899,7 @@ index 8dd615c..ff7ac04 100644
  		if (size > cprm->limit
  		    || !dump_write(cprm->file, &phdr, sizeof(phdr)))
  			goto end_coredump;
-@@ -2018,7 +2467,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -2018,7 +2475,7 @@ static int elf_core_dump(struct coredump_params *cprm)
  		unsigned long addr;
  		unsigned long end;
  
@@ -48900,7 +48908,7 @@ index 8dd615c..ff7ac04 100644
  
  		for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
  			struct page *page;
-@@ -2027,6 +2476,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -2027,6 +2484,7 @@ static int elf_core_dump(struct coredump_params *cprm)
  			page = get_dump_page(addr);
  			if (page) {
  				void *kaddr = kmap(page);
@@ -48908,7 +48916,7 @@ index 8dd615c..ff7ac04 100644
  				stop = ((size += PAGE_SIZE) > cprm->limit) ||
  					!dump_write(cprm->file, kaddr,
  						    PAGE_SIZE);
-@@ -2044,6 +2494,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+@@ -2044,6 +2502,7 @@ static int elf_core_dump(struct coredump_params *cprm)
  
  	if (e_phnum == PN_XNUM) {
  		size += sizeof(*shdr4extnum);
@@ -48916,7 +48924,7 @@ index 8dd615c..ff7ac04 100644
  		if (size > cprm->limit
  		    || !dump_write(cprm->file, shdr4extnum,
  				   sizeof(*shdr4extnum)))
-@@ -2064,6 +2515,97 @@ out:
+@@ -2064,6 +2523,126 @@ out:
  
  #endif		/* CONFIG_ELF_CORE */
  
@@ -49011,6 +49019,35 @@ index 8dd615c..ff7ac04 100644
 +}
 +#endif
 +
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++
++extern int grsec_enable_log_rwxmaps;
++
++static void elf_handle_mmap(struct file *file)
++{
++	struct elfhdr elf_h;
++	struct elf_phdr elf_p;
++	unsigned long i;
++
++	if (!grsec_enable_log_rwxmaps)
++		return;
++
++	if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
++	    memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
++	    (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
++	    elf_h.e_phentsize != sizeof(struct elf_phdr) ||
++	    elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
++		return;
++
++	for (i = 0UL; i < elf_h.e_phnum; i++) {
++		if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
++			return;
++		if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
++			gr_log_ptgnustack(file);
++	}
++}
++#endif
++
  static int __init init_elf_binfmt(void)
  {
  	return register_binfmt(&elf_format);
@@ -50187,7 +50224,7 @@ index 451b9b8..12e5a03 100644
  
  out_free_fd:
 diff --git a/fs/exec.c b/fs/exec.c
-index a2d0e51..25c839c 100644
+index a2d0e51..744f7c6 100644
 --- a/fs/exec.c
 +++ b/fs/exec.c
 @@ -55,12 +55,35 @@
@@ -50767,7 +50804,7 @@ index a2d0e51..25c839c 100644
  	cn->corename = kmalloc(cn->size, GFP_KERNEL);
  	cn->used = 0;
  
-@@ -1833,6 +2008,280 @@ out:
+@@ -1833,6 +2008,284 @@ out:
  	return ispipe;
  }
  
@@ -50876,7 +50913,11 @@ index a2d0e51..25c839c 100644
 +					} else
 +						path_fault = "<path too long>";
 +				}
-+			} else
++			} else if (pc >= mm->start_brk && pc < mm->brk)
++				path_fault = "<heap>";
++			else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
++				path_fault = "<stack>";
++			else
 +				path_fault = "<anonymous mapping>";
 +		}
 +		up_read(&mm->mmap_sem);
@@ -51048,7 +51089,7 @@ index a2d0e51..25c839c 100644
  static int zap_process(struct task_struct *start, int exit_code)
  {
  	struct task_struct *t;
-@@ -2006,17 +2455,17 @@ static void coredump_finish(struct mm_struct *mm)
+@@ -2006,17 +2459,17 @@ static void coredump_finish(struct mm_struct *mm)
  void set_dumpable(struct mm_struct *mm, int value)
  {
  	switch (value) {
@@ -51069,7 +51110,7 @@ index a2d0e51..25c839c 100644
  		set_bit(MMF_DUMP_SECURELY, &mm->flags);
  		smp_wmb();
  		set_bit(MMF_DUMPABLE, &mm->flags);
-@@ -2029,7 +2478,7 @@ static int __get_dumpable(unsigned long mm_flags)
+@@ -2029,7 +2482,7 @@ static int __get_dumpable(unsigned long mm_flags)
  	int ret;
  
  	ret = mm_flags & MMF_DUMPABLE_MASK;
@@ -51078,7 +51119,7 @@ index a2d0e51..25c839c 100644
  }
  
  int get_dumpable(struct mm_struct *mm)
-@@ -2044,17 +2493,17 @@ static void wait_for_dump_helpers(struct file *file)
+@@ -2044,17 +2497,17 @@ static void wait_for_dump_helpers(struct file *file)
  	pipe = file->f_path.dentry->d_inode->i_pipe;
  
  	pipe_lock(pipe);
@@ -51101,7 +51142,7 @@ index a2d0e51..25c839c 100644
  	pipe_unlock(pipe);
  
  }
-@@ -2115,7 +2564,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+@@ -2115,7 +2568,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
  	int retval = 0;
  	int flag = 0;
  	int ispipe;
@@ -51111,7 +51152,7 @@ index a2d0e51..25c839c 100644
  	struct coredump_params cprm = {
  		.signr = signr,
  		.regs = regs,
-@@ -2130,6 +2580,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+@@ -2130,6 +2584,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
  
  	audit_core_dumps(signr);
  
@@ -51121,7 +51162,7 @@ index a2d0e51..25c839c 100644
  	binfmt = mm->binfmt;
  	if (!binfmt || !binfmt->core_dump)
  		goto fail;
-@@ -2140,14 +2593,16 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+@@ -2140,14 +2597,16 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
  	if (!cred)
  		goto fail;
  	/*
@@ -51142,7 +51183,7 @@ index a2d0e51..25c839c 100644
  	}
  
  	retval = coredump_wait(exit_code, &core_state);
-@@ -2197,7 +2652,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+@@ -2197,7 +2656,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
  		}
  		cprm.limit = RLIM_INFINITY;
  
@@ -51151,7 +51192,7 @@ index a2d0e51..25c839c 100644
  		if (core_pipe_limit && (core_pipe_limit < dump_count)) {
  			printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
  			       task_tgid_vnr(current), current->comm);
-@@ -2224,9 +2679,19 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
+@@ -2224,9 +2683,19 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
  	} else {
  		struct inode *inode;
  
@@ -51171,7 +51212,7 @@ index a2d0e51..25c839c 100644
  		cprm.file = filp_open(cn.corename,
  				 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
  				 0600);
-@@ -2267,7 +2732,7 @@ close_fail:
+@@ -2267,7 +2736,7 @@ close_fail:
  		filp_close(cprm.file, NULL);
  fail_dropcount:
  	if (ispipe)
@@ -51180,7 +51221,7 @@ index a2d0e51..25c839c 100644
  fail_unlock:
  	kfree(cn.corename);
  fail_corename:
-@@ -2286,7 +2751,7 @@ fail:
+@@ -2286,7 +2755,7 @@ fail:
   */
  int dump_write(struct file *file, const void *addr, int nr)
  {
@@ -57650,10 +57691,10 @@ index 8a89949..6776861 100644
  xfs_init_zones(void)
 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
 new file mode 100644
-index 0000000..dc33dcd
+index 0000000..0f25032
 --- /dev/null
 +++ b/grsecurity/Kconfig
-@@ -0,0 +1,1054 @@
+@@ -0,0 +1,1043 @@
 +#
 +# grecurity configuration
 +#
@@ -58364,22 +58405,11 @@ index 0000000..dc33dcd
 +	help
 +	  If you say Y here, calls to mmap() and mprotect() with explicit
 +	  usage of PROT_WRITE and PROT_EXEC together will be logged when
-+	  denied by the PAX_MPROTECT feature.  If the sysctl option is
-+	  enabled, a sysctl option with name "rwxmap_logging" is created.
-+
-+config GRKERNSEC_AUDIT_TEXTREL
-+	bool 'ELF text relocations logging (READ HELP)'
-+	depends on PAX_MPROTECT
-+	help
-+	  If you say Y here, text relocations will be logged with the filename
-+	  of the offending library or binary.  The purpose of the feature is
-+	  to help Linux distribution developers get rid of libraries and
-+	  binaries that need text relocations which hinder the future progress
-+	  of PaX.  Only Linux distribution developers should say Y here, and
-+	  never on a production machine, as this option creates an information
-+	  leak that could aid an attacker in defeating the randomization of
-+	  a single memory region.  If the sysctl option is enabled, a sysctl
-+	  option with name "audit_textrel" is created.
++	  denied by the PAX_MPROTECT feature.  This feature will also
++	  log other problematic scenarios that can occur when PAX_MPROTECT
++	  is enabled on a binary, like textrels and PT_GNU_STACK.  If the 
++          sysctl option is enabled, a sysctl option with name "rwxmap_logging"
++	  is created.
 +
 +endmenu
 +
@@ -58758,7 +58788,7 @@ index 0000000..36845aa
 +endif
 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
 new file mode 100644
-index 0000000..1c950b2
+index 0000000..35b85f2
 --- /dev/null
 +++ b/grsecurity/gracl.c
 @@ -0,0 +1,4323 @@
@@ -61963,7 +61993,7 @@ index 0000000..1c950b2
 +	unsigned char *sprole_sum = NULL;
 +	int error = 0;
 +	int error2 = 0;
-+	size_t req_count;
++	size_t req_count = 0;
 +
 +	mutex_lock(&gr_dev_mutex);
 +
@@ -66106,10 +66136,10 @@ index 0000000..8ca18bf
 +}
 diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
 new file mode 100644
-index 0000000..05a6015
+index 0000000..e704013
 --- /dev/null
 +++ b/grsecurity/grsec_init.c
-@@ -0,0 +1,283 @@
+@@ -0,0 +1,279 @@
 +#include <linux/kernel.h>
 +#include <linux/sched.h>
 +#include <linux/mm.h>
@@ -66133,7 +66163,6 @@ index 0000000..05a6015
 +int grsec_enable_forkfail;
 +int grsec_enable_audit_ptrace;
 +int grsec_enable_time;
-+int grsec_enable_audit_textrel;
 +int grsec_enable_group;
 +int grsec_audit_gid;
 +int grsec_enable_chdir;
@@ -66265,9 +66294,6 @@ index 0000000..05a6015
 +	grsec_lock = 1;
 +#endif
 +
-+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
-+	grsec_enable_audit_textrel = 1;
-+#endif
 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
 +	grsec_enable_log_rwxmaps = 1;
 +#endif
@@ -66459,15 +66485,16 @@ index 0000000..8598e7f
 +}
 diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
 new file mode 100644
-index 0000000..a45d2e9
+index 0000000..56b5e9d
 --- /dev/null
 +++ b/grsecurity/grsec_log.c
-@@ -0,0 +1,322 @@
+@@ -0,0 +1,337 @@
 +#include <linux/kernel.h>
 +#include <linux/sched.h>
 +#include <linux/file.h>
 +#include <linux/tty.h>
 +#include <linux/fs.h>
++#include <linux/mm.h>
 +#include <linux/grinternal.h>
 +
 +#ifdef CONFIG_TREE_PREEMPT_RCU
@@ -66610,6 +66637,7 @@ index 0000000..a45d2e9
 +	struct vfsmount *mnt = NULL;
 +	struct file *file = NULL;
 +	struct task_struct *task = NULL;
++	struct vm_area_struct *vma = NULL;
 +	const struct cred *cred, *pcred;
 +	va_list ap;
 +
@@ -66749,6 +66777,19 @@ index 0000000..a45d2e9
 +		file = va_arg(ap, struct file *);
 +		gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
 +		break;
++	case GR_RWXMAPVMA:
++		vma = va_arg(ap, struct vm_area_struct *);
++		if (vma->vm_file)
++			str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt);
++		else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
++			str1 = "<stack>";
++		else if (vma->vm_start <= current->mm->brk &&
++			 vma->vm_end >= current->mm->start_brk)
++			str1 = "<heap>";
++		else
++			str1 = "<anonymous mapping>";
++		gr_log_middle_varargs(audit, msg, str1);
++		break;
 +	case GR_PSACCT:
 +		{
 +			unsigned int wday, cday;
@@ -66901,10 +66942,10 @@ index 0000000..2131422
 +}
 diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
 new file mode 100644
-index 0000000..a3b12a0
+index 0000000..6ee9d50
 --- /dev/null
 +++ b/grsecurity/grsec_pax.c
-@@ -0,0 +1,36 @@
+@@ -0,0 +1,45 @@
 +#include <linux/kernel.h>
 +#include <linux/sched.h>
 +#include <linux/mm.h>
@@ -66915,9 +66956,18 @@ index 0000000..a3b12a0
 +void
 +gr_log_textrel(struct vm_area_struct * vma)
 +{
-+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
-+	if (grsec_enable_audit_textrel)
-+		gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++	if (grsec_enable_log_rwxmaps)
++		gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
++#endif
++	return;
++}
++
++void gr_log_ptgnustack(struct file *file)
++{
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++	if (grsec_enable_log_rwxmaps)
++		gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file);
 +#endif
 +	return;
 +}
@@ -66933,11 +66983,11 @@ index 0000000..a3b12a0
 +}
 +
 +void
-+gr_log_rwxmprotect(struct file *file)
++gr_log_rwxmprotect(struct vm_area_struct *vma)
 +{
 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
 +	if (grsec_enable_log_rwxmaps)
-+		gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
++		gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma);
 +#endif
 +	return;
 +}
@@ -67479,10 +67529,10 @@ index 0000000..4030d57
 +}
 diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
 new file mode 100644
-index 0000000..bce198e
+index 0000000..4ebaefc
 --- /dev/null
 +++ b/grsecurity/grsec_sysctl.c
-@@ -0,0 +1,467 @@
+@@ -0,0 +1,458 @@
 +#include <linux/kernel.h>
 +#include <linux/sched.h>
 +#include <linux/sysctl.h>
@@ -67874,15 +67924,6 @@ index 0000000..bce198e
 +		.proc_handler	= &proc_dointvec,
 +	},
 +#endif
-+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
-+	{
-+		.procname	= "audit_textrel",
-+		.data		= &grsec_enable_audit_textrel,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+#endif
 +#ifdef CONFIG_GRKERNSEC_DMESG
 +	{
 +		.procname	= "dmesg",
@@ -68910,7 +68951,7 @@ index 49a83ca..d0a847e 100644
  struct atmphy_ops {
  	int (*start)(struct atm_dev *dev);
 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
-index acd8d4b..15d2eab 100644
+index acd8d4b..f2defe2 100644
 --- a/include/linux/binfmts.h
 +++ b/include/linux/binfmts.h
 @@ -18,7 +18,7 @@ struct pt_regs;
@@ -68930,11 +68971,12 @@ index acd8d4b..15d2eab 100644
  };
  
  #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
-@@ -86,8 +87,9 @@ struct linux_binfmt {
+@@ -86,8 +87,10 @@ struct linux_binfmt {
  	int (*load_binary)(struct linux_binprm *, struct  pt_regs * regs);
  	int (*load_shlib)(struct file *);
  	int (*core_dump)(struct coredump_params *cprm);
 +	void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
++	void (*handle_mmap)(struct file *);
  	unsigned long min_coredump;	/* minimal dump size */
 -};
 +} __do_const;
@@ -70556,10 +70598,10 @@ index 0000000..be66033
 +#endif
 diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
 new file mode 100644
-index 0000000..99019db
+index 0000000..1d1b40e
 --- /dev/null
 +++ b/include/linux/grinternal.h
-@@ -0,0 +1,235 @@
+@@ -0,0 +1,236 @@
 +#ifndef __GRINTERNAL_H
 +#define __GRINTERNAL_H
 +
@@ -70635,7 +70677,6 @@ index 0000000..99019db
 +extern int grsec_socket_server_gid;
 +extern int grsec_audit_gid;
 +extern int grsec_enable_group;
-+extern int grsec_enable_audit_textrel;
 +extern int grsec_enable_log_rwxmaps;
 +extern int grsec_enable_mount;
 +extern int grsec_enable_chdir;
@@ -70751,7 +70792,8 @@ index 0000000..99019db
 +	GR_CRASH1,
 +	GR_CRASH2,
 +	GR_PSACCT,
-+	GR_RWXMAP
++	GR_RWXMAP,
++	GR_RWXMAPVMA
 +};
 +
 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
@@ -70789,6 +70831,7 @@ index 0000000..99019db
 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
++#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str)
 +
 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
 +
@@ -70797,10 +70840,10 @@ index 0000000..99019db
 +#endif
 diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
 new file mode 100644
-index 0000000..2f159b5
+index 0000000..a4396b5
 --- /dev/null
 +++ b/include/linux/grmsg.h
-@@ -0,0 +1,112 @@
+@@ -0,0 +1,113 @@
 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
@@ -70904,7 +70947,8 @@ index 0000000..2f159b5
 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
-+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
++#define GR_TEXTREL_AUDIT_MSG "denied text relocation in %.950s, VMA:0x%08lx 0x%08lx by "
++#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by "
 +#define GR_VM86_MSG "denied use of vm86 by "
 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
 +#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
@@ -70915,10 +70959,10 @@ index 0000000..2f159b5
 +#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes.  Please investigate the crash report for "
 diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
 new file mode 100644
-index 0000000..4af9529
+index 0000000..f5fa948
 --- /dev/null
 +++ b/include/linux/grsecurity.h
-@@ -0,0 +1,220 @@
+@@ -0,0 +1,221 @@
 +#ifndef GR_SECURITY_H
 +#define GR_SECURITY_H
 +#include <linux/fs.h>
@@ -70996,8 +71040,9 @@ index 0000000..4af9529
 +void gr_log_unmount(const char *devname, const int retval);
 +void gr_log_mount(const char *from, const char *to, const int retval);
 +void gr_log_textrel(struct vm_area_struct *vma);
++void gr_log_ptgnustack(struct file *file);
 +void gr_log_rwxmmap(struct file *file);
-+void gr_log_rwxmprotect(struct file *file);
++void gr_log_rwxmprotect(struct vm_area_struct *vma);
 +
 +int gr_handle_follow_link(const struct inode *parent,
 +				 const struct inode *inode,
@@ -83710,7 +83755,7 @@ index 1ffd97a..240aa20 100644
  int mminit_loglevel;
  
 diff --git a/mm/mmap.c b/mm/mmap.c
-index dff37a6..0e57094 100644
+index dff37a6..49e182f 100644
 --- a/mm/mmap.c
 +++ b/mm/mmap.c
 @@ -30,6 +30,7 @@
@@ -83954,12 +83999,19 @@ index dff37a6..0e57094 100644
  	if (addr & ~PAGE_MASK)
  		return addr;
  
-@@ -992,6 +1080,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -992,6 +1080,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
  	vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
  			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
  
 +#ifdef CONFIG_PAX_MPROTECT
 +	if (mm->pax_flags & MF_PAX_MPROTECT) {
++
++#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
++		if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
++		    mm->binfmt->handle_mmap)
++			mm->binfmt->handle_mmap(file);
++#endif
++
 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
 +		if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
 +			gr_log_rwxmmap(file);
@@ -83991,7 +84043,7 @@ index dff37a6..0e57094 100644
  	if (flags & MAP_LOCKED)
  		if (!can_do_mlock())
  			return -EPERM;
-@@ -1003,6 +1121,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -1003,6 +1128,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
  		locked += mm->locked_vm;
  		lock_limit = rlimit(RLIMIT_MEMLOCK);
  		lock_limit >>= PAGE_SHIFT;
@@ -83999,7 +84051,7 @@ index dff37a6..0e57094 100644
  		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
  			return -EAGAIN;
  	}
-@@ -1073,6 +1192,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+@@ -1073,6 +1199,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
  	if (error)
  		return error;
  
@@ -84009,7 +84061,7 @@ index dff37a6..0e57094 100644
  	return mmap_region(file, addr, len, flags, vm_flags, pgoff);
  }
  EXPORT_SYMBOL(do_mmap_pgoff);
-@@ -1153,7 +1275,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
+@@ -1153,7 +1282,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
  	vm_flags_t vm_flags = vma->vm_flags;
  
  	/* If it was private or non-writable, the write bit is already clear */
@@ -84018,7 +84070,7 @@ index dff37a6..0e57094 100644
  		return 0;
  
  	/* The backer wishes to know when pages are first written to? */
-@@ -1202,17 +1324,32 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
+@@ -1202,17 +1331,32 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
  	unsigned long charged = 0;
  	struct inode *inode =  file ? file->f_path.dentry->d_inode : NULL;
  
@@ -84053,7 +84105,7 @@ index dff37a6..0e57094 100644
  	if (!may_expand_vm(mm, len >> PAGE_SHIFT))
  		return -ENOMEM;
  
-@@ -1258,6 +1395,16 @@ munmap_back:
+@@ -1258,6 +1402,16 @@ munmap_back:
  		goto unacct_error;
  	}
  
@@ -84070,7 +84122,7 @@ index dff37a6..0e57094 100644
  	vma->vm_mm = mm;
  	vma->vm_start = addr;
  	vma->vm_end = addr + len;
-@@ -1266,8 +1413,9 @@ munmap_back:
+@@ -1266,8 +1420,9 @@ munmap_back:
  	vma->vm_pgoff = pgoff;
  	INIT_LIST_HEAD(&vma->anon_vma_chain);
  
@@ -84081,7 +84133,7 @@ index dff37a6..0e57094 100644
  		if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
  			goto free_vma;
  		if (vm_flags & VM_DENYWRITE) {
-@@ -1281,6 +1429,19 @@ munmap_back:
+@@ -1281,6 +1436,19 @@ munmap_back:
  		error = file->f_op->mmap(file, vma);
  		if (error)
  			goto unmap_and_free_vma;
@@ -84101,7 +84153,7 @@ index dff37a6..0e57094 100644
  		if (vm_flags & VM_EXECUTABLE)
  			added_exe_file_vma(mm);
  
-@@ -1293,6 +1454,8 @@ munmap_back:
+@@ -1293,6 +1461,8 @@ munmap_back:
  		pgoff = vma->vm_pgoff;
  		vm_flags = vma->vm_flags;
  	} else if (vm_flags & VM_SHARED) {
@@ -84110,7 +84162,7 @@ index dff37a6..0e57094 100644
  		error = shmem_zero_setup(vma);
  		if (error)
  			goto free_vma;
-@@ -1316,14 +1479,19 @@ munmap_back:
+@@ -1316,14 +1486,19 @@ munmap_back:
  	vma_link(mm, vma, prev, rb_link, rb_parent);
  	file = vma->vm_file;
  
@@ -84131,7 +84183,7 @@ index dff37a6..0e57094 100644
  	if (vm_flags & VM_LOCKED) {
  		if (!mlock_vma_pages_range(vma, addr, addr + len))
  			mm->locked_vm += (len >> PAGE_SHIFT);
-@@ -1341,6 +1509,12 @@ unmap_and_free_vma:
+@@ -1341,6 +1516,12 @@ unmap_and_free_vma:
  	unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
  	charged = 0;
  free_vma:
@@ -84144,7 +84196,7 @@ index dff37a6..0e57094 100644
  	kmem_cache_free(vm_area_cachep, vma);
  unacct_error:
  	if (charged)
-@@ -1348,6 +1522,73 @@ unacct_error:
+@@ -1348,6 +1529,73 @@ unacct_error:
  	return error;
  }
  
@@ -84218,7 +84270,7 @@ index dff37a6..0e57094 100644
  /* Get an address range which is currently unmapped.
   * For shmat() with addr=0.
   *
-@@ -1367,6 +1608,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -1367,6 +1615,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
  	struct mm_struct *mm = current->mm;
  	struct vm_area_struct *vma;
  	unsigned long start_addr;
@@ -84226,7 +84278,7 @@ index dff37a6..0e57094 100644
  
  	if (len > TASK_SIZE)
  		return -ENOMEM;
-@@ -1374,18 +1616,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+@@ -1374,18 +1623,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
  	if (flags & MAP_FIXED)
  		return addr;
  
@@ -84257,7 +84309,7 @@ index dff37a6..0e57094 100644
  	}
  
  full_search:
-@@ -1396,34 +1643,40 @@ full_search:
+@@ -1396,34 +1650,40 @@ full_search:
  			 * Start a new search - just in case we missed
  			 * some holes.
  			 */
@@ -84309,7 +84361,7 @@ index dff37a6..0e57094 100644
  		mm->free_area_cache = addr;
  		mm->cached_hole_size = ~0UL;
  	}
-@@ -1441,7 +1694,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1441,7 +1701,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  {
  	struct vm_area_struct *vma;
  	struct mm_struct *mm = current->mm;
@@ -84319,7 +84371,7 @@ index dff37a6..0e57094 100644
  
  	/* requested length too big for entire address space */
  	if (len > TASK_SIZE)
-@@ -1450,13 +1704,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1450,13 +1711,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  	if (flags & MAP_FIXED)
  		return addr;
  
@@ -84342,7 +84394,7 @@ index dff37a6..0e57094 100644
  	}
  
  	/* check if free_area_cache is useful for us */
-@@ -1470,10 +1729,11 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1470,10 +1736,11 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  
  	/* make sure it can fit in the remaining address space */
  	if (addr > len) {
@@ -84357,7 +84409,7 @@ index dff37a6..0e57094 100644
  	}
  
  	if (mm->mmap_base < len)
-@@ -1488,7 +1748,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1488,7 +1755,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
  		 * return with success:
  		 */
  		vma = find_vma(mm, addr);
@@ -84366,7 +84418,7 @@ index dff37a6..0e57094 100644
  			/* remember the address as a hint for next time */
  			return (mm->free_area_cache = addr);
  
-@@ -1497,8 +1757,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+@@ -1497,8 +1764,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
   		        mm->cached_hole_size = vma->vm_start - addr;
  
  		/* try just below the current vma->vm_start */
@@ -84377,7 +84429,7 @@ index dff37a6..0e57094 100644
  
  bottomup:
  	/*
-@@ -1507,13 +1767,21 @@ bottomup:
+@@ -1507,13 +1774,21 @@ bottomup:
  	 * can happen with large stack limits and large mmap()
  	 * allocations.
  	 */
@@ -84401,7 +84453,7 @@ index dff37a6..0e57094 100644
  	mm->cached_hole_size = ~0UL;
  
  	return addr;
-@@ -1522,6 +1790,12 @@ bottomup:
+@@ -1522,6 +1797,12 @@ bottomup:
  
  void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
  {
@@ -84414,7 +84466,7 @@ index dff37a6..0e57094 100644
  	/*
  	 * Is this a new hole at the highest possible address?
  	 */
-@@ -1529,8 +1803,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
+@@ -1529,8 +1810,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
  		mm->free_area_cache = addr;
  
  	/* dont allow allocations above current base */
@@ -84426,7 +84478,7 @@ index dff37a6..0e57094 100644
  }
  
  unsigned long
-@@ -1603,40 +1879,50 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
+@@ -1603,40 +1886,50 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
  
  EXPORT_SYMBOL(find_vma);
  
@@ -84502,7 +84554,7 @@ index dff37a6..0e57094 100644
  
  /*
   * Verify that the stack growth is acceptable and
-@@ -1654,6 +1940,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -1654,6 +1947,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
  		return -ENOMEM;
  
  	/* Stack limit test */
@@ -84510,7 +84562,7 @@ index dff37a6..0e57094 100644
  	if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
  		return -ENOMEM;
  
-@@ -1664,6 +1951,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -1664,6 +1958,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
  		locked = mm->locked_vm + grow;
  		limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
  		limit >>= PAGE_SHIFT;
@@ -84518,7 +84570,7 @@ index dff37a6..0e57094 100644
  		if (locked > limit && !capable(CAP_IPC_LOCK))
  			return -ENOMEM;
  	}
-@@ -1682,7 +1970,6 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -1682,7 +1977,6 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
  		return -ENOMEM;
  
  	/* Ok, everything looks good - let it rip */
@@ -84526,7 +84578,7 @@ index dff37a6..0e57094 100644
  	if (vma->vm_flags & VM_LOCKED)
  		mm->locked_vm += grow;
  	vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow);
-@@ -1694,37 +1981,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+@@ -1694,37 +1988,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
   * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
   * vma is the last one with address > vma->vm_end.  Have to extend vma.
   */
@@ -84584,7 +84636,7 @@ index dff37a6..0e57094 100644
  		unsigned long size, grow;
  
  		size = address - vma->vm_start;
-@@ -1739,6 +2037,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+@@ -1739,6 +2044,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
  			}
  		}
  	}
@@ -84593,7 +84645,7 @@ index dff37a6..0e57094 100644
  	vma_unlock_anon_vma(vma);
  	khugepaged_enter_vma_merge(vma);
  	return error;
-@@ -1752,6 +2052,8 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -1752,6 +2059,8 @@ int expand_downwards(struct vm_area_struct *vma,
  				   unsigned long address)
  {
  	int error;
@@ -84602,7 +84654,7 @@ index dff37a6..0e57094 100644
  
  	/*
  	 * We must make sure the anon_vma is allocated
-@@ -1765,6 +2067,15 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -1765,6 +2074,15 @@ int expand_downwards(struct vm_area_struct *vma,
  	if (error)
  		return error;
  
@@ -84618,7 +84670,7 @@ index dff37a6..0e57094 100644
  	vma_lock_anon_vma(vma);
  
  	/*
-@@ -1774,9 +2085,17 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -1774,9 +2092,17 @@ int expand_downwards(struct vm_area_struct *vma,
  	 */
  
  	/* Somebody else might have raced and expanded it already */
@@ -84637,7 +84689,7 @@ index dff37a6..0e57094 100644
  		size = vma->vm_end - address;
  		grow = (vma->vm_start - address) >> PAGE_SHIFT;
  
-@@ -1786,18 +2105,48 @@ int expand_downwards(struct vm_area_struct *vma,
+@@ -1786,18 +2112,48 @@ int expand_downwards(struct vm_area_struct *vma,
  			if (!error) {
  				vma->vm_start = address;
  				vma->vm_pgoff -= grow;
@@ -84686,7 +84738,7 @@ index dff37a6..0e57094 100644
  	return expand_upwards(vma, address);
  }
  
-@@ -1820,6 +2169,14 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
+@@ -1820,6 +2176,14 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
  #else
  int expand_stack(struct vm_area_struct *vma, unsigned long address)
  {
@@ -84701,7 +84753,7 @@ index dff37a6..0e57094 100644
  	return expand_downwards(vma, address);
  }
  
-@@ -1860,7 +2217,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
+@@ -1860,7 +2224,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
  	do {
  		long nrpages = vma_pages(vma);
  
@@ -84716,7 +84768,7 @@ index dff37a6..0e57094 100644
  		vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
  		vma = remove_vma(vma);
  	} while (vma);
-@@ -1905,6 +2268,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -1905,6 +2275,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
  	insertion_point = (prev ? &prev->vm_next : &mm->mmap);
  	vma->vm_prev = NULL;
  	do {
@@ -84733,7 +84785,7 @@ index dff37a6..0e57094 100644
  		rb_erase(&vma->vm_rb, &mm->mm_rb);
  		mm->map_count--;
  		tail_vma = vma;
-@@ -1933,14 +2306,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -1933,14 +2313,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
  	struct vm_area_struct *new;
  	int err = -ENOMEM;
  
@@ -84767,7 +84819,7 @@ index dff37a6..0e57094 100644
  	/* most fields are the same, copy all, and then fixup */
  	*new = *vma;
  
-@@ -1953,6 +2345,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -1953,6 +2352,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
  		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
  	}
  
@@ -84790,7 +84842,7 @@ index dff37a6..0e57094 100644
  	pol = mpol_dup(vma_policy(vma));
  	if (IS_ERR(pol)) {
  		err = PTR_ERR(pol);
-@@ -1978,6 +2386,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -1978,6 +2393,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
  	else
  		err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
  
@@ -84833,7 +84885,7 @@ index dff37a6..0e57094 100644
  	/* Success. */
  	if (!err)
  		return 0;
-@@ -1990,10 +2434,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -1990,10 +2441,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
  			removed_exe_file_vma(mm);
  		fput(new->vm_file);
  	}
@@ -84853,7 +84905,7 @@ index dff37a6..0e57094 100644
  	kmem_cache_free(vm_area_cachep, new);
   out_err:
  	return err;
-@@ -2006,6 +2458,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2006,6 +2465,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
  int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
  	      unsigned long addr, int new_below)
  {
@@ -84869,7 +84921,7 @@ index dff37a6..0e57094 100644
  	if (mm->map_count >= sysctl_max_map_count)
  		return -ENOMEM;
  
-@@ -2017,11 +2478,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2017,11 +2485,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
   * work.  This now handles partial unmappings.
   * Jeremy Fitzhardinge <jeremy@goop.org>
   */
@@ -84900,7 +84952,7 @@ index dff37a6..0e57094 100644
  	if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
  		return -EINVAL;
  
-@@ -2096,6 +2576,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
+@@ -2096,6 +2583,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
  	/* Fix up all other VM information */
  	remove_vma_list(mm, vma);
  
@@ -84909,7 +84961,7 @@ index dff37a6..0e57094 100644
  	return 0;
  }
  
-@@ -2108,22 +2590,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
+@@ -2108,22 +2597,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
  
  	profile_munmap(addr);
  
@@ -84938,7 +84990,7 @@ index dff37a6..0e57094 100644
  /*
   *  this is really a simplified "do_mmap".  it only handles
   *  anonymous maps.  eventually we may be able to do some
-@@ -2137,6 +2615,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2137,6 +2622,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
  	struct rb_node ** rb_link, * rb_parent;
  	pgoff_t pgoff = addr >> PAGE_SHIFT;
  	int error;
@@ -84946,7 +84998,7 @@ index dff37a6..0e57094 100644
  
  	len = PAGE_ALIGN(len);
  	if (!len)
-@@ -2148,16 +2627,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2148,16 +2634,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
  
  	flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
  
@@ -84978,7 +85030,7 @@ index dff37a6..0e57094 100644
  		locked += mm->locked_vm;
  		lock_limit = rlimit(RLIMIT_MEMLOCK);
  		lock_limit >>= PAGE_SHIFT;
-@@ -2174,22 +2667,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2174,22 +2674,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
  	/*
  	 * Clear old maps.  this also does some error checking for us
  	 */
@@ -85005,7 +85057,7 @@ index dff37a6..0e57094 100644
  		return -ENOMEM;
  
  	/* Can we just expand an old private anonymous mapping? */
-@@ -2203,7 +2696,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2203,7 +2703,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
  	 */
  	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
  	if (!vma) {
@@ -85014,7 +85066,7 @@ index dff37a6..0e57094 100644
  		return -ENOMEM;
  	}
  
-@@ -2217,11 +2710,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2217,11 +2717,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
  	vma_link(mm, vma, prev, rb_link, rb_parent);
  out:
  	perf_event_mmap(vma);
@@ -85029,7 +85081,7 @@ index dff37a6..0e57094 100644
  	return addr;
  }
  
-@@ -2268,8 +2762,10 @@ void exit_mmap(struct mm_struct *mm)
+@@ -2268,8 +2769,10 @@ void exit_mmap(struct mm_struct *mm)
  	 * Walk the list again, actually closing and freeing it,
  	 * with preemption enabled, without holding any MM locks.
  	 */
@@ -85041,7 +85093,7 @@ index dff37a6..0e57094 100644
  
  	BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
  }
-@@ -2283,6 +2779,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
+@@ -2283,6 +2786,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
  	struct vm_area_struct * __vma, * prev;
  	struct rb_node ** rb_link, * rb_parent;
  
@@ -85055,7 +85107,7 @@ index dff37a6..0e57094 100644
  	/*
  	 * The vm_pgoff of a purely anonymous vma should be irrelevant
  	 * until its first write fault, when page's anon_vma and index
-@@ -2305,7 +2808,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
+@@ -2305,7 +2815,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
  	if ((vma->vm_flags & VM_ACCOUNT) &&
  	     security_vm_enough_memory_mm(mm, vma_pages(vma)))
  		return -ENOMEM;
@@ -85078,7 +85130,7 @@ index dff37a6..0e57094 100644
  	return 0;
  }
  
-@@ -2323,6 +2841,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+@@ -2323,6 +2848,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
  	struct rb_node **rb_link, *rb_parent;
  	struct mempolicy *pol;
  
@@ -85087,7 +85139,7 @@ index dff37a6..0e57094 100644
  	/*
  	 * If anonymous vma has not yet been faulted, update new pgoff
  	 * to match new location, to increase its chance of merging.
-@@ -2373,6 +2893,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+@@ -2373,6 +2900,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
  	return NULL;
  }
  
@@ -85127,7 +85179,7 @@ index dff37a6..0e57094 100644
  /*
   * Return true if the calling process may expand its vm space by the passed
   * number of pages
-@@ -2384,6 +2937,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
+@@ -2384,6 +2944,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
  
  	lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
  
@@ -85135,7 +85187,7 @@ index dff37a6..0e57094 100644
  	if (cur + npages > lim)
  		return 0;
  	return 1;
-@@ -2454,6 +3008,22 @@ int install_special_mapping(struct mm_struct *mm,
+@@ -2454,6 +3015,22 @@ int install_special_mapping(struct mm_struct *mm,
  	vma->vm_start = addr;
  	vma->vm_end = addr + len;
  
@@ -85159,7 +85211,7 @@ index dff37a6..0e57094 100644
  	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
  
 diff --git a/mm/mprotect.c b/mm/mprotect.c
-index 5a688a2..27e031c 100644
+index 5a688a2..fffb9f6 100644
 --- a/mm/mprotect.c
 +++ b/mm/mprotect.c
 @@ -23,10 +23,16 @@
@@ -85368,7 +85420,7 @@ index 5a688a2..27e031c 100644
  		/* newflags >> 4 shift VM_MAY% in place of VM_% */
  		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
 +			if (prot & (PROT_WRITE | PROT_EXEC))
-+				gr_log_rwxmprotect(vma->vm_file);
++				gr_log_rwxmprotect(vma);
 +
 +			error = -EACCES;
 +			goto out;
@@ -92326,6 +92378,18 @@ index 7635107..4670276 100644
  	_proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
  
  	ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
+diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
+index e25e490..6e38ef0 100644
+--- a/net/sched/sch_atm.c
++++ b/net/sched/sch_atm.c
+@@ -606,6 +606,7 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
+ 		struct sockaddr_atmpvc pvc;
+ 		int state;
+ 
++		memset(&pvc, 0, sizeof(pvc));
+ 		pvc.sap_family = AF_ATMPVC;
+ 		pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1;
+ 		pvc.sap_addr.vpi = flow->vcc->vpi;
 diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
 index 8104278..300d89d 100644
 --- a/net/sctp/ipv6.c
@@ -94499,7 +94563,7 @@ index 38f6617..e70b72b 100755
  
  exuberant()
 diff --git a/security/Kconfig b/security/Kconfig
-index 51bd5a0..2ae77cf 100644
+index 51bd5a0..7ac4fad 100644
 --- a/security/Kconfig
 +++ b/security/Kconfig
 @@ -4,6 +4,956 @@
@@ -95181,7 +95245,7 @@ index 51bd5a0..2ae77cf 100644
 +
 +config PAX_RANDKSTACK
 +	bool "Randomize kernel stack base"
-+	default y if GRKERNSEC_CONFIG_AUTO
++	default y if GRKERNSEC_CONFIG_AUTO && !(GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_VIRTUALBOX)
 +	depends on X86_TSC && X86
 +	help
 +	  By saying Y here the kernel will randomize every task's kernel

