public inbox for gentoo-commits@lists.gentoo.org
 help / color / mirror / Atom feed
* [gentoo-commits] linux-patches r1270 - hardened/2.6/trunk/2.6.23
@ 2008-03-22 18:37 Christian Heim (phreak)
  0 siblings, 0 replies; only message in thread
From: Christian Heim (phreak) @ 2008-03-22 18:37 UTC (permalink / raw
  To: gentoo-commits

Author: phreak
Date: 2008-03-22 18:37:36 +0000 (Sat, 22 Mar 2008)
New Revision: 1270

Removed:
   hardened/2.6/trunk/2.6.23/4105_dm-bbr.patch
   hardened/2.6/trunk/2.6.23/4300_squashfs-3.2-r2.patch
   hardened/2.6/trunk/2.6.23/4405_alpha-sysctl-uac.patch
   hardened/2.6/trunk/2.6.23/4450_grsec-2.1.11-2.6.23.9-200712101800.patch
Log:
Cleaning up, to import kerframil patchset for 2.6.23.

Deleted: hardened/2.6/trunk/2.6.23/4105_dm-bbr.patch
===================================================================
--- hardened/2.6/trunk/2.6.23/4105_dm-bbr.patch	2008-03-18 12:35:18 UTC (rev 1269)
+++ hardened/2.6/trunk/2.6.23/4105_dm-bbr.patch	2008-03-22 18:37:36 UTC (rev 1270)
@@ -1,1181 +0,0 @@
-BBR Target, updated by dsd@gentoo.org
-
-Incomplete changelog:
- 2007/07/08: updated for new API in 2.6.22
-
---- a/drivers/md/Kconfig
-+++ b/drivers/md/Kconfig
-@@ -276,4 +276,15 @@ config DM_DELAY
- 
- 	If unsure, say N.
- 
-+config BLK_DEV_DM_BBR
-+	tristate "Bad Block Relocation Device Target (EXPERIMENTAL)"
-+	depends on BLK_DEV_DM && EXPERIMENTAL
-+	---help---
-+	  Support for devices with software-based bad-block-relocation.
-+
-+	  To compile this as a module, choose M here: the module will be
-+	  called dm-bbr.
-+
-+	  If unsure, say N.
-+
- endif # MD
---- a/drivers/md/Makefile
-+++ b/drivers/md/Makefile
-@@ -39,6 +39,7 @@ obj-$(CONFIG_DM_MULTIPATH_RDAC)	+= dm-rd
- obj-$(CONFIG_DM_SNAPSHOT)	+= dm-snapshot.o
- obj-$(CONFIG_DM_MIRROR)		+= dm-mirror.o
- obj-$(CONFIG_DM_ZERO)		+= dm-zero.o
-+obj-$(CONFIG_BLK_DEV_DM_BBR)	+= dm-bbr.o
- 
- quiet_cmd_unroll = UNROLL  $@
-       cmd_unroll = $(PERL) $(srctree)/$(src)/unroll.pl $(UNROLL) \
---- /dev/null
-+++ b/drivers/md/dm-bbr.c
-@@ -0,0 +1,1012 @@
-+/*
-+ *   (C) Copyright IBM Corp. 2002, 2004
-+ *
-+ *   This program is free software;  you can redistribute it and/or modify
-+ *   it under the terms of the GNU General Public License as published by
-+ *   the Free Software Foundation; either version 2 of the License, or
-+ *   (at your option) any later version.
-+ *
-+ *   This program is distributed in the hope that it will be useful,
-+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
-+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
-+ *   the GNU General Public License for more details.
-+ *
-+ *   You should have received a copy of the GNU General Public License
-+ *   along with this program;  if not, write to the Free Software
-+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-+ *
-+ * linux/drivers/md/dm-bbr.c
-+ *
-+ * Bad-block-relocation (BBR) target for device-mapper.
-+ *
-+ * The BBR target is designed to remap I/O write failures to another safe
-+ * location on disk. Note that most disk drives have BBR built into them,
-+ * this means that our software BBR will be only activated when all hardware
-+ * BBR replacement sectors have been used.
-+ */
-+
-+#include <linux/module.h>
-+#include <linux/init.h>
-+#include <linux/bio.h>
-+#include <linux/spinlock.h>
-+#include <linux/slab.h>
-+#include <linux/mempool.h>
-+#include <linux/workqueue.h>
-+#include <linux/vmalloc.h>
-+
-+#include "dm.h"
-+#include "dm-bio-list.h"
-+#include "dm-bio-record.h"
-+#include "dm-bbr.h"
-+#include "dm-io.h"
-+
-+#define DM_MSG_PREFIX "bbr"
-+#define SECTOR_SIZE (1 << SECTOR_SHIFT)
-+
-+static struct workqueue_struct *dm_bbr_wq = NULL;
-+static void bbr_remap_handler(struct work_struct *work);
-+static struct kmem_cache *bbr_remap_cache;
-+static struct kmem_cache *bbr_io_cache;
-+static mempool_t *bbr_io_pool;
-+
-+/**
-+ * bbr_binary_tree_destroy
-+ *
-+ * Destroy the binary tree.
-+ **/
-+static void bbr_binary_tree_destroy(struct bbr_runtime_remap *root)
-+{
-+	struct bbr_runtime_remap **link = NULL;
-+	struct bbr_runtime_remap *node = root;
-+
-+	while (node) {
-+		if (node->left) {
-+			link = &node->left;
-+			node = node->left;
-+			continue;
-+		}
-+		if (node->right) {
-+			link = &node->right;
-+			node = node->right;
-+			continue;
-+		}
-+
-+		kmem_cache_free(bbr_remap_cache, node);
-+		if (node == root) {
-+			/* If root is deleted, we're done. */
-+			break;
-+		}
-+
-+		/* Back to root. */
-+		node = root;
-+		*link = NULL;
-+	}
-+}
-+
-+static void bbr_free_remap(struct bbr_private *bbr_id)
-+{
-+	spin_lock_irq(&bbr_id->remap_root_lock);
-+	bbr_binary_tree_destroy(bbr_id->remap_root);
-+	bbr_id->remap_root = NULL;
-+	spin_unlock_irq(&bbr_id->remap_root_lock);
-+}
-+
-+static struct bbr_private *bbr_alloc_private(void)
-+{
-+	struct bbr_private *bbr_id;
-+
-+	bbr_id = kzalloc(sizeof(*bbr_id), GFP_KERNEL);
-+	if (bbr_id == NULL)
-+		return NULL;
-+
-+	INIT_WORK(&bbr_id->remap_work, bbr_remap_handler);
-+	spin_lock_init(&bbr_id->remap_root_lock);
-+	spin_lock_init(&bbr_id->remap_ios_lock);
-+	bbr_id->in_use_replacement_blks = (atomic_t) ATOMIC_INIT(0);
-+
-+	return bbr_id;
-+}
-+
-+static void bbr_free_private(struct bbr_private *bbr_id)
-+{
-+	vfree(bbr_id->bbr_table);
-+	bbr_free_remap(bbr_id);
-+	kfree(bbr_id);
-+}
-+
-+static u32 crc_table[256];
-+static u32 crc_table_built = 0;
-+
-+static void build_crc_table(void)
-+{
-+	u32 i, j, crc;
-+
-+	for (i = 0; i <= 255; i++) {
-+		crc = i;
-+		for (j = 8; j > 0; j--) {
-+			if (crc & 1)
-+				crc = (crc >> 1) ^ CRC_POLYNOMIAL;
-+			else
-+				crc >>= 1;
-+		}
-+		crc_table[i] = crc;
-+	}
-+	crc_table_built = 1;
-+}
-+
-+static u32 calculate_crc(u32 crc, void *buffer, u32 buffersize)
-+{
-+	unsigned char *current_byte;
-+	u32 temp1, temp2, i;
-+
-+	current_byte = (unsigned char *) buffer;
-+	/* Make sure the crc table is available */
-+	if (!crc_table_built)
-+		build_crc_table();
-+	/* Process each byte in the buffer. */
-+	for (i = 0; i < buffersize; i++) {
-+		temp1 = (crc >> 8) & 0x00FFFFFF;
-+		temp2 = crc_table[(crc ^ (u32) * current_byte) &
-+				  (u32) 0xff];
-+		current_byte++;
-+		crc = temp1 ^ temp2;
-+	}
-+	return crc;
-+}
-+
-+/**
-+ * le_bbr_table_sector_to_cpu
-+ *
-+ * Convert bbr meta data from on-disk (LE) format
-+ * to the native cpu endian format.
-+ **/
-+static void le_bbr_table_sector_to_cpu(struct bbr_table *p)
-+{
-+	int i;
-+	p->signature		= le32_to_cpup(&p->signature);
-+	p->crc			= le32_to_cpup(&p->crc);
-+	p->sequence_number	= le32_to_cpup(&p->sequence_number);
-+	p->in_use_cnt		= le32_to_cpup(&p->in_use_cnt);
-+	for (i = 0; i < BBR_ENTRIES_PER_SECT; i++) {
-+		p->entries[i].bad_sect =
-+			le64_to_cpup(&p->entries[i].bad_sect);
-+		p->entries[i].replacement_sect =
-+			le64_to_cpup(&p->entries[i].replacement_sect);
-+	}
-+}
-+
-+/**
-+ * cpu_bbr_table_sector_to_le
-+ *
-+ * Convert bbr meta data from cpu endian format to on-disk (LE) format
-+ **/
-+static void cpu_bbr_table_sector_to_le(struct bbr_table *p,
-+				       struct bbr_table *le)
-+{
-+	int i;
-+	le->signature		= cpu_to_le32p(&p->signature);
-+	le->crc			= cpu_to_le32p(&p->crc);
-+	le->sequence_number	= cpu_to_le32p(&p->sequence_number);
-+	le->in_use_cnt		= cpu_to_le32p(&p->in_use_cnt);
-+	for (i = 0; i < BBR_ENTRIES_PER_SECT; i++) {
-+		le->entries[i].bad_sect =
-+			cpu_to_le64p(&p->entries[i].bad_sect);
-+		le->entries[i].replacement_sect =
-+			cpu_to_le64p(&p->entries[i].replacement_sect);
-+	}
-+}
-+
-+/**
-+ * validate_bbr_table_sector
-+ *
-+ * Check the specified BBR table sector for a valid signature and CRC. If it's
-+ * valid, endian-convert the table sector.
-+ **/
-+static int validate_bbr_table_sector(struct bbr_table *p)
-+{
-+	int org_crc, final_crc;
-+
-+	if (le32_to_cpup(&p->signature) != BBR_TABLE_SIGNATURE) {
-+		DMERR("BBR table signature doesn't match!");
-+		DMERR("Found 0x%x. Expecting 0x%x",
-+		      le32_to_cpup(&p->signature), BBR_TABLE_SIGNATURE);
-+		return -EINVAL;
-+	}
-+
-+	if (!p->crc) {
-+		DMERR("BBR table sector has no CRC!");
-+		return -EINVAL;
-+	}
-+
-+	org_crc = le32_to_cpup(&p->crc);
-+	p->crc = 0;
-+	final_crc = calculate_crc(INITIAL_CRC, (void *)p, sizeof(*p));
-+	if (final_crc != org_crc) {
-+		DMERR("CRC failed!");
-+		DMERR("Found 0x%x. Expecting 0x%x",
-+		      org_crc, final_crc);
-+		return -EINVAL;
-+	}
-+
-+	p->crc = cpu_to_le32p(&org_crc);
-+	le_bbr_table_sector_to_cpu(p);
-+
-+	return 0;
-+}
-+
-+/**
-+ * bbr_binary_tree_insert
-+ *
-+ * Insert a node into the binary tree.
-+ **/
-+static void bbr_binary_tree_insert(struct bbr_runtime_remap **root,
-+				   struct bbr_runtime_remap *newnode)
-+{
-+	struct bbr_runtime_remap **node = root;
-+	while (node && *node) {
-+		node = (newnode->remap.bad_sect > (*node)->remap.bad_sect) ?
-+			&(*node)->right : &(*node)->left;
-+	}
-+
-+	newnode->left = newnode->right = NULL;
-+	*node = newnode;
-+}
-+
-+/**
-+ * bbr_binary_search
-+ *
-+ * Search for a node that contains bad_sect == lsn.
-+ **/
-+static struct bbr_runtime_remap *bbr_binary_search(
-+	struct bbr_runtime_remap *root,
-+	u64 lsn)
-+{
-+	struct bbr_runtime_remap *node = root;
-+	while (node) {
-+		if (node->remap.bad_sect == lsn)
-+			break;
-+
-+		node = (lsn > node->remap.bad_sect) ? node->right : node->left;
-+	}
-+	return node;
-+}
-+
-+/**
-+ * bbr_insert_remap_entry
-+ *
-+ * Create a new remap entry and add it to the binary tree for this node.
-+ **/
-+static int bbr_insert_remap_entry(struct bbr_private *bbr_id,
-+				  struct bbr_table_entry *new_bbr_entry)
-+{
-+	struct bbr_runtime_remap *newnode;
-+
-+	newnode = kmem_cache_alloc(bbr_remap_cache, GFP_NOIO);
-+	if (!newnode) {
-+		DMERR("Could not allocate from remap cache!");
-+		return -ENOMEM;
-+	}
-+	newnode->remap.bad_sect = new_bbr_entry->bad_sect;
-+	newnode->remap.replacement_sect = new_bbr_entry->replacement_sect;
-+	spin_lock_irq(&bbr_id->remap_root_lock);
-+	bbr_binary_tree_insert(&bbr_id->remap_root, newnode);
-+	spin_unlock_irq(&bbr_id->remap_root_lock);
-+	return 0;
-+}
-+
-+/**
-+ * bbr_table_to_remap_list
-+ *
-+ * The on-disk bbr table is sorted by the replacement sector LBA. In order to
-+ * improve run time performance, the in memory remap list must be sorted by
-+ * the bad sector LBA. This function is called at discovery time to initialize
-+ * the remap list. This function assumes that at least one copy of meta data
-+ * is valid.
-+ **/
-+static u32 bbr_table_to_remap_list(struct bbr_private *bbr_id)
-+{
-+	u32 in_use_blks = 0;
-+	int i, j;
-+	struct bbr_table *p;
-+
-+	for (i = 0, p = bbr_id->bbr_table;
-+	     i < bbr_id->nr_sects_bbr_table;
-+	     i++, p++) {
-+		if (!p->in_use_cnt)
-+			break;
-+
-+		in_use_blks += p->in_use_cnt;
-+		for (j = 0; j < p->in_use_cnt; j++)
-+			bbr_insert_remap_entry(bbr_id, &p->entries[j]);
-+	}
-+	if (in_use_blks) {
-+		char b[32];
-+		DMWARN("There are %u BBR entries for device %s",
-+		       in_use_blks, format_dev_t(b, bbr_id->dev->bdev->bd_dev));
-+	}
-+
-+	return in_use_blks;
-+}
-+
-+/**
-+ * bbr_search_remap_entry
-+ *
-+ * Search remap entry for the specified sector. If found, return a pointer to
-+ * the table entry. Otherwise, return NULL.
-+ **/
-+static struct bbr_table_entry *bbr_search_remap_entry(
-+	struct bbr_private *bbr_id,
-+	u64 lsn)
-+{
-+	struct bbr_runtime_remap *p;
-+
-+	spin_lock_irq(&bbr_id->remap_root_lock);
-+	p = bbr_binary_search(bbr_id->remap_root, lsn);
-+	spin_unlock_irq(&bbr_id->remap_root_lock);
-+	return (p) ? &p->remap : NULL;
-+}
-+
-+/**
-+ * bbr_remap
-+ *
-+ * If *lsn is in the remap table, return TRUE and modify *lsn,
-+ * else, return FALSE.
-+ **/
-+static int bbr_remap(struct bbr_private *bbr_id,
-+			    u64 *lsn)
-+{
-+	struct bbr_table_entry *e;
-+
-+	if (atomic_read(&bbr_id->in_use_replacement_blks)) {
-+		e = bbr_search_remap_entry(bbr_id, *lsn);
-+		if (e) {
-+			*lsn = e->replacement_sect;
-+			return 1;
-+		}
-+	}
-+	return 0;
-+}
-+
-+/**
-+ * bbr_remap_probe
-+ *
-+ * If any of the sectors in the range [lsn, lsn+nr_sects] are in the remap
-+ * table return TRUE, Else, return FALSE.
-+ **/
-+static int bbr_remap_probe(struct bbr_private *bbr_id,
-+				  u64 lsn, u64 nr_sects)
-+{
-+	u64 tmp, cnt;
-+
-+	if (atomic_read(&bbr_id->in_use_replacement_blks)) {
-+		for (cnt = 0, tmp = lsn;
-+		     cnt < nr_sects;
-+		     cnt += bbr_id->blksize_in_sects, tmp = lsn + cnt) {
-+			if (bbr_remap(bbr_id,&tmp))
-+				return 1;
-+		}
-+	}
-+	return 0;
-+}
-+
-+static int rw_table(struct bbr_private *bbr_id, void *vma,
-+		    struct io_region *ptr, int rw)
-+{
-+	bbr_id->vma_io_req.bi_rw = rw;
-+	bbr_id->vma_io_req.mem.ptr.vma = vma;
-+	bbr_id->vma_io_req.notify.fn = NULL;
-+
-+	return dm_io(&bbr_id->vma_io_req, 1, ptr, NULL);
-+}
-+
-+static int io_sync(struct bbr_private *bbr_id, struct page_list *pl,
-+		   unsigned offset, struct io_region *ptr, int rw)
-+{
-+	bbr_id->page_io_req.bi_rw = rw;
-+	bbr_id->page_io_req.mem.ptr.pl = pl;
-+	bbr_id->page_io_req.mem.offset = offset;
-+	bbr_id->page_io_req.notify.fn = NULL;
-+
-+	return dm_io(&bbr_id->page_io_req, 1, ptr, NULL);
-+}
-+
-+/**
-+ * bbr_setup
-+ *
-+ * Read the remap tables from disk and set up the initial remap tree.
-+ **/
-+static int bbr_setup(struct bbr_private *bbr_id)
-+{
-+	struct bbr_table *table = bbr_id->bbr_table;
-+	struct io_region job;
-+	int i, rc = 0;
-+
-+	job.bdev = bbr_id->dev->bdev;
-+	job.count = 1;
-+
-+	/* Read and verify each BBR table sector individually. */
-+	for (i = 0; i < bbr_id->nr_sects_bbr_table; i++, table++) {
-+		job.sector = bbr_id->lba_table1 + i;
-+		rc = rw_table(bbr_id, table, &job, READ);
-+		if (rc && bbr_id->lba_table2) {
-+			job.sector = bbr_id->lba_table2 + i;
-+			rc = rw_table(bbr_id, table, &job, READ);
-+		}
-+		if (rc)
-+			goto out;
-+
-+		rc = validate_bbr_table_sector(table);
-+		if (rc)
-+			goto out;
-+	}
-+	atomic_set(&bbr_id->in_use_replacement_blks,
-+		   bbr_table_to_remap_list(bbr_id));
-+
-+out:
-+	if (rc)
-+		DMERR("error during device setup: %d", rc);
-+	return rc;
-+}
-+
-+/**
-+ * bbr_io_remap_error
-+ * @bbr_id:		Private data for the BBR node.
-+ * @rw:			READ or WRITE.
-+ * @starting_lsn:	Starting sector of request to remap.
-+ * @count:		Number of sectors in the request.
-+ * @page:		Page containing the data for the request.
-+ * @offset:		Byte-offset of the data within the page.
-+ *
-+ * For the requested range, try to write each sector individually. For each
-+ * sector that fails, find the next available remap location and write the
-+ * data to that new location. Then update the table and write both copies
-+ * of the table to disk. Finally, update the in-memory mapping and do any
-+ * other necessary bookkeeping.
-+ **/
-+static int bbr_io_remap_error(struct bbr_private *bbr_id,
-+			      int rw,
-+			      u64 starting_lsn,
-+			      u64 count,
-+			      struct page *page,
-+			      unsigned int offset)
-+{
-+	struct bbr_table *bbr_table;
-+	struct io_region job;
-+	struct page_list pl;
-+	unsigned long table_sector_index;
-+	unsigned long table_sector_offset;
-+	unsigned long index;
-+	u64 lsn, new_lsn;
-+	char b[32];
-+	int rc;
-+
-+	job.bdev = bbr_id->dev->bdev;
-+	job.count = 1;
-+	pl.page = page;
-+	pl.next = NULL;
-+
-+	/* For each sector in the request. */
-+	for (lsn = 0; lsn < count; lsn++, offset += SECTOR_SIZE) {
-+		job.sector = starting_lsn + lsn;
-+		rc = io_sync(bbr_id, &pl, offset, &job, rw);
-+		while (rc) {
-+			/* Find the next available relocation sector. */
-+			new_lsn = atomic_read(&bbr_id->in_use_replacement_blks);
-+			if (new_lsn >= bbr_id->nr_replacement_blks) {
-+				/* No more replacement sectors available. */
-+				return -EIO;
-+			}
-+			new_lsn += bbr_id->start_replacement_sect;
-+
-+			/* Write the data to its new location. */
-+			DMWARN("device %s: Trying to remap bad sector "PFU64" to sector "PFU64,
-+			       format_dev_t(b, bbr_id->dev->bdev->bd_dev),
-+			       starting_lsn + lsn, new_lsn);
-+			job.sector = new_lsn;
-+			rc = io_sync(bbr_id, &pl, offset, &job, rw);
-+			if (rc) {
-+				/* This replacement sector is bad.
-+				 * Try the next one.
-+				 */
-+				DMERR("device %s: replacement sector "PFU64" is bad. Skipping.",
-+				      format_dev_t(b, bbr_id->dev->bdev->bd_dev), new_lsn);
-+				atomic_inc(&bbr_id->in_use_replacement_blks);
-+				continue;
-+			}
-+
-+			/* Add this new entry to the on-disk table. */
-+			table_sector_index = new_lsn -
-+					     bbr_id->start_replacement_sect;
-+			table_sector_offset = table_sector_index /
-+					      BBR_ENTRIES_PER_SECT;
-+			index = table_sector_index % BBR_ENTRIES_PER_SECT;
-+
-+			bbr_table = &bbr_id->bbr_table[table_sector_offset];
-+			bbr_table->entries[index].bad_sect = starting_lsn + lsn;
-+			bbr_table->entries[index].replacement_sect = new_lsn;
-+			bbr_table->in_use_cnt++;
-+			bbr_table->sequence_number++;
-+			bbr_table->crc = 0;
-+			bbr_table->crc = calculate_crc(INITIAL_CRC,
-+						       bbr_table,
-+						       sizeof(struct bbr_table));
-+
-+			/* Write the table to disk. */
-+			cpu_bbr_table_sector_to_le(bbr_table, bbr_table);
-+			if (bbr_id->lba_table1) {
-+				job.sector = bbr_id->lba_table1 + table_sector_offset;
-+				rc = rw_table(bbr_id, bbr_table, &job, WRITE);
-+			}
-+			if (bbr_id->lba_table2) {
-+				job.sector = bbr_id->lba_table2 + table_sector_offset;
-+				rc |= rw_table(bbr_id, bbr_table, &job, WRITE);
-+			}
-+			le_bbr_table_sector_to_cpu(bbr_table);
-+
-+			if (rc) {
-+				/* Error writing one of the tables to disk. */
-+				DMERR("device %s: error updating BBR tables on disk.",
-+				      format_dev_t(b, bbr_id->dev->bdev->bd_dev));
-+				return rc;
-+			}
-+
-+			/* Insert a new entry in the remapping binary-tree. */
-+			rc = bbr_insert_remap_entry(bbr_id,
-+						    &bbr_table->entries[index]);
-+			if (rc) {
-+				DMERR("device %s: error adding new entry to remap tree.",
-+				      format_dev_t(b, bbr_id->dev->bdev->bd_dev));
-+				return rc;
-+			}
-+
-+			atomic_inc(&bbr_id->in_use_replacement_blks);
-+		}
-+	}
-+
-+	return 0;
-+}
-+
-+/**
-+ * bbr_io_process_request
-+ *
-+ * For each sector in this request, check if the sector has already
-+ * been remapped. If so, process all previous sectors in the request,
-+ * followed by the remapped sector. Then reset the starting lsn and
-+ * count, and keep going with the rest of the request as if it were
-+ * a whole new request. If any of the sync_io's return an error,
-+ * call the remapper to relocate the bad sector(s).
-+ *
-+ * 2.5 Note: When switching over to bio's for the I/O path, we have made
-+ * the assumption that the I/O request described by the bio is one
-+ * virtually contiguous piece of memory (even though the bio vector
-+ * describes it using a series of physical page addresses).
-+ **/
-+static int bbr_io_process_request(struct bbr_private *bbr_id,
-+				  struct bio *bio)
-+{
-+	struct io_region job;
-+	u64 starting_lsn = bio->bi_sector;
-+	u64 count, lsn, remapped_lsn;
-+	struct page_list pl;
-+	unsigned int offset;
-+	int i, rw = bio_data_dir(bio);
-+	int rc = 0;
-+
-+	job.bdev = bbr_id->dev->bdev;
-+	pl.next = NULL;
-+
-+	/* Each bio can contain multiple vectors, each with a different page.
-+	 * Treat each vector as a separate request.
-+	 */
-+	/* KMC: Is this the right way to walk the bvec list? */
-+	for (i = 0;
-+	     i < bio->bi_vcnt;
-+	     i++, bio->bi_idx++, starting_lsn += count) {
-+
-+		/* Bvec info: number of sectors, page,
-+		 * and byte-offset within page.
-+		 */
-+		count = bio_iovec(bio)->bv_len >> SECTOR_SHIFT;
-+		pl.page = bio_iovec(bio)->bv_page;
-+		offset = bio_iovec(bio)->bv_offset;
-+
-+		/* For each sector in this bvec, check if the sector has
-+		 * already been remapped. If so, process all previous sectors
-+		 * in this request, followed by the remapped sector. Then reset
-+		 * the starting lsn and count and keep going with the rest of
-+		 * the request as if it were a whole new request.
-+		 */
-+		for (lsn = 0; lsn < count; lsn++) {
-+			remapped_lsn = starting_lsn + lsn;
-+			rc = bbr_remap(bbr_id, &remapped_lsn);
-+			if (!rc) {
-+				/* This sector is fine. */
-+				continue;
-+			}
-+
-+			/* Process all sectors in the request up to this one. */
-+			if (lsn > 0) {
-+				job.sector = starting_lsn;
-+				job.count = lsn;
-+				rc = io_sync(bbr_id, &pl, offset, &job, rw);
-+				if (rc) {
-+					/* If this I/O failed, then one of the
-+					 * sectors in this request needs to be
-+					 * relocated.
-+					 */
-+					rc = bbr_io_remap_error(bbr_id, rw,
-+								starting_lsn,
-+								lsn, pl.page,
-+								offset);
-+					if (rc) {
-+						/* KMC: Return? Or continue to next bvec? */
-+						return rc;
-+					}
-+				}
-+				offset += (lsn << SECTOR_SHIFT);
-+			}
-+
-+			/* Process the remapped sector. */
-+			job.sector = remapped_lsn;
-+			job.count = 1;
-+			rc = io_sync(bbr_id, &pl, offset, &job, rw);
-+			if (rc) {
-+				/* BUGBUG - Need more processing if this caused
-+				 * an error. If this I/O failed, then the
-+				 * existing remap is now bad, and we need to
-+				 * find a new remap. Can't use
-+				 * bbr_io_remap_error(), because the existing
-+				 * map entry needs to be changed, not added
-+				 * again, and the original table entry also
-+				 * needs to be changed.
-+				 */
-+				return rc;
-+			}
-+
-+			starting_lsn	+= (lsn + 1);
-+			count		-= (lsn + 1);
-+			lsn		= -1;
-+			offset		+= SECTOR_SIZE;
-+		}
-+
-+		/* Check for any remaining sectors after the last split. This
-+		 * could potentially be the whole request, but that should be a
-+		 * rare case because requests should only be processed by the
-+		 * thread if we know an error occurred or they contained one or
-+		 * more remapped sectors.
-+		 */
-+		if (count) {
-+			job.sector = starting_lsn;
-+			job.count = count;
-+			rc = io_sync(bbr_id, &pl, offset, &job, rw);
-+			if (rc) {
-+				/* If this I/O failed, then one of the sectors
-+				 * in this request needs to be relocated.
-+				 */
-+				rc = bbr_io_remap_error(bbr_id, rw, starting_lsn,
-+							count, pl.page, offset);
-+				if (rc) {
-+					/* KMC: Return? Or continue to next bvec? */
-+					return rc;
-+				}
-+			}
-+		}
-+	}
-+
-+	return 0;
-+}
-+
-+static void bbr_io_process_requests(struct bbr_private *bbr_id,
-+				    struct bio *bio)
-+{
-+	struct bio *next;
-+	int rc;
-+
-+	while (bio) {
-+		next = bio->bi_next;
-+		bio->bi_next = NULL;
-+
-+		rc = bbr_io_process_request(bbr_id, bio);
-+
-+		bio_endio(bio, bio->bi_size, rc);
-+
-+		bio = next;
-+	}
-+}
-+
-+/**
-+ * bbr_remap_handler
-+ *
-+ * This is the handler for the bbr work-queue.
-+ *
-+ * I/O requests should only be sent to this handler if we know that:
-+ * a) the request contains at least one remapped sector.
-+ *   or
-+ * b) the request caused an error on the normal I/O path.
-+ *
-+ * This function uses synchronous I/O, so sending a request to this
-+ * thread that doesn't need special processing will cause severe
-+ * performance degredation.
-+ **/
-+static void bbr_remap_handler(struct work_struct *work)
-+{
-+	struct bbr_private *bbr_id =
-+		container_of(work, struct bbr_private, remap_work);
-+	struct bio *bio;
-+	unsigned long flags;
-+
-+	spin_lock_irqsave(&bbr_id->remap_ios_lock, flags);
-+	bio = bio_list_get(&bbr_id->remap_ios);
-+	spin_unlock_irqrestore(&bbr_id->remap_ios_lock, flags);
-+
-+	bbr_io_process_requests(bbr_id, bio);
-+}
-+
-+/**
-+ * bbr_endio
-+ *
-+ * This is the callback for normal write requests. Check for an error
-+ * during the I/O, and send to the thread for processing if necessary.
-+ **/
-+static int bbr_endio(struct dm_target *ti, struct bio *bio,
-+		     int error, union map_info *map_context)
-+{
-+	struct bbr_private *bbr_id = ti->private;
-+	struct dm_bio_details *bbr_io = map_context->ptr;
-+
-+	if (error && bbr_io) {
-+		unsigned long flags;
-+		char b[32];
-+
-+		dm_bio_restore(bbr_io, bio);
-+		map_context->ptr = NULL;
-+
-+		DMERR("device %s: I/O failure on sector %lu. "
-+		      "Scheduling for retry.",
-+		      format_dev_t(b, bbr_id->dev->bdev->bd_dev),
-+		      (unsigned long)bio->bi_sector);
-+
-+		spin_lock_irqsave(&bbr_id->remap_ios_lock, flags);
-+		bio_list_add(&bbr_id->remap_ios, bio);
-+		spin_unlock_irqrestore(&bbr_id->remap_ios_lock, flags);
-+
-+		queue_work(dm_bbr_wq, &bbr_id->remap_work);
-+
-+		error = 1;
-+	}
-+
-+	if (bbr_io)
-+		mempool_free(bbr_io, bbr_io_pool);
-+
-+	return error;
-+}
-+
-+/**
-+ * Construct a bbr mapping
-+ **/
-+static int bbr_ctr(struct dm_target *ti, unsigned int argc, char **argv)
-+{
-+	struct bbr_private *bbr_id;
-+	unsigned long block_size;
-+	char *end;
-+	int rc = -EINVAL;
-+
-+	if (argc != 8) {
-+		ti->error = "dm-bbr requires exactly 8 arguments: "
-+			    "device offset table1_lsn table2_lsn table_size start_replacement nr_replacement_blks block_size";
-+		goto out1;
-+	}
-+
-+	bbr_id = bbr_alloc_private();
-+	if (!bbr_id) {
-+		ti->error = "dm-bbr: Error allocating bbr private data.";
-+		goto out1;
-+	}
-+
-+	bbr_id->offset = simple_strtoull(argv[1], &end, 10);
-+	bbr_id->lba_table1 = simple_strtoull(argv[2], &end, 10);
-+	bbr_id->lba_table2 = simple_strtoull(argv[3], &end, 10);
-+	bbr_id->nr_sects_bbr_table = simple_strtoull(argv[4], &end, 10);
-+	bbr_id->start_replacement_sect = simple_strtoull(argv[5], &end, 10);
-+	bbr_id->nr_replacement_blks = simple_strtoull(argv[6], &end, 10);
-+	block_size = simple_strtoul(argv[7], &end, 10);
-+	bbr_id->blksize_in_sects = (block_size >> SECTOR_SHIFT);
-+
-+	bbr_id->vma_io_req.mem.type = DM_IO_VMA;
-+	bbr_id->vma_io_req.client = dm_io_client_create(1);
-+	if (IS_ERR(bbr_id->vma_io_req.client)) {
-+		rc = PTR_ERR(bbr_id->vma_io_req.client);
-+		DMWARN("couldn't allocate disk VMA io client");
-+		goto out2;
-+	}
-+
-+	bbr_id->page_io_req.mem.type = DM_IO_PAGE_LIST;
-+	bbr_id->page_io_req.client = dm_io_client_create(1);
-+	if (IS_ERR(bbr_id->page_io_req.client)) {
-+		rc = PTR_ERR(bbr_id->page_io_req.client);
-+		DMWARN("couldn't allocate pagelist io client");
-+		goto out3;
-+	}
-+
-+	bbr_id->bbr_table = vmalloc(bbr_id->nr_sects_bbr_table << SECTOR_SHIFT);
-+	if (!bbr_id->bbr_table) {
-+		ti->error = "dm-bbr: Error allocating bbr table.";
-+		goto out4;
-+	}
-+
-+	if (dm_get_device(ti, argv[0], 0, ti->len,
-+			  dm_table_get_mode(ti->table), &bbr_id->dev)) {
-+		ti->error = "dm-bbr: Device lookup failed";
-+		goto out4;
-+	}
-+
-+	rc = bbr_setup(bbr_id);
-+	if (rc) {
-+		ti->error = "dm-bbr: Device setup failed";
-+		goto out5;
-+	}
-+
-+	ti->private = bbr_id;
-+	return 0;
-+
-+out5:
-+	dm_put_device(ti, bbr_id->dev);
-+out4:
-+	dm_io_client_destroy(bbr_id->page_io_req.client);
-+out3:
-+	dm_io_client_destroy(bbr_id->vma_io_req.client);
-+out2:
-+	bbr_free_private(bbr_id);
-+out1:
-+	return rc;
-+}
-+
-+static void bbr_dtr(struct dm_target *ti)
-+{
-+	struct bbr_private *bbr_id = ti->private;
-+
-+	dm_put_device(ti, bbr_id->dev);
-+	dm_io_client_destroy(bbr_id->page_io_req.client);
-+	dm_io_client_destroy(bbr_id->vma_io_req.client);
-+	bbr_free_private(bbr_id);
-+}
-+
-+static int bbr_map(struct dm_target *ti, struct bio *bio,
-+		   union map_info *map_context)
-+{
-+	struct bbr_private *bbr_id = ti->private;
-+	struct dm_bio_details *bbr_io;
-+	unsigned long flags;
-+	int rc = 1;
-+
-+	bio->bi_sector += bbr_id->offset;
-+
-+	if (atomic_read(&bbr_id->in_use_replacement_blks) == 0 ||
-+	    !bbr_remap_probe(bbr_id, bio->bi_sector, bio_sectors(bio))) {
-+		/* No existing remaps or this request doesn't
-+		 * contain any remapped sectors.
-+		 */
-+		bio->bi_bdev = bbr_id->dev->bdev;
-+
-+		bbr_io = mempool_alloc(bbr_io_pool, GFP_NOIO);
-+		dm_bio_record(bbr_io, bio);
-+		map_context->ptr = bbr_io;
-+	} else {
-+		/* This request has at least one remapped sector.
-+		 * Give it to the work-queue for processing.
-+		 */
-+		map_context->ptr = NULL;
-+		spin_lock_irqsave(&bbr_id->remap_ios_lock, flags);
-+		bio_list_add(&bbr_id->remap_ios, bio);
-+		spin_unlock_irqrestore(&bbr_id->remap_ios_lock, flags);
-+
-+		queue_work(dm_bbr_wq, &bbr_id->remap_work);
-+		rc = 0;
-+	}
-+
-+	return rc;
-+}
-+
-+static int bbr_status(struct dm_target *ti, status_type_t type,
-+		      char *result, unsigned int maxlen)
-+{
-+	struct bbr_private *bbr_id = ti->private;
-+	char b[BDEVNAME_SIZE];
-+
-+	switch (type) {
-+	case STATUSTYPE_INFO:
-+		result[0] = '\0';
-+		break;
-+
-+	case STATUSTYPE_TABLE:
-+		snprintf(result, maxlen, "%s "PFU64" "PFU64" "PFU64" "PFU64" "PFU64" "PFU64" %u",
-+			 format_dev_t(b, bbr_id->dev->bdev->bd_dev),
-+			 bbr_id->offset, bbr_id->lba_table1, bbr_id->lba_table2,
-+			 bbr_id->nr_sects_bbr_table,
-+			 bbr_id->start_replacement_sect,
-+			 bbr_id->nr_replacement_blks,
-+			 bbr_id->blksize_in_sects << SECTOR_SHIFT);
-+		 break;
-+	}
-+	return 0;
-+}
-+
-+static struct target_type bbr_target = {
-+	.name	= "bbr",
-+	.version= {1, 0, 1},
-+	.module	= THIS_MODULE,
-+	.ctr	= bbr_ctr,
-+	.dtr	= bbr_dtr,
-+	.map	= bbr_map,
-+	.end_io	= bbr_endio,
-+	.status	= bbr_status,
-+};
-+
-+int __init dm_bbr_init(void)
-+{
-+	int rc;
-+
-+	rc = dm_register_target(&bbr_target);
-+	if (rc) {
-+		DMERR("error registering target.");
-+		goto err1;
-+	}
-+
-+	bbr_remap_cache = kmem_cache_create("bbr-remap",
-+					    sizeof(struct bbr_runtime_remap),
-+					    0, SLAB_HWCACHE_ALIGN, NULL);
-+	if (!bbr_remap_cache) {
-+		DMERR("error creating remap cache.");
-+		rc = ENOMEM;
-+		goto err2;
-+	}
-+
-+	bbr_io_cache = kmem_cache_create("bbr-io", sizeof(struct dm_bio_details),
-+					 0, SLAB_HWCACHE_ALIGN, NULL);
-+	if (!bbr_io_cache) {
-+		DMERR("error creating io cache.");
-+		rc = ENOMEM;
-+		goto err3;
-+	}
-+
-+	bbr_io_pool = mempool_create(256, mempool_alloc_slab,
-+				     mempool_free_slab, bbr_io_cache);
-+	if (!bbr_io_pool) {
-+		DMERR("error creating io mempool.");
-+		rc = ENOMEM;
-+		goto err4;
-+	}
-+
-+	dm_bbr_wq = create_workqueue("dm-bbr");
-+	if (!dm_bbr_wq) {
-+		DMERR("error creating work-queue.");
-+		rc = ENOMEM;
-+		goto err5;
-+	}
-+
-+	return 0;
-+
-+err5:
-+	mempool_destroy(bbr_io_pool);
-+err4:
-+	kmem_cache_destroy(bbr_io_cache);
-+err3:
-+	kmem_cache_destroy(bbr_remap_cache);
-+err2:
-+	dm_unregister_target(&bbr_target);
-+err1:
-+	return rc;
-+}
-+
-+void __exit dm_bbr_exit(void)
-+{
-+	destroy_workqueue(dm_bbr_wq);
-+	mempool_destroy(bbr_io_pool);
-+	kmem_cache_destroy(bbr_io_cache);
-+	kmem_cache_destroy(bbr_remap_cache);
-+	dm_unregister_target(&bbr_target);
-+}
-+
-+module_init(dm_bbr_init);
-+module_exit(dm_bbr_exit);
-+MODULE_LICENSE("GPL");
---- /dev/null
-+++ b/drivers/md/dm-bbr.h
-@@ -0,0 +1,130 @@
-+/*
-+ *   (C) Copyright IBM Corp. 2002, 2004
-+ *
-+ *   This program is free software;  you can redistribute it and/or modify
-+ *   it under the terms of the GNU General Public License as published by
-+ *   the Free Software Foundation; either version 2 of the License, or
-+ *   (at your option) any later version.
-+ *
-+ *   This program is distributed in the hope that it will be useful,
-+ *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
-+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
-+ *   the GNU General Public License for more details.
-+ *
-+ *   You should have received a copy of the GNU General Public License
-+ *   along with this program;  if not, write to the Free Software
-+ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-+ *
-+ * linux/drivers/md/dm-bbr.h
-+ *
-+ * Bad-block-relocation (BBR) target for device-mapper.
-+ *
-+ * The BBR target is designed to remap I/O write failures to another safe
-+ * location on disk. Note that most disk drives have BBR built into them,
-+ * this means that our software BBR will be only activated when all hardware
-+ * BBR replacement sectors have been used.
-+ */
-+
-+#include "dm-io.h"
-+
-+#define BBR_TABLE_SIGNATURE		0x42627254 /* BbrT */
-+#define BBR_ENTRIES_PER_SECT		31
-+#define INITIAL_CRC			0xFFFFFFFF
-+#define CRC_POLYNOMIAL			0xEDB88320L
-+
-+/**
-+ * Macros to cleanly print 64-bit numbers on both 32-bit and 64-bit machines.
-+ * Use these in place of %Ld, %Lu, and %Lx.
-+ **/
-+#if BITS_PER_LONG > 32
-+#define PFU64 "%llu"
-+#else
-+#define PFU64 "%Lu"
-+#endif
-+
-+/**
-+ * struct bbr_table_entry
-+ * @bad_sect:		LBA of bad location.
-+ * @replacement_sect:	LBA of new location.
-+ *
-+ * Structure to describe one BBR remap.
-+ **/
-+struct bbr_table_entry {
-+	u64 bad_sect;
-+	u64 replacement_sect;
-+};
-+
-+/**
-+ * struct bbr_table
-+ * @signature:		Signature on each BBR table sector.
-+ * @crc:		CRC for this table sector.
-+ * @sequence_number:	Used to resolve conflicts when primary and secondary
-+ *			tables do not match.
-+ * @in_use_cnt:		Number of in-use table entries.
-+ * @entries:		Actual table of remaps.
-+ *
-+ * Structure to describe each sector of the metadata table. Each sector in this
-+ * table can describe 31 remapped sectors.
-+ **/
-+struct bbr_table {
-+	u32			signature;
-+	u32			crc;
-+	u32			sequence_number;
-+	u32			in_use_cnt;
-+	struct bbr_table_entry	entries[BBR_ENTRIES_PER_SECT];
-+};
-+
-+/**
-+ * struct bbr_runtime_remap
-+ *
-+ * Node in the binary tree used to keep track of remaps.
-+ **/
-+struct bbr_runtime_remap {
-+	struct bbr_table_entry		remap;
-+	struct bbr_runtime_remap	*left;
-+	struct bbr_runtime_remap	*right;
-+};
-+
-+/**
-+ * struct bbr_private
-+ * @dev:			Info about underlying device.
-+ * @bbr_table:			Copy of metadata table.
-+ * @remap_root:			Binary tree containing all remaps.
-+ * @remap_root_lock:		Lock for the binary tree.
-+ * @remap_work:			For adding work items to the work-queue.
-+ * @remap_ios:			List of I/Os for the work-queue to handle.
-+ * @remap_ios_lock:		Lock for the remap_ios list.
-+ * @offset:			LBA of data area.
-+ * @lba_table1:			LBA of primary BBR table.
-+ * @lba_table2:			LBA of secondary BBR table.
-+ * @nr_sects_bbr_table:		Size of each BBR table.
-+ * @nr_replacement_blks:	Number of replacement blocks.
-+ * @start_replacement_sect:	LBA of start of replacement blocks.
-+ * @blksize_in_sects:		Size of each block.
-+ * @in_use_replacement_blks:	Current number of remapped blocks.
-+ *
-+ * Private data for each BBR target.
-+ **/
-+struct bbr_private {
-+	struct dm_dev			*dev;
-+	struct bbr_table		*bbr_table;
-+	struct bbr_runtime_remap	*remap_root;
-+	spinlock_t			remap_root_lock;
-+
-+	struct dm_io_request vma_io_req;
-+	struct dm_io_request page_io_req;
-+
-+	struct work_struct		remap_work;
-+	struct bio_list			remap_ios;
-+	spinlock_t			remap_ios_lock;
-+
-+	u64				offset;
-+	u64				lba_table1;
-+	u64				lba_table2;
-+	u64				nr_sects_bbr_table;
-+	u64				start_replacement_sect;
-+	u64				nr_replacement_blks;
-+	u32				blksize_in_sects;
-+	atomic_t			in_use_replacement_blks;
-+};
-+

Deleted: hardened/2.6/trunk/2.6.23/4300_squashfs-3.2-r2.patch
===================================================================
--- hardened/2.6/trunk/2.6.23/4300_squashfs-3.2-r2.patch	2008-03-18 12:35:18 UTC (rev 1269)
+++ hardened/2.6/trunk/2.6.23/4300_squashfs-3.2-r2.patch	2008-03-22 18:37:36 UTC (rev 1270)
@@ -1,4389 +0,0 @@
----
- fs/Kconfig                     |   65 +
- fs/Makefile                    |    1 
- fs/squashfs/Makefile           |    7 
- fs/squashfs/inode.c            | 2327 +++++++++++++++++++++++++++++++++++++++++
- fs/squashfs/squashfs.h         |   87 +
- fs/squashfs/squashfs2_0.c      |  742 +++++++++++++
- include/linux/squashfs_fs.h    |  934 ++++++++++++++++
- include/linux/squashfs_fs_i.h  |   45 
- include/linux/squashfs_fs_sb.h |   74 +
- init/do_mounts_rd.c            |   16 
- 10 files changed, 4298 insertions(+)
-
---- a/fs/Kconfig
-+++ b/fs/Kconfig
-@@ -1364,6 +1364,71 @@ config CRAMFS
- 
- 	  If unsure, say N.
- 
-+config SQUASHFS
-+	tristate "SquashFS 3.2 - Squashed file system support"
-+	select ZLIB_INFLATE
-+	help
-+	  Saying Y here includes support for SquashFS 3.2 (a Compressed Read-Only File
-+	  System).  Squashfs is a highly compressed read-only filesystem for Linux.
-+	  It uses zlib compression to compress both files, inodes and directories.
-+	  Inodes in the system are very small and all blocks are packed to minimise
-+	  data overhead. Block sizes greater than 4K are supported up to a maximum of 64K.
-+	  SquashFS 3.1 supports 64 bit filesystems and files (larger than 4GB), full
-+	  uid/gid information, hard links and timestamps.
-+
-+	  Squashfs is intended for general read-only filesystem use, for archival
-+	  use (i.e. in cases where a .tar.gz file may be used), and in embedded
-+	  systems where low overhead is needed.  Further information and filesystem tools
-+	  are available from http://squashfs.sourceforge.net.
-+
-+	  If you want to compile this as a module ( = code which can be
-+	  inserted in and removed from the running kernel whenever you want),
-+	  say M here and read <file:Documentation/modules.txt>.  The module
-+	  will be called squashfs.  Note that the root file system (the one
-+	  containing the directory /) cannot be compiled as a module.
-+
-+	  If unsure, say N.
-+
-+config SQUASHFS_EMBEDDED
-+
-+	bool "Additional options for memory-constrained systems" 
-+	depends on SQUASHFS
-+	default n
-+	help
-+	  Saying Y here allows you to specify cache sizes and how Squashfs
-+	  allocates memory.  This is only intended for memory constrained
-+	  systems.
-+
-+	  If unsure, say N.
-+
-+config SQUASHFS_FRAGMENT_CACHE_SIZE
-+	int "Number of fragments cached" if SQUASHFS_EMBEDDED
-+	depends on SQUASHFS
-+	default "3"
-+	help
-+	  By default SquashFS caches the last 3 fragments read from
-+	  the filesystem.  Increasing this amount may mean SquashFS
-+	  has to re-read fragments less often from disk, at the expense
-+	  of extra system memory.  Decreasing this amount will mean
-+	  SquashFS uses less memory at the expense of extra reads from disk.
-+
-+	  Note there must be at least one cached fragment.  Anything
-+	  much more than three will probably not make much difference.
-+
-+config SQUASHFS_VMALLOC
-+	bool "Use Vmalloc rather than Kmalloc" if SQUASHFS_EMBEDDED
-+	depends on SQUASHFS
-+	default n
-+	help
-+	  By default SquashFS uses kmalloc to obtain fragment cache memory.
-+	  Kmalloc memory is the standard kernel allocator, but it can fail
-+	  on memory constrained systems.  Because of the way Vmalloc works,
-+	  Vmalloc can succeed when kmalloc fails.  Specifying this option
-+	  will make SquashFS always use Vmalloc to allocate the
-+	  fragment cache memory.
-+
-+	  If unsure, say N.
-+
- config VXFS_FS
- 	tristate "FreeVxFS file system support (VERITAS VxFS(TM) compatible)"
- 	depends on BLOCK
---- a/fs/Makefile
-+++ b/fs/Makefile
-@@ -72,6 +72,7 @@ obj-$(CONFIG_JBD)		+= jbd/
- obj-$(CONFIG_JBD2)		+= jbd2/
- obj-$(CONFIG_EXT2_FS)		+= ext2/
- obj-$(CONFIG_CRAMFS)		+= cramfs/
-+obj-$(CONFIG_SQUASHFS)		+= squashfs/
- obj-$(CONFIG_RAMFS)		+= ramfs/
- obj-$(CONFIG_HUGETLBFS)		+= hugetlbfs/
- obj-$(CONFIG_CODA_FS)		+= coda/
---- /dev/null
-+++ b/fs/squashfs/Makefile
-@@ -0,0 +1,7 @@
-+#
-+# Makefile for the linux squashfs routines.
-+#
-+
-+obj-$(CONFIG_SQUASHFS) += squashfs.o
-+squashfs-y += inode.o
-+squashfs-y += squashfs2_0.o
---- /dev/null
-+++ b/fs/squashfs/inode.c
-@@ -0,0 +1,2329 @@
-+/*
-+ * Squashfs - a compressed read only filesystem for Linux
-+ *
-+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007
-+ * Phillip Lougher <phillip@lougher.org.uk>
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation; either version 2,
-+ * or (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-+ *
-+ * inode.c
-+ */
-+
-+#include <linux/squashfs_fs.h>
-+#include <linux/module.h>
-+#include <linux/zlib.h>
-+#include <linux/exportfs.h>
-+#include <linux/fs.h>
-+#include <linux/squashfs_fs_sb.h>
-+#include <linux/squashfs_fs_i.h>
-+#include <linux/buffer_head.h>
-+#include <linux/vfs.h>
-+#include <linux/vmalloc.h>
-+#include <linux/smp_lock.h>
-+#include <linux/sched.h>
-+
-+#include "squashfs.h"
-+
-+static void vfs_read_inode(struct inode *i);
-+static struct dentry *squashfs_get_parent(struct dentry *child);
-+static int squashfs_read_inode(struct inode *i, squashfs_inode_t inode);
-+static int squashfs_statfs(struct dentry *, struct kstatfs *);
-+static int squashfs_symlink_readpage(struct file *file, struct page *page);
-+static long long read_blocklist(struct inode *inode, int index,
-+				int readahead_blks, char *block_list,
-+				unsigned short **block_p, unsigned int *bsize);
-+static int squashfs_readpage(struct file *file, struct page *page);
-+static int squashfs_readpage4K(struct file *file, struct page *page);
-+static int squashfs_readdir(struct file *, void *, filldir_t);
-+static struct dentry *squashfs_lookup(struct inode *, struct dentry *,
-+				struct nameidata *);
-+static int squashfs_remount(struct super_block *s, int *flags, char *data);
-+static void squashfs_put_super(struct super_block *);
-+static int squashfs_get_sb(struct file_system_type *,int, const char *, void *,
-+				struct vfsmount *);
-+static struct inode *squashfs_alloc_inode(struct super_block *sb);
-+static void squashfs_destroy_inode(struct inode *inode);
-+static int init_inodecache(void);
-+static void destroy_inodecache(void);
-+
-+static struct file_system_type squashfs_fs_type = {
-+	.owner = THIS_MODULE,
-+	.name = "squashfs",
-+	.get_sb = squashfs_get_sb,
-+	.kill_sb = kill_block_super,
-+	.fs_flags = FS_REQUIRES_DEV
-+};
-+
-+static const unsigned char squashfs_filetype_table[] = {
-+	DT_UNKNOWN, DT_DIR, DT_REG, DT_LNK, DT_BLK, DT_CHR, DT_FIFO, DT_SOCK
-+};
-+
-+static struct super_operations squashfs_super_ops = {
-+	.alloc_inode = squashfs_alloc_inode,
-+	.destroy_inode = squashfs_destroy_inode,
-+	.statfs = squashfs_statfs,
-+	.put_super = squashfs_put_super,
-+	.remount_fs = squashfs_remount
-+};
-+
-+static struct super_operations squashfs_export_super_ops = {
-+	.alloc_inode = squashfs_alloc_inode,
-+	.destroy_inode = squashfs_destroy_inode,
-+	.statfs = squashfs_statfs,
-+	.put_super = squashfs_put_super,
-+	.read_inode = vfs_read_inode
-+};
-+
-+static struct export_operations squashfs_export_ops = {
-+	.get_parent = squashfs_get_parent
-+};
-+
-+SQSH_EXTERN const struct address_space_operations squashfs_symlink_aops = {
-+	.readpage = squashfs_symlink_readpage
-+};
-+
-+SQSH_EXTERN const struct address_space_operations squashfs_aops = {
-+	.readpage = squashfs_readpage
-+};
-+
-+SQSH_EXTERN const struct address_space_operations squashfs_aops_4K = {
-+	.readpage = squashfs_readpage4K
-+};
-+
-+static const struct file_operations squashfs_dir_ops = {
-+	.read = generic_read_dir,
-+	.readdir = squashfs_readdir
-+};
-+
-+SQSH_EXTERN struct inode_operations squashfs_dir_inode_ops = {
-+	.lookup = squashfs_lookup
-+};
-+
-+
-+static struct buffer_head *get_block_length(struct super_block *s,
-+				int *cur_index, int *offset, int *c_byte)
-+{
-+	struct squashfs_sb_info *msblk = s->s_fs_info;
-+	unsigned short temp;
-+	struct buffer_head *bh;
-+
-+	if (!(bh = sb_bread(s, *cur_index)))
-+		goto out;
-+
-+	if (msblk->devblksize - *offset == 1) {
-+		if (msblk->swap)
-+			((unsigned char *) &temp)[1] = *((unsigned char *)
-+				(bh->b_data + *offset));
-+		else
-+			((unsigned char *) &temp)[0] = *((unsigned char *)
-+				(bh->b_data + *offset));
-+		brelse(bh);
-+		if (!(bh = sb_bread(s, ++(*cur_index))))
-+			goto out;
-+		if (msblk->swap)
-+			((unsigned char *) &temp)[0] = *((unsigned char *)
-+				bh->b_data); 
-+		else
-+			((unsigned char *) &temp)[1] = *((unsigned char *)
-+				bh->b_data); 
-+		*c_byte = temp;
-+		*offset = 1;
-+	} else {
-+		if (msblk->swap) {
-+			((unsigned char *) &temp)[1] = *((unsigned char *)
-+				(bh->b_data + *offset));
-+			((unsigned char *) &temp)[0] = *((unsigned char *)
-+				(bh->b_data + *offset + 1)); 
-+		} else {
-+			((unsigned char *) &temp)[0] = *((unsigned char *)
-+				(bh->b_data + *offset));
-+			((unsigned char *) &temp)[1] = *((unsigned char *)
-+				(bh->b_data + *offset + 1)); 
-+		}
-+		*c_byte = temp;
-+		*offset += 2;
-+	}
-+
-+	if (SQUASHFS_CHECK_DATA(msblk->sblk.flags)) {
-+		if (*offset == msblk->devblksize) {
-+			brelse(bh);
-+			if (!(bh = sb_bread(s, ++(*cur_index))))
-+				goto out;
-+			*offset = 0;
-+		}
-+		if (*((unsigned char *) (bh->b_data + *offset)) !=
-+						SQUASHFS_MARKER_BYTE) {
-+			ERROR("Metadata block marker corrupt @ %x\n",
-+						*cur_index);
-+			brelse(bh);
-+			goto out;
-+		}
-+		(*offset)++;
-+	}
-+	return bh;
-+
-+out:
-+	return NULL;
-+}
-+
-+
-+SQSH_EXTERN unsigned int squashfs_read_data(struct super_block *s, char *buffer,
-+			long long index, unsigned int length,
-+			long long *next_index, int srclength)
-+{
-+	struct squashfs_sb_info *msblk = s->s_fs_info;
-+	struct squashfs_super_block *sblk = &msblk->sblk;
-+	struct buffer_head *bh[((SQUASHFS_FILE_MAX_SIZE - 1) >>
-+			msblk->devblksize_log2) + 2];
-+	unsigned int offset = index & ((1 << msblk->devblksize_log2) - 1);
-+	unsigned int cur_index = index >> msblk->devblksize_log2;
-+	int bytes, avail_bytes, b = 0, k = 0;
-+	unsigned int compressed;
-+	unsigned int c_byte = length;
-+
-+	if (c_byte) {
-+		bytes = msblk->devblksize - offset;
-+		compressed = SQUASHFS_COMPRESSED_BLOCK(c_byte);
-+		c_byte = SQUASHFS_COMPRESSED_SIZE_BLOCK(c_byte);
-+
-+		TRACE("Block @ 0x%llx, %scompressed size %d, src size %d\n", index, compressed
-+					? "" : "un", (unsigned int) c_byte, srclength);
-+
-+		if (c_byte > srclength || index < 0 || (index + c_byte) > sblk->bytes_used)
-+			goto read_failure;
-+
-+		if (!(bh[0] = sb_getblk(s, cur_index)))
-+			goto block_release;
-+
-+		for (b = 1; bytes < c_byte; b++) {
-+			if (!(bh[b] = sb_getblk(s, ++cur_index)))
-+				goto block_release;
-+			bytes += msblk->devblksize;
-+		}
-+		ll_rw_block(READ, b, bh);
-+	} else {
-+		if (index < 0 || (index + 2) > sblk->bytes_used)
-+			goto read_failure;
-+
-+		if (!(bh[0] = get_block_length(s, &cur_index, &offset,
-+								&c_byte)))
-+			goto read_failure;
-+
-+		bytes = msblk->devblksize - offset;
-+		compressed = SQUASHFS_COMPRESSED(c_byte);
-+		c_byte = SQUASHFS_COMPRESSED_SIZE(c_byte);
-+
-+		TRACE("Block @ 0x%llx, %scompressed size %d\n", index, compressed
-+					? "" : "un", (unsigned int) c_byte);
-+
-+		if (c_byte > srclength || (index + c_byte) > sblk->bytes_used)
-+			goto read_failure;
-+
-+		for (b = 1; bytes < c_byte; b++) {
-+			if (!(bh[b] = sb_getblk(s, ++cur_index)))
-+				goto block_release;
-+			bytes += msblk->devblksize;
-+		}
-+		ll_rw_block(READ, b - 1, bh + 1);
-+	}
-+
-+	if (compressed) {
-+		int zlib_err = 0;
-+
-+		/*
-+	 	* uncompress block
-+	 	*/
-+
-+		mutex_lock(&msblk->read_data_mutex);
-+
-+		msblk->stream.next_out = buffer;
-+		msblk->stream.avail_out = srclength;
-+
-+		for (bytes = 0; k < b; k++) {
-+			avail_bytes = (c_byte - bytes) > (msblk->devblksize - offset) ?
-+					msblk->devblksize - offset :
-+					c_byte - bytes;
-+			wait_on_buffer(bh[k]);
-+			if (!buffer_uptodate(bh[k]))
-+				goto release_mutex;
-+
-+			msblk->stream.next_in = bh[k]->b_data + offset;
-+			msblk->stream.avail_in = avail_bytes;
-+
-+			if (k == 0) {
-+				zlib_err = zlib_inflateInit(&msblk->stream);
-+				if (zlib_err != Z_OK) {
-+					ERROR("zlib_inflateInit returned unexpected result 0x%x, srclength %d\n",
-+						zlib_err, srclength);
-+					goto release_mutex;
-+				}
-+
-+				if (avail_bytes == 0) {
-+					offset = 0;
-+					brelse(bh[k]);
-+					continue;
-+				}
-+			}
-+
-+			zlib_err = zlib_inflate(&msblk->stream, Z_NO_FLUSH);
-+			if (zlib_err != Z_OK && zlib_err != Z_STREAM_END) {
-+				ERROR("zlib_inflate returned unexpected result 0x%x, srclength %d, avail_in %d, avail_out %d\n",
-+					zlib_err, srclength, msblk->stream.avail_in, msblk->stream.avail_out);
-+				goto release_mutex;
-+			}
-+
-+			bytes += avail_bytes;
-+			offset = 0;
-+			brelse(bh[k]);
-+		}
-+
-+		if (zlib_err != Z_STREAM_END)
-+			goto release_mutex;
-+
-+		zlib_err = zlib_inflateEnd(&msblk->stream);
-+		if (zlib_err != Z_OK) {
-+			ERROR("zlib_inflateEnd returned unexpected result 0x%x, srclength %d\n",
-+				zlib_err, srclength);
-+			goto release_mutex;
-+		}
-+		bytes = msblk->stream.total_out;
-+		mutex_unlock(&msblk->read_data_mutex);
-+	} else {
-+		int i;
-+
-+		for(i = 0; i < b; i++) {
-+			wait_on_buffer(bh[i]);
-+			if(!buffer_uptodate(bh[i]))
-+				goto block_release;
-+		}
-+
-+		for (bytes = 0; k < b; k++) {
-+			avail_bytes = (c_byte - bytes) > (msblk->devblksize - offset) ?
-+					msblk->devblksize - offset :
-+					c_byte - bytes;
-+			memcpy(buffer + bytes, bh[k]->b_data + offset, avail_bytes);
-+			bytes += avail_bytes;
-+			offset = 0;
-+			brelse(bh[k]);
-+		}
-+	}
-+
-+	if (next_index)
-+		*next_index = index + c_byte + (length ? 0 :
-+				(SQUASHFS_CHECK_DATA(msblk->sblk.flags)
-+				 ? 3 : 2));
-+	return bytes;
-+
-+release_mutex:
-+	mutex_unlock(&msblk->read_data_mutex);
-+
-+block_release:
-+	for (; k < b; k++)
-+		brelse(bh[k]);
-+
-+read_failure:
-+	ERROR("sb_bread failed reading block 0x%x\n", cur_index);
-+	return 0;
-+}
-+
-+
-+SQSH_EXTERN int squashfs_get_cached_block(struct super_block *s, char *buffer,
-+				long long block, unsigned int offset,
-+				int length, long long *next_block,
-+				unsigned int *next_offset)
-+{
-+	struct squashfs_sb_info *msblk = s->s_fs_info;
-+	int n, i, bytes, return_length = length;
-+	long long next_index;
-+
-+	TRACE("Entered squashfs_get_cached_block [%llx:%x]\n", block, offset);
-+
-+	while ( 1 ) {
-+		for (i = 0; i < SQUASHFS_CACHED_BLKS; i++) 
-+			if (msblk->block_cache[i].block == block)
-+				break; 
-+		
-+		mutex_lock(&msblk->block_cache_mutex);
-+
-+		if (i == SQUASHFS_CACHED_BLKS) {
-+			/* read inode header block */
-+			for (i = msblk->next_cache, n = SQUASHFS_CACHED_BLKS;
-+					n ; n --, i = (i + 1) %
-+					SQUASHFS_CACHED_BLKS)
-+				if (msblk->block_cache[i].block !=
-+							SQUASHFS_USED_BLK)
-+					break;
-+
-+			if (n == 0) {
-+				wait_queue_t wait;
-+
-+				init_waitqueue_entry(&wait, current);
-+				add_wait_queue(&msblk->waitq, &wait);
-+				set_current_state(TASK_UNINTERRUPTIBLE);
-+ 				mutex_unlock(&msblk->block_cache_mutex);
-+				schedule();
-+				set_current_state(TASK_RUNNING);
-+				remove_wait_queue(&msblk->waitq, &wait);
-+				continue;
-+			}
-+			msblk->next_cache = (i + 1) % SQUASHFS_CACHED_BLKS;
-+
-+			if (msblk->block_cache[i].block ==
-+							SQUASHFS_INVALID_BLK) {
-+				if (!(msblk->block_cache[i].data =
-+						kmalloc(SQUASHFS_METADATA_SIZE,
-+						GFP_KERNEL))) {
-+					ERROR("Failed to allocate cache"
-+							"block\n");
-+					mutex_unlock(&msblk->block_cache_mutex);
-+					goto out;
-+				}
-+			}
-+	
-+			msblk->block_cache[i].block = SQUASHFS_USED_BLK;
-+			mutex_unlock(&msblk->block_cache_mutex);
-+
-+			msblk->block_cache[i].length = squashfs_read_data(s,
-+				msblk->block_cache[i].data, block, 0, &next_index, SQUASHFS_METADATA_SIZE);
-+			if (msblk->block_cache[i].length == 0) {
-+				ERROR("Unable to read cache block [%llx:%x]\n",
-+						block, offset);
-+				mutex_lock(&msblk->block_cache_mutex);
-+				msblk->block_cache[i].block = SQUASHFS_INVALID_BLK;
-+				kfree(msblk->block_cache[i].data);
-+				wake_up(&msblk->waitq);
-+				mutex_unlock(&msblk->block_cache_mutex);
-+				goto out;
-+			}
-+
-+			mutex_lock(&msblk->block_cache_mutex);
-+			wake_up(&msblk->waitq);
-+			msblk->block_cache[i].block = block;
-+			msblk->block_cache[i].next_index = next_index;
-+			TRACE("Read cache block [%llx:%x]\n", block, offset);
-+		}
-+
-+		if (msblk->block_cache[i].block != block) {
-+			mutex_unlock(&msblk->block_cache_mutex);
-+			continue;
-+		}
-+
-+		bytes = msblk->block_cache[i].length - offset;
-+
-+		if (bytes < 1) {
-+			mutex_unlock(&msblk->block_cache_mutex);
-+			goto out;
-+		} else if (bytes >= length) {
-+			if (buffer)
-+				memcpy(buffer, msblk->block_cache[i].data +
-+						offset, length);
-+			if (msblk->block_cache[i].length - offset == length) {
-+				*next_block = msblk->block_cache[i].next_index;
-+				*next_offset = 0;
-+			} else {
-+				*next_block = block;
-+				*next_offset = offset + length;
-+			}
-+			mutex_unlock(&msblk->block_cache_mutex);
-+			goto finish;
-+		} else {
-+			if (buffer) {
-+				memcpy(buffer, msblk->block_cache[i].data +
-+						offset, bytes);
-+				buffer += bytes;
-+			}
-+			block = msblk->block_cache[i].next_index;
-+			mutex_unlock(&msblk->block_cache_mutex);
-+			length -= bytes;
-+			offset = 0;
-+		}
-+	}
-+
-+finish:
-+	return return_length;
-+out:
-+	return 0;
-+}
-+
-+
-+static int get_fragment_location(struct super_block *s, unsigned int fragment,
-+				long long *fragment_start_block,
-+				unsigned int *fragment_size)
-+{
-+	struct squashfs_sb_info *msblk = s->s_fs_info;
-+	long long start_block =
-+		msblk->fragment_index[SQUASHFS_FRAGMENT_INDEX(fragment)];
-+	int offset = SQUASHFS_FRAGMENT_INDEX_OFFSET(fragment);
-+	struct squashfs_fragment_entry fragment_entry;
-+
-+	if (msblk->swap) {
-+		struct squashfs_fragment_entry sfragment_entry;
-+
-+		if (!squashfs_get_cached_block(s, (char *) &sfragment_entry,
-+					start_block, offset,
-+					sizeof(sfragment_entry), &start_block,
-+					&offset))
-+			goto out;
-+		SQUASHFS_SWAP_FRAGMENT_ENTRY(&fragment_entry, &sfragment_entry);
-+	} else
-+		if (!squashfs_get_cached_block(s, (char *) &fragment_entry,
-+					start_block, offset,
-+					sizeof(fragment_entry), &start_block,
-+					&offset))
-+			goto out;
-+
-+	*fragment_start_block = fragment_entry.start_block;
-+	*fragment_size = fragment_entry.size;
-+
-+	return 1;
-+
-+out:
-+	return 0;
-+}
-+
-+
-+SQSH_EXTERN void release_cached_fragment(struct squashfs_sb_info *msblk, struct
-+					squashfs_fragment_cache *fragment)
-+{
-+	mutex_lock(&msblk->fragment_mutex);
-+	fragment->locked --;
-+	wake_up(&msblk->fragment_wait_queue);
-+	mutex_unlock(&msblk->fragment_mutex);
-+}
-+
-+
-+SQSH_EXTERN struct squashfs_fragment_cache *get_cached_fragment(struct super_block
-+					*s, long long start_block,
-+					int length)
-+{
-+	int i, n;
-+	struct squashfs_sb_info *msblk = s->s_fs_info;
-+	struct squashfs_super_block *sblk = &msblk->sblk;
-+
-+	while ( 1 ) {
-+		mutex_lock(&msblk->fragment_mutex);
-+
-+		for (i = 0; i < SQUASHFS_CACHED_FRAGMENTS &&
-+				msblk->fragment[i].block != start_block; i++);
-+
-+		if (i == SQUASHFS_CACHED_FRAGMENTS) {
-+			for (i = msblk->next_fragment, n =
-+				SQUASHFS_CACHED_FRAGMENTS; n &&
-+				msblk->fragment[i].locked; n--, i = (i + 1) %
-+				SQUASHFS_CACHED_FRAGMENTS);
-+
-+			if (n == 0) {
-+				wait_queue_t wait;
-+
-+				init_waitqueue_entry(&wait, current);
-+				add_wait_queue(&msblk->fragment_wait_queue,
-+									&wait);
-+				set_current_state(TASK_UNINTERRUPTIBLE);
-+				mutex_unlock(&msblk->fragment_mutex);
-+				schedule();
-+				set_current_state(TASK_RUNNING);
-+				remove_wait_queue(&msblk->fragment_wait_queue,
-+									&wait);
-+				continue;
-+			}
-+			msblk->next_fragment = (msblk->next_fragment + 1) %
-+				SQUASHFS_CACHED_FRAGMENTS;
-+			
-+			if (msblk->fragment[i].data == NULL)
-+				if (!(msblk->fragment[i].data = SQUASHFS_ALLOC
-+						(SQUASHFS_FILE_MAX_SIZE))) {
-+					ERROR("Failed to allocate fragment "
-+							"cache block\n");
-+					mutex_unlock(&msblk->fragment_mutex);
-+					goto out;
-+				}
-+
-+			msblk->fragment[i].block = SQUASHFS_INVALID_BLK;
-+			msblk->fragment[i].locked = 1;
-+			mutex_unlock(&msblk->fragment_mutex);
-+
-+			if (!(msblk->fragment[i].length = squashfs_read_data(s,
-+						msblk->fragment[i].data,
-+						start_block, length, NULL, sblk->block_size))) {
-+				ERROR("Unable to read fragment cache block "
-+							"[%llx]\n", start_block);
-+				msblk->fragment[i].locked = 0;
-+				smp_mb();
-+				goto out;
-+			}
-+
-+			mutex_lock(&msblk->fragment_mutex);
-+			msblk->fragment[i].block = start_block;
-+			TRACE("New fragment %d, start block %lld, locked %d\n",
-+						i, msblk->fragment[i].block,
-+						msblk->fragment[i].locked);
-+			mutex_unlock(&msblk->fragment_mutex);
-+			break;
-+		}
-+
-+		msblk->fragment[i].locked++;
-+		mutex_unlock(&msblk->fragment_mutex);
-+		TRACE("Got fragment %d, start block %lld, locked %d\n", i,
-+						msblk->fragment[i].block,
-+						msblk->fragment[i].locked);
-+		break;
-+	}
-+
-+	return &msblk->fragment[i];
-+
-+out:
-+	return NULL;
-+}
-+
-+
-+static void squashfs_new_inode(struct squashfs_sb_info *msblk, struct inode *i,
-+		struct squashfs_base_inode_header *inodeb)
-+{
-+	i->i_ino = inodeb->inode_number;
-+	i->i_mtime.tv_sec = inodeb->mtime;
-+	i->i_atime.tv_sec = inodeb->mtime;
-+	i->i_ctime.tv_sec = inodeb->mtime;
-+	i->i_uid = msblk->uid[inodeb->uid];
-+	i->i_mode = inodeb->mode;
-+	i->i_size = 0;
-+	if (inodeb->guid == SQUASHFS_GUIDS)
-+		i->i_gid = i->i_uid;
-+	else
-+		i->i_gid = msblk->guid[inodeb->guid];
-+}
-+
-+
-+static squashfs_inode_t squashfs_inode_lookup(struct super_block *s, int ino)
-+{
-+	struct squashfs_sb_info *msblk = s->s_fs_info;
-+	long long start = msblk->inode_lookup_table[SQUASHFS_LOOKUP_BLOCK(ino - 1)];
-+	int offset = SQUASHFS_LOOKUP_BLOCK_OFFSET(ino - 1);
-+	squashfs_inode_t inode;
-+
-+	TRACE("Entered squashfs_inode_lookup, inode_number = %d\n", ino);
-+
-+	if (msblk->swap) {
-+		squashfs_inode_t sinode;
-+
-+		if (!squashfs_get_cached_block(s, (char *) &sinode, start, offset,
-+					sizeof(sinode), &start, &offset))
-+			goto out;
-+		SQUASHFS_SWAP_INODE_T((&inode), &sinode);
-+	} else if (!squashfs_get_cached_block(s, (char *) &inode, start, offset,
-+					sizeof(inode), &start, &offset))
-+			goto out;
-+
-+	TRACE("squashfs_inode_lookup, inode = 0x%llx\n", inode);
-+
-+	return inode;
-+
-+out:
-+	return SQUASHFS_INVALID_BLK;
-+}
-+	
-+
-+static void vfs_read_inode(struct inode *i)
-+{
-+	struct squashfs_sb_info *msblk = i->i_sb->s_fs_info;
-+	squashfs_inode_t inode = squashfs_inode_lookup(i->i_sb, i->i_ino);
-+
-+	TRACE("Entered vfs_read_inode\n");
-+
-+	if(inode != SQUASHFS_INVALID_BLK)
-+		(msblk->read_inode)(i, inode);
-+}
-+
-+
-+static struct dentry *squashfs_get_parent(struct dentry *child)
-+{
-+	struct inode *i = child->d_inode;
-+	struct inode *parent = iget(i->i_sb, SQUASHFS_I(i)->u.s2.parent_inode);
-+	struct dentry *rv;
-+
-+	TRACE("Entered squashfs_get_parent\n");
-+
-+	if(parent == NULL) {
-+		rv = ERR_PTR(-EACCES);
-+		goto out;
-+	}
-+
-+	rv = d_alloc_anon(parent);
-+	if(rv == NULL)
-+		rv = ERR_PTR(-ENOMEM);
-+
-+out:
-+	return rv;
-+}
-+
-+	
-+SQSH_EXTERN struct inode *squashfs_iget(struct super_block *s, squashfs_inode_t inode, unsigned int inode_number)
-+{
-+	struct squashfs_sb_info *msblk = s->s_fs_info;
-+	struct inode *i = iget_locked(s, inode_number);
-+
-+	TRACE("Entered squashfs_iget\n");
-+
-+	if(i && (i->i_state & I_NEW)) {
-+		(msblk->read_inode)(i, inode);
-+		unlock_new_inode(i);
-+	}
-+
-+	return i;
-+}
-+
-+
-+static int squashfs_read_inode(struct inode *i, squashfs_inode_t inode)
-+{
-+	struct super_block *s = i->i_sb;
-+	struct squashfs_sb_info *msblk = s->s_fs_info;
-+	struct squashfs_super_block *sblk = &msblk->sblk;
-+	long long block = SQUASHFS_INODE_BLK(inode) +
-+		sblk->inode_table_start;
-+	unsigned int offset = SQUASHFS_INODE_OFFSET(inode);
-+	long long next_block;
-+	unsigned int next_offset;
-+	union squashfs_inode_header id, sid;
-+	struct squashfs_base_inode_header *inodeb = &id.base,
-+					  *sinodeb = &sid.base;
-+
-+	TRACE("Entered squashfs_read_inode\n");
-+
-+	if (msblk->swap) {
-+		if (!squashfs_get_cached_block(s, (char *) sinodeb, block,
-+					offset, sizeof(*sinodeb), &next_block,
-+					&next_offset))
-+			goto failed_read;
-+		SQUASHFS_SWAP_BASE_INODE_HEADER(inodeb, sinodeb,
-+					sizeof(*sinodeb));
-+	} else
-+		if (!squashfs_get_cached_block(s, (char *) inodeb, block,
-+					offset, sizeof(*inodeb), &next_block,
-+					&next_offset))
-+			goto failed_read;
-+
-+	squashfs_new_inode(msblk, i, inodeb);
-+
-+	switch(inodeb->inode_type) {
-+		case SQUASHFS_FILE_TYPE: {
-+			unsigned int frag_size;
-+			long long frag_blk;
-+			struct squashfs_reg_inode_header *inodep = &id.reg;
-+			struct squashfs_reg_inode_header *sinodep = &sid.reg;
-+				
-+			if (msblk->swap) {
-+				if (!squashfs_get_cached_block(s, (char *)
-+						sinodep, block, offset,
-+						sizeof(*sinodep), &next_block,
-+						&next_offset))
-+					goto failed_read;
-+				SQUASHFS_SWAP_REG_INODE_HEADER(inodep, sinodep);
-+			} else
-+				if (!squashfs_get_cached_block(s, (char *)
-+						inodep, block, offset,
-+						sizeof(*inodep), &next_block,
-+						&next_offset))
-+					goto failed_read;
-+
-+			frag_blk = SQUASHFS_INVALID_BLK;
-+			if (inodep->fragment != SQUASHFS_INVALID_FRAG &&
-+					!get_fragment_location(s,
-+					inodep->fragment, &frag_blk, &frag_size))
-+				goto failed_read;
-+				
-+			i->i_nlink = 1;
-+			i->i_size = inodep->file_size;
-+			i->i_fop = &generic_ro_fops;
-+			i->i_mode |= S_IFREG;
-+			i->i_blocks = ((i->i_size - 1) >> 9) + 1;
-+			SQUASHFS_I(i)->u.s1.fragment_start_block = frag_blk;
-+			SQUASHFS_I(i)->u.s1.fragment_size = frag_size;
-+			SQUASHFS_I(i)->u.s1.fragment_offset = inodep->offset;
-+			SQUASHFS_I(i)->start_block = inodep->start_block;
-+			SQUASHFS_I(i)->u.s1.block_list_start = next_block;
-+			SQUASHFS_I(i)->offset = next_offset;
-+			if (sblk->block_size > 4096)
-+				i->i_data.a_ops = &squashfs_aops;
-+			else
-+				i->i_data.a_ops = &squashfs_aops_4K;
-+
-+			TRACE("File inode %x:%x, start_block %llx, "
-+					"block_list_start %llx, offset %x\n",
-+					SQUASHFS_INODE_BLK(inode), offset,
-+					inodep->start_block, next_block,
-+					next_offset);
-+			break;
-+		}
-+		case SQUASHFS_LREG_TYPE: {
-+			unsigned int frag_size;
-+			long long frag_blk;
-+			struct squashfs_lreg_inode_header *inodep = &id.lreg;
-+			struct squashfs_lreg_inode_header *sinodep = &sid.lreg;
-+				
-+			if (msblk->swap) {
-+				if (!squashfs_get_cached_block(s, (char *)
-+						sinodep, block, offset,
-+						sizeof(*sinodep), &next_block,
-+						&next_offset))
-+					goto failed_read;
-+				SQUASHFS_SWAP_LREG_INODE_HEADER(inodep, sinodep);
-+			} else
-+				if (!squashfs_get_cached_block(s, (char *)
-+						inodep, block, offset,
-+						sizeof(*inodep), &next_block,
-+						&next_offset))
-+					goto failed_read;
-+
-+			frag_blk = SQUASHFS_INVALID_BLK;
-+			if (inodep->fragment != SQUASHFS_INVALID_FRAG &&
-+					!get_fragment_location(s,
-+					inodep->fragment, &frag_blk, &frag_size))
-+				goto failed_read;
-+				
-+			i->i_nlink = inodep->nlink;
-+			i->i_size = inodep->file_size;
-+			i->i_fop = &generic_ro_fops;
-+			i->i_mode |= S_IFREG;
-+			i->i_blocks = ((i->i_size - 1) >> 9) + 1;
-+			SQUASHFS_I(i)->u.s1.fragment_start_block = frag_blk;
-+			SQUASHFS_I(i)->u.s1.fragment_size = frag_size;
-+			SQUASHFS_I(i)->u.s1.fragment_offset = inodep->offset;
-+			SQUASHFS_I(i)->start_block = inodep->start_block;
-+			SQUASHFS_I(i)->u.s1.block_list_start = next_block;
-+			SQUASHFS_I(i)->offset = next_offset;
-+			if (sblk->block_size > 4096)
-+				i->i_data.a_ops = &squashfs_aops;
-+			else
-+				i->i_data.a_ops = &squashfs_aops_4K;
-+
-+			TRACE("File inode %x:%x, start_block %llx, "
-+					"block_list_start %llx, offset %x\n",
-+					SQUASHFS_INODE_BLK(inode), offset,
-+					inodep->start_block, next_block,
-+					next_offset);
-+			break;
-+		}
-+		case SQUASHFS_DIR_TYPE: {
-+			struct squashfs_dir_inode_header *inodep = &id.dir;
-+			struct squashfs_dir_inode_header *sinodep = &sid.dir;
-+
-+			if (msblk->swap) {
-+				if (!squashfs_get_cached_block(s, (char *)
-+						sinodep, block, offset,
-+						sizeof(*sinodep), &next_block,
-+						&next_offset))
-+					goto failed_read;
-+				SQUASHFS_SWAP_DIR_INODE_HEADER(inodep, sinodep);
-+			} else
-+				if (!squashfs_get_cached_block(s, (char *)
-+						inodep, block, offset,
-+						sizeof(*inodep), &next_block,
-+						&next_offset))
-+					goto failed_read;
-+
-+			i->i_nlink = inodep->nlink;
-+			i->i_size = inodep->file_size;
-+			i->i_op = &squashfs_dir_inode_ops;
-+			i->i_fop = &squashfs_dir_ops;
-+			i->i_mode |= S_IFDIR;
-+			SQUASHFS_I(i)->start_block = inodep->start_block;
-+			SQUASHFS_I(i)->offset = inodep->offset;
-+			SQUASHFS_I(i)->u.s2.directory_index_count = 0;
-+			SQUASHFS_I(i)->u.s2.parent_inode = inodep->parent_inode;
-+
-+			TRACE("Directory inode %x:%x, start_block %x, offset "
-+					"%x\n", SQUASHFS_INODE_BLK(inode),
-+					offset, inodep->start_block,
-+					inodep->offset);
-+			break;
-+		}
-+		case SQUASHFS_LDIR_TYPE: {
-+			struct squashfs_ldir_inode_header *inodep = &id.ldir;
-+			struct squashfs_ldir_inode_header *sinodep = &sid.ldir;
-+
-+			if (msblk->swap) {
-+				if (!squashfs_get_cached_block(s, (char *)
-+						sinodep, block, offset,
-+						sizeof(*sinodep), &next_block,
-+						&next_offset))
-+					goto failed_read;
-+				SQUASHFS_SWAP_LDIR_INODE_HEADER(inodep,
-+						sinodep);
-+			} else
-+				if (!squashfs_get_cached_block(s, (char *)
-+						inodep, block, offset,
-+						sizeof(*inodep), &next_block,
-+						&next_offset))
-+					goto failed_read;
-+
-+			i->i_nlink = inodep->nlink;
-+			i->i_size = inodep->file_size;
-+			i->i_op = &squashfs_dir_inode_ops;
-+			i->i_fop = &squashfs_dir_ops;
-+			i->i_mode |= S_IFDIR;
-+			SQUASHFS_I(i)->start_block = inodep->start_block;
-+			SQUASHFS_I(i)->offset = inodep->offset;
-+			SQUASHFS_I(i)->u.s2.directory_index_start = next_block;
-+			SQUASHFS_I(i)->u.s2.directory_index_offset =
-+								next_offset;
-+			SQUASHFS_I(i)->u.s2.directory_index_count =
-+								inodep->i_count;
-+			SQUASHFS_I(i)->u.s2.parent_inode = inodep->parent_inode;
-+
-+			TRACE("Long directory inode %x:%x, start_block %x, "
-+					"offset %x\n",
-+					SQUASHFS_INODE_BLK(inode), offset,
-+					inodep->start_block, inodep->offset);
-+			break;
-+		}
-+		case SQUASHFS_SYMLINK_TYPE: {
-+			struct squashfs_symlink_inode_header *inodep =
-+								&id.symlink;
-+			struct squashfs_symlink_inode_header *sinodep =
-+								&sid.symlink;
-+	
-+			if (msblk->swap) {
-+				if (!squashfs_get_cached_block(s, (char *)
-+						sinodep, block, offset,
-+						sizeof(*sinodep), &next_block,
-+						&next_offset))
-+					goto failed_read;
-+				SQUASHFS_SWAP_SYMLINK_INODE_HEADER(inodep,
-+								sinodep);
-+			} else
-+				if (!squashfs_get_cached_block(s, (char *)
-+						inodep, block, offset,
-+						sizeof(*inodep), &next_block,
-+						&next_offset))
-+					goto failed_read;
-+
-+			i->i_nlink = inodep->nlink;
-+			i->i_size = inodep->symlink_size;
-+			i->i_op = &page_symlink_inode_operations;
-+			i->i_data.a_ops = &squashfs_symlink_aops;
-+			i->i_mode |= S_IFLNK;
-+			SQUASHFS_I(i)->start_block = next_block;
-+			SQUASHFS_I(i)->offset = next_offset;
-+
-+			TRACE("Symbolic link inode %x:%x, start_block %llx, "
-+					"offset %x\n",
-+					SQUASHFS_INODE_BLK(inode), offset,
-+					next_block, next_offset);
-+			break;
-+		 }
-+		 case SQUASHFS_BLKDEV_TYPE:
-+		 case SQUASHFS_CHRDEV_TYPE: {
-+			struct squashfs_dev_inode_header *inodep = &id.dev;
-+			struct squashfs_dev_inode_header *sinodep = &sid.dev;
-+
-+			if (msblk->swap) {
-+				if (!squashfs_get_cached_block(s, (char *)
-+						sinodep, block, offset,
-+						sizeof(*sinodep), &next_block,
-+						&next_offset))
-+					goto failed_read;
-+				SQUASHFS_SWAP_DEV_INODE_HEADER(inodep, sinodep);
-+			} else	
-+				if (!squashfs_get_cached_block(s, (char *)
-+						inodep, block, offset,
-+						sizeof(*inodep), &next_block,
-+						&next_offset))
-+					goto failed_read;
-+
-+			i->i_nlink = inodep->nlink;
-+			i->i_mode |= (inodeb->inode_type ==
-+					SQUASHFS_CHRDEV_TYPE) ?  S_IFCHR :
-+					S_IFBLK;
-+			init_special_inode(i, i->i_mode,
-+					old_decode_dev(inodep->rdev));
-+
-+			TRACE("Device inode %x:%x, rdev %x\n",
-+					SQUASHFS_INODE_BLK(inode), offset,
-+					inodep->rdev);
-+			break;
-+		 }
-+		 case SQUASHFS_FIFO_TYPE:
-+		 case SQUASHFS_SOCKET_TYPE: {
-+			struct squashfs_ipc_inode_header *inodep = &id.ipc;
-+			struct squashfs_ipc_inode_header *sinodep = &sid.ipc;
-+
-+			if (msblk->swap) {
-+				if (!squashfs_get_cached_block(s, (char *)
-+						sinodep, block, offset,
-+						sizeof(*sinodep), &next_block,
-+						&next_offset))
-+					goto failed_read;
-+				SQUASHFS_SWAP_IPC_INODE_HEADER(inodep, sinodep);
-+			} else	
-+				if (!squashfs_get_cached_block(s, (char *)
-+						inodep, block, offset,
-+						sizeof(*inodep), &next_block,
-+						&next_offset))
-+					goto failed_read;
-+
-+			i->i_nlink = inodep->nlink;
-+			i->i_mode |= (inodeb->inode_type == SQUASHFS_FIFO_TYPE)
-+							? S_IFIFO : S_IFSOCK;
-+			init_special_inode(i, i->i_mode, 0);
-+			break;
-+		 }
-+		 default:
-+			ERROR("Unknown inode type %d in squashfs_iget!\n",
-+					inodeb->inode_type);
-+			goto failed_read1;
-+	}
-+	
-+	return 1;
-+
-+failed_read:
-+	ERROR("Unable to read inode [%llx:%x]\n", block, offset);
-+
-+failed_read1:
-+	make_bad_inode(i);
-+	return 0;
-+}
-+
-+
-+static int read_inode_lookup_table(struct super_block *s)
-+{
-+	struct squashfs_sb_info *msblk = s->s_fs_info;
-+	struct squashfs_super_block *sblk = &msblk->sblk;
-+	unsigned int length = SQUASHFS_LOOKUP_BLOCK_BYTES(sblk->inodes);
-+
-+	TRACE("In read_inode_lookup_table, length %d\n", length);
-+
-+	/* Allocate inode lookup table */
-+	if (!(msblk->inode_lookup_table = kmalloc(length, GFP_KERNEL))) {
-+		ERROR("Failed to allocate inode lookup table\n");
-+		return 0;
-+	}
-+   
-+	if (!squashfs_read_data(s, (char *) msblk->inode_lookup_table,
-+			sblk->lookup_table_start, length |
-+			SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length)) {
-+		ERROR("unable to read inode lookup table\n");
-+		return 0;
-+	}
-+
-+	if (msblk->swap) {
-+		int i;
-+		long long block;
-+
-+		for (i = 0; i < SQUASHFS_LOOKUP_BLOCKS(sblk->inodes); i++) {
-+			SQUASHFS_SWAP_LOOKUP_BLOCKS((&block),
-+						&msblk->inode_lookup_table[i], 1);
-+			msblk->inode_lookup_table[i] = block;
-+		}
-+	}
-+
-+	return 1;
-+}
-+
-+
-+static int read_fragment_index_table(struct super_block *s)
-+{
-+	struct squashfs_sb_info *msblk = s->s_fs_info;
-+	struct squashfs_super_block *sblk = &msblk->sblk;
-+	unsigned int length = SQUASHFS_FRAGMENT_INDEX_BYTES(sblk->fragments);
-+
-+	if(length == 0)
-+		return 1;
-+
-+	/* Allocate fragment index table */
-+	if (!(msblk->fragment_index = kmalloc(length, GFP_KERNEL))) {
-+		ERROR("Failed to allocate fragment index table\n");
-+		return 0;
-+	}
-+   
-+	if (!squashfs_read_data(s, (char *) msblk->fragment_index,
-+			sblk->fragment_table_start, length |
-+			SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length)) {
-+		ERROR("unable to read fragment index table\n");
-+		return 0;
-+	}
-+
-+	if (msblk->swap) {
-+		int i;
-+		long long fragment;
-+
-+		for (i = 0; i < SQUASHFS_FRAGMENT_INDEXES(sblk->fragments); i++) {
-+			SQUASHFS_SWAP_FRAGMENT_INDEXES((&fragment),
-+						&msblk->fragment_index[i], 1);
-+			msblk->fragment_index[i] = fragment;
-+		}
-+	}
-+
-+	return 1;
-+}
-+
-+
-+static int supported_squashfs_filesystem(struct squashfs_sb_info *msblk, int silent)
-+{
-+	struct squashfs_super_block *sblk = &msblk->sblk;
-+
-+	msblk->read_inode = squashfs_read_inode;
-+	msblk->read_blocklist = read_blocklist;
-+	msblk->read_fragment_index_table = read_fragment_index_table;
-+
-+	if (sblk->s_major == 1) {
-+		if (!squashfs_1_0_supported(msblk)) {
-+			SERROR("Major/Minor mismatch, Squashfs 1.0 filesystems "
-+				"are unsupported\n");
-+			SERROR("Please recompile with "
-+				"Squashfs 1.0 support enabled\n");
-+			return 0;
-+		}
-+	} else if (sblk->s_major == 2) {
-+		if (!squashfs_2_0_supported(msblk)) {
-+			SERROR("Major/Minor mismatch, Squashfs 2.0 filesystems "
-+				"are unsupported\n");
-+			SERROR("Please recompile with "
-+				"Squashfs 2.0 support enabled\n");
-+			return 0;
-+		}
-+	} else if(sblk->s_major != SQUASHFS_MAJOR || sblk->s_minor >
-+			SQUASHFS_MINOR) {
-+		SERROR("Major/Minor mismatch, trying to mount newer %d.%d "
-+				"filesystem\n", sblk->s_major, sblk->s_minor);
-+		SERROR("Please update your kernel\n");
-+		return 0;
-+	}
-+
-+	return 1;
-+}
-+
-+
-+static int squashfs_fill_super(struct super_block *s, void *data, int silent)
-+{
-+	struct squashfs_sb_info *msblk;
-+	struct squashfs_super_block *sblk;
-+	int i;
-+	char b[BDEVNAME_SIZE];
-+	struct inode *root;
-+
-+	TRACE("Entered squashfs_read_superblock\n");
-+
-+	if (!(s->s_fs_info = kmalloc(sizeof(struct squashfs_sb_info),
-+						GFP_KERNEL))) {
-+		ERROR("Failed to allocate superblock\n");
-+		goto failure;
-+	}
-+	memset(s->s_fs_info, 0, sizeof(struct squashfs_sb_info));
-+	msblk = s->s_fs_info;
-+	if (!(msblk->stream.workspace = vmalloc(zlib_inflate_workspacesize()))) {
-+		ERROR("Failed to allocate zlib workspace\n");
-+		goto failure;
-+	}
-+	sblk = &msblk->sblk;
-+	
-+	msblk->devblksize = sb_min_blocksize(s, BLOCK_SIZE);
-+	msblk->devblksize_log2 = ffz(~msblk->devblksize);
-+
-+	mutex_init(&msblk->read_data_mutex);
-+	mutex_init(&msblk->read_page_mutex);
-+	mutex_init(&msblk->block_cache_mutex);
-+	mutex_init(&msblk->fragment_mutex);
-+	mutex_init(&msblk->meta_index_mutex);
-+	
-+	init_waitqueue_head(&msblk->waitq);
-+	init_waitqueue_head(&msblk->fragment_wait_queue);
-+
-+	sblk->bytes_used = sizeof(struct squashfs_super_block);
-+	if (!squashfs_read_data(s, (char *) sblk, SQUASHFS_START,
-+					sizeof(struct squashfs_super_block) |
-+					SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, sizeof(struct squashfs_super_block))) {
-+		SERROR("unable to read superblock\n");
-+		goto failed_mount;
-+	}
-+
-+	/* Check it is a SQUASHFS superblock */
-+	msblk->swap = 0;
-+	if ((s->s_magic = sblk->s_magic) != SQUASHFS_MAGIC) {
-+		if (sblk->s_magic == SQUASHFS_MAGIC_SWAP) {
-+			struct squashfs_super_block ssblk;
-+
-+			WARNING("Mounting a different endian SQUASHFS "
-+				"filesystem on %s\n", bdevname(s->s_bdev, b));
-+
-+			SQUASHFS_SWAP_SUPER_BLOCK(&ssblk, sblk);
-+			memcpy(sblk, &ssblk, sizeof(struct squashfs_super_block));
-+			msblk->swap = 1;
-+		} else  {
-+			SERROR("Can't find a SQUASHFS superblock on %s\n",
-+							bdevname(s->s_bdev, b));
-+			goto failed_mount;
-+		}
-+	}
-+
-+	/* Check the MAJOR & MINOR versions */
-+	if(!supported_squashfs_filesystem(msblk, silent))
-+		goto failed_mount;
-+
-+	/* Check the filesystem does not extend beyond the end of the
-+	   block device */
-+	if(sblk->bytes_used < 0 || sblk->bytes_used > i_size_read(s->s_bdev->bd_inode))
-+		goto failed_mount;
-+
-+	/* Check the root inode for sanity */
-+	if (SQUASHFS_INODE_OFFSET(sblk->root_inode) > SQUASHFS_METADATA_SIZE)
-+		goto failed_mount;
-+
-+	TRACE("Found valid superblock on %s\n", bdevname(s->s_bdev, b));
-+	TRACE("Inodes are %scompressed\n",
-+					SQUASHFS_UNCOMPRESSED_INODES
-+					(sblk->flags) ? "un" : "");
-+	TRACE("Data is %scompressed\n",
-+					SQUASHFS_UNCOMPRESSED_DATA(sblk->flags)
-+					? "un" : "");
-+	TRACE("Check data is %s present in the filesystem\n",
-+					SQUASHFS_CHECK_DATA(sblk->flags) ?
-+					"" : "not");
-+	TRACE("Filesystem size %lld bytes\n", sblk->bytes_used);
-+	TRACE("Block size %d\n", sblk->block_size);
-+	TRACE("Number of inodes %d\n", sblk->inodes);
-+	if (sblk->s_major > 1)
-+		TRACE("Number of fragments %d\n", sblk->fragments);
-+	TRACE("Number of uids %d\n", sblk->no_uids);
-+	TRACE("Number of gids %d\n", sblk->no_guids);
-+	TRACE("sblk->inode_table_start %llx\n", sblk->inode_table_start);
-+	TRACE("sblk->directory_table_start %llx\n", sblk->directory_table_start);
-+	if (sblk->s_major > 1)
-+		TRACE("sblk->fragment_table_start %llx\n",
-+					sblk->fragment_table_start);
-+	TRACE("sblk->uid_start %llx\n", sblk->uid_start);
-+
-+	s->s_flags |= MS_RDONLY;
-+	s->s_op = &squashfs_super_ops;
-+
-+	/* Init inode_table block pointer array */
-+	if (!(msblk->block_cache = kmalloc(sizeof(struct squashfs_cache) *
-+					SQUASHFS_CACHED_BLKS, GFP_KERNEL))) {
-+		ERROR("Failed to allocate block cache\n");
-+		goto failed_mount;
-+	}
-+
-+	for (i = 0; i < SQUASHFS_CACHED_BLKS; i++)
-+		msblk->block_cache[i].block = SQUASHFS_INVALID_BLK;
-+
-+	msblk->next_cache = 0;
-+
-+	/* Allocate read_page block */
-+	if (!(msblk->read_page = kmalloc(sblk->block_size, GFP_KERNEL))) {
-+		ERROR("Failed to allocate read_page block\n");
-+		goto failed_mount;
-+	}
-+
-+	/* Allocate uid and gid tables */
-+	if (!(msblk->uid = kmalloc((sblk->no_uids + sblk->no_guids) *
-+					sizeof(unsigned int), GFP_KERNEL))) {
-+		ERROR("Failed to allocate uid/gid table\n");
-+		goto failed_mount;
-+	}
-+	msblk->guid = msblk->uid + sblk->no_uids;
-+   
-+	if (msblk->swap) {
-+		unsigned int suid[sblk->no_uids + sblk->no_guids];
-+
-+		if (!squashfs_read_data(s, (char *) &suid, sblk->uid_start,
-+					((sblk->no_uids + sblk->no_guids) *
-+					 sizeof(unsigned int)) |
-+					SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, (sblk->no_uids + sblk->no_guids) * sizeof(unsigned int))) {
-+			ERROR("unable to read uid/gid table\n");
-+			goto failed_mount;
-+		}
-+
-+		SQUASHFS_SWAP_DATA(msblk->uid, suid, (sblk->no_uids +
-+			sblk->no_guids), (sizeof(unsigned int) * 8));
-+	} else
-+		if (!squashfs_read_data(s, (char *) msblk->uid, sblk->uid_start,
-+					((sblk->no_uids + sblk->no_guids) *
-+					 sizeof(unsigned int)) |
-+					SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, (sblk->no_uids + sblk->no_guids) * sizeof(unsigned int))) {
-+			ERROR("unable to read uid/gid table\n");
-+			goto failed_mount;
-+		}
-+
-+
-+	if (sblk->s_major == 1 && squashfs_1_0_supported(msblk))
-+		goto allocate_root;
-+
-+	if (!(msblk->fragment = kmalloc(sizeof(struct squashfs_fragment_cache) *
-+				SQUASHFS_CACHED_FRAGMENTS, GFP_KERNEL))) {
-+		ERROR("Failed to allocate fragment block cache\n");
-+		goto failed_mount;
-+	}
-+
-+	for (i = 0; i < SQUASHFS_CACHED_FRAGMENTS; i++) {
-+		msblk->fragment[i].locked = 0;
-+		msblk->fragment[i].block = SQUASHFS_INVALID_BLK;
-+		msblk->fragment[i].data = NULL;
-+	}
-+
-+	msblk->next_fragment = 0;
-+
-+	/* Allocate and read fragment index table */
-+	if (msblk->read_fragment_index_table(s) == 0)
-+		goto failed_mount;
-+
-+	if(sblk->s_major < 3 || sblk->lookup_table_start == SQUASHFS_INVALID_BLK)
-+		goto allocate_root;
-+
-+	/* Allocate and read inode lookup table */
-+	if (read_inode_lookup_table(s) == 0)
-+		goto failed_mount;
-+
-+	s->s_op = &squashfs_export_super_ops;
-+	s->s_export_op = &squashfs_export_ops;
-+
-+allocate_root:
-+	root = new_inode(s);
-+	if ((msblk->read_inode)(root, sblk->root_inode) == 0)
-+		goto failed_mount;
-+	insert_inode_hash(root);
-+
-+	if ((s->s_root = d_alloc_root(root)) == NULL) {
-+		ERROR("Root inode create failed\n");
-+		iput(root);
-+		goto failed_mount;
-+	}
-+
-+	TRACE("Leaving squashfs_read_super\n");
-+	return 0;
-+
-+failed_mount:
-+	kfree(msblk->inode_lookup_table);
-+	kfree(msblk->fragment_index);
-+	kfree(msblk->fragment);
-+	kfree(msblk->uid);
-+	kfree(msblk->read_page);
-+	kfree(msblk->block_cache);
-+	kfree(msblk->fragment_index_2);
-+	vfree(msblk->stream.workspace);
-+	kfree(s->s_fs_info);
-+	s->s_fs_info = NULL;
-+	return -EINVAL;
-+
-+failure:
-+	return -ENOMEM;
-+}
-+
-+
-+static int squashfs_statfs(struct dentry *dentry, struct kstatfs *buf)
-+{
-+	struct squashfs_sb_info *msblk = dentry->d_sb->s_fs_info;
-+	struct squashfs_super_block *sblk = &msblk->sblk;
-+
-+	TRACE("Entered squashfs_statfs\n");
-+
-+	buf->f_type = SQUASHFS_MAGIC;
-+	buf->f_bsize = sblk->block_size;
-+	buf->f_blocks = ((sblk->bytes_used - 1) >> sblk->block_log) + 1;
-+	buf->f_bfree = buf->f_bavail = 0;
-+	buf->f_files = sblk->inodes;
-+	buf->f_ffree = 0;
-+	buf->f_namelen = SQUASHFS_NAME_LEN;
-+
-+	return 0;
-+}
-+
-+
-+static int squashfs_symlink_readpage(struct file *file, struct page *page)
-+{
-+	struct inode *inode = page->mapping->host;
-+	int index = page->index << PAGE_CACHE_SHIFT, length, bytes;
-+	long long block = SQUASHFS_I(inode)->start_block;
-+	int offset = SQUASHFS_I(inode)->offset;
-+	void *pageaddr = kmap(page);
-+
-+	TRACE("Entered squashfs_symlink_readpage, page index %ld, start block "
-+				"%llx, offset %x\n", page->index,
-+				SQUASHFS_I(inode)->start_block,
-+				SQUASHFS_I(inode)->offset);
-+
-+	for (length = 0; length < index; length += bytes) {
-+		if (!(bytes = squashfs_get_cached_block(inode->i_sb, NULL,
-+				block, offset, PAGE_CACHE_SIZE, &block,
-+				&offset))) {
-+			ERROR("Unable to read symbolic link [%llx:%x]\n", block,
-+					offset);
-+			goto skip_read;
-+		}
-+	}
-+
-+	if (length != index) {
-+		ERROR("(squashfs_symlink_readpage) length != index\n");
-+		bytes = 0;
-+		goto skip_read;
-+	}
-+
-+	bytes = (i_size_read(inode) - length) > PAGE_CACHE_SIZE ? PAGE_CACHE_SIZE :
-+					i_size_read(inode) - length;
-+
-+	if (!(bytes = squashfs_get_cached_block(inode->i_sb, pageaddr, block,
-+					offset, bytes, &block, &offset)))
-+		ERROR("Unable to read symbolic link [%llx:%x]\n", block, offset);
-+
-+skip_read:
-+	memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
-+	kunmap(page);
-+	flush_dcache_page(page);
-+	SetPageUptodate(page);
-+	unlock_page(page);
-+
-+	return 0;
-+}
-+
-+
-+struct meta_index *locate_meta_index(struct inode *inode, int index, int offset)
-+{
-+	struct meta_index *meta = NULL;
-+	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
-+	int i;
-+
-+	mutex_lock(&msblk->meta_index_mutex);
-+
-+	TRACE("locate_meta_index: index %d, offset %d\n", index, offset);
-+
-+	if(msblk->meta_index == NULL)
-+		goto not_allocated;
-+
-+	for (i = 0; i < SQUASHFS_META_NUMBER; i ++)
-+		if (msblk->meta_index[i].inode_number == inode->i_ino &&
-+				msblk->meta_index[i].offset >= offset &&
-+				msblk->meta_index[i].offset <= index &&
-+				msblk->meta_index[i].locked == 0) {
-+			TRACE("locate_meta_index: entry %d, offset %d\n", i,
-+					msblk->meta_index[i].offset);
-+			meta = &msblk->meta_index[i];
-+			offset = meta->offset;
-+		}
-+
-+	if (meta)
-+		meta->locked = 1;
-+
-+not_allocated:
-+	mutex_unlock(&msblk->meta_index_mutex);
-+
-+	return meta;
-+}
-+
-+
-+struct meta_index *empty_meta_index(struct inode *inode, int offset, int skip)
-+{
-+	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
-+	struct meta_index *meta = NULL;
-+	int i;
-+
-+	mutex_lock(&msblk->meta_index_mutex);
-+
-+	TRACE("empty_meta_index: offset %d, skip %d\n", offset, skip);
-+
-+	if(msblk->meta_index == NULL) {
-+		if (!(msblk->meta_index = kmalloc(sizeof(struct meta_index) *
-+					SQUASHFS_META_NUMBER, GFP_KERNEL))) {
-+			ERROR("Failed to allocate meta_index\n");
-+			goto failed;
-+		}
-+		for(i = 0; i < SQUASHFS_META_NUMBER; i++) {
-+			msblk->meta_index[i].inode_number = 0;
-+			msblk->meta_index[i].locked = 0;
-+		}
-+		msblk->next_meta_index = 0;
-+	}
-+
-+	for(i = SQUASHFS_META_NUMBER; i &&
-+			msblk->meta_index[msblk->next_meta_index].locked; i --)
-+		msblk->next_meta_index = (msblk->next_meta_index + 1) %
-+			SQUASHFS_META_NUMBER;
-+
-+	if(i == 0) {
-+		TRACE("empty_meta_index: failed!\n");
-+		goto failed;
-+	}
-+
-+	TRACE("empty_meta_index: returned meta entry %d, %p\n",
-+			msblk->next_meta_index,
-+			&msblk->meta_index[msblk->next_meta_index]);
-+
-+	meta = &msblk->meta_index[msblk->next_meta_index];
-+	msblk->next_meta_index = (msblk->next_meta_index + 1) %
-+			SQUASHFS_META_NUMBER;
-+
-+	meta->inode_number = inode->i_ino;
-+	meta->offset = offset;
-+	meta->skip = skip;
-+	meta->entries = 0;
-+	meta->locked = 1;
-+
-+failed:
-+	mutex_unlock(&msblk->meta_index_mutex);
-+	return meta;
-+}
-+
-+
-+void release_meta_index(struct inode *inode, struct meta_index *meta)
-+{
-+	meta->locked = 0;
-+	smp_mb();
-+}
-+
-+
-+static int read_block_index(struct super_block *s, int blocks, char *block_list,
-+		long long *start_block, int *offset)
-+{
-+	struct squashfs_sb_info *msblk = s->s_fs_info;
-+	unsigned int *block_listp;
-+	int block = 0;
-+	
-+	if (msblk->swap) {
-+		char sblock_list[blocks << 2];
-+
-+		if (!squashfs_get_cached_block(s, sblock_list, *start_block,
-+				*offset, blocks << 2, start_block, offset)) {
-+			ERROR("Unable to read block list [%llx:%x]\n",
-+				*start_block, *offset);
-+			goto failure;
-+		}
-+		SQUASHFS_SWAP_INTS(((unsigned int *)block_list),
-+				((unsigned int *)sblock_list), blocks);
-+	} else
-+		if (!squashfs_get_cached_block(s, block_list, *start_block,
-+				*offset, blocks << 2, start_block, offset)) {
-+			ERROR("Unable to read block list [%llx:%x]\n",
-+				*start_block, *offset);
-+			goto failure;
-+		}
-+
-+	for (block_listp = (unsigned int *) block_list; blocks;
-+				block_listp++, blocks --)
-+		block += SQUASHFS_COMPRESSED_SIZE_BLOCK(*block_listp);
-+
-+	return block;
-+
-+failure:
-+	return -1;
-+}
-+
-+
-+#define SIZE 256
-+
-+static inline int calculate_skip(int blocks) {
-+	int skip = (blocks - 1) / ((SQUASHFS_SLOTS * SQUASHFS_META_ENTRIES + 1) * SQUASHFS_META_INDEXES);
-+	return skip >= 7 ? 7 : skip + 1;
-+}
-+
-+
-+static int get_meta_index(struct inode *inode, int index,
-+		long long *index_block, int *index_offset,
-+		long long *data_block, char *block_list)
-+{
-+	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
-+	struct squashfs_super_block *sblk = &msblk->sblk;
-+	int skip = calculate_skip(i_size_read(inode) >> sblk->block_log);
-+	int offset = 0;
-+	struct meta_index *meta;
-+	struct meta_entry *meta_entry;
-+	long long cur_index_block = SQUASHFS_I(inode)->u.s1.block_list_start;
-+	int cur_offset = SQUASHFS_I(inode)->offset;
-+	long long cur_data_block = SQUASHFS_I(inode)->start_block;
-+	int i;
-+ 
-+	index /= SQUASHFS_META_INDEXES * skip;
-+
-+	while ( offset < index ) {
-+		meta = locate_meta_index(inode, index, offset + 1);
-+
-+		if (meta == NULL) {
-+			if ((meta = empty_meta_index(inode, offset + 1,
-+							skip)) == NULL)
-+				goto all_done;
-+		} else {
-+			if(meta->entries == 0)
-+				goto failed;
-+			offset = index < meta->offset + meta->entries ? index :
-+				meta->offset + meta->entries - 1;
-+			meta_entry = &meta->meta_entry[offset - meta->offset];
-+			cur_index_block = meta_entry->index_block + sblk->inode_table_start;
-+			cur_offset = meta_entry->offset;
-+			cur_data_block = meta_entry->data_block;
-+			TRACE("get_meta_index: offset %d, meta->offset %d, "
-+				"meta->entries %d\n", offset, meta->offset,
-+				meta->entries);
-+			TRACE("get_meta_index: index_block 0x%llx, offset 0x%x"
-+				" data_block 0x%llx\n", cur_index_block,
-+				cur_offset, cur_data_block);
-+		}
-+
-+		for (i = meta->offset + meta->entries; i <= index &&
-+				i < meta->offset + SQUASHFS_META_ENTRIES; i++) {
-+			int blocks = skip * SQUASHFS_META_INDEXES;
-+
-+			while (blocks) {
-+				int block = blocks > (SIZE >> 2) ? (SIZE >> 2) :
-+					blocks;
-+				int res = read_block_index(inode->i_sb, block,
-+					block_list, &cur_index_block,
-+					&cur_offset);
-+
-+				if (res == -1)
-+					goto failed;
-+
-+				cur_data_block += res;
-+				blocks -= block;
-+			}
-+
-+			meta_entry = &meta->meta_entry[i - meta->offset];
-+			meta_entry->index_block = cur_index_block - sblk->inode_table_start;
-+			meta_entry->offset = cur_offset;
-+			meta_entry->data_block = cur_data_block;
-+			meta->entries ++;
-+			offset ++;
-+		}
-+
-+		TRACE("get_meta_index: meta->offset %d, meta->entries %d\n",
-+				meta->offset, meta->entries);
-+
-+		release_meta_index(inode, meta);
-+	}
-+
-+all_done:
-+	*index_block = cur_index_block;
-+	*index_offset = cur_offset;
-+	*data_block = cur_data_block;
-+
-+	return offset * SQUASHFS_META_INDEXES * skip;
-+
-+failed:
-+	release_meta_index(inode, meta);
-+	return -1;
-+}
-+
-+
-+static long long read_blocklist(struct inode *inode, int index,
-+				int readahead_blks, char *block_list,
-+				unsigned short **block_p, unsigned int *bsize)
-+{
-+	long long block_ptr;
-+	int offset;
-+	long long block;
-+	int res = get_meta_index(inode, index, &block_ptr, &offset, &block,
-+		block_list);
-+
-+	TRACE("read_blocklist: res %d, index %d, block_ptr 0x%llx, offset"
-+		       " 0x%x, block 0x%llx\n", res, index, block_ptr, offset,
-+		       block);
-+
-+	if(res == -1)
-+		goto failure;
-+
-+	index -= res;
-+
-+	while ( index ) {
-+		int blocks = index > (SIZE >> 2) ? (SIZE >> 2) : index;
-+		int res = read_block_index(inode->i_sb, blocks, block_list,
-+			&block_ptr, &offset);
-+		if (res == -1)
-+			goto failure;
-+		block += res;
-+		index -= blocks;
-+	}
-+
-+	if (read_block_index(inode->i_sb, 1, block_list,
-+			&block_ptr, &offset) == -1)
-+		goto failure;
-+	*bsize = *((unsigned int *) block_list);
-+
-+	return block;
-+
-+failure:
-+	return 0;
-+}
-+
-+
-+static int squashfs_readpage(struct file *file, struct page *page)
-+{
-+	struct inode *inode = page->mapping->host;
-+	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
-+	struct squashfs_super_block *sblk = &msblk->sblk;
-+	unsigned char *block_list;
-+	long long block;
-+	unsigned int bsize, i = 0, bytes = 0, byte_offset = 0;
-+	int index = page->index >> (sblk->block_log - PAGE_CACHE_SHIFT);
-+ 	void *pageaddr;
-+	struct squashfs_fragment_cache *fragment = NULL;
-+	char *data_ptr = msblk->read_page;
-+	
-+	int mask = (1 << (sblk->block_log - PAGE_CACHE_SHIFT)) - 1;
-+	int start_index = page->index & ~mask;
-+	int end_index = start_index | mask;
-+
-+	TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
-+					page->index,
-+					SQUASHFS_I(inode)->start_block);
-+
-+	if (!(block_list = kmalloc(SIZE, GFP_KERNEL))) {
-+		ERROR("Failed to allocate block_list\n");
-+		goto skip_read;
-+	}
-+
-+	if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
-+					PAGE_CACHE_SHIFT))
-+		goto skip_read;
-+
-+	if (SQUASHFS_I(inode)->u.s1.fragment_start_block == SQUASHFS_INVALID_BLK
-+					|| index < (i_size_read(inode) >>
-+					sblk->block_log)) {
-+		if ((block = (msblk->read_blocklist)(inode, index, 1,
-+					block_list, NULL, &bsize)) == 0)
-+			goto skip_read;
-+
-+		mutex_lock(&msblk->read_page_mutex);
-+		
-+		if (!(bytes = squashfs_read_data(inode->i_sb, msblk->read_page,
-+					block, bsize, NULL, sblk->block_size))) {
-+			ERROR("Unable to read page, block %llx, size %x\n", block,
-+					bsize);
-+			mutex_unlock(&msblk->read_page_mutex);
-+			goto skip_read;
-+		}
-+	} else {
-+		if ((fragment = get_cached_fragment(inode->i_sb,
-+					SQUASHFS_I(inode)->
-+					u.s1.fragment_start_block,
-+					SQUASHFS_I(inode)->u.s1.fragment_size))
-+					== NULL) {
-+			ERROR("Unable to read page, block %llx, size %x\n",
-+					SQUASHFS_I(inode)->
-+					u.s1.fragment_start_block,
-+					(int) SQUASHFS_I(inode)->
-+					u.s1.fragment_size);
-+			goto skip_read;
-+		}
-+		bytes = SQUASHFS_I(inode)->u.s1.fragment_offset +
-+					(i_size_read(inode) & (sblk->block_size
-+					- 1));
-+		byte_offset = SQUASHFS_I(inode)->u.s1.fragment_offset;
-+		data_ptr = fragment->data;
-+	}
-+
-+	for (i = start_index; i <= end_index && byte_offset < bytes;
-+					i++, byte_offset += PAGE_CACHE_SIZE) {
-+		struct page *push_page;
-+		int avail = (bytes - byte_offset) > PAGE_CACHE_SIZE ?
-+					PAGE_CACHE_SIZE : bytes - byte_offset;
-+
-+		TRACE("bytes %d, i %d, byte_offset %d, available_bytes %d\n",
-+					bytes, i, byte_offset, avail);
-+
-+		push_page = (i == page->index) ? page :
-+			grab_cache_page_nowait(page->mapping, i);
-+
-+		if (!push_page)
-+			continue;
-+
-+		if (PageUptodate(push_page))
-+			goto skip_page;
-+
-+ 		pageaddr = kmap_atomic(push_page, KM_USER0);
-+		memcpy(pageaddr, data_ptr + byte_offset, avail);
-+		memset(pageaddr + avail, 0, PAGE_CACHE_SIZE - avail);
-+		kunmap_atomic(pageaddr, KM_USER0);
-+		flush_dcache_page(push_page);
-+		SetPageUptodate(push_page);
-+skip_page:
-+		unlock_page(push_page);
-+		if(i != page->index)
-+			page_cache_release(push_page);
-+	}
-+
-+	if (SQUASHFS_I(inode)->u.s1.fragment_start_block == SQUASHFS_INVALID_BLK
-+					|| index < (i_size_read(inode) >>
-+					sblk->block_log))
-+		mutex_unlock(&msblk->read_page_mutex);
-+	else
-+		release_cached_fragment(msblk, fragment);
-+
-+	kfree(block_list);
-+	return 0;
-+
-+skip_read:
-+	pageaddr = kmap_atomic(page, KM_USER0);
-+	memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
-+	kunmap_atomic(pageaddr, KM_USER0);
-+	flush_dcache_page(page);
-+	SetPageUptodate(page);
-+	unlock_page(page);
-+
-+	kfree(block_list);
-+	return 0;
-+}
-+
-+
-+static int squashfs_readpage4K(struct file *file, struct page *page)
-+{
-+	struct inode *inode = page->mapping->host;
-+	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
-+	struct squashfs_super_block *sblk = &msblk->sblk;
-+	unsigned char *block_list;
-+	long long block;
-+	unsigned int bsize, bytes = 0;
-+ 	void *pageaddr;
-+	
-+	TRACE("Entered squashfs_readpage4K, page index %lx, start block %llx\n",
-+					page->index,
-+					SQUASHFS_I(inode)->start_block);
-+
-+	if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
-+					PAGE_CACHE_SHIFT)) {
-+		block_list = NULL;
-+		goto skip_read;
-+	}
-+
-+	if (!(block_list = kmalloc(SIZE, GFP_KERNEL))) {
-+		ERROR("Failed to allocate block_list\n");
-+		goto skip_read;
-+	}
-+
-+	if (SQUASHFS_I(inode)->u.s1.fragment_start_block == SQUASHFS_INVALID_BLK
-+					|| page->index < (i_size_read(inode) >>
-+					sblk->block_log)) {
-+		block = (msblk->read_blocklist)(inode, page->index, 1,
-+					block_list, NULL, &bsize);
-+		if(block == 0)
-+			goto skip_read;
-+
-+		mutex_lock(&msblk->read_page_mutex);
-+		bytes = squashfs_read_data(inode->i_sb, msblk->read_page, block,
-+					bsize, NULL, sblk->block_size);
-+		if (bytes) {
-+			pageaddr = kmap_atomic(page, KM_USER0);
-+			memcpy(pageaddr, msblk->read_page, bytes);
-+			kunmap_atomic(pageaddr, KM_USER0);
-+		} else
-+			ERROR("Unable to read page, block %llx, size %x\n",
-+					block, bsize);
-+		mutex_unlock(&msblk->read_page_mutex);
-+	} else {
-+		struct squashfs_fragment_cache *fragment =
-+			get_cached_fragment(inode->i_sb,
-+					SQUASHFS_I(inode)->
-+					u.s1.fragment_start_block,
-+					SQUASHFS_I(inode)-> u.s1.fragment_size);
-+		if (fragment) {
-+			bytes = i_size_read(inode) & (sblk->block_size - 1);
-+			pageaddr = kmap_atomic(page, KM_USER0);
-+			memcpy(pageaddr, fragment->data + SQUASHFS_I(inode)->
-+					u.s1.fragment_offset, bytes);
-+			kunmap_atomic(pageaddr, KM_USER0);
-+			release_cached_fragment(msblk, fragment);
-+		} else
-+			ERROR("Unable to read page, block %llx, size %x\n",
-+					SQUASHFS_I(inode)->
-+					u.s1.fragment_start_block, (int)
-+					SQUASHFS_I(inode)-> u.s1.fragment_size);
-+	}
-+
-+skip_read:
-+	pageaddr = kmap_atomic(page, KM_USER0);
-+	memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
-+	kunmap_atomic(pageaddr, KM_USER0);
-+	flush_dcache_page(page);
-+	SetPageUptodate(page);
-+	unlock_page(page);
-+
-+	kfree(block_list);
-+	return 0;
-+}
-+
-+
-+static int get_dir_index_using_offset(struct super_block *s, long long 
-+				*next_block, unsigned int *next_offset,
-+				long long index_start,
-+				unsigned int index_offset, int i_count,
-+				long long f_pos)
-+{
-+	struct squashfs_sb_info *msblk = s->s_fs_info;
-+	struct squashfs_super_block *sblk = &msblk->sblk;
-+	int i, length = 0;
-+	struct squashfs_dir_index index;
-+
-+	TRACE("Entered get_dir_index_using_offset, i_count %d, f_pos %d\n",
-+					i_count, (unsigned int) f_pos);
-+
-+	f_pos =- 3;
-+	if (f_pos == 0)
-+		goto finish;
-+
-+	for (i = 0; i < i_count; i++) {
-+		if (msblk->swap) {
-+			struct squashfs_dir_index sindex;
-+			squashfs_get_cached_block(s, (char *) &sindex,
-+					index_start, index_offset,
-+					sizeof(sindex), &index_start,
-+					&index_offset);
-+			SQUASHFS_SWAP_DIR_INDEX(&index, &sindex);
-+		} else
-+			squashfs_get_cached_block(s, (char *) &index,
-+					index_start, index_offset,
-+					sizeof(index), &index_start,
-+					&index_offset);
-+
-+		if (index.index > f_pos)
-+			break;
-+
-+		squashfs_get_cached_block(s, NULL, index_start, index_offset,
-+					index.size + 1, &index_start,
-+					&index_offset);
-+
-+		length = index.index;
-+		*next_block = index.start_block + sblk->directory_table_start;
-+	}
-+
-+	*next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;
-+
-+finish:
-+	return length + 3;
-+}
-+
-+
-+static int get_dir_index_using_name(struct super_block *s, long long
-+				*next_block, unsigned int *next_offset,
-+				long long index_start,
-+				unsigned int index_offset, int i_count,
-+				const char *name, int size)
-+{
-+	struct squashfs_sb_info *msblk = s->s_fs_info;
-+	struct squashfs_super_block *sblk = &msblk->sblk;
-+	int i, length = 0;
-+	struct squashfs_dir_index *index;
-+	char *str;
-+
-+	TRACE("Entered get_dir_index_using_name, i_count %d\n", i_count);
-+
-+	if (!(str = kmalloc(sizeof(struct squashfs_dir_index) +
-+		(SQUASHFS_NAME_LEN + 1) * 2, GFP_KERNEL))) {
-+		ERROR("Failed to allocate squashfs_dir_index\n");
-+		goto failure;
-+	}
-+
-+	index = (struct squashfs_dir_index *) (str + SQUASHFS_NAME_LEN + 1);
-+	strncpy(str, name, size);
-+	str[size] = '\0';
-+
-+	for (i = 0; i < i_count; i++) {
-+		if (msblk->swap) {
-+			struct squashfs_dir_index sindex;
-+			squashfs_get_cached_block(s, (char *) &sindex,
-+					index_start, index_offset,
-+					sizeof(sindex), &index_start,
-+					&index_offset);
-+			SQUASHFS_SWAP_DIR_INDEX(index, &sindex);
-+		} else
-+			squashfs_get_cached_block(s, (char *) index,
-+					index_start, index_offset,
-+					sizeof(struct squashfs_dir_index),
-+					&index_start, &index_offset);
-+
-+		squashfs_get_cached_block(s, index->name, index_start,
-+					index_offset, index->size + 1,
-+					&index_start, &index_offset);
-+
-+		index->name[index->size + 1] = '\0';
-+
-+		if (strcmp(index->name, str) > 0)
-+			break;
-+
-+		length = index->index;
-+		*next_block = index->start_block + sblk->directory_table_start;
-+	}
-+
-+	*next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;
-+	kfree(str);
-+failure:
-+	return length + 3;
-+}
-+
-+		
-+static int squashfs_readdir(struct file *file, void *dirent, filldir_t filldir)
-+{
-+	struct inode *i = file->f_dentry->d_inode;
-+	struct squashfs_sb_info *msblk = i->i_sb->s_fs_info;
-+	struct squashfs_super_block *sblk = &msblk->sblk;
-+	long long next_block = SQUASHFS_I(i)->start_block +
-+		sblk->directory_table_start;
-+	int next_offset = SQUASHFS_I(i)->offset, length = 0,
-+		dir_count;
-+	struct squashfs_dir_header dirh;
-+	struct squashfs_dir_entry *dire;
-+
-+	TRACE("Entered squashfs_readdir [%llx:%x]\n", next_block, next_offset);
-+
-+	if (!(dire = kmalloc(sizeof(struct squashfs_dir_entry) +
-+		SQUASHFS_NAME_LEN + 1, GFP_KERNEL))) {
-+		ERROR("Failed to allocate squashfs_dir_entry\n");
-+		goto finish;
-+	}
-+
-+	while(file->f_pos < 3) {
-+		char *name;
-+		int size, i_ino;
-+
-+		if(file->f_pos == 0) {
-+			name = ".";
-+			size = 1;
-+			i_ino = i->i_ino;
-+		} else {
-+			name = "..";
-+			size = 2;
-+			i_ino = SQUASHFS_I(i)->u.s2.parent_inode;
-+		}
-+		TRACE("Calling filldir(%x, %s, %d, %d, %d, %d)\n",
-+				(unsigned int) dirent, name, size, (int)
-+				file->f_pos, i_ino,
-+				squashfs_filetype_table[1]);
-+
-+		if (filldir(dirent, name, size,
-+				file->f_pos, i_ino,
-+				squashfs_filetype_table[1]) < 0) {
-+				TRACE("Filldir returned less than 0\n");
-+				goto finish;
-+		}
-+		file->f_pos += size;
-+	}
-+
-+	length = get_dir_index_using_offset(i->i_sb, &next_block, &next_offset,
-+				SQUASHFS_I(i)->u.s2.directory_index_start,
-+				SQUASHFS_I(i)->u.s2.directory_index_offset,
-+				SQUASHFS_I(i)->u.s2.directory_index_count,
-+				file->f_pos);
-+
-+	while (length < i_size_read(i)) {
-+		/* read directory header */
-+		if (msblk->swap) {
-+			struct squashfs_dir_header sdirh;
-+			
-+			if (!squashfs_get_cached_block(i->i_sb, (char *) &sdirh,
-+					next_block, next_offset, sizeof(sdirh),
-+					&next_block, &next_offset))
-+				goto failed_read;
-+
-+			length += sizeof(sdirh);
-+			SQUASHFS_SWAP_DIR_HEADER(&dirh, &sdirh);
-+		} else {
-+			if (!squashfs_get_cached_block(i->i_sb, (char *) &dirh,
-+					next_block, next_offset, sizeof(dirh),
-+					&next_block, &next_offset))
-+				goto failed_read;
-+
-+			length += sizeof(dirh);
-+		}
-+
-+		dir_count = dirh.count + 1;
-+		while (dir_count--) {
-+			if (msblk->swap) {
-+				struct squashfs_dir_entry sdire;
-+				if (!squashfs_get_cached_block(i->i_sb, (char *)
-+						&sdire, next_block, next_offset,
-+						sizeof(sdire), &next_block,
-+						&next_offset))
-+					goto failed_read;
-+				
-+				length += sizeof(sdire);
-+				SQUASHFS_SWAP_DIR_ENTRY(dire, &sdire);
-+			} else {
-+				if (!squashfs_get_cached_block(i->i_sb, (char *)
-+						dire, next_block, next_offset,
-+						sizeof(*dire), &next_block,
-+						&next_offset))
-+					goto failed_read;
-+
-+				length += sizeof(*dire);
-+			}
-+
-+			if (!squashfs_get_cached_block(i->i_sb, dire->name,
-+						next_block, next_offset,
-+						dire->size + 1, &next_block,
-+						&next_offset))
-+				goto failed_read;
-+
-+			length += dire->size + 1;
-+
-+			if (file->f_pos >= length)
-+				continue;
-+
-+			dire->name[dire->size + 1] = '\0';
-+
-+			TRACE("Calling filldir(%x, %s, %d, %d, %x:%x, %d, %d)\n",
-+					(unsigned int) dirent, dire->name,
-+					dire->size + 1, (int) file->f_pos,
-+					dirh.start_block, dire->offset,
-+					dirh.inode_number + dire->inode_number,
-+					squashfs_filetype_table[dire->type]);
-+
-+			if (filldir(dirent, dire->name, dire->size + 1,
-+					file->f_pos,
-+					dirh.inode_number + dire->inode_number,
-+					squashfs_filetype_table[dire->type])
-+					< 0) {
-+				TRACE("Filldir returned less than 0\n");
-+				goto finish;
-+			}
-+			file->f_pos = length;
-+		}
-+	}
-+
-+finish:
-+	kfree(dire);
-+	return 0;
-+
-+failed_read:
-+	ERROR("Unable to read directory block [%llx:%x]\n", next_block,
-+		next_offset);
-+	kfree(dire);
-+	return 0;
-+}
-+
-+
-+static struct dentry *squashfs_lookup(struct inode *i, struct dentry *dentry,
-+				struct nameidata *nd)
-+{
-+	const unsigned char *name = dentry->d_name.name;
-+	int len = dentry->d_name.len;
-+	struct inode *inode = NULL;
-+	struct squashfs_sb_info *msblk = i->i_sb->s_fs_info;
-+	struct squashfs_super_block *sblk = &msblk->sblk;
-+	long long next_block = SQUASHFS_I(i)->start_block +
-+				sblk->directory_table_start;
-+	int next_offset = SQUASHFS_I(i)->offset, length = 0,
-+				dir_count;
-+	struct squashfs_dir_header dirh;
-+	struct squashfs_dir_entry *dire;
-+
-+	TRACE("Entered squashfs_lookup [%llx:%x]\n", next_block, next_offset);
-+
-+	if (!(dire = kmalloc(sizeof(struct squashfs_dir_entry) +
-+		SQUASHFS_NAME_LEN + 1, GFP_KERNEL))) {
-+		ERROR("Failed to allocate squashfs_dir_entry\n");
-+		goto exit_lookup;
-+	}
-+
-+	if (len > SQUASHFS_NAME_LEN)
-+		goto exit_lookup;
-+
-+	length = get_dir_index_using_name(i->i_sb, &next_block, &next_offset,
-+				SQUASHFS_I(i)->u.s2.directory_index_start,
-+				SQUASHFS_I(i)->u.s2.directory_index_offset,
-+				SQUASHFS_I(i)->u.s2.directory_index_count, name,
-+				len);
-+
-+	while (length < i_size_read(i)) {
-+		/* read directory header */
-+		if (msblk->swap) {
-+			struct squashfs_dir_header sdirh;
-+			if (!squashfs_get_cached_block(i->i_sb, (char *) &sdirh,
-+					next_block, next_offset, sizeof(sdirh),
-+					&next_block, &next_offset))
-+				goto failed_read;
-+
-+			length += sizeof(sdirh);
-+			SQUASHFS_SWAP_DIR_HEADER(&dirh, &sdirh);
-+		} else {
-+			if (!squashfs_get_cached_block(i->i_sb, (char *) &dirh,
-+					next_block, next_offset, sizeof(dirh),
-+					&next_block, &next_offset))
-+				goto failed_read;
-+
-+			length += sizeof(dirh);
-+		}
-+
-+		dir_count = dirh.count + 1;
-+		while (dir_count--) {
-+			if (msblk->swap) {
-+				struct squashfs_dir_entry sdire;
-+				if (!squashfs_get_cached_block(i->i_sb, (char *)
-+						&sdire, next_block,next_offset,
-+						sizeof(sdire), &next_block,
-+						&next_offset))
-+					goto failed_read;
-+				
-+				length += sizeof(sdire);
-+				SQUASHFS_SWAP_DIR_ENTRY(dire, &sdire);
-+			} else {
-+				if (!squashfs_get_cached_block(i->i_sb, (char *)
-+						dire, next_block,next_offset,
-+						sizeof(*dire), &next_block,
-+						&next_offset))
-+					goto failed_read;
-+
-+				length += sizeof(*dire);
-+			}
-+
-+			if (!squashfs_get_cached_block(i->i_sb, dire->name,
-+					next_block, next_offset, dire->size + 1,
-+					&next_block, &next_offset))
-+				goto failed_read;
-+
-+			length += dire->size + 1;
-+
-+			if (name[0] < dire->name[0])
-+				goto exit_lookup;
-+
-+			if ((len == dire->size + 1) && !strncmp(name, dire->name, len)) {
-+				squashfs_inode_t ino = SQUASHFS_MKINODE(dirh.start_block,
-+								dire->offset);
-+
-+				TRACE("calling squashfs_iget for directory "
-+					"entry %s, inode %x:%x, %d\n", name,
-+					dirh.start_block, dire->offset,
-+					dirh.inode_number + dire->inode_number);
-+
-+				inode = squashfs_iget(i->i_sb, ino, dirh.inode_number + dire->inode_number);
-+
-+				goto exit_lookup;
-+			}
-+		}
-+	}
-+
-+exit_lookup:
-+	kfree(dire);
-+	if (inode)
-+		return d_splice_alias(inode, dentry);
-+	d_add(dentry, inode);
-+	return ERR_PTR(0);
-+
-+failed_read:
-+	ERROR("Unable to read directory block [%llx:%x]\n", next_block,
-+		next_offset);
-+	goto exit_lookup;
-+}
-+
-+
-+static int squashfs_remount(struct super_block *s, int *flags, char *data)
-+{
-+	*flags |= MS_RDONLY;
-+	return 0;
-+}
-+
-+
-+static void squashfs_put_super(struct super_block *s)
-+{
-+	int i;
-+
-+	if (s->s_fs_info) {
-+		struct squashfs_sb_info *sbi = s->s_fs_info;
-+		if (sbi->block_cache)
-+			for (i = 0; i < SQUASHFS_CACHED_BLKS; i++)
-+				if (sbi->block_cache[i].block !=
-+							SQUASHFS_INVALID_BLK)
-+					kfree(sbi->block_cache[i].data);
-+		if (sbi->fragment)
-+			for (i = 0; i < SQUASHFS_CACHED_FRAGMENTS; i++) 
-+				SQUASHFS_FREE(sbi->fragment[i].data);
-+		kfree(sbi->fragment);
-+		kfree(sbi->block_cache);
-+		kfree(sbi->read_page);
-+		kfree(sbi->uid);
-+		kfree(sbi->fragment_index);
-+		kfree(sbi->fragment_index_2);
-+		kfree(sbi->meta_index);
-+		vfree(sbi->stream.workspace);
-+		kfree(s->s_fs_info);
-+		s->s_fs_info = NULL;
-+	}
-+}
-+
-+
-+static int squashfs_get_sb(struct file_system_type *fs_type, int flags,
-+				const char *dev_name, void *data,
-+				struct vfsmount *mnt)
-+{
-+	return get_sb_bdev(fs_type, flags, dev_name, data, squashfs_fill_super,
-+				mnt);
-+}
-+
-+
-+static int __init init_squashfs_fs(void)
-+{
-+	int err = init_inodecache();
-+	if (err)
-+		goto out;
-+
-+	printk(KERN_INFO "squashfs: version 3.2-r2 (2007/01/15) "
-+		"Phillip Lougher\n");
-+
-+	if ((err = register_filesystem(&squashfs_fs_type)))
-+		destroy_inodecache();
-+
-+out:
-+	return err;
-+}
-+
-+
-+static void __exit exit_squashfs_fs(void)
-+{
-+	unregister_filesystem(&squashfs_fs_type);
-+	destroy_inodecache();
-+}
-+
-+
-+static struct kmem_cache * squashfs_inode_cachep;
-+
-+
-+static struct inode *squashfs_alloc_inode(struct super_block *sb)
-+{
-+	struct squashfs_inode_info *ei;
-+	ei = kmem_cache_alloc(squashfs_inode_cachep, GFP_KERNEL);
-+	if (!ei)
-+		return NULL;
-+	return &ei->vfs_inode;
-+}
-+
-+
-+static void squashfs_destroy_inode(struct inode *inode)
-+{
-+	kmem_cache_free(squashfs_inode_cachep, SQUASHFS_I(inode));
-+}
-+
-+
-+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
-+{
-+	struct squashfs_inode_info *ei = foo;
-+
-+	inode_init_once(&ei->vfs_inode);
-+}
-+ 
-+
-+static int __init init_inodecache(void)
-+{
-+	squashfs_inode_cachep = kmem_cache_create("squashfs_inode_cache",
-+	     sizeof(struct squashfs_inode_info),
-+	     0, SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT,
-+	     init_once);
-+	if (squashfs_inode_cachep == NULL)
-+		return -ENOMEM;
-+	return 0;
-+}
-+
-+
-+static void destroy_inodecache(void)
-+{
-+	kmem_cache_destroy(squashfs_inode_cachep);
-+}
-+
-+
-+module_init(init_squashfs_fs);
-+module_exit(exit_squashfs_fs);
-+MODULE_DESCRIPTION("squashfs 3.2-r2, a compressed read-only filesystem");
-+MODULE_AUTHOR("Phillip Lougher <phillip@lougher.org.uk>");
-+MODULE_LICENSE("GPL");
---- /dev/null
-+++ b/fs/squashfs/squashfs.h
-@@ -0,0 +1,87 @@
-+/*
-+ * Squashfs - a compressed read only filesystem for Linux
-+ *
-+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007
-+ * Phillip Lougher <phillip@lougher.org.uk>
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation; either version 2,
-+ * or (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-+ *
-+ * squashfs.h
-+ */
-+
-+#ifdef CONFIG_SQUASHFS_1_0_COMPATIBILITY
-+#undef CONFIG_SQUASHFS_1_0_COMPATIBILITY
-+#endif
-+
-+#ifdef SQUASHFS_TRACE
-+#define TRACE(s, args...)	printk(KERN_NOTICE "SQUASHFS: "s, ## args)
-+#else
-+#define TRACE(s, args...)	{}
-+#endif
-+
-+#define ERROR(s, args...)	printk(KERN_ERR "SQUASHFS error: "s, ## args)
-+
-+#define SERROR(s, args...)	do { \
-+				if (!silent) \
-+				printk(KERN_ERR "SQUASHFS error: "s, ## args);\
-+				} while(0)
-+
-+#define WARNING(s, args...)	printk(KERN_WARNING "SQUASHFS: "s, ## args)
-+
-+static inline struct squashfs_inode_info *SQUASHFS_I(struct inode *inode)
-+{
-+	return list_entry(inode, struct squashfs_inode_info, vfs_inode);
-+}
-+
-+#if defined(CONFIG_SQUASHFS_1_0_COMPATIBILITY ) || defined(CONFIG_SQUASHFS_2_0_COMPATIBILITY)
-+#define SQSH_EXTERN
-+extern unsigned int squashfs_read_data(struct super_block *s, char *buffer,
-+				long long index, unsigned int length,
-+				long long *next_index, int srclength);
-+extern int squashfs_get_cached_block(struct super_block *s, char *buffer,
-+				long long block, unsigned int offset,
-+				int length, long long *next_block,
-+				unsigned int *next_offset);
-+extern void release_cached_fragment(struct squashfs_sb_info *msblk, struct
-+					squashfs_fragment_cache *fragment);
-+extern struct squashfs_fragment_cache *get_cached_fragment(struct super_block
-+					*s, long long start_block,
-+					int length);
-+extern struct inode *squashfs_iget(struct super_block *s, squashfs_inode_t inode, unsigned int inode_number);
-+extern const struct address_space_operations squashfs_symlink_aops;
-+extern const struct address_space_operations squashfs_aops;
-+extern const struct address_space_operations squashfs_aops_4K;
-+extern struct inode_operations squashfs_dir_inode_ops;
-+#else
-+#define SQSH_EXTERN static
-+#endif
-+
-+#ifdef CONFIG_SQUASHFS_1_0_COMPATIBILITY
-+extern int squashfs_1_0_supported(struct squashfs_sb_info *msblk);
-+#else
-+static inline int squashfs_1_0_supported(struct squashfs_sb_info *msblk)
-+{
-+	return 0;
-+}
-+#endif
-+
-+#ifdef CONFIG_SQUASHFS_2_0_COMPATIBILITY
-+extern int squashfs_2_0_supported(struct squashfs_sb_info *msblk);
-+#else
-+static inline int squashfs_2_0_supported(struct squashfs_sb_info *msblk)
-+{
-+	return 0;
-+}
-+#endif
---- /dev/null
-+++ b/fs/squashfs/squashfs2_0.c
-@@ -0,0 +1,742 @@
-+/*
-+ * Squashfs - a compressed read only filesystem for Linux
-+ *
-+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007
-+ * Phillip Lougher <phillip@lougher.org.uk>
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation; either version 2,
-+ * or (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-+ *
-+ * squashfs2_0.c
-+ */
-+
-+#include <linux/squashfs_fs.h>
-+#include <linux/module.h>
-+#include <linux/zlib.h>
-+#include <linux/fs.h>
-+#include <linux/squashfs_fs_sb.h>
-+#include <linux/squashfs_fs_i.h>
-+
-+#include "squashfs.h"
-+static int squashfs_readdir_2(struct file *file, void *dirent, filldir_t filldir);
-+static struct dentry *squashfs_lookup_2(struct inode *, struct dentry *,
-+				struct nameidata *);
-+
-+static struct file_operations squashfs_dir_ops_2 = {
-+	.read = generic_read_dir,
-+	.readdir = squashfs_readdir_2
-+};
-+
-+static struct inode_operations squashfs_dir_inode_ops_2 = {
-+	.lookup = squashfs_lookup_2
-+};
-+
-+static unsigned char squashfs_filetype_table[] = {
-+	DT_UNKNOWN, DT_DIR, DT_REG, DT_LNK, DT_BLK, DT_CHR, DT_FIFO, DT_SOCK
-+};
-+
-+static int read_fragment_index_table_2(struct super_block *s)
-+{
-+	struct squashfs_sb_info *msblk = s->s_fs_info;
-+	struct squashfs_super_block *sblk = &msblk->sblk;
-+
-+	if (!(msblk->fragment_index_2 = kmalloc(SQUASHFS_FRAGMENT_INDEX_BYTES_2
-+					(sblk->fragments), GFP_KERNEL))) {
-+		ERROR("Failed to allocate uid/gid table\n");
-+		return 0;
-+	}
-+   
-+	if (SQUASHFS_FRAGMENT_INDEX_BYTES_2(sblk->fragments) &&
-+					!squashfs_read_data(s, (char *)
-+					msblk->fragment_index_2,
-+					sblk->fragment_table_start,
-+					SQUASHFS_FRAGMENT_INDEX_BYTES_2
-+					(sblk->fragments) |
-+					SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, SQUASHFS_FRAGMENT_INDEX_BYTES_2(sblk->fragments))) {
-+		ERROR("unable to read fragment index table\n");
-+		return 0;
-+	}
-+
-+	if (msblk->swap) {
-+		int i;
-+		unsigned int fragment;
-+
-+		for (i = 0; i < SQUASHFS_FRAGMENT_INDEXES_2(sblk->fragments);
-+									i++) {
-+			SQUASHFS_SWAP_FRAGMENT_INDEXES_2((&fragment),
-+						&msblk->fragment_index_2[i], 1);
-+			msblk->fragment_index_2[i] = fragment;
-+		}
-+	}
-+
-+	return 1;
-+}
-+
-+
-+static int get_fragment_location_2(struct super_block *s, unsigned int fragment,
-+				long long *fragment_start_block,
-+				unsigned int *fragment_size)
-+{
-+	struct squashfs_sb_info *msblk = s->s_fs_info;
-+	long long start_block =
-+		msblk->fragment_index_2[SQUASHFS_FRAGMENT_INDEX_2(fragment)];
-+	int offset = SQUASHFS_FRAGMENT_INDEX_OFFSET_2(fragment);
-+	struct squashfs_fragment_entry_2 fragment_entry;
-+
-+	if (msblk->swap) {
-+		struct squashfs_fragment_entry_2 sfragment_entry;
-+
-+		if (!squashfs_get_cached_block(s, (char *) &sfragment_entry,
-+					start_block, offset,
-+					sizeof(sfragment_entry), &start_block,
-+					&offset))
-+			goto out;
-+		SQUASHFS_SWAP_FRAGMENT_ENTRY_2(&fragment_entry, &sfragment_entry);
-+	} else
-+		if (!squashfs_get_cached_block(s, (char *) &fragment_entry,
-+					start_block, offset,
-+					sizeof(fragment_entry), &start_block,
-+					&offset))
-+			goto out;
-+
-+	*fragment_start_block = fragment_entry.start_block;
-+	*fragment_size = fragment_entry.size;
-+
-+	return 1;
-+
-+out:
-+	return 0;
-+}
-+
-+
-+static void squashfs_new_inode(struct squashfs_sb_info *msblk, struct inode *i,
-+		struct squashfs_base_inode_header_2 *inodeb, unsigned int ino)
-+{
-+	struct squashfs_super_block *sblk = &msblk->sblk;
-+
-+	i->i_ino = ino;
-+	i->i_mtime.tv_sec = sblk->mkfs_time;
-+	i->i_atime.tv_sec = sblk->mkfs_time;
-+	i->i_ctime.tv_sec = sblk->mkfs_time;
-+	i->i_uid = msblk->uid[inodeb->uid];
-+	i->i_mode = inodeb->mode;
-+	i->i_nlink = 1;
-+	i->i_size = 0;
-+	if (inodeb->guid == SQUASHFS_GUIDS)
-+		i->i_gid = i->i_uid;
-+	else
-+		i->i_gid = msblk->guid[inodeb->guid];
-+}
-+
-+
-+static int squashfs_read_inode_2(struct inode *i, squashfs_inode_t inode)
-+{
-+	struct super_block *s = i->i_sb;
-+	struct squashfs_sb_info *msblk = s->s_fs_info;
-+	struct squashfs_super_block *sblk = &msblk->sblk;
-+	unsigned int block = SQUASHFS_INODE_BLK(inode) +
-+		sblk->inode_table_start;
-+	unsigned int offset = SQUASHFS_INODE_OFFSET(inode);
-+	unsigned int ino = i->i_ino;
-+	long long next_block;
-+	unsigned int next_offset;
-+	union squashfs_inode_header_2 id, sid;
-+	struct squashfs_base_inode_header_2 *inodeb = &id.base,
-+					  *sinodeb = &sid.base;
-+
-+	TRACE("Entered squashfs_iget\n");
-+
-+	if (msblk->swap) {
-+		if (!squashfs_get_cached_block(s, (char *) sinodeb, block,
-+					offset, sizeof(*sinodeb), &next_block,
-+					&next_offset))
-+			goto failed_read;
-+		SQUASHFS_SWAP_BASE_INODE_HEADER_2(inodeb, sinodeb,
-+					sizeof(*sinodeb));
-+	} else
-+		if (!squashfs_get_cached_block(s, (char *) inodeb, block,
-+					offset, sizeof(*inodeb), &next_block,
-+					&next_offset))
-+			goto failed_read;
-+
-+	squashfs_new_inode(msblk, i, inodeb, ino);
-+
-+	switch(inodeb->inode_type) {
-+		case SQUASHFS_FILE_TYPE: {
-+			struct squashfs_reg_inode_header_2 *inodep = &id.reg;
-+			struct squashfs_reg_inode_header_2 *sinodep = &sid.reg;
-+			long long frag_blk;
-+			unsigned int frag_size = 0;
-+				
-+			if (msblk->swap) {
-+				if (!squashfs_get_cached_block(s, (char *)
-+						sinodep, block, offset,
-+						sizeof(*sinodep), &next_block,
-+						&next_offset))
-+					goto failed_read;
-+				SQUASHFS_SWAP_REG_INODE_HEADER_2(inodep, sinodep);
-+			} else
-+				if (!squashfs_get_cached_block(s, (char *)
-+						inodep, block, offset,
-+						sizeof(*inodep), &next_block,
-+						&next_offset))
-+					goto failed_read;
-+
-+			frag_blk = SQUASHFS_INVALID_BLK;
-+			if (inodep->fragment != SQUASHFS_INVALID_FRAG &&
-+					!get_fragment_location_2(s,
-+					inodep->fragment, &frag_blk, &frag_size))
-+				goto failed_read;
-+				
-+			i->i_size = inodep->file_size;
-+			i->i_fop = &generic_ro_fops;
-+			i->i_mode |= S_IFREG;
-+			i->i_mtime.tv_sec = inodep->mtime;
-+			i->i_atime.tv_sec = inodep->mtime;
-+			i->i_ctime.tv_sec = inodep->mtime;
-+			i->i_blocks = ((i->i_size - 1) >> 9) + 1;
-+			SQUASHFS_I(i)->u.s1.fragment_start_block = frag_blk;
-+			SQUASHFS_I(i)->u.s1.fragment_size = frag_size;
-+			SQUASHFS_I(i)->u.s1.fragment_offset = inodep->offset;
-+			SQUASHFS_I(i)->start_block = inodep->start_block;
-+			SQUASHFS_I(i)->u.s1.block_list_start = next_block;
-+			SQUASHFS_I(i)->offset = next_offset;
-+			if (sblk->block_size > 4096)
-+				i->i_data.a_ops = &squashfs_aops;
-+			else
-+				i->i_data.a_ops = &squashfs_aops_4K;
-+
-+			TRACE("File inode %x:%x, start_block %x, "
-+					"block_list_start %llx, offset %x\n",
-+					SQUASHFS_INODE_BLK(inode), offset,
-+					inodep->start_block, next_block,
-+					next_offset);
-+			break;
-+		}
-+		case SQUASHFS_DIR_TYPE: {
-+			struct squashfs_dir_inode_header_2 *inodep = &id.dir;
-+			struct squashfs_dir_inode_header_2 *sinodep = &sid.dir;
-+
-+			if (msblk->swap) {
-+				if (!squashfs_get_cached_block(s, (char *)
-+						sinodep, block, offset,
-+						sizeof(*sinodep), &next_block,
-+						&next_offset))
-+					goto failed_read;
-+				SQUASHFS_SWAP_DIR_INODE_HEADER_2(inodep, sinodep);
-+			} else
-+				if (!squashfs_get_cached_block(s, (char *)
-+						inodep, block, offset,
-+						sizeof(*inodep), &next_block,
-+						&next_offset))
-+					goto failed_read;
-+
-+			i->i_size = inodep->file_size;
-+			i->i_op = &squashfs_dir_inode_ops_2;
-+			i->i_fop = &squashfs_dir_ops_2;
-+			i->i_mode |= S_IFDIR;
-+			i->i_mtime.tv_sec = inodep->mtime;
-+			i->i_atime.tv_sec = inodep->mtime;
-+			i->i_ctime.tv_sec = inodep->mtime;
-+			SQUASHFS_I(i)->start_block = inodep->start_block;
-+			SQUASHFS_I(i)->offset = inodep->offset;
-+			SQUASHFS_I(i)->u.s2.directory_index_count = 0;
-+			SQUASHFS_I(i)->u.s2.parent_inode = 0;
-+
-+			TRACE("Directory inode %x:%x, start_block %x, offset "
-+					"%x\n", SQUASHFS_INODE_BLK(inode),
-+					offset, inodep->start_block,
-+					inodep->offset);
-+			break;
-+		}
-+		case SQUASHFS_LDIR_TYPE: {
-+			struct squashfs_ldir_inode_header_2 *inodep = &id.ldir;
-+			struct squashfs_ldir_inode_header_2 *sinodep = &sid.ldir;
-+
-+			if (msblk->swap) {
-+				if (!squashfs_get_cached_block(s, (char *)
-+						sinodep, block, offset,
-+						sizeof(*sinodep), &next_block,
-+						&next_offset))
-+					goto failed_read;
-+				SQUASHFS_SWAP_LDIR_INODE_HEADER_2(inodep,
-+						sinodep);
-+			} else
-+				if (!squashfs_get_cached_block(s, (char *)
-+						inodep, block, offset,
-+						sizeof(*inodep), &next_block,
-+						&next_offset))
-+					goto failed_read;
-+
-+			i->i_size = inodep->file_size;
-+			i->i_op = &squashfs_dir_inode_ops_2;
-+			i->i_fop = &squashfs_dir_ops_2;
-+			i->i_mode |= S_IFDIR;
-+			i->i_mtime.tv_sec = inodep->mtime;
-+			i->i_atime.tv_sec = inodep->mtime;
-+			i->i_ctime.tv_sec = inodep->mtime;
-+			SQUASHFS_I(i)->start_block = inodep->start_block;
-+			SQUASHFS_I(i)->offset = inodep->offset;
-+			SQUASHFS_I(i)->u.s2.directory_index_start = next_block;
-+			SQUASHFS_I(i)->u.s2.directory_index_offset =
-+								next_offset;
-+			SQUASHFS_I(i)->u.s2.directory_index_count =
-+								inodep->i_count;
-+			SQUASHFS_I(i)->u.s2.parent_inode = 0;
-+
-+			TRACE("Long directory inode %x:%x, start_block %x, "
-+					"offset %x\n",
-+					SQUASHFS_INODE_BLK(inode), offset,
-+					inodep->start_block, inodep->offset);
-+			break;
-+		}
-+		case SQUASHFS_SYMLINK_TYPE: {
-+			struct squashfs_symlink_inode_header_2 *inodep =
-+								&id.symlink;
-+			struct squashfs_symlink_inode_header_2 *sinodep =
-+								&sid.symlink;
-+	
-+			if (msblk->swap) {
-+				if (!squashfs_get_cached_block(s, (char *)
-+						sinodep, block, offset,
-+						sizeof(*sinodep), &next_block,
-+						&next_offset))
-+					goto failed_read;
-+				SQUASHFS_SWAP_SYMLINK_INODE_HEADER_2(inodep,
-+								sinodep);
-+			} else
-+				if (!squashfs_get_cached_block(s, (char *)
-+						inodep, block, offset,
-+						sizeof(*inodep), &next_block,
-+						&next_offset))
-+					goto failed_read;
-+
-+			i->i_size = inodep->symlink_size;
-+			i->i_op = &page_symlink_inode_operations;
-+			i->i_data.a_ops = &squashfs_symlink_aops;
-+			i->i_mode |= S_IFLNK;
-+			SQUASHFS_I(i)->start_block = next_block;
-+			SQUASHFS_I(i)->offset = next_offset;
-+
-+			TRACE("Symbolic link inode %x:%x, start_block %llx, "
-+					"offset %x\n",
-+					SQUASHFS_INODE_BLK(inode), offset,
-+					next_block, next_offset);
-+			break;
-+		 }
-+		 case SQUASHFS_BLKDEV_TYPE:
-+		 case SQUASHFS_CHRDEV_TYPE: {
-+			struct squashfs_dev_inode_header_2 *inodep = &id.dev;
-+			struct squashfs_dev_inode_header_2 *sinodep = &sid.dev;
-+
-+			if (msblk->swap) {
-+				if (!squashfs_get_cached_block(s, (char *)
-+						sinodep, block, offset,
-+						sizeof(*sinodep), &next_block,
-+						&next_offset))
-+					goto failed_read;
-+				SQUASHFS_SWAP_DEV_INODE_HEADER_2(inodep, sinodep);
-+			} else	
-+				if (!squashfs_get_cached_block(s, (char *)
-+						inodep, block, offset,
-+						sizeof(*inodep), &next_block,
-+						&next_offset))
-+					goto failed_read;
-+
-+			i->i_mode |= (inodeb->inode_type ==
-+					SQUASHFS_CHRDEV_TYPE) ?  S_IFCHR :
-+					S_IFBLK;
-+			init_special_inode(i, i->i_mode,
-+					old_decode_dev(inodep->rdev));
-+
-+			TRACE("Device inode %x:%x, rdev %x\n",
-+					SQUASHFS_INODE_BLK(inode), offset,
-+					inodep->rdev);
-+			break;
-+		 }
-+		 case SQUASHFS_FIFO_TYPE:
-+		 case SQUASHFS_SOCKET_TYPE: {
-+
-+			i->i_mode |= (inodeb->inode_type == SQUASHFS_FIFO_TYPE)
-+							? S_IFIFO : S_IFSOCK;
-+			init_special_inode(i, i->i_mode, 0);
-+			break;
-+		 }
-+		 default:
-+			ERROR("Unknown inode type %d in squashfs_iget!\n",
-+					inodeb->inode_type);
-+			goto failed_read1;
-+	}
-+	
-+	return 1;
-+
-+failed_read:
-+	ERROR("Unable to read inode [%x:%x]\n", block, offset);
-+
-+failed_read1:
-+	return 0;
-+}
-+
-+
-+static int get_dir_index_using_offset(struct super_block *s, long long 
-+				*next_block, unsigned int *next_offset,
-+				long long index_start,
-+				unsigned int index_offset, int i_count,
-+				long long f_pos)
-+{
-+	struct squashfs_sb_info *msblk = s->s_fs_info;
-+	struct squashfs_super_block *sblk = &msblk->sblk;
-+	int i, length = 0;
-+	struct squashfs_dir_index_2 index;
-+
-+	TRACE("Entered get_dir_index_using_offset, i_count %d, f_pos %d\n",
-+					i_count, (unsigned int) f_pos);
-+
-+	if (f_pos == 0)
-+		goto finish;
-+
-+	for (i = 0; i < i_count; i++) {
-+		if (msblk->swap) {
-+			struct squashfs_dir_index_2 sindex;
-+			squashfs_get_cached_block(s, (char *) &sindex,
-+					index_start, index_offset,
-+					sizeof(sindex), &index_start,
-+					&index_offset);
-+			SQUASHFS_SWAP_DIR_INDEX_2(&index, &sindex);
-+		} else
-+			squashfs_get_cached_block(s, (char *) &index,
-+					index_start, index_offset,
-+					sizeof(index), &index_start,
-+					&index_offset);
-+
-+		if (index.index > f_pos)
-+			break;
-+
-+		squashfs_get_cached_block(s, NULL, index_start, index_offset,
-+					index.size + 1, &index_start,
-+					&index_offset);
-+
-+		length = index.index;
-+		*next_block = index.start_block + sblk->directory_table_start;
-+	}
-+
-+	*next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;
-+
-+finish:
-+	return length;
-+}
-+
-+
-+static int get_dir_index_using_name(struct super_block *s, long long
-+				*next_block, unsigned int *next_offset,
-+				long long index_start,
-+				unsigned int index_offset, int i_count,
-+				const char *name, int size)
-+{
-+	struct squashfs_sb_info *msblk = s->s_fs_info;
-+	struct squashfs_super_block *sblk = &msblk->sblk;
-+	int i, length = 0;
-+	struct squashfs_dir_index_2 *index;
-+	char *str;
-+
-+	TRACE("Entered get_dir_index_using_name, i_count %d\n", i_count);
-+
-+	if (!(str = kmalloc(sizeof(struct squashfs_dir_index) +
-+		(SQUASHFS_NAME_LEN + 1) * 2, GFP_KERNEL))) {
-+		ERROR("Failed to allocate squashfs_dir_index\n");
-+		goto failure;
-+	}
-+
-+	index = (struct squashfs_dir_index_2 *) (str + SQUASHFS_NAME_LEN + 1);
-+	strncpy(str, name, size);
-+	str[size] = '\0';
-+
-+	for (i = 0; i < i_count; i++) {
-+		if (msblk->swap) {
-+			struct squashfs_dir_index_2 sindex;
-+			squashfs_get_cached_block(s, (char *) &sindex,
-+					index_start, index_offset,
-+					sizeof(sindex), &index_start,
-+					&index_offset);
-+			SQUASHFS_SWAP_DIR_INDEX_2(index, &sindex);
-+		} else
-+			squashfs_get_cached_block(s, (char *) index,
-+					index_start, index_offset,
-+					sizeof(struct squashfs_dir_index_2),
-+					&index_start, &index_offset);
-+
-+		squashfs_get_cached_block(s, index->name, index_start,
-+					index_offset, index->size + 1,
-+					&index_start, &index_offset);
-+
-+		index->name[index->size + 1] = '\0';
-+
-+		if (strcmp(index->name, str) > 0)
-+			break;
-+
-+		length = index->index;
-+		*next_block = index->start_block + sblk->directory_table_start;
-+	}
-+
-+	*next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;
-+	kfree(str);
-+failure:
-+	return length;
-+}
-+
-+		
-+static int squashfs_readdir_2(struct file *file, void *dirent, filldir_t filldir)
-+{
-+	struct inode *i = file->f_dentry->d_inode;
-+	struct squashfs_sb_info *msblk = i->i_sb->s_fs_info;
-+	struct squashfs_super_block *sblk = &msblk->sblk;
-+	long long next_block = SQUASHFS_I(i)->start_block +
-+		sblk->directory_table_start;
-+	int next_offset = SQUASHFS_I(i)->offset, length = 0,
-+		dir_count;
-+	struct squashfs_dir_header_2 dirh;
-+	struct squashfs_dir_entry_2 *dire;
-+
-+	TRACE("Entered squashfs_readdir_2 [%llx:%x]\n", next_block, next_offset);
-+
-+	if (!(dire = kmalloc(sizeof(struct squashfs_dir_entry) +
-+		SQUASHFS_NAME_LEN + 1, GFP_KERNEL))) {
-+		ERROR("Failed to allocate squashfs_dir_entry\n");
-+		goto finish;
-+	}
-+
-+	length = get_dir_index_using_offset(i->i_sb, &next_block, &next_offset,
-+				SQUASHFS_I(i)->u.s2.directory_index_start,
-+				SQUASHFS_I(i)->u.s2.directory_index_offset,
-+				SQUASHFS_I(i)->u.s2.directory_index_count,
-+				file->f_pos);
-+
-+	while (length < i_size_read(i)) {
-+		/* read directory header */
-+		if (msblk->swap) {
-+			struct squashfs_dir_header_2 sdirh;
-+			
-+			if (!squashfs_get_cached_block(i->i_sb, (char *) &sdirh,
-+					next_block, next_offset, sizeof(sdirh),
-+					&next_block, &next_offset))
-+				goto failed_read;
-+
-+			length += sizeof(sdirh);
-+			SQUASHFS_SWAP_DIR_HEADER_2(&dirh, &sdirh);
-+		} else {
-+			if (!squashfs_get_cached_block(i->i_sb, (char *) &dirh,
-+					next_block, next_offset, sizeof(dirh),
-+					&next_block, &next_offset))
-+				goto failed_read;
-+
-+			length += sizeof(dirh);
-+		}
-+
-+		dir_count = dirh.count + 1;
-+		while (dir_count--) {
-+			if (msblk->swap) {
-+				struct squashfs_dir_entry_2 sdire;
-+				if (!squashfs_get_cached_block(i->i_sb, (char *)
-+						&sdire, next_block, next_offset,
-+						sizeof(sdire), &next_block,
-+						&next_offset))
-+					goto failed_read;
-+				
-+				length += sizeof(sdire);
-+				SQUASHFS_SWAP_DIR_ENTRY_2(dire, &sdire);
-+			} else {
-+				if (!squashfs_get_cached_block(i->i_sb, (char *)
-+						dire, next_block, next_offset,
-+						sizeof(*dire), &next_block,
-+						&next_offset))
-+					goto failed_read;
-+
-+				length += sizeof(*dire);
-+			}
-+
-+			if (!squashfs_get_cached_block(i->i_sb, dire->name,
-+						next_block, next_offset,
-+						dire->size + 1, &next_block,
-+						&next_offset))
-+				goto failed_read;
-+
-+			length += dire->size + 1;
-+
-+			if (file->f_pos >= length)
-+				continue;
-+
-+			dire->name[dire->size + 1] = '\0';
-+
-+			TRACE("Calling filldir(%x, %s, %d, %d, %x:%x, %d)\n",
-+					(unsigned int) dirent, dire->name,
-+					dire->size + 1, (int) file->f_pos,
-+					dirh.start_block, dire->offset,
-+					squashfs_filetype_table[dire->type]);
-+
-+			if (filldir(dirent, dire->name, dire->size + 1,
-+					file->f_pos, SQUASHFS_MK_VFS_INODE(
-+					dirh.start_block, dire->offset),
-+					squashfs_filetype_table[dire->type])
-+					< 0) {
-+				TRACE("Filldir returned less than 0\n");
-+				goto finish;
-+			}
-+			file->f_pos = length;
-+		}
-+	}
-+
-+finish:
-+	kfree(dire);
-+	return 0;
-+
-+failed_read:
-+	ERROR("Unable to read directory block [%llx:%x]\n", next_block,
-+		next_offset);
-+	kfree(dire);
-+	return 0;
-+}
-+
-+
-+static struct dentry *squashfs_lookup_2(struct inode *i, struct dentry *dentry,
-+				struct nameidata *nd)
-+{
-+	const unsigned char *name = dentry->d_name.name;
-+	int len = dentry->d_name.len;
-+	struct inode *inode = NULL;
-+	struct squashfs_sb_info *msblk = i->i_sb->s_fs_info;
-+	struct squashfs_super_block *sblk = &msblk->sblk;
-+	long long next_block = SQUASHFS_I(i)->start_block +
-+				sblk->directory_table_start;
-+	int next_offset = SQUASHFS_I(i)->offset, length = 0,
-+				dir_count;
-+	struct squashfs_dir_header_2 dirh;
-+	struct squashfs_dir_entry_2 *dire;
-+	int sorted = sblk->s_major == 2 && sblk->s_minor >= 1;
-+
-+	TRACE("Entered squashfs_lookup_2 [%llx:%x]\n", next_block, next_offset);
-+
-+	if (!(dire = kmalloc(sizeof(struct squashfs_dir_entry) +
-+		SQUASHFS_NAME_LEN + 1, GFP_KERNEL))) {
-+		ERROR("Failed to allocate squashfs_dir_entry\n");
-+		goto exit_loop;
-+	}
-+
-+	if (len > SQUASHFS_NAME_LEN)
-+		goto exit_loop;
-+
-+	length = get_dir_index_using_name(i->i_sb, &next_block, &next_offset,
-+				SQUASHFS_I(i)->u.s2.directory_index_start,
-+				SQUASHFS_I(i)->u.s2.directory_index_offset,
-+				SQUASHFS_I(i)->u.s2.directory_index_count, name,
-+				len);
-+
-+	while (length < i_size_read(i)) {
-+		/* read directory header */
-+		if (msblk->swap) {
-+			struct squashfs_dir_header_2 sdirh;
-+			if (!squashfs_get_cached_block(i->i_sb, (char *) &sdirh,
-+					next_block, next_offset, sizeof(sdirh),
-+					&next_block, &next_offset))
-+				goto failed_read;
-+
-+			length += sizeof(sdirh);
-+			SQUASHFS_SWAP_DIR_HEADER_2(&dirh, &sdirh);
-+		} else {
-+			if (!squashfs_get_cached_block(i->i_sb, (char *) &dirh,
-+					next_block, next_offset, sizeof(dirh),
-+					&next_block, &next_offset))
-+				goto failed_read;
-+
-+			length += sizeof(dirh);
-+		}
-+
-+		dir_count = dirh.count + 1;
-+		while (dir_count--) {
-+			if (msblk->swap) {
-+				struct squashfs_dir_entry_2 sdire;
-+				if (!squashfs_get_cached_block(i->i_sb, (char *)
-+						&sdire, next_block,next_offset,
-+						sizeof(sdire), &next_block,
-+						&next_offset))
-+					goto failed_read;
-+				
-+				length += sizeof(sdire);
-+				SQUASHFS_SWAP_DIR_ENTRY_2(dire, &sdire);
-+			} else {
-+				if (!squashfs_get_cached_block(i->i_sb, (char *)
-+						dire, next_block,next_offset,
-+						sizeof(*dire), &next_block,
-+						&next_offset))
-+					goto failed_read;
-+
-+				length += sizeof(*dire);
-+			}
-+
-+			if (!squashfs_get_cached_block(i->i_sb, dire->name,
-+					next_block, next_offset, dire->size + 1,
-+					&next_block, &next_offset))
-+				goto failed_read;
-+
-+			length += dire->size + 1;
-+
-+			if (sorted && name[0] < dire->name[0])
-+				goto exit_loop;
-+
-+			if ((len == dire->size + 1) && !strncmp(name,
-+						dire->name, len)) {
-+				squashfs_inode_t ino =
-+					SQUASHFS_MKINODE(dirh.start_block,
-+					dire->offset);
-+				unsigned int inode_number = SQUASHFS_MK_VFS_INODE(dirh.start_block,
-+					dire->offset);
-+
-+				TRACE("calling squashfs_iget for directory "
-+					"entry %s, inode %x:%x, %lld\n", name,
-+					dirh.start_block, dire->offset, ino);
-+
-+				inode = squashfs_iget(i->i_sb, ino, inode_number);
-+
-+				goto exit_loop;
-+			}
-+		}
-+	}
-+
-+exit_loop:
-+	kfree(dire);
-+	d_add(dentry, inode);
-+	return ERR_PTR(0);
-+
-+failed_read:
-+	ERROR("Unable to read directory block [%llx:%x]\n", next_block,
-+		next_offset);
-+	goto exit_loop;
-+}
-+
-+
-+int squashfs_2_0_supported(struct squashfs_sb_info *msblk)
-+{
-+	struct squashfs_super_block *sblk = &msblk->sblk;
-+
-+	msblk->read_inode = squashfs_read_inode_2;
-+	msblk->read_fragment_index_table = read_fragment_index_table_2;
-+
-+	sblk->bytes_used = sblk->bytes_used_2;
-+	sblk->uid_start = sblk->uid_start_2;
-+	sblk->guid_start = sblk->guid_start_2;
-+	sblk->inode_table_start = sblk->inode_table_start_2;
-+	sblk->directory_table_start = sblk->directory_table_start_2;
-+	sblk->fragment_table_start = sblk->fragment_table_start_2;
-+
-+	return 1;
-+}
---- /dev/null
-+++ b/include/linux/squashfs_fs.h
-@@ -0,0 +1,934 @@
-+#ifndef SQUASHFS_FS
-+#define SQUASHFS_FS
-+
-+/*
-+ * Squashfs
-+ *
-+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007
-+ * Phillip Lougher <phillip@lougher.org.uk>
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation; either version 2,
-+ * or (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-+ *
-+ * squashfs_fs.h
-+ */
-+
-+#ifndef CONFIG_SQUASHFS_2_0_COMPATIBILITY
-+#define CONFIG_SQUASHFS_2_0_COMPATIBILITY
-+#endif
-+
-+#ifdef	CONFIG_SQUASHFS_VMALLOC
-+#define SQUASHFS_ALLOC(a)		vmalloc(a)
-+#define SQUASHFS_FREE(a)		vfree(a)
-+#else
-+#define SQUASHFS_ALLOC(a)		kmalloc(a, GFP_KERNEL)
-+#define SQUASHFS_FREE(a)		kfree(a)
-+#endif
-+#define SQUASHFS_CACHED_FRAGMENTS	CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE	
-+#define SQUASHFS_MAJOR			3
-+#define SQUASHFS_MINOR			0
-+#define SQUASHFS_MAGIC			0x73717368
-+#define SQUASHFS_MAGIC_SWAP		0x68737173
-+#define SQUASHFS_START			0
-+
-+/* size of metadata (inode and directory) blocks */
-+#define SQUASHFS_METADATA_SIZE		8192
-+#define SQUASHFS_METADATA_LOG		13
-+
-+/* default size of data blocks */
-+#define SQUASHFS_FILE_SIZE		65536
-+#define SQUASHFS_FILE_LOG		16
-+
-+#define SQUASHFS_FILE_MAX_SIZE		65536
-+
-+/* Max number of uids and gids */
-+#define SQUASHFS_UIDS			256
-+#define SQUASHFS_GUIDS			255
-+
-+/* Max length of filename (not 255) */
-+#define SQUASHFS_NAME_LEN		256
-+
-+#define SQUASHFS_INVALID		((long long) 0xffffffffffff)
-+#define SQUASHFS_INVALID_FRAG		((unsigned int) 0xffffffff)
-+#define SQUASHFS_INVALID_BLK		((long long) -1)
-+#define SQUASHFS_USED_BLK		((long long) -2)
-+
-+/* Filesystem flags */
-+#define SQUASHFS_NOI			0
-+#define SQUASHFS_NOD			1
-+#define SQUASHFS_CHECK			2
-+#define SQUASHFS_NOF			3
-+#define SQUASHFS_NO_FRAG		4
-+#define SQUASHFS_ALWAYS_FRAG		5
-+#define SQUASHFS_DUPLICATE		6
-+#define SQUASHFS_EXPORT			7
-+
-+#define SQUASHFS_BIT(flag, bit)		((flag >> bit) & 1)
-+
-+#define SQUASHFS_UNCOMPRESSED_INODES(flags)	SQUASHFS_BIT(flags, \
-+						SQUASHFS_NOI)
-+
-+#define SQUASHFS_UNCOMPRESSED_DATA(flags)	SQUASHFS_BIT(flags, \
-+						SQUASHFS_NOD)
-+
-+#define SQUASHFS_UNCOMPRESSED_FRAGMENTS(flags)	SQUASHFS_BIT(flags, \
-+						SQUASHFS_NOF)
-+
-+#define SQUASHFS_NO_FRAGMENTS(flags)		SQUASHFS_BIT(flags, \
-+						SQUASHFS_NO_FRAG)
-+
-+#define SQUASHFS_ALWAYS_FRAGMENTS(flags)	SQUASHFS_BIT(flags, \
-+						SQUASHFS_ALWAYS_FRAG)
-+
-+#define SQUASHFS_DUPLICATES(flags)		SQUASHFS_BIT(flags, \
-+						SQUASHFS_DUPLICATE)
-+
-+#define SQUASHFS_EXPORTABLE(flags)		SQUASHFS_BIT(flags, \
-+						SQUASHFS_EXPORT)
-+
-+#define SQUASHFS_CHECK_DATA(flags)		SQUASHFS_BIT(flags, \
-+						SQUASHFS_CHECK)
-+
-+#define SQUASHFS_MKFLAGS(noi, nod, check_data, nof, no_frag, always_frag, \
-+		duplicate_checking, exortable)	(noi | (nod << 1) | (check_data << 2) \
-+		| (nof << 3) | (no_frag << 4) | (always_frag << 5) | \
-+		(duplicate_checking << 6) | (exportable << 7))
-+
-+/* Max number of types and file types */
-+#define SQUASHFS_DIR_TYPE		1
-+#define SQUASHFS_FILE_TYPE		2
-+#define SQUASHFS_SYMLINK_TYPE		3
-+#define SQUASHFS_BLKDEV_TYPE		4
-+#define SQUASHFS_CHRDEV_TYPE		5
-+#define SQUASHFS_FIFO_TYPE		6
-+#define SQUASHFS_SOCKET_TYPE		7
-+#define SQUASHFS_LDIR_TYPE		8
-+#define SQUASHFS_LREG_TYPE		9
-+
-+/* 1.0 filesystem type definitions */
-+#define SQUASHFS_TYPES			5
-+#define SQUASHFS_IPC_TYPE		0
-+
-+/* Flag whether block is compressed or uncompressed, bit is set if block is
-+ * uncompressed */
-+#define SQUASHFS_COMPRESSED_BIT		(1 << 15)
-+
-+#define SQUASHFS_COMPRESSED_SIZE(B)	(((B) & ~SQUASHFS_COMPRESSED_BIT) ? \
-+		(B) & ~SQUASHFS_COMPRESSED_BIT :  SQUASHFS_COMPRESSED_BIT)
-+
-+#define SQUASHFS_COMPRESSED(B)		(!((B) & SQUASHFS_COMPRESSED_BIT))
-+
-+#define SQUASHFS_COMPRESSED_BIT_BLOCK		(1 << 24)
-+
-+#define SQUASHFS_COMPRESSED_SIZE_BLOCK(B)	(((B) & \
-+	~SQUASHFS_COMPRESSED_BIT_BLOCK) ? (B) & \
-+	~SQUASHFS_COMPRESSED_BIT_BLOCK : SQUASHFS_COMPRESSED_BIT_BLOCK)
-+
-+#define SQUASHFS_COMPRESSED_BLOCK(B)	(!((B) & SQUASHFS_COMPRESSED_BIT_BLOCK))
-+
-+/*
-+ * Inode number ops.  Inodes consist of a compressed block number, and an
-+ * uncompressed  offset within that block
-+ */
-+#define SQUASHFS_INODE_BLK(a)		((unsigned int) ((a) >> 16))
-+
-+#define SQUASHFS_INODE_OFFSET(a)	((unsigned int) ((a) & 0xffff))
-+
-+#define SQUASHFS_MKINODE(A, B)		((squashfs_inode_t)(((squashfs_inode_t) (A)\
-+					<< 16) + (B)))
-+
-+/* Compute 32 bit VFS inode number from squashfs inode number */
-+#define SQUASHFS_MK_VFS_INODE(a, b)	((unsigned int) (((a) << 8) + \
-+					((b) >> 2) + 1))
-+/* XXX */
-+
-+/* Translate between VFS mode and squashfs mode */
-+#define SQUASHFS_MODE(a)		((a) & 0xfff)
-+
-+/* fragment and fragment table defines */
-+#define SQUASHFS_FRAGMENT_BYTES(A)	((A) * sizeof(struct squashfs_fragment_entry))
-+
-+#define SQUASHFS_FRAGMENT_INDEX(A)	(SQUASHFS_FRAGMENT_BYTES(A) / \
-+					SQUASHFS_METADATA_SIZE)
-+
-+#define SQUASHFS_FRAGMENT_INDEX_OFFSET(A)	(SQUASHFS_FRAGMENT_BYTES(A) % \
-+						SQUASHFS_METADATA_SIZE)
-+
-+#define SQUASHFS_FRAGMENT_INDEXES(A)	((SQUASHFS_FRAGMENT_BYTES(A) + \
-+					SQUASHFS_METADATA_SIZE - 1) / \
-+					SQUASHFS_METADATA_SIZE)
-+
-+#define SQUASHFS_FRAGMENT_INDEX_BYTES(A)	(SQUASHFS_FRAGMENT_INDEXES(A) *\
-+						sizeof(long long))
-+
-+/* inode lookup table defines */
-+#define SQUASHFS_LOOKUP_BYTES(A)	((A) * sizeof(squashfs_inode_t))
-+
-+#define SQUASHFS_LOOKUP_BLOCK(A)		(SQUASHFS_LOOKUP_BYTES(A) / \
-+						SQUASHFS_METADATA_SIZE)
-+
-+#define SQUASHFS_LOOKUP_BLOCK_OFFSET(A)		(SQUASHFS_LOOKUP_BYTES(A) % \
-+						SQUASHFS_METADATA_SIZE)
-+
-+#define SQUASHFS_LOOKUP_BLOCKS(A)	((SQUASHFS_LOOKUP_BYTES(A) + \
-+					SQUASHFS_METADATA_SIZE - 1) / \
-+					SQUASHFS_METADATA_SIZE)
-+
-+#define SQUASHFS_LOOKUP_BLOCK_BYTES(A)	(SQUASHFS_LOOKUP_BLOCKS(A) *\
-+					sizeof(long long))
-+
-+/* cached data constants for filesystem */
-+#define SQUASHFS_CACHED_BLKS		8
-+
-+#define SQUASHFS_MAX_FILE_SIZE_LOG	64
-+
-+#define SQUASHFS_MAX_FILE_SIZE		((long long) 1 << \
-+					(SQUASHFS_MAX_FILE_SIZE_LOG - 2))
-+
-+#define SQUASHFS_MARKER_BYTE		0xff
-+
-+/* meta index cache */
-+#define SQUASHFS_META_INDEXES	(SQUASHFS_METADATA_SIZE / sizeof(unsigned int))
-+#define SQUASHFS_META_ENTRIES	31
-+#define SQUASHFS_META_NUMBER	8
-+#define SQUASHFS_SLOTS		4
-+
-+struct meta_entry {
-+	long long		data_block;
-+	unsigned int		index_block;
-+	unsigned short		offset;
-+	unsigned short		pad;
-+};
-+
-+struct meta_index {
-+	unsigned int		inode_number;
-+	unsigned int		offset;
-+	unsigned short		entries;
-+	unsigned short		skip;
-+	unsigned short		locked;
-+	unsigned short		pad;
-+	struct meta_entry	meta_entry[SQUASHFS_META_ENTRIES];
-+};
-+
-+
-+/*
-+ * definitions for structures on disk
-+ */
-+
-+typedef long long		squashfs_block_t;
-+typedef long long		squashfs_inode_t;
-+
-+struct squashfs_super_block {
-+	unsigned int		s_magic;
-+	unsigned int		inodes;
-+	unsigned int		bytes_used_2;
-+	unsigned int		uid_start_2;
-+	unsigned int		guid_start_2;
-+	unsigned int		inode_table_start_2;
-+	unsigned int		directory_table_start_2;
-+	unsigned int		s_major:16;
-+	unsigned int		s_minor:16;
-+	unsigned int		block_size_1:16;
-+	unsigned int		block_log:16;
-+	unsigned int		flags:8;
-+	unsigned int		no_uids:8;
-+	unsigned int		no_guids:8;
-+	unsigned int		mkfs_time /* time of filesystem creation */;
-+	squashfs_inode_t	root_inode;
-+	unsigned int		block_size;
-+	unsigned int		fragments;
-+	unsigned int		fragment_table_start_2;
-+	long long		bytes_used;
-+	long long		uid_start;
-+	long long		guid_start;
-+	long long		inode_table_start;
-+	long long		directory_table_start;
-+	long long		fragment_table_start;
-+	long long		lookup_table_start;
-+} __attribute__ ((packed));
-+
-+struct squashfs_dir_index {
-+	unsigned int		index;
-+	unsigned int		start_block;
-+	unsigned char		size;
-+	unsigned char		name[0];
-+} __attribute__ ((packed));
-+
-+#define SQUASHFS_BASE_INODE_HEADER		\
-+	unsigned int		inode_type:4;	\
-+	unsigned int		mode:12;	\
-+	unsigned int		uid:8;		\
-+	unsigned int		guid:8;		\
-+	unsigned int		mtime;		\
-+	unsigned int 		inode_number;
-+
-+struct squashfs_base_inode_header {
-+	SQUASHFS_BASE_INODE_HEADER;
-+} __attribute__ ((packed));
-+
-+struct squashfs_ipc_inode_header {
-+	SQUASHFS_BASE_INODE_HEADER;
-+	unsigned int		nlink;
-+} __attribute__ ((packed));
-+
-+struct squashfs_dev_inode_header {
-+	SQUASHFS_BASE_INODE_HEADER;
-+	unsigned int		nlink;
-+	unsigned short		rdev;
-+} __attribute__ ((packed));
-+	
-+struct squashfs_symlink_inode_header {
-+	SQUASHFS_BASE_INODE_HEADER;
-+	unsigned int		nlink;
-+	unsigned short		symlink_size;
-+	char			symlink[0];
-+} __attribute__ ((packed));
-+
-+struct squashfs_reg_inode_header {
-+	SQUASHFS_BASE_INODE_HEADER;
-+	squashfs_block_t	start_block;
-+	unsigned int		fragment;
-+	unsigned int		offset;
-+	unsigned int		file_size;
-+	unsigned short		block_list[0];
-+} __attribute__ ((packed));
-+
-+struct squashfs_lreg_inode_header {
-+	SQUASHFS_BASE_INODE_HEADER;
-+	unsigned int		nlink;
-+	squashfs_block_t	start_block;
-+	unsigned int		fragment;
-+	unsigned int		offset;
-+	long long		file_size;
-+	unsigned short		block_list[0];
-+} __attribute__ ((packed));
-+
-+struct squashfs_dir_inode_header {
-+	SQUASHFS_BASE_INODE_HEADER;
-+	unsigned int		nlink;
-+	unsigned int		file_size:19;
-+	unsigned int		offset:13;
-+	unsigned int		start_block;
-+	unsigned int		parent_inode;
-+} __attribute__  ((packed));
-+
-+struct squashfs_ldir_inode_header {
-+	SQUASHFS_BASE_INODE_HEADER;
-+	unsigned int		nlink;
-+	unsigned int		file_size:27;
-+	unsigned int		offset:13;
-+	unsigned int		start_block;
-+	unsigned int		i_count:16;
-+	unsigned int		parent_inode;
-+	struct squashfs_dir_index	index[0];
-+} __attribute__  ((packed));
-+
-+union squashfs_inode_header {
-+	struct squashfs_base_inode_header	base;
-+	struct squashfs_dev_inode_header	dev;
-+	struct squashfs_symlink_inode_header	symlink;
-+	struct squashfs_reg_inode_header	reg;
-+	struct squashfs_lreg_inode_header	lreg;
-+	struct squashfs_dir_inode_header	dir;
-+	struct squashfs_ldir_inode_header	ldir;
-+	struct squashfs_ipc_inode_header	ipc;
-+};
-+	
-+struct squashfs_dir_entry {
-+	unsigned int		offset:13;
-+	unsigned int		type:3;
-+	unsigned int		size:8;
-+	int			inode_number:16;
-+	char			name[0];
-+} __attribute__ ((packed));
-+
-+struct squashfs_dir_header {
-+	unsigned int		count:8;
-+	unsigned int		start_block;
-+	unsigned int		inode_number;
-+} __attribute__ ((packed));
-+
-+struct squashfs_fragment_entry {
-+	long long		start_block;
-+	unsigned int		size;
-+	unsigned int		pending;
-+} __attribute__ ((packed));
-+
-+extern int squashfs_uncompress_block(void *d, int dstlen, void *s, int srclen);
-+extern int squashfs_uncompress_init(void);
-+extern int squashfs_uncompress_exit(void);
-+
-+/*
-+ * macros to convert each packed bitfield structure from little endian to big
-+ * endian and vice versa.  These are needed when creating or using a filesystem
-+ * on a machine with different byte ordering to the target architecture.
-+ *
-+ */
-+
-+#define SQUASHFS_SWAP_START \
-+	int bits;\
-+	int b_pos;\
-+	unsigned long long val;\
-+	unsigned char *s;\
-+	unsigned char *d;
-+
-+#define SQUASHFS_SWAP_SUPER_BLOCK(s, d) {\
-+	SQUASHFS_SWAP_START\
-+	SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_super_block));\
-+	SQUASHFS_SWAP((s)->s_magic, d, 0, 32);\
-+	SQUASHFS_SWAP((s)->inodes, d, 32, 32);\
-+	SQUASHFS_SWAP((s)->bytes_used_2, d, 64, 32);\
-+	SQUASHFS_SWAP((s)->uid_start_2, d, 96, 32);\
-+	SQUASHFS_SWAP((s)->guid_start_2, d, 128, 32);\
-+	SQUASHFS_SWAP((s)->inode_table_start_2, d, 160, 32);\
-+	SQUASHFS_SWAP((s)->directory_table_start_2, d, 192, 32);\
-+	SQUASHFS_SWAP((s)->s_major, d, 224, 16);\
-+	SQUASHFS_SWAP((s)->s_minor, d, 240, 16);\
-+	SQUASHFS_SWAP((s)->block_size_1, d, 256, 16);\
-+	SQUASHFS_SWAP((s)->block_log, d, 272, 16);\
-+	SQUASHFS_SWAP((s)->flags, d, 288, 8);\
-+	SQUASHFS_SWAP((s)->no_uids, d, 296, 8);\
-+	SQUASHFS_SWAP((s)->no_guids, d, 304, 8);\
-+	SQUASHFS_SWAP((s)->mkfs_time, d, 312, 32);\
-+	SQUASHFS_SWAP((s)->root_inode, d, 344, 64);\
-+	SQUASHFS_SWAP((s)->block_size, d, 408, 32);\
-+	SQUASHFS_SWAP((s)->fragments, d, 440, 32);\
-+	SQUASHFS_SWAP((s)->fragment_table_start_2, d, 472, 32);\
-+	SQUASHFS_SWAP((s)->bytes_used, d, 504, 64);\
-+	SQUASHFS_SWAP((s)->uid_start, d, 568, 64);\
-+	SQUASHFS_SWAP((s)->guid_start, d, 632, 64);\
-+	SQUASHFS_SWAP((s)->inode_table_start, d, 696, 64);\
-+	SQUASHFS_SWAP((s)->directory_table_start, d, 760, 64);\
-+	SQUASHFS_SWAP((s)->fragment_table_start, d, 824, 64);\
-+	SQUASHFS_SWAP((s)->lookup_table_start, d, 888, 64);\
-+}
-+
-+#define SQUASHFS_SWAP_BASE_INODE_CORE(s, d, n)\
-+	SQUASHFS_MEMSET(s, d, n);\
-+	SQUASHFS_SWAP((s)->inode_type, d, 0, 4);\
-+	SQUASHFS_SWAP((s)->mode, d, 4, 12);\
-+	SQUASHFS_SWAP((s)->uid, d, 16, 8);\
-+	SQUASHFS_SWAP((s)->guid, d, 24, 8);\
-+	SQUASHFS_SWAP((s)->mtime, d, 32, 32);\
-+	SQUASHFS_SWAP((s)->inode_number, d, 64, 32);
-+
-+#define SQUASHFS_SWAP_BASE_INODE_HEADER(s, d, n) {\
-+	SQUASHFS_SWAP_START\
-+	SQUASHFS_SWAP_BASE_INODE_CORE(s, d, n)\
-+}
-+
-+#define SQUASHFS_SWAP_IPC_INODE_HEADER(s, d) {\
-+	SQUASHFS_SWAP_START\
-+	SQUASHFS_SWAP_BASE_INODE_CORE(s, d, \
-+			sizeof(struct squashfs_ipc_inode_header))\
-+	SQUASHFS_SWAP((s)->nlink, d, 96, 32);\
-+}
-+
-+#define SQUASHFS_SWAP_DEV_INODE_HEADER(s, d) {\
-+	SQUASHFS_SWAP_START\
-+	SQUASHFS_SWAP_BASE_INODE_CORE(s, d, \
-+			sizeof(struct squashfs_dev_inode_header)); \
-+	SQUASHFS_SWAP((s)->nlink, d, 96, 32);\
-+	SQUASHFS_SWAP((s)->rdev, d, 128, 16);\
-+}
-+
-+#define SQUASHFS_SWAP_SYMLINK_INODE_HEADER(s, d) {\
-+	SQUASHFS_SWAP_START\
-+	SQUASHFS_SWAP_BASE_INODE_CORE(s, d, \
-+			sizeof(struct squashfs_symlink_inode_header));\
-+	SQUASHFS_SWAP((s)->nlink, d, 96, 32);\
-+	SQUASHFS_SWAP((s)->symlink_size, d, 128, 16);\
-+}
-+
-+#define SQUASHFS_SWAP_REG_INODE_HEADER(s, d) {\
-+	SQUASHFS_SWAP_START\
-+	SQUASHFS_SWAP_BASE_INODE_CORE(s, d, \
-+			sizeof(struct squashfs_reg_inode_header));\
-+	SQUASHFS_SWAP((s)->start_block, d, 96, 64);\
-+	SQUASHFS_SWAP((s)->fragment, d, 160, 32);\
-+	SQUASHFS_SWAP((s)->offset, d, 192, 32);\
-+	SQUASHFS_SWAP((s)->file_size, d, 224, 32);\
-+}
-+
-+#define SQUASHFS_SWAP_LREG_INODE_HEADER(s, d) {\
-+	SQUASHFS_SWAP_START\
-+	SQUASHFS_SWAP_BASE_INODE_CORE(s, d, \
-+			sizeof(struct squashfs_lreg_inode_header));\
-+	SQUASHFS_SWAP((s)->nlink, d, 96, 32);\
-+	SQUASHFS_SWAP((s)->start_block, d, 128, 64);\
-+	SQUASHFS_SWAP((s)->fragment, d, 192, 32);\
-+	SQUASHFS_SWAP((s)->offset, d, 224, 32);\
-+	SQUASHFS_SWAP((s)->file_size, d, 256, 64);\
-+}
-+
-+#define SQUASHFS_SWAP_DIR_INODE_HEADER(s, d) {\
-+	SQUASHFS_SWAP_START\
-+	SQUASHFS_SWAP_BASE_INODE_CORE(s, d, \
-+			sizeof(struct squashfs_dir_inode_header));\
-+	SQUASHFS_SWAP((s)->nlink, d, 96, 32);\
-+	SQUASHFS_SWAP((s)->file_size, d, 128, 19);\
-+	SQUASHFS_SWAP((s)->offset, d, 147, 13);\
-+	SQUASHFS_SWAP((s)->start_block, d, 160, 32);\
-+	SQUASHFS_SWAP((s)->parent_inode, d, 192, 32);\
-+}
-+
-+#define SQUASHFS_SWAP_LDIR_INODE_HEADER(s, d) {\
-+	SQUASHFS_SWAP_START\
-+	SQUASHFS_SWAP_BASE_INODE_CORE(s, d, \
-+			sizeof(struct squashfs_ldir_inode_header));\
-+	SQUASHFS_SWAP((s)->nlink, d, 96, 32);\
-+	SQUASHFS_SWAP((s)->file_size, d, 128, 27);\
-+	SQUASHFS_SWAP((s)->offset, d, 155, 13);\
-+	SQUASHFS_SWAP((s)->start_block, d, 168, 32);\
-+	SQUASHFS_SWAP((s)->i_count, d, 200, 16);\
-+	SQUASHFS_SWAP((s)->parent_inode, d, 216, 32);\
-+}
-+
-+#define SQUASHFS_SWAP_DIR_INDEX(s, d) {\
-+	SQUASHFS_SWAP_START\
-+	SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_dir_index));\
-+	SQUASHFS_SWAP((s)->index, d, 0, 32);\
-+	SQUASHFS_SWAP((s)->start_block, d, 32, 32);\
-+	SQUASHFS_SWAP((s)->size, d, 64, 8);\
-+}
-+
-+#define SQUASHFS_SWAP_DIR_HEADER(s, d) {\
-+	SQUASHFS_SWAP_START\
-+	SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_dir_header));\
-+	SQUASHFS_SWAP((s)->count, d, 0, 8);\
-+	SQUASHFS_SWAP((s)->start_block, d, 8, 32);\
-+	SQUASHFS_SWAP((s)->inode_number, d, 40, 32);\
-+}
-+
-+#define SQUASHFS_SWAP_DIR_ENTRY(s, d) {\
-+	SQUASHFS_SWAP_START\
-+	SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_dir_entry));\
-+	SQUASHFS_SWAP((s)->offset, d, 0, 13);\
-+	SQUASHFS_SWAP((s)->type, d, 13, 3);\
-+	SQUASHFS_SWAP((s)->size, d, 16, 8);\
-+	SQUASHFS_SWAP((s)->inode_number, d, 24, 16);\
-+}
-+
-+#define SQUASHFS_SWAP_FRAGMENT_ENTRY(s, d) {\
-+	SQUASHFS_SWAP_START\
-+	SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_fragment_entry));\
-+	SQUASHFS_SWAP((s)->start_block, d, 0, 64);\
-+	SQUASHFS_SWAP((s)->size, d, 64, 32);\
-+}
-+
-+#define SQUASHFS_SWAP_INODE_T(s, d) SQUASHFS_SWAP_LONG_LONGS(s, d, 1)
-+
-+#define SQUASHFS_SWAP_SHORTS(s, d, n) {\
-+	int entry;\
-+	int bit_position;\
-+	SQUASHFS_SWAP_START\
-+	SQUASHFS_MEMSET(s, d, n * 2);\
-+	for(entry = 0, bit_position = 0; entry < n; entry++, bit_position += \
-+			16)\
-+		SQUASHFS_SWAP(s[entry], d, bit_position, 16);\
-+}
-+
-+#define SQUASHFS_SWAP_INTS(s, d, n) {\
-+	int entry;\
-+	int bit_position;\
-+	SQUASHFS_SWAP_START\
-+	SQUASHFS_MEMSET(s, d, n * 4);\
-+	for(entry = 0, bit_position = 0; entry < n; entry++, bit_position += \
-+			32)\
-+		SQUASHFS_SWAP(s[entry], d, bit_position, 32);\
-+}
-+
-+#define SQUASHFS_SWAP_LONG_LONGS(s, d, n) {\
-+	int entry;\
-+	int bit_position;\
-+	SQUASHFS_SWAP_START\
-+	SQUASHFS_MEMSET(s, d, n * 8);\
-+	for(entry = 0, bit_position = 0; entry < n; entry++, bit_position += \
-+			64)\
-+		SQUASHFS_SWAP(s[entry], d, bit_position, 64);\
-+}
-+
-+#define SQUASHFS_SWAP_DATA(s, d, n, bits) {\
-+	int entry;\
-+	int bit_position;\
-+	SQUASHFS_SWAP_START\
-+	SQUASHFS_MEMSET(s, d, n * bits / 8);\
-+	for(entry = 0, bit_position = 0; entry < n; entry++, bit_position += \
-+			bits)\
-+		SQUASHFS_SWAP(s[entry], d, bit_position, bits);\
-+}
-+
-+#define SQUASHFS_SWAP_FRAGMENT_INDEXES(s, d, n) SQUASHFS_SWAP_LONG_LONGS(s, d, n)
-+#define SQUASHFS_SWAP_LOOKUP_BLOCKS(s, d, n) SQUASHFS_SWAP_LONG_LONGS(s, d, n)
-+
-+#ifdef CONFIG_SQUASHFS_1_0_COMPATIBILITY
-+
-+struct squashfs_base_inode_header_1 {
-+	unsigned int		inode_type:4;
-+	unsigned int		mode:12; /* protection */
-+	unsigned int		uid:4; /* index into uid table */
-+	unsigned int		guid:4; /* index into guid table */
-+} __attribute__ ((packed));
-+
-+struct squashfs_ipc_inode_header_1 {
-+	unsigned int		inode_type:4;
-+	unsigned int		mode:12; /* protection */
-+	unsigned int		uid:4; /* index into uid table */
-+	unsigned int		guid:4; /* index into guid table */
-+	unsigned int		type:4;
-+	unsigned int		offset:4;
-+} __attribute__ ((packed));
-+
-+struct squashfs_dev_inode_header_1 {
-+	unsigned int		inode_type:4;
-+	unsigned int		mode:12; /* protection */
-+	unsigned int		uid:4; /* index into uid table */
-+	unsigned int		guid:4; /* index into guid table */
-+	unsigned short		rdev;
-+} __attribute__ ((packed));
-+	
-+struct squashfs_symlink_inode_header_1 {
-+	unsigned int		inode_type:4;
-+	unsigned int		mode:12; /* protection */
-+	unsigned int		uid:4; /* index into uid table */
-+	unsigned int		guid:4; /* index into guid table */
-+	unsigned short		symlink_size;
-+	char			symlink[0];
-+} __attribute__ ((packed));
-+
-+struct squashfs_reg_inode_header_1 {
-+	unsigned int		inode_type:4;
-+	unsigned int		mode:12; /* protection */
-+	unsigned int		uid:4; /* index into uid table */
-+	unsigned int		guid:4; /* index into guid table */
-+	unsigned int		mtime;
-+	unsigned int		start_block;
-+	unsigned int		file_size:32;
-+	unsigned short		block_list[0];
-+} __attribute__ ((packed));
-+
-+struct squashfs_dir_inode_header_1 {
-+	unsigned int		inode_type:4;
-+	unsigned int		mode:12; /* protection */
-+	unsigned int		uid:4; /* index into uid table */
-+	unsigned int		guid:4; /* index into guid table */
-+	unsigned int		file_size:19;
-+	unsigned int		offset:13;
-+	unsigned int		mtime;
-+	unsigned int		start_block:24;
-+} __attribute__  ((packed));
-+
-+#define SQUASHFS_SWAP_BASE_INODE_CORE_1(s, d, n) \
-+	SQUASHFS_MEMSET(s, d, n);\
-+	SQUASHFS_SWAP((s)->inode_type, d, 0, 4);\
-+	SQUASHFS_SWAP((s)->mode, d, 4, 12);\
-+	SQUASHFS_SWAP((s)->uid, d, 16, 4);\
-+	SQUASHFS_SWAP((s)->guid, d, 20, 4);
-+
-+#define SQUASHFS_SWAP_BASE_INODE_HEADER_1(s, d, n) {\
-+	SQUASHFS_SWAP_START\
-+	SQUASHFS_SWAP_BASE_INODE_CORE_1(s, d, n)\
-+}
-+
-+#define SQUASHFS_SWAP_IPC_INODE_HEADER_1(s, d) {\
-+	SQUASHFS_SWAP_START\
-+	SQUASHFS_SWAP_BASE_INODE_CORE_1(s, d, \
-+			sizeof(struct squashfs_ipc_inode_header_1));\
-+	SQUASHFS_SWAP((s)->type, d, 24, 4);\
-+	SQUASHFS_SWAP((s)->offset, d, 28, 4);\
-+}
-+
-+#define SQUASHFS_SWAP_DEV_INODE_HEADER_1(s, d) {\
-+	SQUASHFS_SWAP_START\
-+	SQUASHFS_SWAP_BASE_INODE_CORE_1(s, d, \
-+			sizeof(struct squashfs_dev_inode_header_1));\
-+	SQUASHFS_SWAP((s)->rdev, d, 24, 16);\
-+}
-+
-+#define SQUASHFS_SWAP_SYMLINK_INODE_HEADER_1(s, d) {\
-+	SQUASHFS_SWAP_START\
-+	SQUASHFS_SWAP_BASE_INODE_CORE_1(s, d, \
-+			sizeof(struct squashfs_symlink_inode_header_1));\
-+	SQUASHFS_SWAP((s)->symlink_size, d, 24, 16);\
-+}
-+
-+#define SQUASHFS_SWAP_REG_INODE_HEADER_1(s, d) {\
-+	SQUASHFS_SWAP_START\
-+	SQUASHFS_SWAP_BASE_INODE_CORE_1(s, d, \
-+			sizeof(struct squashfs_reg_inode_header_1));\
-+	SQUASHFS_SWAP((s)->mtime, d, 24, 32);\
-+	SQUASHFS_SWAP((s)->start_block, d, 56, 32);\
-+	SQUASHFS_SWAP((s)->file_size, d, 88, 32);\
-+}
-+
-+#define SQUASHFS_SWAP_DIR_INODE_HEADER_1(s, d) {\
-+	SQUASHFS_SWAP_START\
-+	SQUASHFS_SWAP_BASE_INODE_CORE_1(s, d, \
-+			sizeof(struct squashfs_dir_inode_header_1));\
-+	SQUASHFS_SWAP((s)->file_size, d, 24, 19);\
-+	SQUASHFS_SWAP((s)->offset, d, 43, 13);\
-+	SQUASHFS_SWAP((s)->mtime, d, 56, 32);\
-+	SQUASHFS_SWAP((s)->start_block, d, 88, 24);\
-+}
-+
-+#endif
-+
-+#ifdef CONFIG_SQUASHFS_2_0_COMPATIBILITY
-+
-+struct squashfs_dir_index_2 {
-+	unsigned int		index:27;
-+	unsigned int		start_block:29;
-+	unsigned char		size;
-+	unsigned char		name[0];
-+} __attribute__ ((packed));
-+
-+struct squashfs_base_inode_header_2 {
-+	unsigned int		inode_type:4;
-+	unsigned int		mode:12; /* protection */
-+	unsigned int		uid:8; /* index into uid table */
-+	unsigned int		guid:8; /* index into guid table */
-+} __attribute__ ((packed));
-+
-+struct squashfs_ipc_inode_header_2 {
-+	unsigned int		inode_type:4;
-+	unsigned int		mode:12; /* protection */
-+	unsigned int		uid:8; /* index into uid table */
-+	unsigned int		guid:8; /* index into guid table */
-+} __attribute__ ((packed));
-+
-+struct squashfs_dev_inode_header_2 {
-+	unsigned int		inode_type:4;
-+	unsigned int		mode:12; /* protection */
-+	unsigned int		uid:8; /* index into uid table */
-+	unsigned int		guid:8; /* index into guid table */
-+	unsigned short		rdev;
-+} __attribute__ ((packed));
-+	
-+struct squashfs_symlink_inode_header_2 {
-+	unsigned int		inode_type:4;
-+	unsigned int		mode:12; /* protection */
-+	unsigned int		uid:8; /* index into uid table */
-+	unsigned int		guid:8; /* index into guid table */
-+	unsigned short		symlink_size;
-+	char			symlink[0];
-+} __attribute__ ((packed));
-+
-+struct squashfs_reg_inode_header_2 {
-+	unsigned int		inode_type:4;
-+	unsigned int		mode:12; /* protection */
-+	unsigned int		uid:8; /* index into uid table */
-+	unsigned int		guid:8; /* index into guid table */
-+	unsigned int		mtime;
-+	unsigned int		start_block;
-+	unsigned int		fragment;
-+	unsigned int		offset;
-+	unsigned int		file_size:32;
-+	unsigned short		block_list[0];
-+} __attribute__ ((packed));
-+
-+struct squashfs_dir_inode_header_2 {
-+	unsigned int		inode_type:4;
-+	unsigned int		mode:12; /* protection */
-+	unsigned int		uid:8; /* index into uid table */
-+	unsigned int		guid:8; /* index into guid table */
-+	unsigned int		file_size:19;
-+	unsigned int		offset:13;
-+	unsigned int		mtime;
-+	unsigned int		start_block:24;
-+} __attribute__  ((packed));
-+
-+struct squashfs_ldir_inode_header_2 {
-+	unsigned int		inode_type:4;
-+	unsigned int		mode:12; /* protection */
-+	unsigned int		uid:8; /* index into uid table */
-+	unsigned int		guid:8; /* index into guid table */
-+	unsigned int		file_size:27;
-+	unsigned int		offset:13;
-+	unsigned int		mtime;
-+	unsigned int		start_block:24;
-+	unsigned int		i_count:16;
-+	struct squashfs_dir_index_2	index[0];
-+} __attribute__  ((packed));
-+
-+union squashfs_inode_header_2 {
-+	struct squashfs_base_inode_header_2	base;
-+	struct squashfs_dev_inode_header_2	dev;
-+	struct squashfs_symlink_inode_header_2	symlink;
-+	struct squashfs_reg_inode_header_2	reg;
-+	struct squashfs_dir_inode_header_2	dir;
-+	struct squashfs_ldir_inode_header_2	ldir;
-+	struct squashfs_ipc_inode_header_2	ipc;
-+};
-+	
-+struct squashfs_dir_header_2 {
-+	unsigned int		count:8;
-+	unsigned int		start_block:24;
-+} __attribute__ ((packed));
-+
-+struct squashfs_dir_entry_2 {
-+	unsigned int		offset:13;
-+	unsigned int		type:3;
-+	unsigned int		size:8;
-+	char			name[0];
-+} __attribute__ ((packed));
-+
-+struct squashfs_fragment_entry_2 {
-+	unsigned int		start_block;
-+	unsigned int		size;
-+} __attribute__ ((packed));
-+
-+#define SQUASHFS_SWAP_BASE_INODE_CORE_2(s, d, n)\
-+	SQUASHFS_MEMSET(s, d, n);\
-+	SQUASHFS_SWAP((s)->inode_type, d, 0, 4);\
-+	SQUASHFS_SWAP((s)->mode, d, 4, 12);\
-+	SQUASHFS_SWAP((s)->uid, d, 16, 8);\
-+	SQUASHFS_SWAP((s)->guid, d, 24, 8);\
-+
-+#define SQUASHFS_SWAP_BASE_INODE_HEADER_2(s, d, n) {\
-+	SQUASHFS_SWAP_START\
-+	SQUASHFS_SWAP_BASE_INODE_CORE_2(s, d, n)\
-+}
-+
-+#define SQUASHFS_SWAP_IPC_INODE_HEADER_2(s, d) \
-+	SQUASHFS_SWAP_BASE_INODE_HEADER_2(s, d, sizeof(struct squashfs_ipc_inode_header_2))
-+
-+#define SQUASHFS_SWAP_DEV_INODE_HEADER_2(s, d) {\
-+	SQUASHFS_SWAP_START\
-+	SQUASHFS_SWAP_BASE_INODE_CORE_2(s, d, \
-+			sizeof(struct squashfs_dev_inode_header_2)); \
-+	SQUASHFS_SWAP((s)->rdev, d, 32, 16);\
-+}
-+
-+#define SQUASHFS_SWAP_SYMLINK_INODE_HEADER_2(s, d) {\
-+	SQUASHFS_SWAP_START\
-+	SQUASHFS_SWAP_BASE_INODE_CORE_2(s, d, \
-+			sizeof(struct squashfs_symlink_inode_header_2));\
-+	SQUASHFS_SWAP((s)->symlink_size, d, 32, 16);\
-+}
-+
-+#define SQUASHFS_SWAP_REG_INODE_HEADER_2(s, d) {\
-+	SQUASHFS_SWAP_START\
-+	SQUASHFS_SWAP_BASE_INODE_CORE_2(s, d, \
-+			sizeof(struct squashfs_reg_inode_header_2));\
-+	SQUASHFS_SWAP((s)->mtime, d, 32, 32);\
-+	SQUASHFS_SWAP((s)->start_block, d, 64, 32);\
-+	SQUASHFS_SWAP((s)->fragment, d, 96, 32);\
-+	SQUASHFS_SWAP((s)->offset, d, 128, 32);\
-+	SQUASHFS_SWAP((s)->file_size, d, 160, 32);\
-+}
-+
-+#define SQUASHFS_SWAP_DIR_INODE_HEADER_2(s, d) {\
-+	SQUASHFS_SWAP_START\
-+	SQUASHFS_SWAP_BASE_INODE_CORE_2(s, d, \
-+			sizeof(struct squashfs_dir_inode_header_2));\
-+	SQUASHFS_SWAP((s)->file_size, d, 32, 19);\
-+	SQUASHFS_SWAP((s)->offset, d, 51, 13);\
-+	SQUASHFS_SWAP((s)->mtime, d, 64, 32);\
-+	SQUASHFS_SWAP((s)->start_block, d, 96, 24);\
-+}
-+
-+#define SQUASHFS_SWAP_LDIR_INODE_HEADER_2(s, d) {\
-+	SQUASHFS_SWAP_START\
-+	SQUASHFS_SWAP_BASE_INODE_CORE_2(s, d, \
-+			sizeof(struct squashfs_ldir_inode_header_2));\
-+	SQUASHFS_SWAP((s)->file_size, d, 32, 27);\
-+	SQUASHFS_SWAP((s)->offset, d, 59, 13);\
-+	SQUASHFS_SWAP((s)->mtime, d, 72, 32);\
-+	SQUASHFS_SWAP((s)->start_block, d, 104, 24);\
-+	SQUASHFS_SWAP((s)->i_count, d, 128, 16);\
-+}
-+
-+#define SQUASHFS_SWAP_DIR_INDEX_2(s, d) {\
-+	SQUASHFS_SWAP_START\
-+	SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_dir_index_2));\
-+	SQUASHFS_SWAP((s)->index, d, 0, 27);\
-+	SQUASHFS_SWAP((s)->start_block, d, 27, 29);\
-+	SQUASHFS_SWAP((s)->size, d, 56, 8);\
-+}
-+#define SQUASHFS_SWAP_DIR_HEADER_2(s, d) {\
-+	SQUASHFS_SWAP_START\
-+	SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_dir_header_2));\
-+	SQUASHFS_SWAP((s)->count, d, 0, 8);\
-+	SQUASHFS_SWAP((s)->start_block, d, 8, 24);\
-+}
-+
-+#define SQUASHFS_SWAP_DIR_ENTRY_2(s, d) {\
-+	SQUASHFS_SWAP_START\
-+	SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_dir_entry_2));\
-+	SQUASHFS_SWAP((s)->offset, d, 0, 13);\
-+	SQUASHFS_SWAP((s)->type, d, 13, 3);\
-+	SQUASHFS_SWAP((s)->size, d, 16, 8);\
-+}
-+
-+#define SQUASHFS_SWAP_FRAGMENT_ENTRY_2(s, d) {\
-+	SQUASHFS_SWAP_START\
-+	SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_fragment_entry_2));\
-+	SQUASHFS_SWAP((s)->start_block, d, 0, 32);\
-+	SQUASHFS_SWAP((s)->size, d, 32, 32);\
-+}
-+
-+#define SQUASHFS_SWAP_FRAGMENT_INDEXES_2(s, d, n) SQUASHFS_SWAP_INTS(s, d, n)
-+
-+/* fragment and fragment table defines */
-+#define SQUASHFS_FRAGMENT_BYTES_2(A)	(A * sizeof(struct squashfs_fragment_entry_2))
-+
-+#define SQUASHFS_FRAGMENT_INDEX_2(A)	(SQUASHFS_FRAGMENT_BYTES_2(A) / \
-+					SQUASHFS_METADATA_SIZE)
-+
-+#define SQUASHFS_FRAGMENT_INDEX_OFFSET_2(A)	(SQUASHFS_FRAGMENT_BYTES_2(A) % \
-+						SQUASHFS_METADATA_SIZE)
-+
-+#define SQUASHFS_FRAGMENT_INDEXES_2(A)	((SQUASHFS_FRAGMENT_BYTES_2(A) + \
-+					SQUASHFS_METADATA_SIZE - 1) / \
-+					SQUASHFS_METADATA_SIZE)
-+
-+#define SQUASHFS_FRAGMENT_INDEX_BYTES_2(A)	(SQUASHFS_FRAGMENT_INDEXES_2(A) *\
-+						sizeof(int))
-+
-+#endif
-+
-+#ifdef __KERNEL__
-+
-+/*
-+ * macros used to swap each structure entry, taking into account
-+ * bitfields and different bitfield placing conventions on differing
-+ * architectures
-+ */
-+
-+#include <asm/byteorder.h>
-+
-+#ifdef __BIG_ENDIAN
-+	/* convert from little endian to big endian */
-+#define SQUASHFS_SWAP(value, p, pos, tbits) _SQUASHFS_SWAP(value, p, pos, \
-+		tbits, b_pos)
-+#else
-+	/* convert from big endian to little endian */ 
-+#define SQUASHFS_SWAP(value, p, pos, tbits) _SQUASHFS_SWAP(value, p, pos, \
-+		tbits, 64 - tbits - b_pos)
-+#endif
-+
-+#define _SQUASHFS_SWAP(value, p, pos, tbits, SHIFT) {\
-+	b_pos = pos % 8;\
-+	val = 0;\
-+	s = (unsigned char *)p + (pos / 8);\
-+	d = ((unsigned char *) &val) + 7;\
-+	for(bits = 0; bits < (tbits + b_pos); bits += 8) \
-+		*d-- = *s++;\
-+	value = (val >> (SHIFT))/* & ((1 << tbits) - 1)*/;\
-+}
-+
-+#define SQUASHFS_MEMSET(s, d, n)	memset(s, 0, n);
-+
-+#endif
-+#endif
---- /dev/null
-+++ b/include/linux/squashfs_fs_i.h
-@@ -0,0 +1,45 @@
-+#ifndef SQUASHFS_FS_I
-+#define SQUASHFS_FS_I
-+/*
-+ * Squashfs
-+ *
-+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007
-+ * Phillip Lougher <phillip@lougher.org.uk>
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation; either version 2,
-+ * or (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-+ *
-+ * squashfs_fs_i.h
-+ */
-+
-+struct squashfs_inode_info {
-+	long long	start_block;
-+	unsigned int	offset;
-+	union {
-+		struct {
-+			long long	fragment_start_block;
-+			unsigned int	fragment_size;
-+			unsigned int	fragment_offset;
-+			long long	block_list_start;
-+		} s1;
-+		struct {
-+			long long	directory_index_start;
-+			unsigned int	directory_index_offset;
-+			unsigned int	directory_index_count;
-+			unsigned int	parent_inode;
-+		} s2;
-+	} u;
-+	struct inode	vfs_inode;
-+};
-+#endif
---- /dev/null
-+++ b/include/linux/squashfs_fs_sb.h
-@@ -0,0 +1,74 @@
-+#ifndef SQUASHFS_FS_SB
-+#define SQUASHFS_FS_SB
-+/*
-+ * Squashfs
-+ *
-+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007
-+ * Phillip Lougher <phillip@lougher.org.uk>
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation; either version 2,
-+ * or (at your option) any later version.
-+ *
-+ * This program is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-+ * GNU General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU General Public License
-+ * along with this program; if not, write to the Free Software
-+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-+ *
-+ * squashfs_fs_sb.h
-+ */
-+
-+#include <linux/squashfs_fs.h>
-+
-+struct squashfs_cache {
-+	long long	block;
-+	int		length;
-+	long long	next_index;
-+	char		*data;
-+};
-+
-+struct squashfs_fragment_cache {
-+	long long	block;
-+	int		length;
-+	unsigned int	locked;
-+	char		*data;
-+};
-+
-+struct squashfs_sb_info {
-+	struct squashfs_super_block	sblk;
-+	int			devblksize;
-+	int			devblksize_log2;
-+	int			swap;
-+	struct squashfs_cache	*block_cache;
-+	struct squashfs_fragment_cache	*fragment;
-+	int			next_cache;
-+	int			next_fragment;
-+	int			next_meta_index;
-+	unsigned int		*uid;
-+	unsigned int		*guid;
-+	long long		*fragment_index;
-+	unsigned int		*fragment_index_2;
-+	char			*read_page;
-+	struct mutex		read_data_mutex;
-+	struct mutex		read_page_mutex;
-+	struct mutex		block_cache_mutex;
-+	struct mutex		fragment_mutex;
-+	struct mutex		meta_index_mutex;
-+	wait_queue_head_t	waitq;
-+	wait_queue_head_t	fragment_wait_queue;
-+	struct meta_index	*meta_index;
-+	z_stream		stream;
-+	long long		*inode_lookup_table;
-+	int			(*read_inode)(struct inode *i,  squashfs_inode_t \
-+				inode);
-+	long long		(*read_blocklist)(struct inode *inode, int \
-+				index, int readahead_blks, char *block_list, \
-+				unsigned short **block_p, unsigned int *bsize);
-+	int			(*read_fragment_index_table)(struct super_block *s);
-+};
-+#endif
---- a/init/do_mounts_rd.c
-+++ b/init/do_mounts_rd.c
-@@ -5,6 +5,7 @@
- #include <linux/ext2_fs.h>
- #include <linux/romfs_fs.h>
- #include <linux/cramfs_fs.h>
-+#include <linux/squashfs_fs.h>
- #include <linux/initrd.h>
- #include <linux/string.h>
- 
-@@ -39,6 +40,7 @@ static int __init crd_load(int in_fd, in
-  * numbers could not be found.
-  *
-  * We currently check for the following magic numbers:
-+ *      squashfs
-  * 	minix
-  * 	ext2
-  *	romfs
-@@ -53,6 +55,7 @@ identify_ramdisk_image(int fd, int start
- 	struct ext2_super_block *ext2sb;
- 	struct romfs_super_block *romfsb;
- 	struct cramfs_super *cramfsb;
-+	struct squashfs_super_block *squashfsb;
- 	int nblocks = -1;
- 	unsigned char *buf;
- 
-@@ -64,6 +67,7 @@ identify_ramdisk_image(int fd, int start
- 	ext2sb = (struct ext2_super_block *) buf;
- 	romfsb = (struct romfs_super_block *) buf;
- 	cramfsb = (struct cramfs_super *) buf;
-+	squashfsb = (struct squashfs_super_block *) buf;
- 	memset(buf, 0xe5, size);
- 
- 	/*
-@@ -101,6 +105,18 @@ identify_ramdisk_image(int fd, int start
- 		goto done;
- 	}
- 
-+	/* squashfs is at block zero too */
-+	if (squashfsb->s_magic == SQUASHFS_MAGIC) {
-+		printk(KERN_NOTICE
-+		       "RAMDISK: squashfs filesystem found at block %d\n",
-+		       start_block);
-+		if (squashfsb->s_major < 3)
-+			nblocks = (squashfsb->bytes_used_2+BLOCK_SIZE-1)>>BLOCK_SIZE_BITS;
-+		else
-+			nblocks = (squashfsb->bytes_used+BLOCK_SIZE-1)>>BLOCK_SIZE_BITS;
-+		goto done;
-+	}
-+
- 	/*
- 	 * Read block 1 to test for minix and ext2 superblock
- 	 */

Deleted: hardened/2.6/trunk/2.6.23/4405_alpha-sysctl-uac.patch
===================================================================
--- hardened/2.6/trunk/2.6.23/4405_alpha-sysctl-uac.patch	2008-03-18 12:35:18 UTC (rev 1269)
+++ hardened/2.6/trunk/2.6.23/4405_alpha-sysctl-uac.patch	2008-03-22 18:37:36 UTC (rev 1270)
@@ -1,187 +0,0 @@
----
- arch/alpha/Kconfig        |   26 ++++++++++++++++++++++++
- arch/alpha/kernel/traps.c |   49 ++++++++++++++++++++++++++++++++++++++++++++++
- include/linux/sysctl.h    |   14 +++++++++++++
- kernel/sysctl.c           |   12 ++++++++++-
- 4 files changed, 100 insertions(+), 1 deletion(-)
-
---- a/arch/alpha/Kconfig
-+++ b/arch/alpha/Kconfig
-@@ -616,6 +616,32 @@ config VERBOSE_MCHECK_ON
- 
- 	  Take the default (1) unless you want more control or more info.
- 
-+config ALPHA_UAC_SYSCTL
-+	bool "Configure UAC policy via sysctl"
-+	depends on SYSCTL
-+	default y
-+	---help---
-+	  Configuring the UAC (unaligned access control) policy on a Linux
-+	  system usually involves setting a compile time define. If you say
-+	  Y here, you will be able to modify the UAC policy at runtime using
-+	  the /proc interface.
-+
-+	  The UAC policy defines the action Linux should take when an
-+	  unaligned memory access occurs. The action can include printing a
-+	  warning message (NOPRINT), sending a signal to the offending
-+	  program to help developers debug their applications (SIGBUS), or
-+	  disabling the transparent fixing (NOFIX).
-+
-+	  The sysctls will be initialized to the compile-time defined UAC
-+	  policy. You can change these manually, or with the sysctl(8)
-+	  userspace utility.
-+
-+	  To disable the warning messages at runtime, you would use
-+
-+	    echo 1 > /proc/sys/kernel/uac/noprint
-+
-+	  This is pretty harmless. Say Y if you're not sure.
-+
- source "drivers/pci/Kconfig"
- source "drivers/eisa/Kconfig"
- 
---- a/arch/alpha/kernel/traps.c
-+++ b/arch/alpha/kernel/traps.c
-@@ -14,6 +14,7 @@
- #include <linux/delay.h>
- #include <linux/smp_lock.h>
- #include <linux/module.h>
-+#include <linux/sysctl.h>
- #include <linux/init.h>
- #include <linux/kallsyms.h>
- 
-@@ -102,6 +103,38 @@ static char * ireg_name[] = {"v0", "t0",
- 			   "t10", "t11", "ra", "pv", "at", "gp", "sp", "zero"};
- #endif
- 
-+#ifdef CONFIG_ALPHA_UAC_SYSCTL
-+static struct ctl_table_header *uac_sysctl_header;
-+
-+static int enabled_noprint = 0;
-+static int enabled_sigbus = 0;
-+static int enabled_nofix = 0;
-+
-+ctl_table uac_table[] = {
-+	{KERN_UAC_NOPRINT, "noprint", &enabled_noprint, sizeof (int), 0644, NULL, NULL, &proc_dointvec},
-+	{KERN_UAC_SIGBUS, "sigbus", &enabled_sigbus, sizeof (int), 0644, NULL, NULL, &proc_dointvec},
-+	{KERN_UAC_NOFIX, "nofix", &enabled_nofix, sizeof (int), 0644, NULL, NULL, &proc_dointvec},
-+        {0}
-+};
-+
-+static int __init init_uac_sysctl(void)
-+{
-+	/* Initialize sysctls with the #defined UAC policy */
-+	enabled_noprint = (test_thread_flag (TIF_UAC_NOPRINT)) ? 1 : 0;
-+	enabled_sigbus = (test_thread_flag (TIF_UAC_SIGBUS)) ? 1 : 0;
-+	enabled_nofix = (test_thread_flag (TIF_UAC_NOFIX)) ? 1 : 0;
-+
-+	/* save this for later so we can clean up */
-+	uac_sysctl_header = register_sysctl_table(uac_table);
-+	return 0;
-+}
-+
-+static void __exit exit_uac_sysctl(void)
-+{
-+	unregister_sysctl_table(uac_sysctl_header);
-+}
-+#endif
-+
- static void
- dik_show_code(unsigned int *pc)
- {
-@@ -780,7 +813,11 @@ do_entUnaUser(void __user * va, unsigned
- 	/* Check the UAC bits to decide what the user wants us to do
- 	   with the unaliged access.  */
- 
-+#ifndef CONFIG_ALPHA_UAC_SYSCTL
- 	if (!test_thread_flag (TIF_UAC_NOPRINT)) {
-+#else  /* CONFIG_ALPHA_UAC_SYSCTL */
-+	if (!(enabled_noprint)) {
-+#endif /* CONFIG_ALPHA_UAC_SYSCTL */
- 		if (cnt >= 5 && jiffies - last_time > 5*HZ) {
- 			cnt = 0;
- 		}
-@@ -791,10 +828,18 @@ do_entUnaUser(void __user * va, unsigned
- 		}
- 		last_time = jiffies;
- 	}
-+#ifndef CONFIG_ALPHA_UAC_SYSCTL
- 	if (test_thread_flag (TIF_UAC_SIGBUS))
-+#else  /* CONFIG_ALPHA_UAC_SYSCTL */
-+	if (enabled_sigbus)
-+#endif /* CONFIG_ALPHA_UAC_SYSCTL */
- 		goto give_sigbus;
- 	/* Not sure why you'd want to use this, but... */
-+#ifndef CONFIG_ALPHA_UAC_SYSCTL
- 	if (test_thread_flag (TIF_UAC_NOFIX))
-+#else  /* CONFIG_ALPHA_UAC_SYSCTL */
-+	if (enabled_nofix)
-+#endif /* CONFIG_ALPHA_UAC_SYSCTL */
- 		return;
- 
- 	/* Don't bother reading ds in the access check since we already
-@@ -1089,3 +1134,7 @@ trap_init(void)
- 	wrent(entSys, 5);
- 	wrent(entDbg, 6);
- }
-+
-+#ifdef CONFIG_ALPHA_UAC_SYSCTL
-+__initcall(init_uac_sysctl);
-+#endif
---- a/include/linux/sysctl.h
-+++ b/include/linux/sysctl.h
-@@ -165,6 +165,9 @@ enum
- 	KERN_MAX_LOCK_DEPTH=74,
- 	KERN_NMI_WATCHDOG=75, /* int: enable/disable nmi watchdog */
- 	KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
-+#ifdef CONFIG_ALPHA_UAC_SYSCTL
-+	KERN_UAC_POLICY=78,	/* int: Alpha unaligned access control policy flags */
-+#endif /* CONFIG_ALPHA_UAC_SYSCTL */
- };
- 
- 
-@@ -258,6 +261,17 @@ enum
- 	PTY_NR=2
- };
- 
-+#ifdef CONFIG_ALPHA_UAC_SYSCTL
-+/* /proc/sys/kernel/uac */
-+enum
-+{
-+	/* UAC policy on Alpha */
-+	KERN_UAC_NOPRINT=1,	/* int: printk() on unaligned access */
-+	KERN_UAC_SIGBUS=2,	/* int: send SIGBUS on unaligned access */
-+	KERN_UAC_NOFIX=3,	/* int: don't fix the unaligned access */
-+};
-+#endif /* CONFIG_ALPHA_UAC_SYSCTL */
-+
- /* /proc/sys/bus/isa */
- enum
- {
---- a/kernel/sysctl.c
-+++ b/kernel/sysctl.c
-@@ -155,6 +155,9 @@ extern ctl_table pty_table[];
- #ifdef CONFIG_INOTIFY_USER
- extern ctl_table inotify_table[];
- #endif
-+#ifdef CONFIG_ALPHA_UAC_SYSCTL
-+extern ctl_table uac_table[];
-+#endif
- 
- #ifdef HAVE_ARCH_PICK_MMAP_LAYOUT
- int sysctl_legacy_va_layout;
-@@ -208,6 +211,14 @@ static ctl_table root_table[] = {
-  * NOTE: do not add new entries to this table unless you have read
-  * Documentation/sysctl/ctl_unnumbered.txt
-  */
-+#ifdef CONFIG_ALPHA_UAC_SYSCTL
-+	{
-+		.ctl_name	= KERN_UAC_POLICY,
-+		.procname	= "uac",
-+		.mode		= 0555,
-+		.child		= uac_table,
-+	},
-+#endif /* CONFIG_ALPHA_UAC_SYSCTL */
- 	{ .ctl_name = 0 }
- };
- 

Deleted: hardened/2.6/trunk/2.6.23/4450_grsec-2.1.11-2.6.23.9-200712101800.patch
===================================================================
--- hardened/2.6/trunk/2.6.23/4450_grsec-2.1.11-2.6.23.9-200712101800.patch	2008-03-18 12:35:18 UTC (rev 1269)
+++ hardened/2.6/trunk/2.6.23/4450_grsec-2.1.11-2.6.23.9-200712101800.patch	2008-03-22 18:37:36 UTC (rev 1270)
@@ -1,35106 +0,0 @@
---- a/arch/alpha/kernel/module.c
-+++ b/arch/alpha/kernel/module.c
-@@ -176,7 +176,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, 
- 
- 	/* The small sections were sorted to the end of the segment.
- 	   The following should definitely cover them.  */
--	gp = (u64)me->module_core + me->core_size - 0x8000;
-+	gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
- 	got = sechdrs[me->arch.gotsecindex].sh_addr;
- 
- 	for (i = 0; i < n; i++) {
---- a/arch/alpha/kernel/osf_sys.c
-+++ b/arch/alpha/kernel/osf_sys.c
-@@ -1288,6 +1288,10 @@ arch_get_unmapped_area(struct file *filp
- 	   merely specific addresses, but regions of memory -- perhaps
- 	   this feature should be incorporated into all ports?  */
- 
-+#ifdef CONFIG_PAX_RANDMMAP
-+	if (!(current->mm->pax_flags & MF_PAX_RANDMMAP) || !filp)
-+#endif
-+
- 	if (addr) {
- 		addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
- 		if (addr != (unsigned long) -ENOMEM)
-@@ -1295,8 +1299,8 @@ arch_get_unmapped_area(struct file *filp
- 	}
- 
- 	/* Next, try allocating at TASK_UNMAPPED_BASE.  */
--	addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
--					 len, limit);
-+	addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
-+
- 	if (addr != (unsigned long) -ENOMEM)
- 		return addr;
- 
---- a/arch/alpha/kernel/ptrace.c
-+++ b/arch/alpha/kernel/ptrace.c
-@@ -15,6 +15,7 @@
- #include <linux/slab.h>
- #include <linux/security.h>
- #include <linux/signal.h>
-+#include <linux/grsecurity.h>
- 
- #include <asm/uaccess.h>
- #include <asm/pgtable.h>
-@@ -283,6 +284,11 @@ do_sys_ptrace(long request, long pid, lo
- 		goto out_notsk;
- 	}
- 
-+	if (gr_handle_ptrace(child, request)) {
-+		ret = -EPERM;
-+		goto out;
-+	}
-+
- 	if (request == PTRACE_ATTACH) {
- 		ret = ptrace_attach(child);
- 		goto out;
---- a/arch/alpha/mm/fault.c
-+++ b/arch/alpha/mm/fault.c
-@@ -23,6 +23,7 @@
- #include <linux/smp.h>
- #include <linux/interrupt.h>
- #include <linux/module.h>
-+#include <linux/binfmts.h>
- 
- #include <asm/system.h>
- #include <asm/uaccess.h>
-@@ -54,6 +55,124 @@ __load_new_mm_context(struct mm_struct *
- 	__reload_thread(pcb);
- }
- 
-+#ifdef CONFIG_PAX_PAGEEXEC
-+/*
-+ * PaX: decide what to do with offenders (regs->pc = fault address)
-+ *
-+ * returns 1 when task should be killed
-+ *         2 when patched PLT trampoline was detected
-+ *         3 when unpatched PLT trampoline was detected
-+ */
-+static int pax_handle_fetch_fault(struct pt_regs *regs)
-+{
-+
-+#ifdef CONFIG_PAX_EMUPLT
-+	int err;
-+
-+	do { /* PaX: patched PLT emulation #1 */
-+		unsigned int ldah, ldq, jmp;
-+
-+		err = get_user(ldah, (unsigned int *)regs->pc);
-+		err |= get_user(ldq, (unsigned int *)(regs->pc+4));
-+		err |= get_user(jmp, (unsigned int *)(regs->pc+8));
-+
-+		if (err)
-+			break;
-+
-+		if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
-+		    (ldq & 0xFFFF0000U) == 0xA77B0000U &&
-+		    jmp == 0x6BFB0000U)
-+		{
-+			unsigned long r27, addr;
-+			unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
-+			unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
-+
-+			addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
-+			err = get_user(r27, (unsigned long *)addr);
-+			if (err)
-+				break;
-+
-+			regs->r27 = r27;
-+			regs->pc = r27;
-+			return 2;
-+		}
-+	} while (0);
-+
-+	do { /* PaX: patched PLT emulation #2 */
-+		unsigned int ldah, lda, br;
-+
-+		err = get_user(ldah, (unsigned int *)regs->pc);
-+		err |= get_user(lda, (unsigned int *)(regs->pc+4));
-+		err |= get_user(br, (unsigned int *)(regs->pc+8));
-+
-+		if (err)
-+			break;
-+
-+		if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
-+		    (lda & 0xFFFF0000U) == 0xA77B0000U &&
-+		    (br & 0xFFE00000U) == 0xC3E00000U)
-+		{
-+			unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
-+			unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
-+			unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
-+
-+			regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
-+			regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
-+			return 2;
-+		}
-+	} while (0);
-+
-+	do { /* PaX: unpatched PLT emulation */
-+		unsigned int br;
-+
-+		err = get_user(br, (unsigned int *)regs->pc);
-+
-+		if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
-+			unsigned int br2, ldq, nop, jmp;
-+			unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
-+
-+			addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
-+			err = get_user(br2, (unsigned int *)addr);
-+			err |= get_user(ldq, (unsigned int *)(addr+4));
-+			err |= get_user(nop, (unsigned int *)(addr+8));
-+			err |= get_user(jmp, (unsigned int *)(addr+12));
-+			err |= get_user(resolver, (unsigned long *)(addr+16));
-+
-+			if (err)
-+				break;
-+
-+			if (br2 == 0xC3600000U &&
-+			    ldq == 0xA77B000CU &&
-+			    nop == 0x47FF041FU &&
-+			    jmp == 0x6B7B0000U)
-+			{
-+				regs->r28 = regs->pc+4;
-+				regs->r27 = addr+16;
-+				regs->pc = resolver;
-+				return 3;
-+			}
-+		}
-+	} while (0);
-+#endif
-+
-+	return 1;
-+}
-+
-+void pax_report_insns(void *pc, void *sp)
-+{
-+	unsigned long i;
-+
-+	printk(KERN_ERR "PAX: bytes at PC: ");
-+	for (i = 0; i < 5; i++) {
-+		unsigned int c;
-+		if (get_user(c, (unsigned int *)pc+i))
-+			printk("???????? ");
-+		else
-+			printk("%08x ", c);
-+	}
-+	printk("\n");
-+}
-+#endif
- 
- /*
-  * This routine handles page faults.  It determines the address,
-@@ -131,8 +250,29 @@ do_page_fault(unsigned long address, uns
-  good_area:
- 	si_code = SEGV_ACCERR;
- 	if (cause < 0) {
--		if (!(vma->vm_flags & VM_EXEC))
-+		if (!(vma->vm_flags & VM_EXEC)) {
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+			if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
-+				goto bad_area;
-+
-+			up_read(&mm->mmap_sem);
-+			switch (pax_handle_fetch_fault(regs)) {
-+
-+#ifdef CONFIG_PAX_EMUPLT
-+			case 2:
-+			case 3:
-+				return;
-+#endif
-+
-+			}
-+			pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
-+			do_exit(SIGKILL);
-+#else
- 			goto bad_area;
-+#endif
-+
-+		}
- 	} else if (!cause) {
- 		/* Allow reads even for write-only mappings */
- 		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
---- a/arch/arm/mm/mmap.c
-+++ b/arch/arm/mm/mmap.c
-@@ -60,6 +60,10 @@ arch_get_unmapped_area(struct file *filp
- 	if (len > TASK_SIZE)
- 		return -ENOMEM;
- 
-+#ifdef CONFIG_PAX_RANDMMAP
-+	if (!(mm->pax_flags & MF_PAX_RANDMMAP) || !filp)
-+#endif
-+
- 	if (addr) {
- 		if (do_align)
- 			addr = COLOUR_ALIGN(addr, pgoff);
-@@ -72,10 +76,10 @@ arch_get_unmapped_area(struct file *filp
- 			return addr;
- 	}
- 	if (len > mm->cached_hole_size) {
--	        start_addr = addr = mm->free_area_cache;
-+		start_addr = addr = mm->free_area_cache;
- 	} else {
--	        start_addr = addr = TASK_UNMAPPED_BASE;
--	        mm->cached_hole_size = 0;
-+		start_addr = addr = mm->mmap_base;
-+		mm->cached_hole_size = 0;
- 	}
- 
- full_search:
-@@ -91,8 +95,8 @@ full_search:
- 			 * Start a new search - just in case we missed
- 			 * some holes.
- 			 */
--			if (start_addr != TASK_UNMAPPED_BASE) {
--				start_addr = addr = TASK_UNMAPPED_BASE;
-+			if (start_addr != mm->mmap_base) {
-+				start_addr = addr = mm->mmap_base;
- 				mm->cached_hole_size = 0;
- 				goto full_search;
- 			}
---- a/arch/avr32/mm/fault.c
-+++ b/arch/avr32/mm/fault.c
-@@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
- 
- int exception_trace = 1;
- 
-+#ifdef CONFIG_PAX_PAGEEXEC
-+void pax_report_insns(void *pc, void *sp)
-+{
-+	unsigned long i;
-+
-+	printk(KERN_ERR "PAX: bytes at PC: ");
-+	for (i = 0; i < 20; i++) {
-+		unsigned char c;
-+		if (get_user(c, (unsigned char *)pc+i))
-+			printk("???????? ");
-+		else
-+			printk("%02x ", c);
-+	}
-+	printk("\n");
-+}
-+#endif
-+
- /*
-  * This routine handles page faults. It determines the address and the
-  * problem, and then passes it off to one of the appropriate routines.
-@@ -157,6 +174,16 @@ bad_area:
- 	up_read(&mm->mmap_sem);
- 
- 	if (user_mode(regs)) {
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+		if (mm->pax_flags & MF_PAX_PAGEEXEC) {
-+			if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
-+				pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
-+				do_exit(SIGKILL);
-+			}
-+		}
-+#endif
-+
- 		if (exception_trace && printk_ratelimit())
- 			printk("%s%s[%d]: segfault at %08lx pc %08lx "
- 			       "sp %08lx ecr %lu\n",
---- a/arch/i386/boot/bitops.h
-+++ b/arch/i386/boot/bitops.h
-@@ -28,7 +28,7 @@ static inline int variable_test_bit(int 
- 	u8 v;
- 	const u32 *p = (const u32 *)addr;
- 
--	asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
-+	asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
- 	return v;
- }
- 
-@@ -39,7 +39,7 @@ static inline int variable_test_bit(int 
- 
- static inline void set_bit(int nr, void *addr)
- {
--	asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
-+	asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
- }
- 
- #endif /* BOOT_BITOPS_H */
---- a/arch/i386/boot/boot.h
-+++ b/arch/i386/boot/boot.h
-@@ -78,7 +78,7 @@ static inline void io_delay(void)
- static inline u16 ds(void)
- {
- 	u16 seg;
--	asm("movw %%ds,%0" : "=rm" (seg));
-+	asm volatile("movw %%ds,%0" : "=rm" (seg));
- 	return seg;
- }
- 
-@@ -174,7 +174,7 @@ static inline void wrgs32(u32 v, addr_t 
- static inline int memcmp(const void *s1, const void *s2, size_t len)
- {
- 	u8 diff;
--	asm("repe; cmpsb; setnz %0"
-+	asm volatile("repe; cmpsb; setnz %0"
- 	    : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
- 	return diff;
- }
---- a/arch/i386/boot/compressed/head.S
-+++ b/arch/i386/boot/compressed/head.S
-@@ -159,9 +159,8 @@ relocated:
-  */
- 
- 1:	subl $4, %edi
--	movl 0(%edi), %ecx
--	testl %ecx, %ecx
--	jz 2f
-+	movl (%edi), %ecx
-+	jecxz 2f
- 	addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
- 	jmp 1b
- 2:
---- a/arch/i386/boot/compressed/relocs.c
-+++ b/arch/i386/boot/compressed/relocs.c
-@@ -10,9 +10,13 @@
- #define USE_BSD
- #include <endian.h>
- 
-+#include "../../../../include/linux/autoconf.h"
-+
-+#define MAX_PHDRS 100
- #define MAX_SHDRS 100
- #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
- static Elf32_Ehdr ehdr;
-+static Elf32_Phdr phdr[MAX_PHDRS];
- static Elf32_Shdr shdr[MAX_SHDRS];
- static Elf32_Sym  *symtab[MAX_SHDRS];
- static Elf32_Rel  *reltab[MAX_SHDRS];
-@@ -246,6 +250,34 @@ static void read_ehdr(FILE *fp)
- 	}
- }
- 
-+static void read_phdrs(FILE *fp)
-+{
-+	int i;
-+	if (ehdr.e_phnum > MAX_PHDRS) {
-+		die("%d program headers supported: %d\n",
-+			ehdr.e_phnum, MAX_PHDRS);
-+	}
-+	if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
-+		die("Seek to %d failed: %s\n",
-+			ehdr.e_phoff, strerror(errno));
-+	}
-+	if (fread(&phdr, sizeof(phdr[0]), ehdr.e_phnum, fp) != ehdr.e_phnum) {
-+		die("Cannot read ELF program headers: %s\n",
-+			strerror(errno));
-+	}
-+	for(i = 0; i < ehdr.e_phnum; i++) {
-+		phdr[i].p_type      = elf32_to_cpu(phdr[i].p_type);
-+		phdr[i].p_offset    = elf32_to_cpu(phdr[i].p_offset);
-+		phdr[i].p_vaddr     = elf32_to_cpu(phdr[i].p_vaddr);
-+		phdr[i].p_paddr     = elf32_to_cpu(phdr[i].p_paddr);
-+		phdr[i].p_filesz    = elf32_to_cpu(phdr[i].p_filesz);
-+		phdr[i].p_memsz     = elf32_to_cpu(phdr[i].p_memsz);
-+		phdr[i].p_flags     = elf32_to_cpu(phdr[i].p_flags);
-+		phdr[i].p_align     = elf32_to_cpu(phdr[i].p_align);
-+	}
-+
-+}
-+
- static void read_shdrs(FILE *fp)
- {
- 	int i;
-@@ -332,6 +364,8 @@ static void read_symtabs(FILE *fp)
- static void read_relocs(FILE *fp)
- {
- 	int i,j;
-+	uint32_t base;
-+
- 	for(i = 0; i < ehdr.e_shnum; i++) {
- 		if (shdr[i].sh_type != SHT_REL) {
- 			continue;
-@@ -349,8 +383,17 @@ static void read_relocs(FILE *fp)
- 			die("Cannot read symbol table: %s\n",
- 				strerror(errno));
- 		}
-+		base = 0;
-+		for (j = 0; j < ehdr.e_phnum; j++) {
-+			if (phdr[j].p_type != PT_LOAD )
-+				continue;
-+			if (shdr[shdr[i].sh_info].sh_offset < phdr[j].p_offset || shdr[shdr[i].sh_info].sh_offset > phdr[j].p_offset + phdr[j].p_filesz)
-+				continue;
-+			base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
-+			break;
-+		}
- 		for(j = 0; j < shdr[i].sh_size/sizeof(reltab[0][0]); j++) {
--			reltab[i][j].r_offset = elf32_to_cpu(reltab[i][j].r_offset);
-+			reltab[i][j].r_offset = elf32_to_cpu(reltab[i][j].r_offset) + base;
- 			reltab[i][j].r_info   = elf32_to_cpu(reltab[i][j].r_info);
- 		}
- 	}
-@@ -487,6 +530,27 @@ static void walk_relocs(void (*visit)(El
- 			if (sym->st_shndx == SHN_ABS) {
- 				continue;
- 			}
-+			/* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
-+			if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strncmp(sym_name(sym_strtab, sym), "__per_cpu_", 10)) {
-+				continue;
-+			}
-+#ifdef CONFIG_PAX_KERNEXEC
-+			/* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
-+			if (!strcmp(sec_name(sym->st_shndx), ".init.text")) {
-+				continue;
-+			}
-+			if (!strcmp(sec_name(sym->st_shndx), ".exit.text")) {
-+				continue;
-+			}
-+			if (!strcmp(sec_name(sym->st_shndx), ".text.head"))
-+				if (strcmp(sym_name(sym_strtab, sym), "__init_end") &&
-+				    strcmp(sym_name(sym_strtab, sym), "KERNEL_TEXT_OFFSET")) {
-+				continue;
-+			}
-+			if (!strcmp(sec_name(sym->st_shndx), ".text")) {
-+				continue;
-+			}
-+#endif
- 			if (r_type == R_386_PC32) {
- 				/* PC relative relocations don't need to be adjusted */
- 			}
-@@ -614,6 +678,7 @@ int main(int argc, char **argv)
- 			fname, strerror(errno));
- 	}
- 	read_ehdr(fp);
-+	read_phdrs(fp);
- 	read_shdrs(fp);
- 	read_strtabs(fp);
- 	read_symtabs(fp);
---- a/arch/i386/boot/cpucheck.c
-+++ b/arch/i386/boot/cpucheck.c
-@@ -90,7 +90,7 @@ static int has_fpu(void)
- 	u16 fcw = -1, fsw = -1;
- 	u32 cr0;
- 
--	asm("movl %%cr0,%0" : "=r" (cr0));
-+	asm volatile("movl %%cr0,%0" : "=r" (cr0));
- 	if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
- 		cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
- 		asm volatile("movl %0,%%cr0" : : "r" (cr0));
-@@ -106,7 +106,7 @@ static int has_eflag(u32 mask)
- {
- 	u32 f0, f1;
- 
--	asm("pushfl ; "
-+	asm volatile("pushfl ; "
- 	    "pushfl ; "
- 	    "popl %0 ; "
- 	    "movl %0,%1 ; "
-@@ -131,7 +131,7 @@ static void get_flags(void)
- 		set_bit(X86_FEATURE_FPU, cpu.flags);
- 
- 	if (has_eflag(X86_EFLAGS_ID)) {
--		asm("cpuid"
-+		asm volatile("cpuid"
- 		    : "=a" (max_intel_level),
- 		      "=b" (cpu_vendor[0]),
- 		      "=d" (cpu_vendor[1]),
-@@ -140,7 +140,7 @@ static void get_flags(void)
- 
- 		if (max_intel_level >= 0x00000001 &&
- 		    max_intel_level <= 0x0000ffff) {
--			asm("cpuid"
-+			asm volatile("cpuid"
- 			    : "=a" (tfms),
- 			      "=c" (cpu.flags[4]),
- 			      "=d" (cpu.flags[0])
-@@ -152,7 +152,7 @@ static void get_flags(void)
- 				cpu.model += ((tfms >> 16) & 0xf) << 4;
- 		}
- 
--		asm("cpuid"
-+		asm volatile("cpuid"
- 		    : "=a" (max_amd_level)
- 		    : "a" (0x80000000)
- 		    : "ebx", "ecx", "edx");
-@@ -160,7 +160,7 @@ static void get_flags(void)
- 		if (max_amd_level >= 0x80000001 &&
- 		    max_amd_level <= 0x8000ffff) {
- 			u32 eax = 0x80000001;
--			asm("cpuid"
-+			asm volatile("cpuid"
- 			    : "+a" (eax),
- 			      "=c" (cpu.flags[6]),
- 			      "=d" (cpu.flags[1])
-@@ -219,9 +219,9 @@ int check_cpu(int *cpu_level_ptr, int *r
- 		u32 ecx = MSR_K7_HWCR;
- 		u32 eax, edx;
- 
--		asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
-+		asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
- 		eax &= ~(1 << 15);
--		asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
-+		asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
- 
- 		get_flags();	/* Make sure it really did something */
- 		err = check_flags();
-@@ -234,9 +234,9 @@ int check_cpu(int *cpu_level_ptr, int *r
- 		u32 ecx = MSR_VIA_FCR;
- 		u32 eax, edx;
- 
--		asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
-+		asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
- 		eax |= (1<<1)|(1<<7);
--		asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
-+		asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
- 
- 		set_bit(X86_FEATURE_CX8, cpu.flags);
- 		err = check_flags();
-@@ -247,12 +247,12 @@ int check_cpu(int *cpu_level_ptr, int *r
- 		u32 eax, edx;
- 		u32 level = 1;
- 
--		asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
--		asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
--		asm("cpuid"
-+		asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
-+		asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
-+		asm volatile("cpuid"
- 		    : "+a" (level), "=d" (cpu.flags[0])
- 		    : : "ecx", "ebx");
--		asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
-+		asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
- 
- 		err = check_flags();
- 	}
---- a/arch/i386/boot/edd.c
-+++ b/arch/i386/boot/edd.c
-@@ -78,7 +78,7 @@ static int get_edd_info(u8 devno, struct
- 	ax = 0x4100;
- 	bx = EDDMAGIC1;
- 	dx = devno;
--	asm("pushfl; stc; int $0x13; setc %%al; popfl"
-+	asm volatile("pushfl; stc; int $0x13; setc %%al; popfl"
- 	    : "+a" (ax), "+b" (bx), "=c" (cx), "+d" (dx)
- 	    : : "esi", "edi");
- 
-@@ -97,7 +97,7 @@ static int get_edd_info(u8 devno, struct
- 	ei->params.length = sizeof(ei->params);
- 	ax = 0x4800;
- 	dx = devno;
--	asm("pushfl; int $0x13; popfl"
-+	asm volatile("pushfl; int $0x13; popfl"
- 	    : "+a" (ax), "+d" (dx), "=m" (ei->params)
- 	    : "S" (&ei->params)
- 	    : "ebx", "ecx", "edi");
-@@ -108,7 +108,7 @@ static int get_edd_info(u8 devno, struct
- 	ax = 0x0800;
- 	dx = devno;
- 	di = 0;
--	asm("pushw %%es; "
-+	asm volatile("pushw %%es; "
- 	    "movw %%di,%%es; "
- 	    "pushfl; stc; int $0x13; setc %%al; popfl; "
- 	    "popw %%es"
---- a/arch/i386/boot/main.c
-+++ b/arch/i386/boot/main.c
-@@ -77,7 +77,7 @@ static void keyboard_set_repeat(void)
-  */
- static void query_ist(void)
- {
--	asm("int $0x15"
-+	asm volatile("int $0x15"
- 	    : "=a" (boot_params.ist_info.signature),
- 	      "=b" (boot_params.ist_info.command),
- 	      "=c" (boot_params.ist_info.event),
---- a/arch/i386/boot/mca.c
-+++ b/arch/i386/boot/mca.c
-@@ -21,7 +21,7 @@ int query_mca(void)
- 	u8 err;
- 	u16 es, bx, len;
- 
--	asm("pushw %%es ; "
-+	asm volatile("pushw %%es ; "
- 	    "int $0x15 ; "
- 	    "setc %0 ; "
- 	    "movw %%es, %1 ; "
---- a/arch/i386/boot/memory.c
-+++ b/arch/i386/boot/memory.c
-@@ -32,7 +32,7 @@ static int detect_memory_e820(void)
- 		/* Important: %edx is clobbered by some BIOSes,
- 		   so it must be either used for the error output
- 		   or explicitly marked clobbered. */
--		asm("int $0x15; setc %0"
-+		asm volatile("int $0x15; setc %0"
- 		    : "=d" (err), "+b" (next), "=a" (id), "+c" (size),
- 		      "=m" (*desc)
- 		    : "D" (desc), "d" (SMAP), "a" (0xe820));
-@@ -64,7 +64,7 @@ static int detect_memory_e801(void)
- 
- 	bx = cx = dx = 0;
- 	ax = 0xe801;
--	asm("stc; int $0x15; setc %0"
-+	asm volatile("stc; int $0x15; setc %0"
- 	    : "=m" (err), "+a" (ax), "+b" (bx), "+c" (cx), "+d" (dx));
- 
- 	if (err)
-@@ -94,7 +94,7 @@ static int detect_memory_88(void)
- 	u8 err;
- 
- 	ax = 0x8800;
--	asm("stc; int $0x15; setc %0" : "=bcdm" (err), "+a" (ax));
-+	asm volatile("stc; int $0x15; setc %0" : "=bcdm" (err), "+a" (ax));
- 
- 	boot_params.screen_info.ext_mem_k = ax;
- 
---- a/arch/i386/boot/video.c
-+++ b/arch/i386/boot/video.c
-@@ -40,7 +40,7 @@ static void store_cursor_position(void)
- 
- 	ax = 0x0300;
- 	bx = 0;
--	asm(INT10
-+	asm volatile(INT10
- 	    : "=d" (curpos), "+a" (ax), "+b" (bx)
- 	    : : "ecx", "esi", "edi");
- 
-@@ -55,7 +55,7 @@ static void store_video_mode(void)
- 	/* N.B.: the saving of the video page here is a bit silly,
- 	   since we pretty much assume page 0 everywhere. */
- 	ax = 0x0f00;
--	asm(INT10
-+	asm volatile(INT10
- 	    : "+a" (ax), "=b" (page)
- 	    : : "ecx", "edx", "esi", "edi");
- 
---- a/arch/i386/boot/video-vesa.c
-+++ b/arch/i386/boot/video-vesa.c
-@@ -41,7 +41,7 @@ static int vesa_probe(void)
- 
- 	ax = 0x4f00;
- 	di = (size_t)&vginfo;
--	asm(INT10
-+	asm volatile(INT10
- 	    : "+a" (ax), "+D" (di), "=m" (vginfo)
- 	    : : "ebx", "ecx", "edx", "esi");
- 
-@@ -68,7 +68,7 @@ static int vesa_probe(void)
- 		ax = 0x4f01;
- 		cx = mode;
- 		di = (size_t)&vminfo;
--		asm(INT10
-+		asm volatile(INT10
- 		    : "+a" (ax), "+c" (cx), "+D" (di), "=m" (vminfo)
- 		    : : "ebx", "edx", "esi");
- 
-@@ -115,7 +115,7 @@ static int vesa_set_mode(struct mode_inf
- 	ax = 0x4f01;
- 	cx = vesa_mode;
- 	di = (size_t)&vminfo;
--	asm(INT10
-+	asm volatile(INT10
- 	    : "+a" (ax), "+c" (cx), "+D" (di), "=m" (vminfo)
- 	    : : "ebx", "edx", "esi");
- 
-@@ -193,19 +193,20 @@ static void vesa_dac_set_8bits(void)
- /* Save the VESA protected mode info */
- static void vesa_store_pm_info(void)
- {
--	u16 ax, bx, di, es;
-+	u16 ax, bx, cx, di, es;
- 
- 	ax = 0x4f0a;
--	bx = di = 0;
--	asm("pushw %%es; "INT10"; movw %%es,%0; popw %%es"
--	    : "=d" (es), "+a" (ax), "+b" (bx), "+D" (di)
--	    : : "ecx", "esi");
-+	bx = cx = di = 0;
-+	asm volatile("pushw %%es; "INT10"; movw %%es,%0; popw %%es"
-+	    : "=d" (es), "+a" (ax), "+b" (bx), "+c" (cx), "+D" (di)
-+	    : : "esi");
- 
- 	if (ax != 0x004f)
- 		return;
- 
- 	boot_params.screen_info.vesapm_seg = es;
- 	boot_params.screen_info.vesapm_off = di;
-+	boot_params.screen_info.vesapm_size = cx;
- }
- 
- /*
-@@ -259,7 +260,7 @@ void vesa_store_edid(void)
- 	/* Note: The VBE DDC spec is different from the main VESA spec;
- 	   we genuinely have to assume all registers are destroyed here. */
- 
--	asm("pushw %%es; movw %2,%%es; "INT10"; popw %%es"
-+	asm volatile("pushw %%es; movw %2,%%es; "INT10"; popw %%es"
- 	    : "+a" (ax), "+b" (bx)
- 	    :  "c" (cx), "D" (di)
- 	    : "esi");
-@@ -275,7 +276,7 @@ void vesa_store_edid(void)
- 	cx = 0;			/* Controller 0 */
- 	dx = 0;			/* EDID block number */
- 	di =(size_t) &boot_params.edid_info; /* (ES:)Pointer to block */
--	asm(INT10
-+	asm volatile(INT10
- 	    : "+a" (ax), "+b" (bx), "+d" (dx), "=m" (boot_params.edid_info)
- 	    : "c" (cx), "D" (di)
- 	    : "esi");
---- a/arch/i386/boot/video-vga.c
-+++ b/arch/i386/boot/video-vga.c
-@@ -225,7 +225,7 @@ static int vga_probe(void)
- 	};
- 	u8 vga_flag;
- 
--	asm(INT10
-+	asm volatile(INT10
- 	    : "=b" (boot_params.screen_info.orig_video_ega_bx)
- 	    : "a" (0x1200), "b" (0x10) /* Check EGA/VGA */
- 	    : "ecx", "edx", "esi", "edi");
-@@ -233,7 +233,7 @@ static int vga_probe(void)
- 	/* If we have MDA/CGA/HGC then BL will be unchanged at 0x10 */
- 	if ((u8)boot_params.screen_info.orig_video_ega_bx != 0x10) {
- 		/* EGA/VGA */
--		asm(INT10
-+		asm volatile(INT10
- 		    : "=a" (vga_flag)
- 		    : "a" (0x1a00)
- 		    : "ebx", "ecx", "edx", "esi", "edi");
---- a/arch/i386/boot/voyager.c
-+++ b/arch/i386/boot/voyager.c
-@@ -27,7 +27,7 @@ int query_voyager(void)
- 
- 	data_ptr[0] = 0xff;	/* Flag on config not found(?) */
- 
--	asm("pushw %%es ; "
-+	asm volatile("pushw %%es ; "
- 	    "int $0x15 ; "
- 	    "setc %0 ; "
- 	    "movw %%es, %1 ; "
---- a/arch/i386/Kconfig
-+++ b/arch/i386/Kconfig
-@@ -592,7 +592,7 @@ config PAGE_OFFSET
- 	hex
- 	default 0xB0000000 if VMSPLIT_3G_OPT
- 	default 0x80000000 if VMSPLIT_2G
--	default 0x78000000 if VMSPLIT_2G_OPT
-+	default 0x70000000 if VMSPLIT_2G_OPT
- 	default 0x40000000 if VMSPLIT_1G
- 	default 0xC0000000
- 
-@@ -831,7 +831,7 @@ config CRASH_DUMP
- config PHYSICAL_START
- 	hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
- 	default "0x1000000" if X86_NUMAQ
--	default "0x100000"
-+	default "0x200000"
- 	help
- 	  This gives the physical address where the kernel is loaded.
- 
-@@ -916,7 +916,7 @@ config HOTPLUG_CPU
- 
- config COMPAT_VDSO
- 	bool "Compat VDSO support"
--	default y
-+	default n
- 	help
- 	  Map the VDSO to the predictable old-style address too.
- 	---help---
-@@ -1092,7 +1092,7 @@ config PCI
- choice
- 	prompt "PCI access mode"
- 	depends on PCI && !X86_VISWS
--	default PCI_GOANY
-+	default PCI_GODIRECT
- 	---help---
- 	  On PCI systems, the BIOS can be used to detect the PCI devices and
- 	  determine their configuration. However, some old PCI motherboards
---- a/arch/i386/Kconfig.cpu
-+++ b/arch/i386/Kconfig.cpu
-@@ -274,7 +274,7 @@ config X86_PPRO_FENCE
- 
- config X86_F00F_BUG
- 	bool
--	depends on M586MMX || M586TSC || M586 || M486 || M386
-+	depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
- 	default y
- 
- config X86_WP_WORKS_OK
-@@ -299,7 +299,7 @@ config X86_POPAD_OK
- 
- config X86_ALIGNMENT_16
- 	bool
--	depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
-+	depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
- 	default y
- 
- config X86_GOOD_APIC
---- a/arch/i386/Kconfig.debug
-+++ b/arch/i386/Kconfig.debug
-@@ -46,16 +46,6 @@ config DEBUG_PAGEALLOC
- 	  This results in a large slowdown, but helps to find certain types
- 	  of memory corruptions.
- 
--config DEBUG_RODATA
--	bool "Write protect kernel read-only data structures"
--	depends on DEBUG_KERNEL
--	help
--	  Mark the kernel read-only data as write-protected in the pagetables,
--	  in order to catch accidental (and incorrect) writes to such const
--	  data. This option may have a slight performance impact because a
--	  portion of the kernel code won't be covered by a 2MB TLB anymore.
--	  If in doubt, say "N".
--
- config 4KSTACKS
- 	bool "Use 4Kb for kernel stacks instead of 8Kb"
- 	depends on DEBUG_KERNEL
---- a/arch/i386/kernel/acpi/boot.c
-+++ b/arch/i386/kernel/acpi/boot.c
-@@ -1123,7 +1123,7 @@ static struct dmi_system_id __initdata a
- 		     DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
- 		     },
- 	 },
--	{}
-+	{ NULL, NULL, {{0, NULL}}, NULL}
- };
- 
- #endif				/* __i386__ */
---- a/arch/i386/kernel/acpi/sleep.c
-+++ b/arch/i386/kernel/acpi/sleep.c
-@@ -98,7 +98,7 @@ static __initdata struct dmi_system_id a
- 		     DMI_MATCH(DMI_PRODUCT_NAME, "S4030CDT/4.3"),
- 		     },
- 	 },
--	{}
-+	{ NULL, NULL, {{0, NULL}}, NULL}
- };
- 
- static int __init acpisleep_dmi_init(void)
---- a/arch/i386/kernel/acpi/wakeup.S
-+++ b/arch/i386/kernel/acpi/wakeup.S
-@@ -2,6 +2,7 @@
- #include <linux/linkage.h>
- #include <asm/segment.h>
- #include <asm/page.h>
-+#include <asm/msr-index.h>
- 
- #
- # wakeup_code runs in real mode, and at unknown address (determined at run-time).
-@@ -84,7 +85,7 @@ wakeup_code:
- 	# restore efer setting
- 	movl	real_save_efer_edx - wakeup_code, %edx
- 	movl	real_save_efer_eax - wakeup_code, %eax
--	mov     $0xc0000080, %ecx
-+	mov     $MSR_EFER, %ecx
- 	wrmsr
- 4:
- 	# make sure %cr4 is set correctly (features, etc)
-@@ -209,13 +210,11 @@ wakeup_pmode_return:
- 	# and restore the stack ... but you need gdt for this to work
- 	movl	saved_context_esp, %esp
- 
--	movl	%cs:saved_magic, %eax
--	cmpl	$0x12345678, %eax
-+	cmpl	$0x12345678, saved_magic
- 	jne	bogus_magic
- 
- 	# jump to place where we left off
--	movl	saved_eip,%eax
--	jmp	*%eax
-+	jmp	*(saved_eip)
- 
- bogus_magic:
- 	movw	$0x0e00 + 'B', 0xb8018
-@@ -247,7 +246,7 @@ ENTRY(acpi_copy_wakeup_routine)
- 	# save efer setting
- 	pushl	%eax
- 	movl	%eax, %ebx
--	mov     $0xc0000080, %ecx
-+	mov     $MSR_EFER, %ecx
- 	rdmsr
- 	movl	%edx, real_save_efer_edx - wakeup_start (%ebx)
- 	movl	%eax, real_save_efer_eax - wakeup_start (%ebx)
---- a/arch/i386/kernel/alternative.c
-+++ b/arch/i386/kernel/alternative.c
-@@ -443,7 +443,20 @@ void __init alternative_instructions(voi
-  */
- void __kprobes text_poke(void *addr, unsigned char *opcode, int len)
- {
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	unsigned long cr0;
-+
-+	pax_open_kernel(cr0);
-+#endif
-+
-+	addr += __KERNEL_TEXT_OFFSET;
- 	memcpy(addr, opcode, len);
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_close_kernel(cr0);
-+#endif
-+
- 	sync_core();
- 	/* Could also do a CLFLUSH here to speed up CPU recovery; but
- 	   that causes hangs on some VIA CPUs. */
---- a/arch/i386/kernel/apm.c
-+++ b/arch/i386/kernel/apm.c
-@@ -407,7 +407,7 @@ static DECLARE_WAIT_QUEUE_HEAD(apm_waitq
- static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue);
- static struct apm_user *	user_list;
- static DEFINE_SPINLOCK(user_list_lock);
--static const struct desc_struct	bad_bios_desc = { 0, 0x00409200 };
-+static const struct desc_struct	bad_bios_desc = { 0, 0x00409300 };
- 
- static const char		driver_version[] = "1.16ac";	/* no spaces */
- 
-@@ -601,19 +601,42 @@ static u8 apm_bios_call(u32 func, u32 eb
- 	struct desc_struct	save_desc_40;
- 	struct desc_struct	*gdt;
- 
-+#ifdef CONFIG_PAX_KERNEXEC
-+	unsigned long		cr0;
-+#endif
-+
- 	cpus = apm_save_cpus();
- 	
- 	cpu = get_cpu();
- 	gdt = get_cpu_gdt_table(cpu);
- 	save_desc_40 = gdt[0x40 / 8];
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_open_kernel(cr0);
-+#endif
-+
- 	gdt[0x40 / 8] = bad_bios_desc;
- 
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_close_kernel(cr0);
-+#endif
-+
- 	apm_irq_save(flags);
- 	APM_DO_SAVE_SEGS;
- 	apm_bios_call_asm(func, ebx_in, ecx_in, eax, ebx, ecx, edx, esi);
- 	APM_DO_RESTORE_SEGS;
- 	apm_irq_restore(flags);
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_open_kernel(cr0);
-+#endif
-+
- 	gdt[0x40 / 8] = save_desc_40;
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_close_kernel(cr0);
-+#endif
-+
- 	put_cpu();
- 	apm_restore_cpus(cpus);
- 	
-@@ -644,19 +667,42 @@ static u8 apm_bios_call_simple(u32 func,
- 	struct desc_struct	save_desc_40;
- 	struct desc_struct	*gdt;
- 
-+#ifdef CONFIG_PAX_KERNEXEC
-+	unsigned long		cr0;
-+#endif
-+
- 	cpus = apm_save_cpus();
- 	
- 	cpu = get_cpu();
- 	gdt = get_cpu_gdt_table(cpu);
- 	save_desc_40 = gdt[0x40 / 8];
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_open_kernel(cr0);
-+#endif
-+
- 	gdt[0x40 / 8] = bad_bios_desc;
- 
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_close_kernel(cr0);
-+#endif
-+
- 	apm_irq_save(flags);
- 	APM_DO_SAVE_SEGS;
- 	error = apm_bios_call_simple_asm(func, ebx_in, ecx_in, eax);
- 	APM_DO_RESTORE_SEGS;
- 	apm_irq_restore(flags);
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_open_kernel(cr0);
-+#endif
-+
- 	gdt[0x40 / 8] = save_desc_40;
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_close_kernel(cr0);
-+#endif
-+
- 	put_cpu();
- 	apm_restore_cpus(cpus);
- 	return error;
-@@ -924,7 +970,7 @@ recalc:
-  
- static void apm_power_off(void)
- {
--	unsigned char	po_bios_call[] = {
-+	const unsigned char	po_bios_call[] = {
- 		0xb8, 0x00, 0x10,	/* movw  $0x1000,ax  */
- 		0x8e, 0xd0,		/* movw  ax,ss       */
- 		0xbc, 0x00, 0xf0,	/* movw  $0xf000,sp  */
-@@ -1864,7 +1910,10 @@ static const struct file_operations apm_
- static struct miscdevice apm_device = {
- 	APM_MINOR_DEV,
- 	"apm_bios",
--	&apm_bios_fops
-+	&apm_bios_fops,
-+	{NULL, NULL},
-+	NULL,
-+	NULL
- };
- 
- 
-@@ -1974,210 +2023,210 @@ static struct dmi_system_id __initdata a
- 		print_if_true,
- 		KERN_WARNING "IBM T23 - BIOS 1.03b+ and controller firmware 1.02+ may be needed for Linux APM.",
- 		{	DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
--			DMI_MATCH(DMI_BIOS_VERSION, "1AET38WW (1.01b)"), },
-+			DMI_MATCH(DMI_BIOS_VERSION, "1AET38WW (1.01b)"), }, NULL
- 	},
- 	{	/* Handle problems with APM on the C600 */
- 		broken_ps2_resume, "Dell Latitude C600",
- 		{	DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
--			DMI_MATCH(DMI_PRODUCT_NAME, "Latitude C600"), },
-+			DMI_MATCH(DMI_PRODUCT_NAME, "Latitude C600"), }, NULL
- 	},
- 	{	/* Allow interrupts during suspend on Dell Latitude laptops*/
- 		set_apm_ints, "Dell Latitude",
- 		{	DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
--			DMI_MATCH(DMI_PRODUCT_NAME, "Latitude C510"), }
-+			DMI_MATCH(DMI_PRODUCT_NAME, "Latitude C510"), }, NULL
- 	},
- 	{	/* APM crashes */
- 		apm_is_horked, "Dell Inspiron 2500",
- 		{	DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
- 			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 2500"),
- 			DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
--			DMI_MATCH(DMI_BIOS_VERSION,"A11"), },
-+			DMI_MATCH(DMI_BIOS_VERSION,"A11"), }, NULL
- 	},
- 	{	/* Allow interrupts during suspend on Dell Inspiron laptops*/
- 		set_apm_ints, "Dell Inspiron", {
- 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
--			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 4000"), },
-+			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 4000"), }, NULL
- 	},
- 	{	/* Handle problems with APM on Inspiron 5000e */
- 		broken_apm_power, "Dell Inspiron 5000e",
- 		{	DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
- 			DMI_MATCH(DMI_BIOS_VERSION, "A04"),
--			DMI_MATCH(DMI_BIOS_DATE, "08/24/2000"), },
-+			DMI_MATCH(DMI_BIOS_DATE, "08/24/2000"), }, NULL
- 	},
- 	{	/* Handle problems with APM on Inspiron 2500 */
- 		broken_apm_power, "Dell Inspiron 2500",
- 		{	DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
- 			DMI_MATCH(DMI_BIOS_VERSION, "A12"),
--			DMI_MATCH(DMI_BIOS_DATE, "02/04/2002"), },
-+			DMI_MATCH(DMI_BIOS_DATE, "02/04/2002"), }, NULL
- 	},
- 	{	/* APM crashes */
- 		apm_is_horked, "Dell Dimension 4100",
- 		{	DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
- 			DMI_MATCH(DMI_PRODUCT_NAME, "XPS-Z"),
- 			DMI_MATCH(DMI_BIOS_VENDOR,"Intel Corp."),
--			DMI_MATCH(DMI_BIOS_VERSION,"A11"), },
-+			DMI_MATCH(DMI_BIOS_VERSION,"A11"), }, NULL
- 	},
- 	{	/* Allow interrupts during suspend on Compaq Laptops*/
- 		set_apm_ints, "Compaq 12XL125",
- 		{	DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
- 			DMI_MATCH(DMI_PRODUCT_NAME, "Compaq PC"),
- 			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
--			DMI_MATCH(DMI_BIOS_VERSION,"4.06"), },
-+			DMI_MATCH(DMI_BIOS_VERSION,"4.06"), }, NULL
- 	},
- 	{	/* Allow interrupts during APM or the clock goes slow */
- 		set_apm_ints, "ASUSTeK",
- 		{	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
--			DMI_MATCH(DMI_PRODUCT_NAME, "L8400K series Notebook PC"), },
-+			DMI_MATCH(DMI_PRODUCT_NAME, "L8400K series Notebook PC"), }, NULL
- 	},
- 	{	/* APM blows on shutdown */
- 		apm_is_horked, "ABIT KX7-333[R]",
- 		{	DMI_MATCH(DMI_BOARD_VENDOR, "ABIT"),
--			DMI_MATCH(DMI_BOARD_NAME, "VT8367-8233A (KX7-333[R])"), },
-+			DMI_MATCH(DMI_BOARD_NAME, "VT8367-8233A (KX7-333[R])"), }, NULL
- 	},
- 	{	/* APM crashes */
- 		apm_is_horked, "Trigem Delhi3",
- 		{	DMI_MATCH(DMI_SYS_VENDOR, "TriGem Computer, Inc"),
--			DMI_MATCH(DMI_PRODUCT_NAME, "Delhi3"), },
-+			DMI_MATCH(DMI_PRODUCT_NAME, "Delhi3"), }, NULL
- 	},
- 	{	/* APM crashes */
- 		apm_is_horked, "Fujitsu-Siemens",
- 		{	DMI_MATCH(DMI_BIOS_VENDOR, "hoenix/FUJITSU SIEMENS"),
--			DMI_MATCH(DMI_BIOS_VERSION, "Version1.01"), },
-+			DMI_MATCH(DMI_BIOS_VERSION, "Version1.01"), }, NULL
- 	},
- 	{	/* APM crashes */
- 		apm_is_horked_d850md, "Intel D850MD",
- 		{	DMI_MATCH(DMI_BIOS_VENDOR, "Intel Corp."),
--			DMI_MATCH(DMI_BIOS_VERSION, "MV85010A.86A.0016.P07.0201251536"), },
-+			DMI_MATCH(DMI_BIOS_VERSION, "MV85010A.86A.0016.P07.0201251536"), }, NULL
- 	},
- 	{	/* APM crashes */
- 		apm_is_horked, "Intel D810EMO",
- 		{	DMI_MATCH(DMI_BIOS_VENDOR, "Intel Corp."),
--			DMI_MATCH(DMI_BIOS_VERSION, "MO81010A.86A.0008.P04.0004170800"), },
-+			DMI_MATCH(DMI_BIOS_VERSION, "MO81010A.86A.0008.P04.0004170800"), }, NULL
- 	},
- 	{	/* APM crashes */
- 		apm_is_horked, "Dell XPS-Z",
- 		{	DMI_MATCH(DMI_BIOS_VENDOR, "Intel Corp."),
- 			DMI_MATCH(DMI_BIOS_VERSION, "A11"),
--			DMI_MATCH(DMI_PRODUCT_NAME, "XPS-Z"), },
-+			DMI_MATCH(DMI_PRODUCT_NAME, "XPS-Z"), }, NULL
- 	},
- 	{	/* APM crashes */
- 		apm_is_horked, "Sharp PC-PJ/AX",
- 		{	DMI_MATCH(DMI_SYS_VENDOR, "SHARP"),
- 			DMI_MATCH(DMI_PRODUCT_NAME, "PC-PJ/AX"),
- 			DMI_MATCH(DMI_BIOS_VENDOR,"SystemSoft"),
--			DMI_MATCH(DMI_BIOS_VERSION,"Version R2.08"), },
-+			DMI_MATCH(DMI_BIOS_VERSION,"Version R2.08"), }, NULL
- 	},
- 	{	/* APM crashes */
- 		apm_is_horked, "Dell Inspiron 2500",
- 		{	DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
- 			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 2500"),
- 			DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
--			DMI_MATCH(DMI_BIOS_VERSION,"A11"), },
-+			DMI_MATCH(DMI_BIOS_VERSION,"A11"), }, NULL
- 	},
- 	{	/* APM idle hangs */
- 		apm_likes_to_melt, "Jabil AMD",
- 		{	DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
--			DMI_MATCH(DMI_BIOS_VERSION, "0AASNP06"), },
-+			DMI_MATCH(DMI_BIOS_VERSION, "0AASNP06"), }, NULL
- 	},
- 	{	/* APM idle hangs */
- 		apm_likes_to_melt, "AMI Bios",
- 		{	DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
--			DMI_MATCH(DMI_BIOS_VERSION, "0AASNP05"), },
-+			DMI_MATCH(DMI_BIOS_VERSION, "0AASNP05"), }, NULL
- 	},
- 	{	/* Handle problems with APM on Sony Vaio PCG-N505X(DE) */
- 		swab_apm_power_in_minutes, "Sony VAIO",
- 		{	DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
- 			DMI_MATCH(DMI_BIOS_VERSION, "R0206H"),
--			DMI_MATCH(DMI_BIOS_DATE, "08/23/99"), },
-+			DMI_MATCH(DMI_BIOS_DATE, "08/23/99"), }, NULL
- 	},
- 	{	/* Handle problems with APM on Sony Vaio PCG-N505VX */
- 		swab_apm_power_in_minutes, "Sony VAIO",
- 		{	DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
- 			DMI_MATCH(DMI_BIOS_VERSION, "W2K06H0"),
--			DMI_MATCH(DMI_BIOS_DATE, "02/03/00"), },
-+			DMI_MATCH(DMI_BIOS_DATE, "02/03/00"), }, NULL
- 	},
- 	{	/* Handle problems with APM on Sony Vaio PCG-XG29 */
- 		swab_apm_power_in_minutes, "Sony VAIO",
- 		{	DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
- 			DMI_MATCH(DMI_BIOS_VERSION, "R0117A0"),
--			DMI_MATCH(DMI_BIOS_DATE, "04/25/00"), },
-+			DMI_MATCH(DMI_BIOS_DATE, "04/25/00"), }, NULL
- 	},
- 	{	/* Handle problems with APM on Sony Vaio PCG-Z600NE */
- 		swab_apm_power_in_minutes, "Sony VAIO",
- 		{	DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
- 			DMI_MATCH(DMI_BIOS_VERSION, "R0121Z1"),
--			DMI_MATCH(DMI_BIOS_DATE, "05/11/00"), },
-+			DMI_MATCH(DMI_BIOS_DATE, "05/11/00"), }, NULL
- 	},
- 	{	/* Handle problems with APM on Sony Vaio PCG-Z600NE */
- 		swab_apm_power_in_minutes, "Sony VAIO",
- 		{	DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
- 			DMI_MATCH(DMI_BIOS_VERSION, "WME01Z1"),
--			DMI_MATCH(DMI_BIOS_DATE, "08/11/00"), },
-+			DMI_MATCH(DMI_BIOS_DATE, "08/11/00"), }, NULL
- 	},
- 	{	/* Handle problems with APM on Sony Vaio PCG-Z600LEK(DE) */
- 		swab_apm_power_in_minutes, "Sony VAIO",
- 		{	DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
- 			DMI_MATCH(DMI_BIOS_VERSION, "R0206Z3"),
--			DMI_MATCH(DMI_BIOS_DATE, "12/25/00"), },
-+			DMI_MATCH(DMI_BIOS_DATE, "12/25/00"), }, NULL
- 	},
- 	{	/* Handle problems with APM on Sony Vaio PCG-Z505LS */
- 		swab_apm_power_in_minutes, "Sony VAIO",
- 		{	DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
- 			DMI_MATCH(DMI_BIOS_VERSION, "R0203D0"),
--			DMI_MATCH(DMI_BIOS_DATE, "05/12/00"), },
-+			DMI_MATCH(DMI_BIOS_DATE, "05/12/00"), }, NULL
- 	},
- 	{	/* Handle problems with APM on Sony Vaio PCG-Z505LS */
- 		swab_apm_power_in_minutes, "Sony VAIO",
- 		{	DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
- 			DMI_MATCH(DMI_BIOS_VERSION, "R0203Z3"),
--			DMI_MATCH(DMI_BIOS_DATE, "08/25/00"), },
-+			DMI_MATCH(DMI_BIOS_DATE, "08/25/00"), }, NULL
- 	},
- 	{	/* Handle problems with APM on Sony Vaio PCG-Z505LS (with updated BIOS) */
- 		swab_apm_power_in_minutes, "Sony VAIO",
- 		{	DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
- 			DMI_MATCH(DMI_BIOS_VERSION, "R0209Z3"),
--			DMI_MATCH(DMI_BIOS_DATE, "05/12/01"), },
-+			DMI_MATCH(DMI_BIOS_DATE, "05/12/01"), }, NULL
- 	},
- 	{	/* Handle problems with APM on Sony Vaio PCG-F104K */
- 		swab_apm_power_in_minutes, "Sony VAIO",
- 		{	DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
- 			DMI_MATCH(DMI_BIOS_VERSION, "R0204K2"),
--			DMI_MATCH(DMI_BIOS_DATE, "08/28/00"), },
-+			DMI_MATCH(DMI_BIOS_DATE, "08/28/00"), }, NULL
- 	},
- 
- 	{	/* Handle problems with APM on Sony Vaio PCG-C1VN/C1VE */
- 		swab_apm_power_in_minutes, "Sony VAIO",
- 		{	DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
- 			DMI_MATCH(DMI_BIOS_VERSION, "R0208P1"),
--			DMI_MATCH(DMI_BIOS_DATE, "11/09/00"), },
-+			DMI_MATCH(DMI_BIOS_DATE, "11/09/00"), }, NULL
- 	},
- 	{	/* Handle problems with APM on Sony Vaio PCG-C1VE */
- 		swab_apm_power_in_minutes, "Sony VAIO",
- 		{	DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
- 			DMI_MATCH(DMI_BIOS_VERSION, "R0204P1"),
--			DMI_MATCH(DMI_BIOS_DATE, "09/12/00"), },
-+			DMI_MATCH(DMI_BIOS_DATE, "09/12/00"), }, NULL
- 	},
- 	{	/* Handle problems with APM on Sony Vaio PCG-C1VE */
- 		swab_apm_power_in_minutes, "Sony VAIO",
- 		{	DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
- 			DMI_MATCH(DMI_BIOS_VERSION, "WXPO1Z3"),
--			DMI_MATCH(DMI_BIOS_DATE, "10/26/01"), },
-+			DMI_MATCH(DMI_BIOS_DATE, "10/26/01"), }, NULL
- 	},
- 	{	/* broken PM poweroff bios */
- 		set_realmode_power_off, "Award Software v4.60 PGMA",
- 		{	DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
- 			DMI_MATCH(DMI_BIOS_VERSION, "4.60 PGMA"),
--			DMI_MATCH(DMI_BIOS_DATE, "134526184"), },
-+			DMI_MATCH(DMI_BIOS_DATE, "134526184"), }, NULL
- 	},
- 
- 	/* Generic per vendor APM settings  */
- 
- 	{	/* Allow interrupts during suspend on IBM laptops */
- 		set_apm_ints, "IBM",
--		{	DMI_MATCH(DMI_SYS_VENDOR, "IBM"), },
-+		{	DMI_MATCH(DMI_SYS_VENDOR, "IBM"), }, NULL
- 	},
- 
--	{ }
-+	{ NULL, NULL, {DMI_MATCH(DMI_NONE, NULL)}, NULL}
- };
- 
- /*
-@@ -2196,6 +2245,10 @@ static int __init apm_init(void)
- 	struct desc_struct *gdt;
- 	int err;
- 
-+#ifdef CONFIG_PAX_KERNEXEC
-+	unsigned long cr0;
-+#endif
-+
- 	dmi_check_system(apm_dmi_table);
- 
- 	if (apm_info.bios.version == 0 || paravirt_enabled()) {
-@@ -2271,9 +2324,18 @@ static int __init apm_init(void)
- 	 * This is for buggy BIOS's that refer to (real mode) segment 0x40
- 	 * even though they are called in protected mode.
- 	 */
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_open_kernel(cr0);
-+#endif
-+
- 	set_base(bad_bios_desc, __va((unsigned long)0x40 << 4));
- 	_set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4));
- 
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_close_kernel(cr0);
-+#endif
-+
- 	/*
- 	 * Set up the long jump entry point to the APM BIOS, which is called
- 	 * from inline assembly.
-@@ -2292,6 +2354,11 @@ static int __init apm_init(void)
- 	 * code to that CPU.
- 	 */
- 	gdt = get_cpu_gdt_table(0);
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_open_kernel(cr0);
-+#endif
-+
- 	set_base(gdt[APM_CS >> 3],
- 		 __va((unsigned long)apm_info.bios.cseg << 4));
- 	set_base(gdt[APM_CS_16 >> 3],
-@@ -2299,6 +2366,10 @@ static int __init apm_init(void)
- 	set_base(gdt[APM_DS >> 3],
- 		 __va((unsigned long)apm_info.bios.dseg << 4));
- 
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_close_kernel(cr0);
-+#endif
-+
- 	apm_proc = create_proc_entry("apm", 0, NULL);
- 	if (apm_proc)
- 		apm_proc->proc_fops = &apm_file_ops;
---- a/arch/i386/kernel/asm-offsets.c
-+++ b/arch/i386/kernel/asm-offsets.c
-@@ -109,6 +109,7 @@ void foo(void)
- 	DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
- 	DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
- 	DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
-+	DEFINE(PERCPU_MODULE_RESERVE, PERCPU_MODULE_RESERVE);
- 
- 	DEFINE(VDSO_PRELINK_asm, VDSO_PRELINK);
- 
-@@ -122,6 +123,7 @@ void foo(void)
- 	OFFSET(PARAVIRT_irq_enable_sysexit, paravirt_ops, irq_enable_sysexit);
- 	OFFSET(PARAVIRT_iret, paravirt_ops, iret);
- 	OFFSET(PARAVIRT_read_cr0, paravirt_ops, read_cr0);
-+	OFFSET(PARAVIRT_write_cr0, paravirt_ops, write_cr0);
- #endif
- 
- #ifdef CONFIG_XEN
---- a/arch/i386/kernel/cpu/common.c
-+++ b/arch/i386/kernel/cpu/common.c
-@@ -4,7 +4,6 @@
- #include <linux/smp.h>
- #include <linux/module.h>
- #include <linux/percpu.h>
--#include <linux/bootmem.h>
- #include <asm/semaphore.h>
- #include <asm/processor.h>
- #include <asm/i387.h>
-@@ -21,39 +20,15 @@
- 
- #include "cpu.h"
- 
--DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
--	[GDT_ENTRY_KERNEL_CS] = { 0x0000ffff, 0x00cf9a00 },
--	[GDT_ENTRY_KERNEL_DS] = { 0x0000ffff, 0x00cf9200 },
--	[GDT_ENTRY_DEFAULT_USER_CS] = { 0x0000ffff, 0x00cffa00 },
--	[GDT_ENTRY_DEFAULT_USER_DS] = { 0x0000ffff, 0x00cff200 },
--	/*
--	 * Segments used for calling PnP BIOS have byte granularity.
--	 * They code segments and data segments have fixed 64k limits,
--	 * the transfer segment sizes are set at run time.
--	 */
--	[GDT_ENTRY_PNPBIOS_CS32] = { 0x0000ffff, 0x00409a00 },/* 32-bit code */
--	[GDT_ENTRY_PNPBIOS_CS16] = { 0x0000ffff, 0x00009a00 },/* 16-bit code */
--	[GDT_ENTRY_PNPBIOS_DS] = { 0x0000ffff, 0x00009200 }, /* 16-bit data */
--	[GDT_ENTRY_PNPBIOS_TS1] = { 0x00000000, 0x00009200 },/* 16-bit data */
--	[GDT_ENTRY_PNPBIOS_TS2] = { 0x00000000, 0x00009200 },/* 16-bit data */
--	/*
--	 * The APM segments have byte granularity and their bases
--	 * are set at run time.  All have 64k limits.
--	 */
--	[GDT_ENTRY_APMBIOS_BASE] = { 0x0000ffff, 0x00409a00 },/* 32-bit code */
--	/* 16-bit code */
--	[GDT_ENTRY_APMBIOS_BASE+1] = { 0x0000ffff, 0x00009a00 },
--	[GDT_ENTRY_APMBIOS_BASE+2] = { 0x0000ffff, 0x00409200 }, /* data */
--
--	[GDT_ENTRY_ESPFIX_SS] = { 0x00000000, 0x00c09200 },
--	[GDT_ENTRY_PERCPU] = { 0x00000000, 0x00000000 },
--} };
--EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
--
- static int cachesize_override __cpuinitdata = -1;
- static int disable_x86_fxsr __cpuinitdata;
- static int disable_x86_serial_nr __cpuinitdata = 1;
--static int disable_x86_sep __cpuinitdata;
-+
-+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
-+int disable_x86_sep __cpuinitdata = 1;
-+#else
-+int disable_x86_sep __cpuinitdata;
-+#endif
- 
- struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
- 
-@@ -261,10 +236,10 @@ static int __cpuinit have_cpuid_p(void)
- void __init cpu_detect(struct cpuinfo_x86 *c)
- {
- 	/* Get vendor name */
--	cpuid(0x00000000, &c->cpuid_level,
--	      (int *)&c->x86_vendor_id[0],
--	      (int *)&c->x86_vendor_id[8],
--	      (int *)&c->x86_vendor_id[4]);
-+	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
-+	      (unsigned int *)&c->x86_vendor_id[0],
-+	      (unsigned int *)&c->x86_vendor_id[8],
-+	      (unsigned int *)&c->x86_vendor_id[4]);
- 
- 	c->x86 = 4;
- 	if (c->cpuid_level >= 0x00000001) {
-@@ -304,15 +279,14 @@ static void __init early_cpu_detect(void
- 
- static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
- {
--	u32 tfms, xlvl;
--	int ebx;
-+	u32 tfms, xlvl, ebx;
- 
- 	if (have_cpuid_p()) {
- 		/* Get vendor name */
--		cpuid(0x00000000, &c->cpuid_level,
--		      (int *)&c->x86_vendor_id[0],
--		      (int *)&c->x86_vendor_id[8],
--		      (int *)&c->x86_vendor_id[4]);
-+		cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
-+		      (unsigned int *)&c->x86_vendor_id[0],
-+		      (unsigned int *)&c->x86_vendor_id[8],
-+		      (unsigned int *)&c->x86_vendor_id[4]);
- 		
- 		get_cpu_vendor(c, 0);
- 		/* Initialize the standard set of capabilities */
-@@ -644,7 +618,7 @@ void switch_to_new_gdt(void)
- {
- 	struct Xgt_desc_struct gdt_descr;
- 
--	gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
-+	gdt_descr.address = get_cpu_gdt_table(smp_processor_id());
- 	gdt_descr.size = GDT_SIZE - 1;
- 	load_gdt(&gdt_descr);
- 	asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
-@@ -660,7 +634,7 @@ void __cpuinit cpu_init(void)
- {
- 	int cpu = smp_processor_id();
- 	struct task_struct *curr = current;
--	struct tss_struct * t = &per_cpu(init_tss, cpu);
-+	struct tss_struct *t = init_tss + cpu;
- 	struct thread_struct *thread = &curr->thread;
- 
- 	if (cpu_test_and_set(cpu, cpu_initialized)) {
---- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
-+++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
-@@ -549,7 +549,7 @@ static struct dmi_system_id sw_any_bug_d
- 			DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"),
- 		},
- 	},
--	{ }
-+	{ NULL, NULL, {DMI_MATCH(DMI_NONE, NULL)}, NULL }
- };
- #endif
- 
---- a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
-+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
-@@ -223,7 +223,7 @@ static struct cpu_model models[] =
- 	{ &cpu_ids[CPU_MP4HT_D0], NULL, 0, NULL },
- 	{ &cpu_ids[CPU_MP4HT_E0], NULL, 0, NULL },
- 
--	{ NULL, }
-+	{ NULL, NULL, 0, NULL}
- };
- #undef _BANIAS
- #undef BANIAS
---- a/arch/i386/kernel/cpu/intel_cacheinfo.c
-+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
-@@ -351,8 +351,8 @@ unsigned int __cpuinit init_intel_cachei
- 	 */
- 	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
- 		/* supports eax=2  call */
--		int i, j, n;
--		int regs[4];
-+		int j, n;
-+		unsigned int regs[4];
- 		unsigned char *dp = (unsigned char *)regs;
- 		int only_trace = 0;
- 
-@@ -367,7 +367,7 @@ unsigned int __cpuinit init_intel_cachei
- 
- 			/* If bit 31 is set, this is an unknown format */
- 			for ( j = 0 ; j < 3 ; j++ ) {
--				if ( regs[j] < 0 ) regs[j] = 0;
-+				if ( (int)regs[j] < 0 ) regs[j] = 0;
- 			}
- 
- 			/* Byte 0 is level count, not a descriptor */
---- a/arch/i386/kernel/cpu/mcheck/therm_throt.c
-+++ b/arch/i386/kernel/cpu/mcheck/therm_throt.c
-@@ -152,7 +152,7 @@ static __cpuinit int thermal_throttle_cp
- 	return NOTIFY_OK;
- }
- 
--static struct notifier_block thermal_throttle_cpu_notifier =
-+static __cpuinitdata struct notifier_block thermal_throttle_cpu_notifier =
- {
- 	.notifier_call = thermal_throttle_cpu_callback,
- };
---- a/arch/i386/kernel/cpu/mtrr/generic.c
-+++ b/arch/i386/kernel/cpu/mtrr/generic.c
-@@ -29,11 +29,11 @@ static struct fixed_range_block fixed_ra
- 	{ MTRRfix64K_00000_MSR, 1 }, /* one  64k MTRR  */
- 	{ MTRRfix16K_80000_MSR, 2 }, /* two  16k MTRRs */
- 	{ MTRRfix4K_C0000_MSR,  8 }, /* eight 4k MTRRs */
--	{}
-+	{ 0, 0 }
- };
- 
- static unsigned long smp_changes_mask;
--static struct mtrr_state mtrr_state = {};
-+static struct mtrr_state mtrr_state;
- 
- #undef MODULE_PARAM_PREFIX
- #define MODULE_PARAM_PREFIX "mtrr."
---- a/arch/i386/kernel/crash.c
-+++ b/arch/i386/kernel/crash.c
-@@ -55,7 +55,7 @@ static int crash_nmi_callback(struct not
- 		return NOTIFY_STOP;
- 	local_irq_disable();
- 
--	if (!user_mode_vm(regs)) {
-+	if (!user_mode(regs)) {
- 		crash_fixup_ss_esp(&fixed_regs, regs);
- 		regs = &fixed_regs;
- 	}
---- a/arch/i386/kernel/doublefault.c
-+++ b/arch/i386/kernel/doublefault.c
-@@ -11,17 +11,17 @@
- 
- #define DOUBLEFAULT_STACKSIZE (1024)
- static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
--#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
-+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
- 
- #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
- 
- static void doublefault_fn(void)
- {
--	struct Xgt_desc_struct gdt_desc = {0, 0};
-+	struct Xgt_desc_struct gdt_desc = {0, NULL, 0};
- 	unsigned long gdt, tss;
- 
- 	store_gdt(&gdt_desc);
--	gdt = gdt_desc.address;
-+	gdt = (unsigned long)gdt_desc.address;
- 
- 	printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
- 
-@@ -59,10 +59,10 @@ struct tss_struct doublefault_tss __cach
- 		/* 0x2 bit is always set */
- 		.eflags		= X86_EFLAGS_SF | 0x2,
- 		.esp		= STACK_START,
--		.es		= __USER_DS,
-+		.es		= __KERNEL_DS,
- 		.cs		= __KERNEL_CS,
- 		.ss		= __KERNEL_DS,
--		.ds		= __USER_DS,
-+		.ds		= __KERNEL_DS,
- 		.fs		= __KERNEL_PERCPU,
- 
- 		.__cr3		= __pa(swapper_pg_dir)
---- a/arch/i386/kernel/efi.c
-+++ b/arch/i386/kernel/efi.c
-@@ -63,45 +63,23 @@ extern void * boot_ioremap(unsigned long
- 
- static unsigned long efi_rt_eflags;
- static DEFINE_SPINLOCK(efi_rt_lock);
--static pgd_t efi_bak_pg_dir_pointer[2];
-+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS] __attribute__ ((aligned (4096)));
- 
- static void efi_call_phys_prelog(void) __acquires(efi_rt_lock)
- {
--	unsigned long cr4;
--	unsigned long temp;
- 	struct Xgt_desc_struct gdt_descr;
- 
- 	spin_lock(&efi_rt_lock);
- 	local_irq_save(efi_rt_eflags);
- 
--	/*
--	 * If I don't have PSE, I should just duplicate two entries in page
--	 * directory. If I have PSE, I just need to duplicate one entry in
--	 * page directory.
--	 */
--	cr4 = read_cr4();
--
--	if (cr4 & X86_CR4_PSE) {
--		efi_bak_pg_dir_pointer[0].pgd =
--		    swapper_pg_dir[pgd_index(0)].pgd;
--		swapper_pg_dir[0].pgd =
--		    swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
--	} else {
--		efi_bak_pg_dir_pointer[0].pgd =
--		    swapper_pg_dir[pgd_index(0)].pgd;
--		efi_bak_pg_dir_pointer[1].pgd =
--		    swapper_pg_dir[pgd_index(0x400000)].pgd;
--		swapper_pg_dir[pgd_index(0)].pgd =
--		    swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
--		temp = PAGE_OFFSET + 0x400000;
--		swapper_pg_dir[pgd_index(0x400000)].pgd =
--		    swapper_pg_dir[pgd_index(temp)].pgd;
--	}
-+	clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
-+	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
-+			min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS));
- 
- 	/*
- 	 * After the lock is released, the original page table is restored.
- 	 */
--	local_flush_tlb();
-+	__flush_tlb_all();
- 
- 	gdt_descr.address = __pa(get_cpu_gdt_table(0));
- 	gdt_descr.size = GDT_SIZE - 1;
-@@ -110,35 +88,23 @@ static void efi_call_phys_prelog(void) _
- 
- static void efi_call_phys_epilog(void) __releases(efi_rt_lock)
- {
--	unsigned long cr4;
- 	struct Xgt_desc_struct gdt_descr;
- 
--	gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
-+	gdt_descr.address = get_cpu_gdt_table(0);
- 	gdt_descr.size = GDT_SIZE - 1;
- 	load_gdt(&gdt_descr);
--
--	cr4 = read_cr4();
--
--	if (cr4 & X86_CR4_PSE) {
--		swapper_pg_dir[pgd_index(0)].pgd =
--		    efi_bak_pg_dir_pointer[0].pgd;
--	} else {
--		swapper_pg_dir[pgd_index(0)].pgd =
--		    efi_bak_pg_dir_pointer[0].pgd;
--		swapper_pg_dir[pgd_index(0x400000)].pgd =
--		    efi_bak_pg_dir_pointer[1].pgd;
--	}
-+	clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
- 
- 	/*
- 	 * After the lock is released, the original page table is restored.
- 	 */
--	local_flush_tlb();
-+	__flush_tlb_all();
- 
- 	local_irq_restore(efi_rt_eflags);
- 	spin_unlock(&efi_rt_lock);
- }
- 
--static efi_status_t
-+static efi_status_t __init
- phys_efi_set_virtual_address_map(unsigned long memory_map_size,
- 				 unsigned long descriptor_size,
- 				 u32 descriptor_version,
-@@ -154,7 +120,7 @@ phys_efi_set_virtual_address_map(unsigne
- 	return status;
- }
- 
--static efi_status_t
-+static efi_status_t __init
- phys_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
- {
- 	efi_status_t status;
---- a/arch/i386/kernel/efi_stub.S
-+++ b/arch/i386/kernel/efi_stub.S
-@@ -6,6 +6,7 @@
-  */
- 
- #include <linux/linkage.h>
-+#include <linux/init.h>
- #include <asm/page.h>
- 
- /*
-@@ -20,7 +21,7 @@
-  * service functions will comply with gcc calling convention, too.
-  */
- 
--.text
-+__INIT
- ENTRY(efi_call_phys)
- 	/*
- 	 * 0. The function can only be called in Linux kernel. So CS has been
-@@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
- 	 * The mapping of lower virtual memory has been created in prelog and
- 	 * epilog.
- 	 */
--	movl	$1f, %edx
--	subl	$__PAGE_OFFSET, %edx
--	jmp	*%edx
-+	jmp	1f-__PAGE_OFFSET
- 1:
- 
- 	/*
-@@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
- 	 * parameter 2, ..., param n. To make things easy, we save the return
- 	 * address of efi_call_phys in a global variable.
- 	 */
--	popl	%edx
--	movl	%edx, saved_return_addr
--	/* get the function pointer into ECX*/
--	popl	%ecx
--	movl	%ecx, efi_rt_function_ptr
--	movl	$2f, %edx
--	subl	$__PAGE_OFFSET, %edx
--	pushl	%edx
-+	popl	(saved_return_addr)
-+	popl	(efi_rt_function_ptr)
- 
- 	/*
- 	 * 3. Clear PG bit in %CR0.
-@@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
- 	/*
- 	 * 5. Call the physical function.
- 	 */
--	jmp	*%ecx
-+	call	*(efi_rt_function_ptr-__PAGE_OFFSET)
- 
--2:
- 	/*
- 	 * 6. After EFI runtime service returns, control will return to
- 	 * following instruction. We'd better readjust stack pointer first.
-@@ -88,34 +80,27 @@ ENTRY(efi_call_phys)
- 	movl	%cr0, %edx
- 	orl	$0x80000000, %edx
- 	movl	%edx, %cr0
--	jmp	1f
--1:
-+
- 	/*
- 	 * 8. Now restore the virtual mode from flat mode by
- 	 * adding EIP with PAGE_OFFSET.
- 	 */
--	movl	$1f, %edx
--	jmp	*%edx
-+	jmp	1f+__PAGE_OFFSET
- 1:
- 
- 	/*
- 	 * 9. Balance the stack. And because EAX contain the return value,
- 	 * we'd better not clobber it.
- 	 */
--	leal	efi_rt_function_ptr, %edx
--	movl	(%edx), %ecx
--	pushl	%ecx
-+	pushl	(efi_rt_function_ptr)
- 
- 	/*
--	 * 10. Push the saved return address onto the stack and return.
-+	 * 10. Return to the saved return address.
- 	 */
--	leal	saved_return_addr, %edx
--	movl	(%edx), %ecx
--	pushl	%ecx
--	ret
-+	jmpl	*(saved_return_addr)
- .previous
- 
--.data
-+__INITDATA
- saved_return_addr:
- 	.long 0
- efi_rt_function_ptr:
---- a/arch/i386/kernel/entry.S
-+++ b/arch/i386/kernel/entry.S
-@@ -97,7 +97,7 @@ VM_MASK		= 0x00020000
- #define resume_userspace_sig	resume_userspace
- #endif
- 
--#define SAVE_ALL \
-+#define __SAVE_ALL(_DS) \
- 	cld; \
- 	pushl %fs; \
- 	CFI_ADJUST_CFA_OFFSET 4;\
-@@ -129,12 +129,26 @@ VM_MASK		= 0x00020000
- 	pushl %ebx; \
- 	CFI_ADJUST_CFA_OFFSET 4;\
- 	CFI_REL_OFFSET ebx, 0;\
--	movl $(__USER_DS), %edx; \
-+	movl $(_DS), %edx; \
- 	movl %edx, %ds; \
- 	movl %edx, %es; \
- 	movl $(__KERNEL_PERCPU), %edx; \
- 	movl %edx, %fs
- 
-+#ifdef CONFIG_PAX_KERNEXEC
-+#define SAVE_ALL \
-+	__SAVE_ALL(__KERNEL_DS); \
-+	GET_CR0_INTO_EDX; \
-+	movl %edx, %esi; \
-+	orl $X86_CR0_WP, %edx; \
-+	xorl %edx, %esi; \
-+	SET_CR0_FROM_EDX
-+#elif defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
-+#define SAVE_ALL __SAVE_ALL(__KERNEL_DS)
-+#else
-+#define SAVE_ALL __SAVE_ALL(__USER_DS)
-+#endif
-+
- #define RESTORE_INT_REGS \
- 	popl %ebx;	\
- 	CFI_ADJUST_CFA_OFFSET -4;\
-@@ -248,7 +262,17 @@ check_userspace:
- 	movb PT_CS(%esp), %al
- 	andl $(VM_MASK | SEGMENT_RPL_MASK), %eax
- 	cmpl $USER_RPL, %eax
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	jae resume_userspace
-+
-+	GET_CR0_INTO_EDX
-+	xorl %esi, %edx
-+	SET_CR0_FROM_EDX
-+	jmp resume_kernel
-+#else
- 	jb resume_kernel		# not returning to v8086 or userspace
-+#endif
- 
- ENTRY(resume_userspace)
-  	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
-@@ -307,10 +331,9 @@ sysenter_past_esp:
- 	/*CFI_REL_OFFSET cs, 0*/
- 	/*
- 	 * Push current_thread_info()->sysenter_return to the stack.
--	 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
--	 * pushed above; +8 corresponds to copy_thread's esp0 setting.
- 	 */
--	pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
-+	GET_THREAD_INFO(%ebp)
-+	pushl TI_sysenter_return(%ebp)
- 	CFI_ADJUST_CFA_OFFSET 4
- 	CFI_REL_OFFSET eip, 0
- 
-@@ -318,9 +341,17 @@ sysenter_past_esp:
-  * Load the potential sixth argument from user stack.
-  * Careful about security.
-  */
-+	movl 12(%esp),%ebp
-+
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+	mov 16(%esp),%ds
-+1:	movl %ds:(%ebp),%ebp
-+#else
- 	cmpl $__PAGE_OFFSET-3,%ebp
- 	jae syscall_fault
- 1:	movl (%ebp),%ebp
-+#endif
-+
- .section __ex_table,"a"
- 	.align 4
- 	.long 1b,syscall_fault
-@@ -343,20 +374,37 @@ sysenter_past_esp:
- 	movl TI_flags(%ebp), %ecx
- 	testw $_TIF_ALLWORK_MASK, %cx
- 	jne syscall_exit_work
-+
-+#ifdef CONFIG_PAX_RANDKSTACK
-+	pushl %eax
-+	CFI_ADJUST_CFA_OFFSET 4
-+	call pax_randomize_kstack
-+	popl %eax
-+	CFI_ADJUST_CFA_OFFSET -4
-+#endif
-+
- /* if something modifies registers it must also disable sysexit */
- 	movl PT_EIP(%esp), %edx
- 	movl PT_OLDESP(%esp), %ecx
- 	xorl %ebp,%ebp
- 	TRACE_IRQS_ON
- 1:	mov  PT_FS(%esp), %fs
-+2:	mov  PT_DS(%esp), %ds
-+3:	mov  PT_ES(%esp), %es
- 	ENABLE_INTERRUPTS_SYSEXIT
- 	CFI_ENDPROC
- .pushsection .fixup,"ax"
--2:	movl $0,PT_FS(%esp)
-+4:	movl $0,PT_FS(%esp)
- 	jmp 1b
-+5:	movl $0,PT_DS(%esp)
-+	jmp 2b
-+6:	movl $0,PT_ES(%esp)
-+	jmp 3b
- .section __ex_table,"a"
- 	.align 4
--	.long 1b,2b
-+	.long 1b,4b
-+	.long 2b,5b
-+	.long 3b,6b
- .popsection
- ENDPROC(sysenter_entry)
- 
-@@ -389,6 +437,10 @@ no_singlestep:
- 	testw $_TIF_ALLWORK_MASK, %cx	# current->work
- 	jne syscall_exit_work
- 
-+#ifdef CONFIG_PAX_RANDKSTACK
-+	call pax_randomize_kstack
-+#endif
-+
- restore_all:
- 	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS, SS and CS
- 	# Warning: PT_OLDSS(%esp) contains the wrong/random values if we
-@@ -552,17 +604,24 @@ syscall_badsys:
- END(syscall_badsys)
- 	CFI_ENDPROC
- 
--#define FIXUP_ESPFIX_STACK \
--	/* since we are on a wrong stack, we cant make it a C code :( */ \
--	PER_CPU(gdt_page, %ebx); \
--	GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah); \
--	addl %esp, %eax; \
--	pushl $__KERNEL_DS; \
--	CFI_ADJUST_CFA_OFFSET 4; \
--	pushl %eax; \
--	CFI_ADJUST_CFA_OFFSET 4; \
--	lss (%esp), %esp; \
-+.macro FIXUP_ESPFIX_STACK
-+	/* since we are on a wrong stack, we cant make it a C code :( */
-+#ifdef CONFIG_SMP
-+	movl PER_CPU_VAR(cpu_number), %ebx;
-+	shll $PAGE_SHIFT_asm, %ebx;
-+	addl $cpu_gdt_table, %ebx;
-+#else
-+	movl $cpu_gdt_table, %ebx;
-+#endif
-+	GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah);
-+	addl %esp, %eax;
-+	pushl $__KERNEL_DS;
-+	CFI_ADJUST_CFA_OFFSET 4;
-+	pushl %eax;
-+	CFI_ADJUST_CFA_OFFSET 4;
-+	lss (%esp), %esp;
- 	CFI_ADJUST_CFA_OFFSET -8;
-+.endm
- #define UNWIND_ESPFIX_STACK \
- 	movl %ss, %eax; \
- 	/* see if on espfix stack */ \
-@@ -579,7 +638,7 @@ END(syscall_badsys)
-  * Build the entry stubs and pointer table with
-  * some assembler magic.
-  */
--.data
-+.section .rodata,"a",@progbits
- ENTRY(interrupt)
- .text
- 
-@@ -679,12 +738,21 @@ error_code:
- 	popl %ecx
- 	CFI_ADJUST_CFA_OFFSET -4
- 	/*CFI_REGISTER es, ecx*/
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	GET_CR0_INTO_EDX
-+	movl %edx, %esi
-+	orl $X86_CR0_WP, %edx
-+	xorl %edx, %esi
-+	SET_CR0_FROM_EDX
-+#endif
-+
- 	movl PT_FS(%esp), %edi		# get the function address
- 	movl PT_ORIG_EAX(%esp), %edx	# get the error code
- 	movl $-1, PT_ORIG_EAX(%esp)	# no syscall to restart
- 	mov  %ecx, PT_FS(%esp)
- 	/*CFI_REL_OFFSET fs, ES*/
--	movl $(__USER_DS), %ecx
-+	movl $(__KERNEL_DS), %ecx
- 	movl %ecx, %ds
- 	movl %ecx, %es
- 	movl %esp,%eax			# pt_regs pointer
-@@ -818,6 +886,13 @@ nmi_stack_correct:
- 	xorl %edx,%edx		# zero error code
- 	movl %esp,%eax		# pt_regs pointer
- 	call do_nmi
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	GET_CR0_INTO_EDX
-+	xorl %esi, %edx
-+	SET_CR0_FROM_EDX
-+#endif
-+
- 	jmp restore_nocheck_notrace
- 	CFI_ENDPROC
- 
-@@ -858,6 +933,13 @@ nmi_espfix_stack:
- 	FIXUP_ESPFIX_STACK		# %eax == %esp
- 	xorl %edx,%edx			# zero error code
- 	call do_nmi
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	GET_CR0_INTO_EDX
-+	xorl %esi, %edx
-+	SET_CR0_FROM_EDX
-+#endif
-+
- 	RESTORE_REGS
- 	lss 12+4(%esp), %esp		# back to espfix stack
- 	CFI_ADJUST_CFA_OFFSET -24
-@@ -1106,7 +1188,6 @@ ENDPROC(xen_failsafe_callback)
- 
- #endif	/* CONFIG_XEN */
- 
--.section .rodata,"a"
- #include "syscall_table.S"
- 
- syscall_table_size=(.-sys_call_table)
---- a/arch/i386/kernel/head.S
-+++ b/arch/i386/kernel/head.S
-@@ -18,6 +18,7 @@
- #include <asm/thread_info.h>
- #include <asm/asm-offsets.h>
- #include <asm/setup.h>
-+#include <asm/msr-index.h>
- 
- /*
-  * References to members of the new_cpu_data structure.
-@@ -51,17 +52,22 @@
-  */
- LOW_PAGES = 1<<(32-PAGE_SHIFT_asm)
- 
--#if PTRS_PER_PMD > 1
--PAGE_TABLE_SIZE = (LOW_PAGES / PTRS_PER_PMD) + PTRS_PER_PGD
--#else
--PAGE_TABLE_SIZE = (LOW_PAGES / PTRS_PER_PGD)
--#endif
-+PAGE_TABLE_SIZE = (LOW_PAGES / PTRS_PER_PTE)
- BOOTBITMAP_SIZE = LOW_PAGES / 8
- ALLOCATOR_SLOP = 4
- 
- INIT_MAP_BEYOND_END = BOOTBITMAP_SIZE + (PAGE_TABLE_SIZE + ALLOCATOR_SLOP)*PAGE_SIZE_asm
- 
- /*
-+ * Real beginning of normal "text" segment
-+ */
-+ENTRY(stext)
-+ENTRY(_stext)
-+
-+.section .text.startup,"ax",@progbits
-+	ljmp $(__BOOT_CS),$phys_startup_32
-+
-+/*
-  * 32-bit kernel entrypoint; only used by the boot CPU.  On entry,
-  * %esi points to the real-mode code as a 32-bit pointer.
-  * CS and DS must be 4 GB flat segments, but we don't depend on
-@@ -69,6 +75,12 @@ INIT_MAP_BEYOND_END = BOOTBITMAP_SIZE + 
-  * can.
-  */
- .section .text.head,"ax",@progbits
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
-+.fill 4096,1,0xcc
-+#endif
-+
- ENTRY(startup_32)
- 
- /*
-@@ -82,6 +94,43 @@ ENTRY(startup_32)
- 	movl %eax,%fs
- 	movl %eax,%gs
- 
-+	movl $__per_cpu_start,%eax
-+	movw %ax,(cpu_gdt_table - __PAGE_OFFSET + __KERNEL_PERCPU + 2)
-+	rorl $16,%eax
-+	movb %al,(cpu_gdt_table - __PAGE_OFFSET + __KERNEL_PERCPU + 4)
-+	movb %ah,(cpu_gdt_table - __PAGE_OFFSET + __KERNEL_PERCPU + 7)
-+	movl $__per_cpu_end + PERCPU_MODULE_RESERVE,%eax
-+	subl $__per_cpu_start,%eax
-+	movw %ax,(cpu_gdt_table - __PAGE_OFFSET + __KERNEL_PERCPU + 0)
-+
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+	/* check for VMware */
-+	movl $0x564d5868,%eax
-+	xorl %ebx,%ebx
-+	movl $0xa,%ecx
-+	movl $0x5658,%edx
-+	in (%dx),%eax
-+	cmpl $0x564d5868,%ebx
-+	jz 1f
-+
-+	movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),%eax
-+	movl %eax,(cpu_gdt_table - __PAGE_OFFSET + GDT_ENTRY_KERNEL_DS * 8 + 4)
-+1:
-+#endif
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	movl $KERNEL_TEXT_OFFSET,%eax
-+	movw %ax,(cpu_gdt_table - __PAGE_OFFSET + __KERNEL_CS + 2)
-+	rorl $16,%eax
-+	movb %al,(cpu_gdt_table - __PAGE_OFFSET + __KERNEL_CS + 4)
-+	movb %ah,(cpu_gdt_table - __PAGE_OFFSET + __KERNEL_CS + 7)
-+
-+	movb %al,(boot_gdt - __PAGE_OFFSET + __BOOT_CS + 4)
-+	movb %ah,(boot_gdt - __PAGE_OFFSET + __BOOT_CS + 7)
-+	rorl $16,%eax
-+	movw %ax,(boot_gdt - __PAGE_OFFSET + __BOOT_CS + 2)
-+#endif
-+
- /*
-  * Clear BSS first so that there are no surprises...
-  * No need to cld as DF is already clear from cld above...
-@@ -129,24 +178,42 @@ ENTRY(startup_32)
-  * Warning: don't use %esi or the stack in this code.  However, %esp
-  * can be used as a GPR if you really need it...
-  */
--page_pde_offset = (__PAGE_OFFSET >> 20);
--
-+#ifdef CONFIG_X86_PAE
-+page_pde_offset = ((__PAGE_OFFSET >> 21) * (PAGE_SIZE_asm / PTRS_PER_PTE));
-+#else
-+page_pde_offset = ((__PAGE_OFFSET >> 22) * (PAGE_SIZE_asm / PTRS_PER_PTE));
-+#endif
- 	movl $(pg0 - __PAGE_OFFSET), %edi
-+#ifdef CONFIG_X86_PAE
-+	movl $(swapper_pm_dir - __PAGE_OFFSET), %edx
-+#else
- 	movl $(swapper_pg_dir - __PAGE_OFFSET), %edx
--	movl $0x007, %eax			/* 0x007 = PRESENT+RW+USER */
-+#endif
-+	movl $0x063, %eax			/* 0x063 = PRESENT+RW+ACCESSED+DIRTY */
- 10:
--	leal 0x007(%edi),%ecx			/* Create PDE entry */
-+	leal 0x063(%edi),%ecx			/* Create PDE entry */
- 	movl %ecx,(%edx)			/* Store identity PDE entry */
- 	movl %ecx,page_pde_offset(%edx)		/* Store kernel PDE entry */
-+#ifdef CONFIG_X86_PAE
-+	movl $0,4(%edx)
-+	movl $0,page_pde_offset+4(%edx)
-+	addl $8,%edx
-+	movl $512, %ecx
-+#else
- 	addl $4,%edx
- 	movl $1024, %ecx
-+#endif
- 11:
- 	stosl
-+#ifdef CONFIG_X86_PAE
-+	movl $0,(%edi)
-+	addl $4,%edi
-+#endif
- 	addl $0x1000,%eax
- 	loop 11b
- 	/* End condition: we must map up to and including INIT_MAP_BEYOND_END */
--	/* bytes beyond the end of our own page tables; the +0x007 is the attribute bits */
--	leal (INIT_MAP_BEYOND_END+0x007)(%edi),%ebp
-+	/* bytes beyond the end of our own page tables; the +0x063 is the attribute bits */
-+	leal (INIT_MAP_BEYOND_END+0x063)(%edi),%ebp
- 	cmpl %ebp,%eax
- 	jb 10b
- 	movl %edi,(init_pg_tables_end - __PAGE_OFFSET)
-@@ -167,10 +234,12 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
- #endif
- 
- 	/* Do an early initialization of the fixmap area */
--	movl $(swapper_pg_dir - __PAGE_OFFSET), %edx
--	movl $(swapper_pg_pmd - __PAGE_OFFSET), %eax
--	addl $0x007, %eax			/* 0x007 = PRESENT+RW+USER */
--	movl %eax, 4092(%edx)
-+	/* 0x067 = PRESENT+RW+USER+ACCESSED+DIRTY */
-+#ifdef CONFIG_X86_PAE
-+	movl $(swapper_pg_pmd - __PAGE_OFFSET + 0x067), (swapper_pm_dir - __PAGE_OFFSET + 4096 - 8)
-+#else
-+	movl $(swapper_pg_pmd - __PAGE_OFFSET + 0x067), (swapper_pg_dir - __PAGE_OFFSET + 4096 - 4)
-+#endif
- 
- #ifdef CONFIG_SMP
- ENTRY(startup_32_smp)
-@@ -181,6 +250,11 @@ ENTRY(startup_32_smp)
- 	movl %eax,%fs
- 	movl %eax,%gs
- 
-+	/* This is a secondary processor (AP) */
-+	xorl %ebx,%ebx
-+	incl %ebx
-+#endif /* CONFIG_SMP */
-+
- /*
-  *	New page tables may be in 4Mbyte page mode and may
-  *	be using the global pages. 
-@@ -196,42 +270,47 @@ ENTRY(startup_32_smp)
-  *	not yet offset PAGE_OFFSET..
-  */
- #define cr4_bits mmu_cr4_features-__PAGE_OFFSET
-+3:
- 	movl cr4_bits,%edx
- 	andl %edx,%edx
--	jz 6f
-+	jz 5f
- 	movl %cr4,%eax		# Turn on paging options (PSE,PAE,..)
- 	orl %edx,%eax
- 	movl %eax,%cr4
- 
--	btl $5, %eax		# check if PAE is enabled
--	jnc 6f
-+#ifdef CONFIG_X86_PAE
-+	movl %ebx,%edi
- 
- 	/* Check if extended functions are implemented */
- 	movl $0x80000000, %eax
- 	cpuid
- 	cmpl $0x80000000, %eax
--	jbe 6f
-+	jbe 4f
- 	mov $0x80000001, %eax
- 	cpuid
- 	/* Execute Disable bit supported? */
- 	btl $20, %edx
--	jnc 6f
-+	jnc 4f
- 
- 	/* Setup EFER (Extended Feature Enable Register) */
--	movl $0xc0000080, %ecx
-+	movl $MSR_EFER, %ecx
- 	rdmsr
- 
- 	btsl $11, %eax
- 	/* Make changes effective */
- 	wrmsr
- 
--6:
--	/* This is a secondary processor (AP) */
--	xorl %ebx,%ebx
--	incl %ebx
-+	btsl $63-32,__supported_pte_mask+4-__PAGE_OFFSET
-+	movl $1,nx_enabled-__PAGE_OFFSET
- 
--#endif /* CONFIG_SMP */
--3:
-+#if !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC) && !defined(CONFIG_PAX_MEMORY_UDEREF)
-+	movl $0,disable_x86_sep-__PAGE_OFFSET
-+#endif
-+
-+4:
-+	movl %edi,%ebx
-+#endif
-+5:
- 
- /*
-  * Enable paging
-@@ -256,9 +335,7 @@ ENTRY(startup_32_smp)
- 
- #ifdef CONFIG_SMP
- 	andl %ebx,%ebx
--	jz  1f				/* Initial CPU cleans BSS */
--	jmp checkCPUtype
--1:
-+	jnz checkCPUtype	/* Initial CPU cleans BSS */
- #endif /* CONFIG_SMP */
- 
- /*
-@@ -335,12 +412,12 @@ is386:	movl $2,%ecx		# set MP
- 	ljmp $(__KERNEL_CS),$1f
- 1:	movl $(__KERNEL_DS),%eax	# reload all the segment registers
- 	movl %eax,%ss			# after changing gdt.
--	movl %eax,%fs			# gets reset once there's real percpu
--
--	movl $(__USER_DS),%eax		# DS/ES contains default USER segment
- 	movl %eax,%ds
- 	movl %eax,%es
- 
-+	movl $(__KERNEL_PERCPU), %eax
-+	movl %eax,%fs		# set this cpu's percpu
-+
- 	xorl %eax,%eax			# Clear GS and LDT
- 	movl %eax,%gs
- 	lldt %ax
-@@ -351,11 +428,7 @@ is386:	movl $2,%ecx		# set MP
- 	movb ready, %cl
- 	movb $1, ready
- 	cmpb $0,%cl		# the first CPU calls start_kernel
--	je   1f
--	movl $(__KERNEL_PERCPU), %eax
--	movl %eax,%fs		# set this cpu's percpu
--	jmp initialize_secondary # all other CPUs call initialize_secondary
--1:
-+	jne initialize_secondary # all other CPUs call initialize_secondary
- #endif /* CONFIG_SMP */
- 	jmp start_kernel
- 
-@@ -441,8 +514,8 @@ early_page_fault:
- 	jmp early_fault
- 
- early_fault:
--	cld
- #ifdef CONFIG_PRINTK
-+	cld
- 	movl $(__KERNEL_DS),%eax
- 	movl %eax,%ds
- 	movl %eax,%es
-@@ -466,8 +539,8 @@ hlt_loop:
- /* This is the default interrupt "handler" :-) */
- 	ALIGN
- ignore_int:
--	cld
- #ifdef CONFIG_PRINTK
-+	cld
- 	pushl %eax
- 	pushl %ecx
- 	pushl %edx
-@@ -498,31 +571,58 @@ ignore_int:
- #endif
- 	iret
- 
--.section .text
--/*
-- * Real beginning of normal "text" segment
-- */
--ENTRY(stext)
--ENTRY(_stext)
--
- /*
-  * BSS section
-  */
--.section ".bss.page_aligned","wa"
-+.section .swapper_pg_dir,"a",@progbits
- 	.align PAGE_SIZE_asm
- ENTRY(swapper_pg_dir)
-+#ifdef CONFIG_X86_PAE
-+	.long swapper_pm_dir-__PAGE_OFFSET+1
-+	.long 0
-+	.long swapper_pm_dir+512*8-__PAGE_OFFSET+1
-+	.long 0
-+	.long swapper_pm_dir+512*16-__PAGE_OFFSET+1
-+	.long 0
-+	.long swapper_pm_dir+512*24-__PAGE_OFFSET+1
-+	.long 0
-+#else
- 	.fill 1024,4,0
-+#endif
-+
-+.section .swapper_pm_dir,"a",@progbits
-+#ifdef CONFIG_X86_PAE
-+ENTRY(swapper_pm_dir)
-+	.fill 512,8,0
-+	.fill 512,8,0
-+	.fill 512,8,0
-+	.fill 512,8,0
-+#endif
-+
- ENTRY(swapper_pg_pmd)
- 	.fill 1024,4,0
-+
-+.section .empty_zero_page,"a",@progbits
- ENTRY(empty_zero_page)
- 	.fill 4096,1,0
- 
- /*
-+ * The IDT has to be page-aligned to simplify the Pentium
-+ * F0 0F bug workaround.. We have a special link segment
-+ * for this.
-+ */
-+.section .idt,"a",@progbits
-+ENTRY(idt_table)
-+	.fill 256,8,0
-+
-+/*
-  * This starts the data section.
-  */
- .data
-+
-+.section .rodata,"a",@progbits
- ENTRY(stack_start)
--	.long init_thread_union+THREAD_SIZE
-+	.long init_thread_union+THREAD_SIZE-8
- 	.long __BOOT_DS
- 
- ready:	.byte 0
-@@ -565,7 +665,7 @@ idt_descr:
- 	.word 0				# 32 bit align gdt_desc.address
- ENTRY(early_gdt_descr)
- 	.word GDT_ENTRIES*8-1
--	.long per_cpu__gdt_page		/* Overwritten for secondary CPUs */
-+	.long cpu_gdt_table		/* Overwritten for secondary CPUs */
- 
- /*
-  * The boot_gdt must mirror the equivalent in setup.S and is
-@@ -574,5 +674,61 @@ ENTRY(early_gdt_descr)
- 	.align L1_CACHE_BYTES
- ENTRY(boot_gdt)
- 	.fill GDT_ENTRY_BOOT_CS,8,0
--	.quad 0x00cf9a000000ffff	/* kernel 4GB code at 0x00000000 */
--	.quad 0x00cf92000000ffff	/* kernel 4GB data at 0x00000000 */
-+	.quad 0x00cf9b000000ffff	/* kernel 4GB code at 0x00000000 */
-+	.quad 0x00cf93000000ffff	/* kernel 4GB data at 0x00000000 */
-+
-+	.align PAGE_SIZE_asm
-+ENTRY(cpu_gdt_table)
-+	.quad 0x0000000000000000	/* NULL descriptor */
-+	.quad 0x0000000000000000	/* 0x0b reserved */
-+	.quad 0x0000000000000000	/* 0x13 reserved */
-+	.quad 0x0000000000000000	/* 0x1b reserved */
-+	.quad 0x0000000000000000	/* 0x20 unused */
-+	.quad 0x0000000000000000	/* 0x28 unused */
-+	.quad 0x0000000000000000	/* 0x33 TLS entry 1 */
-+	.quad 0x0000000000000000	/* 0x3b TLS entry 2 */
-+	.quad 0x0000000000000000	/* 0x43 TLS entry 3 */
-+	.quad 0x0000000000000000	/* 0x4b reserved */
-+	.quad 0x0000000000000000	/* 0x53 reserved */
-+	.quad 0x0000000000000000	/* 0x5b reserved */
-+
-+	.quad 0x00cf9b000000ffff	/* 0x60 kernel 4GB code at 0x00000000 */
-+	.quad 0x00cf93000000ffff	/* 0x68 kernel 4GB data at 0x00000000 */
-+	.quad 0x00cffb000000ffff	/* 0x73 user 4GB code at 0x00000000 */
-+	.quad 0x00cff3000000ffff	/* 0x7b user 4GB data at 0x00000000 */
-+
-+	.quad 0x0000000000000000	/* 0x80 TSS descriptor */
-+	.quad 0x0000000000000000	/* 0x88 LDT descriptor */
-+
-+	/*
-+	 * Segments used for calling PnP BIOS have byte granularity.
-+	 * The code segments and data segments have fixed 64k limits,
-+	 * the transfer segment sizes are set at run time.
-+	 */
-+	.quad 0x00409b000000ffff	/* 0x90 32-bit code */
-+	.quad 0x00009b000000ffff	/* 0x98 16-bit code */
-+	.quad 0x000093000000ffff	/* 0xa0 16-bit data */
-+	.quad 0x0000930000000000	/* 0xa8 16-bit data */
-+	.quad 0x0000930000000000	/* 0xb0 16-bit data */
-+
-+	/*
-+	 * The APM segments have byte granularity and their bases
-+	 * are set at run time.  All have 64k limits.
-+	 */
-+	.quad 0x00409b000000ffff	/* 0xb8 APM CS    code */
-+	.quad 0x00009b000000ffff	/* 0xc0 APM CS 16 code (16 bit) */
-+	.quad 0x004093000000ffff	/* 0xc8 APM DS    data */
-+
-+	.quad 0x00c0930000000000	/* 0xd0 - ESPFIX SS */
-+	.quad 0x0040930000000000	/* 0xd8 - PERCPU */
-+	.quad 0x0000000000000000	/* 0xe0 - PCIBIOS_CS */
-+	.quad 0x0000000000000000	/* 0xe8 - PCIBIOS_DS */
-+	.quad 0x0000000000000000	/* 0xf0 - unused */
-+	.quad 0x0000000000000000	/* 0xf8 - GDT entry 31: double-fault TSS */
-+
-+	/* Be sure this is zeroed to avoid false validations in Xen */
-+	.fill PAGE_SIZE_asm - GDT_ENTRIES,1,0
-+
-+#ifdef CONFIG_SMP
-+	.fill (NR_CPUS-1) * (PAGE_SIZE_asm),1,0 /* other CPU's GDT */
-+#endif
---- a/arch/i386/kernel/hpet.c
-+++ b/arch/i386/kernel/hpet.c
-@@ -96,7 +96,7 @@ static void hpet_reserve_platform_timers
- 	hd.hd_irq[1] = HPET_LEGACY_RTC;
- 
- 	for (i = 2; i < nrtimers; timer++, i++)
--		hd.hd_irq[i] = (timer->hpet_config & Tn_INT_ROUTE_CNF_MASK) >>
-+		hd.hd_irq[i] = (readl(&timer->hpet_config) & Tn_INT_ROUTE_CNF_MASK) >>
- 			Tn_INT_ROUTE_CNF_SHIFT;
- 
- 	hpet_alloc(&hd);
---- a/arch/i386/kernel/i386_ksyms.c
-+++ b/arch/i386/kernel/i386_ksyms.c
-@@ -2,12 +2,16 @@
- #include <asm/checksum.h>
- #include <asm/desc.h>
- 
-+EXPORT_SYMBOL_GPL(cpu_gdt_table);
-+
- EXPORT_SYMBOL(__down_failed);
- EXPORT_SYMBOL(__down_failed_interruptible);
- EXPORT_SYMBOL(__down_failed_trylock);
- EXPORT_SYMBOL(__up_wakeup);
- /* Networking helper routines. */
- EXPORT_SYMBOL(csum_partial_copy_generic);
-+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
-+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
- 
- EXPORT_SYMBOL(__get_user_1);
- EXPORT_SYMBOL(__get_user_2);
---- a/arch/i386/kernel/i8259.c
-+++ b/arch/i386/kernel/i8259.c
-@@ -350,7 +350,7 @@ static irqreturn_t math_error_irq(int cp
-  * New motherboards sometimes make IRQ 13 be a PCI interrupt,
-  * so allow interrupt sharing.
-  */
--static struct irqaction fpu_irq = { math_error_irq, 0, CPU_MASK_NONE, "fpu", NULL, NULL };
-+static struct irqaction fpu_irq = { math_error_irq, 0, CPU_MASK_NONE, "fpu", NULL, NULL, 0, NULL };
- 
- void __init init_ISA_irqs (void)
- {
---- a/arch/i386/kernel/init_task.c
-+++ b/arch/i386/kernel/init_task.c
-@@ -42,5 +42,5 @@ EXPORT_SYMBOL(init_task);
-  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
-  * no more per-task TSS's.
-  */ 
--DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
-+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
- 
---- a/arch/i386/kernel/ioport.c
-+++ b/arch/i386/kernel/ioport.c
-@@ -16,6 +16,7 @@
- #include <linux/slab.h>
- #include <linux/thread_info.h>
- #include <linux/syscalls.h>
-+#include <linux/grsecurity.h>
- 
- /* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
- static void set_bitmap(unsigned long *bitmap, unsigned int base, unsigned int extent, int new_value)
-@@ -64,9 +65,16 @@ asmlinkage long sys_ioperm(unsigned long
- 
- 	if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
- 		return -EINVAL;
-+#ifdef CONFIG_GRKERNSEC_IO
-+	if (turn_on) {
-+		gr_handle_ioperm();
-+#else
- 	if (turn_on && !capable(CAP_SYS_RAWIO))
-+#endif
- 		return -EPERM;
--
-+#ifdef CONFIG_GRKERNSEC_IO
-+	}
-+#endif
- 	/*
- 	 * If it's the first ioperm() call in this thread's lifetime, set the
- 	 * IO bitmap up. ioperm() is much less timing critical than clone(),
-@@ -89,7 +97,7 @@ asmlinkage long sys_ioperm(unsigned long
- 	 * because the ->io_bitmap_max value must match the bitmap
- 	 * contents:
- 	 */
--	tss = &per_cpu(init_tss, get_cpu());
-+	tss = init_tss + get_cpu();
- 
- 	set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
- 
-@@ -143,8 +151,13 @@ asmlinkage long sys_iopl(unsigned long u
- 		return -EINVAL;
- 	/* Trying to gain more privileges? */
- 	if (level > old) {
-+#ifdef CONFIG_GRKERNSEC_IO
-+		gr_handle_iopl();
-+		return -EPERM;
-+#else
- 		if (!capable(CAP_SYS_RAWIO))
- 			return -EPERM;
-+#endif
- 	}
- 	t->iopl = level << 12;
- 	regs->eflags = (regs->eflags & ~X86_EFLAGS_IOPL) | t->iopl;
---- a/arch/i386/kernel/irq.c
-+++ b/arch/i386/kernel/irq.c
-@@ -117,7 +117,7 @@ fastcall unsigned int do_IRQ(struct pt_r
- 		int arg1, arg2, ebx;
- 
- 		/* build the stack frame on the IRQ stack */
--		isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
-+		isp = (u32*) ((char*)irqctx + sizeof(*irqctx)) - 2;
- 		irqctx->tinfo.task = curctx->tinfo.task;
- 		irqctx->tinfo.previous_esp = current_stack_pointer;
- 
-@@ -213,7 +213,7 @@ asmlinkage void do_softirq(void)
- 		irqctx->tinfo.previous_esp = current_stack_pointer;
- 
- 		/* build the stack frame on the softirq stack */
--		isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
-+		isp = (u32*) ((char*)irqctx + sizeof(*irqctx)) - 2;
- 
- 		asm volatile(
- 			"       xchgl   %%ebx,%%esp     \n"
---- a/arch/i386/kernel/kprobes.c
-+++ b/arch/i386/kernel/kprobes.c
-@@ -49,9 +49,24 @@ static __always_inline void set_jmp_op(v
- 		char op;
- 		long raddr;
- 	} __attribute__((packed)) *jop;
--	jop = (struct __arch_jmp_op *)from;
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	unsigned long cr0;
-+#endif
-+
-+	jop = (struct __arch_jmp_op *)(from + __KERNEL_TEXT_OFFSET);
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_open_kernel(cr0);
-+#endif
-+
- 	jop->raddr = (long)(to) - ((long)(from) + 5);
- 	jop->op = RELATIVEJUMP_INSTRUCTION;
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_close_kernel(cr0);
-+#endif
-+
- }
- 
- /*
-@@ -153,14 +168,28 @@ static int __kprobes is_IF_modifier(kpro
- 
- int __kprobes arch_prepare_kprobe(struct kprobe *p)
- {
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	unsigned long cr0;
-+#endif
-+
- 	/* insn: must be on special executable page on i386. */
- 	p->ainsn.insn = get_insn_slot();
- 	if (!p->ainsn.insn)
- 		return -ENOMEM;
- 
--	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
--	p->opcode = *p->addr;
--	if (can_boost(p->addr)) {
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_open_kernel(cr0);
-+#endif
-+
-+	memcpy(p->ainsn.insn, p->addr + __KERNEL_TEXT_OFFSET, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_close_kernel(cr0);
-+#endif
-+
-+	p->opcode = *(p->addr + __KERNEL_TEXT_OFFSET);
-+	if (can_boost(p->addr + __KERNEL_TEXT_OFFSET)) {
- 		p->ainsn.boostable = 0;
- 	} else {
- 		p->ainsn.boostable = -1;
-@@ -219,7 +248,7 @@ static void __kprobes prepare_singlestep
- 	if (p->opcode == BREAKPOINT_INSTRUCTION)
- 		regs->eip = (unsigned long)p->addr;
- 	else
--		regs->eip = (unsigned long)p->ainsn.insn;
-+		regs->eip = (unsigned long)p->ainsn.insn - __KERNEL_TEXT_OFFSET;
- }
- 
- /* Called with kretprobe_lock held */
-@@ -325,7 +354,7 @@ ss_probe:
- 	if (p->ainsn.boostable == 1 && !p->post_handler){
- 		/* Boost up -- we can execute copied instructions directly */
- 		reset_current_kprobe();
--		regs->eip = (unsigned long)p->ainsn.insn;
-+		regs->eip = (unsigned long)p->ainsn.insn - __KERNEL_TEXT_OFFSET;
- 		preempt_enable_no_resched();
- 		return 1;
- 	}
-@@ -475,7 +504,7 @@ static void __kprobes resume_execution(s
- 		struct pt_regs *regs, struct kprobe_ctlblk *kcb)
- {
- 	unsigned long *tos = (unsigned long *)&regs->esp;
--	unsigned long copy_eip = (unsigned long)p->ainsn.insn;
-+	unsigned long copy_eip = (unsigned long)p->ainsn.insn - __KERNEL_TEXT_OFFSET;
- 	unsigned long orig_eip = (unsigned long)p->addr;
- 
- 	regs->eflags &= ~TF_MASK;
-@@ -648,7 +677,7 @@ int __kprobes kprobe_exceptions_notify(s
- 	struct die_args *args = (struct die_args *)data;
- 	int ret = NOTIFY_DONE;
- 
--	if (args->regs && user_mode_vm(args->regs))
-+	if (args->regs && user_mode(args->regs))
- 		return ret;
- 
- 	switch (val) {
---- a/arch/i386/kernel/ldt.c
-+++ b/arch/i386/kernel/ldt.c
-@@ -58,7 +58,7 @@ static int alloc_ldt(mm_context_t *pc, i
- #ifdef CONFIG_SMP
- 		cpumask_t mask;
- 		preempt_disable();
--		load_LDT(pc);
-+		load_LDT_nolock(pc);
- 		mask = cpumask_of_cpu(smp_processor_id());
- 		if (!cpus_equal(current->mm->cpu_vm_mask, mask))
- 			smp_call_function(flush_ldt, NULL, 1, 1);
-@@ -102,6 +102,22 @@ int init_new_context(struct task_struct 
- 		retval = copy_ldt(&mm->context, &old_mm->context);
- 		up(&old_mm->context.sem);
- 	}
-+
-+	if (tsk == current) {
-+		mm->context.vdso = ~0UL;
-+
-+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
-+		mm->context.user_cs_base = 0UL;
-+		mm->context.user_cs_limit = ~0UL;
-+
-+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
-+		cpus_clear(mm->context.cpu_user_cs_mask);
-+#endif
-+
-+#endif
-+
-+	}
-+
- 	return retval;
- }
- 
-@@ -212,6 +228,13 @@ static int write_ldt(void __user * ptr, 
- 		}
- 	}
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
-+		error = -EINVAL;
-+		goto out_unlock;
-+	}
-+#endif
-+
- 	entry_1 = LDT_entry_a(&ldt_info);
- 	entry_2 = LDT_entry_b(&ldt_info);
- 	if (oldmode)
---- a/arch/i386/kernel/machine_kexec.c
-+++ b/arch/i386/kernel/machine_kexec.c
-@@ -29,25 +29,25 @@ static u32 kexec_pmd1[1024] PAGE_ALIGNED
- static u32 kexec_pte0[1024] PAGE_ALIGNED;
- static u32 kexec_pte1[1024] PAGE_ALIGNED;
- 
--static void set_idt(void *newidt, __u16 limit)
-+static void set_idt(struct desc_struct *newidt, __u16 limit)
- {
- 	struct Xgt_desc_struct curidt;
- 
- 	/* ia32 supports unaliged loads & stores */
- 	curidt.size    = limit;
--	curidt.address = (unsigned long)newidt;
-+	curidt.address = newidt;
- 
- 	load_idt(&curidt);
- };
- 
- 
--static void set_gdt(void *newgdt, __u16 limit)
-+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
- {
- 	struct Xgt_desc_struct curgdt;
- 
- 	/* ia32 supports unaligned loads & stores */
- 	curgdt.size    = limit;
--	curgdt.address = (unsigned long)newgdt;
-+	curgdt.address = newgdt;
- 
- 	load_gdt(&curgdt);
- };
-@@ -110,10 +110,10 @@ NORET_TYPE void machine_kexec(struct kim
- 	local_irq_disable();
- 
- 	control_page = page_address(image->control_code_page);
--	memcpy(control_page, relocate_kernel, PAGE_SIZE);
-+	memcpy(control_page, relocate_kernel + __KERNEL_TEXT_OFFSET, PAGE_SIZE);
- 
- 	page_list[PA_CONTROL_PAGE] = __pa(control_page);
--	page_list[VA_CONTROL_PAGE] = (unsigned long)relocate_kernel;
-+	page_list[VA_CONTROL_PAGE] = (unsigned long)relocate_kernel + __KERNEL_TEXT_OFFSET;
- 	page_list[PA_PGD] = __pa(kexec_pgd);
- 	page_list[VA_PGD] = (unsigned long)kexec_pgd;
- #ifdef CONFIG_X86_PAE
---- a/arch/i386/kernel/module.c
-+++ b/arch/i386/kernel/module.c
-@@ -23,6 +23,8 @@
- #include <linux/kernel.h>
- #include <linux/bug.h>
- 
-+#include <asm/desc.h>
-+
- #if 0
- #define DEBUGP printk
- #else
-@@ -33,9 +35,30 @@ void *module_alloc(unsigned long size)
- {
- 	if (size == 0)
- 		return NULL;
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	return vmalloc(size);
-+#else
- 	return vmalloc_exec(size);
-+#endif
-+
- }
- 
-+#ifdef CONFIG_PAX_KERNEXEC
-+void *module_alloc_exec(unsigned long size)
-+{
-+	struct vm_struct *area;
-+
-+	if (size == 0)
-+		return NULL;
-+
-+	area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_VADDR, (unsigned long)&MODULES_END);
-+	if (area)
-+		return area->addr;
-+
-+	return NULL;
-+}
-+#endif
- 
- /* Free memory returned from module_alloc */
- void module_free(struct module *mod, void *module_region)
-@@ -45,6 +68,45 @@ void module_free(struct module *mod, voi
-            table entries. */
- }
- 
-+#ifdef CONFIG_PAX_KERNEXEC
-+void module_free_exec(struct module *mod, void *module_region)
-+{
-+	struct vm_struct **p, *tmp;
-+
-+	if (!module_region)
-+		return;
-+
-+	if ((PAGE_SIZE-1) & (unsigned long)module_region) {
-+		printk(KERN_ERR "Trying to module_free_exec() bad address (%p)\n", module_region);
-+		WARN_ON(1);
-+		return;
-+	}
-+
-+	write_lock(&vmlist_lock);
-+	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next)
-+		 if (tmp->addr == module_region)
-+			break;
-+
-+	if (tmp) {
-+		unsigned long cr0;
-+
-+		pax_open_kernel(cr0);
-+		memset(tmp->addr, 0xCC, tmp->size);
-+		pax_close_kernel(cr0);
-+
-+		*p = tmp->next;
-+		kfree(tmp);
-+	}
-+	write_unlock(&vmlist_lock);
-+
-+	if (!tmp) {
-+		printk(KERN_ERR "Trying to module_free_exec() nonexistent vm area (%p)\n",
-+				module_region);
-+		WARN_ON(1);
-+	}
-+}
-+#endif
-+
- /* We don't need anything special. */
- int module_frob_arch_sections(Elf_Ehdr *hdr,
- 			      Elf_Shdr *sechdrs,
-@@ -63,14 +125,20 @@ int apply_relocate(Elf32_Shdr *sechdrs,
- 	unsigned int i;
- 	Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
- 	Elf32_Sym *sym;
--	uint32_t *location;
-+	uint32_t *plocation, location;
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	unsigned long cr0;
-+#endif
- 
- 	DEBUGP("Applying relocate section %u to %u\n", relsec,
- 	       sechdrs[relsec].sh_info);
- 	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
- 		/* This is where to make the change */
--		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
--			+ rel[i].r_offset;
-+		plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
-+		location = (uint32_t)plocation;
-+		if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
-+			plocation = (void *)plocation + __KERNEL_TEXT_OFFSET;
- 		/* This is the symbol it is referring to.  Note that all
- 		   undefined symbols have been resolved.  */
- 		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
-@@ -78,12 +146,32 @@ int apply_relocate(Elf32_Shdr *sechdrs,
- 
- 		switch (ELF32_R_TYPE(rel[i].r_info)) {
- 		case R_386_32:
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+			pax_open_kernel(cr0);
-+#endif
-+
- 			/* We add the value into the location given */
--			*location += sym->st_value;
-+			*plocation += sym->st_value;
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+			pax_close_kernel(cr0);
-+#endif
-+
- 			break;
- 		case R_386_PC32:
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+			pax_open_kernel(cr0);
-+#endif
-+
- 			/* Add the value, subtract its postition */
--			*location += sym->st_value - (uint32_t)location;
-+			*plocation += sym->st_value - location;
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+			pax_close_kernel(cr0);
-+#endif
-+
- 			break;
- 		default:
- 			printk(KERN_ERR "module %s: Unknown relocation: %u\n",
---- a/arch/i386/kernel/paravirt.c
-+++ b/arch/i386/kernel/paravirt.c
-@@ -198,7 +198,7 @@ unsigned paravirt_patch_insns(void *insn
- 	if (insn_len > len || start == NULL)
- 		insn_len = len;
- 	else
--		memcpy(insnbuf, start, insn_len);
-+		memcpy(insnbuf, start + __KERNEL_TEXT_OFFSET, insn_len);
- 
- 	return insn_len;
- }
-@@ -273,7 +273,7 @@ int paravirt_disable_iospace(void)
- 	return ret;
- }
- 
--struct paravirt_ops paravirt_ops = {
-+struct paravirt_ops paravirt_ops __read_only = {
- 	.name = "bare hardware",
- 	.paravirt_enabled = 0,
- 	.kernel_rpl = 0,
---- a/arch/i386/kernel/process.c
-+++ b/arch/i386/kernel/process.c
-@@ -68,15 +68,17 @@ EXPORT_SYMBOL(boot_option_idle_override)
- DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
- EXPORT_PER_CPU_SYMBOL(current_task);
- 
-+#ifdef CONFIG_SMP
- DEFINE_PER_CPU(int, cpu_number);
- EXPORT_PER_CPU_SYMBOL(cpu_number);
-+#endif
- 
- /*
-  * Return saved PC of a blocked thread.
-  */
- unsigned long thread_saved_pc(struct task_struct *tsk)
- {
--	return ((unsigned long *)tsk->thread.esp)[3];
-+	return tsk->thread.eip;
- }
- 
- /*
-@@ -307,7 +309,7 @@ void show_regs(struct pt_regs * regs)
- 	printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id());
- 	print_symbol("EIP is at %s\n", regs->eip);
- 
--	if (user_mode_vm(regs))
-+	if (user_mode(regs))
- 		printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
- 	printk(" EFLAGS: %08lx    %s  (%s %.*s)\n",
- 	       regs->eflags, print_tainted(), init_utsname()->release,
-@@ -358,8 +360,8 @@ int kernel_thread(int (*fn)(void *), voi
- 	regs.ebx = (unsigned long) fn;
- 	regs.edx = (unsigned long) arg;
- 
--	regs.xds = __USER_DS;
--	regs.xes = __USER_DS;
-+	regs.xds = __KERNEL_DS;
-+	regs.xes = __KERNEL_DS;
- 	regs.xfs = __KERNEL_PERCPU;
- 	regs.orig_eax = -1;
- 	regs.eip = (unsigned long) kernel_thread_helper;
-@@ -381,7 +383,7 @@ void exit_thread(void)
- 		struct task_struct *tsk = current;
- 		struct thread_struct *t = &tsk->thread;
- 		int cpu = get_cpu();
--		struct tss_struct *tss = &per_cpu(init_tss, cpu);
-+		struct tss_struct *tss = init_tss + cpu;
- 
- 		kfree(t->io_bitmap_ptr);
- 		t->io_bitmap_ptr = NULL;
-@@ -402,6 +404,7 @@ void flush_thread(void)
- {
- 	struct task_struct *tsk = current;
- 
-+	__asm__("mov %0,%%gs\n" : : "r" (0) : "memory");
- 	memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
- 	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));	
- 	clear_tsk_thread_flag(tsk, TIF_DEBUG);
-@@ -435,7 +438,7 @@ int copy_thread(int nr, unsigned long cl
- 	struct task_struct *tsk;
- 	int err;
- 
--	childregs = task_pt_regs(p);
-+	childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
- 	*childregs = *regs;
- 	childregs->eax = 0;
- 	childregs->esp = esp;
-@@ -477,6 +480,11 @@ int copy_thread(int nr, unsigned long cl
- 		if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
- 			goto out;
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+		if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
-+			goto out;
-+#endif
-+
- 		desc = p->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
- 		desc->a = LDT_entry_a(&info);
- 		desc->b = LDT_entry_b(&info);
-@@ -663,7 +671,7 @@ struct task_struct fastcall * __switch_t
- 	struct thread_struct *prev = &prev_p->thread,
- 				 *next = &next_p->thread;
- 	int cpu = smp_processor_id();
--	struct tss_struct *tss = &per_cpu(init_tss, cpu);
-+	struct tss_struct *tss = init_tss + cpu;
- 
- 	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
- 
-@@ -691,6 +699,11 @@ struct task_struct fastcall * __switch_t
- 	 */
- 	savesegment(gs, prev->gs);
- 
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+	if (!segment_eq(task_thread_info(prev_p)->addr_limit, task_thread_info(next_p)->addr_limit))
-+		__set_fs(task_thread_info(next_p)->addr_limit, cpu);
-+#endif
-+
- 	/*
- 	 * Load the per-thread Thread-Local Storage descriptor.
- 	 */
-@@ -855,6 +868,12 @@ asmlinkage int sys_set_thread_area(struc
- 
- 	if (copy_from_user(&info, u_info, sizeof(info)))
- 		return -EFAULT;
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
-+		return -EINVAL;
-+#endif
-+
- 	idx = info.entry_number;
- 
- 	/*
-@@ -943,9 +962,28 @@ asmlinkage int sys_get_thread_area(struc
- 	return 0;
- }
- 
--unsigned long arch_align_stack(unsigned long sp)
-+#ifdef CONFIG_PAX_RANDKSTACK
-+asmlinkage void pax_randomize_kstack(void)
- {
--	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
--		sp -= get_random_int() % 8192;
--	return sp & ~0xf;
-+	struct tss_struct *tss;
-+	unsigned long time;
-+
-+	if (!randomize_va_space)
-+		return;
-+
-+	tss = init_tss + smp_processor_id();
-+	rdtscl(time);
-+
-+	/* P4 seems to return a 0 LSB, ignore it */
-+#ifdef CONFIG_MPENTIUM4
-+	time &= 0x1EUL;
-+	time <<= 2;
-+#else
-+	time &= 0xFUL;
-+	time <<= 3;
-+#endif
-+
-+	tss->x86_tss.esp0 ^= time;
-+	current->thread.esp0 = tss->x86_tss.esp0;
- }
-+#endif
---- a/arch/i386/kernel/ptrace.c
-+++ b/arch/i386/kernel/ptrace.c
-@@ -161,22 +161,20 @@ static unsigned long convert_eip_to_line
- 	 * and APM bios ones we just ignore here.
- 	 */
- 	if (seg & LDT_SEGMENT) {
--		u32 *desc;
-+		struct desc_struct *desc;
- 		unsigned long base;
- 
- 		seg &= ~7UL;
- 
- 		down(&child->mm->context.sem);
- 		if (unlikely((seg >> 3) >= child->mm->context.size))
--			addr = -1L; /* bogus selector, access would fault */
-+			addr = -EINVAL;
- 		else {
--			desc = child->mm->context.ldt + seg;
--			base = ((desc[0] >> 16) |
--				((desc[1] & 0xff) << 16) |
--				(desc[1] & 0xff000000));
-+			desc = &child->mm->context.ldt[seg >> 3];
-+			base = (desc->a >> 16) | ((desc->b & 0xff) << 16) | (desc->b & 0xff000000);
- 
- 			/* 16-bit code segment? */
--			if (!((desc[1] >> 22) & 1))
-+			if (!((desc->b >> 22) & 1))
- 				addr &= 0xffff;
- 			addr += base;
- 		}
-@@ -191,6 +189,9 @@ static inline int is_setting_trap_flag(s
- 	unsigned char opcode[15];
- 	unsigned long addr = convert_eip_to_linear(child, regs);
- 
-+	if (addr == -EINVAL)
-+		return 0;
-+
- 	copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
- 	for (i = 0; i < copied; i++) {
- 		switch (opcode[i]) {
-@@ -341,6 +342,11 @@ ptrace_set_thread_area(struct task_struc
- 	if (copy_from_user(&info, user_desc, sizeof(info)))
- 		return -EFAULT;
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if ((child->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
-+		return -EINVAL;
-+#endif
-+
- 	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
- 		return -EINVAL;
- 
-@@ -420,6 +426,17 @@ long arch_ptrace(struct task_struct *chi
- 			  if(addr == (long) &dummy->u_debugreg[5]) break;
- 			  if(addr < (long) &dummy->u_debugreg[4] &&
- 			     ((unsigned long) data) >= TASK_SIZE-3) break;
-+
-+#ifdef CONFIG_GRKERNSEC
-+			  if(addr >= (long) &dummy->u_debugreg[0] &&
-+			     addr <= (long) &dummy->u_debugreg[3]){
-+				long reg   = (addr - (long) &dummy->u_debugreg[0]) >> 2;
-+				long type  = (child->thread.debugreg[7] >> (DR_CONTROL_SHIFT + 4*reg)) & 3;
-+				long align = (child->thread.debugreg[7] >> (DR_CONTROL_SHIFT + 2 + 4*reg)) & 3;
-+				if((type & 1) && (data & align))
-+					break;
-+			  }
-+#endif
- 			  
- 			  /* Sanity-check data. Take one half-byte at once with
- 			   * check = (val >> (16 + 4*i)) & 0xf. It contains the
-@@ -636,7 +653,7 @@ void send_sigtrap(struct task_struct *ts
- 	info.si_code = TRAP_BRKPT;
- 
- 	/* User-mode eip? */
--	info.si_addr = user_mode_vm(regs) ? (void __user *) regs->eip : NULL;
-+	info.si_addr = user_mode(regs) ? (void __user *) regs->eip : NULL;
- 
- 	/* Send us the fakey SIGTRAP */
- 	force_sig_info(SIGTRAP, &info, tsk);
---- a/arch/i386/kernel/reboot.c
-+++ b/arch/i386/kernel/reboot.c
-@@ -26,7 +26,7 @@
- void (*pm_power_off)(void);
- EXPORT_SYMBOL(pm_power_off);
- 
--static int reboot_mode;
-+static unsigned short reboot_mode;
- static int reboot_thru_bios;
- 
- #ifdef CONFIG_SMP
-@@ -138,7 +138,7 @@ static struct dmi_system_id __initdata r
- 			DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq"),
- 		},
- 	},
--	{ }
-+	{ NULL, NULL, {{0, NULL}}, NULL}
- };
- 
- static int __init reboot_init(void)
-@@ -156,18 +156,18 @@ core_initcall(reboot_init);
-    doesn't work with at least one type of 486 motherboard.  It is easy
-    to stop this code working; hence the copious comments. */
- 
--static unsigned long long
--real_mode_gdt_entries [3] =
-+static struct desc_struct
-+real_mode_gdt_entries [3] __read_only =
- {
--	0x0000000000000000ULL,	/* Null descriptor */
--	0x00009a000000ffffULL,	/* 16-bit real-mode 64k code at 0x00000000 */
--	0x000092000100ffffULL	/* 16-bit real-mode 64k data at 0x00000100 */
-+	{0x00000000, 0x00000000},	/* Null descriptor */
-+	{0x0000ffff, 0x00009b00},	/* 16-bit real-mode 64k code at 0x00000000 */
-+	{0x0100ffff, 0x00009300}	/* 16-bit real-mode 64k data at 0x00000100 */
- };
- 
--static struct Xgt_desc_struct
--real_mode_gdt = { sizeof (real_mode_gdt_entries) - 1, (long)real_mode_gdt_entries },
--real_mode_idt = { 0x3ff, 0 },
--no_idt = { 0, 0 };
-+static const struct Xgt_desc_struct
-+real_mode_gdt = { sizeof (real_mode_gdt_entries) - 1, (struct desc_struct *)__pa(real_mode_gdt_entries), 0 },
-+real_mode_idt = { 0x3ff, NULL, 0 },
-+no_idt = { 0, NULL, 0 };
- 
- 
- /* This is 16-bit protected mode code to disable paging and the cache,
-@@ -189,7 +189,7 @@ no_idt = { 0, 0 };
-    More could be done here to set up the registers as if a CPU reset had
-    occurred; hopefully real BIOSs don't assume much. */
- 
--static unsigned char real_mode_switch [] =
-+static const unsigned char real_mode_switch [] =
- {
- 	0x66, 0x0f, 0x20, 0xc0,			/*    movl  %cr0,%eax        */
- 	0x66, 0x83, 0xe0, 0x11,			/*    andl  $0x00000011,%eax */
-@@ -203,7 +203,7 @@ static unsigned char real_mode_switch []
- 	0x24, 0x10,				/* f: andb  $0x10,al         */
- 	0x66, 0x0f, 0x22, 0xc0			/*    movl  %eax,%cr0        */
- };
--static unsigned char jump_to_bios [] =
-+static const unsigned char jump_to_bios [] =
- {
- 	0xea, 0x00, 0x00, 0xff, 0xff		/*    ljmp  $0xffff,$0x0000  */
- };
-@@ -213,7 +213,7 @@ static unsigned char jump_to_bios [] =
-  * specified by the code and length parameters.
-  * We assume that length will aways be less that 100!
-  */
--void machine_real_restart(unsigned char *code, int length)
-+void machine_real_restart(const unsigned char *code, unsigned int length)
- {
- 	local_irq_disable();
- 
-@@ -234,9 +234,8 @@ void machine_real_restart(unsigned char 
- 	/* Remap the kernel at virtual address zero, as well as offset zero
- 	   from the kernel segment.  This assumes the kernel segment starts at
- 	   virtual address PAGE_OFFSET. */
--
--	memcpy (swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
--		sizeof (swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
-+	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
-+			min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS));
- 
- 	/*
- 	 * Use `swapper_pg_dir' as our page directory.
-@@ -249,7 +248,7 @@ void machine_real_restart(unsigned char 
- 	   REBOOT.COM programs, and the previous reset routine did this
- 	   too. */
- 
--	*((unsigned short *)0x472) = reboot_mode;
-+	*(unsigned short *)(__va(0x472)) = reboot_mode;
- 
- 	/* For the switch to real mode, copy some code to low memory.  It has
- 	   to be in the first 64k because it is running in 16-bit mode, and it
-@@ -257,9 +256,8 @@ void machine_real_restart(unsigned char 
- 	   off paging.  Copy it near the end of the first page, out of the way
- 	   of BIOS variables. */
- 
--	memcpy ((void *) (0x1000 - sizeof (real_mode_switch) - 100),
--		real_mode_switch, sizeof (real_mode_switch));
--	memcpy ((void *) (0x1000 - 100), code, length);
-+	memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
-+	memcpy(__va(0x1000 - 100), code, length);
- 
- 	/* Set up the IDT for real mode. */
- 
-@@ -345,7 +343,7 @@ static void native_machine_emergency_res
- 			__asm__ __volatile__("int3");
- 		}
- 		/* rebooting needs to touch the page at absolute addr 0 */
--		*((unsigned short *)__va(0x472)) = reboot_mode;
-+		*(unsigned short *)(__va(0x472)) = reboot_mode;
- 		for (;;) {
- 			mach_reboot_fixups(); /* for board specific fixups */
- 			mach_reboot();
---- a/arch/i386/kernel/setup.c
-+++ b/arch/i386/kernel/setup.c
-@@ -82,7 +82,11 @@ struct cpuinfo_x86 new_cpu_data __cpuini
- struct cpuinfo_x86 boot_cpu_data __read_mostly = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
- EXPORT_SYMBOL(boot_cpu_data);
- 
-+#ifdef CONFIG_X86_PAE
-+unsigned long mmu_cr4_features = X86_CR4_PAE;
-+#else
- unsigned long mmu_cr4_features;
-+#endif
- 
- /* for MCA, but anyone else can use it if they want */
- unsigned int machine_id;
-@@ -395,8 +399,8 @@ void __init setup_bootmem_allocator(void
- 	 * the (very unlikely) case of us accidentally initializing the
- 	 * bootmem allocator with an invalid RAM area.
- 	 */
--	reserve_bootmem(__pa_symbol(_text), (PFN_PHYS(min_low_pfn) +
--			 bootmap_size + PAGE_SIZE-1) - __pa_symbol(_text));
-+	reserve_bootmem(LOAD_PHYSICAL_ADDR, (PFN_PHYS(min_low_pfn) +
-+			 bootmap_size + PAGE_SIZE-1) - LOAD_PHYSICAL_ADDR);
- 
- 	/*
- 	 * reserve physical page 0 - it's a special BIOS page on many boxes,
-@@ -549,14 +553,14 @@ void __init setup_arch(char **cmdline_p)
- 
- 	if (!MOUNT_ROOT_RDONLY)
- 		root_mountflags &= ~MS_RDONLY;
--	init_mm.start_code = (unsigned long) _text;
--	init_mm.end_code = (unsigned long) _etext;
-+	init_mm.start_code = (unsigned long) _text + __KERNEL_TEXT_OFFSET;
-+	init_mm.end_code = (unsigned long) _etext + __KERNEL_TEXT_OFFSET;
- 	init_mm.end_data = (unsigned long) _edata;
- 	init_mm.brk = init_pg_tables_end + PAGE_OFFSET;
- 
--	code_resource.start = virt_to_phys(_text);
--	code_resource.end = virt_to_phys(_etext)-1;
--	data_resource.start = virt_to_phys(_etext);
-+	code_resource.start = virt_to_phys(_text + __KERNEL_TEXT_OFFSET);
-+	code_resource.end = virt_to_phys(_etext + __KERNEL_TEXT_OFFSET)-1;
-+	data_resource.start = virt_to_phys(_data);
- 	data_resource.end = virt_to_phys(_edata)-1;
- 
- 	parse_early_param();
-@@ -651,3 +655,24 @@ void __init setup_arch(char **cmdline_p)
- #endif
- #endif
- }
-+
-+unsigned long __per_cpu_offset[NR_CPUS] __read_only;
-+
-+EXPORT_SYMBOL(__per_cpu_offset);
-+
-+void __init setup_per_cpu_areas(void)
-+{
-+	unsigned long size, i;
-+	char *ptr;
-+	unsigned long nr_possible_cpus = num_possible_cpus();
-+
-+	/* Copy section for each CPU (we discard the original) */
-+	size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
-+	ptr = alloc_bootmem_pages(size * nr_possible_cpus);
-+
-+	for_each_possible_cpu(i) {
-+		__per_cpu_offset[i] = (unsigned long)ptr;
-+		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
-+		ptr += size;
-+	}
-+}
---- a/arch/i386/kernel/signal.c
-+++ b/arch/i386/kernel/signal.c
-@@ -357,9 +357,9 @@ static int setup_frame(int sig, struct k
- 	}
- 
- 	if (current->binfmt->hasvdso)
--		restorer = (void *)VDSO_SYM(&__kernel_sigreturn);
-+		restorer = (void __user *)VDSO_SYM(&__kernel_sigreturn);
- 	else
--		restorer = (void *)&frame->retcode;
-+		restorer = (void __user *)&frame->retcode;
- 	if (ka->sa.sa_flags & SA_RESTORER)
- 		restorer = ka->sa.sa_restorer;
- 
-@@ -455,7 +455,8 @@ static int setup_rt_frame(int sig, struc
- 		goto give_sigsegv;
- 
- 	/* Set up to return from userspace.  */
--	restorer = (void *)VDSO_SYM(&__kernel_rt_sigreturn);
-+
-+	restorer = (void __user *)VDSO_SYM(&__kernel_rt_sigreturn);
- 	if (ka->sa.sa_flags & SA_RESTORER)
- 		restorer = ka->sa.sa_restorer;
- 	err |= __put_user(restorer, &frame->pretcode);
-@@ -588,7 +589,7 @@ static void fastcall do_signal(struct pt
-  	 * before reaching here, so testing against kernel
-  	 * CS suffices.
- 	 */
--	if (!user_mode(regs))
-+	if (!user_mode_novm(regs))
- 		return;
- 
- 	if (test_thread_flag(TIF_RESTORE_SIGMASK))
---- a/arch/i386/kernel/smpboot.c
-+++ b/arch/i386/kernel/smpboot.c
-@@ -118,7 +118,7 @@ DEFINE_PER_CPU(int, cpu_state) = { 0 };
-  * has made sure it's suitably aligned.
-  */
- 
--static unsigned long __devinit setup_trampoline(void)
-+static unsigned long __cpuinit setup_trampoline(void)
- {
- 	memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data);
- 	return virt_to_phys(trampoline_base);
-@@ -772,6 +772,10 @@ static int __cpuinit do_boot_cpu(int api
- 	unsigned long start_eip;
- 	unsigned short nmi_high = 0, nmi_low = 0;
- 
-+#ifdef CONFIG_PAX_KERNEXEC
-+	unsigned long cr0;
-+#endif
-+
- 	/*
- 	 * Save current MTRR state in case it was changed since early boot
- 	 * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
-@@ -788,7 +792,16 @@ static int __cpuinit do_boot_cpu(int api
- 
- 	init_gdt(cpu);
-  	per_cpu(current_task, cpu) = idle;
--	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_open_kernel(cr0);
-+#endif
-+
-+	early_gdt_descr.address = get_cpu_gdt_table(cpu);
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_close_kernel(cr0);
-+#endif
- 
- 	idle->thread.eip = (unsigned long) start_secondary;
- 	/* start_eip had better be page-aligned! */
---- a/arch/i386/kernel/smp.c
-+++ b/arch/i386/kernel/smp.c
-@@ -104,7 +104,7 @@
-  *	about nothing of note with C stepping upwards.
-  */
- 
--DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0, };
-+DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0, {0} };
- 
- /*
-  * the following functions deal with sending IPIs between CPUs.
---- a/arch/i386/kernel/smpcommon.c
-+++ b/arch/i386/kernel/smpcommon.c
-@@ -3,6 +3,7 @@
-  */
- #include <linux/module.h>
- #include <asm/smp.h>
-+#include <asm/sections.h>
- 
- DEFINE_PER_CPU(unsigned long, this_cpu_off);
- EXPORT_PER_CPU_SYMBOL(this_cpu_off);
-@@ -14,10 +15,29 @@ __cpuinit void init_gdt(int cpu)
- {
- 	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
- 
--	pack_descriptor((u32 *)&gdt[GDT_ENTRY_PERCPU].a,
--			(u32 *)&gdt[GDT_ENTRY_PERCPU].b,
--			__per_cpu_offset[cpu], 0xFFFFF,
--			0x80 | DESCTYPE_S | 0x2, 0x8);
-+#ifdef CONFIG_PAX_KERNEXEC
-+	unsigned long cr0;
-+
-+	pax_open_kernel(cr0);
-+#endif
-+
-+	if (cpu)
-+		memcpy(gdt, cpu_gdt_table, GDT_SIZE);
-+
-+	if (PERCPU_ENOUGH_ROOM <= 64*1024*1024)
-+		pack_descriptor((__u32 *)&gdt[GDT_ENTRY_PERCPU].a,
-+				(__u32 *)&gdt[GDT_ENTRY_PERCPU].b,
-+				__per_cpu_offset[cpu], PERCPU_ENOUGH_ROOM-1,
-+				0x80 | DESCTYPE_S | 0x3, 0x4);
-+	else
-+		pack_descriptor((__u32 *)&gdt[GDT_ENTRY_PERCPU].a,
-+				(__u32 *)&gdt[GDT_ENTRY_PERCPU].b,
-+				__per_cpu_offset[cpu], ((PERCPU_ENOUGH_ROOM-1) >> PAGE_SHIFT),
-+				0x80 | DESCTYPE_S | 0x3, 0xC);
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_close_kernel(cr0);
-+#endif
- 
- 	per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
- 	per_cpu(cpu_number, cpu) = cpu;
---- a/arch/i386/kernel/syscall_table.S
-+++ b/arch/i386/kernel/syscall_table.S
-@@ -1,3 +1,4 @@
-+.section .rodata,"a",@progbits
- ENTRY(sys_call_table)
- 	.long sys_restart_syscall	/* 0 - old "setup()" system call, used for restarting */
- 	.long sys_exit
---- a/arch/i386/kernel/sysenter.c
-+++ b/arch/i386/kernel/sysenter.c
-@@ -177,7 +177,7 @@ static __init void relocate_vdso(Elf32_E
- void enable_sep_cpu(void)
- {
- 	int cpu = get_cpu();
--	struct tss_struct *tss = &per_cpu(init_tss, cpu);
-+	struct tss_struct *tss = init_tss + cpu;
- 
- 	if (!boot_cpu_has(X86_FEATURE_SEP)) {
- 		put_cpu();
-@@ -200,7 +200,7 @@ static int __init gate_vma_init(void)
- 	gate_vma.vm_start = FIXADDR_USER_START;
- 	gate_vma.vm_end = FIXADDR_USER_END;
- 	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
--	gate_vma.vm_page_prot = __P101;
-+	gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
- 	/*
- 	 * Make sure the vDSO gets into every core dump.
- 	 * Dumping its contents makes post-mortem fully interpretable later
-@@ -283,7 +283,7 @@ int arch_setup_additional_pages(struct l
- 	if (compat)
- 		addr = VDSO_HIGH_BASE;
- 	else {
--		addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
-+		addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
- 		if (IS_ERR_VALUE(addr)) {
- 			ret = addr;
- 			goto up_fail;
-@@ -308,7 +308,7 @@ int arch_setup_additional_pages(struct l
- 			goto up_fail;
- 	}
- 
--	current->mm->context.vdso = (void *)addr;
-+	current->mm->context.vdso = addr;
- 	current_thread_info()->sysenter_return =
- 		(void *)VDSO_SYM(&SYSENTER_RETURN);
- 
-@@ -320,8 +320,14 @@ int arch_setup_additional_pages(struct l
- 
- const char *arch_vma_name(struct vm_area_struct *vma)
- {
--	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
-+	if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
- 		return "[vdso]";
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
-+		return "[vdso]";
-+#endif
-+
- 	return NULL;
- }
- 
-@@ -330,7 +336,7 @@ struct vm_area_struct *get_gate_vma(stru
- 	struct mm_struct *mm = tsk->mm;
- 
- 	/* Check to see if this task was created in compat vdso mode */
--	if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
-+	if (mm && mm->context.vdso == VDSO_HIGH_BASE)
- 		return &gate_vma;
- 	return NULL;
- }
---- a/arch/i386/kernel/sys_i386.c
-+++ b/arch/i386/kernel/sys_i386.c
-@@ -41,6 +41,21 @@ asmlinkage int sys_pipe(unsigned long __
- 	return error;
- }
- 
-+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
-+{
-+	unsigned long task_size = TASK_SIZE;
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
-+		task_size = SEGMEXEC_TASK_SIZE;
-+#endif
-+
-+	if (len > task_size || addr > task_size - len)
-+		return -EINVAL;
-+
-+	return 0;
-+}
-+
- asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
- 			  unsigned long prot, unsigned long flags,
- 			  unsigned long fd, unsigned long pgoff)
-@@ -100,6 +115,205 @@ out:
- 	return err;
- }
- 
-+unsigned long
-+arch_get_unmapped_area(struct file *filp, unsigned long addr,
-+		unsigned long len, unsigned long pgoff, unsigned long flags)
-+{
-+	struct mm_struct *mm = current->mm;
-+	struct vm_area_struct *vma;
-+	unsigned long start_addr, task_size = TASK_SIZE;
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if (mm->pax_flags & MF_PAX_SEGMEXEC)
-+		task_size = SEGMEXEC_TASK_SIZE;
-+#endif
-+
-+	if (len > task_size)
-+		return -ENOMEM;
-+
-+	if (flags & MAP_FIXED)
-+		return addr;
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+	if (!(mm->pax_flags & MF_PAX_RANDMMAP) || !filp)
-+#endif
-+
-+	if (addr) {
-+		addr = PAGE_ALIGN(addr);
-+		vma = find_vma(mm, addr);
-+		if (task_size - len >= addr &&
-+		    (!vma || addr + len <= vma->vm_start))
-+			return addr;
-+	}
-+	if (len > mm->cached_hole_size) {
-+		start_addr = addr = mm->free_area_cache;
-+	} else {
-+		start_addr = addr = mm->mmap_base;
-+		mm->cached_hole_size = 0;
-+	}
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+	if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
-+		start_addr = 0x00110000UL;
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+		if (mm->pax_flags & MF_PAX_RANDMMAP)
-+			start_addr += mm->delta_mmap & 0x03FFF000UL;
-+#endif
-+
-+		if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
-+			start_addr = addr = mm->mmap_base;
-+		else
-+			addr = start_addr;
-+	}
-+#endif
-+
-+full_search:
-+	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-+		/* At this point:  (!vma || addr < vma->vm_end). */
-+		if (task_size - len < addr) {
-+			/*
-+			 * Start a new search - just in case we missed
-+			 * some holes.
-+			 */
-+			if (start_addr != mm->mmap_base) {
-+				start_addr = addr = mm->mmap_base;
-+				mm->cached_hole_size = 0;
-+				goto full_search;
-+			}
-+			return -ENOMEM;
-+		}
-+		if (!vma || addr + len <= vma->vm_start) {
-+			/*
-+			 * Remember the place where we stopped the search:
-+			 */
-+			mm->free_area_cache = addr + len;
-+			return addr;
-+		}
-+		if (addr + mm->cached_hole_size < vma->vm_start)
-+			mm->cached_hole_size = vma->vm_start - addr;
-+		addr = vma->vm_end;
-+		if (mm->start_brk <= addr && addr < mm->mmap_base) {
-+			start_addr = addr = mm->mmap_base;
-+			mm->cached_hole_size = 0;
-+			goto full_search;
-+		}
-+	}
-+}
-+
-+unsigned long
-+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
-+			  const unsigned long len, const unsigned long pgoff,
-+			  const unsigned long flags)
-+{
-+	struct vm_area_struct *vma;
-+	struct mm_struct *mm = current->mm;
-+	unsigned long base = mm->mmap_base, addr = addr0, task_size = TASK_SIZE;
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if (mm->pax_flags & MF_PAX_SEGMEXEC)
-+		task_size = SEGMEXEC_TASK_SIZE;
-+#endif
-+
-+	/* requested length too big for entire address space */
-+	if (len > task_size)
-+		return -ENOMEM;
-+
-+	if (flags & MAP_FIXED)
-+		return addr;
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+	if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
-+		goto bottomup;
-+#endif
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+	if (!(mm->pax_flags & MF_PAX_RANDMMAP) || !filp)
-+#endif
-+
-+	/* requesting a specific address */
-+	if (addr) {
-+		addr = PAGE_ALIGN(addr);
-+		vma = find_vma(mm, addr);
-+		if (task_size - len >= addr &&
-+				(!vma || addr + len <= vma->vm_start))
-+			return addr;
-+	}
-+
-+	/* check if free_area_cache is useful for us */
-+	if (len <= mm->cached_hole_size) {
-+		mm->cached_hole_size = 0;
-+		mm->free_area_cache = mm->mmap_base;
-+	}
-+
-+	/* either no address requested or can't fit in requested address hole */
-+	addr = mm->free_area_cache;
-+
-+	/* make sure it can fit in the remaining address space */
-+	if (addr > len) {
-+		vma = find_vma(mm, addr-len);
-+		if (!vma || addr <= vma->vm_start)
-+			/* remember the address as a hint for next time */
-+			return (mm->free_area_cache = addr-len);
-+	}
-+
-+	if (mm->mmap_base < len)
-+		goto bottomup;
-+
-+	addr = mm->mmap_base-len;
-+
-+	do {
-+		/*
-+		 * Lookup failure means no vma is above this address,
-+		 * else if new region fits below vma->vm_start,
-+		 * return with success:
-+		 */
-+		vma = find_vma(mm, addr);
-+		if (!vma || addr+len <= vma->vm_start)
-+			/* remember the address as a hint for next time */
-+			return (mm->free_area_cache = addr);
-+
-+		/* remember the largest hole we saw so far */
-+		if (addr + mm->cached_hole_size < vma->vm_start)
-+			mm->cached_hole_size = vma->vm_start - addr;
-+
-+		/* try just below the current vma->vm_start */
-+		addr = vma->vm_start-len;
-+	} while (len < vma->vm_start);
-+
-+bottomup:
-+	/*
-+	 * A failed mmap() very likely causes application failure,
-+	 * so fall back to the bottom-up function here. This scenario
-+	 * can happen with large stack limits and large mmap()
-+	 * allocations.
-+	 */
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if (mm->pax_flags & MF_PAX_SEGMEXEC)
-+		mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
-+	else
-+#endif
-+
-+	mm->mmap_base = TASK_UNMAPPED_BASE;
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+	if (mm->pax_flags & MF_PAX_RANDMMAP)
-+		mm->mmap_base += mm->delta_mmap;
-+#endif
-+
-+	mm->free_area_cache = mm->mmap_base;
-+	mm->cached_hole_size = ~0UL;
-+	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-+	/*
-+	 * Restore the topdown base:
-+	 */
-+	mm->mmap_base = base;
-+	mm->free_area_cache = base;
-+	mm->cached_hole_size = ~0UL;
-+
-+	return addr;
-+}
- 
- struct sel_arg_struct {
- 	unsigned long n;
---- a/arch/i386/kernel/time.c
-+++ b/arch/i386/kernel/time.c
-@@ -132,20 +132,30 @@ unsigned long profile_pc(struct pt_regs 
- 	if (!v8086_mode(regs) && SEGMENT_IS_KERNEL_CODE(regs->xcs) &&
- 	    in_lock_functions(pc)) {
- #ifdef CONFIG_FRAME_POINTER
--		return *(unsigned long *)(regs->ebp + 4);
-+		return *(unsigned long *)(regs->ebp + 4) + __KERNEL_TEXT_OFFSET;
- #else
- 		unsigned long *sp = (unsigned long *)&regs->esp;
- 
- 		/* Return address is either directly at stack pointer
- 		   or above a saved eflags. Eflags has bits 22-31 zero,
- 		   kernel addresses don't. */
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+		return sp[0] + __KERNEL_TEXT_OFFSET;
-+#else
-  		if (sp[0] >> 22)
- 			return sp[0];
- 		if (sp[1] >> 22)
- 			return sp[1];
- #endif
-+
-+#endif
- 	}
- #endif
-+
-+	if (!v8086_mode(regs) && SEGMENT_IS_KERNEL_CODE(regs->xcs))
-+		pc += __KERNEL_TEXT_OFFSET;
-+
- 	return pc;
- }
- EXPORT_SYMBOL(profile_pc);
---- a/arch/i386/kernel/traps.c
-+++ b/arch/i386/kernel/traps.c
-@@ -31,6 +31,7 @@
- #include <linux/uaccess.h>
- #include <linux/nmi.h>
- #include <linux/bug.h>
-+#include <linux/binfmts.h>
- 
- #ifdef CONFIG_EISA
- #include <linux/ioport.h>
-@@ -70,12 +71,7 @@ asmlinkage int system_call(void);
- /* Do we ignore FPU interrupts ? */
- char ignore_fpu_irq = 0;
- 
--/*
-- * The IDT has to be page-aligned to simplify the Pentium
-- * F0 0F bug workaround.. We have a special link segment
-- * for this.
-- */
--struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };
-+extern struct desc_struct idt_table[256];
- 
- asmlinkage void divide_error(void);
- asmlinkage void debug(void);
-@@ -297,7 +293,7 @@ void show_registers(struct pt_regs *regs
- 	esp = (unsigned long) (&regs->esp);
- 	savesegment(ss, ss);
- 	savesegment(gs, gs);
--	if (user_mode_vm(regs)) {
-+	if (user_mode(regs)) {
- 		in_kernel = 0;
- 		esp = regs->esp;
- 		ss = regs->xss & 0xffff;
-@@ -329,17 +325,18 @@ void show_registers(struct pt_regs *regs
- 		unsigned int code_prologue = code_bytes * 43 / 64;
- 		unsigned int code_len = code_bytes;
- 		unsigned char c;
-+		unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->xcs) >> 3]);
- 
- 		printk("\n" KERN_EMERG "Stack: ");
- 		show_stack_log_lvl(NULL, regs, (unsigned long *)esp, KERN_EMERG);
- 
- 		printk(KERN_EMERG "Code: ");
- 
--		eip = (u8 *)regs->eip - code_prologue;
-+		eip = (u8 *)regs->eip - code_prologue + cs_base;
- 		if (eip < (u8 *)PAGE_OFFSET ||
- 			probe_kernel_address(eip, c)) {
- 			/* try starting at EIP */
--			eip = (u8 *)regs->eip;
-+			eip = (u8 *)regs->eip + cs_base;
- 			code_len = code_len - code_prologue + 1;
- 		}
- 		for (i = 0; i < code_len; i++, eip++) {
-@@ -348,7 +345,7 @@ void show_registers(struct pt_regs *regs
- 				printk(" Bad EIP value.");
- 				break;
- 			}
--			if (eip == (u8 *)regs->eip)
-+			if (eip == (u8 *)regs->eip + cs_base)
- 				printk("<%02x> ", c);
- 			else
- 				printk("%02x ", c);
-@@ -361,6 +358,7 @@ int is_valid_bugaddr(unsigned long eip)
- {
- 	unsigned short ud2;
- 
-+	eip += __KERNEL_TEXT_OFFSET;
- 	if (eip < PAGE_OFFSET)
- 		return 0;
- 	if (probe_kernel_address((unsigned short *)eip, ud2))
-@@ -468,7 +466,7 @@ void die(const char * str, struct pt_reg
- 
- static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
- {
--	if (!user_mode_vm(regs))
-+	if (!user_mode(regs))
- 		die(str, regs, err);
- }
- 
-@@ -484,7 +482,7 @@ static void __kprobes do_trap(int trapnr
- 		goto trap_signal;
- 	}
- 
--	if (!user_mode(regs))
-+	if (!user_mode_novm(regs))
- 		goto kernel_trap;
- 
- 	trap_signal: {
-@@ -589,7 +587,7 @@ fastcall void __kprobes do_general_prote
- 					      long error_code)
- {
- 	int cpu = get_cpu();
--	struct tss_struct *tss = &per_cpu(init_tss, cpu);
-+	struct tss_struct *tss = &init_tss[cpu];
- 	struct thread_struct *thread = &current->thread;
- 
- 	/*
-@@ -622,9 +620,25 @@ fastcall void __kprobes do_general_prote
- 	if (regs->eflags & VM_MASK)
- 		goto gp_in_vm86;
- 
--	if (!user_mode(regs))
-+	if (!user_mode_novm(regs))
- 		goto gp_in_kernel;
- 
-+#ifdef CONFIG_PAX_PAGEEXEC
-+	if (!nx_enabled && current->mm && (current->mm->pax_flags & MF_PAX_PAGEEXEC)) {
-+		struct mm_struct *mm = current->mm;
-+		unsigned long limit;
-+
-+		down_write(&mm->mmap_sem);
-+		limit = mm->context.user_cs_limit;
-+		if (limit < TASK_SIZE) {
-+			track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
-+			up_write(&mm->mmap_sem);
-+			return;
-+		}
-+		up_write(&mm->mmap_sem);
-+	}
-+#endif
-+
- 	current->thread.error_code = error_code;
- 	current->thread.trap_no = 13;
- 	if (show_unhandled_signals && unhandled_signal(current, SIGSEGV) &&
-@@ -649,6 +663,13 @@ gp_in_kernel:
- 		if (notify_die(DIE_GPF, "general protection fault", regs,
- 				error_code, 13, SIGSEGV) == NOTIFY_STOP)
- 			return;
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+		if ((regs->xcs & 0xFFFF) == __KERNEL_CS)
-+			die("PAX: suspicious general protection fault", regs, error_code);
-+		else
-+#endif
-+
- 		die("general protection fault", regs, error_code);
- 	}
- }
-@@ -738,7 +759,7 @@ void __kprobes die_nmi(struct pt_regs *r
- 	/* If we are in kernel we are probably nested up pretty bad
- 	 * and might aswell get out now while we still can.
- 	*/
--	if (!user_mode_vm(regs)) {
-+	if (!user_mode(regs)) {
- 		current->thread.trap_no = 2;
- 		crash_kexec(regs);
- 	}
-@@ -885,7 +906,7 @@ fastcall void __kprobes do_debug(struct 
- 		 * check for kernel mode by just checking the CPL
- 		 * of CS.
- 		 */
--		if (!user_mode(regs))
-+		if (!user_mode_novm(regs))
- 			goto clear_TF_reenable;
- 	}
- 
-@@ -1063,18 +1084,14 @@ fastcall void do_spurious_interrupt_bug(
- fastcall unsigned long patch_espfix_desc(unsigned long uesp,
- 					  unsigned long kesp)
- {
--	struct desc_struct *gdt = __get_cpu_var(gdt_page).gdt;
- 	unsigned long base = (kesp - uesp) & -THREAD_SIZE;
- 	unsigned long new_kesp = kesp - base;
- 	unsigned long lim_pages = (new_kesp | (THREAD_SIZE - 1)) >> PAGE_SHIFT;
--	__u64 desc = *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS];
-+	__u32 a, b;
-+
- 	/* Set up base for espfix segment */
-- 	desc &= 0x00f0ff0000000000ULL;
-- 	desc |=	((((__u64)base) << 16) & 0x000000ffffff0000ULL) |
--		((((__u64)base) << 32) & 0xff00000000000000ULL) |
--		((((__u64)lim_pages) << 32) & 0x000f000000000000ULL) |
--		(lim_pages & 0xffff);
--	*(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS] = desc;
-+	pack_descriptor(&a, &b, base, lim_pages, 0x93, 0xC);
-+	write_gdt_entry(get_cpu_gdt_table(smp_processor_id()), GDT_ENTRY_ESPFIX_SS, a, b);
- 	return new_kesp;
- }
- 
-@@ -1123,7 +1140,7 @@ void __init trap_init_f00f_bug(void)
- 	 * Update the IDT descriptor and reload the IDT so that
- 	 * it uses the read-only mapped virtual address.
- 	 */
--	idt_descr.address = fix_to_virt(FIX_F00F_IDT);
-+	idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
- 	load_idt(&idt_descr);
- }
- #endif
---- a/arch/i386/kernel/tsc.c
-+++ b/arch/i386/kernel/tsc.c
-@@ -322,7 +322,7 @@ static struct dmi_system_id __initdata b
- 		     DMI_MATCH(DMI_BOARD_NAME, "2635FA0"),
- 		     },
- 	 },
--	 {}
-+	{ NULL, NULL, {{0, NULL}}, NULL}
- };
- 
- /*
---- a/arch/i386/kernel/vm86.c
-+++ b/arch/i386/kernel/vm86.c
-@@ -148,7 +148,7 @@ struct pt_regs * fastcall save_v86_state
- 		do_exit(SIGSEGV);
- 	}
- 
--	tss = &per_cpu(init_tss, get_cpu());
-+	tss = init_tss + get_cpu();
- 	current->thread.esp0 = current->thread.saved_esp0;
- 	current->thread.sysenter_cs = __KERNEL_CS;
- 	load_esp0(tss, &current->thread);
-@@ -324,7 +324,7 @@ static void do_sys_vm86(struct kernel_vm
- 	tsk->thread.saved_fs = info->regs32->xfs;
- 	savesegment(gs, tsk->thread.saved_gs);
- 
--	tss = &per_cpu(init_tss, get_cpu());
-+	tss = init_tss + get_cpu();
- 	tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
- 	if (cpu_has_sep)
- 		tsk->thread.sysenter_cs = 0;
---- a/arch/i386/kernel/vmi.c
-+++ b/arch/i386/kernel/vmi.c
-@@ -98,18 +98,43 @@ static unsigned patch_internal(int call,
- {
- 	u64 reloc;
- 	struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	unsigned long cr0;
-+#endif
-+
- 	reloc = call_vrom_long_func(vmi_rom, get_reloc,	call);
- 	switch(rel->type) {
- 		case VMI_RELOCATION_CALL_REL:
- 			BUG_ON(len < 5);
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+			pax_open_kernel(cr0);
-+#endif
-+
- 			*(char *)insnbuf = MNEM_CALL;
- 			patch_offset(insnbuf, eip, (unsigned long)rel->eip);
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+			pax_close_kernel(cr0);
-+#endif
-+
- 			return 5;
- 
- 		case VMI_RELOCATION_JUMP_REL:
- 			BUG_ON(len < 5);
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+			pax_open_kernel(cr0);
-+#endif
-+
- 			*(char *)insnbuf = MNEM_JMP;
- 			patch_offset(insnbuf, eip, (unsigned long)rel->eip);
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+			pax_close_kernel(cr0);
-+#endif
-+
- 			return 5;
- 
- 		case VMI_RELOCATION_NOP:
-@@ -492,14 +517,14 @@ static void vmi_set_pud(pud_t *pudp, pud
- 
- static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
- {
--	const pte_t pte = { 0 };
-+	const pte_t pte = __pte(0ULL);
- 	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
- 	vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
- }
- 
- static void vmi_pmd_clear(pmd_t *pmd)
- {
--	const pte_t pte = { 0 };
-+	const pte_t pte = __pte(0ULL);
- 	vmi_check_page_type(__pa(pmd) >> PAGE_SHIFT, VMI_PAGE_PMD);
- 	vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
- }
-@@ -528,8 +553,8 @@ vmi_startup_ipi_hook(int phys_apicid, un
- 	ap.ss = __KERNEL_DS;
- 	ap.esp = (unsigned long) start_esp;
- 
--	ap.ds = __USER_DS;
--	ap.es = __USER_DS;
-+	ap.ds = __KERNEL_DS;
-+	ap.es = __KERNEL_DS;
- 	ap.fs = __KERNEL_PERCPU;
- 	ap.gs = 0;
- 
-@@ -726,12 +751,20 @@ static inline int __init activate_vmi(vo
- 	u64 reloc;
- 	const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;
- 
-+#ifdef CONFIG_PAX_KERNEXEC
-+	unsigned long cr0;
-+#endif
-+
- 	if (call_vrom_func(vmi_rom, vmi_init) != 0) {
- 		printk(KERN_ERR "VMI ROM failed to initialize!");
- 		return 0;
- 	}
- 	savesegment(cs, kernel_cs);
- 
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_open_kernel(cr0);
-+#endif
-+
- 	paravirt_ops.paravirt_enabled = 1;
- 	paravirt_ops.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK;
- 
-@@ -910,6 +943,10 @@ static inline int __init activate_vmi(vo
- 
- 	para_fill(safe_halt, Halt);
- 
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_close_kernel(cr0);
-+#endif
-+
- 	/*
- 	 * Alternative instruction rewriting doesn't happen soon enough
- 	 * to convert VMI_IRET to a call instead of a jump; so we have
---- a/arch/i386/kernel/vmlinux.lds.S
-+++ b/arch/i386/kernel/vmlinux.lds.S
-@@ -21,6 +21,13 @@
- #include <asm/page.h>
- #include <asm/cache.h>
- #include <asm/boot.h>
-+#include <asm/segment.h>
-+
-+#ifdef CONFIG_X86_PAE
-+#define PMD_SHIFT 21
-+#else
-+#define PMD_SHIFT 22
-+#endif
- 
- OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
- OUTPUT_ARCH(i386)
-@@ -28,22 +35,124 @@ ENTRY(phys_startup_32)
- jiffies = jiffies_64;
- 
- PHDRS {
--	text PT_LOAD FLAGS(5);	/* R_E */
--	data PT_LOAD FLAGS(7);	/* RWE */
--	note PT_NOTE FLAGS(0);	/* ___ */
-+	initdata PT_LOAD FLAGS(6);	/* RW_ */
-+	percpu   PT_LOAD FLAGS(6);	/* RW_ */
-+	inittext PT_LOAD FLAGS(5);	/* R_E */
-+	text     PT_LOAD FLAGS(5);	/* R_E */
-+	rodata   PT_LOAD FLAGS(4);	/* R__ */
-+	data     PT_LOAD FLAGS(6);	/* RW_ */
-+	note     PT_NOTE FLAGS(0);	/* ___ */
- }
- SECTIONS
- {
-   . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
--  phys_startup_32 = startup_32 - LOAD_OFFSET;
- 
--  .text.head : AT(ADDR(.text.head) - LOAD_OFFSET) {
--  	_text = .;			/* Text and read-only data */
-+  .text.startup : AT(ADDR(.text.startup) - LOAD_OFFSET) {
-+	phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
-+	*(.text.startup)
-+  } :initdata
-+
-+  /* might get freed after init */
-+  . = ALIGN(4096);
-+  .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
-+  	__smp_locks = .;
-+	*(.smp_locks)
-+	__smp_locks_end = .;
-+  }
-+  /* will be freed after init
-+   * Following ALIGN() is required to make sure no other data falls on the
-+   * same page where __smp_alt_end is pointing as that page might be freed
-+   * after boot. Always make sure that ALIGN() directive is present after
-+   * the section which contains __smp_alt_end.
-+   */
-+  . = ALIGN(4096);
-+
-+  /* will be freed after init */
-+  .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
-+	__init_begin = .;
-+	*(.init.data)
-+  }
-+  . = ALIGN(16);
-+  .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
-+  	__setup_start = .;
-+	*(.init.setup)
-+  	__setup_end = .;
-+   }
-+  .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
-+  	__initcall_start = .;
-+	INITCALLS
-+  	__initcall_end = .;
-+  }
-+  .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
-+  	__con_initcall_start = .;
-+	*(.con_initcall.init)
-+  	__con_initcall_end = .;
-+  }
-+  SECURITY_INIT
-+  . = ALIGN(4);
-+  .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
-+  	__alt_instructions = .;
-+	*(.altinstructions)
-+	__alt_instructions_end = .;
-+  }
-+  .altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
-+	*(.altinstr_replacement)
-+  }
-+  . = ALIGN(4);
-+  .parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
-+  	__parainstructions = .;
-+	*(.parainstructions)
-+  	__parainstructions_end = .;
-+  }
-+  .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { *(.exit.data) }
-+#if defined(CONFIG_BLK_DEV_INITRD)
-+  . = ALIGN(4096);
-+  .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
-+	__initramfs_start = .;
-+	*(.init.ramfs)
-+	__initramfs_end = .;
-+  }
-+#endif
-+  . = ALIGN(4096);
-+  per_cpu_start = .;
-+  .data.percpu (0) : AT(ADDR(.data.percpu) - LOAD_OFFSET + per_cpu_start) {
-+	__per_cpu_start = . + per_cpu_start;
-+	LONG(0)
-+	*(.data.percpu)
-+	*(.data.percpu.shared_aligned)
-+	__per_cpu_end = . + per_cpu_start;
-+  } :percpu
-+  . += per_cpu_start;
-+
-+  /* read-only */
-+
-+  . = ALIGN(4096);		/* Init code and data */
-+  .init.text (. - __KERNEL_TEXT_OFFSET) : AT(ADDR(.init.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
-+	_sinittext = .;
-+	*(.init.text)
-+	_einittext = .;
-+  } :inittext
-+
-+  /* .exit.text is discard at runtime, not link time, to deal with references
-+     from .altinstructions and .eh_frame */
-+  .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) { *(.exit.text) }
-+
-+  .filler : AT(ADDR(.filler) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
-+	BYTE(0)
-+	. = ALIGN(4*1024*1024) - 1;
-+  }
-+
-+  /* freed after init ends here */
-+
-+  .text.head : AT(ADDR(.text.head) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
-+	__init_end = . + __KERNEL_TEXT_OFFSET;
-+	KERNEL_TEXT_OFFSET = . + __KERNEL_TEXT_OFFSET;
-+	_text = .;			/* Text and read-only data */
- 	*(.text.head)
-   } :text = 0x9090
- 
-   /* read-only */
--  .text : AT(ADDR(.text) - LOAD_OFFSET) {
-+  .text : AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
- 	TEXT_TEXT
- 	SCHED_TEXT
- 	LOCK_TEXT
-@@ -53,16 +162,17 @@ SECTIONS
-   	_etext = .;			/* End of text section */
-   } :text = 0x9090
- 
--  . = ALIGN(16);		/* Exception table */
-+  . += __KERNEL_TEXT_OFFSET;
-+  . = ALIGN(4096);		/* Exception table */
-   __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
-   	__start___ex_table = .;
- 	 *(__ex_table)
-   	__stop___ex_table = .;
--  }
-+  } :rodata
- 
--  NOTES :text :note
-+  NOTES :rodata :note
- 
--  BUG_TABLE :text
-+  BUG_TABLE :rodata
- 
-   . = ALIGN(4);
-   .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {
-@@ -73,9 +183,36 @@ SECTIONS
- 
-   RODATA
- 
-+  . = ALIGN(4096);
-+  .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
-+	*(.idt)
-+	. = ALIGN(4096);
-+	*(.empty_zero_page)
-+	*(.swapper_pm_dir)
-+	*(.swapper_pg_dir)
-+	}
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+
-+#ifdef CONFIG_MODULES
-+  . = ALIGN(4096);
-+  .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
-+	MODULES_VADDR = .;
-+	BYTE(0)
-+	. += (6 * 1024 * 1024);
-+	. = ALIGN(1 << PMD_SHIFT) - 1;
-+	MODULES_END = .;
-+  }
-+#else
-+  . = ALIGN(1 << PMD_SHIFT) - 1;
-+#endif
-+
-+#endif
-+
-   /* writeable */
-   . = ALIGN(4096);
-   .data : AT(ADDR(.data) - LOAD_OFFSET) {	/* Data */
-+	_data = .;
- 	DATA_DATA
- 	CONSTRUCTORS
- 	} :data
-@@ -91,7 +228,6 @@ SECTIONS
-   . = ALIGN(4096);
-   .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
- 	*(.data.page_aligned)
--	*(.data.idt)
-   }
- 
-   . = ALIGN(32);
-@@ -111,86 +247,7 @@ SECTIONS
- 	*(.data.init_task)
-   }
- 
--  /* might get freed after init */
--  . = ALIGN(4096);
--  .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
--  	__smp_locks = .;
--	*(.smp_locks)
--	__smp_locks_end = .;
--  }
--  /* will be freed after init
--   * Following ALIGN() is required to make sure no other data falls on the
--   * same page where __smp_alt_end is pointing as that page might be freed
--   * after boot. Always make sure that ALIGN() directive is present after
--   * the section which contains __smp_alt_end.
--   */
--  . = ALIGN(4096);
--
--  /* will be freed after init */
--  . = ALIGN(4096);		/* Init code and data */
--  .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
--  	__init_begin = .;
--	_sinittext = .;
--	*(.init.text)
--	_einittext = .;
--  }
--  .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { *(.init.data) }
--  . = ALIGN(16);
--  .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
--  	__setup_start = .;
--	*(.init.setup)
--  	__setup_end = .;
--   }
--  .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
--  	__initcall_start = .;
--	INITCALLS
--  	__initcall_end = .;
--  }
--  .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
--  	__con_initcall_start = .;
--	*(.con_initcall.init)
--  	__con_initcall_end = .;
--  }
--  SECURITY_INIT
--  . = ALIGN(4);
--  .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
--  	__alt_instructions = .;
--	*(.altinstructions)
--	__alt_instructions_end = .;
--  }
--  .altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
--	*(.altinstr_replacement)
--  }
--  . = ALIGN(4);
--  .parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
--  	__parainstructions = .;
--	*(.parainstructions)
--  	__parainstructions_end = .;
--  }
--  /* .exit.text is discard at runtime, not link time, to deal with references
--     from .altinstructions and .eh_frame */
--  .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { *(.exit.text) }
--  .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { *(.exit.data) }
--#if defined(CONFIG_BLK_DEV_INITRD)
--  . = ALIGN(4096);
--  .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
--	__initramfs_start = .;
--	*(.init.ramfs)
--	__initramfs_end = .;
--  }
--#endif
--  . = ALIGN(4096);
--  .data.percpu  : AT(ADDR(.data.percpu) - LOAD_OFFSET) {
--	__per_cpu_start = .;
--	*(.data.percpu)
--	*(.data.percpu.shared_aligned)
--	__per_cpu_end = .;
--  }
--  . = ALIGN(4096);
--  /* freed after init ends here */
--	
-   .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
--	__init_end = .;
- 	__bss_start = .;		/* BSS */
- 	*(.bss.page_aligned)
- 	*(.bss)
---- a/arch/i386/lib/checksum.S
-+++ b/arch/i386/lib/checksum.S
-@@ -28,7 +28,8 @@
- #include <linux/linkage.h>
- #include <asm/dwarf2.h>
- #include <asm/errno.h>
--				
-+#include <asm/segment.h>
-+
- /*
-  * computes a partial checksum, e.g. for TCP/UDP fragments
-  */
-@@ -304,9 +305,22 @@ unsigned int csum_partial_copy_generic (
- 
- #define ARGBASE 16		
- #define FP		12
--		
--ENTRY(csum_partial_copy_generic)
-+
-+ENTRY(csum_partial_copy_generic_to_user)
- 	CFI_STARTPROC
-+	pushl $(__USER_DS)
-+	CFI_ADJUST_CFA_OFFSET 4
-+	popl %es
-+	CFI_ADJUST_CFA_OFFSET -4
-+	jmp csum_partial_copy_generic
-+
-+ENTRY(csum_partial_copy_generic_from_user)
-+	pushl $(__USER_DS)
-+	CFI_ADJUST_CFA_OFFSET 4
-+	popl %ds
-+	CFI_ADJUST_CFA_OFFSET -4
-+
-+ENTRY(csum_partial_copy_generic)
- 	subl  $4,%esp	
- 	CFI_ADJUST_CFA_OFFSET 4
- 	pushl %edi
-@@ -331,7 +345,7 @@ ENTRY(csum_partial_copy_generic)
- 	jmp 4f
- SRC(1:	movw (%esi), %bx	)
- 	addl $2, %esi
--DST(	movw %bx, (%edi)	)
-+DST(	movw %bx, %es:(%edi)	)
- 	addl $2, %edi
- 	addw %bx, %ax	
- 	adcl $0, %eax
-@@ -343,30 +357,30 @@ DST(	movw %bx, (%edi)	)
- SRC(1:	movl (%esi), %ebx	)
- SRC(	movl 4(%esi), %edx	)
- 	adcl %ebx, %eax
--DST(	movl %ebx, (%edi)	)
-+DST(	movl %ebx, %es:(%edi)	)
- 	adcl %edx, %eax
--DST(	movl %edx, 4(%edi)	)
-+DST(	movl %edx, %es:4(%edi)	)
- 
- SRC(	movl 8(%esi), %ebx	)
- SRC(	movl 12(%esi), %edx	)
- 	adcl %ebx, %eax
--DST(	movl %ebx, 8(%edi)	)
-+DST(	movl %ebx, %es:8(%edi)	)
- 	adcl %edx, %eax
--DST(	movl %edx, 12(%edi)	)
-+DST(	movl %edx, %es:12(%edi)	)
- 
- SRC(	movl 16(%esi), %ebx 	)
- SRC(	movl 20(%esi), %edx	)
- 	adcl %ebx, %eax
--DST(	movl %ebx, 16(%edi)	)
-+DST(	movl %ebx, %es:16(%edi)	)
- 	adcl %edx, %eax
--DST(	movl %edx, 20(%edi)	)
-+DST(	movl %edx, %es:20(%edi)	)
- 
- SRC(	movl 24(%esi), %ebx	)
- SRC(	movl 28(%esi), %edx	)
- 	adcl %ebx, %eax
--DST(	movl %ebx, 24(%edi)	)
-+DST(	movl %ebx, %es:24(%edi)	)
- 	adcl %edx, %eax
--DST(	movl %edx, 28(%edi)	)
-+DST(	movl %edx, %es:28(%edi)	)
- 
- 	lea 32(%esi), %esi
- 	lea 32(%edi), %edi
-@@ -380,7 +394,7 @@ DST(	movl %edx, 28(%edi)	)
- 	shrl $2, %edx			# This clears CF
- SRC(3:	movl (%esi), %ebx	)
- 	adcl %ebx, %eax
--DST(	movl %ebx, (%edi)	)
-+DST(	movl %ebx, %es:(%edi)	)
- 	lea 4(%esi), %esi
- 	lea 4(%edi), %edi
- 	dec %edx
-@@ -392,12 +406,12 @@ DST(	movl %ebx, (%edi)	)
- 	jb 5f
- SRC(	movw (%esi), %cx	)
- 	leal 2(%esi), %esi
--DST(	movw %cx, (%edi)	)
-+DST(	movw %cx, %es:(%edi)	)
- 	leal 2(%edi), %edi
- 	je 6f
- 	shll $16,%ecx
- SRC(5:	movb (%esi), %cl	)
--DST(	movb %cl, (%edi)	)
-+DST(	movb %cl, %es:(%edi)	)
- 6:	addl %ecx, %eax
- 	adcl $0, %eax
- 7:
-@@ -408,7 +422,7 @@ DST(	movb %cl, (%edi)	)
- 
- 6001:
- 	movl ARGBASE+20(%esp), %ebx	# src_err_ptr
--	movl $-EFAULT, (%ebx)
-+	movl $-EFAULT, %ss:(%ebx)
- 
- 	# zero the complete destination - computing the rest
- 	# is too much work 
-@@ -421,11 +435,19 @@ DST(	movb %cl, (%edi)	)
- 
- 6002:
- 	movl ARGBASE+24(%esp), %ebx	# dst_err_ptr
--	movl $-EFAULT,(%ebx)
-+	movl $-EFAULT,%ss:(%ebx)
- 	jmp 5000b
- 
- .previous
- 
-+	pushl %ss
-+	CFI_ADJUST_CFA_OFFSET 4
-+	popl %ds
-+	CFI_ADJUST_CFA_OFFSET -4
-+	pushl %ss
-+	CFI_ADJUST_CFA_OFFSET 4
-+	popl %es
-+	CFI_ADJUST_CFA_OFFSET -4
- 	popl %ebx
- 	CFI_ADJUST_CFA_OFFSET -4
- 	CFI_RESTORE ebx
-@@ -439,26 +461,41 @@ DST(	movb %cl, (%edi)	)
- 	CFI_ADJUST_CFA_OFFSET -4
- 	ret	
- 	CFI_ENDPROC
--ENDPROC(csum_partial_copy_generic)
-+ENDPROC(csum_partial_copy_generic_to_user)
- 
- #else
- 
- /* Version for PentiumII/PPro */
- 
- #define ROUND1(x) \
-+	nop; nop; nop;				\
- 	SRC(movl x(%esi), %ebx	)	;	\
- 	addl %ebx, %eax			;	\
--	DST(movl %ebx, x(%edi)	)	; 
-+	DST(movl %ebx, %es:x(%edi))	;
- 
- #define ROUND(x) \
-+	nop; nop; nop;				\
- 	SRC(movl x(%esi), %ebx	)	;	\
- 	adcl %ebx, %eax			;	\
--	DST(movl %ebx, x(%edi)	)	;
-+	DST(movl %ebx, %es:x(%edi))	;
- 
- #define ARGBASE 12
--		
--ENTRY(csum_partial_copy_generic)
-+
-+ENTRY(csum_partial_copy_generic_to_user)
- 	CFI_STARTPROC
-+	pushl $(__USER_DS)
-+	CFI_ADJUST_CFA_OFFSET 4
-+	popl %es
-+	CFI_ADJUST_CFA_OFFSET -4
-+	jmp csum_partial_copy_generic
-+
-+ENTRY(csum_partial_copy_generic_from_user)
-+	pushl $(__USER_DS)
-+	CFI_ADJUST_CFA_OFFSET 4
-+	popl %ds
-+	CFI_ADJUST_CFA_OFFSET -4
-+
-+ENTRY(csum_partial_copy_generic)
- 	pushl %ebx
- 	CFI_ADJUST_CFA_OFFSET 4
- 	CFI_REL_OFFSET ebx, 0
-@@ -482,7 +519,7 @@ ENTRY(csum_partial_copy_generic)
- 	subl %ebx, %edi  
- 	lea  -1(%esi),%edx
- 	andl $-32,%edx
--	lea 3f(%ebx,%ebx), %ebx
-+	lea 3f(%ebx,%ebx,2), %ebx
- 	testl %esi, %esi 
- 	jmp *%ebx
- 1:	addl $64,%esi
-@@ -503,19 +540,19 @@ ENTRY(csum_partial_copy_generic)
- 	jb 5f
- SRC(	movw (%esi), %dx         )
- 	leal 2(%esi), %esi
--DST(	movw %dx, (%edi)         )
-+DST(	movw %dx, %es:(%edi)     )
- 	leal 2(%edi), %edi
- 	je 6f
- 	shll $16,%edx
- 5:
- SRC(	movb (%esi), %dl         )
--DST(	movb %dl, (%edi)         )
-+DST(	movb %dl, %es:(%edi)     )
- 6:	addl %edx, %eax
- 	adcl $0, %eax
- 7:
- .section .fixup, "ax"
- 6001:	movl	ARGBASE+20(%esp), %ebx	# src_err_ptr	
--	movl $-EFAULT, (%ebx)
-+	movl $-EFAULT, %ss:(%ebx)
- 	# zero the complete destination (computing the rest is too much work)
- 	movl ARGBASE+8(%esp),%edi	# dst
- 	movl ARGBASE+12(%esp),%ecx	# len
-@@ -523,10 +560,18 @@ DST(	movb %dl, (%edi)         )
- 	rep; stosb
- 	jmp 7b
- 6002:	movl ARGBASE+24(%esp), %ebx	# dst_err_ptr
--	movl $-EFAULT, (%ebx)
-+	movl $-EFAULT, %ss:(%ebx)
- 	jmp  7b			
- .previous				
- 
-+	pushl %ss
-+	CFI_ADJUST_CFA_OFFSET 4
-+	popl %ds
-+	CFI_ADJUST_CFA_OFFSET -4
-+	pushl %ss
-+	CFI_ADJUST_CFA_OFFSET 4
-+	popl %es
-+	CFI_ADJUST_CFA_OFFSET -4
- 	popl %esi
- 	CFI_ADJUST_CFA_OFFSET -4
- 	CFI_RESTORE esi
-@@ -538,7 +583,7 @@ DST(	movb %dl, (%edi)         )
- 	CFI_RESTORE ebx
- 	ret
- 	CFI_ENDPROC
--ENDPROC(csum_partial_copy_generic)
-+ENDPROC(csum_partial_copy_generic_to_user)
- 				
- #undef ROUND
- #undef ROUND1		
---- a/arch/i386/lib/getuser.S
-+++ b/arch/i386/lib/getuser.S
-@@ -11,7 +11,7 @@
- #include <linux/linkage.h>
- #include <asm/dwarf2.h>
- #include <asm/thread_info.h>
--
-+#include <asm/segment.h>
- 
- /*
-  * __get_user_X
-@@ -31,7 +31,11 @@ ENTRY(__get_user_1)
- 	GET_THREAD_INFO(%edx)
- 	cmpl TI_addr_limit(%edx),%eax
- 	jae bad_get_user
-+	pushl $(__USER_DS)
-+	popl %ds
- 1:	movzbl (%eax),%edx
-+	pushl %ss
-+	pop %ds
- 	xorl %eax,%eax
- 	ret
- 	CFI_ENDPROC
-@@ -44,7 +48,11 @@ ENTRY(__get_user_2)
- 	GET_THREAD_INFO(%edx)
- 	cmpl TI_addr_limit(%edx),%eax
- 	jae bad_get_user
-+	pushl $(__USER_DS)
-+	popl %ds
- 2:	movzwl -1(%eax),%edx
-+	pushl %ss
-+	pop %ds
- 	xorl %eax,%eax
- 	ret
- 	CFI_ENDPROC
-@@ -57,7 +65,11 @@ ENTRY(__get_user_4)
- 	GET_THREAD_INFO(%edx)
- 	cmpl TI_addr_limit(%edx),%eax
- 	jae bad_get_user
-+	pushl $(__USER_DS)
-+	popl %ds
- 3:	movl -3(%eax),%edx
-+	pushl %ss
-+	pop %ds
- 	xorl %eax,%eax
- 	ret
- 	CFI_ENDPROC
-@@ -65,6 +77,8 @@ ENDPROC(__get_user_4)
- 
- bad_get_user:
- 	CFI_STARTPROC
-+	pushl %ss
-+	pop %ds
- 	xorl %edx,%edx
- 	movl $-14,%eax
- 	ret
---- a/arch/i386/lib/mmx.c
-+++ b/arch/i386/lib/mmx.c
-@@ -30,6 +30,7 @@ void *_mmx_memcpy(void *to, const void *
- {
- 	void *p;
- 	int i;
-+	unsigned long cr0;
- 
- 	if (unlikely(in_interrupt()))
- 		return __memcpy(to, from, len);
-@@ -40,52 +41,80 @@ void *_mmx_memcpy(void *to, const void *
- 	kernel_fpu_begin();
- 
- 	__asm__ __volatile__ (
--		"1: prefetch (%0)\n"		/* This set is 28 bytes */
--		"   prefetch 64(%0)\n"
--		"   prefetch 128(%0)\n"
--		"   prefetch 192(%0)\n"
--		"   prefetch 256(%0)\n"
-+		"1: prefetch (%1)\n"		/* This set is 28 bytes */
-+		"   prefetch 64(%1)\n"
-+		"   prefetch 128(%1)\n"
-+		"   prefetch 192(%1)\n"
-+		"   prefetch 256(%1)\n"
- 		"2:  \n"
- 		".section .fixup, \"ax\"\n"
--		"3: movw $0x1AEB, 1b\n"	/* jmp on 26 bytes */
-+		"3:  \n"
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+		"   movl %%cr0, %0\n"
-+		"   movl %0, %%eax\n"
-+		"   andl $0xFFFEFFFF, %%eax\n"
-+		"   movl %%eax, %%cr0\n"
-+#endif
-+
-+		"   movw $0x1AEB, 1b\n"	/* jmp on 26 bytes */
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+		"   movl %0, %%cr0\n"
-+#endif
-+
- 		"   jmp 2b\n"
- 		".previous\n"
- 		".section __ex_table,\"a\"\n"
- 		"	.align 4\n"
- 		"	.long 1b, 3b\n"
- 		".previous"
--		: : "r" (from) );
-+		: "=&r" (cr0) : "r" (from) : "ax");
- 		
- 	
- 	for(; i>5; i--)
- 	{
- 		__asm__ __volatile__ (
--		"1:  prefetch 320(%0)\n"
--		"2:  movq (%0), %%mm0\n"
--		"  movq 8(%0), %%mm1\n"
--		"  movq 16(%0), %%mm2\n"
--		"  movq 24(%0), %%mm3\n"
--		"  movq %%mm0, (%1)\n"
--		"  movq %%mm1, 8(%1)\n"
--		"  movq %%mm2, 16(%1)\n"
--		"  movq %%mm3, 24(%1)\n"
--		"  movq 32(%0), %%mm0\n"
--		"  movq 40(%0), %%mm1\n"
--		"  movq 48(%0), %%mm2\n"
--		"  movq 56(%0), %%mm3\n"
--		"  movq %%mm0, 32(%1)\n"
--		"  movq %%mm1, 40(%1)\n"
--		"  movq %%mm2, 48(%1)\n"
--		"  movq %%mm3, 56(%1)\n"
-+		"1:  prefetch 320(%1)\n"
-+		"2:  movq (%1), %%mm0\n"
-+		"  movq 8(%1), %%mm1\n"
-+		"  movq 16(%1), %%mm2\n"
-+		"  movq 24(%1), %%mm3\n"
-+		"  movq %%mm0, (%2)\n"
-+		"  movq %%mm1, 8(%2)\n"
-+		"  movq %%mm2, 16(%2)\n"
-+		"  movq %%mm3, 24(%2)\n"
-+		"  movq 32(%1), %%mm0\n"
-+		"  movq 40(%1), %%mm1\n"
-+		"  movq 48(%1), %%mm2\n"
-+		"  movq 56(%1), %%mm3\n"
-+		"  movq %%mm0, 32(%2)\n"
-+		"  movq %%mm1, 40(%2)\n"
-+		"  movq %%mm2, 48(%2)\n"
-+		"  movq %%mm3, 56(%2)\n"
- 		".section .fixup, \"ax\"\n"
--		"3: movw $0x05EB, 1b\n"	/* jmp on 5 bytes */
-+		"3:\n"
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+		"   movl %%cr0, %0\n"
-+		"   movl %0, %%eax\n"
-+		"   andl $0xFFFEFFFF, %%eax\n"
-+		"   movl %%eax, %%cr0\n"
-+#endif
-+
-+		"   movw $0x05EB, 1b\n"	/* jmp on 5 bytes */
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+		"   movl %0, %%cr0\n"
-+#endif
-+
- 		"   jmp 2b\n"
- 		".previous\n"
- 		".section __ex_table,\"a\"\n"
- 		"	.align 4\n"
- 		"	.long 1b, 3b\n"
- 		".previous"
--		: : "r" (from), "r" (to) : "memory");
-+		: "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
- 		from+=64;
- 		to+=64;
- 	}
-@@ -164,6 +193,7 @@ static void fast_clear_page(void *page)
- static void fast_copy_page(void *to, void *from)
- {
- 	int i;
-+	unsigned long cr0;
- 
- 	kernel_fpu_begin();
- 
-@@ -171,51 +201,79 @@ static void fast_copy_page(void *to, voi
- 	 * but that is for later. -AV
- 	 */
- 	__asm__ __volatile__ (
--		"1: prefetch (%0)\n"
--		"   prefetch 64(%0)\n"
--		"   prefetch 128(%0)\n"
--		"   prefetch 192(%0)\n"
--		"   prefetch 256(%0)\n"
-+		"1: prefetch (%1)\n"
-+		"   prefetch 64(%1)\n"
-+		"   prefetch 128(%1)\n"
-+		"   prefetch 192(%1)\n"
-+		"   prefetch 256(%1)\n"
- 		"2:  \n"
- 		".section .fixup, \"ax\"\n"
--		"3: movw $0x1AEB, 1b\n"	/* jmp on 26 bytes */
-+		"3:  \n"
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+		"   movl %%cr0, %0\n"
-+		"   movl %0, %%eax\n"
-+		"   andl $0xFFFEFFFF, %%eax\n"
-+		"   movl %%eax, %%cr0\n"
-+#endif
-+
-+		"   movw $0x1AEB, 1b\n"	/* jmp on 26 bytes */
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+		"   movl %0, %%cr0\n"
-+#endif
-+
- 		"   jmp 2b\n"
- 		".previous\n"
- 		".section __ex_table,\"a\"\n"
- 		"	.align 4\n"
- 		"	.long 1b, 3b\n"
- 		".previous"
--		: : "r" (from) );
-+		: "=&r" (cr0) : "r" (from) : "ax");
- 
- 	for(i=0; i<(4096-320)/64; i++)
- 	{
- 		__asm__ __volatile__ (
--		"1: prefetch 320(%0)\n"
--		"2: movq (%0), %%mm0\n"
--		"   movntq %%mm0, (%1)\n"
--		"   movq 8(%0), %%mm1\n"
--		"   movntq %%mm1, 8(%1)\n"
--		"   movq 16(%0), %%mm2\n"
--		"   movntq %%mm2, 16(%1)\n"
--		"   movq 24(%0), %%mm3\n"
--		"   movntq %%mm3, 24(%1)\n"
--		"   movq 32(%0), %%mm4\n"
--		"   movntq %%mm4, 32(%1)\n"
--		"   movq 40(%0), %%mm5\n"
--		"   movntq %%mm5, 40(%1)\n"
--		"   movq 48(%0), %%mm6\n"
--		"   movntq %%mm6, 48(%1)\n"
--		"   movq 56(%0), %%mm7\n"
--		"   movntq %%mm7, 56(%1)\n"
-+		"1: prefetch 320(%1)\n"
-+		"2: movq (%1), %%mm0\n"
-+		"   movntq %%mm0, (%2)\n"
-+		"   movq 8(%1), %%mm1\n"
-+		"   movntq %%mm1, 8(%2)\n"
-+		"   movq 16(%1), %%mm2\n"
-+		"   movntq %%mm2, 16(%2)\n"
-+		"   movq 24(%1), %%mm3\n"
-+		"   movntq %%mm3, 24(%2)\n"
-+		"   movq 32(%1), %%mm4\n"
-+		"   movntq %%mm4, 32(%2)\n"
-+		"   movq 40(%1), %%mm5\n"
-+		"   movntq %%mm5, 40(%2)\n"
-+		"   movq 48(%1), %%mm6\n"
-+		"   movntq %%mm6, 48(%2)\n"
-+		"   movq 56(%1), %%mm7\n"
-+		"   movntq %%mm7, 56(%2)\n"
- 		".section .fixup, \"ax\"\n"
--		"3: movw $0x05EB, 1b\n"	/* jmp on 5 bytes */
-+		"3:\n"
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+		"   movl %%cr0, %0\n"
-+		"   movl %0, %%eax\n"
-+		"   andl $0xFFFEFFFF, %%eax\n"
-+		"   movl %%eax, %%cr0\n"
-+#endif
-+
-+		"   movw $0x05EB, 1b\n"	/* jmp on 5 bytes */
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+		"   movl %0, %%cr0\n"
-+#endif
-+
- 		"   jmp 2b\n"
- 		".previous\n"
- 		".section __ex_table,\"a\"\n"
- 		"	.align 4\n"
- 		"	.long 1b, 3b\n"
- 		".previous"
--		: : "r" (from), "r" (to) : "memory");
-+		: "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
- 		from+=64;
- 		to+=64;
- 	}
-@@ -296,56 +354,84 @@ static void fast_clear_page(void *page)
- static void fast_copy_page(void *to, void *from)
- {
- 	int i;
--	
--	
-+	unsigned long cr0;
-+
- 	kernel_fpu_begin();
- 
- 	__asm__ __volatile__ (
--		"1: prefetch (%0)\n"
--		"   prefetch 64(%0)\n"
--		"   prefetch 128(%0)\n"
--		"   prefetch 192(%0)\n"
--		"   prefetch 256(%0)\n"
-+		"1: prefetch (%1)\n"
-+		"   prefetch 64(%1)\n"
-+		"   prefetch 128(%1)\n"
-+		"   prefetch 192(%1)\n"
-+		"   prefetch 256(%1)\n"
- 		"2:  \n"
- 		".section .fixup, \"ax\"\n"
--		"3: movw $0x1AEB, 1b\n"	/* jmp on 26 bytes */
-+		"3:  \n"
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+		"   movl %%cr0, %0\n"
-+		"   movl %0, %%eax\n"
-+		"   andl $0xFFFEFFFF, %%eax\n"
-+		"   movl %%eax, %%cr0\n"
-+#endif
-+
-+		"   movw $0x1AEB, 1b\n"	/* jmp on 26 bytes */
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+		"   movl %0, %%cr0\n"
-+#endif
-+
- 		"   jmp 2b\n"
- 		".previous\n"
- 		".section __ex_table,\"a\"\n"
- 		"	.align 4\n"
- 		"	.long 1b, 3b\n"
- 		".previous"
--		: : "r" (from) );
-+		: "=&r" (cr0) : "r" (from) : "ax");
- 
- 	for(i=0; i<4096/64; i++)
- 	{
- 		__asm__ __volatile__ (
--		"1: prefetch 320(%0)\n"
--		"2: movq (%0), %%mm0\n"
--		"   movq 8(%0), %%mm1\n"
--		"   movq 16(%0), %%mm2\n"
--		"   movq 24(%0), %%mm3\n"
--		"   movq %%mm0, (%1)\n"
--		"   movq %%mm1, 8(%1)\n"
--		"   movq %%mm2, 16(%1)\n"
--		"   movq %%mm3, 24(%1)\n"
--		"   movq 32(%0), %%mm0\n"
--		"   movq 40(%0), %%mm1\n"
--		"   movq 48(%0), %%mm2\n"
--		"   movq 56(%0), %%mm3\n"
--		"   movq %%mm0, 32(%1)\n"
--		"   movq %%mm1, 40(%1)\n"
--		"   movq %%mm2, 48(%1)\n"
--		"   movq %%mm3, 56(%1)\n"
-+		"1: prefetch 320(%1)\n"
-+		"2: movq (%1), %%mm0\n"
-+		"   movq 8(%1), %%mm1\n"
-+		"   movq 16(%1), %%mm2\n"
-+		"   movq 24(%1), %%mm3\n"
-+		"   movq %%mm0, (%2)\n"
-+		"   movq %%mm1, 8(%2)\n"
-+		"   movq %%mm2, 16(%2)\n"
-+		"   movq %%mm3, 24(%2)\n"
-+		"   movq 32(%1), %%mm0\n"
-+		"   movq 40(%1), %%mm1\n"
-+		"   movq 48(%1), %%mm2\n"
-+		"   movq 56(%1), %%mm3\n"
-+		"   movq %%mm0, 32(%2)\n"
-+		"   movq %%mm1, 40(%2)\n"
-+		"   movq %%mm2, 48(%2)\n"
-+		"   movq %%mm3, 56(%2)\n"
- 		".section .fixup, \"ax\"\n"
--		"3: movw $0x05EB, 1b\n"	/* jmp on 5 bytes */
-+		"3:\n"
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+		"   movl %%cr0, %0\n"
-+		"   movl %0, %%eax\n"
-+		"   andl $0xFFFEFFFF, %%eax\n"
-+		"   movl %%eax, %%cr0\n"
-+#endif
-+
-+		"   movw $0x05EB, 1b\n"	/* jmp on 5 bytes */
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+		"   movl %0, %%cr0\n"
-+#endif
-+
- 		"   jmp 2b\n"
- 		".previous\n"
- 		".section __ex_table,\"a\"\n"
- 		"	.align 4\n"
- 		"	.long 1b, 3b\n"
- 		".previous"
--		: : "r" (from), "r" (to) : "memory");
-+		: "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
- 		from+=64;
- 		to+=64;
- 	}
---- a/arch/i386/lib/putuser.S
-+++ b/arch/i386/lib/putuser.S
-@@ -11,7 +11,7 @@
- #include <linux/linkage.h>
- #include <asm/dwarf2.h>
- #include <asm/thread_info.h>
--
-+#include <asm/segment.h>
- 
- /*
-  * __put_user_X
-@@ -41,7 +41,11 @@ ENTRY(__put_user_1)
- 	ENTER
- 	cmpl TI_addr_limit(%ebx),%ecx
- 	jae bad_put_user
-+	pushl $(__USER_DS)
-+	popl %ds
- 1:	movb %al,(%ecx)
-+	pushl %ss
-+	popl %ds
- 	xorl %eax,%eax
- 	EXIT
- ENDPROC(__put_user_1)
-@@ -52,7 +56,11 @@ ENTRY(__put_user_2)
- 	subl $1,%ebx
- 	cmpl %ebx,%ecx
- 	jae bad_put_user
-+	pushl $(__USER_DS)
-+	popl %ds
- 2:	movw %ax,(%ecx)
-+	pushl %ss
-+	popl %ds
- 	xorl %eax,%eax
- 	EXIT
- ENDPROC(__put_user_2)
-@@ -63,7 +71,11 @@ ENTRY(__put_user_4)
- 	subl $3,%ebx
- 	cmpl %ebx,%ecx
- 	jae bad_put_user
-+	pushl $(__USER_DS)
-+	popl %ds
- 3:	movl %eax,(%ecx)
-+	pushl %ss
-+	popl %ds
- 	xorl %eax,%eax
- 	EXIT
- ENDPROC(__put_user_4)
-@@ -74,8 +86,12 @@ ENTRY(__put_user_8)
- 	subl $7,%ebx
- 	cmpl %ebx,%ecx
- 	jae bad_put_user
-+	pushl $(__USER_DS)
-+	popl %ds
- 4:	movl %eax,(%ecx)
- 5:	movl %edx,4(%ecx)
-+	pushl %ss
-+	popl %ds
- 	xorl %eax,%eax
- 	EXIT
- ENDPROC(__put_user_8)
-@@ -85,6 +101,10 @@ bad_put_user:
- 	CFI_DEF_CFA esp, 2*4
- 	CFI_OFFSET eip, -1*4
- 	CFI_OFFSET ebx, -2*4
-+	pushl %ss
-+	CFI_ADJUST_CFA_OFFSET 4
-+	popl %ds
-+	CFI_ADJUST_CFA_OFFSET -4
- 	movl $-14,%eax
- 	EXIT
- END(bad_put_user)
---- a/arch/i386/lib/usercopy.c
-+++ b/arch/i386/lib/usercopy.c
-@@ -29,34 +29,41 @@ static inline int __movsl_is_ok(unsigned
-  * Copy a null terminated string from userspace.
-  */
- 
--#define __do_strncpy_from_user(dst,src,count,res)			   \
--do {									   \
--	int __d0, __d1, __d2;						   \
--	might_sleep();							   \
--	__asm__ __volatile__(						   \
--		"	testl %1,%1\n"					   \
--		"	jz 2f\n"					   \
--		"0:	lodsb\n"					   \
--		"	stosb\n"					   \
--		"	testb %%al,%%al\n"				   \
--		"	jz 1f\n"					   \
--		"	decl %1\n"					   \
--		"	jnz 0b\n"					   \
--		"1:	subl %1,%0\n"					   \
--		"2:\n"							   \
--		".section .fixup,\"ax\"\n"				   \
--		"3:	movl %5,%0\n"					   \
--		"	jmp 2b\n"					   \
--		".previous\n"						   \
--		".section __ex_table,\"a\"\n"				   \
--		"	.align 4\n"					   \
--		"	.long 0b,3b\n"					   \
--		".previous"						   \
--		: "=d"(res), "=c"(count), "=&a" (__d0), "=&S" (__d1),	   \
--		  "=&D" (__d2)						   \
--		: "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
--		: "memory");						   \
--} while (0)
-+static long __do_strncpy_from_user(char *dst, const char __user *src, long count)
-+{
-+	int __d0, __d1, __d2;
-+	long res = -EFAULT;
-+
-+	might_sleep();
-+	__asm__ __volatile__(
-+		"	movw %w10,%%ds\n"
-+		"	testl %1,%1\n"
-+		"	jz 2f\n"
-+		"0:	lodsb\n"
-+		"	stosb\n"
-+		"	testb %%al,%%al\n"
-+		"	jz 1f\n"
-+		"	decl %1\n"
-+		"	jnz 0b\n"
-+		"1:	subl %1,%0\n"
-+		"2:\n"
-+		"	pushl %%ss\n"
-+		"	popl %%ds\n"
-+		".section .fixup,\"ax\"\n"
-+		"3:	movl %5,%0\n"
-+		"	jmp 2b\n"
-+		".previous\n"
-+		".section __ex_table,\"a\"\n"
-+		"	.align 4\n"
-+		"	.long 0b,3b\n"
-+		".previous"
-+		: "=d"(res), "=c"(count), "=&a" (__d0), "=&S" (__d1),
-+		  "=&D" (__d2)
-+		: "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst),
-+		  "r"(__USER_DS)
-+		: "memory");
-+	return res;
-+}
- 
- /**
-  * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
-@@ -81,9 +88,7 @@ do {									   \
- long
- __strncpy_from_user(char *dst, const char __user *src, long count)
- {
--	long res;
--	__do_strncpy_from_user(dst, src, count, res);
--	return res;
-+	return __do_strncpy_from_user(dst, src, count);
- }
- EXPORT_SYMBOL(__strncpy_from_user);
- 
-@@ -110,7 +115,7 @@ strncpy_from_user(char *dst, const char 
- {
- 	long res = -EFAULT;
- 	if (access_ok(VERIFY_READ, src, 1))
--		__do_strncpy_from_user(dst, src, count, res);
-+		res = __do_strncpy_from_user(dst, src, count);
- 	return res;
- }
- EXPORT_SYMBOL(strncpy_from_user);
-@@ -119,27 +124,33 @@ EXPORT_SYMBOL(strncpy_from_user);
-  * Zero Userspace
-  */
- 
--#define __do_clear_user(addr,size)					\
--do {									\
--	int __d0;							\
--	might_sleep();							\
--  	__asm__ __volatile__(						\
--		"0:	rep; stosl\n"					\
--		"	movl %2,%0\n"					\
--		"1:	rep; stosb\n"					\
--		"2:\n"							\
--		".section .fixup,\"ax\"\n"				\
--		"3:	lea 0(%2,%0,4),%0\n"				\
--		"	jmp 2b\n"					\
--		".previous\n"						\
--		".section __ex_table,\"a\"\n"				\
--		"	.align 4\n"					\
--		"	.long 0b,3b\n"					\
--		"	.long 1b,2b\n"					\
--		".previous"						\
--		: "=&c"(size), "=&D" (__d0)				\
--		: "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0));	\
--} while (0)
-+static unsigned long __do_clear_user(void __user *addr, unsigned long size)
-+{
-+	int __d0;
-+
-+	might_sleep();
-+	__asm__ __volatile__(
-+		"	movw %w6,%%es\n"
-+		"0:	rep; stosl\n"
-+		"	movl %2,%0\n"
-+		"1:	rep; stosb\n"
-+		"2:\n"
-+		"	pushl %%ss\n"
-+		"	popl %%es\n"
-+		".section .fixup,\"ax\"\n"
-+		"3:	lea 0(%2,%0,4),%0\n"
-+		"	jmp 2b\n"
-+		".previous\n"
-+		".section __ex_table,\"a\"\n"
-+		"	.align 4\n"
-+		"	.long 0b,3b\n"
-+		"	.long 1b,2b\n"
-+		".previous"
-+		: "=&c"(size), "=&D" (__d0)
-+		: "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0),
-+		  "r"(__USER_DS));
-+	return size;
-+}
- 
- /**
-  * clear_user: - Zero a block of memory in user space.
-@@ -156,7 +167,7 @@ clear_user(void __user *to, unsigned lon
- {
- 	might_sleep();
- 	if (access_ok(VERIFY_WRITE, to, n))
--		__do_clear_user(to, n);
-+		n = __do_clear_user(to, n);
- 	return n;
- }
- EXPORT_SYMBOL(clear_user);
-@@ -175,8 +186,7 @@ EXPORT_SYMBOL(clear_user);
- unsigned long
- __clear_user(void __user *to, unsigned long n)
- {
--	__do_clear_user(to, n);
--	return n;
-+	return __do_clear_user(to, n);
- }
- EXPORT_SYMBOL(__clear_user);
- 
-@@ -199,14 +209,17 @@ long strnlen_user(const char __user *s, 
- 	might_sleep();
- 
- 	__asm__ __volatile__(
-+		"	movw %w8,%%es\n"
- 		"	testl %0, %0\n"
- 		"	jz 3f\n"
--		"	andl %0,%%ecx\n"
-+		"	movl %0,%%ecx\n"
- 		"0:	repne; scasb\n"
- 		"	setne %%al\n"
- 		"	subl %%ecx,%0\n"
- 		"	addl %0,%%eax\n"
- 		"1:\n"
-+		"	pushl %%ss\n"
-+		"	popl %%es\n"
- 		".section .fixup,\"ax\"\n"
- 		"2:	xorl %%eax,%%eax\n"
- 		"	jmp 1b\n"
-@@ -218,7 +231,7 @@ long strnlen_user(const char __user *s, 
- 		"	.long 0b,2b\n"
- 		".previous"
- 		:"=r" (n), "=D" (s), "=a" (res), "=c" (tmp)
--		:"0" (n), "1" (s), "2" (0), "3" (mask)
-+		:"0" (n), "1" (s), "2" (0), "3" (mask), "r" (__USER_DS)
- 		:"cc");
- 	return res & mask;
- }
-@@ -226,10 +239,121 @@ EXPORT_SYMBOL(strnlen_user);
- 
- #ifdef CONFIG_X86_INTEL_USERCOPY
- static unsigned long
--__copy_user_intel(void __user *to, const void *from, unsigned long size)
-+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
-+{
-+	int d0, d1;
-+	__asm__ __volatile__(
-+		       "       movw %w6, %%es\n"
-+		       "       .align 2,0x90\n"
-+		       "1:     movl 32(%4), %%eax\n"
-+		       "       cmpl $67, %0\n"
-+		       "       jbe 3f\n"
-+		       "2:     movl 64(%4), %%eax\n"
-+		       "       .align 2,0x90\n"
-+		       "3:     movl 0(%4), %%eax\n"
-+		       "4:     movl 4(%4), %%edx\n"
-+		       "5:     movl %%eax, %%es:0(%3)\n"
-+		       "6:     movl %%edx, %%es:4(%3)\n"
-+		       "7:     movl 8(%4), %%eax\n"
-+		       "8:     movl 12(%4),%%edx\n"
-+		       "9:     movl %%eax, %%es:8(%3)\n"
-+		       "10:    movl %%edx, %%es:12(%3)\n"
-+		       "11:    movl 16(%4), %%eax\n"
-+		       "12:    movl 20(%4), %%edx\n"
-+		       "13:    movl %%eax, %%es:16(%3)\n"
-+		       "14:    movl %%edx, %%es:20(%3)\n"
-+		       "15:    movl 24(%4), %%eax\n"
-+		       "16:    movl 28(%4), %%edx\n"
-+		       "17:    movl %%eax, %%es:24(%3)\n"
-+		       "18:    movl %%edx, %%es:28(%3)\n"
-+		       "19:    movl 32(%4), %%eax\n"
-+		       "20:    movl 36(%4), %%edx\n"
-+		       "21:    movl %%eax, %%es:32(%3)\n"
-+		       "22:    movl %%edx, %%es:36(%3)\n"
-+		       "23:    movl 40(%4), %%eax\n"
-+		       "24:    movl 44(%4), %%edx\n"
-+		       "25:    movl %%eax, %%es:40(%3)\n"
-+		       "26:    movl %%edx, %%es:44(%3)\n"
-+		       "27:    movl 48(%4), %%eax\n"
-+		       "28:    movl 52(%4), %%edx\n"
-+		       "29:    movl %%eax, %%es:48(%3)\n"
-+		       "30:    movl %%edx, %%es:52(%3)\n"
-+		       "31:    movl 56(%4), %%eax\n"
-+		       "32:    movl 60(%4), %%edx\n"
-+		       "33:    movl %%eax, %%es:56(%3)\n"
-+		       "34:    movl %%edx, %%es:60(%3)\n"
-+		       "       addl $-64, %0\n"
-+		       "       addl $64, %4\n"
-+		       "       addl $64, %3\n"
-+		       "       cmpl $63, %0\n"
-+		       "       ja  1b\n"
-+		       "35:    movl  %0, %%eax\n"
-+		       "       shrl  $2, %0\n"
-+		       "       andl  $3, %%eax\n"
-+		       "       cld\n"
-+		       "99:    rep; movsl\n"
-+		       "36:    movl %%eax, %0\n"
-+		       "37:    rep; movsb\n"
-+		       "100:\n"
-+		       "       pushl %%ss\n"
-+		       "       popl %%es\n"
-+		       ".section .fixup,\"ax\"\n"
-+		       "101:   lea 0(%%eax,%0,4),%0\n"
-+		       "       jmp 100b\n"
-+		       ".previous\n"
-+		       ".section __ex_table,\"a\"\n"
-+		       "       .align 4\n"
-+		       "       .long 1b,100b\n"
-+		       "       .long 2b,100b\n"
-+		       "       .long 3b,100b\n"
-+		       "       .long 4b,100b\n"
-+		       "       .long 5b,100b\n"
-+		       "       .long 6b,100b\n"
-+		       "       .long 7b,100b\n"
-+		       "       .long 8b,100b\n"
-+		       "       .long 9b,100b\n"
-+		       "       .long 10b,100b\n"
-+		       "       .long 11b,100b\n"
-+		       "       .long 12b,100b\n"
-+		       "       .long 13b,100b\n"
-+		       "       .long 14b,100b\n"
-+		       "       .long 15b,100b\n"
-+		       "       .long 16b,100b\n"
-+		       "       .long 17b,100b\n"
-+		       "       .long 18b,100b\n"
-+		       "       .long 19b,100b\n"
-+		       "       .long 20b,100b\n"
-+		       "       .long 21b,100b\n"
-+		       "       .long 22b,100b\n"
-+		       "       .long 23b,100b\n"
-+		       "       .long 24b,100b\n"
-+		       "       .long 25b,100b\n"
-+		       "       .long 26b,100b\n"
-+		       "       .long 27b,100b\n"
-+		       "       .long 28b,100b\n"
-+		       "       .long 29b,100b\n"
-+		       "       .long 30b,100b\n"
-+		       "       .long 31b,100b\n"
-+		       "       .long 32b,100b\n"
-+		       "       .long 33b,100b\n"
-+		       "       .long 34b,100b\n"
-+		       "       .long 35b,100b\n"
-+		       "       .long 36b,100b\n"
-+		       "       .long 37b,100b\n"
-+		       "       .long 99b,101b\n"
-+		       ".previous"
-+		       : "=&c"(size), "=&D" (d0), "=&S" (d1)
-+		       :  "1"(to), "2"(from), "0"(size), "r"(__USER_DS)
-+		       : "eax", "edx", "memory");
-+	return size;
-+}
-+
-+static unsigned long
-+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
- {
- 	int d0, d1;
- 	__asm__ __volatile__(
-+		       "       movw %w6, %%ds\n"
- 		       "       .align 2,0x90\n"
- 		       "1:     movl 32(%4), %%eax\n"
- 		       "       cmpl $67, %0\n"
-@@ -238,36 +362,36 @@ __copy_user_intel(void __user *to, const
- 		       "       .align 2,0x90\n"
- 		       "3:     movl 0(%4), %%eax\n"
- 		       "4:     movl 4(%4), %%edx\n"
--		       "5:     movl %%eax, 0(%3)\n"
--		       "6:     movl %%edx, 4(%3)\n"
-+		       "5:     movl %%eax, %%es:0(%3)\n"
-+		       "6:     movl %%edx, %%es:4(%3)\n"
- 		       "7:     movl 8(%4), %%eax\n"
- 		       "8:     movl 12(%4),%%edx\n"
--		       "9:     movl %%eax, 8(%3)\n"
--		       "10:    movl %%edx, 12(%3)\n"
-+		       "9:     movl %%eax, %%es:8(%3)\n"
-+		       "10:    movl %%edx, %%es:12(%3)\n"
- 		       "11:    movl 16(%4), %%eax\n"
- 		       "12:    movl 20(%4), %%edx\n"
--		       "13:    movl %%eax, 16(%3)\n"
--		       "14:    movl %%edx, 20(%3)\n"
-+		       "13:    movl %%eax, %%es:16(%3)\n"
-+		       "14:    movl %%edx, %%es:20(%3)\n"
- 		       "15:    movl 24(%4), %%eax\n"
- 		       "16:    movl 28(%4), %%edx\n"
--		       "17:    movl %%eax, 24(%3)\n"
--		       "18:    movl %%edx, 28(%3)\n"
-+		       "17:    movl %%eax, %%es:24(%3)\n"
-+		       "18:    movl %%edx, %%es:28(%3)\n"
- 		       "19:    movl 32(%4), %%eax\n"
- 		       "20:    movl 36(%4), %%edx\n"
--		       "21:    movl %%eax, 32(%3)\n"
--		       "22:    movl %%edx, 36(%3)\n"
-+		       "21:    movl %%eax, %%es:32(%3)\n"
-+		       "22:    movl %%edx, %%es:36(%3)\n"
- 		       "23:    movl 40(%4), %%eax\n"
- 		       "24:    movl 44(%4), %%edx\n"
--		       "25:    movl %%eax, 40(%3)\n"
--		       "26:    movl %%edx, 44(%3)\n"
-+		       "25:    movl %%eax, %%es:40(%3)\n"
-+		       "26:    movl %%edx, %%es:44(%3)\n"
- 		       "27:    movl 48(%4), %%eax\n"
- 		       "28:    movl 52(%4), %%edx\n"
--		       "29:    movl %%eax, 48(%3)\n"
--		       "30:    movl %%edx, 52(%3)\n"
-+		       "29:    movl %%eax, %%es:48(%3)\n"
-+		       "30:    movl %%edx, %%es:52(%3)\n"
- 		       "31:    movl 56(%4), %%eax\n"
- 		       "32:    movl 60(%4), %%edx\n"
--		       "33:    movl %%eax, 56(%3)\n"
--		       "34:    movl %%edx, 60(%3)\n"
-+		       "33:    movl %%eax, %%es:56(%3)\n"
-+		       "34:    movl %%edx, %%es:60(%3)\n"
- 		       "       addl $-64, %0\n"
- 		       "       addl $64, %4\n"
- 		       "       addl $64, %3\n"
-@@ -281,6 +405,8 @@ __copy_user_intel(void __user *to, const
- 		       "36:    movl %%eax, %0\n"
- 		       "37:    rep; movsb\n"
- 		       "100:\n"
-+		       "       pushl %%ss\n"
-+		       "       popl %%ds\n"
- 		       ".section .fixup,\"ax\"\n"
- 		       "101:   lea 0(%%eax,%0,4),%0\n"
- 		       "       jmp 100b\n"
-@@ -327,7 +453,7 @@ __copy_user_intel(void __user *to, const
- 		       "       .long 99b,101b\n"
- 		       ".previous"
- 		       : "=&c"(size), "=&D" (d0), "=&S" (d1)
--		       :  "1"(to), "2"(from), "0"(size)
-+		       :  "1"(to), "2"(from), "0"(size), "r"(__USER_DS)
- 		       : "eax", "edx", "memory");
- 	return size;
- }
-@@ -337,6 +463,7 @@ __copy_user_zeroing_intel(void *to, cons
- {
- 	int d0, d1;
- 	__asm__ __volatile__(
-+		       "        movw %w6, %%ds\n"
- 		       "        .align 2,0x90\n"
- 		       "0:      movl 32(%4), %%eax\n"
- 		       "        cmpl $67, %0\n"      
-@@ -345,36 +472,36 @@ __copy_user_zeroing_intel(void *to, cons
- 		       "        .align 2,0x90\n"     
- 		       "2:      movl 0(%4), %%eax\n" 
- 		       "21:     movl 4(%4), %%edx\n" 
--		       "        movl %%eax, 0(%3)\n" 
--		       "        movl %%edx, 4(%3)\n" 
-+		       "        movl %%eax, %%es:0(%3)\n" 
-+		       "        movl %%edx, %%es:4(%3)\n" 
- 		       "3:      movl 8(%4), %%eax\n" 
- 		       "31:     movl 12(%4),%%edx\n" 
--		       "        movl %%eax, 8(%3)\n" 
--		       "        movl %%edx, 12(%3)\n"
-+		       "        movl %%eax, %%es:8(%3)\n" 
-+		       "        movl %%edx, %%es:12(%3)\n"
- 		       "4:      movl 16(%4), %%eax\n"
- 		       "41:     movl 20(%4), %%edx\n"
--		       "        movl %%eax, 16(%3)\n"
--		       "        movl %%edx, 20(%3)\n"
-+		       "        movl %%eax, %%es:16(%3)\n"
-+		       "        movl %%edx, %%es:20(%3)\n"
- 		       "10:     movl 24(%4), %%eax\n"
- 		       "51:     movl 28(%4), %%edx\n"
--		       "        movl %%eax, 24(%3)\n"
--		       "        movl %%edx, 28(%3)\n"
-+		       "        movl %%eax, %%es:24(%3)\n"
-+		       "        movl %%edx, %%es:28(%3)\n"
- 		       "11:     movl 32(%4), %%eax\n"
- 		       "61:     movl 36(%4), %%edx\n"
--		       "        movl %%eax, 32(%3)\n"
--		       "        movl %%edx, 36(%3)\n"
-+		       "        movl %%eax, %%es:32(%3)\n"
-+		       "        movl %%edx, %%es:36(%3)\n"
- 		       "12:     movl 40(%4), %%eax\n"
- 		       "71:     movl 44(%4), %%edx\n"
--		       "        movl %%eax, 40(%3)\n"
--		       "        movl %%edx, 44(%3)\n"
-+		       "        movl %%eax, %%es:40(%3)\n"
-+		       "        movl %%edx, %%es:44(%3)\n"
- 		       "13:     movl 48(%4), %%eax\n"
- 		       "81:     movl 52(%4), %%edx\n"
--		       "        movl %%eax, 48(%3)\n"
--		       "        movl %%edx, 52(%3)\n"
-+		       "        movl %%eax, %%es:48(%3)\n"
-+		       "        movl %%edx, %%es:52(%3)\n"
- 		       "14:     movl 56(%4), %%eax\n"
- 		       "91:     movl 60(%4), %%edx\n"
--		       "        movl %%eax, 56(%3)\n"
--		       "        movl %%edx, 60(%3)\n"
-+		       "        movl %%eax, %%es:56(%3)\n"
-+		       "        movl %%edx, %%es:60(%3)\n"
- 		       "        addl $-64, %0\n"     
- 		       "        addl $64, %4\n"      
- 		       "        addl $64, %3\n"      
-@@ -388,6 +515,8 @@ __copy_user_zeroing_intel(void *to, cons
- 		       "        movl %%eax,%0\n"
- 		       "7:      rep; movsb\n"	
- 		       "8:\n"			
-+		       "        pushl %%ss\n"
-+		       "        popl %%ds\n"
- 		       ".section .fixup,\"ax\"\n"
- 		       "9:      lea 0(%%eax,%0,4),%0\n"	
- 		       "16:     pushl %0\n"	
-@@ -422,7 +551,7 @@ __copy_user_zeroing_intel(void *to, cons
- 		       "        .long 7b,16b\n" 
- 		       ".previous"		
- 		       : "=&c"(size), "=&D" (d0), "=&S" (d1)
--		       :  "1"(to), "2"(from), "0"(size)
-+		       :  "1"(to), "2"(from), "0"(size), "r"(__USER_DS)
- 		       : "eax", "edx", "memory");
- 	return size;
- }
-@@ -438,6 +567,7 @@ static unsigned long __copy_user_zeroing
-         int d0, d1;
- 
- 	__asm__ __volatile__(
-+	       "        movw %w6, %%ds\n"
- 	       "        .align 2,0x90\n"
- 	       "0:      movl 32(%4), %%eax\n"
- 	       "        cmpl $67, %0\n"
-@@ -446,36 +576,36 @@ static unsigned long __copy_user_zeroing
- 	       "        .align 2,0x90\n"
- 	       "2:      movl 0(%4), %%eax\n"
- 	       "21:     movl 4(%4), %%edx\n"
--	       "        movnti %%eax, 0(%3)\n"
--	       "        movnti %%edx, 4(%3)\n"
-+	       "        movnti %%eax, %%es:0(%3)\n"
-+	       "        movnti %%edx, %%es:4(%3)\n"
- 	       "3:      movl 8(%4), %%eax\n"
- 	       "31:     movl 12(%4),%%edx\n"
--	       "        movnti %%eax, 8(%3)\n"
--	       "        movnti %%edx, 12(%3)\n"
-+	       "        movnti %%eax, %%es:8(%3)\n"
-+	       "        movnti %%edx, %%es:12(%3)\n"
- 	       "4:      movl 16(%4), %%eax\n"
- 	       "41:     movl 20(%4), %%edx\n"
--	       "        movnti %%eax, 16(%3)\n"
--	       "        movnti %%edx, 20(%3)\n"
-+	       "        movnti %%eax, %%es:16(%3)\n"
-+	       "        movnti %%edx, %%es:20(%3)\n"
- 	       "10:     movl 24(%4), %%eax\n"
- 	       "51:     movl 28(%4), %%edx\n"
--	       "        movnti %%eax, 24(%3)\n"
--	       "        movnti %%edx, 28(%3)\n"
-+	       "        movnti %%eax, %%es:24(%3)\n"
-+	       "        movnti %%edx, %%es:28(%3)\n"
- 	       "11:     movl 32(%4), %%eax\n"
- 	       "61:     movl 36(%4), %%edx\n"
--	       "        movnti %%eax, 32(%3)\n"
--	       "        movnti %%edx, 36(%3)\n"
-+	       "        movnti %%eax, %%es:32(%3)\n"
-+	       "        movnti %%edx, %%es:36(%3)\n"
- 	       "12:     movl 40(%4), %%eax\n"
- 	       "71:     movl 44(%4), %%edx\n"
--	       "        movnti %%eax, 40(%3)\n"
--	       "        movnti %%edx, 44(%3)\n"
-+	       "        movnti %%eax, %%es:40(%3)\n"
-+	       "        movnti %%edx, %%es:44(%3)\n"
- 	       "13:     movl 48(%4), %%eax\n"
- 	       "81:     movl 52(%4), %%edx\n"
--	       "        movnti %%eax, 48(%3)\n"
--	       "        movnti %%edx, 52(%3)\n"
-+	       "        movnti %%eax, %%es:48(%3)\n"
-+	       "        movnti %%edx, %%es:52(%3)\n"
- 	       "14:     movl 56(%4), %%eax\n"
- 	       "91:     movl 60(%4), %%edx\n"
--	       "        movnti %%eax, 56(%3)\n"
--	       "        movnti %%edx, 60(%3)\n"
-+	       "        movnti %%eax, %%es:56(%3)\n"
-+	       "        movnti %%edx, %%es:60(%3)\n"
- 	       "        addl $-64, %0\n"
- 	       "        addl $64, %4\n"
- 	       "        addl $64, %3\n"
-@@ -490,6 +620,8 @@ static unsigned long __copy_user_zeroing
- 	       "        movl %%eax,%0\n"
- 	       "7:      rep; movsb\n"
- 	       "8:\n"
-+	       "        pushl %%ss\n"
-+	       "        popl %%ds\n"
- 	       ".section .fixup,\"ax\"\n"
- 	       "9:      lea 0(%%eax,%0,4),%0\n"
- 	       "16:     pushl %0\n"
-@@ -524,7 +656,7 @@ static unsigned long __copy_user_zeroing
- 	       "        .long 7b,16b\n"
- 	       ".previous"
- 	       : "=&c"(size), "=&D" (d0), "=&S" (d1)
--	       :  "1"(to), "2"(from), "0"(size)
-+	       :  "1"(to), "2"(from), "0"(size), "r"(__USER_DS)
- 	       : "eax", "edx", "memory");
- 	return size;
- }
-@@ -535,6 +667,7 @@ static unsigned long __copy_user_intel_n
-         int d0, d1;
- 
- 	__asm__ __volatile__(
-+	       "        movw %w6, %%ds\n"
- 	       "        .align 2,0x90\n"
- 	       "0:      movl 32(%4), %%eax\n"
- 	       "        cmpl $67, %0\n"
-@@ -543,36 +676,36 @@ static unsigned long __copy_user_intel_n
- 	       "        .align 2,0x90\n"
- 	       "2:      movl 0(%4), %%eax\n"
- 	       "21:     movl 4(%4), %%edx\n"
--	       "        movnti %%eax, 0(%3)\n"
--	       "        movnti %%edx, 4(%3)\n"
-+	       "        movnti %%eax, %%es:0(%3)\n"
-+	       "        movnti %%edx, %%es:4(%3)\n"
- 	       "3:      movl 8(%4), %%eax\n"
- 	       "31:     movl 12(%4),%%edx\n"
--	       "        movnti %%eax, 8(%3)\n"
--	       "        movnti %%edx, 12(%3)\n"
-+	       "        movnti %%eax, %%es:8(%3)\n"
-+	       "        movnti %%edx, %%es:12(%3)\n"
- 	       "4:      movl 16(%4), %%eax\n"
- 	       "41:     movl 20(%4), %%edx\n"
--	       "        movnti %%eax, 16(%3)\n"
--	       "        movnti %%edx, 20(%3)\n"
-+	       "        movnti %%eax, %%es:16(%3)\n"
-+	       "        movnti %%edx, %%es:20(%3)\n"
- 	       "10:     movl 24(%4), %%eax\n"
- 	       "51:     movl 28(%4), %%edx\n"
--	       "        movnti %%eax, 24(%3)\n"
--	       "        movnti %%edx, 28(%3)\n"
-+	       "        movnti %%eax, %%es:24(%3)\n"
-+	       "        movnti %%edx, %%es:28(%3)\n"
- 	       "11:     movl 32(%4), %%eax\n"
- 	       "61:     movl 36(%4), %%edx\n"
--	       "        movnti %%eax, 32(%3)\n"
--	       "        movnti %%edx, 36(%3)\n"
-+	       "        movnti %%eax, %%es:32(%3)\n"
-+	       "        movnti %%edx, %%es:36(%3)\n"
- 	       "12:     movl 40(%4), %%eax\n"
- 	       "71:     movl 44(%4), %%edx\n"
--	       "        movnti %%eax, 40(%3)\n"
--	       "        movnti %%edx, 44(%3)\n"
-+	       "        movnti %%eax, %%es:40(%3)\n"
-+	       "        movnti %%edx, %%es:44(%3)\n"
- 	       "13:     movl 48(%4), %%eax\n"
- 	       "81:     movl 52(%4), %%edx\n"
--	       "        movnti %%eax, 48(%3)\n"
--	       "        movnti %%edx, 52(%3)\n"
-+	       "        movnti %%eax, %%es:48(%3)\n"
-+	       "        movnti %%edx, %%es:52(%3)\n"
- 	       "14:     movl 56(%4), %%eax\n"
- 	       "91:     movl 60(%4), %%edx\n"
--	       "        movnti %%eax, 56(%3)\n"
--	       "        movnti %%edx, 60(%3)\n"
-+	       "        movnti %%eax, %%es:56(%3)\n"
-+	       "        movnti %%edx, %%es:60(%3)\n"
- 	       "        addl $-64, %0\n"
- 	       "        addl $64, %4\n"
- 	       "        addl $64, %3\n"
-@@ -587,6 +720,8 @@ static unsigned long __copy_user_intel_n
- 	       "        movl %%eax,%0\n"
- 	       "7:      rep; movsb\n"
- 	       "8:\n"
-+	       "        pushl %%ss\n"
-+	       "        popl %%ds\n"
- 	       ".section .fixup,\"ax\"\n"
- 	       "9:      lea 0(%%eax,%0,4),%0\n"
- 	       "16:     jmp 8b\n"
-@@ -615,7 +750,7 @@ static unsigned long __copy_user_intel_n
- 	       "        .long 7b,16b\n"
- 	       ".previous"
- 	       : "=&c"(size), "=&D" (d0), "=&S" (d1)
--	       :  "1"(to), "2"(from), "0"(size)
-+	       :  "1"(to), "2"(from), "0"(size), "r"(__USER_DS)
- 	       : "eax", "edx", "memory");
- 	return size;
- }
-@@ -628,90 +763,146 @@ static unsigned long __copy_user_intel_n
-  */
- unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
- 					unsigned long size);
--unsigned long __copy_user_intel(void __user *to, const void *from,
-+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
-+					unsigned long size);
-+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
- 					unsigned long size);
- unsigned long __copy_user_zeroing_intel_nocache(void *to,
- 				const void __user *from, unsigned long size);
- #endif /* CONFIG_X86_INTEL_USERCOPY */
- 
- /* Generic arbitrary sized copy.  */
--#define __copy_user(to,from,size)					\
--do {									\
--	int __d0, __d1, __d2;						\
--	__asm__ __volatile__(						\
--		"	cmp  $7,%0\n"					\
--		"	jbe  1f\n"					\
--		"	movl %1,%0\n"					\
--		"	negl %0\n"					\
--		"	andl $7,%0\n"					\
--		"	subl %0,%3\n"					\
--		"4:	rep; movsb\n"					\
--		"	movl %3,%0\n"					\
--		"	shrl $2,%0\n"					\
--		"	andl $3,%3\n"					\
--		"	.align 2,0x90\n"				\
--		"0:	rep; movsl\n"					\
--		"	movl %3,%0\n"					\
--		"1:	rep; movsb\n"					\
--		"2:\n"							\
--		".section .fixup,\"ax\"\n"				\
--		"5:	addl %3,%0\n"					\
--		"	jmp 2b\n"					\
--		"3:	lea 0(%3,%0,4),%0\n"				\
--		"	jmp 2b\n"					\
--		".previous\n"						\
--		".section __ex_table,\"a\"\n"				\
--		"	.align 4\n"					\
--		"	.long 4b,5b\n"					\
--		"	.long 0b,3b\n"					\
--		"	.long 1b,2b\n"					\
--		".previous"						\
--		: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)	\
--		: "3"(size), "0"(size), "1"(to), "2"(from)		\
--		: "memory");						\
--} while (0)
--
--#define __copy_user_zeroing(to,from,size)				\
--do {									\
--	int __d0, __d1, __d2;						\
--	__asm__ __volatile__(						\
--		"	cmp  $7,%0\n"					\
--		"	jbe  1f\n"					\
--		"	movl %1,%0\n"					\
--		"	negl %0\n"					\
--		"	andl $7,%0\n"					\
--		"	subl %0,%3\n"					\
--		"4:	rep; movsb\n"					\
--		"	movl %3,%0\n"					\
--		"	shrl $2,%0\n"					\
--		"	andl $3,%3\n"					\
--		"	.align 2,0x90\n"				\
--		"0:	rep; movsl\n"					\
--		"	movl %3,%0\n"					\
--		"1:	rep; movsb\n"					\
--		"2:\n"							\
--		".section .fixup,\"ax\"\n"				\
--		"5:	addl %3,%0\n"					\
--		"	jmp 6f\n"					\
--		"3:	lea 0(%3,%0,4),%0\n"				\
--		"6:	pushl %0\n"					\
--		"	pushl %%eax\n"					\
--		"	xorl %%eax,%%eax\n"				\
--		"	rep; stosb\n"					\
--		"	popl %%eax\n"					\
--		"	popl %0\n"					\
--		"	jmp 2b\n"					\
--		".previous\n"						\
--		".section __ex_table,\"a\"\n"				\
--		"	.align 4\n"					\
--		"	.long 4b,5b\n"					\
--		"	.long 0b,3b\n"					\
--		"	.long 1b,6b\n"					\
--		".previous"						\
--		: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)	\
--		: "3"(size), "0"(size), "1"(to), "2"(from)		\
--		: "memory");						\
--} while (0)
-+static unsigned long
-+__generic_copy_to_user(void __user *to, const void *from, unsigned long size)
-+{
-+	int __d0, __d1, __d2;
-+
-+	__asm__ __volatile__(
-+		"	movw %w8,%%es\n"
-+		"	cmp  $7,%0\n"
-+		"	jbe  1f\n"
-+		"	movl %1,%0\n"
-+		"	negl %0\n"
-+		"	andl $7,%0\n"
-+		"	subl %0,%3\n"
-+		"4:	rep; movsb\n"
-+		"	movl %3,%0\n"
-+		"	shrl $2,%0\n"
-+		"	andl $3,%3\n"
-+		"	.align 2,0x90\n"
-+		"0:	rep; movsl\n"
-+		"	movl %3,%0\n"
-+		"1:	rep; movsb\n"
-+		"2:\n"
-+		"	pushl %%ss\n"
-+		"	popl %%es\n"
-+		".section .fixup,\"ax\"\n"
-+		"5:	addl %3,%0\n"
-+		"	jmp 2b\n"
-+		"3:	lea 0(%3,%0,4),%0\n"
-+		"	jmp 2b\n"
-+		".previous\n"
-+		".section __ex_table,\"a\"\n"
-+		"	.align 4\n"
-+		"	.long 4b,5b\n"
-+		"	.long 0b,3b\n"
-+		"	.long 1b,2b\n"
-+		".previous"
-+		: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)
-+		: "3"(size), "0"(size), "1"(to), "2"(from), "r"(__USER_DS)
-+		: "memory");
-+	return size;
-+}
-+
-+static unsigned long
-+__generic_copy_from_user(void *to, const void __user *from, unsigned long size)
-+{
-+	int __d0, __d1, __d2;
-+
-+	__asm__ __volatile__(
-+		"	movw %w8,%%ds\n"
-+		"	cmp  $7,%0\n"
-+		"	jbe  1f\n"
-+		"	movl %1,%0\n"
-+		"	negl %0\n"
-+		"	andl $7,%0\n"
-+		"	subl %0,%3\n"
-+		"4:	rep; movsb\n"
-+		"	movl %3,%0\n"
-+		"	shrl $2,%0\n"
-+		"	andl $3,%3\n"
-+		"	.align 2,0x90\n"
-+		"0:	rep; movsl\n"
-+		"	movl %3,%0\n"
-+		"1:	rep; movsb\n"
-+		"2:\n"
-+		"	pushl %%ss\n"
-+		"	popl %%ds\n"
-+		".section .fixup,\"ax\"\n"
-+		"5:	addl %3,%0\n"
-+		"	jmp 2b\n"
-+		"3:	lea 0(%3,%0,4),%0\n"
-+		"	jmp 2b\n"
-+		".previous\n"
-+		".section __ex_table,\"a\"\n"
-+		"	.align 4\n"
-+		"	.long 4b,5b\n"
-+		"	.long 0b,3b\n"
-+		"	.long 1b,2b\n"
-+		".previous"
-+		: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)
-+		: "3"(size), "0"(size), "1"(to), "2"(from), "r"(__USER_DS)
-+		: "memory");
-+	return size;
-+}
-+
-+static unsigned long
-+__copy_user_zeroing(void *to, const void __user *from, unsigned long size)
-+{
-+	int __d0, __d1, __d2;
-+
-+	__asm__ __volatile__(
-+		"	movw %w8,%%ds\n"
-+		"	cmp  $7,%0\n"
-+		"	jbe  1f\n"
-+		"	movl %1,%0\n"
-+		"	negl %0\n"
-+		"	andl $7,%0\n"
-+		"	subl %0,%3\n"
-+		"4:	rep; movsb\n"
-+		"	movl %3,%0\n"
-+		"	shrl $2,%0\n"
-+		"	andl $3,%3\n"
-+		"	.align 2,0x90\n"
-+		"0:	rep; movsl\n"
-+		"	movl %3,%0\n"
-+		"1:	rep; movsb\n"
-+		"2:\n"
-+		"	pushl %%ss\n"
-+		"	popl %%ds\n"
-+		".section .fixup,\"ax\"\n"
-+		"5:	addl %3,%0\n"
-+		"	jmp 6f\n"
-+		"3:	lea 0(%3,%0,4),%0\n"
-+		"6:	pushl %0\n"
-+		"	pushl %%eax\n"
-+		"	xorl %%eax,%%eax\n"
-+		"	rep; stosb\n"
-+		"	popl %%eax\n"
-+		"	popl %0\n"
-+		"	jmp 2b\n"
-+		".previous\n"
-+		".section __ex_table,\"a\"\n"
-+		"	.align 4\n"
-+		"	.long 4b,5b\n"
-+		"	.long 0b,3b\n"
-+		"	.long 1b,6b\n"
-+		".previous"
-+		: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)
-+		: "3"(size), "0"(size), "1"(to), "2"(from), "r"(__USER_DS)
-+		: "memory");
-+	return size;
-+}
- 
- unsigned long __copy_to_user_ll(void __user *to, const void *from,
- 				unsigned long n)
-@@ -774,9 +965,9 @@ survive:
- 	}
- #endif
- 	if (movsl_is_ok(to, from, n))
--		__copy_user(to, from, n);
-+		n = __generic_copy_to_user(to, from, n);
- 	else
--		n = __copy_user_intel(to, from, n);
-+		n = __generic_copy_to_user_intel(to, from, n);
- 	return n;
- }
- EXPORT_SYMBOL(__copy_to_user_ll);
-@@ -785,7 +976,7 @@ unsigned long __copy_from_user_ll(void *
- 					unsigned long n)
- {
- 	if (movsl_is_ok(to, from, n))
--		__copy_user_zeroing(to, from, n);
-+		n = __copy_user_zeroing(to, from, n);
- 	else
- 		n = __copy_user_zeroing_intel(to, from, n);
- 	return n;
-@@ -796,9 +987,9 @@ unsigned long __copy_from_user_ll_nozero
- 					 unsigned long n)
- {
- 	if (movsl_is_ok(to, from, n))
--		__copy_user(to, from, n);
-+		n = __generic_copy_from_user(to, from, n);
- 	else
--		n = __copy_user_intel((void __user *)to,
-+		n = __generic_copy_from_user_intel((void __user *)to,
- 				      (const void *)from, n);
- 	return n;
- }
-@@ -809,11 +1000,11 @@ unsigned long __copy_from_user_ll_nocach
- {
- #ifdef CONFIG_X86_INTEL_USERCOPY
- 	if ( n > 64 && cpu_has_xmm2)
--                n = __copy_user_zeroing_intel_nocache(to, from, n);
-+		n = __copy_user_zeroing_intel_nocache(to, from, n);
- 	else
--		__copy_user_zeroing(to, from, n);
-+		n = __copy_user_zeroing(to, from, n);
- #else
--        __copy_user_zeroing(to, from, n);
-+	n = __copy_user_zeroing(to, from, n);
- #endif
- 	return n;
- }
-@@ -823,11 +1014,11 @@ unsigned long __copy_from_user_ll_nocach
- {
- #ifdef CONFIG_X86_INTEL_USERCOPY
- 	if ( n > 64 && cpu_has_xmm2)
--                n = __copy_user_intel_nocache(to, from, n);
-+		n = __copy_user_intel_nocache(to, from, n);
- 	else
--		__copy_user(to, from, n);
-+		n = __generic_copy_from_user(to, from, n);
- #else
--        __copy_user(to, from, n);
-+	n = __generic_copy_from_user(to, from, n);
- #endif
- 	return n;
- }
-@@ -880,3 +1071,30 @@ copy_from_user(void *to, const void __us
- 	return n;
- }
- EXPORT_SYMBOL(copy_from_user);
-+
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+void __set_fs(mm_segment_t x, int cpu)
-+{
-+	unsigned long limit = x.seg;
-+	__u32 a, b;
-+
-+	current_thread_info()->addr_limit = x;
-+	if (likely(limit))
-+		limit = (limit - 1UL) >> PAGE_SHIFT;
-+	pack_descriptor(&a, &b, 0UL, limit, 0xF3, 0xC);
-+	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_DS, a, b);
-+}
-+
-+void set_fs(mm_segment_t x)
-+{
-+	__set_fs(x, get_cpu());
-+	put_cpu_no_resched();
-+}
-+#else
-+void set_fs(mm_segment_t x)
-+{
-+	current_thread_info()->addr_limit = x;
-+}
-+#endif
-+
-+EXPORT_SYMBOL(set_fs);
---- a/arch/i386/mach-default/setup.c
-+++ b/arch/i386/mach-default/setup.c
-@@ -35,7 +35,7 @@ void __init pre_intr_init_hook(void)
- /*
-  * IRQ2 is cascade interrupt to second interrupt controller
-  */
--static struct irqaction irq2 = { no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL};
-+static struct irqaction irq2 = { no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL, 0, NULL};
- 
- /**
-  * intr_init_hook - post gate setup interrupt initialisation
---- a/arch/i386/mach-voyager/voyager_basic.c
-+++ b/arch/i386/mach-voyager/voyager_basic.c
-@@ -130,7 +130,7 @@ voyager_memory_detect(int region, __u32 
- 	__u8 cmos[4];
- 	ClickMap_t *map;
- 	unsigned long map_addr;
--	unsigned long old;
-+	pte_t old;
- 
- 	if(region >= CLICK_ENTRIES) {
- 		printk("Voyager: Illegal ClickMap region %d\n", region);
-@@ -144,7 +144,7 @@ voyager_memory_detect(int region, __u32 
- 
- 	/* steal page 0 for this */
- 	old = pg0[0];
--	pg0[0] = ((map_addr & PAGE_MASK) | _PAGE_RW | _PAGE_PRESENT);
-+	pg0[0] = __pte((map_addr & PAGE_MASK) | _PAGE_RW | _PAGE_PRESENT);
- 	local_flush_tlb();
- 	/* now clear everything out but page 0 */
- 	map = (ClickMap_t *)(map_addr & (~PAGE_MASK));
---- a/arch/i386/mach-voyager/voyager_smp.c
-+++ b/arch/i386/mach-voyager/voyager_smp.c
-@@ -554,6 +554,10 @@ do_boot_cpu(__u8 cpu)
- 	__u32 *hijack_vector;
- 	__u32 start_phys_address = setup_trampoline();
- 
-+#ifdef CONFIG_PAX_KERNEXEC
-+	unsigned long cr0;
-+#endif
-+
- 	/* There's a clever trick to this: The linux trampoline is
- 	 * compiled to begin at absolute location zero, so make the
- 	 * address zero but have the data segment selector compensate
-@@ -573,7 +577,17 @@ do_boot_cpu(__u8 cpu)
- 
- 	init_gdt(cpu);
-  	per_cpu(current_task, cpu) = idle;
--	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_open_kernel(cr0);
-+#endif
-+
-+	early_gdt_descr.address = get_cpu_gdt_table(cpu);
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_close_kernel(cr0);
-+#endif
-+
- 	irq_ctx_init(cpu);
- 
- 	/* Note: Don't modify initial ss override */
-@@ -1276,7 +1290,7 @@ smp_local_timer_interrupt(void)
- 						per_cpu(prof_counter, cpu);
- 		}
- 
--		update_process_times(user_mode_vm(get_irq_regs()));
-+		update_process_times(user_mode(get_irq_regs()));
- 	}
- 
- 	if( ((1<<cpu) & voyager_extended_vic_processors) == 0)
---- a/arch/i386/mm/boot_ioremap.c
-+++ b/arch/i386/mm/boot_ioremap.c
-@@ -7,57 +7,37 @@
-  * Written by Dave Hansen <haveblue@us.ibm.com>
-  */
- 
--
--/*
-- * We need to use the 2-level pagetable functions, but CONFIG_X86_PAE
-- * keeps that from happenning.  If anyone has a better way, I'm listening.
-- *
-- * boot_pte_t is defined only if this all works correctly
-- */
--
--#undef CONFIG_X86_PAE
- #undef CONFIG_PARAVIRT
- #include <asm/page.h>
- #include <asm/pgtable.h>
- #include <asm/tlbflush.h>
- #include <linux/init.h>
- #include <linux/stddef.h>
--
--/* 
-- * I'm cheating here.  It is known that the two boot PTE pages are 
-- * allocated next to each other.  I'm pretending that they're just
-- * one big array. 
-- */
--
--#define BOOT_PTE_PTRS (PTRS_PER_PTE*2)
--
--static unsigned long boot_pte_index(unsigned long vaddr) 
--{
--	return __pa(vaddr) >> PAGE_SHIFT;
--}
--
--static inline boot_pte_t* boot_vaddr_to_pte(void *address)
--{
--	boot_pte_t* boot_pg = (boot_pte_t*)pg0;
--	return &boot_pg[boot_pte_index((unsigned long)address)];
--}
-+#include <linux/sched.h>
- 
- /*
-  * This is only for a caller who is clever enough to page-align
-  * phys_addr and virtual_source, and who also has a preference
-  * about which virtual address from which to steal ptes
-  */
--static void __boot_ioremap(unsigned long phys_addr, unsigned long nrpages, 
--		    void* virtual_source)
-+static void __init __boot_ioremap(unsigned long phys_addr, unsigned long nrpages, 
-+		    char* virtual_source)
- {
--	boot_pte_t* pte;
--	int i;
--	char *vaddr = virtual_source;
-+	pgd_t *pgd;
-+	pud_t *pud;
-+	pmd_t *pmd;
-+	pte_t* pte;
-+	unsigned int i;
-+	unsigned long vaddr = (unsigned long)virtual_source;
-+
-+	pgd = pgd_offset_k(vaddr);
-+	pud = pud_offset(pgd, vaddr);
-+	pmd = pmd_offset(pud, vaddr);
-+	pte = pte_offset_kernel(pmd, vaddr);
- 
--	pte = boot_vaddr_to_pte(virtual_source);
- 	for (i=0; i < nrpages; i++, phys_addr += PAGE_SIZE, pte++) {
- 		set_pte(pte, pfn_pte(phys_addr>>PAGE_SHIFT, PAGE_KERNEL));
--		__flush_tlb_one(&vaddr[i*PAGE_SIZE]);
-+		__flush_tlb_one(&virtual_source[i*PAGE_SIZE]);
- 	}
- }
- 
---- a/arch/i386/mm/extable.c
-+++ b/arch/i386/mm/extable.c
-@@ -4,14 +4,63 @@
- 
- #include <linux/module.h>
- #include <linux/spinlock.h>
-+#include <linux/sort.h>
- #include <asm/uaccess.h>
- 
-+/*
-+ * The exception table needs to be sorted so that the binary
-+ * search that we use to find entries in it works properly.
-+ * This is used both for the kernel exception table and for
-+ * the exception tables of modules that get loaded.
-+ */
-+static int cmp_ex(const void *a, const void *b)
-+{
-+	const struct exception_table_entry *x = a, *y = b;
-+
-+	/* avoid overflow */
-+	if (x->insn > y->insn)
-+		return 1;
-+	if (x->insn < y->insn)
-+		return -1;
-+	return 0;
-+}
-+
-+static void swap_ex(void *a, void *b, int size)
-+{
-+	struct exception_table_entry t, *x = a, *y = b;
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	unsigned long cr0;
-+#endif
-+
-+	t = *x;
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_open_kernel(cr0);
-+#endif
-+
-+	*x = *y;
-+	*y = t;
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_close_kernel(cr0);
-+#endif
-+
-+}
-+
-+void sort_extable(struct exception_table_entry *start,
-+		  struct exception_table_entry *finish)
-+{
-+	sort(start, finish - start, sizeof(struct exception_table_entry),
-+	     cmp_ex, swap_ex);
-+}
-+
- int fixup_exception(struct pt_regs *regs)
- {
- 	const struct exception_table_entry *fixup;
- 
- #ifdef CONFIG_PNPBIOS
--	if (unlikely(SEGMENT_IS_PNP_CODE(regs->xcs)))
-+	if (unlikely(!(regs->eflags & VM_MASK) && SEGMENT_IS_PNP_CODE(regs->xcs)))
- 	{
- 		extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
- 		extern u32 pnp_bios_is_utter_crap;
---- a/arch/i386/mm/fault.c
-+++ b/arch/i386/mm/fault.c
-@@ -25,10 +25,14 @@
- #include <linux/kprobes.h>
- #include <linux/uaccess.h>
- #include <linux/kdebug.h>
-+#include <linux/unistd.h>
-+#include <linux/compiler.h>
-+#include <linux/binfmts.h>
- 
- #include <asm/system.h>
- #include <asm/desc.h>
- #include <asm/segment.h>
-+#include <asm/tlbflush.h>
- 
- extern void die(const char *,struct pt_regs *,long);
- 
-@@ -79,7 +83,8 @@ static inline unsigned long get_segment_
- {
- 	unsigned long eip = regs->eip;
- 	unsigned seg = regs->xcs & 0xffff;
--	u32 seg_ar, seg_limit, base, *desc;
-+	u32 seg_ar, seg_limit, base;
-+	struct desc_struct *desc;
- 
- 	/* Unlikely, but must come before segment checks. */
- 	if (unlikely(regs->eflags & VM_MASK)) {
-@@ -93,7 +98,7 @@ static inline unsigned long get_segment_
- 	
- 	/* By far the most common cases. */
- 	if (likely(SEGMENT_IS_FLAT_CODE(seg)))
--		return eip;
-+		return eip + (seg == __KERNEL_CS ? __KERNEL_TEXT_OFFSET : 0);
- 
- 	/* Check the segment exists, is within the current LDT/GDT size,
- 	   that kernel/user (ring 0..3) has the appropriate privilege,
-@@ -111,16 +116,19 @@ static inline unsigned long get_segment_
- 	if (seg & (1<<2)) {
- 		/* Must lock the LDT while reading it. */
- 		down(&current->mm->context.sem);
--		desc = current->mm->context.ldt;
--		desc = (void *)desc + (seg & ~7);
-+		if ((seg >> 3) >= current->mm->context.size) {
-+			up(&current->mm->context.sem);
-+			*eip_limit = 0;
-+			return 1;	 /* So that returned eip > *eip_limit. */
-+		}
-+		desc = &current->mm->context.ldt[seg >> 3];
- 	} else {
- 		/* Must disable preemption while reading the GDT. */
-- 		desc = (u32 *)get_cpu_gdt_table(get_cpu());
--		desc = (void *)desc + (seg & ~7);
-+		desc = &get_cpu_gdt_table(get_cpu())[seg >> 3];
- 	}
- 
- 	/* Decode the code segment base from the descriptor */
--	base = get_desc_base((unsigned long *)desc);
-+	base = get_desc_base(desc);
- 
- 	if (seg & (1<<2)) { 
- 		up(&current->mm->context.sem);
-@@ -221,6 +229,30 @@ static noinline void force_sig_info_faul
- 
- fastcall void do_invalid_op(struct pt_regs *, unsigned long);
- 
-+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
-+static int pax_handle_fetch_fault(struct pt_regs *regs);
-+#endif
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
-+{
-+	pgd_t *pgd;
-+	pud_t *pud;
-+	pmd_t *pmd;
-+
-+	pgd = pgd_offset(mm, address);
-+	if (!pgd_present(*pgd))
-+		return NULL;
-+	pud = pud_offset(pgd, address);
-+	if (!pud_present(*pud))
-+		return NULL;
-+	pmd = pmd_offset(pud, address);
-+	if (!pmd_present(*pmd))
-+		return NULL;
-+	return pmd;
-+}
-+#endif
-+
- static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
- {
- 	unsigned index = pgd_index(address);
-@@ -304,14 +336,21 @@ fastcall void __kprobes do_page_fault(st
- 	struct task_struct *tsk;
- 	struct mm_struct *mm;
- 	struct vm_area_struct * vma;
--	unsigned long address;
- 	int write, si_code;
- 	int fault;
-+	pte_t *pte;
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+	pmd_t *pmd;
-+	spinlock_t *ptl;
-+	unsigned char pte_mask;
-+#endif
- 
- 	/* get the address */
--        address = read_cr2();
-+	const unsigned long address = read_cr2();
- 
- 	tsk = current;
-+	mm = tsk->mm;
- 
- 	si_code = SEGV_MAPERR;
- 
-@@ -348,14 +387,12 @@ fastcall void __kprobes do_page_fault(st
- 	if (regs->eflags & (X86_EFLAGS_IF|VM_MASK))
- 		local_irq_enable();
- 
--	mm = tsk->mm;
--
- 	/*
- 	 * If we're in an interrupt, have no user context or are running in an
- 	 * atomic region then we must not take the fault..
- 	 */
- 	if (in_atomic() || !mm)
--		goto bad_area_nosemaphore;
-+		goto bad_area_nopax;
- 
- 	/* When running in the kernel we expect faults to occur only to
- 	 * addresses in user space.  All other faults represent errors in the
-@@ -375,10 +412,104 @@ fastcall void __kprobes do_page_fault(st
- 	if (!down_read_trylock(&mm->mmap_sem)) {
- 		if ((error_code & 4) == 0 &&
- 		    !search_exception_tables(regs->eip))
--			goto bad_area_nosemaphore;
-+			goto bad_area_nopax;
- 		down_read(&mm->mmap_sem);
- 	}
- 
-+#ifdef CONFIG_PAX_PAGEEXEC
-+	if (nx_enabled || (error_code & 5) != 5 || (regs->eflags & X86_EFLAGS_VM) ||
-+	    !(mm->pax_flags & MF_PAX_PAGEEXEC))
-+		goto not_pax_fault;
-+
-+	/* PaX: it's our fault, let's handle it if we can */
-+
-+	/* PaX: take a look at read faults before acquiring any locks */
-+	if (unlikely(!(error_code & 2) && (regs->eip == address))) {
-+		/* instruction fetch attempt from a protected page in user mode */
-+		up_read(&mm->mmap_sem);
-+
-+#ifdef CONFIG_PAX_EMUTRAMP
-+		switch (pax_handle_fetch_fault(regs)) {
-+		case 2:
-+			return;
-+		}
-+#endif
-+
-+		pax_report_fault(regs, (void *)regs->eip, (void *)regs->esp);
-+		do_exit(SIGKILL);
-+	}
-+
-+	pmd = pax_get_pmd(mm, address);
-+	if (unlikely(!pmd))
-+		goto not_pax_fault;
-+
-+	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
-+	if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
-+		pte_unmap_unlock(pte, ptl);
-+		goto not_pax_fault;
-+	}
-+
-+	if (unlikely((error_code & 2) && !pte_write(*pte))) {
-+		/* write attempt to a protected page in user mode */
-+		pte_unmap_unlock(pte, ptl);
-+		goto not_pax_fault;
-+	}
-+
-+#ifdef CONFIG_SMP
-+	if (likely(address > get_limit(regs->xcs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
-+#else
-+	if (likely(address > get_limit(regs->xcs)))
-+#endif
-+	{
-+		set_pte(pte, pte_mkread(*pte));
-+		__flush_tlb_one(address);
-+		pte_unmap_unlock(pte, ptl);
-+		up_read(&mm->mmap_sem);
-+		return;
-+	}
-+
-+	pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & 2) << (_PAGE_BIT_DIRTY-1));
-+
-+	/*
-+	 * PaX: fill DTLB with user rights and retry
-+	 */
-+	__asm__ __volatile__ (
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+		"movw %w4,%%es\n"
-+#endif
-+		"orb %2,(%1)\n"
-+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
-+/*
-+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
-+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
-+ * page fault when examined during a TLB load attempt. this is true not only
-+ * for PTEs holding a non-present entry but also present entries that will
-+ * raise a page fault (such as those set up by PaX, or the copy-on-write
-+ * mechanism). in effect it means that we do *not* need to flush the TLBs
-+ * for our target pages since their PTEs are simply not in the TLBs at all.
-+
-+ * the best thing in omitting it is that we gain around 15-20% speed in the
-+ * fast path of the page fault handler and can get rid of tracing since we
-+ * can no longer flush unintended entries.
-+ */
-+		"invlpg (%0)\n"
-+#endif
-+		"testb $0,%%es:(%0)\n"
-+		"xorb %3,(%1)\n"
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+		"pushl %%ss\n"
-+		"popl %%es\n"
-+#endif
-+		:
-+		: "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER), "r" (__USER_DS)
-+		: "memory", "cc");
-+	pte_unmap_unlock(pte, ptl);
-+	up_read(&mm->mmap_sem);
-+	return;
-+
-+not_pax_fault:
-+#endif
-+
- 	vma = find_vma(mm, address);
- 	if (!vma)
- 		goto bad_area;
-@@ -396,6 +527,12 @@ fastcall void __kprobes do_page_fault(st
- 		if (address + 65536 + 32 * sizeof(unsigned long) < regs->esp)
- 			goto bad_area;
- 	}
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if ((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)
-+		goto bad_area;
-+#endif
-+
- 	if (expand_stack(vma, address))
- 		goto bad_area;
- /*
-@@ -405,6 +542,8 @@ fastcall void __kprobes do_page_fault(st
- good_area:
- 	si_code = SEGV_ACCERR;
- 	write = 0;
-+	if (nx_enabled && (error_code & 16) && !(vma->vm_flags & VM_EXEC))
-+		goto bad_area;
- 	switch (error_code & 3) {
- 		default:	/* 3: write, present */
- 				/* fall through */
-@@ -458,6 +597,41 @@ bad_area:
- 	up_read(&mm->mmap_sem);
- 
- bad_area_nosemaphore:
-+
-+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
-+	if (mm && (error_code & 4) && !(regs->eflags & X86_EFLAGS_VM)) {
-+		/*
-+		 * It's possible to have interrupts off here.
-+		 */
-+		local_irq_enable();
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+		if ((nx_enabled && (error_code & 16)) ||
-+		    ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(error_code & 3) && (regs->eip == address))) {
-+			pax_report_fault(regs, (void *)regs->eip, (void *)regs->esp);
-+			do_exit(SIGKILL);
-+		}
-+#endif
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+		if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & 3) && (regs->eip + SEGMEXEC_TASK_SIZE == address)) {
-+
-+#ifdef CONFIG_PAX_EMUTRAMP
-+			switch (pax_handle_fetch_fault(regs)) {
-+			case 2:
-+				return;
-+			}
-+#endif
-+
-+			pax_report_fault(regs, (void *)regs->eip, (void *)regs->esp);
-+			do_exit(SIGKILL);
-+		}
-+#endif
-+
-+	}
-+#endif
-+
-+bad_area_nopax:
- 	/* User mode accesses just cause a SIGSEGV */
- 	if (error_code & 4) {
- 		/*
-@@ -495,7 +669,7 @@ bad_area_nosemaphore:
- 	if (boot_cpu_data.f00f_bug) {
- 		unsigned long nr;
- 		
--		nr = (address - idt_descr.address) >> 3;
-+		nr = (address - (unsigned long)idt_descr.address) >> 3;
- 
- 		if (nr == 6) {
- 			do_invalid_op(regs, 0);
-@@ -528,18 +702,34 @@ no_context:
- 		__typeof__(pte_val(__pte(0))) page;
- 
- #ifdef CONFIG_X86_PAE
--		if (error_code & 16) {
--			pte_t *pte = lookup_address(address);
-+		if (nx_enabled && (error_code & 16)) {
-+			pte = lookup_address(address);
- 
- 			if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
- 				printk(KERN_CRIT "kernel tried to execute "
- 					"NX-protected page - exploit attempt? "
--					"(uid: %d)\n", current->uid);
-+					"(uid: %d, task: %s, pid: %d)\n",
-+					current->uid, current->comm, current->pid);
- 		}
- #endif
- 		if (address < PAGE_SIZE)
- 			printk(KERN_ALERT "BUG: unable to handle kernel NULL "
- 					"pointer dereference");
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+#ifdef CONFIG_MODULES
-+		else if (init_mm.start_code <= address && address < (unsigned long)MODULES_END)
-+#else
-+		else if (init_mm.start_code <= address && address < init_mm.end_code)
-+#endif
-+			if (tsk->signal->curr_ip)
-+				printk(KERN_ERR "PAX: From %u.%u.%u.%u: %s:%d, uid/euid: %u/%u, attempted to modify kernel code",
-+					NIPQUAD(tsk->signal->curr_ip), tsk->comm, tsk->pid, tsk->uid, tsk->euid);
-+			else
-+				printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code",
-+					tsk->comm, tsk->pid, tsk->uid, tsk->euid);
-+#endif
-+
- 		else
- 			printk(KERN_ALERT "BUG: unable to handle kernel paging"
- 					" request");
-@@ -570,7 +760,7 @@ no_context:
- 		 * it's allocated already.
- 		 */
- 		if ((page >> PAGE_SHIFT) < max_low_pfn
--		    && (page & _PAGE_PRESENT)) {
-+		    && (page & (_PAGE_PRESENT | _PAGE_PSE)) == _PAGE_PRESENT) {
- 			page &= PAGE_MASK;
- 			page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
- 			                                         & (PTRS_PER_PTE - 1)];
-@@ -655,3 +845,92 @@ void vmalloc_sync_all(void)
- 			start = address + PGDIR_SIZE;
- 	}
- }
-+
-+#ifdef CONFIG_PAX_EMUTRAMP
-+/*
-+ * PaX: decide what to do with offenders (regs->eip = fault address)
-+ *
-+ * returns 1 when task should be killed
-+ *         2 when gcc trampoline was detected
-+ */
-+static int pax_handle_fetch_fault(struct pt_regs *regs)
-+{
-+	int err;
-+
-+	if (regs->eflags & X86_EFLAGS_VM)
-+		return 1;
-+
-+	if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
-+		return 1;
-+
-+	do { /* PaX: gcc trampoline emulation #1 */
-+		unsigned char mov1, mov2;
-+		unsigned short jmp;
-+		unsigned long addr1, addr2;
-+
-+		err = get_user(mov1, (unsigned char __user *)regs->eip);
-+		err |= get_user(addr1, (unsigned long __user *)(regs->eip + 1));
-+		err |= get_user(mov2, (unsigned char __user *)(regs->eip + 5));
-+		err |= get_user(addr2, (unsigned long __user *)(regs->eip + 6));
-+		err |= get_user(jmp, (unsigned short __user *)(regs->eip + 10));
-+
-+		if (err)
-+			break;
-+
-+		if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
-+			regs->ecx = addr1;
-+			regs->eax = addr2;
-+			regs->eip = addr2;
-+			return 2;
-+		}
-+	} while (0);
-+
-+	do { /* PaX: gcc trampoline emulation #2 */
-+		unsigned char mov, jmp;
-+		unsigned long addr1, addr2;
-+
-+		err = get_user(mov, (unsigned char __user *)regs->eip);
-+		err |= get_user(addr1, (unsigned long __user *)(regs->eip + 1));
-+		err |= get_user(jmp, (unsigned char __user *)(regs->eip + 5));
-+		err |= get_user(addr2, (unsigned long __user *)(regs->eip + 6));
-+
-+		if (err)
-+			break;
-+
-+		if (mov == 0xB9 && jmp == 0xE9) {
-+			regs->ecx = addr1;
-+			regs->eip += addr2 + 10;
-+			return 2;
-+		}
-+	} while (0);
-+
-+	return 1; /* PaX in action */
-+}
-+#endif
-+
-+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
-+void pax_report_insns(void *pc, void *sp)
-+{
-+	long i;
-+
-+	printk(KERN_ERR "PAX: bytes at PC: ");
-+	for (i = 0; i < 20; i++) {
-+		unsigned char c;
-+		if (get_user(c, (unsigned char __user *)pc+i))
-+			printk("?? ");
-+		else
-+			printk("%02x ", c);
-+	}
-+	printk("\n");
-+
-+	printk(KERN_ERR "PAX: bytes at SP-4: ");
-+	for (i = -1; i < 20; i++) {
-+		unsigned long c;
-+		if (get_user(c, (unsigned long __user *)sp+i))
-+			printk("???????? ");
-+		else
-+			printk("%08lx ", c);
-+	}
-+	printk("\n");
-+}
-+#endif
---- a/arch/i386/mm/hugetlbpage.c
-+++ b/arch/i386/mm/hugetlbpage.c
-@@ -229,13 +229,18 @@ static unsigned long hugetlb_get_unmappe
- {
- 	struct mm_struct *mm = current->mm;
- 	struct vm_area_struct *vma;
--	unsigned long start_addr;
-+	unsigned long start_addr, task_size = TASK_SIZE;
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if (mm->pax_flags & MF_PAX_SEGMEXEC)
-+		task_size = SEGMEXEC_TASK_SIZE;
-+#endif
- 
- 	if (len > mm->cached_hole_size) {
--	        start_addr = mm->free_area_cache;
-+		start_addr = mm->free_area_cache;
- 	} else {
--	        start_addr = TASK_UNMAPPED_BASE;
--	        mm->cached_hole_size = 0;
-+		start_addr = mm->mmap_base;
-+		mm->cached_hole_size = 0;
- 	}
- 
- full_search:
-@@ -243,13 +248,13 @@ full_search:
- 
- 	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
- 		/* At this point:  (!vma || addr < vma->vm_end). */
--		if (TASK_SIZE - len < addr) {
-+		if (task_size - len < addr) {
- 			/*
- 			 * Start a new search - just in case we missed
- 			 * some holes.
- 			 */
--			if (start_addr != TASK_UNMAPPED_BASE) {
--				start_addr = TASK_UNMAPPED_BASE;
-+			if (start_addr != mm->mmap_base) {
-+				start_addr = mm->mmap_base;
- 				mm->cached_hole_size = 0;
- 				goto full_search;
- 			}
-@@ -271,9 +276,8 @@ static unsigned long hugetlb_get_unmappe
- {
- 	struct mm_struct *mm = current->mm;
- 	struct vm_area_struct *vma, *prev_vma;
--	unsigned long base = mm->mmap_base, addr = addr0;
-+	unsigned long base = mm->mmap_base, addr;
- 	unsigned long largest_hole = mm->cached_hole_size;
--	int first_time = 1;
- 
- 	/* don't allow allocations above current base */
- 	if (mm->free_area_cache > base)
-@@ -283,7 +287,7 @@ static unsigned long hugetlb_get_unmappe
- 	        largest_hole = 0;
- 		mm->free_area_cache  = base;
- 	}
--try_again:
-+
- 	/* make sure it can fit in the remaining address space */
- 	if (mm->free_area_cache < len)
- 		goto fail;
-@@ -325,22 +329,26 @@ try_again:
- 
- fail:
- 	/*
--	 * if hint left us with no space for the requested
--	 * mapping then try again:
--	 */
--	if (first_time) {
--		mm->free_area_cache = base;
--		largest_hole = 0;
--		first_time = 0;
--		goto try_again;
--	}
--	/*
- 	 * A failed mmap() very likely causes application failure,
- 	 * so fall back to the bottom-up function here. This scenario
- 	 * can happen with large stack limits and large mmap()
- 	 * allocations.
- 	 */
--	mm->free_area_cache = TASK_UNMAPPED_BASE;
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if (mm->pax_flags & MF_PAX_SEGMEXEC)
-+		mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
-+	else
-+#endif
-+
-+	mm->mmap_base = TASK_UNMAPPED_BASE;
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+	if (mm->pax_flags & MF_PAX_RANDMMAP)
-+		mm->mmap_base += mm->delta_mmap;
-+#endif
-+
-+	mm->free_area_cache = mm->mmap_base;
- 	mm->cached_hole_size = ~0UL;
- 	addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
- 			len, pgoff, flags);
-@@ -348,6 +356,7 @@ fail:
- 	/*
- 	 * Restore the topdown base:
- 	 */
-+	mm->mmap_base = base;
- 	mm->free_area_cache = base;
- 	mm->cached_hole_size = ~0UL;
- 
-@@ -360,10 +369,17 @@ hugetlb_get_unmapped_area(struct file *f
- {
- 	struct mm_struct *mm = current->mm;
- 	struct vm_area_struct *vma;
-+	unsigned long task_size = TASK_SIZE;
- 
- 	if (len & ~HPAGE_MASK)
- 		return -EINVAL;
--	if (len > TASK_SIZE)
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if (mm->pax_flags & MF_PAX_SEGMEXEC)
-+		task_size = SEGMEXEC_TASK_SIZE;
-+#endif
-+
-+	if (len > task_size)
- 		return -ENOMEM;
- 
- 	if (flags & MAP_FIXED) {
-@@ -375,7 +391,7 @@ hugetlb_get_unmapped_area(struct file *f
- 	if (addr) {
- 		addr = ALIGN(addr, HPAGE_SIZE);
- 		vma = find_vma(mm, addr);
--		if (TASK_SIZE - len >= addr &&
-+		if (task_size - len >= addr &&
- 		    (!vma || addr + len <= vma->vm_start))
- 			return addr;
- 	}
---- a/arch/i386/mm/init.c
-+++ b/arch/i386/mm/init.c
-@@ -44,6 +44,7 @@
- #include <asm/tlbflush.h>
- #include <asm/sections.h>
- #include <asm/paravirt.h>
-+#include <asm/desc.h>
- 
- unsigned int __VMALLOC_RESERVE = 128 << 20;
- 
-@@ -53,32 +54,6 @@ unsigned long highstart_pfn, highend_pfn
- static int noinline do_test_wp_bit(void);
- 
- /*
-- * Creates a middle page table and puts a pointer to it in the
-- * given global directory entry. This only returns the gd entry
-- * in non-PAE compilation mode, since the middle layer is folded.
-- */
--static pmd_t * __init one_md_table_init(pgd_t *pgd)
--{
--	pud_t *pud;
--	pmd_t *pmd_table;
--		
--#ifdef CONFIG_X86_PAE
--	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
--		pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
--
--		paravirt_alloc_pd(__pa(pmd_table) >> PAGE_SHIFT);
--		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
--		pud = pud_offset(pgd, 0);
--		if (pmd_table != pmd_offset(pud, 0))
--			BUG();
--	}
--#endif
--	pud = pud_offset(pgd, 0);
--	pmd_table = pmd_offset(pud, 0);
--	return pmd_table;
--}
--
--/*
-  * Create a page table and place a pointer to it in a middle page
-  * directory entry.
-  */
-@@ -88,7 +63,11 @@ static pte_t * __init one_page_table_ini
- 		pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
- 
- 		paravirt_alloc_pt(&init_mm, __pa(page_table) >> PAGE_SHIFT);
-+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
-+		set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
-+#else
- 		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
-+#endif
- 		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
- 	}
- 	
-@@ -109,6 +88,7 @@ static pte_t * __init one_page_table_ini
- static void __init page_table_range_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
- {
- 	pgd_t *pgd;
-+	pud_t *pud;
- 	pmd_t *pmd;
- 	int pgd_idx, pmd_idx;
- 	unsigned long vaddr;
-@@ -119,8 +99,13 @@ static void __init page_table_range_init
- 	pgd = pgd_base + pgd_idx;
- 
- 	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
--		pmd = one_md_table_init(pgd);
--		pmd = pmd + pmd_index(vaddr);
-+		pud = pud_offset(pgd, vaddr);
-+		pmd = pmd_offset(pud, vaddr);
-+
-+#ifdef CONFIG_X86_PAE
-+		paravirt_alloc_pd(__pa(pmd) >> PAGE_SHIFT);
-+#endif
-+
- 		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
- 			one_page_table_init(pmd);
- 
-@@ -130,11 +115,23 @@ static void __init page_table_range_init
- 	}
- }
- 
--static inline int is_kernel_text(unsigned long addr)
-+static inline int is_kernel_text(unsigned long start, unsigned long end)
- {
--	if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
--		return 1;
--	return 0;
-+	unsigned long etext;
-+
-+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
-+	etext = (unsigned long)&MODULES_END - __KERNEL_TEXT_OFFSET;
-+#else
-+	etext = (unsigned long)&_etext;
-+#endif
-+
-+	if ((start > etext + __KERNEL_TEXT_OFFSET ||
-+	     end <= (unsigned long)_stext + __KERNEL_TEXT_OFFSET) &&
-+	    (start > (unsigned long)_einittext + __KERNEL_TEXT_OFFSET ||
-+	     end <= (unsigned long)_sinittext + __KERNEL_TEXT_OFFSET) &&
-+	    (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
-+		return 0;
-+	return 1;
- }
- 
- /*
-@@ -146,25 +143,29 @@ static void __init kernel_physical_mappi
- {
- 	unsigned long pfn;
- 	pgd_t *pgd;
-+	pud_t *pud;
- 	pmd_t *pmd;
- 	pte_t *pte;
--	int pgd_idx, pmd_idx, pte_ofs;
-+	unsigned int pgd_idx, pmd_idx, pte_ofs;
- 
- 	pgd_idx = pgd_index(PAGE_OFFSET);
- 	pgd = pgd_base + pgd_idx;
- 	pfn = 0;
- 
--	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
--		pmd = one_md_table_init(pgd);
--		if (pfn >= max_low_pfn)
--			continue;
-+	for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
-+		pud = pud_offset(pgd, 0);
-+		pmd = pmd_offset(pud, 0);
-+
-+#ifdef CONFIG_X86_PAE
-+		paravirt_alloc_pd(__pa(pmd) >> PAGE_SHIFT);
-+#endif
-+
- 		for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
--			unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;
-+			unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
- 
- 			/* Map with big pages if possible, otherwise create normal page tables. */
--			if (cpu_has_pse) {
--				unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;
--				if (is_kernel_text(address) || is_kernel_text(address2))
-+			if (cpu_has_pse && address >= (unsigned long)__va(0x100000)) {
-+				if (is_kernel_text(address, address + PMD_SIZE))
- 					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
- 				else
- 					set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
-@@ -176,7 +177,7 @@ static void __init kernel_physical_mappi
- 				for (pte_ofs = 0;
- 				     pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
- 				     pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
--					if (is_kernel_text(address))
-+					if (is_kernel_text(address, address + PAGE_SIZE))
- 						set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
- 					else
- 						set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
-@@ -326,9 +327,9 @@ static void __init set_highmem_pages_ini
- #define set_highmem_pages_init(bad_ppro) do { } while (0)
- #endif /* CONFIG_HIGHMEM */
- 
--unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
-+unsigned long long __PAGE_KERNEL __read_only = _PAGE_KERNEL;
- EXPORT_SYMBOL(__PAGE_KERNEL);
--unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
-+unsigned long long __PAGE_KERNEL_EXEC __read_only = _PAGE_KERNEL_EXEC;
- 
- #ifdef CONFIG_NUMA
- extern void __init remap_numa_kva(void);
-@@ -339,26 +340,10 @@ extern void __init remap_numa_kva(void);
- void __init native_pagetable_setup_start(pgd_t *base)
- {
- #ifdef CONFIG_X86_PAE
--	int i;
-+	unsigned int i;
- 
--	/*
--	 * Init entries of the first-level page table to the
--	 * zero page, if they haven't already been set up.
--	 *
--	 * In a normal native boot, we'll be running on a
--	 * pagetable rooted in swapper_pg_dir, but not in PAE
--	 * mode, so this will end up clobbering the mappings
--	 * for the lower 24Mbytes of the address space,
--	 * without affecting the kernel address space.
--	 */
--	for (i = 0; i < USER_PTRS_PER_PGD; i++)
--		set_pgd(&base[i],
--			__pgd(__pa(empty_zero_page) | _PAGE_PRESENT));
--
--	/* Make sure kernel address space is empty so that a pagetable
--	   will be allocated for it. */
--	memset(&base[USER_PTRS_PER_PGD], 0,
--	       KERNEL_PGD_PTRS * sizeof(pgd_t));
-+	for (i = 0; i < PTRS_PER_PGD; i++)
-+		paravirt_alloc_pd(__pa(swapper_pm_dir + i) >> PAGE_SHIFT);
- #else
- 	paravirt_alloc_pd(__pa(swapper_pg_dir) >> PAGE_SHIFT);
- #endif
-@@ -366,16 +351,6 @@ void __init native_pagetable_setup_start
- 
- void __init native_pagetable_setup_done(pgd_t *base)
- {
--#ifdef CONFIG_X86_PAE
--	/*
--	 * Add low memory identity-mappings - SMP needs it when
--	 * starting up on an AP from real-mode. In the non-PAE
--	 * case we already have these mappings through head.S.
--	 * All user-space mappings are explicitly cleared after
--	 * SMP startup.
--	 */
--	set_pgd(&base[0], base[USER_PTRS_PER_PGD]);
--#endif
- }
- 
- /*
-@@ -437,12 +412,12 @@ static void __init pagetable_init (void)
-  * Swap suspend & friends need this for resume because things like the intel-agp
-  * driver might have split up a kernel 4MB mapping.
-  */
--char __nosavedata swsusp_pg_dir[PAGE_SIZE]
-+pgd_t __nosavedata swsusp_pg_dir[PTRS_PER_PGD]
- 	__attribute__ ((aligned (PAGE_SIZE)));
- 
- static inline void save_pg_dir(void)
- {
--	memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
-+	clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
- }
- #else
- static inline void save_pg_dir(void)
-@@ -471,12 +446,11 @@ void zap_low_mappings (void)
- 	flush_tlb_all();
- }
- 
--int nx_enabled = 0;
-+int nx_enabled;
- 
- #ifdef CONFIG_X86_PAE
- 
--static int disable_nx __initdata = 0;
--u64 __supported_pte_mask __read_mostly = ~_PAGE_NX;
-+u64 __supported_pte_mask __read_only = ~_PAGE_NX;
- EXPORT_SYMBOL_GPL(__supported_pte_mask);
- 
- /*
-@@ -487,36 +461,31 @@ EXPORT_SYMBOL_GPL(__supported_pte_mask);
-  * on      Enable
-  * off     Disable
-  */
-+#if !defined(CONFIG_PAX_PAGEEXEC)
- static int __init noexec_setup(char *str)
- {
- 	if (!str || !strcmp(str, "on")) {
--		if (cpu_has_nx) {
--			__supported_pte_mask |= _PAGE_NX;
--			disable_nx = 0;
--		}
-+		if (cpu_has_nx)
-+			nx_enabled = 1;
- 	} else if (!strcmp(str,"off")) {
--		disable_nx = 1;
--		__supported_pte_mask &= ~_PAGE_NX;
-+		nx_enabled = 0;
- 	} else
- 		return -EINVAL;
- 
- 	return 0;
- }
- early_param("noexec", noexec_setup);
-+#endif
- 
- static void __init set_nx(void)
- {
--	unsigned int v[4], l, h;
-+	if (!nx_enabled && cpu_has_nx) {
-+		unsigned l, h;
- 
--	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
--		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
--		if ((v[3] & (1 << 20)) && !disable_nx) {
--			rdmsr(MSR_EFER, l, h);
--			l |= EFER_NX;
--			wrmsr(MSR_EFER, l, h);
--			nx_enabled = 1;
--			__supported_pte_mask |= _PAGE_NX;
--		}
-+		__supported_pte_mask &= ~_PAGE_NX;
-+		rdmsr(MSR_EFER, l, h);
-+		l &= ~EFER_NX;
-+		wrmsr(MSR_EFER, l, h);
- 	}
- }
- 
-@@ -569,14 +538,6 @@ void __init paging_init(void)
- 
- 	load_cr3(swapper_pg_dir);
- 
--#ifdef CONFIG_X86_PAE
--	/*
--	 * We will bail out later - printk doesn't work right now so
--	 * the user would just see a hanging kernel.
--	 */
--	if (cpu_has_pae)
--		set_in_cr4(X86_CR4_PAE);
--#endif
- 	__flush_tlb_all();
- 
- 	kmap_init();
-@@ -647,7 +608,7 @@ void __init mem_init(void)
- 	set_highmem_pages_init(bad_ppro);
- 
- 	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
--	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
-+	datasize =  (unsigned long) &_edata - (unsigned long) &_data;
- 	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
- 
- 	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT); 
-@@ -692,10 +653,10 @@ void __init mem_init(void)
- 	       (unsigned long)&__init_begin, (unsigned long)&__init_end,
- 	       ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10,
- 
--	       (unsigned long)&_etext, (unsigned long)&_edata,
--	       ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
-+	       (unsigned long)&_data, (unsigned long)&_edata,
-+	       ((unsigned long)&_edata - (unsigned long)&_data) >> 10,
- 
--	       (unsigned long)&_text, (unsigned long)&_etext,
-+	       (unsigned long)&_text + __KERNEL_TEXT_OFFSET, (unsigned long)&_etext + __KERNEL_TEXT_OFFSET,
- 	       ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
- 
- #ifdef CONFIG_HIGHMEM
-@@ -706,10 +667,6 @@ void __init mem_init(void)
- 	BUG_ON((unsigned long)high_memory      > VMALLOC_START);
- #endif /* double-sanity-check paranoia */
- 
--#ifdef CONFIG_X86_PAE
--	if (!cpu_has_pae)
--		panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
--#endif
- 	if (boot_cpu_data.wp_works_ok < 0)
- 		test_wp_bit();
- 
-@@ -844,6 +801,38 @@ void free_init_pages(char *what, unsigne
- 
- void free_initmem(void)
- {
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	/* PaX: limit KERNEL_CS to actual size */
-+	unsigned long addr, limit;
-+	__u32 a, b;
-+	int cpu;
-+	pgd_t *pgd;
-+	pud_t *pud;
-+	pmd_t *pmd;
-+
-+#ifdef CONFIG_MODULES
-+	limit = (unsigned long)&MODULES_END - __KERNEL_TEXT_OFFSET;
-+#else
-+	limit = (unsigned long)&_etext;
-+#endif
-+	limit = (limit - 1UL) >> PAGE_SHIFT;
-+
-+	for (cpu = 0; cpu < NR_CPUS; cpu++) {
-+		pack_descriptor(&a, &b, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
-+		write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, a, b);
-+	}
-+
-+	/* PaX: make KERNEL_CS read-only */
-+	for (addr = __KERNEL_TEXT_OFFSET; addr < (unsigned long)&_data; addr += PMD_SIZE) {
-+		pgd = pgd_offset_k(addr);
-+		pud = pud_offset(pgd, addr);
-+		pmd = pmd_offset(pud, addr);
-+		set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
-+	}
-+	flush_tlb_all();
-+#endif
-+
- 	free_init_pages("unused kernel memory",
- 			(unsigned long)(&__init_begin),
- 			(unsigned long)(&__init_end));
---- a/arch/i386/mm/mmap.c
-+++ b/arch/i386/mm/mmap.c
-@@ -35,12 +35,18 @@
-  * Leave an at least ~128 MB hole.
-  */
- #define MIN_GAP (128*1024*1024)
--#define MAX_GAP (TASK_SIZE/6*5)
-+#define MAX_GAP (task_size/6*5)
- 
- static inline unsigned long mmap_base(struct mm_struct *mm)
- {
- 	unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
- 	unsigned long random_factor = 0;
-+	unsigned long task_size = TASK_SIZE;
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if (mm->pax_flags & MF_PAX_SEGMEXEC)
-+		task_size = SEGMEXEC_TASK_SIZE;
-+#endif
- 
- 	if (current->flags & PF_RANDOMIZE)
- 		random_factor = get_random_int() % (1024*1024);
-@@ -50,7 +56,7 @@ static inline unsigned long mmap_base(st
- 	else if (gap > MAX_GAP)
- 		gap = MAX_GAP;
- 
--	return PAGE_ALIGN(TASK_SIZE - gap - random_factor);
-+	return PAGE_ALIGN(task_size - gap - random_factor);
- }
- 
- /*
-@@ -66,11 +72,30 @@ void arch_pick_mmap_layout(struct mm_str
- 	if (sysctl_legacy_va_layout ||
- 			(current->personality & ADDR_COMPAT_LAYOUT) ||
- 			current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY) {
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+		if (mm->pax_flags & MF_PAX_SEGMEXEC)
-+			mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
-+		else
-+#endif
-+
- 		mm->mmap_base = TASK_UNMAPPED_BASE;
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+		if (mm->pax_flags & MF_PAX_RANDMMAP)
-+			mm->mmap_base += mm->delta_mmap;
-+#endif
-+
- 		mm->get_unmapped_area = arch_get_unmapped_area;
- 		mm->unmap_area = arch_unmap_area;
- 	} else {
- 		mm->mmap_base = mmap_base(mm);
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+		if (mm->pax_flags & MF_PAX_RANDMMAP)
-+			mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
-+#endif
-+
- 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
- 		mm->unmap_area = arch_unmap_area_topdown;
- 	}
---- a/arch/i386/mm/pageattr.c
-+++ b/arch/i386/mm/pageattr.c
-@@ -13,6 +13,7 @@
- #include <asm/tlbflush.h>
- #include <asm/pgalloc.h>
- #include <asm/sections.h>
-+#include <asm/desc.h>
- 
- static DEFINE_SPINLOCK(cpa_lock);
- static struct list_head df_list = LIST_HEAD_INIT(df_list);
-@@ -37,16 +38,16 @@ pte_t *lookup_address(unsigned long addr
- } 
- 
- static struct page *split_large_page(unsigned long address, pgprot_t prot,
--					pgprot_t ref_prot)
-+					pgprot_t ref_prot, unsigned long flags)
- { 
- 	int i; 
- 	unsigned long addr;
- 	struct page *base;
- 	pte_t *pbase;
- 
--	spin_unlock_irq(&cpa_lock);
-+	spin_unlock_irqrestore(&cpa_lock, flags);
- 	base = alloc_pages(GFP_KERNEL, 0);
--	spin_lock_irq(&cpa_lock);
-+	spin_lock_irqsave(&cpa_lock, flags);
- 	if (!base) 
- 		return NULL;
- 
-@@ -99,7 +100,18 @@ static void set_pmd_pte(pte_t *kpte, uns
- 	struct page *page;
- 	unsigned long flags;
- 
-+#ifdef CONFIG_PAX_KERNEXEC
-+	unsigned long cr0;
-+
-+	pax_open_kernel(cr0);
-+#endif
-+
- 	set_pte_atomic(kpte, pte); 	/* change init_mm */
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_close_kernel(cr0);
-+#endif
-+
- 	if (SHARED_KERNEL_PMD)
- 		return;
- 
-@@ -126,7 +138,7 @@ static inline void revert_page(struct pa
- 	pte_t *linear;
- 
- 	ref_prot =
--	((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
-+	((address & LARGE_PAGE_MASK) < (unsigned long)&_etext + __KERNEL_TEXT_OFFSET)
- 		? PAGE_KERNEL_LARGE_EXEC : PAGE_KERNEL_LARGE;
- 
- 	linear = (pte_t *)
-@@ -143,7 +155,7 @@ static inline void save_page(struct page
- }
- 
- static int
--__change_page_attr(struct page *page, pgprot_t prot)
-+__change_page_attr(struct page *page, pgprot_t prot, unsigned long flags)
- { 
- 	pte_t *kpte; 
- 	unsigned long address;
-@@ -167,13 +179,20 @@ __change_page_attr(struct page *page, pg
- 			struct page *split;
- 
- 			ref_prot =
--			((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
-+			((address & LARGE_PAGE_MASK) < (unsigned long)&_etext + __KERNEL_TEXT_OFFSET)
- 				? PAGE_KERNEL_EXEC : PAGE_KERNEL;
--			split = split_large_page(address, prot, ref_prot);
-+			split = split_large_page(address, prot, ref_prot, flags);
- 			if (!split)
- 				return -ENOMEM;
--			set_pmd_pte(kpte,address,mk_pte(split, ref_prot));
--			kpte_page = split;
-+			if (pte_huge(*kpte)) {
-+				set_pmd_pte(kpte,address,mk_pte(split, ref_prot));
-+				kpte_page = split;
-+			} else {
-+				__free_pages(split, 0);
-+				kpte = lookup_address(address);
-+				kpte_page = virt_to_page(kpte);
-+				set_pte_atomic(kpte, mk_pte(page, prot));
-+			}
- 		}
- 		page_private(kpte_page)++;
- 	} else if (!pte_huge(*kpte)) {
-@@ -225,7 +244,7 @@ int change_page_attr(struct page *page, 
- 
- 	spin_lock_irqsave(&cpa_lock, flags);
- 	for (i = 0; i < numpages; i++, page++) { 
--		err = __change_page_attr(page, prot);
-+		err = __change_page_attr(page, prot, flags);
- 		if (err) 
- 			break; 
- 	} 	
---- a/arch/i386/oprofile/backtrace.c
-+++ b/arch/i386/oprofile/backtrace.c
-@@ -22,7 +22,7 @@ struct frame_head {
- static struct frame_head *
- dump_kernel_backtrace(struct frame_head * head)
- {
--	oprofile_add_trace(head->ret);
-+	oprofile_add_trace(head->ret + __KERNEL_TEXT_OFFSET);
- 
- 	/* frame pointers should strictly progress back up the stack
- 	 * (towards higher addresses) */
-@@ -116,7 +116,7 @@ x86_backtrace(struct pt_regs * const reg
- 	head = (struct frame_head *)regs->ebp;
- #endif
- 
--	if (!user_mode_vm(regs)) {
-+	if (!user_mode(regs)) {
- 		while (depth-- && valid_kernel_stack(head, regs))
- 			head = dump_kernel_backtrace(head);
- 		return;
---- a/arch/i386/oprofile/op_model_p4.c
-+++ b/arch/i386/oprofile/op_model_p4.c
-@@ -47,7 +47,7 @@ static inline void setup_num_counters(vo
- #endif
- }
- 
--static int inline addr_increment(void)
-+static inline int addr_increment(void)
- {
- #ifdef CONFIG_SMP
- 	return smp_num_siblings == 2 ? 2 : 1;
---- a/arch/i386/pci/common.c
-+++ b/arch/i386/pci/common.c
-@@ -287,7 +287,7 @@ static struct dmi_system_id __devinitdat
- 			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL685c G1"),
- 		},
- 	},
--	{}
-+	{ NULL, NULL, {DMI_MATCH(DMI_NONE, NULL)}, NULL}
- };
- 
- struct pci_bus * __devinit pcibios_scan_root(int busnum)
---- a/arch/i386/pci/early.c
-+++ b/arch/i386/pci/early.c
-@@ -7,7 +7,7 @@
- /* Direct PCI access. This is used for PCI accesses in early boot before
-    the PCI subsystem works. */
- 
--#define PDprintk(x...)
-+#define PDprintk(x...) do {} while (0)
- 
- u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset)
- {
---- a/arch/i386/pci/fixup.c
-+++ b/arch/i386/pci/fixup.c
-@@ -386,7 +386,7 @@ static struct dmi_system_id __devinitdat
- 			DMI_MATCH(DMI_PRODUCT_VERSION, "PSA40U"),
- 		},
- 	},
--	{ }
-+	{ NULL, NULL, {DMI_MATCH(DMI_NONE, NULL)}, NULL }
- };
- 
- static void __devinit pci_pre_fixup_toshiba_ohci1394(struct pci_dev *dev)
---- a/arch/i386/pci/irq.c
-+++ b/arch/i386/pci/irq.c
-@@ -508,7 +508,7 @@ static __init int intel_router_probe(str
- 	static struct pci_device_id __initdata pirq_440gx[] = {
- 		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0) },
- 		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_2) },
--		{ },
-+		{ PCI_DEVICE(0, 0) }
- 	};
- 
- 	/* 440GX has a proprietary PIRQ router -- don't use it */
-@@ -1051,7 +1051,7 @@ static struct dmi_system_id __initdata p
- 			DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
- 		},
- 	},
--	{ }
-+	{ NULL, NULL, {DMI_MATCH(DMI_NONE, NULL)}, NULL }
- };
- 
- static int __init pcibios_irq_init(void)
---- a/arch/i386/pci/pcbios.c
-+++ b/arch/i386/pci/pcbios.c
-@@ -57,50 +57,124 @@ union bios32 {
- static struct {
- 	unsigned long address;
- 	unsigned short segment;
--} bios32_indirect = { 0, __KERNEL_CS };
-+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
- 
- /*
-  * Returns the entry point for the given service, NULL on error
-  */
- 
--static unsigned long bios32_service(unsigned long service)
-+static unsigned long __devinit bios32_service(unsigned long service)
- {
- 	unsigned char return_code;	/* %al */
- 	unsigned long address;		/* %ebx */
- 	unsigned long length;		/* %ecx */
- 	unsigned long entry;		/* %edx */
- 	unsigned long flags;
-+	struct desc_struct *gdt;
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	unsigned long cr0;
-+#endif
- 
- 	local_irq_save(flags);
--	__asm__("lcall *(%%edi); cld"
-+
-+	gdt = get_cpu_gdt_table(smp_processor_id());
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_open_kernel(cr0);
-+#endif
-+
-+	pack_descriptor((__u32 *)&gdt[GDT_ENTRY_PCIBIOS_CS].a,
-+			(__u32 *)&gdt[GDT_ENTRY_PCIBIOS_CS].b,
-+			0UL, 0xFFFFFUL, 0x9B, 0xC);
-+	pack_descriptor((__u32 *)&gdt[GDT_ENTRY_PCIBIOS_DS].a,
-+			(__u32 *)&gdt[GDT_ENTRY_PCIBIOS_DS].b,
-+			0UL, 0xFFFFFUL, 0x93, 0xC);
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_close_kernel(cr0);
-+#endif
-+
-+	__asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
- 		: "=a" (return_code),
- 		  "=b" (address),
- 		  "=c" (length),
- 		  "=d" (entry)
- 		: "0" (service),
- 		  "1" (0),
--		  "D" (&bios32_indirect));
-+		  "D" (&bios32_indirect),
-+		  "r"(__PCIBIOS_DS)
-+		: "memory");
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_open_kernel(cr0);
-+#endif
-+
-+	gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
-+	gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
-+	gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
-+	gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_close_kernel(cr0);
-+#endif
-+
- 	local_irq_restore(flags);
- 
- 	switch (return_code) {
--		case 0:
--			return address + entry;
--		case 0x80:	/* Not present */
--			printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
--			return 0;
--		default: /* Shouldn't happen */
--			printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
--				service, return_code);
-+	case 0: {
-+		int cpu;
-+		unsigned char flags;
-+
-+		printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
-+		if (address >= 0xFFFF0 || length >= 0xFFFF0 - address || length <= entry) {
-+			printk(KERN_WARNING "bios32_service: not valid\n");
- 			return 0;
-+		}
-+		address = address + PAGE_OFFSET;
-+		length += 16UL; /* some BIOSs underreport this... */
-+		flags = 4;
-+		if (length >= 64*1024*1024) {
-+			length >>= PAGE_SHIFT;
-+			flags |= 8;
-+		}
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+		pax_open_kernel(cr0);
-+#endif
-+
-+		for (cpu = 0; cpu < NR_CPUS; cpu++) {
-+			gdt = get_cpu_gdt_table(cpu);
-+			pack_descriptor((__u32 *)&gdt[GDT_ENTRY_PCIBIOS_CS].a,
-+					(__u32 *)&gdt[GDT_ENTRY_PCIBIOS_CS].b,
-+					address, length, 0x9b, flags);
-+			pack_descriptor((__u32 *)&gdt[GDT_ENTRY_PCIBIOS_DS].a,
-+					(__u32 *)&gdt[GDT_ENTRY_PCIBIOS_DS].b,
-+					address, length, 0x93, flags);
-+		}
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+		pax_close_kernel(cr0);
-+#endif
-+
-+		return entry;
-+	}
-+	case 0x80:	/* Not present */
-+		printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
-+		return 0;
-+	default: /* Shouldn't happen */
-+		printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
-+			service, return_code);
-+		return 0;
- 	}
- }
- 
- static struct {
- 	unsigned long address;
- 	unsigned short segment;
--} pci_indirect = { 0, __KERNEL_CS };
-+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
- 
--static int pci_bios_present;
-+static int pci_bios_present __read_only;
- 
- static int __devinit check_pcibios(void)
- {
-@@ -109,11 +183,13 @@ static int __devinit check_pcibios(void)
- 	unsigned long flags, pcibios_entry;
- 
- 	if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
--		pci_indirect.address = pcibios_entry + PAGE_OFFSET;
-+		pci_indirect.address = pcibios_entry;
- 
- 		local_irq_save(flags);
--		__asm__(
--			"lcall *(%%edi); cld\n\t"
-+		__asm__("movw %w6, %%ds\n\t"
-+			"lcall *%%ss:(%%edi); cld\n\t"
-+			"push %%ss\n\t"
-+			"pop %%ds\n\t"
- 			"jc 1f\n\t"
- 			"xor %%ah, %%ah\n"
- 			"1:"
-@@ -122,7 +198,8 @@ static int __devinit check_pcibios(void)
- 			  "=b" (ebx),
- 			  "=c" (ecx)
- 			: "1" (PCIBIOS_PCI_BIOS_PRESENT),
--			  "D" (&pci_indirect)
-+			  "D" (&pci_indirect),
-+			  "r" (__PCIBIOS_DS)
- 			: "memory");
- 		local_irq_restore(flags);
- 
-@@ -158,7 +235,10 @@ static int __devinit pci_bios_find_devic
- 	unsigned short bx;
- 	unsigned short ret;
- 
--	__asm__("lcall *(%%edi); cld\n\t"
-+	__asm__("movw %w7, %%ds\n\t"
-+		"lcall *%%ss:(%%edi); cld\n\t"
-+		"push %%ss\n\t"
-+		"pop %%ds\n\t"
- 		"jc 1f\n\t"
- 		"xor %%ah, %%ah\n"
- 		"1:"
-@@ -168,7 +248,8 @@ static int __devinit pci_bios_find_devic
- 		  "c" (device_id),
- 		  "d" (vendor),
- 		  "S" ((int) index),
--		  "D" (&pci_indirect));
-+		  "D" (&pci_indirect),
-+		  "r" (__PCIBIOS_DS));
- 	*bus = (bx >> 8) & 0xff;
- 	*device_fn = bx & 0xff;
- 	return (int) (ret & 0xff00) >> 8;
-@@ -188,7 +269,10 @@ static int pci_bios_read(unsigned int se
- 
- 	switch (len) {
- 	case 1:
--		__asm__("lcall *(%%esi); cld\n\t"
-+		__asm__("movw %w6, %%ds\n\t"
-+			"lcall *%%ss:(%%esi); cld\n\t"
-+			"push %%ss\n\t"
-+			"pop %%ds\n\t"
- 			"jc 1f\n\t"
- 			"xor %%ah, %%ah\n"
- 			"1:"
-@@ -197,10 +281,14 @@ static int pci_bios_read(unsigned int se
- 			: "1" (PCIBIOS_READ_CONFIG_BYTE),
- 			  "b" (bx),
- 			  "D" ((long)reg),
--			  "S" (&pci_indirect));
-+			  "S" (&pci_indirect),
-+			  "r" (__PCIBIOS_DS));
- 		break;
- 	case 2:
--		__asm__("lcall *(%%esi); cld\n\t"
-+		__asm__("movw %w6, %%ds\n\t"
-+			"lcall *%%ss:(%%esi); cld\n\t"
-+			"push %%ss\n\t"
-+			"pop %%ds\n\t"
- 			"jc 1f\n\t"
- 			"xor %%ah, %%ah\n"
- 			"1:"
-@@ -209,10 +297,14 @@ static int pci_bios_read(unsigned int se
- 			: "1" (PCIBIOS_READ_CONFIG_WORD),
- 			  "b" (bx),
- 			  "D" ((long)reg),
--			  "S" (&pci_indirect));
-+			  "S" (&pci_indirect),
-+			  "r" (__PCIBIOS_DS));
- 		break;
- 	case 4:
--		__asm__("lcall *(%%esi); cld\n\t"
-+		__asm__("movw %w6, %%ds\n\t"
-+			"lcall *%%ss:(%%esi); cld\n\t"
-+			"push %%ss\n\t"
-+			"pop %%ds\n\t"
- 			"jc 1f\n\t"
- 			"xor %%ah, %%ah\n"
- 			"1:"
-@@ -221,7 +313,8 @@ static int pci_bios_read(unsigned int se
- 			: "1" (PCIBIOS_READ_CONFIG_DWORD),
- 			  "b" (bx),
- 			  "D" ((long)reg),
--			  "S" (&pci_indirect));
-+			  "S" (&pci_indirect),
-+			  "r" (__PCIBIOS_DS));
- 		break;
- 	}
- 
-@@ -244,7 +337,10 @@ static int pci_bios_write(unsigned int s
- 
- 	switch (len) {
- 	case 1:
--		__asm__("lcall *(%%esi); cld\n\t"
-+		__asm__("movw %w6, %%ds\n\t"
-+			"lcall *%%ss:(%%esi); cld\n\t"
-+			"push %%ss\n\t"
-+			"pop %%ds\n\t"
- 			"jc 1f\n\t"
- 			"xor %%ah, %%ah\n"
- 			"1:"
-@@ -253,10 +349,14 @@ static int pci_bios_write(unsigned int s
- 			  "c" (value),
- 			  "b" (bx),
- 			  "D" ((long)reg),
--			  "S" (&pci_indirect));
-+			  "S" (&pci_indirect),
-+			  "r" (__PCIBIOS_DS));
- 		break;
- 	case 2:
--		__asm__("lcall *(%%esi); cld\n\t"
-+		__asm__("movw %w6, %%ds\n\t"
-+			"lcall *%%ss:(%%esi); cld\n\t"
-+			"push %%ss\n\t"
-+			"pop %%ds\n\t"
- 			"jc 1f\n\t"
- 			"xor %%ah, %%ah\n"
- 			"1:"
-@@ -265,10 +365,14 @@ static int pci_bios_write(unsigned int s
- 			  "c" (value),
- 			  "b" (bx),
- 			  "D" ((long)reg),
--			  "S" (&pci_indirect));
-+			  "S" (&pci_indirect),
-+			  "r" (__PCIBIOS_DS));
- 		break;
- 	case 4:
--		__asm__("lcall *(%%esi); cld\n\t"
-+		__asm__("movw %w6, %%ds\n\t"
-+			"lcall *%%ss:(%%esi); cld\n\t"
-+			"push %%ss\n\t"
-+			"pop %%ds\n\t"
- 			"jc 1f\n\t"
- 			"xor %%ah, %%ah\n"
- 			"1:"
-@@ -277,7 +381,8 @@ static int pci_bios_write(unsigned int s
- 			  "c" (value),
- 			  "b" (bx),
- 			  "D" ((long)reg),
--			  "S" (&pci_indirect));
-+			  "S" (&pci_indirect),
-+			  "r" (__PCIBIOS_DS));
- 		break;
- 	}
- 
-@@ -430,10 +535,13 @@ struct irq_routing_table * pcibios_get_i
- 
- 	DBG("PCI: Fetching IRQ routing table... ");
- 	__asm__("push %%es\n\t"
-+		"movw %w8, %%ds\n\t"
- 		"push %%ds\n\t"
- 		"pop  %%es\n\t"
--		"lcall *(%%esi); cld\n\t"
-+		"lcall *%%ss:(%%esi); cld\n\t"
- 		"pop %%es\n\t"
-+		"push %%ss\n\t"
-+		"pop %%ds\n"
- 		"jc 1f\n\t"
- 		"xor %%ah, %%ah\n"
- 		"1:"
-@@ -444,7 +552,8 @@ struct irq_routing_table * pcibios_get_i
- 		  "1" (0),
- 		  "D" ((long) &opt),
- 		  "S" (&pci_indirect),
--		  "m" (opt)
-+		  "m" (opt),
-+		  "r" (__PCIBIOS_DS)
- 		: "memory");
- 	DBG("OK  ret=%d, size=%d, map=%x\n", ret, opt.size, map);
- 	if (ret & 0xff00)
-@@ -468,7 +577,10 @@ int pcibios_set_irq_routing(struct pci_d
- {
- 	int ret;
- 
--	__asm__("lcall *(%%esi); cld\n\t"
-+	__asm__("movw %w5, %%ds\n\t"
-+		"lcall *%%ss:(%%esi); cld\n\t"
-+		"push %%ss\n\t"
-+		"pop %%ds\n"
- 		"jc 1f\n\t"
- 		"xor %%ah, %%ah\n"
- 		"1:"
-@@ -476,7 +588,8 @@ int pcibios_set_irq_routing(struct pci_d
- 		: "0" (PCIBIOS_SET_PCI_HW_INT),
- 		  "b" ((dev->bus->number << 8) | dev->devfn),
- 		  "c" ((irq << 8) | (pin + 10)),
--		  "S" (&pci_indirect));
-+		  "S" (&pci_indirect),
-+		  "r" (__PCIBIOS_DS));
- 	return !(ret & 0xff00);
- }
- EXPORT_SYMBOL(pcibios_set_irq_routing);
---- a/arch/i386/power/cpu.c
-+++ b/arch/i386/power/cpu.c
-@@ -64,7 +64,7 @@ static void do_fpu_end(void)
- static void fix_processor_context(void)
- {
- 	int cpu = smp_processor_id();
--	struct tss_struct * t = &per_cpu(init_tss, cpu);
-+	struct tss_struct *t = init_tss + cpu;
- 
- 	set_tss_desc(cpu,t);	/* This just modifies memory; should not be necessary. But... This is necessary, because 386 hardware has concept of busy TSS or some similar stupidity. */
- 
---- a/arch/i386/xen/enlighten.c
-+++ b/arch/i386/xen/enlighten.c
-@@ -320,7 +320,7 @@ static void xen_set_ldt(const void *addr
- static void xen_load_gdt(const struct Xgt_desc_struct *dtr)
- {
- 	unsigned long *frames;
--	unsigned long va = dtr->address;
-+	unsigned long va = (unsigned long)dtr->address;
- 	unsigned int size = dtr->size + 1;
- 	unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
- 	int f;
-@@ -335,7 +335,7 @@ static void xen_load_gdt(const struct Xg
- 	mcs = xen_mc_entry(sizeof(*frames) * pages);
- 	frames = mcs.args;
- 
--	for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
-+	for (f = 0; va < (unsigned long)dtr->address + size; va += PAGE_SIZE, f++) {
- 		frames[f] = virt_to_mfn(va);
- 		make_lowmem_page_readonly((void *)va);
- 	}
-@@ -429,7 +429,7 @@ static void xen_write_idt_entry(struct d
- 
- 	preempt_disable();
- 
--	start = __get_cpu_var(idt_desc).address;
-+	start = (unsigned long)__get_cpu_var(idt_desc).address;
- 	end = start + __get_cpu_var(idt_desc).size + 1;
- 
- 	xen_mc_flush();
---- a/arch/i386/xen/smp.c
-+++ b/arch/i386/xen/smp.c
-@@ -144,7 +144,7 @@ void __init xen_smp_prepare_boot_cpu(voi
- 
- 	/* We've switched to the "real" per-cpu gdt, so make sure the
- 	   old memory can be recycled */
--	make_lowmem_page_readwrite(&per_cpu__gdt_page);
-+	make_lowmem_page_readwrite(get_cpu_gdt_table(smp_processor_id()));
- 
- 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
- 		cpus_clear(cpu_sibling_map[cpu]);
-@@ -198,7 +198,7 @@ static __cpuinit int
- cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
- {
- 	struct vcpu_guest_context *ctxt;
--	struct gdt_page *gdt = &per_cpu(gdt_page, cpu);
-+	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
- 
- 	if (cpu_test_and_set(cpu, cpu_initialized_map))
- 		return 0;
-@@ -222,11 +222,11 @@ cpu_initialize_context(unsigned int cpu,
- 
- 	ctxt->ldt_ents = 0;
- 
--	BUG_ON((unsigned long)gdt->gdt & ~PAGE_MASK);
--	make_lowmem_page_readonly(gdt->gdt);
-+	BUG_ON((unsigned long)gdt & ~PAGE_MASK);
-+	make_lowmem_page_readonly(gdt);
- 
--	ctxt->gdt_frames[0] = virt_to_mfn(gdt->gdt);
--	ctxt->gdt_ents      = ARRAY_SIZE(gdt->gdt);
-+	ctxt->gdt_frames[0] = virt_to_mfn(gdt);
-+	ctxt->gdt_ents      = GDT_ENTRIES;
- 
- 	ctxt->user_regs.cs = __KERNEL_CS;
- 	ctxt->user_regs.esp = idle->thread.esp0 - sizeof(struct pt_regs);
---- a/arch/ia64/ia32/binfmt_elf32.c
-+++ b/arch/ia64/ia32/binfmt_elf32.c
-@@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_
- 
- #define elf_read_implies_exec(ex, have_pt_gnu_stack)	(!(have_pt_gnu_stack))
- 
-+#ifdef CONFIG_PAX_ASLR
-+#define PAX_ELF_ET_DYN_BASE	(current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
-+
-+#define PAX_DELTA_MMAP_LEN	(current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
-+#define PAX_DELTA_STACK_LEN	(current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
-+#endif
-+
- /* Ugly but avoids duplication */
- #include "../../../fs/binfmt_elf.c"
- 
---- a/arch/ia64/ia32/ia32priv.h
-+++ b/arch/ia64/ia32/ia32priv.h
-@@ -304,7 +304,14 @@ struct old_linux32_dirent {
- #define ELF_DATA	ELFDATA2LSB
- #define ELF_ARCH	EM_386
- 
--#define IA32_STACK_TOP		IA32_PAGE_OFFSET
-+#ifdef CONFIG_PAX_RANDUSTACK
-+#define __IA32_DELTA_STACK	(current->mm->delta_stack)
-+#else
-+#define __IA32_DELTA_STACK	0UL
-+#endif
-+
-+#define IA32_STACK_TOP		(IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
-+
- #define IA32_GATE_OFFSET	IA32_PAGE_OFFSET
- #define IA32_GATE_END		IA32_PAGE_OFFSET + PAGE_SIZE
- 
---- a/arch/ia64/kernel/module.c
-+++ b/arch/ia64/kernel/module.c
-@@ -321,7 +321,7 @@ module_alloc (unsigned long size)
- void
- module_free (struct module *mod, void *module_region)
- {
--	if (mod->arch.init_unw_table && module_region == mod->module_init) {
-+	if (mod->arch.init_unw_table && module_region == mod->module_init_rx) {
- 		unw_remove_unwind_table(mod->arch.init_unw_table);
- 		mod->arch.init_unw_table = NULL;
- 	}
-@@ -499,15 +499,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
- }
- 
- static inline int
-+in_init_rx (const struct module *mod, uint64_t addr)
-+{
-+	return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
-+}
-+
-+static inline int
-+in_init_rw (const struct module *mod, uint64_t addr)
-+{
-+	return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
-+}
-+
-+static inline int
- in_init (const struct module *mod, uint64_t addr)
- {
--	return addr - (uint64_t) mod->module_init < mod->init_size;
-+	return in_init_rx(mod, value) || in_init_rw(mod, value);
-+}
-+
-+static inline int
-+in_core_rx (const struct module *mod, uint64_t addr)
-+{
-+	return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
-+}
-+
-+static inline int
-+in_core_rw (const struct module *mod, uint64_t addr)
-+{
-+	return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
- }
- 
- static inline int
- in_core (const struct module *mod, uint64_t addr)
- {
--	return addr - (uint64_t) mod->module_core < mod->core_size;
-+	return in_core_rx(mod, value) || in_core_rw(mod, value);
- }
- 
- static inline int
-@@ -691,7 +715,14 @@ do_reloc (struct module *mod, uint8_t r_
- 		break;
- 
- 	      case RV_BDREL:
--		val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
-+		if (in_init_rx(mod, val))
-+			val -= (uint64_t) mod->module_init_rx;
-+		else if (in_init_rw(mod, val))
-+			val -= (uint64_t) mod->module_init_rw;
-+		else if (in_core_rx(mod, val))
-+			val -= (uint64_t) mod->module_core_rx;
-+		else if (in_core_rw(mod, val))
-+			val -= (uint64_t) mod->module_core_rw;
- 		break;
- 
- 	      case RV_LTV:
-@@ -825,15 +856,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
- 		 *     addresses have been selected...
- 		 */
- 		uint64_t gp;
--		if (mod->core_size > MAX_LTOFF)
-+		if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
- 			/*
- 			 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
- 			 * at the end of the module.
- 			 */
--			gp = mod->core_size - MAX_LTOFF / 2;
-+			gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
- 		else
--			gp = mod->core_size / 2;
--		gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
-+			gp = (mod->core_size_rx + mod->core_size_rw) / 2;
-+		gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
- 		mod->arch.gp = gp;
- 		DEBUGP("%s: placing gp at 0x%lx\n", __FUNCTION__, gp);
- 	}
---- a/arch/ia64/kernel/ptrace.c
-+++ b/arch/ia64/kernel/ptrace.c
-@@ -17,6 +17,7 @@
- #include <linux/security.h>
- #include <linux/audit.h>
- #include <linux/signal.h>
-+#include <linux/grsecurity.h>
- 
- #include <asm/pgtable.h>
- #include <asm/processor.h>
-@@ -1451,6 +1452,9 @@ sys_ptrace (long request, pid_t pid, uns
- 	if (pid == 1)		/* no messing around with init! */
- 		goto out_tsk;
- 
-+	if (gr_handle_ptrace(child, request))
-+		goto out_tsk;
-+
- 	if (request == PTRACE_ATTACH) {
- 		ret = ptrace_attach(child);
- 		goto out_tsk;
---- a/arch/ia64/kernel/sys_ia64.c
-+++ b/arch/ia64/kernel/sys_ia64.c
-@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
- 	if (REGION_NUMBER(addr) == RGN_HPAGE)
- 		addr = 0;
- #endif
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+	if ((mm->pax_flags & MF_PAX_RANDMMAP) && addr && filp)
-+		addr = mm->free_area_cache;
-+	else
-+#endif
-+
- 	if (!addr)
- 		addr = mm->free_area_cache;
- 
-@@ -61,9 +68,9 @@ arch_get_unmapped_area (struct file *fil
- 	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
- 		/* At this point:  (!vma || addr < vma->vm_end). */
- 		if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
--			if (start_addr != TASK_UNMAPPED_BASE) {
-+			if (start_addr != mm->mmap_base) {
- 				/* Start a new search --- just in case we missed some holes.  */
--				addr = TASK_UNMAPPED_BASE;
-+				addr = mm->mmap_base;
- 				goto full_search;
- 			}
- 			return -ENOMEM;
---- a/arch/ia64/mm/fault.c
-+++ b/arch/ia64/mm/fault.c
-@@ -10,6 +10,7 @@
- #include <linux/interrupt.h>
- #include <linux/kprobes.h>
- #include <linux/kdebug.h>
-+#include <linux/binfmts.h>
- 
- #include <asm/pgtable.h>
- #include <asm/processor.h>
-@@ -72,6 +73,23 @@ mapped_kernel_page_is_present (unsigned 
- 	return pte_present(pte);
- }
- 
-+#ifdef CONFIG_PAX_PAGEEXEC
-+void pax_report_insns(void *pc, void *sp)
-+{
-+	unsigned long i;
-+
-+	printk(KERN_ERR "PAX: bytes at PC: ");
-+	for (i = 0; i < 8; i++) {
-+		unsigned int c;
-+		if (get_user(c, (unsigned int *)pc+i))
-+			printk("???????? ");
-+		else
-+			printk("%08x ", c);
-+	}
-+	printk("\n");
-+}
-+#endif
-+
- void __kprobes
- ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
- {
-@@ -145,9 +163,23 @@ ia64_do_page_fault (unsigned long addres
- 	mask = (  (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
- 		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
- 
--	if ((vma->vm_flags & mask) != mask)
-+	if ((vma->vm_flags & mask) != mask) {
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+		if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
-+			if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
-+				goto bad_area;
-+
-+			up_read(&mm->mmap_sem);
-+			pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
-+			do_exit(SIGKILL);
-+		}
-+#endif
-+
- 		goto bad_area;
- 
-+	}
-+
-   survive:
- 	/*
- 	 * If for any reason at all we couldn't handle the fault, make
---- a/arch/ia64/mm/init.c
-+++ b/arch/ia64/mm/init.c
-@@ -20,8 +20,8 @@
- #include <linux/proc_fs.h>
- #include <linux/bitops.h>
- #include <linux/kexec.h>
-+#include <linux/a.out.h>
- 
--#include <asm/a.out.h>
- #include <asm/dma.h>
- #include <asm/ia32.h>
- #include <asm/io.h>
-@@ -130,8 +130,21 @@ ia64_init_addr_space (void)
- 		vma->vm_mm = current->mm;
- 		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
- 		vma->vm_end = vma->vm_start + PAGE_SIZE;
--		vma->vm_page_prot = protection_map[VM_DATA_DEFAULT_FLAGS & 0x7];
- 		vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+		if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
-+			vm->vm_flags &= ~VM_EXEC;
-+
-+#ifdef CONFIG_PAX_MPROTECT
-+			if (current->mm->pax_flags & MF_PAX_MPROTECT)
-+				vma->vm_flags &= ~VM_MAYEXEC;
-+#endif
-+
-+		}
-+#endif
-+
-+		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
- 		down_write(&current->mm->mmap_sem);
- 		if (insert_vm_struct(current->mm, vma)) {
- 			up_write(&current->mm->mmap_sem);
---- a/arch/mips/kernel/binfmt_elfn32.c
-+++ b/arch/mips/kernel/binfmt_elfn32.c
-@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
- #undef ELF_ET_DYN_BASE
- #define ELF_ET_DYN_BASE         (TASK32_SIZE / 3 * 2)
- 
-+#ifdef CONFIG_PAX_ASLR
-+#define PAX_ELF_ET_DYN_BASE	((current->thread.mflags & MF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
-+
-+#define PAX_DELTA_MMAP_LEN	((current->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
-+#define PAX_DELTA_STACK_LEN	((current->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
-+#endif
-+
- #include <asm/processor.h>
- #include <linux/module.h>
- #include <linux/elfcore.h>
---- a/arch/mips/kernel/binfmt_elfo32.c
-+++ b/arch/mips/kernel/binfmt_elfo32.c
-@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
- #undef ELF_ET_DYN_BASE
- #define ELF_ET_DYN_BASE         (TASK32_SIZE / 3 * 2)
- 
-+#ifdef CONFIG_PAX_ASLR
-+#define PAX_ELF_ET_DYN_BASE	((current->thread.mflags & MF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
-+
-+#define PAX_DELTA_MMAP_LEN	((current->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
-+#define PAX_DELTA_STACK_LEN	((current->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
-+#endif
-+
- #include <asm/processor.h>
- #include <linux/module.h>
- #include <linux/elfcore.h>
---- a/arch/mips/kernel/syscall.c
-+++ b/arch/mips/kernel/syscall.c
-@@ -88,6 +88,11 @@ unsigned long arch_get_unmapped_area(str
- 	do_color_align = 0;
- 	if (filp || (flags & MAP_SHARED))
- 		do_color_align = 1;
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+	if (!(current->mm->pax_flags & MF_PAX_RANDMMAP) || !filp)
-+#endif
-+
- 	if (addr) {
- 		if (do_color_align)
- 			addr = COLOUR_ALIGN(addr, pgoff);
-@@ -98,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
- 		    (!vmm || addr + len <= vmm->vm_start))
- 			return addr;
- 	}
--	addr = TASK_UNMAPPED_BASE;
-+	addr = current->mm->mmap_base;
- 	if (do_color_align)
- 		addr = COLOUR_ALIGN(addr, pgoff);
- 	else
---- a/arch/mips/mm/fault.c
-+++ b/arch/mips/mm/fault.c
-@@ -26,6 +26,23 @@
- #include <asm/ptrace.h>
- #include <asm/highmem.h>		/* For VMALLOC_END */
- 
-+#ifdef CONFIG_PAX_PAGEEXEC
-+void pax_report_insns(void *pc)
-+{
-+	unsigned long i;
-+
-+	printk(KERN_ERR "PAX: bytes at PC: ");
-+	for (i = 0; i < 5; i++) {
-+		unsigned int c;
-+		if (get_user(c, (unsigned int *)pc+i))
-+			printk("???????? ");
-+		else
-+			printk("%08x ", c);
-+	}
-+	printk("\n");
-+}
-+#endif
-+
- /*
-  * This routine handles page faults.  It determines the address,
-  * and the problem, and then passes it off to one of the appropriate
---- a/arch/parisc/kernel/module.c
-+++ b/arch/parisc/kernel/module.c
-@@ -73,16 +73,38 @@
- 
- /* three functions to determine where in the module core
-  * or init pieces the location is */
-+static inline int in_init_rx(struct module *me, void *loc)
-+{
-+	return (loc >= me->module_init_rx &&
-+		loc < (me->module_init_rx + me->init_size_rx));
-+}
-+
-+static inline int in_init_rw(struct module *me, void *loc)
-+{
-+	return (loc >= me->module_init_rw &&
-+		loc < (me->module_init_rw + me->init_size_rw));
-+}
-+
- static inline int in_init(struct module *me, void *loc)
- {
--	return (loc >= me->module_init &&
--		loc <= (me->module_init + me->init_size));
-+	return in_init_rx(me, loc) || in_init_rw(me, loc);
-+}
-+
-+static inline int in_core_rx(struct module *me, void *loc)
-+{
-+	return (loc >= me->module_core_rx &&
-+		loc < (me->module_core_rx + me->core_size_rx));
-+}
-+
-+static inline int in_core_rw(struct module *me, void *loc)
-+{
-+	return (loc >= me->module_core_rw &&
-+		loc < (me->module_core_rw + me->core_size_rw));
- }
- 
- static inline int in_core(struct module *me, void *loc)
- {
--	return (loc >= me->module_core &&
--		loc <= (me->module_core + me->core_size));
-+	return in_core_rx(me, loc) || in_core_rw(me, loc);
- }
- 
- static inline int in_local(struct module *me, void *loc)
-@@ -296,21 +318,21 @@ int module_frob_arch_sections(CONST Elf_
- 	}
- 
- 	/* align things a bit */
--	me->core_size = ALIGN(me->core_size, 16);
--	me->arch.got_offset = me->core_size;
--	me->core_size += gots * sizeof(struct got_entry);
--
--	me->core_size = ALIGN(me->core_size, 16);
--	me->arch.fdesc_offset = me->core_size;
--	me->core_size += fdescs * sizeof(Elf_Fdesc);
--
--	me->core_size = ALIGN(me->core_size, 16);
--	me->arch.stub_offset = me->core_size;
--	me->core_size += stubs * sizeof(struct stub_entry);
--
--	me->init_size = ALIGN(me->init_size, 16);
--	me->arch.init_stub_offset = me->init_size;
--	me->init_size += init_stubs * sizeof(struct stub_entry);
-+	me->core_size_rw = ALIGN(me->core_size_rw, 16);
-+	me->arch.got_offset = me->core_size_rw;
-+	me->core_size_rw += gots * sizeof(struct got_entry);
-+
-+	me->core_size_rw = ALIGN(me->core_size_rw, 16);
-+	me->arch.fdesc_offset = me->core_size_rw;
-+	me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
-+
-+	me->core_size_rx = ALIGN(me->core_size_rx, 16);
-+	me->arch.stub_offset = me->core_size_rx;
-+	me->core_size_rx += stubs * sizeof(struct stub_entry);
-+
-+	me->init_size_rx = ALIGN(me->init_size_rx, 16);
-+	me->arch.init_stub_offset = me->init_size_rx;
-+	me->init_size_rx += init_stubs * sizeof(struct stub_entry);
- 
- 	me->arch.got_max = gots;
- 	me->arch.fdesc_max = fdescs;
-@@ -330,7 +352,7 @@ static Elf64_Word get_got(struct module 
- 
- 	BUG_ON(value == 0);
- 
--	got = me->module_core + me->arch.got_offset;
-+	got = me->module_core_rw + me->arch.got_offset;
- 	for (i = 0; got[i].addr; i++)
- 		if (got[i].addr == value)
- 			goto out;
-@@ -348,7 +370,7 @@ static Elf64_Word get_got(struct module 
- #ifdef CONFIG_64BIT
- static Elf_Addr get_fdesc(struct module *me, unsigned long value)
- {
--	Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
-+	Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
- 
- 	if (!value) {
- 		printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
-@@ -366,7 +388,7 @@ static Elf_Addr get_fdesc(struct module 
- 
- 	/* Create new one */
- 	fdesc->addr = value;
--	fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
-+	fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
- 	return (Elf_Addr)fdesc;
- }
- #endif /* CONFIG_64BIT */
-@@ -386,12 +408,12 @@ static Elf_Addr get_stub(struct module *
- 	if(init_section) {
- 		i = me->arch.init_stub_count++;
- 		BUG_ON(me->arch.init_stub_count > me->arch.init_stub_max);
--		stub = me->module_init + me->arch.init_stub_offset + 
-+		stub = me->module_init_rx + me->arch.init_stub_offset +
- 			i * sizeof(struct stub_entry);
- 	} else {
- 		i = me->arch.stub_count++;
- 		BUG_ON(me->arch.stub_count > me->arch.stub_max);
--		stub = me->module_core + me->arch.stub_offset + 
-+		stub = me->module_core_rx + me->arch.stub_offset +
- 			i * sizeof(struct stub_entry);
- 	}
- 
-@@ -759,7 +781,7 @@ register_unwind_table(struct module *me,
- 
- 	table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
- 	end = table + sechdrs[me->arch.unwind_section].sh_size;
--	gp = (Elf_Addr)me->module_core + me->arch.got_offset;
-+	gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
- 
- 	DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
- 	       me->arch.unwind_section, table, end, gp);
---- a/arch/parisc/kernel/sys_parisc.c
-+++ b/arch/parisc/kernel/sys_parisc.c
-@@ -111,7 +111,7 @@ unsigned long arch_get_unmapped_area(str
- 	if (flags & MAP_FIXED)
- 		return addr;
- 	if (!addr)
--		addr = TASK_UNMAPPED_BASE;
-+		addr = current->mm->mmap_base;
- 
- 	if (filp) {
- 		addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
---- a/arch/parisc/kernel/traps.c
-+++ b/arch/parisc/kernel/traps.c
-@@ -713,9 +713,7 @@ void handle_interruption(int code, struc
- 
- 			down_read(&current->mm->mmap_sem);
- 			vma = find_vma(current->mm,regs->iaoq[0]);
--			if (vma && (regs->iaoq[0] >= vma->vm_start)
--				&& (vma->vm_flags & VM_EXEC)) {
--
-+			if (vma && (regs->iaoq[0] >= vma->vm_start)) {
- 				fault_address = regs->iaoq[0];
- 				fault_space = regs->iasq[0];
- 
---- a/arch/parisc/mm/fault.c
-+++ b/arch/parisc/mm/fault.c
-@@ -16,6 +16,8 @@
- #include <linux/sched.h>
- #include <linux/interrupt.h>
- #include <linux/module.h>
-+#include <linux/unistd.h>
-+#include <linux/binfmts.h>
- 
- #include <asm/uaccess.h>
- #include <asm/traps.h>
-@@ -53,7 +55,7 @@ DEFINE_PER_CPU(struct exception_data, ex
- static unsigned long
- parisc_acctyp(unsigned long code, unsigned int inst)
- {
--	if (code == 6 || code == 16)
-+	if (code == 6 || code == 7 || code == 16)
- 	    return VM_EXEC;
- 
- 	switch (inst & 0xf0000000) {
-@@ -139,6 +141,116 @@ parisc_acctyp(unsigned long code, unsign
- 			}
- #endif
- 
-+#ifdef CONFIG_PAX_PAGEEXEC
-+/*
-+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
-+ *
-+ * returns 1 when task should be killed
-+ *         2 when rt_sigreturn trampoline was detected
-+ *         3 when unpatched PLT trampoline was detected
-+ */
-+static int pax_handle_fetch_fault(struct pt_regs *regs)
-+{
-+
-+#ifdef CONFIG_PAX_EMUPLT
-+	int err;
-+
-+	do { /* PaX: unpatched PLT emulation */
-+		unsigned int bl, depwi;
-+
-+		err = get_user(bl, (unsigned int *)instruction_pointer(regs));
-+		err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
-+
-+		if (err)
-+			break;
-+
-+		if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
-+			unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
-+
-+			err = get_user(ldw, (unsigned int *)addr);
-+			err |= get_user(bv, (unsigned int *)(addr+4));
-+			err |= get_user(ldw2, (unsigned int *)(addr+8));
-+
-+			if (err)
-+				break;
-+
-+			if (ldw == 0x0E801096U &&
-+			    bv == 0xEAC0C000U &&
-+			    ldw2 == 0x0E881095U)
-+			{
-+				unsigned int resolver, map;
-+
-+				err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
-+				err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
-+				if (err)
-+					break;
-+
-+				regs->gr[20] = instruction_pointer(regs)+8;
-+				regs->gr[21] = map;
-+				regs->gr[22] = resolver;
-+				regs->iaoq[0] = resolver | 3UL;
-+				regs->iaoq[1] = regs->iaoq[0] + 4;
-+				return 3;
-+			}
-+		}
-+	} while (0);
-+#endif
-+
-+#ifdef CONFIG_PAX_EMUTRAMP
-+
-+#ifndef CONFIG_PAX_EMUSIGRT
-+	if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
-+		return 1;
-+#endif
-+
-+	do { /* PaX: rt_sigreturn emulation */
-+		unsigned int ldi1, ldi2, bel, nop;
-+
-+		err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
-+		err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
-+		err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
-+		err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
-+
-+		if (err)
-+			break;
-+
-+		if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
-+		    ldi2 == 0x3414015AU &&
-+		    bel == 0xE4008200U &&
-+		    nop == 0x08000240U)
-+		{
-+			regs->gr[25] = (ldi1 & 2) >> 1;
-+			regs->gr[20] = __NR_rt_sigreturn;
-+			regs->gr[31] = regs->iaoq[1] + 16;
-+			regs->sr[0] = regs->iasq[1];
-+			regs->iaoq[0] = 0x100UL;
-+			regs->iaoq[1] = regs->iaoq[0] + 4;
-+			regs->iasq[0] = regs->sr[2];
-+			regs->iasq[1] = regs->sr[2];
-+			return 2;
-+		}
-+	} while (0);
-+#endif
-+
-+	return 1;
-+}
-+
-+void pax_report_insns(void *pc, void *sp)
-+{
-+	unsigned long i;
-+
-+	printk(KERN_ERR "PAX: bytes at PC: ");
-+	for (i = 0; i < 5; i++) {
-+		unsigned int c;
-+		if (get_user(c, (unsigned int *)pc+i))
-+			printk("???????? ");
-+		else
-+			printk("%08x ", c);
-+	}
-+	printk("\n");
-+}
-+#endif
-+
- void do_page_fault(struct pt_regs *regs, unsigned long code,
- 			      unsigned long address)
- {
-@@ -165,8 +277,33 @@ good_area:
- 
- 	acc_type = parisc_acctyp(code,regs->iir);
- 
--	if ((vma->vm_flags & acc_type) != acc_type)
-+	if ((vma->vm_flags & acc_type) != acc_type) {
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+		if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
-+		    (address & ~3UL) == instruction_pointer(regs))
-+		{
-+			up_read(&mm->mmap_sem);
-+			switch (pax_handle_fetch_fault(regs)) {
-+
-+#ifdef CONFIG_PAX_EMUPLT
-+			case 3:
-+				return;
-+#endif
-+
-+#ifdef CONFIG_PAX_EMUTRAMP
-+			case 2:
-+				return;
-+#endif
-+
-+			}
-+			pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
-+			do_exit(SIGKILL);
-+		}
-+#endif
-+
- 		goto bad_area;
-+	}
- 
- 	/*
- 	 * If for any reason at all we couldn't handle the fault, make
---- a/arch/powerpc/kernel/module_32.c
-+++ b/arch/powerpc/kernel/module_32.c
-@@ -126,7 +126,7 @@ int module_frob_arch_sections(Elf32_Ehdr
- 			me->arch.core_plt_section = i;
- 	}
- 	if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
--		printk("Module doesn't contain .plt or .init.plt sections.\n");
-+		printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
- 		return -ENOEXEC;
- 	}
- 
-@@ -167,11 +167,16 @@ static uint32_t do_plt_call(void *locati
- 
- 	DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
- 	/* Init, or core PLT? */
--	if (location >= mod->module_core
--	    && location < mod->module_core + mod->core_size)
-+	if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
-+	    (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
- 		entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
--	else
-+	else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
-+		 (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
- 		entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
-+	else {
-+		printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
-+		return ~0UL;
-+	}
- 
- 	/* Find this entry, or if that fails, the next avail. entry */
- 	while (entry->jump[0]) {
---- a/arch/powerpc/kernel/signal_32.c
-+++ b/arch/powerpc/kernel/signal_32.c
-@@ -728,7 +728,7 @@ int handle_rt_signal32(unsigned long sig
- 
- 	/* Save user registers on the stack */
- 	frame = &rt_sf->uc.uc_mcontext;
--	if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
-+	if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
- 		if (save_user_regs(regs, frame, 0))
- 			goto badframe;
- 		regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
---- a/arch/powerpc/kernel/signal_64.c
-+++ b/arch/powerpc/kernel/signal_64.c
-@@ -359,7 +359,7 @@ int handle_rt_signal64(int signr, struct
- 	current->thread.fpscr.val = 0;
- 
- 	/* Set up to return from userspace. */
--	if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
-+	if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
- 		regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
- 	} else {
- 		err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
---- a/arch/powerpc/kernel/vdso.c
-+++ b/arch/powerpc/kernel/vdso.c
-@@ -211,7 +211,7 @@ int arch_setup_additional_pages(struct l
- 	vdso_base = VDSO32_MBASE;
- #endif
- 
--	current->mm->context.vdso_base = 0;
-+	current->mm->context.vdso_base = ~0UL;
- 
- 	/* vDSO has a problem and was disabled, just don't "enable" it for the
- 	 * process
-@@ -228,7 +228,7 @@ int arch_setup_additional_pages(struct l
- 	 */
- 	down_write(&mm->mmap_sem);
- 	vdso_base = get_unmapped_area(NULL, vdso_base,
--				      vdso_pages << PAGE_SHIFT, 0, 0);
-+				      vdso_pages << PAGE_SHIFT, 0, MAP_PRIVATE | MAP_EXECUTABLE);
- 	if (IS_ERR_VALUE(vdso_base)) {
- 		rc = vdso_base;
- 		goto fail_mmapsem;
---- a/arch/powerpc/mm/fault.c
-+++ b/arch/powerpc/mm/fault.c
-@@ -29,6 +29,12 @@
- #include <linux/module.h>
- #include <linux/kprobes.h>
- #include <linux/kdebug.h>
-+#include <linux/binfmts.h>
-+#include <linux/slab.h>
-+#include <linux/pagemap.h>
-+#include <linux/compiler.h>
-+#include <linux/binfmts.h>
-+#include <linux/unistd.h>
- 
- #include <asm/page.h>
- #include <asm/pgtable.h>
-@@ -62,6 +68,364 @@ static inline int notify_page_fault(stru
- }
- #endif
- 
-+#ifdef CONFIG_PAX_EMUSIGRT
-+void pax_syscall_close(struct vm_area_struct *vma)
-+{
-+	vma->vm_mm->call_syscall = 0UL;
-+}
-+
-+static struct page *pax_syscall_nopage(struct vm_area_struct *vma, unsigned long address, int *type)
-+{
-+	struct page *page;
-+	unsigned int *kaddr;
-+
-+	page = alloc_page(GFP_HIGHUSER);
-+	if (!page)
-+		return NOPAGE_OOM;
-+
-+	kaddr = kmap(page);
-+	memset(kaddr, 0, PAGE_SIZE);
-+	kaddr[0] = 0x44000002U; /* sc */
-+	__flush_dcache_icache(kaddr);
-+	kunmap(page);
-+	if (type)
-+		*type = VM_FAULT_MAJOR;
-+	return page;
-+}
-+
-+static struct vm_operations_struct pax_vm_ops = {
-+	.close = pax_syscall_close,
-+	.nopage = pax_syscall_nopage,
-+};
-+
-+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
-+{
-+	int ret;
-+
-+	memset(vma, 0, sizeof(*vma));
-+	vma->vm_mm = current->mm;
-+	vma->vm_start = addr;
-+	vma->vm_end = addr + PAGE_SIZE;
-+	vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
-+	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-+	vma->vm_ops = &pax_vm_ops;
-+
-+	ret = insert_vm_struct(current->mm, vma);
-+	if (ret)
-+		return ret;
-+
-+	++current->mm->total_vm;
-+	return 0;
-+}
-+#endif
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+/*
-+ * PaX: decide what to do with offenders (regs->nip = fault address)
-+ *
-+ * returns 1 when task should be killed
-+ *         2 when patched GOT trampoline was detected
-+ *         3 when patched PLT trampoline was detected
-+ *         4 when unpatched PLT trampoline was detected
-+ *         5 when sigreturn trampoline was detected
-+ *         6 when rt_sigreturn trampoline was detected
-+ */
-+static int pax_handle_fetch_fault(struct pt_regs *regs)
-+{
-+
-+#if defined(CONFIG_PAX_EMUPLT) || defined(CONFIG_PAX_EMUSIGRT)
-+	int err;
-+#endif
-+
-+#ifdef CONFIG_PAX_EMUPLT
-+	do { /* PaX: patched GOT emulation */
-+		unsigned int blrl;
-+
-+		err = get_user(blrl, (unsigned int *)regs->nip);
-+
-+		if (!err && blrl == 0x4E800021U) {
-+			unsigned long temp = regs->nip;
-+
-+			regs->nip = regs->link & 0xFFFFFFFCUL;
-+			regs->link = temp + 4UL;
-+			return 2;
-+		}
-+	} while (0);
-+
-+	do { /* PaX: patched PLT emulation #1 */
-+		unsigned int b;
-+
-+		err = get_user(b, (unsigned int *)regs->nip);
-+
-+		if (!err && (b & 0xFC000003U) == 0x48000000U) {
-+			regs->nip += (((b | 0xFC000000UL) ^ 0x02000000UL) + 0x02000000UL);
-+			return 3;
-+		}
-+	} while (0);
-+
-+	do { /* PaX: unpatched PLT emulation #1 */
-+		unsigned int li, b;
-+
-+		err = get_user(li, (unsigned int *)regs->nip);
-+		err |= get_user(b, (unsigned int *)(regs->nip+4));
-+
-+		if (!err && (li & 0xFFFF0000U) == 0x39600000U && (b & 0xFC000003U) == 0x48000000U) {
-+			unsigned int rlwinm, add, li2, addis2, mtctr, li3, addis3, bctr;
-+			unsigned long addr = b | 0xFC000000UL;
-+
-+			addr = regs->nip + 4 + ((addr ^ 0x02000000UL) + 0x02000000UL);
-+			err = get_user(rlwinm, (unsigned int *)addr);
-+			err |= get_user(add, (unsigned int *)(addr+4));
-+			err |= get_user(li2, (unsigned int *)(addr+8));
-+			err |= get_user(addis2, (unsigned int *)(addr+12));
-+			err |= get_user(mtctr, (unsigned int *)(addr+16));
-+			err |= get_user(li3, (unsigned int *)(addr+20));
-+			err |= get_user(addis3, (unsigned int *)(addr+24));
-+			err |= get_user(bctr, (unsigned int *)(addr+28));
-+
-+			if (err)
-+				break;
-+
-+			if (rlwinm == 0x556C083CU &&
-+			    add == 0x7D6C5A14U &&
-+			    (li2 & 0xFFFF0000U) == 0x39800000U &&
-+			    (addis2 & 0xFFFF0000U) == 0x3D8C0000U &&
-+			    mtctr == 0x7D8903A6U &&
-+			    (li3 & 0xFFFF0000U) == 0x39800000U &&
-+			    (addis3 & 0xFFFF0000U) == 0x3D8C0000U &&
-+			    bctr == 0x4E800420U)
-+			{
-+				regs->gpr[PT_R11] = 3 * (((li | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
-+				regs->gpr[PT_R12] = (((li3 | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
-+				regs->gpr[PT_R12] += (addis3 & 0xFFFFU) << 16;
-+				regs->ctr = (((li2 | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
-+				regs->ctr += (addis2 & 0xFFFFU) << 16;
-+				regs->nip = regs->ctr;
-+				return 4;
-+			}
-+		}
-+	} while (0);
-+
-+#if 0
-+	do { /* PaX: unpatched PLT emulation #2 */
-+		unsigned int lis, lwzu, b, bctr;
-+
-+		err = get_user(lis, (unsigned int *)regs->nip);
-+		err |= get_user(lwzu, (unsigned int *)(regs->nip+4));
-+		err |= get_user(b, (unsigned int *)(regs->nip+8));
-+		err |= get_user(bctr, (unsigned int *)(regs->nip+12));
-+
-+		if (err)
-+			break;
-+
-+		if ((lis & 0xFFFF0000U) == 0x39600000U &&
-+		    (lwzu & 0xU) == 0xU &&
-+		    (b & 0xFC000003U) == 0x48000000U &&
-+		    bctr == 0x4E800420U)
-+		{
-+			unsigned int addis, addi, rlwinm, add, li2, addis2, mtctr, li3, addis3, bctr;
-+			unsigned long addr = b | 0xFC000000UL;
-+
-+			addr = regs->nip + 12 + ((addr ^ 0x02000000UL) + 0x02000000UL);
-+			err = get_user(addis, (unsigned int*)addr);
-+			err |= get_user(addi, (unsigned int*)(addr+4));
-+			err |= get_user(rlwinm, (unsigned int*)(addr+8));
-+			err |= get_user(add, (unsigned int*)(addr+12));
-+			err |= get_user(li2, (unsigned int*)(addr+16));
-+			err |= get_user(addis2, (unsigned int*)(addr+20));
-+			err |= get_user(mtctr, (unsigned int*)(addr+24));
-+			err |= get_user(li3, (unsigned int*)(addr+28));
-+			err |= get_user(addis3, (unsigned int*)(addr+32));
-+			err |= get_user(bctr, (unsigned int*)(addr+36));
-+
-+			if (err)
-+				break;
-+
-+			if ((addis & 0xFFFF0000U) == 0x3D6B0000U &&
-+			    (addi & 0xFFFF0000U) == 0x396B0000U &&
-+			    rlwinm == 0x556C083CU &&
-+			    add == 0x7D6C5A14U &&
-+			    (li2 & 0xFFFF0000U) == 0x39800000U &&
-+			    (addis2 & 0xFFFF0000U) == 0x3D8C0000U &&
-+			    mtctr == 0x7D8903A6U &&
-+			    (li3 & 0xFFFF0000U) == 0x39800000U &&
-+			    (addis3 & 0xFFFF0000U) == 0x3D8C0000U &&
-+			    bctr == 0x4E800420U)
-+			{
-+				regs->gpr[PT_R11] = 
-+				regs->gpr[PT_R11] = 3 * (((li | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
-+				regs->gpr[PT_R12] = (((li3 | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
-+				regs->gpr[PT_R12] += (addis3 & 0xFFFFU) << 16;
-+				regs->ctr = (((li2 | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
-+				regs->ctr += (addis2 & 0xFFFFU) << 16;
-+				regs->nip = regs->ctr;
-+				return 4;
-+			}
-+		}
-+	} while (0);
-+#endif
-+
-+	do { /* PaX: unpatched PLT emulation #3 */
-+		unsigned int li, b;
-+
-+		err = get_user(li, (unsigned int *)regs->nip);
-+		err |= get_user(b, (unsigned int *)(regs->nip+4));
-+
-+		if (!err && (li & 0xFFFF0000U) == 0x39600000U && (b & 0xFC000003U) == 0x48000000U) {
-+			unsigned int addis, lwz, mtctr, bctr;
-+			unsigned long addr = b | 0xFC000000UL;
-+
-+			addr = regs->nip + 4 + ((addr ^ 0x02000000UL) + 0x02000000UL);
-+			err = get_user(addis, (unsigned int *)addr);
-+			err |= get_user(lwz, (unsigned int *)(addr+4));
-+			err |= get_user(mtctr, (unsigned int *)(addr+8));
-+			err |= get_user(bctr, (unsigned int *)(addr+12));
-+
-+			if (err)
-+				break;
-+
-+			if ((addis & 0xFFFF0000U) == 0x3D6B0000U &&
-+			    (lwz & 0xFFFF0000U) == 0x816B0000U &&
-+			    mtctr == 0x7D6903A6U &&
-+			    bctr == 0x4E800420U)
-+			{
-+				unsigned int r11;
-+
-+				addr = (addis << 16) + (((li | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
-+				addr += (((lwz | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
-+
-+				err = get_user(r11, (unsigned int *)addr);
-+				if (err)
-+					break;
-+
-+				regs->gpr[PT_R11] = r11;
-+				regs->ctr = r11;
-+				regs->nip = r11;
-+				return 4;
-+			}
-+		}
-+	} while (0);
-+#endif
-+
-+#ifdef CONFIG_PAX_EMUSIGRT
-+	do { /* PaX: sigreturn emulation */
-+		unsigned int li, sc;
-+
-+		err = get_user(li, (unsigned int *)regs->nip);
-+		err |= get_user(sc, (unsigned int *)(regs->nip+4));
-+
-+		if (!err && li == 0x38000000U + __NR_sigreturn && sc == 0x44000002U) {
-+			struct vm_area_struct *vma;
-+			unsigned long call_syscall;
-+
-+			down_read(&current->mm->mmap_sem);
-+			call_syscall = current->mm->call_syscall;
-+			up_read(&current->mm->mmap_sem);
-+			if (likely(call_syscall))
-+				goto emulate;
-+
-+			vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
-+
-+			down_write(&current->mm->mmap_sem);
-+			if (current->mm->call_syscall) {
-+				call_syscall = current->mm->call_syscall;
-+				up_write(&current->mm->mmap_sem);
-+				if (vma) kmem_cache_free(vm_area_cachep, vma);
-+				goto emulate;
-+			}
-+
-+			call_syscall = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
-+			if (!vma || (call_syscall & ~PAGE_MASK)) {
-+				up_write(&current->mm->mmap_sem);
-+				if (vma) kmem_cache_free(vm_area_cachep, vma);
-+				return 1;
-+			}
-+
-+			if (pax_insert_vma(vma, call_syscall)) {
-+				up_write(&current->mm->mmap_sem);
-+				kmem_cache_free(vm_area_cachep, vma);
-+				return 1;
-+			}
-+
-+			current->mm->call_syscall = call_syscall;
-+			up_write(&current->mm->mmap_sem);
-+
-+emulate:
-+			regs->gpr[PT_R0] = __NR_sigreturn;
-+			regs->nip = call_syscall;
-+			return 5;
-+		}
-+	} while (0);
-+
-+	do { /* PaX: rt_sigreturn emulation */
-+		unsigned int li, sc;
-+
-+		err = get_user(li, (unsigned int *)regs->nip);
-+		err |= get_user(sc, (unsigned int *)(regs->nip+4));
-+
-+		if (!err && li == 0x38000000U + __NR_rt_sigreturn && sc == 0x44000002U) {
-+			struct vm_area_struct *vma;
-+			unsigned int call_syscall;
-+
-+			down_read(&current->mm->mmap_sem);
-+			call_syscall = current->mm->call_syscall;
-+			up_read(&current->mm->mmap_sem);
-+			if (likely(call_syscall))
-+				goto rt_emulate;
-+
-+			vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
-+
-+			down_write(&current->mm->mmap_sem);
-+			if (current->mm->call_syscall) {
-+				call_syscall = current->mm->call_syscall;
-+				up_write(&current->mm->mmap_sem);
-+				if (vma) kmem_cache_free(vm_area_cachep, vma);
-+				goto rt_emulate;
-+			}
-+
-+			call_syscall = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
-+			if (!vma || (call_syscall & ~PAGE_MASK)) {
-+				up_write(&current->mm->mmap_sem);
-+				if (vma) kmem_cache_free(vm_area_cachep, vma);
-+				return 1;
-+			}
-+
-+			if (pax_insert_vma(vma, call_syscall)) {
-+				up_write(&current->mm->mmap_sem);
-+				kmem_cache_free(vm_area_cachep, vma);
-+				return 1;
-+			}
-+
-+			current->mm->call_syscall = call_syscall;
-+			up_write(&current->mm->mmap_sem);
-+
-+rt_emulate:
-+			regs->gpr[PT_R0] = __NR_rt_sigreturn;
-+			regs->nip = call_syscall;
-+			return 6;
-+		}
-+	} while (0);
-+#endif
-+
-+	return 1;
-+}
-+
-+void pax_report_insns(void *pc, void *sp)
-+{
-+	unsigned long i;
-+
-+	printk(KERN_ERR "PAX: bytes at PC: ");
-+	for (i = 0; i < 5; i++) {
-+		unsigned int c;
-+		if (get_user(c, (unsigned int *)pc+i))
-+			printk("???????? ");
-+		else
-+			printk("%08x ", c);
-+	}
-+	printk("\n");
-+}
-+#endif
-+
- /*
-  * Check whether the instruction at regs->nip is a store using
-  * an update addressing form which will update r1.
-@@ -157,7 +521,7 @@ int __kprobes do_page_fault(struct pt_re
- 	 * indicate errors in DSISR but can validly be set in SRR1.
- 	 */
- 	if (trap == 0x400)
--		error_code &= 0x48200000;
-+		error_code &= 0x58200000;
- 	else
- 		is_write = error_code & DSISR_ISSTORE;
- #else
-@@ -357,6 +721,37 @@ bad_area:
- bad_area_nosemaphore:
- 	/* User mode accesses cause a SIGSEGV */
- 	if (user_mode(regs)) {
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+		if (mm->pax_flags & MF_PAX_PAGEEXEC) {
-+#ifdef CONFIG_PPC64
-+			if (is_exec && (error_code & DSISR_PROTFAULT)) {
-+#else
-+			if (is_exec && regs->nip == address) {
-+#endif
-+				switch (pax_handle_fetch_fault(regs)) {
-+
-+#ifdef CONFIG_PAX_EMUPLT
-+				case 2:
-+				case 3:
-+				case 4:
-+					return 0;
-+#endif
-+
-+#ifdef CONFIG_PAX_EMUSIGRT
-+				case 5:
-+				case 6:
-+					return 0;
-+#endif
-+
-+				}
-+
-+				pax_report_fault(regs, (void*)regs->nip, (void*)regs->gpr[PT_R1]);
-+				do_exit(SIGKILL);
-+			}
-+		}
-+#endif
-+
- 		_exception(SIGSEGV, regs, code, address);
- 		return 0;
- 	}
---- a/arch/powerpc/mm/mmap.c
-+++ b/arch/powerpc/mm/mmap.c
-@@ -75,10 +75,22 @@ void arch_pick_mmap_layout(struct mm_str
- 	 */
- 	if (mmap_is_legacy()) {
- 		mm->mmap_base = TASK_UNMAPPED_BASE;
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+		if (mm->pax_flags & MF_PAX_RANDMMAP)
-+			mm->mmap_base += mm->delta_mmap;
-+#endif
-+
- 		mm->get_unmapped_area = arch_get_unmapped_area;
- 		mm->unmap_area = arch_unmap_area;
- 	} else {
- 		mm->mmap_base = mmap_base();
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+		if (mm->pax_flags & MF_PAX_RANDMMAP)
-+			mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
-+#endif
-+
- 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
- 		mm->unmap_area = arch_unmap_area_topdown;
- 	}
---- a/arch/ppc/mm/fault.c
-+++ b/arch/ppc/mm/fault.c
-@@ -25,6 +25,11 @@
- #include <linux/interrupt.h>
- #include <linux/highmem.h>
- #include <linux/module.h>
-+#include <linux/slab.h>
-+#include <linux/pagemap.h>
-+#include <linux/compiler.h>
-+#include <linux/binfmts.h>
-+#include <linux/unistd.h>
- 
- #include <asm/page.h>
- #include <asm/pgtable.h>
-@@ -48,6 +53,364 @@ unsigned long pte_misses;	/* updated by 
- unsigned long pte_errors;	/* updated by do_page_fault() */
- unsigned int probingmem;
- 
-+#ifdef CONFIG_PAX_EMUSIGRT
-+void pax_syscall_close(struct vm_area_struct *vma)
-+{
-+	vma->vm_mm->call_syscall = 0UL;
-+}
-+
-+static struct page *pax_syscall_nopage(struct vm_area_struct *vma, unsigned long address, int *type)
-+{
-+	struct page *page;
-+	unsigned int *kaddr;
-+
-+	page = alloc_page(GFP_HIGHUSER);
-+	if (!page)
-+		return NOPAGE_OOM;
-+
-+	kaddr = kmap(page);
-+	memset(kaddr, 0, PAGE_SIZE);
-+	kaddr[0] = 0x44000002U; /* sc */
-+	__flush_dcache_icache(kaddr);
-+	kunmap(page);
-+	if (type)
-+		*type = VM_FAULT_MAJOR;
-+	return page;
-+}
-+
-+static struct vm_operations_struct pax_vm_ops = {
-+	.close = pax_syscall_close,
-+	.nopage = pax_syscall_nopage,
-+};
-+
-+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
-+{
-+	int ret;
-+
-+	memset(vma, 0, sizeof(*vma));
-+	vma->vm_mm = current->mm;
-+	vma->vm_start = addr;
-+	vma->vm_end = addr + PAGE_SIZE;
-+	vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
-+	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-+	vma->vm_ops = &pax_vm_ops;
-+
-+	ret = insert_vm_struct(current->mm, vma);
-+	if (ret)
-+		return ret;
-+
-+	++current->mm->total_vm;
-+	return 0;
-+}
-+#endif
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+/*
-+ * PaX: decide what to do with offenders (regs->nip = fault address)
-+ *
-+ * returns 1 when task should be killed
-+ *         2 when patched GOT trampoline was detected
-+ *         3 when patched PLT trampoline was detected
-+ *         4 when unpatched PLT trampoline was detected
-+ *         5 when sigreturn trampoline was detected
-+ *         6 when rt_sigreturn trampoline was detected
-+ */
-+static int pax_handle_fetch_fault(struct pt_regs *regs)
-+{
-+
-+#if defined(CONFIG_PAX_EMUPLT) || defined(CONFIG_PAX_EMUSIGRT)
-+	int err;
-+#endif
-+
-+#ifdef CONFIG_PAX_EMUPLT
-+	do { /* PaX: patched GOT emulation */
-+		unsigned int blrl;
-+
-+		err = get_user(blrl, (unsigned int *)regs->nip);
-+
-+		if (!err && blrl == 0x4E800021U) {
-+			unsigned long temp = regs->nip;
-+
-+			regs->nip = regs->link & 0xFFFFFFFCUL;
-+			regs->link = temp + 4UL;
-+			return 2;
-+		}
-+	} while (0);
-+
-+	do { /* PaX: patched PLT emulation #1 */
-+		unsigned int b;
-+
-+		err = get_user(b, (unsigned int *)regs->nip);
-+
-+		if (!err && (b & 0xFC000003U) == 0x48000000U) {
-+			regs->nip += (((b | 0xFC000000UL) ^ 0x02000000UL) + 0x02000000UL);
-+			return 3;
-+		}
-+	} while (0);
-+
-+	do { /* PaX: unpatched PLT emulation #1 */
-+		unsigned int li, b;
-+
-+		err = get_user(li, (unsigned int *)regs->nip);
-+		err |= get_user(b, (unsigned int *)(regs->nip+4));
-+
-+		if (!err && (li & 0xFFFF0000U) == 0x39600000U && (b & 0xFC000003U) == 0x48000000U) {
-+			unsigned int rlwinm, add, li2, addis2, mtctr, li3, addis3, bctr;
-+			unsigned long addr = b | 0xFC000000UL;
-+
-+			addr = regs->nip + 4 + ((addr ^ 0x02000000UL) + 0x02000000UL);
-+			err = get_user(rlwinm, (unsigned int *)addr);
-+			err |= get_user(add, (unsigned int *)(addr+4));
-+			err |= get_user(li2, (unsigned int *)(addr+8));
-+			err |= get_user(addis2, (unsigned int *)(addr+12));
-+			err |= get_user(mtctr, (unsigned int *)(addr+16));
-+			err |= get_user(li3, (unsigned int *)(addr+20));
-+			err |= get_user(addis3, (unsigned int *)(addr+24));
-+			err |= get_user(bctr, (unsigned int *)(addr+28));
-+
-+			if (err)
-+				break;
-+
-+			if (rlwinm == 0x556C083CU &&
-+			    add == 0x7D6C5A14U &&
-+			    (li2 & 0xFFFF0000U) == 0x39800000U &&
-+			    (addis2 & 0xFFFF0000U) == 0x3D8C0000U &&
-+			    mtctr == 0x7D8903A6U &&
-+			    (li3 & 0xFFFF0000U) == 0x39800000U &&
-+			    (addis3 & 0xFFFF0000U) == 0x3D8C0000U &&
-+			    bctr == 0x4E800420U)
-+			{
-+				regs->gpr[PT_R11] = 3 * (((li | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
-+				regs->gpr[PT_R12] = (((li3 | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
-+				regs->gpr[PT_R12] += (addis3 & 0xFFFFU) << 16;
-+				regs->ctr = (((li2 | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
-+				regs->ctr += (addis2 & 0xFFFFU) << 16;
-+				regs->nip = regs->ctr;
-+				return 4;
-+			}
-+		}
-+	} while (0);
-+
-+#if 0
-+	do { /* PaX: unpatched PLT emulation #2 */
-+		unsigned int lis, lwzu, b, bctr;
-+
-+		err = get_user(lis, (unsigned int *)regs->nip);
-+		err |= get_user(lwzu, (unsigned int *)(regs->nip+4));
-+		err |= get_user(b, (unsigned int *)(regs->nip+8));
-+		err |= get_user(bctr, (unsigned int *)(regs->nip+12));
-+
-+		if (err)
-+			break;
-+
-+		if ((lis & 0xFFFF0000U) == 0x39600000U &&
-+		    (lwzu & 0xU) == 0xU &&
-+		    (b & 0xFC000003U) == 0x48000000U &&
-+		    bctr == 0x4E800420U)
-+		{
-+			unsigned int addis, addi, rlwinm, add, li2, addis2, mtctr, li3, addis3, bctr;
-+			unsigned long addr = b | 0xFC000000UL;
-+
-+			addr = regs->nip + 12 + ((addr ^ 0x02000000UL) + 0x02000000UL);
-+			err = get_user(addis, (unsigned int*)addr);
-+			err |= get_user(addi, (unsigned int*)(addr+4));
-+			err |= get_user(rlwinm, (unsigned int*)(addr+8));
-+			err |= get_user(add, (unsigned int*)(addr+12));
-+			err |= get_user(li2, (unsigned int*)(addr+16));
-+			err |= get_user(addis2, (unsigned int*)(addr+20));
-+			err |= get_user(mtctr, (unsigned int*)(addr+24));
-+			err |= get_user(li3, (unsigned int*)(addr+28));
-+			err |= get_user(addis3, (unsigned int*)(addr+32));
-+			err |= get_user(bctr, (unsigned int*)(addr+36));
-+
-+			if (err)
-+				break;
-+
-+			if ((addis & 0xFFFF0000U) == 0x3D6B0000U &&
-+			    (addi & 0xFFFF0000U) == 0x396B0000U &&
-+			    rlwinm == 0x556C083CU &&
-+			    add == 0x7D6C5A14U &&
-+			    (li2 & 0xFFFF0000U) == 0x39800000U &&
-+			    (addis2 & 0xFFFF0000U) == 0x3D8C0000U &&
-+			    mtctr == 0x7D8903A6U &&
-+			    (li3 & 0xFFFF0000U) == 0x39800000U &&
-+			    (addis3 & 0xFFFF0000U) == 0x3D8C0000U &&
-+			    bctr == 0x4E800420U)
-+			{
-+				regs->gpr[PT_R11] = 
-+				regs->gpr[PT_R11] = 3 * (((li | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
-+				regs->gpr[PT_R12] = (((li3 | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
-+				regs->gpr[PT_R12] += (addis3 & 0xFFFFU) << 16;
-+				regs->ctr = (((li2 | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
-+				regs->ctr += (addis2 & 0xFFFFU) << 16;
-+				regs->nip = regs->ctr;
-+				return 4;
-+			}
-+		}
-+	} while (0);
-+#endif
-+
-+	do { /* PaX: unpatched PLT emulation #3 */
-+		unsigned int li, b;
-+
-+		err = get_user(li, (unsigned int *)regs->nip);
-+		err |= get_user(b, (unsigned int *)(regs->nip+4));
-+
-+		if (!err && (li & 0xFFFF0000U) == 0x39600000U && (b & 0xFC000003U) == 0x48000000U) {
-+			unsigned int addis, lwz, mtctr, bctr;
-+			unsigned long addr = b | 0xFC000000UL;
-+
-+			addr = regs->nip + 4 + ((addr ^ 0x02000000UL) + 0x02000000UL);
-+			err = get_user(addis, (unsigned int *)addr);
-+			err |= get_user(lwz, (unsigned int *)(addr+4));
-+			err |= get_user(mtctr, (unsigned int *)(addr+8));
-+			err |= get_user(bctr, (unsigned int *)(addr+12));
-+
-+			if (err)
-+				break;
-+
-+			if ((addis & 0xFFFF0000U) == 0x3D6B0000U &&
-+			    (lwz & 0xFFFF0000U) == 0x816B0000U &&
-+			    mtctr == 0x7D6903A6U &&
-+			    bctr == 0x4E800420U)
-+			{
-+				unsigned int r11;
-+
-+				addr = (addis << 16) + (((li | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
-+				addr += (((lwz | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
-+
-+				err = get_user(r11, (unsigned int *)addr);
-+				if (err)
-+					break;
-+
-+				regs->gpr[PT_R11] = r11;
-+				regs->ctr = r11;
-+				regs->nip = r11;
-+				return 4;
-+			}
-+		}
-+	} while (0);
-+#endif
-+
-+#ifdef CONFIG_PAX_EMUSIGRT
-+	do { /* PaX: sigreturn emulation */
-+		unsigned int li, sc;
-+
-+		err = get_user(li, (unsigned int *)regs->nip);
-+		err |= get_user(sc, (unsigned int *)(regs->nip+4));
-+
-+		if (!err && li == 0x38000000U + __NR_sigreturn && sc == 0x44000002U) {
-+			struct vm_area_struct *vma;
-+			unsigned long call_syscall;
-+
-+			down_read(&current->mm->mmap_sem);
-+			call_syscall = current->mm->call_syscall;
-+			up_read(&current->mm->mmap_sem);
-+			if (likely(call_syscall))
-+				goto emulate;
-+
-+			vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
-+
-+			down_write(&current->mm->mmap_sem);
-+			if (current->mm->call_syscall) {
-+				call_syscall = current->mm->call_syscall;
-+				up_write(&current->mm->mmap_sem);
-+				if (vma) kmem_cache_free(vm_area_cachep, vma);
-+				goto emulate;
-+			}
-+
-+			call_syscall = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
-+			if (!vma || (call_syscall & ~PAGE_MASK)) {
-+				up_write(&current->mm->mmap_sem);
-+				if (vma) kmem_cache_free(vm_area_cachep, vma);
-+				return 1;
-+			}
-+
-+			if (pax_insert_vma(vma, call_syscall)) {
-+				up_write(&current->mm->mmap_sem);
-+				kmem_cache_free(vm_area_cachep, vma);
-+				return 1;
-+			}
-+
-+			current->mm->call_syscall = call_syscall;
-+			up_write(&current->mm->mmap_sem);
-+
-+emulate:
-+			regs->gpr[PT_R0] = __NR_sigreturn;
-+			regs->nip = call_syscall;
-+			return 5;
-+		}
-+	} while (0);
-+
-+	do { /* PaX: rt_sigreturn emulation */
-+		unsigned int li, sc;
-+
-+		err = get_user(li, (unsigned int *)regs->nip);
-+		err |= get_user(sc, (unsigned int *)(regs->nip+4));
-+
-+		if (!err && li == 0x38000000U + __NR_rt_sigreturn && sc == 0x44000002U) {
-+			struct vm_area_struct *vma;
-+			unsigned int call_syscall;
-+
-+			down_read(&current->mm->mmap_sem);
-+			call_syscall = current->mm->call_syscall;
-+			up_read(&current->mm->mmap_sem);
-+			if (likely(call_syscall))
-+				goto rt_emulate;
-+
-+			vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
-+
-+			down_write(&current->mm->mmap_sem);
-+			if (current->mm->call_syscall) {
-+				call_syscall = current->mm->call_syscall;
-+				up_write(&current->mm->mmap_sem);
-+				if (vma) kmem_cache_free(vm_area_cachep, vma);
-+				goto rt_emulate;
-+			}
-+
-+			call_syscall = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
-+			if (!vma || (call_syscall & ~PAGE_MASK)) {
-+				up_write(&current->mm->mmap_sem);
-+				if (vma) kmem_cache_free(vm_area_cachep, vma);
-+				return 1;
-+			}
-+
-+			if (pax_insert_vma(vma, call_syscall)) {
-+				up_write(&current->mm->mmap_sem);
-+				kmem_cache_free(vm_area_cachep, vma);
-+				return 1;
-+			}
-+
-+			current->mm->call_syscall = call_syscall;
-+			up_write(&current->mm->mmap_sem);
-+
-+rt_emulate:
-+			regs->gpr[PT_R0] = __NR_rt_sigreturn;
-+			regs->nip = call_syscall;
-+			return 6;
-+		}
-+	} while (0);
-+#endif
-+
-+	return 1;
-+}
-+
-+void pax_report_insns(void *pc, void *sp)
-+{
-+	unsigned long i;
-+
-+	printk(KERN_ERR "PAX: bytes at PC: ");
-+	for (i = 0; i < 5; i++) {
-+		unsigned int c;
-+		if (get_user(c, (unsigned int *)pc+i))
-+			printk("???????? ");
-+		else
-+			printk("%08x ", c);
-+	}
-+	printk("\n");
-+}
-+#endif
-+
- /*
-  * Check whether the instruction at regs->nip is a store using
-  * an update addressing form which will update r1.
-@@ -109,7 +472,7 @@ int do_page_fault(struct pt_regs *regs, 
- 	 * indicate errors in DSISR but can validly be set in SRR1.
- 	 */
- 	if (TRAP(regs) == 0x400)
--		error_code &= 0x48200000;
-+		error_code &= 0x58200000;
- 	else
- 		is_write = error_code & 0x02000000;
- #endif /* CONFIG_4xx || CONFIG_BOOKE */
-@@ -204,15 +567,14 @@ good_area:
- 		pte_t *ptep;
- 		pmd_t *pmdp;
- 
--#if 0
-+#if 1
- 		/* It would be nice to actually enforce the VM execute
- 		   permission on CPUs which can do so, but far too
- 		   much stuff in userspace doesn't get the permissions
- 		   right, so we let any page be executed for now. */
- 		if (! (vma->vm_flags & VM_EXEC))
- 			goto bad_area;
--#endif
--
-+#else
- 		/* Since 4xx/Book-E supports per-page execute permission,
- 		 * we lazily flush dcache to icache. */
- 		ptep = NULL;
-@@ -235,6 +597,7 @@ good_area:
- 			pte_unmap_unlock(ptep, ptl);
- 		}
- #endif
-+#endif
- 	/* a read */
- 	} else {
- 		/* protection fault */
-@@ -278,6 +641,33 @@ bad_area:
- 
- 	/* User mode accesses cause a SIGSEGV */
- 	if (user_mode(regs)) {
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+		if (mm->pax_flags & MF_PAX_PAGEEXEC) {
-+			if ((TRAP(regs) == 0x400) && (regs->nip == address)) {
-+				switch (pax_handle_fetch_fault(regs)) {
-+
-+#ifdef CONFIG_PAX_EMUPLT
-+				case 2:
-+				case 3:
-+				case 4:
-+					return 0;
-+#endif
-+
-+#ifdef CONFIG_PAX_EMUSIGRT
-+				case 5:
-+				case 6:
-+					return 0;
-+#endif
-+
-+				}
-+
-+				pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[1]);
-+				do_exit(SIGKILL);
-+			}
-+		}
-+#endif
-+
- 		_exception(SIGSEGV, regs, code, address);
- 		return 0;
- 	}
---- a/arch/s390/kernel/module.c
-+++ b/arch/s390/kernel/module.c
-@@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
- 
- 	/* Increase core size by size of got & plt and set start
- 	   offsets for got and plt. */
--	me->core_size = ALIGN(me->core_size, 4);
--	me->arch.got_offset = me->core_size;
--	me->core_size += me->arch.got_size;
--	me->arch.plt_offset = me->core_size;
--	me->core_size += me->arch.plt_size;
-+	me->core_size_rw = ALIGN(me->core_size_rw, 4);
-+	me->arch.got_offset = me->core_size_rw;
-+	me->core_size_rw += me->arch.got_size;
-+	me->arch.plt_offset = me->core_size_rx;
-+	me->core_size_rx += me->arch.plt_size;
- 	return 0;
- }
- 
-@@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
- 		if (info->got_initialized == 0) {
- 			Elf_Addr *gotent;
- 
--			gotent = me->module_core + me->arch.got_offset +
-+			gotent = me->module_core_rw + me->arch.got_offset +
- 				info->got_offset;
- 			*gotent = val;
- 			info->got_initialized = 1;
-@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
- 		else if (r_type == R_390_GOTENT ||
- 			 r_type == R_390_GOTPLTENT)
- 			*(unsigned int *) loc =
--				(val + (Elf_Addr) me->module_core - loc) >> 1;
-+				(val + (Elf_Addr) me->module_core_rw - loc) >> 1;
- 		else if (r_type == R_390_GOT64 ||
- 			 r_type == R_390_GOTPLT64)
- 			*(unsigned long *) loc = val;
-@@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
- 	case R_390_PLTOFF64:	/* 16 bit offset from GOT to PLT. */
- 		if (info->plt_initialized == 0) {
- 			unsigned int *ip;
--			ip = me->module_core + me->arch.plt_offset +
-+			ip = me->module_core_rx + me->arch.plt_offset +
- 				info->plt_offset;
- #ifndef CONFIG_64BIT
- 			ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
-@@ -316,7 +316,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
- 			val = me->arch.plt_offset - me->arch.got_offset +
- 				info->plt_offset + rela->r_addend;
- 		else
--			val =  (Elf_Addr) me->module_core +
-+			val =  (Elf_Addr) me->module_core_rx +
- 				me->arch.plt_offset + info->plt_offset + 
- 				rela->r_addend - loc;
- 		if (r_type == R_390_PLT16DBL)
-@@ -336,7 +336,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
- 	case R_390_GOTOFF32:	/* 32 bit offset to GOT.  */
- 	case R_390_GOTOFF64:	/* 64 bit offset to GOT. */
- 		val = val + rela->r_addend -
--			((Elf_Addr) me->module_core + me->arch.got_offset);
-+			((Elf_Addr) me->module_core_rw + me->arch.got_offset);
- 		if (r_type == R_390_GOTOFF16)
- 			*(unsigned short *) loc = val;
- 		else if (r_type == R_390_GOTOFF32)
-@@ -346,7 +346,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
- 		break;
- 	case R_390_GOTPC:	/* 32 bit PC relative offset to GOT. */
- 	case R_390_GOTPCDBL:	/* 32 bit PC rel. off. to GOT shifted by 1. */
--		val = (Elf_Addr) me->module_core + me->arch.got_offset +
-+		val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
- 			rela->r_addend - loc;
- 		if (r_type == R_390_GOTPC)
- 			*(unsigned int *) loc = val;
---- a/arch/sparc64/kernel/Makefile
-+++ b/arch/sparc64/kernel/Makefile
-@@ -3,7 +3,7 @@
- #
- 
- EXTRA_AFLAGS := -ansi
--EXTRA_CFLAGS := -Werror
-+#EXTRA_CFLAGS := -Werror
- 
- extra-y		:= head.o init_task.o vmlinux.lds
- 
---- a/arch/sparc64/kernel/ptrace.c
-+++ b/arch/sparc64/kernel/ptrace.c
-@@ -22,6 +22,7 @@
- #include <linux/seccomp.h>
- #include <linux/audit.h>
- #include <linux/signal.h>
-+#include <linux/grsecurity.h>
- 
- #include <asm/asi.h>
- #include <asm/pgtable.h>
-@@ -216,6 +217,11 @@ asmlinkage void do_ptrace(struct pt_regs
- 		goto out;
- 	}
- 
-+	if (gr_handle_ptrace(child, (long)request)) {
-+		pt_error_return(regs, EPERM);
-+		goto out_tsk;
-+	}
-+
- 	if ((current->personality == PER_SUNOS && request == PTRACE_SUNATTACH)
- 	    || (current->personality != PER_SUNOS && request == PTRACE_ATTACH)) {
- 		if (ptrace_attach(child)) {
---- a/arch/sparc64/kernel/sys_sparc.c
-+++ b/arch/sparc64/kernel/sys_sparc.c
-@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str
- 		/* We do not accept a shared mapping if it would violate
- 		 * cache aliasing constraints.
- 		 */
--		if ((flags & MAP_SHARED) &&
-+		if ((filp || (flags & MAP_SHARED)) &&
- 		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
- 			return -EINVAL;
- 		return addr;
-@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(str
- 	if (filp || (flags & MAP_SHARED))
- 		do_color_align = 1;
- 
-+#ifdef CONFIG_PAX_RANDMMAP
-+	if (!(mm->pax_flags & MF_PAX_RANDMMAP) || !filp)
-+#endif
-+
- 	if (addr) {
- 		if (do_color_align)
- 			addr = COLOUR_ALIGN(addr, pgoff);
-@@ -152,9 +156,9 @@ unsigned long arch_get_unmapped_area(str
- 	}
- 
- 	if (len > mm->cached_hole_size) {
--	        start_addr = addr = mm->free_area_cache;
-+		start_addr = addr = mm->free_area_cache;
- 	} else {
--	        start_addr = addr = TASK_UNMAPPED_BASE;
-+		start_addr = addr = mm->mmap_base;
- 	        mm->cached_hole_size = 0;
- 	}
- 
-@@ -174,8 +178,8 @@ full_search:
- 			vma = find_vma(mm, VA_EXCLUDE_END);
- 		}
- 		if (unlikely(task_size < addr)) {
--			if (start_addr != TASK_UNMAPPED_BASE) {
--				start_addr = addr = TASK_UNMAPPED_BASE;
-+			if (start_addr != mm->mmap_base) {
-+				start_addr = addr = mm->mmap_base;
- 				mm->cached_hole_size = 0;
- 				goto full_search;
- 			}
-@@ -215,7 +219,7 @@ arch_get_unmapped_area_topdown(struct fi
- 		/* We do not accept a shared mapping if it would violate
- 		 * cache aliasing constraints.
- 		 */
--		if ((flags & MAP_SHARED) &&
-+		if ((filp || (flags & MAP_SHARED)) &&
- 		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
- 			return -EINVAL;
- 		return addr;
-@@ -378,6 +382,12 @@ void arch_pick_mmap_layout(struct mm_str
- 	    current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
- 	    sysctl_legacy_va_layout) {
- 		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+		if (mm->pax_flags & MF_PAX_RANDMMAP)
-+			mm->mmap_base += mm->delta_mmap;
-+#endif
-+
- 		mm->get_unmapped_area = arch_get_unmapped_area;
- 		mm->unmap_area = arch_unmap_area;
- 	} else {
-@@ -392,6 +402,12 @@ void arch_pick_mmap_layout(struct mm_str
- 			gap = (task_size / 6 * 5);
- 
- 		mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+		if (mm->pax_flags & MF_PAX_RANDMMAP)
-+			mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
-+#endif
-+
- 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
- 		mm->unmap_area = arch_unmap_area_topdown;
- 	}
---- a/arch/sparc64/mm/fault.c
-+++ b/arch/sparc64/mm/fault.c
-@@ -20,6 +20,10 @@
- #include <linux/kprobes.h>
- #include <linux/kallsyms.h>
- #include <linux/kdebug.h>
-+#include <linux/slab.h>
-+#include <linux/pagemap.h>
-+#include <linux/compiler.h>
-+#include <linux/binfmts.h>
- 
- #include <asm/page.h>
- #include <asm/pgtable.h>
-@@ -270,6 +274,369 @@ cannot_handle:
- 	unhandled_fault (address, current, regs);
- }
- 
-+#ifdef CONFIG_PAX_PAGEEXEC
-+#ifdef CONFIG_PAX_EMUPLT
-+static void pax_emuplt_close(struct vm_area_struct *vma)
-+{
-+	vma->vm_mm->call_dl_resolve = 0UL;
-+}
-+
-+static struct page *pax_emuplt_nopage(struct vm_area_struct *vma, unsigned long address, int *type)
-+{
-+	struct page *page;
-+	unsigned int *kaddr;
-+
-+	page = alloc_page(GFP_HIGHUSER);
-+	if (!page)
-+		return NOPAGE_OOM;
-+
-+	kaddr = kmap(page);
-+	memset(kaddr, 0, PAGE_SIZE);
-+	kaddr[0] = 0x9DE3BFA8U; /* save */
-+	flush_dcache_page(page);
-+	kunmap(page);
-+	if (type)
-+		*type = VM_FAULT_MAJOR;
-+	return page;
-+}
-+
-+static struct vm_operations_struct pax_vm_ops = {
-+	.close = pax_emuplt_close,
-+	.nopage = pax_emuplt_nopage,
-+};
-+
-+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
-+{
-+	int ret;
-+
-+	memset(vma, 0, sizeof(*vma));
-+	vma->vm_mm = current->mm;
-+	vma->vm_start = addr;
-+	vma->vm_end = addr + PAGE_SIZE;
-+	vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
-+	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-+	vma->vm_ops = &pax_vm_ops;
-+
-+	ret = insert_vm_struct(current->mm, vma);
-+	if (ret)
-+		return ret;
-+
-+	++current->mm->total_vm;
-+	return 0;
-+}
-+#endif
-+
-+/*
-+ * PaX: decide what to do with offenders (regs->tpc = fault address)
-+ *
-+ * returns 1 when task should be killed
-+ *         2 when patched PLT trampoline was detected
-+ *         3 when unpatched PLT trampoline was detected
-+ */
-+static int pax_handle_fetch_fault(struct pt_regs *regs)
-+{
-+
-+#ifdef CONFIG_PAX_EMUPLT
-+	int err;
-+
-+	do { /* PaX: patched PLT emulation #1 */
-+		unsigned int sethi1, sethi2, jmpl;
-+
-+		err = get_user(sethi1, (unsigned int *)regs->tpc);
-+		err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
-+		err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
-+
-+		if (err)
-+			break;
-+
-+		if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
-+		    (sethi2 & 0xFFC00000U) == 0x03000000U &&
-+		    (jmpl & 0xFFFFE000U) == 0x81C06000U)
-+		{
-+			unsigned long addr;
-+
-+			regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
-+			addr = regs->u_regs[UREG_G1];
-+			addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
-+			regs->tpc = addr;
-+			regs->tnpc = addr+4;
-+			return 2;
-+		}
-+	} while (0);
-+
-+	{ /* PaX: patched PLT emulation #2 */
-+		unsigned int ba;
-+
-+		err = get_user(ba, (unsigned int *)regs->tpc);
-+
-+		if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
-+			unsigned long addr;
-+
-+			addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
-+			regs->tpc = addr;
-+			regs->tnpc = addr+4;
-+			return 2;
-+		}
-+	}
-+
-+	do { /* PaX: patched PLT emulation #3 */
-+		unsigned int sethi, jmpl, nop;
-+
-+		err = get_user(sethi, (unsigned int *)regs->tpc);
-+		err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
-+		err |= get_user(nop, (unsigned int *)(regs->tpc+8));
-+
-+		if (err)
-+			break;
-+
-+		if ((sethi & 0xFFC00000U) == 0x03000000U &&
-+		    (jmpl & 0xFFFFE000U) == 0x81C06000U &&
-+		    nop == 0x01000000U)
-+		{
-+			unsigned long addr;
-+
-+			addr = (sethi & 0x003FFFFFU) << 10;
-+			regs->u_regs[UREG_G1] = addr;
-+			addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
-+			regs->tpc = addr;
-+			regs->tnpc = addr+4;
-+			return 2;
-+		}
-+	} while (0);
-+
-+	do { /* PaX: patched PLT emulation #4 */
-+		unsigned int mov1, call, mov2;
-+
-+		err = get_user(mov1, (unsigned int *)regs->tpc);
-+		err |= get_user(call, (unsigned int *)(regs->tpc+4));
-+		err |= get_user(mov2, (unsigned int *)(regs->tpc+8));
-+
-+		if (err)
-+			break;
-+
-+		if (mov1 == 0x8210000FU &&
-+		    (call & 0xC0000000U) == 0x40000000U &&
-+		    mov2 == 0x9E100001U)
-+		{
-+			unsigned long addr;
-+
-+			regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
-+			addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
-+			regs->tpc = addr;
-+			regs->tnpc = addr+4;
-+			return 2;
-+		}
-+	} while (0);
-+
-+	do { /* PaX: patched PLT emulation #5 */
-+		unsigned int sethi1, sethi2, or1, or2, sllx, jmpl, nop;
-+
-+		err = get_user(sethi1, (unsigned int *)regs->tpc);
-+		err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
-+		err |= get_user(or1, (unsigned int *)(regs->tpc+8));
-+		err |= get_user(or2, (unsigned int *)(regs->tpc+12));
-+		err |= get_user(sllx, (unsigned int *)(regs->tpc+16));
-+		err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
-+		err |= get_user(nop, (unsigned int *)(regs->tpc+24));
-+
-+		if (err)
-+			break;
-+
-+		if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
-+		    (sethi2 & 0xFFC00000U) == 0x0B000000U &&
-+		    (or1 & 0xFFFFE000U) == 0x82106000U &&
-+		    (or2 & 0xFFFFE000U) == 0x8A116000U &&
-+		    sllx == 0x83287020 &&
-+		    jmpl == 0x81C04005U &&
-+		    nop == 0x01000000U)
-+		{
-+			unsigned long addr;
-+
-+			regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
-+			regs->u_regs[UREG_G1] <<= 32;
-+			regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
-+			addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
-+			regs->tpc = addr;
-+			regs->tnpc = addr+4;
-+			return 2;
-+		}
-+	} while (0);
-+
-+	do { /* PaX: patched PLT emulation #6 */
-+		unsigned int sethi1, sethi2, sllx, or,  jmpl, nop;
-+
-+		err = get_user(sethi1, (unsigned int *)regs->tpc);
-+		err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
-+		err |= get_user(sllx, (unsigned int *)(regs->tpc+8));
-+		err |= get_user(or, (unsigned int *)(regs->tpc+12));
-+		err |= get_user(jmpl, (unsigned int *)(regs->tpc+16));
-+		err |= get_user(nop, (unsigned int *)(regs->tpc+20));
-+
-+		if (err)
-+			break;
-+
-+		if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
-+		    (sethi2 & 0xFFC00000U) == 0x0B000000U &&
-+		    sllx == 0x83287020 &&
-+		    (or & 0xFFFFE000U) == 0x8A116000U &&
-+		    jmpl == 0x81C04005U &&
-+		    nop == 0x01000000U)
-+		{
-+			unsigned long addr;
-+
-+			regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
-+			regs->u_regs[UREG_G1] <<= 32;
-+			regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
-+			addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
-+			regs->tpc = addr;
-+			regs->tnpc = addr+4;
-+			return 2;
-+		}
-+	} while (0);
-+
-+	do { /* PaX: patched PLT emulation #7 */
-+		unsigned int sethi, ba, nop;
-+
-+		err = get_user(sethi, (unsigned int *)regs->tpc);
-+		err |= get_user(ba, (unsigned int *)(regs->tpc+4));
-+		err |= get_user(nop, (unsigned int *)(regs->tpc+8));
-+
-+		if (err)
-+			break;
-+
-+		if ((sethi & 0xFFC00000U) == 0x03000000U &&
-+		    (ba & 0xFFF00000U) == 0x30600000U &&
-+		    nop == 0x01000000U)
-+		{
-+			unsigned long addr;
-+
-+			addr = (sethi & 0x003FFFFFU) << 10;
-+			regs->u_regs[UREG_G1] = addr;
-+			addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
-+			regs->tpc = addr;
-+			regs->tnpc = addr+4;
-+			return 2;
-+		}
-+	} while (0);
-+
-+	do { /* PaX: unpatched PLT emulation step 1 */
-+		unsigned int sethi, ba, nop;
-+
-+		err = get_user(sethi, (unsigned int *)regs->tpc);
-+		err |= get_user(ba, (unsigned int *)(regs->tpc+4));
-+		err |= get_user(nop, (unsigned int *)(regs->tpc+8));
-+
-+		if (err)
-+			break;
-+
-+		if ((sethi & 0xFFC00000U) == 0x03000000U &&
-+		    ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
-+		    nop == 0x01000000U)
-+		{
-+			unsigned long addr;
-+			unsigned int save, call;
-+
-+			if ((ba & 0xFFC00000U) == 0x30800000U)
-+				addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
-+			else
-+				addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
-+
-+			err = get_user(save, (unsigned int *)addr);
-+			err |= get_user(call, (unsigned int *)(addr+4));
-+			err |= get_user(nop, (unsigned int *)(addr+8));
-+			if (err)
-+				break;
-+
-+			if (save == 0x9DE3BFA8U &&
-+			    (call & 0xC0000000U) == 0x40000000U &&
-+			    nop == 0x01000000U)
-+			{
-+				struct vm_area_struct *vma;
-+				unsigned long call_dl_resolve;
-+
-+				down_read(&current->mm->mmap_sem);
-+				call_dl_resolve = current->mm->call_dl_resolve;
-+				up_read(&current->mm->mmap_sem);
-+				if (likely(call_dl_resolve))
-+					goto emulate;
-+
-+				vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
-+
-+				down_write(&current->mm->mmap_sem);
-+				if (current->mm->call_dl_resolve) {
-+					call_dl_resolve = current->mm->call_dl_resolve;
-+					up_write(&current->mm->mmap_sem);
-+					if (vma) kmem_cache_free(vm_area_cachep, vma);
-+					goto emulate;
-+				}
-+
-+				call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
-+				if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
-+					up_write(&current->mm->mmap_sem);
-+					if (vma) kmem_cache_free(vm_area_cachep, vma);
-+					return 1;
-+				}
-+
-+				if (pax_insert_vma(vma, call_dl_resolve)) {
-+					up_write(&current->mm->mmap_sem);
-+					kmem_cache_free(vm_area_cachep, vma);
-+					return 1;
-+				}
-+
-+				current->mm->call_dl_resolve = call_dl_resolve;
-+				up_write(&current->mm->mmap_sem);
-+
-+emulate:
-+				regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
-+				regs->tpc = call_dl_resolve;
-+				regs->tnpc = addr+4;
-+				return 3;
-+			}
-+		}
-+	} while (0);
-+
-+	do { /* PaX: unpatched PLT emulation step 2 */
-+		unsigned int save, call, nop;
-+
-+		err = get_user(save, (unsigned int *)(regs->tpc-4));
-+		err |= get_user(call, (unsigned int *)regs->tpc);
-+		err |= get_user(nop, (unsigned int *)(regs->tpc+4));
-+		if (err)
-+			break;
-+
-+		if (save == 0x9DE3BFA8U &&
-+		    (call & 0xC0000000U) == 0x40000000U &&
-+		    nop == 0x01000000U)
-+		{
-+			unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
-+
-+			regs->u_regs[UREG_RETPC] = regs->tpc;
-+			regs->tpc = dl_resolve;
-+			regs->tnpc = dl_resolve+4;
-+			return 3;
-+		}
-+	} while (0);
-+#endif
-+
-+	return 1;
-+}
-+
-+void pax_report_insns(void *pc, void *sp)
-+{
-+	unsigned long i;
-+
-+	printk(KERN_ERR "PAX: bytes at PC: ");
-+	for (i = 0; i < 5; i++) {
-+		unsigned int c;
-+		if (get_user(c, (unsigned int *)pc+i))
-+			printk("???????? ");
-+		else
-+			printk("%08x ", c);
-+	}
-+	printk("\n");
-+}
-+#endif
-+
- asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
- {
- 	struct mm_struct *mm = current->mm;
-@@ -311,8 +678,10 @@ asmlinkage void __kprobes do_sparc64_fau
- 		goto intr_or_no_mm;
- 
- 	if (test_thread_flag(TIF_32BIT)) {
--		if (!(regs->tstate & TSTATE_PRIV))
-+		if (!(regs->tstate & TSTATE_PRIV)) {
- 			regs->tpc &= 0xffffffff;
-+			regs->tnpc &= 0xffffffff;
-+		}
- 		address &= 0xffffffff;
- 	}
- 
-@@ -329,6 +698,29 @@ asmlinkage void __kprobes do_sparc64_fau
- 	if (!vma)
- 		goto bad_area;
- 
-+#ifdef CONFIG_PAX_PAGEEXEC
-+	/* PaX: detect ITLB misses on non-exec pages */
-+	if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
-+	    !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
-+	{
-+		if (address != regs->tpc)
-+			goto good_area;
-+
-+		up_read(&mm->mmap_sem);
-+		switch (pax_handle_fetch_fault(regs)) {
-+
-+#ifdef CONFIG_PAX_EMUPLT
-+		case 2:
-+		case 3:
-+			return;
-+#endif
-+
-+		}
-+		pax_report_fault(regs, (void*)regs->tpc, (void*)(regs->u_regs[UREG_FP] + STACK_BIAS));
-+		do_exit(SIGKILL);
-+	}
-+#endif
-+
- 	/* Pure DTLB misses do not tell us whether the fault causing
- 	 * load/store/atomic was a write or not, it only says that there
- 	 * was no match.  So in such a case we (carefully) read the
---- a/arch/sparc64/mm/Makefile
-+++ b/arch/sparc64/mm/Makefile
-@@ -3,7 +3,7 @@
- #
- 
- EXTRA_AFLAGS := -ansi
--EXTRA_CFLAGS := -Werror
-+#EXTRA_CFLAGS := -Werror
- 
- obj-y    := ultra.o tlb.o tsb.o fault.o init.o generic.o
- 
---- a/arch/sparc/kernel/ptrace.c
-+++ b/arch/sparc/kernel/ptrace.c
-@@ -19,6 +19,7 @@
- #include <linux/smp_lock.h>
- #include <linux/security.h>
- #include <linux/signal.h>
-+#include <linux/grsecurity.h>
- 
- #include <asm/pgtable.h>
- #include <asm/system.h>
-@@ -303,6 +304,11 @@ asmlinkage void do_ptrace(struct pt_regs
- 		goto out;
- 	}
- 
-+	if (gr_handle_ptrace(child, request)) {
-+		pt_error_return(regs, EPERM);
-+		goto out_tsk;
-+	}
-+
- 	if ((current->personality == PER_SUNOS && request == PTRACE_SUNATTACH)
- 	    || (current->personality != PER_SUNOS && request == PTRACE_ATTACH)) {
- 		if (ptrace_attach(child)) {
---- a/arch/sparc/kernel/sys_sparc.c
-+++ b/arch/sparc/kernel/sys_sparc.c
-@@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(str
- 	if (ARCH_SUN4C_SUN4 && len > 0x20000000)
- 		return -ENOMEM;
- 	if (!addr)
--		addr = TASK_UNMAPPED_BASE;
-+		addr = current->mm->mmap_base;
- 
- 	if (flags & MAP_SHARED)
- 		addr = COLOUR_ALIGN(addr);
---- a/arch/sparc/Makefile
-+++ b/arch/sparc/Makefile
-@@ -36,7 +36,7 @@ drivers-$(CONFIG_OPROFILE)	+= arch/sparc
- # Renaming is done to avoid confusing pattern matching rules in 2.5.45 (multy-)
- INIT_Y		:= $(patsubst %/, %/built-in.o, $(init-y))
- CORE_Y		:= $(core-y)
--CORE_Y		+= kernel/ mm/ fs/ ipc/ security/ crypto/ block/
-+CORE_Y		+= kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
- CORE_Y		:= $(patsubst %/, %/built-in.o, $(CORE_Y))
- DRIVERS_Y	:= $(patsubst %/, %/built-in.o, $(drivers-y))
- NET_Y		:= $(patsubst %/, %/built-in.o, $(net-y))
---- a/arch/sparc/mm/fault.c
-+++ b/arch/sparc/mm/fault.c
-@@ -21,6 +21,10 @@
- #include <linux/interrupt.h>
- #include <linux/module.h>
- #include <linux/kdebug.h>
-+#include <linux/slab.h>
-+#include <linux/pagemap.h>
-+#include <linux/compiler.h>
-+#include <linux/binfmts.h>
- 
- #include <asm/system.h>
- #include <asm/page.h>
-@@ -216,6 +220,252 @@ static unsigned long compute_si_addr(str
- 	return safe_compute_effective_address(regs, insn);
- }
- 
-+#ifdef CONFIG_PAX_PAGEEXEC
-+void pax_emuplt_close(struct vm_area_struct *vma)
-+{
-+	vma->vm_mm->call_dl_resolve = 0UL;
-+}
-+
-+static struct page *pax_emuplt_nopage(struct vm_area_struct *vma, unsigned long address, int *type)
-+{
-+	struct page *page;
-+	unsigned int *kaddr;
-+
-+	page = alloc_page(GFP_HIGHUSER);
-+	if (!page)
-+		return NOPAGE_OOM;
-+
-+	kaddr = kmap(page);
-+	memset(kaddr, 0, PAGE_SIZE);
-+	kaddr[0] = 0x9DE3BFA8U; /* save */
-+	flush_dcache_page(page);
-+	kunmap(page);
-+	if (type)
-+		*type = VM_FAULT_MAJOR;
-+
-+	return page;
-+}
-+
-+static struct vm_operations_struct pax_vm_ops = {
-+	.close = pax_emuplt_close,
-+	.nopage = pax_emuplt_nopage,
-+};
-+
-+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
-+{
-+	int ret;
-+
-+	memset(vma, 0, sizeof(*vma));
-+	vma->vm_mm = current->mm;
-+	vma->vm_start = addr;
-+	vma->vm_end = addr + PAGE_SIZE;
-+	vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
-+	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-+	vma->vm_ops = &pax_vm_ops;
-+
-+	ret = insert_vm_struct(current->mm, vma);
-+	if (ret)
-+		return ret;
-+
-+	++current->mm->total_vm;
-+	return 0;
-+}
-+
-+/*
-+ * PaX: decide what to do with offenders (regs->pc = fault address)
-+ *
-+ * returns 1 when task should be killed
-+ *         2 when patched PLT trampoline was detected
-+ *         3 when unpatched PLT trampoline was detected
-+ */
-+static int pax_handle_fetch_fault(struct pt_regs *regs)
-+{
-+
-+#ifdef CONFIG_PAX_EMUPLT
-+	int err;
-+
-+	do { /* PaX: patched PLT emulation #1 */
-+		unsigned int sethi1, sethi2, jmpl;
-+
-+		err = get_user(sethi1, (unsigned int *)regs->pc);
-+		err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
-+		err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
-+
-+		if (err)
-+			break;
-+
-+		if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
-+		    (sethi2 & 0xFFC00000U) == 0x03000000U &&
-+		    (jmpl & 0xFFFFE000U) == 0x81C06000U)
-+		{
-+			unsigned int addr;
-+
-+			regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
-+			addr = regs->u_regs[UREG_G1];
-+			addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
-+			regs->pc = addr;
-+			regs->npc = addr+4;
-+			return 2;
-+		}
-+	} while (0);
-+
-+	{ /* PaX: patched PLT emulation #2 */
-+		unsigned int ba;
-+
-+		err = get_user(ba, (unsigned int *)regs->pc);
-+
-+		if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
-+			unsigned int addr;
-+
-+			addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
-+			regs->pc = addr;
-+			regs->npc = addr+4;
-+			return 2;
-+		}
-+	}
-+
-+	do { /* PaX: patched PLT emulation #3 */
-+		unsigned int sethi, jmpl, nop;
-+
-+		err = get_user(sethi, (unsigned int *)regs->pc);
-+		err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
-+		err |= get_user(nop, (unsigned int *)(regs->pc+8));
-+
-+		if (err)
-+			break;
-+
-+		if ((sethi & 0xFFC00000U) == 0x03000000U &&
-+		    (jmpl & 0xFFFFE000U) == 0x81C06000U &&
-+		    nop == 0x01000000U)
-+		{
-+			unsigned int addr;
-+
-+			addr = (sethi & 0x003FFFFFU) << 10;
-+			regs->u_regs[UREG_G1] = addr;
-+			addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
-+			regs->pc = addr;
-+			regs->npc = addr+4;
-+			return 2;
-+		}
-+	} while (0);
-+
-+	do { /* PaX: unpatched PLT emulation step 1 */
-+		unsigned int sethi, ba, nop;
-+
-+		err = get_user(sethi, (unsigned int *)regs->pc);
-+		err |= get_user(ba, (unsigned int *)(regs->pc+4));
-+		err |= get_user(nop, (unsigned int *)(regs->pc+8));
-+
-+		if (err)
-+			break;
-+
-+		if ((sethi & 0xFFC00000U) == 0x03000000U &&
-+		    ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
-+		    nop == 0x01000000U)
-+		{
-+			unsigned int addr, save, call;
-+
-+			if ((ba & 0xFFC00000U) == 0x30800000U)
-+				addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
-+			else
-+				addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
-+
-+			err = get_user(save, (unsigned int *)addr);
-+			err |= get_user(call, (unsigned int *)(addr+4));
-+			err |= get_user(nop, (unsigned int *)(addr+8));
-+			if (err)
-+				break;
-+
-+			if (save == 0x9DE3BFA8U &&
-+			    (call & 0xC0000000U) == 0x40000000U &&
-+			    nop == 0x01000000U)
-+			{
-+				struct vm_area_struct *vma;
-+				unsigned long call_dl_resolve;
-+
-+				down_read(&current->mm->mmap_sem);
-+				call_dl_resolve = current->mm->call_dl_resolve;
-+				up_read(&current->mm->mmap_sem);
-+				if (likely(call_dl_resolve))
-+					goto emulate;
-+
-+				vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
-+
-+				down_write(&current->mm->mmap_sem);
-+				if (current->mm->call_dl_resolve) {
-+					call_dl_resolve = current->mm->call_dl_resolve;
-+					up_write(&current->mm->mmap_sem);
-+					if (vma) kmem_cache_free(vm_area_cachep, vma);
-+					goto emulate;
-+				}
-+
-+				call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
-+				if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
-+					up_write(&current->mm->mmap_sem);
-+					if (vma) kmem_cache_free(vm_area_cachep, vma);
-+					return 1;
-+				}
-+
-+				if (pax_insert_vma(vma, call_dl_resolve)) {
-+					up_write(&current->mm->mmap_sem);
-+					kmem_cache_free(vm_area_cachep, vma);
-+					return 1;
-+				}
-+
-+				current->mm->call_dl_resolve = call_dl_resolve;
-+				up_write(&current->mm->mmap_sem);
-+
-+emulate:
-+				regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
-+				regs->pc = call_dl_resolve;
-+				regs->npc = addr+4;
-+				return 3;
-+			}
-+		}
-+	} while (0);
-+
-+	do { /* PaX: unpatched PLT emulation step 2 */
-+		unsigned int save, call, nop;
-+
-+		err = get_user(save, (unsigned int *)(regs->pc-4));
-+		err |= get_user(call, (unsigned int *)regs->pc);
-+		err |= get_user(nop, (unsigned int *)(regs->pc+4));
-+		if (err)
-+			break;
-+
-+		if (save == 0x9DE3BFA8U &&
-+		    (call & 0xC0000000U) == 0x40000000U &&
-+		    nop == 0x01000000U)
-+		{
-+			unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
-+
-+			regs->u_regs[UREG_RETPC] = regs->pc;
-+			regs->pc = dl_resolve;
-+			regs->npc = dl_resolve+4;
-+			return 3;
-+		}
-+	} while (0);
-+#endif
-+
-+	return 1;
-+}
-+
-+void pax_report_insns(void *pc, void *sp)
-+{
-+	unsigned long i;
-+
-+	printk(KERN_ERR "PAX: bytes at PC: ");
-+	for (i = 0; i < 5; i++) {
-+		unsigned int c;
-+		if (get_user(c, (unsigned int *)pc+i))
-+			printk("???????? ");
-+		else
-+			printk("%08x ", c);
-+	}
-+	printk("\n");
-+}
-+#endif
-+
- asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
- 			       unsigned long address)
- {
-@@ -280,6 +530,24 @@ good_area:
- 		if(!(vma->vm_flags & VM_WRITE))
- 			goto bad_area;
- 	} else {
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+		if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
-+			up_read(&mm->mmap_sem);
-+			switch (pax_handle_fetch_fault(regs)) {
-+
-+#ifdef CONFIG_PAX_EMUPLT
-+			case 2:
-+			case 3:
-+				return;
-+#endif
-+
-+			}
-+			pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
-+			do_exit(SIGKILL);
-+		}
-+#endif
-+
- 		/* Allow reads even for write-only mappings */
- 		if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
- 			goto bad_area;
---- a/arch/sparc/mm/init.c
-+++ b/arch/sparc/mm/init.c
-@@ -336,17 +336,17 @@ void __init paging_init(void)
- 
- 	/* Initialize the protection map with non-constant, MMU dependent values. */
- 	protection_map[0] = PAGE_NONE;
--	protection_map[1] = PAGE_READONLY;
--	protection_map[2] = PAGE_COPY;
--	protection_map[3] = PAGE_COPY;
-+	protection_map[1] = PAGE_READONLY_NOEXEC;
-+	protection_map[2] = PAGE_COPY_NOEXEC;
-+	protection_map[3] = PAGE_COPY_NOEXEC;
- 	protection_map[4] = PAGE_READONLY;
- 	protection_map[5] = PAGE_READONLY;
- 	protection_map[6] = PAGE_COPY;
- 	protection_map[7] = PAGE_COPY;
- 	protection_map[8] = PAGE_NONE;
--	protection_map[9] = PAGE_READONLY;
--	protection_map[10] = PAGE_SHARED;
--	protection_map[11] = PAGE_SHARED;
-+	protection_map[9] = PAGE_READONLY_NOEXEC;
-+	protection_map[10] = PAGE_SHARED_NOEXEC;
-+	protection_map[11] = PAGE_SHARED_NOEXEC;
- 	protection_map[12] = PAGE_READONLY;
- 	protection_map[13] = PAGE_READONLY;
- 	protection_map[14] = PAGE_SHARED;
---- a/arch/sparc/mm/srmmu.c
-+++ b/arch/sparc/mm/srmmu.c
-@@ -2157,6 +2157,13 @@ void __init ld_mmu_srmmu(void)
- 	PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
- 	BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
- 	BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+	PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
-+	BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
-+	BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
-+#endif
-+
- 	BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
- 	page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
- 
---- a/arch/v850/kernel/module.c
-+++ b/arch/v850/kernel/module.c
-@@ -150,8 +150,8 @@ static uint32_t do_plt_call (void *locat
- 	tramp[1] = ((val >> 16) & 0xffff) + 0x610000; /* ...; jmp r1 */
- 
- 	/* Init, or core PLT? */
--	if (location >= mod->module_core
--	    && location < mod->module_core + mod->core_size)
-+	if (location >= mod->module_core_rx
-+	    && location < mod->module_core_rx + mod->core_size_rx)
- 		entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
- 	else
- 		entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
---- a/arch/x86_64/ia32/ia32_binfmt.c
-+++ b/arch/x86_64/ia32/ia32_binfmt.c
-@@ -36,12 +36,12 @@
- #define AT_SYSINFO 32
- #define AT_SYSINFO_EHDR		33
- 
--int sysctl_vsyscall32 = 1;
-+int sysctl_vsyscall32;
- 
- #undef ARCH_DLINFO
- #define ARCH_DLINFO do {  \
- 	if (sysctl_vsyscall32) { \
--		current->mm->context.vdso = (void *)VSYSCALL32_BASE;	\
-+		current->mm->context.vdso = VSYSCALL32_BASE; \
- 		NEW_AUX_ENT(AT_SYSINFO, (u32)(u64)VSYSCALL32_VSYSCALL); \
- 		NEW_AUX_ENT(AT_SYSINFO_EHDR, VSYSCALL32_BASE);    \
- 	}	\
-@@ -145,6 +145,13 @@ struct elf_prpsinfo
- //#include <asm/ia32.h>
- #include <linux/elf.h>
- 
-+#ifdef CONFIG_PAX_ASLR
-+#define PAX_ELF_ET_DYN_BASE	0x08048000UL
-+
-+#define PAX_DELTA_MMAP_LEN	16
-+#define PAX_DELTA_STACK_LEN	16
-+#endif
-+
- typedef struct user_i387_ia32_struct elf_fpregset_t;
- typedef struct user32_fxsr_struct elf_fpxregset_t;
- 
-@@ -298,7 +305,7 @@ static ctl_table abi_table2[] = {
- 		.mode		= 0644,
- 		.proc_handler	= proc_dointvec
- 	},
--	{}
-+	{ 0, NULL, NULL, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL }
- };
- 
- static ctl_table abi_root_table2[] = {
-@@ -308,7 +315,7 @@ static ctl_table abi_root_table2[] = {
- 		.mode = 0555,
- 		.child = abi_table2
- 	},
--	{}
-+	{ 0, NULL, NULL, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL }
- };
- 
- static __init int ia32_binfmt_init(void)
---- a/arch/x86_64/ia32/ia32_signal.c
-+++ b/arch/x86_64/ia32/ia32_signal.c
-@@ -573,6 +573,7 @@ int ia32_setup_rt_frame(int sig, struct 
- 			__NR_ia32_rt_sigreturn,
- 			0x80cd,
- 			0,
-+			0
- 		}; 
- 		err |= __copy_to_user(frame->retcode, &code, 8); 
- 	} 
---- a/arch/x86_64/ia32/mmap32.c
-+++ b/arch/x86_64/ia32/mmap32.c
-@@ -69,10 +69,22 @@ void ia32_pick_mmap_layout(struct mm_str
- 			(current->personality & ADDR_COMPAT_LAYOUT) ||
- 			current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY) {
- 		mm->mmap_base = TASK_UNMAPPED_BASE;
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+		if (mm->pax_flags & MF_PAX_RANDMMAP)
-+			mm->mmap_base += mm->delta_mmap;
-+#endif
-+
- 		mm->get_unmapped_area = arch_get_unmapped_area;
- 		mm->unmap_area = arch_unmap_area;
- 	} else {
- 		mm->mmap_base = mmap_base(mm);
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+		if (mm->pax_flags & MF_PAX_RANDMMAP)
-+			mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
-+#endif
-+
- 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
- 		mm->unmap_area = arch_unmap_area_topdown;
- 	}
---- a/arch/x86_64/ia32/ptrace32.c
-+++ b/arch/x86_64/ia32/ptrace32.c
-@@ -382,7 +382,7 @@ asmlinkage long sys32_ptrace(long reques
- 		/* no checking to be bug-to-bug compatible with i386. */
- 		/* but silence warning */
- 		if (__copy_from_user(&child->thread.i387.fxsave, u, sizeof(*u)))
--			;
-+			{}
- 		set_stopped_child_used_math(child);
- 		child->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
- 		ret = 0; 
---- a/arch/x86_64/ia32/syscall32.c
-+++ b/arch/x86_64/ia32/syscall32.c
-@@ -30,6 +30,9 @@ int syscall32_setup_pages(struct linux_b
- 	struct mm_struct *mm = current->mm;
- 	int ret;
- 
-+	if (!sysctl_vsyscall32)
-+		return 0;
-+
- 	down_write(&mm->mmap_sem);
- 	/*
- 	 * MAYWRITE to allow gdb to COW and set breakpoints
---- a/arch/x86_64/kernel/hpet.c
-+++ b/arch/x86_64/kernel/hpet.c
-@@ -65,7 +65,7 @@ static __init int late_hpet_init(void)
- 		hpet = (struct hpet *) fix_to_virt(FIX_HPET_BASE);
- 		timer = &hpet->hpet_timers[2];
- 		for (i = 2; i < ntimer; timer++, i++)
--			hd.hd_irq[i] = (timer->hpet_config &
-+			hd.hd_irq[i] = (readl(&timer->hpet_config) &
- 					Tn_INT_ROUTE_CNF_MASK) >>
- 				Tn_INT_ROUTE_CNF_SHIFT;
- 
---- a/arch/x86_64/kernel/i8259.c
-+++ b/arch/x86_64/kernel/i8259.c
-@@ -395,7 +395,7 @@ device_initcall(i8259A_init_sysfs);
-  * IRQ2 is cascade interrupt to second interrupt controller
-  */
- 
--static struct irqaction irq2 = { no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL};
-+static struct irqaction irq2 = { no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL, 0, NULL};
- DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
- 	[0 ... IRQ0_VECTOR - 1] = -1,
- 	[IRQ0_VECTOR] = 0,
---- a/arch/x86_64/kernel/ioport.c
-+++ b/arch/x86_64/kernel/ioport.c
-@@ -16,6 +16,7 @@
- #include <linux/slab.h>
- #include <linux/thread_info.h>
- #include <linux/syscalls.h>
-+#include <linux/grsecurity.h>
- 
- /* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
- static void set_bitmap(unsigned long *bitmap, unsigned int base, unsigned int extent, int new_value)
-@@ -41,8 +42,16 @@ asmlinkage long sys_ioperm(unsigned long
- 
- 	if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
- 		return -EINVAL;
-+
-+#ifdef CONFIG_GRKERNSEC_IO
-+	if (turn_on) {
-+		gr_handle_ioperm();
-+		return -EPERM;
-+	}
-+#else
- 	if (turn_on && !capable(CAP_SYS_RAWIO))
- 		return -EPERM;
-+#endif
- 
- 	/*
- 	 * If it's the first ioperm() call in this thread's lifetime, set the
-@@ -111,8 +120,13 @@ asmlinkage long sys_iopl(unsigned int le
- 		return -EINVAL;
- 	/* Trying to gain more privileges? */
- 	if (level > old) {
-+#ifdef CONFIG_GRKERNSEC_IO
-+		gr_handle_iopl();
-+		return -EPERM;
-+#else
- 		if (!capable(CAP_SYS_RAWIO))
- 			return -EPERM;
-+#endif
- 	}
- 	regs->eflags = (regs->eflags &~ X86_EFLAGS_IOPL) | (level << 12);
- 	return 0;
---- a/arch/x86_64/kernel/mce.c
-+++ b/arch/x86_64/kernel/mce.c
-@@ -665,6 +665,7 @@ static struct miscdevice mce_log_device 
- 	MISC_MCELOG_MINOR,
- 	"mcelog",
- 	&mce_chrdev_ops,
-+	{NULL, NULL}, NULL, NULL
- };
- 
- static unsigned long old_cr4 __initdata;
---- a/arch/x86_64/kernel/process.c
-+++ b/arch/x86_64/kernel/process.c
-@@ -894,10 +894,3 @@ int dump_task_regs(struct task_struct *t
-  
- 	return 1;
- }
--
--unsigned long arch_align_stack(unsigned long sp)
--{
--	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
--		sp -= get_random_int() % 8192;
--	return sp & ~0xf;
--}
---- a/arch/x86_64/kernel/setup64.c
-+++ b/arch/x86_64/kernel/setup64.c
-@@ -37,7 +37,7 @@ struct desc_ptr idt_descr = { 256 * 16 -
- char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned")));
- 
- unsigned long __supported_pte_mask __read_mostly = ~0UL;
--static int do_not_nx __cpuinitdata = 0;
-+EXPORT_SYMBOL(__supported_pte_mask);
- 
- /* noexec=on|off
- Control non executable mappings for 64bit processes.
-@@ -51,16 +51,14 @@ static int __init nonx_setup(char *str)
- 		return -EINVAL;
- 	if (!strncmp(str, "on", 2)) {
-                 __supported_pte_mask |= _PAGE_NX; 
-- 		do_not_nx = 0; 
- 	} else if (!strncmp(str, "off", 3)) {
--		do_not_nx = 1;
- 		__supported_pte_mask &= ~_PAGE_NX;
-         }
- 	return 0;
- } 
- early_param("noexec", nonx_setup);
- 
--int force_personality32 = 0; 
-+int force_personality32;
- 
- /* noexec32=on|off
- Control non executable heap for 32bit processes.
-@@ -177,7 +175,7 @@ void __cpuinit check_efer(void)
- 	unsigned long efer;
- 
- 	rdmsrl(MSR_EFER, efer); 
--        if (!(efer & EFER_NX) || do_not_nx) { 
-+        if (!(efer & EFER_NX)) { 
-                 __supported_pte_mask &= ~_PAGE_NX; 
-         }       
- }
---- a/arch/x86_64/kernel/signal.c
-+++ b/arch/x86_64/kernel/signal.c
-@@ -254,8 +254,8 @@ static int setup_rt_frame(int sig, struc
- 	err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0], me);
- 	err |= __put_user(fp, &frame->uc.uc_mcontext.fpstate);
- 	if (sizeof(*set) == 16) { 
--		__put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]);
--		__put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]); 
-+		err |= __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]);
-+		err |= __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]); 
- 	} else
- 		err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
- 
---- a/arch/x86_64/kernel/sys_x86_64.c
-+++ b/arch/x86_64/kernel/sys_x86_64.c
-@@ -65,8 +65,8 @@ out:
- 	return error;
- }
- 
--static void find_start_end(unsigned long flags, unsigned long *begin,
--			   unsigned long *end)
-+static void find_start_end(struct mm_struct *mm, unsigned long flags,
-+			   unsigned long *begin, unsigned long *end)
- {
- 	if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
- 		/* This is usually used needed to map code in small
-@@ -79,7 +79,7 @@ static void find_start_end(unsigned long
- 		*begin = 0x40000000; 
- 		*end = 0x80000000;		
- 	} else {
--		*begin = TASK_UNMAPPED_BASE;
-+		*begin = mm->mmap_base;
- 		*end = TASK_SIZE; 
- 	}
- } 
-@@ -96,11 +96,15 @@ arch_get_unmapped_area(struct file *filp
- 	if (flags & MAP_FIXED)
- 		return addr;
- 
--	find_start_end(flags, &begin, &end); 
-+	find_start_end(mm, flags, &begin, &end);
- 
- 	if (len > end)
- 		return -ENOMEM;
- 
-+#ifdef CONFIG_PAX_RANDMMAP
-+	if (!(mm->pax_flags & MF_PAX_RANDMMAP) || !filp)
-+#endif
-+
- 	if (addr) {
- 		addr = PAGE_ALIGN(addr);
- 		vma = find_vma(mm, addr);
---- a/arch/x86_64/kernel/vsyscall.c
-+++ b/arch/x86_64/kernel/vsyscall.c
-@@ -273,13 +273,13 @@ static ctl_table kernel_table2[] = {
- 	  .mode = 0644,
- 	  .strategy = vsyscall_sysctl_nostrat,
- 	  .proc_handler = vsyscall_sysctl_change },
--	{}
-+	{ 0, NULL, NULL, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL }
- };
- 
- static ctl_table kernel_root_table2[] = {
- 	{ .ctl_name = CTL_KERN, .procname = "kernel", .mode = 0555,
- 	  .child = kernel_table2 },
--	{}
-+	{ 0, NULL, NULL, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL }
- };
- 
- #endif
---- a/arch/x86_64/mm/fault.c
-+++ b/arch/x86_64/mm/fault.c
-@@ -25,6 +25,7 @@
- #include <linux/kprobes.h>
- #include <linux/uaccess.h>
- #include <linux/kdebug.h>
-+#include <linux/binfmts.h>
- 
- #include <asm/system.h>
- #include <asm/pgalloc.h>
-@@ -291,6 +292,163 @@ static int vmalloc_fault(unsigned long a
- 	return 0;
- }
- 
-+#ifdef CONFIG_PAX_EMUTRAMP
-+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
-+{
-+	int err;
-+
-+	do { /* PaX: gcc trampoline emulation #1 */
-+		unsigned char mov1, mov2;
-+		unsigned short jmp;
-+		unsigned int addr1, addr2;
-+
-+		if ((regs->rip + 11) >> 32)
-+			break;
-+
-+		err = get_user(mov1, (unsigned char __user *)regs->rip);
-+		err |= get_user(addr1, (unsigned int __user *)(regs->rip + 1));
-+		err |= get_user(mov2, (unsigned char __user *)(regs->rip + 5));
-+		err |= get_user(addr2, (unsigned int __user *)(regs->rip + 6));
-+		err |= get_user(jmp, (unsigned short __user *)(regs->rip + 10));
-+
-+		if (err)
-+			break;
-+
-+		if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
-+			regs->rcx = addr1;
-+			regs->rax = addr2;
-+			regs->rip = addr2;
-+			return 2;
-+		}
-+	} while (0);
-+
-+	do { /* PaX: gcc trampoline emulation #2 */
-+		unsigned char mov, jmp;
-+		unsigned int addr1, addr2;
-+
-+		if ((regs->rip + 9) >> 32)
-+			break;
-+
-+		err = get_user(mov, (unsigned char __user *)regs->rip);
-+		err |= get_user(addr1, (unsigned int __user *)(regs->rip + 1));
-+		err |= get_user(jmp, (unsigned char __user *)(regs->rip + 5));
-+		err |= get_user(addr2, (unsigned int __user *)(regs->rip + 6));
-+
-+		if (err)
-+			break;
-+
-+		if (mov == 0xB9 && jmp == 0xE9) {
-+			regs->rcx = addr1;
-+			regs->rip = (unsigned int)(regs->rip + addr2 + 10);
-+			return 2;
-+		}
-+	} while (0);
-+
-+	return 1; /* PaX in action */
-+}
-+
-+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
-+{
-+	int err;
-+
-+	do { /* PaX: gcc trampoline emulation #1 */
-+		unsigned short mov1, mov2, jmp1;
-+		unsigned char jmp2;
-+		unsigned int addr1;
-+		unsigned long addr2;
-+
-+		err = get_user(mov1, (unsigned short __user *)regs->rip);
-+		err |= get_user(addr1, (unsigned int __user *)(regs->rip + 2));
-+		err |= get_user(mov2, (unsigned short __user *)(regs->rip + 6));
-+		err |= get_user(addr2, (unsigned long __user *)(regs->rip + 8));
-+		err |= get_user(jmp1, (unsigned short __user *)(regs->rip + 16));
-+		err |= get_user(jmp2, (unsigned char __user *)(regs->rip + 18));
-+
-+		if (err)
-+			break;
-+
-+		if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
-+			regs->r11 = addr1;
-+			regs->r10 = addr2;
-+			regs->rip = addr1;
-+			return 2;
-+		}
-+	} while (0);
-+
-+	do { /* PaX: gcc trampoline emulation #2 */
-+		unsigned short mov1, mov2, jmp1;
-+		unsigned char jmp2;
-+		unsigned long addr1, addr2;
-+
-+		err = get_user(mov1, (unsigned short __user *)regs->rip);
-+		err |= get_user(addr1, (unsigned long __user *)(regs->rip + 2));
-+		err |= get_user(mov2, (unsigned short __user *)(regs->rip + 10));
-+		err |= get_user(addr2, (unsigned long __user *)(regs->rip + 12));
-+		err |= get_user(jmp1, (unsigned short __user *)(regs->rip + 20));
-+		err |= get_user(jmp2, (unsigned char __user *)(regs->rip + 22));
-+
-+		if (err)
-+			break;
-+
-+		if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
-+			regs->r11 = addr1;
-+			regs->r10 = addr2;
-+			regs->rip = addr1;
-+			return 2;
-+		}
-+	} while (0);
-+
-+	return 1; /* PaX in action */
-+}
-+
-+/*
-+ * PaX: decide what to do with offenders (regs->rip = fault address)
-+ *
-+ * returns 1 when task should be killed
-+ *         2 when gcc trampoline was detected
-+ */
-+static int pax_handle_fetch_fault(struct pt_regs *regs)
-+{
-+	if (regs->eflags & X86_EFLAGS_VM)
-+		return 1;
-+
-+	if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
-+		return 1;
-+
-+	if (regs->cs == __USER32_CS || (regs->cs & (1<<2)))
-+		return pax_handle_fetch_fault_32(regs);
-+	else
-+		return pax_handle_fetch_fault_64(regs);
-+}
-+#endif
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+void pax_report_insns(void *pc, void *sp)
-+{
-+	long i;
-+
-+	printk(KERN_ERR "PAX: bytes at PC: ");
-+	for (i = 0; i < 20; i++) {
-+		unsigned char c;
-+		if (get_user(c, (unsigned char __user *)pc+i))
-+			printk("?? ");
-+		else
-+			printk("%02x ", c);
-+	}
-+	printk("\n");
-+
-+	printk(KERN_ERR "PAX: bytes at SP-8: ");
-+	for (i = -1; i < 10; i++) {
-+		unsigned long c;
-+		if (get_user(c, (unsigned long __user *)sp+i))
-+			printk("???????????????? ");
-+		else
-+			printk("%016lx ", c);
-+	}
-+	printk("\n");
-+}
-+#endif
-+
- static int page_fault_trace;
- int show_unhandled_signals = 1;
- 
-@@ -427,6 +585,8 @@ asmlinkage void __kprobes do_page_fault(
- good_area:
- 	info.si_code = SEGV_ACCERR;
- 	write = 0;
-+	if ((error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
-+		goto bad_area;
- 	switch (error_code & (PF_PROT|PF_WRITE)) {
- 		default:	/* 3: write, present */
- 			/* fall through */
-@@ -478,6 +638,21 @@ bad_area_nosemaphore:
- 		 */
- 		local_irq_enable();
- 
-+#ifdef CONFIG_PAX_PAGEEXEC
-+		if (mm && (mm->pax_flags & MF_PAX_PAGEEXEC) && (error_code & 16)) {
-+
-+#ifdef CONFIG_PAX_EMUTRAMP
-+			switch (pax_handle_fetch_fault(regs)) {
-+			case 2:
-+				return;
-+			}
-+#endif
-+
-+			pax_report_fault(regs, (void*)regs->rip, (void*)regs->rsp);
-+			do_exit(SIGKILL);
-+		}
-+#endif
-+
- 		if (is_prefetch(regs, address, error_code))
- 			return;
- 
-@@ -499,7 +674,7 @@ bad_area_nosemaphore:
- 					tsk->comm, tsk->pid, address, regs->rip,
- 					regs->rsp, error_code);
- 		}
--       
-+
- 		tsk->thread.cr2 = address;
- 		/* Kernel addresses are always protection faults */
- 		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
---- a/arch/x86_64/mm/init.c
-+++ b/arch/x86_64/mm/init.c
-@@ -45,7 +45,7 @@
- #include <asm/sections.h>
- 
- #ifndef Dprintk
--#define Dprintk(x...)
-+#define Dprintk(x...) do {} while (0)
- #endif
- 
- const struct dma_mapping_ops* dma_ops;
-@@ -736,7 +736,7 @@ int in_gate_area_no_task(unsigned long a
- 
- const char *arch_vma_name(struct vm_area_struct *vma)
- {
--	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
-+	if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
- 		return "[vdso]";
- 	if (vma == &gate_vma)
- 		return "[vsyscall]";
---- a/arch/x86_64/mm/mmap.c
-+++ b/arch/x86_64/mm/mmap.c
-@@ -23,6 +23,12 @@ void arch_pick_mmap_layout(struct mm_str
- 		unsigned rnd = get_random_int() & 0xfffffff;
- 		mm->mmap_base += ((unsigned long)rnd) << PAGE_SHIFT;
- 	}
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+	if (mm->pax_flags & MF_PAX_RANDMMAP)
-+		mm->mmap_base += mm->delta_mmap;
-+#endif
-+
- 	mm->get_unmapped_area = arch_get_unmapped_area;
- 	mm->unmap_area = arch_unmap_area;
- }
---- a/arch/x86_64/mm/numa.c
-+++ b/arch/x86_64/mm/numa.c
-@@ -19,7 +19,7 @@
- #include <asm/acpi.h>
- 
- #ifndef Dprintk
--#define Dprintk(x...)
-+#define Dprintk(x...) do {} while (0)
- #endif
- 
- struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
---- a/arch/x86_64/vdso/vma.c
-+++ b/arch/x86_64/vdso/vma.c
-@@ -126,7 +126,7 @@ int arch_setup_additional_pages(struct l
- 	if (ret)
- 		goto up_fail;
- 
--	current->mm->context.vdso = (void *)addr;
-+	current->mm->context.vdso = addr;
- up_fail:
- 	up_write(&mm->mmap_sem);
- 	return ret;
---- a/crypto/async_tx/async_tx.c
-+++ b/crypto/async_tx/async_tx.c
-@@ -342,8 +342,8 @@ async_tx_init(void)
- err:
- 	printk(KERN_ERR "async_tx: initialization failure\n");
- 
--	while (--cap >= 0)
--		free_percpu(channel_table[cap]);
-+	while (cap)
-+		free_percpu(channel_table[--cap]);
- 
- 	return 1;
- }
---- a/crypto/lrw.c
-+++ b/crypto/lrw.c
-@@ -54,7 +54,7 @@ static int setkey(struct crypto_tfm *par
- 	struct priv *ctx = crypto_tfm_ctx(parent);
- 	struct crypto_cipher *child = ctx->child;
- 	int err, i;
--	be128 tmp = { 0 };
-+	be128 tmp = { 0, 0 };
- 	int bsize = crypto_cipher_blocksize(child);
- 
- 	crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
---- a/Documentation/dontdiff
-+++ b/Documentation/dontdiff
-@@ -176,14 +176,18 @@ times.h*
- tkparse
- trix_boot.h
- utsrelease.h*
-+vdso.lds
- version.h*
- vmlinux
- vmlinux-*
- vmlinux.aout
-+vmlinux.bin.all
- vmlinux.lds
-+vmlinux.relocs
- vsyscall.lds
- wanxlfw.inc
- uImage
- unifdef
-+utsrelease.h
- zImage*
- zconf.hash.c
---- a/drivers/acpi/blacklist.c
-+++ b/drivers/acpi/blacklist.c
-@@ -70,7 +70,7 @@ static struct acpi_blacklist_item acpi_b
- 	{"ASUS\0\0", "P2B-S   ", 0, ACPI_SIG_DSDT, all_versions,
- 	 "Bogus PCI routing", 1},
- 
--	{""}
-+	{"", "", 0, 0, 0, all_versions, 0}
- };
- 
- #if	CONFIG_ACPI_BLACKLIST_YEAR
---- a/drivers/acpi/osl.c
-+++ b/drivers/acpi/osl.c
-@@ -1233,7 +1233,7 @@ static struct dmi_system_id acpi_osl_dmi
- 		     DMI_MATCH(DMI_BOARD_NAME, "MPAD-MSAE Customer Reference Boards"),
- 		     },
- 	 },
--	{}
-+	{ NULL, NULL, {DMI_MATCH(DMI_NONE, NULL)}, NULL}
- };
- #endif /* CONFIG_DMI */
- 
---- a/drivers/acpi/processor_core.c
-+++ b/drivers/acpi/processor_core.c
-@@ -643,7 +643,7 @@ static int __cpuinit acpi_processor_star
- 		return 0;
- 	}
- 
--	BUG_ON((pr->id >= NR_CPUS) || (pr->id < 0));
-+	BUG_ON(pr->id >= NR_CPUS);
- 
- 	/*
- 	 * Buggy BIOS check
---- a/drivers/acpi/processor_idle.c
-+++ b/drivers/acpi/processor_idle.c
-@@ -164,7 +164,7 @@ static struct dmi_system_id __cpuinitdat
- 	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
- 	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
- 	 (void *)2},
--	{},
-+	{ NULL, NULL, {DMI_MATCH(DMI_NONE, NULL)}, NULL},
- };
- 
- static inline u32 ticks_elapsed(u32 t1, u32 t2)
---- a/drivers/acpi/sleep/main.c
-+++ b/drivers/acpi/sleep/main.c
-@@ -228,7 +228,7 @@ static struct dmi_system_id __initdata a
- 	 .ident = "Toshiba Satellite 4030cdt",
- 	 .matches = {DMI_MATCH(DMI_PRODUCT_NAME, "S4030CDT/4.3"),},
- 	 },
--	{},
-+	{ NULL, NULL, {DMI_MATCH(DMI_NONE, NULL)}, NULL},
- };
- #endif /* CONFIG_SUSPEND */
- 
---- a/drivers/acpi/tables/tbfadt.c
-+++ b/drivers/acpi/tables/tbfadt.c
-@@ -48,7 +48,7 @@
- ACPI_MODULE_NAME("tbfadt")
- 
- /* Local prototypes */
--static void inline
-+static inline void
- acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
- 			     u8 bit_width, u64 address);
- 
-@@ -122,7 +122,7 @@ static struct acpi_fadt_info fadt_info_t
-  *
-  ******************************************************************************/
- 
--static void inline
-+static inline void
- acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
- 			     u8 bit_width, u64 address)
- {
---- a/drivers/ata/ahci.c
-+++ b/drivers/ata/ahci.c
-@@ -523,7 +523,7 @@ static const struct pci_device_id ahci_p
- 	{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
- 	  PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
- 
--	{ }	/* terminate list */
-+	{ 0, 0, 0, 0, 0, 0, 0 }	/* terminate list */
- };
- 
- 
---- a/drivers/ata/ata_piix.c
-+++ b/drivers/ata/ata_piix.c
-@@ -257,7 +257,7 @@ static const struct pci_device_id piix_p
- 	/* SATA Controller IDE (Tolapai) */
- 	{ 0x8086, 0x5028, PCI_ANY_ID, PCI_ANY_ID, 0, 0, tolapai_sata_ahci },
- 
--	{ }	/* terminate list */
-+	{ 0, 0, 0, 0, 0, 0, 0 }	/* terminate list */
- };
- 
- static struct pci_driver piix_pci_driver = {
-@@ -617,7 +617,7 @@ static const struct ich_laptop ich_lapto
- 	{ 0x27DF, 0x1043, 0x1267 },	/* ICH7 on Asus W5F */
- 	{ 0x24CA, 0x1025, 0x0061 },	/* ICH4 on ACER Aspire 2023WLMi */
- 	/* end marker */
--	{ 0, }
-+	{ 0, 0, 0 }
- };
- 
- /**
-@@ -963,7 +963,7 @@ static int piix_broken_suspend(void)
- 			},
- 		},
- 
--		{ }	/* terminate list */
-+		{ NULL, NULL, {DMI_MATCH(DMI_NONE, NULL)}, NULL }	/* terminate list */
- 	};
- 	static const char *oemstrs[] = {
- 		"Tecra M3,",
---- a/drivers/ata/libata-core.c
-+++ b/drivers/ata/libata-core.c
-@@ -472,7 +472,7 @@ static const struct ata_xfer_ent {
- 	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
- 	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
- 	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
--	{ -1, },
-+	{ -1, 0, 0 },
- };
- 
- /**
-@@ -2546,7 +2546,7 @@ static const struct ata_timing ata_timin
- 
- /*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */
- 
--	{ 0xFF }
-+	{ 0xFF, 0, 0, 0, 0, 0, 0, 0, 0 }
- };
- 
- #define ENOUGH(v,unit)		(((v)-1)/(unit)+1)
-@@ -3799,7 +3799,7 @@ static const struct ata_blacklist_entry 
- 	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },
- 
- 	/* End Marker */
--	{ }
-+	{ NULL, NULL, 0 }
- };
- 
- static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
---- a/drivers/char/agp/frontend.c
-+++ b/drivers/char/agp/frontend.c
-@@ -820,7 +820,7 @@ static int agpioc_reserve_wrap(struct ag
- 	if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
- 		return -EFAULT;
- 
--	if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
-+	if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
- 		return -EFAULT;
- 
- 	client = agp_find_client_by_pid(reserve.pid);
---- a/drivers/char/agp/intel-agp.c
-+++ b/drivers/char/agp/intel-agp.c
-@@ -2071,7 +2071,7 @@ static struct pci_device_id agp_intel_pc
- 	ID(PCI_DEVICE_ID_INTEL_G33_HB),
- 	ID(PCI_DEVICE_ID_INTEL_Q35_HB),
- 	ID(PCI_DEVICE_ID_INTEL_Q33_HB),
--	{ }
-+	{ 0, 0, 0, 0, 0, 0, 0 }
- };
- 
- MODULE_DEVICE_TABLE(pci, agp_intel_pci_table);
---- a/drivers/char/drm/drm_pciids.h
-+++ b/drivers/char/drm/drm_pciids.h
-@@ -251,7 +251,7 @@
- 	{0x8086, 0x7123, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- 	{0x8086, 0x7125, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- 	{0x8086, 0x1132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
--	{0, 0, 0}
-+	{0, 0, 0, 0, 0, 0, 0 }
- 
- #define i830_PCI_IDS \
- 	{0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
---- a/drivers/char/hpet.c
-+++ b/drivers/char/hpet.c
-@@ -1028,7 +1028,7 @@ static struct acpi_driver hpet_acpi_driv
- 		},
- };
- 
--static struct miscdevice hpet_misc = { HPET_MINOR, "hpet", &hpet_fops };
-+static struct miscdevice hpet_misc = { HPET_MINOR, "hpet", &hpet_fops, {NULL, NULL}, NULL, NULL };
- 
- static int __init hpet_init(void)
- {
---- a/drivers/char/keyboard.c
-+++ b/drivers/char/keyboard.c
-@@ -605,6 +605,16 @@ static void k_spec(struct vc_data *vc, u
- 	     kbd->kbdmode == VC_MEDIUMRAW) &&
- 	     value != KVAL(K_SAK))
- 		return;		/* SAK is allowed even in raw mode */
-+
-+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
-+	{
-+		void *func = fn_handler[value];
-+		if (func == fn_show_state || func == fn_show_ptregs ||
-+		    func == fn_show_mem)
-+			return;
-+	}
-+#endif
-+
- 	fn_handler[value](vc);
- }
- 
-@@ -1340,7 +1350,7 @@ static const struct input_device_id kbd_
-                 .evbit = { BIT(EV_SND) },
-         },
- 
--	{ },    /* Terminating entry */
-+	{ 0 },    /* Terminating entry */
- };
- 
- MODULE_DEVICE_TABLE(input, kbd_ids);
---- a/drivers/char/mem.c
-+++ b/drivers/char/mem.c
-@@ -26,6 +26,7 @@
- #include <linux/bootmem.h>
- #include <linux/splice.h>
- #include <linux/pfn.h>
-+#include <linux/grsecurity.h>
- 
- #include <asm/uaccess.h>
- #include <asm/io.h>
-@@ -34,6 +35,10 @@
- # include <linux/efi.h>
- #endif
- 
-+#ifdef CONFIG_GRKERNSEC
-+extern struct file_operations grsec_fops;
-+#endif
-+
- /*
-  * Architectures vary in how they handle caching for addresses
-  * outside of main memory.
-@@ -180,6 +185,11 @@ static ssize_t write_mem(struct file * f
- 	if (!valid_phys_addr_range(p, count))
- 		return -EFAULT;
- 
-+#ifdef CONFIG_GRKERNSEC_KMEM
-+	gr_handle_mem_write();
-+	return -EPERM;
-+#endif
-+
- 	written = 0;
- 
- #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
-@@ -281,6 +291,11 @@ static int mmap_mem(struct file * file, 
- 	if (!private_mapping_ok(vma))
- 		return -ENOSYS;
- 
-+#ifdef CONFIG_GRKERNSEC_KMEM
-+	if (gr_handle_mem_mmap(vma->vm_pgoff << PAGE_SHIFT, vma))
-+		return -EPERM;
-+#endif
-+
- 	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
- 						 size,
- 						 vma->vm_page_prot);
-@@ -512,6 +527,11 @@ static ssize_t write_kmem(struct file * 
- 	ssize_t written;
- 	char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
- 
-+#ifdef CONFIG_GRKERNSEC_KMEM
-+	gr_handle_kmem_write();
-+	return -EPERM;
-+#endif
-+
- 	if (p < (unsigned long) high_memory) {
- 
- 		wrote = count;
-@@ -635,6 +655,10 @@ static inline size_t read_zero_pagealign
- 	struct vm_area_struct * vma;
- 	unsigned long addr=(unsigned long)buf;
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	struct vm_area_struct *vma_m;
-+#endif
-+
- 	mm = current->mm;
- 	/* Oops, this was forgotten before. -ben */
- 	down_read(&mm->mmap_sem);
-@@ -651,8 +675,14 @@ static inline size_t read_zero_pagealign
- 		if (count > size)
- 			count = size;
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+		vma_m = pax_find_mirror_vma(vma);
-+		if (vma_m)
-+			zap_page_range(vma_m, addr + SEGMEXEC_TASK_SIZE, count, NULL);
-+#endif
-+
- 		zap_page_range(vma, addr, count, NULL);
--        	if (zeromap_page_range(vma, addr, count, PAGE_COPY))
-+        	if (zeromap_page_range(vma, addr, count, vma->vm_page_prot))
- 			break;
- 
- 		size -= count;
-@@ -805,6 +835,16 @@ static loff_t memory_lseek(struct file *
- 
- static int open_port(struct inode * inode, struct file * filp)
- {
-+#ifdef CONFIG_GRKERNSEC_KMEM
-+	gr_handle_open_port();
-+	return -EPERM;
-+#endif
-+
-+	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
-+}
-+
-+static int open_mem(struct inode * inode, struct file * filp)
-+{
- 	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
- }
- 
-@@ -812,7 +852,6 @@ static int open_port(struct inode * inod
- #define full_lseek      null_lseek
- #define write_zero	write_null
- #define read_full       read_zero
--#define open_mem	open_port
- #define open_kmem	open_mem
- #define open_oldmem	open_mem
- 
-@@ -945,6 +984,11 @@ static int memory_open(struct inode * in
- 			filp->f_op = &oldmem_fops;
- 			break;
- #endif
-+#ifdef CONFIG_GRKERNSEC
-+		case 13:
-+			filp->f_op = &grsec_fops;
-+			break;
-+#endif
- 		default:
- 			return -ENXIO;
- 	}
-@@ -977,6 +1021,9 @@ static const struct {
- #ifdef CONFIG_CRASH_DUMP
- 	{12,"oldmem",    S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops},
- #endif
-+#ifdef CONFIG_GRKERNSEC
-+	{13,"grsec",	S_IRUSR | S_IWUGO,	    &grsec_fops},
-+#endif
- };
- 
- static struct class *mem_class;
---- a/drivers/char/nvram.c
-+++ b/drivers/char/nvram.c
-@@ -430,7 +430,10 @@ static const struct file_operations nvra
- static struct miscdevice nvram_dev = {
- 	NVRAM_MINOR,
- 	"nvram",
--	&nvram_fops
-+	&nvram_fops,
-+	{NULL, NULL},
-+	NULL,
-+	NULL
- };
- 
- static int __init
---- a/drivers/char/random.c
-+++ b/drivers/char/random.c
-@@ -248,8 +248,13 @@
- /*
-  * Configuration information
-  */
-+#ifdef CONFIG_GRKERNSEC_RANDNET
-+#define INPUT_POOL_WORDS 512
-+#define OUTPUT_POOL_WORDS 128
-+#else
- #define INPUT_POOL_WORDS 128
- #define OUTPUT_POOL_WORDS 32
-+#endif
- #define SEC_XFER_SIZE 512
- 
- /*
-@@ -286,10 +291,17 @@ static struct poolinfo {
- 	int poolwords;
- 	int tap1, tap2, tap3, tap4, tap5;
- } poolinfo_table[] = {
-+#ifdef CONFIG_GRKERNSEC_RANDNET
-+	/* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
-+	{ 512,	411,	308,	208,	104,	1 },
-+	/* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
-+	{ 128,	103,	76,	51,	25,	1 },
-+#else
- 	/* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
- 	{ 128,	103,	76,	51,	25,	1 },
- 	/* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
- 	{ 32,	26,	20,	14,	7,	1 },
-+#endif
- #if 0
- 	/* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1  -- 115 */
- 	{ 2048,	1638,	1231,	819,	411,	1 },
-@@ -1172,7 +1184,7 @@ EXPORT_SYMBOL(generate_random_uuid);
- #include <linux/sysctl.h>
- 
- static int min_read_thresh = 8, min_write_thresh;
--static int max_read_thresh = INPUT_POOL_WORDS * 32;
-+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
- static int max_write_thresh = INPUT_POOL_WORDS * 32;
- static char sysctl_bootid[16];
- 
---- a/drivers/char/vt_ioctl.c
-+++ b/drivers/char/vt_ioctl.c
-@@ -95,6 +95,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __
- 	case KDSKBENT:
- 		if (!perm)
- 			return -EPERM;
-+
-+#ifdef CONFIG_GRKERNSEC
-+		if (!capable(CAP_SYS_TTY_CONFIG))
-+			return -EPERM;
-+#endif
-+
- 		if (!i && v == K_NOSUCHMAP) {
- 			/* deallocate map */
- 			key_map = key_maps[s];
-@@ -235,6 +241,13 @@ do_kdgkb_ioctl(int cmd, struct kbsentry 
- 			goto reterr;
- 		}
- 
-+#ifdef CONFIG_GRKERNSEC
-+		if (!capable(CAP_SYS_TTY_CONFIG)) {
-+			ret = -EPERM;
-+			goto reterr;
-+		}
-+#endif
-+
- 		q = func_table[i];
- 		first_free = funcbufptr + (funcbufsize - funcbufleft);
- 		for (j = i+1; j < MAX_NR_FUNC && !func_table[j]; j++) 
---- a/drivers/edac/edac_core.h
-+++ b/drivers/edac/edac_core.h
-@@ -86,11 +86,11 @@ extern int edac_debug_level;
- 
- #else				/* !CONFIG_EDAC_DEBUG */
- 
--#define debugf0( ... )
--#define debugf1( ... )
--#define debugf2( ... )
--#define debugf3( ... )
--#define debugf4( ... )
-+#define debugf0( ... ) do {} while (0)
-+#define debugf1( ... ) do {} while (0)
-+#define debugf2( ... ) do {} while (0)
-+#define debugf3( ... ) do {} while (0)
-+#define debugf4( ... ) do {} while (0)
- 
- #endif				/* !CONFIG_EDAC_DEBUG */
- 
---- a/drivers/hwmon/fscpos.c
-+++ b/drivers/hwmon/fscpos.c
-@@ -231,7 +231,6 @@ static ssize_t set_pwm(struct i2c_client
- 	unsigned long v = simple_strtoul(buf, NULL, 10);
- 
- 	/* Range: 0..255 */
--	if (v < 0) v = 0;
- 	if (v > 255) v = 255;
- 
- 	mutex_lock(&data->update_lock);
---- a/drivers/hwmon/k8temp.c
-+++ b/drivers/hwmon/k8temp.c
-@@ -130,7 +130,7 @@ static DEVICE_ATTR(name, S_IRUGO, show_n
- 
- static struct pci_device_id k8temp_ids[] = {
- 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
--	{ 0 },
-+	{ 0, 0, 0, 0, 0, 0, 0 },
- };
- 
- MODULE_DEVICE_TABLE(pci, k8temp_ids);
---- a/drivers/hwmon/sis5595.c
-+++ b/drivers/hwmon/sis5595.c
-@@ -673,7 +673,7 @@ static struct sis5595_data *sis5595_upda
- 
- static struct pci_device_id sis5595_pci_ids[] = {
- 	{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
--	{ 0, }
-+	{ 0, 0, 0, 0, 0, 0, 0 }
- };
- 
- MODULE_DEVICE_TABLE(pci, sis5595_pci_ids);
---- a/drivers/hwmon/thmc50.c
-+++ b/drivers/hwmon/thmc50.c
-@@ -47,9 +47,9 @@ I2C_CLIENT_MODULE_PARM(adm1022_temp3, "L
- #define THMC50_REG_DIE_CODE			0x3F
- #define THMC50_REG_ANALOG_OUT			0x19
- 
--const static u8 THMC50_REG_TEMP[] = { 0x27, 0x26, 0x20 };
--const static u8 THMC50_REG_TEMP_MIN[] = { 0x3A, 0x38, 0x2C };
--const static u8 THMC50_REG_TEMP_MAX[] = { 0x39, 0x37, 0x2B };
-+static const u8 THMC50_REG_TEMP[] = { 0x27, 0x26, 0x20 };
-+static const u8 THMC50_REG_TEMP_MIN[] = { 0x3A, 0x38, 0x2C };
-+static const u8 THMC50_REG_TEMP_MAX[] = { 0x39, 0x37, 0x2B };
- 
- #define THMC50_REG_CONF_nFANOFF			0x20
- 
---- a/drivers/hwmon/via686a.c
-+++ b/drivers/hwmon/via686a.c
-@@ -740,7 +740,7 @@ static struct via686a_data *via686a_upda
- 
- static struct pci_device_id via686a_pci_ids[] = {
- 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4) },
--	{ 0, }
-+	{ 0, 0, 0, 0, 0, 0, 0 }
- };
- 
- MODULE_DEVICE_TABLE(pci, via686a_pci_ids);
---- a/drivers/hwmon/vt8231.c
-+++ b/drivers/hwmon/vt8231.c
-@@ -662,7 +662,7 @@ static struct platform_driver vt8231_dri
- 
- static struct pci_device_id vt8231_pci_ids[] = {
- 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231_4) },
--	{ 0, }
-+	{ 0, 0, 0, 0, 0, 0, 0 }
- };
- 
- MODULE_DEVICE_TABLE(pci, vt8231_pci_ids);
---- a/drivers/hwmon/w83791d.c
-+++ b/drivers/hwmon/w83791d.c
-@@ -289,8 +289,8 @@ static int w83791d_attach_adapter(struct
- static int w83791d_detect(struct i2c_adapter *adapter, int address, int kind);
- static int w83791d_detach_client(struct i2c_client *client);
- 
--static int w83791d_read(struct i2c_client *client, u8 register);
--static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
-+static int w83791d_read(struct i2c_client *client, u8 reg);
-+static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
- static struct w83791d_data *w83791d_update_device(struct device *dev);
- 
- #ifdef DEBUG
---- a/drivers/i2c/busses/i2c-i801.c
-+++ b/drivers/i2c/busses/i2c-i801.c
-@@ -543,7 +543,7 @@ static struct pci_device_id i801_ids[] =
- 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_17) },
- 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_5) },
- 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_6) },
--	{ 0, }
-+	{ 0, 0, 0, 0, 0, 0, 0 }
- };
- 
- MODULE_DEVICE_TABLE (pci, i801_ids);
---- a/drivers/i2c/busses/i2c-i810.c
-+++ b/drivers/i2c/busses/i2c-i810.c
-@@ -198,7 +198,7 @@ static struct pci_device_id i810_ids[] _
- 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82810E_IG) },
- 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC) },
- 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845G_IG) },
--	{ 0, },
-+	{ 0, 0, 0, 0, 0, 0, 0 },
- };
- 
- MODULE_DEVICE_TABLE (pci, i810_ids);
---- a/drivers/i2c/busses/i2c-piix4.c
-+++ b/drivers/i2c/busses/i2c-piix4.c
-@@ -113,7 +113,7 @@ static struct dmi_system_id __devinitdat
- 		.ident = "IBM",
- 		.matches = { DMI_MATCH(DMI_SYS_VENDOR, "IBM"), },
- 	},
--	{ },
-+	{ NULL, NULL, {DMI_MATCH(DMI_NONE, NULL)}, NULL },
- };
- 
- static int __devinit piix4_setup(struct pci_dev *PIIX4_dev,
-@@ -411,7 +411,7 @@ static struct pci_device_id piix4_ids[] 
- 	  .driver_data = 3 },
- 	{ PCI_DEVICE(PCI_VENDOR_ID_EFAR, PCI_DEVICE_ID_EFAR_SLC90E66_3),
- 	  .driver_data = 0 },
--	{ 0, }
-+	{ 0, 0, 0, 0, 0, 0, 0 }
- };
- 
- MODULE_DEVICE_TABLE (pci, piix4_ids);
---- a/drivers/i2c/busses/i2c-sis630.c
-+++ b/drivers/i2c/busses/i2c-sis630.c
-@@ -465,7 +465,7 @@ static struct i2c_adapter sis630_adapter
- static struct pci_device_id sis630_ids[] __devinitdata = {
- 	{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
- 	{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC) },
--	{ 0, }
-+	{ PCI_DEVICE(0, 0) }
- };
- 
- MODULE_DEVICE_TABLE (pci, sis630_ids);
---- a/drivers/i2c/busses/i2c-sis96x.c
-+++ b/drivers/i2c/busses/i2c-sis96x.c
-@@ -255,7 +255,7 @@ static struct i2c_adapter sis96x_adapter
- 
- static struct pci_device_id sis96x_ids[] = {
- 	{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_SMBUS) },
--	{ 0, }
-+	{ PCI_DEVICE(0, 0) }
- };
- 
- MODULE_DEVICE_TABLE (pci, sis96x_ids);
---- a/drivers/ide/ide-cd.c
-+++ b/drivers/ide/ide-cd.c
-@@ -457,8 +457,6 @@ void cdrom_analyze_sense_data(ide_drive_
- 			sector &= ~(bio_sectors -1);
- 			valid = (sector - failed_command->sector) << 9;
- 
--			if (valid < 0)
--				valid = 0;
- 			if (sector < get_capacity(info->disk) &&
- 				drive->probed_capacity - sector < 4 * 75) {
- 				set_capacity(info->disk, sector);
---- a/drivers/ieee1394/dv1394.c
-+++ b/drivers/ieee1394/dv1394.c
-@@ -739,7 +739,7 @@ static void frame_prepare(struct video_c
- 	based upon DIF section and sequence
- */
- 
--static void inline
-+static inline void
- frame_put_packet (struct frame *f, struct packet *p)
- {
- 	int section_type = p->data[0] >> 5;           /* section type is in bits 5 - 7 */
-@@ -918,7 +918,7 @@ static int do_dv1394_init(struct video_c
- 		/* default SYT offset is 3 cycles */
- 		init->syt_offset = 3;
- 
--	if ( (init->channel > 63) || (init->channel < 0) )
-+	if (init->channel > 63)
- 		init->channel = 63;
- 
- 	chan_mask = (u64)1 << init->channel;
-@@ -2173,7 +2173,7 @@ static struct ieee1394_device_id dv1394_
- 		.specifier_id	= AVC_UNIT_SPEC_ID_ENTRY & 0xffffff,
- 		.version	= AVC_SW_VERSION_ENTRY & 0xffffff
- 	},
--	{ }
-+	{ 0, 0, 0, 0, 0, 0 }
- };
- 
- MODULE_DEVICE_TABLE(ieee1394, dv1394_id_table);
---- a/drivers/ieee1394/eth1394.c
-+++ b/drivers/ieee1394/eth1394.c
-@@ -449,7 +449,7 @@ static struct ieee1394_device_id eth1394
- 		.specifier_id =	ETHER1394_GASP_SPECIFIER_ID,
- 		.version = ETHER1394_GASP_VERSION,
- 	},
--	{}
-+	{ 0, 0, 0, 0, 0, 0 }
- };
- 
- MODULE_DEVICE_TABLE(ieee1394, eth1394_id_table);
---- a/drivers/ieee1394/hosts.c
-+++ b/drivers/ieee1394/hosts.c
-@@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso 
- }
- 
- static struct hpsb_host_driver dummy_driver = {
-+	.name =		   "dummy",
- 	.transmit_packet = dummy_transmit_packet,
- 	.devctl =	   dummy_devctl,
- 	.isoctl =	   dummy_isoctl
---- a/drivers/ieee1394/ohci1394.c
-+++ b/drivers/ieee1394/ohci1394.c
-@@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_
- printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
- 
- /* Module Parameters */
--static int phys_dma = 1;
-+static int phys_dma;
- module_param(phys_dma, int, 0444);
--MODULE_PARM_DESC(phys_dma, "Enable physical dma (default = 1).");
-+MODULE_PARM_DESC(phys_dma, "Enable physical dma (default = 0).");
- 
- static void dma_trm_tasklet(unsigned long data);
- static void dma_trm_reset(struct dma_trm_ctx *d);
-@@ -3396,7 +3396,7 @@ static struct pci_device_id ohci1394_pci
- 		.subvendor =	PCI_ANY_ID,
- 		.subdevice =	PCI_ANY_ID,
- 	},
--	{ 0, },
-+	{ 0, 0, 0, 0, 0, 0, 0 },
- };
- 
- MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl);
---- a/drivers/ieee1394/raw1394.c
-+++ b/drivers/ieee1394/raw1394.c
-@@ -2952,7 +2952,7 @@ static struct ieee1394_device_id raw1394
- 	 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
- 	 .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
- 	 .version = (CAMERA_SW_VERSION_ENTRY + 2) & 0xffffff},
--	{}
-+	{ 0, 0, 0, 0, 0, 0 }
- };
- 
- MODULE_DEVICE_TABLE(ieee1394, raw1394_id_table);
---- a/drivers/ieee1394/sbp2.c
-+++ b/drivers/ieee1394/sbp2.c
-@@ -272,7 +272,7 @@ static struct ieee1394_device_id sbp2_id
- 	 .match_flags	= IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
- 	 .specifier_id	= SBP2_UNIT_SPEC_ID_ENTRY & 0xffffff,
- 	 .version	= SBP2_SW_VERSION_ENTRY & 0xffffff},
--	{}
-+	{ 0, 0, 0, 0, 0, 0 }
- };
- MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table);
- 
-@@ -2063,7 +2063,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 prot
- MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
- MODULE_LICENSE("GPL");
- 
--static int sbp2_module_init(void)
-+static int __init sbp2_module_init(void)
- {
- 	int ret;
- 
---- a/drivers/ieee1394/video1394.c
-+++ b/drivers/ieee1394/video1394.c
-@@ -893,7 +893,7 @@ static long video1394_ioctl(struct file 
- 		if (unlikely(d == NULL))
- 			return -EFAULT;
- 
--		if (unlikely((v.buffer<0) || (v.buffer>=d->num_desc - 1))) {
-+		if (unlikely(v.buffer>=d->num_desc - 1)) {
- 			PRINT(KERN_ERR, ohci->host->id,
- 			      "Buffer %d out of range",v.buffer);
- 			return -EINVAL;
-@@ -959,7 +959,7 @@ static long video1394_ioctl(struct file 
- 		if (unlikely(d == NULL))
- 			return -EFAULT;
- 
--		if (unlikely((v.buffer<0) || (v.buffer>d->num_desc - 1))) {
-+		if (unlikely(v.buffer>d->num_desc - 1)) {
- 			PRINT(KERN_ERR, ohci->host->id,
- 			      "Buffer %d out of range",v.buffer);
- 			return -EINVAL;
-@@ -1030,7 +1030,7 @@ static long video1394_ioctl(struct file 
- 		d = find_ctx(&ctx->context_list, OHCI_ISO_TRANSMIT, v.channel);
- 		if (d == NULL) return -EFAULT;
- 
--		if ((v.buffer<0) || (v.buffer>=d->num_desc - 1)) {
-+		if (v.buffer>=d->num_desc - 1) {
- 			PRINT(KERN_ERR, ohci->host->id,
- 			      "Buffer %d out of range",v.buffer);
- 			return -EINVAL;
-@@ -1137,7 +1137,7 @@ static long video1394_ioctl(struct file 
- 		d = find_ctx(&ctx->context_list, OHCI_ISO_TRANSMIT, v.channel);
- 		if (d == NULL) return -EFAULT;
- 
--		if ((v.buffer<0) || (v.buffer>=d->num_desc-1)) {
-+		if (v.buffer>=d->num_desc-1) {
- 			PRINT(KERN_ERR, ohci->host->id,
- 			      "Buffer %d out of range",v.buffer);
- 			return -EINVAL;
-@@ -1309,7 +1309,7 @@ static struct ieee1394_device_id video13
-                 .specifier_id   = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
-                 .version        = (CAMERA_SW_VERSION_ENTRY + 2) & 0xffffff
-         },
--	{ }
-+	{ 0, 0, 0, 0, 0, 0 }
- };
- 
- MODULE_DEVICE_TABLE(ieee1394, video1394_id_table);
---- a/drivers/input/keyboard/atkbd.c
-+++ b/drivers/input/keyboard/atkbd.c
-@@ -1075,7 +1075,7 @@ static struct serio_device_id atkbd_seri
- 		.id	= SERIO_ANY,
- 		.extra	= SERIO_ANY,
- 	},
--	{ 0 }
-+	{ 0, 0, 0, 0 }
- };
- 
- MODULE_DEVICE_TABLE(serio, atkbd_serio_ids);
---- a/drivers/input/mousedev.c
-+++ b/drivers/input/mousedev.c
-@@ -847,7 +847,7 @@ static struct input_handler mousedev_han
- 
- #ifdef CONFIG_INPUT_MOUSEDEV_PSAUX
- static struct miscdevice psaux_mouse = {
--	PSMOUSE_MINOR, "psaux", &mousedev_fops
-+	PSMOUSE_MINOR, "psaux", &mousedev_fops, {NULL, NULL}, NULL, NULL
- };
- static int psaux_registered;
- #endif
---- a/drivers/input/mouse/lifebook.c
-+++ b/drivers/input/mouse/lifebook.c
-@@ -102,7 +102,7 @@ static struct dmi_system_id lifebook_dmi
- 			DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook B142"),
- 		},
- 	},
--	{ }
-+	{ NULL, NULL, {DMI_MATCH(DMI_NONE, NULL)}, NULL}
- };
- 
- static psmouse_ret_t lifebook_process_byte(struct psmouse *psmouse)
---- a/drivers/input/mouse/psmouse-base.c
-+++ b/drivers/input/mouse/psmouse-base.c
-@@ -1325,7 +1325,7 @@ static struct serio_device_id psmouse_se
- 		.id	= SERIO_ANY,
- 		.extra	= SERIO_ANY,
- 	},
--	{ 0 }
-+	{ 0, 0, 0, 0 }
- };
- 
- MODULE_DEVICE_TABLE(serio, psmouse_serio_ids);
---- a/drivers/input/mouse/synaptics.c
-+++ b/drivers/input/mouse/synaptics.c
-@@ -417,7 +417,7 @@ static void synaptics_process_packet(str
- 				break;
- 			case 2:
- 				if (SYN_MODEL_PEN(priv->model_id))
--					;   /* Nothing, treat a pen as a single finger */
-+					break;   /* Nothing, treat a pen as a single finger */
- 				break;
- 			case 4 ... 15:
- 				if (SYN_CAP_PALMDETECT(priv->capabilities))
-@@ -624,7 +624,7 @@ static struct dmi_system_id toshiba_dmi_
- 			DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE M300"),
- 		},
- 	},
--	{ }
-+	{ NULL, NULL, {DMI_MATCH(DMI_NONE, NULL)}, NULL }
- };
- #endif
- 
---- a/drivers/input/serio/i8042-x86ia64io.h
-+++ b/drivers/input/serio/i8042-x86ia64io.h
-@@ -110,7 +110,7 @@ static struct dmi_system_id __initdata i
- 			DMI_MATCH(DMI_PRODUCT_VERSION, "5a"),
- 		},
- 	},
--	{ }
-+	{ NULL, NULL, {DMI_MATCH(DMI_NONE, NULL)}, NULL }
- };
- 
- /*
-@@ -262,7 +262,7 @@ static struct dmi_system_id __initdata i
- 			DMI_MATCH(DMI_PRODUCT_NAME, "M636/A737 platform"),
- 		},
- 	},
--	{ }
-+	{ NULL, NULL, {DMI_MATCH(DMI_NONE, NULL)}, NULL }
- };
- 
- 
---- a/drivers/input/serio/serio_raw.c
-+++ b/drivers/input/serio/serio_raw.c
-@@ -369,7 +369,7 @@ static struct serio_device_id serio_raw_
- 		.id	= SERIO_ANY,
- 		.extra	= SERIO_ANY,
- 	},
--	{ 0 }
-+	{ 0, 0, 0, 0 }
- };
- 
- MODULE_DEVICE_TABLE(serio, serio_raw_serio_ids);
---- a/drivers/kvm/kvm_main.c
-+++ b/drivers/kvm/kvm_main.c
-@@ -63,21 +63,21 @@ static struct kvm_stats_debugfs_item {
- 	int offset;
- 	struct dentry *dentry;
- } debugfs_entries[] = {
--	{ "pf_fixed", STAT_OFFSET(pf_fixed) },
--	{ "pf_guest", STAT_OFFSET(pf_guest) },
--	{ "tlb_flush", STAT_OFFSET(tlb_flush) },
--	{ "invlpg", STAT_OFFSET(invlpg) },
--	{ "exits", STAT_OFFSET(exits) },
--	{ "io_exits", STAT_OFFSET(io_exits) },
--	{ "mmio_exits", STAT_OFFSET(mmio_exits) },
--	{ "signal_exits", STAT_OFFSET(signal_exits) },
--	{ "irq_window", STAT_OFFSET(irq_window_exits) },
--	{ "halt_exits", STAT_OFFSET(halt_exits) },
--	{ "request_irq", STAT_OFFSET(request_irq_exits) },
--	{ "irq_exits", STAT_OFFSET(irq_exits) },
--	{ "light_exits", STAT_OFFSET(light_exits) },
--	{ "efer_reload", STAT_OFFSET(efer_reload) },
--	{ NULL }
-+	{ "pf_fixed", STAT_OFFSET(pf_fixed), NULL },
-+	{ "pf_guest", STAT_OFFSET(pf_guest), NULL },
-+	{ "tlb_flush", STAT_OFFSET(tlb_flush), NULL },
-+	{ "invlpg", STAT_OFFSET(invlpg), NULL },
-+	{ "exits", STAT_OFFSET(exits), NULL },
-+	{ "io_exits", STAT_OFFSET(io_exits), NULL },
-+	{ "mmio_exits", STAT_OFFSET(mmio_exits), NULL },
-+	{ "signal_exits", STAT_OFFSET(signal_exits), NULL },
-+	{ "irq_window", STAT_OFFSET(irq_window_exits), NULL },
-+	{ "halt_exits", STAT_OFFSET(halt_exits), NULL },
-+	{ "request_irq", STAT_OFFSET(request_irq_exits), NULL },
-+	{ "irq_exits", STAT_OFFSET(irq_exits), NULL },
-+	{ "light_exits", STAT_OFFSET(light_exits), NULL },
-+	{ "efer_reload", STAT_OFFSET(efer_reload), NULL },
-+	{ NULL, 0, NULL }
- };
- 
- static struct dentry *debugfs_dir;
-@@ -2255,7 +2255,7 @@ static int kvm_vcpu_ioctl_translate(stru
- static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
- 				    struct kvm_interrupt *irq)
- {
--	if (irq->irq < 0 || irq->irq >= 256)
-+	if (irq->irq >= 256)
- 		return -EINVAL;
- 	vcpu_load(vcpu);
- 
-@@ -2895,6 +2895,9 @@ static struct miscdevice kvm_dev = {
- 	KVM_MINOR,
- 	"kvm",
- 	&kvm_chardev_ops,
-+	{NULL, NULL},
-+	NULL,
-+	NULL
- };
- 
- static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
---- a/drivers/kvm/vmx.c
-+++ b/drivers/kvm/vmx.c
-@@ -2148,7 +2148,7 @@ again:
- 
- 	vcpu->interrupt_window_open = (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
- 
--	asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
-+	asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__KERNEL_DS));
- 
- 	if (unlikely(fail)) {
- 		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
---- a/drivers/md/bitmap.c
-+++ b/drivers/md/bitmap.c
-@@ -57,7 +57,7 @@
- #  if DEBUG > 0
- #    define PRINTK(x...) printk(KERN_DEBUG x)
- #  else
--#    define PRINTK(x...)
-+#    define PRINTK(x...) do {} while (0)
- #  endif
- #endif
- 
---- a/drivers/mtd/devices/doc2000.c
-+++ b/drivers/mtd/devices/doc2000.c
-@@ -632,7 +632,7 @@ static int doc_read(struct mtd_info *mtd
- 			len = ((from | 0x1ff) + 1) - from;
- 
- 		/* The ECC will not be calculated correctly if less than 512 is read */
--		if (len != 0x200 && eccbuf)
-+		if (len != 0x200)
- 			printk(KERN_WARNING
- 			       "ECC needs a full sector read (adr: %lx size %lx)\n",
- 			       (long) from, (long) len);
---- a/drivers/mtd/devices/doc2001.c
-+++ b/drivers/mtd/devices/doc2001.c
-@@ -398,6 +398,8 @@ static int doc_read (struct mtd_info *mt
- 	/* Don't allow read past end of device */
- 	if (from >= this->totlen)
- 		return -EINVAL;
-+	if (!len)
-+		return -EINVAL;
- 
- 	/* Don't allow a single read to cross a 512-byte block boundary */
- 	if (from + len > ((from | 0x1ff) + 1))
---- a/drivers/mtd/devices/doc2001plus.c
-+++ b/drivers/mtd/devices/doc2001plus.c
-@@ -748,7 +748,7 @@ static int doc_write(struct mtd_info *mt
- 	WriteDOC(DoC_GetDataOffset(mtd, &fto), docptr, Mplus_FlashCmd);
- 
- 	/* On interleaved devices the flags for 2nd half 512 are before data */
--	if (eccbuf && before)
-+	if (before)
- 		fto -= 2;
- 
- 	/* issue the Serial Data In command to initial the Page Program process */
---- a/drivers/mtd/devices/slram.c
-+++ b/drivers/mtd/devices/slram.c
-@@ -270,7 +270,7 @@ static int parse_cmdline(char *devname, 
- 	}
- 	T("slram: devname=%s, devstart=0x%lx, devlength=0x%lx\n",
- 			devname, devstart, devlength);
--	if ((devstart < 0) || (devlength < 0) || (devlength % SLRAM_BLK_SZ != 0)) {
-+	if (devlength % SLRAM_BLK_SZ != 0) {
- 		E("slram: Illegal start / length parameter.\n");
- 		return(-EINVAL);
- 	}
---- a/drivers/mtd/ubi/build.c
-+++ b/drivers/mtd/ubi/build.c
-@@ -727,7 +727,7 @@ static int __init bytes_str_to_int(const
- 	unsigned long result;
- 
- 	result = simple_strtoul(str, &endp, 0);
--	if (str == endp || result < 0) {
-+	if (str == endp) {
- 		printk("UBI error: incorrect bytes count: \"%s\"\n", str);
- 		return -EINVAL;
- 	}
---- a/drivers/net/eepro100.c
-+++ b/drivers/net/eepro100.c
-@@ -47,7 +47,7 @@ static int rxdmacount /* = 0 */;
- # define rx_align(skb)		skb_reserve((skb), 2)
- # define RxFD_ALIGNMENT		__attribute__ ((aligned (2), packed))
- #else
--# define rx_align(skb)
-+# define rx_align(skb) do {} while (0)
- # define RxFD_ALIGNMENT
- #endif
- 
-@@ -2344,33 +2344,33 @@ static void __devexit eepro100_remove_on
- }
- 
- static struct pci_device_id eepro100_pci_tbl[] = {
--	{ PCI_VENDOR_ID_INTEL, 0x1229, PCI_ANY_ID, PCI_ANY_ID, },
--	{ PCI_VENDOR_ID_INTEL, 0x1209, PCI_ANY_ID, PCI_ANY_ID, },
--	{ PCI_VENDOR_ID_INTEL, 0x1029, PCI_ANY_ID, PCI_ANY_ID, },
--	{ PCI_VENDOR_ID_INTEL, 0x1030, PCI_ANY_ID, PCI_ANY_ID, },
--	{ PCI_VENDOR_ID_INTEL, 0x1031, PCI_ANY_ID, PCI_ANY_ID, },
--	{ PCI_VENDOR_ID_INTEL, 0x1032, PCI_ANY_ID, PCI_ANY_ID, },
--	{ PCI_VENDOR_ID_INTEL, 0x1033, PCI_ANY_ID, PCI_ANY_ID, },
--	{ PCI_VENDOR_ID_INTEL, 0x1034, PCI_ANY_ID, PCI_ANY_ID, },
--	{ PCI_VENDOR_ID_INTEL, 0x1035, PCI_ANY_ID, PCI_ANY_ID, },
--	{ PCI_VENDOR_ID_INTEL, 0x1036, PCI_ANY_ID, PCI_ANY_ID, },
--	{ PCI_VENDOR_ID_INTEL, 0x1037, PCI_ANY_ID, PCI_ANY_ID, },
--	{ PCI_VENDOR_ID_INTEL, 0x1038, PCI_ANY_ID, PCI_ANY_ID, },
--	{ PCI_VENDOR_ID_INTEL, 0x1039, PCI_ANY_ID, PCI_ANY_ID, },
--	{ PCI_VENDOR_ID_INTEL, 0x103A, PCI_ANY_ID, PCI_ANY_ID, },
--	{ PCI_VENDOR_ID_INTEL, 0x103B, PCI_ANY_ID, PCI_ANY_ID, },
--	{ PCI_VENDOR_ID_INTEL, 0x103C, PCI_ANY_ID, PCI_ANY_ID, },
--	{ PCI_VENDOR_ID_INTEL, 0x103D, PCI_ANY_ID, PCI_ANY_ID, },
--	{ PCI_VENDOR_ID_INTEL, 0x103E, PCI_ANY_ID, PCI_ANY_ID, },
--	{ PCI_VENDOR_ID_INTEL, 0x1050, PCI_ANY_ID, PCI_ANY_ID, },
--	{ PCI_VENDOR_ID_INTEL, 0x1059, PCI_ANY_ID, PCI_ANY_ID, },
--	{ PCI_VENDOR_ID_INTEL, 0x1227, PCI_ANY_ID, PCI_ANY_ID, },
--	{ PCI_VENDOR_ID_INTEL, 0x2449, PCI_ANY_ID, PCI_ANY_ID, },
--	{ PCI_VENDOR_ID_INTEL, 0x2459, PCI_ANY_ID, PCI_ANY_ID, },
--	{ PCI_VENDOR_ID_INTEL, 0x245D, PCI_ANY_ID, PCI_ANY_ID, },
--	{ PCI_VENDOR_ID_INTEL, 0x5200, PCI_ANY_ID, PCI_ANY_ID, },
--	{ PCI_VENDOR_ID_INTEL, 0x5201, PCI_ANY_ID, PCI_ANY_ID, },
--	{ 0,}
-+	{ PCI_VENDOR_ID_INTEL, 0x1229, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-+	{ PCI_VENDOR_ID_INTEL, 0x1209, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-+	{ PCI_VENDOR_ID_INTEL, 0x1029, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-+	{ PCI_VENDOR_ID_INTEL, 0x1030, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-+	{ PCI_VENDOR_ID_INTEL, 0x1031, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-+	{ PCI_VENDOR_ID_INTEL, 0x1032, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-+	{ PCI_VENDOR_ID_INTEL, 0x1033, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-+	{ PCI_VENDOR_ID_INTEL, 0x1034, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-+	{ PCI_VENDOR_ID_INTEL, 0x1035, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-+	{ PCI_VENDOR_ID_INTEL, 0x1036, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-+	{ PCI_VENDOR_ID_INTEL, 0x1037, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-+	{ PCI_VENDOR_ID_INTEL, 0x1038, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-+	{ PCI_VENDOR_ID_INTEL, 0x1039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-+	{ PCI_VENDOR_ID_INTEL, 0x103A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-+	{ PCI_VENDOR_ID_INTEL, 0x103B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-+	{ PCI_VENDOR_ID_INTEL, 0x103C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-+	{ PCI_VENDOR_ID_INTEL, 0x103D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-+	{ PCI_VENDOR_ID_INTEL, 0x103E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-+	{ PCI_VENDOR_ID_INTEL, 0x1050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-+	{ PCI_VENDOR_ID_INTEL, 0x1059, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-+	{ PCI_VENDOR_ID_INTEL, 0x1227, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-+	{ PCI_VENDOR_ID_INTEL, 0x2449, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-+	{ PCI_VENDOR_ID_INTEL, 0x2459, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-+	{ PCI_VENDOR_ID_INTEL, 0x245D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-+	{ PCI_VENDOR_ID_INTEL, 0x5200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-+	{ PCI_VENDOR_ID_INTEL, 0x5201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
-+	{ 0, 0, 0, 0, 0, 0, 0 }
- };
- MODULE_DEVICE_TABLE(pci, eepro100_pci_tbl);
- 
---- a/drivers/net/irda/vlsi_ir.c
-+++ b/drivers/net/irda/vlsi_ir.c
-@@ -906,13 +906,12 @@ static int vlsi_hard_start_xmit(struct s
- 			/* no race - tx-ring already empty */
- 			vlsi_set_baud(idev, iobase);
- 			netif_wake_queue(ndev);
--		}
--		else
--			;
-+		} else {
- 			/* keep the speed change pending like it would
- 			 * for any len>0 packet. tx completion interrupt
- 			 * will apply it when the tx ring becomes empty.
- 			 */
-+		}
- 		spin_unlock_irqrestore(&idev->lock, flags);
- 		dev_kfree_skb_any(skb);
- 		return 0;
---- a/drivers/net/pcnet32.c
-+++ b/drivers/net/pcnet32.c
-@@ -82,7 +82,7 @@ static int cards_found;
- /*
-  * VLB I/O addresses
-  */
--static unsigned int pcnet32_portlist[] __initdata =
-+static unsigned int pcnet32_portlist[] __devinitdata =
-     { 0x300, 0x320, 0x340, 0x360, 0 };
- 
- static int pcnet32_debug = 0;
---- a/drivers/net/tg3.h
-+++ b/drivers/net/tg3.h
-@@ -127,6 +127,7 @@
- #define  CHIPREV_ID_5750_A0		 0x4000
- #define  CHIPREV_ID_5750_A1		 0x4001
- #define  CHIPREV_ID_5750_A3		 0x4003
-+#define  CHIPREV_ID_5750_C1		 0x4201
- #define  CHIPREV_ID_5750_C2		 0x4202
- #define  CHIPREV_ID_5752_A0_HW		 0x5000
- #define  CHIPREV_ID_5752_A0		 0x6000
---- a/drivers/pci/hotplug/cpqphp_nvram.c
-+++ b/drivers/pci/hotplug/cpqphp_nvram.c
-@@ -425,9 +425,13 @@ static u32 store_HRT (void __iomem *rom_
- 
- void compaq_nvram_init (void __iomem *rom_start)
- {
-+
-+#ifndef CONFIG_PAX_KERNEXEC
- 	if (rom_start) {
- 		compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
- 	}
-+#endif
-+
- 	dbg("int15 entry  = %p\n", compaq_int15_entry_point);
- 
- 	/* initialize our int15 lock */
---- a/drivers/pci/pcie/aer/aerdrv.c
-+++ b/drivers/pci/pcie/aer/aerdrv.c
-@@ -58,7 +58,7 @@ static struct pcie_port_service_id aer_i
- 	.port_type 	= PCIE_RC_PORT,
- 	.service_type 	= PCIE_PORT_SERVICE_AER,
- 	},
--	{ /* end: all zeroes */ }
-+	{ 0, 0, 0, 0, 0, 0, 0, 0, 0 }
- };
- 
- static struct pci_error_handlers aer_error_handlers = {
---- a/drivers/pci/pcie/aer/aerdrv_core.c
-+++ b/drivers/pci/pcie/aer/aerdrv_core.c
-@@ -660,7 +660,7 @@ static void aer_isr_one_error(struct pci
- 		struct aer_err_source *e_src)
- {
- 	struct device *s_device;
--	struct aer_err_info e_info = {0, 0, 0,};
-+	struct aer_err_info e_info = {0, 0, 0, {0, 0, 0, 0}};
- 	int i;
- 	u16 id;
- 
---- a/drivers/pci/pcie/portdrv_pci.c
-+++ b/drivers/pci/pcie/portdrv_pci.c
-@@ -265,7 +265,7 @@ static void pcie_portdrv_err_resume(stru
- static const struct pci_device_id port_pci_ids[] = { {
- 	/* handle any PCI-Express port */
- 	PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_PCI << 8) | 0x00), ~0),
--	}, { /* end: all zeroes */ }
-+	}, { 0, 0, 0, 0, 0, 0, 0 }
- };
- MODULE_DEVICE_TABLE(pci, port_pci_ids);
- 
---- a/drivers/pci/proc.c
-+++ b/drivers/pci/proc.c
-@@ -466,7 +466,15 @@ static int __init pci_proc_init(void)
- {
- 	struct proc_dir_entry *entry;
- 	struct pci_dev *dev = NULL;
-+#ifdef CONFIG_GRKERNSEC_PROC_ADD
-+#ifdef CONFIG_GRKERNSEC_PROC_USER
-+	proc_bus_pci_dir = proc_mkdir_mode("pci", S_IRUSR | S_IXUSR, proc_bus);
-+#elif CONFIG_GRKERNSEC_PROC_USERGROUP
-+	proc_bus_pci_dir = proc_mkdir_mode("pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, proc_bus);
-+#endif
-+#else
- 	proc_bus_pci_dir = proc_mkdir("pci", proc_bus);
-+#endif
- 	entry = create_proc_entry("devices", 0, proc_bus_pci_dir);
- 	if (entry)
- 		entry->proc_fops = &proc_bus_pci_dev_operations;
---- a/drivers/pcmcia/ti113x.h
-+++ b/drivers/pcmcia/ti113x.h
-@@ -897,7 +897,7 @@ static struct pci_device_id ene_tune_tbl
- 	DEVID(PCI_VENDOR_ID_MOTOROLA, 0x3410, 0xECC0, PCI_ANY_ID,
- 		ENE_TEST_C9_TLTENABLE | ENE_TEST_C9_PFENABLE, ENE_TEST_C9_TLTENABLE),
- 
--	{}
-+	{ 0, 0, 0, 0, 0, 0, 0 }
- };
- 
- static void ene_tune_bridge(struct pcmcia_socket *sock, struct pci_bus *bus)
---- a/drivers/pcmcia/yenta_socket.c
-+++ b/drivers/pcmcia/yenta_socket.c
-@@ -1358,7 +1358,7 @@ static struct pci_device_id yenta_table 
- 
- 	/* match any cardbus bridge */
- 	CB_ID(PCI_ANY_ID, PCI_ANY_ID, DEFAULT),
--	{ /* all zeroes */ }
-+	{ 0, 0, 0, 0, 0, 0, 0 }
- };
- MODULE_DEVICE_TABLE(pci, yenta_table);
- 
---- a/drivers/pnp/pnpbios/bioscalls.c
-+++ b/drivers/pnp/pnpbios/bioscalls.c
-@@ -61,7 +61,7 @@ set_base(gdt[(selname) >> 3], (u32)(addr
- set_limit(gdt[(selname) >> 3], size); \
- } while(0)
- 
--static struct desc_struct bad_bios_desc = { 0, 0x00409200 };
-+static struct desc_struct bad_bios_desc __read_only = { 0, 0x00409300 };
- 
- /*
-  * At some point we want to use this stack frame pointer to unwind
-@@ -88,6 +88,10 @@ static inline u16 call_pnp_bios(u16 func
- 	struct desc_struct save_desc_40;
- 	int cpu;
- 
-+#ifdef CONFIG_PAX_KERNEXEC
-+	unsigned long cr0;
-+#endif
-+
- 	/*
- 	 * PnP BIOSes are generally not terribly re-entrant.
- 	 * Also, don't rely on them to save everything correctly.
-@@ -97,8 +101,17 @@ static inline u16 call_pnp_bios(u16 func
- 
- 	cpu = get_cpu();
- 	save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_open_kernel(cr0);
-+#endif
-+
- 	get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
- 
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_close_kernel(cr0);
-+#endif
-+
- 	/* On some boxes IRQ's during PnP BIOS calls are deadly.  */
- 	spin_lock_irqsave(&pnp_bios_lock, flags);
- 
-@@ -135,7 +148,16 @@ static inline u16 call_pnp_bios(u16 func
- 			     :"memory");
- 	spin_unlock_irqrestore(&pnp_bios_lock, flags);
- 
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_open_kernel(cr0);
-+#endif
-+
- 	get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_close_kernel(cr0);
-+#endif
-+
- 	put_cpu();
- 
- 	/* If we get here and this is set then the PnP BIOS faulted on us. */
-@@ -469,16 +491,25 @@ int pnp_bios_read_escd(char *data, u32 n
- 	return status;
- }
- 
--void pnpbios_calls_init(union pnp_bios_install_struct *header)
-+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
- {
- 	int i;
- 
-+#ifdef CONFIG_PAX_KERNEXEC
-+	unsigned long cr0;
-+#endif
-+
- 	spin_lock_init(&pnp_bios_lock);
- 	pnp_bios_callpoint.offset = header->fields.pm16offset;
- 	pnp_bios_callpoint.segment = PNP_CS16;
- 
- 	set_base(bad_bios_desc, __va((unsigned long)0x40 << 4));
- 	_set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4));
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_open_kernel(cr0);
-+#endif
-+
- 	for (i = 0; i < NR_CPUS; i++) {
- 		struct desc_struct *gdt = get_cpu_gdt_table(i);
- 		if (!gdt)
-@@ -489,4 +520,9 @@ void pnpbios_calls_init(union pnp_bios_i
- 		set_base(gdt[GDT_ENTRY_PNPBIOS_DS],
- 			 __va(header->fields.pm16dseg));
- 	}
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_close_kernel(cr0);
-+#endif
-+
- }
---- a/drivers/pnp/quirks.c
-+++ b/drivers/pnp/quirks.c
-@@ -127,7 +127,7 @@ static struct pnp_fixup pnp_fixups[] = {
- 	{"CTL0043", quirk_sb16audio_resources},
- 	{"CTL0044", quirk_sb16audio_resources},
- 	{"CTL0045", quirk_sb16audio_resources},
--	{""}
-+	{"", NULL}
- };
- 
- void pnp_fixup_device(struct pnp_dev *dev)
---- a/drivers/pnp/resource.c
-+++ b/drivers/pnp/resource.c
-@@ -345,7 +345,7 @@ int pnp_check_irq(struct pnp_dev *dev, i
- 		return 1;
- 
- 	/* check if the resource is valid */
--	if (*irq < 0 || *irq > 15)
-+	if (*irq > 15)
- 		return 0;
- 
- 	/* check if the resource is reserved */
-@@ -412,7 +412,7 @@ int pnp_check_dma(struct pnp_dev *dev, i
- 		return 1;
- 
- 	/* check if the resource is valid */
--	if (*dma < 0 || *dma == 4 || *dma > 7)
-+	if (*dma == 4 || *dma > 7)
- 		return 0;
- 
- 	/* check if the resource is reserved */
---- a/drivers/scsi/scsi_lib.c
-+++ b/drivers/scsi/scsi_lib.c
-@@ -44,7 +44,7 @@ struct scsi_host_sg_pool {
- #error SCSI_MAX_PHYS_SEGMENTS is too small
- #endif
- 
--#define SP(x) { x, "sgpool-" #x } 
-+#define SP(x) { x, "sgpool-" #x, NULL, NULL }
- static struct scsi_host_sg_pool scsi_sg_pools[] = {
- 	SP(8),
- 	SP(16),
---- a/drivers/scsi/scsi_logging.h
-+++ b/drivers/scsi/scsi_logging.h
-@@ -51,7 +51,7 @@ do {								\
- 		} while (0);					\
- } while (0)
- #else
--#define SCSI_CHECK_LOGGING(SHIFT, BITS, LEVEL, CMD)
-+#define SCSI_CHECK_LOGGING(SHIFT, BITS, LEVEL, CMD) do {} while (0)
- #endif /* CONFIG_SCSI_LOGGING */
- 
- /*
---- a/drivers/serial/8250_pci.c
-+++ b/drivers/serial/8250_pci.c
-@@ -2589,7 +2589,7 @@ static struct pci_device_id serial_pci_t
- 		PCI_ANY_ID, PCI_ANY_ID,
- 		PCI_CLASS_COMMUNICATION_MULTISERIAL << 8,
- 		0xffff00, pbn_default },
--	{ 0, }
-+	{ 0, 0, 0, 0, 0, 0, 0 }
- };
- 
- static struct pci_driver serial_pci_driver = {
---- a/drivers/usb/class/cdc-acm.c
-+++ b/drivers/usb/class/cdc-acm.c
-@@ -1199,7 +1199,7 @@ static struct usb_device_id acm_ids[] = 
- 		USB_CDC_ACM_PROTO_AT_CDMA) },
- 
- 	/* NOTE:  COMM/ACM/0xff is likely MSFT RNDIS ... NOT a modem!! */
--	{ }
-+	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
- };
- 
- MODULE_DEVICE_TABLE (usb, acm_ids);
---- a/drivers/usb/class/usblp.c
-+++ b/drivers/usb/class/usblp.c
-@@ -225,7 +225,7 @@ static const struct quirk_printer_struct
- 	{ 0x0409, 0xf1be, USBLP_QUIRK_BIDIR }, /* NEC Picty800 (HP OEM) */
- 	{ 0x0482, 0x0010, USBLP_QUIRK_BIDIR }, /* Kyocera Mita FS 820, by zut <kernel@zut.de> */
- 	{ 0x04b8, 0x0202, USBLP_QUIRK_BAD_CLASS }, /* Seiko Epson Receipt Printer M129C */
--	{ 0, 0 }
-+	{ 0, 0, 0 }
- };
- 
- static int usblp_wwait(struct usblp *usblp, int nonblock);
-@@ -1376,7 +1376,7 @@ static struct usb_device_id usblp_ids []
- 	{ USB_INTERFACE_INFO(7, 1, 2) },
- 	{ USB_INTERFACE_INFO(7, 1, 3) },
- 	{ USB_DEVICE(0x04b8, 0x0202) },	/* Seiko Epson Receipt Printer M129C */
--	{ }						/* Terminating entry */
-+	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }		/* Terminating entry */
- };
- 
- MODULE_DEVICE_TABLE (usb, usblp_ids);
---- a/drivers/usb/core/hub.c
-+++ b/drivers/usb/core/hub.c
-@@ -2762,7 +2762,7 @@ static struct usb_device_id hub_id_table
-       .bDeviceClass = USB_CLASS_HUB},
-     { .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS,
-       .bInterfaceClass = USB_CLASS_HUB},
--    { }						/* Terminating entry */
-+    { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }						/* Terminating entry */
- };
- 
- MODULE_DEVICE_TABLE (usb, hub_id_table);
---- a/drivers/usb/host/ehci-pci.c
-+++ b/drivers/usb/host/ehci-pci.c
-@@ -377,7 +377,7 @@ static const struct pci_device_id pci_id
- 	PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_EHCI, ~0),
- 	.driver_data =	(unsigned long) &ehci_pci_hc_driver,
- 	},
--	{ /* end: all zeroes */ }
-+	{ 0, 0, 0, 0, 0, 0, 0 }
- };
- MODULE_DEVICE_TABLE(pci, pci_ids);
- 
---- a/drivers/usb/host/uhci-hcd.c
-+++ b/drivers/usb/host/uhci-hcd.c
-@@ -894,7 +894,7 @@ static const struct pci_device_id uhci_p
- 	/* handle any USB UHCI controller */
- 	PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_UHCI, ~0),
- 	.driver_data =	(unsigned long) &uhci_driver,
--	}, { /* end: all zeroes */ }
-+	}, { 0, 0, 0, 0, 0, 0, 0 }
- };
- 
- MODULE_DEVICE_TABLE(pci, uhci_pci_ids);
---- a/drivers/usb/storage/debug.h
-+++ b/drivers/usb/storage/debug.h
-@@ -56,9 +56,9 @@ void usb_stor_show_sense( unsigned char 
- #define US_DEBUGPX(x...) printk( x )
- #define US_DEBUG(x) x 
- #else
--#define US_DEBUGP(x...)
--#define US_DEBUGPX(x...)
--#define US_DEBUG(x)
-+#define US_DEBUGP(x...) do {} while (0)
-+#define US_DEBUGPX(x...) do {} while (0)
-+#define US_DEBUG(x) do {} while (0)
- #endif
- 
- #endif
---- a/drivers/usb/storage/usb.c
-+++ b/drivers/usb/storage/usb.c
-@@ -134,7 +134,7 @@ static struct usb_device_id storage_usb_
- #undef UNUSUAL_DEV
- #undef USUAL_DEV
- 	/* Terminating entry */
--	{ }
-+	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
- };
- 
- MODULE_DEVICE_TABLE (usb, storage_usb_ids);
-@@ -174,7 +174,7 @@ static struct us_unusual_dev us_unusual_
- #	undef USUAL_DEV
- 
- 	/* Terminating entry */
--	{ NULL }
-+	{ NULL, NULL, 0, 0, NULL }
- };
- 
- 
---- a/drivers/video/fbcmap.c
-+++ b/drivers/video/fbcmap.c
-@@ -251,8 +251,7 @@ int fb_set_user_cmap(struct fb_cmap_user
- 	int rc, size = cmap->len * sizeof(u16);
- 	struct fb_cmap umap;
- 
--	if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
--			        !info->fbops->fb_setcmap))
-+	if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap)
- 		return -EINVAL;
- 
- 	memset(&umap, 0, sizeof(struct fb_cmap));
---- a/drivers/video/fbmem.c
-+++ b/drivers/video/fbmem.c
-@@ -394,7 +394,7 @@ static void fb_do_show_logo(struct fb_in
- 			image->dx += image->width + 8;
- 		}
- 	} else if (rotate == FB_ROTATE_UD) {
--		for (x = 0; x < num && image->dx >= 0; x++) {
-+		for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
- 			info->fbops->fb_imageblit(info, image);
- 			image->dx -= image->width + 8;
- 		}
-@@ -406,7 +406,7 @@ static void fb_do_show_logo(struct fb_in
- 			image->dy += image->height + 8;
- 		}
- 	} else if (rotate == FB_ROTATE_CCW) {
--		for (x = 0; x < num && image->dy >= 0; x++) {
-+		for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
- 			info->fbops->fb_imageblit(info, image);
- 			image->dy -= image->height + 8;
- 		}
-@@ -1057,9 +1057,9 @@ fb_ioctl(struct inode *inode, struct fil
- 	case FBIOPUT_CON2FBMAP:
- 		if (copy_from_user(&con2fb, argp, sizeof(con2fb)))
- 			return - EFAULT;
--		if (con2fb.console < 0 || con2fb.console > MAX_NR_CONSOLES)
-+		if (con2fb.console > MAX_NR_CONSOLES)
- 		    return -EINVAL;
--		if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
-+		if (con2fb.framebuffer >= FB_MAX)
- 		    return -EINVAL;
- #ifdef CONFIG_KMOD
- 		if (!registered_fb[con2fb.framebuffer])
---- a/drivers/video/fbmon.c
-+++ b/drivers/video/fbmon.c
-@@ -45,7 +45,7 @@
- #ifdef DEBUG
- #define DPRINTK(fmt, args...) printk(fmt,## args)
- #else
--#define DPRINTK(fmt, args...)
-+#define DPRINTK(fmt, args...) do {} while (0)
- #endif
- 
- #define FBMON_FIX_HEADER  1
---- a/drivers/video/i810/i810_accel.c
-+++ b/drivers/video/i810/i810_accel.c
-@@ -73,6 +73,7 @@ static inline int wait_for_space(struct 
- 		}
- 	}
- 	printk("ringbuffer lockup!!!\n");
-+	printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
- 	i810_report_error(mmio); 
- 	par->dev_flags |= LOCKUP;
- 	info->pixmap.scan_align = 1;
---- a/drivers/video/i810/i810_main.c
-+++ b/drivers/video/i810/i810_main.c
-@@ -120,7 +120,7 @@ static struct pci_device_id i810fb_pci_t
- 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
- 	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC,
- 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
--	{ 0 },
-+	{ 0, 0, 0, 0, 0, 0, 0 },
- };
- 
- static struct pci_driver i810fb_driver = {
-@@ -1509,7 +1509,7 @@ static int i810fb_cursor(struct fb_info 
- 		int size = ((cursor->image.width + 7) >> 3) *
- 			cursor->image.height;
- 		int i;
--		u8 *data = kmalloc(64 * 8, GFP_ATOMIC);
-+		u8 *data = kmalloc(64 * 8, GFP_KERNEL);
- 
- 		if (data == NULL)
- 			return -ENOMEM;
---- a/drivers/video/modedb.c
-+++ b/drivers/video/modedb.c
-@@ -37,228 +37,228 @@ static const struct fb_videomode modedb[
-     {
- 	/* 640x400 @ 70 Hz, 31.5 kHz hsync */
- 	NULL, 70, 640, 400, 39721, 40, 24, 39, 9, 96, 2,
--	0, FB_VMODE_NONINTERLACED
-+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 640x480 @ 60 Hz, 31.5 kHz hsync */
- 	NULL, 60, 640, 480, 39721, 40, 24, 32, 11, 96, 2,
--	0, FB_VMODE_NONINTERLACED
-+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 800x600 @ 56 Hz, 35.15 kHz hsync */
- 	NULL, 56, 800, 600, 27777, 128, 24, 22, 1, 72, 2,
--	0, FB_VMODE_NONINTERLACED
-+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 1024x768 @ 87 Hz interlaced, 35.5 kHz hsync */
- 	NULL, 87, 1024, 768, 22271, 56, 24, 33, 8, 160, 8,
--	0, FB_VMODE_INTERLACED
-+	0, FB_VMODE_INTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 640x400 @ 85 Hz, 37.86 kHz hsync */
- 	NULL, 85, 640, 400, 31746, 96, 32, 41, 1, 64, 3,
--	FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-+	FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 640x480 @ 72 Hz, 36.5 kHz hsync */
- 	NULL, 72, 640, 480, 31746, 144, 40, 30, 8, 40, 3,
--	0, FB_VMODE_NONINTERLACED
-+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 640x480 @ 75 Hz, 37.50 kHz hsync */
- 	NULL, 75, 640, 480, 31746, 120, 16, 16, 1, 64, 3,
--	0, FB_VMODE_NONINTERLACED
-+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 800x600 @ 60 Hz, 37.8 kHz hsync */
- 	NULL, 60, 800, 600, 25000, 88, 40, 23, 1, 128, 4,
--	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-+	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 640x480 @ 85 Hz, 43.27 kHz hsync */
- 	NULL, 85, 640, 480, 27777, 80, 56, 25, 1, 56, 3,
--	0, FB_VMODE_NONINTERLACED
-+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 1152x864 @ 89 Hz interlaced, 44 kHz hsync */
- 	NULL, 69, 1152, 864, 15384, 96, 16, 110, 1, 216, 10,
--	0, FB_VMODE_INTERLACED
-+	0, FB_VMODE_INTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 800x600 @ 72 Hz, 48.0 kHz hsync */
- 	NULL, 72, 800, 600, 20000, 64, 56, 23, 37, 120, 6,
--	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-+	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 1024x768 @ 60 Hz, 48.4 kHz hsync */
- 	NULL, 60, 1024, 768, 15384, 168, 8, 29, 3, 144, 6,
--	0, FB_VMODE_NONINTERLACED
-+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 640x480 @ 100 Hz, 53.01 kHz hsync */
- 	NULL, 100, 640, 480, 21834, 96, 32, 36, 8, 96, 6,
--	0, FB_VMODE_NONINTERLACED
-+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 1152x864 @ 60 Hz, 53.5 kHz hsync */
- 	NULL, 60, 1152, 864, 11123, 208, 64, 16, 4, 256, 8,
--	0, FB_VMODE_NONINTERLACED
-+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 800x600 @ 85 Hz, 55.84 kHz hsync */
- 	NULL, 85, 800, 600, 16460, 160, 64, 36, 16, 64, 5,
--	0, FB_VMODE_NONINTERLACED
-+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 1024x768 @ 70 Hz, 56.5 kHz hsync */
- 	NULL, 70, 1024, 768, 13333, 144, 24, 29, 3, 136, 6,
--	0, FB_VMODE_NONINTERLACED
-+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 1280x1024 @ 87 Hz interlaced, 51 kHz hsync */
- 	NULL, 87, 1280, 1024, 12500, 56, 16, 128, 1, 216, 12,
--	0, FB_VMODE_INTERLACED
-+	0, FB_VMODE_INTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 800x600 @ 100 Hz, 64.02 kHz hsync */
- 	NULL, 100, 800, 600, 14357, 160, 64, 30, 4, 64, 6,
--	0, FB_VMODE_NONINTERLACED
-+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 1024x768 @ 76 Hz, 62.5 kHz hsync */
- 	NULL, 76, 1024, 768, 11764, 208, 8, 36, 16, 120, 3,
--	0, FB_VMODE_NONINTERLACED
-+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 1152x864 @ 70 Hz, 62.4 kHz hsync */
- 	NULL, 70, 1152, 864, 10869, 106, 56, 20, 1, 160, 10,
--	0, FB_VMODE_NONINTERLACED
-+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 1280x1024 @ 61 Hz, 64.2 kHz hsync */
- 	NULL, 61, 1280, 1024, 9090, 200, 48, 26, 1, 184, 3,
--	0, FB_VMODE_NONINTERLACED
-+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 1400x1050 @ 60Hz, 63.9 kHz hsync */
- 	NULL, 68, 1400, 1050, 9259, 136, 40, 13, 1, 112, 3,
--	0, FB_VMODE_NONINTERLACED   	
-+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 1400x1050 @ 75,107 Hz, 82,392 kHz +hsync +vsync*/
- 	NULL, 75, 1400, 1050, 9271, 120, 56, 13, 0, 112, 3,
--	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-+	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 1400x1050 @ 60 Hz, ? kHz +hsync +vsync*/
-         NULL, 60, 1400, 1050, 9259, 128, 40, 12, 0, 112, 3,
--	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-+	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 1024x768 @ 85 Hz, 70.24 kHz hsync */
- 	NULL, 85, 1024, 768, 10111, 192, 32, 34, 14, 160, 6,
--	0, FB_VMODE_NONINTERLACED
-+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 1152x864 @ 78 Hz, 70.8 kHz hsync */
- 	NULL, 78, 1152, 864, 9090, 228, 88, 32, 0, 84, 12,
--	0, FB_VMODE_NONINTERLACED
-+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 1280x1024 @ 70 Hz, 74.59 kHz hsync */
- 	NULL, 70, 1280, 1024, 7905, 224, 32, 28, 8, 160, 8,
--	0, FB_VMODE_NONINTERLACED
-+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 1600x1200 @ 60Hz, 75.00 kHz hsync */
- 	NULL, 60, 1600, 1200, 6172, 304, 64, 46, 1, 192, 3,
--	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-+	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 1152x864 @ 84 Hz, 76.0 kHz hsync */
- 	NULL, 84, 1152, 864, 7407, 184, 312, 32, 0, 128, 12,
--	0, FB_VMODE_NONINTERLACED
-+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 1280x1024 @ 74 Hz, 78.85 kHz hsync */
- 	NULL, 74, 1280, 1024, 7407, 256, 32, 34, 3, 144, 3,
--	0, FB_VMODE_NONINTERLACED
-+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 1024x768 @ 100Hz, 80.21 kHz hsync */
- 	NULL, 100, 1024, 768, 8658, 192, 32, 21, 3, 192, 10,
--	0, FB_VMODE_NONINTERLACED
-+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 1280x1024 @ 76 Hz, 81.13 kHz hsync */
- 	NULL, 76, 1280, 1024, 7407, 248, 32, 34, 3, 104, 3,
--	0, FB_VMODE_NONINTERLACED
-+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 1600x1200 @ 70 Hz, 87.50 kHz hsync */
- 	NULL, 70, 1600, 1200, 5291, 304, 64, 46, 1, 192, 3,
--	0, FB_VMODE_NONINTERLACED
-+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 1152x864 @ 100 Hz, 89.62 kHz hsync */
- 	NULL, 100, 1152, 864, 7264, 224, 32, 17, 2, 128, 19,
--	0, FB_VMODE_NONINTERLACED
-+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 1280x1024 @ 85 Hz, 91.15 kHz hsync */
- 	NULL, 85, 1280, 1024, 6349, 224, 64, 44, 1, 160, 3,
--	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-+	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 1600x1200 @ 75 Hz, 93.75 kHz hsync */
- 	NULL, 75, 1600, 1200, 4938, 304, 64, 46, 1, 192, 3,
--	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-+	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 1680x1050 @ 60 Hz, 65.191 kHz hsync */
- 	NULL, 60, 1680, 1050, 6848, 280, 104, 30, 3, 176, 6,
--	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-+	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 1600x1200 @ 85 Hz, 105.77 kHz hsync */
- 	NULL, 85, 1600, 1200, 4545, 272, 16, 37, 4, 192, 3,
--	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-+	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 1280x1024 @ 100 Hz, 107.16 kHz hsync */
- 	NULL, 100, 1280, 1024, 5502, 256, 32, 26, 7, 128, 15,
--	0, FB_VMODE_NONINTERLACED
-+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 1800x1440 @ 64Hz, 96.15 kHz hsync  */
- 	NULL, 64, 1800, 1440, 4347, 304, 96, 46, 1, 192, 3,
--	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-+	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 1800x1440 @ 70Hz, 104.52 kHz hsync  */
- 	NULL, 70, 1800, 1440, 4000, 304, 96, 46, 1, 192, 3,
--	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-+	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 512x384 @ 78 Hz, 31.50 kHz hsync */
- 	NULL, 78, 512, 384, 49603, 48, 16, 16, 1, 64, 3,
--	0, FB_VMODE_NONINTERLACED
-+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 512x384 @ 85 Hz, 34.38 kHz hsync */
- 	NULL, 85, 512, 384, 45454, 48, 16, 16, 1, 64, 3,
--	0, FB_VMODE_NONINTERLACED
-+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 320x200 @ 70 Hz, 31.5 kHz hsync, 8:5 aspect ratio */
- 	NULL, 70, 320, 200, 79440, 16, 16, 20, 4, 48, 1,
--	0, FB_VMODE_DOUBLE
-+	0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 320x240 @ 60 Hz, 31.5 kHz hsync, 4:3 aspect ratio */
- 	NULL, 60, 320, 240, 79440, 16, 16, 16, 5, 48, 1,
--	0, FB_VMODE_DOUBLE
-+	0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 320x240 @ 72 Hz, 36.5 kHz hsync */
- 	NULL, 72, 320, 240, 63492, 16, 16, 16, 4, 48, 2,
--	0, FB_VMODE_DOUBLE
-+	0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 400x300 @ 56 Hz, 35.2 kHz hsync, 4:3 aspect ratio */
- 	NULL, 56, 400, 300, 55555, 64, 16, 10, 1, 32, 1,
--	0, FB_VMODE_DOUBLE
-+	0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 400x300 @ 60 Hz, 37.8 kHz hsync */
- 	NULL, 60, 400, 300, 50000, 48, 16, 11, 1, 64, 2,
--	0, FB_VMODE_DOUBLE
-+	0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 400x300 @ 72 Hz, 48.0 kHz hsync */
- 	NULL, 72, 400, 300, 40000, 32, 24, 11, 19, 64, 3,
--	0, FB_VMODE_DOUBLE
-+	0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 480x300 @ 56 Hz, 35.2 kHz hsync, 8:5 aspect ratio */
- 	NULL, 56, 480, 300, 46176, 80, 16, 10, 1, 40, 1,
--	0, FB_VMODE_DOUBLE
-+	0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 480x300 @ 60 Hz, 37.8 kHz hsync */
- 	NULL, 60, 480, 300, 41858, 56, 16, 11, 1, 80, 2,
--	0, FB_VMODE_DOUBLE
-+	0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 480x300 @ 63 Hz, 39.6 kHz hsync */
- 	NULL, 63, 480, 300, 40000, 56, 16, 11, 1, 80, 2,
--	0, FB_VMODE_DOUBLE
-+	0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 480x300 @ 72 Hz, 48.0 kHz hsync */
- 	NULL, 72, 480, 300, 33386, 40, 24, 11, 19, 80, 3,
--	0, FB_VMODE_DOUBLE
-+	0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 1920x1200 @ 60 Hz, 74.5 Khz hsync */
- 	NULL, 60, 1920, 1200, 5177, 128, 336, 1, 38, 208, 3,
- 	FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
--	FB_VMODE_NONINTERLACED
-+	FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 1152x768, 60 Hz, PowerBook G4 Titanium I and II */
- 	NULL, 60, 1152, 768, 15386, 158, 26, 29, 3, 136, 6,
--	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
-+	FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     }, {
- 	/* 1366x768, 60 Hz, 47.403 kHz hsync, WXGA 16:9 aspect ratio */
- 	NULL, 60, 1366, 768, 13806, 120, 10, 14, 3, 32, 5,
--	0, FB_VMODE_NONINTERLACED
-+	0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
-     },
- };
- 
---- a/drivers/video/vesafb.c
-+++ b/drivers/video/vesafb.c
-@@ -9,6 +9,7 @@
-  */
- 
- #include <linux/module.h>
-+#include <linux/moduleloader.h>
- #include <linux/kernel.h>
- #include <linux/errno.h>
- #include <linux/string.h>
-@@ -224,6 +225,7 @@ static int __init vesafb_probe(struct pl
- 	unsigned int size_vmode;
- 	unsigned int size_remap;
- 	unsigned int size_total;
-+	void *pmi_code = NULL;
- 
- 	if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
- 		return -ENODEV;
-@@ -266,10 +268,6 @@ static int __init vesafb_probe(struct pl
- 		size_remap = size_total;
- 	vesafb_fix.smem_len = size_remap;
- 
--#ifndef __i386__
--	screen_info.vesapm_seg = 0;
--#endif
--
- 	if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
- 		printk(KERN_WARNING
- 		       "vesafb: cannot reserve video memory at 0x%lx\n",
-@@ -302,9 +300,21 @@ static int __init vesafb_probe(struct pl
- 	printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
- 	       vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
- 
-+#ifdef __i386__
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pmi_code = module_alloc_exec(screen_info.vesapm_size);
-+	if (!pmi_code)
-+#else
-+	if (0)
-+#endif
-+
-+#endif
-+	screen_info.vesapm_seg = 0;
-+
- 	if (screen_info.vesapm_seg) {
--		printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
--		       screen_info.vesapm_seg,screen_info.vesapm_off);
-+		printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
-+		       screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
- 	}
- 
- 	if (screen_info.vesapm_seg < 0xc000)
-@@ -312,9 +322,29 @@ static int __init vesafb_probe(struct pl
- 
- 	if (ypan || pmi_setpal) {
- 		unsigned short *pmi_base;
--		pmi_base  = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
--		pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
--		pmi_pal   = (void*)((char*)pmi_base + pmi_base[2]);
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+		unsigned long cr0;
-+#endif
-+
-+		pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+		pax_open_kernel(cr0);
-+		memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
-+		pax_close_kernel(cr0);
-+#else
-+		pmi_code = pmi_base;
-+#endif
-+
-+		pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
-+		pmi_pal   = (void*)((char*)pmi_code + pmi_base[2]);
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+		pmi_start -= __KERNEL_TEXT_OFFSET;
-+		pmi_pal -= __KERNEL_TEXT_OFFSET;
-+#endif
-+
- 		printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
- 		if (pmi_base[3]) {
- 			printk(KERN_INFO "vesafb: pmi: ports = ");
-@@ -456,6 +486,11 @@ static int __init vesafb_probe(struct pl
- 	       info->node, info->fix.id);
- 	return 0;
- err:
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	module_free_exec(NULL, pmi_code);
-+#endif
-+
- 	if (info->screen_base)
- 		iounmap(info->screen_base);
- 	framebuffer_release(info);
---- a/fs/binfmt_aout.c
-+++ b/fs/binfmt_aout.c
-@@ -24,6 +24,7 @@
- #include <linux/binfmts.h>
- #include <linux/personality.h>
- #include <linux/init.h>
-+#include <linux/grsecurity.h>
- 
- #include <asm/system.h>
- #include <asm/uaccess.h>
-@@ -123,10 +124,12 @@ static int aout_core_dump(long signr, st
- /* If the size of the dump file exceeds the rlimit, then see what would happen
-    if we wrote the stack, but not the data area.  */
- #ifdef __sparc__
-+	gr_learn_resource(current, RLIMIT_CORE, dump.u_dsize+dump.u_ssize, 1);
- 	if ((dump.u_dsize+dump.u_ssize) >
- 	    current->signal->rlim[RLIMIT_CORE].rlim_cur)
- 		dump.u_dsize = 0;
- #else
-+	gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize+dump.u_ssize+1) * PAGE_SIZE, 1);
- 	if ((dump.u_dsize+dump.u_ssize+1) * PAGE_SIZE >
- 	    current->signal->rlim[RLIMIT_CORE].rlim_cur)
- 		dump.u_dsize = 0;
-@@ -134,10 +137,12 @@ static int aout_core_dump(long signr, st
- 
- /* Make sure we have enough room to write the stack and data areas. */
- #ifdef __sparc__
-+	gr_learn_resource(current, RLIMIT_CORE, dump.u_ssize, 1);
- 	if ((dump.u_ssize) >
- 	    current->signal->rlim[RLIMIT_CORE].rlim_cur)
- 		dump.u_ssize = 0;
- #else
-+	gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize+1) * PAGE_SIZE, 1);
- 	if ((dump.u_ssize+1) * PAGE_SIZE >
- 	    current->signal->rlim[RLIMIT_CORE].rlim_cur)
- 		dump.u_ssize = 0;
-@@ -294,6 +299,8 @@ static int load_aout_binary(struct linux
- 	rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
- 	if (rlim >= RLIM_INFINITY)
- 		rlim = ~0;
-+
-+	gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
- 	if (ex.a_data + ex.a_bss > rlim)
- 		return -ENOMEM;
- 
-@@ -326,6 +333,28 @@ static int load_aout_binary(struct linux
- 	current->mm->mmap = NULL;
- 	compute_creds(bprm);
-  	current->flags &= ~PF_FORKNOEXEC;
-+
-+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
-+	current->mm->pax_flags = 0UL;
-+#endif
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+	if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
-+		current->mm->pax_flags |= MF_PAX_PAGEEXEC;
-+
-+#ifdef CONFIG_PAX_EMUTRAMP
-+		if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
-+			current->mm->pax_flags |= MF_PAX_EMUTRAMP;
-+#endif
-+
-+#ifdef CONFIG_PAX_MPROTECT
-+		if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
-+			current->mm->pax_flags |= MF_PAX_MPROTECT;
-+#endif
-+
-+	}
-+#endif
-+
- #ifdef __sparc__
- 	if (N_MAGIC(ex) == NMAGIC) {
- 		loff_t pos = fd_offset;
-@@ -421,7 +450,7 @@ static int load_aout_binary(struct linux
- 
- 		down_write(&current->mm->mmap_sem);
-  		error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
--				PROT_READ | PROT_WRITE | PROT_EXEC,
-+				PROT_READ | PROT_WRITE,
- 				MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
- 				fd_offset + ex.a_text);
- 		up_write(&current->mm->mmap_sem);
---- a/fs/binfmt_elf.c
-+++ b/fs/binfmt_elf.c
-@@ -39,10 +39,21 @@
- #include <linux/random.h>
- #include <linux/elf.h>
- #include <linux/utsname.h>
-+#include <linux/grsecurity.h>
-+
- #include <asm/uaccess.h>
- #include <asm/param.h>
- #include <asm/page.h>
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+#include <asm/desc.h>
-+#endif
-+
-+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
-+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
-+EXPORT_SYMBOL(pax_set_initial_flags_func);
-+#endif
-+
- static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs);
- static int load_elf_library(struct file *);
- static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int);
-@@ -84,6 +95,8 @@ static struct linux_binfmt elf_format = 
- 
- static int set_brk(unsigned long start, unsigned long end)
- {
-+	unsigned long e = end;
-+
- 	start = ELF_PAGEALIGN(start);
- 	end = ELF_PAGEALIGN(end);
- 	if (end > start) {
-@@ -94,7 +107,7 @@ static int set_brk(unsigned long start, 
- 		if (BAD_ADDR(addr))
- 			return addr;
- 	}
--	current->mm->start_brk = current->mm->brk = end;
-+	current->mm->start_brk = current->mm->brk = e;
- 	return 0;
- }
- 
-@@ -325,10 +338,9 @@ static unsigned long load_elf_interp(str
- {
- 	struct elf_phdr *elf_phdata;
- 	struct elf_phdr *eppnt;
--	unsigned long load_addr = 0;
--	int load_addr_set = 0;
-+	unsigned long load_addr = 0, min_addr, max_addr, task_size = TASK_SIZE;
- 	unsigned long last_bss = 0, elf_bss = 0;
--	unsigned long error = ~0UL;
-+	unsigned long error = -EINVAL;
- 	int retval, i, size;
- 
- 	/* First of all, some simple consistency checks */
-@@ -367,66 +379,86 @@ static unsigned long load_elf_interp(str
- 		goto out_close;
- 	}
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
-+		task_size = SEGMEXEC_TASK_SIZE;
-+#endif
-+
- 	eppnt = elf_phdata;
-+	min_addr = task_size;
-+	max_addr = 0;
-+	error = -ENOMEM;
-+
- 	for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
--		if (eppnt->p_type == PT_LOAD) {
--			int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
--			int elf_prot = 0;
--			unsigned long vaddr = 0;
--			unsigned long k, map_addr;
--
--			if (eppnt->p_flags & PF_R)
--		    		elf_prot = PROT_READ;
--			if (eppnt->p_flags & PF_W)
--				elf_prot |= PROT_WRITE;
--			if (eppnt->p_flags & PF_X)
--				elf_prot |= PROT_EXEC;
--			vaddr = eppnt->p_vaddr;
--			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
--				elf_type |= MAP_FIXED;
--
--			map_addr = elf_map(interpreter, load_addr + vaddr,
--					   eppnt, elf_prot, elf_type);
--			error = map_addr;
--			if (BAD_ADDR(map_addr))
--				goto out_close;
--
--			if (!load_addr_set &&
--			    interp_elf_ex->e_type == ET_DYN) {
--				load_addr = map_addr - ELF_PAGESTART(vaddr);
--				load_addr_set = 1;
--			}
-+		if (eppnt->p_type != PT_LOAD)
-+			continue;
- 
--			/*
--			 * Check to see if the section's size will overflow the
--			 * allowed task size. Note that p_filesz must always be
--			 * <= p_memsize so it's only necessary to check p_memsz.
--			 */
--			k = load_addr + eppnt->p_vaddr;
--			if (BAD_ADDR(k) ||
--			    eppnt->p_filesz > eppnt->p_memsz ||
--			    eppnt->p_memsz > TASK_SIZE ||
--			    TASK_SIZE - eppnt->p_memsz < k) {
--				error = -ENOMEM;
--				goto out_close;
--			}
-+		/*
-+		 * Check to see if the section's size will overflow the
-+		 * allowed task size. Note that p_filesz must always be
-+		 * <= p_memsize so it is only necessary to check p_memsz.
-+		 */
-+		if (eppnt->p_filesz > eppnt->p_memsz || eppnt->p_vaddr >= eppnt->p_vaddr + eppnt->p_memsz)
-+			goto out_close;
- 
--			/*
--			 * Find the end of the file mapping for this phdr, and
--			 * keep track of the largest address we see for this.
--			 */
--			k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
--			if (k > elf_bss)
--				elf_bss = k;
-+		if (min_addr > ELF_PAGESTART(eppnt->p_vaddr))
-+			min_addr = ELF_PAGESTART(eppnt->p_vaddr);
-+		if (max_addr < ELF_PAGEALIGN(eppnt->p_vaddr + eppnt->p_memsz))
-+			max_addr = ELF_PAGEALIGN(eppnt->p_vaddr + eppnt->p_memsz);
-+	}
-+	if (min_addr >= max_addr || max_addr > task_size)
-+		goto out_close;
- 
--			/*
--			 * Do the same thing for the memory mapping - between
--			 * elf_bss and last_bss is the bss section.
--			 */
--			k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
--			if (k > last_bss)
--				last_bss = k;
--		}
-+	if (interp_elf_ex->e_type == ET_DYN) {
-+		load_addr = get_unmapped_area(interpreter, 0, max_addr - min_addr, 0, MAP_PRIVATE | MAP_EXECUTABLE);
-+
-+		if (load_addr >= task_size)
-+			goto out_close;
-+
-+		load_addr -= min_addr;
-+	}
-+
-+	eppnt = elf_phdata;
-+	for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
-+		int elf_type = MAP_PRIVATE | MAP_DENYWRITE | MAP_FIXED;
-+		int elf_prot = 0;
-+		unsigned long vaddr = 0;
-+		unsigned long k, map_addr;
-+
-+		if (eppnt->p_type != PT_LOAD)
-+			continue;
-+
-+		if (eppnt->p_flags & PF_R)
-+	    		elf_prot = PROT_READ;
-+		if (eppnt->p_flags & PF_W)
-+			elf_prot |= PROT_WRITE;
-+		if (eppnt->p_flags & PF_X)
-+			elf_prot |= PROT_EXEC;
-+		vaddr = eppnt->p_vaddr;
-+
-+		map_addr = elf_map(interpreter, load_addr + vaddr,
-+				   eppnt, elf_prot, elf_type);
-+		error = map_addr;
-+		if (BAD_ADDR(map_addr))
-+			goto out_close;
-+
-+		k = load_addr + eppnt->p_vaddr;
-+
-+		/*
-+		 * Find the end of the file mapping for this phdr, and
-+		 * keep track of the largest address we see for this.
-+		 */
-+		k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
-+		if (k > elf_bss)
-+			elf_bss = k;
-+
-+		/*
-+		 * Do the same thing for the memory mapping - between
-+		 * elf_bss and last_bss is the bss section.
-+		 */
-+		k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
-+		if (k > last_bss)
-+			last_bss = k;
- 	}
- 
- 	/*
-@@ -454,6 +486,8 @@ static unsigned long load_elf_interp(str
- 
- 	*interp_load_addr = load_addr;
- 	error = ((unsigned long)interp_elf_ex->e_entry) + load_addr;
-+	if (BAD_ADDR(error))
-+		error = -EFAULT;
- 
- out_close:
- 	kfree(elf_phdata);
-@@ -464,7 +498,7 @@ out:
- static unsigned long load_aout_interp(struct exec *interp_ex,
- 		struct file *interpreter)
- {
--	unsigned long text_data, elf_entry = ~0UL;
-+	unsigned long text_data, elf_entry = -EINVAL;
- 	char __user * addr;
- 	loff_t offset;
- 
-@@ -507,6 +541,177 @@ out:
- 	return elf_entry;
- }
- 
-+#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
-+static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
-+{
-+	unsigned long pax_flags = 0UL;
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+	if (elf_phdata->p_flags & PF_PAGEEXEC)
-+		pax_flags |= MF_PAX_PAGEEXEC;
-+#endif
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if (elf_phdata->p_flags & PF_SEGMEXEC)
-+		pax_flags |= MF_PAX_SEGMEXEC;
-+#endif
-+
-+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
-+	if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
-+		if (nx_enabled)
-+			pax_flags &= ~MF_PAX_SEGMEXEC;
-+		else
-+			pax_flags &= ~MF_PAX_PAGEEXEC;
-+	}
-+#endif
-+
-+#ifdef CONFIG_PAX_EMUTRAMP
-+	if (elf_phdata->p_flags & PF_EMUTRAMP)
-+		pax_flags |= MF_PAX_EMUTRAMP;
-+#endif
-+
-+#ifdef CONFIG_PAX_MPROTECT
-+	if (elf_phdata->p_flags & PF_MPROTECT)
-+		pax_flags |= MF_PAX_MPROTECT;
-+#endif
-+
-+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
-+	if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
-+		pax_flags |= MF_PAX_RANDMMAP;
-+#endif
-+
-+	return pax_flags;
-+}
-+#endif
-+
-+#ifdef CONFIG_PAX_PT_PAX_FLAGS
-+static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
-+{
-+	unsigned long pax_flags = 0UL;
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+	if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
-+		pax_flags |= MF_PAX_PAGEEXEC;
-+#endif
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
-+		pax_flags |= MF_PAX_SEGMEXEC;
-+#endif
-+
-+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
-+	if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
-+		if (nx_enabled)
-+			pax_flags &= ~MF_PAX_SEGMEXEC;
-+		else
-+			pax_flags &= ~MF_PAX_PAGEEXEC;
-+	}
-+#endif
-+
-+#ifdef CONFIG_PAX_EMUTRAMP
-+	if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
-+		pax_flags |= MF_PAX_EMUTRAMP;
-+#endif
-+
-+#ifdef CONFIG_PAX_MPROTECT
-+	if (!(elf_phdata->p_flags & PF_NOMPROTECT))
-+		pax_flags |= MF_PAX_MPROTECT;
-+#endif
-+
-+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
-+	if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
-+		pax_flags |= MF_PAX_RANDMMAP;
-+#endif
-+
-+	return pax_flags;
-+}
-+#endif
-+
-+#ifdef CONFIG_PAX_EI_PAX
-+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
-+{
-+	unsigned long pax_flags = 0UL;
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+	if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
-+		pax_flags |= MF_PAX_PAGEEXEC;
-+#endif
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
-+		pax_flags |= MF_PAX_SEGMEXEC;
-+#endif
-+
-+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
-+	if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
-+		if (nx_enabled)
-+			pax_flags &= ~MF_PAX_SEGMEXEC;
-+		else
-+			pax_flags &= ~MF_PAX_PAGEEXEC;
-+	}
-+#endif
-+
-+#ifdef CONFIG_PAX_EMUTRAMP
-+	if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
-+		pax_flags |= MF_PAX_EMUTRAMP;
-+#endif
-+
-+#ifdef CONFIG_PAX_MPROTECT
-+	if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
-+		pax_flags |= MF_PAX_MPROTECT;
-+#endif
-+
-+#ifdef CONFIG_PAX_ASLR
-+	if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
-+		pax_flags |= MF_PAX_RANDMMAP;
-+#endif
-+
-+	return pax_flags;
-+}
-+#endif
-+
-+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
-+static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
-+{
-+	unsigned long pax_flags = 0UL;
-+
-+#ifdef CONFIG_PAX_PT_PAX_FLAGS
-+	unsigned long i;
-+#endif
-+
-+#ifdef CONFIG_PAX_EI_PAX
-+	pax_flags = pax_parse_ei_pax(elf_ex);
-+#endif
-+
-+#ifdef CONFIG_PAX_PT_PAX_FLAGS
-+	for (i = 0UL; i < elf_ex->e_phnum; i++)
-+		if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
-+			if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
-+			    ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
-+			    ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
-+			    ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
-+			    ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
-+				return -EINVAL;
-+
-+#ifdef CONFIG_PAX_SOFTMODE
-+			if (pax_softmode)
-+				pax_flags = pax_parse_softmode(&elf_phdata[i]);
-+			else
-+#endif
-+
-+				pax_flags = pax_parse_hardmode(&elf_phdata[i]);
-+			break;
-+		}
-+#endif
-+
-+	if (0 > pax_check_flags(&pax_flags))
-+		return -EINVAL;
-+
-+	current->mm->pax_flags = pax_flags;
-+	return 0;
-+}
-+#endif
-+
- /*
-  * These are the functions used to load ELF style executables and shared
-  * libraries.  There is no binary dependent code anywhere else.
-@@ -544,7 +749,7 @@ static int load_elf_binary(struct linux_
- 	char * elf_interpreter = NULL;
- 	unsigned int interpreter_type = INTERPRETER_NONE;
- 	unsigned char ibcs2_interpreter = 0;
--	unsigned long error;
-+	unsigned long error = 0;
- 	struct elf_phdr *elf_ppnt, *elf_phdata;
- 	unsigned long elf_bss, elf_brk;
- 	int elf_exec_fileno;
-@@ -556,12 +761,12 @@ static int load_elf_binary(struct linux_
- 	char passed_fileno[6];
- 	struct files_struct *files;
- 	int executable_stack = EXSTACK_DEFAULT;
--	unsigned long def_flags = 0;
- 	struct {
- 		struct elfhdr elf_ex;
- 		struct elfhdr interp_elf_ex;
-   		struct exec interp_ex;
- 	} *loc;
-+	unsigned long task_size = TASK_SIZE;
- 
- 	loc = kmalloc(sizeof(*loc), GFP_KERNEL);
- 	if (!loc) {
-@@ -788,14 +993,89 @@ static int load_elf_binary(struct linux_
- 
- 	/* OK, This is the point of no return */
- 	current->flags &= ~PF_FORKNOEXEC;
--	current->mm->def_flags = def_flags;
-+
-+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
-+	current->mm->pax_flags = 0UL;
-+#endif
-+
-+#ifdef CONFIG_PAX_DLRESOLVE
-+	current->mm->call_dl_resolve = 0UL;
-+#endif
-+
-+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
-+	current->mm->call_syscall = 0UL;
-+#endif
-+
-+#ifdef CONFIG_PAX_ASLR
-+	current->mm->delta_mmap = 0UL;
-+	current->mm->delta_stack = 0UL;
-+#endif
-+
-+	current->mm->def_flags = 0;
-+
-+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
-+	if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
-+		send_sig(SIGKILL, current, 0);
-+		goto out_free_dentry;
-+	}
-+#endif
-+
-+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
-+	pax_set_initial_flags(bprm);
-+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
-+	if (pax_set_initial_flags_func)
-+		(pax_set_initial_flags_func)(bprm);
-+#endif
-+
-+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
-+	if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
-+		current->mm->context.user_cs_limit = PAGE_SIZE;
-+		current->mm->def_flags |= VM_PAGEEXEC;
-+	}
-+#endif
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
-+		current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
-+		current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
-+		task_size = SEGMEXEC_TASK_SIZE;
-+	}
-+#endif
-+
-+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
-+	if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
-+		set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
-+		put_cpu_no_resched();
-+	}
-+#endif
-+
-+#ifdef CONFIG_PAX_ASLR
-+	if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
-+		current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
-+		current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
-+	}
-+#endif
-+
-+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
-+	if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
-+		executable_stack = EXSTACK_DEFAULT;
-+#endif
- 
- 	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
- 	   may depend on the personality.  */
- 	SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
-+
-+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
-+	if (!(current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
-+#endif
-+
- 	if (elf_read_implies_exec(loc->elf_ex, executable_stack))
- 		current->personality |= READ_IMPLIES_EXEC;
- 
-+#ifdef CONFIG_PAX_ASLR
-+	if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
-+#endif
-+
- 	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
- 		current->flags |= PF_RANDOMIZE;
- 	arch_pick_mmap_layout(current->mm);
-@@ -871,6 +1151,20 @@ static int load_elf_binary(struct linux_
- 			 * might try to exec.  This is because the brk will
- 			 * follow the loader, and is not movable.  */
- 			load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+			/* PaX: randomize base address at the default exe base if requested */
-+			if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
-+#ifdef CONFIG_SPARC64
-+				load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
-+#else
-+				load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
-+#endif
-+				load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
-+				elf_flags |= MAP_FIXED;
-+			}
-+#endif
-+
- 		}
- 
- 		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
-@@ -903,9 +1197,9 @@ static int load_elf_binary(struct linux_
- 		 * allowed task size. Note that p_filesz must always be
- 		 * <= p_memsz so it is only necessary to check p_memsz.
- 		 */
--		if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
--		    elf_ppnt->p_memsz > TASK_SIZE ||
--		    TASK_SIZE - elf_ppnt->p_memsz < k) {
-+		if (k >= task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
-+		    elf_ppnt->p_memsz > task_size ||
-+		    task_size - elf_ppnt->p_memsz < k) {
- 			/* set_brk can never work. Avoid overflows. */
- 			send_sig(SIGKILL, current, 0);
- 			retval = -EINVAL;
-@@ -933,6 +1227,11 @@ static int load_elf_binary(struct linux_
- 	start_data += load_bias;
- 	end_data += load_bias;
- 
-+#ifdef CONFIG_PAX_RANDMMAP
-+	if (current->mm->pax_flags & MF_PAX_RANDMMAP)
-+		elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
-+#endif
-+
- 	/* Calling set_brk effectively mmaps the pages that we need
- 	 * for the bss and break sections.  We must do this before
- 	 * mapping in the interpreter, to make sure it doesn't wind
-@@ -944,9 +1243,11 @@ static int load_elf_binary(struct linux_
- 		goto out_free_dentry;
- 	}
- 	if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
--		send_sig(SIGSEGV, current, 0);
--		retval = -EFAULT; /* Nobody gets to see this, but.. */
--		goto out_free_dentry;
-+		/*
-+		 * This bss-zeroing can fail if the ELF
-+		 * file specifies odd protections. So
-+		 * we don't check the return value
-+		 */
- 	}
- 
- 	if (elf_interpreter) {
-@@ -1183,8 +1484,10 @@ static int dump_seek(struct file *file, 
- 			unsigned long n = off;
- 			if (n > PAGE_SIZE)
- 				n = PAGE_SIZE;
--			if (!dump_write(file, buf, n))
-+			if (!dump_write(file, buf, n)) {
-+				free_page((unsigned long)buf);
- 				return 0;
-+			}
- 			off -= n;
- 		}
- 		free_page((unsigned long)buf);
-@@ -1199,7 +1502,7 @@ static int dump_seek(struct file *file, 
-  *
-  * I think we should skip something. But I am not sure how. H.J.
-  */
--static int maydump(struct vm_area_struct *vma, unsigned long mm_flags)
-+static int maydump(struct vm_area_struct *vma, unsigned long mm_flags, long signr)
- {
- 	/* The vma can be set up to tell us the answer directly.  */
- 	if (vma->vm_flags & VM_ALWAYSDUMP)
-@@ -1218,7 +1521,7 @@ static int maydump(struct vm_area_struct
- 	}
- 
- 	/* By default, if it hasn't been written to, don't write it out. */
--	if (!vma->anon_vma)
-+	if (signr != SIGKILL && !vma->anon_vma)
- 		return test_bit(MMF_DUMP_MAPPED_PRIVATE, &mm_flags);
- 
- 	return test_bit(MMF_DUMP_ANON_PRIVATE, &mm_flags);
-@@ -1275,8 +1578,11 @@ static int writenote(struct memelfnote *
- #undef DUMP_WRITE
- 
- #define DUMP_WRITE(addr, nr)	\
-+	do { \
-+	gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
- 	if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
--		goto end_coredump;
-+		goto end_coredump; \
-+	} while (0);
- #define DUMP_SEEK(off)	\
- 	if (!dump_seek(file, (off))) \
- 		goto end_coredump;
-@@ -1676,7 +1982,7 @@ static int elf_core_dump(long signr, str
- 		phdr.p_offset = offset;
- 		phdr.p_vaddr = vma->vm_start;
- 		phdr.p_paddr = 0;
--		phdr.p_filesz = maydump(vma, mm_flags) ? sz : 0;
-+		phdr.p_filesz = maydump(vma, mm_flags, signr) ? sz : 0;
- 		phdr.p_memsz = sz;
- 		offset += phdr.p_filesz;
- 		phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
-@@ -1720,7 +2026,7 @@ static int elf_core_dump(long signr, str
- 			vma = next_vma(vma, gate_vma)) {
- 		unsigned long addr;
- 
--		if (!maydump(vma, mm_flags))
-+		if (!maydump(vma, mm_flags, signr))
- 			continue;
- 
- 		for (addr = vma->vm_start;
-@@ -1743,6 +2049,7 @@ static int elf_core_dump(long signr, str
- 					flush_cache_page(vma, addr,
- 							 page_to_pfn(page));
- 					kaddr = kmap(page);
-+					gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
- 					if ((size += PAGE_SIZE) > limit ||
- 					    !dump_write(file, kaddr,
- 					    PAGE_SIZE)) {
---- a/fs/binfmt_flat.c
-+++ b/fs/binfmt_flat.c
-@@ -559,7 +559,9 @@ static int load_flat_file(struct linux_b
- 				realdatastart = (unsigned long) -ENOMEM;
- 			printk("Unable to allocate RAM for process data, errno %d\n",
- 					(int)-realdatastart);
-+			down_write(&current->mm->mmap_sem);
- 			do_munmap(current->mm, textpos, text_len);
-+			up_write(&current->mm->mmap_sem);
- 			ret = realdatastart;
- 			goto err;
- 		}
-@@ -581,8 +583,10 @@ static int load_flat_file(struct linux_b
- 		}
- 		if (result >= (unsigned long)-4096) {
- 			printk("Unable to read data+bss, errno %d\n", (int)-result);
-+			down_write(&current->mm->mmap_sem);
- 			do_munmap(current->mm, textpos, text_len);
- 			do_munmap(current->mm, realdatastart, data_len + extra);
-+			up_write(&current->mm->mmap_sem);
- 			ret = result;
- 			goto err;
- 		}
-@@ -655,8 +659,10 @@ static int load_flat_file(struct linux_b
- 		}
- 		if (result >= (unsigned long)-4096) {
- 			printk("Unable to read code+data+bss, errno %d\n",(int)-result);
-+			down_write(&current->mm->mmap_sem);
- 			do_munmap(current->mm, textpos, text_len + data_len + extra +
- 				MAX_SHARED_LIBS * sizeof(unsigned long));
-+			up_write(&current->mm->mmap_sem);
- 			ret = result;
- 			goto err;
- 		}
---- a/fs/binfmt_misc.c
-+++ b/fs/binfmt_misc.c
-@@ -113,9 +113,11 @@ static int load_misc_binary(struct linux
- 	struct files_struct *files = NULL;
- 
- 	retval = -ENOEXEC;
--	if (!enabled)
-+	if (!enabled || bprm->misc)
- 		goto _ret;
- 
-+	bprm->misc++;
-+
- 	/* to keep locking time low, we copy the interpreter string */
- 	read_lock(&entries_lock);
- 	fmt = check_file(bprm);
-@@ -720,7 +722,7 @@ static int bm_fill_super(struct super_bl
- 	static struct tree_descr bm_files[] = {
- 		[2] = {"status", &bm_status_operations, S_IWUSR|S_IRUGO},
- 		[3] = {"register", &bm_register_operations, S_IWUSR},
--		/* last one */ {""}
-+		/* last one */ {"", NULL, 0}
- 	};
- 	int err = simple_fill_super(sb, 0x42494e4d, bm_files);
- 	if (!err)
---- a/fs/buffer.c
-+++ b/fs/buffer.c
-@@ -41,6 +41,7 @@
- #include <linux/bitops.h>
- #include <linux/mpage.h>
- #include <linux/bit_spinlock.h>
-+#include <linux/grsecurity.h>
- 
- static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
- 
-@@ -2017,6 +2018,7 @@ static int __generic_cont_expand(struct 
- 
- 	err = -EFBIG;
-         limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
-+	gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long) size, 1);
- 	if (limit != RLIM_INFINITY && size > (loff_t)limit) {
- 		send_sig(SIGXFSZ, current, 0);
- 		goto out;
---- a/fs/cifs/cifs_uniupr.h
-+++ b/fs/cifs/cifs_uniupr.h
-@@ -132,7 +132,7 @@ const struct UniCaseRange CifsUniUpperRa
- 	{0x0490, 0x04cc, UniCaseRangeU0490},
- 	{0x1e00, 0x1ffc, UniCaseRangeU1e00},
- 	{0xff40, 0xff5a, UniCaseRangeUff40},
--	{0}
-+	{0, 0, NULL}
- };
- #endif
- 
---- a/fs/cifs/dir.c
-+++ b/fs/cifs/dir.c
-@@ -397,7 +397,7 @@ int cifs_mknod(struct inode *inode, stru
- 				/* BB Do not bother to decode buf since no
- 				   local inode yet to put timestamps in,
- 				   but we can reuse it safely */
--				int bytes_written;
-+				unsigned int bytes_written;
- 				struct win_dev *pdev;
- 				pdev = (struct win_dev *)buf;
- 				if (S_ISCHR(mode)) {
---- a/fs/cifs/inode.c
-+++ b/fs/cifs/inode.c
-@@ -1469,7 +1469,7 @@ int cifs_setattr(struct dentry *direntry
- 			atomic_dec(&open_file->wrtPending);
- 			cFYI(1, ("SetFSize for attrs rc = %d", rc));
- 			if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
--				int bytes_written;
-+				unsigned int bytes_written;
- 				rc = CIFSSMBWrite(xid, pTcon,
- 						  nfid, 0, attrs->ia_size,
- 						  &bytes_written, NULL, NULL,
-@@ -1502,7 +1502,7 @@ int cifs_setattr(struct dentry *direntry
- 					cifs_sb->mnt_cifs_flags &
- 						CIFS_MOUNT_MAP_SPECIAL_CHR);
- 				if (rc == 0) {
--					int bytes_written;
-+					unsigned int bytes_written;
- 					rc = CIFSSMBWrite(xid, pTcon,
- 							netfid, 0,
- 							attrs->ia_size,
---- a/fs/compat.c
-+++ b/fs/compat.c
-@@ -50,6 +50,7 @@
- #include <linux/poll.h>
- #include <linux/mm.h>
- #include <linux/eventpoll.h>
-+#include <linux/grsecurity.h>
- 
- #include <asm/uaccess.h>
- #include <asm/mmu_context.h>
-@@ -1300,14 +1301,12 @@ static int compat_copy_strings(int argc,
- 			if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
- 				struct page *page;
- 
--#ifdef CONFIG_STACK_GROWSUP
- 				ret = expand_stack_downwards(bprm->vma, pos);
- 				if (ret < 0) {
- 					/* We've exceed the stack rlimit. */
- 					ret = -E2BIG;
- 					goto out;
- 				}
--#endif
- 				ret = get_user_pages(current, bprm->mm, pos,
- 						     1, 1, 1, &page, NULL);
- 				if (ret <= 0) {
-@@ -1353,6 +1352,11 @@ int compat_do_execve(char * filename,
- 	compat_uptr_t __user *envp,
- 	struct pt_regs * regs)
- {
-+#ifdef CONFIG_GRKERNSEC
-+	struct file *old_exec_file;
-+	struct acl_subject_label *old_acl;
-+	struct rlimit old_rlim[RLIM_NLIMITS];
-+#endif
- 	struct linux_binprm *bprm;
- 	struct file *file;
- 	int retval;
-@@ -1373,6 +1377,14 @@ int compat_do_execve(char * filename,
- 	bprm->filename = filename;
- 	bprm->interp = filename;
- 
-+	gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->user->processes), 1);
-+	retval = -EAGAIN;
-+	if (gr_handle_nproc())
-+		goto out_file;
-+	retval = -EACCES;
-+	if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
-+		goto out_file;
-+
- 	retval = bprm_mm_init(bprm);
- 	if (retval)
- 		goto out_file;
-@@ -1406,8 +1418,36 @@ int compat_do_execve(char * filename,
- 	if (retval < 0)
- 		goto out;
- 
-+	if (!gr_tpe_allow(file)) {
-+		retval = -EACCES;
-+		goto out;
-+	}
-+
-+	if (gr_check_crash_exec(file)) {
-+		retval = -EACCES;
-+		goto out;
-+	}
-+
-+	gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
-+
-+	gr_handle_exec_args(bprm, (char __user * __user *)argv);
-+
-+#ifdef CONFIG_GRKERNSEC
-+	old_acl = current->acl;
-+	memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
-+	old_exec_file = current->exec_file;
-+	get_file(file);
-+	current->exec_file = file;
-+#endif
-+
-+	gr_set_proc_label(file->f_dentry, file->f_vfsmnt);
-+
- 	retval = search_binary_handler(bprm, regs);
- 	if (retval >= 0) {
-+#ifdef CONFIG_GRKERNSEC
-+		if (old_exec_file)
-+			fput(old_exec_file);
-+#endif
- 		/* execve success */
- 		security_bprm_free(bprm);
- 		acct_update_integrals(current);
-@@ -1415,6 +1455,13 @@ int compat_do_execve(char * filename,
- 		return retval;
- 	}
- 
-+#ifdef CONFIG_GRKERNSEC
-+	current->acl = old_acl;
-+	memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
-+	fput(current->exec_file);
-+	current->exec_file = old_exec_file;
-+#endif
-+
- out:
- 	if (bprm->security)
- 		security_bprm_free(bprm);
---- a/fs/compat_ioctl.c
-+++ b/fs/compat_ioctl.c
-@@ -2431,15 +2431,15 @@ struct ioctl_trans {
- };
- 
- #define HANDLE_IOCTL(cmd,handler) \
--	{ (cmd), (ioctl_trans_handler_t)(handler) },
-+	{ (cmd), (ioctl_trans_handler_t)(handler), NULL },
- 
- /* pointer to compatible structure or no argument */
- #define COMPATIBLE_IOCTL(cmd) \
--	{ (cmd), do_ioctl32_pointer },
-+	{ (cmd), do_ioctl32_pointer, NULL },
- 
- /* argument is an unsigned long integer, not a pointer */
- #define ULONG_IOCTL(cmd) \
--	{ (cmd), (ioctl_trans_handler_t)sys_ioctl },
-+	{ (cmd), (ioctl_trans_handler_t)sys_ioctl, NULL },
- 
- /* ioctl should not be warned about even if it's not implemented.
-    Valid reasons to use this:
---- a/fs/debugfs/inode.c
-+++ b/fs/debugfs/inode.c
-@@ -125,7 +125,7 @@ static inline int debugfs_positive(struc
- 
- static int debug_fill_super(struct super_block *sb, void *data, int silent)
- {
--	static struct tree_descr debug_files[] = {{""}};
-+	static struct tree_descr debug_files[] = {{"", NULL, 0}};
- 
- 	return simple_fill_super(sb, DEBUGFS_MAGIC, debug_files);
- }
---- a/fs/exec.c
-+++ b/fs/exec.c
-@@ -50,6 +50,8 @@
- #include <linux/tsacct_kern.h>
- #include <linux/cn_proc.h>
- #include <linux/audit.h>
-+#include <linux/random.h>
-+#include <linux/grsecurity.h>
- 
- #include <asm/uaccess.h>
- #include <asm/mmu_context.h>
-@@ -184,18 +186,10 @@ static struct page *get_arg_page(struct 
- 		int write)
- {
- 	struct page *page;
--	int ret;
- 
--#ifdef CONFIG_STACK_GROWSUP
--	if (write) {
--		ret = expand_stack_downwards(bprm->vma, pos);
--		if (ret < 0)
--			return NULL;
--	}
--#endif
--	ret = get_user_pages(current, bprm->mm, pos,
--			1, write, 1, &page, NULL);
--	if (ret <= 0)
-+	if (0 > expand_stack_downwards(bprm->vma, pos))
-+		return NULL;
-+	if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
- 		return NULL;
- 
- 	if (write) {
-@@ -260,7 +254,12 @@ static int __bprm_mm_init(struct linux_b
- 	vma->vm_start = vma->vm_end - PAGE_SIZE;
- 
- 	vma->vm_flags = VM_STACK_FLAGS;
--	vma->vm_page_prot = protection_map[vma->vm_flags & 0x7];
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
-+#endif
-+
-+	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
- 	err = insert_vm_struct(mm, vma);
- 	if (err) {
- 		up_write(&mm->mmap_sem);
-@@ -272,6 +271,11 @@ static int __bprm_mm_init(struct linux_b
- 
- 	bprm->p = vma->vm_end - sizeof(void *);
- 
-+#ifdef CONFIG_PAX_RANDUSTACK
-+	if (randomize_va_space)
-+		bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
-+#endif
-+
- 	return 0;
- 
- err:
-@@ -395,7 +399,7 @@ static int count(char __user * __user * 
- 			if (!p)
- 				break;
- 			argv++;
--			if(++i > max)
-+			if (++i > max)
- 				return -E2BIG;
- 			cond_resched();
- 		}
-@@ -535,6 +539,10 @@ static int shift_arg_pages(struct vm_are
- 	if (vma != find_vma(mm, new_start))
- 		return -EFAULT;
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	BUG_ON(pax_find_mirror_vma(vma));
-+#endif
-+
- 	/*
- 	 * cover the whole range: [new_start, old_end)
- 	 */
-@@ -623,6 +631,14 @@ int setup_arg_pages(struct linux_binprm 
- 	bprm->exec -= stack_shift;
- 
- 	down_write(&mm->mmap_sem);
-+
-+	/* Move stack pages down in memory. */
-+	if (stack_shift) {
-+		ret = shift_arg_pages(vma, stack_shift);
-+		if (ret)
-+			goto out_unlock;
-+	}
-+
- 	vm_flags = vma->vm_flags;
- 
- 	/*
-@@ -634,23 +650,28 @@ int setup_arg_pages(struct linux_binprm 
- 		vm_flags |= VM_EXEC;
- 	else if (executable_stack == EXSTACK_DISABLE_X)
- 		vm_flags &= ~VM_EXEC;
-+	else
-+		vm_flags = VM_STACK_FLAGS;
- 	vm_flags |= mm->def_flags;
- 
-+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
-+	if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
-+		vm_flags &= ~VM_EXEC;
-+
-+#ifdef CONFIG_PAX_MPROTECT
-+		if (mm->pax_flags & MF_PAX_MPROTECT)
-+			vm_flags &= ~VM_MAYEXEC;
-+#endif
-+
-+	}
-+#endif
-+
- 	ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
- 			vm_flags);
- 	if (ret)
- 		goto out_unlock;
- 	BUG_ON(prev != vma);
- 
--	/* Move stack pages down in memory. */
--	if (stack_shift) {
--		ret = shift_arg_pages(vma, stack_shift);
--		if (ret) {
--			up_write(&mm->mmap_sem);
--			return ret;
--		}
--	}
--
- #ifdef CONFIG_STACK_GROWSUP
- 	stack_base = vma->vm_end + EXTRA_STACK_VM_PAGES * PAGE_SIZE;
- #else
-@@ -662,7 +683,7 @@ int setup_arg_pages(struct linux_binprm 
- 
- out_unlock:
- 	up_write(&mm->mmap_sem);
--	return 0;
-+	return ret;
- }
- EXPORT_SYMBOL(setup_arg_pages);
- 
-@@ -682,7 +703,7 @@ struct file *open_exec(const char *name)
- 		file = ERR_PTR(-EACCES);
- 		if (!(nd.mnt->mnt_flags & MNT_NOEXEC) &&
- 		    S_ISREG(inode->i_mode)) {
--			int err = vfs_permission(&nd, MAY_EXEC);
-+			err = vfs_permission(&nd, MAY_EXEC);
- 			file = ERR_PTR(err);
- 			if (!err) {
- 				file = nameidata_to_filp(&nd, O_RDONLY);
-@@ -1339,6 +1360,11 @@ int do_execve(char * filename,
- 	char __user *__user *envp,
- 	struct pt_regs * regs)
- {
-+#ifdef CONFIG_GRKERNSEC
-+	struct file *old_exec_file;
-+	struct acl_subject_label *old_acl;
-+	struct rlimit old_rlim[RLIM_NLIMITS];
-+#endif
- 	struct linux_binprm *bprm;
- 	struct file *file;
- 	unsigned long env_p;
-@@ -1354,6 +1380,20 @@ int do_execve(char * filename,
- 	if (IS_ERR(file))
- 		goto out_kfree;
- 
-+	gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->user->processes), 1);
-+
-+	if (gr_handle_nproc()) {
-+		allow_write_access(file);
-+		fput(file);
-+		return -EAGAIN;
-+	}
-+
-+	if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
-+		allow_write_access(file);
-+		fput(file);
-+		return -EACCES;
-+	}
-+
- 	sched_exec();
- 
- 	bprm->file = file;
-@@ -1395,8 +1435,38 @@ int do_execve(char * filename,
- 		goto out;
- 	bprm->argv_len = env_p - bprm->p;
- 
-+	if (!gr_tpe_allow(file)) {
-+		retval = -EACCES;
-+		goto out;
-+	}
-+
-+	if (gr_check_crash_exec(file)) {
-+		retval = -EACCES;
-+		goto out;
-+	}
-+
-+	gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
-+
-+	gr_handle_exec_args(bprm, argv);
-+
-+#ifdef CONFIG_GRKERNSEC
-+	old_acl = current->acl;
-+	memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
-+	old_exec_file = current->exec_file;
-+	get_file(file);
-+	current->exec_file = file;
-+#endif
-+
-+	retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt);
-+	if (retval < 0)
-+		goto out_fail;
-+
- 	retval = search_binary_handler(bprm,regs);
- 	if (retval >= 0) {
-+#ifdef CONFIG_GRKERNSEC
-+		if (old_exec_file)
-+			fput(old_exec_file);
-+#endif
- 		/* execve success */
- 		free_arg_pages(bprm);
- 		security_bprm_free(bprm);
-@@ -1405,6 +1475,14 @@ int do_execve(char * filename,
- 		return retval;
- 	}
- 
-+out_fail:
-+#ifdef CONFIG_GRKERNSEC
-+	current->acl = old_acl;
-+	memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
-+	fput(current->exec_file);
-+	current->exec_file = old_exec_file;
-+#endif
-+
- out:
- 	free_arg_pages(bprm);
- 	if (bprm->security)
-@@ -1561,6 +1639,114 @@ out:
- 	return ispipe;
- }
- 
-+int pax_check_flags(unsigned long *flags)
-+{
-+	int retval = 0;
-+
-+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
-+	if (*flags & MF_PAX_SEGMEXEC)
-+	{
-+		*flags &= ~MF_PAX_SEGMEXEC;
-+		retval = -EINVAL;
-+	}
-+#endif
-+
-+	if ((*flags & MF_PAX_PAGEEXEC)
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+	    &&  (*flags & MF_PAX_SEGMEXEC)
-+#endif
-+
-+	   )
-+	{
-+		*flags &= ~MF_PAX_PAGEEXEC;
-+		retval = -EINVAL;
-+	}
-+
-+	if ((*flags & MF_PAX_MPROTECT)
-+
-+#ifdef CONFIG_PAX_MPROTECT
-+	    && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
-+#endif
-+
-+	   )
-+	{
-+		*flags &= ~MF_PAX_MPROTECT;
-+		retval = -EINVAL;
-+	}
-+
-+	if ((*flags & MF_PAX_EMUTRAMP)
-+
-+#ifdef CONFIG_PAX_EMUTRAMP
-+	    && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
-+#endif
-+
-+	   )
-+	{
-+		*flags &= ~MF_PAX_EMUTRAMP;
-+		retval = -EINVAL;
-+	}
-+
-+	return retval;
-+}
-+
-+EXPORT_SYMBOL(pax_check_flags);
-+
-+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
-+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
-+{
-+	struct task_struct *tsk = current;
-+	struct mm_struct *mm = current->mm;
-+	char *buffer_exec = (char *)__get_free_page(GFP_ATOMIC);
-+	char *buffer_fault = (char *)__get_free_page(GFP_ATOMIC);
-+	char *path_exec = NULL;
-+	char *path_fault = NULL;
-+	unsigned long start = 0UL, end = 0UL, offset = 0UL;
-+
-+	if (buffer_exec && buffer_fault) {
-+		struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
-+
-+		down_read(&mm->mmap_sem);
-+		vma = mm->mmap;
-+		while (vma && (!vma_exec || !vma_fault)) {
-+			if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
-+				vma_exec = vma;
-+			if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
-+				vma_fault = vma;
-+			vma = vma->vm_next;
-+		}
-+		if (vma_exec) {
-+			path_exec = d_path(vma_exec->vm_file->f_path.dentry, vma_exec->vm_file->f_path.mnt, buffer_exec, PAGE_SIZE);
-+			if (IS_ERR(path_exec))
-+				path_exec = "<path too long>";
-+		}
-+		if (vma_fault) {
-+			start = vma_fault->vm_start;
-+			end = vma_fault->vm_end;
-+			offset = vma_fault->vm_pgoff << PAGE_SHIFT;
-+			if (vma_fault->vm_file) {
-+				path_fault = d_path(vma_fault->vm_file->f_path.dentry, vma_fault->vm_file->f_path.mnt, buffer_fault, PAGE_SIZE);
-+				if (IS_ERR(path_fault))
-+					path_fault = "<path too long>";
-+			} else
-+				path_fault = "<anonymous mapping>";
-+		}
-+		up_read(&mm->mmap_sem);
-+	}
-+	if (tsk->signal->curr_ip)
-+		printk(KERN_ERR "PAX: From %u.%u.%u.%u: execution attempt in: %s, %08lx-%08lx %08lx\n", NIPQUAD(tsk->signal->curr_ip), path_fault, start, end, offset);
-+	else
-+		printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
-+	printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
-+			"PC: %p, SP: %p\n", path_exec, tsk->comm, tsk->pid,
-+			tsk->uid, tsk->euid, pc, sp);
-+	free_page((unsigned long)buffer_exec);
-+	free_page((unsigned long)buffer_fault);
-+	pax_report_insns(pc, sp);
-+	do_coredump(SIGKILL, SIGKILL, regs);
-+}
-+#endif
-+
- static void zap_process(struct task_struct *start)
- {
- 	struct task_struct *t;
-@@ -1753,6 +1939,10 @@ int do_coredump(long signr, int exit_cod
- 	 */
- 	clear_thread_flag(TIF_SIGPENDING);
- 
-+	if (signr == SIGKILL || signr == SIGILL)
-+		gr_handle_brute_attach(current);
-+
-+	gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
- 	if (current->signal->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump)
- 		goto fail_unlock;
- 
---- a/fs/ext2/balloc.c
-+++ b/fs/ext2/balloc.c
-@@ -111,7 +111,7 @@ static int reserve_blocks(struct super_b
- 	if (free_blocks < count)
- 		count = free_blocks;
- 
--	if (free_blocks < root_blocks + count && !capable(CAP_SYS_RESOURCE) &&
-+	if (free_blocks < root_blocks + count && !capable_nolog(CAP_SYS_RESOURCE) &&
- 	    sbi->s_resuid != current->fsuid &&
- 	    (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
- 		/*
---- a/fs/ext3/balloc.c
-+++ b/fs/ext3/balloc.c
-@@ -1359,7 +1359,7 @@ static int ext3_has_free_blocks(struct e
- 
- 	free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
- 	root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
--	if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
-+	if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
- 		sbi->s_resuid != current->fsuid &&
- 		(sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
- 		return 0;
---- a/fs/ext3/namei.c
-+++ b/fs/ext3/namei.c
-@@ -1188,9 +1188,9 @@ static struct ext3_dir_entry_2 *do_split
- 	u32 hash2;
- 	struct dx_map_entry *map;
- 	char *data1 = (*bh)->b_data, *data2;
--	unsigned split, move, size, i;
-+	unsigned split, move, size;
- 	struct ext3_dir_entry_2 *de = NULL, *de2;
--	int	err = 0;
-+	int	i, err = 0;
- 
- 	bh2 = ext3_append (handle, dir, &newblock, &err);
- 	if (!(bh2)) {
---- a/fs/ext3/xattr.c
-+++ b/fs/ext3/xattr.c
-@@ -89,8 +89,8 @@
- 		printk("\n"); \
- 	} while (0)
- #else
--# define ea_idebug(f...)
--# define ea_bdebug(f...)
-+# define ea_idebug(f...) do {} while (0)
-+# define ea_bdebug(f...) do {} while (0)
- #endif
- 
- static void ext3_xattr_cache_insert(struct buffer_head *);
---- a/fs/ext4/balloc.c
-+++ b/fs/ext4/balloc.c
-@@ -1376,7 +1376,7 @@ static int ext4_has_free_blocks(struct e
- 
- 	free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
- 	root_blocks = ext4_r_blocks_count(sbi->s_es);
--	if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
-+	if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
- 		sbi->s_resuid != current->fsuid &&
- 		(sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
- 		return 0;
---- a/fs/ext4/namei.c
-+++ b/fs/ext4/namei.c
-@@ -1186,9 +1186,9 @@ static struct ext4_dir_entry_2 *do_split
- 	u32 hash2;
- 	struct dx_map_entry *map;
- 	char *data1 = (*bh)->b_data, *data2;
--	unsigned split, move, size, i;
-+	unsigned split, move, size;
- 	struct ext4_dir_entry_2 *de = NULL, *de2;
--	int	err = 0;
-+	int	i, err = 0;
- 
- 	bh2 = ext4_append (handle, dir, &newblock, &err);
- 	if (!(bh2)) {
---- a/fs/fcntl.c
-+++ b/fs/fcntl.c
-@@ -18,6 +18,7 @@
- #include <linux/ptrace.h>
- #include <linux/signal.h>
- #include <linux/rcupdate.h>
-+#include <linux/grsecurity.h>
- 
- #include <asm/poll.h>
- #include <asm/siginfo.h>
-@@ -63,6 +64,7 @@ static int locate_fd(struct files_struct
- 	struct fdtable *fdt;
- 
- 	error = -EINVAL;
-+	gr_learn_resource(current, RLIMIT_NOFILE, orig_start, 0);
- 	if (orig_start >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
- 		goto out;
- 
-@@ -82,6 +84,7 @@ repeat:
- 					   fdt->max_fds, start);
- 	
- 	error = -EMFILE;
-+	gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
- 	if (newfd >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
- 		goto out;
- 
-@@ -140,6 +143,8 @@ asmlinkage long sys_dup2(unsigned int ol
- 	struct files_struct * files = current->files;
- 	struct fdtable *fdt;
- 
-+	gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
-+
- 	spin_lock(&files->file_lock);
- 	if (!(file = fcheck(oldfd)))
- 		goto out_unlock;
-@@ -458,7 +463,8 @@ static inline int sigio_perm(struct task
- 	return (((fown->euid == 0) ||
- 		 (fown->euid == p->suid) || (fown->euid == p->uid) ||
- 		 (fown->uid == p->suid) || (fown->uid == p->uid)) &&
--		!security_file_send_sigiotask(p, fown, sig));
-+		!security_file_send_sigiotask(p, fown, sig) &&
-+		!gr_check_protected_task(p) && !gr_pid_is_chrooted(p));
- }
- 
- static void send_sigio_to_task(struct task_struct *p,
---- a/fs/fuse/control.c
-+++ b/fs/fuse/control.c
-@@ -159,7 +159,7 @@ void fuse_ctl_remove_conn(struct fuse_co
- 
- static int fuse_ctl_fill_super(struct super_block *sb, void *data, int silent)
- {
--	struct tree_descr empty_descr = {""};
-+	struct tree_descr empty_descr = {"", NULL, 0};
- 	struct fuse_conn *fc;
- 	int err;
- 
---- a/fs/hfs/inode.c
-+++ b/fs/hfs/inode.c
-@@ -415,7 +415,7 @@ int hfs_write_inode(struct inode *inode,
- 
- 	if (S_ISDIR(main_inode->i_mode)) {
- 		if (fd.entrylength < sizeof(struct hfs_cat_dir))
--			/* panic? */;
-+			{/* panic? */}
- 		hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
- 			   sizeof(struct hfs_cat_dir));
- 		if (rec.type != HFS_CDR_DIR ||
-@@ -436,7 +436,7 @@ int hfs_write_inode(struct inode *inode,
- 				sizeof(struct hfs_cat_file));
- 	} else {
- 		if (fd.entrylength < sizeof(struct hfs_cat_file))
--			/* panic? */;
-+			{/* panic? */}
- 		hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
- 			   sizeof(struct hfs_cat_file));
- 		if (rec.type != HFS_CDR_FIL ||
---- a/fs/hfsplus/inode.c
-+++ b/fs/hfsplus/inode.c
-@@ -418,7 +418,7 @@ int hfsplus_cat_read_inode(struct inode 
- 		struct hfsplus_cat_folder *folder = &entry.folder;
- 
- 		if (fd->entrylength < sizeof(struct hfsplus_cat_folder))
--			/* panic? */;
-+			{/* panic? */}
- 		hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
- 					sizeof(struct hfsplus_cat_folder));
- 		hfsplus_get_perms(inode, &folder->permissions, 1);
-@@ -435,7 +435,7 @@ int hfsplus_cat_read_inode(struct inode 
- 		struct hfsplus_cat_file *file = &entry.file;
- 
- 		if (fd->entrylength < sizeof(struct hfsplus_cat_file))
--			/* panic? */;
-+			{/* panic? */}
- 		hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
- 					sizeof(struct hfsplus_cat_file));
- 
-@@ -491,7 +491,7 @@ int hfsplus_cat_write_inode(struct inode
- 		struct hfsplus_cat_folder *folder = &entry.folder;
- 
- 		if (fd.entrylength < sizeof(struct hfsplus_cat_folder))
--			/* panic? */;
-+			{/* panic? */}
- 		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
- 					sizeof(struct hfsplus_cat_folder));
- 		/* simple node checks? */
-@@ -513,7 +513,7 @@ int hfsplus_cat_write_inode(struct inode
- 		struct hfsplus_cat_file *file = &entry.file;
- 
- 		if (fd.entrylength < sizeof(struct hfsplus_cat_file))
--			/* panic? */;
-+			{/* panic? */}
- 		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
- 					sizeof(struct hfsplus_cat_file));
- 		hfsplus_inode_write_fork(inode, &file->data_fork);
---- a/fs/jffs2/debug.h
-+++ b/fs/jffs2/debug.h
-@@ -51,13 +51,13 @@
- #if CONFIG_JFFS2_FS_DEBUG > 0
- #define D1(x) x
- #else
--#define D1(x)
-+#define D1(x) do {} while (0);
- #endif
- 
- #if CONFIG_JFFS2_FS_DEBUG > 1
- #define D2(x) x
- #else
--#define D2(x)
-+#define D2(x) do {} while (0);
- #endif
- 
- /* The prefixes of JFFS2 messages */
-@@ -113,68 +113,68 @@
- #ifdef JFFS2_DBG_READINODE_MESSAGES
- #define dbg_readinode(fmt, ...)	JFFS2_DEBUG(fmt, ##__VA_ARGS__)
- #else
--#define dbg_readinode(fmt, ...)
-+#define dbg_readinode(fmt, ...)	do {} while (0)
- #endif
- 
- /* Fragtree build debugging messages */
- #ifdef JFFS2_DBG_FRAGTREE_MESSAGES
- #define dbg_fragtree(fmt, ...)	JFFS2_DEBUG(fmt, ##__VA_ARGS__)
- #else
--#define dbg_fragtree(fmt, ...)
-+#define dbg_fragtree(fmt, ...)	do {} while (0)
- #endif
- #ifdef JFFS2_DBG_FRAGTREE2_MESSAGES
- #define dbg_fragtree2(fmt, ...)	JFFS2_DEBUG(fmt, ##__VA_ARGS__)
- #else
--#define dbg_fragtree2(fmt, ...)
-+#define dbg_fragtree2(fmt, ...)	do {} while (0)
- #endif
- 
- /* Directory entry list manilulation debugging messages */
- #ifdef JFFS2_DBG_DENTLIST_MESSAGES
- #define dbg_dentlist(fmt, ...)	JFFS2_DEBUG(fmt, ##__VA_ARGS__)
- #else
--#define dbg_dentlist(fmt, ...)
-+#define dbg_dentlist(fmt, ...)	do {} while (0)
- #endif
- 
- /* Print the messages about manipulating node_refs */
- #ifdef JFFS2_DBG_NODEREF_MESSAGES
- #define dbg_noderef(fmt, ...)	JFFS2_DEBUG(fmt, ##__VA_ARGS__)
- #else
--#define dbg_noderef(fmt, ...)
-+#define dbg_noderef(fmt, ...)	do {} while (0)
- #endif
- 
- /* Manipulations with the list of inodes (JFFS2 inocache) */
- #ifdef JFFS2_DBG_INOCACHE_MESSAGES
- #define dbg_inocache(fmt, ...)	JFFS2_DEBUG(fmt, ##__VA_ARGS__)
- #else
--#define dbg_inocache(fmt, ...)
-+#define dbg_inocache(fmt, ...)	do {} while (0)
- #endif
- 
- /* Summary debugging messages */
- #ifdef JFFS2_DBG_SUMMARY_MESSAGES
- #define dbg_summary(fmt, ...)	JFFS2_DEBUG(fmt, ##__VA_ARGS__)
- #else
--#define dbg_summary(fmt, ...)
-+#define dbg_summary(fmt, ...)	do {} while (0)
- #endif
- 
- /* File system build messages */
- #ifdef JFFS2_DBG_FSBUILD_MESSAGES
- #define dbg_fsbuild(fmt, ...)	JFFS2_DEBUG(fmt, ##__VA_ARGS__)
- #else
--#define dbg_fsbuild(fmt, ...)
-+#define dbg_fsbuild(fmt, ...)	do {} while (0)
- #endif
- 
- /* Watch the object allocations */
- #ifdef JFFS2_DBG_MEMALLOC_MESSAGES
- #define dbg_memalloc(fmt, ...)	JFFS2_DEBUG(fmt, ##__VA_ARGS__)
- #else
--#define dbg_memalloc(fmt, ...)
-+#define dbg_memalloc(fmt, ...)	do {} while (0)
- #endif
- 
- /* Watch the XATTR subsystem */
- #ifdef JFFS2_DBG_XATTR_MESSAGES
- #define dbg_xattr(fmt, ...)  JFFS2_DEBUG(fmt, ##__VA_ARGS__)
- #else
--#define dbg_xattr(fmt, ...)
-+#define dbg_xattr(fmt, ...)	do {} while (0)
- #endif 
- 
- /* "Sanity" checks */
---- a/fs/jffs2/erase.c
-+++ b/fs/jffs2/erase.c
-@@ -389,7 +389,8 @@ static void jffs2_mark_erased_block(stru
- 		struct jffs2_unknown_node marker = {
- 			.magic =	cpu_to_je16(JFFS2_MAGIC_BITMASK),
- 			.nodetype =	cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
--			.totlen =	cpu_to_je32(c->cleanmarker_size)
-+			.totlen =	cpu_to_je32(c->cleanmarker_size),
-+			.hdr_crc =	cpu_to_je32(0)
- 		};
- 
- 		jffs2_prealloc_raw_node_refs(c, jeb, 1);
---- a/fs/jffs2/summary.h
-+++ b/fs/jffs2/summary.h
-@@ -188,18 +188,18 @@ int jffs2_sum_scan_sumnode(struct jffs2_
- 
- #define jffs2_sum_active() (0)
- #define jffs2_sum_init(a) (0)
--#define jffs2_sum_exit(a)
--#define jffs2_sum_disable_collecting(a)
-+#define jffs2_sum_exit(a) do {} while (0)
-+#define jffs2_sum_disable_collecting(a) do {} while (0)
- #define jffs2_sum_is_disabled(a) (0)
--#define jffs2_sum_reset_collected(a)
-+#define jffs2_sum_reset_collected(a) do {} while (0)
- #define jffs2_sum_add_kvec(a,b,c,d) (0)
--#define jffs2_sum_move_collected(a,b)
-+#define jffs2_sum_move_collected(a,b) do {} while (0)
- #define jffs2_sum_write_sumnode(a) (0)
--#define jffs2_sum_add_padding_mem(a,b)
--#define jffs2_sum_add_inode_mem(a,b,c)
--#define jffs2_sum_add_dirent_mem(a,b,c)
--#define jffs2_sum_add_xattr_mem(a,b,c)
--#define jffs2_sum_add_xref_mem(a,b,c)
-+#define jffs2_sum_add_padding_mem(a,b) do {} while (0)
-+#define jffs2_sum_add_inode_mem(a,b,c) do {} while (0)
-+#define jffs2_sum_add_dirent_mem(a,b,c) do {} while (0)
-+#define jffs2_sum_add_xattr_mem(a,b,c) do {} while (0)
-+#define jffs2_sum_add_xref_mem(a,b,c) do {} while (0)
- #define jffs2_sum_scan_sumnode(a,b,c,d,e) (0)
- 
- #endif /* CONFIG_JFFS2_SUMMARY */
---- a/fs/jffs2/wbuf.c
-+++ b/fs/jffs2/wbuf.c
-@@ -973,7 +973,8 @@ static const struct jffs2_unknown_node o
- {
- 	.magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
- 	.nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
--	.totlen = constant_cpu_to_je32(8)
-+	.totlen = constant_cpu_to_je32(8),
-+	.hdr_crc = constant_cpu_to_je32(0)
- };
- 
- /*
---- a/fs/Kconfig
-+++ b/fs/Kconfig
-@@ -909,7 +909,7 @@ config PROC_FS
- 
- config PROC_KCORE
- 	bool "/proc/kcore support" if !ARM
--	depends on PROC_FS && MMU
-+	depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
- 
- config PROC_VMCORE
-         bool "/proc/vmcore support (EXPERIMENTAL)"
---- a/fs/namei.c
-+++ b/fs/namei.c
-@@ -31,6 +31,7 @@
- #include <linux/file.h>
- #include <linux/fcntl.h>
- #include <linux/namei.h>
-+#include <linux/grsecurity.h>
- #include <asm/namei.h>
- #include <asm/uaccess.h>
- 
-@@ -638,6 +639,13 @@ static inline int do_follow_link(struct 
- 	err = security_inode_follow_link(path->dentry, nd);
- 	if (err)
- 		goto loop;
-+
-+	if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
-+				  path->dentry->d_inode, path->dentry, nd->mnt)) {
-+		err = -EACCES;
-+		goto loop;
-+	}
-+
- 	current->link_count++;
- 	current->total_link_count++;
- 	nd->depth++;
-@@ -983,11 +991,18 @@ return_reval:
- 				break;
- 		}
- return_base:
-+		if (!gr_acl_handle_hidden_file(nd->dentry, nd->mnt)) {
-+			path_release(nd);
-+			return -ENOENT;
-+		}
- 		return 0;
- out_dput:
- 		dput_path(&next, nd);
- 		break;
- 	}
-+	if (!gr_acl_handle_hidden_file(nd->dentry, nd->mnt))
-+		err = -ENOENT;
-+
- 	path_release(nd);
- return_err:
- 	return err;
-@@ -1649,9 +1664,17 @@ static int open_namei_create(struct name
- 	int error;
- 	struct dentry *dir = nd->dentry;
- 
-+	if (!gr_acl_handle_creat(path->dentry, nd->dentry, nd->mnt, flag, mode)) {
-+		error = -EACCES;
-+		goto out_unlock_dput;
-+	}
-+
- 	if (!IS_POSIXACL(dir->d_inode))
- 		mode &= ~current->fs->umask;
- 	error = vfs_create(dir->d_inode, path->dentry, mode, nd);
-+	if (!error)
-+		gr_handle_create(path->dentry, nd->mnt);
-+out_unlock_dput:
- 	mutex_unlock(&dir->d_inode->i_mutex);
- 	dput(nd->dentry);
- 	nd->dentry = path->dentry;
-@@ -1702,6 +1725,17 @@ int open_namei(int dfd, const char *path
- 					 nd, flag);
- 		if (error)
- 			return error;
-+
-+		if (gr_handle_rawio(nd->dentry->d_inode)) {
-+			error = -EPERM;
-+			goto exit;
-+		}
-+
-+		if (!gr_acl_handle_open(nd->dentry, nd->mnt, flag)) {
-+			error = -EACCES;
-+			goto exit;
-+		}
-+
- 		goto ok;
- 	}
- 
-@@ -1751,6 +1785,23 @@ do_last:
- 	/*
- 	 * It already exists.
- 	 */
-+
-+	if (gr_handle_rawio(path.dentry->d_inode)) {
-+		mutex_unlock(&dir->d_inode->i_mutex);
-+		error = -EPERM;
-+		goto exit_dput;
-+	}
-+	if (!gr_acl_handle_open(path.dentry, nd->mnt, flag)) {
-+		mutex_unlock(&dir->d_inode->i_mutex);
-+		error = -EACCES;
-+		goto exit_dput;
-+	}
-+	if (gr_handle_fifo(path.dentry, nd->mnt, dir, flag, acc_mode)) {
-+		mutex_unlock(&dir->d_inode->i_mutex);
-+		error = -EACCES;
-+		goto exit_dput;
-+	}
-+
- 	mutex_unlock(&dir->d_inode->i_mutex);
- 	audit_inode(pathname, path.dentry->d_inode);
- 
-@@ -1806,6 +1857,13 @@ do_link:
- 	error = security_inode_follow_link(path.dentry, nd);
- 	if (error)
- 		goto exit_dput;
-+
-+	if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
-+				  path.dentry, nd->mnt)) {
-+		error = -EACCES;
-+		goto exit_dput;
-+	}
-+
- 	error = __do_follow_link(&path, nd);
- 	if (error) {
- 		/* Does someone understand code flow here? Or it is only
-@@ -1934,6 +1992,22 @@ asmlinkage long sys_mknodat(int dfd, con
- 	if (!IS_POSIXACL(nd.dentry->d_inode))
- 		mode &= ~current->fs->umask;
- 	if (!IS_ERR(dentry)) {
-+		if (gr_handle_chroot_mknod(dentry, nd.mnt, mode)) {
-+			error = -EPERM;
-+			dput(dentry);
-+			mutex_unlock(&nd.dentry->d_inode->i_mutex);
-+			path_release(&nd);
-+			goto out;
-+		}
-+
-+		if (!gr_acl_handle_mknod(dentry, nd.dentry, nd.mnt, mode)) {
-+			error = -EACCES;
-+			dput(dentry);
-+			mutex_unlock(&nd.dentry->d_inode->i_mutex);
-+			path_release(&nd);
-+			goto out;
-+		}
-+
- 		switch (mode & S_IFMT) {
- 		case 0: case S_IFREG:
- 			error = vfs_create(nd.dentry->d_inode,dentry,mode,&nd);
-@@ -1951,6 +2025,10 @@ asmlinkage long sys_mknodat(int dfd, con
- 		default:
- 			error = -EINVAL;
- 		}
-+
-+		if (!error)
-+			gr_handle_create(dentry, nd.mnt);
-+
- 		dput(dentry);
- 	}
- 	mutex_unlock(&nd.dentry->d_inode->i_mutex);
-@@ -2008,9 +2086,18 @@ asmlinkage long sys_mkdirat(int dfd, con
- 	if (IS_ERR(dentry))
- 		goto out_unlock;
- 
-+	if (!gr_acl_handle_mkdir(dentry, nd.dentry, nd.mnt)) {
-+		error = -EACCES;
-+		goto out_unlock_dput;
-+	}
-+
- 	if (!IS_POSIXACL(nd.dentry->d_inode))
- 		mode &= ~current->fs->umask;
- 	error = vfs_mkdir(nd.dentry->d_inode, dentry, mode);
-+
-+	if (!error)
-+		gr_handle_create(dentry, nd.mnt);
-+out_unlock_dput:
- 	dput(dentry);
- out_unlock:
- 	mutex_unlock(&nd.dentry->d_inode->i_mutex);
-@@ -2092,6 +2179,8 @@ static long do_rmdir(int dfd, const char
- 	char * name;
- 	struct dentry *dentry;
- 	struct nameidata nd;
-+	ino_t saved_ino = 0;
-+	dev_t saved_dev = 0;
- 
- 	name = getname(pathname);
- 	if(IS_ERR(name))
-@@ -2117,7 +2206,22 @@ static long do_rmdir(int dfd, const char
- 	error = PTR_ERR(dentry);
- 	if (IS_ERR(dentry))
- 		goto exit2;
-+
-+	if (dentry->d_inode != NULL) {
-+		if (dentry->d_inode->i_nlink <= 1) {
-+			saved_ino = dentry->d_inode->i_ino;
-+			saved_dev = dentry->d_inode->i_sb->s_dev;
-+		}
-+
-+		if (!gr_acl_handle_rmdir(dentry, nd.mnt)) {
-+			error = -EACCES;
-+			goto dput_exit2;
-+		}
-+	}
- 	error = vfs_rmdir(nd.dentry->d_inode, dentry);
-+	if (!error && (saved_dev || saved_ino))
-+		gr_handle_delete(saved_ino, saved_dev);
-+dput_exit2:
- 	dput(dentry);
- exit2:
- 	mutex_unlock(&nd.dentry->d_inode->i_mutex);
-@@ -2176,6 +2280,8 @@ static long do_unlinkat(int dfd, const c
- 	struct dentry *dentry;
- 	struct nameidata nd;
- 	struct inode *inode = NULL;
-+	ino_t saved_ino = 0;
-+	dev_t saved_dev = 0;
- 
- 	name = getname(pathname);
- 	if(IS_ERR(name))
-@@ -2191,13 +2297,26 @@ static long do_unlinkat(int dfd, const c
- 	dentry = lookup_hash(&nd);
- 	error = PTR_ERR(dentry);
- 	if (!IS_ERR(dentry)) {
-+		error = 0;
- 		/* Why not before? Because we want correct error value */
- 		if (nd.last.name[nd.last.len])
- 			goto slashes;
- 		inode = dentry->d_inode;
--		if (inode)
-+		if (inode) {
-+			if (inode->i_nlink <= 1) {
-+				saved_ino = inode->i_ino;
-+				saved_dev = inode->i_sb->s_dev;
-+			}
-+
-+			if (!gr_acl_handle_unlink(dentry, nd.mnt))
-+				error = -EACCES;
-+
- 			atomic_inc(&inode->i_count);
--		error = vfs_unlink(nd.dentry->d_inode, dentry);
-+		}
-+		if (!error)
-+			error = vfs_unlink(nd.dentry->d_inode, dentry);
-+		if (!error && (saved_ino || saved_dev))
-+			gr_handle_delete(saved_ino, saved_dev);
- 	exit2:
- 		dput(dentry);
- 	}
-@@ -2278,7 +2397,16 @@ asmlinkage long sys_symlinkat(const char
- 	if (IS_ERR(dentry))
- 		goto out_unlock;
- 
-+	if (!gr_acl_handle_symlink(dentry, nd.dentry, nd.mnt, from)) {
-+		error = -EACCES;
-+		goto out_dput_unlock;
-+	}
-+
- 	error = vfs_symlink(nd.dentry->d_inode, dentry, from, S_IALLUGO);
-+
-+	if (!error)
-+		gr_handle_create(dentry, nd.mnt);
-+out_dput_unlock:
- 	dput(dentry);
- out_unlock:
- 	mutex_unlock(&nd.dentry->d_inode->i_mutex);
-@@ -2373,7 +2501,25 @@ asmlinkage long sys_linkat(int olddfd, c
- 	error = PTR_ERR(new_dentry);
- 	if (IS_ERR(new_dentry))
- 		goto out_unlock;
-+
-+	if (gr_handle_hardlink(old_nd.dentry, old_nd.mnt,
-+			       old_nd.dentry->d_inode,
-+			       old_nd.dentry->d_inode->i_mode, to)) {
-+		error = -EACCES;
-+		goto out_unlock_dput;
-+	}
-+
-+	if (!gr_acl_handle_link(new_dentry, nd.dentry, nd.mnt,
-+				old_nd.dentry, old_nd.mnt, to)) {
-+		error = -EACCES;
-+		goto out_unlock_dput;
-+	}
-+
- 	error = vfs_link(old_nd.dentry, nd.dentry->d_inode, new_dentry);
-+
-+	if (!error)
-+		gr_handle_create(new_dentry, nd.mnt);
-+out_unlock_dput:
- 	dput(new_dentry);
- out_unlock:
- 	mutex_unlock(&nd.dentry->d_inode->i_mutex);
-@@ -2599,8 +2745,16 @@ static int do_rename(int olddfd, const c
- 	if (new_dentry == trap)
- 		goto exit5;
- 
--	error = vfs_rename(old_dir->d_inode, old_dentry,
-+	error = gr_acl_handle_rename(new_dentry, newnd.dentry, newnd.mnt,
-+				     old_dentry, old_dir->d_inode, oldnd.mnt,
-+				     newname);
-+
-+	if (!error)
-+		error = vfs_rename(old_dir->d_inode, old_dentry,
- 				   new_dir->d_inode, new_dentry);
-+	if (!error)
-+		gr_handle_rename(old_dir->d_inode, newnd.dentry->d_inode, old_dentry, 
-+				 new_dentry, oldnd.mnt, new_dentry->d_inode ? 1 : 0);
- exit5:
- 	dput(new_dentry);
- exit4:
---- a/fs/namespace.c
-+++ b/fs/namespace.c
-@@ -25,6 +25,7 @@
- #include <linux/security.h>
- #include <linux/mount.h>
- #include <linux/ramfs.h>
-+#include <linux/grsecurity.h>
- #include <asm/uaccess.h>
- #include <asm/unistd.h>
- #include "pnode.h"
-@@ -597,6 +598,8 @@ static int do_umount(struct vfsmount *mn
- 			DQUOT_OFF(sb);
- 			retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
- 			unlock_kernel();
-+
-+			gr_log_remount(mnt->mnt_devname, retval);
- 		}
- 		up_write(&sb->s_umount);
- 		return retval;
-@@ -617,6 +620,9 @@ static int do_umount(struct vfsmount *mn
- 		security_sb_umount_busy(mnt);
- 	up_write(&namespace_sem);
- 	release_mounts(&umount_list);
-+
-+	gr_log_unmount(mnt->mnt_devname, retval);
-+
- 	return retval;
- }
- 
-@@ -1422,6 +1428,11 @@ long do_mount(char *dev_name, char *dir_
- 	if (retval)
- 		goto dput_out;
- 
-+	if (gr_handle_chroot_mount(nd.dentry, nd.mnt, dev_name)) {
-+		retval = -EPERM;
-+		goto dput_out;
-+	}
-+
- 	if (flags & MS_REMOUNT)
- 		retval = do_remount(&nd, flags & ~MS_REMOUNT, mnt_flags,
- 				    data_page);
-@@ -1436,6 +1447,9 @@ long do_mount(char *dev_name, char *dir_
- 				      dev_name, data_page);
- dput_out:
- 	path_release(&nd);
-+
-+	gr_log_mount(dev_name, dir_name, retval);
-+
- 	return retval;
- }
- 
-@@ -1673,6 +1687,9 @@ asmlinkage long sys_pivot_root(const cha
- 	if (!capable(CAP_SYS_ADMIN))
- 		return -EPERM;
- 
-+	if (gr_handle_chroot_pivot())
-+		return -EPERM;
-+
- 	lock_kernel();
- 
- 	error = __user_walk(new_root, LOOKUP_FOLLOW | LOOKUP_DIRECTORY,
---- a/fs/nfs/callback_xdr.c
-+++ b/fs/nfs/callback_xdr.c
-@@ -139,7 +139,7 @@ static __be32 decode_compound_hdr_arg(st
- 	if (unlikely(status != 0))
- 		return status;
- 	/* We do not like overly long tags! */
--	if (hdr->taglen > CB_OP_TAGLEN_MAXSZ-12 || hdr->taglen < 0) {
-+	if (hdr->taglen > CB_OP_TAGLEN_MAXSZ-12) {
- 		printk("NFSv4 CALLBACK %s: client sent tag of length %u\n",
- 				__FUNCTION__, hdr->taglen);
- 		return htonl(NFS4ERR_RESOURCE);
---- a/fs/nfsd/export.c
-+++ b/fs/nfsd/export.c
-@@ -478,7 +478,7 @@ static int secinfo_parse(char **mesg, ch
- 		 * probably discover the problem when someone fails to
- 		 * authenticate.
- 		 */
--		if (f->pseudoflavor < 0)
-+		if ((s32)f->pseudoflavor < 0)
- 			return -EINVAL;
- 		err = get_int(mesg, &f->flags);
- 		if (err)
---- a/fs/nfsd/nfs4state.c
-+++ b/fs/nfsd/nfs4state.c
-@@ -1248,7 +1248,7 @@ static int access_valid(u32 x)
- 
- static int deny_valid(u32 x)
- {
--	return (x >= 0 && x < 5);
-+	return (x < 5);
- }
- 
- static void
---- a/fs/nfs/nfs4proc.c
-+++ b/fs/nfs/nfs4proc.c
-@@ -657,7 +657,7 @@ static int _nfs4_do_open_reclaim(struct 
- static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
- {
- 	struct nfs_server *server = NFS_SERVER(state->inode);
--	struct nfs4_exception exception = { };
-+	struct nfs4_exception exception = {0, 0};
- 	int err;
- 	do {
- 		err = _nfs4_do_open_reclaim(ctx, state);
-@@ -699,7 +699,7 @@ static int _nfs4_open_delegation_recall(
- 
- int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
- {
--	struct nfs4_exception exception = { };
-+	struct nfs4_exception exception = {0, 0};
- 	struct nfs_server *server = NFS_SERVER(state->inode);
- 	int err;
- 	do {
-@@ -1020,7 +1020,7 @@ static int _nfs4_open_expired(struct nfs
- static inline int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
- {
- 	struct nfs_server *server = NFS_SERVER(state->inode);
--	struct nfs4_exception exception = { };
-+	struct nfs4_exception exception = {0, 0};
- 	int err;
- 
- 	do {
-@@ -1122,7 +1122,7 @@ out_err:
- 
- static struct nfs4_state *nfs4_do_open(struct inode *dir, struct path *path, int flags, struct iattr *sattr, struct rpc_cred *cred)
- {
--	struct nfs4_exception exception = { };
-+	struct nfs4_exception exception = {0, 0};
- 	struct nfs4_state *res;
- 	int status;
- 
-@@ -1211,7 +1211,7 @@ static int nfs4_do_setattr(struct inode 
-                 struct iattr *sattr, struct nfs4_state *state)
- {
- 	struct nfs_server *server = NFS_SERVER(inode);
--	struct nfs4_exception exception = { };
-+	struct nfs4_exception exception = {0, 0};
- 	int err;
- 	do {
- 		err = nfs4_handle_exception(server,
-@@ -1504,7 +1504,7 @@ static int _nfs4_server_capabilities(str
- 
- int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
- {
--	struct nfs4_exception exception = { };
-+	struct nfs4_exception exception = {0, 0};
- 	int err;
- 	do {
- 		err = nfs4_handle_exception(server,
-@@ -1537,7 +1537,7 @@ static int _nfs4_lookup_root(struct nfs_
- static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
- 		struct nfs_fsinfo *info)
- {
--	struct nfs4_exception exception = { };
-+	struct nfs4_exception exception = {0, 0};
- 	int err;
- 	do {
- 		err = nfs4_handle_exception(server,
-@@ -1626,7 +1626,7 @@ static int _nfs4_proc_getattr(struct nfs
- 
- static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
- {
--	struct nfs4_exception exception = { };
-+	struct nfs4_exception exception = {0, 0};
- 	int err;
- 	do {
- 		err = nfs4_handle_exception(server,
-@@ -1716,7 +1716,7 @@ static int nfs4_proc_lookupfh(struct nfs
- 			      struct qstr *name, struct nfs_fh *fhandle,
- 			      struct nfs_fattr *fattr)
- {
--	struct nfs4_exception exception = { };
-+	struct nfs4_exception exception = {0, 0};
- 	int err;
- 	do {
- 		err = _nfs4_proc_lookupfh(server, dirfh, name, fhandle, fattr);
-@@ -1745,7 +1745,7 @@ static int _nfs4_proc_lookup(struct inod
- 
- static int nfs4_proc_lookup(struct inode *dir, struct qstr *name, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
- {
--	struct nfs4_exception exception = { };
-+	struct nfs4_exception exception = {0, 0};
- 	int err;
- 	do {
- 		err = nfs4_handle_exception(NFS_SERVER(dir),
-@@ -1801,7 +1801,7 @@ static int _nfs4_proc_access(struct inod
- 
- static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
- {
--	struct nfs4_exception exception = { };
-+	struct nfs4_exception exception = {0, 0};
- 	int err;
- 	do {
- 		err = nfs4_handle_exception(NFS_SERVER(inode),
-@@ -1856,7 +1856,7 @@ static int _nfs4_proc_readlink(struct in
- static int nfs4_proc_readlink(struct inode *inode, struct page *page,
- 		unsigned int pgbase, unsigned int pglen)
- {
--	struct nfs4_exception exception = { };
-+	struct nfs4_exception exception = {0, 0};
- 	int err;
- 	do {
- 		err = nfs4_handle_exception(NFS_SERVER(inode),
-@@ -1950,7 +1950,7 @@ static int _nfs4_proc_remove(struct inod
- 
- static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
- {
--	struct nfs4_exception exception = { };
-+	struct nfs4_exception exception = {0, 0};
- 	int err;
- 	do {
- 		err = nfs4_handle_exception(NFS_SERVER(dir),
-@@ -2022,7 +2022,7 @@ static int _nfs4_proc_rename(struct inod
- static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
- 		struct inode *new_dir, struct qstr *new_name)
- {
--	struct nfs4_exception exception = { };
-+	struct nfs4_exception exception = {0, 0};
- 	int err;
- 	do {
- 		err = nfs4_handle_exception(NFS_SERVER(old_dir),
-@@ -2069,7 +2069,7 @@ static int _nfs4_proc_link(struct inode 
- 
- static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
- {
--	struct nfs4_exception exception = { };
-+	struct nfs4_exception exception = {0, 0};
- 	int err;
- 	do {
- 		err = nfs4_handle_exception(NFS_SERVER(inode),
-@@ -2126,7 +2126,7 @@ static int _nfs4_proc_symlink(struct ino
- static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
- 		struct page *page, unsigned int len, struct iattr *sattr)
- {
--	struct nfs4_exception exception = { };
-+	struct nfs4_exception exception = {0, 0};
- 	int err;
- 	do {
- 		err = nfs4_handle_exception(NFS_SERVER(dir),
-@@ -2179,7 +2179,7 @@ static int _nfs4_proc_mkdir(struct inode
- static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
- 		struct iattr *sattr)
- {
--	struct nfs4_exception exception = { };
-+	struct nfs4_exception exception = {0, 0};
- 	int err;
- 	do {
- 		err = nfs4_handle_exception(NFS_SERVER(dir),
-@@ -2225,7 +2225,7 @@ static int _nfs4_proc_readdir(struct den
- static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
-                   u64 cookie, struct page *page, unsigned int count, int plus)
- {
--	struct nfs4_exception exception = { };
-+	struct nfs4_exception exception = {0, 0};
- 	int err;
- 	do {
- 		err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode),
-@@ -2295,7 +2295,7 @@ static int _nfs4_proc_mknod(struct inode
- static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
- 		struct iattr *sattr, dev_t rdev)
- {
--	struct nfs4_exception exception = { };
-+	struct nfs4_exception exception = {0, 0};
- 	int err;
- 	do {
- 		err = nfs4_handle_exception(NFS_SERVER(dir),
-@@ -2324,7 +2324,7 @@ static int _nfs4_proc_statfs(struct nfs_
- 
- static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
- {
--	struct nfs4_exception exception = { };
-+	struct nfs4_exception exception = {0, 0};
- 	int err;
- 	do {
- 		err = nfs4_handle_exception(server,
-@@ -2352,7 +2352,7 @@ static int _nfs4_do_fsinfo(struct nfs_se
- 
- static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
- {
--	struct nfs4_exception exception = { };
-+	struct nfs4_exception exception = {0, 0};
- 	int err;
- 
- 	do {
-@@ -2395,7 +2395,7 @@ static int _nfs4_proc_pathconf(struct nf
- static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
- 		struct nfs_pathconf *pathconf)
- {
--	struct nfs4_exception exception = { };
-+	struct nfs4_exception exception = {0, 0};
- 	int err;
- 
- 	do {
-@@ -2714,7 +2714,7 @@ out_free:
- 
- static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
- {
--	struct nfs4_exception exception = { };
-+	struct nfs4_exception exception = {0, 0};
- 	ssize_t ret;
- 	do {
- 		ret = __nfs4_get_acl_uncached(inode, buf, buflen);
-@@ -2768,7 +2768,7 @@ static int __nfs4_proc_set_acl(struct in
- 
- static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
- {
--	struct nfs4_exception exception = { };
-+	struct nfs4_exception exception = {0, 0};
- 	int err;
- 	do {
- 		err = nfs4_handle_exception(NFS_SERVER(inode),
-@@ -3065,7 +3065,7 @@ static int _nfs4_proc_delegreturn(struct
- int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid)
- {
- 	struct nfs_server *server = NFS_SERVER(inode);
--	struct nfs4_exception exception = { };
-+	struct nfs4_exception exception = {0, 0};
- 	int err;
- 	do {
- 		err = _nfs4_proc_delegreturn(inode, cred, stateid);
-@@ -3140,7 +3140,7 @@ out:
- 
- static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
- {
--	struct nfs4_exception exception = { };
-+	struct nfs4_exception exception = {0, 0};
- 	int err;
- 
- 	do {
-@@ -3474,7 +3474,7 @@ static int _nfs4_do_setlk(struct nfs4_st
- static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
- {
- 	struct nfs_server *server = NFS_SERVER(state->inode);
--	struct nfs4_exception exception = { };
-+	struct nfs4_exception exception = {0, 0};
- 	int err;
- 
- 	do {
-@@ -3492,7 +3492,7 @@ static int nfs4_lock_reclaim(struct nfs4
- static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
- {
- 	struct nfs_server *server = NFS_SERVER(state->inode);
--	struct nfs4_exception exception = { };
-+	struct nfs4_exception exception = {0, 0};
- 	int err;
- 
- 	err = nfs4_set_lock_state(state, request);
-@@ -3553,7 +3553,7 @@ out:
- 
- static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
- {
--	struct nfs4_exception exception = { };
-+	struct nfs4_exception exception = {0, 0};
- 	int err;
- 
- 	do {
-@@ -3603,7 +3603,7 @@ nfs4_proc_lock(struct file *filp, int cm
- int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
- {
- 	struct nfs_server *server = NFS_SERVER(state->inode);
--	struct nfs4_exception exception = { };
-+	struct nfs4_exception exception = {0, 0};
- 	int err;
- 
- 	err = nfs4_set_lock_state(state, fl);
---- a/fs/nls/nls_base.c
-+++ b/fs/nls/nls_base.c
-@@ -42,7 +42,7 @@ static struct utf8_table utf8_table[] =
-     {0xF8,  0xF0,   3*6,    0x1FFFFF,       0x10000,   /* 4 byte sequence */},
-     {0xFC,  0xF8,   4*6,    0x3FFFFFF,      0x200000,  /* 5 byte sequence */},
-     {0xFE,  0xFC,   5*6,    0x7FFFFFFF,     0x4000000, /* 6 byte sequence */},
--    {0,						       /* end of table    */}
-+    {0, 0, 0, 0, 0,				       /* end of table    */}
- };
- 
- int
---- a/fs/ntfs/file.c
-+++ b/fs/ntfs/file.c
-@@ -2295,6 +2295,6 @@ const struct inode_operations ntfs_file_
- #endif /* NTFS_RW */
- };
- 
--const struct file_operations ntfs_empty_file_ops = {};
-+const struct file_operations ntfs_empty_file_ops;
- 
--const struct inode_operations ntfs_empty_inode_ops = {};
-+const struct inode_operations ntfs_empty_inode_ops;
---- a/fs/open.c
-+++ b/fs/open.c
-@@ -27,6 +27,7 @@
- #include <linux/rcupdate.h>
- #include <linux/audit.h>
- #include <linux/falloc.h>
-+#include <linux/grsecurity.h>
- 
- int vfs_statfs(struct dentry *dentry, struct kstatfs *buf)
- {
-@@ -204,6 +205,9 @@ int do_truncate(struct dentry *dentry, l
- 	if (length < 0)
- 		return -EINVAL;
- 
-+	if (filp && !gr_acl_handle_truncate(dentry, filp->f_vfsmnt))
-+		return -EACCES;
-+
- 	newattrs.ia_size = length;
- 	newattrs.ia_valid = ATTR_SIZE | time_attrs;
- 	if (filp) {
-@@ -461,6 +465,9 @@ asmlinkage long sys_faccessat(int dfd, c
- 	if(IS_RDONLY(nd.dentry->d_inode))
- 		res = -EROFS;
- 
-+	if (!res && !gr_acl_handle_access(nd.dentry, nd.mnt, mode))
-+		res = -EACCES;
-+
- out_path_release:
- 	path_release(&nd);
- out:
-@@ -490,6 +497,8 @@ asmlinkage long sys_chdir(const char __u
- 	if (error)
- 		goto dput_and_out;
- 
-+	gr_log_chdir(nd.dentry, nd.mnt);
-+
- 	set_fs_pwd(current->fs, nd.mnt, nd.dentry);
- 
- dput_and_out:
-@@ -520,6 +529,13 @@ asmlinkage long sys_fchdir(unsigned int 
- 		goto out_putf;
- 
- 	error = file_permission(file, MAY_EXEC);
-+
-+	if (!error && !gr_chroot_fchdir(dentry, mnt))
-+		error = -EPERM;
-+
-+	if (!error)
-+		gr_log_chdir(dentry, mnt);
-+
- 	if (!error)
- 		set_fs_pwd(current->fs, mnt, dentry);
- out_putf:
-@@ -545,8 +561,16 @@ asmlinkage long sys_chroot(const char __
- 	if (!capable(CAP_SYS_CHROOT))
- 		goto dput_and_out;
- 
-+	if (gr_handle_chroot_chroot(nd.dentry, nd.mnt))
-+		goto dput_and_out;
-+
- 	set_fs_root(current->fs, nd.mnt, nd.dentry);
- 	set_fs_altroot();
-+
-+	gr_handle_chroot_caps(current);
-+
-+	gr_handle_chroot_chdir(nd.dentry, nd.mnt);
-+
- 	error = 0;
- dput_and_out:
- 	path_release(&nd);
-@@ -577,9 +601,22 @@ asmlinkage long sys_fchmod(unsigned int 
- 	err = -EPERM;
- 	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
- 		goto out_putf;
-+
-+	if (!gr_acl_handle_fchmod(dentry, file->f_vfsmnt, mode)) {
-+		err = -EACCES;
-+		goto out_putf;
-+	}
-+
- 	mutex_lock(&inode->i_mutex);
- 	if (mode == (mode_t) -1)
- 		mode = inode->i_mode;
-+
-+	if (gr_handle_chroot_chmod(dentry, file->f_vfsmnt, mode)) {
-+		err = -EPERM;
-+		mutex_unlock(&inode->i_mutex);
-+		goto out_putf;
-+	}
-+
- 	newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
- 	newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
- 	err = notify_change(dentry, &newattrs);
-@@ -612,9 +649,21 @@ asmlinkage long sys_fchmodat(int dfd, co
- 	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
- 		goto dput_and_out;
- 
-+	if (!gr_acl_handle_chmod(nd.dentry, nd.mnt, mode)) {
-+		error = -EACCES;
-+		goto dput_and_out;
-+	};
-+
- 	mutex_lock(&inode->i_mutex);
- 	if (mode == (mode_t) -1)
- 		mode = inode->i_mode;
-+
-+	if (gr_handle_chroot_chmod(nd.dentry, nd.mnt, mode)) {
-+		error = -EACCES;
-+		mutex_unlock(&inode->i_mutex);
-+		goto dput_and_out;
-+	}
-+
- 	newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
- 	newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
- 	error = notify_change(nd.dentry, &newattrs);
-@@ -631,7 +680,7 @@ asmlinkage long sys_chmod(const char __u
- 	return sys_fchmodat(AT_FDCWD, filename, mode);
- }
- 
--static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
-+static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
- {
- 	struct inode * inode;
- 	int error;
-@@ -648,6 +697,12 @@ static int chown_common(struct dentry * 
- 	error = -EPERM;
- 	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
- 		goto out;
-+
-+	if (!gr_acl_handle_chown(dentry, mnt)) {
-+		error = -EACCES;
-+		goto out;
-+	}
-+
- 	newattrs.ia_valid =  ATTR_CTIME;
- 	if (user != (uid_t) -1) {
- 		newattrs.ia_valid |= ATTR_UID;
-@@ -674,7 +729,7 @@ asmlinkage long sys_chown(const char __u
- 	error = user_path_walk(filename, &nd);
- 	if (error)
- 		goto out;
--	error = chown_common(nd.dentry, user, group);
-+	error = chown_common(nd.dentry, user, group, nd.mnt);
- 	path_release(&nd);
- out:
- 	return error;
-@@ -694,7 +749,7 @@ asmlinkage long sys_fchownat(int dfd, co
- 	error = __user_walk_fd(dfd, filename, follow, &nd);
- 	if (error)
- 		goto out;
--	error = chown_common(nd.dentry, user, group);
-+	error = chown_common(nd.dentry, user, group, nd.mnt);
- 	path_release(&nd);
- out:
- 	return error;
-@@ -708,7 +763,7 @@ asmlinkage long sys_lchown(const char __
- 	error = user_path_walk_link(filename, &nd);
- 	if (error)
- 		goto out;
--	error = chown_common(nd.dentry, user, group);
-+	error = chown_common(nd.dentry, user, group, nd.mnt);
- 	path_release(&nd);
- out:
- 	return error;
-@@ -727,7 +782,7 @@ asmlinkage long sys_fchown(unsigned int 
- 
- 	dentry = file->f_path.dentry;
- 	audit_inode(NULL, dentry->d_inode);
--	error = chown_common(dentry, user, group);
-+	error = chown_common(dentry, user, group, file->f_vfsmnt);
- 	fput(file);
- out:
- 	return error;
-@@ -934,6 +989,7 @@ repeat:
- 	 * N.B. For clone tasks sharing a files structure, this test
- 	 * will limit the total number of files that can be opened.
- 	 */
-+	gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
- 	if (fd >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
- 		goto out;
- 
---- a/fs/partitions/efi.c
-+++ b/fs/partitions/efi.c
-@@ -99,7 +99,7 @@
- #ifdef EFI_DEBUG
- #define Dprintk(x...) printk(KERN_DEBUG x)
- #else
--#define Dprintk(x...)
-+#define Dprintk(x...) do {} while (0)
- #endif
- 
- /* This allows a kernel command line option 'gpt' to override
---- a/fs/pipe.c
-+++ b/fs/pipe.c
-@@ -888,7 +888,7 @@ void free_pipe_info(struct inode *inode)
- 	inode->i_pipe = NULL;
- }
- 
--static struct vfsmount *pipe_mnt __read_mostly;
-+struct vfsmount *pipe_mnt __read_mostly;
- static int pipefs_delete_dentry(struct dentry *dentry)
- {
- 	/*
---- a/fs/proc/array.c
-+++ b/fs/proc/array.c
-@@ -298,6 +298,21 @@ static inline char *task_context_switch_
- 			    p->nivcsw);
- }
- 
-+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
-+static inline char *task_pax(struct task_struct *p, char *buffer)
-+{
-+	if (p->mm)
-+		return buffer + sprintf(buffer, "PaX:\t%c%c%c%c%c\n",
-+				p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
-+				p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
-+				p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
-+				p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
-+				p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
-+	else
-+		return buffer + sprintf(buffer, "PaX:\t-----\n");
-+}
-+#endif
-+
- int proc_pid_status(struct task_struct *task, char *buffer)
- {
- 	char *orig = buffer;
-@@ -317,6 +332,11 @@ int proc_pid_status(struct task_struct *
- 	buffer = task_show_regs(task, buffer);
- #endif
- 	buffer = task_context_switch_counts(task, buffer);
-+
-+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
-+	buffer = task_pax(task, buffer);
-+#endif
-+
- 	return buffer - orig;
- }
- 
-@@ -372,6 +392,12 @@ static cputime_t task_stime(struct task_
- }
- #endif
- 
-+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
-+			    (_mm->pax_flags & MF_PAX_RANDMMAP || \
-+			     _mm->pax_flags & MF_PAX_SEGMEXEC))
-+#endif
-+
- static int do_task_stat(struct task_struct *task, char *buffer, int whole)
- {
- 	unsigned long vsize, eip, esp, wchan = ~0UL;
-@@ -458,6 +484,19 @@ static int do_task_stat(struct task_stru
- 		stime = task_stime(task);
- 	}
- 
-+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+	if (PAX_RAND_FLAGS(mm)) {
-+		eip = 0;
-+		esp = 0;
-+		wchan = 0;
-+	}
-+#endif
-+#ifdef CONFIG_GRKERNSEC_HIDESYM
-+	wchan = 0;
-+	eip =0;
-+	esp =0;
-+#endif
-+
- 	/* scale priority and nice values from timeslices to -20..20 */
- 	/* to make it look like a "normal" Unix priority/nice value  */
- 	priority = task_prio(task);
-@@ -498,9 +537,15 @@ static int do_task_stat(struct task_stru
- 		vsize,
- 		mm ? get_mm_rss(mm) : 0,
- 		rsslim,
-+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+		PAX_RAND_FLAGS(mm) ? 1 : (mm ? mm->start_code : 0),
-+		PAX_RAND_FLAGS(mm) ? 1 : (mm ? mm->end_code : 0),
-+		PAX_RAND_FLAGS(mm) ? 0 : (mm ? mm->start_stack : 0),
-+#else
- 		mm ? mm->start_code : 0,
- 		mm ? mm->end_code : 0,
- 		mm ? mm->start_stack : 0,
-+#endif
- 		esp,
- 		eip,
- 		/* The signal information here is obsolete.
-@@ -547,3 +592,14 @@ int proc_pid_statm(struct task_struct *t
- 	return sprintf(buffer, "%d %d %d %d %d %d %d\n",
- 		       size, resident, shared, text, lib, data, 0);
- }
-+
-+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
-+int proc_pid_ipaddr(struct task_struct *task, char * buffer)
-+{
-+	int len;
-+
-+	len = sprintf(buffer, "%u.%u.%u.%u\n", NIPQUAD(task->signal->curr_ip));
-+	return len;
-+}
-+#endif
-+
---- a/fs/proc/base.c
-+++ b/fs/proc/base.c
-@@ -73,6 +73,7 @@
- #include <linux/nsproxy.h>
- #include <linux/oom.h>
- #include <linux/elf.h>
-+#include <linux/grsecurity.h>
- #include "internal.h"
- 
- /* NOTE:
-@@ -123,7 +124,7 @@ struct pid_entry {
- 		NULL, &proc_info_file_operations,	\
- 		{ .proc_read = &proc_##OTYPE } )
- 
--int maps_protect;
-+int maps_protect = 1;
- EXPORT_SYMBOL(maps_protect);
- 
- static struct fs_struct *get_fs_struct(struct task_struct *task)
-@@ -197,7 +198,7 @@ static int proc_root_link(struct inode *
- 	(task->parent == current && \
- 	(task->ptrace & PT_PTRACED) && \
- 	 (task->state == TASK_STOPPED || task->state == TASK_TRACED) && \
--	 security_ptrace(current,task) == 0))
-+	 security_ptrace(current,task) == 0 && !gr_handle_proc_ptrace(task)))
- 
- static int proc_pid_environ(struct task_struct *task, char * buffer)
- {
-@@ -263,9 +264,9 @@ static int proc_pid_auxv(struct task_str
- 	struct mm_struct *mm = get_task_mm(task);
- 	if (mm) {
- 		unsigned int nwords = 0;
--		do
-+		do {
- 			nwords += 2;
--		while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
-+		} while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
- 		res = nwords * sizeof(mm->saved_auxv[0]);
- 		if (res > PAGE_SIZE)
- 			res = PAGE_SIZE;
-@@ -338,6 +339,8 @@ static int proc_fd_access_allowed(struct
- 	task = get_proc_task(inode);
- 	if (task) {
- 		allowed = ptrace_may_attach(task);
-+		if (allowed != 0)
-+			allowed = !gr_acl_handle_procpidmem(task);
- 		put_task_struct(task);
- 	}
- 	return allowed;
-@@ -528,7 +531,7 @@ static ssize_t mem_read(struct file * fi
- 	if (!task)
- 		goto out_no_task;
- 
--	if (!MAY_PTRACE(task) || !ptrace_may_attach(task))
-+	if (!MAY_PTRACE(task) || !ptrace_may_attach(task) || gr_acl_handle_procpidmem(task))
- 		goto out;
- 
- 	ret = -ENOMEM;
-@@ -598,7 +601,7 @@ static ssize_t mem_write(struct file * f
- 	if (!task)
- 		goto out_no_task;
- 
--	if (!MAY_PTRACE(task) || !ptrace_may_attach(task))
-+	if (!MAY_PTRACE(task) || !ptrace_may_attach(task) || gr_acl_handle_procpidmem(task))
- 		goto out;
- 
- 	copied = -ENOMEM;
-@@ -1050,7 +1053,11 @@ static struct inode *proc_pid_make_inode
- 	inode->i_gid = 0;
- 	if (task_dumpable(task)) {
- 		inode->i_uid = task->euid;
-+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
-+		inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
-+#else
- 		inode->i_gid = task->egid;
-+#endif
- 	}
- 	security_task_to_inode(task, inode);
- 
-@@ -1066,17 +1073,45 @@ static int pid_getattr(struct vfsmount *
- {
- 	struct inode *inode = dentry->d_inode;
- 	struct task_struct *task;
-+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
-+	struct task_struct *tmp = current;
-+#endif
-+
- 	generic_fillattr(inode, stat);
- 
- 	rcu_read_lock();
- 	stat->uid = 0;
- 	stat->gid = 0;
- 	task = pid_task(proc_pid(inode), PIDTYPE_PID);
--	if (task) {
-+
-+	if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
-+		rcu_read_unlock();
-+		return -ENOENT;
-+	}
-+
-+
-+	if (task
-+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
-+	    && (!tmp->uid || (tmp->uid == task->uid)
-+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
-+	    || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
-+#endif
-+	    )
-+#endif
-+	) {
- 		if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
-+#ifdef CONFIG_GRKERNSEC_PROC_USER
-+		    (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
-+#elif CONFIG_GRKERNSEC_PROC_USERGROUP
-+		    (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
-+#endif
- 		    task_dumpable(task)) {
- 			stat->uid = task->euid;
-+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
-+			stat->gid = CONFIG_GRKERNSEC_PROC_GID;
-+#else
- 			stat->gid = task->egid;
-+#endif
- 		}
- 	}
- 	rcu_read_unlock();
-@@ -1104,11 +1139,21 @@ static int pid_revalidate(struct dentry 
- {
- 	struct inode *inode = dentry->d_inode;
- 	struct task_struct *task = get_proc_task(inode);
-+
- 	if (task) {
- 		if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
-+#ifdef CONFIG_GRKERNSEC_PROC_USER
-+		    (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
-+#elif CONFIG_GRKERNSEC_PROC_USERGROUP
-+		    (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
-+#endif
- 		    task_dumpable(task)) {
- 			inode->i_uid = task->euid;
-+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
-+			inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
-+#else
- 			inode->i_gid = task->egid;
-+#endif
- 		} else {
- 			inode->i_uid = 0;
- 			inode->i_gid = 0;
-@@ -1118,6 +1163,7 @@ static int pid_revalidate(struct dentry 
- 		put_task_struct(task);
- 		return 1;
- 	}
-+out:
- 	d_drop(dentry);
- 	return 0;
- }
-@@ -1374,6 +1420,9 @@ static struct dentry *proc_lookupfd_comm
- 	if (fd == ~0U)
- 		goto out;
- 
-+	if (gr_acl_handle_procpidmem(task))
-+		goto out;
-+
- 	result = instantiate(dir, dentry, task, &fd);
- out:
- 	put_task_struct(task);
-@@ -1410,6 +1459,8 @@ static int proc_readfd_common(struct fil
- 				goto out;
- 			filp->f_pos++;
- 		default:
-+			if (gr_acl_handle_procpidmem(p))
-+				goto out;
- 			files = get_files_struct(p);
- 			if (!files)
- 				goto out;
-@@ -1598,6 +1649,9 @@ static struct dentry *proc_pident_lookup
- 	if (!task)
- 		goto out_no_task;
- 
-+	if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
-+		goto out;
-+
- 	/*
- 	 * Yes, it does not scale. And it should not. Don't add
- 	 * new entries into /proc/<tgid>/ without very good reasons.
-@@ -1643,6 +1697,9 @@ static int proc_pident_readdir(struct fi
- 	if (!task)
- 		goto out_no_task;
- 
-+	if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
-+		goto out;
-+
- 	ret = 0;
- 	pid = task->pid;
- 	i = filp->f_pos;
-@@ -1998,6 +2055,9 @@ static struct dentry *proc_base_lookup(s
- 	if (p > last)
- 		goto out;
- 
-+	if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
-+		goto out;
-+
- 	error = proc_base_instantiate(dir, dentry, task, p);
- 
- out:
-@@ -2097,6 +2157,9 @@ static const struct pid_entry tgid_base_
- #ifdef CONFIG_TASK_IO_ACCOUNTING
- 	INF("io",	S_IRUGO, pid_io_accounting),
- #endif
-+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
-+	INF("ipaddr",	  S_IRUSR, pid_ipaddr),
-+#endif
- };
- 
- static int proc_tgid_base_readdir(struct file * filp,
-@@ -2200,7 +2263,14 @@ static struct dentry *proc_pid_instantia
- 	if (!inode)
- 		goto out;
- 
-+#ifdef CONFIG_GRKERNSEC_PROC_USER
-+	inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
-+#elif CONFIG_GRKERNSEC_PROC_USERGROUP
-+	inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
-+	inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
-+#else
- 	inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
-+#endif
- 	inode->i_op = &proc_tgid_base_inode_operations;
- 	inode->i_fop = &proc_tgid_base_operations;
- 	inode->i_flags|=S_IMMUTABLE;
-@@ -2241,7 +2311,11 @@ struct dentry *proc_pid_lookup(struct in
- 	if (!task)
- 		goto out;
- 
-+	if (gr_check_hidden_task(task))
-+		goto out_put_task;
-+
- 	result = proc_pid_instantiate(dir, dentry, task, NULL);
-+out_put_task:
- 	put_task_struct(task);
- out:
- 	return result;
-@@ -2299,6 +2373,9 @@ int proc_pid_readdir(struct file * filp,
- {
- 	unsigned int nr = filp->f_pos - FIRST_PROCESS_ENTRY;
- 	struct task_struct *reaper = get_proc_task(filp->f_path.dentry->d_inode);
-+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
-+	struct task_struct *tmp = current;
-+#endif
- 	struct task_struct *task;
- 	int tgid;
- 
-@@ -2316,6 +2393,18 @@ int proc_pid_readdir(struct file * filp,
- 	     task;
- 	     put_task_struct(task), task = next_tgid(tgid + 1)) {
- 		tgid = task->pid;
-+
-+		if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task)
-+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
-+		    || (tmp->uid && (task->uid != tmp->uid)
-+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
-+		        && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
-+#endif
-+			)
-+#endif
-+		)
-+			continue;
-+
- 		filp->f_pos = tgid + TGID_OFFSET;
- 		if (proc_pid_fill_cache(filp, dirent, filldir, task, tgid) < 0) {
- 			put_task_struct(task);
---- a/fs/proc/inode.c
-+++ b/fs/proc/inode.c
-@@ -418,7 +418,11 @@ struct inode *proc_get_inode(struct supe
- 		if (de->mode) {
- 			inode->i_mode = de->mode;
- 			inode->i_uid = de->uid;
-+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
-+			inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
-+#else
- 			inode->i_gid = de->gid;
-+#endif
- 		}
- 		if (de->size)
- 			inode->i_size = de->size;
---- a/fs/proc/internal.h
-+++ b/fs/proc/internal.h
-@@ -45,6 +45,9 @@ extern int proc_tid_stat(struct task_str
- extern int proc_tgid_stat(struct task_struct *, char *);
- extern int proc_pid_status(struct task_struct *, char *);
- extern int proc_pid_statm(struct task_struct *, char *);
-+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
-+extern int proc_pid_ipaddr(struct task_struct*,char*);
-+#endif
- 
- extern const struct file_operations proc_maps_operations;
- extern const struct file_operations proc_numa_maps_operations;
---- a/fs/proc/proc_misc.c
-+++ b/fs/proc/proc_misc.c
-@@ -668,6 +668,8 @@ void create_seq_entry(char *name, mode_t
- 
- void __init proc_misc_init(void)
- {
-+	int gr_mode = 0;
-+
- 	static struct {
- 		char *name;
- 		int (*read_proc)(char*,char**,off_t,int,int*,void*);
-@@ -683,7 +685,9 @@ void __init proc_misc_init(void)
- 		{"stram",	stram_read_proc},
- #endif
- 		{"filesystems",	filesystems_read_proc},
-+#ifndef CONFIG_GRKERNSEC_PROC_ADD
- 		{"cmdline",	cmdline_read_proc},
-+#endif
- 		{"locks",	locks_read_proc},
- 		{"execdomains",	execdomains_read_proc},
- 		{NULL,}
-@@ -691,6 +695,15 @@ void __init proc_misc_init(void)
- 	for (p = simple_ones; p->name; p++)
- 		create_proc_read_entry(p->name, 0, NULL, p->read_proc, NULL);
- 
-+#ifdef CONFIG_GRKERNSEC_PROC_USER
-+	gr_mode = S_IRUSR;
-+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
-+	gr_mode = S_IRUSR | S_IRGRP;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_PROC_ADD
-+	create_proc_read_entry("cmdline", gr_mode, NULL, &cmdline_read_proc, NULL);
-+#endif
-+
- 	proc_symlink("mounts", NULL, "self/mounts");
- 
- 	/* And now for trickier ones */
-@@ -702,7 +715,11 @@ void __init proc_misc_init(void)
- 			entry->proc_fops = &proc_kmsg_operations;
- 	}
- #endif
-+#ifdef CONFIG_GRKERNSEC_PROC_ADD
-+	create_seq_entry("devices", gr_mode, &proc_devinfo_operations);
-+#else
- 	create_seq_entry("devices", 0, &proc_devinfo_operations);
-+#endif
- 	create_seq_entry("cpuinfo", 0, &proc_cpuinfo_operations);
- #ifdef CONFIG_BLOCK
- 	create_seq_entry("partitions", 0, &proc_partitions_operations);
-@@ -710,7 +727,11 @@ void __init proc_misc_init(void)
- 	create_seq_entry("stat", 0, &proc_stat_operations);
- 	create_seq_entry("interrupts", 0, &proc_interrupts_operations);
- #ifdef CONFIG_SLAB
-+#ifdef CONFIG_GRKERNSEC_PROC_ADD
-+	create_seq_entry("slabinfo",S_IWUSR|gr_mode,&proc_slabinfo_operations);
-+#else
- 	create_seq_entry("slabinfo",S_IWUSR|S_IRUGO,&proc_slabinfo_operations);
-+#endif
- #ifdef CONFIG_DEBUG_SLAB_LEAK
- 	create_seq_entry("slab_allocators", 0 ,&proc_slabstats_operations);
- #endif
-@@ -727,7 +748,7 @@ void __init proc_misc_init(void)
- #ifdef CONFIG_SCHEDSTATS
- 	create_seq_entry("schedstat", 0, &proc_schedstat_operations);
- #endif
--#ifdef CONFIG_PROC_KCORE
-+#if defined(CONFIG_PROC_KCORE) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
- 	proc_root_kcore = create_proc_entry("kcore", S_IRUSR, NULL);
- 	if (proc_root_kcore) {
- 		proc_root_kcore->proc_fops = &proc_kcore_operations;
---- a/fs/proc/proc_sysctl.c
-+++ b/fs/proc/proc_sysctl.c
-@@ -7,6 +7,8 @@
- #include <linux/security.h>
- #include "internal.h"
- 
-+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
-+
- static struct dentry_operations proc_sys_dentry_operations;
- static const struct file_operations proc_sys_file_operations;
- static struct inode_operations proc_sys_inode_operations;
-@@ -151,6 +153,9 @@ static struct dentry *proc_sys_lookup(st
- 	if (!table)
- 		goto out;
- 
-+	if (gr_handle_sysctl(table, 001))
-+		goto out;
-+
- 	err = ERR_PTR(-ENOMEM);
- 	inode = proc_sys_make_inode(dir, table);
- 	if (!inode)
-@@ -358,6 +363,9 @@ static int proc_sys_readdir(struct file 
- 			if (pos < filp->f_pos)
- 				continue;
- 
-+			if (gr_handle_sysctl(table, 0))
-+				continue;
-+
- 			if (proc_sys_fill_cache(filp, dirent, filldir, table) < 0)
- 				goto out;
- 			filp->f_pos = pos + 1;
-@@ -420,6 +428,30 @@ out:
- 	return error;
- }
- 
-+/* Eric Biederman is to blame */
-+static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
-+{
-+	int error = 0;
-+	struct ctl_table_header *head;
-+	struct ctl_table *table;
-+
-+	table = do_proc_sys_lookup(dentry->d_parent, &dentry->d_name, &head);
-+	/* Has the sysctl entry disappeared on us? */
-+	if (!table)
-+		goto out;
-+
-+	if (gr_handle_sysctl(table, 001)) {
-+		error = -ENOENT;
-+		goto out;
-+	}
-+
-+out:
-+	sysctl_head_finish(head);
-+
-+	generic_fillattr(dentry->d_inode, stat);
-+
-+	return error;
-+}
- static int proc_sys_setattr(struct dentry *dentry, struct iattr *attr)
- {
- 	struct inode *inode = dentry->d_inode;
-@@ -448,6 +480,7 @@ static struct inode_operations proc_sys_
- 	.lookup		= proc_sys_lookup,
- 	.permission	= proc_sys_permission,
- 	.setattr	= proc_sys_setattr,
-+	.getattr	= proc_sys_getattr,
- };
- 
- static int proc_sys_revalidate(struct dentry *dentry, struct nameidata *nd)
---- a/fs/proc/root.c
-+++ b/fs/proc/root.c
-@@ -61,7 +61,13 @@ void __init proc_root_init(void)
- 		return;
- 	}
- 	proc_misc_init();
-+#ifdef CONFIG_GRKERNSEC_PROC_USER
-+	proc_net = proc_mkdir_mode("net", S_IRUSR | S_IXUSR, NULL);
-+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
-+	proc_net = proc_mkdir_mode("net", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
-+#else
- 	proc_net = proc_mkdir("net", NULL);
-+#endif
- 	proc_net_stat = proc_mkdir("net/stat", NULL);
- 
- #ifdef CONFIG_SYSVIPC
-@@ -78,7 +84,15 @@ void __init proc_root_init(void)
- #ifdef CONFIG_PROC_DEVICETREE
- 	proc_device_tree_init();
- #endif
-+#ifdef CONFIG_GRKERNSEC_PROC_ADD
-+#ifdef CONFIG_GRKERNSEC_PROC_USER
-+	proc_bus = proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
-+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
-+	proc_bus = proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
-+#endif
-+#else
- 	proc_bus = proc_mkdir("bus", NULL);
-+#endif
- 	proc_sys_init();
- }
- 
---- a/fs/proc/task_mmu.c
-+++ b/fs/proc/task_mmu.c
-@@ -44,15 +44,27 @@ char *task_mem(struct mm_struct *mm, cha
- 		"VmStk:\t%8lu kB\n"
- 		"VmExe:\t%8lu kB\n"
- 		"VmLib:\t%8lu kB\n"
--		"VmPTE:\t%8lu kB\n",
--		hiwater_vm << (PAGE_SHIFT-10),
-+		"VmPTE:\t%8lu kB\n"
-+
-+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
-+		"CsBase:\t%8lx\nCsLim:\t%8lx\n"
-+#endif
-+
-+		,hiwater_vm << (PAGE_SHIFT-10),
- 		(total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
- 		mm->locked_vm << (PAGE_SHIFT-10),
- 		hiwater_rss << (PAGE_SHIFT-10),
- 		total_rss << (PAGE_SHIFT-10),
- 		data << (PAGE_SHIFT-10),
- 		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
--		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
-+		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
-+
-+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
-+		, mm->context.user_cs_base, mm->context.user_cs_limit
-+#endif
-+
-+	);
-+
- 	return buffer;
- }
- 
-@@ -131,6 +143,12 @@ struct pmd_walker {
- 		       unsigned long, void *);
- };
- 
-+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
-+			    (_mm->pax_flags & MF_PAX_RANDMMAP || \
-+			     _mm->pax_flags & MF_PAX_SEGMEXEC))
-+#endif
-+
- static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats *mss)
- {
- 	struct proc_maps_private *priv = m->private;
-@@ -153,13 +171,22 @@ static int show_map_internal(struct seq_
- 	}
- 
- 	seq_printf(m, "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
-+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+			PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
-+			PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
-+#else
- 			vma->vm_start,
- 			vma->vm_end,
-+#endif
- 			flags & VM_READ ? 'r' : '-',
- 			flags & VM_WRITE ? 'w' : '-',
- 			flags & VM_EXEC ? 'x' : '-',
- 			flags & VM_MAYSHARE ? 's' : 'p',
-+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+			PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_pgoff << PAGE_SHIFT,
-+#else
- 			vma->vm_pgoff << PAGE_SHIFT,
-+#endif
- 			MAJOR(dev), MINOR(dev), ino, &len);
- 
- 	/*
-@@ -173,11 +200,11 @@ static int show_map_internal(struct seq_
- 		const char *name = arch_vma_name(vma);
- 		if (!name) {
- 			if (mm) {
--				if (vma->vm_start <= mm->start_brk &&
--						vma->vm_end >= mm->brk) {
-+				if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
- 					name = "[heap]";
--				} else if (vma->vm_start <= mm->start_stack &&
--					   vma->vm_end >= mm->start_stack) {
-+				} else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
-+					   (vma->vm_start <= mm->start_stack &&
-+					    vma->vm_end >= mm->start_stack)) {
- 					name = "[stack]";
- 				}
- 			} else {
-@@ -191,7 +218,27 @@ static int show_map_internal(struct seq_
- 	}
- 	seq_putc(m, '\n');
- 
--	if (mss)
-+	
-+	if (mss) {
-+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
-+	    if (PAX_RAND_FLAGS(mm))
-+		seq_printf(m,
-+			   "Size:          %8lu kB\n"
-+			   "Rss:            %8lu kB\n"
-+			   "Shared_Clean:   %8lu kB\n"
-+			   "Shared_Dirty:   %8lu kB\n"
-+			   "Private_Clean:  %8lu kB\n"
-+			   "Private_Dirty:  %8lu kB\n",
-+			   "Referenced:     %8lu kB\n",
-+			   0UL,
-+			   0UL,
-+			   0UL,
-+			   0UL,
-+			   0UL,
-+			   0UL,
-+			   0UL);
-+	    else
-+#endif
- 		seq_printf(m,
- 			   "Size:           %8lu kB\n"
- 			   "Rss:            %8lu kB\n"
-@@ -207,6 +254,7 @@ static int show_map_internal(struct seq_
- 			   mss->private_clean >> 10,
- 			   mss->private_dirty >> 10,
- 			   mss->referenced >> 10);
-+	}
- 
- 	if (m->count < m->size)  /* vma is copied successfully */
- 		m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;
---- a/fs/readdir.c
-+++ b/fs/readdir.c
-@@ -16,6 +16,8 @@
- #include <linux/security.h>
- #include <linux/syscalls.h>
- #include <linux/unistd.h>
-+#include <linux/namei.h>
-+#include <linux/grsecurity.h>
- 
- #include <asm/uaccess.h>
- 
-@@ -64,6 +66,7 @@ struct old_linux_dirent {
- 
- struct readdir_callback {
- 	struct old_linux_dirent __user * dirent;
-+	struct file * file;
- 	int result;
- };
- 
-@@ -79,6 +82,10 @@ static int fillonedir(void * __buf, cons
- 	d_ino = ino;
- 	if (sizeof(d_ino) < sizeof(ino) && d_ino != ino)
- 		return -EOVERFLOW;
-+
-+	if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
-+		return 0;
-+
- 	buf->result++;
- 	dirent = buf->dirent;
- 	if (!access_ok(VERIFY_WRITE, dirent,
-@@ -110,6 +117,7 @@ asmlinkage long old_readdir(unsigned int
- 
- 	buf.result = 0;
- 	buf.dirent = dirent;
-+	buf.file = file;
- 
- 	error = vfs_readdir(file, fillonedir, &buf);
- 	if (error >= 0)
-@@ -136,6 +144,7 @@ struct linux_dirent {
- struct getdents_callback {
- 	struct linux_dirent __user * current_dir;
- 	struct linux_dirent __user * previous;
-+	struct file * file;
- 	int count;
- 	int error;
- };
-@@ -154,6 +163,10 @@ static int filldir(void * __buf, const c
- 	d_ino = ino;
- 	if (sizeof(d_ino) < sizeof(ino) && d_ino != ino)
- 		return -EOVERFLOW;
-+
-+	if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
-+		return 0;
-+
- 	dirent = buf->previous;
- 	if (dirent) {
- 		if (__put_user(offset, &dirent->d_off))
-@@ -200,6 +213,7 @@ asmlinkage long sys_getdents(unsigned in
- 	buf.previous = NULL;
- 	buf.count = count;
- 	buf.error = 0;
-+	buf.file = file;
- 
- 	error = vfs_readdir(file, filldir, &buf);
- 	if (error < 0)
-@@ -222,6 +236,7 @@ out:
- struct getdents_callback64 {
- 	struct linux_dirent64 __user * current_dir;
- 	struct linux_dirent64 __user * previous;
-+	struct file *file;
- 	int count;
- 	int error;
- };
-@@ -236,6 +251,10 @@ static int filldir64(void * __buf, const
- 	buf->error = -EINVAL;	/* only used if we fail.. */
- 	if (reclen > buf->count)
- 		return -EINVAL;
-+
-+	if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
-+		return 0;
-+
- 	dirent = buf->previous;
- 	if (dirent) {
- 		if (__put_user(offset, &dirent->d_off))
-@@ -282,6 +301,7 @@ asmlinkage long sys_getdents64(unsigned 
- 
- 	buf.current_dir = dirent;
- 	buf.previous = NULL;
-+	buf.file = file;
- 	buf.count = count;
- 	buf.error = 0;
- 
---- a/fs/udf/balloc.c
-+++ b/fs/udf/balloc.c
-@@ -154,8 +154,7 @@ static void udf_bitmap_free_blocks(struc
- 	unsigned long overflow;
- 
- 	mutex_lock(&sbi->s_alloc_mutex);
--	if (bloc.logicalBlockNum < 0 ||
--	    (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) {
-+	if (bloc.logicalBlockNum + count > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) {
- 		udf_debug("%d < %d || %d + %d > %d\n",
- 			  bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
- 			  UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum));
-@@ -221,7 +220,7 @@ static int udf_bitmap_prealloc_blocks(st
- 	struct buffer_head *bh;
- 
- 	mutex_lock(&sbi->s_alloc_mutex);
--	if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition))
-+	if (first_block >= UDF_SB_PARTLEN(sb, partition))
- 		goto out;
- 
- 	if (first_block + block_count > UDF_SB_PARTLEN(sb, partition))
-@@ -287,7 +286,7 @@ static int udf_bitmap_new_block(struct s
- 	mutex_lock(&sbi->s_alloc_mutex);
- 
- repeat:
--	if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition))
-+	if (goal >= UDF_SB_PARTLEN(sb, partition))
- 		goal = 0;
- 
- 	nr_groups = bitmap->s_nr_groups;
-@@ -420,8 +419,7 @@ static void udf_table_free_blocks(struct
- 	int i;
- 
- 	mutex_lock(&sbi->s_alloc_mutex);
--	if (bloc.logicalBlockNum < 0 ||
--	    (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) {
-+	if (bloc.logicalBlockNum + count > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) {
- 		udf_debug("%d < %d || %d + %d > %d\n",
- 			  bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
- 			  UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum));
-@@ -627,7 +625,7 @@ static int udf_table_prealloc_blocks(str
- 	struct extent_position epos;
- 	int8_t etype = -1;
- 
--	if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition))
-+	if (first_block >= UDF_SB_PARTLEN(sb, partition))
- 		return 0;
- 
- 	if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
-@@ -703,7 +701,7 @@ static int udf_table_new_block(struct su
- 		return newblock;
- 
- 	mutex_lock(&sbi->s_alloc_mutex);
--	if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition))
-+	if (goal >= UDF_SB_PARTLEN(sb, partition))
- 		goal = 0;
- 
- 	/* We search for the closest matching block to goal. If we find a exact hit,
---- a/fs/udf/inode.c
-+++ b/fs/udf/inode.c
-@@ -308,9 +308,6 @@ static int udf_get_block(struct inode *i
- 
- 	lock_kernel();
- 
--	if (block < 0)
--		goto abort_negative;
--
- 	if (block == UDF_I_NEXT_ALLOC_BLOCK(inode) + 1) {
- 		UDF_I_NEXT_ALLOC_BLOCK(inode)++;
- 		UDF_I_NEXT_ALLOC_GOAL(inode)++;
-@@ -331,10 +328,6 @@ static int udf_get_block(struct inode *i
- abort:
- 	unlock_kernel();
- 	return err;
--
--abort_negative:
--	udf_warning(inode->i_sb, "udf_get_block", "block < 0");
--	goto abort;
- }
- 
- static struct buffer_head *udf_getblk(struct inode *inode, long block,
---- a/fs/ufs/inode.c
-+++ b/fs/ufs/inode.c
-@@ -55,9 +55,7 @@ static int ufs_block_to_path(struct inod
- 
- 
- 	UFSD("ptrs=uspi->s_apb = %d,double_blocks=%ld \n",ptrs,double_blocks);
--	if (i_block < 0) {
--		ufs_warning(inode->i_sb, "ufs_block_to_path", "block < 0");
--	} else if (i_block < direct_blocks) {
-+	if (i_block < direct_blocks) {
- 		offsets[n++] = i_block;
- 	} else if ((i_block -= direct_blocks) < indirect_blocks) {
- 		offsets[n++] = UFS_IND_BLOCK;
-@@ -439,8 +437,6 @@ int ufs_getfrag_block(struct inode *inod
- 	lock_kernel();
- 
- 	UFSD("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment);
--	if (fragment < 0)
--		goto abort_negative;
- 	if (fragment >
- 	    ((UFS_NDADDR + uspi->s_apb + uspi->s_2apb + uspi->s_3apb)
- 	     << uspi->s_fpbshift))
-@@ -503,10 +499,6 @@ abort:
- 	unlock_kernel();
- 	return err;
- 
--abort_negative:
--	ufs_warning(sb, "ufs_get_block", "block < 0");
--	goto abort;
--
- abort_too_big:
- 	ufs_warning(sb, "ufs_get_block", "block > big");
- 	goto abort;
---- a/fs/utimes.c
-+++ b/fs/utimes.c
-@@ -6,6 +6,7 @@
- #include <linux/sched.h>
- #include <linux/stat.h>
- #include <linux/utime.h>
-+#include <linux/grsecurity.h>
- #include <asm/uaccess.h>
- #include <asm/unistd.h>
- 
-@@ -47,6 +48,7 @@ long do_utimes(int dfd, char __user *fil
- 	int error;
- 	struct nameidata nd;
- 	struct dentry *dentry;
-+	struct vfsmount *mnt;
- 	struct inode *inode;
- 	struct iattr newattrs;
- 	struct file *f = NULL;
-@@ -65,12 +67,14 @@ long do_utimes(int dfd, char __user *fil
- 		if (!f)
- 			goto out;
- 		dentry = f->f_path.dentry;
-+		mnt = f->f_path.mnt;
- 	} else {
- 		error = __user_walk_fd(dfd, filename, (flags & AT_SYMLINK_NOFOLLOW) ? 0 : LOOKUP_FOLLOW, &nd);
- 		if (error)
- 			goto out;
- 
- 		dentry = nd.dentry;
-+		mnt = nd.mnt;
- 	}
- 
- 	inode = dentry->d_inode;
-@@ -117,6 +121,12 @@ long do_utimes(int dfd, char __user *fil
- 			}
- 		}
- 	}
-+
-+	if (!gr_acl_handle_utime(dentry, mnt)) {
-+		error = -EACCES;
-+		goto dput_and_out;
-+	}
-+
- 	mutex_lock(&inode->i_mutex);
- 	error = notify_change(dentry, &newattrs);
- 	mutex_unlock(&inode->i_mutex);
---- a/fs/xfs/xfs_bmap.c
-+++ b/fs/xfs/xfs_bmap.c
-@@ -374,7 +374,7 @@ xfs_bmap_validate_ret(
- 	int			nmap,
- 	int			ret_nmap);
- #else
--#define	xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
-+#define	xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
- #endif /* DEBUG */
- 
- #if defined(XFS_RW_TRACE)
---- /dev/null
-+++ b/grsecurity/gracl_alloc.c
-@@ -0,0 +1,91 @@
-+#include <linux/kernel.h>
-+#include <linux/mm.h>
-+#include <linux/slab.h>
-+#include <linux/vmalloc.h>
-+#include <linux/gracl.h>
-+#include <linux/grsecurity.h>
-+
-+static unsigned long alloc_stack_next = 1;
-+static unsigned long alloc_stack_size = 1;
-+static void **alloc_stack;
-+
-+static __inline__ int
-+alloc_pop(void)
-+{
-+	if (alloc_stack_next == 1)
-+		return 0;
-+
-+	kfree(alloc_stack[alloc_stack_next - 2]);
-+
-+	alloc_stack_next--;
-+
-+	return 1;
-+}
-+
-+static __inline__ void
-+alloc_push(void *buf)
-+{
-+	if (alloc_stack_next >= alloc_stack_size)
-+		BUG();
-+
-+	alloc_stack[alloc_stack_next - 1] = buf;
-+
-+	alloc_stack_next++;
-+
-+	return;
-+}
-+
-+void *
-+acl_alloc(unsigned long len)
-+{
-+	void *ret;
-+
-+	if (len > PAGE_SIZE)
-+		BUG();
-+
-+	ret = kmalloc(len, GFP_KERNEL);
-+
-+	if (ret)
-+		alloc_push(ret);
-+
-+	return ret;
-+}
-+
-+void
-+acl_free_all(void)
-+{
-+	if (gr_acl_is_enabled() || !alloc_stack)
-+		return;
-+
-+	while (alloc_pop()) ;
-+
-+	if (alloc_stack) {
-+		if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
-+			kfree(alloc_stack);
-+		else
-+			vfree(alloc_stack);
-+	}
-+
-+	alloc_stack = NULL;
-+	alloc_stack_size = 1;
-+	alloc_stack_next = 1;
-+
-+	return;
-+}
-+
-+int
-+acl_alloc_stack_init(unsigned long size)
-+{
-+	if ((size * sizeof (void *)) <= PAGE_SIZE)
-+		alloc_stack =
-+		    (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
-+	else
-+		alloc_stack = (void **) vmalloc(size * sizeof (void *));
-+
-+	alloc_stack_size = size;
-+
-+	if (!alloc_stack)
-+		return 0;
-+	else
-+		return 1;
-+}
---- /dev/null
-+++ b/grsecurity/gracl.c
-@@ -0,0 +1,3722 @@
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/sched.h>
-+#include <linux/mm.h>
-+#include <linux/file.h>
-+#include <linux/fs.h>
-+#include <linux/namei.h>
-+#include <linux/mount.h>
-+#include <linux/tty.h>
-+#include <linux/proc_fs.h>
-+#include <linux/smp_lock.h>
-+#include <linux/slab.h>
-+#include <linux/vmalloc.h>
-+#include <linux/types.h>
-+#include <linux/capability.h>
-+#include <linux/sysctl.h>
-+#include <linux/netdevice.h>
-+#include <linux/ptrace.h>
-+#include <linux/gracl.h>
-+#include <linux/gralloc.h>
-+#include <linux/grsecurity.h>
-+#include <linux/grinternal.h>
-+#include <linux/pid_namespace.h>
-+#include <linux/percpu.h>
-+
-+#include <asm/uaccess.h>
-+#include <asm/errno.h>
-+#include <asm/mman.h>
-+
-+static struct acl_role_db acl_role_set;
-+static struct name_db name_set;
-+static struct inodev_db inodev_set;
-+
-+/* for keeping track of userspace pointers used for subjects, so we
-+   can share references in the kernel as well
-+*/
-+
-+static struct dentry *real_root;
-+static struct vfsmount *real_root_mnt;
-+
-+static struct acl_subj_map_db subj_map_set;
-+
-+static struct acl_role_label *default_role;
-+
-+static u16 acl_sp_role_value;
-+
-+extern char *gr_shared_page[4];
-+static DECLARE_MUTEX(gr_dev_sem);
-+rwlock_t gr_inode_lock = RW_LOCK_UNLOCKED;
-+
-+struct gr_arg *gr_usermode;
-+
-+static unsigned int gr_status = GR_STATUS_INIT;
-+
-+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
-+extern void gr_clear_learn_entries(void);
-+
-+#ifdef CONFIG_GRKERNSEC_RESLOG
-+extern void gr_log_resource(const struct task_struct *task,
-+			    const int res, const unsigned long wanted, const int gt);
-+#endif
-+
-+unsigned char *gr_system_salt;
-+unsigned char *gr_system_sum;
-+
-+static struct sprole_pw **acl_special_roles = NULL;
-+static __u16 num_sprole_pws = 0;
-+
-+static struct acl_role_label *kernel_role = NULL;
-+
-+static unsigned int gr_auth_attempts = 0;
-+static unsigned long gr_auth_expires = 0UL;
-+
-+extern struct vfsmount *sock_mnt;
-+extern struct vfsmount *pipe_mnt;
-+extern struct vfsmount *shm_mnt;
-+static struct acl_object_label *fakefs_obj;
-+
-+extern int gr_init_uidset(void);
-+extern void gr_free_uidset(void);
-+extern void gr_remove_uid(uid_t uid);
-+extern int gr_find_uid(uid_t uid);
-+
-+__inline__ int
-+gr_acl_is_enabled(void)
-+{
-+	return (gr_status & GR_READY);
-+}
-+
-+char gr_roletype_to_char(void)
-+{
-+	switch (current->role->roletype &
-+		(GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
-+		 GR_ROLE_SPECIAL)) {
-+	case GR_ROLE_DEFAULT:
-+		return 'D';
-+	case GR_ROLE_USER:
-+		return 'U';
-+	case GR_ROLE_GROUP:
-+		return 'G';
-+	case GR_ROLE_SPECIAL:
-+		return 'S';
-+	}
-+
-+	return 'X';
-+}
-+
-+__inline__ int
-+gr_acl_tpe_check(void)
-+{
-+	if (unlikely(!(gr_status & GR_READY)))
-+		return 0;
-+	if (current->role->roletype & GR_ROLE_TPE)
-+		return 1;
-+	else
-+		return 0;
-+}
-+
-+int
-+gr_handle_rawio(const struct inode *inode)
-+{
-+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
-+	if (inode && S_ISBLK(inode->i_mode) &&
-+	    grsec_enable_chroot_caps && proc_is_chrooted(current) &&
-+	    !capable(CAP_SYS_RAWIO))
-+		return 1;
-+#endif
-+	return 0;
-+}
-+
-+static int
-+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
-+{
-+	int i;
-+	unsigned long *l1;
-+	unsigned long *l2;
-+	unsigned char *c1;
-+	unsigned char *c2;
-+	int num_longs;
-+
-+	if (likely(lena != lenb))
-+		return 0;
-+
-+	l1 = (unsigned long *)a;
-+	l2 = (unsigned long *)b;
-+
-+	num_longs = lena / sizeof(unsigned long);
-+
-+	for (i = num_longs; i--; l1++, l2++) {
-+		if (unlikely(*l1 != *l2))
-+			return 0;
-+	}
-+
-+	c1 = (unsigned char *) l1;
-+	c2 = (unsigned char *) l2;
-+
-+	i = lena - (num_longs * sizeof(unsigned long));	
-+
-+	for (; i--; c1++, c2++) {
-+		if (unlikely(*c1 != *c2))
-+			return 0;
-+	}
-+
-+	return 1;
-+}
-+
-+static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
-+	                   struct dentry *root, struct vfsmount *rootmnt,
-+			   char *buffer, int buflen)
-+{
-+	char * end = buffer+buflen;
-+	char * retval;
-+	int namelen;
-+
-+	*--end = '\0';
-+	buflen--;
-+
-+	if (buflen < 1)
-+		goto Elong;
-+	/* Get '/' right */
-+	retval = end-1;
-+	*retval = '/';
-+
-+	for (;;) {
-+		struct dentry * parent;
-+
-+		if (dentry == root && vfsmnt == rootmnt)
-+			break;
-+		if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
-+			/* Global root? */
-+			spin_lock(&vfsmount_lock);
-+			if (vfsmnt->mnt_parent == vfsmnt) {
-+				spin_unlock(&vfsmount_lock);
-+				goto global_root;
-+			}
-+			dentry = vfsmnt->mnt_mountpoint;
-+			vfsmnt = vfsmnt->mnt_parent;
-+			spin_unlock(&vfsmount_lock);
-+			continue;
-+		}
-+		parent = dentry->d_parent;
-+		prefetch(parent);
-+		namelen = dentry->d_name.len;
-+		buflen -= namelen + 1;
-+		if (buflen < 0)
-+			goto Elong;
-+		end -= namelen;
-+		memcpy(end, dentry->d_name.name, namelen);
-+		*--end = '/';
-+		retval = end;
-+		dentry = parent;
-+	}
-+
-+	return retval;
-+
-+global_root:
-+	namelen = dentry->d_name.len;
-+	buflen -= namelen;
-+	if (buflen < 0)
-+		goto Elong;
-+	retval -= namelen-1;	/* hit the slash */
-+	memcpy(retval, dentry->d_name.name, namelen);
-+	return retval;
-+Elong:
-+	return ERR_PTR(-ENAMETOOLONG);
-+}
-+
-+static char *
-+gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
-+              struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
-+{
-+	char *retval;
-+
-+	retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
-+	if (unlikely(IS_ERR(retval)))
-+		retval = strcpy(buf, "<path too long>");
-+	else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
-+		retval[1] = '\0';
-+
-+	return retval;
-+}
-+
-+static char *
-+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
-+		char *buf, int buflen)
-+{
-+	char *res;
-+
-+	/* we can use real_root, real_root_mnt, because this is only called
-+	   by the RBAC system */
-+	res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
-+
-+	return res;
-+}
-+
-+static char *
-+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
-+	    char *buf, int buflen)
-+{
-+	char *res;
-+	struct dentry *root;
-+	struct vfsmount *rootmnt;
-+	struct task_struct *reaper = child_reaper(current);
-+
-+	/* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
-+	read_lock(&reaper->fs->lock);
-+	root = dget(reaper->fs->root);
-+	rootmnt = mntget(reaper->fs->rootmnt);
-+	read_unlock(&reaper->fs->lock);
-+
-+	spin_lock(&dcache_lock);
-+	res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
-+	spin_unlock(&dcache_lock);
-+
-+	dput(root);
-+	mntput(rootmnt);
-+	return res;
-+}
-+
-+static char *
-+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
-+{
-+	char *ret;
-+	spin_lock(&dcache_lock);
-+	ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
-+			     PAGE_SIZE);
-+	spin_unlock(&dcache_lock);
-+	return ret;
-+}
-+
-+char *
-+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
-+{
-+	return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
-+			     PAGE_SIZE);
-+}
-+
-+char *
-+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
-+{
-+	return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
-+			   PAGE_SIZE);
-+}
-+
-+char *
-+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
-+{
-+	return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
-+			   PAGE_SIZE);
-+}
-+
-+char *
-+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
-+{
-+	return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
-+			   PAGE_SIZE);
-+}
-+
-+char *
-+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
-+{
-+	return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
-+			   PAGE_SIZE);
-+}
-+
-+__inline__ __u32
-+to_gr_audit(const __u32 reqmode)
-+{
-+	/* masks off auditable permission flags, then shifts them to create
-+	   auditing flags, and adds the special case of append auditing if
-+	   we're requesting write */
-+	return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
-+}
-+
-+struct acl_subject_label *
-+lookup_subject_map(const struct acl_subject_label *userp)
-+{
-+	unsigned int index = shash(userp, subj_map_set.s_size);
-+	struct subject_map *match;
-+
-+	match = subj_map_set.s_hash[index];
-+
-+	while (match && match->user != userp)
-+		match = match->next;
-+
-+	if (match != NULL)
-+		return match->kernel;
-+	else
-+		return NULL;
-+}
-+
-+static void
-+insert_subj_map_entry(struct subject_map *subjmap)
-+{
-+	unsigned int index = shash(subjmap->user, subj_map_set.s_size);
-+	struct subject_map **curr;
-+
-+	subjmap->prev = NULL;
-+
-+	curr = &subj_map_set.s_hash[index];
-+	if (*curr != NULL)
-+		(*curr)->prev = subjmap;
-+
-+	subjmap->next = *curr;
-+	*curr = subjmap;
-+
-+	return;
-+}
-+
-+static struct acl_role_label *
-+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
-+		      const gid_t gid)
-+{
-+	unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
-+	struct acl_role_label *match;
-+	struct role_allowed_ip *ipp;
-+	unsigned int x;
-+
-+	match = acl_role_set.r_hash[index];
-+
-+	while (match) {
-+		if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
-+			for (x = 0; x < match->domain_child_num; x++) {
-+				if (match->domain_children[x] == uid)
-+					goto found;
-+			}
-+		} else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
-+			break;
-+		match = match->next;
-+	}
-+found:
-+	if (match == NULL) {
-+	      try_group:
-+		index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
-+		match = acl_role_set.r_hash[index];
-+
-+		while (match) {
-+			if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
-+				for (x = 0; x < match->domain_child_num; x++) {
-+					if (match->domain_children[x] == gid)
-+						goto found2;
-+				}
-+			} else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
-+				break;
-+			match = match->next;
-+		}
-+found2:
-+		if (match == NULL)
-+			match = default_role;
-+		if (match->allowed_ips == NULL)
-+			return match;
-+		else {
-+			for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
-+				if (likely
-+				    ((ntohl(task->signal->curr_ip) & ipp->netmask) ==
-+				     (ntohl(ipp->addr) & ipp->netmask)))
-+					return match;
-+			}
-+			match = default_role;
-+		}
-+	} else if (match->allowed_ips == NULL) {
-+		return match;
-+	} else {
-+		for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
-+			if (likely
-+			    ((ntohl(task->signal->curr_ip) & ipp->netmask) ==
-+			     (ntohl(ipp->addr) & ipp->netmask)))
-+				return match;
-+		}
-+		goto try_group;
-+	}
-+
-+	return match;
-+}
-+
-+struct acl_subject_label *
-+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
-+		      const struct acl_role_label *role)
-+{
-+	unsigned int index = fhash(ino, dev, role->subj_hash_size);
-+	struct acl_subject_label *match;
-+
-+	match = role->subj_hash[index];
-+
-+	while (match && (match->inode != ino || match->device != dev ||
-+	       (match->mode & GR_DELETED))) {
-+		match = match->next;
-+	}
-+
-+	if (match && !(match->mode & GR_DELETED))
-+		return match;
-+	else
-+		return NULL;
-+}
-+
-+static struct acl_object_label *
-+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
-+		     const struct acl_subject_label *subj)
-+{
-+	unsigned int index = fhash(ino, dev, subj->obj_hash_size);
-+	struct acl_object_label *match;
-+
-+	match = subj->obj_hash[index];
-+
-+	while (match && (match->inode != ino || match->device != dev ||
-+	       (match->mode & GR_DELETED))) {
-+		match = match->next;
-+	}
-+
-+	if (match && !(match->mode & GR_DELETED))
-+		return match;
-+	else
-+		return NULL;
-+}
-+
-+static struct acl_object_label *
-+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
-+		     const struct acl_subject_label *subj)
-+{
-+	unsigned int index = fhash(ino, dev, subj->obj_hash_size);
-+	struct acl_object_label *match;
-+
-+	match = subj->obj_hash[index];
-+
-+	while (match && (match->inode != ino || match->device != dev ||
-+	       !(match->mode & GR_DELETED))) {
-+		match = match->next;
-+	}
-+
-+	if (match && (match->mode & GR_DELETED))
-+		return match;
-+
-+	match = subj->obj_hash[index];
-+
-+	while (match && (match->inode != ino || match->device != dev ||
-+	       (match->mode & GR_DELETED))) {
-+		match = match->next;
-+	}
-+
-+	if (match && !(match->mode & GR_DELETED))
-+		return match;
-+	else
-+		return NULL;
-+}
-+
-+static struct name_entry *
-+lookup_name_entry(const char *name)
-+{
-+	unsigned int len = strlen(name);
-+	unsigned int key = full_name_hash(name, len);
-+	unsigned int index = key % name_set.n_size;
-+	struct name_entry *match;
-+
-+	match = name_set.n_hash[index];
-+
-+	while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
-+		match = match->next;
-+
-+	return match;
-+}
-+
-+static struct name_entry *
-+lookup_name_entry_create(const char *name)
-+{
-+	unsigned int len = strlen(name);
-+	unsigned int key = full_name_hash(name, len);
-+	unsigned int index = key % name_set.n_size;
-+	struct name_entry *match;
-+
-+	match = name_set.n_hash[index];
-+
-+	while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
-+			 !match->deleted))
-+		match = match->next;
-+
-+	if (match && match->deleted)
-+		return match;
-+
-+	match = name_set.n_hash[index];
-+
-+	while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
-+			 match->deleted))
-+		match = match->next;
-+
-+	if (match && !match->deleted)
-+		return match;
-+	else
-+		return NULL;
-+}
-+
-+static struct inodev_entry *
-+lookup_inodev_entry(const ino_t ino, const dev_t dev)
-+{
-+	unsigned int index = fhash(ino, dev, inodev_set.i_size);
-+	struct inodev_entry *match;
-+
-+	match = inodev_set.i_hash[index];
-+
-+	while (match && (match->nentry->inode != ino || match->nentry->device != dev))
-+		match = match->next;
-+
-+	return match;
-+}
-+
-+static void
-+insert_inodev_entry(struct inodev_entry *entry)
-+{
-+	unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
-+				    inodev_set.i_size);
-+	struct inodev_entry **curr;
-+
-+	entry->prev = NULL;
-+
-+	curr = &inodev_set.i_hash[index];
-+	if (*curr != NULL)
-+		(*curr)->prev = entry;
-+	
-+	entry->next = *curr;
-+	*curr = entry;
-+
-+	return;
-+}
-+
-+static void
-+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
-+{
-+	unsigned int index =
-+	    rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
-+	struct acl_role_label **curr;
-+
-+	role->prev = NULL;
-+
-+	curr = &acl_role_set.r_hash[index];
-+	if (*curr != NULL)
-+		(*curr)->prev = role;
-+
-+	role->next = *curr;
-+	*curr = role;
-+
-+	return;
-+}
-+
-+static void
-+insert_acl_role_label(struct acl_role_label *role)
-+{
-+	int i;
-+
-+	if (role->roletype & GR_ROLE_DOMAIN) {
-+		for (i = 0; i < role->domain_child_num; i++)
-+			__insert_acl_role_label(role, role->domain_children[i]);
-+	} else
-+		__insert_acl_role_label(role, role->uidgid);
-+}
-+					
-+static int
-+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
-+{
-+	struct name_entry **curr, *nentry;
-+	struct inodev_entry *ientry;
-+	unsigned int len = strlen(name);
-+	unsigned int key = full_name_hash(name, len);
-+	unsigned int index = key % name_set.n_size;
-+
-+	curr = &name_set.n_hash[index];
-+
-+	while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
-+		curr = &((*curr)->next);
-+
-+	if (*curr != NULL)
-+		return 1;
-+
-+	nentry = acl_alloc(sizeof (struct name_entry));
-+	if (nentry == NULL)
-+		return 0;
-+	ientry = acl_alloc(sizeof (struct inodev_entry));
-+	if (ientry == NULL)
-+		return 0;
-+	ientry->nentry = nentry;
-+
-+	nentry->key = key;
-+	nentry->name = name;
-+	nentry->inode = inode;
-+	nentry->device = device;
-+	nentry->len = len;
-+	nentry->deleted = deleted;
-+
-+	nentry->prev = NULL;
-+	curr = &name_set.n_hash[index];
-+	if (*curr != NULL)
-+		(*curr)->prev = nentry;
-+	nentry->next = *curr;
-+	*curr = nentry;
-+
-+	/* insert us into the table searchable by inode/dev */
-+	insert_inodev_entry(ientry);
-+
-+	return 1;
-+}
-+
-+static void
-+insert_acl_obj_label(struct acl_object_label *obj,
-+		     struct acl_subject_label *subj)
-+{
-+	unsigned int index =
-+	    fhash(obj->inode, obj->device, subj->obj_hash_size);
-+	struct acl_object_label **curr;
-+
-+	
-+	obj->prev = NULL;
-+
-+	curr = &subj->obj_hash[index];
-+	if (*curr != NULL)
-+		(*curr)->prev = obj;
-+
-+	obj->next = *curr;
-+	*curr = obj;
-+
-+	return;
-+}
-+
-+static void
-+insert_acl_subj_label(struct acl_subject_label *obj,
-+		      struct acl_role_label *role)
-+{
-+	unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
-+	struct acl_subject_label **curr;
-+
-+	obj->prev = NULL;
-+
-+	curr = &role->subj_hash[index];
-+	if (*curr != NULL)
-+		(*curr)->prev = obj;
-+
-+	obj->next = *curr;
-+	*curr = obj;
-+
-+	return;
-+}
-+
-+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
-+
-+static void *
-+create_table(__u32 * len, int elementsize)
-+{
-+	unsigned int table_sizes[] = {
-+		7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
-+		32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
-+		4194301, 8388593, 16777213, 33554393, 67108859, 134217689,
-+		268435399, 536870909, 1073741789, 2147483647
-+	};
-+	void *newtable = NULL;
-+	unsigned int pwr = 0;
-+
-+	while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
-+	       table_sizes[pwr] <= *len)
-+		pwr++;
-+
-+	if (table_sizes[pwr] <= *len)
-+		return newtable;
-+
-+	if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
-+		newtable =
-+		    kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
-+	else
-+		newtable = vmalloc(table_sizes[pwr] * elementsize);
-+
-+	*len = table_sizes[pwr];
-+
-+	return newtable;
-+}
-+
-+static int
-+init_variables(const struct gr_arg *arg)
-+{
-+	struct task_struct *reaper = child_reaper(current);
-+	unsigned int stacksize;
-+
-+	subj_map_set.s_size = arg->role_db.num_subjects;
-+	acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
-+	name_set.n_size = arg->role_db.num_objects;
-+	inodev_set.i_size = arg->role_db.num_objects;
-+
-+	if (!subj_map_set.s_size || !acl_role_set.r_size ||
-+	    !name_set.n_size || !inodev_set.i_size)
-+		return 1;
-+
-+	if (!gr_init_uidset())
-+		return 1;
-+
-+	/* set up the stack that holds allocation info */
-+
-+	stacksize = arg->role_db.num_pointers + 5;
-+
-+	if (!acl_alloc_stack_init(stacksize))
-+		return 1;
-+
-+	/* grab reference for the real root dentry and vfsmount */
-+	read_lock(&reaper->fs->lock);
-+	real_root_mnt = mntget(reaper->fs->rootmnt);
-+	real_root = dget(reaper->fs->root);
-+	read_unlock(&reaper->fs->lock);
-+	
-+	fakefs_obj = acl_alloc(sizeof(struct acl_object_label));
-+	if (fakefs_obj == NULL)
-+		return 1;
-+	fakefs_obj->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
-+
-+	subj_map_set.s_hash =
-+	    (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
-+	acl_role_set.r_hash =
-+	    (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
-+	name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
-+	inodev_set.i_hash =
-+	    (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
-+
-+	if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
-+	    !name_set.n_hash || !inodev_set.i_hash)
-+		return 1;
-+
-+	memset(subj_map_set.s_hash, 0,
-+	       sizeof(struct subject_map *) * subj_map_set.s_size);
-+	memset(acl_role_set.r_hash, 0,
-+	       sizeof (struct acl_role_label *) * acl_role_set.r_size);
-+	memset(name_set.n_hash, 0,
-+	       sizeof (struct name_entry *) * name_set.n_size);
-+	memset(inodev_set.i_hash, 0,
-+	       sizeof (struct inodev_entry *) * inodev_set.i_size);
-+
-+	return 0;
-+}
-+
-+/* free information not needed after startup
-+   currently contains user->kernel pointer mappings for subjects
-+*/
-+
-+static void
-+free_init_variables(void)
-+{
-+	__u32 i;
-+
-+	if (subj_map_set.s_hash) {
-+		for (i = 0; i < subj_map_set.s_size; i++) {
-+			if (subj_map_set.s_hash[i]) {
-+				kfree(subj_map_set.s_hash[i]);
-+				subj_map_set.s_hash[i] = NULL;
-+			}
-+		}
-+
-+		if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
-+		    PAGE_SIZE)
-+			kfree(subj_map_set.s_hash);
-+		else
-+			vfree(subj_map_set.s_hash);
-+	}
-+
-+	return;
-+}
-+
-+static void
-+free_variables(void)
-+{
-+	struct acl_subject_label *s;
-+	struct acl_role_label *r;
-+	struct task_struct *task, *task2;
-+	unsigned int i, x;
-+
-+	gr_clear_learn_entries();
-+
-+	read_lock(&tasklist_lock);
-+	do_each_thread(task2, task) {
-+		task->acl_sp_role = 0;
-+		task->acl_role_id = 0;
-+		task->acl = NULL;
-+		task->role = NULL;
-+	} while_each_thread(task2, task);
-+	read_unlock(&tasklist_lock);
-+
-+	/* release the reference to the real root dentry and vfsmount */
-+	if (real_root)
-+		dput(real_root);
-+	real_root = NULL;
-+	if (real_root_mnt)
-+		mntput(real_root_mnt);
-+	real_root_mnt = NULL;
-+
-+	/* free all object hash tables */
-+
-+	FOR_EACH_ROLE_START(r, i)
-+		if (r->subj_hash == NULL)
-+			break;
-+		FOR_EACH_SUBJECT_START(r, s, x)
-+			if (s->obj_hash == NULL)
-+				break;
-+			if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
-+				kfree(s->obj_hash);
-+			else
-+				vfree(s->obj_hash);
-+		FOR_EACH_SUBJECT_END(s, x)
-+		FOR_EACH_NESTED_SUBJECT_START(r, s)
-+			if (s->obj_hash == NULL)
-+				break;
-+			if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
-+				kfree(s->obj_hash);
-+			else
-+				vfree(s->obj_hash);
-+		FOR_EACH_NESTED_SUBJECT_END(s)
-+		if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
-+			kfree(r->subj_hash);
-+		else
-+			vfree(r->subj_hash);
-+		r->subj_hash = NULL;
-+	FOR_EACH_ROLE_END(r,i)
-+
-+	acl_free_all();
-+
-+	if (acl_role_set.r_hash) {
-+		if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
-+		    PAGE_SIZE)
-+			kfree(acl_role_set.r_hash);
-+		else
-+			vfree(acl_role_set.r_hash);
-+	}
-+	if (name_set.n_hash) {
-+		if ((name_set.n_size * sizeof (struct name_entry *)) <=
-+		    PAGE_SIZE)
-+			kfree(name_set.n_hash);
-+		else
-+			vfree(name_set.n_hash);
-+	}
-+
-+	if (inodev_set.i_hash) {
-+		if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
-+		    PAGE_SIZE)
-+			kfree(inodev_set.i_hash);
-+		else
-+			vfree(inodev_set.i_hash);
-+	}
-+
-+	gr_free_uidset();
-+
-+	memset(&name_set, 0, sizeof (struct name_db));
-+	memset(&inodev_set, 0, sizeof (struct inodev_db));
-+	memset(&acl_role_set, 0, sizeof (struct acl_role_db));
-+	memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
-+
-+	default_role = NULL;
-+
-+	return;
-+}
-+
-+static __u32
-+count_user_objs(struct acl_object_label *userp)
-+{
-+	struct acl_object_label o_tmp;
-+	__u32 num = 0;
-+
-+	while (userp) {
-+		if (copy_from_user(&o_tmp, userp,
-+				   sizeof (struct acl_object_label)))
-+			break;
-+
-+		userp = o_tmp.prev;
-+		num++;
-+	}
-+
-+	return num;
-+}
-+
-+static struct acl_subject_label *
-+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
-+
-+static int
-+copy_user_glob(struct acl_object_label *obj)
-+{
-+	struct acl_object_label *g_tmp, **guser;
-+	unsigned int len;
-+	char *tmp;
-+
-+	if (obj->globbed == NULL)
-+		return 0;
-+
-+	guser = &obj->globbed;
-+	while (*guser) {
-+		g_tmp = (struct acl_object_label *)
-+			acl_alloc(sizeof (struct acl_object_label));
-+		if (g_tmp == NULL)
-+			return -ENOMEM;
-+
-+		if (copy_from_user(g_tmp, *guser,
-+				   sizeof (struct acl_object_label)))
-+			return -EFAULT;
-+
-+		len = strnlen_user(g_tmp->filename, PATH_MAX);
-+
-+		if (!len || len >= PATH_MAX)
-+			return -EINVAL;
-+
-+		if ((tmp = (char *) acl_alloc(len)) == NULL)
-+			return -ENOMEM;
-+
-+		if (copy_from_user(tmp, g_tmp->filename, len))
-+			return -EFAULT;
-+
-+		g_tmp->filename = tmp;
-+
-+		*guser = g_tmp;
-+		guser = &(g_tmp->next);
-+	}
-+
-+	return 0;
-+}
-+
-+static int
-+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
-+	       struct acl_role_label *role)
-+{
-+	struct acl_object_label *o_tmp;
-+	unsigned int len;
-+	int ret;
-+	char *tmp;
-+
-+	while (userp) {
-+		if ((o_tmp = (struct acl_object_label *)
-+		     acl_alloc(sizeof (struct acl_object_label))) == NULL)
-+			return -ENOMEM;
-+
-+		if (copy_from_user(o_tmp, userp,
-+				   sizeof (struct acl_object_label)))
-+			return -EFAULT;
-+
-+		userp = o_tmp->prev;
-+
-+		len = strnlen_user(o_tmp->filename, PATH_MAX);
-+
-+		if (!len || len >= PATH_MAX)
-+			return -EINVAL;
-+
-+		if ((tmp = (char *) acl_alloc(len)) == NULL)
-+			return -ENOMEM;
-+
-+		if (copy_from_user(tmp, o_tmp->filename, len))
-+			return -EFAULT;
-+
-+		o_tmp->filename = tmp;
-+
-+		insert_acl_obj_label(o_tmp, subj);
-+		if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
-+				       o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
-+			return -ENOMEM;
-+
-+		ret = copy_user_glob(o_tmp);
-+		if (ret)
-+			return ret;
-+
-+		if (o_tmp->nested) {
-+			o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
-+			if (IS_ERR(o_tmp->nested))
-+				return PTR_ERR(o_tmp->nested);
-+
-+			/* insert into nested subject list */
-+			o_tmp->nested->next = role->hash->first;
-+			role->hash->first = o_tmp->nested;
-+		}
-+	}
-+
-+	return 0;
-+}
-+
-+static __u32
-+count_user_subjs(struct acl_subject_label *userp)
-+{
-+	struct acl_subject_label s_tmp;
-+	__u32 num = 0;
-+
-+	while (userp) {
-+		if (copy_from_user(&s_tmp, userp,
-+				   sizeof (struct acl_subject_label)))
-+			break;
-+
-+		userp = s_tmp.prev;
-+		/* do not count nested subjects against this count, since
-+		   they are not included in the hash table, but are
-+		   attached to objects.  We have already counted
-+		   the subjects in userspace for the allocation 
-+		   stack
-+		*/
-+		if (!(s_tmp.mode & GR_NESTED))
-+			num++;
-+	}
-+
-+	return num;
-+}
-+
-+static int
-+copy_user_allowedips(struct acl_role_label *rolep)
-+{
-+	struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
-+
-+	ruserip = rolep->allowed_ips;
-+
-+	while (ruserip) {
-+		rlast = rtmp;
-+
-+		if ((rtmp = (struct role_allowed_ip *)
-+		     acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
-+			return -ENOMEM;
-+
-+		if (copy_from_user(rtmp, ruserip,
-+				   sizeof (struct role_allowed_ip)))
-+			return -EFAULT;
-+
-+		ruserip = rtmp->prev;
-+
-+		if (!rlast) {
-+			rtmp->prev = NULL;
-+			rolep->allowed_ips = rtmp;
-+		} else {
-+			rlast->next = rtmp;
-+			rtmp->prev = rlast;
-+		}
-+
-+		if (!ruserip)
-+			rtmp->next = NULL;
-+	}
-+
-+	return 0;
-+}
-+
-+static int
-+copy_user_transitions(struct acl_role_label *rolep)
-+{
-+	struct role_transition *rusertp, *rtmp = NULL, *rlast;
-+	
-+	unsigned int len;
-+	char *tmp;
-+
-+	rusertp = rolep->transitions;
-+
-+	while (rusertp) {
-+		rlast = rtmp;
-+
-+		if ((rtmp = (struct role_transition *)
-+		     acl_alloc(sizeof (struct role_transition))) == NULL)
-+			return -ENOMEM;
-+
-+		if (copy_from_user(rtmp, rusertp,
-+				   sizeof (struct role_transition)))
-+			return -EFAULT;
-+
-+		rusertp = rtmp->prev;
-+
-+		len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
-+
-+		if (!len || len >= GR_SPROLE_LEN)
-+			return -EINVAL;
-+
-+		if ((tmp = (char *) acl_alloc(len)) == NULL)
-+			return -ENOMEM;
-+
-+		if (copy_from_user(tmp, rtmp->rolename, len))
-+			return -EFAULT;
-+
-+		rtmp->rolename = tmp;
-+
-+		if (!rlast) {
-+			rtmp->prev = NULL;
-+			rolep->transitions = rtmp;
-+		} else {
-+			rlast->next = rtmp;
-+			rtmp->prev = rlast;
-+		}
-+
-+		if (!rusertp)
-+			rtmp->next = NULL;
-+	}
-+
-+	return 0;
-+}
-+
-+static struct acl_subject_label *
-+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
-+{
-+	struct acl_subject_label *s_tmp = NULL, *s_tmp2;
-+	unsigned int len;
-+	char *tmp;
-+	__u32 num_objs;
-+	struct acl_ip_label **i_tmp, *i_utmp2;
-+	struct gr_hash_struct ghash;
-+	struct subject_map *subjmap;
-+	unsigned int i_num;
-+	int err;
-+
-+	s_tmp = lookup_subject_map(userp);
-+
-+	/* we've already copied this subject into the kernel, just return
-+	   the reference to it, and don't copy it over again
-+	*/
-+	if (s_tmp)
-+		return(s_tmp);
-+
-+	if ((s_tmp = (struct acl_subject_label *)
-+	    acl_alloc(sizeof (struct acl_subject_label))) == NULL)
-+		return ERR_PTR(-ENOMEM);
-+
-+	subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
-+	if (subjmap == NULL)
-+		return ERR_PTR(-ENOMEM);
-+
-+	subjmap->user = userp;
-+	subjmap->kernel = s_tmp;
-+	insert_subj_map_entry(subjmap);
-+
-+	if (copy_from_user(s_tmp, userp,
-+			   sizeof (struct acl_subject_label)))
-+		return ERR_PTR(-EFAULT);
-+
-+	len = strnlen_user(s_tmp->filename, PATH_MAX);
-+
-+	if (!len || len >= PATH_MAX)
-+		return ERR_PTR(-EINVAL);
-+
-+	if ((tmp = (char *) acl_alloc(len)) == NULL)
-+		return ERR_PTR(-ENOMEM);
-+
-+	if (copy_from_user(tmp, s_tmp->filename, len))
-+		return ERR_PTR(-EFAULT);
-+
-+	s_tmp->filename = tmp;
-+
-+	if (!strcmp(s_tmp->filename, "/"))
-+		role->root_label = s_tmp;
-+
-+	if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
-+		return ERR_PTR(-EFAULT);
-+
-+	/* copy user and group transition tables */
-+
-+	if (s_tmp->user_trans_num) {
-+		uid_t *uidlist;
-+
-+		uidlist = (uid_t *)acl_alloc(s_tmp->user_trans_num * sizeof(uid_t));
-+		if (uidlist == NULL)
-+			return ERR_PTR(-ENOMEM);
-+		if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
-+			return ERR_PTR(-EFAULT);
-+
-+		s_tmp->user_transitions = uidlist;
-+	}
-+
-+	if (s_tmp->group_trans_num) {
-+		gid_t *gidlist;
-+
-+		gidlist = (gid_t *)acl_alloc(s_tmp->group_trans_num * sizeof(gid_t));
-+		if (gidlist == NULL)
-+			return ERR_PTR(-ENOMEM);
-+		if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
-+			return ERR_PTR(-EFAULT);
-+
-+		s_tmp->group_transitions = gidlist;
-+	}
-+
-+	/* set up object hash table */
-+	num_objs = count_user_objs(ghash.first);
-+
-+	s_tmp->obj_hash_size = num_objs;
-+	s_tmp->obj_hash =
-+	    (struct acl_object_label **)
-+	    create_table(&(s_tmp->obj_hash_size), sizeof(void *));
-+
-+	if (!s_tmp->obj_hash)
-+		return ERR_PTR(-ENOMEM);
-+
-+	memset(s_tmp->obj_hash, 0,
-+	       s_tmp->obj_hash_size *
-+	       sizeof (struct acl_object_label *));
-+
-+	/* add in objects */
-+	err = copy_user_objs(ghash.first, s_tmp, role);
-+
-+	if (err)
-+		return ERR_PTR(err);
-+
-+	/* set pointer for parent subject */
-+	if (s_tmp->parent_subject) {
-+		s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
-+
-+		if (IS_ERR(s_tmp2))
-+			return s_tmp2;
-+
-+		s_tmp->parent_subject = s_tmp2;
-+	}
-+
-+	/* add in ip acls */
-+
-+	if (!s_tmp->ip_num) {
-+		s_tmp->ips = NULL;
-+		goto insert;
-+	}
-+
-+	i_tmp =
-+	    (struct acl_ip_label **) acl_alloc(s_tmp->ip_num *
-+					       sizeof (struct
-+						       acl_ip_label *));
-+
-+	if (!i_tmp)
-+		return ERR_PTR(-ENOMEM);
-+
-+	for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
-+		*(i_tmp + i_num) =
-+		    (struct acl_ip_label *)
-+		    acl_alloc(sizeof (struct acl_ip_label));
-+		if (!*(i_tmp + i_num))
-+			return ERR_PTR(-ENOMEM);
-+
-+		if (copy_from_user
-+		    (&i_utmp2, s_tmp->ips + i_num,
-+		     sizeof (struct acl_ip_label *)))
-+			return ERR_PTR(-EFAULT);
-+
-+		if (copy_from_user
-+		    (*(i_tmp + i_num), i_utmp2,
-+		     sizeof (struct acl_ip_label)))
-+			return ERR_PTR(-EFAULT);
-+		
-+		if ((*(i_tmp + i_num))->iface == NULL)
-+			continue;
-+
-+		len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
-+		if (!len || len >= IFNAMSIZ)
-+			return ERR_PTR(-EINVAL);
-+		tmp = acl_alloc(len);
-+		if (tmp == NULL)
-+			return ERR_PTR(-ENOMEM);
-+		if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
-+			return ERR_PTR(-EFAULT);
-+		(*(i_tmp + i_num))->iface = tmp;
-+	}
-+
-+	s_tmp->ips = i_tmp;
-+
-+insert:
-+	if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
-+			       s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
-+		return ERR_PTR(-ENOMEM);
-+
-+	return s_tmp;
-+}
-+
-+static int
-+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
-+{
-+	struct acl_subject_label s_pre;
-+	struct acl_subject_label * ret;
-+	int err;
-+
-+	while (userp) {
-+		if (copy_from_user(&s_pre, userp,
-+				   sizeof (struct acl_subject_label)))
-+			return -EFAULT;
-+		
-+		/* do not add nested subjects here, add
-+		   while parsing objects
-+		*/
-+
-+		if (s_pre.mode & GR_NESTED) {
-+			userp = s_pre.prev;
-+			continue;
-+		}
-+
-+		ret = do_copy_user_subj(userp, role);
-+
-+		err = PTR_ERR(ret);
-+		if (IS_ERR(ret))
-+			return err;
-+
-+		insert_acl_subj_label(ret, role);
-+
-+		userp = s_pre.prev;
-+	}
-+
-+	return 0;
-+}
-+
-+static int
-+copy_user_acl(struct gr_arg *arg)
-+{
-+	struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
-+	struct sprole_pw *sptmp;
-+	struct gr_hash_struct *ghash;
-+	uid_t *domainlist;
-+	unsigned int r_num;
-+	unsigned int len;
-+	char *tmp;
-+	int err = 0;
-+	__u16 i;
-+	__u32 num_subjs;
-+
-+	/* we need a default and kernel role */
-+	if (arg->role_db.num_roles < 2)
-+		return -EINVAL;
-+
-+	/* copy special role authentication info from userspace */
-+
-+	num_sprole_pws = arg->num_sprole_pws;
-+	acl_special_roles = (struct sprole_pw **) acl_alloc(num_sprole_pws * sizeof(struct sprole_pw *));
-+
-+	if (!acl_special_roles) {
-+		err = -ENOMEM;
-+		goto cleanup;
-+	}
-+
-+	for (i = 0; i < num_sprole_pws; i++) {
-+		sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
-+		if (!sptmp) {
-+			err = -ENOMEM;
-+			goto cleanup;
-+		}
-+		if (copy_from_user(sptmp, arg->sprole_pws + i,
-+				   sizeof (struct sprole_pw))) {
-+			err = -EFAULT;
-+			goto cleanup;
-+		}
-+
-+		len =
-+		    strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
-+
-+		if (!len || len >= GR_SPROLE_LEN) {
-+			err = -EINVAL;
-+			goto cleanup;
-+		}
-+
-+		if ((tmp = (char *) acl_alloc(len)) == NULL) {
-+			err = -ENOMEM;
-+			goto cleanup;
-+		}
-+
-+		if (copy_from_user(tmp, sptmp->rolename, len)) {
-+			err = -EFAULT;
-+			goto cleanup;
-+		}
-+
-+#ifdef CONFIG_GRKERNSEC_ACL_DEBUG
-+		printk(KERN_ALERT "Copying special role %s\n", tmp);
-+#endif
-+		sptmp->rolename = tmp;
-+		acl_special_roles[i] = sptmp;
-+	}
-+
-+	r_utmp = (struct acl_role_label **) arg->role_db.r_table;
-+
-+	for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
-+		r_tmp = acl_alloc(sizeof (struct acl_role_label));
-+
-+		if (!r_tmp) {
-+			err = -ENOMEM;
-+			goto cleanup;
-+		}
-+
-+		if (copy_from_user(&r_utmp2, r_utmp + r_num,
-+				   sizeof (struct acl_role_label *))) {
-+			err = -EFAULT;
-+			goto cleanup;
-+		}
-+
-+		if (copy_from_user(r_tmp, r_utmp2,
-+				   sizeof (struct acl_role_label))) {
-+			err = -EFAULT;
-+			goto cleanup;
-+		}
-+
-+		len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
-+
-+		if (!len || len >= PATH_MAX) {
-+			err = -EINVAL;
-+			goto cleanup;
-+		}
-+
-+		if ((tmp = (char *) acl_alloc(len)) == NULL) {
-+			err = -ENOMEM;
-+			goto cleanup;
-+		}
-+		if (copy_from_user(tmp, r_tmp->rolename, len)) {
-+			err = -EFAULT;
-+			goto cleanup;
-+		}
-+		r_tmp->rolename = tmp;
-+
-+		if (!strcmp(r_tmp->rolename, "default")
-+		    && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
-+			default_role = r_tmp;
-+		} else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
-+			kernel_role = r_tmp;
-+		}
-+
-+		if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
-+			err = -ENOMEM;
-+			goto cleanup;
-+		}
-+		if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
-+			err = -EFAULT;
-+			goto cleanup;
-+		}
-+
-+		r_tmp->hash = ghash;
-+
-+		num_subjs = count_user_subjs(r_tmp->hash->first);
-+
-+		r_tmp->subj_hash_size = num_subjs;
-+		r_tmp->subj_hash =
-+		    (struct acl_subject_label **)
-+		    create_table(&(r_tmp->subj_hash_size), sizeof(void *));
-+
-+		if (!r_tmp->subj_hash) {
-+			err = -ENOMEM;
-+			goto cleanup;
-+		}
-+
-+		err = copy_user_allowedips(r_tmp);
-+		if (err)
-+			goto cleanup;
-+
-+		/* copy domain info */
-+		if (r_tmp->domain_children != NULL) {
-+			domainlist = acl_alloc(r_tmp->domain_child_num * sizeof(uid_t));
-+			if (domainlist == NULL) {
-+				err = -ENOMEM;
-+				goto cleanup;
-+			}
-+			if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
-+				err = -EFAULT;
-+				goto cleanup;
-+			}
-+			r_tmp->domain_children = domainlist;
-+		}
-+
-+		err = copy_user_transitions(r_tmp);
-+		if (err)
-+			goto cleanup;
-+
-+		memset(r_tmp->subj_hash, 0,
-+		       r_tmp->subj_hash_size *
-+		       sizeof (struct acl_subject_label *));
-+
-+		err = copy_user_subjs(r_tmp->hash->first, r_tmp);
-+
-+		if (err)
-+			goto cleanup;
-+
-+		/* set nested subject list to null */
-+		r_tmp->hash->first = NULL;
-+
-+		insert_acl_role_label(r_tmp);
-+	}
-+
-+	goto return_err;
-+      cleanup:
-+	free_variables();
-+      return_err:
-+	return err;
-+
-+}
-+
-+static int
-+gracl_init(struct gr_arg *args)
-+{
-+	int error = 0;
-+
-+	memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
-+	memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
-+
-+	if (init_variables(args)) {
-+		gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
-+		error = -ENOMEM;
-+		free_variables();
-+		goto out;
-+	}
-+
-+	error = copy_user_acl(args);
-+	free_init_variables();
-+	if (error) {
-+		free_variables();
-+		goto out;
-+	}
-+
-+	if ((error = gr_set_acls(0))) {
-+		free_variables();
-+		goto out;
-+	}
-+
-+	gr_status |= GR_READY;
-+      out:
-+	return error;
-+}
-+
-+/* derived from glibc fnmatch() 0: match, 1: no match*/
-+
-+static int
-+glob_match(const char *p, const char *n)
-+{
-+	char c;
-+
-+	while ((c = *p++) != '\0') {
-+	switch (c) {
-+		case '?':
-+			if (*n == '\0')
-+				return 1;
-+			else if (*n == '/')
-+				return 1;
-+			break;
-+		case '\\':
-+			if (*n != c)
-+				return 1;
-+			break;
-+		case '*':
-+			for (c = *p++; c == '?' || c == '*'; c = *p++) {
-+				if (*n == '/')
-+					return 1;
-+				else if (c == '?') {
-+					if (*n == '\0')
-+						return 1;
-+					else
-+						++n;
-+				}
-+			}
-+			if (c == '\0') {
-+				return 0;
-+			} else {
-+				const char *endp;
-+
-+				if ((endp = strchr(n, '/')) == NULL)
-+					endp = n + strlen(n);
-+
-+				if (c == '[') {
-+					for (--p; n < endp; ++n)
-+						if (!glob_match(p, n))
-+							return 0;
-+				} else if (c == '/') {
-+					while (*n != '\0' && *n != '/')
-+						++n;
-+					if (*n == '/' && !glob_match(p, n + 1))
-+						return 0;
-+				} else {
-+					for (--p; n < endp; ++n)
-+						if (*n == c && !glob_match(p, n))
-+							return 0;
-+				}
-+
-+				return 1;
-+			}
-+		case '[':
-+			{
-+			int not;
-+			char cold;
-+
-+			if (*n == '\0' || *n == '/')
-+				return 1;
-+
-+			not = (*p == '!' || *p == '^');
-+			if (not)
-+				++p;
-+
-+			c = *p++;
-+			for (;;) {
-+				unsigned char fn = (unsigned char)*n;
-+
-+				if (c == '\0')
-+					return 1;
-+				else {
-+					if (c == fn)
-+						goto matched;
-+					cold = c;
-+					c = *p++;
-+
-+					if (c == '-' && *p != ']') {
-+						unsigned char cend = *p++;
-+
-+						if (cend == '\0')
-+							return 1;
-+
-+						if (cold <= fn && fn <= cend)
-+							goto matched;
-+
-+						c = *p++;
-+					}
-+				}
-+
-+				if (c == ']')
-+					break;
-+			}
-+			if (!not)
-+				return 1;
-+			break;
-+		matched:
-+			while (c != ']') {
-+				if (c == '\0')
-+					return 1;
-+
-+				c = *p++;
-+			}
-+			if (not)
-+				return 1;
-+		}
-+		break;
-+	default:
-+		if (c != *n)
-+			return 1;
-+	}
-+
-+	++n;
-+	}
-+
-+	if (*n == '\0')
-+		return 0;
-+
-+	if (*n == '/')
-+		return 0;
-+
-+	return 1;
-+}
-+
-+static struct acl_object_label *
-+chk_glob_label(struct acl_object_label *globbed,
-+	struct dentry *dentry, struct vfsmount *mnt, char **path)
-+{
-+	struct acl_object_label *tmp;
-+
-+	if (*path == NULL)
-+		*path = gr_to_filename_nolock(dentry, mnt);
-+
-+	tmp = globbed;
-+
-+	while (tmp) {
-+		if (!glob_match(tmp->filename, *path))
-+			return tmp;
-+		tmp = tmp->next;
-+	}
-+
-+	return NULL;
-+}
-+
-+static struct acl_object_label *
-+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
-+	    const ino_t curr_ino, const dev_t curr_dev,
-+	    const struct acl_subject_label *subj, char **path)
-+{
-+	struct acl_subject_label *tmpsubj;
-+	struct acl_object_label *retval;
-+	struct acl_object_label *retval2;
-+
-+	tmpsubj = (struct acl_subject_label *) subj;
-+	read_lock(&gr_inode_lock);
-+	do {
-+		retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
-+		if (retval) {
-+			if (retval->globbed) {
-+				retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
-+						(struct vfsmount *)orig_mnt, path);
-+				if (retval2)
-+					retval = retval2;
-+			}
-+			break;
-+		}
-+	} while ((tmpsubj = tmpsubj->parent_subject));
-+	read_unlock(&gr_inode_lock);
-+
-+	return retval;
-+}
-+
-+static __inline__ struct acl_object_label *
-+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
-+	    const struct dentry *curr_dentry,
-+	    const struct acl_subject_label *subj, char **path)
-+{
-+	return __full_lookup(orig_dentry, orig_mnt,
-+			     curr_dentry->d_inode->i_ino, 
-+			     curr_dentry->d_inode->i_sb->s_dev, subj, path);
-+}
-+
-+static struct acl_object_label *
-+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
-+	      const struct acl_subject_label *subj, char *path)
-+{
-+	struct dentry *dentry = (struct dentry *) l_dentry;
-+	struct vfsmount *mnt = (struct vfsmount *) l_mnt;
-+	struct acl_object_label *retval;
-+
-+	spin_lock(&dcache_lock);
-+
-+	if (unlikely(mnt == shm_mnt || mnt == pipe_mnt || mnt == sock_mnt ||
-+		/* ignore Eric Biederman */
-+	    IS_PRIVATE(l_dentry->d_inode))) {
-+		retval = fakefs_obj;
-+		goto out;
-+	}
-+
-+	for (;;) {
-+		if (dentry == real_root && mnt == real_root_mnt)
-+			break;
-+
-+		if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
-+			if (mnt->mnt_parent == mnt)
-+				break;
-+
-+			retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path);
-+			if (retval != NULL)
-+				goto out;
-+
-+			dentry = mnt->mnt_mountpoint;
-+			mnt = mnt->mnt_parent;
-+			continue;
-+		}
-+
-+		retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path);
-+		if (retval != NULL)
-+			goto out;
-+
-+		dentry = dentry->d_parent;
-+	}
-+
-+	retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path);
-+
-+	if (retval == NULL)
-+		retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path);
-+out:
-+	spin_unlock(&dcache_lock);
-+	return retval;
-+}
-+
-+static __inline__ struct acl_object_label *
-+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
-+	      const struct acl_subject_label *subj)
-+{
-+	char *path = NULL;
-+	return __chk_obj_label(l_dentry, l_mnt, subj, path);
-+}
-+
-+static __inline__ struct acl_object_label *
-+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
-+		     const struct acl_subject_label *subj, char *path)
-+{
-+	return __chk_obj_label(l_dentry, l_mnt, subj, path);
-+}
-+
-+static struct acl_subject_label *
-+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
-+	       const struct acl_role_label *role)
-+{
-+	struct dentry *dentry = (struct dentry *) l_dentry;
-+	struct vfsmount *mnt = (struct vfsmount *) l_mnt;
-+	struct acl_subject_label *retval;
-+
-+	spin_lock(&dcache_lock);
-+
-+	for (;;) {
-+		if (dentry == real_root && mnt == real_root_mnt)
-+			break;
-+		if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
-+			if (mnt->mnt_parent == mnt)
-+				break;
-+
-+			read_lock(&gr_inode_lock);
-+			retval =
-+				lookup_acl_subj_label(dentry->d_inode->i_ino,
-+						dentry->d_inode->i_sb->s_dev, role);
-+			read_unlock(&gr_inode_lock);
-+			if (retval != NULL)
-+				goto out;
-+
-+			dentry = mnt->mnt_mountpoint;
-+			mnt = mnt->mnt_parent;
-+			continue;
-+		}
-+
-+		read_lock(&gr_inode_lock);
-+		retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
-+					  dentry->d_inode->i_sb->s_dev, role);
-+		read_unlock(&gr_inode_lock);
-+		if (retval != NULL)
-+			goto out;
-+
-+		dentry = dentry->d_parent;
-+	}
-+
-+	read_lock(&gr_inode_lock);
-+	retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
-+				  dentry->d_inode->i_sb->s_dev, role);
-+	read_unlock(&gr_inode_lock);
-+
-+	if (unlikely(retval == NULL)) {
-+		read_lock(&gr_inode_lock);
-+		retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
-+					  real_root->d_inode->i_sb->s_dev, role);
-+		read_unlock(&gr_inode_lock);
-+	}
-+out:
-+	spin_unlock(&dcache_lock);
-+
-+	return retval;
-+}
-+
-+static void
-+gr_log_learn(const struct task_struct *task, const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
-+{
-+	security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
-+		       task->uid, task->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_dentry,
-+		       task->exec_file->f_vfsmnt) : task->acl->filename, task->acl->filename,
-+		       1, 1, gr_to_filename(dentry, mnt), (unsigned long) mode, NIPQUAD(task->signal->curr_ip));
-+
-+	return;
-+}
-+
-+static void
-+gr_log_learn_sysctl(const struct task_struct *task, const char *path, const __u32 mode)
-+{
-+	security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
-+		       task->uid, task->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_dentry,
-+		       task->exec_file->f_vfsmnt) : task->acl->filename, task->acl->filename,
-+		       1, 1, path, (unsigned long) mode, NIPQUAD(task->signal->curr_ip));
-+
-+	return;
-+}
-+
-+static void
-+gr_log_learn_id_change(const struct task_struct *task, const char type, const unsigned int real, 
-+		       const unsigned int effective, const unsigned int fs)
-+{
-+	security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
-+		       task->uid, task->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_dentry,
-+		       task->exec_file->f_vfsmnt) : task->acl->filename, task->acl->filename,
-+		       type, real, effective, fs, NIPQUAD(task->signal->curr_ip));
-+
-+	return;
-+}
-+
-+__u32
-+gr_check_link(const struct dentry * new_dentry,
-+	      const struct dentry * parent_dentry,
-+	      const struct vfsmount * parent_mnt,
-+	      const struct dentry * old_dentry, const struct vfsmount * old_mnt)
-+{
-+	struct acl_object_label *obj;
-+	__u32 oldmode, newmode;
-+	__u32 needmode;
-+
-+	if (unlikely(!(gr_status & GR_READY)))
-+		return (GR_CREATE | GR_LINK);
-+
-+	obj = chk_obj_label(old_dentry, old_mnt, current->acl);
-+	oldmode = obj->mode;
-+
-+	if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
-+		oldmode |= (GR_CREATE | GR_LINK);
-+
-+	needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
-+	if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
-+		needmode |= GR_SETID | GR_AUDIT_SETID;
-+
-+	newmode =
-+	    gr_check_create(new_dentry, parent_dentry, parent_mnt,
-+			    oldmode | needmode);
-+
-+	needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
-+			      GR_SETID | GR_READ | GR_FIND | GR_DELETE |
-+			      GR_INHERIT | GR_AUDIT_INHERIT);
-+
-+	if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
-+		goto bad;
-+
-+	if ((oldmode & needmode) != needmode)
-+		goto bad;
-+
-+	needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
-+	if ((newmode & needmode) != needmode)
-+		goto bad;
-+
-+	if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
-+		return newmode;
-+bad:
-+	needmode = oldmode;
-+	if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
-+		needmode |= GR_SETID;
-+	
-+	if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
-+		gr_log_learn(current, old_dentry, old_mnt, needmode);
-+		return (GR_CREATE | GR_LINK);
-+	} else if (newmode & GR_SUPPRESS)
-+		return GR_SUPPRESS;
-+	else
-+		return 0;
-+}
-+
-+__u32
-+gr_search_file(const struct dentry * dentry, const __u32 mode,
-+	       const struct vfsmount * mnt)
-+{
-+	__u32 retval = mode;
-+	struct acl_subject_label *curracl;
-+	struct acl_object_label *currobj;
-+
-+	if (unlikely(!(gr_status & GR_READY)))
-+		return (mode & ~GR_AUDITS);
-+
-+	curracl = current->acl;
-+
-+	currobj = chk_obj_label(dentry, mnt, curracl);
-+	retval = currobj->mode & mode;
-+
-+	if (unlikely
-+	    ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
-+	     && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
-+		__u32 new_mode = mode;
-+
-+		new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
-+
-+		retval = new_mode;
-+
-+		if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
-+			new_mode |= GR_INHERIT;
-+
-+		if (!(mode & GR_NOLEARN))
-+			gr_log_learn(current, dentry, mnt, new_mode);
-+	}
-+
-+	return retval;
-+}
-+
-+__u32
-+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
-+		const struct vfsmount * mnt, const __u32 mode)
-+{
-+	struct name_entry *match;
-+	struct acl_object_label *matchpo;
-+	struct acl_subject_label *curracl;
-+	char *path;
-+	__u32 retval;
-+
-+	if (unlikely(!(gr_status & GR_READY)))
-+		return (mode & ~GR_AUDITS);
-+
-+	preempt_disable();
-+	path = gr_to_filename_rbac(new_dentry, mnt);
-+	match = lookup_name_entry_create(path);
-+
-+	if (!match)
-+		goto check_parent;
-+
-+	curracl = current->acl;
-+
-+	read_lock(&gr_inode_lock);
-+	matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
-+	read_unlock(&gr_inode_lock);
-+
-+	if (matchpo) {
-+		if ((matchpo->mode & mode) !=
-+		    (mode & ~(GR_AUDITS | GR_SUPPRESS))
-+		    && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
-+			__u32 new_mode = mode;
-+
-+			new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
-+
-+			gr_log_learn(current, new_dentry, mnt, new_mode);
-+
-+			preempt_enable();
-+			return new_mode;
-+		}
-+		preempt_enable();
-+		return (matchpo->mode & mode);
-+	}
-+
-+      check_parent:
-+	curracl = current->acl;
-+
-+	matchpo = chk_obj_create_label(parent, mnt, curracl, path);
-+	retval = matchpo->mode & mode;
-+
-+	if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
-+	    && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
-+		__u32 new_mode = mode;
-+
-+		new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
-+
-+		gr_log_learn(current, new_dentry, mnt, new_mode);
-+		preempt_enable();
-+		return new_mode;
-+	}
-+
-+	preempt_enable();
-+	return retval;
-+}
-+
-+int
-+gr_check_hidden_task(const struct task_struct *task)
-+{
-+	if (unlikely(!(gr_status & GR_READY)))
-+		return 0;
-+
-+	if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
-+		return 1;
-+
-+	return 0;
-+}
-+
-+int
-+gr_check_protected_task(const struct task_struct *task)
-+{
-+	if (unlikely(!(gr_status & GR_READY) || !task))
-+		return 0;
-+
-+	if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
-+	    task->acl != current->acl)
-+		return 1;
-+
-+	return 0;
-+}
-+
-+void
-+gr_copy_label(struct task_struct *tsk)
-+{
-+	tsk->signal->used_accept = 0;
-+	tsk->acl_sp_role = 0;
-+	tsk->acl_role_id = current->acl_role_id;
-+	tsk->acl = current->acl;
-+	tsk->role = current->role;
-+	tsk->signal->curr_ip = current->signal->curr_ip;
-+	if (current->exec_file)
-+		get_file(current->exec_file);
-+	tsk->exec_file = current->exec_file;
-+	tsk->is_writable = current->is_writable;
-+	if (unlikely(current->signal->used_accept))
-+		current->signal->curr_ip = 0;
-+
-+	return;
-+}
-+
-+static void
-+gr_set_proc_res(struct task_struct *task)
-+{
-+	struct acl_subject_label *proc;
-+	unsigned short i;
-+
-+	proc = task->acl;
-+
-+	if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
-+		return;
-+
-+	for (i = 0; i < (GR_NLIMITS - 1); i++) {
-+		if (!(proc->resmask & (1 << i)))
-+			continue;
-+
-+		task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
-+		task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
-+	}
-+
-+	return;
-+}
-+
-+int
-+gr_check_user_change(int real, int effective, int fs)
-+{
-+	unsigned int i;
-+	__u16 num;
-+	uid_t *uidlist;
-+	int curuid;
-+	int realok = 0;
-+	int effectiveok = 0;
-+	int fsok = 0;
-+
-+	if (unlikely(!(gr_status & GR_READY)))
-+		return 0;
-+
-+	if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
-+		gr_log_learn_id_change(current, 'u', real, effective, fs);
-+
-+	num = current->acl->user_trans_num;
-+	uidlist = current->acl->user_transitions;
-+
-+	if (uidlist == NULL)
-+		return 0;
-+
-+	if (real == -1)
-+		realok = 1;
-+	if (effective == -1)
-+		effectiveok = 1;
-+	if (fs == -1)
-+		fsok = 1;
-+
-+	if (current->acl->user_trans_type & GR_ID_ALLOW) {
-+		for (i = 0; i < num; i++) {
-+			curuid = (int)uidlist[i];
-+			if (real == curuid)
-+				realok = 1;
-+			if (effective == curuid)
-+				effectiveok = 1;
-+			if (fs == curuid)
-+				fsok = 1;
-+		}
-+	} else if (current->acl->user_trans_type & GR_ID_DENY) {
-+		for (i = 0; i < num; i++) {
-+			curuid = (int)uidlist[i];
-+			if (real == curuid)
-+				break;
-+			if (effective == curuid)
-+				break;
-+			if (fs == curuid)
-+				break;
-+		}
-+		/* not in deny list */
-+		if (i == num) {
-+			realok = 1;
-+			effectiveok = 1;
-+			fsok = 1;
-+		}
-+	}
-+
-+	if (realok && effectiveok && fsok)
-+		return 0;
-+	else {
-+		gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
-+		return 1;
-+	}
-+}
-+
-+int
-+gr_check_group_change(int real, int effective, int fs)
-+{
-+	unsigned int i;
-+	__u16 num;
-+	gid_t *gidlist;
-+	int curgid;
-+	int realok = 0;
-+	int effectiveok = 0;
-+	int fsok = 0;
-+
-+	if (unlikely(!(gr_status & GR_READY)))
-+		return 0;
-+
-+	if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
-+		gr_log_learn_id_change(current, 'g', real, effective, fs);
-+
-+	num = current->acl->group_trans_num;
-+	gidlist = current->acl->group_transitions;
-+
-+	if (gidlist == NULL)
-+		return 0;
-+
-+	if (real == -1)
-+		realok = 1;
-+	if (effective == -1)
-+		effectiveok = 1;
-+	if (fs == -1)
-+		fsok = 1;
-+
-+	if (current->acl->group_trans_type & GR_ID_ALLOW) {
-+		for (i = 0; i < num; i++) {
-+			curgid = (int)gidlist[i];
-+			if (real == curgid)
-+				realok = 1;
-+			if (effective == curgid)
-+				effectiveok = 1;
-+			if (fs == curgid)
-+				fsok = 1;
-+		}
-+	} else if (current->acl->group_trans_type & GR_ID_DENY) {
-+		for (i = 0; i < num; i++) {
-+			curgid = (int)gidlist[i];
-+			if (real == curgid)
-+				break;
-+			if (effective == curgid)
-+				break;
-+			if (fs == curgid)
-+				break;
-+		}
-+		/* not in deny list */
-+		if (i == num) {
-+			realok = 1;
-+			effectiveok = 1;
-+			fsok = 1;
-+		}
-+	}
-+
-+	if (realok && effectiveok && fsok)
-+		return 0;
-+	else {
-+		gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
-+		return 1;
-+	}
-+}
-+
-+void
-+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
-+{
-+	struct acl_role_label *role = task->role;
-+	struct acl_subject_label *subj = NULL;
-+	struct acl_object_label *obj;
-+	struct file *filp;
-+
-+	if (unlikely(!(gr_status & GR_READY)))
-+		return;
-+
-+	filp = task->exec_file;
-+
-+	/* kernel process, we'll give them the kernel role */
-+	if (unlikely(!filp)) {
-+		task->role = kernel_role;
-+		task->acl = kernel_role->root_label;
-+		return;
-+	} else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
-+		role = lookup_acl_role_label(task, uid, gid);
-+
-+	/* perform subject lookup in possibly new role
-+	   we can use this result below in the case where role == task->role
-+	*/
-+	subj = chk_subj_label(filp->f_dentry, filp->f_vfsmnt, role);
-+
-+	/* if we changed uid/gid, but result in the same role
-+	   and are using inheritance, don't lose the inherited subject
-+	   if current subject is other than what normal lookup
-+	   would result in, we arrived via inheritance, don't
-+	   lose subject
-+	*/
-+	if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
-+				   (subj == task->acl)))
-+		task->acl = subj;
-+
-+	task->role = role;
-+
-+	task->is_writable = 0;
-+
-+	/* ignore additional mmap checks for processes that are writable 
-+	   by the default ACL */
-+	obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, default_role->root_label);
-+	if (unlikely(obj->mode & GR_WRITE))
-+		task->is_writable = 1;
-+	obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, task->role->root_label);
-+	if (unlikely(obj->mode & GR_WRITE))
-+		task->is_writable = 1;
-+
-+#ifdef CONFIG_GRKERNSEC_ACL_DEBUG
-+	printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
-+#endif
-+
-+	gr_set_proc_res(task);
-+
-+	return;
-+}
-+
-+int
-+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt)
-+{
-+	struct task_struct *task = current;
-+	struct acl_subject_label *newacl;
-+	struct acl_object_label *obj;
-+	__u32 retmode;
-+
-+	if (unlikely(!(gr_status & GR_READY)))
-+		return 0;
-+
-+	newacl = chk_subj_label(dentry, mnt, task->role);
-+
-+	task_lock(task);
-+	if (((task->ptrace & PT_PTRACED) && !(task->acl->mode &
-+	     GR_POVERRIDE) && (task->acl != newacl) &&
-+	     !(task->role->roletype & GR_ROLE_GOD) &&
-+	     !gr_search_file(dentry, GR_PTRACERD, mnt) &&
-+	     !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) ||
-+	    (atomic_read(&task->fs->count) > 1 ||
-+	     atomic_read(&task->files->count) > 1 ||
-+	     atomic_read(&task->sighand->count) > 1)) {
-+                task_unlock(task);
-+		gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
-+		return -EACCES;
-+	}
-+	task_unlock(task);
-+
-+	obj = chk_obj_label(dentry, mnt, task->acl);
-+	retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
-+
-+	if (!(task->acl->mode & GR_INHERITLEARN) &&
-+	    ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
-+		if (obj->nested)
-+			task->acl = obj->nested;
-+		else
-+			task->acl = newacl;
-+	} else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
-+		gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
-+
-+	task->is_writable = 0;
-+
-+	/* ignore additional mmap checks for processes that are writable 
-+	   by the default ACL */
-+	obj = chk_obj_label(dentry, mnt, default_role->root_label);
-+	if (unlikely(obj->mode & GR_WRITE))
-+		task->is_writable = 1;
-+	obj = chk_obj_label(dentry, mnt, task->role->root_label);
-+	if (unlikely(obj->mode & GR_WRITE))
-+		task->is_writable = 1;
-+
-+	gr_set_proc_res(task);
-+
-+#ifdef CONFIG_GRKERNSEC_ACL_DEBUG
-+	printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
-+#endif
-+	return 0;
-+}
-+
-+/* always called with valid inodev ptr */
-+static void
-+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
-+{
-+	struct acl_object_label *matchpo;
-+	struct acl_subject_label *matchps;
-+	struct acl_subject_label *subj;
-+	struct acl_role_label *role;
-+	unsigned int i, x;
-+
-+	FOR_EACH_ROLE_START(role, i)
-+		FOR_EACH_SUBJECT_START(role, subj, x)
-+			if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
-+				matchpo->mode |= GR_DELETED;
-+		FOR_EACH_SUBJECT_END(subj,x)
-+		FOR_EACH_NESTED_SUBJECT_START(role, subj)
-+			if (subj->inode == ino && subj->device == dev)
-+				subj->mode |= GR_DELETED;
-+		FOR_EACH_NESTED_SUBJECT_END(subj)
-+		if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
-+			matchps->mode |= GR_DELETED;
-+	FOR_EACH_ROLE_END(role,i)
-+
-+	inodev->nentry->deleted = 1;
-+
-+	return;
-+}
-+
-+void
-+gr_handle_delete(const ino_t ino, const dev_t dev)
-+{
-+	struct inodev_entry *inodev;
-+
-+	if (unlikely(!(gr_status & GR_READY)))
-+		return;
-+
-+	write_lock(&gr_inode_lock);
-+	inodev = lookup_inodev_entry(ino, dev);
-+	if (inodev != NULL)
-+		do_handle_delete(inodev, ino, dev);
-+	write_unlock(&gr_inode_lock);
-+
-+	return;
-+}
-+
-+static void
-+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
-+		     const ino_t newinode, const dev_t newdevice,
-+		     struct acl_subject_label *subj)
-+{
-+	unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
-+	struct acl_object_label *match;
-+
-+	match = subj->obj_hash[index];
-+
-+	while (match && (match->inode != oldinode ||
-+	       match->device != olddevice ||
-+	       !(match->mode & GR_DELETED)))
-+		match = match->next;
-+
-+	if (match && (match->inode == oldinode)
-+	    && (match->device == olddevice)
-+	    && (match->mode & GR_DELETED)) {
-+		if (match->prev == NULL) {
-+			subj->obj_hash[index] = match->next;
-+			if (match->next != NULL)
-+				match->next->prev = NULL;
-+		} else {
-+			match->prev->next = match->next;
-+			if (match->next != NULL)
-+				match->next->prev = match->prev;
-+		}
-+		match->prev = NULL;
-+		match->next = NULL;
-+		match->inode = newinode;
-+		match->device = newdevice;
-+		match->mode &= ~GR_DELETED;
-+
-+		insert_acl_obj_label(match, subj);
-+	}
-+
-+	return;
-+}
-+
-+static void
-+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
-+		      const ino_t newinode, const dev_t newdevice,
-+		      struct acl_role_label *role)
-+{
-+	unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
-+	struct acl_subject_label *match;
-+
-+	match = role->subj_hash[index];
-+
-+	while (match && (match->inode != oldinode ||
-+	       match->device != olddevice ||
-+	       !(match->mode & GR_DELETED)))
-+		match = match->next;
-+
-+	if (match && (match->inode == oldinode)
-+	    && (match->device == olddevice)
-+	    && (match->mode & GR_DELETED)) {
-+		if (match->prev == NULL) {
-+			role->subj_hash[index] = match->next;
-+			if (match->next != NULL)
-+				match->next->prev = NULL;
-+		} else {
-+			match->prev->next = match->next;
-+			if (match->next != NULL)
-+				match->next->prev = match->prev;
-+		}
-+		match->prev = NULL;
-+		match->next = NULL;
-+		match->inode = newinode;
-+		match->device = newdevice;
-+		match->mode &= ~GR_DELETED;
-+
-+		insert_acl_subj_label(match, role);
-+	}
-+
-+	return;
-+}
-+
-+static void
-+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
-+		    const ino_t newinode, const dev_t newdevice)
-+{
-+	unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
-+	struct inodev_entry *match;
-+
-+	match = inodev_set.i_hash[index];
-+
-+	while (match && (match->nentry->inode != oldinode ||
-+	       match->nentry->device != olddevice || !match->nentry->deleted))
-+		match = match->next;
-+
-+	if (match && (match->nentry->inode == oldinode)
-+	    && (match->nentry->device == olddevice) &&
-+	    match->nentry->deleted) {
-+		if (match->prev == NULL) {
-+			inodev_set.i_hash[index] = match->next;
-+			if (match->next != NULL)
-+				match->next->prev = NULL;
-+		} else {
-+			match->prev->next = match->next;
-+			if (match->next != NULL)
-+				match->next->prev = match->prev;
-+		}
-+		match->prev = NULL;
-+		match->next = NULL;
-+		match->nentry->inode = newinode;
-+		match->nentry->device = newdevice;
-+		match->nentry->deleted = 0;
-+
-+		insert_inodev_entry(match);
-+	}
-+
-+	return;
-+}
-+
-+static void
-+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
-+		 const struct vfsmount *mnt)
-+{
-+	struct acl_subject_label *subj;
-+	struct acl_role_label *role;
-+	unsigned int i, x;
-+
-+	FOR_EACH_ROLE_START(role, i)
-+		update_acl_subj_label(matchn->inode, matchn->device,
-+				      dentry->d_inode->i_ino,
-+				      dentry->d_inode->i_sb->s_dev, role);
-+
-+		FOR_EACH_NESTED_SUBJECT_START(role, subj)
-+			if ((subj->inode == dentry->d_inode->i_ino) &&
-+			    (subj->device == dentry->d_inode->i_sb->s_dev)) {
-+				subj->inode = dentry->d_inode->i_ino;
-+				subj->device = dentry->d_inode->i_sb->s_dev;
-+			}
-+		FOR_EACH_NESTED_SUBJECT_END(subj)
-+		FOR_EACH_SUBJECT_START(role, subj, x)
-+			update_acl_obj_label(matchn->inode, matchn->device,
-+					     dentry->d_inode->i_ino,
-+					     dentry->d_inode->i_sb->s_dev, subj);
-+		FOR_EACH_SUBJECT_END(subj,x)
-+	FOR_EACH_ROLE_END(role,i)
-+
-+	update_inodev_entry(matchn->inode, matchn->device,
-+			    dentry->d_inode->i_ino, dentry->d_inode->i_sb->s_dev);
-+
-+	return;
-+}
-+
-+void
-+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
-+{
-+	struct name_entry *matchn;
-+
-+	if (unlikely(!(gr_status & GR_READY)))
-+		return;
-+
-+	preempt_disable();
-+	matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
-+
-+	if (unlikely((unsigned long)matchn)) {
-+		write_lock(&gr_inode_lock);
-+		do_handle_create(matchn, dentry, mnt);
-+		write_unlock(&gr_inode_lock);
-+	}
-+	preempt_enable();
-+
-+	return;
-+}
-+
-+void
-+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
-+		 struct dentry *old_dentry,
-+		 struct dentry *new_dentry,
-+		 struct vfsmount *mnt, const __u8 replace)
-+{
-+	struct name_entry *matchn;
-+	struct inodev_entry *inodev;
-+
-+	/* vfs_rename swaps the name and parent link for old_dentry and
-+	   new_dentry
-+	   at this point, old_dentry has the new name, parent link, and inode
-+	   for the renamed file
-+	   if a file is being replaced by a rename, new_dentry has the inode
-+	   and name for the replaced file
-+	*/
-+
-+	if (unlikely(!(gr_status & GR_READY)))
-+		return;
-+
-+	preempt_disable();
-+	matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
-+
-+	/* we wouldn't have to check d_inode if it weren't for
-+	   NFS silly-renaming
-+	 */
-+
-+	write_lock(&gr_inode_lock);
-+	if (unlikely(replace && new_dentry->d_inode)) {
-+		inodev = lookup_inodev_entry(new_dentry->d_inode->i_ino,
-+					     new_dentry->d_inode->i_sb->s_dev);
-+		if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
-+			do_handle_delete(inodev, new_dentry->d_inode->i_ino,
-+					 new_dentry->d_inode->i_sb->s_dev);
-+	}
-+
-+	inodev = lookup_inodev_entry(old_dentry->d_inode->i_ino,
-+				     old_dentry->d_inode->i_sb->s_dev);
-+	if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
-+		do_handle_delete(inodev, old_dentry->d_inode->i_ino,
-+				 old_dentry->d_inode->i_sb->s_dev);
-+
-+	if (unlikely((unsigned long)matchn))
-+		do_handle_create(matchn, old_dentry, mnt);
-+
-+	write_unlock(&gr_inode_lock);
-+	preempt_enable();
-+
-+	return;
-+}
-+
-+static int
-+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
-+			 unsigned char **sum)
-+{
-+	struct acl_role_label *r;
-+	struct role_allowed_ip *ipp;
-+	struct role_transition *trans;
-+	unsigned int i;
-+	int found = 0;
-+
-+	/* check transition table */
-+
-+	for (trans = current->role->transitions; trans; trans = trans->next) {
-+		if (!strcmp(rolename, trans->rolename)) {
-+			found = 1;
-+			break;
-+		}
-+	}
-+
-+	if (!found)
-+		return 0;
-+
-+	/* handle special roles that do not require authentication
-+	   and check ip */
-+
-+	FOR_EACH_ROLE_START(r, i)
-+		if (!strcmp(rolename, r->rolename) &&
-+		    (r->roletype & GR_ROLE_SPECIAL)) {
-+			found = 0;
-+			if (r->allowed_ips != NULL) {
-+				for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
-+					if ((ntohl(current->signal->curr_ip) & ipp->netmask) ==
-+					     (ntohl(ipp->addr) & ipp->netmask))
-+						found = 1;
-+				}
-+			} else
-+				found = 2;
-+			if (!found)
-+				return 0;
-+
-+			if (((mode == SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
-+			    ((mode == SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
-+				*salt = NULL;
-+				*sum = NULL;
-+				return 1;
-+			}
-+		}
-+	FOR_EACH_ROLE_END(r,i)
-+
-+	for (i = 0; i < num_sprole_pws; i++) {
-+		if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
-+			*salt = acl_special_roles[i]->salt;
-+			*sum = acl_special_roles[i]->sum;
-+			return 1;
-+		}
-+	}
-+
-+	return 0;
-+}
-+
-+static void
-+assign_special_role(char *rolename)
-+{
-+	struct acl_object_label *obj;
-+	struct acl_role_label *r;
-+	struct acl_role_label *assigned = NULL;
-+	struct task_struct *tsk;
-+	struct file *filp;
-+	unsigned int i;
-+
-+	FOR_EACH_ROLE_START(r, i)
-+		if (!strcmp(rolename, r->rolename) &&
-+		    (r->roletype & GR_ROLE_SPECIAL))
-+			assigned = r;
-+	FOR_EACH_ROLE_END(r,i)
-+
-+	if (!assigned)
-+		return;
-+
-+	read_lock(&tasklist_lock);
-+	read_lock(&grsec_exec_file_lock);
-+
-+	tsk = current->parent;
-+	if (tsk == NULL)
-+		goto out_unlock;
-+
-+	filp = tsk->exec_file;
-+	if (filp == NULL)
-+		goto out_unlock;
-+
-+	tsk->is_writable = 0;
-+
-+	tsk->acl_sp_role = 1;
-+	tsk->acl_role_id = ++acl_sp_role_value;
-+	tsk->role = assigned;
-+	tsk->acl = chk_subj_label(filp->f_dentry, filp->f_vfsmnt, tsk->role);
-+
-+	/* ignore additional mmap checks for processes that are writable 
-+	   by the default ACL */
-+	obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, default_role->root_label);
-+	if (unlikely(obj->mode & GR_WRITE))
-+		tsk->is_writable = 1;
-+	obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, tsk->role->root_label);
-+	if (unlikely(obj->mode & GR_WRITE))
-+		tsk->is_writable = 1;
-+
-+#ifdef CONFIG_GRKERNSEC_ACL_DEBUG
-+	printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
-+#endif
-+
-+out_unlock:
-+	read_unlock(&grsec_exec_file_lock);
-+	read_unlock(&tasklist_lock);
-+	return;
-+}
-+
-+int gr_check_secure_terminal(struct task_struct *task)
-+{
-+	struct task_struct *p, *p2, *p3;
-+	struct files_struct *files;
-+	struct fdtable *fdt;
-+	struct file *our_file = NULL, *file;
-+	int i;
-+
-+	if (task->signal->tty == NULL)
-+		return 1;
-+
-+	files = get_files_struct(task);
-+	if (files != NULL) {
-+		rcu_read_lock();
-+		fdt = files_fdtable(files);
-+		for (i=0; i < fdt->max_fds; i++) {
-+			file = fcheck_files(files, i);
-+			if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
-+				get_file(file);
-+				our_file = file;
-+			}
-+		}
-+		rcu_read_unlock();
-+		put_files_struct(files);
-+	}
-+
-+	if (our_file == NULL)
-+		return 1;
-+
-+	read_lock(&tasklist_lock);
-+	do_each_thread(p2, p) {
-+		files = get_files_struct(p);
-+		if (files == NULL ||
-+		    (p->signal && p->signal->tty == task->signal->tty)) {
-+			if (files != NULL)
-+				put_files_struct(files);
-+			continue;
-+		}
-+		rcu_read_lock();
-+		fdt = files_fdtable(files);
-+		for (i=0; i < fdt->max_fds; i++) {
-+			file = fcheck_files(files, i);
-+			if (file && S_ISCHR(file->f_dentry->d_inode->i_mode) &&
-+			    file->f_dentry->d_inode->i_rdev == our_file->f_dentry->d_inode->i_rdev) {
-+				p3 = task;
-+				while (p3->pid > 0) {
-+					if (p3 == p)
-+						break;
-+					p3 = p3->parent;
-+				}
-+				if (p3 == p)
-+					break;
-+				gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
-+				gr_handle_alertkill(p);
-+				rcu_read_unlock();
-+				put_files_struct(files);
-+				read_unlock(&tasklist_lock);
-+				fput(our_file);
-+				return 0;
-+			}
-+		}
-+		rcu_read_unlock();
-+		put_files_struct(files);
-+	} while_each_thread(p2, p);
-+	read_unlock(&tasklist_lock);
-+
-+	fput(our_file);
-+	return 1;
-+}
-+
-+ssize_t
-+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
-+{
-+	struct gr_arg_wrapper uwrap;
-+	unsigned char *sprole_salt;
-+	unsigned char *sprole_sum;
-+	int error = sizeof (struct gr_arg_wrapper);
-+	int error2 = 0;
-+
-+	down(&gr_dev_sem);
-+
-+	if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
-+		error = -EPERM;
-+		goto out;
-+	}
-+
-+	if (count != sizeof (struct gr_arg_wrapper)) {
-+		gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
-+		error = -EINVAL;
-+		goto out;
-+	}
-+
-+	
-+	if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
-+		gr_auth_expires = 0;
-+		gr_auth_attempts = 0;
-+	}
-+
-+	if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
-+		error = -EFAULT;
-+		goto out;
-+	}
-+
-+	if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
-+		error = -EINVAL;
-+		goto out;
-+	}
-+
-+	if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
-+		error = -EFAULT;
-+		goto out;
-+	}
-+
-+	if (gr_usermode->mode != SPROLE && gr_usermode->mode != SPROLEPAM &&
-+	    gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
-+	    time_after(gr_auth_expires, get_seconds())) {
-+		error = -EBUSY;
-+		goto out;
-+	}
-+
-+	/* if non-root trying to do anything other than use a special role,
-+	   do not attempt authentication, do not count towards authentication
-+	   locking
-+	 */
-+
-+	if (gr_usermode->mode != SPROLE && gr_usermode->mode != STATUS &&
-+	    gr_usermode->mode != UNSPROLE && gr_usermode->mode != SPROLEPAM &&
-+	    current->uid) {
-+		error = -EPERM;
-+		goto out;
-+	}
-+
-+	/* ensure pw and special role name are null terminated */
-+
-+	gr_usermode->pw[GR_PW_LEN - 1] = '\0';
-+	gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
-+
-+	/* Okay. 
-+	 * We have our enough of the argument structure..(we have yet
-+	 * to copy_from_user the tables themselves) . Copy the tables
-+	 * only if we need them, i.e. for loading operations. */
-+
-+	switch (gr_usermode->mode) {
-+	case STATUS:
-+			if (gr_status & GR_READY) {
-+				error = 1;
-+				if (!gr_check_secure_terminal(current))
-+					error = 3;
-+			} else
-+				error = 2;
-+			goto out;
-+	case SHUTDOWN:
-+		if ((gr_status & GR_READY)
-+		    && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
-+			gr_status &= ~GR_READY;
-+			gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
-+			free_variables();
-+			memset(gr_usermode, 0, sizeof (struct gr_arg));
-+			memset(gr_system_salt, 0, GR_SALT_LEN);
-+			memset(gr_system_sum, 0, GR_SHA_LEN);
-+		} else if (gr_status & GR_READY) {
-+			gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
-+			error = -EPERM;
-+		} else {
-+			gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
-+			error = -EAGAIN;
-+		}
-+		break;
-+	case ENABLE:
-+		if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
-+			gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
-+		else {
-+			if (gr_status & GR_READY)
-+				error = -EAGAIN;
-+			else
-+				error = error2;
-+			gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
-+		}
-+		break;
-+	case RELOAD:
-+		if (!(gr_status & GR_READY)) {
-+			gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
-+			error = -EAGAIN;
-+		} else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
-+			lock_kernel();
-+			gr_status &= ~GR_READY;
-+			free_variables();
-+			if (!(error2 = gracl_init(gr_usermode))) {
-+				unlock_kernel();
-+				gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
-+			} else {
-+				unlock_kernel();
-+				error = error2;
-+				gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
-+			}
-+		} else {
-+			gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
-+			error = -EPERM;
-+		}
-+		break;
-+	case SEGVMOD:
-+		if (unlikely(!(gr_status & GR_READY))) {
-+			gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
-+			error = -EAGAIN;
-+			break;
-+		}
-+
-+		if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
-+			gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
-+			if (gr_usermode->segv_device && gr_usermode->segv_inode) {
-+				struct acl_subject_label *segvacl;
-+				segvacl =
-+				    lookup_acl_subj_label(gr_usermode->segv_inode,
-+							  gr_usermode->segv_device,
-+							  current->role);
-+				if (segvacl) {
-+					segvacl->crashes = 0;
-+					segvacl->expires = 0;
-+				}
-+			} else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
-+				gr_remove_uid(gr_usermode->segv_uid);
-+			}
-+		} else {
-+			gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
-+			error = -EPERM;
-+		}
-+		break;
-+	case SPROLE:
-+	case SPROLEPAM:
-+		if (unlikely(!(gr_status & GR_READY))) {
-+			gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
-+			error = -EAGAIN;
-+			break;
-+		}
-+
-+		if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
-+			current->role->expires = 0;
-+			current->role->auth_attempts = 0;
-+		}
-+
-+		if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
-+		    time_after(current->role->expires, get_seconds())) {
-+			error = -EBUSY;
-+			goto out;
-+		}
-+
-+		if (lookup_special_role_auth
-+		    (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
-+		    && ((!sprole_salt && !sprole_sum)
-+			|| !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
-+			char *p = "";
-+			assign_special_role(gr_usermode->sp_role);
-+			read_lock(&tasklist_lock);
-+			if (current->parent)
-+				p = current->parent->role->rolename;
-+			read_unlock(&tasklist_lock);
-+			gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
-+					p, acl_sp_role_value);
-+		} else {
-+			gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
-+			error = -EPERM;
-+			if(!(current->role->auth_attempts++))
-+				current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
-+
-+			goto out;
-+		}
-+		break;
-+	case UNSPROLE:
-+		if (unlikely(!(gr_status & GR_READY))) {
-+			gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
-+			error = -EAGAIN;
-+			break;
-+		}
-+
-+		if (current->role->roletype & GR_ROLE_SPECIAL) {
-+			char *p = "";
-+			int i = 0;
-+
-+			read_lock(&tasklist_lock);
-+			if (current->parent) {
-+				p = current->parent->role->rolename;
-+				i = current->parent->acl_role_id;
-+			}
-+			read_unlock(&tasklist_lock);
-+
-+			gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
-+			gr_set_acls(1);
-+		} else {
-+			gr_log_str(GR_DONT_AUDIT, GR_UNSPROLEF_ACL_MSG, current->role->rolename);
-+			error = -EPERM;
-+			goto out;
-+		}
-+		break;
-+	default:
-+		gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
-+		error = -EINVAL;
-+		break;
-+	}
-+
-+	if (error != -EPERM)
-+		goto out;
-+
-+	if(!(gr_auth_attempts++))
-+		gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
-+
-+      out:
-+	up(&gr_dev_sem);
-+	return error;
-+}
-+
-+int
-+gr_set_acls(const int type)
-+{
-+	struct acl_object_label *obj;
-+	struct task_struct *task, *task2;
-+	struct file *filp;
-+	struct acl_role_label *role = current->role;
-+	__u16 acl_role_id = current->acl_role_id;
-+
-+	read_lock(&tasklist_lock);
-+	read_lock(&grsec_exec_file_lock);
-+	do_each_thread(task2, task) {
-+		/* check to see if we're called from the exit handler,
-+		   if so, only replace ACLs that have inherited the admin
-+		   ACL */
-+
-+		if (type && (task->role != role ||
-+			     task->acl_role_id != acl_role_id))
-+			continue;
-+
-+		task->acl_role_id = 0;
-+		task->acl_sp_role = 0;
-+
-+		if ((filp = task->exec_file)) {
-+			task->role = lookup_acl_role_label(task, task->uid, task->gid);
-+
-+			task->acl =
-+			    chk_subj_label(filp->f_dentry, filp->f_vfsmnt,
-+					   task->role);
-+			if (task->acl) {
-+				struct acl_subject_label *curr;
-+				curr = task->acl;
-+
-+				task->is_writable = 0;
-+				/* ignore additional mmap checks for processes that are writable 
-+				   by the default ACL */
-+				obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, default_role->root_label);
-+				if (unlikely(obj->mode & GR_WRITE))
-+					task->is_writable = 1;
-+				obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, task->role->root_label);
-+				if (unlikely(obj->mode & GR_WRITE))
-+					task->is_writable = 1;
-+
-+				gr_set_proc_res(task);
-+
-+#ifdef CONFIG_GRKERNSEC_ACL_DEBUG
-+				printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
-+#endif
-+			} else {
-+				read_unlock(&grsec_exec_file_lock);
-+				read_unlock(&tasklist_lock);
-+				gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
-+				return 1;
-+			}
-+		} else {
-+			// it's a kernel process
-+			task->role = kernel_role;
-+			task->acl = kernel_role->root_label;
-+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
-+			task->acl->mode &= ~GR_PROCFIND;
-+#endif
-+		}
-+	} while_each_thread(task2, task);
-+	read_unlock(&grsec_exec_file_lock);
-+	read_unlock(&tasklist_lock);
-+	return 0;
-+}
-+
-+void
-+gr_learn_resource(const struct task_struct *task,
-+		  const int res, const unsigned long wanted, const int gt)
-+{
-+	struct acl_subject_label *acl;
-+
-+	if (unlikely((gr_status & GR_READY) &&
-+		     task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
-+		goto skip_reslog;
-+
-+#ifdef CONFIG_GRKERNSEC_RESLOG
-+	gr_log_resource(task, res, wanted, gt);
-+#endif
-+      skip_reslog:
-+
-+	if (unlikely(!(gr_status & GR_READY) || !wanted))
-+		return;
-+
-+	acl = task->acl;
-+
-+	if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
-+		   !(acl->resmask & (1 << (unsigned short) res))))
-+		return;
-+
-+	if (wanted >= acl->res[res].rlim_cur) {
-+		unsigned long res_add;
-+
-+		res_add = wanted;
-+		switch (res) {
-+		case RLIMIT_CPU:
-+			res_add += GR_RLIM_CPU_BUMP;
-+			break;
-+		case RLIMIT_FSIZE:
-+			res_add += GR_RLIM_FSIZE_BUMP;
-+			break;
-+		case RLIMIT_DATA:
-+			res_add += GR_RLIM_DATA_BUMP;
-+			break;
-+		case RLIMIT_STACK:
-+			res_add += GR_RLIM_STACK_BUMP;
-+			break;
-+		case RLIMIT_CORE:
-+			res_add += GR_RLIM_CORE_BUMP;
-+			break;
-+		case RLIMIT_RSS:
-+			res_add += GR_RLIM_RSS_BUMP;
-+			break;
-+		case RLIMIT_NPROC:
-+			res_add += GR_RLIM_NPROC_BUMP;
-+			break;
-+		case RLIMIT_NOFILE:
-+			res_add += GR_RLIM_NOFILE_BUMP;
-+			break;
-+		case RLIMIT_MEMLOCK:
-+			res_add += GR_RLIM_MEMLOCK_BUMP;
-+			break;
-+		case RLIMIT_AS:
-+			res_add += GR_RLIM_AS_BUMP;
-+			break;
-+		case RLIMIT_LOCKS:
-+			res_add += GR_RLIM_LOCKS_BUMP;
-+			break;
-+		}
-+
-+		acl->res[res].rlim_cur = res_add;
-+
-+		if (wanted > acl->res[res].rlim_max)
-+			acl->res[res].rlim_max = res_add;
-+
-+		security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
-+			       task->role->roletype, acl->filename,
-+			       acl->res[res].rlim_cur, acl->res[res].rlim_max,
-+			       "", (unsigned long) res);
-+	}
-+
-+	return;
-+}
-+
-+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
-+void
-+pax_set_initial_flags(struct linux_binprm *bprm)
-+{
-+	struct task_struct *task = current;
-+        struct acl_subject_label *proc;
-+	unsigned long flags;
-+
-+        if (unlikely(!(gr_status & GR_READY)))
-+                return;
-+
-+	flags = pax_get_flags(task);
-+
-+        proc = task->acl;
-+
-+	if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
-+		flags &= ~MF_PAX_PAGEEXEC;
-+	if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
-+		flags &= ~MF_PAX_SEGMEXEC;
-+	if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
-+		flags &= ~MF_PAX_RANDMMAP;
-+	if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
-+		flags &= ~MF_PAX_EMUTRAMP;
-+	if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
-+		flags &= ~MF_PAX_MPROTECT;
-+
-+	if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
-+		flags |= MF_PAX_PAGEEXEC;
-+	if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
-+		flags |= MF_PAX_SEGMEXEC;
-+	if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
-+		flags |= MF_PAX_RANDMMAP;
-+	if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
-+		flags |= MF_PAX_EMUTRAMP;
-+	if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
-+		flags |= MF_PAX_MPROTECT;
-+
-+	pax_set_flags(task, flags);
-+
-+        return;
-+}
-+#endif
-+
-+#ifdef CONFIG_SYSCTL
-+/* Eric Biederman likes breaking userland ABI and every inode-based security
-+   system to save 35kb of memory */
-+
-+/* we modify the passed in filename, but adjust it back before returning */
-+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
-+{
-+	struct name_entry *nmatch;
-+	char *p, *lastp = NULL;
-+	struct acl_object_label *obj = NULL, *tmp;
-+	struct acl_subject_label *tmpsubj;
-+	int done = 0;
-+	char c = '\0';
-+
-+	read_lock(&gr_inode_lock);
-+
-+	p = name + len - 1;
-+	do {
-+		nmatch = lookup_name_entry(name);
-+		if (lastp != NULL)
-+			*lastp = c;
-+
-+		if (nmatch == NULL)
-+			goto next_component;
-+		tmpsubj = current->acl;
-+		do {
-+			obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
-+			if (obj != NULL) {
-+				tmp = obj->globbed;
-+				while (tmp) {
-+					if (!glob_match(tmp->filename, name)) {
-+						obj = tmp;
-+						goto found_obj;
-+					}
-+					tmp = tmp->next;
-+				}
-+				goto found_obj;
-+			}
-+		} while ((tmpsubj = tmpsubj->parent_subject));
-+next_component:
-+		/* end case */
-+		if (p == name)
-+			break;
-+
-+		while (*p != '/')
-+			p--;
-+		if (p == name)
-+			lastp = p + 1;
-+		else {
-+			lastp = p;
-+			p--;
-+		}
-+		c = *lastp;
-+		*lastp = '\0';
-+	} while (1);
-+found_obj:
-+	read_unlock(&gr_inode_lock);
-+	/* obj returned will always be non-null */
-+	return obj;
-+}
-+
-+/* returns 0 when allowing, non-zero on error
-+   op of 0 is used for readdir, so we don't log the names of hidden files
-+*/
-+__u32
-+gr_handle_sysctl(const struct ctl_table *table, const int op)
-+{
-+	ctl_table *tmp;
-+	struct nameidata nd;
-+	const char *proc_sys = "/proc/sys";
-+	char *path;
-+	struct acl_object_label *obj;
-+	unsigned short len = 0, pos = 0, depth = 0, i;
-+	__u32 err = 0;
-+	__u32 mode = 0;
-+
-+	if (unlikely(!(gr_status & GR_READY)))
-+		return 0;
-+
-+	/* for now, ignore operations on non-sysctl entries if it's not a
-+	   readdir*/
-+	if (table->child != NULL && op != 0)
-+		return 0;
-+
-+	mode |= GR_FIND;
-+	/* it's only a read if it's an entry, read on dirs is for readdir */
-+	if (op & 004)
-+		mode |= GR_READ;
-+	if (op & 002)
-+		mode |= GR_WRITE;
-+
-+	preempt_disable();
-+
-+	path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
-+
-+	/* it's only a read/write if it's an actual entry, not a dir
-+	   (which are opened for readdir)
-+	*/
-+
-+	/* convert the requested sysctl entry into a pathname */
-+
-+	for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
-+		len += strlen(tmp->procname);
-+		len++;
-+		depth++;
-+	}
-+
-+	if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
-+		/* deny */
-+		goto out;
-+	}
-+
-+	memset(path, 0, PAGE_SIZE);
-+
-+	memcpy(path, proc_sys, strlen(proc_sys));
-+
-+	pos += strlen(proc_sys);
-+
-+	for (; depth > 0; depth--) {
-+		path[pos] = '/';
-+		pos++;
-+		for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
-+			if (depth == i) {
-+				memcpy(path + pos, tmp->procname,
-+				       strlen(tmp->procname));
-+				pos += strlen(tmp->procname);
-+			}
-+			i++;
-+		}
-+	}
-+
-+	obj = gr_lookup_by_name(path, pos);
-+	err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
-+
-+	if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
-+		     ((err & mode) != mode))) {
-+		__u32 new_mode = mode;
-+
-+		new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
-+
-+		err = 0;
-+		gr_log_learn_sysctl(current, path, new_mode);
-+	} else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
-+		gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
-+		err = -ENOENT;
-+	} else if (!(err & GR_FIND)) {
-+		err = -ENOENT;
-+	} else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
-+		gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
-+			       path, (mode & GR_READ) ? " reading" : "",
-+			       (mode & GR_WRITE) ? " writing" : "");
-+		err = -EACCES;
-+	} else if ((err & mode) != mode) {
-+		err = -EACCES;
-+	} else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
-+		gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
-+			       path, (mode & GR_READ) ? " reading" : "",
-+			       (mode & GR_WRITE) ? " writing" : "");
-+		err = 0;
-+	} else
-+		err = 0;
-+
-+      out:
-+	preempt_enable();
-+
-+	return err;
-+}
-+#endif
-+
-+int
-+gr_handle_proc_ptrace(struct task_struct *task)
-+{
-+	struct file *filp;
-+	struct task_struct *tmp = task;
-+	struct task_struct *curtemp = current;
-+	__u32 retmode;
-+
-+	if (unlikely(!(gr_status & GR_READY)))
-+		return 0;
-+
-+	read_lock(&tasklist_lock);
-+	read_lock(&grsec_exec_file_lock);
-+	filp = task->exec_file;
-+
-+	while (tmp->pid > 0) {
-+		if (tmp == curtemp)
-+			break;
-+		tmp = tmp->parent;
-+	}
-+
-+	if (!filp || (tmp->pid == 0 && !(current->acl->mode & GR_RELAXPTRACE))) {
-+		read_unlock(&grsec_exec_file_lock);
-+		read_unlock(&tasklist_lock);
-+		return 1;
-+	}
-+
-+	retmode = gr_search_file(filp->f_dentry, GR_NOPTRACE, filp->f_vfsmnt);
-+	read_unlock(&grsec_exec_file_lock);
-+	read_unlock(&tasklist_lock);
-+
-+	if (retmode & GR_NOPTRACE)
-+		return 1;
-+
-+	if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
-+	    && (current->acl != task->acl || (current->acl != current->role->root_label
-+	    && current->pid != task->pid)))
-+		return 1;
-+
-+	return 0;
-+}
-+
-+int
-+gr_handle_ptrace(struct task_struct *task, const long request)
-+{
-+	struct task_struct *tmp = task;
-+	struct task_struct *curtemp = current;
-+	__u32 retmode;
-+
-+	if (unlikely(!(gr_status & GR_READY)))
-+		return 0;
-+
-+	read_lock(&tasklist_lock);
-+	while (tmp->pid > 0) {
-+		if (tmp == curtemp)
-+			break;
-+		tmp = tmp->parent;
-+	}
-+
-+	if (tmp->pid == 0 && !(current->acl->mode & GR_RELAXPTRACE)) {
-+		read_unlock(&tasklist_lock);
-+		gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
-+		return 1;
-+	}
-+	read_unlock(&tasklist_lock);
-+
-+	read_lock(&grsec_exec_file_lock);
-+	if (unlikely(!task->exec_file)) {
-+		read_unlock(&grsec_exec_file_lock);
-+		return 0;
-+	}
-+
-+	retmode = gr_search_file(task->exec_file->f_dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_vfsmnt);
-+	read_unlock(&grsec_exec_file_lock);
-+
-+	if (retmode & GR_NOPTRACE) {
-+		gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
-+		return 1;
-+	}
-+		
-+	if (retmode & GR_PTRACERD) {
-+		switch (request) {
-+		case PTRACE_POKETEXT:
-+		case PTRACE_POKEDATA:
-+		case PTRACE_POKEUSR:
-+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
-+		case PTRACE_SETREGS:
-+		case PTRACE_SETFPREGS:
-+#endif
-+#ifdef CONFIG_X86
-+		case PTRACE_SETFPXREGS:
-+#endif
-+#ifdef CONFIG_ALTIVEC
-+		case PTRACE_SETVRREGS:
-+#endif
-+			return 1;
-+		default:
-+			return 0;
-+		}
-+	} else if (!(current->acl->mode & GR_POVERRIDE) &&
-+		   !(current->role->roletype & GR_ROLE_GOD) &&
-+		   (current->acl != task->acl)) {
-+		gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
-+		return 1;
-+	}
-+
-+	return 0;
-+}
-+
-+static int is_writable_mmap(const struct file *filp)
-+{
-+	struct task_struct *task = current;
-+	struct acl_object_label *obj, *obj2;
-+
-+	if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
-+	    !task->is_writable && S_ISREG(filp->f_dentry->d_inode->i_mode)) {
-+		obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, default_role->root_label);
-+		obj2 = chk_obj_label(filp->f_dentry, filp->f_vfsmnt,
-+				     task->role->root_label);
-+		if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
-+			gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_dentry, filp->f_vfsmnt);
-+			return 1;
-+		}
-+	}
-+	return 0;
-+}
-+
-+int
-+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
-+{
-+	__u32 mode;
-+
-+	if (unlikely(!file || !(prot & PROT_EXEC)))
-+		return 1;
-+
-+	if (is_writable_mmap(file))
-+		return 0;
-+
-+	mode =
-+	    gr_search_file(file->f_dentry,
-+			   GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
-+			   file->f_vfsmnt);
-+
-+	if (!gr_tpe_allow(file))
-+		return 0;
-+
-+	if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
-+		gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_dentry, file->f_vfsmnt);
-+		return 0;
-+	} else if (unlikely(!(mode & GR_EXEC))) {
-+		return 0;
-+	} else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
-+		gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_dentry, file->f_vfsmnt);
-+		return 1;
-+	}
-+
-+	return 1;
-+}
-+
-+int
-+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
-+{
-+	__u32 mode;
-+
-+	if (unlikely(!file || !(prot & PROT_EXEC)))
-+		return 1;
-+
-+	if (is_writable_mmap(file))
-+		return 0;
-+
-+	mode =
-+	    gr_search_file(file->f_dentry,
-+			   GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
-+			   file->f_vfsmnt);
-+
-+	if (!gr_tpe_allow(file))
-+		return 0;
-+
-+	if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
-+		gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_dentry, file->f_vfsmnt);
-+		return 0;
-+	} else if (unlikely(!(mode & GR_EXEC))) {
-+		return 0;
-+	} else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
-+		gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_dentry, file->f_vfsmnt);
-+		return 1;
-+	}
-+
-+	return 1;
-+}
-+
-+void
-+gr_acl_handle_psacct(struct task_struct *task, const long code)
-+{
-+	unsigned long runtime;
-+	unsigned long cputime;
-+	unsigned int wday, cday;
-+	__u8 whr, chr;
-+	__u8 wmin, cmin;
-+	__u8 wsec, csec;
-+
-+	if (unlikely(!(gr_status & GR_READY) || !task->acl ||
-+		     !(task->acl->mode & GR_PROCACCT)))
-+		return;
-+
-+	runtime = xtime.tv_sec - task->start_time.tv_sec;
-+	wday = runtime / (3600 * 24);
-+	runtime -= wday * (3600 * 24);
-+	whr = runtime / 3600;
-+	runtime -= whr * 3600;
-+	wmin = runtime / 60;
-+	runtime -= wmin * 60;
-+	wsec = runtime;
-+
-+	cputime = (task->utime + task->stime) / HZ;
-+	cday = cputime / (3600 * 24);
-+	cputime -= cday * (3600 * 24);
-+	chr = cputime / 3600;
-+	cputime -= chr * 3600;
-+	cmin = cputime / 60;
-+	cputime -= cmin * 60;
-+	csec = cputime;
-+
-+	gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
-+
-+	return;
-+}
-+
-+void gr_set_kernel_label(struct task_struct *task)
-+{
-+	if (gr_status & GR_READY) {
-+		task->role = kernel_role;
-+		task->acl = kernel_role->root_label;
-+	}
-+	return;
-+}
-+
-+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
-+{
-+	struct task_struct *task = current;
-+	struct dentry *dentry = file->f_dentry;
-+	struct vfsmount *mnt = file->f_vfsmnt;
-+	struct acl_object_label *obj, *tmp;
-+	struct acl_subject_label *subj;
-+	unsigned int bufsize;
-+	int is_not_root;
-+	char *path;
-+
-+	if (unlikely(!(gr_status & GR_READY)))
-+		return 1;
-+
-+	if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
-+		return 1;
-+
-+	/* ignore Eric Biederman */
-+	if (IS_PRIVATE(dentry->d_inode))
-+		return 1;
-+
-+	subj = task->acl;
-+	do {
-+		obj = lookup_acl_obj_label(ino, dentry->d_inode->i_sb->s_dev, subj);
-+		if (obj != NULL)
-+			return (obj->mode & GR_FIND) ? 1 : 0;
-+	} while ((subj = subj->parent_subject));
-+	
-+	obj = chk_obj_label(dentry, mnt, task->acl);
-+	if (obj->globbed == NULL)
-+		return (obj->mode & GR_FIND) ? 1 : 0;
-+
-+	is_not_root = ((obj->filename[0] == '/') &&
-+		   (obj->filename[1] == '\0')) ? 0 : 1;
-+	bufsize = PAGE_SIZE - namelen - is_not_root;
-+
-+	/* check bufsize > PAGE_SIZE || bufsize == 0 */
-+	if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
-+		return 1;
-+
-+	preempt_disable();
-+	path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
-+			   bufsize);
-+
-+	bufsize = strlen(path);
-+
-+	/* if base is "/", don't append an additional slash */
-+	if (is_not_root)
-+		*(path + bufsize) = '/';
-+	memcpy(path + bufsize + is_not_root, name, namelen);
-+	*(path + bufsize + namelen + is_not_root) = '\0';
-+
-+	tmp = obj->globbed;
-+	while (tmp) {
-+		if (!glob_match(tmp->filename, path)) {
-+			preempt_enable();
-+			return (tmp->mode & GR_FIND) ? 1 : 0;
-+		}
-+		tmp = tmp->next;
-+	}
-+	preempt_enable();
-+	return (obj->mode & GR_FIND) ? 1 : 0;
-+}
-+
-+EXPORT_SYMBOL(gr_learn_resource);
-+EXPORT_SYMBOL(gr_set_kernel_label);
-+#ifdef CONFIG_SECURITY
-+EXPORT_SYMBOL(gr_check_user_change);
-+EXPORT_SYMBOL(gr_check_group_change);
-+#endif
-+
---- /dev/null
-+++ b/grsecurity/gracl_cap.c
-@@ -0,0 +1,112 @@
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/sched.h>
-+#include <linux/capability.h>
-+#include <linux/gracl.h>
-+#include <linux/grsecurity.h>
-+#include <linux/grinternal.h>
-+
-+static const char *captab_log[] = {
-+	"CAP_CHOWN",
-+	"CAP_DAC_OVERRIDE",
-+	"CAP_DAC_READ_SEARCH",
-+	"CAP_FOWNER",
-+	"CAP_FSETID",
-+	"CAP_KILL",
-+	"CAP_SETGID",
-+	"CAP_SETUID",
-+	"CAP_SETPCAP",
-+	"CAP_LINUX_IMMUTABLE",
-+	"CAP_NET_BIND_SERVICE",
-+	"CAP_NET_BROADCAST",
-+	"CAP_NET_ADMIN",
-+	"CAP_NET_RAW",
-+	"CAP_IPC_LOCK",
-+	"CAP_IPC_OWNER",
-+	"CAP_SYS_MODULE",
-+	"CAP_SYS_RAWIO",
-+	"CAP_SYS_CHROOT",
-+	"CAP_SYS_PTRACE",
-+	"CAP_SYS_PACCT",
-+	"CAP_SYS_ADMIN",
-+	"CAP_SYS_BOOT",
-+	"CAP_SYS_NICE",
-+	"CAP_SYS_RESOURCE",
-+	"CAP_SYS_TIME",
-+	"CAP_SYS_TTY_CONFIG",
-+	"CAP_MKNOD",
-+	"CAP_LEASE",
-+	"CAP_AUDIT_WRITE",
-+	"CAP_AUDIT_CONTROL"
-+};
-+
-+EXPORT_SYMBOL(gr_task_is_capable);
-+EXPORT_SYMBOL(gr_is_capable_nolog);
-+
-+int
-+gr_task_is_capable(struct task_struct *task, const int cap)
-+{
-+	struct acl_subject_label *curracl;
-+	__u32 cap_drop = 0, cap_mask = 0;
-+
-+	if (!gr_acl_is_enabled())
-+		return 1;
-+
-+	curracl = task->acl;
-+
-+	cap_drop = curracl->cap_lower;
-+	cap_mask = curracl->cap_mask;
-+
-+	while ((curracl = curracl->parent_subject)) {
-+		if (!(cap_mask & (1 << cap)) && (curracl->cap_mask & (1 << cap)))
-+			cap_drop |= curracl->cap_lower & (1 << cap);
-+		cap_mask |= curracl->cap_mask;
-+	}
-+
-+	if (!cap_raised(cap_drop, cap))
-+		return 1;
-+
-+	curracl = task->acl;
-+
-+	if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
-+	    && cap_raised(task->cap_effective, cap)) {
-+		security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
-+			       task->role->roletype, task->uid,
-+			       task->gid, task->exec_file ?
-+			       gr_to_filename(task->exec_file->f_dentry,
-+			       task->exec_file->f_vfsmnt) : curracl->filename,
-+			       curracl->filename, 0UL,
-+			       0UL, "", (unsigned long) cap, NIPQUAD(task->signal->curr_ip));
-+		return 1;
-+	}
-+
-+	if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(task->cap_effective, cap))
-+		gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
-+	return 0;
-+}
-+
-+int
-+gr_is_capable_nolog(const int cap)
-+{
-+	struct acl_subject_label *curracl;
-+	__u32 cap_drop = 0, cap_mask = 0;
-+
-+	if (!gr_acl_is_enabled())
-+		return 1;
-+
-+	curracl = current->acl;
-+
-+	cap_drop = curracl->cap_lower;
-+	cap_mask = curracl->cap_mask;
-+
-+	while ((curracl = curracl->parent_subject)) {
-+		cap_drop |= curracl->cap_lower & (cap_mask & ~curracl->cap_mask);
-+		cap_mask |= curracl->cap_mask;
-+	}
-+
-+	if (!cap_raised(cap_drop, cap))
-+		return 1;
-+
-+	return 0;
-+}
-+
---- /dev/null
-+++ b/grsecurity/gracl_fs.c
-@@ -0,0 +1,423 @@
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/types.h>
-+#include <linux/fs.h>
-+#include <linux/file.h>
-+#include <linux/stat.h>
-+#include <linux/grsecurity.h>
-+#include <linux/grinternal.h>
-+#include <linux/gracl.h>
-+
-+__u32
-+gr_acl_handle_hidden_file(const struct dentry * dentry,
-+			  const struct vfsmount * mnt)
-+{
-+	__u32 mode;
-+
-+	if (unlikely(!dentry->d_inode))
-+		return GR_FIND;
-+
-+	mode =
-+	    gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
-+
-+	if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
-+		gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
-+		return mode;
-+	} else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
-+		gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
-+		return 0;
-+	} else if (unlikely(!(mode & GR_FIND)))
-+		return 0;
-+
-+	return GR_FIND;
-+}
-+
-+__u32
-+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
-+		   const int fmode)
-+{
-+	__u32 reqmode = GR_FIND;
-+	__u32 mode;
-+
-+	if (unlikely(!dentry->d_inode))
-+		return reqmode;
-+
-+	if (unlikely(fmode & O_APPEND))
-+		reqmode |= GR_APPEND;
-+	else if (unlikely(fmode & FMODE_WRITE))
-+		reqmode |= GR_WRITE;
-+	if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
-+		reqmode |= GR_READ;
-+
-+	mode =
-+	    gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
-+			   mnt);
-+
-+	if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
-+		gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
-+			       reqmode & GR_READ ? " reading" : "",
-+			       reqmode & GR_WRITE ? " writing" : reqmode &
-+			       GR_APPEND ? " appending" : "");
-+		return reqmode;
-+	} else
-+	    if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
-+	{
-+		gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
-+			       reqmode & GR_READ ? " reading" : "",
-+			       reqmode & GR_WRITE ? " writing" : reqmode &
-+			       GR_APPEND ? " appending" : "");
-+		return 0;
-+	} else if (unlikely((mode & reqmode) != reqmode))
-+		return 0;
-+
-+	return reqmode;
-+}
-+
-+__u32
-+gr_acl_handle_creat(const struct dentry * dentry,
-+		    const struct dentry * p_dentry,
-+		    const struct vfsmount * p_mnt, const int fmode,
-+		    const int imode)
-+{
-+	__u32 reqmode = GR_WRITE | GR_CREATE;
-+	__u32 mode;
-+
-+	if (unlikely(fmode & O_APPEND))
-+		reqmode |= GR_APPEND;
-+	if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
-+		reqmode |= GR_READ;
-+	if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
-+		reqmode |= GR_SETID;
-+
-+	mode =
-+	    gr_check_create(dentry, p_dentry, p_mnt,
-+			    reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
-+
-+	if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
-+		gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
-+			       reqmode & GR_READ ? " reading" : "",
-+			       reqmode & GR_WRITE ? " writing" : reqmode &
-+			       GR_APPEND ? " appending" : "");
-+		return reqmode;
-+	} else
-+	    if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
-+	{
-+		gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
-+			       reqmode & GR_READ ? " reading" : "",
-+			       reqmode & GR_WRITE ? " writing" : reqmode &
-+			       GR_APPEND ? " appending" : "");
-+		return 0;
-+	} else if (unlikely((mode & reqmode) != reqmode))
-+		return 0;
-+
-+	return reqmode;
-+}
-+
-+__u32
-+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
-+		     const int fmode)
-+{
-+	__u32 mode, reqmode = GR_FIND;
-+
-+	if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
-+		reqmode |= GR_EXEC;
-+	if (fmode & S_IWOTH)
-+		reqmode |= GR_WRITE;
-+	if (fmode & S_IROTH)
-+		reqmode |= GR_READ;
-+
-+	mode =
-+	    gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
-+			   mnt);
-+
-+	if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
-+		gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
-+			       reqmode & GR_READ ? " reading" : "",
-+			       reqmode & GR_WRITE ? " writing" : "",
-+			       reqmode & GR_EXEC ? " executing" : "");
-+		return reqmode;
-+	} else
-+	    if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
-+	{
-+		gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
-+			       reqmode & GR_READ ? " reading" : "",
-+			       reqmode & GR_WRITE ? " writing" : "",
-+			       reqmode & GR_EXEC ? " executing" : "");
-+		return 0;
-+	} else if (unlikely((mode & reqmode) != reqmode))
-+		return 0;
-+
-+	return reqmode;
-+}
-+
-+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
-+{
-+	__u32 mode;
-+
-+	mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
-+
-+	if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
-+		gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
-+		return mode;
-+	} else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
-+		gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
-+		return 0;
-+	} else if (unlikely((mode & (reqmode)) != (reqmode)))
-+		return 0;
-+
-+	return (reqmode);
-+}
-+
-+__u32
-+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
-+{
-+	return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
-+}
-+
-+__u32
-+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
-+{
-+	return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
-+}
-+
-+__u32
-+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
-+{
-+	return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
-+}
-+
-+__u32
-+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
-+{
-+	return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
-+}
-+
-+__u32
-+gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
-+		     mode_t mode)
-+{
-+	if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
-+		return 1;
-+
-+	if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
-+		return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
-+				   GR_FCHMOD_ACL_MSG);
-+	} else {
-+		return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
-+	}
-+}
-+
-+__u32
-+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
-+		    mode_t mode)
-+{
-+	if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
-+		return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
-+				   GR_CHMOD_ACL_MSG);
-+	} else {
-+		return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
-+	}
-+}
-+
-+__u32
-+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
-+{
-+	return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
-+}
-+
-+__u32
-+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
-+{
-+	return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
-+}
-+
-+__u32
-+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
-+{
-+	return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
-+			   GR_UNIXCONNECT_ACL_MSG);
-+}
-+
-+/* hardlinks require at minimum create permission,
-+   any additional privilege required is based on the
-+   privilege of the file being linked to
-+*/
-+__u32
-+gr_acl_handle_link(const struct dentry * new_dentry,
-+		   const struct dentry * parent_dentry,
-+		   const struct vfsmount * parent_mnt,
-+		   const struct dentry * old_dentry,
-+		   const struct vfsmount * old_mnt, const char *to)
-+{
-+	__u32 mode;
-+	__u32 needmode = GR_CREATE | GR_LINK;
-+	__u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
-+
-+	mode =
-+	    gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
-+			  old_mnt);
-+
-+	if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
-+		gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
-+		return mode;
-+	} else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
-+		gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
-+		return 0;
-+	} else if (unlikely((mode & needmode) != needmode))
-+		return 0;
-+
-+	return 1;
-+}
-+
-+__u32
-+gr_acl_handle_symlink(const struct dentry * new_dentry,
-+		      const struct dentry * parent_dentry,
-+		      const struct vfsmount * parent_mnt, const char *from)
-+{
-+	__u32 needmode = GR_WRITE | GR_CREATE;
-+	__u32 mode;
-+
-+	mode =
-+	    gr_check_create(new_dentry, parent_dentry, parent_mnt,
-+			    GR_CREATE | GR_AUDIT_CREATE |
-+			    GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
-+
-+	if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
-+		gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
-+		return mode;
-+	} else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
-+		gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
-+		return 0;
-+	} else if (unlikely((mode & needmode) != needmode))
-+		return 0;
-+
-+	return (GR_WRITE | GR_CREATE);
-+}
-+
-+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
-+{
-+	__u32 mode;
-+
-+	mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
-+
-+	if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
-+		gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
-+		return mode;
-+	} else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
-+		gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
-+		return 0;
-+	} else if (unlikely((mode & (reqmode)) != (reqmode)))
-+		return 0;
-+
-+	return (reqmode);
-+}
-+
-+__u32
-+gr_acl_handle_mknod(const struct dentry * new_dentry,
-+		    const struct dentry * parent_dentry,
-+		    const struct vfsmount * parent_mnt,
-+		    const int mode)
-+{
-+	__u32 reqmode = GR_WRITE | GR_CREATE;
-+	if (unlikely(mode & (S_ISUID | S_ISGID)))
-+		reqmode |= GR_SETID;
-+
-+	return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
-+				  reqmode, GR_MKNOD_ACL_MSG);
-+}
-+
-+__u32
-+gr_acl_handle_mkdir(const struct dentry *new_dentry,
-+		    const struct dentry *parent_dentry,
-+		    const struct vfsmount *parent_mnt)
-+{
-+	return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
-+				  GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
-+}
-+
-+#define RENAME_CHECK_SUCCESS(old, new) \
-+	(((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
-+	 ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
-+
-+int
-+gr_acl_handle_rename(struct dentry *new_dentry,
-+		     struct dentry *parent_dentry,
-+		     const struct vfsmount *parent_mnt,
-+		     struct dentry *old_dentry,
-+		     struct inode *old_parent_inode,
-+		     struct vfsmount *old_mnt, const char *newname)
-+{
-+	__u32 comp1, comp2;
-+	int error = 0;
-+
-+	if (unlikely(!gr_acl_is_enabled()))
-+		return 0;
-+
-+	if (!new_dentry->d_inode) {
-+		comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
-+					GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
-+					GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
-+		comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
-+				       GR_DELETE | GR_AUDIT_DELETE |
-+				       GR_AUDIT_READ | GR_AUDIT_WRITE |
-+				       GR_SUPPRESS, old_mnt);
-+	} else {
-+		comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
-+				       GR_CREATE | GR_DELETE |
-+				       GR_AUDIT_CREATE | GR_AUDIT_DELETE |
-+				       GR_AUDIT_READ | GR_AUDIT_WRITE |
-+				       GR_SUPPRESS, parent_mnt);
-+		comp2 =
-+		    gr_search_file(old_dentry,
-+				   GR_READ | GR_WRITE | GR_AUDIT_READ |
-+				   GR_DELETE | GR_AUDIT_DELETE |
-+				   GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
-+	}
-+
-+	if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
-+	    ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
-+		gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
-+	else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
-+		 && !(comp2 & GR_SUPPRESS)) {
-+		gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
-+		error = -EACCES;
-+	} else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
-+		error = -EACCES;
-+
-+	return error;
-+}
-+
-+void
-+gr_acl_handle_exit(void)
-+{
-+	u16 id;
-+	char *rolename;
-+	struct file *exec_file;
-+
-+	if (unlikely(current->acl_sp_role && gr_acl_is_enabled())) {
-+		id = current->acl_role_id;
-+		rolename = current->role->rolename;
-+		gr_set_acls(1);
-+		gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
-+	}
-+
-+	write_lock(&grsec_exec_file_lock);
-+	exec_file = current->exec_file;
-+	current->exec_file = NULL;
-+	write_unlock(&grsec_exec_file_lock);
-+
-+	if (exec_file)
-+		fput(exec_file);
-+}
-+
-+int
-+gr_acl_handle_procpidmem(const struct task_struct *task)
-+{
-+	if (unlikely(!gr_acl_is_enabled()))
-+		return 0;
-+
-+	if (task->acl->mode & GR_PROTPROCFD)
-+		return -EACCES;
-+
-+	return 0;
-+}
---- /dev/null
-+++ b/grsecurity/gracl_ip.c
-@@ -0,0 +1,313 @@
-+#include <linux/kernel.h>
-+#include <asm/uaccess.h>
-+#include <asm/errno.h>
-+#include <net/sock.h>
-+#include <linux/file.h>
-+#include <linux/fs.h>
-+#include <linux/net.h>
-+#include <linux/in.h>
-+#include <linux/skbuff.h>
-+#include <linux/ip.h>
-+#include <linux/udp.h>
-+#include <linux/smp_lock.h>
-+#include <linux/types.h>
-+#include <linux/sched.h>
-+#include <linux/netdevice.h>
-+#include <linux/inetdevice.h>
-+#include <linux/gracl.h>
-+#include <linux/grsecurity.h>
-+#include <linux/grinternal.h>
-+
-+#define GR_BIND 	0x01
-+#define GR_CONNECT 	0x02
-+#define GR_INVERT 	0x04
-+
-+static const char * gr_protocols[256] = {
-+	"ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
-+	"egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
-+	"chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
-+	"trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
-+	"merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
-+	"il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
-+	"mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
-+	"tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
-+	"sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
-+	"cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak", 
-+	"iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf", 
-+	"eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
-+	"scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
-+	"aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
-+	"vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
-+	"uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
-+	"sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
-+	"unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
-+	"unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
-+	"unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
-+	"unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
-+	"unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
-+	"unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
-+	"unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
-+	"unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
-+	"unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
-+	"unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
-+	"unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
-+	"unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
-+	"unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
-+	"unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
-+	"unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
-+	};
-+
-+static const char * gr_socktypes[11] = {
-+	"unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6", 
-+	"unknown:7", "unknown:8", "unknown:9", "packet"
-+	};
-+
-+const char *
-+gr_proto_to_name(unsigned char proto)
-+{
-+	return gr_protocols[proto];
-+}
-+
-+const char *
-+gr_socktype_to_name(unsigned char type)
-+{
-+	return gr_socktypes[type];
-+}
-+
-+int
-+gr_search_socket(const int domain, const int type, const int protocol)
-+{
-+	struct acl_subject_label *curr;
-+
-+	if (unlikely(!gr_acl_is_enabled()))
-+		goto exit;
-+
-+	if ((domain < 0) || (type < 0) || (protocol < 0) || (domain != PF_INET)
-+	    || (domain >= NPROTO) || (type >= SOCK_MAX) || (protocol > 255))
-+		goto exit;	// let the kernel handle it
-+
-+	curr = current->acl;
-+
-+	if (!curr->ips)
-+		goto exit;
-+
-+	if ((curr->ip_type & (1 << type)) &&
-+	    (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
-+		goto exit;
-+
-+	if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
-+		/* we don't place acls on raw sockets , and sometimes
-+		   dgram/ip sockets are opened for ioctl and not
-+		   bind/connect, so we'll fake a bind learn log */
-+		if (type == SOCK_RAW || type == SOCK_PACKET) {
-+			__u32 fakeip = 0;
-+			security_learn(GR_IP_LEARN_MSG, current->role->rolename,
-+				       current->role->roletype, current->uid,
-+				       current->gid, current->exec_file ?
-+				       gr_to_filename(current->exec_file->f_dentry,
-+				       current->exec_file->f_vfsmnt) :
-+				       curr->filename, curr->filename,
-+				       NIPQUAD(fakeip), 0, type,
-+				       protocol, GR_CONNECT, 
-+NIPQUAD(current->signal->curr_ip));
-+		} else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
-+			__u32 fakeip = 0;
-+			security_learn(GR_IP_LEARN_MSG, current->role->rolename,
-+				       current->role->roletype, current->uid,
-+				       current->gid, current->exec_file ?
-+				       gr_to_filename(current->exec_file->f_dentry,
-+				       current->exec_file->f_vfsmnt) :
-+				       curr->filename, curr->filename,
-+				       NIPQUAD(fakeip), 0, type,
-+				       protocol, GR_BIND, NIPQUAD(current->signal->curr_ip));
-+		}
-+		/* we'll log when they use connect or bind */
-+		goto exit;
-+	}
-+
-+	gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, "inet", 
-+		    gr_socktype_to_name(type), gr_proto_to_name(protocol));
-+
-+	return 0;
-+      exit:
-+	return 1;
-+}
-+
-+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
-+{
-+	if ((ip->mode & mode) &&
-+	    (ip_port >= ip->low) &&
-+	    (ip_port <= ip->high) &&
-+	    ((ntohl(ip_addr) & our_netmask) ==
-+	     (ntohl(our_addr) & our_netmask))
-+	    && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
-+	    && (ip->type & (1 << type))) {
-+		if (ip->mode & GR_INVERT)
-+			return 2; // specifically denied
-+		else
-+			return 1; // allowed
-+	}
-+
-+	return 0; // not specifically allowed, may continue parsing
-+}
-+
-+static int
-+gr_search_connectbind(const int mode, const struct sock *sk,
-+		      const struct sockaddr_in *addr, const int type)
-+{
-+	char iface[IFNAMSIZ] = {0};
-+	struct acl_subject_label *curr;
-+	struct acl_ip_label *ip;
-+	struct net_device *dev;
-+	struct in_device *idev;
-+	unsigned long i;
-+	int ret;
-+	__u32 ip_addr = 0;
-+	__u32 our_addr;
-+	__u32 our_netmask;
-+	char *p;
-+	__u16 ip_port = 0;
-+
-+	if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
-+		return 1;
-+
-+	curr = current->acl;
-+
-+	if (!curr->ips)
-+		return 1;
-+
-+	ip_addr = addr->sin_addr.s_addr;
-+	ip_port = ntohs(addr->sin_port);
-+
-+	if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
-+		security_learn(GR_IP_LEARN_MSG, current->role->rolename,
-+			       current->role->roletype, current->uid,
-+			       current->gid, current->exec_file ?
-+			       gr_to_filename(current->exec_file->f_dentry,
-+			       current->exec_file->f_vfsmnt) :
-+			       curr->filename, curr->filename,
-+			       NIPQUAD(ip_addr), ip_port, type,
-+			       sk->sk_protocol, mode, NIPQUAD(current->signal->curr_ip));
-+		return 1;
-+	}
-+
-+	for (i = 0; i < curr->ip_num; i++) {
-+		ip = *(curr->ips + i);
-+		if (ip->iface != NULL) {
-+			strncpy(iface, ip->iface, IFNAMSIZ - 1);
-+			p = strchr(iface, ':');
-+			if (p != NULL)
-+				*p = '\0';
-+			dev = dev_get_by_name(iface);
-+			if (dev == NULL)
-+				continue;
-+			idev = in_dev_get(dev);
-+			if (idev == NULL) {
-+				dev_put(dev);
-+				continue;
-+			}
-+			rcu_read_lock();
-+			for_ifa(idev) {
-+				if (!strcmp(ip->iface, ifa->ifa_label)) {
-+					our_addr = ifa->ifa_address;
-+					our_netmask = 0xffffffff;
-+					ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
-+					if (ret == 1) {
-+						rcu_read_unlock();
-+						in_dev_put(idev);
-+						dev_put(dev);
-+						return 1;
-+					} else if (ret == 2) {
-+						rcu_read_unlock();
-+						in_dev_put(idev);
-+						dev_put(dev);
-+						goto denied;
-+					}
-+				}
-+			} endfor_ifa(idev);
-+			rcu_read_unlock();
-+			in_dev_put(idev);
-+			dev_put(dev);
-+		} else {
-+			our_addr = ip->addr;
-+			our_netmask = ip->netmask;
-+			ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
-+			if (ret == 1)
-+				return 1;
-+			else if (ret == 2)
-+				goto denied;
-+		}
-+	}
-+
-+denied:
-+	if (mode == GR_BIND)
-+		gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, NIPQUAD(ip_addr), ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
-+	else if (mode == GR_CONNECT)
-+		gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, NIPQUAD(ip_addr), ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
-+
-+	return 0;
-+}
-+
-+int
-+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
-+{
-+	return gr_search_connectbind(GR_CONNECT, sock->sk, addr, sock->type);
-+}
-+
-+int
-+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
-+{
-+	return gr_search_connectbind(GR_BIND, sock->sk, addr, sock->type);
-+}
-+
-+int gr_search_listen(const struct socket *sock)
-+{
-+	struct sock *sk = sock->sk;
-+	struct sockaddr_in addr;
-+
-+	addr.sin_addr.s_addr = inet_sk(sk)->saddr;
-+	addr.sin_port = inet_sk(sk)->sport;
-+
-+	return gr_search_connectbind(GR_BIND, sock->sk, &addr, sock->type);
-+}
-+
-+int gr_search_accept(const struct socket *sock)
-+{
-+	struct sock *sk = sock->sk;
-+	struct sockaddr_in addr;
-+
-+	addr.sin_addr.s_addr = inet_sk(sk)->saddr;
-+	addr.sin_port = inet_sk(sk)->sport;
-+
-+	return gr_search_connectbind(GR_BIND, sock->sk, &addr, sock->type);
-+}
-+
-+int
-+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
-+{
-+	if (addr)
-+		return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
-+	else {
-+		struct sockaddr_in sin;
-+		const struct inet_sock *inet = inet_sk(sk);
-+
-+		sin.sin_addr.s_addr = inet->daddr;
-+		sin.sin_port = inet->dport;
-+
-+		return gr_search_connectbind(GR_CONNECT, sk, &sin, SOCK_DGRAM);
-+	}
-+}
-+
-+int
-+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
-+{
-+	struct sockaddr_in sin;
-+
-+	if (unlikely(skb->len < sizeof (struct udphdr)))
-+		return 1;	// skip this packet
-+
-+	sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
-+	sin.sin_port = udp_hdr(skb)->source;
-+
-+	return gr_search_connectbind(GR_CONNECT, sk, &sin, SOCK_DGRAM);
-+}
---- /dev/null
-+++ b/grsecurity/gracl_learn.c
-@@ -0,0 +1,211 @@
-+#include <linux/kernel.h>
-+#include <linux/mm.h>
-+#include <linux/sched.h>
-+#include <linux/poll.h>
-+#include <linux/smp_lock.h>
-+#include <linux/string.h>
-+#include <linux/file.h>
-+#include <linux/types.h>
-+#include <linux/vmalloc.h>
-+#include <linux/grinternal.h>
-+
-+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
-+				   size_t count, loff_t *ppos);
-+extern int gr_acl_is_enabled(void);
-+
-+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
-+static int gr_learn_attached;
-+
-+/* use a 512k buffer */
-+#define LEARN_BUFFER_SIZE (512 * 1024)
-+
-+static spinlock_t gr_learn_lock = SPIN_LOCK_UNLOCKED;
-+static DECLARE_MUTEX(gr_learn_user_sem);
-+
-+/* we need to maintain two buffers, so that the kernel context of grlearn
-+   uses a semaphore around the userspace copying, and the other kernel contexts
-+   use a spinlock when copying into the buffer, since they cannot sleep
-+*/
-+static char *learn_buffer;
-+static char *learn_buffer_user;
-+static int learn_buffer_len;
-+static int learn_buffer_user_len;
-+
-+static ssize_t
-+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
-+{
-+	DECLARE_WAITQUEUE(wait, current);
-+	ssize_t retval = 0;
-+
-+	add_wait_queue(&learn_wait, &wait);
-+	set_current_state(TASK_INTERRUPTIBLE);
-+	do {
-+		down(&gr_learn_user_sem);
-+		spin_lock(&gr_learn_lock);
-+		if (learn_buffer_len)
-+			break;
-+		spin_unlock(&gr_learn_lock);
-+		up(&gr_learn_user_sem);
-+		if (file->f_flags & O_NONBLOCK) {
-+			retval = -EAGAIN;
-+			goto out;
-+		}
-+		if (signal_pending(current)) {
-+			retval = -ERESTARTSYS;
-+			goto out;
-+		}
-+
-+		schedule();
-+	} while (1);
-+
-+	memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
-+	learn_buffer_user_len = learn_buffer_len;
-+	retval = learn_buffer_len;
-+	learn_buffer_len = 0;
-+
-+	spin_unlock(&gr_learn_lock);
-+
-+	if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
-+		retval = -EFAULT;
-+
-+	up(&gr_learn_user_sem);
-+out:
-+	set_current_state(TASK_RUNNING);
-+	remove_wait_queue(&learn_wait, &wait);
-+	return retval;
-+}
-+
-+static unsigned int
-+poll_learn(struct file * file, poll_table * wait)
-+{
-+	poll_wait(file, &learn_wait, wait);
-+
-+	if (learn_buffer_len)
-+		return (POLLIN | POLLRDNORM);
-+
-+	return 0;
-+}
-+
-+void
-+gr_clear_learn_entries(void)
-+{
-+	char *tmp;
-+
-+	down(&gr_learn_user_sem);
-+	if (learn_buffer != NULL) {
-+		spin_lock(&gr_learn_lock);
-+		tmp = learn_buffer;
-+		learn_buffer = NULL;
-+		spin_unlock(&gr_learn_lock);
-+		vfree(learn_buffer);
-+	}
-+	if (learn_buffer_user != NULL) {
-+		vfree(learn_buffer_user);
-+		learn_buffer_user = NULL;
-+	}
-+	learn_buffer_len = 0;
-+	up(&gr_learn_user_sem);
-+
-+	return;
-+}
-+
-+void
-+gr_add_learn_entry(const char *fmt, ...)
-+{
-+	va_list args;
-+	unsigned int len;
-+
-+	if (!gr_learn_attached)
-+		return;
-+
-+	spin_lock(&gr_learn_lock);
-+
-+	/* leave a gap at the end so we know when it's "full" but don't have to
-+	   compute the exact length of the string we're trying to append
-+	*/
-+	if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
-+		spin_unlock(&gr_learn_lock);
-+		wake_up_interruptible(&learn_wait);
-+		return;
-+	}
-+	if (learn_buffer == NULL) {
-+		spin_unlock(&gr_learn_lock);
-+		return;
-+	}
-+
-+	va_start(args, fmt);
-+	len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
-+	va_end(args);
-+
-+	learn_buffer_len += len + 1;
-+
-+	spin_unlock(&gr_learn_lock);
-+	wake_up_interruptible(&learn_wait);
-+
-+	return;
-+}
-+
-+static int
-+open_learn(struct inode *inode, struct file *file)
-+{
-+	if (file->f_mode & FMODE_READ && gr_learn_attached)
-+		return -EBUSY;
-+	if (file->f_mode & FMODE_READ) {
-+		int retval = 0;
-+		down(&gr_learn_user_sem);
-+		if (learn_buffer == NULL)
-+			learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
-+		if (learn_buffer_user == NULL)
-+			learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
-+		if (learn_buffer == NULL) {
-+			retval = -ENOMEM;
-+			goto out_error;
-+		}
-+		if (learn_buffer_user == NULL) {
-+			retval = -ENOMEM;
-+			goto out_error;
-+		}
-+		learn_buffer_len = 0;
-+		learn_buffer_user_len = 0;
-+		gr_learn_attached = 1;
-+out_error:
-+		up(&gr_learn_user_sem);
-+		return retval;
-+	}
-+	return 0;
-+}
-+
-+static int
-+close_learn(struct inode *inode, struct file *file)
-+{
-+	char *tmp;
-+
-+	if (file->f_mode & FMODE_READ) {
-+		down(&gr_learn_user_sem);
-+		if (learn_buffer != NULL) {
-+			spin_lock(&gr_learn_lock);
-+			tmp = learn_buffer;
-+			learn_buffer = NULL;
-+			spin_unlock(&gr_learn_lock);
-+			vfree(tmp);
-+		}
-+		if (learn_buffer_user != NULL) {
-+			vfree(learn_buffer_user);
-+			learn_buffer_user = NULL;
-+		}
-+		learn_buffer_len = 0;
-+		learn_buffer_user_len = 0;
-+		gr_learn_attached = 0;
-+		up(&gr_learn_user_sem);
-+	}
-+
-+	return 0;
-+}
-+		
-+struct file_operations grsec_fops = {
-+	.read		= read_learn,
-+	.write		= write_grsec_handler,
-+	.open		= open_learn,
-+	.release	= close_learn,
-+	.poll		= poll_learn,
-+};
---- /dev/null
-+++ b/grsecurity/gracl_res.c
-@@ -0,0 +1,45 @@
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/gracl.h>
-+#include <linux/grinternal.h>
-+
-+static const char *restab_log[] = {
-+	[RLIMIT_CPU] = "RLIMIT_CPU",
-+	[RLIMIT_FSIZE] = "RLIMIT_FSIZE",
-+	[RLIMIT_DATA] = "RLIMIT_DATA",
-+	[RLIMIT_STACK] = "RLIMIT_STACK",
-+	[RLIMIT_CORE] = "RLIMIT_CORE",
-+	[RLIMIT_RSS] = "RLIMIT_RSS",
-+	[RLIMIT_NPROC] = "RLIMIT_NPROC",
-+	[RLIMIT_NOFILE] = "RLIMIT_NOFILE",
-+	[RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
-+	[RLIMIT_AS] = "RLIMIT_AS",
-+	[RLIMIT_LOCKS] = "RLIMIT_LOCKS",
-+	[RLIMIT_LOCKS + 1] = "RLIMIT_CRASH"
-+};
-+
-+void
-+gr_log_resource(const struct task_struct *task,
-+		const int res, const unsigned long wanted, const int gt)
-+{
-+	if (res == RLIMIT_NPROC && 
-+	    (cap_raised(task->cap_effective, CAP_SYS_ADMIN) || 
-+	     cap_raised(task->cap_effective, CAP_SYS_RESOURCE)))
-+		return;
-+	else if (res == RLIMIT_MEMLOCK &&
-+		 cap_raised(task->cap_effective, CAP_IPC_LOCK))
-+		return;
-+
-+	if (!gr_acl_is_enabled() && !grsec_resource_logging)
-+		return;
-+
-+	preempt_disable();
-+
-+	if (unlikely(((gt && wanted > task->signal->rlim[res].rlim_cur) ||
-+		      (!gt && wanted >= task->signal->rlim[res].rlim_cur)) &&
-+		     task->signal->rlim[res].rlim_cur != RLIM_INFINITY))
-+		gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], task->signal->rlim[res].rlim_cur);
-+	preempt_enable_no_resched();
-+
-+	return;
-+}
---- /dev/null
-+++ b/grsecurity/gracl_segv.c
-@@ -0,0 +1,301 @@
-+#include <linux/kernel.h>
-+#include <linux/mm.h>
-+#include <asm/uaccess.h>
-+#include <asm/errno.h>
-+#include <asm/mman.h>
-+#include <net/sock.h>
-+#include <linux/file.h>
-+#include <linux/fs.h>
-+#include <linux/net.h>
-+#include <linux/in.h>
-+#include <linux/smp_lock.h>
-+#include <linux/slab.h>
-+#include <linux/types.h>
-+#include <linux/sched.h>
-+#include <linux/timer.h>
-+#include <linux/gracl.h>
-+#include <linux/grsecurity.h>
-+#include <linux/grinternal.h>
-+
-+static struct crash_uid *uid_set;
-+static unsigned short uid_used;
-+static spinlock_t gr_uid_lock = SPIN_LOCK_UNLOCKED;
-+extern rwlock_t gr_inode_lock;
-+extern struct acl_subject_label *
-+	lookup_acl_subj_label(const ino_t inode, const dev_t dev,
-+			      struct acl_role_label *role);
-+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
-+
-+int
-+gr_init_uidset(void)
-+{
-+	uid_set =
-+	    kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
-+	uid_used = 0;
-+
-+	return uid_set ? 1 : 0;
-+}
-+
-+void
-+gr_free_uidset(void)
-+{
-+	if (uid_set)
-+		kfree(uid_set);
-+
-+	return;
-+}
-+
-+int
-+gr_find_uid(const uid_t uid)
-+{
-+	struct crash_uid *tmp = uid_set;
-+	uid_t buid;
-+	int low = 0, high = uid_used - 1, mid;
-+
-+	while (high >= low) {
-+		mid = (low + high) >> 1;
-+		buid = tmp[mid].uid;
-+		if (buid == uid)
-+			return mid;
-+		if (buid > uid)
-+			high = mid - 1;
-+		if (buid < uid)
-+			low = mid + 1;
-+	}
-+
-+	return -1;
-+}
-+
-+static __inline__ void
-+gr_insertsort(void)
-+{
-+	unsigned short i, j;
-+	struct crash_uid index;
-+
-+	for (i = 1; i < uid_used; i++) {
-+		index = uid_set[i];
-+		j = i;
-+		while ((j > 0) && uid_set[j - 1].uid > index.uid) {
-+			uid_set[j] = uid_set[j - 1];
-+			j--;
-+		}
-+		uid_set[j] = index;
-+	}
-+
-+	return;
-+}
-+
-+static __inline__ void
-+gr_insert_uid(const uid_t uid, const unsigned long expires)
-+{
-+	int loc;
-+
-+	if (uid_used == GR_UIDTABLE_MAX)
-+		return;
-+
-+	loc = gr_find_uid(uid);
-+
-+	if (loc >= 0) {
-+		uid_set[loc].expires = expires;
-+		return;
-+	}
-+
-+	uid_set[uid_used].uid = uid;
-+	uid_set[uid_used].expires = expires;
-+	uid_used++;
-+
-+	gr_insertsort();
-+
-+	return;
-+}
-+
-+void
-+gr_remove_uid(const unsigned short loc)
-+{
-+	unsigned short i;
-+
-+	for (i = loc + 1; i < uid_used; i++)
-+		uid_set[i - 1] = uid_set[i];
-+
-+	uid_used--;
-+
-+	return;
-+}
-+
-+int
-+gr_check_crash_uid(const uid_t uid)
-+{
-+	int loc;
-+	int ret = 0;
-+
-+	if (unlikely(!gr_acl_is_enabled()))
-+		return 0;
-+
-+	spin_lock(&gr_uid_lock);
-+	loc = gr_find_uid(uid);
-+
-+	if (loc < 0)
-+		goto out_unlock;
-+
-+	if (time_before_eq(uid_set[loc].expires, get_seconds()))
-+		gr_remove_uid(loc);
-+	else
-+		ret = 1;
-+
-+out_unlock:
-+	spin_unlock(&gr_uid_lock);
-+	return ret;
-+}
-+
-+static __inline__ int
-+proc_is_setxid(const struct task_struct *task)
-+{
-+	if (task->uid != task->euid || task->uid != task->suid ||
-+	    task->uid != task->fsuid)
-+		return 1;
-+	if (task->gid != task->egid || task->gid != task->sgid ||
-+	    task->gid != task->fsgid)
-+		return 1;
-+
-+	return 0;
-+}
-+static __inline__ int
-+gr_fake_force_sig(int sig, struct task_struct *t)
-+{
-+	unsigned long int flags;
-+	int ret, blocked, ignored;
-+	struct k_sigaction *action;
-+
-+	spin_lock_irqsave(&t->sighand->siglock, flags);
-+	action = &t->sighand->action[sig-1];
-+	ignored = action->sa.sa_handler == SIG_IGN;
-+	blocked = sigismember(&t->blocked, sig);
-+	if (blocked || ignored) {
-+		action->sa.sa_handler = SIG_DFL;
-+		if (blocked) {
-+			sigdelset(&t->blocked, sig);
-+			recalc_sigpending_and_wake(t);
-+		}
-+	}
-+	ret = specific_send_sig_info(sig, (void*)1L, t);
-+	spin_unlock_irqrestore(&t->sighand->siglock, flags);
-+
-+	return ret;
-+}
-+
-+void
-+gr_handle_crash(struct task_struct *task, const int sig)
-+{
-+	struct acl_subject_label *curr;
-+	struct acl_subject_label *curr2;
-+	struct task_struct *tsk, *tsk2;
-+
-+	if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
-+		return;
-+
-+	if (unlikely(!gr_acl_is_enabled()))
-+		return;
-+
-+	curr = task->acl;
-+
-+	if (!(curr->resmask & (1 << GR_CRASH_RES)))
-+		return;
-+
-+	if (time_before_eq(curr->expires, get_seconds())) {
-+		curr->expires = 0;
-+		curr->crashes = 0;
-+	}
-+
-+	curr->crashes++;
-+
-+	if (!curr->expires)
-+		curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
-+
-+	if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
-+	    time_after(curr->expires, get_seconds())) {
-+		if (task->uid && proc_is_setxid(task)) {
-+			gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
-+			spin_lock(&gr_uid_lock);
-+			gr_insert_uid(task->uid, curr->expires);
-+			spin_unlock(&gr_uid_lock);
-+			curr->expires = 0;
-+			curr->crashes = 0;
-+			read_lock(&tasklist_lock);
-+			do_each_thread(tsk2, tsk) {
-+				if (tsk != task && tsk->uid == task->uid)
-+					gr_fake_force_sig(SIGKILL, tsk);
-+			} while_each_thread(tsk2, tsk);
-+			read_unlock(&tasklist_lock);
-+		} else {
-+			gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
-+			read_lock(&tasklist_lock);
-+			do_each_thread(tsk2, tsk) {
-+				if (likely(tsk != task)) {
-+					curr2 = tsk->acl;
-+
-+					if (curr2->device == curr->device &&
-+					    curr2->inode == curr->inode)
-+						gr_fake_force_sig(SIGKILL, tsk);
-+				}
-+			} while_each_thread(tsk2, tsk);
-+			read_unlock(&tasklist_lock);
-+		}
-+	}
-+
-+	return;
-+}
-+
-+int
-+gr_check_crash_exec(const struct file *filp)
-+{
-+	struct acl_subject_label *curr;
-+
-+	if (unlikely(!gr_acl_is_enabled()))
-+		return 0;
-+
-+	read_lock(&gr_inode_lock);
-+	curr = lookup_acl_subj_label(filp->f_dentry->d_inode->i_ino,
-+				     filp->f_dentry->d_inode->i_sb->s_dev,
-+				     current->role);
-+	read_unlock(&gr_inode_lock);
-+
-+	if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
-+	    (!curr->crashes && !curr->expires))
-+		return 0;
-+
-+	if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
-+	    time_after(curr->expires, get_seconds()))
-+		return 1;
-+	else if (time_before_eq(curr->expires, get_seconds())) {
-+		curr->crashes = 0;
-+		curr->expires = 0;
-+	}
-+
-+	return 0;
-+}
-+
-+void
-+gr_handle_alertkill(struct task_struct *task)
-+{
-+	struct acl_subject_label *curracl;
-+	__u32 curr_ip;
-+	struct task_struct *p, *p2;
-+
-+	if (unlikely(!gr_acl_is_enabled()))
-+		return;
-+
-+	curracl = task->acl;
-+	curr_ip = task->signal->curr_ip;
-+
-+	if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
-+		read_lock(&tasklist_lock);
-+		do_each_thread(p2, p) {
-+			if (p->signal->curr_ip == curr_ip)
-+				gr_fake_force_sig(SIGKILL, p);
-+		} while_each_thread(p2, p);
-+		read_unlock(&tasklist_lock);
-+	} else if (curracl->mode & GR_KILLPROC)
-+		gr_fake_force_sig(SIGKILL, task);
-+
-+	return;
-+}
---- /dev/null
-+++ b/grsecurity/gracl_shm.c
-@@ -0,0 +1,33 @@
-+#include <linux/kernel.h>
-+#include <linux/mm.h>
-+#include <linux/sched.h>
-+#include <linux/file.h>
-+#include <linux/ipc.h>
-+#include <linux/gracl.h>
-+#include <linux/grsecurity.h>
-+#include <linux/grinternal.h>
-+
-+int
-+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
-+		const time_t shm_createtime, const uid_t cuid, const int shmid)
-+{
-+	struct task_struct *task;
-+
-+	if (!gr_acl_is_enabled())
-+		return 1;
-+
-+	task = find_task_by_pid(shm_cprid);
-+
-+	if (unlikely(!task))
-+		task = find_task_by_pid(shm_lapid);
-+
-+	if (unlikely(task && (time_before((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
-+			      (task->pid == shm_lapid)) &&
-+		     (task->acl->mode & GR_PROTSHM) &&
-+		     (task->acl != current->acl))) {
-+		gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
-+		return 0;
-+	}
-+
-+	return 1;
-+}
---- /dev/null
-+++ b/grsecurity/grsec_chdir.c
-@@ -0,0 +1,19 @@
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/fs.h>
-+#include <linux/file.h>
-+#include <linux/grsecurity.h>
-+#include <linux/grinternal.h>
-+
-+void
-+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
-+{
-+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
-+	if ((grsec_enable_chdir && grsec_enable_group &&
-+	     in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
-+					      !grsec_enable_group)) {
-+		gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
-+	}
-+#endif
-+	return;
-+}
---- /dev/null
-+++ b/grsecurity/grsec_chroot.c
-@@ -0,0 +1,335 @@
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/sched.h>
-+#include <linux/file.h>
-+#include <linux/fs.h>
-+#include <linux/mount.h>
-+#include <linux/types.h>
-+#include <linux/pid_namespace.h>
-+#include <linux/grsecurity.h>
-+#include <linux/grinternal.h>
-+
-+int
-+gr_handle_chroot_unix(const pid_t pid)
-+{
-+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
-+	struct pid *spid = NULL;
-+
-+	if (unlikely(!grsec_enable_chroot_unix))
-+		return 1;
-+
-+	if (likely(!proc_is_chrooted(current)))
-+		return 1;
-+
-+	read_lock(&tasklist_lock);
-+
-+	spid = find_pid(pid);
-+	if (spid) {
-+		struct task_struct *p;
-+		p = pid_task(spid, PIDTYPE_PID);
-+		task_lock(p);
-+		if (unlikely(!have_same_root(current, p))) {
-+			task_unlock(p);
-+			read_unlock(&tasklist_lock);
-+			gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
-+			return 0;
-+		}
-+		task_unlock(p);
-+	}
-+	read_unlock(&tasklist_lock);
-+#endif
-+	return 1;
-+}
-+
-+int
-+gr_handle_chroot_nice(void)
-+{
-+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
-+	if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
-+		gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
-+		return -EPERM;
-+	}
-+#endif
-+	return 0;
-+}
-+
-+int
-+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
-+{
-+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
-+	if (grsec_enable_chroot_nice && (niceval < task_nice(p))
-+			&& proc_is_chrooted(current)) {
-+		gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
-+		return -EACCES;
-+	}
-+#endif
-+	return 0;
-+}
-+
-+int
-+gr_handle_chroot_rawio(const struct inode *inode)
-+{
-+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
-+	if (grsec_enable_chroot_caps && proc_is_chrooted(current) && 
-+	    inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
-+		return 1;
-+#endif
-+	return 0;
-+}
-+
-+int
-+gr_pid_is_chrooted(struct task_struct *p)
-+{
-+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
-+	if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
-+		return 0;
-+
-+	task_lock(p);
-+	if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
-+	    !have_same_root(current, p)) {
-+		task_unlock(p);
-+		return 1;
-+	}
-+	task_unlock(p);
-+#endif
-+	return 0;
-+}
-+
-+EXPORT_SYMBOL(gr_pid_is_chrooted);
-+
-+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
-+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
-+{
-+	struct dentry *dentry = (struct dentry *)u_dentry;
-+	struct vfsmount *mnt = (struct vfsmount *)u_mnt;
-+	struct dentry *realroot;
-+	struct vfsmount *realrootmnt;
-+	struct dentry *currentroot;
-+	struct vfsmount *currentmnt;
-+	struct task_struct *reaper = child_reaper(current);
-+	int ret = 1;
-+
-+	read_lock(&reaper->fs->lock);
-+	realrootmnt = mntget(reaper->fs->rootmnt);
-+	realroot = dget(reaper->fs->root);
-+	read_unlock(&reaper->fs->lock);
-+
-+	read_lock(&current->fs->lock);
-+	currentmnt = mntget(current->fs->rootmnt);
-+	currentroot = dget(current->fs->root);
-+	read_unlock(&current->fs->lock);
-+
-+	spin_lock(&dcache_lock);
-+	for (;;) {
-+		if (unlikely((dentry == realroot && mnt == realrootmnt)
-+		     || (dentry == currentroot && mnt == currentmnt)))
-+			break;
-+		if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
-+			if (mnt->mnt_parent == mnt)
-+				break;
-+			dentry = mnt->mnt_mountpoint;
-+			mnt = mnt->mnt_parent;
-+			continue;
-+		}
-+		dentry = dentry->d_parent;
-+	}
-+	spin_unlock(&dcache_lock);
-+
-+	dput(currentroot);
-+	mntput(currentmnt);
-+
-+	/* access is outside of chroot */
-+	if (dentry == realroot && mnt == realrootmnt)
-+		ret = 0;
-+
-+	dput(realroot);
-+	mntput(realrootmnt);
-+	return ret;
-+}
-+#endif
-+
-+int
-+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
-+{
-+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
-+	if (!grsec_enable_chroot_fchdir)
-+		return 1;
-+
-+	if (!proc_is_chrooted(current))
-+		return 1;
-+	else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
-+		gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
-+		return 0;
-+	}
-+#endif
-+	return 1;
-+}
-+
-+int
-+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
-+		const time_t shm_createtime)
-+{
-+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
-+	struct pid *pid = NULL;
-+	time_t starttime;
-+
-+	if (unlikely(!grsec_enable_chroot_shmat))
-+		return 1;
-+
-+	if (likely(!proc_is_chrooted(current)))
-+		return 1;
-+
-+	read_lock(&tasklist_lock);
-+
-+	pid = find_pid(shm_cprid);
-+	if (pid) {
-+		struct task_struct *p;
-+		p = pid_task(pid, PIDTYPE_PID);
-+		task_lock(p);
-+		starttime = p->start_time.tv_sec;
-+		if (unlikely(!have_same_root(current, p) &&
-+			     time_before((unsigned long)starttime, (unsigned long)shm_createtime))) {
-+			task_unlock(p);
-+			read_unlock(&tasklist_lock);
-+			gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
-+			return 0;
-+		}
-+		task_unlock(p);
-+	} else {
-+		pid = find_pid(shm_lapid);
-+		if (pid) {
-+			struct task_struct *p;
-+			p = pid_task(pid, PIDTYPE_PID);
-+			task_lock(p);
-+			if (unlikely(!have_same_root(current, p))) {
-+				task_unlock(p);
-+				read_unlock(&tasklist_lock);
-+				gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
-+				return 0;
-+			}
-+			task_unlock(p);
-+		}
-+	}
-+
-+	read_unlock(&tasklist_lock);
-+#endif
-+	return 1;
-+}
-+
-+void
-+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
-+{
-+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
-+	if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
-+		gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
-+#endif
-+	return;
-+}
-+
-+int
-+gr_handle_chroot_mknod(const struct dentry *dentry,
-+		       const struct vfsmount *mnt, const int mode)
-+{
-+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
-+	if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) && 
-+	    proc_is_chrooted(current)) {
-+		gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
-+		return -EPERM;
-+	}
-+#endif
-+	return 0;
-+}
-+
-+int
-+gr_handle_chroot_mount(const struct dentry *dentry,
-+		       const struct vfsmount *mnt, const char *dev_name)
-+{
-+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
-+	if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
-+		gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name, dentry, mnt);
-+		return -EPERM;
-+	}
-+#endif
-+	return 0;
-+}
-+
-+int
-+gr_handle_chroot_pivot(void)
-+{
-+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
-+	if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
-+		gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
-+		return -EPERM;
-+	}
-+#endif
-+	return 0;
-+}
-+
-+int
-+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
-+{
-+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
-+	if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
-+	    !gr_is_outside_chroot(dentry, mnt)) {
-+		gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
-+		return -EPERM;
-+	}
-+#endif
-+	return 0;
-+}
-+
-+void
-+gr_handle_chroot_caps(struct task_struct *task)
-+{
-+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
-+	if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
-+		task->cap_permitted =
-+		    cap_drop(task->cap_permitted, GR_CHROOT_CAPS);
-+		task->cap_inheritable =
-+		    cap_drop(task->cap_inheritable, GR_CHROOT_CAPS);
-+		task->cap_effective =
-+		    cap_drop(task->cap_effective, GR_CHROOT_CAPS);
-+	}
-+#endif
-+	return;
-+}
-+
-+int
-+gr_handle_chroot_sysctl(const int op)
-+{
-+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
-+	if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
-+	    && (op & 002))
-+		return -EACCES;
-+#endif
-+	return 0;
-+}
-+
-+void
-+gr_handle_chroot_chdir(struct dentry *dentry, struct vfsmount *mnt)
-+{
-+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
-+	if (grsec_enable_chroot_chdir)
-+		set_fs_pwd(current->fs, mnt, dentry);
-+#endif
-+	return;
-+}
-+
-+int
-+gr_handle_chroot_chmod(const struct dentry *dentry,
-+		       const struct vfsmount *mnt, const int mode)
-+{
-+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
-+	if (grsec_enable_chroot_chmod &&
-+	    ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
-+	    proc_is_chrooted(current)) {
-+		gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
-+		return -EPERM;
-+	}
-+#endif
-+	return 0;
-+}
-+
-+#ifdef CONFIG_SECURITY
-+EXPORT_SYMBOL(gr_handle_chroot_caps);
-+#endif
---- /dev/null
-+++ b/grsecurity/grsec_disabled.c
-@@ -0,0 +1,418 @@
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/sched.h>
-+#include <linux/file.h>
-+#include <linux/fs.h>
-+#include <linux/kdev_t.h>
-+#include <linux/net.h>
-+#include <linux/in.h>
-+#include <linux/ip.h>
-+#include <linux/skbuff.h>
-+#include <linux/sysctl.h>
-+
-+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
-+void
-+pax_set_initial_flags(struct linux_binprm *bprm)
-+{
-+	return;
-+}
-+#endif
-+
-+#ifdef CONFIG_SYSCTL
-+__u32
-+gr_handle_sysctl(const struct ctl_table * table, const int op)
-+{
-+	return 0;
-+}
-+#endif
-+
-+int
-+gr_acl_is_enabled(void)
-+{
-+	return 0;
-+}
-+
-+int
-+gr_handle_rawio(const struct inode *inode)
-+{
-+	return 0;
-+}
-+
-+void
-+gr_acl_handle_psacct(struct task_struct *task, const long code)
-+{
-+	return;
-+}
-+
-+int
-+gr_handle_ptrace(struct task_struct *task, const long request)
-+{
-+	return 0;
-+}
-+
-+int
-+gr_handle_proc_ptrace(struct task_struct *task)
-+{
-+	return 0;
-+}
-+
-+void
-+gr_learn_resource(const struct task_struct *task,
-+		  const int res, const unsigned long wanted, const int gt)
-+{
-+	return;
-+}
-+
-+int
-+gr_set_acls(const int type)
-+{
-+	return 0;
-+}
-+
-+int
-+gr_check_hidden_task(const struct task_struct *tsk)
-+{
-+	return 0;
-+}
-+
-+int
-+gr_check_protected_task(const struct task_struct *task)
-+{
-+	return 0;
-+}
-+
-+void
-+gr_copy_label(struct task_struct *tsk)
-+{
-+	return;
-+}
-+
-+void
-+gr_set_pax_flags(struct task_struct *task)
-+{
-+	return;
-+}
-+
-+int
-+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt)
-+{
-+	return 0;
-+}
-+
-+void
-+gr_handle_delete(const ino_t ino, const dev_t dev)
-+{
-+	return;
-+}
-+
-+void
-+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
-+{
-+	return;
-+}
-+
-+void
-+gr_handle_crash(struct task_struct *task, const int sig)
-+{
-+	return;
-+}
-+
-+int
-+gr_check_crash_exec(const struct file *filp)
-+{
-+	return 0;
-+}
-+
-+int
-+gr_check_crash_uid(const uid_t uid)
-+{
-+	return 0;
-+}
-+
-+void
-+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
-+		 struct dentry *old_dentry,
-+		 struct dentry *new_dentry,
-+		 struct vfsmount *mnt, const __u8 replace)
-+{
-+	return;
-+}
-+
-+int
-+gr_search_socket(const int family, const int type, const int protocol)
-+{
-+	return 1;
-+}
-+
-+int
-+gr_search_connectbind(const int mode, const struct socket *sock,
-+		      const struct sockaddr_in *addr)
-+{
-+	return 1;
-+}
-+
-+int
-+gr_task_is_capable(struct task_struct *task, const int cap)
-+{
-+	return 1;
-+}
-+
-+int
-+gr_is_capable_nolog(const int cap)
-+{
-+	return 1;
-+}
-+
-+void
-+gr_handle_alertkill(struct task_struct *task)
-+{
-+	return;
-+}
-+
-+__u32
-+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
-+{
-+	return 1;
-+}
-+
-+__u32
-+gr_acl_handle_hidden_file(const struct dentry * dentry,
-+			  const struct vfsmount * mnt)
-+{
-+	return 1;
-+}
-+
-+__u32
-+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
-+		   const int fmode)
-+{
-+	return 1;
-+}
-+
-+__u32
-+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
-+{
-+	return 1;
-+}
-+
-+__u32
-+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
-+{
-+	return 1;
-+}
-+
-+int
-+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
-+		   unsigned int *vm_flags)
-+{
-+	return 1;
-+}
-+
-+__u32
-+gr_acl_handle_truncate(const struct dentry * dentry,
-+		       const struct vfsmount * mnt)
-+{
-+	return 1;
-+}
-+
-+__u32
-+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
-+{
-+	return 1;
-+}
-+
-+__u32
-+gr_acl_handle_access(const struct dentry * dentry,
-+		     const struct vfsmount * mnt, const int fmode)
-+{
-+	return 1;
-+}
-+
-+__u32
-+gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
-+		     mode_t mode)
-+{
-+	return 1;
-+}
-+
-+__u32
-+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
-+		    mode_t mode)
-+{
-+	return 1;
-+}
-+
-+__u32
-+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
-+{
-+	return 1;
-+}
-+
-+void
-+grsecurity_init(void)
-+{
-+	return;
-+}
-+
-+__u32
-+gr_acl_handle_mknod(const struct dentry * new_dentry,
-+		    const struct dentry * parent_dentry,
-+		    const struct vfsmount * parent_mnt,
-+		    const int mode)
-+{
-+	return 1;
-+}
-+
-+__u32
-+gr_acl_handle_mkdir(const struct dentry * new_dentry,
-+		    const struct dentry * parent_dentry,
-+		    const struct vfsmount * parent_mnt)
-+{
-+	return 1;
-+}
-+
-+__u32
-+gr_acl_handle_symlink(const struct dentry * new_dentry,
-+		      const struct dentry * parent_dentry,
-+		      const struct vfsmount * parent_mnt, const char *from)
-+{
-+	return 1;
-+}
-+
-+__u32
-+gr_acl_handle_link(const struct dentry * new_dentry,
-+		   const struct dentry * parent_dentry,
-+		   const struct vfsmount * parent_mnt,
-+		   const struct dentry * old_dentry,
-+		   const struct vfsmount * old_mnt, const char *to)
-+{
-+	return 1;
-+}
-+
-+int
-+gr_acl_handle_rename(const struct dentry *new_dentry,
-+		     const struct dentry *parent_dentry,
-+		     const struct vfsmount *parent_mnt,
-+		     const struct dentry *old_dentry,
-+		     const struct inode *old_parent_inode,
-+		     const struct vfsmount *old_mnt, const char *newname)
-+{
-+	return 0;
-+}
-+
-+int
-+gr_acl_handle_filldir(const struct file *file, const char *name,
-+		      const int namelen, const ino_t ino)
-+{
-+	return 1;
-+}
-+
-+int
-+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
-+		const time_t shm_createtime, const uid_t cuid, const int shmid)
-+{
-+	return 1;
-+}
-+
-+int
-+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
-+{
-+	return 1;
-+}
-+
-+int
-+gr_search_accept(const struct socket *sock)
-+{
-+	return 1;
-+}
-+
-+int
-+gr_search_listen(const struct socket *sock)
-+{
-+	return 1;
-+}
-+
-+int
-+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
-+{
-+	return 1;
-+}
-+
-+__u32
-+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
-+{
-+	return 1;
-+}
-+
-+__u32
-+gr_acl_handle_creat(const struct dentry * dentry,
-+		    const struct dentry * p_dentry,
-+		    const struct vfsmount * p_mnt, const int fmode,
-+		    const int imode)
-+{
-+	return 1;
-+}
-+
-+void
-+gr_acl_handle_exit(void)
-+{
-+	return;
-+}
-+
-+int
-+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
-+{
-+	return 1;
-+}
-+
-+void
-+gr_set_role_label(const uid_t uid, const gid_t gid)
-+{
-+	return;
-+}
-+
-+int
-+gr_acl_handle_procpidmem(const struct task_struct *task)
-+{
-+	return 0;
-+}
-+
-+int
-+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
-+{
-+	return 1;
-+}
-+
-+int
-+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
-+{
-+	return 1;
-+}
-+
-+void
-+gr_set_kernel_label(struct task_struct *task)
-+{
-+	return;
-+}
-+
-+int
-+gr_check_user_change(int real, int effective, int fs)
-+{
-+	return 0;
-+}
-+
-+int
-+gr_check_group_change(int real, int effective, int fs)
-+{
-+	return 0;
-+}
-+
-+
-+EXPORT_SYMBOL(gr_task_is_capable);
-+EXPORT_SYMBOL(gr_is_capable_nolog);
-+EXPORT_SYMBOL(gr_learn_resource);
-+EXPORT_SYMBOL(gr_set_kernel_label);
-+#ifdef CONFIG_SECURITY
-+EXPORT_SYMBOL(gr_check_user_change);
-+EXPORT_SYMBOL(gr_check_group_change);
-+#endif
---- /dev/null
-+++ b/grsecurity/grsec_exec.c
-@@ -0,0 +1,88 @@
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/file.h>
-+#include <linux/binfmts.h>
-+#include <linux/smp_lock.h>
-+#include <linux/fs.h>
-+#include <linux/types.h>
-+#include <linux/grdefs.h>
-+#include <linux/grinternal.h>
-+#include <linux/capability.h>
-+
-+#include <asm/uaccess.h>
-+
-+#ifdef CONFIG_GRKERNSEC_EXECLOG
-+static char gr_exec_arg_buf[132];
-+static DECLARE_MUTEX(gr_exec_arg_sem);
-+#endif
-+
-+int
-+gr_handle_nproc(void)
-+{
-+#ifdef CONFIG_GRKERNSEC_EXECVE
-+	if (grsec_enable_execve && current->user &&
-+	    (atomic_read(&current->user->processes) >
-+	     current->signal->rlim[RLIMIT_NPROC].rlim_cur) &&
-+	    !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE)) {
-+		gr_log_noargs(GR_DONT_AUDIT, GR_NPROC_MSG);
-+		return -EAGAIN;
-+	}
-+#endif
-+	return 0;
-+}
-+
-+void
-+gr_handle_exec_args(struct linux_binprm *bprm, const char __user *__user *argv)
-+{
-+#ifdef CONFIG_GRKERNSEC_EXECLOG
-+	char *grarg = gr_exec_arg_buf;
-+	unsigned int i, x, execlen = 0;
-+	char c;
-+
-+	if (!((grsec_enable_execlog && grsec_enable_group &&
-+	       in_group_p(grsec_audit_gid))
-+	      || (grsec_enable_execlog && !grsec_enable_group)))
-+		return;
-+
-+	down(&gr_exec_arg_sem);
-+	memset(grarg, 0, sizeof(gr_exec_arg_buf));
-+
-+	if (unlikely(argv == NULL))
-+		goto log;
-+
-+	for (i = 0; i < bprm->argc && execlen < 128; i++) {
-+		const char __user *p;
-+		unsigned int len;
-+
-+		if (copy_from_user(&p, argv + i, sizeof(p)))
-+			goto log;
-+		if (!p)
-+			goto log;
-+		len = strnlen_user(p, 128 - execlen);
-+		if (len > 128 - execlen)
-+			len = 128 - execlen;
-+		else if (len > 0)
-+			len--;
-+		if (copy_from_user(grarg + execlen, p, len))
-+			goto log;
-+
-+		/* rewrite unprintable characters */
-+		for (x = 0; x < len; x++) {
-+			c = *(grarg + execlen + x);
-+			if (c < 32 || c > 126)
-+				*(grarg + execlen + x) = ' ';
-+		}
-+
-+		execlen += len;
-+		*(grarg + execlen) = ' ';
-+		*(grarg + execlen + 1) = '\0';
-+		execlen++;
-+	}
-+
-+      log:
-+	gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_dentry,
-+			bprm->file->f_vfsmnt, grarg);
-+	up(&gr_exec_arg_sem);
-+#endif
-+	return;
-+}
---- /dev/null
-+++ b/grsecurity/grsec_fifo.c
-@@ -0,0 +1,22 @@
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/fs.h>
-+#include <linux/file.h>
-+#include <linux/grinternal.h>
-+
-+int
-+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
-+	       const struct dentry *dir, const int flag, const int acc_mode)
-+{
-+#ifdef CONFIG_GRKERNSEC_FIFO
-+	if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
-+	    !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
-+	    (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
-+	    (current->fsuid != dentry->d_inode->i_uid)) {
-+		if (!generic_permission(dentry->d_inode, acc_mode, NULL))
-+			gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
-+		return -EACCES;
-+	}
-+#endif
-+	return 0;
-+}
---- /dev/null
-+++ b/grsecurity/grsec_fork.c
-@@ -0,0 +1,15 @@
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/grsecurity.h>
-+#include <linux/grinternal.h>
-+#include <linux/errno.h>
-+
-+void
-+gr_log_forkfail(const int retval)
-+{
-+#ifdef CONFIG_GRKERNSEC_FORKFAIL
-+	if (grsec_enable_forkfail && retval != -ERESTARTNOINTR)
-+		gr_log_int(GR_DONT_AUDIT, GR_FAILFORK_MSG, retval);
-+#endif
-+	return;
-+}
---- /dev/null
-+++ b/grsecurity/grsec_init.c
-@@ -0,0 +1,230 @@
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/mm.h>
-+#include <linux/smp_lock.h>
-+#include <linux/gracl.h>
-+#include <linux/slab.h>
-+#include <linux/vmalloc.h>
-+#include <linux/percpu.h>
-+
-+int grsec_enable_shm;
-+int grsec_enable_link;
-+int grsec_enable_dmesg;
-+int grsec_enable_fifo;
-+int grsec_enable_execve;
-+int grsec_enable_execlog;
-+int grsec_enable_signal;
-+int grsec_enable_forkfail;
-+int grsec_enable_time;
-+int grsec_enable_audit_textrel;
-+int grsec_enable_group;
-+int grsec_audit_gid;
-+int grsec_enable_chdir;
-+int grsec_enable_audit_ipc;
-+int grsec_enable_mount;
-+int grsec_enable_chroot_findtask;
-+int grsec_enable_chroot_mount;
-+int grsec_enable_chroot_shmat;
-+int grsec_enable_chroot_fchdir;
-+int grsec_enable_chroot_double;
-+int grsec_enable_chroot_pivot;
-+int grsec_enable_chroot_chdir;
-+int grsec_enable_chroot_chmod;
-+int grsec_enable_chroot_mknod;
-+int grsec_enable_chroot_nice;
-+int grsec_enable_chroot_execlog;
-+int grsec_enable_chroot_caps;
-+int grsec_enable_chroot_sysctl;
-+int grsec_enable_chroot_unix;
-+int grsec_enable_tpe;
-+int grsec_tpe_gid;
-+int grsec_enable_tpe_all;
-+int grsec_enable_socket_all;
-+int grsec_socket_all_gid;
-+int grsec_enable_socket_client;
-+int grsec_socket_client_gid;
-+int grsec_enable_socket_server;
-+int grsec_socket_server_gid;
-+int grsec_resource_logging;
-+int grsec_lock;
-+
-+spinlock_t grsec_alert_lock = SPIN_LOCK_UNLOCKED;
-+unsigned long grsec_alert_wtime = 0;
-+unsigned long grsec_alert_fyet = 0;
-+
-+spinlock_t grsec_audit_lock = SPIN_LOCK_UNLOCKED;
-+
-+rwlock_t grsec_exec_file_lock = RW_LOCK_UNLOCKED;
-+
-+char *gr_shared_page[4];
-+
-+char *gr_alert_log_fmt;
-+char *gr_audit_log_fmt;
-+char *gr_alert_log_buf;
-+char *gr_audit_log_buf;
-+
-+extern struct gr_arg *gr_usermode;
-+extern unsigned char *gr_system_salt;
-+extern unsigned char *gr_system_sum;
-+
-+void
-+grsecurity_init(void)
-+{
-+	int j;
-+	/* create the per-cpu shared pages */
-+
-+	for (j = 0; j < 4; j++) {
-+		gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE);
-+		if (gr_shared_page[j] == NULL) {
-+			panic("Unable to allocate grsecurity shared page");
-+			return;
-+		}
-+	}
-+
-+	/* allocate log buffers */
-+	gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
-+	if (!gr_alert_log_fmt) {
-+		panic("Unable to allocate grsecurity alert log format buffer");
-+		return;
-+	}
-+	gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
-+	if (!gr_audit_log_fmt) {
-+		panic("Unable to allocate grsecurity audit log format buffer");
-+		return;
-+	}
-+	gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
-+	if (!gr_alert_log_buf) {
-+		panic("Unable to allocate grsecurity alert log buffer");
-+		return;
-+	}
-+	gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
-+	if (!gr_audit_log_buf) {
-+		panic("Unable to allocate grsecurity audit log buffer");
-+		return;
-+	}
-+
-+	/* allocate memory for authentication structure */
-+	gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
-+	gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
-+	gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
-+
-+	if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
-+		panic("Unable to allocate grsecurity authentication structure");
-+		return;
-+	}
-+
-+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
-+#ifndef CONFIG_GRKERNSEC_SYSCTL
-+	grsec_lock = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_SHM
-+	grsec_enable_shm = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
-+	grsec_enable_audit_textrel = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
-+	grsec_enable_group = 1;
-+	grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
-+	grsec_enable_chdir = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_AUDIT_IPC
-+	grsec_enable_audit_ipc = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
-+	grsec_enable_mount = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_LINK
-+	grsec_enable_link = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_DMESG
-+	grsec_enable_dmesg = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_FIFO
-+	grsec_enable_fifo = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_EXECVE
-+	grsec_enable_execve = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_EXECLOG
-+	grsec_enable_execlog = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_SIGNAL
-+	grsec_enable_signal = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_FORKFAIL
-+	grsec_enable_forkfail = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_TIME
-+	grsec_enable_time = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_RESLOG
-+	grsec_resource_logging = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
-+	grsec_enable_chroot_findtask = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
-+	grsec_enable_chroot_unix = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
-+	grsec_enable_chroot_mount = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
-+	grsec_enable_chroot_fchdir = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
-+	grsec_enable_chroot_shmat = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
-+	grsec_enable_chroot_double = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
-+	grsec_enable_chroot_pivot = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
-+	grsec_enable_chroot_chdir = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
-+	grsec_enable_chroot_chmod = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
-+	grsec_enable_chroot_mknod = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
-+	grsec_enable_chroot_nice = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
-+	grsec_enable_chroot_execlog = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
-+	grsec_enable_chroot_caps = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
-+	grsec_enable_chroot_sysctl = 1;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_TPE
-+	grsec_enable_tpe = 1;
-+	grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
-+#ifdef CONFIG_GRKERNSEC_TPE_ALL
-+	grsec_enable_tpe_all = 1;
-+#endif
-+#endif
-+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
-+	grsec_enable_socket_all = 1;
-+	grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
-+	grsec_enable_socket_client = 1;
-+	grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
-+#endif
-+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
-+	grsec_enable_socket_server = 1;
-+	grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
-+#endif
-+#endif
-+
-+	return;
-+}
---- /dev/null
-+++ b/grsecurity/grsec_ipc.c
-@@ -0,0 +1,81 @@
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/types.h>
-+#include <linux/ipc.h>
-+#include <linux/grsecurity.h>
-+#include <linux/grinternal.h>
-+
-+void
-+gr_log_msgget(const int ret, const int msgflg)
-+{
-+#ifdef CONFIG_GRKERNSEC_AUDIT_IPC
-+	if (((grsec_enable_group && in_group_p(grsec_audit_gid) &&
-+	      grsec_enable_audit_ipc) || (grsec_enable_audit_ipc &&
-+					  !grsec_enable_group)) && (ret >= 0)
-+	    && (msgflg & IPC_CREAT))
-+		gr_log_noargs(GR_DO_AUDIT, GR_MSGQ_AUDIT_MSG);
-+#endif
-+	return;
-+}
-+
-+void
-+gr_log_msgrm(const uid_t uid, const uid_t cuid)
-+{
-+#ifdef CONFIG_GRKERNSEC_AUDIT_IPC
-+	if ((grsec_enable_group && in_group_p(grsec_audit_gid) &&
-+	     grsec_enable_audit_ipc) ||
-+	    (grsec_enable_audit_ipc && !grsec_enable_group))
-+		gr_log_int_int(GR_DO_AUDIT, GR_MSGQR_AUDIT_MSG, uid, cuid);
-+#endif
-+	return;
-+}
-+
-+void
-+gr_log_semget(const int err, const int semflg)
-+{
-+#ifdef CONFIG_GRKERNSEC_AUDIT_IPC
-+	if (((grsec_enable_group && in_group_p(grsec_audit_gid) &&
-+	      grsec_enable_audit_ipc) || (grsec_enable_audit_ipc &&
-+					  !grsec_enable_group)) && (err >= 0)
-+	    && (semflg & IPC_CREAT))
-+		gr_log_noargs(GR_DO_AUDIT, GR_SEM_AUDIT_MSG);
-+#endif
-+	return;
-+}
-+
-+void
-+gr_log_semrm(const uid_t uid, const uid_t cuid)
-+{
-+#ifdef CONFIG_GRKERNSEC_AUDIT_IPC
-+	if ((grsec_enable_group && in_group_p(grsec_audit_gid) &&
-+	     grsec_enable_audit_ipc) ||
-+	    (grsec_enable_audit_ipc && !grsec_enable_group))
-+		gr_log_int_int(GR_DO_AUDIT, GR_SEMR_AUDIT_MSG, uid, cuid);
-+#endif
-+	return;
-+}
-+
-+void
-+gr_log_shmget(const int err, const int shmflg, const size_t size)
-+{
-+#ifdef CONFIG_GRKERNSEC_AUDIT_IPC
-+	if (((grsec_enable_group && in_group_p(grsec_audit_gid) &&
-+	      grsec_enable_audit_ipc) || (grsec_enable_audit_ipc &&
-+					  !grsec_enable_group)) && (err >= 0)
-+	    && (shmflg & IPC_CREAT))
-+		gr_log_int(GR_DO_AUDIT, GR_SHM_AUDIT_MSG, size);
-+#endif
-+	return;
-+}
-+
-+void
-+gr_log_shmrm(const uid_t uid, const uid_t cuid)
-+{
-+#ifdef CONFIG_GRKERNSEC_AUDIT_IPC
-+	if ((grsec_enable_group && in_group_p(grsec_audit_gid) &&
-+	     grsec_enable_audit_ipc) ||
-+	    (grsec_enable_audit_ipc && !grsec_enable_group))
-+		gr_log_int_int(GR_DO_AUDIT, GR_SHMR_AUDIT_MSG, uid, cuid);
-+#endif
-+	return;
-+}
---- /dev/null
-+++ b/grsecurity/grsec_link.c
-@@ -0,0 +1,39 @@
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/fs.h>
-+#include <linux/file.h>
-+#include <linux/grinternal.h>
-+
-+int
-+gr_handle_follow_link(const struct inode *parent,
-+		      const struct inode *inode,
-+		      const struct dentry *dentry, const struct vfsmount *mnt)
-+{
-+#ifdef CONFIG_GRKERNSEC_LINK
-+	if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
-+	    (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
-+	    (parent->i_mode & S_IWOTH) && (current->fsuid != inode->i_uid)) {
-+		gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
-+		return -EACCES;
-+	}
-+#endif
-+	return 0;
-+}
-+
-+int
-+gr_handle_hardlink(const struct dentry *dentry,
-+		   const struct vfsmount *mnt,
-+		   struct inode *inode, const int mode, const char *to)
-+{
-+#ifdef CONFIG_GRKERNSEC_LINK
-+	if (grsec_enable_link && current->fsuid != inode->i_uid &&
-+	    (!S_ISREG(mode) || (mode & S_ISUID) ||
-+	     ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
-+	     (generic_permission(inode, MAY_READ | MAY_WRITE, NULL))) &&
-+	    !capable(CAP_FOWNER) && current->uid) {
-+		gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
-+		return -EPERM;
-+	}
-+#endif
-+	return 0;
-+}
---- /dev/null
-+++ b/grsecurity/grsec_log.c
-@@ -0,0 +1,269 @@
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/file.h>
-+#include <linux/tty.h>
-+#include <linux/fs.h>
-+#include <linux/grinternal.h>
-+
-+#define BEGIN_LOCKS(x) \
-+	read_lock(&tasklist_lock); \
-+	read_lock(&grsec_exec_file_lock); \
-+	if (x != GR_DO_AUDIT) \
-+		spin_lock(&grsec_alert_lock); \
-+	else \
-+		spin_lock(&grsec_audit_lock)
-+
-+#define END_LOCKS(x) \
-+	if (x != GR_DO_AUDIT) \
-+		spin_unlock(&grsec_alert_lock); \
-+	else \
-+		spin_unlock(&grsec_audit_lock); \
-+	read_unlock(&grsec_exec_file_lock); \
-+	read_unlock(&tasklist_lock); \
-+	if (x == GR_DONT_AUDIT) \
-+		gr_handle_alertkill(current)
-+
-+enum {
-+	FLOODING,
-+	NO_FLOODING
-+};
-+
-+extern char *gr_alert_log_fmt;
-+extern char *gr_audit_log_fmt;
-+extern char *gr_alert_log_buf;
-+extern char *gr_audit_log_buf;
-+
-+static int gr_log_start(int audit)
-+{
-+	char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
-+	char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
-+	char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
-+
-+	if (audit == GR_DO_AUDIT)
-+		goto set_fmt;
-+
-+	if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
-+		grsec_alert_wtime = jiffies;
-+		grsec_alert_fyet = 0;
-+	} else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
-+		grsec_alert_fyet++;
-+	} else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
-+		grsec_alert_wtime = jiffies;
-+		grsec_alert_fyet++;
-+		printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
-+		return FLOODING;
-+	} else return FLOODING;
-+
-+set_fmt:
-+	memset(buf, 0, PAGE_SIZE);
-+	if (current->signal->curr_ip && gr_acl_is_enabled()) {
-+		sprintf(fmt, "%s%s", loglevel, "grsec: From %u.%u.%u.%u: (%.64s:%c:%.950s) ");
-+		snprintf(buf, PAGE_SIZE - 1, fmt, NIPQUAD(current->signal->curr_ip), current->role->rolename, gr_roletype_to_char(), current->acl->filename);
-+	} else if (current->signal->curr_ip) {
-+		sprintf(fmt, "%s%s", loglevel, "grsec: From %u.%u.%u.%u: ");
-+		snprintf(buf, PAGE_SIZE - 1, fmt, NIPQUAD(current->signal->curr_ip));
-+	} else if (gr_acl_is_enabled()) {
-+		sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
-+		snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
-+	} else {
-+		sprintf(fmt, "%s%s", loglevel, "grsec: ");
-+		strcpy(buf, fmt);
-+	}
-+
-+	return NO_FLOODING;
-+}
-+
-+static void gr_log_middle(int audit, const char *msg, va_list ap)
-+{
-+	char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
-+	unsigned int len = strlen(buf);
-+
-+	vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
-+
-+	return;
-+}
-+
-+static void gr_log_middle_varargs(int audit, const char *msg, ...)
-+{
-+	char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
-+	unsigned int len = strlen(buf);
-+	va_list ap;
-+
-+	va_start(ap, msg);
-+	vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
-+	va_end(ap);
-+
-+	return;
-+}
-+
-+static void gr_log_end(int audit)
-+{
-+	char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
-+	unsigned int len = strlen(buf);
-+
-+	snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current));
-+	printk("%s\n", buf);
-+
-+	return;
-+}
-+
-+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
-+{
-+	int logtype;
-+	char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
-+	char *str1, *str2, *str3;
-+	int num1, num2;
-+	unsigned long ulong1, ulong2;
-+	struct dentry *dentry;
-+	struct vfsmount *mnt;
-+	struct file *file;
-+	struct task_struct *task;
-+	va_list ap;
-+
-+	BEGIN_LOCKS(audit);
-+	logtype = gr_log_start(audit);
-+	if (logtype == FLOODING) {
-+		END_LOCKS(audit);
-+		return;
-+	}
-+	va_start(ap, argtypes);
-+	switch (argtypes) {
-+	case GR_TTYSNIFF:
-+		task = va_arg(ap, struct task_struct *);
-+		gr_log_middle_varargs(audit, msg, NIPQUAD(task->signal->curr_ip), gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->parent->comm, task->parent->pid);
-+		break;
-+	case GR_SYSCTL_HIDDEN:
-+		str1 = va_arg(ap, char *);
-+		gr_log_middle_varargs(audit, msg, result, str1);
-+		break;
-+	case GR_RBAC:
-+		dentry = va_arg(ap, struct dentry *);
-+		mnt = va_arg(ap, struct vfsmount *);
-+		gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
-+		break;
-+	case GR_RBAC_STR:
-+		dentry = va_arg(ap, struct dentry *);
-+		mnt = va_arg(ap, struct vfsmount *);
-+		str1 = va_arg(ap, char *);
-+		gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
-+		break;
-+	case GR_STR_RBAC:
-+		str1 = va_arg(ap, char *);
-+		dentry = va_arg(ap, struct dentry *);
-+		mnt = va_arg(ap, struct vfsmount *);
-+		gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
-+		break;
-+	case GR_RBAC_MODE2:
-+		dentry = va_arg(ap, struct dentry *);
-+		mnt = va_arg(ap, struct vfsmount *);
-+		str1 = va_arg(ap, char *);
-+		str2 = va_arg(ap, char *);
-+		gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
-+		break;
-+	case GR_RBAC_MODE3:
-+		dentry = va_arg(ap, struct dentry *);
-+		mnt = va_arg(ap, struct vfsmount *);
-+		str1 = va_arg(ap, char *);
-+		str2 = va_arg(ap, char *);
-+		str3 = va_arg(ap, char *);
-+		gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
-+		break;
-+	case GR_FILENAME:
-+		dentry = va_arg(ap, struct dentry *);
-+		mnt = va_arg(ap, struct vfsmount *);
-+		gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
-+		break;
-+	case GR_STR_FILENAME:
-+		str1 = va_arg(ap, char *);
-+		dentry = va_arg(ap, struct dentry *);
-+		mnt = va_arg(ap, struct vfsmount *);
-+		gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
-+		break;
-+	case GR_FILENAME_STR:
-+		dentry = va_arg(ap, struct dentry *);
-+		mnt = va_arg(ap, struct vfsmount *);
-+		str1 = va_arg(ap, char *);
-+		gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
-+		break;
-+	case GR_FILENAME_TWO_INT:
-+		dentry = va_arg(ap, struct dentry *);
-+		mnt = va_arg(ap, struct vfsmount *);
-+		num1 = va_arg(ap, int);
-+		num2 = va_arg(ap, int);
-+		gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
-+		break;
-+	case GR_FILENAME_TWO_INT_STR:
-+		dentry = va_arg(ap, struct dentry *);
-+		mnt = va_arg(ap, struct vfsmount *);
-+		num1 = va_arg(ap, int);
-+		num2 = va_arg(ap, int);
-+		str1 = va_arg(ap, char *);
-+		gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
-+		break;
-+	case GR_TEXTREL:
-+		file = va_arg(ap, struct file *);
-+		ulong1 = va_arg(ap, unsigned long);
-+		ulong2 = va_arg(ap, unsigned long);
-+		gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_dentry, file->f_vfsmnt) : "<anonymous mapping>", ulong1, ulong2);
-+		break;
-+	case GR_PTRACE:
-+		task = va_arg(ap, struct task_struct *);
-+		gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_dentry, task->exec_file->f_vfsmnt) : "(none)", task->comm, task->pid);
-+		break;
-+	case GR_RESOURCE:
-+		task = va_arg(ap, struct task_struct *);
-+		ulong1 = va_arg(ap, unsigned long);
-+		str1 = va_arg(ap, char *);
-+		ulong2 = va_arg(ap, unsigned long);
-+		gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, task->uid, task->euid, task->gid, task->egid, gr_parent_task_fullpath(task), task->parent->comm, task->parent->pid, task->parent->uid, task->parent->euid, task->parent->gid, task->parent->egid);
-+		break;
-+	case GR_CAP:
-+		task = va_arg(ap, struct task_struct *);
-+		str1 = va_arg(ap, char *);
-+		gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, task->uid, task->euid, task->gid, task->egid, gr_parent_task_fullpath(task), task->parent->comm, task->parent->pid, task->parent->uid, task->parent->euid, task->parent->gid, task->parent->egid);
-+		break;
-+	case GR_SIG:
-+		task = va_arg(ap, struct task_struct *);
-+		num1 = va_arg(ap, int);
-+		gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, task->uid, task->euid, task->gid, task->egid, gr_parent_task_fullpath0(task), task->parent->comm, task->parent->pid, task->parent->uid, task->parent->euid, task->parent->gid, task->parent->egid);
-+		break;
-+	case GR_CRASH1:
-+		task = va_arg(ap, struct task_struct *);
-+		ulong1 = va_arg(ap, unsigned long);
-+		gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, task->uid, task->euid, task->gid, task->egid, gr_parent_task_fullpath(task), task->parent->comm, task->parent->pid, task->parent->uid, task->parent->euid, task->parent->gid, task->parent->egid, task->uid, ulong1);
-+		break;
-+	case GR_CRASH2:
-+		task = va_arg(ap, struct task_struct *);
-+		ulong1 = va_arg(ap, unsigned long);
-+		gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, task->uid, task->euid, task->gid, task->egid, gr_parent_task_fullpath(task), task->parent->comm, task->parent->pid, task->parent->uid, task->parent->euid, task->parent->gid, task->parent->egid, ulong1);
-+		break;
-+	case GR_PSACCT:
-+		{
-+			unsigned int wday, cday;
-+			__u8 whr, chr;
-+			__u8 wmin, cmin;
-+			__u8 wsec, csec;
-+			char cur_tty[64] = { 0 };
-+			char parent_tty[64] = { 0 };
-+
-+			task = va_arg(ap, struct task_struct *);
-+			wday = va_arg(ap, unsigned int);
-+			cday = va_arg(ap, unsigned int);
-+			whr = va_arg(ap, int);
-+			chr = va_arg(ap, int);
-+			wmin = va_arg(ap, int);
-+			cmin = va_arg(ap, int);
-+			wsec = va_arg(ap, int);
-+			csec = va_arg(ap, int);
-+			ulong1 = va_arg(ap, unsigned long);
-+
-+			gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, NIPQUAD(task->signal->curr_ip), tty_name(task->signal->tty, cur_tty), task->uid, task->euid, task->gid, task->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->parent->comm, task->parent->pid, NIPQUAD(task->parent->signal->curr_ip), tty_name(task->parent->signal->tty, parent_tty), task->parent->uid, task->parent->euid, task->parent->gid, task->parent->egid);
-+		}
-+		break;
-+	default:
-+		gr_log_middle(audit, msg, ap);
-+	}
-+	va_end(ap);
-+	gr_log_end(audit);
-+	END_LOCKS(audit);
-+}
---- /dev/null
-+++ b/grsecurity/grsec_mem.c
-@@ -0,0 +1,71 @@
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/mm.h>
-+#include <linux/mman.h>
-+#include <linux/grinternal.h>
-+
-+void
-+gr_handle_ioperm(void)
-+{
-+	gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
-+	return;
-+}
-+
-+void
-+gr_handle_iopl(void)
-+{
-+	gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
-+	return;
-+}
-+
-+void
-+gr_handle_mem_write(void)
-+{
-+	gr_log_noargs(GR_DONT_AUDIT, GR_MEM_WRITE_MSG);
-+	return;
-+}
-+
-+void
-+gr_handle_kmem_write(void)
-+{
-+	gr_log_noargs(GR_DONT_AUDIT, GR_KMEM_MSG);
-+	return;
-+}
-+
-+void
-+gr_handle_open_port(void)
-+{
-+	gr_log_noargs(GR_DONT_AUDIT, GR_PORT_OPEN_MSG);
-+	return;
-+}
-+
-+int
-+gr_handle_mem_mmap(const unsigned long offset, struct vm_area_struct *vma)
-+{
-+	unsigned long start, end;
-+
-+	start = offset;
-+	end = start + vma->vm_end - vma->vm_start;
-+
-+	if (start > end) {
-+		gr_log_noargs(GR_DONT_AUDIT, GR_MEM_MMAP_MSG);
-+		return -EPERM;
-+	}
-+
-+	/* allowed ranges : ISA I/O BIOS */
-+	if ((start >= __pa(high_memory))
-+#ifdef CONFIG_X86
-+	    || (start >= 0x000a0000 && end <= 0x00100000)
-+	    || (start >= 0x00000000 && end <= 0x00001000)
-+#endif
-+	)
-+		return 0;
-+
-+	if (vma->vm_flags & VM_WRITE) {
-+		gr_log_noargs(GR_DONT_AUDIT, GR_MEM_MMAP_MSG);
-+		return -EPERM;
-+	} else
-+		vma->vm_flags &= ~VM_MAYWRITE;
-+
-+	return 0;
-+}
---- /dev/null
-+++ b/grsecurity/grsec_mount.c
-@@ -0,0 +1,34 @@
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/grsecurity.h>
-+#include <linux/grinternal.h>
-+
-+void
-+gr_log_remount(const char *devname, const int retval)
-+{
-+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
-+	if (grsec_enable_mount && (retval >= 0))
-+		gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
-+#endif
-+	return;
-+}
-+
-+void
-+gr_log_unmount(const char *devname, const int retval)
-+{
-+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
-+	if (grsec_enable_mount && (retval >= 0))
-+		gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
-+#endif
-+	return;
-+}
-+
-+void
-+gr_log_mount(const char *from, const char *to, const int retval)
-+{
-+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
-+	if (grsec_enable_mount && (retval >= 0))
-+		gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from, to);
-+#endif
-+	return;
-+}
---- /dev/null
-+++ b/grsecurity/grsec_sig.c
-@@ -0,0 +1,59 @@
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/grsecurity.h>
-+#include <linux/grinternal.h>
-+
-+void
-+gr_log_signal(const int sig, const struct task_struct *t)
-+{
-+#ifdef CONFIG_GRKERNSEC_SIGNAL
-+	if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
-+				    (sig == SIGABRT) || (sig == SIGBUS))) {
-+		if (t->pid == current->pid) {
-+			gr_log_int(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, sig);
-+		} else {
-+			gr_log_sig(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
-+		}
-+	}
-+#endif
-+	return;
-+}
-+
-+int
-+gr_handle_signal(const struct task_struct *p, const int sig)
-+{
-+#ifdef CONFIG_GRKERNSEC
-+	if (current->pid > 1 && gr_check_protected_task(p)) {
-+		gr_log_sig(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
-+		return -EPERM;
-+	} else if (gr_pid_is_chrooted((struct task_struct *)p)) {
-+		return -EPERM;
-+	}
-+#endif
-+	return 0;
-+}
-+
-+void gr_handle_brute_attach(struct task_struct *p)
-+{
-+#ifdef CONFIG_GRKERNSEC_BRUTE
-+	read_lock(&tasklist_lock);
-+	read_lock(&grsec_exec_file_lock);
-+	if (p->parent && p->parent->exec_file == p->exec_file)
-+		p->parent->brute = 1;
-+	read_unlock(&grsec_exec_file_lock);
-+	read_unlock(&tasklist_lock);
-+#endif
-+	return;
-+}
-+
-+void gr_handle_brute_check(void)
-+{
-+#ifdef CONFIG_GRKERNSEC_BRUTE
-+	if (current->brute) {
-+		set_current_state(TASK_UNINTERRUPTIBLE);
-+		schedule_timeout(30 * HZ);
-+	}
-+#endif
-+	return;
-+}
-+
---- /dev/null
-+++ b/grsecurity/grsec_sock.c
-@@ -0,0 +1,263 @@
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/sched.h>
-+#include <linux/file.h>
-+#include <linux/net.h>
-+#include <linux/in.h>
-+#include <linux/ip.h>
-+#include <net/sock.h>
-+#include <net/inet_sock.h>
-+#include <linux/grsecurity.h>
-+#include <linux/grinternal.h>
-+#include <linux/gracl.h>
-+
-+#if defined(CONFIG_IP_NF_MATCH_STEALTH_MODULE)
-+extern struct sock *udp_v4_lookup(u32 saddr, u16 sport, u32 daddr, u16 dport, int dif);
-+EXPORT_SYMBOL(udp_v4_lookup);
-+#endif
-+
-+EXPORT_SYMBOL(gr_cap_rtnetlink);
-+
-+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
-+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
-+
-+EXPORT_SYMBOL(gr_search_udp_recvmsg);
-+EXPORT_SYMBOL(gr_search_udp_sendmsg);
-+
-+#ifdef CONFIG_UNIX_MODULE
-+EXPORT_SYMBOL(gr_acl_handle_unix);
-+EXPORT_SYMBOL(gr_acl_handle_mknod);
-+EXPORT_SYMBOL(gr_handle_chroot_unix);
-+EXPORT_SYMBOL(gr_handle_create);
-+#endif
-+
-+#ifdef CONFIG_GRKERNSEC
-+#define gr_conn_table_size 32749
-+struct conn_table_entry {
-+	struct conn_table_entry *next;
-+	struct signal_struct *sig;
-+};
-+
-+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
-+spinlock_t gr_conn_table_lock = SPIN_LOCK_UNLOCKED;
-+
-+extern const char * gr_socktype_to_name(unsigned char type);
-+extern const char * gr_proto_to_name(unsigned char proto);
-+
-+static __inline__ int 
-+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
-+{
-+	return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
-+}
-+
-+static __inline__ int
-+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr, 
-+	   __u16 sport, __u16 dport)
-+{
-+	if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
-+		     sig->gr_sport == sport && sig->gr_dport == dport))
-+		return 1;
-+	else
-+		return 0;
-+}
-+
-+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
-+{
-+	struct conn_table_entry **match;
-+	unsigned int index;
-+
-+	index = conn_hash(sig->gr_saddr, sig->gr_daddr, 
-+			  sig->gr_sport, sig->gr_dport, 
-+			  gr_conn_table_size);
-+
-+	newent->sig = sig;
-+	
-+	match = &gr_conn_table[index];
-+	newent->next = *match;
-+	*match = newent;
-+
-+	return;
-+}
-+
-+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
-+{
-+	struct conn_table_entry *match, *last = NULL;
-+	unsigned int index;
-+
-+	index = conn_hash(sig->gr_saddr, sig->gr_daddr, 
-+			  sig->gr_sport, sig->gr_dport, 
-+			  gr_conn_table_size);
-+
-+	match = gr_conn_table[index];
-+	while (match && !conn_match(match->sig, 
-+		sig->gr_saddr, sig->gr_daddr, sig->gr_sport, 
-+		sig->gr_dport)) {
-+		last = match;
-+		match = match->next;
-+	}
-+
-+	if (match) {
-+		if (last)
-+			last->next = match->next;
-+		else
-+			gr_conn_table[index] = NULL;
-+		kfree(match);
-+	}
-+
-+	return;
-+}
-+
-+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
-+					     __u16 sport, __u16 dport)
-+{
-+	struct conn_table_entry *match;
-+	unsigned int index;
-+
-+	index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
-+
-+	match = gr_conn_table[index];
-+	while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
-+		match = match->next;
-+
-+	if (match)
-+		return match->sig;
-+	else
-+		return NULL;
-+}
-+
-+#endif
-+
-+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
-+{
-+#ifdef CONFIG_GRKERNSEC
-+	struct signal_struct *sig = task->signal;
-+	struct conn_table_entry *newent;
-+
-+	newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
-+	if (newent == NULL)
-+		return;
-+	/* no bh lock needed since we are called with bh disabled */
-+	spin_lock(&gr_conn_table_lock);
-+	gr_del_task_from_ip_table_nolock(sig);
-+	sig->gr_saddr = inet->rcv_saddr;
-+	sig->gr_daddr = inet->daddr;
-+	sig->gr_sport = inet->sport;
-+	sig->gr_dport = inet->dport;
-+	gr_add_to_task_ip_table_nolock(sig, newent);
-+	spin_unlock(&gr_conn_table_lock);
-+#endif
-+	return;
-+}
-+
-+void gr_del_task_from_ip_table(struct task_struct *task)
-+{
-+#ifdef CONFIG_GRKERNSEC
-+	spin_lock(&gr_conn_table_lock);
-+	gr_del_task_from_ip_table_nolock(task->signal);
-+	spin_unlock(&gr_conn_table_lock);
-+#endif
-+	return;
-+}
-+
-+void
-+gr_attach_curr_ip(const struct sock *sk)
-+{
-+#ifdef CONFIG_GRKERNSEC
-+	struct signal_struct *p, *set;
-+	const struct inet_sock *inet = inet_sk(sk);	
-+
-+	if (unlikely(sk->sk_protocol != IPPROTO_TCP))
-+		return;
-+
-+	set = current->signal;
-+
-+	spin_lock_bh(&gr_conn_table_lock);
-+	p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
-+				    inet->dport, inet->sport);
-+	if (unlikely(p != NULL)) {
-+		set->curr_ip = p->curr_ip;
-+		set->used_accept = 1;
-+		gr_del_task_from_ip_table_nolock(p);
-+		spin_unlock_bh(&gr_conn_table_lock);
-+		return;
-+	}
-+	spin_unlock_bh(&gr_conn_table_lock);
-+
-+	set->curr_ip = inet->daddr;
-+	set->used_accept = 1;
-+#endif
-+	return;
-+}
-+
-+int
-+gr_handle_sock_all(const int family, const int type, const int protocol)
-+{
-+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
-+	if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
-+	    (family != AF_UNIX) && (family != AF_LOCAL)) {
-+		gr_log_int_str2(GR_DONT_AUDIT, GR_SOCK2_MSG, family, gr_socktype_to_name(type), gr_proto_to_name(protocol));
-+		return -EACCES;
-+	}
-+#endif
-+	return 0;
-+}
-+
-+int
-+gr_handle_sock_server(const struct sockaddr *sck)
-+{
-+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
-+	if (grsec_enable_socket_server &&
-+	    in_group_p(grsec_socket_server_gid) &&
-+	    sck && (sck->sa_family != AF_UNIX) &&
-+	    (sck->sa_family != AF_LOCAL)) {
-+		gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
-+		return -EACCES;
-+	}
-+#endif
-+	return 0;
-+}
-+
-+int
-+gr_handle_sock_server_other(const struct sock *sck)
-+{
-+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
-+	if (grsec_enable_socket_server &&
-+	    in_group_p(grsec_socket_server_gid) &&
-+	    sck && (sck->sk_family != AF_UNIX) &&
-+	    (sck->sk_family != AF_LOCAL)) {
-+		gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
-+		return -EACCES;
-+	}
-+#endif
-+	return 0;
-+}
-+
-+int
-+gr_handle_sock_client(const struct sockaddr *sck)
-+{
-+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
-+	if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
-+	    sck && (sck->sa_family != AF_UNIX) &&
-+	    (sck->sa_family != AF_LOCAL)) {
-+		gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
-+		return -EACCES;
-+	}
-+#endif
-+	return 0;
-+}
-+
-+__u32
-+gr_cap_rtnetlink(void)
-+{
-+#ifdef CONFIG_GRKERNSEC
-+	if (!gr_acl_is_enabled())
-+		return current->cap_effective;
-+	else if (cap_raised(current->cap_effective, CAP_NET_ADMIN) &&
-+		 gr_task_is_capable(current, CAP_NET_ADMIN))
-+		return current->cap_effective;
-+	else
-+		return 0;
-+#else
-+	return current->cap_effective;
-+#endif
-+}
---- /dev/null
-+++ b/grsecurity/grsec_sysctl.c
-@@ -0,0 +1,456 @@
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/sysctl.h>
-+#include <linux/grsecurity.h>
-+#include <linux/grinternal.h>
-+
-+#ifdef CONFIG_GRKERNSEC_MODSTOP
-+int grsec_modstop;
-+#endif
-+
-+int
-+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
-+{
-+#ifdef CONFIG_GRKERNSEC_SYSCTL
-+	if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & 002)) {
-+		gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
-+		return -EACCES;
-+	}
-+#endif
-+#ifdef CONFIG_GRKERNSEC_MODSTOP
-+	if (!strcmp(dirname, "grsecurity") && !strcmp(name, "disable_modules") &&
-+	    grsec_modstop && (op & 002)) {
-+		gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
-+		return -EACCES;
-+	}
-+#endif
-+	return 0;
-+}
-+
-+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_MODSTOP)
-+enum {GS_LINK=1, GS_FIFO, GS_EXECVE, GS_EXECLOG, GS_SIGNAL,
-+GS_FORKFAIL, GS_TIME, GS_CHROOT_SHMAT, GS_CHROOT_UNIX, GS_CHROOT_MNT,
-+GS_CHROOT_FCHDIR, GS_CHROOT_DBL, GS_CHROOT_PVT, GS_CHROOT_CD, GS_CHROOT_CM,
-+GS_CHROOT_MK, GS_CHROOT_NI, GS_CHROOT_EXECLOG, GS_CHROOT_CAPS,
-+GS_CHROOT_SYSCTL, GS_TPE, GS_TPE_GID, GS_TPE_ALL, GS_SIDCAPS,
-+GS_SOCKET_ALL, GS_SOCKET_ALL_GID, GS_SOCKET_CLIENT,
-+GS_SOCKET_CLIENT_GID, GS_SOCKET_SERVER, GS_SOCKET_SERVER_GID, 
-+GS_GROUP, GS_GID, GS_ACHDIR, GS_AMOUNT, GS_AIPC, GS_DMSG,
-+GS_TEXTREL, GS_FINDTASK, GS_SHM, GS_LOCK, GS_MODSTOP, GS_RESLOG};
-+
-+
-+ctl_table grsecurity_table[] = {
-+#ifdef CONFIG_GRKERNSEC_SYSCTL
-+#ifdef CONFIG_GRKERNSEC_LINK
-+	{
-+		.ctl_name	= GS_LINK,
-+		.procname	= "linking_restrictions",
-+		.data		= &grsec_enable_link,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+#endif
-+#ifdef CONFIG_GRKERNSEC_FIFO
-+	{
-+		.ctl_name	= GS_FIFO,
-+		.procname	= "fifo_restrictions",
-+		.data		= &grsec_enable_fifo,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+#endif
-+#ifdef CONFIG_GRKERNSEC_EXECVE
-+	{
-+		.ctl_name	= GS_EXECVE,
-+		.procname	= "execve_limiting",
-+		.data		= &grsec_enable_execve,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+#endif
-+#ifdef CONFIG_GRKERNSEC_EXECLOG
-+	{
-+		.ctl_name	= GS_EXECLOG,
-+		.procname	= "exec_logging",
-+		.data		= &grsec_enable_execlog,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+#endif
-+#ifdef CONFIG_GRKERNSEC_SIGNAL
-+	{
-+		.ctl_name	= GS_SIGNAL,
-+		.procname	= "signal_logging",
-+		.data		= &grsec_enable_signal,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+#endif
-+#ifdef CONFIG_GRKERNSEC_FORKFAIL
-+	{
-+		.ctl_name	= GS_FORKFAIL,
-+		.procname	= "forkfail_logging",
-+		.data		= &grsec_enable_forkfail,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+#endif
-+#ifdef CONFIG_GRKERNSEC_TIME
-+	{
-+		.ctl_name	= GS_TIME,
-+		.procname	= "timechange_logging",
-+		.data		= &grsec_enable_time,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
-+	{
-+		.ctl_name	= GS_CHROOT_SHMAT,
-+		.procname	= "chroot_deny_shmat",
-+		.data		= &grsec_enable_chroot_shmat,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
-+	{
-+		.ctl_name	= GS_CHROOT_UNIX,
-+		.procname	= "chroot_deny_unix",
-+		.data		= &grsec_enable_chroot_unix,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
-+	{
-+		.ctl_name	= GS_CHROOT_MNT,
-+		.procname	= "chroot_deny_mount",
-+		.data		= &grsec_enable_chroot_mount,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
-+	{
-+		.ctl_name	= GS_CHROOT_FCHDIR,
-+		.procname	= "chroot_deny_fchdir",
-+		.data		= &grsec_enable_chroot_fchdir,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
-+	{
-+		.ctl_name	= GS_CHROOT_DBL,
-+		.procname	= "chroot_deny_chroot",
-+		.data		= &grsec_enable_chroot_double,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
-+	{
-+		.ctl_name	= GS_CHROOT_PVT,
-+		.procname	= "chroot_deny_pivot",
-+		.data		= &grsec_enable_chroot_pivot,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
-+	{
-+		.ctl_name	= GS_CHROOT_CD,
-+		.procname	= "chroot_enforce_chdir",
-+		.data		= &grsec_enable_chroot_chdir,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
-+	{
-+		.ctl_name	= GS_CHROOT_CM,
-+		.procname	= "chroot_deny_chmod",
-+		.data		= &grsec_enable_chroot_chmod,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
-+	{
-+		.ctl_name	= GS_CHROOT_MK,
-+		.procname	= "chroot_deny_mknod",
-+		.data		= &grsec_enable_chroot_mknod,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
-+	{
-+		.ctl_name	= GS_CHROOT_NI,
-+		.procname	= "chroot_restrict_nice",
-+		.data		= &grsec_enable_chroot_nice,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
-+	{
-+		.ctl_name	= GS_CHROOT_EXECLOG,
-+		.procname	= "chroot_execlog",
-+		.data		= &grsec_enable_chroot_execlog,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
-+	{
-+		.ctl_name	= GS_CHROOT_CAPS,
-+		.procname	= "chroot_caps",
-+		.data		= &grsec_enable_chroot_caps,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
-+	{
-+		.ctl_name	= GS_CHROOT_SYSCTL,
-+		.procname	= "chroot_deny_sysctl",
-+		.data		= &grsec_enable_chroot_sysctl,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+#endif
-+#ifdef CONFIG_GRKERNSEC_TPE
-+	{
-+		.ctl_name	= GS_TPE,
-+		.procname	= "tpe",
-+		.data		= &grsec_enable_tpe,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+	{
-+		.ctl_name	= GS_TPE_GID,
-+		.procname	= "tpe_gid",
-+		.data		= &grsec_tpe_gid,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+#endif
-+#ifdef CONFIG_GRKERNSEC_TPE_ALL
-+	{
-+		.ctl_name	= GS_TPE_ALL,
-+		.procname	= "tpe_restrict_all",
-+		.data		= &grsec_enable_tpe_all,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+#endif
-+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
-+	{
-+		.ctl_name	= GS_SOCKET_ALL,
-+		.procname	= "socket_all",
-+		.data		= &grsec_enable_socket_all,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+	{
-+		.ctl_name	= GS_SOCKET_ALL_GID,
-+		.procname	= "socket_all_gid",
-+		.data		= &grsec_socket_all_gid,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+#endif
-+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
-+	{
-+		.ctl_name	= GS_SOCKET_CLIENT,
-+		.procname	= "socket_client",
-+		.data		= &grsec_enable_socket_client,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+	{
-+		.ctl_name	= GS_SOCKET_CLIENT_GID,
-+		.procname	= "socket_client_gid",
-+		.data		= &grsec_socket_client_gid,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+#endif
-+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
-+	{
-+		.ctl_name	= GS_SOCKET_SERVER,
-+		.procname	= "socket_server",
-+		.data		= &grsec_enable_socket_server,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+	{
-+		.ctl_name	= GS_SOCKET_SERVER_GID,
-+		.procname	= "socket_server_gid",
-+		.data		= &grsec_socket_server_gid,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+#endif
-+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
-+	{
-+		.ctl_name	= GS_GROUP,
-+		.procname	= "audit_group",
-+		.data		= &grsec_enable_group,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+	{
-+		.ctl_name	= GS_GID,
-+		.procname	= "audit_gid",
-+		.data		= &grsec_audit_gid,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+#endif
-+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
-+	{
-+		.ctl_name	= GS_ACHDIR,
-+		.procname	= "audit_chdir",
-+		.data		= &grsec_enable_chdir,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+#endif
-+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
-+	{
-+		.ctl_name	= GS_AMOUNT,
-+		.procname	= "audit_mount",
-+		.data		= &grsec_enable_mount,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+#endif
-+#ifdef CONFIG_GRKERNSEC_AUDIT_IPC
-+	{
-+		.ctl_name	= GS_AIPC,
-+		.procname	= "audit_ipc",
-+		.data		= &grsec_enable_audit_ipc,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+#endif
-+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
-+	{
-+		.ctl_name	= GS_TEXTREL,
-+		.procname	= "audit_textrel",
-+		.data		= &grsec_enable_audit_textrel,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+#endif
-+#ifdef CONFIG_GRKERNSEC_DMESG
-+	{
-+		.ctl_name	= GS_DMSG,
-+		.procname	= "dmesg",
-+		.data		= &grsec_enable_dmesg,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+#endif
-+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
-+	{
-+		.ctl_name	= GS_FINDTASK,
-+		.procname	= "chroot_findtask",
-+		.data		= &grsec_enable_chroot_findtask,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+#endif
-+#ifdef CONFIG_GRKERNSEC_SHM
-+	{
-+		.ctl_name	= GS_SHM,
-+		.procname	= "destroy_unused_shm",
-+		.data		= &grsec_enable_shm,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+#endif
-+#ifdef CONFIG_GRKERNSEC_RESLOG
-+	{
-+		.ctl_name	= GS_RESLOG,
-+		.procname	= "resource_logging",
-+		.data		= &grsec_resource_logging,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+#endif
-+	{
-+		.ctl_name	= GS_LOCK,
-+		.procname	= "grsec_lock",
-+		.data		= &grsec_lock,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+#endif
-+#ifdef CONFIG_GRKERNSEC_MODSTOP
-+	{
-+		.ctl_name	= GS_MODSTOP,
-+		.procname	= "disable_modules",
-+		.data		= &grsec_modstop,
-+		.maxlen		= sizeof(int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+#endif
-+	{ .ctl_name = 0 }
-+};
-+#endif
-+
-+int gr_check_modstop(void)
-+{
-+#ifdef CONFIG_GRKERNSEC_MODSTOP
-+	if (grsec_modstop == 1) {
-+		gr_log_noargs(GR_DONT_AUDIT, GR_STOPMOD_MSG);
-+		return 1;
-+	}
-+#endif
-+	return 0;
-+}
---- /dev/null
-+++ b/grsecurity/grsec_textrel.c
-@@ -0,0 +1,16 @@
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/mm.h>
-+#include <linux/file.h>
-+#include <linux/grinternal.h>
-+#include <linux/grsecurity.h>
-+
-+void
-+gr_log_textrel(struct vm_area_struct * vma)
-+{
-+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
-+	if (grsec_enable_audit_textrel)
-+		gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
-+#endif
-+	return;
-+}
---- /dev/null
-+++ b/grsecurity/grsec_time.c
-@@ -0,0 +1,13 @@
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/grinternal.h>
-+
-+void
-+gr_log_timechange(void)
-+{
-+#ifdef CONFIG_GRKERNSEC_TIME
-+	if (grsec_enable_time)
-+		gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
-+#endif
-+	return;
-+}
---- /dev/null
-+++ b/grsecurity/grsec_tpe.c
-@@ -0,0 +1,37 @@
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/file.h>
-+#include <linux/fs.h>
-+#include <linux/grinternal.h>
-+
-+extern int gr_acl_tpe_check(void);
-+
-+int
-+gr_tpe_allow(const struct file *file)
-+{
-+#ifdef CONFIG_GRKERNSEC
-+	struct inode *inode = file->f_dentry->d_parent->d_inode;
-+
-+	if (current->uid && ((grsec_enable_tpe &&
-+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
-+	    !in_group_p(grsec_tpe_gid)
-+#else
-+	    in_group_p(grsec_tpe_gid)
-+#endif
-+	    ) || gr_acl_tpe_check()) &&
-+	    (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
-+						(inode->i_mode & S_IWOTH))))) {
-+		gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_dentry, file->f_vfsmnt);
-+		return 0;
-+	}
-+#ifdef CONFIG_GRKERNSEC_TPE_ALL
-+	if (current->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
-+	    ((inode->i_uid && (inode->i_uid != current->uid)) ||
-+	     (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
-+		gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_dentry, file->f_vfsmnt);
-+		return 0;
-+	}
-+#endif
-+#endif
-+	return 1;
-+}
---- /dev/null
-+++ b/grsecurity/grsum.c
-@@ -0,0 +1,59 @@
-+#include <linux/err.h>
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/mm.h>
-+#include <linux/scatterlist.h>
-+#include <linux/crypto.h>
-+#include <linux/gracl.h>
-+
-+
-+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
-+#error "crypto and sha256 must be built into the kernel"
-+#endif
-+
-+int
-+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
-+{
-+	char *p;
-+	struct crypto_hash *tfm;
-+	struct hash_desc desc;
-+	struct scatterlist sg;
-+	unsigned char temp_sum[GR_SHA_LEN];
-+	volatile int retval = 0;
-+	volatile int dummy = 0;
-+	unsigned int i;
-+
-+	tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
-+	if (IS_ERR(tfm)) {
-+		/* should never happen, since sha256 should be built in */
-+		return 1;
-+	}
-+
-+	desc.tfm = tfm;
-+	desc.flags = 0;
-+
-+	crypto_hash_init(&desc);
-+
-+	p = salt;
-+	sg_set_buf(&sg, p, GR_SALT_LEN);
-+	crypto_hash_update(&desc, &sg, sg.length);
-+
-+	p = entry->pw;
-+	sg_set_buf(&sg, p, strlen(p));
-+	
-+	crypto_hash_update(&desc, &sg, sg.length);
-+
-+	crypto_hash_final(&desc, temp_sum);
-+
-+	memset(entry->pw, 0, GR_PW_LEN);
-+
-+	for (i = 0; i < GR_SHA_LEN; i++)
-+		if (sum[i] != temp_sum[i])
-+			retval = 1;
-+		else
-+			dummy = 1;	// waste a cycle
-+
-+	crypto_free_hash(tfm);
-+
-+	return retval;
-+}
---- /dev/null
-+++ b/grsecurity/Kconfig
-@@ -0,0 +1,873 @@
-+#
-+# grecurity configuration
-+#
-+
-+menu "Grsecurity"
-+
-+config GRKERNSEC
-+	bool "Grsecurity"
-+	select CRYPTO
-+	select CRYPTO_SHA256
-+	help
-+	  If you say Y here, you will be able to configure many features
-+	  that will enhance the security of your system.  It is highly
-+	  recommended that you say Y here and read through the help
-+	  for each option so that you fully understand the features and
-+	  can evaluate their usefulness for your machine.
-+
-+choice
-+	prompt "Security Level"
-+	depends GRKERNSEC
-+	default GRKERNSEC_CUSTOM
-+
-+config GRKERNSEC_LOW
-+	bool "Low"
-+	select GRKERNSEC_LINK
-+	select GRKERNSEC_FIFO
-+	select GRKERNSEC_EXECVE
-+	select GRKERNSEC_RANDNET
-+	select GRKERNSEC_DMESG
-+	select GRKERNSEC_CHROOT_CHDIR
-+	select GRKERNSEC_MODSTOP if (MODULES)
-+
-+	help
-+	  If you choose this option, several of the grsecurity options will
-+	  be enabled that will give you greater protection against a number
-+	  of attacks, while assuring that none of your software will have any
-+	  conflicts with the additional security measures.  If you run a lot
-+	  of unusual software, or you are having problems with the higher
-+	  security levels, you should say Y here.  With this option, the
-+	  following features are enabled:
-+
-+	  - Linking restrictions
-+	  - FIFO restrictions
-+	  - Enforcing RLIMIT_NPROC on execve
-+	  - Restricted dmesg
-+	  - Enforced chdir("/") on chroot
-+	  - Runtime module disabling
-+
-+config GRKERNSEC_MEDIUM
-+	bool "Medium"
-+	select PAX
-+	select PAX_EI_PAX
-+	select PAX_PT_PAX_FLAGS
-+	select PAX_HAVE_ACL_FLAGS
-+	select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
-+	select GRKERNSEC_CHROOT_SYSCTL
-+	select GRKERNSEC_LINK
-+	select GRKERNSEC_FIFO
-+	select GRKERNSEC_EXECVE
-+	select GRKERNSEC_DMESG
-+	select GRKERNSEC_RANDNET
-+	select GRKERNSEC_FORKFAIL
-+	select GRKERNSEC_TIME
-+	select GRKERNSEC_SIGNAL
-+	select GRKERNSEC_CHROOT
-+	select GRKERNSEC_CHROOT_UNIX
-+	select GRKERNSEC_CHROOT_MOUNT
-+	select GRKERNSEC_CHROOT_PIVOT
-+	select GRKERNSEC_CHROOT_DOUBLE
-+	select GRKERNSEC_CHROOT_CHDIR
-+	select GRKERNSEC_CHROOT_MKNOD
-+	select GRKERNSEC_PROC
-+	select GRKERNSEC_PROC_USERGROUP
-+	select GRKERNSEC_MODSTOP if (MODULES)
-+	select PAX_RANDUSTACK
-+	select PAX_ASLR
-+	select PAX_RANDMMAP
-+
-+	help
-+	  If you say Y here, several features in addition to those included
-+	  in the low additional security level will be enabled.  These
-+	  features provide even more security to your system, though in rare
-+	  cases they may be incompatible with very old or poorly written
-+	  software.  If you enable this option, make sure that your auth
-+	  service (identd) is running as gid 1001.  With this option, 
-+	  the following features (in addition to those provided in the 
-+	  low additional security level) will be enabled:
-+
-+	  - Randomized TCP source ports
-+	  - Failed fork logging
-+	  - Time change logging
-+	  - Signal logging
-+	  - Deny mounts in chroot
-+	  - Deny double chrooting
-+	  - Deny sysctl writes in chroot
-+	  - Deny mknod in chroot
-+	  - Deny access to abstract AF_UNIX sockets out of chroot
-+	  - Deny pivot_root in chroot
-+	  - Denied writes of /dev/kmem, /dev/mem, and /dev/port
-+	  - /proc restrictions with special GID set to 10 (usually wheel)
-+	  - Address Space Layout Randomization (ASLR)
-+
-+config GRKERNSEC_HIGH
-+	bool "High"
-+	select GRKERNSEC_LINK
-+	select GRKERNSEC_FIFO
-+	select GRKERNSEC_EXECVE
-+	select GRKERNSEC_DMESG
-+	select GRKERNSEC_FORKFAIL
-+	select GRKERNSEC_TIME
-+	select GRKERNSEC_SIGNAL
-+	select GRKERNSEC_CHROOT_SHMAT
-+	select GRKERNSEC_CHROOT_UNIX
-+	select GRKERNSEC_CHROOT_MOUNT
-+	select GRKERNSEC_CHROOT_FCHDIR
-+	select GRKERNSEC_CHROOT_PIVOT
-+	select GRKERNSEC_CHROOT_DOUBLE
-+	select GRKERNSEC_CHROOT_CHDIR
-+	select GRKERNSEC_CHROOT_MKNOD
-+	select GRKERNSEC_CHROOT_CAPS
-+	select GRKERNSEC_CHROOT_SYSCTL
-+	select GRKERNSEC_CHROOT_FINDTASK
-+	select GRKERNSEC_PROC
-+	select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
-+	select GRKERNSEC_HIDESYM
-+	select GRKERNSEC_BRUTE
-+	select GRKERNSEC_SHM if (SYSVIPC)
-+	select GRKERNSEC_PROC_USERGROUP
-+	select GRKERNSEC_KMEM
-+	select GRKERNSEC_RESLOG
-+	select GRKERNSEC_RANDNET
-+	select GRKERNSEC_PROC_ADD
-+	select GRKERNSEC_CHROOT_CHMOD
-+	select GRKERNSEC_CHROOT_NICE
-+	select GRKERNSEC_AUDIT_MOUNT
-+	select GRKERNSEC_MODSTOP if (MODULES)
-+	select PAX
-+	select PAX_RANDUSTACK
-+	select PAX_ASLR
-+	select PAX_RANDMMAP
-+	select PAX_NOEXEC
-+	select PAX_MPROTECT
-+	select PAX_EI_PAX
-+	select PAX_PT_PAX_FLAGS
-+	select PAX_HAVE_ACL_FLAGS
-+	select PAX_KERNEXEC if (!X86_64 && !EFI && !COMPAT_VDSO && !PARAVIRT && X86_WP_WORKS_OK)
-+	select PAX_MEMORY_UDEREF if (!X86_64 && !COMPAT_VDSO)
-+	select PAX_RANDKSTACK if (X86_TSC && !X86_64)
-+	select PAX_SEGMEXEC if (X86 && !X86_64)
-+	select PAX_PAGEEXEC if (!X86)
-+	select PAX_EMUPLT if (ALPHA || PARISC || PPC32 || SPARC32 || SPARC64)
-+	select PAX_DLRESOLVE if (SPARC32 || SPARC64)
-+	select PAX_SYSCALL if (PPC32)
-+	select PAX_EMUTRAMP if (PARISC)
-+	select PAX_EMUSIGRT if (PARISC)
-+	select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
-+	help
-+	  If you say Y here, many of the features of grsecurity will be
-+	  enabled, which will protect you against many kinds of attacks
-+	  against your system.  The heightened security comes at a cost
-+	  of an increased chance of incompatibilities with rare software
-+	  on your machine.  Since this security level enables PaX, you should
-+	  view <http://pax.grsecurity.net> and read about the PaX
-+	  project.  While you are there, download chpax and run it on
-+	  binaries that cause problems with PaX.  Also remember that
-+	  since the /proc restrictions are enabled, you must run your
-+	  identd as gid 1001.  This security level enables the following 
-+	  features in addition to those listed in the low and medium 
-+	  security levels:
-+
-+	  - Additional /proc restrictions
-+	  - Chmod restrictions in chroot
-+	  - No signals, ptrace, or viewing of processes outside of chroot
-+	  - Capability restrictions in chroot
-+	  - Deny fchdir out of chroot
-+	  - Priority restrictions in chroot
-+	  - Segmentation-based implementation of PaX
-+	  - Mprotect restrictions
-+	  - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
-+	  - Kernel stack randomization
-+	  - Mount/unmount/remount logging
-+	  - Kernel symbol hiding
-+	  - Destroy unused shared memory	
-+	  - Prevention of memory exhaustion-based exploits
-+config GRKERNSEC_CUSTOM
-+	bool "Custom"
-+	help
-+	  If you say Y here, you will be able to configure every grsecurity
-+	  option, which allows you to enable many more features that aren't
-+	  covered in the basic security levels.  These additional features
-+	  include TPE, socket restrictions, and the sysctl system for
-+	  grsecurity.  It is advised that you read through the help for
-+	  each option to determine its usefulness in your situation.
-+
-+endchoice
-+
-+menu "Address Space Protection"
-+depends on GRKERNSEC
-+
-+config GRKERNSEC_KMEM
-+	bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
-+	help
-+	  If you say Y here, /dev/kmem and /dev/mem won't be allowed to
-+	  be written to via mmap or otherwise to modify the running kernel.
-+	  /dev/port will also not be allowed to be opened. If you have module
-+	  support disabled, enabling this will close up four ways that are
-+	  currently used  to insert malicious code into the running kernel.
-+	  Even with all these features enabled, we still highly recommend that
-+	  you use the RBAC system, as it is still possible for an attacker to
-+	  modify the running kernel through privileged I/O granted by ioperm/iopl.
-+	  If you are not using XFree86, you may be able to stop this additional
-+	  case by enabling the 'Disable privileged I/O' option. Though nothing
-+	  legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
-+	  but only to video memory, which is the only writing we allow in this
-+	  case.  If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
-+	  not be allowed to mprotect it with PROT_WRITE later.
-+	  It is highly recommended that you say Y here if you meet all the
-+	  conditions above.
-+
-+config GRKERNSEC_IO
-+	bool "Disable privileged I/O"
-+	depends on X86
-+	select RTC
-+	help
-+	  If you say Y here, all ioperm and iopl calls will return an error.
-+	  Ioperm and iopl can be used to modify the running kernel.
-+	  Unfortunately, some programs need this access to operate properly,
-+	  the most notable of which are XFree86 and hwclock.  hwclock can be
-+	  remedied by having RTC support in the kernel, so CONFIG_RTC is
-+	  enabled if this option is enabled, to ensure that hwclock operates
-+	  correctly.  XFree86 still will not operate correctly with this option
-+	  enabled, so DO NOT CHOOSE Y IF YOU USE XFree86.  If you use XFree86
-+	  and you still want to protect your kernel against modification,
-+	  use the RBAC system.
-+
-+config GRKERNSEC_PROC_MEMMAP
-+	bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
-+	depends on PAX_NOEXEC || PAX_ASLR
-+	help
-+	  If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
-+	  give no information about the addresses of its mappings if
-+	  PaX features that rely on random addresses are enabled on the task.
-+	  If you use PaX it is greatly recommended that you say Y here as it
-+	  closes up a hole that makes the full ASLR useless for suid
-+	  binaries.
-+
-+config GRKERNSEC_BRUTE
-+	bool "Deter exploit bruteforcing"
-+	help
-+	  If you say Y here, attempts to bruteforce exploits against forking
-+	  daemons such as apache or sshd will be deterred.  When a child of a
-+	  forking daemon is killed by PaX or crashes due to an illegal
-+	  instruction, the parent process will be delayed 30 seconds upon every
-+	  subsequent fork until the administrator is able to assess the
-+	  situation and restart the daemon.  It is recommended that you also
-+	  enable signal logging in the auditing section so that logs are
-+	  generated when a process performs an illegal instruction.
-+
-+config GRKERNSEC_MODSTOP
-+	bool "Runtime module disabling"
-+	depends on MODULES
-+	help
-+	  If you say Y here, you will be able to disable the ability to (un)load
-+	  modules at runtime.  This feature is useful if you need the ability
-+	  to load kernel modules at boot time, but do not want to allow an
-+	  attacker to load a rootkit kernel module into the system, or to remove
-+	  a loaded kernel module important to system functioning.  You should
-+	  enable the /dev/mem protection feature as well, since rootkits can be
-+	  inserted into the kernel via other methods than kernel modules.  Since
-+	  an untrusted module could still be loaded by modifying init scripts and
-+	  rebooting the system, it is also recommended that you enable the RBAC
-+	  system.  If you enable this option, a sysctl option with name
-+	  "disable_modules" will be created.  Setting this option to "1" disables
-+	  module loading.  After this option is set, no further writes to it are
-+	  allowed until the system is rebooted.
-+
-+config GRKERNSEC_HIDESYM
-+	bool "Hide kernel symbols"
-+	help
-+	  If you say Y here, getting information on loaded modules, and
-+	  displaying all kernel symbols through a syscall will be restricted
-+	  to users with CAP_SYS_MODULE.  This option is only effective
-+	  provided the following conditions are met:
-+	  1) The kernel using grsecurity is not precompiled by some distribution
-+	  2) You are using the RBAC system and hiding other files such as your
-+	     kernel image and System.map
-+	  3) You have the additional /proc restrictions enabled, which removes
-+	     /proc/kcore
-+	  If the above conditions are met, this option will aid to provide a
-+	  useful protection against local and remote kernel exploitation of
-+	  overflows and arbitrary read/write vulnerabilities.
-+
-+endmenu
-+menu "Role Based Access Control Options"
-+depends on GRKERNSEC
-+
-+config GRKERNSEC_ACL_HIDEKERN
-+	bool "Hide kernel processes"
-+	help
-+	  If you say Y here, all kernel threads will be hidden to all
-+	  processes but those whose subject has the "view hidden processes"
-+	  flag.
-+
-+config GRKERNSEC_ACL_MAXTRIES
-+	int "Maximum tries before password lockout"
-+	default 3
-+	help
-+	  This option enforces the maximum number of times a user can attempt
-+	  to authorize themselves with the grsecurity RBAC system before being
-+	  denied the ability to attempt authorization again for a specified time.
-+	  The lower the number, the harder it will be to brute-force a password.
-+
-+config GRKERNSEC_ACL_TIMEOUT
-+	int "Time to wait after max password tries, in seconds"
-+	default 30
-+	help
-+	  This option specifies the time the user must wait after attempting to
-+	  authorize to the RBAC system with the maximum number of invalid
-+	  passwords.  The higher the number, the harder it will be to brute-force
-+	  a password.
-+
-+endmenu
-+menu "Filesystem Protections"
-+depends on GRKERNSEC
-+
-+config GRKERNSEC_PROC
-+	bool "Proc restrictions"
-+	help
-+	  If you say Y here, the permissions of the /proc filesystem
-+	  will be altered to enhance system security and privacy.  You MUST
-+  	  choose either a user only restriction or a user and group restriction.
-+	  Depending upon the option you choose, you can either restrict users to
-+	  see only the processes they themselves run, or choose a group that can
-+	  view all processes and files normally restricted to root if you choose
-+	  the "restrict to user only" option.  NOTE: If you're running identd as
-+	  a non-root user, you will have to run it as the group you specify here.
-+
-+config GRKERNSEC_PROC_USER
-+	bool "Restrict /proc to user only"
-+	depends on GRKERNSEC_PROC
-+	help
-+	  If you say Y here, non-root users will only be able to view their own
-+	  processes, and restricts them from viewing network-related information,
-+	  and viewing kernel symbol and module information.
-+
-+config GRKERNSEC_PROC_USERGROUP
-+	bool "Allow special group"
-+	depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
-+	help
-+	  If you say Y here, you will be able to select a group that will be
-+	  able to view all processes, network-related information, and
-+	  kernel and symbol information.  This option is useful if you want
-+	  to run identd as a non-root user.
-+
-+config GRKERNSEC_PROC_GID
-+	int "GID for special group"
-+	depends on GRKERNSEC_PROC_USERGROUP
-+	default 1001
-+
-+config GRKERNSEC_PROC_ADD
-+	bool "Additional restrictions"
-+	depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
-+	help
-+	  If you say Y here, additional restrictions will be placed on
-+	  /proc that keep normal users from viewing device information and 
-+	  slabinfo information that could be useful for exploits.
-+
-+config GRKERNSEC_LINK
-+	bool "Linking restrictions"
-+	help
-+	  If you say Y here, /tmp race exploits will be prevented, since users
-+	  will no longer be able to follow symlinks owned by other users in
-+	  world-writable +t directories (i.e. /tmp), unless the owner of the
-+	  symlink is the owner of the directory.  Users will also not be
-+	  able to hardlink to files they do not own.  If the sysctl option is
-+	  enabled, a sysctl option with name "linking_restrictions" is created.
-+
-+config GRKERNSEC_FIFO
-+	bool "FIFO restrictions"
-+	help
-+	  If you say Y here, users will not be able to write to FIFOs they don't
-+	  own in world-writable +t directories (i.e. /tmp), unless the owner of
-+	  the FIFO is the same as the owner of the directory it's held in.  If the sysctl
-+	  option is enabled, a sysctl option with name "fifo_restrictions" is
-+	  created.
-+
-+config GRKERNSEC_CHROOT
-+	bool "Chroot jail restrictions"
-+	help
-+	  If you say Y here, you will be able to choose several options that will
-+	  make breaking out of a chrooted jail much more difficult.  If you
-+	  encounter no software incompatibilities with the following options, it
-+	  is recommended that you enable each one.
-+
-+config GRKERNSEC_CHROOT_MOUNT
-+	bool "Deny mounts"
-+	depends on GRKERNSEC_CHROOT
-+	help
-+	  If you say Y here, processes inside a chroot will not be able to
-+	  mount or remount filesystems.  If the sysctl option is enabled, a
-+	  sysctl option with name "chroot_deny_mount" is created.
-+
-+config GRKERNSEC_CHROOT_DOUBLE
-+	bool "Deny double-chroots"
-+	depends on GRKERNSEC_CHROOT
-+	help
-+	  If you say Y here, processes inside a chroot will not be able to chroot
-+	  again outside the chroot.  This is a widely used method of breaking
-+	  out of a chroot jail and should not be allowed.  If the sysctl 
-+	  option is enabled, a sysctl option with name 
-+	  "chroot_deny_chroot" is created.
-+
-+config GRKERNSEC_CHROOT_PIVOT
-+	bool "Deny pivot_root in chroot"
-+	depends on GRKERNSEC_CHROOT
-+	help
-+	  If you say Y here, processes inside a chroot will not be able to use
-+	  a function called pivot_root() that was introduced in Linux 2.3.41.  It
-+	  works similar to chroot in that it changes the root filesystem.  This
-+	  function could be misused in a chrooted process to attempt to break out
-+	  of the chroot, and therefore should not be allowed.  If the sysctl
-+	  option is enabled, a sysctl option with name "chroot_deny_pivot" is
-+	  created.
-+
-+config GRKERNSEC_CHROOT_CHDIR
-+	bool "Enforce chdir(\"/\") on all chroots"
-+	depends on GRKERNSEC_CHROOT
-+	help
-+	  If you say Y here, the current working directory of all newly-chrooted
-+	  applications will be set to the root directory of the chroot.
-+	  The man page on chroot(2) states:
-+	  Note that this call does not change  the  current  working
-+	  directory,  so  that `.' can be outside the tree rooted at
-+	  `/'.  In particular, the  super-user  can  escape  from  a
-+	  `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
-+
-+	  It is recommended that you say Y here, since it's not known to break
-+	  any software.  If the sysctl option is enabled, a sysctl option with
-+	  name "chroot_enforce_chdir" is created.
-+
-+config GRKERNSEC_CHROOT_CHMOD
-+	bool "Deny (f)chmod +s"
-+	depends on GRKERNSEC_CHROOT
-+	help
-+	  If you say Y here, processes inside a chroot will not be able to chmod
-+	  or fchmod files to make them have suid or sgid bits.  This protects
-+	  against another published method of breaking a chroot.  If the sysctl
-+	  option is enabled, a sysctl option with name "chroot_deny_chmod" is
-+	  created.
-+
-+config GRKERNSEC_CHROOT_FCHDIR
-+	bool "Deny fchdir out of chroot"
-+	depends on GRKERNSEC_CHROOT
-+	help
-+	  If you say Y here, a well-known method of breaking chroots by fchdir'ing
-+	  to a file descriptor of the chrooting process that points to a directory
-+	  outside the filesystem will be stopped.  If the sysctl option
-+	  is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
-+
-+config GRKERNSEC_CHROOT_MKNOD
-+	bool "Deny mknod"
-+	depends on GRKERNSEC_CHROOT
-+	help
-+	  If you say Y here, processes inside a chroot will not be allowed to
-+	  mknod.  The problem with using mknod inside a chroot is that it
-+	  would allow an attacker to create a device entry that is the same
-+	  as one on the physical root of your system, which could range from
-+	  anything from the console device to a device for your harddrive (which
-+	  they could then use to wipe the drive or steal data).  It is recommended
-+	  that you say Y here, unless you run into software incompatibilities.
-+	  If the sysctl option is enabled, a sysctl option with name
-+	  "chroot_deny_mknod" is created.
-+
-+config GRKERNSEC_CHROOT_SHMAT
-+	bool "Deny shmat() out of chroot"
-+	depends on GRKERNSEC_CHROOT
-+	help
-+	  If you say Y here, processes inside a chroot will not be able to attach
-+	  to shared memory segments that were created outside of the chroot jail.
-+	  It is recommended that you say Y here.  If the sysctl option is enabled,
-+	  a sysctl option with name "chroot_deny_shmat" is created.
-+
-+config GRKERNSEC_CHROOT_UNIX
-+	bool "Deny access to abstract AF_UNIX sockets out of chroot"
-+	depends on GRKERNSEC_CHROOT
-+	help
-+	  If you say Y here, processes inside a chroot will not be able to
-+	  connect to abstract (meaning not belonging to a filesystem) Unix
-+	  domain sockets that were bound outside of a chroot.  It is recommended
-+	  that you say Y here.  If the sysctl option is enabled, a sysctl option
-+	  with name "chroot_deny_unix" is created.
-+
-+config GRKERNSEC_CHROOT_FINDTASK
-+	bool "Protect outside processes"
-+	depends on GRKERNSEC_CHROOT
-+	help
-+	  If you say Y here, processes inside a chroot will not be able to
-+	  kill, send signals with fcntl, ptrace, capget, getpgid, getsid,
-+	  or view any process outside of the chroot.  If the sysctl
-+	  option is enabled, a sysctl option with name "chroot_findtask" is
-+	  created.
-+
-+config GRKERNSEC_CHROOT_NICE
-+	bool "Restrict priority changes"
-+	depends on GRKERNSEC_CHROOT
-+	help
-+	  If you say Y here, processes inside a chroot will not be able to raise
-+	  the priority of processes in the chroot, or alter the priority of
-+	  processes outside the chroot.  This provides more security than simply
-+	  removing CAP_SYS_NICE from the process' capability set.  If the
-+	  sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
-+	  is created.
-+
-+config GRKERNSEC_CHROOT_SYSCTL
-+	bool "Deny sysctl writes"
-+	depends on GRKERNSEC_CHROOT
-+	help
-+	  If you say Y here, an attacker in a chroot will not be able to
-+	  write to sysctl entries, either by sysctl(2) or through a /proc
-+	  interface.  It is strongly recommended that you say Y here. If the
-+	  sysctl option is enabled, a sysctl option with name
-+	  "chroot_deny_sysctl" is created.
-+
-+config GRKERNSEC_CHROOT_CAPS
-+	bool "Capability restrictions"
-+	depends on GRKERNSEC_CHROOT
-+	help
-+	  If you say Y here, the capabilities on all root processes within a
-+	  chroot jail will be lowered to stop module insertion, raw i/o,
-+	  system and net admin tasks, rebooting the system, modifying immutable
-+	  files, modifying IPC owned by another, and changing the system time.
-+	  This is left an option because it can break some apps.  Disable this
-+	  if your chrooted apps are having problems performing those kinds of
-+	  tasks.  If the sysctl option is enabled, a sysctl option with
-+	  name "chroot_caps" is created.
-+
-+endmenu
-+menu "Kernel Auditing"
-+depends on GRKERNSEC
-+
-+config GRKERNSEC_AUDIT_GROUP
-+	bool "Single group for auditing"
-+	help
-+	  If you say Y here, the exec, chdir, (un)mount, and ipc logging features
-+	  will only operate on a group you specify.  This option is recommended
-+	  if you only want to watch certain users instead of having a large
-+	  amount of logs from the entire system.  If the sysctl option is enabled,
-+	  a sysctl option with name "audit_group" is created.
-+
-+config GRKERNSEC_AUDIT_GID
-+	int "GID for auditing"
-+	depends on GRKERNSEC_AUDIT_GROUP
-+	default 1007
-+
-+config GRKERNSEC_EXECLOG
-+	bool "Exec logging"
-+	help
-+	  If you say Y here, all execve() calls will be logged (since the
-+	  other exec*() calls are frontends to execve(), all execution
-+	  will be logged).  Useful for shell-servers that like to keep track
-+	  of their users.  If the sysctl option is enabled, a sysctl option with
-+	  name "exec_logging" is created.
-+	  WARNING: This option when enabled will produce a LOT of logs, especially
-+	  on an active system.
-+
-+config GRKERNSEC_RESLOG
-+	bool "Resource logging"
-+	help
-+	  If you say Y here, all attempts to overstep resource limits will
-+	  be logged with the resource name, the requested size, and the current
-+	  limit.  It is highly recommended that you say Y here.  If the sysctl
-+	  option is enabled, a sysctl option with name "resource_logging" is
-+	  created.  If the RBAC system is enabled, the sysctl value is ignored.
-+
-+config GRKERNSEC_CHROOT_EXECLOG
-+	bool "Log execs within chroot"
-+	help
-+	  If you say Y here, all executions inside a chroot jail will be logged
-+	  to syslog.  This can cause a large amount of logs if certain
-+	  applications (eg. djb's daemontools) are installed on the system, and
-+	  is therefore left as an option.  If the sysctl option is enabled, a
-+	  sysctl option with name "chroot_execlog" is created.
-+
-+config GRKERNSEC_AUDIT_CHDIR
-+	bool "Chdir logging"
-+	help
-+	  If you say Y here, all chdir() calls will be logged.  If the sysctl
-+ 	  option is enabled, a sysctl option with name "audit_chdir" is created.
-+
-+config GRKERNSEC_AUDIT_MOUNT
-+	bool "(Un)Mount logging"
-+	help
-+	  If you say Y here, all mounts and unmounts will be logged.  If the
-+	  sysctl option is enabled, a sysctl option with name "audit_mount" is
-+	  created.
-+
-+config GRKERNSEC_AUDIT_IPC
-+	bool "IPC logging"
-+	help
-+	  If you say Y here, creation and removal of message queues, semaphores,
-+	  and shared memory will be logged.  If the sysctl option is enabled, a
-+	  sysctl option with name "audit_ipc" is created.
-+
-+config GRKERNSEC_SIGNAL
-+	bool "Signal logging"
-+	help
-+	  If you say Y here, certain important signals will be logged, such as
-+	  SIGSEGV, which will as a result inform you of when an error in a program
-+	  occurred, which in some cases could mean a possible exploit attempt.
-+	  If the sysctl option is enabled, a sysctl option with name
-+	  "signal_logging" is created.
-+
-+config GRKERNSEC_FORKFAIL
-+	bool "Fork failure logging"
-+	help
-+	  If you say Y here, all failed fork() attempts will be logged.
-+	  This could suggest a fork bomb, or someone attempting to overstep
-+	  their process limit.  If the sysctl option is enabled, a sysctl option
-+	  with name "forkfail_logging" is created.
-+
-+config GRKERNSEC_TIME
-+	bool "Time change logging"
-+	help
-+	  If you say Y here, any changes of the system clock will be logged.
-+	  If the sysctl option is enabled, a sysctl option with name
-+	  "timechange_logging" is created.
-+
-+config GRKERNSEC_PROC_IPADDR
-+	bool "/proc/<pid>/ipaddr support"
-+	help
-+	  If you say Y here, a new entry will be added to each /proc/<pid>
-+	  directory that contains the IP address of the person using the task.
-+	  The IP is carried across local TCP and AF_UNIX stream sockets.
-+	  This information can be useful for IDS/IPSes to perform remote response
-+	  to a local attack.  The entry is readable by only the owner of the
-+	  process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
-+	  the RBAC system), and thus does not create privacy concerns.
-+
-+config GRKERNSEC_AUDIT_TEXTREL
-+	bool 'ELF text relocations logging (READ HELP)'
-+	depends on PAX_MPROTECT
-+	help
-+	  If you say Y here, text relocations will be logged with the filename
-+	  of the offending library or binary.  The purpose of the feature is
-+	  to help Linux distribution developers get rid of libraries and
-+	  binaries that need text relocations which hinder the future progress
-+	  of PaX.  Only Linux distribution developers should say Y here, and
-+	  never on a production machine, as this option creates an information
-+	  leak that could aid an attacker in defeating the randomization of
-+	  a single memory region.  If the sysctl option is enabled, a sysctl
-+	  option with name "audit_textrel" is created.
-+
-+endmenu
-+
-+menu "Executable Protections"
-+depends on GRKERNSEC
-+
-+config GRKERNSEC_EXECVE
-+	bool "Enforce RLIMIT_NPROC on execs"
-+	help
-+	  If you say Y here, users with a resource limit on processes will
-+	  have the value checked during execve() calls.  The current system
-+	  only checks the system limit during fork() calls.  If the sysctl option
-+	  is enabled, a sysctl option with name "execve_limiting" is created.
-+
-+config GRKERNSEC_SHM
-+	bool "Destroy unused shared memory"
-+	depends on SYSVIPC
-+	help
-+	  If you say Y here, shared memory will be destroyed when no one is
-+	  attached to it.  Otherwise, resources involved with the shared
-+	  memory can be used up and not be associated with any process (as the
-+	  shared memory still exists, and the creating process has exited).  If
-+	  the sysctl option is enabled, a sysctl option with name
-+	  "destroy_unused_shm" is created.
-+
-+config GRKERNSEC_DMESG
-+	bool "Dmesg(8) restriction"
-+	help
-+	  If you say Y here, non-root users will not be able to use dmesg(8)
-+	  to view up to the last 4kb of messages in the kernel's log buffer.
-+	  If the sysctl option is enabled, a sysctl option with name "dmesg" is
-+	  created.
-+
-+config GRKERNSEC_TPE
-+	bool "Trusted Path Execution (TPE)"
-+	help
-+	  If you say Y here, you will be able to choose a gid to add to the
-+	  supplementary groups of users you want to mark as "untrusted."
-+	  These users will not be able to execute any files that are not in
-+	  root-owned directories writable only by root.  If the sysctl option
-+	  is enabled, a sysctl option with name "tpe" is created.
-+
-+config GRKERNSEC_TPE_ALL
-+	bool "Partially restrict non-root users"
-+	depends on GRKERNSEC_TPE
-+	help
-+	  If you say Y here, all non-root users other than the ones in the
-+	  group specified in the main TPE option will only be allowed to
-+	  execute files in directories they own that are not group or
-+	  world-writable, or in directories owned by root and writable only by
-+	  root.  If the sysctl option is enabled, a sysctl option with name
-+	  "tpe_restrict_all" is created.
-+
-+config GRKERNSEC_TPE_INVERT
-+	bool "Invert GID option"
-+	depends on GRKERNSEC_TPE
-+	help
-+	  If you say Y here, the group you specify in the TPE configuration will
-+	  decide what group TPE restrictions will be *disabled* for.  This
-+	  option is useful if you want TPE restrictions to be applied to most
-+	  users on the system.
-+
-+config GRKERNSEC_TPE_GID
-+	int "GID for untrusted users"
-+	depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
-+	default 1005
-+	help
-+	  If you have selected the "Invert GID option" above, setting this
-+	  GID determines what group TPE restrictions will be *disabled* for.
-+	  If you have not selected the "Invert GID option" above, setting this
-+	  GID determines what group TPE restrictions will be *enabled* for.
-+	  If the sysctl option is enabled, a sysctl option with name "tpe_gid"
-+	  is created.
-+
-+config GRKERNSEC_TPE_GID
-+	int "GID for trusted users"
-+	depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
-+	default 1005
-+	help
-+	  If you have selected the "Invert GID option" above, setting this
-+	  GID determines what group TPE restrictions will be *disabled* for.
-+	  If you have not selected the "Invert GID option" above, setting this
-+	  GID determines what group TPE restrictions will be *enabled* for.
-+	  If the sysctl option is enabled, a sysctl option with name "tpe_gid"
-+	  is created.
-+
-+endmenu
-+menu "Network Protections"
-+depends on GRKERNSEC
-+
-+config GRKERNSEC_RANDNET
-+	bool "Larger entropy pools"
-+	help
-+	  If you say Y here, the entropy pools used for many features of Linux
-+	  and grsecurity will be doubled in size.  Since several grsecurity
-+	  features use additional randomness, it is recommended that you say Y
-+	  here.  Saying Y here has a similar effect as modifying
-+	  /proc/sys/kernel/random/poolsize.
-+
-+config GRKERNSEC_SOCKET
-+	bool "Socket restrictions"
-+	help
-+	  If you say Y here, you will be able to choose from several options.
-+	  If you assign a GID on your system and add it to the supplementary
-+	  groups of users you want to restrict socket access to, this patch
-+	  will perform up to three things, based on the option(s) you choose.
-+
-+config GRKERNSEC_SOCKET_ALL
-+	bool "Deny any sockets to group"
-+	depends on GRKERNSEC_SOCKET
-+	help
-+	  If you say Y here, you will be able to choose a GID of whose users will
-+	  be unable to connect to other hosts from your machine or run server
-+	  applications from your machine.  If the sysctl option is enabled, a
-+	  sysctl option with name "socket_all" is created.
-+
-+config GRKERNSEC_SOCKET_ALL_GID
-+	int "GID to deny all sockets for"
-+	depends on GRKERNSEC_SOCKET_ALL
-+	default 1004
-+	help
-+	  Here you can choose the GID to disable socket access for. Remember to
-+	  add the users you want socket access disabled for to the GID
-+	  specified here.  If the sysctl option is enabled, a sysctl option
-+	  with name "socket_all_gid" is created.
-+
-+config GRKERNSEC_SOCKET_CLIENT
-+	bool "Deny client sockets to group"
-+	depends on GRKERNSEC_SOCKET
-+	help
-+	  If you say Y here, you will be able to choose a GID of whose users will
-+	  be unable to connect to other hosts from your machine, but will be
-+	  able to run servers.  If this option is enabled, all users in the group
-+	  you specify will have to use passive mode when initiating ftp transfers
-+	  from the shell on your machine.  If the sysctl option is enabled, a
-+	  sysctl option with name "socket_client" is created.
-+
-+config GRKERNSEC_SOCKET_CLIENT_GID
-+	int "GID to deny client sockets for"
-+	depends on GRKERNSEC_SOCKET_CLIENT
-+	default 1003
-+	help
-+	  Here you can choose the GID to disable client socket access for.
-+	  Remember to add the users you want client socket access disabled for to
-+	  the GID specified here.  If the sysctl option is enabled, a sysctl
-+	  option with name "socket_client_gid" is created.
-+
-+config GRKERNSEC_SOCKET_SERVER
-+	bool "Deny server sockets to group"
-+	depends on GRKERNSEC_SOCKET
-+	help
-+	  If you say Y here, you will be able to choose a GID of whose users will
-+	  be unable to run server applications from your machine.  If the sysctl
-+	  option is enabled, a sysctl option with name "socket_server" is created.
-+
-+config GRKERNSEC_SOCKET_SERVER_GID
-+	int "GID to deny server sockets for"
-+	depends on GRKERNSEC_SOCKET_SERVER
-+	default 1002
-+	help
-+	  Here you can choose the GID to disable server socket access for.
-+	  Remember to add the users you want server socket access disabled for to
-+	  the GID specified here.  If the sysctl option is enabled, a sysctl
-+	  option with name "socket_server_gid" is created.
-+
-+endmenu
-+menu "Sysctl support"
-+depends on GRKERNSEC && SYSCTL
-+
-+config GRKERNSEC_SYSCTL
-+	bool "Sysctl support"
-+	help
-+	  If you say Y here, you will be able to change the options that
-+	  grsecurity runs with at bootup, without having to recompile your
-+	  kernel.  You can echo values to files in /proc/sys/kernel/grsecurity
-+	  to enable (1) or disable (0) various features.  All the sysctl entries
-+	  are mutable until the "grsec_lock" entry is set to a non-zero value.
-+	  All features enabled in the kernel configuration are disabled at boot
-+	  if you do not say Y to the "Turn on features by default" option.
-+	  All options should be set at startup, and the grsec_lock entry should
-+	  be set to a non-zero value after all the options are set.
-+	  *THIS IS EXTREMELY IMPORTANT*
-+
-+config GRKERNSEC_SYSCTL_ON
-+	bool "Turn on features by default"
-+	depends on GRKERNSEC_SYSCTL
-+	help
-+	  If you say Y here, instead of having all features enabled in the
-+	  kernel configuration disabled at boot time, the features will be
-+	  enabled at boot time.  It is recommended you say Y here unless
-+	  there is some reason you would want all sysctl-tunable features to
-+	  be disabled by default.  As mentioned elsewhere, it is important
-+	  to enable the grsec_lock entry once you have finished modifying
-+	  the sysctl entries.
-+
-+endmenu
-+menu "Logging Options"
-+depends on GRKERNSEC
-+
-+config GRKERNSEC_FLOODTIME
-+	int "Seconds in between log messages (minimum)"
-+	default 10
-+	help
-+	  This option allows you to enforce the number of seconds between
-+	  grsecurity log messages.  The default should be suitable for most
-+	  people, however, if you choose to change it, choose a value small enough
-+	  to allow informative logs to be produced, but large enough to
-+	  prevent flooding.
-+
-+config GRKERNSEC_FLOODBURST
-+	int "Number of messages in a burst (maximum)"
-+	default 4
-+	help
-+	  This option allows you to choose the maximum number of messages allowed
-+	  within the flood time interval you chose in a separate option.  The
-+	  default should be suitable for most people, however if you find that
-+	  many of your logs are being interpreted as flooding, you may want to
-+	  raise this value.
-+
-+endmenu
-+
-+endmenu
---- /dev/null
-+++ b/grsecurity/Makefile
-@@ -0,0 +1,20 @@
-+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
-+# during 2001-2005 it has been completely redesigned by Brad Spengler
-+# into an RBAC system
-+#
-+# All code in this directory and various hooks inserted throughout the kernel
-+# are copyright Brad Spengler, and released under the GPL v2 or higher
-+
-+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
-+	grsec_mount.o grsec_sig.o grsec_sock.o grsec_sysctl.o \
-+	grsec_time.o grsec_tpe.o grsec_ipc.o grsec_link.o grsec_textrel.o
-+
-+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_ip.o gracl_segv.o \
-+	gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
-+	gracl_learn.o grsec_log.o
-+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
-+
-+ifndef CONFIG_GRKERNSEC
-+obj-y += grsec_disabled.o
-+endif
-+
---- a/include/asm-alpha/a.out.h
-+++ b/include/asm-alpha/a.out.h
-@@ -98,7 +98,7 @@ struct exec
- 	set_personality (((BFPM->sh_bang || EX.ah.entry < 0x100000000L \
- 			   ? ADDR_LIMIT_32BIT : 0) | PER_OSF4))
- 
--#define STACK_TOP \
-+#define __STACK_TOP \
-   (current->personality & ADDR_LIMIT_32BIT ? 0x80000000 : 0x00120000000UL)
- 
- #define STACK_TOP_MAX	0x00120000000UL
---- a/include/asm-alpha/elf.h
-+++ b/include/asm-alpha/elf.h
-@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
- 
- #define ELF_ET_DYN_BASE		(TASK_UNMAPPED_BASE + 0x1000000)
- 
-+#ifdef CONFIG_PAX_ASLR
-+#define PAX_ELF_ET_DYN_BASE	(current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
-+
-+#define PAX_DELTA_MMAP_LEN	(current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
-+#define PAX_DELTA_STACK_LEN	(current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
-+#endif
-+
- /* $0 is set by ld.so to a pointer to a function which might be 
-    registered using atexit.  This provides a mean for the dynamic
-    linker to call DT_FINI functions for shared libraries that have
---- a/include/asm-alpha/kmap_types.h
-+++ b/include/asm-alpha/kmap_types.h
-@@ -24,7 +24,8 @@ D(9)	KM_IRQ0,
- D(10)	KM_IRQ1,
- D(11)	KM_SOFTIRQ0,
- D(12)	KM_SOFTIRQ1,
--D(13)	KM_TYPE_NR
-+D(13)	KM_CLEARPAGE,
-+D(14)	KM_TYPE_NR
- };
- 
- #undef D
---- a/include/asm-alpha/pgtable.h
-+++ b/include/asm-alpha/pgtable.h
-@@ -101,6 +101,17 @@ struct vm_area_struct;
- #define PAGE_SHARED	__pgprot(_PAGE_VALID | __ACCESS_BITS)
- #define PAGE_COPY	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
- #define PAGE_READONLY	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+# define PAGE_SHARED_NOEXEC	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
-+# define PAGE_COPY_NOEXEC	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
-+# define PAGE_READONLY_NOEXEC	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
-+#else
-+# define PAGE_SHARED_NOEXEC	PAGE_SHARED
-+# define PAGE_COPY_NOEXEC	PAGE_COPY
-+# define PAGE_READONLY_NOEXEC	PAGE_READONLY
-+#endif
-+
- #define PAGE_KERNEL	__pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
- 
- #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
---- a/include/asm-arm/a.out.h
-+++ b/include/asm-arm/a.out.h
-@@ -28,7 +28,7 @@ struct exec
- #define M_ARM 103
- 
- #ifdef __KERNEL__
--#define STACK_TOP	((current->personality == PER_LINUX_32BIT) ? \
-+#define __STACK_TOP	((current->personality == PER_LINUX_32BIT) ? \
- 			 TASK_SIZE : TASK_SIZE_26)
- #define STACK_TOP_MAX	TASK_SIZE
- #endif
---- a/include/asm-arm/elf.h
-+++ b/include/asm-arm/elf.h
-@@ -90,6 +90,13 @@ extern char elf_platform[];
- 
- #define ELF_ET_DYN_BASE	(2 * TASK_SIZE / 3)
- 
-+#ifdef CONFIG_PAX_ASLR
-+#define PAX_ELF_ET_DYN_BASE	0x00008000UL
-+
-+#define PAX_DELTA_MMAP_LEN	((current->personality == PER_LINUX_32BIT) ? 16 : 10)
-+#define PAX_DELTA_STACK_LEN	((current->personality == PER_LINUX_32BIT) ? 16 : 10)
-+#endif
-+
- /* When the program starts, a1 contains a pointer to a function to be 
-    registered with atexit, as per the SVR4 ABI.  A value of 0 means we 
-    have no such handler.  */
---- a/include/asm-arm/kmap_types.h
-+++ b/include/asm-arm/kmap_types.h
-@@ -18,6 +18,7 @@ enum km_type {
- 	KM_IRQ1,
- 	KM_SOFTIRQ0,
- 	KM_SOFTIRQ1,
-+	KM_CLEARPAGE,
- 	KM_TYPE_NR
- };
- 
---- a/include/asm-avr32/a.out.h
-+++ b/include/asm-avr32/a.out.h
-@@ -19,8 +19,8 @@ struct exec
- 
- #ifdef __KERNEL__
- 
--#define STACK_TOP	TASK_SIZE
--#define STACK_TOP_MAX	STACK_TOP
-+#define __STACK_TOP	TASK_SIZE
-+#define STACK_TOP_MAX	__STACK_TOP
- 
- #endif
- 
---- a/include/asm-avr32/elf.h
-+++ b/include/asm-avr32/elf.h
-@@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpreg
-    the loader.  We need to make sure that it is out of the way of the program
-    that it will "exec", and that there is sufficient room for the brk.  */
- 
--#define ELF_ET_DYN_BASE         (2 * TASK_SIZE / 3)
-+#define ELF_ET_DYN_BASE         (TASK_SIZE / 3 * 2)
- 
-+#ifdef CONFIG_PAX_ASLR
-+#define PAX_ELF_ET_DYN_BASE	0x00001000UL
-+
-+#define PAX_DELTA_MMAP_LEN	15
-+#define PAX_DELTA_STACK_LEN	15
-+#endif
- 
- /* This yields a mask that user programs can use to figure out what
-    instruction set this CPU supports.  This could be done in user space,
---- a/include/asm-avr32/kmap_types.h
-+++ b/include/asm-avr32/kmap_types.h
-@@ -22,7 +22,8 @@ D(10)	KM_IRQ0,
- D(11)	KM_IRQ1,
- D(12)	KM_SOFTIRQ0,
- D(13)	KM_SOFTIRQ1,
--D(14)	KM_TYPE_NR
-+D(14)	KM_CLEARPAGE,
-+D(15)	KM_TYPE_NR
- };
- 
- #undef D
---- a/include/asm-blackfin/kmap_types.h
-+++ b/include/asm-blackfin/kmap_types.h
-@@ -15,6 +15,7 @@ enum km_type {
- 	KM_IRQ1,
- 	KM_SOFTIRQ0,
- 	KM_SOFTIRQ1,
-+	KM_CLEARPAGE,
- 	KM_TYPE_NR
- };
- 
---- a/include/asm-cris/kmap_types.h
-+++ b/include/asm-cris/kmap_types.h
-@@ -19,6 +19,7 @@ enum km_type {
- 	KM_IRQ1,
- 	KM_SOFTIRQ0,
- 	KM_SOFTIRQ1,
-+	KM_CLEARPAGE,
- 	KM_TYPE_NR
- };
- 
---- a/include/asm-frv/kmap_types.h
-+++ b/include/asm-frv/kmap_types.h
-@@ -23,6 +23,7 @@ enum km_type {
- 	KM_IRQ1,
- 	KM_SOFTIRQ0,
- 	KM_SOFTIRQ1,
-+	KM_CLEARPAGE,
- 	KM_TYPE_NR
- };
- 
---- a/include/asm-generic/futex.h
-+++ b/include/asm-generic/futex.h
-@@ -8,7 +8,7 @@
- #include <asm/uaccess.h>
- 
- static inline int
--futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
-+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
- {
- 	int op = (encoded_op >> 28) & 7;
- 	int cmp = (encoded_op >> 24) & 15;
-@@ -50,7 +50,7 @@ futex_atomic_op_inuser (int encoded_op, 
- }
- 
- static inline int
--futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
-+futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
- {
- 	return -ENOSYS;
- }
---- a/include/asm-generic/vmlinux.lds.h
-+++ b/include/asm-generic/vmlinux.lds.h
-@@ -19,6 +19,7 @@
- 	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {		\
- 		VMLINUX_SYMBOL(__start_rodata) = .;			\
- 		*(.rodata) *(.rodata.*)					\
-+		*(.data.read_only)					\
- 		*(__vermagic)		/* Kernel version magic */	\
- 	}								\
- 									\
---- a/include/asm-h8300/kmap_types.h
-+++ b/include/asm-h8300/kmap_types.h
-@@ -15,6 +15,7 @@ enum km_type {
- 	KM_IRQ1,
- 	KM_SOFTIRQ0,
- 	KM_SOFTIRQ1,
-+	KM_CLEARPAGE,
- 	KM_TYPE_NR
- };
- 
---- a/include/asm-i386/alternative.h
-+++ b/include/asm-i386/alternative.h
-@@ -54,7 +54,7 @@ static inline void alternatives_smp_swit
- 		      "  .byte 662b-661b\n"       /* sourcelen */	\
- 		      "  .byte 664f-663f\n"       /* replacementlen */	\
- 		      ".previous\n"					\
--		      ".section .altinstr_replacement,\"ax\"\n"		\
-+		      ".section .altinstr_replacement,\"a\"\n"		\
- 		      "663:\n\t" newinstr "\n664:\n"   /* replacement */\
- 		      ".previous" :: "i" (feature) : "memory")
- 
-@@ -78,7 +78,7 @@ static inline void alternatives_smp_swit
- 		      "  .byte 662b-661b\n"       /* sourcelen */	\
- 		      "  .byte 664f-663f\n"       /* replacementlen */ 	\
- 		      ".previous\n"					\
--		      ".section .altinstr_replacement,\"ax\"\n"		\
-+		      ".section .altinstr_replacement,\"a\"\n"		\
- 		      "663:\n\t" newinstr "\n664:\n"   /* replacement */\
- 		      ".previous" :: "i" (feature), ##input)
- 
-@@ -93,7 +93,7 @@ static inline void alternatives_smp_swit
- 		      "  .byte 662b-661b\n"       /* sourcelen */	\
- 		      "  .byte 664f-663f\n"       /* replacementlen */	\
- 		      ".previous\n"					\
--		      ".section .altinstr_replacement,\"ax\"\n"		\
-+		      ".section .altinstr_replacement,\"a\"\n"		\
- 		      "663:\n\t" newinstr "\n664:\n"   /* replacement */ \
- 		      ".previous" : output : [feat] "i" (feature), ##input)
- 
---- a/include/asm-i386/a.out.h
-+++ b/include/asm-i386/a.out.h
-@@ -19,8 +19,13 @@ struct exec
- 
- #ifdef __KERNEL__
- 
--#define STACK_TOP	TASK_SIZE
--#define STACK_TOP_MAX	STACK_TOP
-+#ifdef CONFIG_PAX_SEGMEXEC
-+#define __STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?TASK_SIZE/2:TASK_SIZE)
-+#else
-+#define __STACK_TOP TASK_SIZE
-+#endif
-+
-+#define STACK_TOP_MAX	TASK_SIZE
- 
- #endif
- 
---- a/include/asm-i386/apic.h
-+++ b/include/asm-i386/apic.h
-@@ -8,7 +8,7 @@
- #include <asm/processor.h>
- #include <asm/system.h>
- 
--#define Dprintk(x...)
-+#define Dprintk(x...) do {} while (0)
- 
- /*
-  * Debugging macros
---- a/include/asm-i386/cache.h
-+++ b/include/asm-i386/cache.h
-@@ -10,5 +10,6 @@
- #define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
- 
- #define __read_mostly __attribute__((__section__(".data.read_mostly")))
-+#define __read_only __attribute__((__section__(".data.read_only")))
- 
- #endif
---- a/include/asm-i386/checksum.h
-+++ b/include/asm-i386/checksum.h
-@@ -30,6 +30,12 @@ asmlinkage __wsum csum_partial(const voi
- asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
- 						  int len, __wsum sum, int *src_err_ptr, int *dst_err_ptr);
- 
-+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
-+						  int len, __wsum sum, int *src_err_ptr, int *dst_err_ptr);
-+
-+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
-+						  int len, __wsum sum, int *src_err_ptr, int *dst_err_ptr);
-+
- /*
-  *	Note: when you get a NULL pointer exception here this means someone
-  *	passed in an incorrect kernel address to one of these functions.
-@@ -49,7 +55,7 @@ __wsum csum_partial_copy_from_user(const
- 						int len, __wsum sum, int *err_ptr)
- {
- 	might_sleep();
--	return csum_partial_copy_generic((__force void *)src, dst,
-+	return csum_partial_copy_generic_from_user((__force void *)src, dst,
- 					len, sum, err_ptr, NULL);
- }
- 
-@@ -180,7 +186,7 @@ static __inline__ __wsum csum_and_copy_t
- {
- 	might_sleep();
- 	if (access_ok(VERIFY_WRITE, dst, len))
--		return csum_partial_copy_generic(src, (__force void *)dst, len, sum, NULL, err_ptr);
-+		return csum_partial_copy_generic_to_user(src, (__force void *)dst, len, sum, NULL, err_ptr);
- 
- 	if (len)
- 		*err_ptr = -EFAULT;
---- a/include/asm-i386/desc.h
-+++ b/include/asm-i386/desc.h
-@@ -7,26 +7,22 @@
- #ifndef __ASSEMBLY__
- 
- #include <linux/preempt.h>
--#include <linux/smp.h>
- #include <linux/percpu.h>
-+#include <linux/smp.h>
- 
- #include <asm/mmu.h>
- 
-+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
-+
- struct Xgt_desc_struct {
- 	unsigned short size;
--	unsigned long address __attribute__((packed));
-+	struct desc_struct *address __attribute__((packed));
- 	unsigned short pad;
- } __attribute__ ((packed));
- 
--struct gdt_page
--{
--	struct desc_struct gdt[GDT_ENTRIES];
--} __attribute__((aligned(PAGE_SIZE)));
--DECLARE_PER_CPU(struct gdt_page, gdt_page);
--
- static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
- {
--	return per_cpu(gdt_page, cpu).gdt;
-+	return cpu_gdt_table[cpu];
- }
- 
- extern struct Xgt_desc_struct idt_descr;
-@@ -81,8 +77,20 @@ static inline void pack_gate(__u32 *a, _
- static inline void write_dt_entry(struct desc_struct *dt,
- 				  int entry, u32 entry_low, u32 entry_high)
- {
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	unsigned long cr0;
-+
-+	pax_open_kernel(cr0);
-+#endif
-+
- 	dt[entry].a = entry_low;
- 	dt[entry].b = entry_high;
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_close_kernel(cr0);
-+#endif
-+
- }
- 
- static inline void native_set_ldt(const void *addr, unsigned int entries)
-@@ -139,8 +147,19 @@ static inline void native_load_tls(struc
- 	unsigned int i;
- 	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
- 
-+#ifdef CONFIG_PAX_KERNEXEC
-+	unsigned long cr0;
-+
-+	pax_open_kernel(cr0);
-+#endif
-+
- 	for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
- 		gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_close_kernel(cr0);
-+#endif
-+
- }
- 
- static inline void _set_gate(int gate, unsigned int type, void *addr, unsigned short seg)
-@@ -175,7 +194,7 @@ static inline void __set_tss_desc(unsign
- 	((info)->seg_32bit << 22) | \
- 	((info)->limit_in_pages << 23) | \
- 	((info)->useable << 20) | \
--	0x7000)
-+	0x7100)
- 
- #define LDT_empty(info) (\
- 	(info)->base_addr	== 0	&& \
-@@ -207,15 +226,25 @@ static inline void load_LDT(mm_context_t
- 	preempt_enable();
- }
- 
--static inline unsigned long get_desc_base(unsigned long *desc)
-+static inline unsigned long get_desc_base(struct desc_struct *desc)
- {
- 	unsigned long base;
--	base = ((desc[0] >> 16)  & 0x0000ffff) |
--		((desc[1] << 16) & 0x00ff0000) |
--		(desc[1] & 0xff000000);
-+	base = ((desc->a >> 16)  & 0x0000ffff) |
-+		((desc->b << 16) & 0x00ff0000) |
-+		(desc->b & 0xff000000);
- 	return base;
- }
- 
-+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
-+{
-+	__u32 a, b;
-+
-+	if (likely(limit))
-+		limit = (limit - 1UL) >> PAGE_SHIFT;
-+	pack_descriptor(&a, &b, base, limit, 0xFB, 0xC);
-+	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, a, b);
-+}
-+
- #else /* __ASSEMBLY__ */
- 
- /*
---- a/include/asm-i386/elf.h
-+++ b/include/asm-i386/elf.h
-@@ -73,7 +73,18 @@ typedef struct user_fxsr_struct elf_fpxr
-    the loader.  We need to make sure that it is out of the way of the program
-    that it will "exec", and that there is sufficient room for the brk.  */
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+#define ELF_ET_DYN_BASE         ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
-+#else
- #define ELF_ET_DYN_BASE         (TASK_SIZE / 3 * 2)
-+#endif
-+
-+#ifdef CONFIG_PAX_ASLR
-+#define PAX_ELF_ET_DYN_BASE	0x10000000UL
-+
-+#define PAX_DELTA_MMAP_LEN	(current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
-+#define PAX_DELTA_STACK_LEN	(current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
-+#endif
- 
- /* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
-    now struct_user_regs, they are different) */
-@@ -131,7 +142,7 @@ extern int dump_task_extended_fpu (struc
- #define ELF_CORE_COPY_XFPREGS(tsk, elf_xfpregs) dump_task_extended_fpu(tsk, elf_xfpregs)
- 
- #define VDSO_HIGH_BASE		(__fix_to_virt(FIX_VDSO))
--#define VDSO_CURRENT_BASE	((unsigned long)current->mm->context.vdso)
-+#define VDSO_CURRENT_BASE	(current->mm->context.vdso)
- #define VDSO_PRELINK		0
- 
- #define VDSO_SYM(x) \
---- a/include/asm-i386/futex.h
-+++ b/include/asm-i386/futex.h
-@@ -11,8 +11,11 @@
- 
- #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
-   __asm__ __volatile (						\
-+	"movw	%w6, %%ds\n"\
- "1:	" insn "\n"						\
--"2:	.section .fixup,\"ax\"\n\
-+"2:	pushl	%%ss\n\
-+	popl	%%ds\n\
-+	.section .fixup,\"ax\"\n\
- 3:	mov	%3, %1\n\
- 	jmp	2b\n\
- 	.previous\n\
-@@ -21,16 +24,19 @@
- 	.long	1b,3b\n\
- 	.previous"						\
- 	: "=r" (oldval), "=r" (ret), "+m" (*uaddr)		\
--	: "i" (-EFAULT), "0" (oparg), "1" (0))
-+	: "i" (-EFAULT), "0" (oparg), "1" (0), "r" (__USER_DS))
- 
- #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
-   __asm__ __volatile (						\
--"1:	movl	%2, %0\n\
-+"	movw	%w7, %%es\n\
-+1:	movl	%%es:%2, %0\n\
- 	movl	%0, %3\n"					\
- 	insn "\n"						\
--"2:	" LOCK_PREFIX "cmpxchgl %3, %2\n\
-+"2:	" LOCK_PREFIX "cmpxchgl %3, %%es:%2\n\
- 	jnz	1b\n\
--3:	.section .fixup,\"ax\"\n\
-+3:	pushl	%%ss\n\
-+	popl	%%es\n\
-+	.section .fixup,\"ax\"\n\
- 4:	mov	%5, %1\n\
- 	jmp	3b\n\
- 	.previous\n\
-@@ -40,10 +46,10 @@
- 	.previous"						\
- 	: "=&a" (oldval), "=&r" (ret), "+m" (*uaddr),		\
- 	  "=&r" (tem)						\
--	: "r" (oparg), "i" (-EFAULT), "1" (0))
-+	: "r" (oparg), "i" (-EFAULT), "1" (0), "r" (__USER_DS))
- 
- static inline int
--futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
-+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
- {
- 	int op = (encoded_op >> 28) & 7;
- 	int cmp = (encoded_op >> 24) & 15;
-@@ -59,7 +65,7 @@ futex_atomic_op_inuser (int encoded_op, 
- 	pagefault_disable();
- 
- 	if (op == FUTEX_OP_SET)
--		__futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
-+		__futex_atomic_op1("xchgl %0, %%ds:%2", ret, oldval, uaddr, oparg);
- 	else {
- #ifndef CONFIG_X86_BSWAP
- 		if (boot_cpu_data.x86 == 3)
-@@ -68,7 +74,7 @@ futex_atomic_op_inuser (int encoded_op, 
- #endif
- 		switch (op) {
- 		case FUTEX_OP_ADD:
--			__futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret,
-+			__futex_atomic_op1(LOCK_PREFIX "xaddl %0, %%ds:%2", ret,
- 					   oldval, uaddr, oparg);
- 			break;
- 		case FUTEX_OP_OR:
-@@ -105,15 +111,17 @@ futex_atomic_op_inuser (int encoded_op, 
- }
- 
- static inline int
--futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
-+futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
- {
- 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
- 		return -EFAULT;
- 
- 	__asm__ __volatile__(
--		"1:	" LOCK_PREFIX "cmpxchgl %3, %1		\n"
--
--		"2:	.section .fixup, \"ax\"			\n"
-+		"	movw %w5, %%ds				\n"
-+		"1:	" LOCK_PREFIX "cmpxchgl %3, %%ds:%1	\n"
-+		"2:	pushl   %%ss				\n"
-+		"	popl    %%ds				\n"
-+		"	.section .fixup, \"ax\"			\n"
- 		"3:	mov     %2, %0				\n"
- 		"	jmp     2b				\n"
- 		"	.previous				\n"
-@@ -124,7 +132,7 @@ futex_atomic_cmpxchg_inatomic(int __user
- 		"	.previous				\n"
- 
- 		: "=a" (oldval), "+m" (*uaddr)
--		: "i" (-EFAULT), "r" (newval), "0" (oldval)
-+		: "i" (-EFAULT), "r" (newval), "0" (oldval), "r" (__USER_DS)
- 		: "memory"
- 	);
- 
---- a/include/asm-i386/i387.h
-+++ b/include/asm-i386/i387.h
-@@ -40,13 +40,8 @@ extern void kernel_fpu_begin(void);
- #define kernel_fpu_end() do { stts(); preempt_enable(); } while(0)
- 
- /* We need a safe address that is cheap to find and that is already
--   in L1 during context switch. The best choices are unfortunately
--   different for UP and SMP */
--#ifdef CONFIG_SMP
--#define safe_address (__per_cpu_offset[0])
--#else
--#define safe_address (kstat_cpu(0).cpustat.user)
--#endif
-+   in L1 during context switch. */
-+#define safe_address (init_tss[smp_processor_id()].x86_tss.esp0)
- 
- /*
-  * These must be called with preempt disabled
---- a/include/asm-i386/irqflags.h
-+++ b/include/asm-i386/irqflags.h
-@@ -108,6 +108,8 @@ static inline unsigned long __raw_local_
- #define ENABLE_INTERRUPTS_SYSEXIT	sti; sysexit
- #define INTERRUPT_RETURN		iret
- #define GET_CR0_INTO_EAX		movl %cr0, %eax
-+#define GET_CR0_INTO_EDX		movl %cr0, %edx
-+#define SET_CR0_FROM_EDX		movl %edx, %cr0
- #endif /* __ASSEMBLY__ */
- #endif /* CONFIG_PARAVIRT */
- 
---- a/include/asm-i386/kmap_types.h
-+++ b/include/asm-i386/kmap_types.h
-@@ -22,7 +22,8 @@ D(9)	KM_IRQ0,
- D(10)	KM_IRQ1,
- D(11)	KM_SOFTIRQ0,
- D(12)	KM_SOFTIRQ1,
--D(13)	KM_TYPE_NR
-+D(13)	KM_CLEARPAGE,
-+D(14)	KM_TYPE_NR
- };
- 
- #undef D
---- a/include/asm-i386/mach-default/apm.h
-+++ b/include/asm-i386/mach-default/apm.h
-@@ -36,7 +36,7 @@ static inline void apm_bios_call_asm(u32
- 	__asm__ __volatile__(APM_DO_ZERO_SEGS
- 		"pushl %%edi\n\t"
- 		"pushl %%ebp\n\t"
--		"lcall *%%cs:apm_bios_entry\n\t"
-+		"lcall *%%ss:apm_bios_entry\n\t"
- 		"setc %%al\n\t"
- 		"popl %%ebp\n\t"
- 		"popl %%edi\n\t"
-@@ -60,7 +60,7 @@ static inline u8 apm_bios_call_simple_as
- 	__asm__ __volatile__(APM_DO_ZERO_SEGS
- 		"pushl %%edi\n\t"
- 		"pushl %%ebp\n\t"
--		"lcall *%%cs:apm_bios_entry\n\t"
-+		"lcall *%%ss:apm_bios_entry\n\t"
- 		"setc %%bl\n\t"
- 		"popl %%ebp\n\t"
- 		"popl %%edi\n\t"
---- a/include/asm-i386/mman.h
-+++ b/include/asm-i386/mman.h
-@@ -14,4 +14,12 @@
- #define MCL_CURRENT	1		/* lock all current mappings */
- #define MCL_FUTURE	2		/* lock all future mappings */
- 
-+#ifdef __KERNEL__
-+#ifndef __ASSEMBLY__
-+#define arch_mmap_check	i386_mmap_check
-+int i386_mmap_check(unsigned long addr, unsigned long len,
-+		unsigned long flags);
-+#endif
-+#endif
-+
- #endif /* __I386_MMAN_H__ */
---- a/include/asm-i386/mmu_context.h
-+++ b/include/asm-i386/mmu_context.h
-@@ -57,6 +57,22 @@ static inline void switch_mm(struct mm_s
- 		 */
- 		if (unlikely(prev->context.ldt != next->context.ldt))
- 			load_LDT_nolock(&next->context);
-+
-+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
-+		if (!nx_enabled) {
-+			smp_mb__before_clear_bit();
-+			cpu_clear(cpu, prev->context.cpu_user_cs_mask);
-+			smp_mb__after_clear_bit();
-+			cpu_set(cpu, next->context.cpu_user_cs_mask);
-+		}
-+#endif
-+
-+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
-+		if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
-+			     prev->context.user_cs_limit != next->context.user_cs_limit))
-+			set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
-+#endif
-+
- 	}
- #ifdef CONFIG_SMP
- 	else {
-@@ -69,6 +85,19 @@ static inline void switch_mm(struct mm_s
- 			 */
- 			load_cr3(next->pgd);
- 			load_LDT_nolock(&next->context);
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+			if (!nx_enabled)
-+				cpu_set(cpu, next->context.cpu_user_cs_mask);
-+#endif
-+
-+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
-+#ifdef CONFIG_PAX_PAGEEXEC
-+			if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
-+#endif
-+				set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
-+#endif
-+
- 		}
- 	}
- #endif
---- a/include/asm-i386/mmu.h
-+++ b/include/asm-i386/mmu.h
-@@ -11,8 +11,19 @@
- typedef struct { 
- 	int size;
- 	struct semaphore sem;
--	void *ldt;
--	void *vdso;
-+	struct desc_struct *ldt;
-+	unsigned long vdso;
-+
-+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
-+	unsigned long user_cs_base;
-+	unsigned long user_cs_limit;
-+
-+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
-+	cpumask_t cpu_user_cs_mask;
-+#endif
-+
-+#endif
-+
- } mm_context_t;
- 
- #endif
---- a/include/asm-i386/module.h
-+++ b/include/asm-i386/module.h
-@@ -70,6 +70,12 @@ struct mod_arch_specific
- #define MODULE_STACKSIZE ""
- #endif
- 
--#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
-+#ifdef CONFIG_GRKERNSEC
-+#define MODULE_GRSEC "GRSECURTY "
-+#else
-+#define MODULE_GRSEC ""
-+#endif
-+
-+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC
- 
- #endif /* _ASM_I386_MODULE_H */
---- a/include/asm-i386/page.h
-+++ b/include/asm-i386/page.h
-@@ -10,6 +10,7 @@
- #define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
- 
- #ifdef __KERNEL__
-+#include <asm/boot.h>
- #ifndef __ASSEMBLY__
- 
- #ifdef CONFIG_X86_USE_3DNOW
-@@ -90,7 +91,6 @@ static inline pte_t native_make_pte(unsi
- typedef struct { unsigned long pte_low; } pte_t;
- typedef struct { unsigned long pgd; } pgd_t;
- typedef struct { unsigned long pgprot; } pgprot_t;
--#define boot_pte_t pte_t /* or would you rather have a typedef */
- 
- static inline unsigned long native_pgd_val(pgd_t pgd)
- {
-@@ -175,6 +175,18 @@ extern int page_is_ram(unsigned long pag
- #define __PAGE_OFFSET		((unsigned long)CONFIG_PAGE_OFFSET)
- #endif
- 
-+#ifdef CONFIG_PAX_KERNEXEC
-+#ifdef __ASSEMBLY__
-+#define __KERNEL_TEXT_OFFSET	(__PAGE_OFFSET + ((LOAD_PHYSICAL_ADDR + 6*1024*1024 - 1) & ~(4*1024*1024 - 1)))
-+#else
-+extern unsigned char KERNEL_TEXT_OFFSET[];
-+#define __KERNEL_TEXT_OFFSET ((unsigned long)KERNEL_TEXT_OFFSET)
-+extern unsigned char MODULES_VADDR[];
-+extern unsigned char MODULES_END[];
-+#endif
-+#else
-+#define __KERNEL_TEXT_OFFSET	(0)
-+#endif
- 
- #define PAGE_OFFSET		((unsigned long)__PAGE_OFFSET)
- #define VMALLOC_RESERVE		((unsigned long)__VMALLOC_RESERVE)
-@@ -197,6 +209,10 @@ extern int page_is_ram(unsigned long pag
- 	((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
- 		 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
- 
-+#ifdef CONFIG_PAX_PAGEEXEC
-+#define CONFIG_ARCH_TRACK_EXEC_LIMIT 1
-+#endif
-+
- #include <asm-generic/memory_model.h>
- #include <asm-generic/page.h>
- 
---- a/include/asm-i386/paravirt.h
-+++ b/include/asm-i386/paravirt.h
-@@ -1057,23 +1057,23 @@ static inline unsigned long __raw_local_
- 
- #define INTERRUPT_RETURN					\
- 	PARA_SITE(PARA_PATCH(PARAVIRT_iret), CLBR_NONE,		\
--		  jmp *%cs:paravirt_ops+PARAVIRT_iret)
-+		  jmp *%ss:paravirt_ops+PARAVIRT_iret)
- 
- #define DISABLE_INTERRUPTS(clobbers)					\
- 	PARA_SITE(PARA_PATCH(PARAVIRT_irq_disable), clobbers,		\
- 		  pushl %eax; pushl %ecx; pushl %edx;			\
--		  call *%cs:paravirt_ops+PARAVIRT_irq_disable;		\
-+		  call *%ss:paravirt_ops+PARAVIRT_irq_disable;		\
- 		  popl %edx; popl %ecx; popl %eax)			\
- 
- #define ENABLE_INTERRUPTS(clobbers)					\
- 	PARA_SITE(PARA_PATCH(PARAVIRT_irq_enable), clobbers,		\
- 		  pushl %eax; pushl %ecx; pushl %edx;			\
--		  call *%cs:paravirt_ops+PARAVIRT_irq_enable;		\
-+		  call *%ss:paravirt_ops+PARAVIRT_irq_enable;		\
- 		  popl %edx; popl %ecx; popl %eax)
- 
- #define ENABLE_INTERRUPTS_SYSEXIT					\
- 	PARA_SITE(PARA_PATCH(PARAVIRT_irq_enable_sysexit), CLBR_NONE,	\
--		  jmp *%cs:paravirt_ops+PARAVIRT_irq_enable_sysexit)
-+		  jmp *%ss:paravirt_ops+PARAVIRT_irq_enable_sysexit)
- 
- #define GET_CR0_INTO_EAX			\
- 	push %ecx; push %edx;			\
---- a/include/asm-i386/percpu.h
-+++ b/include/asm-i386/percpu.h
-@@ -22,7 +22,7 @@
- #define PER_CPU_VAR(var)	%fs:per_cpu__##var
- #else /* ! SMP */
- #define PER_CPU(var, reg)			\
--	movl $per_cpu__##var, reg
-+	movl per_cpu__##var, reg
- #define PER_CPU_VAR(var)	per_cpu__##var
- #endif	/* SMP */
- 
-@@ -42,12 +42,12 @@
-  */
- #ifdef CONFIG_SMP
- /* Same as generic implementation except for optimized local access. */
--#define __GENERIC_PER_CPU
- 
- /* This is used for other cpus to find our section. */
- extern unsigned long __per_cpu_offset[];
-+extern void setup_per_cpu_areas(void);
- 
--#define per_cpu_offset(x) (__per_cpu_offset[x])
-+#define per_cpu_offset(x) (__per_cpu_offset[x] - (unsigned long)__per_cpu_start)
- 
- /* Separate out the type, so (int[3], foo) works. */
- #define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
-@@ -64,11 +64,11 @@ DECLARE_PER_CPU(unsigned long, this_cpu_
- 
- /* var is in discarded region: offset to particular copy we want */
- #define per_cpu(var, cpu) (*({				\
--	extern int simple_indentifier_##var(void);	\
-+	extern int simple_identifier_##var(void);	\
- 	RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]); }))
- 
- #define __raw_get_cpu_var(var) (*({					\
--	extern int simple_indentifier_##var(void);			\
-+	extern int simple_identifier_##var(void);			\
- 	RELOC_HIDE(&per_cpu__##var, x86_read_percpu(this_cpu_off));	\
- }))
- 
-@@ -79,7 +79,7 @@ DECLARE_PER_CPU(unsigned long, this_cpu_
- do {								\
- 	unsigned int __i;					\
- 	for_each_possible_cpu(__i)				\
--		memcpy((pcpudst)+__per_cpu_offset[__i],		\
-+		memcpy((pcpudst)+per_cpu_offset(__i),		\
- 		       (src), (size));				\
- } while (0)
- 
---- a/include/asm-i386/pgalloc.h
-+++ b/include/asm-i386/pgalloc.h
-@@ -15,11 +15,19 @@
- #define paravirt_release_pd(pfn) do { } while (0)
- #endif
- 
-+#ifdef CONFIG_COMPAT_VDSO
- #define pmd_populate_kernel(mm, pmd, pte)			\
- do {								\
- 	paravirt_alloc_pt(mm, __pa(pte) >> PAGE_SHIFT);		\
- 	set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)));		\
- } while (0)
-+#else
-+#define pmd_populate_kernel(mm, pmd, pte)			\
-+do {								\
-+	paravirt_alloc_pt(mm, __pa(pte) >> PAGE_SHIFT);		\
-+	set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte)));		\
-+} while (0)
-+#endif
- 
- #define pmd_populate(mm, pmd, pte) 				\
- do {								\
---- a/include/asm-i386/pgtable-2level.h
-+++ b/include/asm-i386/pgtable-2level.h
-@@ -22,7 +22,19 @@ static inline void native_set_pte_at(str
- }
- static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
- {
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	unsigned long cr0;
-+
-+	pax_open_kernel(cr0);
-+#endif
-+
- 	*pmdp = pmd;
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_close_kernel(cr0);
-+#endif
-+
- }
- #ifndef CONFIG_PARAVIRT
- #define set_pte(pteptr, pteval)		native_set_pte(pteptr, pteval)
---- a/include/asm-i386/pgtable-3level.h
-+++ b/include/asm-i386/pgtable-3level.h
-@@ -67,11 +67,35 @@ static inline void native_set_pte_atomic
- }
- static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
- {
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	unsigned long cr0;
-+
-+	pax_open_kernel(cr0);
-+#endif
-+
- 	set_64bit((unsigned long long *)(pmdp),native_pmd_val(pmd));
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_close_kernel(cr0);
-+#endif
-+
- }
- static inline void native_set_pud(pud_t *pudp, pud_t pud)
- {
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	unsigned long cr0;
-+
-+	pax_open_kernel(cr0);
-+#endif
-+
- 	*pudp = pud;
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_close_kernel(cr0);
-+#endif
-+
- }
- 
- /*
---- a/include/asm-i386/pgtable.h
-+++ b/include/asm-i386/pgtable.h
-@@ -34,7 +34,6 @@ struct vm_area_struct;
-  */
- #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
- extern unsigned long empty_zero_page[1024];
--extern pgd_t swapper_pg_dir[1024];
- extern struct kmem_cache *pmd_cache;
- extern spinlock_t pgd_lock;
- extern struct page *pgd_list;
-@@ -58,6 +57,11 @@ void paging_init(void);
- # include <asm/pgtable-2level-defs.h>
- #endif
- 
-+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-+#ifdef CONFIG_X86_PAE
-+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
-+#endif
-+
- #define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
- #define PGDIR_MASK	(~(PGDIR_SIZE-1))
- 
-@@ -67,9 +71,11 @@ void paging_init(void);
- #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
- #define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
- 
-+#ifndef CONFIG_X86_PAE
- #define TWOLEVEL_PGDIR_SHIFT	22
- #define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
- #define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)
-+#endif
- 
- /* Just any arbitrary offset to the start of the vmalloc VM area: the
-  * current 8MB value just means that there will be a 8MB "hole" after the
-@@ -136,7 +142,7 @@ void paging_init(void);
- #define PAGE_NONE \
- 	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
- #define PAGE_SHARED \
--	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
-+	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
- 
- #define PAGE_SHARED_EXEC \
- 	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
-@@ -202,7 +208,7 @@ extern unsigned long long __PAGE_KERNEL,
- #undef TEST_ACCESS_OK
- 
- /* The boot page tables (all created as a single array) */
--extern unsigned long pg0[];
-+extern pte_t pg0[];
- 
- #define pte_present(x)	((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
- 
-@@ -218,30 +224,55 @@ extern unsigned long pg0[];
-  * The following only work if pte_present() is true.
-  * Undefined behaviour if not..
-  */
-+static inline int pte_user(pte_t pte)		{ return (pte).pte_low & _PAGE_USER; }
- static inline int pte_dirty(pte_t pte)		{ return (pte).pte_low & _PAGE_DIRTY; }
- static inline int pte_young(pte_t pte)		{ return (pte).pte_low & _PAGE_ACCESSED; }
- static inline int pte_write(pte_t pte)		{ return (pte).pte_low & _PAGE_RW; }
- static inline int pte_huge(pte_t pte)		{ return (pte).pte_low & _PAGE_PSE; }
- 
-+#ifdef CONFIG_X86_PAE
-+# include <asm/pgtable-3level.h>
-+#else
-+# include <asm/pgtable-2level.h>
-+#endif
-+
- /*
-  * The following only works if pte_present() is not true.
-  */
- static inline int pte_file(pte_t pte)		{ return (pte).pte_low & _PAGE_FILE; }
- 
-+static inline pte_t pte_exprotect(pte_t pte)
-+{
-+#ifdef CONFIG_X86_PAE
-+	if (__supported_pte_mask & _PAGE_NX)
-+		set_pte(&pte, __pte(pte_val(pte) | _PAGE_NX));
-+	else
-+#endif
-+		set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER));
-+	return pte;
-+}
-+
- static inline pte_t pte_mkclean(pte_t pte)	{ (pte).pte_low &= ~_PAGE_DIRTY; return pte; }
- static inline pte_t pte_mkold(pte_t pte)	{ (pte).pte_low &= ~_PAGE_ACCESSED; return pte; }
- static inline pte_t pte_wrprotect(pte_t pte)	{ (pte).pte_low &= ~_PAGE_RW; return pte; }
-+static inline pte_t pte_mkread(pte_t pte)	{ (pte).pte_low |= _PAGE_USER; return pte; }
-+
-+static inline pte_t pte_mkexec(pte_t pte)
-+{
-+#ifdef CONFIG_X86_PAE
-+	if (__supported_pte_mask & _PAGE_NX)
-+		set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_NX));
-+	else
-+#endif
-+		set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER));
-+	return pte;
-+}
-+
- static inline pte_t pte_mkdirty(pte_t pte)	{ (pte).pte_low |= _PAGE_DIRTY; return pte; }
- static inline pte_t pte_mkyoung(pte_t pte)	{ (pte).pte_low |= _PAGE_ACCESSED; return pte; }
- static inline pte_t pte_mkwrite(pte_t pte)	{ (pte).pte_low |= _PAGE_RW; return pte; }
- static inline pte_t pte_mkhuge(pte_t pte)	{ (pte).pte_low |= _PAGE_PSE; return pte; }
- 
--#ifdef CONFIG_X86_PAE
--# include <asm/pgtable-3level.h>
--#else
--# include <asm/pgtable-2level.h>
--#endif
--
- #ifndef CONFIG_PARAVIRT
- /*
-  * Rules for using pte_update - it must be called after any PTE update which
-@@ -353,7 +384,19 @@ static inline void ptep_set_wrprotect(st
-  */
- static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
- {
--       memcpy(dst, src, count * sizeof(pgd_t));
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	unsigned long cr0;
-+
-+	pax_open_kernel(cr0);
-+#endif
-+
-+	memcpy(dst, src, count * sizeof(pgd_t));
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_close_kernel(cr0);
-+#endif
-+
- }
- 
- /*
-@@ -500,6 +543,9 @@ static inline void paravirt_pagetable_se
- 
- #endif /* !__ASSEMBLY__ */
- 
-+#define HAVE_ARCH_UNMAPPED_AREA
-+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
-+
- #ifdef CONFIG_FLATMEM
- #define kern_addr_valid(addr)	(1)
- #endif /* CONFIG_FLATMEM */
---- a/include/asm-i386/processor.h
-+++ b/include/asm-i386/processor.h
-@@ -99,8 +99,6 @@ struct cpuinfo_x86 {
- 
- extern struct cpuinfo_x86 boot_cpu_data;
- extern struct cpuinfo_x86 new_cpu_data;
--extern struct tss_struct doublefault_tss;
--DECLARE_PER_CPU(struct tss_struct, init_tss);
- 
- #ifdef CONFIG_SMP
- extern struct cpuinfo_x86 cpu_data[];
-@@ -209,11 +207,19 @@ extern int bootloader_type;
-  */
- #define TASK_SIZE	(PAGE_OFFSET)
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+#define SEGMEXEC_TASK_SIZE	(TASK_SIZE / 2)
-+#endif
-+
- /* This decides where the kernel will search for a free chunk of vm
-  * space during mmap's.
-  */
- #define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+#define SEGMEXEC_TASK_UNMAPPED_BASE	(PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
-+#endif
-+
- #define HAVE_ARCH_PICK_MMAP_LAYOUT
- 
- extern void hard_disable_TSC(void);
-@@ -338,6 +344,9 @@ struct tss_struct {
- 
- #define ARCH_MIN_TASKALIGN	16
- 
-+extern struct tss_struct doublefault_tss;
-+extern struct tss_struct init_tss[NR_CPUS];
-+
- struct thread_struct {
- /* cached TLS descriptors. */
- 	struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
-@@ -366,7 +375,7 @@ struct thread_struct {
- };
- 
- #define INIT_THREAD  {							\
--	.esp0 = sizeof(init_stack) + (long)&init_stack,			\
-+	.esp0 = sizeof(init_stack) + (long)&init_stack - 8,		\
- 	.vm86_info = NULL,						\
- 	.sysenter_cs = __KERNEL_CS,					\
- 	.io_bitmap_ptr = NULL,						\
-@@ -381,7 +390,7 @@ struct thread_struct {
-  */
- #define INIT_TSS  {							\
- 	.x86_tss = {							\
--		.esp0		= sizeof(init_stack) + (long)&init_stack, \
-+		.esp0		= sizeof(init_stack) + (long)&init_stack - 8, \
- 		.ss0		= __KERNEL_DS,				\
- 		.ss1		= __KERNEL_CS,				\
- 		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,		\
-@@ -422,11 +431,7 @@ void show_trace(struct task_struct *task
- unsigned long get_wchan(struct task_struct *p);
- 
- #define THREAD_SIZE_LONGS      (THREAD_SIZE/sizeof(unsigned long))
--#define KSTK_TOP(info)                                                 \
--({                                                                     \
--       unsigned long *__ptr = (unsigned long *)(info);                 \
--       (unsigned long)(&__ptr[THREAD_SIZE_LONGS]);                     \
--})
-+#define KSTK_TOP(info)         ((info)->task.thread.esp0)
- 
- /*
-  * The below -8 is to reserve 8 bytes on top of the ring0 stack.
-@@ -441,7 +446,7 @@ unsigned long get_wchan(struct task_stru
- #define task_pt_regs(task)                                             \
- ({                                                                     \
-        struct pt_regs *__regs__;                                       \
--       __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
-+       __regs__ = (struct pt_regs *)((task)->thread.esp0);             \
-        __regs__ - 1;                                                   \
- })
- 
-@@ -603,8 +608,8 @@ static inline void cpuid(unsigned int op
- }
- 
- /* Some CPUID calls want 'count' to be placed in ecx */
--static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
--			       int *edx)
-+static inline void cpuid_count(unsigned int op, unsigned int count, unsigned int *eax, unsigned int *ebx, unsigned int *ecx,
-+			       unsigned int *edx)
- {
- 	*eax = op;
- 	*ecx = count;
---- a/include/asm-i386/ptrace.h
-+++ b/include/asm-i386/ptrace.h
-@@ -35,17 +35,18 @@ struct task_struct;
- extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code);
- 
- /*
-- * user_mode_vm(regs) determines whether a register set came from user mode.
-+ * user_mode(regs) determines whether a register set came from user mode.
-  * This is true if V8086 mode was enabled OR if the register set was from
-  * protected mode with RPL-3 CS value.  This tricky test checks that with
-  * one comparison.  Many places in the kernel can bypass this full check
-- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
-+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
-+ * be used.
-  */
--static inline int user_mode(struct pt_regs *regs)
-+static inline int user_mode_novm(struct pt_regs *regs)
- {
- 	return (regs->xcs & SEGMENT_RPL_MASK) == USER_RPL;
- }
--static inline int user_mode_vm(struct pt_regs *regs)
-+static inline int user_mode(struct pt_regs *regs)
- {
- 	return ((regs->xcs & SEGMENT_RPL_MASK) | (regs->eflags & VM_MASK)) >= USER_RPL;
- }
---- a/include/asm-i386/reboot.h
-+++ b/include/asm-i386/reboot.h
-@@ -15,6 +15,6 @@ struct machine_ops
- 
- extern struct machine_ops machine_ops;
- 
--void machine_real_restart(unsigned char *code, int length);
-+void machine_real_restart(const unsigned char *code, unsigned int length);
- 
- #endif	/* _ASM_REBOOT_H */
---- a/include/asm-i386/segment.h
-+++ b/include/asm-i386/segment.h
-@@ -81,6 +81,12 @@
- #define __KERNEL_PERCPU 0
- #endif
- 
-+#define GDT_ENTRY_PCIBIOS_CS			(GDT_ENTRY_KERNEL_BASE + 16)
-+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
-+
-+#define GDT_ENTRY_PCIBIOS_DS			(GDT_ENTRY_KERNEL_BASE + 17)
-+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
-+
- #define GDT_ENTRY_DOUBLEFAULT_TSS	31
- 
- /*
-@@ -140,9 +146,9 @@
- #define SEGMENT_IS_KERNEL_CODE(x) (((x) & 0xfc) == GDT_ENTRY_KERNEL_CS * 8)
- 
- /* Matches __KERNEL_CS and __USER_CS (they must be 2 entries apart) */
--#define SEGMENT_IS_FLAT_CODE(x)  (((x) & 0xec) == GDT_ENTRY_KERNEL_CS * 8)
-+#define SEGMENT_IS_FLAT_CODE(x)  (((x) & 0xFFFCU) == __KERNEL_CS || ((x) & 0xFFFCU) == __USER_CS)
- 
- /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
--#define SEGMENT_IS_PNP_CODE(x)   (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
-+#define SEGMENT_IS_PNP_CODE(x)   (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
- 
- #endif
---- a/include/asm-i386/system.h
-+++ b/include/asm-i386/system.h
-@@ -183,6 +183,21 @@ static inline void native_wbinvd(void)
- /* Set the 'TS' bit */
- #define stts() write_cr0(8 | read_cr0())
- 
-+#define pax_open_kernel(cr0)		\
-+do {					\
-+	typecheck(unsigned long, cr0);	\
-+	preempt_disable();		\
-+	cr0 = read_cr0();		\
-+	write_cr0(cr0 & ~X86_CR0_WP);	\
-+} while (0)
-+
-+#define pax_close_kernel(cr0)		\
-+do {					\
-+	typecheck(unsigned long, cr0);	\
-+	write_cr0(cr0);			\
-+	preempt_enable_no_resched();	\
-+} while (0)
-+
- #endif	/* __KERNEL__ */
- 
- static inline unsigned long get_limit(unsigned long segment)
-@@ -190,7 +205,7 @@ static inline unsigned long get_limit(un
- 	unsigned long __limit;
- 	__asm__("lsll %1,%0"
- 		:"=r" (__limit):"r" (segment));
--	return __limit+1;
-+	return __limit;
- }
- 
- #define nop() __asm__ __volatile__ ("nop")
-@@ -305,7 +320,7 @@ void enable_hlt(void);
- extern int es7000_plat;
- void cpu_idle_wait(void);
- 
--extern unsigned long arch_align_stack(unsigned long sp);
-+#define arch_align_stack(x) (x)
- extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
- 
- void default_idle(void);
---- a/include/asm-i386/uaccess.h
-+++ b/include/asm-i386/uaccess.h
-@@ -9,6 +9,7 @@
- #include <linux/prefetch.h>
- #include <linux/string.h>
- #include <asm/page.h>
-+#include <asm/segment.h>
- 
- #define VERIFY_READ 0
- #define VERIFY_WRITE 1
-@@ -29,7 +30,8 @@
- 
- #define get_ds()	(KERNEL_DS)
- #define get_fs()	(current_thread_info()->addr_limit)
--#define set_fs(x)	(current_thread_info()->addr_limit = (x))
-+void __set_fs(mm_segment_t x, int cpu);
-+void set_fs(mm_segment_t x);
- 
- #define segment_eq(a,b)	((a).seg == (b).seg)
- 
-@@ -101,6 +103,7 @@ struct exception_table_entry
- };
- 
- extern int fixup_exception(struct pt_regs *regs);
-+#define ARCH_HAS_SORT_EXTABLE
- 
- /*
-  * These are the main single-value transfer routines.  They automatically
-@@ -280,9 +283,12 @@ extern void __put_user_8(void);
- 
- #define __put_user_u64(x, addr, err)				\
- 	__asm__ __volatile__(					\
--		"1:	movl %%eax,0(%2)\n"			\
--		"2:	movl %%edx,4(%2)\n"			\
-+		"	movw %w5,%%ds\n"			\
-+		"1:	movl %%eax,%%ds:0(%2)\n"		\
-+		"2:	movl %%edx,%%ds:4(%2)\n"		\
- 		"3:\n"						\
-+		"	pushl %%ss\n"				\
-+		"	popl %%ds\n"				\
- 		".section .fixup,\"ax\"\n"			\
- 		"4:	movl %3,%0\n"				\
- 		"	jmp 3b\n"				\
-@@ -293,7 +299,8 @@ extern void __put_user_8(void);
- 		"	.long 2b,4b\n"				\
- 		".previous"					\
- 		: "=r"(err)					\
--		: "A" (x), "r" (addr), "i"(-EFAULT), "0"(err))
-+		: "A" (x), "r" (addr), "i"(-EFAULT), "0"(err),	\
-+		  "r"(__USER_DS))
- 
- #ifdef CONFIG_X86_WP_WORKS_OK
- 
-@@ -332,8 +339,11 @@ struct __large_struct { unsigned long bu
-  */
- #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
- 	__asm__ __volatile__(						\
--		"1:	mov"itype" %"rtype"1,%2\n"			\
-+		"	movw %w5,%%ds\n"				\
-+		"1:	mov"itype" %"rtype"1,%%ds:%2\n"			\
- 		"2:\n"							\
-+		"	pushl %%ss\n"					\
-+		"	popl %%ds\n"					\
- 		".section .fixup,\"ax\"\n"				\
- 		"3:	movl %3,%0\n"					\
- 		"	jmp 2b\n"					\
-@@ -343,7 +353,8 @@ struct __large_struct { unsigned long bu
- 		"	.long 1b,3b\n"					\
- 		".previous"						\
- 		: "=r"(err)						\
--		: ltype (x), "m"(__m(addr)), "i"(errret), "0"(err))
-+		: ltype (x), "m"(__m(addr)), "i"(errret), "0"(err),	\
-+		  "r"(__USER_DS))
- 
- 
- #define __get_user_nocheck(x,ptr,size)				\
-@@ -371,8 +382,11 @@ do {									\
- 
- #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
- 	__asm__ __volatile__(						\
--		"1:	mov"itype" %2,%"rtype"1\n"			\
-+		"	movw %w5,%%ds\n"				\
-+		"1:	mov"itype" %%ds:%2,%"rtype"1\n"			\
- 		"2:\n"							\
-+		"	pushl %%ss\n"					\
-+		"	popl %%ds\n"					\
- 		".section .fixup,\"ax\"\n"				\
- 		"3:	movl %3,%0\n"					\
- 		"	xor"itype" %"rtype"1,%"rtype"1\n"		\
-@@ -383,7 +397,7 @@ do {									\
- 		"	.long 1b,3b\n"					\
- 		".previous"						\
- 		: "=r"(err), ltype (x)					\
--		: "m"(__m(addr)), "i"(errret), "0"(err))
-+		: "m"(__m(addr)), "i"(errret), "0"(err), "r"(__USER_DS))
- 
- 
- unsigned long __must_check __copy_to_user_ll(void __user *to,
---- a/include/asm-ia64/elf.h
-+++ b/include/asm-ia64/elf.h
-@@ -162,7 +162,12 @@ typedef elf_greg_t elf_gregset_t[ELF_NGR
- typedef struct ia64_fpreg elf_fpreg_t;
- typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
- 
-+#ifdef CONFIG_PAX_ASLR
-+#define PAX_ELF_ET_DYN_BASE	(current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
- 
-+#define PAX_DELTA_MMAP_LEN	(current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
-+#define PAX_DELTA_STACK_LEN	(current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
-+#endif
- 
- struct pt_regs;	/* forward declaration... */
- extern void ia64_elf_core_copy_regs (struct pt_regs *src, elf_gregset_t dst);
---- a/include/asm-ia64/kmap_types.h
-+++ b/include/asm-ia64/kmap_types.h
-@@ -22,7 +22,8 @@ D(9)	KM_IRQ0,
- D(10)	KM_IRQ1,
- D(11)	KM_SOFTIRQ0,
- D(12)	KM_SOFTIRQ1,
--D(13)	KM_TYPE_NR
-+D(13)	KM_CLEARPAGE,
-+D(14)	KM_TYPE_NR
- };
- 
- #undef D
---- a/include/asm-ia64/pgtable.h
-+++ b/include/asm-ia64/pgtable.h
-@@ -143,6 +143,17 @@
- #define PAGE_READONLY	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
- #define PAGE_COPY	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
- #define PAGE_COPY_EXEC	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+# define PAGE_SHARED_NOEXEC	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
-+# define PAGE_READONLY_NOEXEC	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
-+# define PAGE_COPY_NOEXEC	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
-+#else
-+# define PAGE_SHARED_NOEXEC	PAGE_SHARED
-+# define PAGE_READONLY_NOEXEC	PAGE_READONLY
-+# define PAGE_COPY_NOEXEC	PAGE_COPY
-+#endif
-+
- #define PAGE_GATE	__pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
- #define PAGE_KERNEL	__pgprot(__DIRTY_BITS  | _PAGE_PL_0 | _PAGE_AR_RWX)
- #define PAGE_KERNELRX	__pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
---- a/include/asm-ia64/processor.h
-+++ b/include/asm-ia64/processor.h
-@@ -275,7 +275,7 @@ struct thread_struct {
- 	.on_ustack =	0,					\
- 	.ksp =		0,					\
- 	.map_base =	DEFAULT_MAP_BASE,			\
--	.rbs_bot =	STACK_TOP - DEFAULT_USER_STACK_SIZE,	\
-+	.rbs_bot =	__STACK_TOP - DEFAULT_USER_STACK_SIZE,	\
- 	.task_size =	DEFAULT_TASK_SIZE,			\
- 	.last_fph_cpu =  -1,					\
- 	INIT_THREAD_IA32					\
---- a/include/asm-ia64/ustack.h
-+++ b/include/asm-ia64/ustack.h
-@@ -10,8 +10,8 @@
- 
- /* The absolute hard limit for stack size is 1/2 of the mappable space in the region */
- #define MAX_USER_STACK_SIZE	(RGN_MAP_LIMIT/2)
--#define STACK_TOP		(0x6000000000000000UL + RGN_MAP_LIMIT)
--#define STACK_TOP_MAX		STACK_TOP
-+#define __STACK_TOP		(0x6000000000000000UL + RGN_MAP_LIMIT)
-+#define STACK_TOP_MAX		__STACK_TOP
- #endif
- 
- /* Make a default stack size of 2GiB */
---- a/include/asm-m32r/kmap_types.h
-+++ b/include/asm-m32r/kmap_types.h
-@@ -21,7 +21,8 @@ D(9)	KM_IRQ0,
- D(10)	KM_IRQ1,
- D(11)	KM_SOFTIRQ0,
- D(12)	KM_SOFTIRQ1,
--D(13)	KM_TYPE_NR
-+D(13)	KM_CLEARPAGE,
-+D(14)	KM_TYPE_NR
- };
- 
- #undef D
---- a/include/asm-m68k/kmap_types.h
-+++ b/include/asm-m68k/kmap_types.h
-@@ -15,6 +15,7 @@ enum km_type {
- 	KM_IRQ1,
- 	KM_SOFTIRQ0,
- 	KM_SOFTIRQ1,
-+	KM_CLEARPAGE,
- 	KM_TYPE_NR
- };
- 
---- a/include/asm-m68knommu/kmap_types.h
-+++ b/include/asm-m68knommu/kmap_types.h
-@@ -15,6 +15,7 @@ enum km_type {
- 	KM_IRQ1,
- 	KM_SOFTIRQ0,
- 	KM_SOFTIRQ1,
-+	KM_CLEARPAGE,
- 	KM_TYPE_NR
- };
- 
---- a/include/asm-mips/a.out.h
-+++ b/include/asm-mips/a.out.h
-@@ -35,10 +35,10 @@ struct exec
- #ifdef __KERNEL__
- 
- #ifdef CONFIG_32BIT
--#define STACK_TOP	TASK_SIZE
-+#define __STACK_TOP	TASK_SIZE
- #endif
- #ifdef CONFIG_64BIT
--#define STACK_TOP	\
-+#define __STACK_TOP	\
-       (test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE)
- #endif
- #define STACK_TOP_MAX	TASK_SIZE
---- a/include/asm-mips/elf.h
-+++ b/include/asm-mips/elf.h
-@@ -372,4 +372,11 @@ extern int dump_task_fpu(struct task_str
- #define ELF_ET_DYN_BASE         (TASK_SIZE / 3 * 2)
- #endif
- 
-+#ifdef CONFIG_PAX_ASLR
-+#define PAX_ELF_ET_DYN_BASE	((current->thread.mflags & MF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
-+
-+#define PAX_DELTA_MMAP_LEN	((current->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
-+#define PAX_DELTA_STACK_LEN	((current->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
-+#endif
-+
- #endif /* _ASM_ELF_H */
---- a/include/asm-mips/kmap_types.h
-+++ b/include/asm-mips/kmap_types.h
-@@ -22,7 +22,8 @@ D(9)	KM_IRQ0,
- D(10)	KM_IRQ1,
- D(11)	KM_SOFTIRQ0,
- D(12)	KM_SOFTIRQ1,
--D(13)	KM_TYPE_NR
-+D(13)	KM_CLEARPAGE,
-+D(14)	KM_TYPE_NR
- };
- 
- #undef D
---- a/include/asm-mips/page.h
-+++ b/include/asm-mips/page.h
-@@ -82,7 +82,7 @@ extern void copy_user_highpage(struct pa
-   #ifdef CONFIG_CPU_MIPS32
-     typedef struct { unsigned long pte_low, pte_high; } pte_t;
-     #define pte_val(x)    ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
--    #define __pte(x)      ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
-+    #define __pte(x)      ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
-   #else
-      typedef struct { unsigned long long pte; } pte_t;
-      #define pte_val(x)	((x).pte)
---- a/include/asm-mips/system.h
-+++ b/include/asm-mips/system.h
-@@ -213,6 +213,6 @@ extern int stop_a_enabled;
-  */
- #define __ARCH_WANT_UNLOCKED_CTXSW
- 
--extern unsigned long arch_align_stack(unsigned long sp);
-+#define arch_align_stack(x) (x)
- 
- #endif /* _ASM_SYSTEM_H */
---- a/include/asm-parisc/a.out.h
-+++ b/include/asm-parisc/a.out.h
-@@ -22,7 +22,7 @@ struct exec
- /* XXX: STACK_TOP actually should be STACK_BOTTOM for parisc.
-  * prumpf */
- 
--#define STACK_TOP	TASK_SIZE
-+#define __STACK_TOP	TASK_SIZE
- #define STACK_TOP_MAX	DEFAULT_TASK_SIZE
- 
- #endif
---- a/include/asm-parisc/elf.h
-+++ b/include/asm-parisc/elf.h
-@@ -337,6 +337,13 @@ struct pt_regs;	/* forward declaration..
- 
- #define ELF_ET_DYN_BASE         (TASK_UNMAPPED_BASE + 0x01000000)
- 
-+#ifdef CONFIG_PAX_ASLR
-+#define PAX_ELF_ET_DYN_BASE	0x10000UL
-+
-+#define PAX_DELTA_MMAP_LEN	16
-+#define PAX_DELTA_STACK_LEN	16
-+#endif
-+
- /* This yields a mask that user programs can use to figure out what
-    instruction set this CPU supports.  This could be done in user space,
-    but it's not easy, and we've already done it here.  */
---- a/include/asm-parisc/kmap_types.h
-+++ b/include/asm-parisc/kmap_types.h
-@@ -22,7 +22,8 @@ D(9)	KM_IRQ0,
- D(10)	KM_IRQ1,
- D(11)	KM_SOFTIRQ0,
- D(12)	KM_SOFTIRQ1,
--D(13)	KM_TYPE_NR
-+D(13)	KM_CLEARPAGE,
-+D(14)	KM_TYPE_NR
- };
- 
- #undef D
---- a/include/asm-parisc/pgtable.h
-+++ b/include/asm-parisc/pgtable.h
-@@ -218,6 +218,17 @@ extern  void *vmalloc_start;
- #define PAGE_EXECREAD   __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
- #define PAGE_COPY       PAGE_EXECREAD
- #define PAGE_RWX        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+# define PAGE_SHARED_NOEXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
-+# define PAGE_COPY_NOEXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
-+# define PAGE_READONLY_NOEXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
-+#else
-+# define PAGE_SHARED_NOEXEC	PAGE_SHARED
-+# define PAGE_COPY_NOEXEC	PAGE_COPY
-+# define PAGE_READONLY_NOEXEC	PAGE_READONLY
-+#endif
-+
- #define PAGE_KERNEL	__pgprot(_PAGE_KERNEL)
- #define PAGE_KERNEL_RO	__pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
- #define PAGE_KERNEL_UNC	__pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
---- a/include/asm-powerpc/a.out.h
-+++ b/include/asm-powerpc/a.out.h
-@@ -23,15 +23,15 @@ struct exec
- #define STACK_TOP_USER64 TASK_SIZE_USER64
- #define STACK_TOP_USER32 TASK_SIZE_USER32
- 
--#define STACK_TOP (test_thread_flag(TIF_32BIT) ? \
-+#define __STACK_TOP (test_thread_flag(TIF_32BIT) ? \
- 		   STACK_TOP_USER32 : STACK_TOP_USER64)
- 
- #define STACK_TOP_MAX STACK_TOP_USER64
- 
- #else /* __powerpc64__ */
- 
--#define STACK_TOP TASK_SIZE
--#define STACK_TOP_MAX	STACK_TOP
-+#define __STACK_TOP TASK_SIZE
-+#define STACK_TOP_MAX	__STACK_TOP
- 
- #endif /* __powerpc64__ */
- #endif /* __KERNEL__ */
---- a/include/asm-powerpc/elf.h
-+++ b/include/asm-powerpc/elf.h
-@@ -159,6 +159,18 @@ typedef elf_vrreg_t elf_vrregset_t[ELF_N
- typedef elf_vrreg_t elf_vrregset_t32[ELF_NVRREG32];
- #endif
- 
-+#ifdef CONFIG_PAX_ASLR
-+#define PAX_ELF_ET_DYN_BASE	(0x10000000UL)
-+
-+#ifdef __powerpc64__
-+#define PAX_DELTA_MMAP_LEN	(test_thread_flag(TIF_32BIT) ? 16 : 28)
-+#define PAX_DELTA_STACK_LEN	(test_thread_flag(TIF_32BIT) ? 16 : 28)
-+#else
-+#define PAX_DELTA_MMAP_LEN	15
-+#define PAX_DELTA_STACK_LEN	15
-+#endif
-+#endif
-+
- #ifdef __KERNEL__
- /*
-  * This is used to ensure we don't load something for the wrong architecture.
---- a/include/asm-powerpc/kmap_types.h
-+++ b/include/asm-powerpc/kmap_types.h
-@@ -26,6 +26,7 @@ enum km_type {
- 	KM_SOFTIRQ1,
- 	KM_PPC_SYNC_PAGE,
- 	KM_PPC_SYNC_ICACHE,
-+	KM_CLEARPAGE,
- 	KM_TYPE_NR
- };
- 
---- a/include/asm-powerpc/page_64.h
-+++ b/include/asm-powerpc/page_64.h
-@@ -158,15 +158,18 @@ extern int is_hugepage_only_range(struct
-  * stack by default, so in the absense of a PT_GNU_STACK program header
-  * we turn execute permission off.
-  */
--#define VM_STACK_DEFAULT_FLAGS32	(VM_READ | VM_WRITE | VM_EXEC | \
--					 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-+#define VM_STACK_DEFAULT_FLAGS32 \
-+	(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
-+	 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
- 
- #define VM_STACK_DEFAULT_FLAGS64	(VM_READ | VM_WRITE | \
- 					 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
- 
-+#ifndef CONFIG_PAX_PAGEEXEC
- #define VM_STACK_DEFAULT_FLAGS \
- 	(test_thread_flag(TIF_32BIT) ? \
- 	 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
-+#endif
- 
- #include <asm-generic/page.h>
- 
---- a/include/asm-powerpc/page.h
-+++ b/include/asm-powerpc/page.h
-@@ -71,8 +71,9 @@
-  * and needs to be executable.  This means the whole heap ends
-  * up being executable.
-  */
--#define VM_DATA_DEFAULT_FLAGS32	(VM_READ | VM_WRITE | VM_EXEC | \
--				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-+#define VM_DATA_DEFAULT_FLAGS32 \
-+	(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
-+	 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
- 
- #define VM_DATA_DEFAULT_FLAGS64	(VM_READ | VM_WRITE | \
- 				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
---- a/include/asm-ppc/mmu_context.h
-+++ b/include/asm-ppc/mmu_context.h
-@@ -145,7 +145,8 @@ static inline void get_mmu_context(struc
- static inline int init_new_context(struct task_struct *t, struct mm_struct *mm)
- {
- 	mm->context.id = NO_CONTEXT;
--	mm->context.vdso_base = 0;
-+	if (t == current)
-+		mm->context.vdso_base = ~0UL;
- 	return 0;
- }
- 
---- a/include/asm-ppc/pgtable.h
-+++ b/include/asm-ppc/pgtable.h
-@@ -440,11 +440,21 @@ extern unsigned long ioremap_bot, iorema
- 
- #define PAGE_NONE	__pgprot(_PAGE_BASE)
- #define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
--#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-+#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC | _PAGE_HWEXEC)
- #define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
--#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
-+#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC | _PAGE_HWEXEC)
- #define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
--#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-+#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC | _PAGE_HWEXEC)
-+
-+#if defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_40x) && !defined(CONFIG_44x)
-+# define PAGE_SHARED_NOEXEC	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_GUARDED)
-+# define PAGE_COPY_NOEXEC	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_GUARDED)
-+# define PAGE_READONLY_NOEXEC	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_GUARDED)
-+#else
-+# define PAGE_SHARED_NOEXEC	PAGE_SHARED
-+# define PAGE_COPY_NOEXEC	PAGE_COPY
-+# define PAGE_READONLY_NOEXEC	PAGE_READONLY
-+#endif
- 
- #define PAGE_KERNEL		__pgprot(_PAGE_RAM)
- #define PAGE_KERNEL_NOCACHE	__pgprot(_PAGE_IO)
-@@ -456,21 +466,21 @@ extern unsigned long ioremap_bot, iorema
-  * This is the closest we can get..
-  */
- #define __P000	PAGE_NONE
--#define __P001	PAGE_READONLY_X
--#define __P010	PAGE_COPY
--#define __P011	PAGE_COPY_X
--#define __P100	PAGE_READONLY
-+#define __P001	PAGE_READONLY_NOEXEC
-+#define __P010	PAGE_COPY_NOEXEC
-+#define __P011	PAGE_COPY_NOEXEC
-+#define __P100	PAGE_READONLY_X
- #define __P101	PAGE_READONLY_X
--#define __P110	PAGE_COPY
-+#define __P110	PAGE_COPY_X
- #define __P111	PAGE_COPY_X
- 
- #define __S000	PAGE_NONE
--#define __S001	PAGE_READONLY_X
--#define __S010	PAGE_SHARED
--#define __S011	PAGE_SHARED_X
--#define __S100	PAGE_READONLY
-+#define __S001	PAGE_READONLY_NOEXEC
-+#define __S010	PAGE_SHARED_NOEXEC
-+#define __S011	PAGE_SHARED_NOEXEC
-+#define __S100	PAGE_READONLY_X
- #define __S101	PAGE_READONLY_X
--#define __S110	PAGE_SHARED
-+#define __S110	PAGE_SHARED_X
- #define __S111	PAGE_SHARED_X
- 
- #ifndef __ASSEMBLY__
---- a/include/asm-s390/kmap_types.h
-+++ b/include/asm-s390/kmap_types.h
-@@ -16,6 +16,7 @@ enum km_type {
- 	KM_IRQ1,
- 	KM_SOFTIRQ0,
- 	KM_SOFTIRQ1,	
-+	KM_CLEARPAGE,
- 	KM_TYPE_NR
- };
- 
---- a/include/asm-sh/kmap_types.h
-+++ b/include/asm-sh/kmap_types.h
-@@ -24,7 +24,8 @@ D(9)	KM_IRQ0,
- D(10)	KM_IRQ1,
- D(11)	KM_SOFTIRQ0,
- D(12)	KM_SOFTIRQ1,
--D(13)	KM_TYPE_NR
-+D(13)	KM_CLEARPAGE,
-+D(14)	KM_TYPE_NR
- };
- 
- #undef D
---- a/include/asm-sparc64/a.out.h
-+++ b/include/asm-sparc64/a.out.h
-@@ -98,7 +98,7 @@ struct relocation_info /* used when head
- #define STACK_TOP32	((1UL << 32UL) - PAGE_SIZE)
- #define STACK_TOP64	(0x0000080000000000UL - (1UL << 32UL))
- 
--#define STACK_TOP (test_thread_flag(TIF_32BIT) ? \
-+#define __STACK_TOP (test_thread_flag(TIF_32BIT) ? \
- 		   STACK_TOP32 : STACK_TOP64)
- 
- #define STACK_TOP_MAX STACK_TOP64
---- a/include/asm-sparc64/elf.h
-+++ b/include/asm-sparc64/elf.h
-@@ -143,6 +143,12 @@ typedef struct {
- #define ELF_ET_DYN_BASE         0x0000010000000000UL
- #endif
- 
-+#ifdef CONFIG_PAX_ASLR
-+#define PAX_ELF_ET_DYN_BASE	(test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
-+
-+#define PAX_DELTA_MMAP_LEN	(test_thread_flag(TIF_32BIT) ? 14 : 28 )
-+#define PAX_DELTA_STACK_LEN	(test_thread_flag(TIF_32BIT) ? 15 : 29 )
-+#endif
- 
- /* This yields a mask that user programs can use to figure out what
-    instruction set this cpu supports.  */
---- a/include/asm-sparc64/kmap_types.h
-+++ b/include/asm-sparc64/kmap_types.h
-@@ -19,6 +19,7 @@ enum km_type {
- 	KM_IRQ1,
- 	KM_SOFTIRQ0,
- 	KM_SOFTIRQ1,
-+	KM_CLEARPAGE,
- 	KM_TYPE_NR
- };
- 
---- a/include/asm-sparc/a.out.h
-+++ b/include/asm-sparc/a.out.h
-@@ -91,8 +91,8 @@ struct relocation_info /* used when head
- 
- #include <asm/page.h>
- 
--#define STACK_TOP	(PAGE_OFFSET - PAGE_SIZE)
--#define STACK_TOP_MAX	STACK_TOP
-+#define __STACK_TOP	(PAGE_OFFSET - PAGE_SIZE)
-+#define STACK_TOP_MAX	__STACK_TOP
- 
- #endif /* __KERNEL__ */
- 
---- a/include/asm-sparc/elf.h
-+++ b/include/asm-sparc/elf.h
-@@ -143,6 +143,13 @@ do {	unsigned long *dest = &(__elf_regs[
- 
- #define ELF_ET_DYN_BASE         (TASK_UNMAPPED_BASE)
- 
-+#ifdef CONFIG_PAX_ASLR
-+#define PAX_ELF_ET_DYN_BASE	0x10000UL
-+
-+#define PAX_DELTA_MMAP_LEN	16
-+#define PAX_DELTA_STACK_LEN	16
-+#endif
-+
- /* This yields a mask that user programs can use to figure out what
-    instruction set this cpu supports.  This can NOT be done in userspace
-    on Sparc.  */
---- a/include/asm-sparc/kmap_types.h
-+++ b/include/asm-sparc/kmap_types.h
-@@ -15,6 +15,7 @@ enum km_type {
- 	KM_IRQ1,
- 	KM_SOFTIRQ0,
- 	KM_SOFTIRQ1,
-+	KM_CLEARPAGE,
- 	KM_TYPE_NR
- };
- 
---- a/include/asm-sparc/pgtable.h
-+++ b/include/asm-sparc/pgtable.h
-@@ -69,6 +69,16 @@ extern pgprot_t PAGE_SHARED;
- #define PAGE_COPY      __pgprot(BTFIXUP_INT(page_copy))
- #define PAGE_READONLY  __pgprot(BTFIXUP_INT(page_readonly))
- 
-+#ifdef CONFIG_PAX_PAGEEXEC
-+extern pgprot_t PAGE_SHARED_NOEXEC;
-+# define PAGE_COPY_NOEXEC	__pgprot(BTFIXUP_INT(page_copy_noexec))
-+# define PAGE_READONLY_NOEXEC	__pgprot(BTFIXUP_INT(page_readonly_noexec))
-+#else
-+# define PAGE_SHARED_NOEXEC	PAGE_SHARED
-+# define PAGE_COPY_NOEXEC	PAGE_COPY
-+# define PAGE_READONLY_NOEXEC	PAGE_READONLY
-+#endif
-+
- extern unsigned long page_kernel;
- 
- #ifdef MODULE
---- a/include/asm-sparc/pgtsrmmu.h
-+++ b/include/asm-sparc/pgtsrmmu.h
-@@ -115,6 +115,16 @@
- 				    SRMMU_EXEC | SRMMU_REF)
- #define SRMMU_PAGE_RDONLY  __pgprot(SRMMU_VALID | SRMMU_CACHE | \
- 				    SRMMU_EXEC | SRMMU_REF)
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+#define SRMMU_PAGE_SHARED_NOEXEC  __pgprot(SRMMU_VALID | SRMMU_CACHE | \
-+					   SRMMU_WRITE | SRMMU_REF)
-+#define SRMMU_PAGE_COPY_NOEXEC    __pgprot(SRMMU_VALID | SRMMU_CACHE | \
-+					   SRMMU_REF)
-+#define SRMMU_PAGE_RDONLY_NOEXEC  __pgprot(SRMMU_VALID | SRMMU_CACHE | \
-+					   SRMMU_REF)
-+#endif
-+
- #define SRMMU_PAGE_KERNEL  __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
- 				    SRMMU_DIRTY | SRMMU_REF)
- 
---- a/include/asm-sparc/uaccess.h
-+++ b/include/asm-sparc/uaccess.h
-@@ -41,7 +41,7 @@
-  * No one can read/write anything from userland in the kernel space by setting
-  * large size and address near to PAGE_OFFSET - a fault will break his intentions.
-  */
--#define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; })
-+#define __user_ok(addr, size) ({ (void)(size); (addr) < __STACK_TOP; })
- #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
- #define __access_ok(addr,size) (__user_ok((addr) & get_fs().seg,(size)))
- #define access_ok(type, addr, size)					\
---- a/include/asm-um/kmap_types.h
-+++ b/include/asm-um/kmap_types.h
-@@ -23,6 +23,7 @@ enum km_type {
- 	KM_IRQ1,
- 	KM_SOFTIRQ0,
- 	KM_SOFTIRQ1,
-+	KM_CLEARPAGE,
- 	KM_TYPE_NR
- };
- 
---- a/include/asm-v850/kmap_types.h
-+++ b/include/asm-v850/kmap_types.h
-@@ -13,6 +13,7 @@ enum km_type {
- 	KM_PTE1,
- 	KM_IRQ0,
- 	KM_IRQ1,
-+	KM_CLEARPAGE,
- 	KM_TYPE_NR
- };
- 
---- a/include/asm-x86_64/a.out.h
-+++ b/include/asm-x86_64/a.out.h
-@@ -21,7 +21,7 @@ struct exec
- 
- #ifdef __KERNEL__
- #include <linux/thread_info.h>
--#define STACK_TOP	TASK_SIZE
-+#define __STACK_TOP	TASK_SIZE
- #define STACK_TOP_MAX	TASK_SIZE64
- #endif
- 
---- a/include/asm-x86_64/apic.h
-+++ b/include/asm-x86_64/apic.h
-@@ -7,7 +7,7 @@
- #include <asm/apicdef.h>
- #include <asm/system.h>
- 
--#define Dprintk(x...)
-+#define Dprintk(x...) do {} while (0)
- 
- /*
-  * Debugging macros
---- a/include/asm-x86_64/elf.h
-+++ b/include/asm-x86_64/elf.h
-@@ -92,6 +92,13 @@ typedef struct user_i387_struct elf_fpre
- 
- #define ELF_ET_DYN_BASE         (2 * TASK_SIZE / 3)
- 
-+#ifdef CONFIG_PAX_ASLR
-+#define PAX_ELF_ET_DYN_BASE	(test_thread_flag(TIF_IA32) ? 0x08048000UL : 0x400000UL)
-+
-+#define PAX_DELTA_MMAP_LEN	(test_thread_flag(TIF_IA32) ? 16 : 32)
-+#define PAX_DELTA_STACK_LEN	(test_thread_flag(TIF_IA32) ? 16 : 32)
-+#endif
-+
- /* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
-    now struct_user_regs, they are different). Assumes current is the process
-    getting dumped. */
-@@ -172,7 +179,7 @@ extern int vdso_enabled;
- 
- #define ARCH_DLINFO						\
- do if (vdso_enabled) {						\
--	NEW_AUX_ENT(AT_SYSINFO_EHDR,(unsigned long)current->mm->context.vdso);\
-+	NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);\
- } while (0)
- 
- #endif
---- a/include/asm-x86_64/futex.h
-+++ b/include/asm-x86_64/futex.h
-@@ -42,7 +42,7 @@
- 	: "r" (oparg), "i" (-EFAULT), "m" (*uaddr), "1" (0))
- 
- static inline int
--futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
-+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
- {
- 	int op = (encoded_op >> 28) & 7;
- 	int cmp = (encoded_op >> 24) & 15;
-@@ -95,7 +95,7 @@ futex_atomic_op_inuser (int encoded_op, 
- }
- 
- static inline int
--futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
-+futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
- {
- 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
- 		return -EFAULT;
---- a/include/asm-x86_64/ia32.h
-+++ b/include/asm-x86_64/ia32.h
-@@ -156,7 +156,13 @@ struct ustat32 {
- 	char			f_fpack[6];
- };
- 
--#define IA32_STACK_TOP IA32_PAGE_OFFSET
-+#ifdef CONFIG_PAX_RANDUSTACK
-+#define IA32_DELTA_STACK (current->mm->delta_stack)
-+#else
-+#define IA32_DELTA_STACK 0UL
-+#endif
-+
-+#define IA32_STACK_TOP (IA32_PAGE_OFFSET - IA32_DELTA_STACK)
- 
- #ifdef __KERNEL__
- struct user_desc;
---- a/include/asm-x86_64/kmap_types.h
-+++ b/include/asm-x86_64/kmap_types.h
-@@ -13,6 +13,7 @@ enum km_type {
- 	KM_IRQ1,
- 	KM_SOFTIRQ0,
- 	KM_SOFTIRQ1,
-+	KM_CLEARPAGE,
- 	KM_TYPE_NR
- };
- 
---- a/include/asm-x86_64/mmu.h
-+++ b/include/asm-x86_64/mmu.h
-@@ -15,7 +15,7 @@ typedef struct { 
- 	rwlock_t ldtlock; 
- 	int size;
- 	struct semaphore sem; 
--	void *vdso;
-+	unsigned long vdso;
- } mm_context_t;
- 
- #endif
---- a/include/asm-x86_64/page.h
-+++ b/include/asm-x86_64/page.h
-@@ -94,6 +94,8 @@ extern unsigned long phys_base;
- #define __START_KERNEL_map	_AC(0xffffffff80000000, UL)
- #define __PAGE_OFFSET           _AC(0xffff810000000000, UL)
- 
-+#define __KERNEL_TEXT_OFFSET	(0)
-+
- /* to align the pointer to the (next) page boundary */
- #define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)
- 
---- a/include/asm-x86_64/pgalloc.h
-+++ b/include/asm-x86_64/pgalloc.h
-@@ -6,7 +6,7 @@
- #include <linux/mm.h>
- 
- #define pmd_populate_kernel(mm, pmd, pte) \
--		set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)))
-+		set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(pte)))
- #define pud_populate(mm, pud, pmd) \
- 		set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)))
- #define pgd_populate(mm, pgd, pud) \
---- a/include/asm-x86_64/pgtable.h
-+++ b/include/asm-x86_64/pgtable.h
-@@ -179,6 +179,10 @@ static inline pte_t ptep_get_and_clear_f
- #define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
- #define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
- #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-+
-+#define PAGE_READONLY_NOEXEC PAGE_READONLY
-+#define PAGE_SHARED_NOEXEC PAGE_SHARED
-+
- #define __PAGE_KERNEL \
- 	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
- #define __PAGE_KERNEL_EXEC \
---- a/include/asm-x86_64/processor.h
-+++ b/include/asm-x86_64/processor.h
-@@ -140,7 +140,7 @@ static inline void clear_in_cr4 (unsigne
- /* This decides where the kernel will search for a free chunk of vm
-  * space during mmap's.
-  */
--#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? 0xc0000000 : 0xFFFFe000)
-+#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? 0xc0000000 : 0xFFFFf000)
- 
- #define TASK_SIZE 		(test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE64)
- #define TASK_SIZE_OF(child) 	((test_tsk_thread_flag(child, TIF_IA32)) ? IA32_PAGE_OFFSET : TASK_SIZE64)
---- a/include/asm-x86_64/system.h
-+++ b/include/asm-x86_64/system.h
-@@ -174,7 +174,7 @@ static inline void write_cr8(unsigned lo
- 
- void cpu_idle_wait(void);
- 
--extern unsigned long arch_align_stack(unsigned long sp);
-+#define arch_align_stack(x) (x)
- extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
- 
- #endif
---- a/include/asm-xtensa/kmap_types.h
-+++ b/include/asm-xtensa/kmap_types.h
-@@ -25,6 +25,7 @@ enum km_type {
-   KM_IRQ1,
-   KM_SOFTIRQ0,
-   KM_SOFTIRQ1,
-+  KM_CLEARPAGE,
-   KM_TYPE_NR
- };
- 
---- a/include/linux/a.out.h
-+++ b/include/linux/a.out.h
-@@ -7,6 +7,16 @@
- 
- #include <asm/a.out.h>
- 
-+#ifdef CONFIG_PAX_RANDUSTACK
-+#define __DELTA_STACK (current->mm->delta_stack)
-+#else
-+#define __DELTA_STACK 0UL
-+#endif
-+
-+#ifndef STACK_TOP
-+#define STACK_TOP	(__STACK_TOP - __DELTA_STACK)
-+#endif
-+
- #endif /* __STRUCT_EXEC_OVERRIDE__ */
- 
- /* these go in the N_MACHTYPE field */
-@@ -37,6 +47,14 @@ enum machine_type {
-   M_MIPS2 = 152		/* MIPS R6000/R4000 binary */
- };
- 
-+/* Constants for the N_FLAGS field */
-+#define F_PAX_PAGEEXEC	1	/* Paging based non-executable pages */
-+#define F_PAX_EMUTRAMP	2	/* Emulate trampolines */
-+#define F_PAX_MPROTECT	4	/* Restrict mprotect() */
-+#define F_PAX_RANDMMAP	8	/* Randomize mmap() base */
-+/*#define F_PAX_RANDEXEC	16*/	/* Randomize ET_EXEC base */
-+#define F_PAX_SEGMEXEC	32	/* Segmentation based non-executable pages */
-+
- #if !defined (N_MAGIC)
- #define N_MAGIC(exec) ((exec).a_info & 0xffff)
- #endif
---- a/include/linux/binfmts.h
-+++ b/include/linux/binfmts.h
-@@ -48,6 +48,7 @@ struct linux_binprm{
- 	unsigned interp_data;
- 	unsigned long loader, exec;
- 	unsigned long argv_len;
-+	int misc;
- };
- 
- #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
-@@ -99,5 +100,8 @@ extern void compute_creds(struct linux_b
- extern int do_coredump(long signr, int exit_code, struct pt_regs * regs);
- extern int set_binfmt(struct linux_binfmt *new);
- 
-+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
-+void pax_report_insns(void *pc, void *sp);
-+
- #endif /* __KERNEL__ */
- #endif /* _LINUX_BINFMTS_H */
---- a/include/linux/cache.h
-+++ b/include/linux/cache.h
-@@ -16,6 +16,10 @@
- #define __read_mostly
- #endif
- 
-+#ifndef __read_only
-+#define __read_only
-+#endif
-+
- #ifndef ____cacheline_aligned
- #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
- #endif
---- a/include/linux/capability.h
-+++ b/include/linux/capability.h
-@@ -359,6 +359,7 @@ static inline kernel_cap_t cap_invert(ke
- #define cap_is_fs_cap(c)     (CAP_TO_MASK(c) & CAP_FS_MASK)
- 
- int capable(int cap);
-+int capable_nolog(int cap);
- int __capable(struct task_struct *t, int cap);
- 
- #endif /* __KERNEL__ */
---- a/include/linux/elf.h
-+++ b/include/linux/elf.h
-@@ -8,6 +8,10 @@
- 
- struct file;
- 
-+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
-+#undef elf_read_implies_exec
-+#endif
-+
- #ifndef elf_read_implies_exec
-   /* Executables for which elf_read_implies_exec() returns TRUE will
-      have the READ_IMPLIES_EXEC personality flag set automatically.
-@@ -49,6 +53,16 @@ typedef __s64	Elf64_Sxword;
- 
- #define PT_GNU_STACK	(PT_LOOS + 0x474e551)
- 
-+#define PT_PAX_FLAGS	(PT_LOOS + 0x5041580)
-+
-+/* Constants for the e_flags field */
-+#define EF_PAX_PAGEEXEC		1	/* Paging based non-executable pages */
-+#define EF_PAX_EMUTRAMP		2	/* Emulate trampolines */
-+#define EF_PAX_MPROTECT		4	/* Restrict mprotect() */
-+#define EF_PAX_RANDMMAP		8	/* Randomize mmap() base */
-+/*#define EF_PAX_RANDEXEC		16*/	/* Randomize ET_EXEC base */
-+#define EF_PAX_SEGMEXEC		32	/* Segmentation based non-executable pages */
-+
- /* These constants define the different elf file types */
- #define ET_NONE   0
- #define ET_REL    1
-@@ -83,6 +97,8 @@ typedef __s64	Elf64_Sxword;
- #define DT_DEBUG	21
- #define DT_TEXTREL	22
- #define DT_JMPREL	23
-+#define DT_FLAGS	30
-+  #define DF_TEXTREL  0x00000004
- #define DT_ENCODING	32
- #define OLD_DT_LOOS	0x60000000
- #define DT_LOOS		0x6000000d
-@@ -229,6 +245,19 @@ typedef struct elf64_hdr {
- #define PF_W		0x2
- #define PF_X		0x1
- 
-+#define PF_PAGEEXEC	(1U << 4)	/* Enable  PAGEEXEC */
-+#define PF_NOPAGEEXEC	(1U << 5)	/* Disable PAGEEXEC */
-+#define PF_SEGMEXEC	(1U << 6)	/* Enable  SEGMEXEC */
-+#define PF_NOSEGMEXEC	(1U << 7)	/* Disable SEGMEXEC */
-+#define PF_MPROTECT	(1U << 8)	/* Enable  MPROTECT */
-+#define PF_NOMPROTECT	(1U << 9)	/* Disable MPROTECT */
-+/*#define PF_RANDEXEC	(1U << 10)*/	/* Enable  RANDEXEC */
-+/*#define PF_NORANDEXEC	(1U << 11)*/	/* Disable RANDEXEC */
-+#define PF_EMUTRAMP	(1U << 12)	/* Enable  EMUTRAMP */
-+#define PF_NOEMUTRAMP	(1U << 13)	/* Disable EMUTRAMP */
-+#define PF_RANDMMAP	(1U << 14)	/* Enable  RANDMMAP */
-+#define PF_NORANDMMAP	(1U << 15)	/* Disable RANDMMAP */
-+
- typedef struct elf32_phdr{
-   Elf32_Word	p_type;
-   Elf32_Off	p_offset;
-@@ -321,6 +350,8 @@ typedef struct elf64_shdr {
- #define	EI_OSABI	7
- #define	EI_PAD		8
- 
-+#define	EI_PAX		14
-+
- #define	ELFMAG0		0x7f		/* EI_MAG */
- #define	ELFMAG1		'E'
- #define	ELFMAG2		'L'
-@@ -378,6 +409,7 @@ extern Elf32_Dyn _DYNAMIC [];
- #define elf_phdr	elf32_phdr
- #define elf_note	elf32_note
- #define elf_addr_t	Elf32_Off
-+#define elf_dyn		Elf32_Dyn
- 
- #else
- 
-@@ -386,6 +418,7 @@ extern Elf64_Dyn _DYNAMIC [];
- #define elf_phdr	elf64_phdr
- #define elf_note	elf64_note
- #define elf_addr_t	Elf64_Off
-+#define elf_dyn		Elf64_Dyn
- 
- #endif
- 
---- a/include/linux/ext4_fs_extents.h
-+++ b/include/linux/ext4_fs_extents.h
-@@ -50,7 +50,7 @@
- #ifdef EXT_DEBUG
- #define ext_debug(a...)		printk(a)
- #else
--#define ext_debug(a...)
-+#define ext_debug(a...)		do {} while (0)
- #endif
- 
- /*
---- /dev/null
-+++ b/include/linux/gracl.h
-@@ -0,0 +1,317 @@
-+#ifndef GR_ACL_H
-+#define GR_ACL_H
-+
-+#include <linux/grdefs.h>
-+#include <linux/resource.h>
-+#include <linux/dcache.h>
-+#include <asm/resource.h>
-+
-+/* Major status information */
-+
-+#define GR_VERSION  "grsecurity 2.1.11"
-+#define GRSECURITY_VERSION 0x2111
-+
-+enum {
-+
-+	SHUTDOWN = 0,
-+	ENABLE = 1,
-+	SPROLE = 2,
-+	RELOAD = 3,
-+	SEGVMOD = 4,
-+	STATUS = 5,
-+	UNSPROLE = 6,
-+	PASSSET = 7,
-+	SPROLEPAM = 8
-+};
-+
-+/* Password setup definitions
-+ * kernel/grhash.c */
-+enum {
-+	GR_PW_LEN = 128,
-+	GR_SALT_LEN = 16,
-+	GR_SHA_LEN = 32,
-+};
-+
-+enum {
-+	GR_SPROLE_LEN = 64,
-+};
-+
-+#define GR_NLIMITS (RLIMIT_LOCKS + 2)
-+
-+/* Begin Data Structures */
-+
-+struct sprole_pw {
-+	unsigned char *rolename;
-+	unsigned char salt[GR_SALT_LEN];
-+	unsigned char sum[GR_SHA_LEN];	/* 256-bit SHA hash of the password */
-+};
-+
-+struct name_entry {
-+	__u32 key;
-+	ino_t inode;
-+	dev_t device;
-+	char *name;
-+	__u16 len;
-+	__u8 deleted;
-+	struct name_entry *prev;
-+	struct name_entry *next;
-+};
-+
-+struct inodev_entry {
-+	struct name_entry *nentry;
-+	struct inodev_entry *prev;
-+	struct inodev_entry *next;
-+};
-+
-+struct acl_role_db {
-+	struct acl_role_label **r_hash;
-+	__u32 r_size;
-+};
-+
-+struct inodev_db {
-+	struct inodev_entry **i_hash;
-+	__u32 i_size;
-+};
-+
-+struct name_db {
-+	struct name_entry **n_hash;
-+	__u32 n_size;
-+};
-+
-+struct crash_uid {
-+	uid_t uid;
-+	unsigned long expires;
-+};
-+
-+struct gr_hash_struct {
-+	void **table;
-+	void **nametable;
-+	void *first;
-+	__u32 table_size;
-+	__u32 used_size;
-+	int type;
-+};
-+
-+/* Userspace Grsecurity ACL data structures */
-+
-+struct acl_subject_label {
-+	char *filename;
-+	ino_t inode;
-+	dev_t device;
-+	__u32 mode;
-+	__u32 cap_mask;
-+	__u32 cap_lower;
-+
-+	struct rlimit res[GR_NLIMITS];
-+	__u16 resmask;
-+
-+	__u8 user_trans_type;
-+	__u8 group_trans_type;
-+	uid_t *user_transitions;
-+	gid_t *group_transitions;
-+	__u16 user_trans_num;
-+	__u16 group_trans_num;
-+
-+	__u32 ip_proto[8];
-+	__u32 ip_type;
-+	struct acl_ip_label **ips;
-+	__u32 ip_num;
-+
-+	__u32 crashes;
-+	unsigned long expires;
-+
-+	struct acl_subject_label *parent_subject;
-+	struct gr_hash_struct *hash;
-+	struct acl_subject_label *prev;
-+	struct acl_subject_label *next;
-+
-+	struct acl_object_label **obj_hash;
-+	__u32 obj_hash_size;
-+	__u16 pax_flags;
-+};
-+
-+struct role_allowed_ip {
-+	__u32 addr;
-+	__u32 netmask;
-+
-+	struct role_allowed_ip *prev;
-+	struct role_allowed_ip *next;
-+};
-+
-+struct role_transition {
-+	char *rolename;
-+
-+	struct role_transition *prev;
-+	struct role_transition *next;
-+};
-+
-+struct acl_role_label {
-+	char *rolename;
-+	uid_t uidgid;
-+	__u16 roletype;
-+
-+	__u16 auth_attempts;
-+	unsigned long expires;
-+
-+	struct acl_subject_label *root_label;
-+	struct gr_hash_struct *hash;
-+
-+	struct acl_role_label *prev;
-+	struct acl_role_label *next;
-+
-+	struct role_transition *transitions;
-+	struct role_allowed_ip *allowed_ips;
-+	uid_t *domain_children;
-+	__u16 domain_child_num;
-+
-+	struct acl_subject_label **subj_hash;
-+	__u32 subj_hash_size;
-+};
-+
-+struct user_acl_role_db {
-+	struct acl_role_label **r_table;
-+	__u32 num_pointers;		/* Number of allocations to track */
-+	__u32 num_roles;		/* Number of roles */
-+	__u32 num_domain_children;	/* Number of domain children */
-+	__u32 num_subjects;		/* Number of subjects */
-+	__u32 num_objects;		/* Number of objects */
-+};
-+
-+struct acl_object_label {
-+	char *filename;
-+	ino_t inode;
-+	dev_t device;
-+	__u32 mode;
-+
-+	struct acl_subject_label *nested;
-+	struct acl_object_label *globbed;
-+
-+	/* next two structures not used */
-+
-+	struct acl_object_label *prev;
-+	struct acl_object_label *next;
-+};
-+
-+struct acl_ip_label {
-+	char *iface;
-+	__u32 addr;
-+	__u32 netmask;
-+	__u16 low, high;
-+	__u8 mode;
-+	__u32 type;
-+	__u32 proto[8];
-+
-+	/* next two structures not used */
-+
-+	struct acl_ip_label *prev;
-+	struct acl_ip_label *next;
-+};
-+
-+struct gr_arg {
-+	struct user_acl_role_db role_db;
-+	unsigned char pw[GR_PW_LEN];
-+	unsigned char salt[GR_SALT_LEN];
-+	unsigned char sum[GR_SHA_LEN];
-+	unsigned char sp_role[GR_SPROLE_LEN];
-+	struct sprole_pw *sprole_pws;
-+	dev_t segv_device;
-+	ino_t segv_inode;
-+	uid_t segv_uid;
-+	__u16 num_sprole_pws;
-+	__u16 mode;
-+};
-+
-+struct gr_arg_wrapper {
-+	struct gr_arg *arg;
-+	__u32 version;
-+	__u32 size;
-+};
-+
-+struct subject_map {
-+	struct acl_subject_label *user;
-+	struct acl_subject_label *kernel;
-+	struct subject_map *prev;
-+	struct subject_map *next;
-+};
-+
-+struct acl_subj_map_db {
-+	struct subject_map **s_hash;
-+	__u32 s_size;
-+};
-+
-+/* End Data Structures Section */
-+
-+/* Hash functions generated by empirical testing by Brad Spengler
-+   Makes good use of the low bits of the inode.  Generally 0-1 times
-+   in loop for successful match.  0-3 for unsuccessful match.
-+   Shift/add algorithm with modulus of table size and an XOR*/
-+
-+static __inline__ unsigned int
-+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
-+{
-+	return (((uid << type) + (uid ^ type)) % sz);
-+}
-+
-+ static __inline__ unsigned int
-+shash(const struct acl_subject_label *userp, const unsigned int sz)
-+{
-+	return ((const unsigned long)userp % sz);
-+}
-+
-+static __inline__ unsigned int
-+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
-+{
-+	return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
-+}
-+
-+static __inline__ unsigned int
-+nhash(const char *name, const __u16 len, const unsigned int sz)
-+{
-+	return full_name_hash(name, len) % sz;
-+}
-+
-+#define FOR_EACH_ROLE_START(role,iter) \
-+	role = NULL; \
-+	iter = 0; \
-+	while (iter < acl_role_set.r_size) { \
-+		if (role == NULL) \
-+			role = acl_role_set.r_hash[iter]; \
-+		if (role == NULL) { \
-+			iter++; \
-+			continue; \
-+		}
-+
-+#define FOR_EACH_ROLE_END(role,iter) \
-+		role = role->next; \
-+		if (role == NULL) \
-+			iter++; \
-+	}
-+
-+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
-+	subj = NULL; \
-+	iter = 0; \
-+	while (iter < role->subj_hash_size) { \
-+		if (subj == NULL) \
-+			subj = role->subj_hash[iter]; \
-+		if (subj == NULL) { \
-+			iter++; \
-+			continue; \
-+		}
-+
-+#define FOR_EACH_SUBJECT_END(subj,iter) \
-+		subj = subj->next; \
-+		if (subj == NULL) \
-+			iter++; \
-+	}
-+
-+
-+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
-+	subj = role->hash->first; \
-+	while (subj != NULL) {
-+
-+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
-+		subj = subj->next; \
-+	}
-+
-+#endif
-+
---- /dev/null
-+++ b/include/linux/gralloc.h
-@@ -0,0 +1,8 @@
-+#ifndef __GRALLOC_H
-+#define __GRALLOC_H
-+
-+void acl_free_all(void);
-+int acl_alloc_stack_init(unsigned long size);
-+void *acl_alloc(unsigned long len);
-+
-+#endif
---- /dev/null
-+++ b/include/linux/grdefs.h
-@@ -0,0 +1,131 @@
-+#ifndef GRDEFS_H
-+#define GRDEFS_H
-+
-+/* Begin grsecurity status declarations */
-+
-+enum {
-+	GR_READY = 0x01,
-+	GR_STATUS_INIT = 0x00	// disabled state
-+};
-+
-+/* Begin  ACL declarations */
-+
-+/* Role flags */
-+
-+enum {
-+	GR_ROLE_USER = 0x0001,
-+	GR_ROLE_GROUP = 0x0002,
-+	GR_ROLE_DEFAULT = 0x0004,
-+	GR_ROLE_SPECIAL = 0x0008,
-+	GR_ROLE_AUTH = 0x0010,
-+	GR_ROLE_NOPW = 0x0020,
-+	GR_ROLE_GOD = 0x0040,
-+	GR_ROLE_LEARN = 0x0080,
-+	GR_ROLE_TPE = 0x0100,
-+	GR_ROLE_DOMAIN = 0x0200,
-+	GR_ROLE_PAM = 0x0400
-+};
-+
-+/* ACL Subject and Object mode flags */
-+enum {
-+	GR_DELETED = 0x80000000
-+};
-+
-+/* ACL Object-only mode flags */
-+enum {
-+	GR_READ 	= 0x00000001,
-+	GR_APPEND 	= 0x00000002,
-+	GR_WRITE 	= 0x00000004,
-+	GR_EXEC 	= 0x00000008,
-+	GR_FIND 	= 0x00000010,
-+	GR_INHERIT 	= 0x00000020,
-+	GR_SETID 	= 0x00000040,
-+	GR_CREATE 	= 0x00000080,
-+	GR_DELETE 	= 0x00000100,
-+	GR_LINK		= 0x00000200,
-+	GR_AUDIT_READ 	= 0x00000400,
-+	GR_AUDIT_APPEND = 0x00000800,
-+	GR_AUDIT_WRITE 	= 0x00001000,
-+	GR_AUDIT_EXEC 	= 0x00002000,
-+	GR_AUDIT_FIND 	= 0x00004000,
-+	GR_AUDIT_INHERIT= 0x00008000,
-+	GR_AUDIT_SETID 	= 0x00010000,
-+	GR_AUDIT_CREATE = 0x00020000,
-+	GR_AUDIT_DELETE = 0x00040000,
-+	GR_AUDIT_LINK	= 0x00080000,
-+	GR_PTRACERD 	= 0x00100000,
-+	GR_NOPTRACE	= 0x00200000,
-+	GR_SUPPRESS 	= 0x00400000,
-+	GR_NOLEARN 	= 0x00800000
-+};
-+
-+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
-+		   GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
-+		   GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
-+
-+/* ACL subject-only mode flags */
-+enum {
-+	GR_KILL 	= 0x00000001,
-+	GR_VIEW 	= 0x00000002,
-+	GR_PROTECTED 	= 0x00000004,
-+	GR_LEARN 	= 0x00000008,
-+	GR_OVERRIDE 	= 0x00000010,
-+	/* just a placeholder, this mode is only used in userspace */
-+	GR_DUMMY 	= 0x00000020,
-+	GR_PROTSHM	= 0x00000040,
-+	GR_KILLPROC	= 0x00000080,
-+	GR_KILLIPPROC	= 0x00000100,
-+	/* just a placeholder, this mode is only used in userspace */
-+	GR_NOTROJAN	= 0x00000200,
-+	GR_PROTPROCFD	= 0x00000400,
-+	GR_PROCACCT	= 0x00000800,
-+	GR_RELAXPTRACE	= 0x00001000,
-+	GR_NESTED	= 0x00002000,
-+	GR_INHERITLEARN	= 0x00004000,
-+	GR_PROCFIND	= 0x00008000,
-+	GR_POVERRIDE	= 0x00010000,
-+	GR_KERNELAUTH	= 0x00020000,
-+};
-+
-+enum {
-+	GR_PAX_ENABLE_SEGMEXEC	= 0x0001,
-+	GR_PAX_ENABLE_PAGEEXEC	= 0x0002,
-+	GR_PAX_ENABLE_MPROTECT	= 0x0004,
-+	GR_PAX_ENABLE_RANDMMAP	= 0x0008,
-+	GR_PAX_ENABLE_EMUTRAMP	= 0x0010,
-+	GR_PAX_DISABLE_SEGMEXEC	= 0x0100,
-+	GR_PAX_DISABLE_PAGEEXEC	= 0x0200,
-+	GR_PAX_DISABLE_MPROTECT	= 0x0400,
-+	GR_PAX_DISABLE_RANDMMAP	= 0x0800,
-+	GR_PAX_DISABLE_EMUTRAMP	= 0x1000,
-+};
-+
-+enum {
-+	GR_ID_USER	= 0x01,
-+	GR_ID_GROUP	= 0x02,
-+};
-+
-+enum {
-+	GR_ID_ALLOW	= 0x01,
-+	GR_ID_DENY	= 0x02,
-+};
-+
-+#define GR_CRASH_RES	11
-+#define GR_UIDTABLE_MAX 500
-+
-+/* begin resource learning section */
-+enum {
-+	GR_RLIM_CPU_BUMP = 60,
-+	GR_RLIM_FSIZE_BUMP = 50000,
-+	GR_RLIM_DATA_BUMP = 10000,
-+	GR_RLIM_STACK_BUMP = 1000,
-+	GR_RLIM_CORE_BUMP = 10000,
-+	GR_RLIM_RSS_BUMP = 500000,
-+	GR_RLIM_NPROC_BUMP = 1,
-+	GR_RLIM_NOFILE_BUMP = 5,
-+	GR_RLIM_MEMLOCK_BUMP = 50000,
-+	GR_RLIM_AS_BUMP = 500000,
-+	GR_RLIM_LOCKS_BUMP = 2
-+};
-+
-+#endif
---- /dev/null
-+++ b/include/linux/grinternal.h
-@@ -0,0 +1,210 @@
-+#ifndef __GRINTERNAL_H
-+#define __GRINTERNAL_H
-+
-+#ifdef CONFIG_GRKERNSEC
-+
-+#include <linux/fs.h>
-+#include <linux/gracl.h>
-+#include <linux/grdefs.h>
-+#include <linux/grmsg.h>
-+
-+void gr_add_learn_entry(const char *fmt, ...);
-+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
-+			    const struct vfsmount *mnt);
-+__u32 gr_check_create(const struct dentry *new_dentry,
-+			     const struct dentry *parent,
-+			     const struct vfsmount *mnt, const __u32 mode);
-+int gr_check_protected_task(const struct task_struct *task);
-+__u32 to_gr_audit(const __u32 reqmode);
-+int gr_set_acls(const int type);
-+
-+int gr_acl_is_enabled(void);
-+char gr_roletype_to_char(void);
-+
-+void gr_handle_alertkill(struct task_struct *task);
-+char *gr_to_filename(const struct dentry *dentry,
-+			    const struct vfsmount *mnt);
-+char *gr_to_filename1(const struct dentry *dentry,
-+			    const struct vfsmount *mnt);
-+char *gr_to_filename2(const struct dentry *dentry,
-+			    const struct vfsmount *mnt);
-+char *gr_to_filename3(const struct dentry *dentry,
-+			    const struct vfsmount *mnt);
-+
-+extern int grsec_enable_link;
-+extern int grsec_enable_fifo;
-+extern int grsec_enable_execve;
-+extern int grsec_enable_shm;
-+extern int grsec_enable_execlog;
-+extern int grsec_enable_signal;
-+extern int grsec_enable_forkfail;
-+extern int grsec_enable_time;
-+extern int grsec_enable_chroot_shmat;
-+extern int grsec_enable_chroot_findtask;
-+extern int grsec_enable_chroot_mount;
-+extern int grsec_enable_chroot_double;
-+extern int grsec_enable_chroot_pivot;
-+extern int grsec_enable_chroot_chdir;
-+extern int grsec_enable_chroot_chmod;
-+extern int grsec_enable_chroot_mknod;
-+extern int grsec_enable_chroot_fchdir;
-+extern int grsec_enable_chroot_nice;
-+extern int grsec_enable_chroot_execlog;
-+extern int grsec_enable_chroot_caps;
-+extern int grsec_enable_chroot_sysctl;
-+extern int grsec_enable_chroot_unix;
-+extern int grsec_enable_tpe;
-+extern int grsec_tpe_gid;
-+extern int grsec_enable_tpe_all;
-+extern int grsec_enable_sidcaps;
-+extern int grsec_enable_socket_all;
-+extern int grsec_socket_all_gid;
-+extern int grsec_enable_socket_client;
-+extern int grsec_socket_client_gid;
-+extern int grsec_enable_socket_server;
-+extern int grsec_socket_server_gid;
-+extern int grsec_audit_gid;
-+extern int grsec_enable_group;
-+extern int grsec_enable_audit_ipc;
-+extern int grsec_enable_audit_textrel;
-+extern int grsec_enable_mount;
-+extern int grsec_enable_chdir;
-+extern int grsec_resource_logging;
-+extern int grsec_lock;
-+
-+extern spinlock_t grsec_alert_lock;
-+extern unsigned long grsec_alert_wtime;
-+extern unsigned long grsec_alert_fyet;
-+
-+extern spinlock_t grsec_audit_lock;
-+
-+extern rwlock_t grsec_exec_file_lock;
-+
-+#define gr_task_fullpath(tsk) (tsk->exec_file ? \
-+			gr_to_filename2(tsk->exec_file->f_dentry, \
-+			tsk->exec_file->f_vfsmnt) : "/")
-+
-+#define gr_parent_task_fullpath(tsk) (tsk->parent->exec_file ? \
-+			gr_to_filename3(tsk->parent->exec_file->f_dentry, \
-+			tsk->parent->exec_file->f_vfsmnt) : "/")
-+
-+#define gr_task_fullpath0(tsk) (tsk->exec_file ? \
-+			gr_to_filename(tsk->exec_file->f_dentry, \
-+			tsk->exec_file->f_vfsmnt) : "/")
-+
-+#define gr_parent_task_fullpath0(tsk) (tsk->parent->exec_file ? \
-+			gr_to_filename1(tsk->parent->exec_file->f_dentry, \
-+			tsk->parent->exec_file->f_vfsmnt) : "/")
-+
-+#define proc_is_chrooted(tsk_a)  ((tsk_a->pid > 1) && (tsk_a->fs != NULL) && \
-+			  ((tsk_a->fs->root->d_inode->i_sb->s_dev != \
-+			  child_reaper(tsk_a)->fs->root->d_inode->i_sb->s_dev) || \
-+			  (tsk_a->fs->root->d_inode->i_ino != \
-+			  child_reaper(tsk_a)->fs->root->d_inode->i_ino)))
-+
-+#define have_same_root(tsk_a,tsk_b) ((tsk_a->fs != NULL) && (tsk_b->fs != NULL) && \
-+			  (tsk_a->fs->root->d_inode->i_sb->s_dev == \
-+			  tsk_b->fs->root->d_inode->i_sb->s_dev) && \
-+			  (tsk_a->fs->root->d_inode->i_ino == \
-+			  tsk_b->fs->root->d_inode->i_ino))
-+
-+#define DEFAULTSECARGS(task) gr_task_fullpath(task), task->comm, \
-+		       task->pid, task->uid, \
-+		       task->euid, task->gid, task->egid, \
-+		       gr_parent_task_fullpath(task), \
-+		       task->parent->comm, task->parent->pid, \
-+		       task->parent->uid, task->parent->euid, \
-+		       task->parent->gid, task->parent->egid
-+
-+#define GR_CHROOT_CAPS ( \
-+	CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
-+	CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
-+	CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
-+	CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
-+	CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
-+	CAP_TO_MASK(CAP_IPC_OWNER))
-+
-+#define security_learn(normal_msg,args...) \
-+({ \
-+	read_lock(&grsec_exec_file_lock); \
-+	gr_add_learn_entry(normal_msg "\n", ## args); \
-+	read_unlock(&grsec_exec_file_lock); \
-+})
-+
-+enum {
-+	GR_DO_AUDIT,
-+	GR_DONT_AUDIT,
-+	GR_DONT_AUDIT_GOOD
-+};
-+
-+enum {
-+	GR_TTYSNIFF,
-+	GR_RBAC,
-+	GR_RBAC_STR,
-+	GR_STR_RBAC,
-+	GR_RBAC_MODE2,
-+	GR_RBAC_MODE3,
-+	GR_FILENAME,
-+	GR_SYSCTL_HIDDEN,
-+	GR_NOARGS,
-+	GR_ONE_INT,
-+	GR_ONE_INT_TWO_STR,
-+	GR_ONE_STR,
-+	GR_STR_INT,
-+	GR_TWO_INT,
-+	GR_THREE_INT,
-+	GR_FIVE_INT_TWO_STR,
-+	GR_TWO_STR,
-+	GR_THREE_STR,
-+	GR_FOUR_STR,
-+	GR_STR_FILENAME,
-+	GR_FILENAME_STR,
-+	GR_FILENAME_TWO_INT,
-+	GR_FILENAME_TWO_INT_STR,
-+	GR_TEXTREL,
-+	GR_PTRACE,
-+	GR_RESOURCE,
-+	GR_CAP,
-+	GR_SIG,
-+	GR_CRASH1,
-+	GR_CRASH2,
-+	GR_PSACCT
-+};
-+
-+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
-+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
-+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
-+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
-+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
-+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
-+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
-+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
-+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
-+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
-+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
-+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
-+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
-+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
-+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
-+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
-+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
-+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
-+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
-+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
-+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
-+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
-+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
-+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
-+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
-+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
-+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
-+#define gr_log_sig(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG, task, num)
-+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
-+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
-+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
-+
-+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
-+
-+#endif
-+
-+#endif
---- /dev/null
-+++ b/include/linux/grmsg.h
-@@ -0,0 +1,108 @@
-+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
-+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%u.%u.%u.%u TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%u.%u.%u.%u TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
-+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
-+#define GR_STOPMOD_MSG "denied modification of module state by "
-+#define GR_IOPERM_MSG "denied use of ioperm() by "
-+#define GR_IOPL_MSG "denied use of iopl() by "
-+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
-+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
-+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
-+#define GR_KMEM_MSG "denied write of /dev/kmem by "
-+#define GR_PORT_OPEN_MSG "denied open of /dev/port by "
-+#define GR_MEM_WRITE_MSG "denied write of /dev/mem by "
-+#define GR_MEM_MMAP_MSG "denied mmap write of /dev/[k]mem by "
-+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
-+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%u.%u.%u.%u"
-+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%u.%u.%u.%u"
-+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
-+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
-+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
-+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
-+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
-+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
-+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
-+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%u.%u.%u.%u %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
-+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
-+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
-+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
-+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
-+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
-+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
-+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
-+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
-+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
-+#define GR_NPROC_MSG "denied overstep of process limit by "
-+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
-+#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
-+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
-+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
-+#define GR_MOUNT_CHROOT_MSG "denied mount of %.30s as %.930s from chroot by "
-+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
-+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
-+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
-+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
-+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
-+#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
-+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
-+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
-+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
-+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
-+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
-+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
-+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
-+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
-+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
-+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
-+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
-+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
-+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
-+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
-+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
-+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
-+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
-+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
-+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
-+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
-+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
-+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
-+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
-+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
-+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
-+#define GR_UNSPROLEF_ACL_MSG "special role unauth of %s failure for "
-+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
-+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
-+#define GR_FAILFORK_MSG "failed fork with errno %d by "
-+#define GR_NICE_CHROOT_MSG "denied priority change by "
-+#define GR_UNISIGLOG_MSG "signal %d sent to "
-+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
-+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
-+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
-+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
-+#define GR_TIME_MSG "time set by "
-+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
-+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
-+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
-+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
-+#define GR_SOCK2_MSG "denied socket(%d,%.16s,%.16s) by "
-+#define GR_BIND_MSG "denied bind() by "
-+#define GR_CONNECT_MSG "denied connect() by "
-+#define GR_BIND_ACL_MSG "denied bind() to %u.%u.%u.%u port %u sock type %.16s protocol %.16s by "
-+#define GR_CONNECT_ACL_MSG "denied connect() to %u.%u.%u.%u port %u sock type %.16s protocol %.16s by "
-+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%u.%u.%u.%u\t%u\t%u\t%u\t%u\t%u.%u.%u.%u"
-+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
-+#define GR_CAP_ACL_MSG "use of %s denied for "
-+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
-+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
-+#define GR_REMOUNT_AUDIT_MSG "remount of %.30s by "
-+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.30s by "
-+#define GR_MOUNT_AUDIT_MSG "mount of %.30s to %.64s by "
-+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
-+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
-+#define GR_MSGQ_AUDIT_MSG "message queue created by "
-+#define GR_MSGQR_AUDIT_MSG "message queue of uid:%u euid:%u removed by "
-+#define GR_SEM_AUDIT_MSG "semaphore created by "
-+#define GR_SEMR_AUDIT_MSG "semaphore of uid:%u euid:%u removed by "
-+#define GR_SHM_AUDIT_MSG "shared memory of size %d created by "
-+#define GR_SHMR_AUDIT_MSG "shared memory of uid:%u euid:%u removed by "
-+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
-+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
---- /dev/null
-+++ b/include/linux/grsecurity.h
-@@ -0,0 +1,193 @@
-+#ifndef GR_SECURITY_H
-+#define GR_SECURITY_H
-+#include <linux/fs.h>
-+#include <linux/binfmts.h>
-+#include <linux/gracl.h>
-+
-+void gr_handle_brute_attach(struct task_struct *p);
-+void gr_handle_brute_check(void);
-+
-+char gr_roletype_to_char(void);
-+
-+int gr_check_user_change(int real, int effective, int fs);
-+int gr_check_group_change(int real, int effective, int fs);
-+
-+void gr_del_task_from_ip_table(struct task_struct *p);
-+
-+int gr_pid_is_chrooted(struct task_struct *p);
-+int gr_handle_chroot_nice(void);
-+int gr_handle_chroot_sysctl(const int op);
-+int gr_handle_chroot_setpriority(struct task_struct *p,
-+					const int niceval);
-+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
-+int gr_handle_chroot_chroot(const struct dentry *dentry,
-+				   const struct vfsmount *mnt);
-+void gr_handle_chroot_caps(struct task_struct *task);
-+void gr_handle_chroot_chdir(struct dentry *dentry, struct vfsmount *mnt);
-+int gr_handle_chroot_chmod(const struct dentry *dentry,
-+				  const struct vfsmount *mnt, const int mode);
-+int gr_handle_chroot_mknod(const struct dentry *dentry,
-+				  const struct vfsmount *mnt, const int mode);
-+int gr_handle_chroot_mount(const struct dentry *dentry,
-+				  const struct vfsmount *mnt,
-+				  const char *dev_name);
-+int gr_handle_chroot_pivot(void);
-+int gr_handle_chroot_unix(const pid_t pid);
-+
-+int gr_handle_rawio(const struct inode *inode);
-+int gr_handle_nproc(void);
-+
-+void gr_handle_ioperm(void);
-+void gr_handle_iopl(void);
-+
-+int gr_tpe_allow(const struct file *file);
-+
-+int gr_random_pid(void);
-+
-+void gr_log_forkfail(const int retval);
-+void gr_log_timechange(void);
-+void gr_log_signal(const int sig, const struct task_struct *t);
-+void gr_log_chdir(const struct dentry *dentry,
-+			 const struct vfsmount *mnt);
-+void gr_log_chroot_exec(const struct dentry *dentry,
-+			       const struct vfsmount *mnt);
-+void gr_handle_exec_args(struct linux_binprm *bprm, char **argv);
-+void gr_log_remount(const char *devname, const int retval);
-+void gr_log_unmount(const char *devname, const int retval);
-+void gr_log_mount(const char *from, const char *to, const int retval);
-+void gr_log_msgget(const int ret, const int msgflg);
-+void gr_log_msgrm(const uid_t uid, const uid_t cuid);
-+void gr_log_semget(const int err, const int semflg);
-+void gr_log_semrm(const uid_t uid, const uid_t cuid);
-+void gr_log_shmget(const int err, const int shmflg, const size_t size);
-+void gr_log_shmrm(const uid_t uid, const uid_t cuid);
-+void gr_log_textrel(struct vm_area_struct *vma);
-+
-+int gr_handle_follow_link(const struct inode *parent,
-+				 const struct inode *inode,
-+				 const struct dentry *dentry,
-+				 const struct vfsmount *mnt);
-+int gr_handle_fifo(const struct dentry *dentry,
-+			  const struct vfsmount *mnt,
-+			  const struct dentry *dir, const int flag,
-+			  const int acc_mode);
-+int gr_handle_hardlink(const struct dentry *dentry,
-+			      const struct vfsmount *mnt,
-+			      struct inode *inode,
-+			      const int mode, const char *to);
-+
-+int gr_task_is_capable(struct task_struct *task, const int cap);
-+int gr_is_capable_nolog(const int cap);
-+void gr_learn_resource(const struct task_struct *task, const int limit,
-+			      const unsigned long wanted, const int gt);
-+void gr_copy_label(struct task_struct *tsk);
-+void gr_handle_crash(struct task_struct *task, const int sig);
-+int gr_handle_signal(const struct task_struct *p, const int sig);
-+int gr_check_crash_uid(const uid_t uid);
-+int gr_check_protected_task(const struct task_struct *task);
-+int gr_acl_handle_mmap(const struct file *file,
-+			      const unsigned long prot);
-+int gr_acl_handle_mprotect(const struct file *file,
-+				  const unsigned long prot);
-+int gr_check_hidden_task(const struct task_struct *tsk);
-+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
-+				    const struct vfsmount *mnt);
-+__u32 gr_acl_handle_utime(const struct dentry *dentry,
-+				 const struct vfsmount *mnt);
-+__u32 gr_acl_handle_access(const struct dentry *dentry,
-+				  const struct vfsmount *mnt, const int fmode);
-+__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
-+				  const struct vfsmount *mnt, mode_t mode);
-+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
-+				 const struct vfsmount *mnt, mode_t mode);
-+__u32 gr_acl_handle_chown(const struct dentry *dentry,
-+				 const struct vfsmount *mnt);
-+int gr_handle_ptrace(struct task_struct *task, const long request);
-+int gr_handle_proc_ptrace(struct task_struct *task);
-+__u32 gr_acl_handle_execve(const struct dentry *dentry,
-+				  const struct vfsmount *mnt);
-+int gr_check_crash_exec(const struct file *filp);
-+int gr_acl_is_enabled(void);
-+void gr_set_kernel_label(struct task_struct *task);
-+void gr_set_role_label(struct task_struct *task, const uid_t uid,
-+			      const gid_t gid);
-+int gr_set_proc_label(const struct dentry *dentry,
-+			      const struct vfsmount *mnt);
-+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
-+				       const struct vfsmount *mnt);
-+__u32 gr_acl_handle_open(const struct dentry *dentry,
-+				const struct vfsmount *mnt, const int fmode);
-+__u32 gr_acl_handle_creat(const struct dentry *dentry,
-+				 const struct dentry *p_dentry,
-+				 const struct vfsmount *p_mnt, const int fmode,
-+				 const int imode);
-+void gr_handle_create(const struct dentry *dentry,
-+			     const struct vfsmount *mnt);
-+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
-+				 const struct dentry *parent_dentry,
-+				 const struct vfsmount *parent_mnt,
-+				 const int mode);
-+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
-+				 const struct dentry *parent_dentry,
-+				 const struct vfsmount *parent_mnt);
-+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
-+				 const struct vfsmount *mnt);
-+void gr_handle_delete(const ino_t ino, const dev_t dev);
-+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
-+				  const struct vfsmount *mnt);
-+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
-+				   const struct dentry *parent_dentry,
-+				   const struct vfsmount *parent_mnt,
-+				   const char *from);
-+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
-+				const struct dentry *parent_dentry,
-+				const struct vfsmount *parent_mnt,
-+				const struct dentry *old_dentry,
-+				const struct vfsmount *old_mnt, const char *to);
-+int gr_acl_handle_rename(struct dentry *new_dentry,
-+				struct dentry *parent_dentry,
-+				const struct vfsmount *parent_mnt,
-+				struct dentry *old_dentry,
-+				struct inode *old_parent_inode,
-+				struct vfsmount *old_mnt, const char *newname);
-+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
-+				struct dentry *old_dentry,
-+				struct dentry *new_dentry,
-+				struct vfsmount *mnt, const __u8 replace);
-+__u32 gr_check_link(const struct dentry *new_dentry,
-+			   const struct dentry *parent_dentry,
-+			   const struct vfsmount *parent_mnt,
-+			   const struct dentry *old_dentry,
-+			   const struct vfsmount *old_mnt);
-+int gr_acl_handle_filldir(const struct file *file, const char *name,
-+				 const unsigned int namelen, const ino_t ino);
-+
-+__u32 gr_acl_handle_unix(const struct dentry *dentry,
-+				const struct vfsmount *mnt);
-+void gr_acl_handle_exit(void);
-+void gr_acl_handle_psacct(struct task_struct *task, const long code);
-+int gr_acl_handle_procpidmem(const struct task_struct *task);
-+__u32 gr_cap_rtnetlink(void);
-+
-+#ifdef CONFIG_SYSVIPC
-+void gr_shm_exit(struct task_struct *task);
-+#else
-+static inline void gr_shm_exit(struct task_struct *task)
-+{
-+	return;
-+}
-+#endif
-+
-+#ifdef CONFIG_GRKERNSEC
-+void gr_handle_mem_write(void);
-+void gr_handle_kmem_write(void);
-+void gr_handle_open_port(void);
-+int gr_handle_mem_mmap(const unsigned long offset,
-+			      struct vm_area_struct *vma);
-+
-+extern int grsec_enable_dmesg;
-+extern int grsec_enable_randsrc;
-+extern int grsec_enable_shm;
-+#endif
-+
-+#endif
---- a/include/linux/highmem.h
-+++ b/include/linux/highmem.h
-@@ -124,6 +124,13 @@ static inline void clear_highpage(struct
- 	kunmap_atomic(kaddr, KM_USER0);
- }
- 
-+static inline void sanitize_highpage(struct page *page)
-+{
-+	void *kaddr = kmap_atomic(page, KM_CLEARPAGE);
-+	clear_page(kaddr);
-+	kunmap_atomic(kaddr, KM_CLEARPAGE);
-+}
-+
- /*
-  * Same but also flushes aliased cache contents to RAM.
-  *
-@@ -132,14 +139,14 @@ static inline void clear_highpage(struct
-  */
- #define zero_user_page(page, offset, size, km_type)		\
- 	do {							\
--		void *kaddr;					\
-+		void *__kaddr;					\
- 								\
- 		BUG_ON((offset) + (size) > PAGE_SIZE);		\
- 								\
--		kaddr = kmap_atomic(page, km_type);		\
--		memset((char *)kaddr + (offset), 0, (size));	\
-+		__kaddr = kmap_atomic(page, km_type);		\
-+		memset((char *)__kaddr + (offset), 0, (size));	\
- 		flush_dcache_page(page);			\
--		kunmap_atomic(kaddr, (km_type));		\
-+		kunmap_atomic(__kaddr, (km_type));		\
- 	} while (0)
- 
- static inline void __deprecated memclear_highpage_flush(struct page *page,
---- a/include/linux/irqflags.h
-+++ b/include/linux/irqflags.h
-@@ -84,10 +84,10 @@
- 
- #define irqs_disabled()						\
- ({								\
--	unsigned long flags;					\
-+	unsigned long __flags;					\
- 								\
--	raw_local_save_flags(flags);				\
--	raw_irqs_disabled_flags(flags);				\
-+	raw_local_save_flags(__flags);				\
-+	raw_irqs_disabled_flags(__flags);			\
- })
- 
- #define irqs_disabled_flags(flags)	raw_irqs_disabled_flags(flags)
---- a/include/linux/jbd2.h
-+++ b/include/linux/jbd2.h
-@@ -68,7 +68,7 @@ extern u8 jbd2_journal_enable_debug;
- 		}							\
- 	} while (0)
- #else
--#define jbd_debug(f, a...)	/**/
-+#define jbd_debug(f, a...)	do {} while (0)
- #endif
- 
- extern void * __jbd2_kmalloc (const char *where, size_t size, gfp_t flags, int retry);
---- a/include/linux/jbd.h
-+++ b/include/linux/jbd.h
-@@ -68,7 +68,7 @@ extern int journal_enable_debug;
- 		}							\
- 	} while (0)
- #else
--#define jbd_debug(f, a...)	/**/
-+#define jbd_debug(f, a...)	do {} while (0)
- #endif
- 
- extern void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry);
---- a/include/linux/libata.h
-+++ b/include/linux/libata.h
-@@ -63,11 +63,11 @@
- #ifdef ATA_VERBOSE_DEBUG
- #define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
- #else
--#define VPRINTK(fmt, args...)
-+#define VPRINTK(fmt, args...) do {} while (0)
- #endif	/* ATA_VERBOSE_DEBUG */
- #else
--#define DPRINTK(fmt, args...)
--#define VPRINTK(fmt, args...)
-+#define DPRINTK(fmt, args...) do {} while (0)
-+#define VPRINTK(fmt, args...) do {} while (0)
- #endif	/* ATA_DEBUG */
- 
- #define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
---- a/include/linux/mm.h
-+++ b/include/linux/mm.h
-@@ -38,6 +38,7 @@ extern int sysctl_legacy_va_layout;
- #include <asm/page.h>
- #include <asm/pgtable.h>
- #include <asm/processor.h>
-+#include <asm/mman.h>
- 
- #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
- 
-@@ -111,6 +112,8 @@ struct vm_area_struct {
- #ifdef CONFIG_NUMA
- 	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
- #endif
-+
-+	struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
- };
- 
- extern struct kmem_cache *vm_area_cachep;
-@@ -171,6 +174,14 @@ extern unsigned int kobjsize(const void 
- 
- #define VM_CAN_NONLINEAR 0x08000000	/* Has ->fault & does nonlinear pages */
- 
-+#ifdef CONFIG_PAX_PAGEEXEC
-+#define VM_PAGEEXEC	0x10000000	/* vma->vm_page_prot needs special handling */
-+#endif
-+
-+#ifdef CONFIG_PAX_MPROTECT
-+#define VM_MAYNOTWRITE	0x20000000	/* vma cannot be granted VM_WRITE any more */
-+#endif
-+
- #ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
- #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
- #endif
-@@ -862,6 +873,8 @@ struct shrinker {
- extern void register_shrinker(struct shrinker *);
- extern void unregister_shrinker(struct shrinker *);
- 
-+pgprot_t vm_get_page_prot(unsigned long vm_flags);
-+
- int vma_wants_writenotify(struct vm_area_struct *vma);
- 
- extern pte_t *FASTCALL(get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl));
-@@ -1088,6 +1101,7 @@ out:
- }
- 
- extern int do_munmap(struct mm_struct *, unsigned long, size_t);
-+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
- 
- extern unsigned long do_brk(unsigned long, unsigned long);
- 
-@@ -1142,6 +1156,10 @@ extern struct vm_area_struct * find_vma(
- extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
- 					     struct vm_area_struct **pprev);
- 
-+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
-+extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
-+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
-+
- /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
-    NULL if none.  Assume start_addr < end_addr. */
- static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
-@@ -1158,7 +1176,6 @@ static inline unsigned long vma_pages(st
- 	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
- }
- 
--pgprot_t vm_get_page_prot(unsigned long vm_flags);
- struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
- struct page *vmalloc_to_page(void *addr);
- unsigned long vmalloc_to_pfn(void *addr);
-@@ -1218,5 +1235,11 @@ extern int randomize_va_space;
- 
- const char * arch_vma_name(struct vm_area_struct *vma);
- 
-+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
-+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
-+#else
-+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
-+#endif
-+
- #endif /* __KERNEL__ */
- #endif /* _LINUX_MM_H */
---- a/include/linux/module.h
-+++ b/include/linux/module.h
-@@ -295,16 +295,16 @@ struct module
- 	int (*init)(void);
- 
- 	/* If this is non-NULL, vfree after init() returns */
--	void *module_init;
-+	void *module_init_rx, *module_init_rw;
- 
- 	/* Here is the actual code + data, vfree'd on unload. */
--	void *module_core;
-+	void *module_core_rx, *module_core_rw;
- 
- 	/* Here are the sizes of the init and core sections */
--	unsigned long init_size, core_size;
-+	unsigned long init_size_rw, core_size_rw;
- 
- 	/* The size of the executable code in each section.  */
--	unsigned long init_text_size, core_text_size;
-+	unsigned long init_size_rx, core_size_rx;
- 
- 	/* The handle returned from unwind_add_table. */
- 	void *unwind_info;
---- a/include/linux/moduleloader.h
-+++ b/include/linux/moduleloader.h
-@@ -17,9 +17,21 @@ int module_frob_arch_sections(Elf_Ehdr *
-    sections.  Returns NULL on failure. */
- void *module_alloc(unsigned long size);
- 
-+#ifdef CONFIG_PAX_KERNEXEC
-+void *module_alloc_exec(unsigned long size);
-+#else
-+#define module_alloc_exec(x) module_alloc(x)
-+#endif
-+
- /* Free memory returned from module_alloc. */
- void module_free(struct module *mod, void *module_region);
- 
-+#ifdef CONFIG_PAX_KERNEXEC
-+void module_free_exec(struct module *mod, void *module_region);
-+#else
-+#define module_free_exec(x, y) module_free(x, y)
-+#endif
-+
- /* Apply the given relocation to the (simplified) ELF.  Return -error
-    or 0. */
- int apply_relocate(Elf_Shdr *sechdrs,
---- a/include/linux/percpu.h
-+++ b/include/linux/percpu.h
-@@ -18,7 +18,7 @@
- #endif
- 
- #define PERCPU_ENOUGH_ROOM						\
--	(__per_cpu_end - __per_cpu_start + PERCPU_MODULE_RESERVE)
-+	((unsigned long)(__per_cpu_end - __per_cpu_start + PERCPU_MODULE_RESERVE))
- #endif	/* PERCPU_ENOUGH_ROOM */
- 
- /*
---- a/include/linux/random.h
-+++ b/include/linux/random.h
-@@ -72,6 +72,11 @@ unsigned long randomize_range(unsigned l
- u32 random32(void);
- void srandom32(u32 seed);
- 
-+static inline unsigned long pax_get_random_long(void)
-+{
-+	return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
-+}
-+
- #endif /* __KERNEL___ */
- 
- #endif /* _LINUX_RANDOM_H */
---- a/include/linux/sched.h
-+++ b/include/linux/sched.h
-@@ -92,6 +92,7 @@ struct sched_param {
- struct exec_domain;
- struct futex_pi_state;
- struct bio;
-+struct linux_binprm;
- 
- /*
-  * List of flags we want to share for kernel threads,
-@@ -432,6 +433,24 @@ struct mm_struct {
- 	/* aio bits */
- 	rwlock_t		ioctx_list_lock;
- 	struct kioctx		*ioctx_list;
-+
-+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
-+	unsigned long pax_flags;
-+#endif
-+
-+#ifdef CONFIG_PAX_DLRESOLVE
-+	unsigned long call_dl_resolve;
-+#endif
-+
-+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
-+	unsigned long call_syscall;
-+#endif
-+
-+#ifdef CONFIG_PAX_ASLR
-+	unsigned long delta_mmap;		/* randomized offset */
-+	unsigned long delta_stack;		/* randomized offset */
-+#endif
-+
- };
- 
- struct sighand_struct {
-@@ -556,6 +575,15 @@ struct signal_struct {
- 	unsigned audit_tty;
- 	struct tty_audit_buf *tty_audit_buf;
- #endif
-+
-+#ifdef CONFIG_GRKERNSEC
-+	u32 curr_ip;
-+	u32 gr_saddr;
-+	u32 gr_daddr;
-+	u16 gr_sport;
-+	u16 gr_dport;
-+	u8 used_accept:1;
-+#endif
- };
- 
- /* Context switch must be unlocked if interrupts are to be enabled */
-@@ -1017,8 +1045,8 @@ struct task_struct {
- 	struct list_head thread_group;
- 
- 	struct completion *vfork_done;		/* for vfork() */
--	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
--	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
-+	pid_t __user *set_child_tid;		/* CLONE_CHILD_SETTID */
-+	pid_t __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
- 
- 	unsigned int rt_priority;
- 	cputime_t utime, stime;
-@@ -1183,6 +1211,17 @@ struct task_struct {
- 	struct list_head pi_state_list;
- 	struct futex_pi_state *pi_state_cache;
- 
-+#ifdef CONFIG_GRKERNSEC
-+	/* grsecurity */
-+	struct acl_subject_label *acl;
-+	struct acl_role_label *role;
-+	struct file *exec_file;
-+	u16 acl_role_id;
-+	u8 acl_sp_role:1;
-+	u8 is_writable:1;
-+	u8 brute:1;
-+#endif
-+
- 	atomic_t fs_excl;	/* holding fs exclusive resources */
- 	struct rcu_head rcu;
- 
-@@ -1198,6 +1237,46 @@ struct task_struct {
- #endif
- };
- 
-+#define MF_PAX_PAGEEXEC		0x01000000	/* Paging based non-executable pages */
-+#define MF_PAX_EMUTRAMP		0x02000000	/* Emulate trampolines */
-+#define MF_PAX_MPROTECT		0x04000000	/* Restrict mprotect() */
-+#define MF_PAX_RANDMMAP		0x08000000	/* Randomize mmap() base */
-+/*#define MF_PAX_RANDEXEC		0x10000000*/	/* Randomize ET_EXEC base */
-+#define MF_PAX_SEGMEXEC		0x20000000	/* Segmentation based non-executable pages */
-+
-+#ifdef CONFIG_PAX_SOFTMODE
-+extern unsigned int pax_softmode;
-+#endif
-+
-+extern int pax_check_flags(unsigned long *);
-+
-+/* if tsk != current then task_lock must be held on it */
-+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
-+static inline unsigned long pax_get_flags(struct task_struct *tsk)
-+{
-+	if (likely(tsk->mm))
-+		return tsk->mm->pax_flags;
-+	else
-+		return 0UL;
-+}
-+
-+/* if tsk != current then task_lock must be held on it */
-+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
-+{
-+	if (likely(tsk->mm)) {
-+		tsk->mm->pax_flags = flags;
-+		return 0;
-+	}
-+	return -EINVAL;
-+}
-+#endif
-+
-+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
-+extern void pax_set_initial_flags(struct linux_binprm *bprm);
-+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
-+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
-+#endif
-+
- /*
-  * Priority of a process goes from 0..MAX_PRIO-1, valid RT
-  * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
-@@ -1831,6 +1910,12 @@ extern void arch_pick_mmap_layout(struct
- static inline void arch_pick_mmap_layout(struct mm_struct *mm)
- {
- 	mm->mmap_base = TASK_UNMAPPED_BASE;
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+	if (mm->pax_flags & MF_PAX_RANDMMAP)
-+		mm->mmap_base += mm->delta_mmap;
-+#endif
-+
- 	mm->get_unmapped_area = arch_get_unmapped_area;
- 	mm->unmap_area = arch_unmap_area;
- }
---- a/include/linux/screen_info.h
-+++ b/include/linux/screen_info.h
-@@ -42,7 +42,8 @@ struct screen_info {
- 	u16 pages;		/* 0x32 */
- 	u16 vesa_attributes;	/* 0x34 */
- 	u32 capabilities;       /* 0x36 */
--	u8  _reserved[6];	/* 0x3a */
-+	u16 vesapm_size;	/* 0x3a */
-+	u8  _reserved[4];	/* 0x3c */
- } __attribute__((packed));
- 
- extern struct screen_info screen_info;
---- a/include/linux/security.h
-+++ b/include/linux/security.h
-@@ -2796,7 +2796,7 @@ static inline struct dentry *securityfs_
- 						mode_t mode,
- 						struct dentry *parent,
- 						void *data,
--						struct file_operations *fops)
-+						const struct file_operations *fops)
- {
- 	return ERR_PTR(-ENODEV);
- }
---- a/include/linux/shm.h
-+++ b/include/linux/shm.h
-@@ -86,6 +86,10 @@ struct shmid_kernel /* private to the ke
- 	pid_t			shm_cprid;
- 	pid_t			shm_lprid;
- 	struct user_struct	*mlock_user;
-+#ifdef CONFIG_GRKERNSEC
-+	time_t			shm_createtime;
-+	pid_t			shm_lapid;
-+#endif
- };
- 
- /* shm_mode upper byte flags */
---- a/include/linux/skbuff.h
-+++ b/include/linux/skbuff.h
-@@ -385,7 +385,7 @@ extern void	      skb_truesize_bug(struc
- 
- static inline void skb_truesize_check(struct sk_buff *skb)
- {
--	if (unlikely((int)skb->truesize < sizeof(struct sk_buff) + skb->len))
-+	if (unlikely(skb->truesize < sizeof(struct sk_buff) + skb->len))
- 		skb_truesize_bug(skb);
- }
- 
---- a/include/linux/sysctl.h
-+++ b/include/linux/sysctl.h
-@@ -168,9 +168,21 @@ enum
- #ifdef CONFIG_ALPHA_UAC_SYSCTL
- 	KERN_UAC_POLICY=78,	/* int: Alpha unaligned access control policy flags */
- #endif /* CONFIG_ALPHA_UAC_SYSCTL */
--};
-+#ifdef CONFIG_GRKERNSEC
-+	KERN_GRSECURITY=98,	/* grsecurity */
-+#endif
-+
-+#ifdef CONFIG_PAX_SOFTMODE
-+	KERN_PAX=99,		/* PaX control */
-+#endif
- 
-+};
- 
-+#ifdef CONFIG_PAX_SOFTMODE
-+enum {
-+	PAX_SOFTMODE=1		/* PaX: disable/enable soft mode */
-+};
-+#endif
- 
- /* CTL_VM names: */
- enum
---- a/include/linux/uaccess.h
-+++ b/include/linux/uaccess.h
-@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
- 		long ret;				\
- 		mm_segment_t old_fs = get_fs();		\
- 							\
--		set_fs(KERNEL_DS);			\
- 		pagefault_disable();			\
-+		set_fs(KERNEL_DS);			\
- 		ret = __get_user(retval, (__force typeof(retval) __user *)(addr));		\
--		pagefault_enable();			\
- 		set_fs(old_fs);				\
-+		pagefault_enable();			\
- 		ret;					\
- 	})
- 
---- a/include/linux/udf_fs.h
-+++ b/include/linux/udf_fs.h
-@@ -45,7 +45,7 @@
- 		printk (f, ##a); \
- 	}
- #else
--#define udf_debug(f, a...) /**/
-+#define udf_debug(f, a...) do {} while (0)
- #endif
- 
- #define udf_info(f, a...) \
---- a/include/net/sctp/sctp.h
-+++ b/include/net/sctp/sctp.h
-@@ -317,8 +317,8 @@ extern int sctp_debug_flag;
- 
- #else	/* SCTP_DEBUG */
- 
--#define SCTP_DEBUG_PRINTK(whatever...)
--#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
-+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
-+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
- #define SCTP_ENABLE_DEBUG
- #define SCTP_DISABLE_DEBUG
- #define SCTP_ASSERT(expr, str, func)
---- a/include/sound/core.h
-+++ b/include/sound/core.h
-@@ -396,9 +396,9 @@ void snd_verbose_printd(const char *file
- 
- #else /* !CONFIG_SND_DEBUG */
- 
--#define snd_printd(fmt, args...)	/* nothing */
-+#define snd_printd(fmt, args...)	do {} while (0)
- #define snd_assert(expr, args...)	(void)(expr)
--#define snd_BUG()			/* nothing */
-+#define snd_BUG()			do {} while (0)
- 
- #endif /* CONFIG_SND_DEBUG */
- 
-@@ -412,7 +412,7 @@ void snd_verbose_printd(const char *file
-  */
- #define snd_printdd(format, args...) snd_printk(format, ##args)
- #else
--#define snd_printdd(format, args...) /* nothing */
-+#define snd_printdd(format, args...) do {} while (0)
- #endif
- 
- 
---- a/init/do_mounts.c
-+++ b/init/do_mounts.c
-@@ -68,11 +68,12 @@ static dev_t try_name(char *name, int pa
- 
- 	/* read device number from .../dev */
- 
--	sprintf(path, "/sys/block/%s/dev", name);
--	fd = sys_open(path, 0, 0);
-+	if (sizeof path <= snprintf(path, sizeof path, "/sys/block/%s/dev", name))
-+		goto fail;
-+	fd = sys_open((char __user *)path, 0, 0);
- 	if (fd < 0)
- 		goto fail;
--	len = sys_read(fd, buf, 32);
-+	len = sys_read(fd, (char __user *)buf, 32);
- 	sys_close(fd);
- 	if (len <= 0 || len == 32 || buf[len - 1] != '\n')
- 		goto fail;
-@@ -98,11 +99,12 @@ static dev_t try_name(char *name, int pa
- 		return res;
- 
- 	/* otherwise read range from .../range */
--	sprintf(path, "/sys/block/%s/range", name);
--	fd = sys_open(path, 0, 0);
-+	if (sizeof path <= snprintf(path, sizeof path, "/sys/block/%s/range", name))
-+		goto fail;
-+	fd = sys_open((char __user *)path, 0, 0);
- 	if (fd < 0)
- 		goto fail;
--	len = sys_read(fd, buf, 32);
-+	len = sys_read(fd, (char __user *)buf, 32);
- 	sys_close(fd);
- 	if (len <= 0 || len == 32 || buf[len - 1] != '\n')
- 		goto fail;
-@@ -145,8 +147,8 @@ dev_t name_to_dev_t(char *name)
- 	int part;
- 
- #ifdef CONFIG_SYSFS
--	int mkdir_err = sys_mkdir("/sys", 0700);
--	if (sys_mount("sysfs", "/sys", "sysfs", 0, NULL) < 0)
-+	int mkdir_err = sys_mkdir((char __user *)"/sys", 0700);
-+	if (sys_mount((char __user *)"sysfs", (char __user *)"/sys", (char __user *)"sysfs", 0, NULL) < 0)
- 		goto out;
- #endif
- 
-@@ -198,10 +200,10 @@ dev_t name_to_dev_t(char *name)
- 	res = try_name(s, part);
- done:
- #ifdef CONFIG_SYSFS
--	sys_umount("/sys", 0);
-+	sys_umount((char __user *)"/sys", 0);
- out:
- 	if (!mkdir_err)
--		sys_rmdir("/sys");
-+		sys_rmdir((char __user *)"/sys");
- #endif
- 	return res;
- fail:
-@@ -281,11 +283,11 @@ static void __init get_fs_names(char *pa
- 
- static int __init do_mount_root(char *name, char *fs, int flags, void *data)
- {
--	int err = sys_mount(name, "/root", fs, flags, data);
-+	int err = sys_mount((char __user *)name, (char __user *)"/root", (char __user *)fs, flags, (void __user *)data);
- 	if (err)
- 		return err;
- 
--	sys_chdir("/root");
-+	sys_chdir((char __user *)"/root");
- 	ROOT_DEV = current->fs->pwdmnt->mnt_sb->s_dev;
- 	printk("VFS: Mounted root (%s filesystem)%s.\n",
- 	       current->fs->pwdmnt->mnt_sb->s_type->name,
-@@ -371,18 +373,18 @@ void __init change_floppy(char *fmt, ...
- 	va_start(args, fmt);
- 	vsprintf(buf, fmt, args);
- 	va_end(args);
--	fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
-+	fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
- 	if (fd >= 0) {
- 		sys_ioctl(fd, FDEJECT, 0);
- 		sys_close(fd);
- 	}
- 	printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
--	fd = sys_open("/dev/console", O_RDWR, 0);
-+	fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
- 	if (fd >= 0) {
- 		sys_ioctl(fd, TCGETS, (long)&termios);
- 		termios.c_lflag &= ~ICANON;
- 		sys_ioctl(fd, TCSETSF, (long)&termios);
--		sys_read(fd, &c, 1);
-+		sys_read(fd, (char __user *)&c, 1);
- 		termios.c_lflag |= ICANON;
- 		sys_ioctl(fd, TCSETSF, (long)&termios);
- 		sys_close(fd);
-@@ -468,8 +470,8 @@ void __init prepare_namespace(void)
- 
- 	mount_root();
- out:
--	sys_mount(".", "/", NULL, MS_MOVE, NULL);
--	sys_chroot(".");
-+	sys_mount((char __user *)".", (char __user *)"/", NULL, MS_MOVE, NULL);
-+	sys_chroot((char __user *)".");
- 	security_sb_post_mountroot();
- }
- 
---- a/init/do_mounts.h
-+++ b/init/do_mounts.h
-@@ -15,15 +15,15 @@ extern char *root_device_name;
- 
- static inline int create_dev(char *name, dev_t dev)
- {
--	sys_unlink(name);
--	return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
-+	sys_unlink((char __user *)name);
-+	return sys_mknod((char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
- }
- 
- #if BITS_PER_LONG == 32
- static inline u32 bstat(char *name)
- {
- 	struct stat64 stat;
--	if (sys_stat64(name, &stat) != 0)
-+	if (sys_stat64((char __user *)name, (struct stat64 __user *)&stat) != 0)
- 		return 0;
- 	if (!S_ISBLK(stat.st_mode))
- 		return 0;
---- a/init/do_mounts_md.c
-+++ b/init/do_mounts_md.c
-@@ -167,7 +167,7 @@ static void __init md_setup_drive(void)
- 			partitioned ? "_d" : "", minor,
- 			md_setup_args[ent].device_names);
- 
--		fd = sys_open(name, 0, 0);
-+		fd = sys_open((char __user *)name, 0, 0);
- 		if (fd < 0) {
- 			printk(KERN_ERR "md: open failed - cannot start "
- 					"array %s\n", name);
-@@ -230,7 +230,7 @@ static void __init md_setup_drive(void)
- 			 * array without it
- 			 */
- 			sys_close(fd);
--			fd = sys_open(name, 0, 0);
-+			fd = sys_open((char __user *)name, 0, 0);
- 			sys_ioctl(fd, BLKRRPART, 0);
- 		}
- 		sys_close(fd);
-@@ -271,7 +271,7 @@ void __init md_run_setup(void)
- 	if (raid_noautodetect)
- 		printk(KERN_INFO "md: Skipping autodetection of RAID arrays. (raid=noautodetect)\n");
- 	else {
--		int fd = sys_open("/dev/md0", 0, 0);
-+		int fd = sys_open((char __user *)"/dev/md0", 0, 0);
- 		if (fd >= 0) {
- 			sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
- 			sys_close(fd);
---- a/init/initramfs.c
-+++ b/init/initramfs.c
-@@ -240,7 +240,7 @@ static int __init maybe_link(void)
- 	if (nlink >= 2) {
- 		char *old = find_link(major, minor, ino, mode, collected);
- 		if (old)
--			return (sys_link(old, collected) < 0) ? -1 : 1;
-+			return (sys_link((char __user *)old, (char __user *)collected) < 0) ? -1 : 1;
- 	}
- 	return 0;
- }
-@@ -249,11 +249,11 @@ static void __init clean_path(char *path
- {
- 	struct stat st;
- 
--	if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
-+	if (!sys_newlstat((char __user *)path, (struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
- 		if (S_ISDIR(st.st_mode))
--			sys_rmdir(path);
-+			sys_rmdir((char __user *)path);
- 		else
--			sys_unlink(path);
-+			sys_unlink((char __user *)path);
- 	}
- }
- 
-@@ -276,7 +276,7 @@ static int __init do_name(void)
- 			int openflags = O_WRONLY|O_CREAT;
- 			if (ml != 1)
- 				openflags |= O_TRUNC;
--			wfd = sys_open(collected, openflags, mode);
-+			wfd = sys_open((char __user *)collected, openflags, mode);
- 
- 			if (wfd >= 0) {
- 				sys_fchown(wfd, uid, gid);
-@@ -285,15 +285,15 @@ static int __init do_name(void)
- 			}
- 		}
- 	} else if (S_ISDIR(mode)) {
--		sys_mkdir(collected, mode);
--		sys_chown(collected, uid, gid);
--		sys_chmod(collected, mode);
-+		sys_mkdir((char __user *)collected, mode);
-+		sys_chown((char __user *)collected, uid, gid);
-+		sys_chmod((char __user *)collected, mode);
- 	} else if (S_ISBLK(mode) || S_ISCHR(mode) ||
- 		   S_ISFIFO(mode) || S_ISSOCK(mode)) {
- 		if (maybe_link() == 0) {
--			sys_mknod(collected, mode, rdev);
--			sys_chown(collected, uid, gid);
--			sys_chmod(collected, mode);
-+			sys_mknod((char __user *)collected, mode, rdev);
-+			sys_chown((char __user *)collected, uid, gid);
-+			sys_chmod((char __user *)collected, mode);
- 		}
- 	}
- 	return 0;
-@@ -302,13 +302,13 @@ static int __init do_name(void)
- static int __init do_copy(void)
- {
- 	if (count >= body_len) {
--		sys_write(wfd, victim, body_len);
-+		sys_write(wfd, (char __user *)victim, body_len);
- 		sys_close(wfd);
- 		eat(body_len);
- 		state = SkipIt;
- 		return 0;
- 	} else {
--		sys_write(wfd, victim, count);
-+		sys_write(wfd, (char __user *)victim, count);
- 		body_len -= count;
- 		eat(count);
- 		return 1;
-@@ -319,8 +319,8 @@ static int __init do_symlink(void)
- {
- 	collected[N_ALIGN(name_len) + body_len] = '\0';
- 	clean_path(collected, 0);
--	sys_symlink(collected + N_ALIGN(name_len), collected);
--	sys_lchown(collected, uid, gid);
-+	sys_symlink((char __user *)collected + N_ALIGN(name_len), (char __user *)collected);
-+	sys_lchown((char __user *)collected, uid, gid);
- 	state = SkipIt;
- 	next_state = Reset;
- 	return 0;
---- a/init/Kconfig
-+++ b/init/Kconfig
-@@ -384,6 +384,7 @@ config SYSCTL_SYSCALL
- config KALLSYMS
- 	 bool "Load all symbols for debugging/ksymoops" if EMBEDDED
- 	 default y
-+	 depends on !GRKERNSEC_HIDESYM
- 	 help
- 	   Say Y here to let the kernel print out symbolic crash information and
- 	   symbolic stack backtraces. This increases the size of the kernel
---- a/init/main.c
-+++ b/init/main.c
-@@ -107,6 +107,7 @@ static inline void mark_rodata_ro(void) 
- #ifdef CONFIG_TC
- extern void tc_init(void);
- #endif
-+extern void grsecurity_init(void);
- 
- enum system_states system_state;
- EXPORT_SYMBOL(system_state);
-@@ -193,6 +194,17 @@ static int __init set_reset_devices(char
- 
- __setup("reset_devices", set_reset_devices);
- 
-+#ifdef CONFIG_PAX_SOFTMODE
-+unsigned int pax_softmode;
-+
-+static int __init setup_pax_softmode(char *str)
-+{
-+	get_option(&str, &pax_softmode);
-+	return 1;
-+}
-+__setup("pax_softmode=", setup_pax_softmode);
-+#endif
-+
- static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
- char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
- static const char *panic_later, *panic_param;
-@@ -854,6 +866,8 @@ static int __init kernel_init(void * unu
- 		prepare_namespace();
- 	}
- 
-+	grsecurity_init();
-+
- 	/*
- 	 * Ok, we have completed the initial bootup, and
- 	 * we're essentially up and running. Get rid of the
---- a/init/noinitramfs.c
-+++ b/init/noinitramfs.c
-@@ -29,7 +29,7 @@ static int __init default_rootfs(void)
- {
- 	int err;
- 
--	err = sys_mkdir("/dev", 0755);
-+	err = sys_mkdir((const char __user *)"/dev", 0755);
- 	if (err < 0)
- 		goto out;
- 
-@@ -39,7 +39,7 @@ static int __init default_rootfs(void)
- 	if (err < 0)
- 		goto out;
- 
--	err = sys_mkdir("/root", 0700);
-+	err = sys_mkdir((const char __user *)"/root", 0700);
- 	if (err < 0)
- 		goto out;
- 
---- a/ipc/ipc_sysctl.c
-+++ b/ipc/ipc_sysctl.c
-@@ -161,7 +161,7 @@ static struct ctl_table ipc_kern_table[]
- 		.proc_handler	= proc_ipc_dointvec,
- 		.strategy	= sysctl_ipc_data,
- 	},
--	{}
-+	{ 0, NULL, NULL, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL }
- };
- 
- static struct ctl_table ipc_root_table[] = {
-@@ -171,7 +171,7 @@ static struct ctl_table ipc_root_table[]
- 		.mode		= 0555,
- 		.child		= ipc_kern_table,
- 	},
--	{}
-+	{ 0, NULL, NULL, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL }
- };
- 
- static int __init ipc_sysctl_init(void)
---- a/ipc/msg.c
-+++ b/ipc/msg.c
-@@ -36,6 +36,7 @@
- #include <linux/seq_file.h>
- #include <linux/mutex.h>
- #include <linux/nsproxy.h>
-+#include <linux/grsecurity.h>
- 
- #include <asm/current.h>
- #include <asm/uaccess.h>
-@@ -286,6 +287,8 @@ asmlinkage long sys_msgget(key_t key, in
- 	}
- 	mutex_unlock(&msg_ids(ns).mutex);
- 
-+	gr_log_msgget(ret, msgflg);
-+
- 	return ret;
- }
- 
-@@ -552,6 +555,7 @@ asmlinkage long sys_msgctl(int msqid, in
- 		break;
- 	}
- 	case IPC_RMID:
-+		gr_log_msgrm(ipcp->uid, ipcp->cuid);
- 		freeque(ns, msq, msqid);
- 		break;
- 	}
---- a/ipc/sem.c
-+++ b/ipc/sem.c
-@@ -82,6 +82,7 @@
- #include <linux/seq_file.h>
- #include <linux/mutex.h>
- #include <linux/nsproxy.h>
-+#include <linux/grsecurity.h>
- 
- #include <asm/uaccess.h>
- #include "util.h"
-@@ -293,6 +294,9 @@ asmlinkage long sys_semget (key_t key, i
- 	}
- 
- 	mutex_unlock(&sem_ids(ns).mutex);
-+
-+	gr_log_semget(err, semflg);
-+
- 	return err;
- }
- 
-@@ -894,6 +898,7 @@ static int semctl_down(struct ipc_namesp
- 
- 	switch(cmd){
- 	case IPC_RMID:
-+		gr_log_semrm(ipcp->uid, ipcp->cuid);
- 		freeary(ns, sma, semid);
- 		err = 0;
- 		break;
---- a/ipc/shm.c
-+++ b/ipc/shm.c
-@@ -38,6 +38,7 @@
- #include <linux/mutex.h>
- #include <linux/nsproxy.h>
- #include <linux/mount.h>
-+#include <linux/grsecurity.h>
- 
- #include <asm/uaccess.h>
- 
-@@ -77,6 +78,14 @@ static void shm_destroy (struct ipc_name
- static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
- #endif
- 
-+#ifdef CONFIG_GRKERNSEC
-+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
-+			   const time_t shm_createtime, const uid_t cuid,
-+			   const int shmid);
-+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
-+			   const time_t shm_createtime);
-+#endif
-+
- static void __shm_init_ns(struct ipc_namespace *ns, struct ipc_ids *ids)
- {
- 	ns->ids[IPC_SHM_IDS] = ids;
-@@ -89,6 +98,8 @@ static void __shm_init_ns(struct ipc_nam
- 
- static void do_shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *shp)
- {
-+	gr_log_shmrm(shp->shm_perm.uid, shp->shm_perm.cuid);
-+
- 	if (shp->shm_nattch){
- 		shp->shm_perm.mode |= SHM_DEST;
- 		/* Do not find it any more */
-@@ -216,6 +227,17 @@ static void shm_close(struct vm_area_str
- 	shp->shm_lprid = current->tgid;
- 	shp->shm_dtim = get_seconds();
- 	shp->shm_nattch--;
-+#ifdef CONFIG_GRKERNSEC_SHM
-+	if (grsec_enable_shm) {
-+		if (shp->shm_nattch == 0) {
-+			shp->shm_perm.mode |= SHM_DEST;
-+			shm_destroy(ns, shp);
-+		} else
-+			shm_unlock(shp);
-+		mutex_unlock(&shm_ids(ns).mutex);
-+		return;
-+	}
-+#endif
- 	if(shp->shm_nattch == 0 &&
- 	   shp->shm_perm.mode & SHM_DEST)
- 		shm_destroy(ns, shp);
-@@ -395,6 +417,9 @@ static int newseg (struct ipc_namespace 
- 	shp->shm_lprid = 0;
- 	shp->shm_atim = shp->shm_dtim = 0;
- 	shp->shm_ctim = get_seconds();
-+#ifdef CONFIG_GRKERNSEC
-+	shp->shm_createtime = get_seconds();
-+#endif
- 	shp->shm_segsz = size;
- 	shp->shm_nattch = 0;
- 	shp->id = shm_buildid(ns, id, shp->shm_perm.seq);
-@@ -452,6 +477,8 @@ asmlinkage long sys_shmget (key_t key, s
- 	}
- 	mutex_unlock(&shm_ids(ns).mutex);
- 
-+	gr_log_shmget(err, shmflg, size);
-+
- 	return err;
- }
- 
-@@ -905,9 +932,21 @@ long do_shmat(int shmid, char __user *sh
- 	if (err)
- 		goto out_unlock;
- 
-+#ifdef CONFIG_GRKERNSEC
-+	if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
-+			     shp->shm_perm.cuid, shmid) ||
-+	    !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
-+		err = -EACCES;
-+		goto out_unlock;
-+	}
-+#endif
-+
- 	path.dentry = dget(shp->shm_file->f_path.dentry);
- 	path.mnt    = mntget(shp->shm_file->f_path.mnt);
- 	shp->shm_nattch++;
-+#ifdef CONFIG_GRKERNSEC
-+	shp->shm_lapid = current->pid;
-+#endif
- 	size = i_size_read(path.dentry->d_inode);
- 	shm_unlock(shp);
- 
-@@ -1111,3 +1150,27 @@ static int sysvipc_shm_proc_show(struct 
- 			  shp->shm_ctim);
- }
- #endif
-+
-+void gr_shm_exit(struct task_struct *task)
-+{
-+#ifdef CONFIG_GRKERNSEC_SHM
-+	int i;
-+	struct shmid_kernel *shp;
-+	struct ipc_namespace *ns;
-+
-+	ns = current->nsproxy->ipc_ns;
-+
-+	if (!grsec_enable_shm)
-+		return;
-+
-+	for (i = 0; i <= shm_ids(ns).max_id; i++) {
-+		shp = shm_get(ns, i);
-+		if (shp && (shp->shm_cprid == task->pid) &&
-+		    (shp->shm_nattch <= 0)) {
-+			shp->shm_perm.mode |= SHM_DEST;
-+			shm_destroy(ns, shp);
-+		}
-+	}
-+#endif
-+	return;
-+}
---- a/kernel/acct.c
-+++ b/kernel/acct.c
-@@ -511,7 +511,7 @@ static void do_acct_process(struct file 
-  	 */
- 	flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
- 	current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
--	file->f_op->write(file, (char *)&ac,
-+	file->f_op->write(file, (char __user *)&ac,
- 			       sizeof(acct_t), &file->f_pos);
- 	current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
- 	set_fs(fs);
---- a/kernel/capability.c
-+++ b/kernel/capability.c
-@@ -12,6 +12,7 @@
- #include <linux/module.h>
- #include <linux/security.h>
- #include <linux/syscalls.h>
-+#include <linux/grsecurity.h>
- #include <asm/uaccess.h>
- 
- unsigned securebits = SECUREBITS_DEFAULT; /* systemwide security settings */
-@@ -236,14 +237,25 @@ out:
-      return ret;
- }
- 
-+extern int gr_task_is_capable(struct task_struct *task, const int cap);
-+extern int gr_is_capable_nolog(const int cap);
-+
- int __capable(struct task_struct *t, int cap)
- {
--	if (security_capable(t, cap) == 0) {
-+	if ((security_capable(t, cap) == 0) && gr_task_is_capable(t, cap)) {
- 		t->flags |= PF_SUPERPRIV;
- 		return 1;
- 	}
- 	return 0;
- }
-+int capable_nolog(int cap)
-+{
-+	if ((security_capable(current, cap) == 0) && gr_is_capable_nolog(cap)) {
-+		current->flags |= PF_SUPERPRIV;
-+		return 1;
-+	}
-+	return 0;
-+}
- EXPORT_SYMBOL(__capable);
- 
- int capable(int cap)
-@@ -251,3 +263,4 @@ int capable(int cap)
- 	return __capable(current, cap);
- }
- EXPORT_SYMBOL(capable);
-+EXPORT_SYMBOL(capable_nolog);
---- a/kernel/configs.c
-+++ b/kernel/configs.c
-@@ -79,8 +79,16 @@ static int __init ikconfig_init(void)
- 	struct proc_dir_entry *entry;
- 
- 	/* create the current config file */
-+#ifdef CONFIG_GRKERNSEC_PROC_ADD
-+#ifdef CONFIG_GRKERNSEC_PROC_USER
-+	entry = create_proc_entry("config.gz", S_IFREG | S_IRUSR, &proc_root);
-+#elif CONFIG_GRKERNSEC_PROC_USERGROUP
-+	entry = create_proc_entry("config.gz", S_IFREG | S_IRUSR | S_IRGRP, &proc_root);
-+#endif
-+#else
- 	entry = create_proc_entry("config.gz", S_IFREG | S_IRUGO,
- 				  &proc_root);
-+#endif
- 	if (!entry)
- 		return -ENOMEM;
- 
---- a/kernel/exit.c
-+++ b/kernel/exit.c
-@@ -45,6 +45,11 @@
- #include <linux/blkdev.h>
- #include <linux/task_io_accounting_ops.h>
- #include <linux/freezer.h>
-+#include <linux/grsecurity.h>
-+
-+#ifdef CONFIG_GRKERNSEC
-+extern rwlock_t grsec_exec_file_lock;
-+#endif
- 
- #include <asm/uaccess.h>
- #include <asm/unistd.h>
-@@ -123,6 +128,7 @@ static void __exit_signal(struct task_st
- 
- 	__unhash_process(tsk);
- 
-+	gr_del_task_from_ip_table(tsk);
- 	tsk->signal = NULL;
- 	tsk->sighand = NULL;
- 	spin_unlock(&sighand->siglock);
-@@ -274,12 +280,23 @@ static void reparent_to_kthreadd(void)
- {
- 	write_lock_irq(&tasklist_lock);
- 
-+#ifdef CONFIG_GRKERNSEC
-+	write_lock(&grsec_exec_file_lock);
-+	if (current->exec_file) {
-+		fput(current->exec_file);
-+		current->exec_file = NULL;
-+	}
-+	write_unlock(&grsec_exec_file_lock);
-+#endif
-+
- 	ptrace_unlink(current);
- 	/* Reparent to init */
- 	remove_parent(current);
- 	current->real_parent = current->parent = kthreadd_task;
- 	add_parent(current);
- 
-+	gr_set_kernel_label(current);
-+
- 	/* Set the exit signal to SIGCHLD so we signal init on exit */
- 	current->exit_signal = SIGCHLD;
- 
-@@ -374,6 +391,17 @@ void daemonize(const char *name, ...)
- 	vsnprintf(current->comm, sizeof(current->comm), name, args);
- 	va_end(args);
- 
-+#ifdef CONFIG_GRKERNSEC
-+	write_lock(&grsec_exec_file_lock);
-+	if (current->exec_file) {
-+		fput(current->exec_file);
-+		current->exec_file = NULL;
-+	}
-+	write_unlock(&grsec_exec_file_lock);
-+#endif
-+
-+	gr_set_kernel_label(current);
-+
- 	/*
- 	 * If we were started as result of loading a module, close all of the
- 	 * user space pages.  We don't need them, and if we didn't close them
-@@ -969,11 +997,15 @@ fastcall NORET_TYPE void do_exit(long co
- 	tsk->exit_code = code;
- 	taskstats_exit(tsk, group_dead);
- 
-+	gr_acl_handle_psacct(tsk, code);
-+	gr_acl_handle_exit();
-+
- 	exit_mm(tsk);
- 
- 	if (group_dead)
- 		acct_process();
- 	exit_sem(tsk);
-+	gr_shm_exit(tsk);
- 	__exit_files(tsk);
- 	__exit_fs(tsk);
- 	check_stack_usage();
-@@ -1174,7 +1206,7 @@ static int wait_task_zombie(struct task_
- 		pid_t pid = p->pid;
- 		uid_t uid = p->uid;
- 		int exit_code = p->exit_code;
--		int why, status;
-+		int why;
- 
- 		if (unlikely(p->exit_state != EXIT_ZOMBIE))
- 			return 0;
---- a/kernel/fork.c
-+++ b/kernel/fork.c
-@@ -50,6 +50,7 @@
- #include <linux/taskstats_kern.h>
- #include <linux/random.h>
- #include <linux/tty.h>
-+#include <linux/grsecurity.h>
- 
- #include <asm/pgtable.h>
- #include <asm/pgalloc.h>
-@@ -181,7 +182,7 @@ static struct task_struct *dup_task_stru
- 	setup_thread_stack(tsk, orig);
- 
- #ifdef CONFIG_CC_STACKPROTECTOR
--	tsk->stack_canary = get_random_int();
-+	tsk->stack_canary = pax_get_random_long();
- #endif
- 
- 	/* One for us, one for whoever does the "release_task()" (usually parent) */
-@@ -203,6 +204,10 @@ static inline int dup_mmap(struct mm_str
- 	unsigned long charge;
- 	struct mempolicy *pol;
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	struct vm_area_struct *mpnt_m;
-+#endif
-+
- 	down_write(&oldmm->mmap_sem);
- 	flush_cache_dup_mm(oldmm);
- 	/*
-@@ -213,8 +218,8 @@ static inline int dup_mmap(struct mm_str
- 	mm->locked_vm = 0;
- 	mm->mmap = NULL;
- 	mm->mmap_cache = NULL;
--	mm->free_area_cache = oldmm->mmap_base;
--	mm->cached_hole_size = ~0UL;
-+	mm->free_area_cache = oldmm->free_area_cache;
-+	mm->cached_hole_size = oldmm->cached_hole_size;
- 	mm->map_count = 0;
- 	cpus_clear(mm->cpu_vm_mask);
- 	mm->mm_rb = RB_ROOT;
-@@ -233,6 +238,7 @@ static inline int dup_mmap(struct mm_str
- 			continue;
- 		}
- 		charge = 0;
-+
- 		if (mpnt->vm_flags & VM_ACCOUNT) {
- 			unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
- 			if (security_vm_enough_memory(len))
-@@ -251,6 +257,7 @@ static inline int dup_mmap(struct mm_str
- 		tmp->vm_flags &= ~VM_LOCKED;
- 		tmp->vm_mm = mm;
- 		tmp->vm_next = NULL;
-+		tmp->vm_mirror = NULL;
- 		anon_vma_link(tmp);
- 		file = tmp->vm_file;
- 		if (file) {
-@@ -287,6 +294,29 @@ static inline int dup_mmap(struct mm_str
- 		if (retval)
- 			goto out;
- 	}
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
-+		for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
-+			BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
-+
-+			if (!mpnt->vm_mirror)
-+				continue;
-+
-+			if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
-+				BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
-+				mpnt->vm_mirror = mpnt_m;
-+			} else {
-+				BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
-+				mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
-+				mpnt_m->vm_mirror->vm_mirror = mpnt_m;
-+				mpnt->vm_mirror->vm_mirror = mpnt;
-+			}
-+		}
-+		BUG_ON(mpnt_m);
-+	}
-+#endif
-+
- 	/* a new mm has just been created */
- 	arch_dup_mmap(oldmm, mm);
- 	retval = 0;
-@@ -464,7 +494,7 @@ void mm_release(struct task_struct *tsk,
- 	if (tsk->clear_child_tid
- 	    && !(tsk->flags & PF_SIGNALED)
- 	    && atomic_read(&mm->mm_users) > 1) {
--		u32 __user * tidptr = tsk->clear_child_tid;
-+		pid_t __user * tidptr = tsk->clear_child_tid;
- 		tsk->clear_child_tid = NULL;
- 
- 		/*
-@@ -472,7 +502,7 @@ void mm_release(struct task_struct *tsk,
- 		 * not set up a proper pointer then tough luck.
- 		 */
- 		put_user(0, tidptr);
--		sys_futex(tidptr, FUTEX_WAKE, 1, NULL, NULL, 0);
-+		sys_futex((u32 __user *)tidptr, FUTEX_WAKE, 1, NULL, NULL, 0);
- 	}
- }
- 
-@@ -1001,6 +1031,9 @@ static struct task_struct *copy_process(
- 	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
- #endif
- 	retval = -EAGAIN;
-+
-+	gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->user->processes), 0);
-+
- 	if (atomic_read(&p->user->processes) >=
- 			p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
- 		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
-@@ -1140,6 +1173,8 @@ static struct task_struct *copy_process(
- 	if (retval)
- 		goto bad_fork_cleanup_namespaces;
- 
-+	gr_copy_label(p);
-+
- 	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
- 	/*
- 	 * Clear TID on mm_release()?
-@@ -1318,6 +1353,8 @@ bad_fork_cleanup_count:
- bad_fork_free:
- 	free_task(p);
- fork_out:
-+	gr_log_forkfail(retval);
-+
- 	return ERR_PTR(retval);
- }
- 
-@@ -1391,6 +1428,8 @@ long do_fork(unsigned long clone_flags,
- 	if (!IS_ERR(p)) {
- 		struct completion vfork;
- 
-+		gr_handle_brute_check();
-+
- 		if (clone_flags & CLONE_VFORK) {
- 			p->vfork_done = &vfork;
- 			init_completion(&vfork);
---- a/kernel/futex.c
-+++ b/kernel/futex.c
-@@ -186,6 +186,11 @@ int get_futex_key(u32 __user *uaddr, str
- 	struct page *page;
- 	int err;
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
-+		return -EFAULT;
-+#endif
-+
- 	/*
- 	 * The futex address must be "naturally" aligned.
- 	 */
-@@ -212,8 +217,8 @@ int get_futex_key(u32 __user *uaddr, str
- 	 * The futex is hashed differently depending on whether
- 	 * it's in a shared or private mapping.  So check vma first.
- 	 */
--	vma = find_extend_vma(mm, address);
--	if (unlikely(!vma))
-+	vma = find_vma(mm, address);
-+	if (unlikely(!vma || address < vma->vm_start))
- 		return -EFAULT;
- 
- 	/*
-@@ -1922,7 +1927,7 @@ retry:
-  */
- static inline int fetch_robust_entry(struct robust_list __user **entry,
- 				     struct robust_list __user * __user *head,
--				     int *pi)
-+				     unsigned int *pi)
- {
- 	unsigned long uentry;
- 
---- a/kernel/irq/handle.c
-+++ b/kernel/irq/handle.c
-@@ -55,7 +55,8 @@ struct irq_desc irq_desc[NR_IRQS] __cach
- 		.depth = 1,
- 		.lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
- #ifdef CONFIG_SMP
--		.affinity = CPU_MASK_ALL
-+		.affinity = CPU_MASK_ALL,
-+		.cpu = 0,
- #endif
- 	}
- };
---- a/kernel/kallsyms.c
-+++ b/kernel/kallsyms.c
-@@ -65,6 +65,19 @@ static inline int is_kernel_text(unsigne
- 
- static inline int is_kernel(unsigned long addr)
- {
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+
-+#ifdef CONFIG_MODULES
-+	if ((unsigned long)MODULES_VADDR <= addr + __KERNEL_TEXT_OFFSET &&
-+	    addr + __KERNEL_TEXT_OFFSET < (unsigned long)MODULES_END)
-+		return 0;
-+#endif
-+
-+	if (is_kernel_inittext(addr))
-+		return 1;
-+#endif
-+
- 	if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
- 		return 1;
- 	return in_gate_area_no_task(addr);
-@@ -373,7 +386,6 @@ static unsigned long get_ksymbol_core(st
- 
- static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
- {
--	iter->name[0] = '\0';
- 	iter->nameoff = get_symbol_offset(new_pos);
- 	iter->pos = new_pos;
- }
-@@ -457,7 +469,7 @@ static int kallsyms_open(struct inode *i
- 	struct kallsym_iter *iter;
- 	int ret;
- 
--	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
-+	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
- 	if (!iter)
- 		return -ENOMEM;
- 	reset_iter(iter, 0);
-@@ -481,7 +493,15 @@ static int __init kallsyms_init(void)
- {
- 	struct proc_dir_entry *entry;
- 
-+#ifdef CONFIG_GRKERNSEC_PROC_ADD
-+#ifdef CONFIG_GRKERNSEC_PROC_USER
-+	entry = create_proc_entry("kallsyms", S_IFREG | S_IRUSR, NULL);
-+#elif CONFIG_GRKERNSEC_PROC_USERGROUP
-+	entry = create_proc_entry("kallsyms", S_IFREG | S_IRUSR | S_IRGRP, NULL);
-+#endif
-+#else
- 	entry = create_proc_entry("kallsyms", 0444, NULL);
-+#endif
- 	if (entry)
- 		entry->proc_fops = &kallsyms_operations;
- 	return 0;
---- a/kernel/kprobes.c
-+++ b/kernel/kprobes.c
-@@ -168,7 +168,7 @@ kprobe_opcode_t __kprobes *get_insn_slot
- 	 * kernel image and loaded module images reside. This is required
- 	 * so x86_64 can correctly handle the %rip-relative fixups.
- 	 */
--	kip->insns = module_alloc(PAGE_SIZE);
-+	kip->insns = module_alloc_exec(PAGE_SIZE);
- 	if (!kip->insns) {
- 		kfree(kip);
- 		return NULL;
-@@ -200,7 +200,7 @@ static int __kprobes collect_one_slot(st
- 			hlist_add_head(&kip->hlist,
- 				       &kprobe_insn_pages);
- 		} else {
--			module_free(NULL, kip->insns);
-+			module_free_exec(NULL, kip->insns);
- 			kfree(kip);
- 		}
- 		return 1;
---- a/kernel/module.c
-+++ b/kernel/module.c
-@@ -44,6 +44,11 @@
- #include <asm/uaccess.h>
- #include <asm/semaphore.h>
- #include <asm/cacheflush.h>
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+#include <asm/desc.h>
-+#endif
-+
- #include <linux/license.h>
- 
- extern int module_sysfs_initialized;
-@@ -68,6 +73,8 @@ static LIST_HEAD(modules);
- 
- static BLOCKING_NOTIFIER_HEAD(module_notify_list);
- 
-+extern int gr_check_modstop(void);
-+
- int register_module_notifier(struct notifier_block * nb)
- {
- 	return blocking_notifier_chain_register(&module_notify_list, nb);
-@@ -347,7 +354,7 @@ static void *percpu_modalloc(unsigned lo
- 	unsigned int i;
- 	void *ptr;
- 
--	if (align > PAGE_SIZE) {
-+	if (align-1 >= PAGE_SIZE) {
- 		printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
- 		       name, align, PAGE_SIZE);
- 		align = PAGE_SIZE;
-@@ -660,6 +667,9 @@ sys_delete_module(const char __user *nam
- 	char name[MODULE_NAME_LEN];
- 	int ret, forced = 0;
- 
-+	if (gr_check_modstop())
-+		return -EPERM;
-+
- 	if (!capable(CAP_SYS_MODULE))
- 		return -EPERM;
- 
-@@ -1209,16 +1219,19 @@ static void free_module(struct module *m
- 	module_unload_free(mod);
- 
- 	/* This may be NULL, but that's OK */
--	module_free(mod, mod->module_init);
-+	module_free(mod, mod->module_init_rw);
-+	module_free_exec(mod, mod->module_init_rx);
- 	kfree(mod->args);
- 	if (mod->percpu)
- 		percpu_modfree(mod->percpu);
- 
- 	/* Free lock-classes: */
--	lockdep_free_key_range(mod->module_core, mod->core_size);
-+	lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
-+	lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
- 
- 	/* Finally, free the core (containing the module structure) */
--	module_free(mod, mod->module_core);
-+	module_free_exec(mod, mod->module_core_rx);
-+	module_free(mod, mod->module_core_rw);
- }
- 
- void *__symbol_get(const char *symbol)
-@@ -1279,10 +1292,14 @@ static int simplify_symbols(Elf_Shdr *se
- 			    struct module *mod)
- {
- 	Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
--	unsigned long secbase;
-+	unsigned long secbase, symbol;
- 	unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
- 	int ret = 0;
- 
-+#ifdef CONFIG_PAX_KERNEXEC
-+	unsigned long cr0;
-+#endif
-+
- 	for (i = 1; i < n; i++) {
- 		switch (sym[i].st_shndx) {
- 		case SHN_COMMON:
-@@ -1301,10 +1318,19 @@ static int simplify_symbols(Elf_Shdr *se
- 			break;
- 
- 		case SHN_UNDEF:
--			sym[i].st_value
--			  = resolve_symbol(sechdrs, versindex,
-+			symbol = resolve_symbol(sechdrs, versindex,
- 					   strtab + sym[i].st_name, mod);
- 
-+#ifdef CONFIG_PAX_KERNEXEC
-+			pax_open_kernel(cr0);
-+#endif
-+
-+			sym[i].st_value = symbol;
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+			pax_close_kernel(cr0);
-+#endif
-+
- 			/* Ok if resolved.  */
- 			if (sym[i].st_value != 0)
- 				break;
-@@ -1319,11 +1345,27 @@ static int simplify_symbols(Elf_Shdr *se
- 
- 		default:
- 			/* Divert to percpu allocation if a percpu var. */
--			if (sym[i].st_shndx == pcpuindex)
-+			if (sym[i].st_shndx == pcpuindex) {
-+
-+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
-+				secbase = (unsigned long)mod->percpu - (unsigned long)__per_cpu_start;
-+#else
- 				secbase = (unsigned long)mod->percpu;
--			else
-+#endif
-+
-+			} else
- 				secbase = sechdrs[sym[i].st_shndx].sh_addr;
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+			pax_open_kernel(cr0);
-+#endif
-+
- 			sym[i].st_value += secbase;
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+			pax_close_kernel(cr0);
-+#endif
-+
- 			break;
- 		}
- 	}
-@@ -1375,11 +1417,14 @@ static void layout_sections(struct modul
- 			    || strncmp(secstrings + s->sh_name,
- 				       ".init", 5) == 0)
- 				continue;
--			s->sh_entsize = get_offset(&mod->core_size, s);
-+			if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
-+				s->sh_entsize = get_offset(&mod->core_size_rw, s);
-+			else
-+				s->sh_entsize = get_offset(&mod->core_size_rx, s);
- 			DEBUGP("\t%s\n", secstrings + s->sh_name);
- 		}
- 		if (m == 0)
--			mod->core_text_size = mod->core_size;
-+			mod->core_size_rx = mod->core_size_rx;
- 	}
- 
- 	DEBUGP("Init section allocation order:\n");
-@@ -1393,12 +1438,15 @@ static void layout_sections(struct modul
- 			    || strncmp(secstrings + s->sh_name,
- 				       ".init", 5) != 0)
- 				continue;
--			s->sh_entsize = (get_offset(&mod->init_size, s)
--					 | INIT_OFFSET_MASK);
-+			if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
-+				s->sh_entsize = get_offset(&mod->init_size_rw, s);
-+			else
-+				s->sh_entsize = get_offset(&mod->init_size_rx, s);
-+			s->sh_entsize |= INIT_OFFSET_MASK;
- 			DEBUGP("\t%s\n", secstrings + s->sh_name);
- 		}
- 		if (m == 0)
--			mod->init_text_size = mod->init_size;
-+			mod->init_size_rx = mod->init_size_rx;
- 	}
- }
- 
-@@ -1525,14 +1573,31 @@ static void add_kallsyms(struct module *
- {
- 	unsigned int i;
- 
-+#ifdef CONFIG_PAX_KERNEXEC
-+	unsigned long cr0;
-+#endif
-+
- 	mod->symtab = (void *)sechdrs[symindex].sh_addr;
- 	mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
- 	mod->strtab = (void *)sechdrs[strindex].sh_addr;
- 
- 	/* Set types up while we still have access to sections. */
--	for (i = 0; i < mod->num_symtab; i++)
--		mod->symtab[i].st_info
--			= elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
-+
-+	for (i = 0; i < mod->num_symtab; i++) {
-+		char type = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+		pax_open_kernel(cr0);
-+#endif
-+
-+		mod->symtab[i].st_info = type;
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+		pax_close_kernel(cr0);
-+#endif
-+
-+	}
-+
- }
- #else
- static inline void add_kallsyms(struct module *mod,
-@@ -1580,6 +1645,10 @@ static struct module *load_module(void _
- 	struct exception_table_entry *extable;
- 	mm_segment_t old_fs;
- 
-+#ifdef CONFIG_PAX_KERNEXEC
-+	unsigned long cr0;
-+#endif
-+
- 	DEBUGP("load_module: umod=%p, len=%lu, uargs=%p\n",
- 	       umod, len, uargs);
- 	if (len < sizeof(*hdr))
-@@ -1738,21 +1807,57 @@ static struct module *load_module(void _
- 	layout_sections(mod, hdr, sechdrs, secstrings);
- 
- 	/* Do the allocs. */
--	ptr = module_alloc(mod->core_size);
-+	ptr = module_alloc(mod->core_size_rw);
- 	if (!ptr) {
- 		err = -ENOMEM;
- 		goto free_percpu;
- 	}
--	memset(ptr, 0, mod->core_size);
--	mod->module_core = ptr;
-+	memset(ptr, 0, mod->core_size_rw);
-+	mod->module_core_rw = ptr;
-+
-+	ptr = module_alloc(mod->init_size_rw);
-+	if (!ptr && mod->init_size_rw) {
-+		err = -ENOMEM;
-+		goto free_core_rw;
-+	}
-+	memset(ptr, 0, mod->init_size_rw);
-+	mod->module_init_rw = ptr;
-+
-+	ptr = module_alloc_exec(mod->core_size_rx);
-+	if (!ptr) {
-+		err = -ENOMEM;
-+		goto free_init_rw;
-+	}
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_open_kernel(cr0);
-+#endif
- 
--	ptr = module_alloc(mod->init_size);
--	if (!ptr && mod->init_size) {
-+	memset(ptr, 0, mod->core_size_rx);
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_close_kernel(cr0);
-+#endif
-+
-+	mod->module_core_rx = ptr;
-+
-+	ptr = module_alloc_exec(mod->init_size_rx);
-+	if (!ptr && mod->init_size_rx) {
- 		err = -ENOMEM;
--		goto free_core;
-+		goto free_core_rx;
- 	}
--	memset(ptr, 0, mod->init_size);
--	mod->module_init = ptr;
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_open_kernel(cr0);
-+#endif
-+
-+	memset(ptr, 0, mod->init_size_rx);
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	pax_close_kernel(cr0);
-+#endif
-+
-+	mod->module_init_rx = ptr;
- 
- 	/* Transfer each section which specifies SHF_ALLOC */
- 	DEBUGP("final section addresses:\n");
-@@ -1762,17 +1867,41 @@ static struct module *load_module(void _
- 		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
- 			continue;
- 
--		if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
--			dest = mod->module_init
--				+ (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
--		else
--			dest = mod->module_core + sechdrs[i].sh_entsize;
-+		if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
-+			if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
-+				dest = mod->module_init_rw
-+					+ (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
-+			else
-+				dest = mod->module_init_rx
-+					+ (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
-+		} else {
-+			if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
-+				dest = mod->module_core_rw + sechdrs[i].sh_entsize;
-+			else
-+				dest = mod->module_core_rx + sechdrs[i].sh_entsize;
-+		}
-+
-+		if (sechdrs[i].sh_type != SHT_NOBITS) {
- 
--		if (sechdrs[i].sh_type != SHT_NOBITS)
--			memcpy(dest, (void *)sechdrs[i].sh_addr,
--			       sechdrs[i].sh_size);
-+#ifdef CONFIG_PAX_KERNEXEC
-+			if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
-+				pax_open_kernel(cr0);
-+				memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
-+				pax_close_kernel(cr0);
-+			} else
-+#endif
-+
-+			memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
-+		}
- 		/* Update sh_addr to point to copy in image. */
--		sechdrs[i].sh_addr = (unsigned long)dest;
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+		if (sechdrs[i].sh_flags & SHF_EXECINSTR)
-+			sechdrs[i].sh_addr = (unsigned long)dest - __KERNEL_TEXT_OFFSET;
-+		else
-+#endif
-+
-+			sechdrs[i].sh_addr = (unsigned long)dest;
- 		DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
- 	}
- 	/* Module has been moved. */
-@@ -1892,12 +2021,12 @@ static struct module *load_module(void _
- 	 * Do it before processing of module parameters, so the module
- 	 * can provide parameter accessor functions of its own.
- 	 */
--	if (mod->module_init)
--		flush_icache_range((unsigned long)mod->module_init,
--				   (unsigned long)mod->module_init
--				   + mod->init_size);
--	flush_icache_range((unsigned long)mod->module_core,
--			   (unsigned long)mod->module_core + mod->core_size);
-+	if (mod->module_init_rx)
-+		flush_icache_range((unsigned long)mod->module_init_rx,
-+				   (unsigned long)mod->module_init_rx
-+				   + mod->init_size_rx);
-+	flush_icache_range((unsigned long)mod->module_core_rx,
-+			   (unsigned long)mod->module_core_rx + mod->core_size_rx);
- 
- 	set_fs(old_fs);
- 
-@@ -1940,9 +2069,13 @@ static struct module *load_module(void _
- 	module_arch_cleanup(mod);
-  cleanup:
- 	module_unload_free(mod);
--	module_free(mod, mod->module_init);
-- free_core:
--	module_free(mod, mod->module_core);
-+	module_free_exec(mod, mod->module_init_rx);
-+ free_core_rx:
-+	module_free_exec(mod, mod->module_core_rx);
-+ free_init_rw:
-+	module_free(mod, mod->module_init_rw);
-+ free_core_rw:
-+	module_free(mod, mod->module_core_rw);
-  free_percpu:
- 	if (percpu)
- 		percpu_modfree(percpu);
-@@ -1978,6 +2111,9 @@ sys_init_module(void __user *umod,
- 	struct module *mod;
- 	int ret = 0;
- 
-+	if (gr_check_modstop())
-+		return -EPERM;
-+
- 	/* Must have permission */
- 	if (!capable(CAP_SYS_MODULE))
- 		return -EPERM;
-@@ -2029,10 +2165,12 @@ sys_init_module(void __user *umod,
- 	/* Drop initial reference. */
- 	module_put(mod);
- 	unwind_remove_table(mod->unwind_info, 1);
--	module_free(mod, mod->module_init);
--	mod->module_init = NULL;
--	mod->init_size = 0;
--	mod->init_text_size = 0;
-+	module_free(mod, mod->module_init_rw);
-+	module_free_exec(mod, mod->module_init_rx);
-+	mod->module_init_rw = NULL;
-+	mod->module_init_rx = NULL;
-+	mod->init_size_rw = 0;
-+	mod->init_size_rx = 0;
- 	mutex_unlock(&module_mutex);
- 
- 	return 0;
-@@ -2040,6 +2178,13 @@ sys_init_module(void __user *umod,
- 
- static inline int within(unsigned long addr, void *start, unsigned long size)
- {
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+	if (addr + __KERNEL_TEXT_OFFSET >= (unsigned long)start &&
-+	    addr + __KERNEL_TEXT_OFFSET < (unsigned long)start + size)
-+		return 1;
-+#endif
-+
- 	return ((void *)addr >= start && (void *)addr < start + size);
- }
- 
-@@ -2063,10 +2208,14 @@ static const char *get_ksymbol(struct mo
- 	unsigned long nextval;
- 
- 	/* At worse, next value is at end of module */
--	if (within(addr, mod->module_init, mod->init_size))
--		nextval = (unsigned long)mod->module_init+mod->init_text_size;
--	else 
--		nextval = (unsigned long)mod->module_core+mod->core_text_size;
-+	if (within(addr, mod->module_init_rx, mod->init_size_rx))
-+		nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
-+	else if (within(addr, mod->module_init_rw, mod->init_size_rw))
-+		nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
-+	else if (within(addr, mod->module_core_rx, mod->core_size_rx))
-+		nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
-+	else
-+		nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
- 
- 	/* Scan for closest preceeding symbol, and next symbol. (ELF
-            starts real symbols at 1). */
-@@ -2109,8 +2258,10 @@ const char *module_address_lookup(unsign
- 	struct module *mod;
- 
- 	list_for_each_entry(mod, &modules, list) {
--		if (within(addr, mod->module_init, mod->init_size)
--		    || within(addr, mod->module_core, mod->core_size)) {
-+		if (within(addr, mod->module_init_rx, mod->init_size_rx) ||
-+		    within(addr, mod->module_init_rw, mod->init_size_rw) ||
-+		    within(addr, mod->module_core_rx, mod->core_size_rx) ||
-+		    within(addr, mod->module_core_rw, mod->core_size_rw)) {
- 			if (modname)
- 				*modname = mod->name;
- 			return get_ksymbol(mod, addr, size, offset);
-@@ -2125,8 +2276,10 @@ int lookup_module_symbol_name(unsigned l
- 
- 	mutex_lock(&module_mutex);
- 	list_for_each_entry(mod, &modules, list) {
--		if (within(addr, mod->module_init, mod->init_size) ||
--		    within(addr, mod->module_core, mod->core_size)) {
-+		if (within(addr, mod->module_init_rx, mod->init_size_rx) ||
-+		    within(addr, mod->module_init_rw, mod->init_size_rw) ||
-+		    within(addr, mod->module_core_rx, mod->core_size_rx) ||
-+		    within(addr, mod->module_core_rw, mod->core_size_rw)) {
- 			const char *sym;
- 
- 			sym = get_ksymbol(mod, addr, NULL, NULL);
-@@ -2149,8 +2302,10 @@ int lookup_module_symbol_attrs(unsigned 
- 
- 	mutex_lock(&module_mutex);
- 	list_for_each_entry(mod, &modules, list) {
--		if (within(addr, mod->module_init, mod->init_size) ||
--		    within(addr, mod->module_core, mod->core_size)) {
-+		if (within(addr, mod->module_init_rx, mod->init_size_rx) ||
-+		    within(addr, mod->module_init_rw, mod->init_size_rw) ||
-+		    within(addr, mod->module_core_rx, mod->core_size_rx) ||
-+		    within(addr, mod->module_core_rw, mod->core_size_rw)) {
- 			const char *sym;
- 
- 			sym = get_ksymbol(mod, addr, size, offset);
-@@ -2270,7 +2425,7 @@ static int m_show(struct seq_file *m, vo
- 	char buf[8];
- 
- 	seq_printf(m, "%s %lu",
--		   mod->name, mod->init_size + mod->core_size);
-+		   mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
- 	print_unload_info(m, mod);
- 
- 	/* Informative for users. */
-@@ -2279,7 +2434,7 @@ static int m_show(struct seq_file *m, vo
- 		   mod->state == MODULE_STATE_COMING ? "Loading":
- 		   "Live");
- 	/* Used by oprofile and other similar tools. */
--	seq_printf(m, " 0x%p", mod->module_core);
-+	seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
- 
- 	/* Taints info */
- 	if (mod->taints)
-@@ -2335,7 +2490,8 @@ int is_module_address(unsigned long addr
- 	preempt_disable();
- 
- 	list_for_each_entry(mod, &modules, list) {
--		if (within(addr, mod->module_core, mod->core_size)) {
-+		if (within(addr, mod->module_core_rx, mod->core_size_rx) ||
-+		    within(addr, mod->module_core_rw, mod->core_size_rw)) {
- 			preempt_enable();
- 			return 1;
- 		}
-@@ -2353,8 +2509,8 @@ struct module *__module_text_address(uns
- 	struct module *mod;
- 
- 	list_for_each_entry(mod, &modules, list)
--		if (within(addr, mod->module_init, mod->init_text_size)
--		    || within(addr, mod->module_core, mod->core_text_size))
-+		if (within(addr, mod->module_init_rx, mod->init_size_rx)
-+		    || within(addr, mod->module_core_rx, mod->core_size_rx))
- 			return mod;
- 	return NULL;
- }
---- a/kernel/mutex.c
-+++ b/kernel/mutex.c
-@@ -81,7 +81,7 @@ __mutex_lock_slowpath(atomic_t *lock_cou
-  *
-  * This function is similar to (but not equivalent to) down().
-  */
--void inline fastcall __sched mutex_lock(struct mutex *lock)
-+inline void fastcall __sched mutex_lock(struct mutex *lock)
- {
- 	might_sleep();
- 	/*
---- a/kernel/params.c
-+++ b/kernel/params.c
-@@ -275,7 +275,7 @@ static int param_array(const char *name,
- 		       unsigned int min, unsigned int max,
- 		       void *elem, int elemsize,
- 		       int (*set)(const char *, struct kernel_param *kp),
--		       int *num)
-+		       unsigned int *num)
- {
- 	int ret;
- 	struct kernel_param kp;
---- a/kernel/pid.c
-+++ b/kernel/pid.c
-@@ -28,6 +28,7 @@
- #include <linux/hash.h>
- #include <linux/pid_namespace.h>
- #include <linux/init_task.h>
-+#include <linux/grsecurity.h>
- 
- #define pid_hashfn(nr) hash_long((unsigned long)nr, pidhash_shift)
- static struct hlist_head *pid_hash;
-@@ -37,7 +38,7 @@ struct pid init_struct_pid = INIT_STRUCT
- 
- int pid_max = PID_MAX_DEFAULT;
- 
--#define RESERVED_PIDS		300
-+#define RESERVED_PIDS		500
- 
- int pid_max_min = RESERVED_PIDS + 1;
- int pid_max_max = PID_MAX_LIMIT;
-@@ -309,7 +310,14 @@ struct task_struct * fastcall pid_task(s
-  */
- struct task_struct *find_task_by_pid_type(int type, int nr)
- {
--	return pid_task(find_pid(nr), type);
-+	struct task_struct *task;
-+	
-+	task = pid_task(find_pid(nr), type);
-+
-+	if (gr_pid_is_chrooted(task))
-+		return NULL;
-+
-+	return task;
- }
- 
- EXPORT_SYMBOL(find_task_by_pid_type);
---- a/kernel/posix-cpu-timers.c
-+++ b/kernel/posix-cpu-timers.c
-@@ -6,6 +6,7 @@
- #include <linux/posix-timers.h>
- #include <asm/uaccess.h>
- #include <linux/errno.h>
-+#include <linux/grsecurity.h>
- 
- static int check_clock(const clockid_t which_clock)
- {
-@@ -1144,6 +1145,7 @@ static void check_process_timers(struct 
- 			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
- 			return;
- 		}
-+		gr_learn_resource(tsk, RLIMIT_CPU, psecs, 1);
- 		if (psecs >= sig->rlim[RLIMIT_CPU].rlim_cur) {
- 			/*
- 			 * At the soft limit, send a SIGXCPU every second.
---- a/kernel/power/poweroff.c
-+++ b/kernel/power/poweroff.c
-@@ -35,7 +35,7 @@ static struct sysrq_key_op	sysrq_powerof
-  	.enable_mask	= SYSRQ_ENABLE_BOOT,
- };
- 
--static int pm_sysrq_init(void)
-+static int __init pm_sysrq_init(void)
- {
- 	register_sysrq_key('o', &sysrq_poweroff_op);
- 	return 0;
---- a/kernel/printk.c
-+++ b/kernel/printk.c
-@@ -31,6 +31,7 @@
- #include <linux/bootmem.h>
- #include <linux/syscalls.h>
- #include <linux/jiffies.h>
-+#include <linux/grsecurity.h>
- 
- #include <asm/uaccess.h>
- 
-@@ -184,6 +185,11 @@ int do_syslog(int type, char __user *buf
- 	char c;
- 	int error = 0;
- 
-+#ifdef CONFIG_GRKERNSEC_DMESG
-+	if (grsec_enable_dmesg && !capable(CAP_SYS_ADMIN))
-+		return -EPERM;
-+#endif
-+
- 	error = security_syslog(type);
- 	if (error)
- 		return error;
---- a/kernel/ptrace.c
-+++ b/kernel/ptrace.c
-@@ -19,6 +19,7 @@
- #include <linux/security.h>
- #include <linux/signal.h>
- #include <linux/audit.h>
-+#include <linux/grsecurity.h>
- 
- #include <asm/pgtable.h>
- #include <asm/uaccess.h>
-@@ -138,12 +139,12 @@ static int may_attach(struct task_struct
- 	     (current->uid != task->uid) ||
- 	     (current->gid != task->egid) ||
- 	     (current->gid != task->sgid) ||
--	     (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE))
-+	     (current->gid != task->gid)) && !capable_nolog(CAP_SYS_PTRACE))
- 		return -EPERM;
- 	smp_rmb();
- 	if (task->mm)
- 		dumpable = get_dumpable(task->mm);
--	if (!dumpable && !capable(CAP_SYS_PTRACE))
-+	if (!dumpable && !capable_nolog(CAP_SYS_PTRACE))
- 		return -EPERM;
- 
- 	return security_ptrace(current, task);
-@@ -480,6 +481,11 @@ asmlinkage long sys_ptrace(long request,
- 	if (ret < 0)
- 		goto out_put_task_struct;
- 
-+	if (gr_handle_ptrace(child, request)) {
-+		ret = -EPERM;
-+		goto out_put_task_struct;
-+	}
-+
- 	ret = arch_ptrace(child, request, addr, data);
- 	if (ret < 0)
- 		goto out_put_task_struct;
---- a/kernel/rcupdate.c
-+++ b/kernel/rcupdate.c
-@@ -63,11 +63,11 @@ static struct rcu_ctrlblk rcu_bh_ctrlblk
- 	.cpumask = CPU_MASK_NONE,
- };
- 
--DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
--DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };
-+DEFINE_PER_CPU(struct rcu_data, rcu_data);
-+DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
- 
- /* Fake initialization required by compiler */
--static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL};
-+static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet);
- static int blimit = 10;
- static int qhimark = 10000;
- static int qlowmark = 100;
---- a/kernel/relay.c
-+++ b/kernel/relay.c
-@@ -1139,7 +1139,7 @@ static int subbuf_splice_actor(struct fi
- 		return 0;
- 
- 	ret = *nonpad_ret = splice_to_pipe(pipe, &spd);
--	if (ret < 0 || ret < total_len)
-+	if ((int)ret < 0 || ret < total_len)
- 		return ret;
- 
-         if (read_start + ret == nonpad_end)
---- a/kernel/resource.c
-+++ b/kernel/resource.c
-@@ -133,10 +133,27 @@ static int __init ioresources_init(void)
- {
- 	struct proc_dir_entry *entry;
- 
-+#ifdef CONFIG_GRKERNSEC_PROC_ADD
-+#ifdef CONFIG_GRKERNSEC_PROC_USER
-+	entry = create_proc_entry("ioports", S_IRUSR, NULL);
-+#elif CONFIG_GRKERNSEC_PROC_USERGROUP
-+	entry = create_proc_entry("ioports", S_IRUSR | S_IRGRP, NULL);
-+#endif
-+#else
- 	entry = create_proc_entry("ioports", 0, NULL);
-+#endif
- 	if (entry)
- 		entry->proc_fops = &proc_ioports_operations;
-+
-+#ifdef CONFIG_GRKERNSEC_PROC_ADD
-+#ifdef CONFIG_GRKERNSEC_PROC_USER
-+	entry = create_proc_entry("iomem", S_IRUSR, NULL);
-+#elif CONFIG_GRKERNSEC_PROC_USERGROUP
-+	entry = create_proc_entry("iomem", S_IRUSR | S_IRGRP, NULL);
-+#endif
-+#else
- 	entry = create_proc_entry("iomem", 0, NULL);
-+#endif
- 	if (entry)
- 		entry->proc_fops = &proc_iomem_operations;
- 	return 0;
---- a/kernel/sched.c
-+++ b/kernel/sched.c
-@@ -61,6 +61,7 @@
- #include <linux/delayacct.h>
- #include <linux/reciprocal_div.h>
- #include <linux/unistd.h>
-+#include <linux/grsecurity.h>
- 
- #include <asm/tlb.h>
- 
-@@ -3470,7 +3471,7 @@ pick_next_task(struct rq *rq, struct tas
- asmlinkage void __sched schedule(void)
- {
- 	struct task_struct *prev, *next;
--	long *switch_count;
-+	unsigned long *switch_count;
- 	struct rq *rq;
- 	int cpu;
- 
-@@ -4079,7 +4080,8 @@ asmlinkage long sys_nice(int increment)
- 	if (nice > 19)
- 		nice = 19;
- 
--	if (increment < 0 && !can_nice(current, nice))
-+	if (increment < 0 && (!can_nice(current, nice) ||
-+			      gr_handle_chroot_nice()))
- 		return -EPERM;
- 
- 	retval = security_task_setnice(current, nice);
-@@ -5267,7 +5269,7 @@ static struct ctl_table sd_ctl_dir[] = {
- 		.procname	= "sched_domain",
- 		.mode		= 0555,
- 	},
--	{0,},
-+	{ 0, NULL, NULL, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL },
- };
- 
- static struct ctl_table sd_ctl_root[] = {
-@@ -5277,7 +5279,7 @@ static struct ctl_table sd_ctl_root[] = 
- 		.mode		= 0555,
- 		.child		= sd_ctl_dir,
- 	},
--	{0,},
-+	{ 0, NULL, NULL, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL },
- };
- 
- static struct ctl_table *sd_alloc_ctl_entry(int n)
---- a/kernel/signal.c
-+++ b/kernel/signal.c
-@@ -25,6 +25,7 @@
- #include <linux/capability.h>
- #include <linux/freezer.h>
- #include <linux/pid_namespace.h>
-+#include <linux/grsecurity.h>
- #include <linux/nsproxy.h>
- 
- #include <asm/param.h>
-@@ -541,7 +542,9 @@ static int check_kill_permission(int sig
- 		    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
- 		    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
- 		    && !capable(CAP_KILL))
--		return error;
-+			return error;
-+		if (gr_handle_signal(t, sig))
-+			return error;
- 	}
- 
- 	return security_task_kill(t, info, sig, 0);
-@@ -758,7 +761,7 @@ static int __init setup_print_fatal_sign
- 
- __setup("print-fatal-signals=", setup_print_fatal_signals);
- 
--static int
-+int
- specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
- {
- 	int ret = 0;
-@@ -812,6 +815,10 @@ force_sig_info(int sig, struct siginfo *
- 		}
- 	}
- 	ret = specific_send_sig_info(sig, info, t);
-+
-+	gr_log_signal(sig, t);
-+	gr_handle_crash(t, sig);
-+
- 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
- 
- 	return ret;
---- a/kernel/softirq.c
-+++ b/kernel/softirq.c
-@@ -471,9 +471,9 @@ void tasklet_kill(struct tasklet_struct 
- 		printk("Attempt to kill tasklet from interrupt\n");
- 
- 	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
--		do
-+		do {
- 			yield();
--		while (test_bit(TASKLET_STATE_SCHED, &t->state));
-+		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
- 	}
- 	tasklet_unlock_wait(t);
- 	clear_bit(TASKLET_STATE_SCHED, &t->state);
---- a/kernel/sys.c
-+++ b/kernel/sys.c
-@@ -33,6 +33,7 @@
- #include <linux/task_io_accounting_ops.h>
- #include <linux/seccomp.h>
- #include <linux/cpu.h>
-+#include <linux/grsecurity.h>
- 
- #include <linux/compat.h>
- #include <linux/syscalls.h>
-@@ -651,6 +652,12 @@ static int set_one_prio(struct task_stru
- 		error = -EACCES;
- 		goto out;
- 	}
-+
-+	if (gr_handle_chroot_setpriority(p, niceval)) {
-+		error = -EACCES;
-+		goto out;
-+	}
-+
- 	no_nice = security_task_setnice(p, niceval);
- 	if (no_nice) {
- 		error = no_nice;
-@@ -707,10 +714,10 @@ asmlinkage long sys_setpriority(int whic
- 				if ((who != current->uid) && !(user = find_user(who)))
- 					goto out_unlock;	/* No processes for this user */
- 
--			do_each_thread(g, p)
-+			do_each_thread(g, p) {
- 				if (p->uid == who)
- 					error = set_one_prio(p, niceval, error);
--			while_each_thread(g, p);
-+			} while_each_thread(g, p);
- 			if (who != current->uid)
- 				free_uid(user);		/* For find_user() */
- 			break;
-@@ -769,13 +776,13 @@ asmlinkage long sys_getpriority(int whic
- 				if ((who != current->uid) && !(user = find_user(who)))
- 					goto out_unlock;	/* No processes for this user */
- 
--			do_each_thread(g, p)
-+			do_each_thread(g, p) {
- 				if (p->uid == who) {
- 					niceval = 20 - task_nice(p);
- 					if (niceval > retval)
- 						retval = niceval;
- 				}
--			while_each_thread(g, p);
-+			} while_each_thread(g, p);
- 			if (who != current->uid)
- 				free_uid(user);		/* for find_user() */
- 			break;
-@@ -1047,6 +1054,9 @@ asmlinkage long sys_setregid(gid_t rgid,
- 	if (rgid != (gid_t) -1 ||
- 	    (egid != (gid_t) -1 && egid != old_rgid))
- 		current->sgid = new_egid;
-+
-+	gr_set_role_label(current, current->uid, new_rgid);
-+
- 	current->fsgid = new_egid;
- 	current->egid = new_egid;
- 	current->gid = new_rgid;
-@@ -1074,6 +1084,9 @@ asmlinkage long sys_setgid(gid_t gid)
- 			set_dumpable(current->mm, suid_dumpable);
- 			smp_wmb();
- 		}
-+
-+		gr_set_role_label(current, current->uid, gid);
-+
- 		current->gid = current->egid = current->sgid = current->fsgid = gid;
- 	} else if ((gid == current->gid) || (gid == current->sgid)) {
- 		if (old_egid != gid) {
-@@ -1111,6 +1124,9 @@ static int set_user(uid_t new_ruid, int 
- 		set_dumpable(current->mm, suid_dumpable);
- 		smp_wmb();
- 	}
-+
-+	gr_set_role_label(current, new_ruid, current->gid);
-+
- 	current->uid = new_ruid;
- 	return 0;
- }
-@@ -1213,6 +1229,9 @@ asmlinkage long sys_setuid(uid_t uid)
- 	} else if ((uid != current->uid) && (uid != new_suid))
- 		return -EPERM;
- 
-+	if (gr_check_crash_uid(uid))
-+		return -EPERM;
-+
- 	if (old_euid != uid) {
- 		set_dumpable(current->mm, suid_dumpable);
- 		smp_wmb();
-@@ -1315,8 +1334,10 @@ asmlinkage long sys_setresgid(gid_t rgid
- 		current->egid = egid;
- 	}
- 	current->fsgid = current->egid;
--	if (rgid != (gid_t) -1)
-+	if (rgid != (gid_t) -1) {
-+		gr_set_role_label(current, current->uid, rgid);
- 		current->gid = rgid;
-+	}
- 	if (sgid != (gid_t) -1)
- 		current->sgid = sgid;
- 
-@@ -1463,7 +1484,10 @@ asmlinkage long sys_setpgid(pid_t pid, p
- 	write_lock_irq(&tasklist_lock);
- 
- 	err = -ESRCH;
--	p = find_task_by_pid(pid);
-+	/* grsec: replaced find_task_by_pid with equivalent call
-+	   which lacks the chroot restriction
-+	*/
-+	p = pid_task(find_pid(pid), PIDTYPE_PID);
- 	if (!p)
- 		goto out;
- 
-@@ -2183,7 +2207,7 @@ asmlinkage long sys_prctl(int option, un
- 			error = get_dumpable(current->mm);
- 			break;
- 		case PR_SET_DUMPABLE:
--			if (arg2 < 0 || arg2 > 1) {
-+			if (arg2 > 1) {
- 				error = -EINVAL;
- 				break;
- 			}
---- a/kernel/sysctl.c
-+++ b/kernel/sysctl.c
-@@ -56,6 +56,13 @@
- #endif
- 
- #if defined(CONFIG_SYSCTL)
-+#include <linux/grsecurity.h>
-+#include <linux/grinternal.h>
-+
-+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
-+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
-+				const int op);
-+extern int gr_handle_chroot_sysctl(const int op);
- 
- /* External variables not in a header file. */
- extern int C_A_D;
-@@ -141,7 +148,7 @@ static int proc_dointvec_taint(ctl_table
- 
- static ctl_table root_table[];
- static struct ctl_table_header root_table_header =
--	{ root_table, LIST_HEAD_INIT(root_table_header.ctl_entry) };
-+	{ root_table, LIST_HEAD_INIT(root_table_header.ctl_entry), 0, NULL };
- 
- static ctl_table kern_table[];
- static ctl_table vm_table[];
-@@ -158,11 +165,27 @@ extern ctl_table inotify_table[];
- #ifdef CONFIG_ALPHA_UAC_SYSCTL
- extern ctl_table uac_table[];
- #endif
-+extern ctl_table grsecurity_table[];
- 
- #ifdef HAVE_ARCH_PICK_MMAP_LAYOUT
- int sysctl_legacy_va_layout;
- #endif
- 
-+#ifdef CONFIG_PAX_SOFTMODE
-+static ctl_table pax_table[] = {
-+	{
-+		.ctl_name	= CTL_UNNUMBERED,
-+		.procname	= "softmode",
-+		.data		= &pax_softmode,
-+		.maxlen		= sizeof(unsigned int),
-+		.mode		= 0600,
-+		.proc_handler	= &proc_dointvec,
-+	},
-+
-+	{ .ctl_name = 0 }
-+};
-+#endif
-+
- extern int prove_locking;
- extern int lock_stat;
- 
-@@ -207,6 +230,16 @@ static ctl_table root_table[] = {
- 		.mode		= 0555,
- 		.child		= dev_table,
- 	},
-+
-+#ifdef CONFIG_PAX_SOFTMODE
-+	{
-+		.ctl_name	= CTL_UNNUMBERED,
-+		.procname	= "pax",
-+		.mode		= 0500,
-+		.child		= pax_table,
-+	},
-+#endif
-+
- /*
-  * NOTE: do not add new entries to this table unless you have read
-  * Documentation/sysctl/ctl_unnumbered.txt
-@@ -777,6 +810,14 @@ static ctl_table kern_table[] = {
- 		.proc_handler	= &proc_dostring,
- 		.strategy	= &sysctl_string,
- 	},
-+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_MODSTOP)
-+	{
-+		.ctl_name	= KERN_GRSECURITY,
-+		.procname	= "grsecurity",
-+		.mode		= 0500,
-+		.child		= grsecurity_table,
-+	},
-+#endif
- /*
-  * NOTE: do not add new entries to this table unless you have read
-  * Documentation/sysctl/ctl_unnumbered.txt
-@@ -1388,6 +1429,25 @@ static int test_perm(int mode, int op)
- int sysctl_perm(ctl_table *table, int op)
- {
- 	int error;
-+	if (table->parent != NULL && table->parent->procname != NULL &&
-+	    table->procname != NULL &&
-+	    gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
-+		return -EACCES;
-+	if (gr_handle_chroot_sysctl(op))
-+		return -EACCES;
-+	error = gr_handle_sysctl(table, op);
-+	if (error)
-+		return error;
-+	error = security_sysctl(table, op);
-+	if (error)
-+		return error;
-+	return test_perm(table->mode, op);
-+}
-+
-+int sysctl_perm_nochk(ctl_table *table, int op)
-+{
-+	int error;
-+
- 	error = security_sysctl(table, op);
- 	if (error)
- 		return error;
-@@ -1412,13 +1472,14 @@ repeat:
- 		if (n == table->ctl_name) {
- 			int error;
- 			if (table->child) {
--				if (sysctl_perm(table, 001))
-+				if (sysctl_perm_nochk(table, 001))
- 					return -EPERM;
- 				name++;
- 				nlen--;
- 				table = table->child;
- 				goto repeat;
- 			}
-+
- 			error = do_sysctl_strategy(table, name, nlen,
- 						   oldval, oldlenp,
- 						   newval, newlen);
---- a/kernel/time.c
-+++ b/kernel/time.c
-@@ -35,6 +35,7 @@
- #include <linux/security.h>
- #include <linux/fs.h>
- #include <linux/module.h>
-+#include <linux/grsecurity.h>
- 
- #include <asm/uaccess.h>
- #include <asm/unistd.h>
-@@ -92,6 +93,9 @@ asmlinkage long sys_stime(time_t __user 
- 		return err;
- 
- 	do_settimeofday(&tv);
-+
-+	gr_log_timechange();
-+
- 	return 0;
- }
- 
-@@ -197,6 +201,8 @@ asmlinkage long sys_settimeofday(struct 
- 			return -EFAULT;
- 	}
- 
-+	gr_log_timechange();
-+
- 	return do_sys_settimeofday(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
- }
- 
-@@ -235,7 +241,7 @@ EXPORT_SYMBOL(current_fs_time);
-  * Avoid unnecessary multiplications/divisions in the
-  * two most common HZ cases:
-  */
--unsigned int inline jiffies_to_msecs(const unsigned long j)
-+inline unsigned int jiffies_to_msecs(const unsigned long j)
- {
- #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
- 	return (MSEC_PER_SEC / HZ) * j;
-@@ -247,7 +253,7 @@ unsigned int inline jiffies_to_msecs(con
- }
- EXPORT_SYMBOL(jiffies_to_msecs);
- 
--unsigned int inline jiffies_to_usecs(const unsigned long j)
-+inline unsigned int jiffies_to_usecs(const unsigned long j)
- {
- #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
- 	return (USEC_PER_SEC / HZ) * j;
---- a/kernel/utsname_sysctl.c
-+++ b/kernel/utsname_sysctl.c
-@@ -121,7 +121,7 @@ static struct ctl_table uts_kern_table[]
- 		.proc_handler	= proc_do_uts_string,
- 		.strategy	= sysctl_uts_string,
- 	},
--	{}
-+	{ 0, NULL, NULL, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL }
- };
- 
- static struct ctl_table uts_root_table[] = {
-@@ -131,7 +131,7 @@ static struct ctl_table uts_root_table[]
- 		.mode		= 0555,
- 		.child		= uts_kern_table,
- 	},
--	{}
-+	{ 0, NULL, NULL, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL }
- };
- 
- static int __init utsname_sysctl_init(void)
---- a/lib/radix-tree.c
-+++ b/lib/radix-tree.c
-@@ -76,7 +76,7 @@ struct radix_tree_preload {
- 	int nr;
- 	struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
- };
--DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
-+DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, {NULL} };
- 
- static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
- {
---- a/Makefile
-+++ b/Makefile
-@@ -312,7 +312,7 @@ LINUXINCLUDE    := -Iinclude \
- 
- CPPFLAGS        := -D__KERNEL__ $(LINUXINCLUDE)
- 
--CFLAGS          := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
-+CFLAGS          := -Wall -W -Wno-unused -Wno-sign-compare -Wundef -Wstrict-prototypes -Wno-trigraphs \
- 		   -fno-strict-aliasing -fno-common \
- 		   -Werror-implicit-function-declaration
- AFLAGS          := -D__ASSEMBLY__
-@@ -560,7 +560,7 @@ export mod_strip_cmd
- 
- 
- ifeq ($(KBUILD_EXTMOD),)
--core-y		+= kernel/ mm/ fs/ ipc/ security/ crypto/ block/
-+core-y		+= kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
- 
- vmlinux-dirs	:= $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
- 		     $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
---- a/mm/filemap.c
-+++ b/mm/filemap.c
-@@ -30,6 +30,7 @@
- #include <linux/security.h>
- #include <linux/syscalls.h>
- #include <linux/cpuset.h>
-+#include <linux/grsecurity.h>
- #include "filemap.h"
- #include "internal.h"
- 
-@@ -1461,7 +1462,7 @@ int generic_file_mmap(struct file * file
- 	struct address_space *mapping = file->f_mapping;
- 
- 	if (!mapping->a_ops->readpage)
--		return -ENOEXEC;
-+		return -ENODEV;
- 	file_accessed(file);
- 	vma->vm_ops = &generic_file_vm_ops;
- 	vma->vm_flags |= VM_CAN_NONLINEAR;
-@@ -1726,6 +1727,7 @@ inline int generic_write_checks(struct f
-                         *pos = i_size_read(inode);
- 
- 		if (limit != RLIM_INFINITY) {
-+			gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
- 			if (*pos >= limit) {
- 				send_sig(SIGXFSZ, current, 0);
- 				return -EFBIG;
---- a/mm/fremap.c
-+++ b/mm/fremap.c
-@@ -148,6 +148,13 @@ asmlinkage long sys_remap_file_pages(uns
-  retry:
- 	vma = find_vma(mm, start);
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC)) {
-+		up_read(&mm->mmap_sem);
-+		return err;
-+	}
-+#endif
-+
- 	/*
- 	 * Make sure the vma is shared, that it supports prefaulting,
- 	 * and that the remapped range is valid and fully within
---- a/mm/hugetlb.c
-+++ b/mm/hugetlb.c
-@@ -460,6 +460,26 @@ void unmap_hugepage_range(struct vm_area
- 	}
- }
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
-+{
-+	struct mm_struct *mm = vma->vm_mm;
-+	struct vm_area_struct *vma_m;
-+	unsigned long address_m;
-+	pte_t *ptep_m;
-+
-+	vma_m = pax_find_mirror_vma(vma);
-+	if (!vma_m)
-+		return;
-+
-+	BUG_ON(address >= SEGMEXEC_TASK_SIZE);
-+	address_m = address + SEGMEXEC_TASK_SIZE;
-+	ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
-+	get_page(page_m);
-+	set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
-+}
-+#endif
-+
- static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
- 			unsigned long address, pte_t *ptep, pte_t pte)
- {
-@@ -493,6 +513,11 @@ static int hugetlb_cow(struct mm_struct 
- 		/* Break COW */
- 		set_huge_pte_at(mm, address, ptep,
- 				make_huge_pte(vma, new_page, 1));
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+		pax_mirror_huge_pte(vma, address, new_page);
-+#endif
-+
- 		/* Make the old page be freed below */
- 		new_page = old_page;
- 	}
-@@ -563,6 +588,10 @@ retry:
- 				&& (vma->vm_flags & VM_SHARED)));
- 	set_huge_pte_at(mm, address, ptep, new_pte);
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	pax_mirror_huge_pte(vma, address, page);
-+#endif
-+
- 	if (write_access && !(vma->vm_flags & VM_SHARED)) {
- 		/* Optimization, do the COW without a second fault */
- 		ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
-@@ -589,6 +618,27 @@ int hugetlb_fault(struct mm_struct *mm, 
- 	int ret;
- 	static DEFINE_MUTEX(hugetlb_instantiation_mutex);
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	struct vm_area_struct *vma_m;
-+
-+	vma_m = pax_find_mirror_vma(vma);
-+	if (vma_m) {
-+		unsigned long address_m;
-+
-+		if (vma->vm_start > vma_m->vm_start) {
-+			address_m = address;
-+			address -= SEGMEXEC_TASK_SIZE;
-+			vma = vma_m;
-+		} else
-+			address_m = address + SEGMEXEC_TASK_SIZE;
-+
-+		if (!huge_pte_alloc(mm, address_m))
-+			return VM_FAULT_OOM;
-+		address_m &= HPAGE_MASK;
-+		unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE);
-+	}
-+#endif
-+
- 	ptep = huge_pte_alloc(mm, address);
- 	if (!ptep)
- 		return VM_FAULT_OOM;
---- a/mm/madvise.c
-+++ b/mm/madvise.c
-@@ -43,6 +43,10 @@ static long madvise_behavior(struct vm_a
- 	pgoff_t pgoff;
- 	int new_flags = vma->vm_flags;
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	struct vm_area_struct *vma_m;
-+#endif
-+
- 	switch (behavior) {
- 	case MADV_NORMAL:
- 		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
-@@ -92,6 +96,13 @@ success:
- 	/*
- 	 * vm_flags is protected by the mmap_sem held in write mode.
- 	 */
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	vma_m = pax_find_mirror_vma(vma);
-+	if (vma_m)
-+		vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
-+#endif
-+
- 	vma->vm_flags = new_flags;
- 
- out:
-@@ -236,6 +247,17 @@ madvise_vma(struct vm_area_struct *vma, 
- 
- 	case MADV_DONTNEED:
- 		error = madvise_dontneed(vma, prev, start, end);
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+		if (!error) {
-+			struct vm_area_struct *vma_m, *prev_m;
-+
-+			vma_m = pax_find_mirror_vma(vma);
-+			if (vma_m)
-+				error = madvise_dontneed(vma_m, &prev_m, start + SEGMEXEC_TASK_SIZE, end + SEGMEXEC_TASK_SIZE);
-+		}
-+#endif
-+
- 		break;
- 
- 	default:
-@@ -308,6 +330,16 @@ asmlinkage long sys_madvise(unsigned lon
- 	if (end < start)
- 		goto out;
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
-+		if (end > SEGMEXEC_TASK_SIZE)
-+			goto out;
-+	} else
-+#endif
-+
-+	if (end > TASK_SIZE)
-+		goto out;
-+
- 	error = 0;
- 	if (end == start)
- 		goto out;
---- a/mm/memory.c
-+++ b/mm/memory.c
-@@ -50,6 +50,7 @@
- #include <linux/delayacct.h>
- #include <linux/init.h>
- #include <linux/writeback.h>
-+#include <linux/grsecurity.h>
- 
- #include <asm/pgalloc.h>
- #include <asm/uaccess.h>
-@@ -993,7 +994,7 @@ int get_user_pages(struct task_struct *t
- 		struct vm_area_struct *vma;
- 		unsigned int foll_flags;
- 
--		vma = find_extend_vma(mm, start);
-+		vma = find_vma(mm, start);
- 		if (!vma && in_gate_area(tsk, start)) {
- 			unsigned long pg = start & PAGE_MASK;
- 			struct vm_area_struct *gate_vma = get_gate_vma(tsk);
-@@ -1033,7 +1034,7 @@ int get_user_pages(struct task_struct *t
- 			continue;
- 		}
- 
--		if (!vma || (vma->vm_flags & (VM_IO | VM_PFNMAP))
-+		if (!vma || start < vma->vm_start || (vma->vm_flags & (VM_IO | VM_PFNMAP))
- 				|| !(vm_flags & vma->vm_flags))
- 			return i ? : -EFAULT;
- 
-@@ -1614,6 +1615,195 @@ static inline void cow_user_page(struct 
- 	copy_user_highpage(dst, src, va, vma);
- }
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
-+{
-+	struct mm_struct *mm = vma->vm_mm;
-+	spinlock_t *ptl;
-+	pte_t *pte, entry;
-+
-+	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
-+	entry = *pte;
-+	if (!pte_present(entry)) {
-+		if (!pte_none(entry)) {
-+			BUG_ON(pte_file(entry));
-+			free_swap_and_cache(pte_to_swp_entry(entry));
-+			pte_clear_not_present_full(mm, address, pte, 0);
-+		}
-+	} else {
-+		struct page *page;
-+
-+		page = vm_normal_page(vma, address, entry);
-+		if (page) {
-+			flush_cache_page(vma, address, pte_pfn(entry));
-+			flush_icache_page(vma, page);
-+		}
-+		ptep_clear_flush(vma, address, pte);
-+		BUG_ON(pte_dirty(entry));
-+		if (page) {
-+			update_hiwater_rss(mm);
-+			if (PageAnon(page))
-+				dec_mm_counter(mm, anon_rss);
-+			else
-+				dec_mm_counter(mm, file_rss);
-+			page_remove_rmap(page, vma);
-+			page_cache_release(page);
-+		}
-+	}
-+	pte_unmap_unlock(pte, ptl);
-+}
-+
-+/* PaX: if vma is mirrored, synchronize the mirror's PTE
-+ *
-+ * the ptl of the lower mapped page is held on entry and is not released on exit
-+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
-+ */
-+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
-+{
-+	struct mm_struct *mm = vma->vm_mm;
-+	unsigned long address_m;
-+	spinlock_t *ptl_m;
-+	struct vm_area_struct *vma_m;
-+	pmd_t *pmd_m;
-+	pte_t *pte_m, entry_m;
-+
-+	BUG_ON(!page_m || !PageAnon(page_m));
-+
-+	vma_m = pax_find_mirror_vma(vma);
-+	if (!vma_m)
-+		return;
-+
-+	BUG_ON(!PageLocked(page_m));
-+	BUG_ON(address >= SEGMEXEC_TASK_SIZE);
-+	address_m = address + SEGMEXEC_TASK_SIZE;
-+	pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
-+	pte_m = pte_offset_map_nested(pmd_m, address_m);
-+	ptl_m = pte_lockptr(mm, pmd_m);
-+	if (ptl != ptl_m) {
-+		spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
-+		if (!pte_none(*pte_m)) {
-+			spin_unlock(ptl_m);
-+			pte_unmap_nested(pte_m);
-+			unlock_page(page_m);
-+			return;
-+		}
-+	}
-+
-+	entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
-+	page_cache_get(page_m);
-+	page_add_anon_rmap(page_m, vma_m, address_m);
-+	inc_mm_counter(mm, anon_rss);
-+	set_pte_at(mm, address_m, pte_m, entry_m);
-+	update_mmu_cache(vma_m, address_m, entry_m);
-+	lazy_mmu_prot_update(entry_m);
-+	if (ptl != ptl_m)
-+		spin_unlock(ptl_m);
-+	pte_unmap_nested(pte_m);
-+	unlock_page(page_m);
-+}
-+
-+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
-+{
-+	struct mm_struct *mm = vma->vm_mm;
-+	unsigned long address_m;
-+	spinlock_t *ptl_m;
-+	struct vm_area_struct *vma_m;
-+	pmd_t *pmd_m;
-+	pte_t *pte_m, entry_m;
-+
-+	BUG_ON(!page_m || PageAnon(page_m));
-+
-+	vma_m = pax_find_mirror_vma(vma);
-+	if (!vma_m)
-+		return;
-+
-+	BUG_ON(address >= SEGMEXEC_TASK_SIZE);
-+	address_m = address + SEGMEXEC_TASK_SIZE;
-+	pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
-+	pte_m = pte_offset_map_nested(pmd_m, address_m);
-+	ptl_m = pte_lockptr(mm, pmd_m);
-+	if (ptl != ptl_m) {
-+		spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
-+		if (!pte_none(*pte_m)) {
-+			spin_unlock(ptl_m);
-+			pte_unmap_nested(pte_m);
-+			return;
-+		}
-+	}
-+
-+	entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
-+	page_cache_get(page_m);
-+	page_add_file_rmap(page_m);
-+	inc_mm_counter(mm, file_rss);
-+	set_pte_at(mm, address_m, pte_m, entry_m);
-+	update_mmu_cache(vma_m, address_m, entry_m);
-+	lazy_mmu_prot_update(entry_m);
-+	if (ptl != ptl_m)
-+		spin_unlock(ptl_m);
-+	pte_unmap_nested(pte_m);
-+}
-+
-+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
-+{
-+	struct mm_struct *mm = vma->vm_mm;
-+	unsigned long address_m;
-+	spinlock_t *ptl_m;
-+	struct vm_area_struct *vma_m;
-+	pmd_t *pmd_m;
-+	pte_t *pte_m, entry_m;
-+
-+	vma_m = pax_find_mirror_vma(vma);
-+	if (!vma_m)
-+		return;
-+
-+	BUG_ON(address >= SEGMEXEC_TASK_SIZE);
-+	address_m = address + SEGMEXEC_TASK_SIZE;
-+	pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
-+	pte_m = pte_offset_map_nested(pmd_m, address_m);
-+	ptl_m = pte_lockptr(mm, pmd_m);
-+	if (ptl != ptl_m) {
-+		spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
-+		if (!pte_none(*pte_m)) {
-+			spin_unlock(ptl_m);
-+			pte_unmap_nested(pte_m);
-+			return;
-+		}
-+	}
-+
-+	entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
-+	set_pte_at(mm, address_m, pte_m, entry_m);
-+	if (ptl != ptl_m)
-+		spin_unlock(ptl_m);
-+	pte_unmap_nested(pte_m);
-+}
-+
-+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, spinlock_t *ptl)
-+{
-+	struct page *page_m;
-+	pte_t entry;
-+
-+	if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
-+		return;
-+
-+	entry = *pte;
-+	page_m  = vm_normal_page(vma, address, entry);
-+	if (!page_m)
-+		pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
-+	else if (PageAnon(page_m)) {
-+		if (pax_find_mirror_vma(vma)) {
-+			spin_unlock(ptl);
-+			lock_page(page_m);
-+			spin_lock(ptl);
-+			if (pte_same(entry, *pte))
-+				pax_mirror_anon_pte(vma, address, page_m, ptl);
-+			else
-+				unlock_page(page_m);
-+		}
-+	} else
-+		pax_mirror_file_pte(vma, address, page_m, ptl);
-+}
-+#endif
-+
- /*
-  * This routine handles present pages, when users try to write
-  * to a shared page. It is done by copying the page to a new address
-@@ -1733,6 +1923,12 @@ gotten:
- 	 */
- 	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
- 	if (likely(pte_same(*page_table, orig_pte))) {
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+		if (pax_find_mirror_vma(vma))
-+			BUG_ON(TestSetPageLocked(new_page));
-+#endif
-+
- 		if (old_page) {
- 			page_remove_rmap(old_page, vma);
- 			if (!PageAnon(old_page)) {
-@@ -1757,6 +1953,10 @@ gotten:
- 		lru_cache_add_active(new_page);
- 		page_add_new_anon_rmap(new_page, vma, address);
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+		pax_mirror_anon_pte(vma, address, new_page, ptl);
-+#endif
-+
- 		/* Free the old page.. */
- 		new_page = old_page;
- 		ret |= VM_FAULT_WRITE;
-@@ -2034,6 +2234,7 @@ int vmtruncate(struct inode * inode, lof
- 
- do_expand:
- 	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
-+	gr_learn_resource(current, RLIMIT_FSIZE, offset, 1);
- 	if (limit != RLIM_INFINITY && offset > limit)
- 		goto out_sig;
- 	if (offset > inode->i_sb->s_maxbytes)
-@@ -2216,6 +2417,11 @@ static int do_swap_page(struct mm_struct
- 	swap_free(entry);
- 	if (vm_swap_full())
- 		remove_exclusive_swap_page(page);
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if (write_access || !pax_find_mirror_vma(vma))
-+#endif
-+
- 	unlock_page(page);
- 
- 	if (write_access) {
-@@ -2228,6 +2434,11 @@ static int do_swap_page(struct mm_struct
- 
- 	/* No need to invalidate - it was non-present before */
- 	update_mmu_cache(vma, address, pte);
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	pax_mirror_anon_pte(vma, address, page, ptl);
-+#endif
-+
- unlock:
- 	pte_unmap_unlock(page_table, ptl);
- out:
-@@ -2268,6 +2479,12 @@ static int do_anonymous_page(struct mm_s
- 		page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
- 		if (!pte_none(*page_table))
- 			goto release;
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+		if (pax_find_mirror_vma(vma))
-+			BUG_ON(TestSetPageLocked(page));
-+#endif
-+
- 		inc_mm_counter(mm, anon_rss);
- 		lru_cache_add_active(page);
- 		page_add_new_anon_rmap(page, vma, address);
-@@ -2290,6 +2507,14 @@ static int do_anonymous_page(struct mm_s
- 	/* No need to invalidate - it was non-present before */
- 	update_mmu_cache(vma, address, entry);
- 	lazy_mmu_prot_update(entry);
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if (write_access)
-+		pax_mirror_anon_pte(vma, address, page, ptl);
-+	else
-+		pax_mirror_file_pte(vma, address, page, ptl);
-+#endif
-+
- unlock:
- 	pte_unmap_unlock(page_table, ptl);
- 	return 0;
-@@ -2422,6 +2647,12 @@ static int __do_fault(struct mm_struct *
- 	 */
- 	/* Only go through if we didn't race with anybody else... */
- 	if (likely(pte_same(*page_table, orig_pte))) {
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+		if (anon && pax_find_mirror_vma(vma))
-+			BUG_ON(TestSetPageLocked(page));
-+#endif
-+
- 		flush_icache_page(vma, page);
- 		entry = mk_pte(page, vma->vm_page_prot);
- 		if (flags & FAULT_FLAG_WRITE)
-@@ -2443,6 +2674,14 @@ static int __do_fault(struct mm_struct *
- 		/* no need to invalidate: a not-present page won't be cached */
- 		update_mmu_cache(vma, address, entry);
- 		lazy_mmu_prot_update(entry);
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+		if (anon)
-+			pax_mirror_anon_pte(vma, address, page, ptl);
-+		else
-+			pax_mirror_file_pte(vma, address, page, ptl);
-+#endif
-+
- 	} else {
- 		if (anon)
- 			page_cache_release(page);
-@@ -2522,6 +2761,11 @@ static noinline int do_no_pfn(struct mm_
- 		if (write_access)
- 			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
- 		set_pte_at(mm, address, page_table, entry);
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+		pax_mirror_pfn_pte(vma, address, pfn, ptl);
-+#endif
-+
- 	}
- 	pte_unmap_unlock(page_table, ptl);
- 	return 0;
-@@ -2625,6 +2869,11 @@ static inline int handle_pte_fault(struc
- 		if (write_access)
- 			flush_tlb_page(vma, address);
- 	}
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	pax_mirror_pte(vma, address, pte, ptl);
-+#endif
-+
- unlock:
- 	pte_unmap_unlock(pte, ptl);
- 	return 0;
-@@ -2641,6 +2890,10 @@ int handle_mm_fault(struct mm_struct *mm
- 	pmd_t *pmd;
- 	pte_t *pte;
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	struct vm_area_struct *vma_m;
-+#endif
-+
- 	__set_current_state(TASK_RUNNING);
- 
- 	count_vm_event(PGFAULT);
-@@ -2648,6 +2901,34 @@ int handle_mm_fault(struct mm_struct *mm
- 	if (unlikely(is_vm_hugetlb_page(vma)))
- 		return hugetlb_fault(mm, vma, address, write_access);
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	vma_m = pax_find_mirror_vma(vma);
-+	if (vma_m) {
-+		unsigned long address_m;
-+		pgd_t *pgd_m;
-+		pud_t *pud_m;
-+		pmd_t *pmd_m;
-+
-+		if (vma->vm_start > vma_m->vm_start) {
-+			address_m = address;
-+			address -= SEGMEXEC_TASK_SIZE;
-+			vma = vma_m;
-+		} else
-+			address_m = address + SEGMEXEC_TASK_SIZE;
-+
-+		pgd_m = pgd_offset(mm, address_m);
-+		pud_m = pud_alloc(mm, pgd_m, address_m);
-+		if (!pud_m)
-+			return VM_FAULT_OOM;
-+		pmd_m = pmd_alloc(mm, pud_m, address_m);
-+		if (!pmd_m)
-+			return VM_FAULT_OOM;
-+		if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
-+			return VM_FAULT_OOM;
-+		pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
-+	}
-+#endif
-+
- 	pgd = pgd_offset(mm, address);
- 	pud = pud_alloc(mm, pgd, address);
- 	if (!pud)
-@@ -2781,7 +3062,7 @@ static int __init gate_vma_init(void)
- 	gate_vma.vm_start = FIXADDR_USER_START;
- 	gate_vma.vm_end = FIXADDR_USER_END;
- 	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
--	gate_vma.vm_page_prot = __P101;
-+	gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
- 	/*
- 	 * Make sure the vDSO gets into every core dump.
- 	 * Dumping its contents makes post-mortem fully interpretable later
---- a/mm/mempolicy.c
-+++ b/mm/mempolicy.c
-@@ -401,6 +401,10 @@ static int mbind_range(struct vm_area_st
- 	struct vm_area_struct *next;
- 	int err;
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	struct vm_area_struct *vma_m;
-+#endif
-+
- 	err = 0;
- 	for (; vma && vma->vm_start < end; vma = next) {
- 		next = vma->vm_next;
-@@ -412,6 +416,16 @@ static int mbind_range(struct vm_area_st
- 			err = policy_vma(vma, new);
- 		if (err)
- 			break;
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+		vma_m = pax_find_mirror_vma(vma);
-+		if (vma_m) {
-+			err = policy_vma(vma_m, new);
-+			if (err)
-+				break;
-+		}
-+#endif
-+
- 	}
- 	return err;
- }
-@@ -732,7 +746,7 @@ static struct page *new_vma_page(struct 
- }
- #endif
- 
--long do_mbind(unsigned long start, unsigned long len,
-+static long do_mbind(unsigned long start, unsigned long len,
- 		unsigned long mode, nodemask_t *nmask, unsigned long flags)
- {
- 	struct vm_area_struct *vma;
-@@ -760,6 +774,17 @@ long do_mbind(unsigned long start, unsig
- 
- 	if (end < start)
- 		return -EINVAL;
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if (mm->pax_flags & MF_PAX_SEGMEXEC) {
-+		if (end > SEGMEXEC_TASK_SIZE)
-+			return -EINVAL;
-+	} else
-+#endif
-+
-+	if (end > TASK_SIZE)
-+		return -EINVAL;
-+
- 	if (end == start)
- 		return 0;
- 
---- a/mm/mlock.c
-+++ b/mm/mlock.c
-@@ -12,6 +12,7 @@
- #include <linux/syscalls.h>
- #include <linux/sched.h>
- #include <linux/module.h>
-+#include <linux/grsecurity.h>
- 
- int can_do_mlock(void)
- {
-@@ -95,6 +96,17 @@ static int do_mlock(unsigned long start,
- 		return -EINVAL;
- 	if (end == start)
- 		return 0;
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
-+		if (end > SEGMEXEC_TASK_SIZE)
-+			return -EINVAL;
-+	} else
-+#endif
-+
-+	if (end > TASK_SIZE)
-+		return -EINVAL;
-+
- 	vma = find_vma_prev(current->mm, start, &prev);
- 	if (!vma || vma->vm_start > start)
- 		return -ENOMEM;
-@@ -152,6 +164,7 @@ asmlinkage long sys_mlock(unsigned long 
- 	lock_limit >>= PAGE_SHIFT;
- 
- 	/* check against resource limits */
-+	gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
- 	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
- 		error = do_mlock(start, len, 1);
- 	up_write(&current->mm->mmap_sem);
-@@ -173,10 +186,10 @@ asmlinkage long sys_munlock(unsigned lon
- static int do_mlockall(int flags)
- {
- 	struct vm_area_struct * vma, * prev = NULL;
--	unsigned int def_flags = 0;
-+	unsigned int def_flags = current->mm->def_flags & ~VM_LOCKED;
- 
- 	if (flags & MCL_FUTURE)
--		def_flags = VM_LOCKED;
-+		def_flags |= VM_LOCKED;
- 	current->mm->def_flags = def_flags;
- 	if (flags == MCL_FUTURE)
- 		goto out;
-@@ -184,6 +197,12 @@ static int do_mlockall(int flags)
- 	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
- 		unsigned int newflags;
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+		if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
-+			break;
-+#endif
-+
-+		BUG_ON(vma->vm_end > TASK_SIZE);
- 		newflags = vma->vm_flags | VM_LOCKED;
- 		if (!(flags & MCL_CURRENT))
- 			newflags &= ~VM_LOCKED;
-@@ -213,6 +232,7 @@ asmlinkage long sys_mlockall(int flags)
- 	lock_limit >>= PAGE_SHIFT;
- 
- 	ret = -ENOMEM;
-+	gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm, 1);
- 	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
- 	    capable(CAP_IPC_LOCK))
- 		ret = do_mlockall(flags);
---- a/mm/mmap.c
-+++ b/mm/mmap.c
-@@ -25,6 +25,7 @@
- #include <linux/mount.h>
- #include <linux/mempolicy.h>
- #include <linux/rmap.h>
-+#include <linux/grsecurity.h>
- 
- #include <asm/uaccess.h>
- #include <asm/cacheflush.h>
-@@ -35,6 +36,16 @@
- #define arch_mmap_check(addr, len, flags)	(0)
- #endif
- 
-+static inline void verify_mm_writelocked(struct mm_struct *mm)
-+{
-+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
-+	if (unlikely(down_read_trylock(&mm->mmap_sem))) {
-+		up_read(&mm->mmap_sem);
-+		BUG();
-+	}
-+#endif
-+}
-+
- static void unmap_region(struct mm_struct *mm,
- 		struct vm_area_struct *vma, struct vm_area_struct *prev,
- 		unsigned long start, unsigned long end);
-@@ -60,15 +71,23 @@ static void unmap_region(struct mm_struc
-  *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
-  *
-  */
--pgprot_t protection_map[16] = {
-+pgprot_t protection_map[16] __read_only = {
- 	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
- 	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
- };
- 
- pgprot_t vm_get_page_prot(unsigned long vm_flags)
- {
--	return protection_map[vm_flags &
--				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
-+	pgprot_t prot = protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
-+
-+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
-+	if (!nx_enabled &&
-+	    (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
-+	    (vm_flags & (VM_READ | VM_WRITE)))
-+		prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
-+#endif
-+
-+	return prot;
- }
- EXPORT_SYMBOL(vm_get_page_prot);
- 
-@@ -225,6 +244,7 @@ static struct vm_area_struct *remove_vma
- 	struct vm_area_struct *next = vma->vm_next;
- 
- 	might_sleep();
-+	BUG_ON(vma->vm_mirror);
- 	if (vma->vm_ops && vma->vm_ops->close)
- 		vma->vm_ops->close(vma);
- 	if (vma->vm_file)
-@@ -252,6 +272,7 @@ asmlinkage unsigned long sys_brk(unsigne
- 	 * not page aligned -Ram Gupta
- 	 */
- 	rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
-+	gr_learn_resource(current, RLIMIT_DATA, brk - mm->start_data, 1);
- 	if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
- 		goto out;
- 
-@@ -352,8 +373,12 @@ find_vma_prepare(struct mm_struct *mm, u
- 
- 		if (vma_tmp->vm_end > addr) {
- 			vma = vma_tmp;
--			if (vma_tmp->vm_start <= addr)
--				return vma;
-+			if (vma_tmp->vm_start <= addr) {
-+//printk("PAX: prep: %08lx-%08lx %08lx pr:%p l:%p pa:%p ",
-+//vma->vm_start, vma->vm_end, addr, *pprev, *rb_link, *rb_parent);
-+//__print_symbol("%s\n", __builtin_extract_return_addr(__builtin_return_address(0)));
-+				break;
-+			}
- 			__rb_link = &__rb_parent->rb_left;
- 		} else {
- 			rb_prev = __rb_parent;
-@@ -677,6 +702,12 @@ static int
- can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
- 	struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
- {
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
-+		return 0;
-+#endif
-+
- 	if (is_mergeable_vma(vma, file, vm_flags) &&
- 	    is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
- 		if (vma->vm_pgoff == vm_pgoff)
-@@ -696,6 +727,12 @@ static int
- can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
- 	struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
- {
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
-+		return 0;
-+#endif
-+
- 	if (is_mergeable_vma(vma, file, vm_flags) &&
- 	    is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
- 		pgoff_t vm_pglen;
-@@ -738,12 +775,19 @@ can_vma_merge_after(struct vm_area_struc
- struct vm_area_struct *vma_merge(struct mm_struct *mm,
- 			struct vm_area_struct *prev, unsigned long addr,
- 			unsigned long end, unsigned long vm_flags,
--		     	struct anon_vma *anon_vma, struct file *file,
-+			struct anon_vma *anon_vma, struct file *file,
- 			pgoff_t pgoff, struct mempolicy *policy)
- {
- 	pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
- 	struct vm_area_struct *area, *next;
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
-+	struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
-+
-+	BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
-+#endif
-+
- 	/*
- 	 * We later require that vma->vm_flags == vm_flags,
- 	 * so this tests vma->vm_flags & VM_SPECIAL, too.
-@@ -759,6 +803,15 @@ struct vm_area_struct *vma_merge(struct 
- 	if (next && next->vm_end == end)		/* cases 6, 7, 8 */
- 		next = next->vm_next;
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if (prev)
-+		prev_m = pax_find_mirror_vma(prev);
-+	if (area)
-+		area_m = pax_find_mirror_vma(area);
-+	if (next)
-+		next_m = pax_find_mirror_vma(next);
-+#endif
-+
- 	/*
- 	 * Can it merge with the predecessor?
- 	 */
-@@ -778,9 +831,24 @@ struct vm_area_struct *vma_merge(struct 
- 							/* cases 1, 6 */
- 			vma_adjust(prev, prev->vm_start,
- 				next->vm_end, prev->vm_pgoff, NULL);
--		} else					/* cases 2, 5, 7 */
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+			if (prev_m)
-+				vma_adjust(prev_m, prev_m->vm_start,
-+					next_m->vm_end, prev_m->vm_pgoff, NULL);
-+#endif
-+
-+		} else {				/* cases 2, 5, 7 */
- 			vma_adjust(prev, prev->vm_start,
- 				end, prev->vm_pgoff, NULL);
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+			if (prev_m)
-+				vma_adjust(prev_m, prev_m->vm_start,
-+					end_m, prev_m->vm_pgoff, NULL);
-+#endif
-+
-+		}
- 		return prev;
- 	}
- 
-@@ -791,12 +859,27 @@ struct vm_area_struct *vma_merge(struct 
-  			mpol_equal(policy, vma_policy(next)) &&
- 			can_vma_merge_before(next, vm_flags,
- 					anon_vma, file, pgoff+pglen)) {
--		if (prev && addr < prev->vm_end)	/* case 4 */
-+		if (prev && addr < prev->vm_end) {	/* case 4 */
- 			vma_adjust(prev, prev->vm_start,
- 				addr, prev->vm_pgoff, NULL);
--		else					/* cases 3, 8 */
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+			if (prev_m)
-+				vma_adjust(prev_m, prev_m->vm_start,
-+					addr_m, prev_m->vm_pgoff, NULL);
-+#endif
-+
-+		} else {				/* cases 3, 8 */
- 			vma_adjust(area, addr, next->vm_end,
- 				next->vm_pgoff - pglen, NULL);
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+			if (area_m)
-+				vma_adjust(area_m, addr_m, next_m->vm_end,
-+					next_m->vm_pgoff - pglen, NULL);
-+#endif
-+
-+		}
- 		return area;
- 	}
- 
-@@ -871,14 +954,11 @@ none:
- void vm_stat_account(struct mm_struct *mm, unsigned long flags,
- 						struct file *file, long pages)
- {
--	const unsigned long stack_flags
--		= VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
--
- 	if (file) {
- 		mm->shared_vm += pages;
- 		if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
- 			mm->exec_vm += pages;
--	} else if (flags & stack_flags)
-+	} else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
- 		mm->stack_vm += pages;
- 	if (flags & (VM_RESERVED|VM_IO))
- 		mm->reserved_vm += pages;
-@@ -906,22 +986,22 @@ unsigned long do_mmap_pgoff(struct file 
- 	 * (the exception is when the underlying filesystem is noexec
- 	 *  mounted, in which case we dont add PROT_EXEC.)
- 	 */
--	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
-+	if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
- 		if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
- 			prot |= PROT_EXEC;
- 
- 	if (!len)
- 		return -EINVAL;
- 
--	error = arch_mmap_check(addr, len, flags);
--	if (error)
--		return error;
--
- 	/* Careful about overflows.. */
- 	len = PAGE_ALIGN(len);
- 	if (!len || len > TASK_SIZE)
- 		return -ENOMEM;
- 
-+	error = arch_mmap_check(addr, len, flags);
-+	if (error)
-+		return error;
-+
- 	/* offset overflow? */
- 	if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
-                return -EOVERFLOW;
-@@ -933,7 +1013,7 @@ unsigned long do_mmap_pgoff(struct file 
- 	/* Obtain the address to map to. we verify (or select) it and ensure
- 	 * that it represents a valid section of the address space.
- 	 */
--	addr = get_unmapped_area(file, addr, len, pgoff, flags);
-+	addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
- 	if (addr & ~PAGE_MASK)
- 		return addr;
- 
-@@ -944,6 +1024,26 @@ unsigned long do_mmap_pgoff(struct file 
- 	vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
- 			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
- 
-+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
-+	if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
-+
-+#ifdef CONFIG_PAX_MPROTECT
-+		if (mm->pax_flags & MF_PAX_MPROTECT) {
-+			if ((prot & (PROT_WRITE | PROT_EXEC)) != PROT_EXEC)
-+				vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
-+			else
-+				vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
-+		}
-+#endif
-+
-+	}
-+#endif
-+
-+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
-+	if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
-+		vm_flags &= ~VM_PAGEEXEC;
-+#endif
-+
- 	if (flags & MAP_LOCKED) {
- 		if (!can_do_mlock())
- 			return -EPERM;
-@@ -956,6 +1056,7 @@ unsigned long do_mmap_pgoff(struct file 
- 		locked += mm->locked_vm;
- 		lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
- 		lock_limit >>= PAGE_SHIFT;
-+		gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
- 		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
- 			return -EAGAIN;
- 	}
-@@ -1024,6 +1125,9 @@ unsigned long do_mmap_pgoff(struct file 
- 	if (error)
- 		return error;
- 
-+	if (!gr_acl_handle_mmap(file, prot))
-+		return -EACCES;
-+
- 	return mmap_region(file, addr, len, flags, vm_flags, pgoff,
- 			   accountable);
- }
-@@ -1037,10 +1141,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
-  */
- int vma_wants_writenotify(struct vm_area_struct *vma)
- {
--	unsigned int vm_flags = vma->vm_flags;
-+	unsigned long vm_flags = vma->vm_flags;
- 
- 	/* If it was private or non-writable, the write bit is already clear */
--	if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
-+	if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
- 		return 0;
- 
- 	/* The backer wishes to know when pages are first written to? */
-@@ -1049,8 +1153,7 @@ int vma_wants_writenotify(struct vm_area
- 
- 	/* The open routine did something to the protections already? */
- 	if (pgprot_val(vma->vm_page_prot) !=
--	    pgprot_val(protection_map[vm_flags &
--		    (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]))
-+	    pgprot_val(vm_get_page_prot(vm_flags)))
- 		return 0;
- 
- 	/* Specialty mapping? */
-@@ -1076,14 +1179,24 @@ unsigned long mmap_region(struct file *f
- 	unsigned long charged = 0;
- 	struct inode *inode =  file ? file->f_path.dentry->d_inode : NULL;
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	struct vm_area_struct *vma_m = NULL;
-+#endif
-+
-+	/*
-+	 * mm->mmap_sem is required to protect against another thread
-+	 * changing the mappings in case we sleep.
-+	 */
-+	verify_mm_writelocked(mm);
-+
- 	/* Clear old maps */
- 	error = -ENOMEM;
--munmap_back:
- 	vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
- 	if (vma && vma->vm_start < addr + len) {
- 		if (do_munmap(mm, addr, len))
- 			return -ENOMEM;
--		goto munmap_back;
-+		vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
-+		BUG_ON(vma && vma->vm_start < addr + len);
- 	}
- 
- 	/* Check against address space limit. */
-@@ -1127,12 +1240,22 @@ munmap_back:
- 		goto unacct_error;
- 	}
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
-+		vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
-+		if (!vma_m) {
-+			kmem_cache_free(vm_area_cachep, vma);
-+			error = -ENOMEM;
-+			goto unacct_error;
-+		}
-+	}
-+#endif
-+
- 	vma->vm_mm = mm;
- 	vma->vm_start = addr;
- 	vma->vm_end = addr + len;
- 	vma->vm_flags = vm_flags;
--	vma->vm_page_prot = protection_map[vm_flags &
--				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
-+	vma->vm_page_prot = vm_get_page_prot(vm_flags);
- 	vma->vm_pgoff = pgoff;
- 
- 	if (file) {
-@@ -1150,6 +1273,14 @@ munmap_back:
- 		error = file->f_op->mmap(file, vma);
- 		if (error)
- 			goto unmap_and_free_vma;
-+
-+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
-+		if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
-+			vma->vm_flags |= VM_PAGEEXEC;
-+			vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-+		}
-+#endif
-+
- 	} else if (vm_flags & VM_SHARED) {
- 		error = shmem_zero_setup(vma);
- 		if (error)
-@@ -1174,13 +1305,18 @@ munmap_back:
- 	vm_flags = vma->vm_flags;
- 
- 	if (vma_wants_writenotify(vma))
--		vma->vm_page_prot =
--			protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC)];
-+		vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
- 
- 	if (!file || !vma_merge(mm, prev, addr, vma->vm_end,
- 			vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) {
- 		file = vma->vm_file;
- 		vma_link(mm, vma, prev, rb_link, rb_parent);
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+		if (vma_m)
-+			pax_mirror_vma(vma_m, vma);
-+#endif
-+
- 		if (correct_wcount)
- 			atomic_inc(&inode->i_writecount);
- 	} else {
-@@ -1191,10 +1327,12 @@ munmap_back:
- 		}
- 		mpol_free(vma_policy(vma));
- 		kmem_cache_free(vm_area_cachep, vma);
-+		vma = NULL;
- 	}
- out:	
- 	mm->total_vm += len >> PAGE_SHIFT;
- 	vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
-+	track_exec_limit(mm, addr, addr + len, vm_flags);
- 	if (vm_flags & VM_LOCKED) {
- 		mm->locked_vm += len >> PAGE_SHIFT;
- 		make_pages_present(addr, addr + len);
-@@ -1213,6 +1351,12 @@ unmap_and_free_vma:
- 	unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
- 	charged = 0;
- free_vma:
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if (vma_m)
-+		kmem_cache_free(vm_area_cachep, vma_m);
-+#endif
-+
- 	kmem_cache_free(vm_area_cachep, vma);
- unacct_error:
- 	if (charged)
-@@ -1246,6 +1390,10 @@ arch_get_unmapped_area(struct file *filp
- 	if (flags & MAP_FIXED)
- 		return addr;
- 
-+#ifdef CONFIG_PAX_RANDMMAP
-+	if (!(mm->pax_flags & MF_PAX_RANDMMAP))
-+#endif
-+
- 	if (addr) {
- 		addr = PAGE_ALIGN(addr);
- 		vma = find_vma(mm, addr);
-@@ -1254,10 +1402,10 @@ arch_get_unmapped_area(struct file *filp
- 			return addr;
- 	}
- 	if (len > mm->cached_hole_size) {
--	        start_addr = addr = mm->free_area_cache;
-+		start_addr = addr = mm->free_area_cache;
- 	} else {
--	        start_addr = addr = TASK_UNMAPPED_BASE;
--	        mm->cached_hole_size = 0;
-+		start_addr = addr = mm->mmap_base;
-+		mm->cached_hole_size = 0;
- 	}
- 
- full_search:
-@@ -1268,9 +1416,8 @@ full_search:
- 			 * Start a new search - just in case we missed
- 			 * some holes.
- 			 */
--			if (start_addr != TASK_UNMAPPED_BASE) {
--				addr = TASK_UNMAPPED_BASE;
--			        start_addr = addr;
-+			if (start_addr != mm->mmap_base) {
-+				start_addr = addr = mm->mmap_base;
- 				mm->cached_hole_size = 0;
- 				goto full_search;
- 			}
-@@ -1292,10 +1439,16 @@ full_search:
- 
- void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
- {
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
-+		return;
-+#endif
-+
- 	/*
- 	 * Is this a new hole at the lowest possible address?
- 	 */
--	if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
-+	if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
- 		mm->free_area_cache = addr;
- 		mm->cached_hole_size = ~0UL;
- 	}
-@@ -1313,7 +1466,7 @@ arch_get_unmapped_area_topdown(struct fi
- {
- 	struct vm_area_struct *vma;
- 	struct mm_struct *mm = current->mm;
--	unsigned long addr = addr0;
-+	unsigned long base = mm->mmap_base, addr = addr0;
- 
- 	/* requested length too big for entire address space */
- 	if (len > TASK_SIZE)
-@@ -1322,6 +1475,10 @@ arch_get_unmapped_area_topdown(struct fi
- 	if (flags & MAP_FIXED)
- 		return addr;
- 
-+#ifdef CONFIG_PAX_RANDMMAP
-+	if (!(mm->pax_flags & MF_PAX_RANDMMAP))
-+#endif
-+
- 	/* requesting a specific address */
- 	if (addr) {
- 		addr = PAGE_ALIGN(addr);
-@@ -1379,13 +1536,21 @@ bottomup:
- 	 * can happen with large stack limits and large mmap()
- 	 * allocations.
- 	 */
-+	mm->mmap_base = TASK_UNMAPPED_BASE;
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+	if (mm->pax_flags & MF_PAX_RANDMMAP)
-+		mm->mmap_base += mm->delta_mmap;
-+#endif
-+
-+	mm->free_area_cache = mm->mmap_base;
- 	mm->cached_hole_size = ~0UL;
--  	mm->free_area_cache = TASK_UNMAPPED_BASE;
- 	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
- 	/*
- 	 * Restore the topdown base:
- 	 */
--	mm->free_area_cache = mm->mmap_base;
-+	mm->mmap_base = base;
-+	mm->free_area_cache = base;
- 	mm->cached_hole_size = ~0UL;
- 
- 	return addr;
-@@ -1394,6 +1559,12 @@ bottomup:
- 
- void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
- {
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
-+		return;
-+#endif
-+
- 	/*
- 	 * Is this a new hole at the highest possible address?
- 	 */
-@@ -1401,8 +1572,10 @@ void arch_unmap_area_topdown(struct mm_s
- 		mm->free_area_cache = addr;
- 
- 	/* dont allow allocations above current base */
--	if (mm->free_area_cache > mm->mmap_base)
-+	if (mm->free_area_cache > mm->mmap_base) {
- 		mm->free_area_cache = mm->mmap_base;
-+		mm->cached_hole_size = ~0UL;
-+	}
- }
- 
- unsigned long
-@@ -1502,6 +1675,32 @@ out:
- 	return prev ? prev->vm_next : vma;
- }
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
-+{
-+	struct vm_area_struct *vma_m;
-+
-+	BUG_ON(!vma || vma->vm_start >= vma->vm_end);
-+	if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
-+		BUG_ON(vma->vm_mirror);
-+		return NULL;
-+	}
-+	BUG_ON(vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < vma->vm_start - SEGMEXEC_TASK_SIZE - 1);
-+	vma_m = vma->vm_mirror;
-+	BUG_ON(!vma_m || vma_m->vm_mirror != vma);
-+	BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
-+	BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
-+
-+#ifdef CONFIG_PAX_MPROTECT
-+	BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_MAYNOTWRITE));
-+#else
-+	BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
-+#endif
-+
-+	return vma_m;
-+}
-+#endif
-+
- /*
-  * Verify that the stack growth is acceptable and
-  * update accounting. This is shared with both the
-@@ -1518,6 +1717,7 @@ static int acct_stack_growth(struct vm_a
- 		return -ENOMEM;
- 
- 	/* Stack limit test */
-+	gr_learn_resource(current, RLIMIT_STACK, size, 1);
- 	if (size > rlim[RLIMIT_STACK].rlim_cur)
- 		return -ENOMEM;
- 
-@@ -1527,6 +1727,7 @@ static int acct_stack_growth(struct vm_a
- 		unsigned long limit;
- 		locked = mm->locked_vm + grow;
- 		limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
-+		gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
- 		if (locked > limit && !capable(CAP_IPC_LOCK))
- 			return -ENOMEM;
- 	}
-@@ -1562,35 +1763,40 @@ static inline
- #endif
- int expand_upwards(struct vm_area_struct *vma, unsigned long address)
- {
--	int error;
-+	int error, locknext;
- 
- 	if (!(vma->vm_flags & VM_GROWSUP))
- 		return -EFAULT;
- 
-+	/* Also guard against wrapping around to address 0. */
-+	if (address < PAGE_ALIGN(address+1))
-+		address = PAGE_ALIGN(address+1);
-+	else
-+		return -ENOMEM;
-+
- 	/*
- 	 * We must make sure the anon_vma is allocated
- 	 * so that the anon_vma locking is not a noop.
- 	 */
- 	if (unlikely(anon_vma_prepare(vma)))
- 		return -ENOMEM;
-+	locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
-+	if (locknext && unlikely(anon_vma_prepare(vma->vm_next)))
-+		return -ENOMEM;
- 	anon_vma_lock(vma);
-+	if (locknext)
-+		anon_vma_lock(vma->vm_next);
- 
- 	/*
- 	 * vma->vm_start/vm_end cannot change under us because the caller
- 	 * is required to hold the mmap_sem in read mode.  We need the
--	 * anon_vma lock to serialize against concurrent expand_stacks.
--	 * Also guard against wrapping around to address 0.
-+	 * anon_vma locks to serialize against concurrent expand_stacks
-+	 * and expand_upwards.
- 	 */
--	if (address < PAGE_ALIGN(address+4))
--		address = PAGE_ALIGN(address+4);
--	else {
--		anon_vma_unlock(vma);
--		return -ENOMEM;
--	}
- 	error = 0;
- 
- 	/* Somebody else might have raced and expanded it already */
--	if (address > vma->vm_end) {
-+	if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
- 		unsigned long size, grow;
- 
- 		size = address - vma->vm_start;
-@@ -1600,6 +1806,8 @@ int expand_upwards(struct vm_area_struct
- 		if (!error)
- 			vma->vm_end = address;
- 	}
-+	if (locknext)
-+		anon_vma_unlock(vma->vm_next);
- 	anon_vma_unlock(vma);
- 	return error;
- }
-@@ -1611,7 +1819,8 @@ int expand_upwards(struct vm_area_struct
- static inline int expand_downwards(struct vm_area_struct *vma,
- 				   unsigned long address)
- {
--	int error;
-+	int error, lockprev = 0;
-+	struct vm_area_struct *prev = NULL;
- 
- 	/*
- 	 * We must make sure the anon_vma is allocated
-@@ -1619,6 +1828,16 @@ static inline int expand_downwards(struc
- 	 */
- 	if (unlikely(anon_vma_prepare(vma)))
- 		return -ENOMEM;
-+
-+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
-+	find_vma_prev(address, &prev);
-+	lockprev = prev && (prev->vm_flags & VM_GROWSUP);
-+#endif
-+	if (lockprev && unlikely(anon_vma_prepare(prev)))
-+		return -ENOMEM;
-+	if (lockprev)
-+		anon_vma_lock(prev);
-+
- 	anon_vma_lock(vma);
- 
- 	/*
-@@ -1630,9 +1849,15 @@ static inline int expand_downwards(struc
- 	error = 0;
- 
- 	/* Somebody else might have raced and expanded it already */
--	if (address < vma->vm_start) {
-+	if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
- 		unsigned long size, grow;
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+		struct vm_area_struct *vma_m;
-+
-+		vma_m = pax_find_mirror_vma(vma);
-+#endif
-+
- 		size = vma->vm_end - address;
- 		grow = (vma->vm_start - address) >> PAGE_SHIFT;
- 
-@@ -1640,9 +1865,20 @@ static inline int expand_downwards(struc
- 		if (!error) {
- 			vma->vm_start = address;
- 			vma->vm_pgoff -= grow;
-+			track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+			if (vma_m) {
-+				vma_m->vm_start -= grow << PAGE_SHIFT;
-+				vma_m->vm_pgoff -= grow;
-+			}
-+#endif
-+
- 		}
- 	}
- 	anon_vma_unlock(vma);
-+	if (lockprev)
-+		anon_vma_unlock(prev);
- 	return error;
- }
- 
-@@ -1714,6 +1950,13 @@ static void remove_vma_list(struct mm_st
- 	do {
- 		long nrpages = vma_pages(vma);
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+		if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
-+			vma = remove_vma(vma);
-+			continue;
-+		}
-+#endif
-+
- 		mm->total_vm -= nrpages;
- 		if (vma->vm_flags & VM_LOCKED)
- 			mm->locked_vm -= nrpages;
-@@ -1760,6 +2003,16 @@ detach_vmas_to_be_unmapped(struct mm_str
- 
- 	insertion_point = (prev ? &prev->vm_next : &mm->mmap);
- 	do {
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+		if (vma->vm_mirror) {
-+			BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
-+			vma->vm_mirror->vm_mirror = NULL;
-+			vma->vm_mirror->vm_flags &= ~VM_EXEC;
-+			vma->vm_mirror = NULL;
-+		}
-+#endif
-+
- 		rb_erase(&vma->vm_rb, &mm->mm_rb);
- 		mm->map_count--;
- 		tail_vma = vma;
-@@ -1779,6 +2032,112 @@ detach_vmas_to_be_unmapped(struct mm_str
-  * Split a vma into two pieces at address 'addr', a new vma is allocated
-  * either for the first part or the tail.
-  */
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
-+	      unsigned long addr, int new_below)
-+{
-+	struct mempolicy *pol, *pol_m;
-+	struct vm_area_struct *new, *vma_m, *new_m = NULL;
-+	unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
-+
-+	if (is_vm_hugetlb_page(vma) && (addr & ~HPAGE_MASK))
-+		return -EINVAL;
-+
-+	vma_m = pax_find_mirror_vma(vma);
-+	if (vma_m) {
-+		BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
-+		if (mm->map_count >= sysctl_max_map_count-1)
-+			return -ENOMEM;
-+	} else if (mm->map_count >= sysctl_max_map_count)
-+		return -ENOMEM;
-+
-+	new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
-+	if (!new)
-+		return -ENOMEM;
-+
-+	if (vma_m) {
-+		new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
-+		if (!new_m) {
-+			kmem_cache_free(vm_area_cachep, new);
-+			return -ENOMEM;
-+		}
-+	}
-+
-+	/* most fields are the same, copy all, and then fixup */
-+	*new = *vma;
-+
-+	if (new_below)
-+		new->vm_end = addr;
-+	else {
-+		new->vm_start = addr;
-+		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
-+	}
-+
-+	if (vma_m) {
-+		*new_m = *vma_m;
-+		new_m->vm_mirror = new;
-+		new->vm_mirror = new_m;
-+
-+		if (new_below)
-+			new_m->vm_end = addr_m;
-+		else {
-+			new_m->vm_start = addr_m;
-+			new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
-+		}
-+	}
-+
-+	pol = mpol_copy(vma_policy(vma));
-+	if (IS_ERR(pol)) {
-+		if (new_m)
-+			kmem_cache_free(vm_area_cachep, new_m);
-+		kmem_cache_free(vm_area_cachep, new);
-+		return PTR_ERR(pol);
-+	}
-+
-+	if (vma_m) {
-+		pol_m = mpol_copy(vma_policy(vma_m));
-+		if (IS_ERR(pol_m)) {
-+			mpol_free(pol);
-+			kmem_cache_free(vm_area_cachep, new_m);
-+			kmem_cache_free(vm_area_cachep, new);
-+			return PTR_ERR(pol);
-+		}
-+	}
-+
-+	vma_set_policy(new, pol);
-+
-+	if (new->vm_file)
-+		get_file(new->vm_file);
-+
-+	if (new->vm_ops && new->vm_ops->open)
-+		new->vm_ops->open(new);
-+
-+	if (new_below)
-+		vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
-+			((addr - new->vm_start) >> PAGE_SHIFT), new);
-+	else
-+		vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
-+
-+	if (vma_m) {
-+		vma_set_policy(new_m, pol_m);
-+
-+		if (new_m->vm_file)
-+			get_file(new_m->vm_file);
-+
-+		if (new_m->vm_ops && new_m->vm_ops->open)
-+			new_m->vm_ops->open(new_m);
-+
-+		if (new_below)
-+			vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
-+				((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
-+		else
-+			vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
-+	}
-+
-+	return 0;
-+}
-+#else
- int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
- 	      unsigned long addr, int new_below)
- {
-@@ -1826,17 +2185,37 @@ int split_vma(struct mm_struct * mm, str
- 
- 	return 0;
- }
-+#endif
- 
- /* Munmap is split into 2 main parts -- this part which finds
-  * what needs doing, and the areas themselves, which do the
-  * work.  This now handles partial unmappings.
-  * Jeremy Fitzhardinge <jeremy@goop.org>
-  */
-+#ifdef CONFIG_PAX_SEGMEXEC
- int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
- {
-+	int ret = __do_munmap(mm, start, len);
-+	if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
-+		return ret;
-+
-+	return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
-+}
-+
-+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
-+#else
-+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
-+#endif
-+{
- 	unsigned long end;
- 	struct vm_area_struct *vma, *prev, *last;
- 
-+	/*
-+	 * mm->mmap_sem is required to protect against another thread
-+	 * changing the mappings in case we sleep.
-+	 */
-+	verify_mm_writelocked(mm);
-+
- 	if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
- 		return -EINVAL;
- 
-@@ -1886,6 +2265,8 @@ int do_munmap(struct mm_struct *mm, unsi
- 	/* Fix up all other VM information */
- 	remove_vma_list(mm, vma);
- 
-+	track_exec_limit(mm, start, end, 0UL);
-+
- 	return 0;
- }
- 
-@@ -1898,22 +2279,18 @@ asmlinkage long sys_munmap(unsigned long
- 
- 	profile_munmap(addr);
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
-+	    (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
-+		return -EINVAL;
-+#endif
-+
- 	down_write(&mm->mmap_sem);
- 	ret = do_munmap(mm, addr, len);
- 	up_write(&mm->mmap_sem);
- 	return ret;
- }
- 
--static inline void verify_mm_writelocked(struct mm_struct *mm)
--{
--#ifdef CONFIG_DEBUG_VM
--	if (unlikely(down_read_trylock(&mm->mmap_sem))) {
--		WARN_ON(1);
--		up_read(&mm->mmap_sem);
--	}
--#endif
--}
--
- /*
-  *  this is really a simplified "do_mmap".  it only handles
-  *  anonymous maps.  eventually we may be able to do some
-@@ -1927,6 +2304,11 @@ unsigned long do_brk(unsigned long addr,
- 	struct rb_node ** rb_link, * rb_parent;
- 	pgoff_t pgoff = addr >> PAGE_SHIFT;
- 	int error;
-+	unsigned long charged;
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	struct vm_area_struct *vma_m = NULL;
-+#endif
- 
- 	len = PAGE_ALIGN(len);
- 	if (!len)
-@@ -1940,19 +2322,34 @@ unsigned long do_brk(unsigned long addr,
- 
- 	flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
- 
-+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
-+	if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
-+		flags &= ~VM_EXEC;
-+
-+#ifdef CONFIG_PAX_MPROTECT
-+		if (mm->pax_flags & MF_PAX_MPROTECT)
-+			flags &= ~VM_MAYEXEC;
-+#endif
-+
-+	}
-+#endif
-+
- 	error = arch_mmap_check(addr, len, flags);
- 	if (error)
- 		return error;
- 
-+	charged = len >> PAGE_SHIFT;
-+
- 	/*
- 	 * mlock MCL_FUTURE?
- 	 */
- 	if (mm->def_flags & VM_LOCKED) {
- 		unsigned long locked, lock_limit;
--		locked = len >> PAGE_SHIFT;
-+		locked = charged;
- 		locked += mm->locked_vm;
- 		lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
- 		lock_limit >>= PAGE_SHIFT;
-+		gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
- 		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
- 			return -EAGAIN;
- 	}
-@@ -1966,22 +2363,22 @@ unsigned long do_brk(unsigned long addr,
- 	/*
- 	 * Clear old maps.  this also does some error checking for us
- 	 */
-- munmap_back:
- 	vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
- 	if (vma && vma->vm_start < addr + len) {
- 		if (do_munmap(mm, addr, len))
- 			return -ENOMEM;
--		goto munmap_back;
-+		vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
-+		BUG_ON(vma && vma->vm_start < addr + len);
- 	}
- 
- 	/* Check against address space limits *after* clearing old maps... */
--	if (!may_expand_vm(mm, len >> PAGE_SHIFT))
-+	if (!may_expand_vm(mm, charged))
- 		return -ENOMEM;
- 
- 	if (mm->map_count > sysctl_max_map_count)
- 		return -ENOMEM;
- 
--	if (security_vm_enough_memory(len >> PAGE_SHIFT))
-+	if (security_vm_enough_memory(charged))
- 		return -ENOMEM;
- 
- 	/* Can we just expand an old private anonymous mapping? */
-@@ -1994,24 +2391,41 @@ unsigned long do_brk(unsigned long addr,
- 	 */
- 	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
- 	if (!vma) {
--		vm_unacct_memory(len >> PAGE_SHIFT);
-+		vm_unacct_memory(charged);
- 		return -ENOMEM;
- 	}
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (flags & VM_EXEC)) {
-+		vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
-+		if (!vma_m) {
-+			kmem_cache_free(vm_area_cachep, vma);
-+			vm_unacct_memory(charged);
-+			return -ENOMEM;
-+		}
-+	}
-+#endif
-+
- 	vma->vm_mm = mm;
- 	vma->vm_start = addr;
- 	vma->vm_end = addr + len;
- 	vma->vm_pgoff = pgoff;
- 	vma->vm_flags = flags;
--	vma->vm_page_prot = protection_map[flags &
--				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
-+	vma->vm_page_prot = vm_get_page_prot(flags);
- 	vma_link(mm, vma, prev, rb_link, rb_parent);
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if (vma_m)
-+		pax_mirror_vma(vma_m, vma);
-+#endif
-+
- out:
--	mm->total_vm += len >> PAGE_SHIFT;
-+	mm->total_vm += charged;
- 	if (flags & VM_LOCKED) {
--		mm->locked_vm += len >> PAGE_SHIFT;
-+		mm->locked_vm += charged;
- 		make_pages_present(addr, addr + len);
- 	}
-+	track_exec_limit(mm, addr, addr + len, flags);
- 	return addr;
- }
- 
-@@ -2042,8 +2456,10 @@ void exit_mmap(struct mm_struct *mm)
- 	 * Walk the list again, actually closing and freeing it,
- 	 * with preemption enabled, without holding any MM locks.
- 	 */
--	while (vma)
-+	while (vma) {
-+		vma->vm_mirror = NULL;
- 		vma = remove_vma(vma);
-+	}
- 
- 	BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
- }
-@@ -2057,6 +2473,10 @@ int insert_vm_struct(struct mm_struct * 
- 	struct vm_area_struct * __vma, * prev;
- 	struct rb_node ** rb_link, * rb_parent;
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	struct vm_area_struct *vma_m = NULL;
-+#endif
-+
- 	/*
- 	 * The vm_pgoff of a purely anonymous vma should be irrelevant
- 	 * until its first write fault, when page's anon_vma and index
-@@ -2079,7 +2499,22 @@ int insert_vm_struct(struct mm_struct * 
- 	if ((vma->vm_flags & VM_ACCOUNT) &&
- 	     security_vm_enough_memory_mm(mm, vma_pages(vma)))
- 		return -ENOMEM;
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
-+		vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
-+		if (!vma_m)
-+			return -ENOMEM;
-+	}
-+#endif
-+
- 	vma_link(mm, vma, prev, rb_link, rb_parent);
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if (vma_m)
-+		pax_mirror_vma(vma_m, vma);
-+#endif
-+
- 	return 0;
- }
- 
-@@ -2137,6 +2572,30 @@ struct vm_area_struct *copy_vma(struct v
- 	return new_vma;
- }
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
-+{
-+	struct vm_area_struct *prev_m;
-+	struct rb_node **rb_link_m, *rb_parent_m;
-+
-+	BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
-+	BUG_ON(vma->vm_mirror || vma_m->vm_mirror || vma_policy(vma));
-+	*vma_m = *vma;
-+	vma_m->vm_start += SEGMEXEC_TASK_SIZE;
-+	vma_m->vm_end += SEGMEXEC_TASK_SIZE;
-+	vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
-+	vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
-+	if (vma_m->vm_file)
-+		get_file(vma_m->vm_file);
-+	if (vma_m->vm_ops && vma_m->vm_ops->open)
-+		vma_m->vm_ops->open(vma_m);
-+	find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
-+	vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
-+	vma_m->vm_mirror = vma;
-+	vma->vm_mirror = vma_m;
-+}
-+#endif
-+
- /*
-  * Return true if the calling process may expand its vm space by the passed
-  * number of pages
-@@ -2147,7 +2606,7 @@ int may_expand_vm(struct mm_struct *mm, 
- 	unsigned long lim;
- 
- 	lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
--
-+	gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
- 	if (cur + npages > lim)
- 		return 0;
- 	return 1;
-@@ -2159,7 +2618,7 @@ static struct page *special_mapping_nopa
- {
- 	struct page **pages;
- 
--	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
-+	BUG_ON(address < vma->vm_start || address >= vma->vm_end || (address & ~PAGE_MASK));
- 
- 	address -= vma->vm_start;
- 	for (pages = vma->vm_private_data; address > 0 && *pages; ++pages)
-@@ -2209,8 +2668,17 @@ int install_special_mapping(struct mm_st
- 	vma->vm_start = addr;
- 	vma->vm_end = addr + len;
- 
-+#ifdef CONFIG_PAX_MPROTECT
-+	if (mm->pax_flags & MF_PAX_MPROTECT) {
-+		if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
-+			vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
-+		else
-+			vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
-+	}
-+#endif
-+
- 	vma->vm_flags = vm_flags | mm->def_flags;
--	vma->vm_page_prot = protection_map[vma->vm_flags & 7];
-+	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
- 
- 	vma->vm_ops = &special_mapping_vmops;
- 	vma->vm_private_data = pages;
---- a/mm/mprotect.c
-+++ b/mm/mprotect.c
-@@ -21,10 +21,17 @@
- #include <linux/syscalls.h>
- #include <linux/swap.h>
- #include <linux/swapops.h>
-+#include <linux/grsecurity.h>
-+
-+#ifdef CONFIG_PAX_MPROTECT
-+#include <linux/elf.h>
-+#endif
-+
- #include <asm/uaccess.h>
- #include <asm/pgtable.h>
- #include <asm/cacheflush.h>
- #include <asm/tlbflush.h>
-+#include <asm/mmu_context.h>
- 
- static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
- 		unsigned long addr, unsigned long end, pgprot_t newprot,
-@@ -128,6 +135,48 @@ static void change_protection(struct vm_
- 	flush_tlb_range(vma, start, end);
- }
- 
-+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
-+/* called while holding the mmap semaphor for writing except stack expansion */
-+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
-+{
-+	unsigned long oldlimit, newlimit = 0UL;
-+
-+	if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
-+		return;
-+
-+	spin_lock(&mm->page_table_lock);
-+	oldlimit = mm->context.user_cs_limit;
-+	if ((prot & VM_EXEC) && oldlimit < end)
-+		/* USER_CS limit moved up */
-+		newlimit = end;
-+	else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
-+		/* USER_CS limit moved down */
-+		newlimit = start;
-+
-+	if (newlimit) {
-+		mm->context.user_cs_limit = newlimit;
-+
-+#ifdef CONFIG_SMP
-+		wmb();
-+		cpus_clear(mm->context.cpu_user_cs_mask);
-+		cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
-+#endif
-+
-+		set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
-+	}
-+	spin_unlock(&mm->page_table_lock);
-+	if (newlimit == end) {
-+		struct vm_area_struct *vma = find_vma(mm, oldlimit);
-+
-+		for (; vma && vma->vm_start < end; vma = vma->vm_next)
-+			if (is_vm_hugetlb_page(vma))
-+				hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
-+			else
-+				change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
-+	}
-+}
-+#endif
-+
- int
- mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
- 	unsigned long start, unsigned long end, unsigned long newflags)
-@@ -140,11 +189,39 @@ mprotect_fixup(struct vm_area_struct *vm
- 	int error;
- 	int dirty_accountable = 0;
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	struct vm_area_struct *vma_m = NULL;
-+	unsigned long start_m, end_m;
-+
-+	start_m = start + SEGMEXEC_TASK_SIZE;
-+	end_m = end + SEGMEXEC_TASK_SIZE;
-+#endif
-+
- 	if (newflags == oldflags) {
- 		*pprev = vma;
- 		return 0;
- 	}
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if (pax_find_mirror_vma(vma) && !(newflags & VM_EXEC)) {
-+		if (start != vma->vm_start) {
-+			error = split_vma(mm, vma, start, 1);
-+			if (error)
-+				return -ENOMEM;
-+		}
-+
-+		if (end != vma->vm_end) {
-+			error = split_vma(mm, vma, end, 0);
-+			if (error)
-+				return -ENOMEM;
-+		}
-+
-+		error = __do_munmap(mm, start_m, end_m - start_m);
-+		if (error)
-+			return -ENOMEM;
-+	}
-+#endif
-+
- 	/*
- 	 * If we make a private mapping writable we increase our commit;
- 	 * but (without finer accounting) cannot reduce our commit if we
-@@ -187,17 +264,25 @@ mprotect_fixup(struct vm_area_struct *vm
- 			goto fail;
- 	}
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(oldflags & VM_EXEC) && (newflags & VM_EXEC)) {
-+		vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
-+		if (!vma_m) {
-+			error = -ENOMEM;
-+			goto fail;
-+		}
-+	}
-+#endif
-+
- success:
- 	/*
- 	 * vm_flags and vm_page_prot are protected by the mmap_sem
- 	 * held in write mode.
- 	 */
- 	vma->vm_flags = newflags;
--	vma->vm_page_prot = protection_map[newflags &
--		(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
-+	vma->vm_page_prot = vm_get_page_prot(newflags);
- 	if (vma_wants_writenotify(vma)) {
--		vma->vm_page_prot = protection_map[newflags &
--			(VM_READ|VM_WRITE|VM_EXEC)];
-+		vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
- 		dirty_accountable = 1;
- 	}
- 
-@@ -205,6 +290,12 @@ success:
- 		hugetlb_change_protection(vma, start, end, vma->vm_page_prot);
- 	else
- 		change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable);
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if (vma_m)
-+		pax_mirror_vma(vma_m, vma);
-+#endif
-+
- 	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
- 	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
- 	return 0;
-@@ -214,6 +305,70 @@ fail:
- 	return error;
- }
- 
-+#ifdef CONFIG_PAX_MPROTECT
-+/* PaX: non-PIC ELF libraries need relocations on their executable segments
-+ * therefore we'll grant them VM_MAYWRITE once during their life.
-+ *
-+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
-+ * basis because we want to allow the common case and not the special ones.
-+ */
-+static inline void pax_handle_maywrite(struct vm_area_struct *vma, unsigned long start)
-+{
-+	struct elfhdr elf_h;
-+	struct elf_phdr elf_p;
-+	elf_addr_t dyn_offset = 0UL;
-+	elf_dyn dyn;
-+	unsigned long i, j = 65536UL / sizeof(struct elf_phdr);
-+
-+#ifndef CONFIG_PAX_NOELFRELOCS
-+	if ((vma->vm_start != start) ||
-+	    !vma->vm_file ||
-+	    !(vma->vm_flags & VM_MAYEXEC) ||
-+	    (vma->vm_flags & VM_MAYNOTWRITE))
-+#endif
-+
-+		return;
-+
-+	if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
-+	    memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
-+
-+#ifdef CONFIG_PAX_ETEXECRELOCS
-+	    (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) ||
-+#else
-+	    elf_h.e_type != ET_DYN ||
-+#endif
-+
-+	    !elf_check_arch(&elf_h) ||
-+	    elf_h.e_phentsize != sizeof(struct elf_phdr) ||
-+	    elf_h.e_phnum > j)
-+		return;
-+
-+	for (i = 0UL; i < elf_h.e_phnum; i++) {
-+		if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
-+			return;
-+		if (elf_p.p_type == PT_DYNAMIC) {
-+			dyn_offset = elf_p.p_offset;
-+			j = i;
-+		}
-+	}
-+	if (elf_h.e_phnum <= j)
-+		return;
-+
-+	i = 0UL;
-+	do {
-+		if (sizeof(dyn) != kernel_read(vma->vm_file, dyn_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
-+			return;
-+		if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
-+			vma->vm_flags |= VM_MAYWRITE | VM_MAYNOTWRITE;
-+			gr_log_textrel(vma);
-+			return;
-+		}
-+		i++;
-+	} while (dyn.d_tag != DT_NULL);
-+	return;
-+}
-+#endif
-+
- asmlinkage long
- sys_mprotect(unsigned long start, size_t len, unsigned long prot)
- {
-@@ -233,6 +388,17 @@ sys_mprotect(unsigned long start, size_t
- 	end = start + len;
- 	if (end <= start)
- 		return -ENOMEM;
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
-+		if (end > SEGMEXEC_TASK_SIZE)
-+			return -EINVAL;
-+	} else
-+#endif
-+
-+	if (end > TASK_SIZE)
-+		return -EINVAL;
-+
- 	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM))
- 		return -EINVAL;
- 
-@@ -240,7 +406,7 @@ sys_mprotect(unsigned long start, size_t
- 	/*
- 	 * Does the application expect PROT_READ to imply PROT_EXEC:
- 	 */
--	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
-+	if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
- 		prot |= PROT_EXEC;
- 
- 	vm_flags = calc_vm_prot_bits(prot);
-@@ -272,6 +438,16 @@ sys_mprotect(unsigned long start, size_t
- 	if (start > vma->vm_start)
- 		prev = vma;
- 
-+	if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
-+		error = -EACCES;
-+		goto out;
-+	}
-+
-+#ifdef CONFIG_PAX_MPROTECT
-+	if ((vma->vm_mm->pax_flags & MF_PAX_MPROTECT) && (prot & PROT_WRITE))
-+		pax_handle_maywrite(vma, start);
-+#endif
-+
- 	for (nstart = start ; ; ) {
- 		unsigned long newflags;
- 
-@@ -285,6 +461,12 @@ sys_mprotect(unsigned long start, size_t
- 			goto out;
- 		}
- 
-+#ifdef CONFIG_PAX_MPROTECT
-+		/* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
-+		if ((vma->vm_mm->pax_flags & MF_PAX_MPROTECT) && !(prot & PROT_WRITE) && (vma->vm_flags & VM_MAYNOTWRITE))
-+			newflags &= ~VM_MAYWRITE;
-+#endif
-+
- 		error = security_file_mprotect(vma, reqprot, prot);
- 		if (error)
- 			goto out;
-@@ -295,6 +477,9 @@ sys_mprotect(unsigned long start, size_t
- 		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
- 		if (error)
- 			goto out;
-+
-+		track_exec_limit(current->mm, nstart, tmp, vm_flags);
-+
- 		nstart = tmp;
- 
- 		if (nstart < prev->vm_end)
---- a/mm/mremap.c
-+++ b/mm/mremap.c
-@@ -106,6 +106,12 @@ static void move_ptes(struct vm_area_str
- 			continue;
- 		pte = ptep_clear_flush(vma, old_addr, old_pte);
- 		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
-+
-+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
-+		if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
-+			pte = pte_exprotect(pte);
-+#endif
-+
- 		set_pte_at(mm, new_addr, new_pte, pte);
- 	}
- 
-@@ -254,6 +260,7 @@ unsigned long do_mremap(unsigned long ad
- 	struct vm_area_struct *vma;
- 	unsigned long ret = -EINVAL;
- 	unsigned long charged = 0;
-+	unsigned long task_size = TASK_SIZE;
- 
- 	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
- 		goto out;
-@@ -272,6 +279,15 @@ unsigned long do_mremap(unsigned long ad
- 	if (!new_len)
- 		goto out;
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
-+		task_size = SEGMEXEC_TASK_SIZE;
-+#endif
-+
-+	if (new_len > task_size || addr > task_size-new_len ||
-+	    old_len > task_size || addr > task_size-old_len)
-+		goto out;
-+
- 	/* new_addr is only valid if MREMAP_FIXED is specified */
- 	if (flags & MREMAP_FIXED) {
- 		if (new_addr & ~PAGE_MASK)
-@@ -279,16 +295,13 @@ unsigned long do_mremap(unsigned long ad
- 		if (!(flags & MREMAP_MAYMOVE))
- 			goto out;
- 
--		if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
-+		if (new_addr > task_size - new_len)
- 			goto out;
- 
- 		/* Check if the location we're moving into overlaps the
- 		 * old location at all, and fail if it does.
- 		 */
--		if ((new_addr <= addr) && (new_addr+new_len) > addr)
--			goto out;
--
--		if ((addr <= new_addr) && (addr+old_len) > new_addr)
-+		if (addr + old_len > new_addr && new_addr + new_len > addr)
- 			goto out;
- 
- 		ret = security_file_mmap(0, 0, 0, 0, new_addr, 1);
-@@ -326,6 +339,14 @@ unsigned long do_mremap(unsigned long ad
- 		ret = -EINVAL;
- 		goto out;
- 	}
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+	if (pax_find_mirror_vma(vma)) {
-+		ret = -EINVAL;
-+		goto out;
-+	}
-+#endif
-+
- 	/* We can't remap across vm area boundaries */
- 	if (old_len > vma->vm_end - addr)
- 		goto out;
-@@ -359,7 +380,7 @@ unsigned long do_mremap(unsigned long ad
- 	if (old_len == vma->vm_end - addr &&
- 	    !((flags & MREMAP_FIXED) && (addr != new_addr)) &&
- 	    (old_len != new_len || !(flags & MREMAP_MAYMOVE))) {
--		unsigned long max_addr = TASK_SIZE;
-+		unsigned long max_addr = task_size;
- 		if (vma->vm_next)
- 			max_addr = vma->vm_next->vm_start;
- 		/* can we just expand the current mapping? */
-@@ -377,6 +398,7 @@ unsigned long do_mremap(unsigned long ad
- 						   addr + new_len);
- 			}
- 			ret = addr;
-+			track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
- 			goto out;
- 		}
- 	}
-@@ -387,8 +409,8 @@ unsigned long do_mremap(unsigned long ad
- 	 */
- 	ret = -ENOMEM;
- 	if (flags & MREMAP_MAYMOVE) {
-+		unsigned long map_flags = 0;
- 		if (!(flags & MREMAP_FIXED)) {
--			unsigned long map_flags = 0;
- 			if (vma->vm_flags & VM_MAYSHARE)
- 				map_flags |= MAP_SHARED;
- 
-@@ -403,7 +425,12 @@ unsigned long do_mremap(unsigned long ad
- 			if (ret)
- 				goto out;
- 		}
-+		map_flags = vma->vm_flags;
- 		ret = move_vma(vma, addr, old_len, new_len, new_addr);
-+		if (!(ret & ~PAGE_MASK)) {
-+			track_exec_limit(current->mm, addr, addr + old_len, 0UL);
-+			track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
-+		}
- 	}
- out:
- 	if (ret & ~PAGE_MASK)
---- a/mm/nommu.c
-+++ b/mm/nommu.c
-@@ -376,15 +376,6 @@ struct vm_area_struct *find_vma(struct m
- }
- EXPORT_SYMBOL(find_vma);
- 
--/*
-- * find a VMA
-- * - we don't extend stack VMAs under NOMMU conditions
-- */
--struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
--{
--	return find_vma(mm, addr);
--}
--
- int expand_stack(struct vm_area_struct *vma, unsigned long address)
- {
- 	return -ENOMEM;
---- a/mm/page_alloc.c
-+++ b/mm/page_alloc.c
-@@ -402,7 +402,7 @@ static inline int page_is_buddy(struct p
- static inline void __free_one_page(struct page *page,
- 		struct zone *zone, unsigned int order)
- {
--	unsigned long page_idx;
-+	unsigned long page_idx, index;
- 	int order_size = 1 << order;
- 
- 	if (unlikely(PageCompound(page)))
-@@ -413,6 +413,11 @@ static inline void __free_one_page(struc
- 	VM_BUG_ON(page_idx & (order_size - 1));
- 	VM_BUG_ON(bad_range(zone, page));
- 
-+#ifdef CONFIG_PAX_MEMORY_SANITIZE
-+	for (index = order_size; index; --index)
-+		sanitize_highpage(page + index - 1);
-+#endif
-+
- 	__mod_zone_page_state(zone, NR_FREE_PAGES, order_size);
- 	while (order < MAX_ORDER-1) {
- 		unsigned long combined_idx;
---- a/mm/rmap.c
-+++ b/mm/rmap.c
-@@ -63,6 +63,10 @@ int anon_vma_prepare(struct vm_area_stru
- 		struct mm_struct *mm = vma->vm_mm;
- 		struct anon_vma *allocated, *locked;
- 
-+#ifdef CONFIG_PAX_SEGMEXEC
-+		struct vm_area_struct *vma_m;
-+#endif
-+
- 		anon_vma = find_mergeable_anon_vma(vma);
- 		if (anon_vma) {
- 			allocated = NULL;
-@@ -79,6 +83,15 @@ int anon_vma_prepare(struct vm_area_stru
- 		/* page_table_lock to protect against threads */
- 		spin_lock(&mm->page_table_lock);
- 		if (likely(!vma->anon_vma)) {
-+
-+#ifdef CONFIG_PAX_SEGMEXEC
-+			vma_m = pax_find_mirror_vma(vma);
-+			if (vma_m) {
-+				vma_m->anon_vma = anon_vma;
-+				__anon_vma_link(vma_m);
-+			}
-+#endif
-+
- 			vma->anon_vma = anon_vma;
- 			list_add_tail(&vma->anon_vma_node, &anon_vma->head);
- 			allocated = NULL;
---- a/mm/shmem.c
-+++ b/mm/shmem.c
-@@ -2452,7 +2452,7 @@ static struct file_system_type tmpfs_fs_
- 	.get_sb		= shmem_get_sb,
- 	.kill_sb	= kill_litter_super,
- };
--static struct vfsmount *shm_mnt;
-+struct vfsmount *shm_mnt;
- 
- static int __init init_tmpfs(void)
- {
---- a/mm/slab.c
-+++ b/mm/slab.c
-@@ -306,7 +306,7 @@ struct kmem_list3 {
-  * Need this for bootstrapping a per node allocator.
-  */
- #define NUM_INIT_LISTS (2 * MAX_NUMNODES + 1)
--struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
-+struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
- #define	CACHE_CACHE 0
- #define	SIZE_AC 1
- #define	SIZE_L3 (1 + MAX_NUMNODES)
-@@ -655,14 +655,14 @@ struct cache_names {
- static struct cache_names __initdata cache_names[] = {
- #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
- #include <linux/kmalloc_sizes.h>
--	{NULL,}
-+	{NULL, NULL}
- #undef CACHE
- };
- 
- static struct arraycache_init initarray_cache __initdata =
--    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
-+    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0}, {NULL} };
- static struct arraycache_init initarray_generic =
--    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
-+    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0}, {NULL} };
- 
- /* internal cache of cache description objs */
- static struct kmem_cache cache_cache = {
-@@ -2980,7 +2980,7 @@ retry:
- 		 * there must be at least one object available for
- 		 * allocation.
- 		 */
--		BUG_ON(slabp->inuse < 0 || slabp->inuse >= cachep->num);
-+		BUG_ON(slabp->inuse >= cachep->num);
- 
- 		while (slabp->inuse < cachep->num && batchcount--) {
- 			STATS_INC_ALLOCED(cachep);
---- a/mm/slub.c
-+++ b/mm/slub.c
-@@ -1530,7 +1530,7 @@ debug:
-  *
-  * Otherwise we can simply pick the next object from the lockless free list.
-  */
--static void __always_inline *slab_alloc(struct kmem_cache *s,
-+static __always_inline void *slab_alloc(struct kmem_cache *s,
- 		gfp_t gfpflags, int node, void *addr)
- {
- 	struct page *page;
-@@ -1639,7 +1639,7 @@ debug:
-  * If fastpath is not possible then fall back to __slab_free where we deal
-  * with all sorts of special processing.
-  */
--static void __always_inline slab_free(struct kmem_cache *s,
-+static __always_inline void slab_free(struct kmem_cache *s,
- 			struct page *page, void *x, void *addr)
- {
- 	void **object = (void *)x;
---- a/mm/swap.c
-+++ b/mm/swap.c
-@@ -174,8 +174,8 @@ EXPORT_SYMBOL(mark_page_accessed);
-  * lru_cache_add: add a page to the page lists
-  * @page: the page to add
-  */
--static DEFINE_PER_CPU(struct pagevec, lru_add_pvecs) = { 0, };
--static DEFINE_PER_CPU(struct pagevec, lru_add_active_pvecs) = { 0, };
-+static DEFINE_PER_CPU(struct pagevec, lru_add_pvecs) = { 0, 0, {NULL} };
-+static DEFINE_PER_CPU(struct pagevec, lru_add_active_pvecs) = { 0, 0, {NULL} };
- 
- void fastcall lru_cache_add(struct page *page)
- {
---- a/mm/tiny-shmem.c
-+++ b/mm/tiny-shmem.c
-@@ -26,7 +26,7 @@ static struct file_system_type tmpfs_fs_
- 	.kill_sb	= kill_litter_super,
- };
- 
--static struct vfsmount *shm_mnt;
-+struct vfsmount *shm_mnt;
- 
- static int __init init_tmpfs(void)
- {
---- a/mm/vmalloc.c
-+++ b/mm/vmalloc.c
-@@ -201,6 +201,8 @@ static struct vm_struct *__get_vm_area_n
- 
- 	write_lock(&vmlist_lock);
- 	for (p = &vmlist; (tmp = *p) != NULL ;p = &tmp->next) {
-+		if (addr > end - size)
-+			goto out;
- 		if ((unsigned long)tmp->addr < addr) {
- 			if((unsigned long)tmp->addr + tmp->size >= addr)
- 				addr = ALIGN(tmp->size + 
-@@ -212,8 +214,6 @@ static struct vm_struct *__get_vm_area_n
- 		if (size + addr <= (unsigned long)tmp->addr)
- 			goto found;
- 		addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
--		if (addr > end - size)
--			goto out;
- 	}
- 
- found:
---- a/net/core/flow.c
-+++ b/net/core/flow.c
-@@ -40,7 +40,7 @@ atomic_t flow_cache_genid = ATOMIC_INIT(
- 
- static u32 flow_hash_shift;
- #define flow_hash_size	(1 << flow_hash_shift)
--static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
-+static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
- 
- #define flow_table(cpu) (per_cpu(flow_tables, cpu))
- 
-@@ -53,7 +53,7 @@ struct flow_percpu_info {
- 	u32 hash_rnd;
- 	int count;
- } ____cacheline_aligned;
--static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
-+static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
- 
- #define flow_hash_rnd_recalc(cpu) \
- 	(per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
-@@ -70,7 +70,7 @@ struct flow_flush_info {
- 	atomic_t cpuleft;
- 	struct completion completion;
- };
--static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
-+static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
- 
- #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
- 
---- a/net/dccp/ccids/ccid3.c
-+++ b/net/dccp/ccids/ccid3.c
-@@ -44,7 +44,7 @@
- static int ccid3_debug;
- #define ccid3_pr_debug(format, a...)	DCCP_PR_DEBUG(ccid3_debug, format, ##a)
- #else
--#define ccid3_pr_debug(format, a...)
-+#define ccid3_pr_debug(format, a...) do {} while (0)
- #endif
- 
- static struct dccp_tx_hist *ccid3_tx_hist;
---- a/net/dccp/dccp.h
-+++ b/net/dccp/dccp.h
-@@ -42,8 +42,8 @@ extern int dccp_debug;
- #define dccp_pr_debug(format, a...)	  DCCP_PR_DEBUG(dccp_debug, format, ##a)
- #define dccp_pr_debug_cat(format, a...)   DCCP_PRINTK(dccp_debug, format, ##a)
- #else
--#define dccp_pr_debug(format, a...)
--#define dccp_pr_debug_cat(format, a...)
-+#define dccp_pr_debug(format, a...) do {} while (0)
-+#define dccp_pr_debug_cat(format, a...) do {} while (0)
- #endif
- 
- extern struct inet_hashinfo dccp_hashinfo;
---- a/net/ipv4/inet_connection_sock.c
-+++ b/net/ipv4/inet_connection_sock.c
-@@ -15,6 +15,7 @@
- 
- #include <linux/module.h>
- #include <linux/jhash.h>
-+#include <linux/grsecurity.h>
- 
- #include <net/inet_connection_sock.h>
- #include <net/inet_hashtables.h>
---- a/net/ipv4/inet_hashtables.c
-+++ b/net/ipv4/inet_hashtables.c
-@@ -18,11 +18,14 @@
- #include <linux/sched.h>
- #include <linux/slab.h>
- #include <linux/wait.h>
-+#include <linux/grsecurity.h>
- 
- #include <net/inet_connection_sock.h>
- #include <net/inet_hashtables.h>
- #include <net/ip.h>
- 
-+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
-+
- /*
-  * Allocate and initialize a new local port bind bucket.
-  * The bindhash mutex for snum's hash chain must be held here.
-@@ -338,6 +341,8 @@ ok:
- 		}
- 		spin_unlock(&head->lock);
- 
-+		gr_update_task_in_ip_table(current, inet_sk(sk));
-+
- 		if (tw) {
- 			inet_twsk_deschedule(tw, death_row);
- 			inet_twsk_put(tw);
---- /dev/null
-+++ b/net/ipv4/netfilter/ipt_stealth.c
-@@ -0,0 +1,114 @@
-+/* Kernel module to add stealth support.
-+ *
-+ * Copyright (C) 2002-2006 Brad Spengler  <spender@grsecurity.net>
-+ *
-+ */
-+
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/skbuff.h>
-+#include <linux/net.h>
-+#include <linux/sched.h>
-+#include <linux/inet.h>
-+#include <linux/stddef.h>
-+
-+#include <net/ip.h>
-+#include <net/sock.h>
-+#include <net/tcp.h>
-+#include <net/udp.h>
-+#include <net/route.h>
-+#include <net/inet_common.h>
-+
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+
-+MODULE_LICENSE("GPL");
-+
-+extern struct sock *udp_v4_lookup(u32 saddr, u16 sport, u32 daddr, u16 dport, int dif);
-+
-+static int
-+match(const struct sk_buff *skb,
-+      const struct net_device *in,
-+      const struct net_device *out,
-+      const struct xt_match *match,
-+      const void *matchinfo,
-+      int offset,
-+      unsigned int protoff,
-+      int *hotdrop)
-+{
-+	struct iphdr *ip = ip_hdr(skb);
-+	struct tcphdr th;
-+	struct udphdr uh;
-+	struct sock *sk = NULL;
-+
-+	if (!ip || offset) return 0;
-+
-+	switch(ip->protocol) {
-+	case IPPROTO_TCP:
-+		if (skb_copy_bits(skb, (ip_hdr(skb))->ihl*4, &th, sizeof(th)) < 0) {
-+			*hotdrop = 1;
-+			return 0;
-+		}
-+		if (!(th.syn && !th.ack)) return 0;
-+		sk = inet_lookup_listener(&tcp_hashinfo, ip->daddr, th.dest, inet_iif(skb));	
-+		break;
-+	case IPPROTO_UDP:
-+		if (skb_copy_bits(skb, (ip_hdr(skb))->ihl*4, &uh, sizeof(uh)) < 0) {
-+			*hotdrop = 1;
-+			return 0;
-+		}
-+		sk = udp_v4_lookup(ip->saddr, uh.source, ip->daddr, uh.dest, skb->dev->ifindex);
-+		break;
-+	default:
-+		return 0;
-+	}
-+
-+	if(!sk) // port is being listened on, match this
-+		return 1;
-+	else {
-+		sock_put(sk);
-+		return 0;
-+	}
-+}
-+
-+/* Called when user tries to insert an entry of this type. */
-+static int
-+checkentry(const char *tablename,
-+           const void *nip,
-+	   const struct xt_match *match,
-+           void *matchinfo,
-+           unsigned int hook_mask)
-+{
-+	const struct ipt_ip *ip = (const struct ipt_ip *)nip;
-+
-+	if(((ip->proto == IPPROTO_TCP && !(ip->invflags & IPT_INV_PROTO)) ||
-+		((ip->proto == IPPROTO_UDP) && !(ip->invflags & IPT_INV_PROTO)))
-+		&& (hook_mask & (1 << NF_IP_LOCAL_IN)))
-+			return 1;
-+
-+	printk("stealth: Only works on TCP and UDP for the INPUT chain.\n");
-+
-+        return 0;
-+}
-+
-+
-+static struct xt_match stealth_match = {
-+	.name = "stealth",
-+	.family = AF_INET,
-+	.match = match,
-+	.checkentry = checkentry,
-+	.destroy = NULL,
-+	.me = THIS_MODULE
-+};
-+
-+static int __init init(void)
-+{
-+	return xt_register_match(&stealth_match);
-+}
-+
-+static void __exit fini(void)
-+{
-+	xt_unregister_match(&stealth_match);
-+}
-+
-+module_init(init);
-+module_exit(fini);
---- a/net/ipv4/netfilter/Kconfig
-+++ b/net/ipv4/netfilter/Kconfig
-@@ -130,6 +130,21 @@ config IP_NF_MATCH_ADDRTYPE
- 	  If you want to compile it as a module, say M here and read
- 	  <file:Documentation/modules.txt>.  If unsure, say `N'.
- 
-+config IP_NF_MATCH_STEALTH
-+	tristate "stealth match support"
-+	depends on IP_NF_IPTABLES
-+	help
-+	  Enabling this option will drop all syn packets coming to unserved tcp
-+	  ports as well as all packets coming to unserved udp ports.  If you
-+	  are using your system to route any type of packets (ie. via NAT)
-+	  you should put this module at the end of your ruleset, since it will
-+	  drop packets that aren't going to ports that are listening on your
-+	  machine itself, it doesn't take into account that the packet might be
-+	  destined for someone on your internal network if you're using NAT for
-+	  instance.
-+
-+	  To compile it as a module, choose M here.  If unsure, say N.
-+
- # `filter', generic and specific targets
- config IP_NF_FILTER
- 	tristate "Packet filtering"
-@@ -403,4 +418,3 @@ config IP_NF_ARP_MANGLE
- 	  hardware and network addresses.
- 
- endmenu
--
---- a/net/ipv4/netfilter/Makefile
-+++ b/net/ipv4/netfilter/Makefile
-@@ -49,6 +49,7 @@ obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn
- obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o
- obj-$(CONFIG_IP_NF_MATCH_TTL) += ipt_ttl.o
- obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o
-+obj-$(CONFIG_IP_NF_MATCH_STEALTH) += ipt_stealth.o
- 
- # targets
- obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
---- a/net/ipv4/tcp.c
-+++ b/net/ipv4/tcp.c
-@@ -1053,7 +1053,8 @@ int tcp_read_sock(struct sock *sk, read_
- 		return -ENOTCONN;
- 	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
- 		if (offset < skb->len) {
--			size_t used, len;
-+			int used;
-+			size_t len;
- 
- 			len = skb->len - offset;
- 			/* Stop reading if we hit a patch of urgent data */
---- a/net/ipv4/tcp_ipv4.c
-+++ b/net/ipv4/tcp_ipv4.c
-@@ -61,6 +61,7 @@
- #include <linux/jhash.h>
- #include <linux/init.h>
- #include <linux/times.h>
-+#include <linux/grsecurity.h>
- 
- #include <net/icmp.h>
- #include <net/inet_hashtables.h>
---- a/net/ipv4/udp.c
-+++ b/net/ipv4/udp.c
-@@ -98,12 +98,19 @@
- #include <linux/skbuff.h>
- #include <linux/proc_fs.h>
- #include <linux/seq_file.h>
-+#include <linux/grsecurity.h>
- #include <net/icmp.h>
- #include <net/route.h>
- #include <net/checksum.h>
- #include <net/xfrm.h>
- #include "udp_impl.h"
- 
-+extern int gr_search_udp_recvmsg(const struct sock *sk,
-+				 const struct sk_buff *skb);
-+extern int gr_search_udp_sendmsg(const struct sock *sk,
-+				 const struct sockaddr_in *addr);
-+
-+
- /*
-  *	Snmp MIB for the UDP layer
-  */
-@@ -287,6 +294,13 @@ static struct sock *__udp4_lib_lookup(__
- 	return result;
- }
- 
-+struct sock *udp_v4_lookup(__be32 saddr, __be16 sport,
-+			   __be32 daddr, __be16 dport, int dif)
-+{
-+	return __udp4_lib_lookup(saddr, sport, daddr, dport, dif, udp_hash);
-+}
-+
-+
- static inline struct sock *udp_v4_mcast_next(struct sock *sk,
- 					     __be16 loc_port, __be32 loc_addr,
- 					     __be16 rmt_port, __be32 rmt_addr,
-@@ -572,9 +586,16 @@ int udp_sendmsg(struct kiocb *iocb, stru
- 		dport = usin->sin_port;
- 		if (dport == 0)
- 			return -EINVAL;
-+
-+		if (!gr_search_udp_sendmsg(sk, usin))
-+			return -EPERM;
- 	} else {
- 		if (sk->sk_state != TCP_ESTABLISHED)
- 			return -EDESTADDRREQ;
-+
-+		if (!gr_search_udp_sendmsg(sk, NULL))
-+			return -EPERM;
-+
- 		daddr = inet->daddr;
- 		dport = inet->dport;
- 		/* Open fast path for connected socket.
-@@ -834,6 +855,11 @@ try_again:
- 	if (!skb)
- 		goto out;
- 
-+	if (!gr_search_udp_recvmsg(sk, skb)) {
-+		err = -EPERM;
-+		goto out_free;
-+	}
-+
- 	ulen = skb->len - sizeof(struct udphdr);
- 	copied = len;
- 	if (copied > ulen)
---- a/net/ipv6/exthdrs.c
-+++ b/net/ipv6/exthdrs.c
-@@ -645,7 +645,7 @@ static struct tlvtype_proc tlvprochopopt
- 		.type	= IPV6_TLV_JUMBO,
- 		.func	= ipv6_hop_jumbo,
- 	},
--	{ -1, }
-+	{ -1, NULL }
- };
- 
- int ipv6_parse_hopopts(struct sk_buff **skbp)
---- a/net/ipv6/raw.c
-+++ b/net/ipv6/raw.c
-@@ -577,7 +577,7 @@ out:
- 	return err;
- }
- 
--static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
-+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
- 			struct flowi *fl, struct rt6_info *rt,
- 			unsigned int flags)
- {
---- a/net/irda/ircomm/ircomm_tty.c
-+++ b/net/irda/ircomm/ircomm_tty.c
-@@ -371,7 +371,7 @@ static int ircomm_tty_open(struct tty_st
- 	IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
- 
- 	line = tty->index;
--	if ((line < 0) || (line >= IRCOMM_TTY_PORTS)) {
-+	if (line >= IRCOMM_TTY_PORTS) {
- 		return -ENODEV;
- 	}
- 
---- a/net/mac80211/ieee80211.c
-+++ b/net/mac80211/ieee80211.c
-@@ -1260,7 +1260,7 @@ __ieee80211_parse_tx_radiotap(
- }
- 
- 
--static ieee80211_txrx_result inline
-+static inline ieee80211_txrx_result
- __ieee80211_tx_prepare(struct ieee80211_txrx_data *tx,
- 		       struct sk_buff *skb,
- 		       struct net_device *dev,
-@@ -1332,7 +1332,7 @@ __ieee80211_tx_prepare(struct ieee80211_
- 	return res;
- }
- 
--static int inline is_ieee80211_device(struct net_device *dev,
-+static inline int is_ieee80211_device(struct net_device *dev,
- 				      struct net_device *master)
- {
- 	return (wdev_priv(dev->ieee80211_ptr) ==
-@@ -1341,7 +1341,7 @@ static int inline is_ieee80211_device(st
- 
- /* Device in tx->dev has a reference added; use dev_put(tx->dev) when
-  * finished with it. */
--static int inline ieee80211_tx_prepare(struct ieee80211_txrx_data *tx,
-+static inline int ieee80211_tx_prepare(struct ieee80211_txrx_data *tx,
- 				       struct sk_buff *skb,
- 				       struct net_device *mdev,
- 				       struct ieee80211_tx_control *control)
---- a/net/mac80211/regdomain.c
-+++ b/net/mac80211/regdomain.c
-@@ -61,14 +61,14 @@ static const struct ieee80211_channel_ra
- 	{ 5180, 5240, 17, 6 } /* IEEE 802.11a, channels 36..48 */,
- 	{ 5260, 5320, 23, 6 } /* IEEE 802.11a, channels 52..64 */,
- 	{ 5745, 5825, 30, 6 } /* IEEE 802.11a, channels 149..165, outdoor */,
--	{ 0 }
-+	{ 0, 0, 0, 0 }
- };
- 
- static const struct ieee80211_channel_range ieee80211_mkk_channels[] = {
- 	{ 2412, 2472, 20, 6 } /* IEEE 802.11b/g, channels 1..13 */,
- 	{ 5170, 5240, 20, 6 } /* IEEE 802.11a, channels 34..48 */,
- 	{ 5260, 5320, 20, 6 } /* IEEE 802.11a, channels 52..64 */,
--	{ 0 }
-+	{ 0, 0, 0, 0 }
- };
- 
- 
---- a/net/sctp/socket.c
-+++ b/net/sctp/socket.c
-@@ -1370,7 +1370,7 @@ SCTP_STATIC int sctp_sendmsg(struct kioc
- 	struct sctp_sndrcvinfo *sinfo;
- 	struct sctp_initmsg *sinit;
- 	sctp_assoc_t associd = 0;
--	sctp_cmsgs_t cmsgs = { NULL };
-+	sctp_cmsgs_t cmsgs = { NULL, NULL };
- 	int err;
- 	sctp_scope_t scope;
- 	long timeo;
---- a/net/socket.c
-+++ b/net/socket.c
-@@ -84,6 +84,7 @@
- #include <linux/kmod.h>
- #include <linux/audit.h>
- #include <linux/wireless.h>
-+#include <linux/in.h>
- 
- #include <asm/uaccess.h>
- #include <asm/unistd.h>
-@@ -93,6 +94,21 @@
- #include <net/sock.h>
- #include <linux/netfilter.h>
- 
-+extern void gr_attach_curr_ip(const struct sock *sk);
-+extern int gr_handle_sock_all(const int family, const int type,
-+			      const int protocol);
-+extern int gr_handle_sock_server(const struct sockaddr *sck);
-+extern int gr_handle_sock_server_other(const struct socket *sck);
-+extern int gr_handle_sock_client(const struct sockaddr *sck);
-+extern int gr_search_connect(const struct socket * sock,
-+			     const struct sockaddr_in * addr);
-+extern int gr_search_bind(const struct socket * sock,
-+			   const struct sockaddr_in * addr);
-+extern int gr_search_listen(const struct socket * sock);
-+extern int gr_search_accept(const struct socket * sock);
-+extern int gr_search_socket(const int domain, const int type,
-+			    const int protocol);
-+
- static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
- static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
- 			 unsigned long nr_segs, loff_t pos);
-@@ -292,7 +308,7 @@ static int sockfs_get_sb(struct file_sys
- 			     mnt);
- }
- 
--static struct vfsmount *sock_mnt __read_mostly;
-+struct vfsmount *sock_mnt __read_mostly;
- 
- static struct file_system_type sock_fs_type = {
- 	.name =		"sockfs",
-@@ -1199,6 +1215,16 @@ asmlinkage long sys_socket(int family, i
- 	int retval;
- 	struct socket *sock;
- 
-+	if(!gr_search_socket(family, type, protocol)) {
-+		retval = -EACCES;
-+		goto out;
-+	}
-+
-+	if (gr_handle_sock_all(family, type, protocol)) {
-+		retval = -EACCES;
-+		goto out;
-+	}
-+
- 	retval = sock_create(family, type, protocol, &sock);
- 	if (retval < 0)
- 		goto out;
-@@ -1329,6 +1355,12 @@ asmlinkage long sys_bind(int fd, struct 
- 	if (sock) {
- 		err = move_addr_to_kernel(umyaddr, addrlen, address);
- 		if (err >= 0) {
-+			if (!gr_search_bind(sock, (struct sockaddr_in *)address) ||
-+			    gr_handle_sock_server((struct sockaddr *)address)) {
-+				err = -EACCES;
-+				goto error;
-+			}
-+
- 			err = security_socket_bind(sock,
- 						   (struct sockaddr *)address,
- 						   addrlen);
-@@ -1337,6 +1369,7 @@ asmlinkage long sys_bind(int fd, struct 
- 						      (struct sockaddr *)
- 						      address, addrlen);
- 		}
-+error:
- 		fput_light(sock->file, fput_needed);
- 	}
- 	return err;
-@@ -1360,10 +1393,17 @@ asmlinkage long sys_listen(int fd, int b
- 		if ((unsigned)backlog > sysctl_somaxconn)
- 			backlog = sysctl_somaxconn;
- 
-+		if (gr_handle_sock_server_other(sock) ||
-+		    !gr_search_listen(sock)) {
-+			err = -EPERM;
-+			goto error;
-+		}
-+
- 		err = security_socket_listen(sock, backlog);
- 		if (!err)
- 			err = sock->ops->listen(sock, backlog);
- 
-+error:
- 		fput_light(sock->file, fput_needed);
- 	}
- 	return err;
-@@ -1400,6 +1440,13 @@ asmlinkage long sys_accept(int fd, struc
- 	newsock->type = sock->type;
- 	newsock->ops = sock->ops;
- 
-+	if (gr_handle_sock_server_other(sock) ||
-+	    !gr_search_accept(sock)) {
-+		err = -EPERM;
-+		sock_release(newsock);
-+		goto out_put;
-+	}
-+
- 	/*
- 	 * We don't need try_module_get here, as the listening socket (sock)
- 	 * has the protocol module (sock->ops->owner) held.
-@@ -1443,6 +1490,7 @@ asmlinkage long sys_accept(int fd, struc
- 	err = newfd;
- 
- 	security_socket_post_accept(sock, newsock);
-+	gr_attach_curr_ip(newsock->sk);
- 
- out_put:
- 	fput_light(sock->file, fput_needed);
-@@ -1476,6 +1524,7 @@ asmlinkage long sys_connect(int fd, stru
- {
- 	struct socket *sock;
- 	char address[MAX_SOCK_ADDR];
-+	struct sockaddr *sck;
- 	int err, fput_needed;
- 
- 	sock = sockfd_lookup_light(fd, &err, &fput_needed);
-@@ -1485,6 +1534,13 @@ asmlinkage long sys_connect(int fd, stru
- 	if (err < 0)
- 		goto out_put;
- 
-+	sck = (struct sockaddr *)address;
-+	if (!gr_search_connect(sock, (struct sockaddr_in *)sck) ||
-+	    gr_handle_sock_client(sck)) {
-+		err = -EACCES;
-+		goto out_put;
-+	}
-+
- 	err =
- 	    security_socket_connect(sock, (struct sockaddr *)address, addrlen);
- 	if (err)
-@@ -1762,6 +1818,7 @@ asmlinkage long sys_shutdown(int fd, int
- 			err = sock->ops->shutdown(sock, how);
- 		fput_light(sock->file, fput_needed);
- 	}
-+
- 	return err;
- }
- 
---- a/net/unix/af_unix.c
-+++ b/net/unix/af_unix.c
-@@ -115,6 +115,7 @@
- #include <linux/mount.h>
- #include <net/checksum.h>
- #include <linux/security.h>
-+#include <linux/grsecurity.h>
- 
- int sysctl_unix_max_dgram_qlen __read_mostly = 10;
- 
-@@ -733,6 +734,11 @@ static struct sock *unix_find_other(stru
- 		if (err)
- 			goto put_fail;
- 
-+		if (!gr_acl_handle_unix(nd.dentry, nd.mnt)) {
-+			err = -EACCES;
-+			goto put_fail;
-+		}
-+
- 		err = -ECONNREFUSED;
- 		if (!S_ISSOCK(nd.dentry->d_inode->i_mode))
- 			goto put_fail;
-@@ -756,6 +762,13 @@ static struct sock *unix_find_other(stru
- 		if (u) {
- 			struct dentry *dentry;
- 			dentry = unix_sk(u)->dentry;
-+
-+			if (!gr_handle_chroot_unix(u->sk_peercred.pid)) {
-+				err = -EPERM;
-+				sock_put(u);
-+				goto fail;
-+			}
-+
- 			if (dentry)
- 				touch_atime(unix_sk(u)->mnt, dentry);
- 		} else
-@@ -834,9 +847,18 @@ static int unix_bind(struct socket *sock
- 		 */
- 		mode = S_IFSOCK |
- 		       (SOCK_INODE(sock)->i_mode & ~current->fs->umask);
-+
-+		if (!gr_acl_handle_mknod(dentry, nd.dentry, nd.mnt, mode)) {
-+			err = -EACCES;
-+			goto out_mknod_dput;
-+		}
-+
- 		err = vfs_mknod(nd.dentry->d_inode, dentry, mode, 0);
- 		if (err)
- 			goto out_mknod_dput;
-+
-+		gr_handle_create(dentry, nd.mnt);
-+
- 		mutex_unlock(&nd.dentry->d_inode->i_mutex);
- 		dput(nd.dentry);
- 		nd.dentry = dentry;
-@@ -854,6 +876,10 @@ static int unix_bind(struct socket *sock
- 			goto out_unlock;
- 		}
- 
-+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
-+		sk->sk_peercred.pid = current->pid;
-+#endif
-+
- 		list = &unix_socket_table[addr->hash];
- 	} else {
- 		list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
---- a/scripts/pnmtologo.c
-+++ b/scripts/pnmtologo.c
-@@ -237,14 +237,14 @@ static void write_header(void)
-     fprintf(out, " *  Linux logo %s\n", logoname);
-     fputs(" */\n\n", out);
-     fputs("#include <linux/linux_logo.h>\n\n", out);
--    fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
-+    fprintf(out, "static unsigned char %s_data[] = {\n",
- 	    logoname);
- }
- 
- static void write_footer(void)
- {
-     fputs("\n};\n\n", out);
--    fprintf(out, "struct linux_logo %s __initdata = {\n", logoname);
-+    fprintf(out, "struct linux_logo %s = {\n", logoname);
-     fprintf(out, "    .type\t= %s,\n", logo_types[logo_type]);
-     fprintf(out, "    .width\t= %d,\n", logo_width);
-     fprintf(out, "    .height\t= %d,\n", logo_height);
-@@ -374,7 +374,7 @@ static void write_logo_clut224(void)
-     fputs("\n};\n\n", out);
- 
-     /* write logo clut */
--    fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
-+    fprintf(out, "static unsigned char %s_clut[] = {\n",
- 	    logoname);
-     write_hex_cnt = 0;
-     for (i = 0; i < logo_clutsize; i++) {
---- a/security/commoncap.c
-+++ b/security/commoncap.c
-@@ -22,10 +22,11 @@
- #include <linux/ptrace.h>
- #include <linux/xattr.h>
- #include <linux/hugetlb.h>
-+#include <linux/grsecurity.h>
- 
- int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
- {
--	NETLINK_CB(skb).eff_cap = current->cap_effective;
-+	NETLINK_CB(skb).eff_cap = gr_cap_rtnetlink();
- 	return 0;
- }
- 
-@@ -43,7 +44,15 @@ EXPORT_SYMBOL(cap_netlink_recv);
- int cap_capable (struct task_struct *tsk, int cap)
- {
- 	/* Derived from include/linux/sched.h:capable. */
--	if (cap_raised(tsk->cap_effective, cap))
-+	if (cap_raised (tsk->cap_effective, cap))
-+		return 0;
-+	return -EPERM;
-+}
-+
-+int cap_capable_nolog (struct task_struct *tsk, int cap)
-+{
-+	/* tsk = current for all callers */
-+	if (cap_raised(tsk->cap_effective, cap) && gr_is_capable_nolog(cap))
- 		return 0;
- 	return -EPERM;
- }
-@@ -162,8 +171,11 @@ void cap_bprm_apply_creds (struct linux_
- 		}
- 	}
- 
--	current->suid = current->euid = current->fsuid = bprm->e_uid;
--	current->sgid = current->egid = current->fsgid = bprm->e_gid;
-+	if (!gr_check_user_change(-1, bprm->e_uid, bprm->e_uid))
-+		current->suid = current->euid = current->fsuid = bprm->e_uid;
-+
-+	if (!gr_check_group_change(-1, bprm->e_gid, bprm->e_gid))
-+		current->sgid = current->egid = current->fsgid = bprm->e_gid;
- 
- 	/* For init, we want to retain the capabilities set
- 	 * in the init_task struct. Thus we skip the usual
-@@ -174,6 +186,8 @@ void cap_bprm_apply_creds (struct linux_
- 		    cap_intersect (new_permitted, bprm->cap_effective);
- 	}
- 
-+	gr_handle_chroot_caps(current);
-+
- 	/* AUD: Audit candidate if current->cap_effective is set */
- 
- 	current->keep_capabilities = 0;
-@@ -319,12 +333,13 @@ int cap_vm_enough_memory(struct mm_struc
- {
- 	int cap_sys_admin = 0;
- 
--	if (cap_capable(current, CAP_SYS_ADMIN) == 0)
-+	if (cap_capable_nolog(current, CAP_SYS_ADMIN) == 0)
- 		cap_sys_admin = 1;
- 	return __vm_enough_memory(mm, pages, cap_sys_admin);
- }
- 
- EXPORT_SYMBOL(cap_capable);
-+EXPORT_SYMBOL(cap_capable_nolog);
- EXPORT_SYMBOL(cap_settime);
- EXPORT_SYMBOL(cap_ptrace);
- EXPORT_SYMBOL(cap_capget);
---- a/security/dummy.c
-+++ b/security/dummy.c
-@@ -28,6 +28,7 @@
- #include <linux/hugetlb.h>
- #include <linux/ptrace.h>
- #include <linux/file.h>
-+#include <linux/grsecurity.h>
- 
- static int dummy_ptrace (struct task_struct *parent, struct task_struct *child)
- {
-@@ -138,8 +139,11 @@ static void dummy_bprm_apply_creds (stru
- 		}
- 	}
- 
--	current->suid = current->euid = current->fsuid = bprm->e_uid;
--	current->sgid = current->egid = current->fsgid = bprm->e_gid;
-+	if (!gr_check_user_change(-1, bprm->e_uid, bprm->e_uid))
-+		current->suid = current->euid = current->fsuid = bprm->e_uid;
-+
-+	if (!gr_check_group_change(-1, bprm->e_gid, bprm->e_gid))
-+		current->sgid = current->egid = current->fsgid = bprm->e_gid;
- 
- 	dummy_capget(current, &current->cap_effective, &current->cap_inheritable, &current->cap_permitted);
- }
---- a/security/Kconfig
-+++ b/security/Kconfig
-@@ -4,6 +4,429 @@
- 
- menu "Security options"
- 
-+source grsecurity/Kconfig
-+
-+menu "PaX"
-+
-+config PAX
-+	bool "Enable various PaX features"
-+	depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS32 || MIPS64 || PARISC || PPC32 || PPC64 || SPARC32 || SPARC64 || X86 || X86_64)
-+	help
-+	  This allows you to enable various PaX features.  PaX adds
-+	  intrusion prevention mechanisms to the kernel that reduce
-+	  the risks posed by exploitable memory corruption bugs.
-+
-+menu "PaX Control"
-+	depends on PAX
-+
-+config PAX_SOFTMODE
-+	bool 'Support soft mode'
-+	help
-+	  Enabling this option will allow you to run PaX in soft mode, that
-+	  is, PaX features will not be enforced by default, only on executables
-+	  marked explicitly.  You must also enable PT_PAX_FLAGS support as it
-+	  is the only way to mark executables for soft mode use.
-+
-+	  Soft mode can be activated by using the "pax_softmode=1" kernel command
-+	  line option on boot.  Furthermore you can control various PaX features
-+	  at runtime via the entries in /proc/sys/kernel/pax.
-+
-+config PAX_EI_PAX
-+	bool 'Use legacy ELF header marking'
-+	help
-+	  Enabling this option will allow you to control PaX features on
-+	  a per executable basis via the 'chpax' utility available at
-+	  http://pax.grsecurity.net/.  The control flags will be read from
-+	  an otherwise reserved part of the ELF header.  This marking has
-+	  numerous drawbacks (no support for soft-mode, toolchain does not
-+	  know about the non-standard use of the ELF header) therefore it
-+	  has been deprecated in favour of PT_PAX_FLAGS support.
-+
-+	  If you have applications not marked by the PT_PAX_FLAGS ELF
-+	  program header then you MUST enable this option otherwise they
-+	  will not get any protection.
-+
-+	  Note that if you enable PT_PAX_FLAGS marking support as well,
-+	  the PT_PAX_FLAG marks will override the legacy EI_PAX marks.
-+
-+config PAX_PT_PAX_FLAGS
-+	bool 'Use ELF program header marking'
-+	help
-+	  Enabling this option will allow you to control PaX features on
-+	  a per executable basis via the 'paxctl' utility available at
-+	  http://pax.grsecurity.net/.  The control flags will be read from
-+	  a PaX specific ELF program header (PT_PAX_FLAGS).  This marking
-+	  has the benefits of supporting both soft mode and being fully
-+	  integrated into the toolchain (the binutils patch is available
-+	  from http://pax.grsecurity.net).
-+
-+	  If you have applications not marked by the PT_PAX_FLAGS ELF
-+	  program header then you MUST enable the EI_PAX marking support
-+	  otherwise they will not get any protection.
-+
-+	  Note that if you enable the legacy EI_PAX marking support as well,
-+	  the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
-+
-+choice
-+	prompt 'MAC system integration'
-+	default PAX_HAVE_ACL_FLAGS
-+	help
-+	  Mandatory Access Control systems have the option of controlling
-+	  PaX flags on a per executable basis, choose the method supported
-+	  by your particular system.
-+
-+	  - "none": if your MAC system does not interact with PaX,
-+	  - "direct": if your MAC system defines pax_set_initial_flags() itself,
-+	  - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
-+
-+	  NOTE: this option is for developers/integrators only.
-+
-+	config PAX_NO_ACL_FLAGS
-+		bool 'none'
-+
-+	config PAX_HAVE_ACL_FLAGS
-+		bool 'direct'
-+
-+	config PAX_HOOK_ACL_FLAGS
-+		bool 'hook'
-+endchoice
-+
-+endmenu
-+
-+menu "Non-executable pages"
-+	depends on PAX
-+
-+config PAX_NOEXEC
-+	bool "Enforce non-executable pages"
-+	depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || IA64 || MIPS32 || MIPS64 || PARISC || PPC32 || PPC64 || SPARC32 || SPARC64 || X86 || X86_64)
-+	help
-+	  By design some architectures do not allow for protecting memory
-+	  pages against execution or even if they do, Linux does not make
-+	  use of this feature.  In practice this means that if a page is
-+	  readable (such as the stack or heap) it is also executable.
-+
-+	  There is a well known exploit technique that makes use of this
-+	  fact and a common programming mistake where an attacker can
-+	  introduce code of his choice somewhere in the attacked program's
-+	  memory (typically the stack or the heap) and then execute it.
-+
-+	  If the attacked program was running with different (typically
-+	  higher) privileges than that of the attacker, then he can elevate
-+	  his own privilege level (e.g. get a root shell, write to files for
-+	  which he does not have write access to, etc).
-+
-+	  Enabling this option will let you choose from various features
-+	  that prevent the injection and execution of 'foreign' code in
-+	  a program.
-+
-+	  This will also break programs that rely on the old behaviour and
-+	  expect that dynamically allocated memory via the malloc() family
-+	  of functions is executable (which it is not).  Notable examples
-+	  are the XFree86 4.x server, the java runtime and wine.
-+
-+config PAX_PAGEEXEC
-+	bool "Paging based non-executable pages"
-+	depends on !COMPAT_VDSO && PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MPENTIUM4 || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2)
-+	help
-+	  This implementation is based on the paging feature of the CPU.
-+	  On i386 without hardware non-executable bit support there is a
-+	  variable but usually low performance impact, however on Intel's
-+	  P4 core based CPUs it is very high so you should not enable this
-+	  for kernels meant to be used on such CPUs.
-+
-+	  On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
-+	  with hardware non-executable bit support there is no performance
-+	  impact, on ppc the impact is negligible.
-+
-+	  Note that several architectures require various emulations due to
-+	  badly designed userland ABIs, this will cause a performance impact
-+	  but will disappear as soon as userland is fixed (e.g., ppc users
-+	  can make use of the secure-plt feature found in binutils).
-+
-+config PAX_SEGMEXEC
-+	bool "Segmentation based non-executable pages"
-+	depends on !COMPAT_VDSO && PAX_NOEXEC && X86_32
-+	help
-+	  This implementation is based on the segmentation feature of the
-+	  CPU and has a very small performance impact, however applications
-+	  will be limited to a 1.5 GB address space instead of the normal
-+	  3 GB.
-+
-+config PAX_EMUTRAMP
-+	bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || PPC32 || X86)
-+	default y if PARISC || PPC32
-+	help
-+	  There are some programs and libraries that for one reason or
-+	  another attempt to execute special small code snippets from
-+	  non-executable memory pages.  Most notable examples are the
-+	  signal handler return code generated by the kernel itself and
-+	  the GCC trampolines.
-+
-+	  If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
-+	  such programs will no longer work under your kernel.
-+
-+	  As a remedy you can say Y here and use the 'chpax' or 'paxctl'
-+	  utilities to enable trampoline emulation for the affected programs
-+	  yet still have the protection provided by the non-executable pages.
-+
-+	  On parisc and ppc you MUST enable this option and EMUSIGRT as
-+	  well, otherwise your system will not even boot.
-+
-+	  Alternatively you can say N here and use the 'chpax' or 'paxctl'
-+	  utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
-+	  for the affected files.
-+
-+	  NOTE: enabling this feature *may* open up a loophole in the
-+	  protection provided by non-executable pages that an attacker
-+	  could abuse.  Therefore the best solution is to not have any
-+	  files on your system that would require this option.  This can
-+	  be achieved by not using libc5 (which relies on the kernel
-+	  signal handler return code) and not using or rewriting programs
-+	  that make use of the nested function implementation of GCC.
-+	  Skilled users can just fix GCC itself so that it implements
-+	  nested function calls in a way that does not interfere with PaX.
-+
-+config PAX_EMUSIGRT
-+	bool "Automatically emulate sigreturn trampolines"
-+	depends on PAX_EMUTRAMP && (PARISC || PPC32)
-+	default y
-+	help
-+	  Enabling this option will have the kernel automatically detect
-+	  and emulate signal return trampolines executing on the stack
-+	  that would otherwise lead to task termination.
-+
-+	  This solution is intended as a temporary one for users with
-+	  legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
-+	  Modula-3 runtime, etc) or executables linked to such, basically
-+	  everything that does not specify its own SA_RESTORER function in
-+	  normal executable memory like glibc 2.1+ does.
-+
-+	  On parisc and ppc you MUST enable this option, otherwise your
-+	  system will not even boot.
-+
-+	  NOTE: this feature cannot be disabled on a per executable basis
-+	  and since it *does* open up a loophole in the protection provided
-+	  by non-executable pages, the best solution is to not have any
-+	  files on your system that would require this option.
-+
-+config PAX_MPROTECT
-+	bool "Restrict mprotect()"
-+	depends on (PAX_PAGEEXEC || PAX_SEGMEXEC) && !PPC64
-+	help
-+	  Enabling this option will prevent programs from
-+	   - changing the executable status of memory pages that were
-+	     not originally created as executable,
-+	   - making read-only executable pages writable again,
-+	   - creating executable pages from anonymous memory.
-+
-+	  You should say Y here to complete the protection provided by
-+	  the enforcement of non-executable pages.
-+
-+	  NOTE: you can use the 'chpax' or 'paxctl' utilities to control
-+	  this feature on a per file basis.
-+
-+config PAX_NOELFRELOCS
-+	bool "Disallow ELF text relocations"
-+	depends on PAX_MPROTECT && !PAX_ETEXECRELOCS && (IA64 || X86 || X86_64)
-+	help
-+	  Non-executable pages and mprotect() restrictions are effective
-+	  in preventing the introduction of new executable code into an
-+	  attacked task's address space.  There remain only two venues
-+	  for this kind of attack: if the attacker can execute already
-+	  existing code in the attacked task then he can either have it
-+	  create and mmap() a file containing his code or have it mmap()
-+	  an already existing ELF library that does not have position
-+	  independent code in it and use mprotect() on it to make it
-+	  writable and copy his code there.  While protecting against
-+	  the former approach is beyond PaX, the latter can be prevented
-+	  by having only PIC ELF libraries on one's system (which do not
-+	  need to relocate their code).  If you are sure this is your case,
-+	  then enable this option otherwise be careful as you may not even
-+	  be able to boot or log on your system (for example, some PAM
-+	  modules are erroneously compiled as non-PIC by default).
-+
-+	  NOTE: if you are using dynamic ELF executables (as suggested
-+	  when using ASLR) then you must have made sure that you linked
-+	  your files using the PIC version of crt1 (the et_dyn.tar.gz package
-+	  referenced there has already been updated to support this).
-+
-+config PAX_ETEXECRELOCS
-+	bool "Allow ELF ET_EXEC text relocations"
-+	depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
-+	default y
-+	help
-+	  On some architectures there are incorrectly created applications
-+	  that require text relocations and would not work without enabling
-+	  this option.  If you are an alpha, ia64 or parisc user, you should
-+	  enable this option and disable it once you have made sure that
-+	  none of your applications need it.
-+
-+config PAX_EMUPLT
-+	bool "Automatically emulate ELF PLT"
-+	depends on PAX_MPROTECT && (ALPHA || PARISC || PPC32 || SPARC32 || SPARC64)
-+	default y
-+	help
-+	  Enabling this option will have the kernel automatically detect
-+	  and emulate the Procedure Linkage Table entries in ELF files.
-+	  On some architectures such entries are in writable memory, and
-+	  become non-executable leading to task termination.  Therefore
-+	  it is mandatory that you enable this option on alpha, parisc,
-+	  ppc (if secure-plt is not used throughout in userland), sparc
-+	  and sparc64, otherwise your system would not even boot.
-+
-+	  NOTE: this feature *does* open up a loophole in the protection
-+	  provided by the non-executable pages, therefore the proper
-+	  solution is to modify the toolchain to produce a PLT that does
-+	  not need to be writable.
-+
-+config PAX_DLRESOLVE
-+	bool
-+	depends on PAX_EMUPLT && (SPARC32 || SPARC64)
-+	default y
-+
-+config PAX_SYSCALL
-+	bool
-+	depends on PAX_PAGEEXEC && PPC32
-+	default y
-+
-+config PAX_KERNEXEC
-+	bool "Enforce non-executable kernel pages"
-+	depends on PAX_NOEXEC && X86_32 && !EFI && !COMPAT_VDSO && X86_WP_WORKS_OK && !PARAVIRT
-+	help
-+	  This is the kernel land equivalent of PAGEEXEC and MPROTECT,
-+	  that is, enabling this option will make it harder to inject
-+	  and execute 'foreign' code in kernel memory itself.
-+
-+endmenu
-+
-+menu "Address Space Layout Randomization"
-+	depends on PAX
-+
-+config PAX_ASLR
-+	bool "Address Space Layout Randomization"
-+	depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
-+	help
-+	  Many if not most exploit techniques rely on the knowledge of
-+	  certain addresses in the attacked program.  The following options
-+	  will allow the kernel to apply a certain amount of randomization
-+	  to specific parts of the program thereby forcing an attacker to
-+	  guess them in most cases.  Any failed guess will most likely crash
-+	  the attacked program which allows the kernel to detect such attempts
-+	  and react on them.  PaX itself provides no reaction mechanisms,
-+	  instead it is strongly encouraged that you make use of Nergal's
-+	  segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
-+	  (http://www.grsecurity.net/) built-in crash detection features or
-+	  develop one yourself.
-+
-+	  By saying Y here you can choose to randomize the following areas:
-+	   - top of the task's kernel stack
-+	   - top of the task's userland stack
-+	   - base address for mmap() requests that do not specify one
-+	     (this includes all libraries)
-+	   - base address of the main executable
-+
-+	  It is strongly recommended to say Y here as address space layout
-+	  randomization has negligible impact on performance yet it provides
-+	  a very effective protection.
-+
-+	  NOTE: you can use the 'chpax' or 'paxctl' utilities to control
-+	  this feature on a per file basis.
-+
-+config PAX_RANDKSTACK
-+	bool "Randomize kernel stack base"
-+	depends on PAX_ASLR && X86_TSC && X86_32
-+	help
-+	  By saying Y here the kernel will randomize every task's kernel
-+	  stack on every system call.  This will not only force an attacker
-+	  to guess it but also prevent him from making use of possible
-+	  leaked information about it.
-+
-+	  Since the kernel stack is a rather scarce resource, randomization
-+	  may cause unexpected stack overflows, therefore you should very
-+	  carefully test your system.  Note that once enabled in the kernel
-+	  configuration, this feature cannot be disabled on a per file basis.
-+
-+config PAX_RANDUSTACK
-+	bool "Randomize user stack base"
-+	depends on PAX_ASLR
-+	help
-+	  By saying Y here the kernel will randomize every task's userland
-+	  stack.  The randomization is done in two steps where the second
-+	  one may apply a big amount of shift to the top of the stack and
-+	  cause problems for programs that want to use lots of memory (more
-+	  than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
-+	  For this reason the second step can be controlled by 'chpax' or
-+	  'paxctl' on a per file basis.
-+
-+config PAX_RANDMMAP
-+	bool "Randomize mmap() base"
-+	depends on PAX_ASLR
-+	help
-+	  By saying Y here the kernel will use a randomized base address for
-+	  mmap() requests that do not specify one themselves.  As a result
-+	  all dynamically loaded libraries will appear at random addresses
-+	  and therefore be harder to exploit by a technique where an attacker
-+	  attempts to execute library code for his purposes (e.g. spawn a
-+	  shell from an exploited program that is running at an elevated
-+	  privilege level).
-+
-+	  Furthermore, if a program is relinked as a dynamic ELF file, its
-+	  base address will be randomized as well, completing the full
-+	  randomization of the address space layout.  Attacking such programs
-+	  becomes a guess game.  You can find an example of doing this at
-+	  http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
-+	  http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
-+
-+	  NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
-+	  feature on a per file basis.
-+
-+endmenu
-+
-+menu "Miscellaneous hardening features"
-+
-+config PAX_MEMORY_SANITIZE
-+	bool "Sanitize all freed memory"
-+	help
-+	  By saying Y here the kernel will erase memory pages as soon as they
-+	  are freed.  This in turn reduces the lifetime of data stored in the
-+	  pages, making it less likely that sensitive information such as
-+	  passwords, cryptographic secrets, etc stay in memory for too long.
-+
-+	  This is especially useful for programs whose runtime is short, long
-+	  lived processes and the kernel itself benefit from this as long as
-+	  they operate on whole memory pages and ensure timely freeing of pages
-+	  that may hold sensitive information.
-+
-+	  The tradeoff is performance impact, on a single CPU system kernel
-+	  compilation sees a 3% slowdown, other systems and workloads may vary
-+	  and you are advised to test this feature on your expected workload
-+	  before deploying it.
-+
-+	  Note that this feature does not protect data stored in live pages,
-+	  e.g., process memory swapped to disk may stay there for a long time.
-+
-+config PAX_MEMORY_UDEREF
-+	bool "Prevent invalid userland pointer dereference"
-+	depends on X86_32 && !COMPAT_VDSO
-+	help
-+	  By saying Y here the kernel will be prevented from dereferencing
-+	  userland pointers in contexts where the kernel expects only kernel
-+	  pointers.  This is both a useful runtime debugging feature and a
-+	  security measure that prevents exploiting a class of kernel bugs.
-+
-+	  The tradeoff is that some virtualization solutions may experience
-+	  a huge slowdown and therefore you should not enable this feature
-+	  for kernels meant to run in such environments.  Whether a given VM
-+	  solution is affected or not is best determined by simply trying it
-+	  out, the performance impact will be obvious right on boot as this
-+	  mechanism engages from very early on.  A good rule of thumb is that
-+	  VMs running on CPUs without hardware virtualization support (i.e.,
-+	  the majority of IA-32 CPUs) will likely experience the slowdown.
-+
-+endmenu
-+
-+endmenu
-+
- config KEYS
- 	bool "Enable access key retention support"
- 	help
---- a/sound/core/oss/pcm_oss.c
-+++ b/sound/core/oss/pcm_oss.c
-@@ -2880,8 +2880,8 @@ static void snd_pcm_oss_proc_done(struct
- 	}
- }
- #else /* !CONFIG_SND_VERBOSE_PROCFS */
--#define snd_pcm_oss_proc_init(pcm)
--#define snd_pcm_oss_proc_done(pcm)
-+#define snd_pcm_oss_proc_init(pcm) do {} while (0)
-+#define snd_pcm_oss_proc_done(pcm) do {} while (0)
- #endif /* CONFIG_SND_VERBOSE_PROCFS */
- 
- /*
---- a/sound/core/seq/seq_lock.h
-+++ b/sound/core/seq/seq_lock.h
-@@ -23,10 +23,10 @@ void snd_use_lock_sync_helper(snd_use_lo
- #else /* SMP || CONFIG_SND_DEBUG */
- 
- typedef spinlock_t snd_use_lock_t;	/* dummy */
--#define snd_use_lock_init(lockp) /**/
--#define snd_use_lock_use(lockp) /**/
--#define snd_use_lock_free(lockp) /**/
--#define snd_use_lock_sync(lockp) /**/
-+#define snd_use_lock_init(lockp) do {} while (0)
-+#define snd_use_lock_use(lockp) do {} while (0)
-+#define snd_use_lock_free(lockp) do {} while (0)
-+#define snd_use_lock_sync(lockp) do {} while (0)
- 
- #endif /* SMP || CONFIG_SND_DEBUG */
- 
---- a/sound/pci/ac97/ac97_patch.c
-+++ b/sound/pci/ac97/ac97_patch.c
-@@ -1415,7 +1415,7 @@ static const struct snd_ac97_res_table a
- 	{ AC97_VIDEO, 0x9f1f },
- 	{ AC97_AUX, 0x9f1f },
- 	{ AC97_PCM, 0x9f1f },
--	{ } /* terminator */
-+	{ 0, 0 } /* terminator */
- };
- 
- static int patch_ad1819(struct snd_ac97 * ac97)
-@@ -3489,7 +3489,7 @@ static struct snd_ac97_res_table lm4550_
- 	{ AC97_AUX, 0x1f1f },
- 	{ AC97_PCM, 0x1f1f },
- 	{ AC97_REC_GAIN, 0x0f0f },
--	{ } /* terminator */
-+	{ 0, 0 } /* terminator */
- };
- 
- static int patch_lm4550(struct snd_ac97 *ac97)
---- a/sound/pci/ens1370.c
-+++ b/sound/pci/ens1370.c
-@@ -453,7 +453,7 @@ static struct pci_device_id snd_audiopci
- 	{ 0x1274, 0x5880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },	/* ES1373 - CT5880 */
- 	{ 0x1102, 0x8938, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },	/* Ectiva EV1938 */
- #endif
--	{ 0, }
-+	{ 0, 0, 0, 0, 0, 0, 0 }
- };
- 
- MODULE_DEVICE_TABLE(pci, snd_audiopci_ids);
---- a/sound/pci/intel8x0.c
-+++ b/sound/pci/intel8x0.c
-@@ -436,7 +436,7 @@ static struct pci_device_id snd_intel8x0
- 	{ 0x1022, 0x746d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL },	/* AMD8111 */
- 	{ 0x1022, 0x7445, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL },	/* AMD768 */
- 	{ 0x10b9, 0x5455, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_ALI },   /* Ali5455 */
--	{ 0, }
-+	{ 0, 0, 0, 0, 0, 0, 0 }
- };
- 
- MODULE_DEVICE_TABLE(pci, snd_intel8x0_ids);
-@@ -2044,7 +2044,7 @@ static struct ac97_quirk ac97_quirks[] _
- 		.type = AC97_TUNE_HP_ONLY
- 	},
- #endif
--	{ } /* terminator */
-+	{ 0, 0, 0, 0, NULL, 0 } /* terminator */
- };
- 
- static int __devinit snd_intel8x0_mixer(struct intel8x0 *chip, int ac97_clock,
---- a/sound/pci/intel8x0m.c
-+++ b/sound/pci/intel8x0m.c
-@@ -240,7 +240,7 @@ static struct pci_device_id snd_intel8x0
- 	{ 0x1022, 0x746d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_INTEL },	/* AMD8111 */
- 	{ 0x10b9, 0x5455, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DEVICE_ALI },   /* Ali5455 */
- #endif
--	{ 0, }
-+	{ 0, 0, 0, 0, 0, 0, 0 }
- };
- 
- MODULE_DEVICE_TABLE(pci, snd_intel8x0m_ids);
-@@ -1261,7 +1261,7 @@ static struct shortname_table {
- 	{ 0x5455, "ALi M5455" },
- 	{ 0x746d, "AMD AMD8111" },
- #endif
--	{ 0 },
-+	{ 0, NULL },
- };
- 
- static int __devinit snd_intel8x0m_probe(struct pci_dev *pci,

-- 
gentoo-commits@lists.gentoo.org mailing list



^ permalink raw reply	[flat|nested] only message in thread

only message in thread, other threads:[~2008-03-22 18:38 UTC | newest]

Thread overview: (only message) (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2008-03-22 18:37 [gentoo-commits] linux-patches r1270 - hardened/2.6/trunk/2.6.23 Christian Heim (phreak)

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox