From: "Anthony G. Basile" <blueness@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/hardened-patchset:master commit in: 3.8.2/
Date: Wed,  6 Mar 2013 12:29:53 +0000 (UTC)
Message-ID: <1362572950.40b9f3d9591cf0d15f06b79fd94b43f062293a0d.blueness@gentoo>

commit:     40b9f3d9591cf0d15f06b79fd94b43f062293a0d
Author:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
AuthorDate: Wed Mar  6 12:29:10 2013 +0000
Commit:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
CommitDate: Wed Mar  6 12:29:10 2013 +0000
URL:        http://git.overlays.gentoo.org/gitweb/?p=proj/hardened-patchset.git;a=commit;h=40b9f3d9

Correct 3.8.2, add bump from 3.8.1

---
 3.8.2/0000_README                                  |    6 +-
 3.8.2/1001_linux-3.8.2.patch                       | 3093 ++++++++++++++++++++
 ...4420_grsecurity-2.9.1-3.8.2-201303041742.patch} |  704 ++---
 3 files changed, 3412 insertions(+), 391 deletions(-)

diff --git a/3.8.2/0000_README b/3.8.2/0000_README
index 517c0e6..4525042 100644
--- a/3.8.2/0000_README
+++ b/3.8.2/0000_README
@@ -2,7 +2,11 @@ README
 -----------------------------------------------------------------------------
 Individual Patch Descriptions:
 -----------------------------------------------------------------------------
-Patch:	4420_grsecurity-2.9.1-3.8.1-201303012255.patch
+Patch:	1001_linux-3.8.1.patch
+From:	http://www.kernel.org
+Desc:	Linux 3.8.1
+
+Patch:	4420_grsecurity-2.9.1-3.8.2-201303041742.patch
 From:	http://www.grsecurity.net
 Desc:	hardened-sources base patch from upstream grsecurity
 

diff --git a/3.8.2/1001_linux-3.8.2.patch b/3.8.2/1001_linux-3.8.2.patch
new file mode 100644
index 0000000..0952288
--- /dev/null
+++ b/3.8.2/1001_linux-3.8.2.patch
@@ -0,0 +1,3093 @@
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 6c72381..986614d 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -564,6 +564,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ 			UART at the specified I/O port or MMIO address,
+ 			switching to the matching ttyS device later.  The
+ 			options are the same as for ttyS, above.
++		hvc<n>	Use the hypervisor console device <n>. This is for
++			both Xen and PowerPC hypervisors.
+ 
+                 If the device connected to the port is not a TTY but a braille
+                 device, prepend "brl," before the device type, for instance
+@@ -754,6 +756,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ 
+ 	earlyprintk=	[X86,SH,BLACKFIN]
+ 			earlyprintk=vga
++			earlyprintk=xen
+ 			earlyprintk=serial[,ttySn[,baudrate]]
+ 			earlyprintk=ttySn[,baudrate]
+ 			earlyprintk=dbgp[debugController#]
+@@ -771,6 +774,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ 			The VGA output is eventually overwritten by the real
+ 			console.
+ 
++			The xen output can only be used by Xen PV guests.
++
+ 	ekgdboc=	[X86,KGDB] Allow early kernel console debugging
+ 			ekgdboc=kbd
+ 
+diff --git a/Makefile b/Makefile
+index 746c856..20d5318 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 8
+-SUBLEVEL = 1
++SUBLEVEL = 2
+ EXTRAVERSION =
+ NAME = Unicycling Gorilla
+ 
+diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
+index f8fa411..c205035 100644
+--- a/arch/x86/boot/compressed/eboot.c
++++ b/arch/x86/boot/compressed/eboot.c
+@@ -19,23 +19,28 @@
+ 
+ static efi_system_table_t *sys_table;
+ 
++static void efi_char16_printk(efi_char16_t *str)
++{
++	struct efi_simple_text_output_protocol *out;
++
++	out = (struct efi_simple_text_output_protocol *)sys_table->con_out;
++	efi_call_phys2(out->output_string, out, str);
++}
++
+ static void efi_printk(char *str)
+ {
+ 	char *s8;
+ 
+ 	for (s8 = str; *s8; s8++) {
+-		struct efi_simple_text_output_protocol *out;
+ 		efi_char16_t ch[2] = { 0 };
+ 
+ 		ch[0] = *s8;
+-		out = (struct efi_simple_text_output_protocol *)sys_table->con_out;
+-
+ 		if (*s8 == '\n') {
+ 			efi_char16_t nl[2] = { '\r', 0 };
+-			efi_call_phys2(out->output_string, out, nl);
++			efi_char16_printk(nl);
+ 		}
+ 
+-		efi_call_phys2(out->output_string, out, ch);
++		efi_char16_printk(ch);
+ 	}
+ }
+ 
+@@ -709,7 +714,12 @@ static efi_status_t handle_ramdisks(efi_loaded_image_t *image,
+ 			if ((u8 *)p >= (u8 *)filename_16 + sizeof(filename_16))
+ 				break;
+ 
+-			*p++ = *str++;
++			if (*str == '/') {
++				*p++ = '\\';
++				*str++;
++			} else {
++				*p++ = *str++;
++			}
+ 		}
+ 
+ 		*p = '\0';
+@@ -737,7 +747,9 @@ static efi_status_t handle_ramdisks(efi_loaded_image_t *image,
+ 		status = efi_call_phys5(fh->open, fh, &h, filename_16,
+ 					EFI_FILE_MODE_READ, (u64)0);
+ 		if (status != EFI_SUCCESS) {
+-			efi_printk("Failed to open initrd file\n");
++			efi_printk("Failed to open initrd file: ");
++			efi_char16_printk(filename_16);
++			efi_printk("\n");
+ 			goto close_handles;
+ 		}
+ 
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index b994cc8..cbf5121 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -131,7 +131,7 @@ static int __init parse_lapic(char *arg)
+ {
+ 	if (config_enabled(CONFIG_X86_32) && !arg)
+ 		force_enable_local_apic = 1;
+-	else if (!strncmp(arg, "notscdeadline", 13))
++	else if (arg && !strncmp(arg, "notscdeadline", 13))
+ 		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
+ 	return 0;
+ }
+diff --git a/arch/x86/kernel/head.c b/arch/x86/kernel/head.c
+index 48d9d4e..992f442 100644
+--- a/arch/x86/kernel/head.c
++++ b/arch/x86/kernel/head.c
+@@ -5,8 +5,6 @@
+ #include <asm/setup.h>
+ #include <asm/bios_ebda.h>
+ 
+-#define BIOS_LOWMEM_KILOBYTES 0x413
+-
+ /*
+  * The BIOS places the EBDA/XBDA at the top of conventional
+  * memory, and usually decreases the reported amount of
+@@ -16,17 +14,30 @@
+  * chipset: reserve a page before VGA to prevent PCI prefetch
+  * into it (errata #56). Usually the page is reserved anyways,
+  * unless you have no PS/2 mouse plugged in.
++ *
++ * This functions is deliberately very conservative.  Losing
++ * memory in the bottom megabyte is rarely a problem, as long
++ * as we have enough memory to install the trampoline.  Using
++ * memory that is in use by the BIOS or by some DMA device
++ * the BIOS didn't shut down *is* a big problem.
+  */
++
++#define BIOS_LOWMEM_KILOBYTES	0x413
++#define LOWMEM_CAP		0x9f000U	/* Absolute maximum */
++#define INSANE_CUTOFF		0x20000U	/* Less than this = insane */
++
+ void __init reserve_ebda_region(void)
+ {
+ 	unsigned int lowmem, ebda_addr;
+ 
+-	/* To determine the position of the EBDA and the */
+-	/* end of conventional memory, we need to look at */
+-	/* the BIOS data area. In a paravirtual environment */
+-	/* that area is absent. We'll just have to assume */
+-	/* that the paravirt case can handle memory setup */
+-	/* correctly, without our help. */
++	/*
++	 * To determine the position of the EBDA and the
++	 * end of conventional memory, we need to look at
++	 * the BIOS data area. In a paravirtual environment
++	 * that area is absent. We'll just have to assume
++	 * that the paravirt case can handle memory setup
++	 * correctly, without our help.
++	 */
+ 	if (paravirt_enabled())
+ 		return;
+ 
+@@ -37,19 +48,23 @@ void __init reserve_ebda_region(void)
+ 	/* start of EBDA area */
+ 	ebda_addr = get_bios_ebda();
+ 
+-	/* Fixup: bios puts an EBDA in the top 64K segment */
+-	/* of conventional memory, but does not adjust lowmem. */
+-	if ((lowmem - ebda_addr) <= 0x10000)
+-		lowmem = ebda_addr;
++	/*
++	 * Note: some old Dells seem to need 4k EBDA without
++	 * reporting so, so just consider the memory above 0x9f000
++	 * to be off limits (bugzilla 2990).
++	 */
++
++	/* If the EBDA address is below 128K, assume it is bogus */
++	if (ebda_addr < INSANE_CUTOFF)
++		ebda_addr = LOWMEM_CAP;
+ 
+-	/* Fixup: bios does not report an EBDA at all. */
+-	/* Some old Dells seem to need 4k anyhow (bugzilla 2990) */
+-	if ((ebda_addr == 0) && (lowmem >= 0x9f000))
+-		lowmem = 0x9f000;
++	/* If lowmem is less than 128K, assume it is bogus */
++	if (lowmem < INSANE_CUTOFF)
++		lowmem = LOWMEM_CAP;
+ 
+-	/* Paranoia: should never happen, but... */
+-	if ((lowmem == 0) || (lowmem >= 0x100000))
+-		lowmem = 0x9f000;
++	/* Use the lower of the lowmem and EBDA markers as the cutoff */
++	lowmem = min(lowmem, ebda_addr);
++	lowmem = min(lowmem, LOWMEM_CAP); /* Absolute cap */
+ 
+ 	/* reserve all memory between lowmem and the 1MB mark */
+ 	memblock_reserve(lowmem, 0x100000 - lowmem);
+diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
+index 928bf83..e2cd38f 100644
+--- a/arch/x86/platform/efi/efi.c
++++ b/arch/x86/platform/efi/efi.c
+@@ -85,9 +85,10 @@ int efi_enabled(int facility)
+ }
+ EXPORT_SYMBOL(efi_enabled);
+ 
++static bool disable_runtime = false;
+ static int __init setup_noefi(char *arg)
+ {
+-	clear_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility);
++	disable_runtime = true;
+ 	return 0;
+ }
+ early_param("noefi", setup_noefi);
+@@ -734,7 +735,7 @@ void __init efi_init(void)
+ 	if (!efi_is_native())
+ 		pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");
+ 	else {
+-		if (efi_runtime_init())
++		if (disable_runtime || efi_runtime_init())
+ 			return;
+ 		set_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility);
+ 	}
+diff --git a/block/genhd.c b/block/genhd.c
+index 3993ebf..7dcfdd8 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -25,7 +25,7 @@ static DEFINE_MUTEX(block_class_lock);
+ struct kobject *block_depr;
+ 
+ /* for extended dynamic devt allocation, currently only one major is used */
+-#define MAX_EXT_DEVT		(1 << MINORBITS)
++#define NR_EXT_DEVT		(1 << MINORBITS)
+ 
+ /* For extended devt allocation.  ext_devt_mutex prevents look up
+  * results from going away underneath its user.
+@@ -422,17 +422,18 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
+ 	do {
+ 		if (!idr_pre_get(&ext_devt_idr, GFP_KERNEL))
+ 			return -ENOMEM;
++		mutex_lock(&ext_devt_mutex);
+ 		rc = idr_get_new(&ext_devt_idr, part, &idx);
++		if (!rc && idx >= NR_EXT_DEVT) {
++			idr_remove(&ext_devt_idr, idx);
++			rc = -EBUSY;
++		}
++		mutex_unlock(&ext_devt_mutex);
+ 	} while (rc == -EAGAIN);
+ 
+ 	if (rc)
+ 		return rc;
+ 
+-	if (idx > MAX_EXT_DEVT) {
+-		idr_remove(&ext_devt_idr, idx);
+-		return -EBUSY;
+-	}
+-
+ 	*devt = MKDEV(BLOCK_EXT_MAJOR, blk_mangle_minor(idx));
+ 	return 0;
+ }
+@@ -646,7 +647,6 @@ void del_gendisk(struct gendisk *disk)
+ 	disk_part_iter_exit(&piter);
+ 
+ 	invalidate_partition(disk, 0);
+-	blk_free_devt(disk_to_dev(disk)->devt);
+ 	set_capacity(disk, 0);
+ 	disk->flags &= ~GENHD_FL_UP;
+ 
+@@ -664,6 +664,7 @@ void del_gendisk(struct gendisk *disk)
+ 	if (!sysfs_deprecated)
+ 		sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
+ 	device_del(disk_to_dev(disk));
++	blk_free_devt(disk_to_dev(disk)->devt);
+ }
+ EXPORT_SYMBOL(del_gendisk);
+ 
+diff --git a/block/partition-generic.c b/block/partition-generic.c
+index f1d1451..1cb4dec 100644
+--- a/block/partition-generic.c
++++ b/block/partition-generic.c
+@@ -249,11 +249,11 @@ void delete_partition(struct gendisk *disk, int partno)
+ 	if (!part)
+ 		return;
+ 
+-	blk_free_devt(part_devt(part));
+ 	rcu_assign_pointer(ptbl->part[partno], NULL);
+ 	rcu_assign_pointer(ptbl->last_lookup, NULL);
+ 	kobject_put(part->holder_dir);
+ 	device_del(part_to_dev(part));
++	blk_free_devt(part_devt(part));
+ 
+ 	hd_struct_put(part);
+ }
+diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
+index 38c5078..f5ae996 100644
+--- a/drivers/acpi/Kconfig
++++ b/drivers/acpi/Kconfig
+@@ -268,7 +268,8 @@ config ACPI_CUSTOM_DSDT
+ 	default ACPI_CUSTOM_DSDT_FILE != ""
+ 
+ config ACPI_INITRD_TABLE_OVERRIDE
+-	bool "ACPI tables can be passed via uncompressed cpio in initrd"
++	bool "ACPI tables override via initrd"
++	depends on BLK_DEV_INITRD && X86
+ 	default n
+ 	help
+ 	  This option provides functionality to override arbitrary ACPI tables
+diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
+index 2fcc67d..df85051 100644
+--- a/drivers/acpi/sleep.c
++++ b/drivers/acpi/sleep.c
+@@ -177,6 +177,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
+ 	},
+ 	{
+ 	.callback = init_nvs_nosave,
++	.ident = "Sony Vaio VGN-FW41E_H",
++	.matches = {
++		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
++		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW41E_H"),
++		},
++	},
++	{
++	.callback = init_nvs_nosave,
+ 	.ident = "Sony Vaio VGN-FW21E",
+ 	.matches = {
+ 		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 4979127..72e3e12 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -265,6 +265,30 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ 	{ PCI_VDEVICE(INTEL, 0x9c07), board_ahci }, /* Lynx Point-LP RAID */
+ 	{ PCI_VDEVICE(INTEL, 0x9c0e), board_ahci }, /* Lynx Point-LP RAID */
+ 	{ PCI_VDEVICE(INTEL, 0x9c0f), board_ahci }, /* Lynx Point-LP RAID */
++	{ PCI_VDEVICE(INTEL, 0x1f22), board_ahci }, /* Avoton AHCI */
++	{ PCI_VDEVICE(INTEL, 0x1f23), board_ahci }, /* Avoton AHCI */
++	{ PCI_VDEVICE(INTEL, 0x1f24), board_ahci }, /* Avoton RAID */
++	{ PCI_VDEVICE(INTEL, 0x1f25), board_ahci }, /* Avoton RAID */
++	{ PCI_VDEVICE(INTEL, 0x1f26), board_ahci }, /* Avoton RAID */
++	{ PCI_VDEVICE(INTEL, 0x1f27), board_ahci }, /* Avoton RAID */
++	{ PCI_VDEVICE(INTEL, 0x1f2e), board_ahci }, /* Avoton RAID */
++	{ PCI_VDEVICE(INTEL, 0x1f2f), board_ahci }, /* Avoton RAID */
++	{ PCI_VDEVICE(INTEL, 0x1f32), board_ahci }, /* Avoton AHCI */
++	{ PCI_VDEVICE(INTEL, 0x1f33), board_ahci }, /* Avoton AHCI */
++	{ PCI_VDEVICE(INTEL, 0x1f34), board_ahci }, /* Avoton RAID */
++	{ PCI_VDEVICE(INTEL, 0x1f35), board_ahci }, /* Avoton RAID */
++	{ PCI_VDEVICE(INTEL, 0x1f36), board_ahci }, /* Avoton RAID */
++	{ PCI_VDEVICE(INTEL, 0x1f37), board_ahci }, /* Avoton RAID */
++	{ PCI_VDEVICE(INTEL, 0x1f3e), board_ahci }, /* Avoton RAID */
++	{ PCI_VDEVICE(INTEL, 0x1f3f), board_ahci }, /* Avoton RAID */
++	{ PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
++	{ PCI_VDEVICE(INTEL, 0x8d04), board_ahci }, /* Wellsburg RAID */
++	{ PCI_VDEVICE(INTEL, 0x8d06), board_ahci }, /* Wellsburg RAID */
++	{ PCI_VDEVICE(INTEL, 0x8d0e), board_ahci }, /* Wellsburg RAID */
++	{ PCI_VDEVICE(INTEL, 0x8d62), board_ahci }, /* Wellsburg AHCI */
++	{ PCI_VDEVICE(INTEL, 0x8d64), board_ahci }, /* Wellsburg RAID */
++	{ PCI_VDEVICE(INTEL, 0x8d66), board_ahci }, /* Wellsburg RAID */
++	{ PCI_VDEVICE(INTEL, 0x8d6e), board_ahci }, /* Wellsburg RAID */
+ 
+ 	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
+ 	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
+index 174eca6..d2ba439 100644
+--- a/drivers/ata/ata_piix.c
++++ b/drivers/ata/ata_piix.c
+@@ -317,6 +317,23 @@ static const struct pci_device_id piix_pci_tbl[] = {
+ 	{ 0x8086, 0x9c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ 	/* SATA Controller IDE (DH89xxCC) */
+ 	{ 0x8086, 0x2326, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
++	/* SATA Controller IDE (Avoton) */
++	{ 0x8086, 0x1f20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
++	/* SATA Controller IDE (Avoton) */
++	{ 0x8086, 0x1f21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
++	/* SATA Controller IDE (Avoton) */
++	{ 0x8086, 0x1f30, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
++	/* SATA Controller IDE (Avoton) */
++	{ 0x8086, 0x1f31, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
++	/* SATA Controller IDE (Wellsburg) */
++	{ 0x8086, 0x8d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
++	/* SATA Controller IDE (Wellsburg) */
++	{ 0x8086, 0x8d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
++	/* SATA Controller IDE (Wellsburg) */
++	{ 0x8086, 0x8d60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
++	/* SATA Controller IDE (Wellsburg) */
++	{ 0x8086, 0x8d68, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
++
+ 	{ }	/* terminate list */
+ };
+ 
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index 043ddcc..eb591fb 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -595,12 +595,20 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
+ 		struct request sreq;
+ 
+ 		dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
++		if (!nbd->sock)
++			return -EINVAL;
+ 
++		mutex_unlock(&nbd->tx_lock);
++		fsync_bdev(bdev);
++		mutex_lock(&nbd->tx_lock);
+ 		blk_rq_init(NULL, &sreq);
+ 		sreq.cmd_type = REQ_TYPE_SPECIAL;
+ 		nbd_cmd(&sreq) = NBD_CMD_DISC;
++
++		/* Check again after getting mutex back.  */
+ 		if (!nbd->sock)
+ 			return -EINVAL;
++
+ 		nbd_send_req(nbd, &sreq);
+                 return 0;
+ 	}
+@@ -614,6 +622,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
+ 		nbd_clear_que(nbd);
+ 		BUG_ON(!list_empty(&nbd->queue_head));
+ 		BUG_ON(!list_empty(&nbd->waiting_queue));
++		kill_bdev(bdev);
+ 		if (file)
+ 			fput(file);
+ 		return 0;
+@@ -702,6 +711,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
+ 		nbd->file = NULL;
+ 		nbd_clear_que(nbd);
+ 		dev_warn(disk_to_dev(nbd->disk), "queue cleared\n");
++		kill_bdev(bdev);
+ 		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
+ 		if (file)
+ 			fput(file);
+diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
+index 5ac841f..de1f319 100644
+--- a/drivers/block/xen-blkback/blkback.c
++++ b/drivers/block/xen-blkback/blkback.c
+@@ -46,6 +46,7 @@
+ #include <xen/xen.h>
+ #include <asm/xen/hypervisor.h>
+ #include <asm/xen/hypercall.h>
++#include <xen/balloon.h>
+ #include "common.h"
+ 
+ /*
+@@ -239,6 +240,7 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num)
+ 			ret = gnttab_unmap_refs(unmap, NULL, pages,
+ 				segs_to_unmap);
+ 			BUG_ON(ret);
++			free_xenballooned_pages(segs_to_unmap, pages);
+ 			segs_to_unmap = 0;
+ 		}
+ 
+@@ -527,8 +529,8 @@ static int xen_blkbk_map(struct blkif_request *req,
+ 				GFP_KERNEL);
+ 			if (!persistent_gnt)
+ 				return -ENOMEM;
+-			persistent_gnt->page = alloc_page(GFP_KERNEL);
+-			if (!persistent_gnt->page) {
++			if (alloc_xenballooned_pages(1, &persistent_gnt->page,
++			    false)) {
+ 				kfree(persistent_gnt);
+ 				return -ENOMEM;
+ 			}
+@@ -879,7 +881,6 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
+ 		goto fail_response;
+ 	}
+ 
+-	preq.dev           = req->u.rw.handle;
+ 	preq.sector_number = req->u.rw.sector_number;
+ 	preq.nr_sects      = 0;
+ 
+diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
+index 6398072..5e237f6 100644
+--- a/drivers/block/xen-blkback/xenbus.c
++++ b/drivers/block/xen-blkback/xenbus.c
+@@ -367,6 +367,7 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
+ 		be->blkif = NULL;
+ 	}
+ 
++	kfree(be->mode);
+ 	kfree(be);
+ 	dev_set_drvdata(&dev->dev, NULL);
+ 	return 0;
+@@ -502,6 +503,7 @@ static void backend_changed(struct xenbus_watch *watch,
+ 		= container_of(watch, struct backend_info, backend_watch);
+ 	struct xenbus_device *dev = be->dev;
+ 	int cdrom = 0;
++	unsigned long handle;
+ 	char *device_type;
+ 
+ 	DPRINTK("");
+@@ -521,10 +523,10 @@ static void backend_changed(struct xenbus_watch *watch,
+ 		return;
+ 	}
+ 
+-	if ((be->major || be->minor) &&
+-	    ((be->major != major) || (be->minor != minor))) {
+-		pr_warn(DRV_PFX "changing physical device (from %x:%x to %x:%x) not supported.\n",
+-			be->major, be->minor, major, minor);
++	if (be->major | be->minor) {
++		if (be->major != major || be->minor != minor)
++			pr_warn(DRV_PFX "changing physical device (from %x:%x to %x:%x) not supported.\n",
++				be->major, be->minor, major, minor);
+ 		return;
+ 	}
+ 
+@@ -542,36 +544,33 @@ static void backend_changed(struct xenbus_watch *watch,
+ 		kfree(device_type);
+ 	}
+ 
+-	if (be->major == 0 && be->minor == 0) {
+-		/* Front end dir is a number, which is used as the handle. */
+-
+-		char *p = strrchr(dev->otherend, '/') + 1;
+-		long handle;
+-		err = strict_strtoul(p, 0, &handle);
+-		if (err)
+-			return;
++	/* Front end dir is a number, which is used as the handle. */
++	err = strict_strtoul(strrchr(dev->otherend, '/') + 1, 0, &handle);
++	if (err)
++		return;
+ 
+-		be->major = major;
+-		be->minor = minor;
++	be->major = major;
++	be->minor = minor;
+ 
+-		err = xen_vbd_create(be->blkif, handle, major, minor,
+-				 (NULL == strchr(be->mode, 'w')), cdrom);
+-		if (err) {
+-			be->major = 0;
+-			be->minor = 0;
+-			xenbus_dev_fatal(dev, err, "creating vbd structure");
+-			return;
+-		}
++	err = xen_vbd_create(be->blkif, handle, major, minor,
++			     !strchr(be->mode, 'w'), cdrom);
+ 
++	if (err)
++		xenbus_dev_fatal(dev, err, "creating vbd structure");
++	else {
+ 		err = xenvbd_sysfs_addif(dev);
+ 		if (err) {
+ 			xen_vbd_free(&be->blkif->vbd);
+-			be->major = 0;
+-			be->minor = 0;
+ 			xenbus_dev_fatal(dev, err, "creating sysfs entries");
+-			return;
+ 		}
++	}
+ 
++	if (err) {
++		kfree(be->mode);
++		be->mode = NULL;
++		be->major = 0;
++		be->minor = 0;
++	} else {
+ 		/* We're potentially connected now */
+ 		xen_update_blkif_status(be->blkif);
+ 	}
+diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
+index 11043c1..c3dae2e 100644
+--- a/drivers/block/xen-blkfront.c
++++ b/drivers/block/xen-blkfront.c
+@@ -791,7 +791,7 @@ static void blkif_restart_queue(struct work_struct *work)
+ static void blkif_free(struct blkfront_info *info, int suspend)
+ {
+ 	struct llist_node *all_gnts;
+-	struct grant *persistent_gnt;
++	struct grant *persistent_gnt, *tmp;
+ 	struct llist_node *n;
+ 
+ 	/* Prevent new requests being issued until we fix things up. */
+@@ -805,10 +805,17 @@ static void blkif_free(struct blkfront_info *info, int suspend)
+ 	/* Remove all persistent grants */
+ 	if (info->persistent_gnts_c) {
+ 		all_gnts = llist_del_all(&info->persistent_gnts);
+-		llist_for_each_entry_safe(persistent_gnt, n, all_gnts, node) {
++		persistent_gnt = llist_entry(all_gnts, typeof(*(persistent_gnt)), node);
++		while (persistent_gnt) {
+ 			gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
+ 			__free_page(pfn_to_page(persistent_gnt->pfn));
+-			kfree(persistent_gnt);
++			tmp = persistent_gnt;
++			n = persistent_gnt->node.next;
++			if (n)
++				persistent_gnt = llist_entry(n, typeof(*(persistent_gnt)), node);
++			else
++				persistent_gnt = NULL;
++			kfree(tmp);
+ 		}
+ 		info->persistent_gnts_c = 0;
+ 	}
+diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
+index 3873d53..af3e8aa 100644
+--- a/drivers/firewire/core-device.c
++++ b/drivers/firewire/core-device.c
+@@ -1020,6 +1020,10 @@ static void fw_device_init(struct work_struct *work)
+ 	ret = idr_pre_get(&fw_device_idr, GFP_KERNEL) ?
+ 	      idr_get_new(&fw_device_idr, device, &minor) :
+ 	      -ENOMEM;
++	if (minor >= 1 << MINORBITS) {
++		idr_remove(&fw_device_idr, minor);
++		minor = -ENOSPC;
++	}
+ 	up_write(&fw_device_rwsem);
+ 
+ 	if (ret < 0)
+diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
+index f5596db..bcb201c 100644
+--- a/drivers/firmware/efivars.c
++++ b/drivers/firmware/efivars.c
+@@ -79,6 +79,7 @@
+ #include <linux/device.h>
+ #include <linux/slab.h>
+ #include <linux/pstore.h>
++#include <linux/ctype.h>
+ 
+ #include <linux/fs.h>
+ #include <linux/ramfs.h>
+@@ -900,6 +901,48 @@ static struct inode *efivarfs_get_inode(struct super_block *sb,
+ 	return inode;
+ }
+ 
++/*
++ * Return true if 'str' is a valid efivarfs filename of the form,
++ *
++ *	VariableName-12345678-1234-1234-1234-1234567891bc
++ */
++static bool efivarfs_valid_name(const char *str, int len)
++{
++	static const char dashes[GUID_LEN] = {
++		[8] = 1, [13] = 1, [18] = 1, [23] = 1
++	};
++	const char *s = str + len - GUID_LEN;
++	int i;
++
++	/*
++	 * We need a GUID, plus at least one letter for the variable name,
++	 * plus the '-' separator
++	 */
++	if (len < GUID_LEN + 2)
++		return false;
++
++	/* GUID should be right after the first '-' */
++	if (s - 1 != strchr(str, '-'))
++		return false;
++
++	/*
++	 * Validate that 's' is of the correct format, e.g.
++	 *
++	 *	12345678-1234-1234-1234-123456789abc
++	 */
++	for (i = 0; i < GUID_LEN; i++) {
++		if (dashes[i]) {
++			if (*s++ != '-')
++				return false;
++		} else {
++			if (!isxdigit(*s++))
++				return false;
++		}
++	}
++
++	return true;
++}
++
+ static void efivarfs_hex_to_guid(const char *str, efi_guid_t *guid)
+ {
+ 	guid->b[0] = hex_to_bin(str[6]) << 4 | hex_to_bin(str[7]);
+@@ -928,11 +971,7 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
+ 	struct efivar_entry *var;
+ 	int namelen, i = 0, err = 0;
+ 
+-	/*
+-	 * We need a GUID, plus at least one letter for the variable name,
+-	 * plus the '-' separator
+-	 */
+-	if (dentry->d_name.len < GUID_LEN + 2)
++	if (!efivarfs_valid_name(dentry->d_name.name, dentry->d_name.len))
+ 		return -EINVAL;
+ 
+ 	inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0);
+@@ -1004,6 +1043,84 @@ static int efivarfs_unlink(struct inode *dir, struct dentry *dentry)
+ 	return -EINVAL;
+ };
+ 
++/*
++ * Compare two efivarfs file names.
++ *
++ * An efivarfs filename is composed of two parts,
++ *
++ *	1. A case-sensitive variable name
++ *	2. A case-insensitive GUID
++ *
++ * So we need to perform a case-sensitive match on part 1 and a
++ * case-insensitive match on part 2.
++ */
++static int efivarfs_d_compare(const struct dentry *parent, const struct inode *pinode,
++			      const struct dentry *dentry, const struct inode *inode,
++			      unsigned int len, const char *str,
++			      const struct qstr *name)
++{
++	int guid = len - GUID_LEN;
++
++	if (name->len != len)
++		return 1;
++
++	/* Case-sensitive compare for the variable name */
++	if (memcmp(str, name->name, guid))
++		return 1;
++
++	/* Case-insensitive compare for the GUID */
++	return strncasecmp(name->name + guid, str + guid, GUID_LEN);
++}
++
++static int efivarfs_d_hash(const struct dentry *dentry,
++			   const struct inode *inode, struct qstr *qstr)
++{
++	unsigned long hash = init_name_hash();
++	const unsigned char *s = qstr->name;
++	unsigned int len = qstr->len;
++
++	if (!efivarfs_valid_name(s, len))
++		return -EINVAL;
++
++	while (len-- > GUID_LEN)
++		hash = partial_name_hash(*s++, hash);
++
++	/* GUID is case-insensitive. */
++	while (len--)
++		hash = partial_name_hash(tolower(*s++), hash);
++
++	qstr->hash = end_name_hash(hash);
++	return 0;
++}
++
++/*
++ * Retaining negative dentries for an in-memory filesystem just wastes
++ * memory and lookup time: arrange for them to be deleted immediately.
++ */
++static int efivarfs_delete_dentry(const struct dentry *dentry)
++{
++	return 1;
++}
++
++static struct dentry_operations efivarfs_d_ops = {
++	.d_compare = efivarfs_d_compare,
++	.d_hash = efivarfs_d_hash,
++	.d_delete = efivarfs_delete_dentry,
++};
++
++static struct dentry *efivarfs_alloc_dentry(struct dentry *parent, char *name)
++{
++	struct qstr q;
++
++	q.name = name;
++	q.len = strlen(name);
++
++	if (efivarfs_d_hash(NULL, NULL, &q))
++		return NULL;
++
++	return d_alloc(parent, &q);
++}
++
+ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
+ {
+ 	struct inode *inode = NULL;
+@@ -1019,6 +1136,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
+ 	sb->s_blocksize_bits    = PAGE_CACHE_SHIFT;
+ 	sb->s_magic             = EFIVARFS_MAGIC;
+ 	sb->s_op                = &efivarfs_ops;
++	sb->s_d_op		= &efivarfs_d_ops;
+ 	sb->s_time_gran         = 1;
+ 
+ 	inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0);
+@@ -1059,7 +1177,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
+ 		if (!inode)
+ 			goto fail_name;
+ 
+-		dentry = d_alloc_name(root, name);
++		dentry = efivarfs_alloc_dentry(root, name);
+ 		if (!dentry)
+ 			goto fail_inode;
+ 
+@@ -1109,8 +1227,20 @@ static struct file_system_type efivarfs_type = {
+ 	.kill_sb = efivarfs_kill_sb,
+ };
+ 
++/*
++ * Handle negative dentry.
++ */
++static struct dentry *efivarfs_lookup(struct inode *dir, struct dentry *dentry,
++				      unsigned int flags)
++{
++	if (dentry->d_name.len > NAME_MAX)
++		return ERR_PTR(-ENAMETOOLONG);
++	d_add(dentry, NULL);
++	return NULL;
++}
++
+ static const struct inode_operations efivarfs_dir_inode_operations = {
+-	.lookup = simple_lookup,
++	.lookup = efivarfs_lookup,
+ 	.unlink = efivarfs_unlink,
+ 	.create = efivarfs_create,
+ };
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index eb2ee11..ceb3040 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1697,6 +1697,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) },
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) },
+@@ -2070,6 +2071,7 @@ static const struct hid_device_id hid_ignore_list[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_MASTERKIT, USB_DEVICE_ID_MASTERKIT_MA901RADIO) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) },
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 34e2547..266e2ae 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -554,6 +554,9 @@
+ #define USB_VENDOR_ID_MADCATZ		0x0738
+ #define USB_DEVICE_ID_MADCATZ_BEATPAD	0x4540
+ 
++#define USB_VENDOR_ID_MASTERKIT			0x16c0
++#define USB_DEVICE_ID_MASTERKIT_MA901RADIO	0x05df
++
+ #define USB_VENDOR_ID_MCC		0x09db
+ #define USB_DEVICE_ID_MCC_PMD1024LS	0x0076
+ #define USB_DEVICE_ID_MCC_PMD1208LS	0x007a
+@@ -709,6 +712,7 @@
+ 
+ #define USB_VENDOR_ID_SONY			0x054c
+ #define USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE	0x024b
++#define USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE	0x0374
+ #define USB_DEVICE_ID_SONY_PS3_BDREMOTE		0x0306
+ #define USB_DEVICE_ID_SONY_PS3_CONTROLLER	0x0268
+ #define USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER	0x042f
+diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
+index 7f33ebf..126d6ae 100644
+--- a/drivers/hid/hid-sony.c
++++ b/drivers/hid/hid-sony.c
+@@ -43,9 +43,19 @@ static __u8 *sony_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ {
+ 	struct sony_sc *sc = hid_get_drvdata(hdev);
+ 
+-	if ((sc->quirks & VAIO_RDESC_CONSTANT) &&
+-			*rsize >= 56 && rdesc[54] == 0x81 && rdesc[55] == 0x07) {
+-		hid_info(hdev, "Fixing up Sony Vaio VGX report descriptor\n");
++	/*
++	 * Some Sony RF receivers wrongly declare the mouse pointer as a
++	 * a constant non-data variable.
++	 */
++	if ((sc->quirks & VAIO_RDESC_CONSTANT) && *rsize >= 56 &&
++	    /* usage page: generic desktop controls */
++	    /* rdesc[0] == 0x05 && rdesc[1] == 0x01 && */
++	    /* usage: mouse */
++	    rdesc[2] == 0x09 && rdesc[3] == 0x02 &&
++	    /* input (usage page for x,y axes): constant, variable, relative */
++	    rdesc[54] == 0x81 && rdesc[55] == 0x07) {
++		hid_info(hdev, "Fixing up Sony RF Receiver report descriptor\n");
++		/* input: data, variable, relative */
+ 		rdesc[55] = 0x06;
+ 	}
+ 
+@@ -217,6 +227,8 @@ static const struct hid_device_id sony_devices[] = {
+ 		.driver_data = SIXAXIS_CONTROLLER_BT },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE),
+ 		.driver_data = VAIO_RDESC_CONSTANT },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE),
++		.driver_data = VAIO_RDESC_CONSTANT },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(hid, sony_devices);
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
+index d5088ce..7ccf328 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -700,23 +700,24 @@ static int srp_reconnect_target(struct srp_target_port *target)
+ 	struct Scsi_Host *shost = target->scsi_host;
+ 	int i, ret;
+ 
+-	if (target->state != SRP_TARGET_LIVE)
+-		return -EAGAIN;
+-
+ 	scsi_target_block(&shost->shost_gendev);
+ 
+ 	srp_disconnect_target(target);
+ 	/*
+-	 * Now get a new local CM ID so that we avoid confusing the
+-	 * target in case things are really fouled up.
++	 * Now get a new local CM ID so that we avoid confusing the target in
++	 * case things are really fouled up. Doing so also ensures that all CM
++	 * callbacks will have finished before a new QP is allocated.
+ 	 */
+ 	ret = srp_new_cm_id(target);
+-	if (ret)
+-		goto unblock;
+-
+-	ret = srp_create_target_ib(target);
+-	if (ret)
+-		goto unblock;
++	/*
++	 * Whether or not creating a new CM ID succeeded, create a new
++	 * QP. This guarantees that all completion callback function
++	 * invocations have finished before request resetting starts.
++	 */
++	if (ret == 0)
++		ret = srp_create_target_ib(target);
++	else
++		srp_create_target_ib(target);
+ 
+ 	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
+ 		struct srp_request *req = &target->req_ring[i];
+@@ -728,11 +729,12 @@ static int srp_reconnect_target(struct srp_target_port *target)
+ 	for (i = 0; i < SRP_SQ_SIZE; ++i)
+ 		list_add(&target->tx_ring[i]->list, &target->free_tx);
+ 
+-	ret = srp_connect_target(target);
++	if (ret == 0)
++		ret = srp_connect_target(target);
+ 
+-unblock:
+ 	scsi_target_unblock(&shost->shost_gendev, ret == 0 ? SDEV_RUNNING :
+ 			    SDEV_TRANSPORT_OFFLINE);
++	target->transport_offline = !!ret;
+ 
+ 	if (ret)
+ 		goto err;
+@@ -1352,6 +1354,12 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
+ 	unsigned long flags;
+ 	int len;
+ 
++	if (unlikely(target->transport_offline)) {
++		scmnd->result = DID_NO_CONNECT << 16;
++		scmnd->scsi_done(scmnd);
++		return 0;
++	}
++
+ 	spin_lock_irqsave(&target->lock, flags);
+ 	iu = __srp_get_tx_iu(target, SRP_IU_CMD);
+ 	if (!iu)
+@@ -1695,6 +1703,9 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
+ 	struct srp_iu *iu;
+ 	struct srp_tsk_mgmt *tsk_mgmt;
+ 
++	if (!target->connected || target->qp_in_error)
++		return -1;
++
+ 	init_completion(&target->tsk_mgmt_done);
+ 
+ 	spin_lock_irq(&target->lock);
+@@ -1736,7 +1747,7 @@ static int srp_abort(struct scsi_cmnd *scmnd)
+ 
+ 	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
+ 
+-	if (!req || target->qp_in_error || !srp_claim_req(target, req, scmnd))
++	if (!req || !srp_claim_req(target, req, scmnd))
+ 		return FAILED;
+ 	srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
+ 			  SRP_TSK_ABORT_TASK);
+@@ -1754,8 +1765,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
+ 
+ 	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
+ 
+-	if (target->qp_in_error)
+-		return FAILED;
+ 	if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun,
+ 			      SRP_TSK_LUN_RESET))
+ 		return FAILED;
+@@ -1972,7 +1981,6 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
+ 	spin_unlock(&host->target_lock);
+ 
+ 	target->state = SRP_TARGET_LIVE;
+-	target->connected = false;
+ 
+ 	scsi_scan_target(&target->scsi_host->shost_gendev,
+ 			 0, target->scsi_id, SCAN_WILD_CARD, 0);
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
+index de2d0b3..66fbedd 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.h
++++ b/drivers/infiniband/ulp/srp/ib_srp.h
+@@ -140,6 +140,7 @@ struct srp_target_port {
+ 	unsigned int		cmd_sg_cnt;
+ 	unsigned int		indirect_size;
+ 	bool			allow_ext_sg;
++	bool			transport_offline;
+ 
+ 	/* Everything above this point is used in the hot path of
+ 	 * command processing. Try to keep them packed into cachelines.
+diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
+index faf10ba..b6ecddb 100644
+--- a/drivers/iommu/amd_iommu_init.c
++++ b/drivers/iommu/amd_iommu_init.c
+@@ -1876,11 +1876,6 @@ static int amd_iommu_init_dma(void)
+ 	struct amd_iommu *iommu;
+ 	int ret;
+ 
+-	init_device_table_dma();
+-
+-	for_each_iommu(iommu)
+-		iommu_flush_all_caches(iommu);
+-
+ 	if (iommu_pass_through)
+ 		ret = amd_iommu_init_passthrough();
+ 	else
+@@ -1889,6 +1884,11 @@ static int amd_iommu_init_dma(void)
+ 	if (ret)
+ 		return ret;
+ 
++	init_device_table_dma();
++
++	for_each_iommu(iommu)
++		iommu_flush_all_caches(iommu);
++
+ 	amd_iommu_init_api();
+ 
+ 	amd_iommu_init_notifier();
+diff --git a/drivers/media/pci/cx18/cx18-alsa-main.c b/drivers/media/pci/cx18/cx18-alsa-main.c
+index 8e971ff..b2c8c34 100644
+--- a/drivers/media/pci/cx18/cx18-alsa-main.c
++++ b/drivers/media/pci/cx18/cx18-alsa-main.c
+@@ -197,7 +197,7 @@ err_exit:
+ 	return ret;
+ }
+ 
+-static int __init cx18_alsa_load(struct cx18 *cx)
++static int cx18_alsa_load(struct cx18 *cx)
+ {
+ 	struct v4l2_device *v4l2_dev = &cx->v4l2_dev;
+ 	struct cx18_stream *s;
+diff --git a/drivers/media/pci/cx18/cx18-alsa-pcm.h b/drivers/media/pci/cx18/cx18-alsa-pcm.h
+index d26e51f..e2b2c5b 100644
+--- a/drivers/media/pci/cx18/cx18-alsa-pcm.h
++++ b/drivers/media/pci/cx18/cx18-alsa-pcm.h
+@@ -20,7 +20,7 @@
+  *  02111-1307  USA
+  */
+ 
+-int __init snd_cx18_pcm_create(struct snd_cx18_card *cxsc);
++int snd_cx18_pcm_create(struct snd_cx18_card *cxsc);
+ 
+ /* Used by cx18-mailbox to announce the PCM data to the module */
+ void cx18_alsa_announce_pcm_data(struct snd_cx18_card *card, u8 *pcm_data,
+diff --git a/drivers/media/pci/ivtv/ivtv-alsa-main.c b/drivers/media/pci/ivtv/ivtv-alsa-main.c
+index 4a221c6..e970cfa 100644
+--- a/drivers/media/pci/ivtv/ivtv-alsa-main.c
++++ b/drivers/media/pci/ivtv/ivtv-alsa-main.c
+@@ -205,7 +205,7 @@ err_exit:
+ 	return ret;
+ }
+ 
+-static int __init ivtv_alsa_load(struct ivtv *itv)
++static int ivtv_alsa_load(struct ivtv *itv)
+ {
+ 	struct v4l2_device *v4l2_dev = &itv->v4l2_dev;
+ 	struct ivtv_stream *s;
+diff --git a/drivers/media/pci/ivtv/ivtv-alsa-pcm.h b/drivers/media/pci/ivtv/ivtv-alsa-pcm.h
+index 23dfe0d..186814e 100644
+--- a/drivers/media/pci/ivtv/ivtv-alsa-pcm.h
++++ b/drivers/media/pci/ivtv/ivtv-alsa-pcm.h
+@@ -20,4 +20,4 @@
+  *  02111-1307  USA
+  */
+ 
+-int __init snd_ivtv_pcm_create(struct snd_ivtv_card *itvsc);
++int snd_ivtv_pcm_create(struct snd_ivtv_card *itvsc);
+diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
+index 35cc526..8e9a668 100644
+--- a/drivers/media/platform/omap/omap_vout.c
++++ b/drivers/media/platform/omap/omap_vout.c
+@@ -205,19 +205,21 @@ static u32 omap_vout_uservirt_to_phys(u32 virtp)
+ 	struct vm_area_struct *vma;
+ 	struct mm_struct *mm = current->mm;
+ 
+-	vma = find_vma(mm, virtp);
+ 	/* For kernel direct-mapped memory, take the easy way */
+-	if (virtp >= PAGE_OFFSET) {
+-		physp = virt_to_phys((void *) virtp);
+-	} else if (vma && (vma->vm_flags & VM_IO) && vma->vm_pgoff) {
++	if (virtp >= PAGE_OFFSET)
++		return virt_to_phys((void *) virtp);
++
++	down_read(&current->mm->mmap_sem);
++	vma = find_vma(mm, virtp);
++	if (vma && (vma->vm_flags & VM_IO) && vma->vm_pgoff) {
+ 		/* this will catch, kernel-allocated, mmaped-to-usermode
+ 		   addresses */
+ 		physp = (vma->vm_pgoff << PAGE_SHIFT) + (virtp - vma->vm_start);
++		up_read(&current->mm->mmap_sem);
+ 	} else {
+ 		/* otherwise, use get_user_pages() for general userland pages */
+ 		int res, nr_pages = 1;
+ 		struct page *pages;
+-		down_read(&current->mm->mmap_sem);
+ 
+ 		res = get_user_pages(current, current->mm, virtp, nr_pages, 1,
+ 				0, &pages, NULL);
+diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
+index 601d1ac1..d593bc6 100644
+--- a/drivers/media/rc/rc-main.c
++++ b/drivers/media/rc/rc-main.c
+@@ -789,8 +789,10 @@ static ssize_t show_protocols(struct device *device,
+ 	} else if (dev->raw) {
+ 		enabled = dev->raw->enabled_protocols;
+ 		allowed = ir_raw_get_allowed_protocols();
+-	} else
++	} else {
++		mutex_unlock(&dev->lock);
+ 		return -ENODEV;
++	}
+ 
+ 	IR_dprintk(1, "allowed - 0x%llx, enabled - 0x%llx\n",
+ 		   (long long)allowed,
+diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
+index 513969f..98a7f5e 100644
+--- a/drivers/media/v4l2-core/v4l2-device.c
++++ b/drivers/media/v4l2-core/v4l2-device.c
+@@ -159,31 +159,21 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
+ 	sd->v4l2_dev = v4l2_dev;
+ 	if (sd->internal_ops && sd->internal_ops->registered) {
+ 		err = sd->internal_ops->registered(sd);
+-		if (err) {
+-			module_put(sd->owner);
+-			return err;
+-		}
++		if (err)
++			goto error_module;
+ 	}
+ 
+ 	/* This just returns 0 if either of the two args is NULL */
+ 	err = v4l2_ctrl_add_handler(v4l2_dev->ctrl_handler, sd->ctrl_handler, NULL);
+-	if (err) {
+-		if (sd->internal_ops && sd->internal_ops->unregistered)
+-			sd->internal_ops->unregistered(sd);
+-		module_put(sd->owner);
+-		return err;
+-	}
++	if (err)
++		goto error_unregister;
+ 
+ #if defined(CONFIG_MEDIA_CONTROLLER)
+ 	/* Register the entity. */
+ 	if (v4l2_dev->mdev) {
+ 		err = media_device_register_entity(v4l2_dev->mdev, entity);
+-		if (err < 0) {
+-			if (sd->internal_ops && sd->internal_ops->unregistered)
+-				sd->internal_ops->unregistered(sd);
+-			module_put(sd->owner);
+-			return err;
+-		}
++		if (err < 0)
++			goto error_unregister;
+ 	}
+ #endif
+ 
+@@ -192,6 +182,14 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
+ 	spin_unlock(&v4l2_dev->lock);
+ 
+ 	return 0;
++
++error_unregister:
++	if (sd->internal_ops && sd->internal_ops->unregistered)
++		sd->internal_ops->unregistered(sd);
++error_module:
++	module_put(sd->owner);
++	sd->v4l2_dev = NULL;
++	return err;
+ }
+ EXPORT_SYMBOL_GPL(v4l2_device_register_subdev);
+ 
+diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
+index 806e34c..0568273 100644
+--- a/drivers/net/wireless/b43/main.c
++++ b/drivers/net/wireless/b43/main.c
+@@ -4214,7 +4214,6 @@ redo:
+ 	mutex_unlock(&wl->mutex);
+ 	cancel_delayed_work_sync(&dev->periodic_work);
+ 	cancel_work_sync(&wl->tx_work);
+-	cancel_work_sync(&wl->firmware_load);
+ 	mutex_lock(&wl->mutex);
+ 	dev = wl->current_dev;
+ 	if (!dev || b43_status(dev) < B43_STAT_STARTED) {
+@@ -5434,6 +5433,7 @@ static void b43_bcma_remove(struct bcma_device *core)
+ 	/* We must cancel any work here before unregistering from ieee80211,
+ 	 * as the ieee80211 unreg will destroy the workqueue. */
+ 	cancel_work_sync(&wldev->restart_work);
++	cancel_work_sync(&wl->firmware_load);
+ 
+ 	B43_WARN_ON(!wl);
+ 	if (!wldev->fw.ucode.data)
+@@ -5510,6 +5510,7 @@ static void b43_ssb_remove(struct ssb_device *sdev)
+ 	/* We must cancel any work here before unregistering from ieee80211,
+ 	 * as the ieee80211 unreg will destroy the workqueue. */
+ 	cancel_work_sync(&wldev->restart_work);
++	cancel_work_sync(&wl->firmware_load);
+ 
+ 	B43_WARN_ON(!wl);
+ 	if (!wldev->fw.ucode.data)
+diff --git a/drivers/power/ab8500_btemp.c b/drivers/power/ab8500_btemp.c
+index 20e2a7d..056222e 100644
+--- a/drivers/power/ab8500_btemp.c
++++ b/drivers/power/ab8500_btemp.c
+@@ -1123,7 +1123,7 @@ static void __exit ab8500_btemp_exit(void)
+ 	platform_driver_unregister(&ab8500_btemp_driver);
+ }
+ 
+-subsys_initcall_sync(ab8500_btemp_init);
++device_initcall(ab8500_btemp_init);
+ module_exit(ab8500_btemp_exit);
+ 
+ MODULE_LICENSE("GPL v2");
+diff --git a/drivers/power/abx500_chargalg.c b/drivers/power/abx500_chargalg.c
+index 2970891..eb7b4a6 100644
+--- a/drivers/power/abx500_chargalg.c
++++ b/drivers/power/abx500_chargalg.c
+@@ -1698,7 +1698,7 @@ static ssize_t abx500_chargalg_sysfs_charger(struct kobject *kobj,
+ static struct attribute abx500_chargalg_en_charger = \
+ {
+ 	.name = "chargalg",
+-	.mode = S_IWUGO,
++	.mode = S_IWUSR,
+ };
+ 
+ static struct attribute *abx500_chargalg_chg[] = {
+diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
+index 36b34ef..7087d0d 100644
+--- a/drivers/power/bq27x00_battery.c
++++ b/drivers/power/bq27x00_battery.c
+@@ -448,7 +448,6 @@ static void bq27x00_update(struct bq27x00_device_info *di)
+ 		cache.temperature = bq27x00_battery_read_temperature(di);
+ 		if (!is_bq27425)
+ 			cache.cycle_count = bq27x00_battery_read_cyct(di);
+-		cache.cycle_count = bq27x00_battery_read_cyct(di);
+ 		cache.power_avg =
+ 			bq27x00_battery_read_pwr_avg(di, BQ27x00_POWER_AVG);
+ 
+@@ -696,7 +695,6 @@ static int bq27x00_powersupply_init(struct bq27x00_device_info *di)
+ 	int ret;
+ 
+ 	di->bat.type = POWER_SUPPLY_TYPE_BATTERY;
+-	di->chip = BQ27425;
+ 	if (di->chip == BQ27425) {
+ 		di->bat.properties = bq27425_battery_props;
+ 		di->bat.num_properties = ARRAY_SIZE(bq27425_battery_props);
+diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
+index 8f14c42..6894b3e 100644
+--- a/drivers/staging/comedi/comedi_fops.c
++++ b/drivers/staging/comedi/comedi_fops.c
+@@ -1779,7 +1779,7 @@ static unsigned int comedi_poll(struct file *file, poll_table *wait)
+ 
+ 	mask = 0;
+ 	read_subdev = comedi_get_read_subdevice(dev_file_info);
+-	if (read_subdev) {
++	if (read_subdev && read_subdev->async) {
+ 		poll_wait(file, &read_subdev->async->wait_head, wait);
+ 		if (!read_subdev->busy
+ 		    || comedi_buf_read_n_available(read_subdev->async) > 0
+@@ -1789,7 +1789,7 @@ static unsigned int comedi_poll(struct file *file, poll_table *wait)
+ 		}
+ 	}
+ 	write_subdev = comedi_get_write_subdevice(dev_file_info);
+-	if (write_subdev) {
++	if (write_subdev && write_subdev->async) {
+ 		poll_wait(file, &write_subdev->async->wait_head, wait);
+ 		comedi_buf_write_alloc(write_subdev->async,
+ 				       write_subdev->async->prealloc_bufsz);
+@@ -1831,7 +1831,7 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
+ 	}
+ 
+ 	s = comedi_get_write_subdevice(dev_file_info);
+-	if (s == NULL) {
++	if (s == NULL || s->async == NULL) {
+ 		retval = -EIO;
+ 		goto done;
+ 	}
+@@ -1942,7 +1942,7 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
+ 	}
+ 
+ 	s = comedi_get_read_subdevice(dev_file_info);
+-	if (s == NULL) {
++	if (s == NULL || s->async == NULL) {
+ 		retval = -EIO;
+ 		goto done;
+ 	}
+diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
+index f2aa754..96f4981 100644
+--- a/drivers/target/target_core_device.c
++++ b/drivers/target/target_core_device.c
+@@ -1182,24 +1182,18 @@ static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked
+ 
+ struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
+ 	struct se_portal_group *tpg,
++	struct se_node_acl *nacl,
+ 	u32 mapped_lun,
+-	char *initiatorname,
+ 	int *ret)
+ {
+ 	struct se_lun_acl *lacl;
+-	struct se_node_acl *nacl;
+ 
+-	if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
++	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
+ 		pr_err("%s InitiatorName exceeds maximum size.\n",
+ 			tpg->se_tpg_tfo->get_fabric_name());
+ 		*ret = -EOVERFLOW;
+ 		return NULL;
+ 	}
+-	nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
+-	if (!nacl) {
+-		*ret = -EINVAL;
+-		return NULL;
+-	}
+ 	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
+ 	if (!lacl) {
+ 		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
+@@ -1210,7 +1204,8 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
+ 	INIT_LIST_HEAD(&lacl->lacl_list);
+ 	lacl->mapped_lun = mapped_lun;
+ 	lacl->se_lun_nacl = nacl;
+-	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
++	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s",
++		 nacl->initiatorname);
+ 
+ 	return lacl;
+ }
+diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
+index c57bbbc..04c775c 100644
+--- a/drivers/target/target_core_fabric_configfs.c
++++ b/drivers/target/target_core_fabric_configfs.c
+@@ -354,9 +354,17 @@ static struct config_group *target_fabric_make_mappedlun(
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
++	if (mapped_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
++		pr_err("Mapped LUN: %lu exceeds TRANSPORT_MAX_LUNS_PER_TPG"
++			"-1: %u for Target Portal Group: %u\n", mapped_lun,
++			TRANSPORT_MAX_LUNS_PER_TPG-1,
++			se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
++		ret = -EINVAL;
++		goto out;
++	}
+ 
+-	lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun,
+-			config_item_name(acl_ci), &ret);
++	lacl = core_dev_init_initiator_node_lun_acl(se_tpg, se_nacl,
++			mapped_lun, &ret);
+ 	if (!lacl) {
+ 		ret = -EINVAL;
+ 		goto out;
+diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
+index 93e9c1f..396e1eb 100644
+--- a/drivers/target/target_core_internal.h
++++ b/drivers/target/target_core_internal.h
+@@ -45,7 +45,7 @@ struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_device *, u3
+ int	core_dev_del_lun(struct se_portal_group *, u32);
+ struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
+ struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
+-		u32, char *, int *);
++		struct se_node_acl *, u32, int *);
+ int	core_dev_add_initiator_node_lun_acl(struct se_portal_group *,
+ 		struct se_lun_acl *, u32, u32);
+ int	core_dev_del_initiator_node_lun_acl(struct se_portal_group *,
+diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
+index 5192ac0..9169d6a 100644
+--- a/drivers/target/target_core_tpg.c
++++ b/drivers/target/target_core_tpg.c
+@@ -111,16 +111,10 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(
+ 	struct se_node_acl *acl;
+ 
+ 	spin_lock_irq(&tpg->acl_node_lock);
+-	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
+-		if (!strcmp(acl->initiatorname, initiatorname) &&
+-		    !acl->dynamic_node_acl) {
+-			spin_unlock_irq(&tpg->acl_node_lock);
+-			return acl;
+-		}
+-	}
++	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
+ 	spin_unlock_irq(&tpg->acl_node_lock);
+ 
+-	return NULL;
++	return acl;
+ }
+ 
+ /*	core_tpg_add_node_to_devs():
+diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
+index 4999563..1dae91d 100644
+--- a/drivers/usb/dwc3/core.h
++++ b/drivers/usb/dwc3/core.h
+@@ -405,7 +405,6 @@ struct dwc3_event_buffer {
+  * @number: endpoint number (1 - 15)
+  * @type: set to bmAttributes & USB_ENDPOINT_XFERTYPE_MASK
+  * @resource_index: Resource transfer index
+- * @current_uf: Current uf received through last event parameter
+  * @interval: the intervall on which the ISOC transfer is started
+  * @name: a human readable name e.g. ep1out-bulk
+  * @direction: true for TX, false for RX
+@@ -439,7 +438,6 @@ struct dwc3_ep {
+ 	u8			number;
+ 	u8			type;
+ 	u8			resource_index;
+-	u16			current_uf;
+ 	u32			interval;
+ 
+ 	char			name[20];
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 2fdd767..09835b6 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -754,21 +754,18 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
+ 	struct dwc3		*dwc = dep->dwc;
+ 	struct dwc3_trb		*trb;
+ 
+-	unsigned int		cur_slot;
+-
+ 	dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n",
+ 			dep->name, req, (unsigned long long) dma,
+ 			length, last ? " last" : "",
+ 			chain ? " chain" : "");
+ 
+-	trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
+-	cur_slot = dep->free_slot;
+-	dep->free_slot++;
+-
+ 	/* Skip the LINK-TRB on ISOC */
+-	if (((cur_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
++	if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
+ 			usb_endpoint_xfer_isoc(dep->endpoint.desc))
+-		return;
++		dep->free_slot++;
++
++	trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
++	dep->free_slot++;
+ 
+ 	if (!req->trb) {
+ 		dwc3_gadget_move_request_queued(req);
+@@ -1091,7 +1088,10 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
+ 		 * notion of current microframe.
+ 		 */
+ 		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
+-			dwc3_stop_active_transfer(dwc, dep->number);
++			if (list_empty(&dep->req_queued)) {
++				dwc3_stop_active_transfer(dwc, dep->number);
++				dep->flags = DWC3_EP_ENABLED;
++			}
+ 			return 0;
+ 		}
+ 
+@@ -1117,16 +1117,6 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
+ 					dep->name);
+ 	}
+ 
+-	/*
+-	 * 3. Missed ISOC Handling. We need to start isoc transfer on the saved
+-	 * uframe number.
+-	 */
+-	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
+-		(dep->flags & DWC3_EP_MISSED_ISOC)) {
+-			__dwc3_gadget_start_isoc(dwc, dep, dep->current_uf);
+-			dep->flags &= ~DWC3_EP_MISSED_ISOC;
+-	}
+-
+ 	return 0;
+ }
+ 
+@@ -1689,14 +1679,29 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
+ 				if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
+ 					dev_dbg(dwc->dev, "incomplete IN transfer %s\n",
+ 							dep->name);
+-					dep->current_uf = event->parameters &
+-						~(dep->interval - 1);
++					/*
++					 * If missed isoc occurred and there is
++					 * no request queued then issue END
++					 * TRANSFER, so that core generates
++					 * next xfernotready and we will issue
++					 * a fresh START TRANSFER.
++					 * If there are still queued request
++					 * then wait, do not issue either END
++					 * or UPDATE TRANSFER, just attach next
++					 * request in request_list during
++					 * giveback.If any future queued request
++					 * is successfully transferred then we
++					 * will issue UPDATE TRANSFER for all
++					 * request in the request_list.
++					 */
+ 					dep->flags |= DWC3_EP_MISSED_ISOC;
+ 				} else {
+ 					dev_err(dwc->dev, "incomplete IN transfer %s\n",
+ 							dep->name);
+ 					status = -ECONNRESET;
+ 				}
++			} else {
++				dep->flags &= ~DWC3_EP_MISSED_ISOC;
+ 			}
+ 		} else {
+ 			if (count && (event->status & DEPEVT_STATUS_SHORT))
+@@ -1723,6 +1728,23 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
+ 			break;
+ 	} while (1);
+ 
++	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
++			list_empty(&dep->req_queued)) {
++		if (list_empty(&dep->request_list)) {
++			/*
++			 * If there is no entry in request list then do
++			 * not issue END TRANSFER now. Just set PENDING
++			 * flag, so that END TRANSFER is issued when an
++			 * entry is added into request list.
++			 */
++			dep->flags = DWC3_EP_PENDING_REQUEST;
++		} else {
++			dwc3_stop_active_transfer(dwc, dep->number);
++			dep->flags = DWC3_EP_ENABLED;
++		}
++		return 1;
++	}
++
+ 	if ((event->status & DEPEVT_STATUS_IOC) &&
+ 			(trb->ctrl & DWC3_TRB_CTRL_IOC))
+ 		return 0;
+@@ -2157,6 +2179,26 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
+ 		break;
+ 	}
+ 
++	/* Enable USB2 LPM Capability */
++
++	if ((dwc->revision > DWC3_REVISION_194A)
++			&& (speed != DWC3_DCFG_SUPERSPEED)) {
++		reg = dwc3_readl(dwc->regs, DWC3_DCFG);
++		reg |= DWC3_DCFG_LPM_CAP;
++		dwc3_writel(dwc->regs, DWC3_DCFG, reg);
++
++		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
++		reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
++
++		/*
++		 * TODO: This should be configurable. For now using
++		 * maximum allowed HIRD threshold value of 0b1100
++		 */
++		reg |= DWC3_DCTL_HIRD_THRES(12);
++
++		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
++	}
++
+ 	/* Recent versions support automatic phy suspend and don't need this */
+ 	if (dwc->revision < DWC3_REVISION_194A) {
+ 		/* Suspend unneeded PHY */
+@@ -2463,20 +2505,8 @@ int dwc3_gadget_init(struct dwc3 *dwc)
+ 			DWC3_DEVTEN_DISCONNEVTEN);
+ 	dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
+ 
+-	/* Enable USB2 LPM and automatic phy suspend only on recent versions */
++	/* automatic phy suspend only on recent versions */
+ 	if (dwc->revision >= DWC3_REVISION_194A) {
+-		reg = dwc3_readl(dwc->regs, DWC3_DCFG);
+-		reg |= DWC3_DCFG_LPM_CAP;
+-		dwc3_writel(dwc->regs, DWC3_DCFG, reg);
+-
+-		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+-		reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
+-
+-		/* TODO: This should be configurable */
+-		reg |= DWC3_DCTL_HIRD_THRES(28);
+-
+-		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+-
+ 		dwc3_gadget_usb2_phy_suspend(dwc, false);
+ 		dwc3_gadget_usb3_phy_suspend(dwc, false);
+ 	}
+diff --git a/fs/direct-io.c b/fs/direct-io.c
+index cf5b44b..f853263 100644
+--- a/fs/direct-io.c
++++ b/fs/direct-io.c
+@@ -261,9 +261,9 @@ static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret, bool is
+ 		dio->end_io(dio->iocb, offset, transferred,
+ 			    dio->private, ret, is_async);
+ 	} else {
++		inode_dio_done(dio->inode);
+ 		if (is_async)
+ 			aio_complete(dio->iocb, ret, 0);
+-		inode_dio_done(dio->inode);
+ 	}
+ 
+ 	return ret;
+diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
+index cf18217..2f2e0da 100644
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -358,7 +358,7 @@ void ext4_validate_block_bitmap(struct super_block *sb,
+ }
+ 
+ /**
+- * ext4_read_block_bitmap()
++ * ext4_read_block_bitmap_nowait()
+  * @sb:			super block
+  * @block_group:	given block group
+  *
+@@ -457,6 +457,8 @@ ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
+ 	struct buffer_head *bh;
+ 
+ 	bh = ext4_read_block_bitmap_nowait(sb, block_group);
++	if (!bh)
++		return NULL;
+ 	if (ext4_wait_block_bitmap(sb, block_group, bh)) {
+ 		put_bh(bh);
+ 		return NULL;
+@@ -482,11 +484,16 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
+ 
+ 	free_clusters  = percpu_counter_read_positive(fcc);
+ 	dirty_clusters = percpu_counter_read_positive(dcc);
+-	root_clusters = EXT4_B2C(sbi, ext4_r_blocks_count(sbi->s_es));
++
++	/*
++	 * r_blocks_count should always be a multiple of the cluster ratio so
++	 * we are safe to do a plain bit shift only.
++	 */
++	root_clusters = ext4_r_blocks_count(sbi->s_es) >> sbi->s_cluster_bits;
+ 
+ 	if (free_clusters - (nclusters + root_clusters + dirty_clusters) <
+ 					EXT4_FREECLUSTERS_WATERMARK) {
+-		free_clusters  = EXT4_C2B(sbi, percpu_counter_sum_positive(fcc));
++		free_clusters  = percpu_counter_sum_positive(fcc);
+ 		dirty_clusters = percpu_counter_sum_positive(dcc);
+ 	}
+ 	/* Check whether we have space after accounting for current
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 5ae1674..d42a8c4 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -725,6 +725,7 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
+ 	struct ext4_extent_header *eh;
+ 	struct buffer_head *bh;
+ 	short int depth, i, ppos = 0, alloc = 0;
++	int ret;
+ 
+ 	eh = ext_inode_hdr(inode);
+ 	depth = ext_depth(inode);
+@@ -752,12 +753,15 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
+ 		path[ppos].p_ext = NULL;
+ 
+ 		bh = sb_getblk(inode->i_sb, path[ppos].p_block);
+-		if (unlikely(!bh))
++		if (unlikely(!bh)) {
++			ret = -ENOMEM;
+ 			goto err;
++		}
+ 		if (!bh_uptodate_or_lock(bh)) {
+ 			trace_ext4_ext_load_extent(inode, block,
+ 						path[ppos].p_block);
+-			if (bh_submit_read(bh) < 0) {
++			ret = bh_submit_read(bh);
++			if (ret < 0) {
+ 				put_bh(bh);
+ 				goto err;
+ 			}
+@@ -768,13 +772,15 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
+ 			put_bh(bh);
+ 			EXT4_ERROR_INODE(inode,
+ 					 "ppos %d > depth %d", ppos, depth);
++			ret = -EIO;
+ 			goto err;
+ 		}
+ 		path[ppos].p_bh = bh;
+ 		path[ppos].p_hdr = eh;
+ 		i--;
+ 
+-		if (ext4_ext_check_block(inode, eh, i, bh))
++		ret = ext4_ext_check_block(inode, eh, i, bh);
++		if (ret < 0)
+ 			goto err;
+ 	}
+ 
+@@ -796,7 +802,7 @@ err:
+ 	ext4_ext_drop_refs(path);
+ 	if (alloc)
+ 		kfree(path);
+-	return ERR_PTR(-EIO);
++	return ERR_PTR(ret);
+ }
+ 
+ /*
+@@ -951,7 +957,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
+ 	}
+ 	bh = sb_getblk(inode->i_sb, newblock);
+ 	if (!bh) {
+-		err = -EIO;
++		err = -ENOMEM;
+ 		goto cleanup;
+ 	}
+ 	lock_buffer(bh);
+@@ -1024,7 +1030,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
+ 		newblock = ablocks[--a];
+ 		bh = sb_getblk(inode->i_sb, newblock);
+ 		if (!bh) {
+-			err = -EIO;
++			err = -ENOMEM;
+ 			goto cleanup;
+ 		}
+ 		lock_buffer(bh);
+@@ -1136,11 +1142,8 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
+ 		return err;
+ 
+ 	bh = sb_getblk(inode->i_sb, newblock);
+-	if (!bh) {
+-		err = -EIO;
+-		ext4_std_error(inode->i_sb, err);
+-		return err;
+-	}
++	if (!bh)
++		return -ENOMEM;
+ 	lock_buffer(bh);
+ 
+ 	err = ext4_journal_get_create_access(handle, bh);
+diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
+index 20862f9..8d83d1e 100644
+--- a/fs/ext4/indirect.c
++++ b/fs/ext4/indirect.c
+@@ -146,6 +146,7 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth,
+ 	struct super_block *sb = inode->i_sb;
+ 	Indirect *p = chain;
+ 	struct buffer_head *bh;
++	int ret = -EIO;
+ 
+ 	*err = 0;
+ 	/* i_data is not going away, no lock needed */
+@@ -154,8 +155,10 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth,
+ 		goto no_block;
+ 	while (--depth) {
+ 		bh = sb_getblk(sb, le32_to_cpu(p->key));
+-		if (unlikely(!bh))
++		if (unlikely(!bh)) {
++			ret = -ENOMEM;
+ 			goto failure;
++		}
+ 
+ 		if (!bh_uptodate_or_lock(bh)) {
+ 			if (bh_submit_read(bh) < 0) {
+@@ -177,7 +180,7 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth,
+ 	return NULL;
+ 
+ failure:
+-	*err = -EIO;
++	*err = ret;
+ no_block:
+ 	return p;
+ }
+@@ -471,7 +474,7 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
+ 		 */
+ 		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
+ 		if (unlikely(!bh)) {
+-			err = -EIO;
++			err = -ENOMEM;
+ 			goto failed;
+ 		}
+ 
+diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
+index 387c47c..93a3408 100644
+--- a/fs/ext4/inline.c
++++ b/fs/ext4/inline.c
+@@ -1188,7 +1188,7 @@ static int ext4_convert_inline_data_nolock(handle_t *handle,
+ 
+ 	data_bh = sb_getblk(inode->i_sb, map.m_pblk);
+ 	if (!data_bh) {
+-		error = -EIO;
++		error = -ENOMEM;
+ 		goto out_restore;
+ 	}
+ 
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index cbfe13b..39f1fa7 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -714,7 +714,7 @@ struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
+ 
+ 	bh = sb_getblk(inode->i_sb, map.m_pblk);
+ 	if (!bh) {
+-		*errp = -EIO;
++		*errp = -ENOMEM;
+ 		return NULL;
+ 	}
+ 	if (map.m_flags & EXT4_MAP_NEW) {
+@@ -2977,9 +2977,9 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
+ 	if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
+ 		ext4_free_io_end(io_end);
+ out:
++		inode_dio_done(inode);
+ 		if (is_async)
+ 			aio_complete(iocb, ret, 0);
+-		inode_dio_done(inode);
+ 		return;
+ 	}
+ 
+@@ -3660,11 +3660,8 @@ static int __ext4_get_inode_loc(struct inode *inode,
+ 	iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
+ 
+ 	bh = sb_getblk(sb, block);
+-	if (!bh) {
+-		EXT4_ERROR_INODE_BLOCK(inode, block,
+-				       "unable to read itable block");
+-		return -EIO;
+-	}
++	if (!bh)
++		return -ENOMEM;
+ 	if (!buffer_uptodate(bh)) {
+ 		lock_buffer(bh);
+ 
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 1bf6fe7..061727a 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -4136,7 +4136,7 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
+ 		/* The max size of hash table is PREALLOC_TB_SIZE */
+ 		order = PREALLOC_TB_SIZE - 1;
+ 	/* Add the prealloc space to lg */
+-	rcu_read_lock();
++	spin_lock(&lg->lg_prealloc_lock);
+ 	list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
+ 						pa_inode_list) {
+ 		spin_lock(&tmp_pa->pa_lock);
+@@ -4160,12 +4160,12 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
+ 	if (!added)
+ 		list_add_tail_rcu(&pa->pa_inode_list,
+ 					&lg->lg_prealloc_list[order]);
+-	rcu_read_unlock();
++	spin_unlock(&lg->lg_prealloc_lock);
+ 
+ 	/* Now trim the list to be not more than 8 elements */
+ 	if (lg_prealloc_count > 8) {
+ 		ext4_mb_discard_lg_preallocations(sb, lg,
+-						order, lg_prealloc_count);
++						  order, lg_prealloc_count);
+ 		return;
+ 	}
+ 	return ;
+diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
+index fe7c63f..44734f1 100644
+--- a/fs/ext4/mmp.c
++++ b/fs/ext4/mmp.c
+@@ -80,6 +80,8 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
+ 	 * is not blocked in the elevator. */
+ 	if (!*bh)
+ 		*bh = sb_getblk(sb, mmp_block);
++	if (!*bh)
++		return -ENOMEM;
+ 	if (*bh) {
+ 		get_bh(*bh);
+ 		lock_buffer(*bh);
+diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
+index 0016fbc..b42d04f 100644
+--- a/fs/ext4/page-io.c
++++ b/fs/ext4/page-io.c
+@@ -103,14 +103,13 @@ static int ext4_end_io(ext4_io_end_t *io)
+ 			 "(inode %lu, offset %llu, size %zd, error %d)",
+ 			 inode->i_ino, offset, size, ret);
+ 	}
+-	if (io->iocb)
+-		aio_complete(io->iocb, io->result, 0);
+-
+-	if (io->flag & EXT4_IO_END_DIRECT)
+-		inode_dio_done(inode);
+ 	/* Wake up anyone waiting on unwritten extent conversion */
+ 	if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten))
+ 		wake_up_all(ext4_ioend_wq(inode));
++	if (io->flag & EXT4_IO_END_DIRECT)
++		inode_dio_done(inode);
++	if (io->iocb)
++		aio_complete(io->iocb, io->result, 0);
+ 	return ret;
+ }
+ 
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index d99387b..02824dc 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -334,7 +334,7 @@ static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
+ 
+ 	bh = sb_getblk(sb, blk);
+ 	if (!bh)
+-		return ERR_PTR(-EIO);
++		return ERR_PTR(-ENOMEM);
+ 	if ((err = ext4_journal_get_write_access(handle, bh))) {
+ 		brelse(bh);
+ 		bh = ERR_PTR(err);
+@@ -411,7 +411,7 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
+ 
+ 		bh = sb_getblk(sb, flex_gd->groups[group].block_bitmap);
+ 		if (!bh)
+-			return -EIO;
++			return -ENOMEM;
+ 
+ 		err = ext4_journal_get_write_access(handle, bh);
+ 		if (err)
+@@ -501,7 +501,7 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
+ 
+ 			gdb = sb_getblk(sb, block);
+ 			if (!gdb) {
+-				err = -EIO;
++				err = -ENOMEM;
+ 				goto out;
+ 			}
+ 
+@@ -1065,7 +1065,7 @@ static void update_backups(struct super_block *sb, int blk_off, char *data,
+ 
+ 		bh = sb_getblk(sb, backup_block);
+ 		if (!bh) {
+-			err = -EIO;
++			err = -ENOMEM;
+ 			break;
+ 		}
+ 		ext4_debug("update metadata backup %llu(+%llu)\n",
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 3d4fb81..0465f36 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -4008,7 +4008,7 @@ no_journal:
+ 	    !(sb->s_flags & MS_RDONLY)) {
+ 		err = ext4_enable_quotas(sb);
+ 		if (err)
+-			goto failed_mount7;
++			goto failed_mount8;
+ 	}
+ #endif  /* CONFIG_QUOTA */
+ 
+@@ -4035,6 +4035,10 @@ cantfind_ext4:
+ 		ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
+ 	goto failed_mount;
+ 
++#ifdef CONFIG_QUOTA
++failed_mount8:
++	kobject_del(&sbi->s_kobj);
++#endif
+ failed_mount7:
+ 	ext4_unregister_li_request(sb);
+ failed_mount6:
+@@ -5005,9 +5009,9 @@ static int ext4_enable_quotas(struct super_block *sb)
+ 						DQUOT_USAGE_ENABLED);
+ 			if (err) {
+ 				ext4_warning(sb,
+-					"Failed to enable quota (type=%d) "
+-					"tracking. Please run e2fsck to fix.",
+-					type);
++					"Failed to enable quota tracking "
++					"(type=%d, err=%d). Please run "
++					"e2fsck to fix.", type, err);
+ 				return err;
+ 			}
+ 		}
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index 3a91ebc..b93846b 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -549,7 +549,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
+ 		error = ext4_handle_dirty_xattr_block(handle, inode, bh);
+ 		if (IS_SYNC(inode))
+ 			ext4_handle_sync(handle);
+-		dquot_free_block(inode, 1);
++		dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1));
+ 		ea_bdebug(bh, "refcount now=%d; releasing",
+ 			  le32_to_cpu(BHDR(bh)->h_refcount));
+ 	}
+@@ -832,7 +832,8 @@ inserted:
+ 			else {
+ 				/* The old block is released after updating
+ 				   the inode. */
+-				error = dquot_alloc_block(inode, 1);
++				error = dquot_alloc_block(inode,
++						EXT4_C2B(EXT4_SB(sb), 1));
+ 				if (error)
+ 					goto cleanup;
+ 				error = ext4_journal_get_write_access(handle,
+@@ -887,16 +888,17 @@ inserted:
+ 
+ 			new_bh = sb_getblk(sb, block);
+ 			if (!new_bh) {
++				error = -ENOMEM;
+ getblk_failed:
+ 				ext4_free_blocks(handle, inode, NULL, block, 1,
+ 						 EXT4_FREE_BLOCKS_METADATA);
+-				error = -EIO;
+ 				goto cleanup;
+ 			}
+ 			lock_buffer(new_bh);
+ 			error = ext4_journal_get_create_access(handle, new_bh);
+ 			if (error) {
+ 				unlock_buffer(new_bh);
++				error = -EIO;
+ 				goto getblk_failed;
+ 			}
+ 			memcpy(new_bh->b_data, s->base, new_bh->b_size);
+@@ -928,7 +930,7 @@ cleanup:
+ 	return error;
+ 
+ cleanup_dquot:
+-	dquot_free_block(inode, 1);
++	dquot_free_block(inode, EXT4_C2B(EXT4_SB(sb), 1));
+ 	goto cleanup;
+ 
+ bad_block:
+diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
+index b7c09f9..315e1f8 100644
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -682,7 +682,14 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry)
+ 
+ 		spin_lock(&fc->lock);
+ 		fi->attr_version = ++fc->attr_version;
+-		drop_nlink(inode);
++		/*
++		 * If i_nlink == 0 then unlink doesn't make sense, yet this can
++		 * happen if a userspace filesystem is careless.  It would be
++		 * difficult to enforce correct nlink usage so just ignore this
++		 * condition here
++		 */
++		if (inode->i_nlink > 0)
++			drop_nlink(inode);
+ 		spin_unlock(&fc->lock);
+ 		fuse_invalidate_attr(inode);
+ 		fuse_invalidate_attr(dir);
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index ac8ed96c..a8309c6 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -1060,6 +1060,8 @@ free_client(struct nfs4_client *clp)
+ 	}
+ 	free_svc_cred(&clp->cl_cred);
+ 	kfree(clp->cl_name.data);
++	idr_remove_all(&clp->cl_stateids);
++	idr_destroy(&clp->cl_stateids);
+ 	kfree(clp);
+ }
+ 
+diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
+index 6577432..340bd02 100644
+--- a/fs/ocfs2/aops.c
++++ b/fs/ocfs2/aops.c
+@@ -593,9 +593,9 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
+ 	level = ocfs2_iocb_rw_locked_level(iocb);
+ 	ocfs2_rw_unlock(inode, level);
+ 
++	inode_dio_done(inode);
+ 	if (is_async)
+ 		aio_complete(iocb, ret, 0);
+-	inode_dio_done(inode);
+ }
+ 
+ /*
+diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
+index f169da4..b7e74b5 100644
+--- a/fs/ocfs2/suballoc.c
++++ b/fs/ocfs2/suballoc.c
+@@ -642,7 +642,7 @@ ocfs2_block_group_alloc_discontig(handle_t *handle,
+ 	 * cluster groups will be staying in cache for the duration of
+ 	 * this operation.
+ 	 */
+-	ac->ac_allow_chain_relink = 0;
++	ac->ac_disable_chain_relink = 1;
+ 
+ 	/* Claim the first region */
+ 	status = ocfs2_block_group_claim_bits(osb, handle, ac, min_bits,
+@@ -1823,7 +1823,7 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
+ 	 * Do this *after* figuring out how many bits we're taking out
+ 	 * of our target group.
+ 	 */
+-	if (ac->ac_allow_chain_relink &&
++	if (!ac->ac_disable_chain_relink &&
+ 	    (prev_group_bh) &&
+ 	    (ocfs2_block_group_reasonably_empty(bg, res->sr_bits))) {
+ 		status = ocfs2_relink_block_group(handle, alloc_inode,
+@@ -1928,7 +1928,6 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
+ 
+ 	victim = ocfs2_find_victim_chain(cl);
+ 	ac->ac_chain = victim;
+-	ac->ac_allow_chain_relink = 1;
+ 
+ 	status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
+ 				    res, &bits_left);
+@@ -1947,7 +1946,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
+ 	 * searching each chain in order. Don't allow chain relinking
+ 	 * because we only calculate enough journal credits for one
+ 	 * relink per alloc. */
+-	ac->ac_allow_chain_relink = 0;
++	ac->ac_disable_chain_relink = 1;
+ 	for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i ++) {
+ 		if (i == victim)
+ 			continue;
+diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h
+index b8afabf..a36d0aa 100644
+--- a/fs/ocfs2/suballoc.h
++++ b/fs/ocfs2/suballoc.h
+@@ -49,7 +49,7 @@ struct ocfs2_alloc_context {
+ 
+ 	/* these are used by the chain search */
+ 	u16    ac_chain;
+-	int    ac_allow_chain_relink;
++	int    ac_disable_chain_relink;
+ 	group_search_t *ac_group_search;
+ 
+ 	u64    ac_last_group;
+diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
+index 0ba9ea1..2e3ea30 100644
+--- a/fs/ocfs2/xattr.c
++++ b/fs/ocfs2/xattr.c
+@@ -7189,7 +7189,7 @@ int ocfs2_init_security_and_acl(struct inode *dir,
+ 	struct buffer_head *dir_bh = NULL;
+ 
+ 	ret = ocfs2_init_security_get(inode, dir, qstr, NULL);
+-	if (!ret) {
++	if (ret) {
+ 		mlog_errno(ret);
+ 		goto leave;
+ 	}
+diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
+index 5ea2e77..86d1038 100644
+--- a/fs/pstore/platform.c
++++ b/fs/pstore/platform.c
+@@ -96,6 +96,27 @@ static const char *get_reason_str(enum kmsg_dump_reason reason)
+ 	}
+ }
+ 
++bool pstore_cannot_block_path(enum kmsg_dump_reason reason)
++{
++	/*
++	 * In case of NMI path, pstore shouldn't be blocked
++	 * regardless of reason.
++	 */
++	if (in_nmi())
++		return true;
++
++	switch (reason) {
++	/* In panic case, other cpus are stopped by smp_send_stop(). */
++	case KMSG_DUMP_PANIC:
++	/* Emergency restart shouldn't be blocked by spin lock. */
++	case KMSG_DUMP_EMERG:
++		return true;
++	default:
++		return false;
++	}
++}
++EXPORT_SYMBOL_GPL(pstore_cannot_block_path);
++
+ /*
+  * callback from kmsg_dump. (s2,l2) has the most recently
+  * written bytes, older bytes are in (s1,l1). Save as much
+@@ -114,10 +135,12 @@ static void pstore_dump(struct kmsg_dumper *dumper,
+ 
+ 	why = get_reason_str(reason);
+ 
+-	if (in_nmi()) {
+-		is_locked = spin_trylock(&psinfo->buf_lock);
+-		if (!is_locked)
+-			pr_err("pstore dump routine blocked in NMI, may corrupt error record\n");
++	if (pstore_cannot_block_path(reason)) {
++		is_locked = spin_trylock_irqsave(&psinfo->buf_lock, flags);
++		if (!is_locked) {
++			pr_err("pstore dump routine blocked in %s path, may corrupt error record\n"
++				       , in_nmi() ? "NMI" : why);
++		}
+ 	} else
+ 		spin_lock_irqsave(&psinfo->buf_lock, flags);
+ 	oopscount++;
+@@ -143,9 +166,9 @@ static void pstore_dump(struct kmsg_dumper *dumper,
+ 		total += hsize + len;
+ 		part++;
+ 	}
+-	if (in_nmi()) {
++	if (pstore_cannot_block_path(reason)) {
+ 		if (is_locked)
+-			spin_unlock(&psinfo->buf_lock);
++			spin_unlock_irqrestore(&psinfo->buf_lock, flags);
+ 	} else
+ 		spin_unlock_irqrestore(&psinfo->buf_lock, flags);
+ }
+diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c
+index 769701c..ba32da3 100644
+--- a/fs/ubifs/orphan.c
++++ b/fs/ubifs/orphan.c
+@@ -126,13 +126,14 @@ void ubifs_delete_orphan(struct ubifs_info *c, ino_t inum)
+ 		else if (inum > o->inum)
+ 			p = p->rb_right;
+ 		else {
+-			if (o->dnext) {
++			if (o->del) {
+ 				spin_unlock(&c->orphan_lock);
+ 				dbg_gen("deleted twice ino %lu",
+ 					(unsigned long)inum);
+ 				return;
+ 			}
+-			if (o->cnext) {
++			if (o->cmt) {
++				o->del = 1;
+ 				o->dnext = c->orph_dnext;
+ 				c->orph_dnext = o;
+ 				spin_unlock(&c->orphan_lock);
+@@ -172,7 +173,9 @@ int ubifs_orphan_start_commit(struct ubifs_info *c)
+ 	last = &c->orph_cnext;
+ 	list_for_each_entry(orphan, &c->orph_new, new_list) {
+ 		ubifs_assert(orphan->new);
++		ubifs_assert(!orphan->cmt);
+ 		orphan->new = 0;
++		orphan->cmt = 1;
+ 		*last = orphan;
+ 		last = &orphan->cnext;
+ 	}
+@@ -299,7 +302,9 @@ static int write_orph_node(struct ubifs_info *c, int atomic)
+ 	cnext = c->orph_cnext;
+ 	for (i = 0; i < cnt; i++) {
+ 		orphan = cnext;
++		ubifs_assert(orphan->cmt);
+ 		orph->inos[i] = cpu_to_le64(orphan->inum);
++		orphan->cmt = 0;
+ 		cnext = orphan->cnext;
+ 		orphan->cnext = NULL;
+ 	}
+@@ -378,6 +383,7 @@ static int consolidate(struct ubifs_info *c)
+ 		list_for_each_entry(orphan, &c->orph_list, list) {
+ 			if (orphan->new)
+ 				continue;
++			orphan->cmt = 1;
+ 			*last = orphan;
+ 			last = &orphan->cnext;
+ 			cnt += 1;
+@@ -442,6 +448,7 @@ static void erase_deleted(struct ubifs_info *c)
+ 		orphan = dnext;
+ 		dnext = orphan->dnext;
+ 		ubifs_assert(!orphan->new);
++		ubifs_assert(orphan->del);
+ 		rb_erase(&orphan->rb, &c->orph_tree);
+ 		list_del(&orphan->list);
+ 		c->tot_orphans -= 1;
+@@ -531,6 +538,7 @@ static int insert_dead_orphan(struct ubifs_info *c, ino_t inum)
+ 	rb_link_node(&orphan->rb, parent, p);
+ 	rb_insert_color(&orphan->rb, &c->orph_tree);
+ 	list_add_tail(&orphan->list, &c->orph_list);
++	orphan->del = 1;
+ 	orphan->dnext = c->orph_dnext;
+ 	c->orph_dnext = orphan;
+ 	dbg_mnt("ino %lu, new %d, tot %d", (unsigned long)inum,
+diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
+index d133c27..b2babce 100644
+--- a/fs/ubifs/ubifs.h
++++ b/fs/ubifs/ubifs.h
+@@ -904,6 +904,8 @@ struct ubifs_budget_req {
+  * @dnext: next orphan to delete
+  * @inum: inode number
+  * @new: %1 => added since the last commit, otherwise %0
++ * @cmt: %1 => commit pending, otherwise %0
++ * @del: %1 => delete pending, otherwise %0
+  */
+ struct ubifs_orphan {
+ 	struct rb_node rb;
+@@ -912,7 +914,9 @@ struct ubifs_orphan {
+ 	struct ubifs_orphan *cnext;
+ 	struct ubifs_orphan *dnext;
+ 	ino_t inum;
+-	int new;
++	unsigned new:1;
++	unsigned cmt:1;
++	unsigned del:1;
+ };
+ 
+ /**
+diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
+index cdb2d33..572a858 100644
+--- a/fs/xfs/xfs_bmap.c
++++ b/fs/xfs/xfs_bmap.c
+@@ -147,7 +147,10 @@ xfs_bmap_local_to_extents(
+ 	xfs_fsblock_t	*firstblock,	/* first block allocated in xaction */
+ 	xfs_extlen_t	total,		/* total blocks needed by transaction */
+ 	int		*logflagsp,	/* inode logging flags */
+-	int		whichfork);	/* data or attr fork */
++	int		whichfork,	/* data or attr fork */
++	void		(*init_fn)(struct xfs_buf *bp,
++				   struct xfs_inode *ip,
++				   struct xfs_ifork *ifp));
+ 
+ /*
+  * Search the extents list for the inode, for the extent containing bno.
+@@ -357,7 +360,42 @@ xfs_bmap_add_attrfork_extents(
+ }
+ 
+ /*
+- * Called from xfs_bmap_add_attrfork to handle local format files.
++ * Block initialisation functions for local to extent format conversion.
++ * As these get more complex, they will be moved to the relevant files,
++ * but for now they are too simple to worry about.
++ */
++STATIC void
++xfs_bmap_local_to_extents_init_fn(
++	struct xfs_buf		*bp,
++	struct xfs_inode	*ip,
++	struct xfs_ifork	*ifp)
++{
++	bp->b_ops = &xfs_bmbt_buf_ops;
++	memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes);
++}
++
++STATIC void
++xfs_symlink_local_to_remote(
++	struct xfs_buf		*bp,
++	struct xfs_inode	*ip,
++	struct xfs_ifork	*ifp)
++{
++	/* remote symlink blocks are not verifiable until CRCs come along */
++	bp->b_ops = NULL;
++	memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes);
++}
++
++/*
++ * Called from xfs_bmap_add_attrfork to handle local format files. Each
++ * different data fork content type needs a different callout to do the
++ * conversion. Some are basic and only require special block initialisation
++ * callouts for the data formatting; others (directories) are so specialised they
++ * handle everything themselves.
++ *
++ * XXX (dgc): investigate whether directory conversion can use the generic
++ * formatting callout. It should be possible - it's just a very complex
++ * formatter. It would also require passing the transaction through to the init
++ * function.
+  */
+ STATIC int					/* error */
+ xfs_bmap_add_attrfork_local(
+@@ -368,25 +406,29 @@ xfs_bmap_add_attrfork_local(
+ 	int			*flags)		/* inode logging flags */
+ {
+ 	xfs_da_args_t		dargs;		/* args for dir/attr code */
+-	int			error;		/* error return value */
+-	xfs_mount_t		*mp;		/* mount structure pointer */
+ 
+ 	if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
+ 		return 0;
++
+ 	if (S_ISDIR(ip->i_d.di_mode)) {
+-		mp = ip->i_mount;
+ 		memset(&dargs, 0, sizeof(dargs));
+ 		dargs.dp = ip;
+ 		dargs.firstblock = firstblock;
+ 		dargs.flist = flist;
+-		dargs.total = mp->m_dirblkfsbs;
++		dargs.total = ip->i_mount->m_dirblkfsbs;
+ 		dargs.whichfork = XFS_DATA_FORK;
+ 		dargs.trans = tp;
+-		error = xfs_dir2_sf_to_block(&dargs);
+-	} else
+-		error = xfs_bmap_local_to_extents(tp, ip, firstblock, 1, flags,
+-			XFS_DATA_FORK);
+-	return error;
++		return xfs_dir2_sf_to_block(&dargs);
++	}
++
++	if (S_ISLNK(ip->i_d.di_mode))
++		return xfs_bmap_local_to_extents(tp, ip, firstblock, 1,
++						 flags, XFS_DATA_FORK,
++						 xfs_symlink_local_to_remote);
++
++	return xfs_bmap_local_to_extents(tp, ip, firstblock, 1, flags,
++					 XFS_DATA_FORK,
++					 xfs_bmap_local_to_extents_init_fn);
+ }
+ 
+ /*
+@@ -3221,7 +3263,10 @@ xfs_bmap_local_to_extents(
+ 	xfs_fsblock_t	*firstblock,	/* first block allocated in xaction */
+ 	xfs_extlen_t	total,		/* total blocks needed by transaction */
+ 	int		*logflagsp,	/* inode logging flags */
+-	int		whichfork)	/* data or attr fork */
++	int		whichfork,
++	void		(*init_fn)(struct xfs_buf *bp,
++				   struct xfs_inode *ip,
++				   struct xfs_ifork *ifp))
+ {
+ 	int		error;		/* error return value */
+ 	int		flags;		/* logging flags returned */
+@@ -3241,12 +3286,12 @@ xfs_bmap_local_to_extents(
+ 		xfs_buf_t	*bp;	/* buffer for extent block */
+ 		xfs_bmbt_rec_host_t *ep;/* extent record pointer */
+ 
++		ASSERT((ifp->if_flags &
++			(XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) == XFS_IFINLINE);
+ 		memset(&args, 0, sizeof(args));
+ 		args.tp = tp;
+ 		args.mp = ip->i_mount;
+ 		args.firstblock = *firstblock;
+-		ASSERT((ifp->if_flags &
+-			(XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) == XFS_IFINLINE);
+ 		/*
+ 		 * Allocate a block.  We know we need only one, since the
+ 		 * file currently fits in an inode.
+@@ -3262,17 +3307,20 @@ xfs_bmap_local_to_extents(
+ 		args.mod = args.minleft = args.alignment = args.wasdel =
+ 			args.isfl = args.minalignslop = 0;
+ 		args.minlen = args.maxlen = args.prod = 1;
+-		if ((error = xfs_alloc_vextent(&args)))
++		error = xfs_alloc_vextent(&args);
++		if (error)
+ 			goto done;
+-		/*
+-		 * Can't fail, the space was reserved.
+-		 */
++
++		/* Can't fail, the space was reserved. */
+ 		ASSERT(args.fsbno != NULLFSBLOCK);
+ 		ASSERT(args.len == 1);
+ 		*firstblock = args.fsbno;
+ 		bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);
+-		bp->b_ops = &xfs_bmbt_buf_ops;
+-		memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes);
++
++		/* initialise the block and copy the data */
++		init_fn(bp, ip, ifp);
++
++		/* account for the change in fork size and log everything */
+ 		xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1);
+ 		xfs_bmap_forkoff_reset(args.mp, ip, whichfork);
+ 		xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
+@@ -4919,8 +4967,32 @@ xfs_bmapi_write(
+ 	XFS_STATS_INC(xs_blk_mapw);
+ 
+ 	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
++		/*
++		 * XXX (dgc): This assumes we are only called for inodes that
++		 * contain content neutral data in local format. Anything that
++		 * contains caller-specific data in local format that needs
++		 * transformation to move to a block format needs to do the
++		 * conversion to extent format itself.
++		 *
++		 * Directory data forks and attribute forks handle this
++		 * themselves, but with the addition of metadata verifiers every
++		 * data fork in local format now contains caller specific data
++		 * and as such conversion through this function is likely to be
++		 * broken.
++		 *
++		 * The only likely user of this branch is for remote symlinks,
++		 * but we cannot overwrite the data fork contents of the symlink
++		 * (EEXIST occurs higher up the stack) and so it will never go
++		 * from local format to extent format here. Hence I don't think
++		 * this branch is ever executed intentionally and we should
++		 * consider removing it and asserting that xfs_bmapi_write()
++		 * cannot be called directly on local format forks. i.e. callers
++		 * are completely responsible for local to extent format
++		 * conversion, not xfs_bmapi_write().
++		 */
+ 		error = xfs_bmap_local_to_extents(tp, ip, firstblock, total,
+-						  &bma.logflags, whichfork);
++					&bma.logflags, whichfork,
++					xfs_bmap_local_to_extents_init_fn);
+ 		if (error)
+ 			goto error0;
+ 	}
+diff --git a/include/linux/llist.h b/include/linux/llist.h
+index d0ab98f..a5199f6 100644
+--- a/include/linux/llist.h
++++ b/include/linux/llist.h
+@@ -125,31 +125,6 @@ static inline void init_llist_head(struct llist_head *list)
+ 	     (pos) = llist_entry((pos)->member.next, typeof(*(pos)), member))
+ 
+ /**
+- * llist_for_each_entry_safe - iterate safely against remove over some entries
+- * of lock-less list of given type.
+- * @pos:	the type * to use as a loop cursor.
+- * @n:		another type * to use as a temporary storage.
+- * @node:	the fist entry of deleted list entries.
+- * @member:	the name of the llist_node with the struct.
+- *
+- * In general, some entries of the lock-less list can be traversed
+- * safely only after being removed from list, so start with an entry
+- * instead of list head. This variant allows removal of entries
+- * as we iterate.
+- *
+- * If being used on entries deleted from lock-less list directly, the
+- * traverse order is from the newest to the oldest added entry.  If
+- * you want to traverse from the oldest to the newest, you must
+- * reverse the order by yourself before traversing.
+- */
+-#define llist_for_each_entry_safe(pos, n, node, member)		\
+-	for ((pos) = llist_entry((node), typeof(*(pos)), member),	\
+-	     (n) = (pos)->member.next;					\
+-	     &(pos)->member != NULL;					\
+-	     (pos) = llist_entry(n, typeof(*(pos)), member),		\
+-	     (n) = (&(pos)->member != NULL) ? (pos)->member.next : NULL)
+-
+-/**
+  * llist_empty - tests whether a lock-less list is empty
+  * @head:	the list to test
+  *
+diff --git a/include/linux/pstore.h b/include/linux/pstore.h
+index 1788909..75d0176 100644
+--- a/include/linux/pstore.h
++++ b/include/linux/pstore.h
+@@ -68,12 +68,18 @@ struct pstore_info {
+ 
+ #ifdef CONFIG_PSTORE
+ extern int pstore_register(struct pstore_info *);
++extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason);
+ #else
+ static inline int
+ pstore_register(struct pstore_info *psi)
+ {
+ 	return -ENODEV;
+ }
++static inline bool
++pstore_cannot_block_path(enum kmsg_dump_reason reason)
++{
++	return false;
++}
+ #endif
+ 
+ #endif /*_LINUX_PSTORE_H*/
+diff --git a/include/linux/quota.h b/include/linux/quota.h
+index 58fdef12..d133711 100644
+--- a/include/linux/quota.h
++++ b/include/linux/quota.h
+@@ -405,6 +405,7 @@ struct quota_module_name {
+ #define INIT_QUOTA_MODULE_NAMES {\
+ 	{QFMT_VFS_OLD, "quota_v1"},\
+ 	{QFMT_VFS_V0, "quota_v2"},\
++	{QFMT_VFS_V1, "quota_v2"},\
+ 	{0, NULL}}
+ 
+ #endif /* _QUOTA_ */
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index 4855892..1e23664 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -426,12 +426,20 @@ static void __put_css_set(struct css_set *cg, int taskexit)
+ 		struct cgroup *cgrp = link->cgrp;
+ 		list_del(&link->cg_link_list);
+ 		list_del(&link->cgrp_link_list);
++
++		/*
++		 * We may not be holding cgroup_mutex, and if cgrp->count is
++		 * dropped to 0 the cgroup can be destroyed at any time, hence
++		 * rcu_read_lock is used to keep it alive.
++		 */
++		rcu_read_lock();
+ 		if (atomic_dec_and_test(&cgrp->count) &&
+ 		    notify_on_release(cgrp)) {
+ 			if (taskexit)
+ 				set_bit(CGRP_RELEASABLE, &cgrp->flags);
+ 			check_for_release(cgrp);
+ 		}
++		rcu_read_unlock();
+ 
+ 		kfree(link);
+ 	}
+diff --git a/kernel/cpuset.c b/kernel/cpuset.c
+index 7bb63ee..5bb9bf1 100644
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -2511,8 +2511,16 @@ void cpuset_print_task_mems_allowed(struct task_struct *tsk)
+ 
+ 	dentry = task_cs(tsk)->css.cgroup->dentry;
+ 	spin_lock(&cpuset_buffer_lock);
+-	snprintf(cpuset_name, CPUSET_NAME_LEN,
+-		 dentry ? (const char *)dentry->d_name.name : "/");
++
++	if (!dentry) {
++		strcpy(cpuset_name, "/");
++	} else {
++		spin_lock(&dentry->d_lock);
++		strlcpy(cpuset_name, (const char *)dentry->d_name.name,
++			CPUSET_NAME_LEN);
++		spin_unlock(&dentry->d_lock);
++	}
++
+ 	nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN,
+ 			   tsk->mems_allowed);
+ 	printk(KERN_INFO "%s cpuset=%s mems_allowed=%s\n",
+diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
+index 69185ae..e885be1 100644
+--- a/kernel/posix-timers.c
++++ b/kernel/posix-timers.c
+@@ -639,6 +639,13 @@ static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags)
+ {
+ 	struct k_itimer *timr;
+ 
++	/*
++	 * timer_t could be any type >= int and we want to make sure any
++	 * @timer_id outside positive int range fails lookup.
++	 */
++	if ((unsigned long long)timer_id > INT_MAX)
++		return NULL;
++
+ 	rcu_read_lock();
+ 	timr = idr_find(&posix_timers_id, (int)timer_id);
+ 	if (timr) {
+diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
+index 5a63844..0ddf3a0 100644
+--- a/kernel/sysctl_binary.c
++++ b/kernel/sysctl_binary.c
+@@ -1194,9 +1194,10 @@ static ssize_t bin_dn_node_address(struct file *file,
+ 
+ 		/* Convert the decnet address to binary */
+ 		result = -EIO;
+-		nodep = strchr(buf, '.') + 1;
++		nodep = strchr(buf, '.');
+ 		if (!nodep)
+ 			goto out;
++		++nodep;
+ 
+ 		area = simple_strtoul(buf, NULL, 10);
+ 		node = simple_strtoul(nodep, NULL, 10);
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 41473b4..43defd1 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -3970,37 +3970,51 @@ static void ftrace_init_module(struct module *mod,
+ 	ftrace_process_locs(mod, start, end);
+ }
+ 
+-static int ftrace_module_notify(struct notifier_block *self,
+-				unsigned long val, void *data)
++static int ftrace_module_notify_enter(struct notifier_block *self,
++				      unsigned long val, void *data)
+ {
+ 	struct module *mod = data;
+ 
+-	switch (val) {
+-	case MODULE_STATE_COMING:
++	if (val == MODULE_STATE_COMING)
+ 		ftrace_init_module(mod, mod->ftrace_callsites,
+ 				   mod->ftrace_callsites +
+ 				   mod->num_ftrace_callsites);
+-		break;
+-	case MODULE_STATE_GOING:
++	return 0;
++}
++
++static int ftrace_module_notify_exit(struct notifier_block *self,
++				     unsigned long val, void *data)
++{
++	struct module *mod = data;
++
++	if (val == MODULE_STATE_GOING)
+ 		ftrace_release_mod(mod);
+-		break;
+-	}
+ 
+ 	return 0;
+ }
+ #else
+-static int ftrace_module_notify(struct notifier_block *self,
+-				unsigned long val, void *data)
++static int ftrace_module_notify_enter(struct notifier_block *self,
++				      unsigned long val, void *data)
++{
++	return 0;
++}
++static int ftrace_module_notify_exit(struct notifier_block *self,
++				     unsigned long val, void *data)
+ {
+ 	return 0;
+ }
+ #endif /* CONFIG_MODULES */
+ 
+-struct notifier_block ftrace_module_nb = {
+-	.notifier_call = ftrace_module_notify,
++struct notifier_block ftrace_module_enter_nb = {
++	.notifier_call = ftrace_module_notify_enter,
+ 	.priority = INT_MAX,	/* Run before anything that can use kprobes */
+ };
+ 
++struct notifier_block ftrace_module_exit_nb = {
++	.notifier_call = ftrace_module_notify_exit,
++	.priority = INT_MIN,	/* Run after anything that can remove kprobes */
++};
++
+ extern unsigned long __start_mcount_loc[];
+ extern unsigned long __stop_mcount_loc[];
+ 
+@@ -4032,9 +4046,13 @@ void __init ftrace_init(void)
+ 				  __start_mcount_loc,
+ 				  __stop_mcount_loc);
+ 
+-	ret = register_module_notifier(&ftrace_module_nb);
++	ret = register_module_notifier(&ftrace_module_enter_nb);
++	if (ret)
++		pr_warning("Failed to register trace ftrace module enter notifier\n");
++
++	ret = register_module_notifier(&ftrace_module_exit_nb);
+ 	if (ret)
+-		pr_warning("Failed to register trace ftrace module notifier\n");
++		pr_warning("Failed to register trace ftrace module exit notifier\n");
+ 
+ 	set_ftrace_early_filters();
+ 
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 033ad5b..3a3a98f 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -138,6 +138,7 @@ struct worker {
+ 	};
+ 
+ 	struct work_struct	*current_work;	/* L: work being processed */
++	work_func_t		current_func;	/* L: current_work's fn */
+ 	struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
+ 	struct list_head	scheduled;	/* L: scheduled works */
+ 	struct task_struct	*task;		/* I: worker task */
+@@ -910,7 +911,8 @@ static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
+ 	struct hlist_node *tmp;
+ 
+ 	hlist_for_each_entry(worker, tmp, bwh, hentry)
+-		if (worker->current_work == work)
++		if (worker->current_work == work &&
++		    worker->current_func == work->func)
+ 			return worker;
+ 	return NULL;
+ }
+@@ -920,9 +922,27 @@ static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
+  * @gcwq: gcwq of interest
+  * @work: work to find worker for
+  *
+- * Find a worker which is executing @work on @gcwq.  This function is
+- * identical to __find_worker_executing_work() except that this
+- * function calculates @bwh itself.
++ * Find a worker which is executing @work on @gcwq by searching
++ * @gcwq->busy_hash which is keyed by the address of @work.  For a worker
++ * to match, its current execution should match the address of @work and
++ * its work function.  This is to avoid unwanted dependency between
++ * unrelated work executions through a work item being recycled while still
++ * being executed.
++ *
++ * This is a bit tricky.  A work item may be freed once its execution
++ * starts and nothing prevents the freed area from being recycled for
++ * another work item.  If the same work item address ends up being reused
++ * before the original execution finishes, workqueue will identify the
++ * recycled work item as currently executing and make it wait until the
++ * current execution finishes, introducing an unwanted dependency.
++ *
++ * This function checks the work item address, work function and workqueue
++ * to avoid false positives.  Note that this isn't complete as one may
++ * construct a work function which can introduce dependency onto itself
++ * through a recycled work item.  Well, if somebody wants to shoot oneself
++ * in the foot that badly, there's only so much we can do, and if such
++ * deadlock actually occurs, it should be easy to locate the culprit work
++ * function.
+  *
+  * CONTEXT:
+  * spin_lock_irq(gcwq->lock).
+@@ -2168,7 +2188,6 @@ __acquires(&gcwq->lock)
+ 	struct global_cwq *gcwq = pool->gcwq;
+ 	struct hlist_head *bwh = busy_worker_head(gcwq, work);
+ 	bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
+-	work_func_t f = work->func;
+ 	int work_color;
+ 	struct worker *collision;
+ #ifdef CONFIG_LOCKDEP
+@@ -2208,6 +2227,7 @@ __acquires(&gcwq->lock)
+ 	debug_work_deactivate(work);
+ 	hlist_add_head(&worker->hentry, bwh);
+ 	worker->current_work = work;
++	worker->current_func = work->func;
+ 	worker->current_cwq = cwq;
+ 	work_color = get_work_color(work);
+ 
+@@ -2240,7 +2260,7 @@ __acquires(&gcwq->lock)
+ 	lock_map_acquire_read(&cwq->wq->lockdep_map);
+ 	lock_map_acquire(&lockdep_map);
+ 	trace_workqueue_execute_start(work);
+-	f(work);
++	worker->current_func(work);
+ 	/*
+ 	 * While we must be careful to not use "work" after this, the trace
+ 	 * point will only record its address.
+@@ -2252,7 +2272,8 @@ __acquires(&gcwq->lock)
+ 	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
+ 		pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
+ 		       "     last function: %pf\n",
+-		       current->comm, preempt_count(), task_pid_nr(current), f);
++		       current->comm, preempt_count(), task_pid_nr(current),
++		       worker->current_func);
+ 		debug_show_held_locks(current);
+ 		dump_stack();
+ 	}
+@@ -2266,6 +2287,7 @@ __acquires(&gcwq->lock)
+ 	/* we're done with it, release */
+ 	hlist_del_init(&worker->hentry);
+ 	worker->current_work = NULL;
++	worker->current_func = NULL;
+ 	worker->current_cwq = NULL;
+ 	cwq_dec_nr_in_flight(cwq, work_color);
+ }
+diff --git a/lib/idr.c b/lib/idr.c
+index 6482390..ca5aa00 100644
+--- a/lib/idr.c
++++ b/lib/idr.c
+@@ -625,7 +625,14 @@ void *idr_get_next(struct idr *idp, int *nextidp)
+ 			return p;
+ 		}
+ 
+-		id += 1 << n;
++		/*
++		 * Proceed to the next layer at the current level.  Unlike
++		 * idr_for_each(), @id isn't guaranteed to be aligned to
++		 * layer boundary at this point and adding 1 << n may
++		 * incorrectly skip IDs.  Make sure we jump to the
++		 * beginning of the next layer using round_up().
++		 */
++		id = round_up(id + 1, 1 << n);
+ 		while (n < fls(id)) {
+ 			n += IDR_BITS;
+ 			p = *--paa;
+diff --git a/mm/mmap.c b/mm/mmap.c
+index d1e4124..8832b87 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -2169,9 +2169,28 @@ int expand_downwards(struct vm_area_struct *vma,
+ 	return error;
+ }
+ 
++/*
++ * Note how expand_stack() refuses to expand the stack all the way to
++ * abut the next virtual mapping, *unless* that mapping itself is also
++ * a stack mapping. We want to leave room for a guard page, after all
++ * (the guard page itself is not added here, that is done by the
++ * actual page faulting logic)
++ *
++ * This matches the behavior of the guard page logic (see mm/memory.c:
++ * check_stack_guard_page()), which only allows the guard page to be
++ * removed under these circumstances.
++ */
+ #ifdef CONFIG_STACK_GROWSUP
+ int expand_stack(struct vm_area_struct *vma, unsigned long address)
+ {
++	struct vm_area_struct *next;
++
++	address &= PAGE_MASK;
++	next = vma->vm_next;
++	if (next && next->vm_start == address + PAGE_SIZE) {
++		if (!(next->vm_flags & VM_GROWSUP))
++			return -ENOMEM;
++	}
+ 	return expand_upwards(vma, address);
+ }
+ 
+@@ -2194,6 +2213,14 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
+ #else
+ int expand_stack(struct vm_area_struct *vma, unsigned long address)
+ {
++	struct vm_area_struct *prev;
++
++	address &= PAGE_MASK;
++	prev = vma->vm_prev;
++	if (prev && prev->vm_end == address) {
++		if (!(prev->vm_flags & VM_GROWSDOWN))
++			return -ENOMEM;
++	}
+ 	return expand_downwards(vma, address);
+ }
+ 
+diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
+index dbf12ac..2d34b6b 100644
+--- a/net/sunrpc/svc.c
++++ b/net/sunrpc/svc.c
+@@ -515,15 +515,6 @@ EXPORT_SYMBOL_GPL(svc_create_pooled);
+ 
+ void svc_shutdown_net(struct svc_serv *serv, struct net *net)
+ {
+-	/*
+-	 * The set of xprts (contained in the sv_tempsocks and
+-	 * sv_permsocks lists) is now constant, since it is modified
+-	 * only by accepting new sockets (done by service threads in
+-	 * svc_recv) or aging old ones (done by sv_temptimer), or
+-	 * configuration changes (excluded by whatever locking the
+-	 * caller is using--nfsd_mutex in the case of nfsd).  So it's
+-	 * safe to traverse those lists and shut everything down:
+-	 */
+ 	svc_close_net(serv, net);
+ 
+ 	if (serv->sv_shutdown)
+diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
+index b8e47fa..ca71056 100644
+--- a/net/sunrpc/svc_xprt.c
++++ b/net/sunrpc/svc_xprt.c
+@@ -856,7 +856,6 @@ static void svc_age_temp_xprts(unsigned long closure)
+ 	struct svc_serv *serv = (struct svc_serv *)closure;
+ 	struct svc_xprt *xprt;
+ 	struct list_head *le, *next;
+-	LIST_HEAD(to_be_aged);
+ 
+ 	dprintk("svc_age_temp_xprts\n");
+ 
+@@ -877,25 +876,15 @@ static void svc_age_temp_xprts(unsigned long closure)
+ 		if (atomic_read(&xprt->xpt_ref.refcount) > 1 ||
+ 		    test_bit(XPT_BUSY, &xprt->xpt_flags))
+ 			continue;
+-		svc_xprt_get(xprt);
+-		list_move(le, &to_be_aged);
++		list_del_init(le);
+ 		set_bit(XPT_CLOSE, &xprt->xpt_flags);
+ 		set_bit(XPT_DETACHED, &xprt->xpt_flags);
+-	}
+-	spin_unlock_bh(&serv->sv_lock);
+-
+-	while (!list_empty(&to_be_aged)) {
+-		le = to_be_aged.next;
+-		/* fiddling the xpt_list node is safe 'cos we're XPT_DETACHED */
+-		list_del_init(le);
+-		xprt = list_entry(le, struct svc_xprt, xpt_list);
+-
+ 		dprintk("queuing xprt %p for closing\n", xprt);
+ 
+ 		/* a thread will dequeue and close it soon */
+ 		svc_xprt_enqueue(xprt);
+-		svc_xprt_put(xprt);
+ 	}
++	spin_unlock_bh(&serv->sv_lock);
+ 
+ 	mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
+ }
+@@ -959,21 +948,24 @@ void svc_close_xprt(struct svc_xprt *xprt)
+ }
+ EXPORT_SYMBOL_GPL(svc_close_xprt);
+ 
+-static void svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
++static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
+ {
+ 	struct svc_xprt *xprt;
++	int ret = 0;
+ 
+ 	spin_lock(&serv->sv_lock);
+ 	list_for_each_entry(xprt, xprt_list, xpt_list) {
+ 		if (xprt->xpt_net != net)
+ 			continue;
++		ret++;
+ 		set_bit(XPT_CLOSE, &xprt->xpt_flags);
+-		set_bit(XPT_BUSY, &xprt->xpt_flags);
++		svc_xprt_enqueue(xprt);
+ 	}
+ 	spin_unlock(&serv->sv_lock);
++	return ret;
+ }
+ 
+-static void svc_clear_pools(struct svc_serv *serv, struct net *net)
++static struct svc_xprt *svc_dequeue_net(struct svc_serv *serv, struct net *net)
+ {
+ 	struct svc_pool *pool;
+ 	struct svc_xprt *xprt;
+@@ -988,42 +980,46 @@ static void svc_clear_pools(struct svc_serv *serv, struct net *net)
+ 			if (xprt->xpt_net != net)
+ 				continue;
+ 			list_del_init(&xprt->xpt_ready);
++			spin_unlock_bh(&pool->sp_lock);
++			return xprt;
+ 		}
+ 		spin_unlock_bh(&pool->sp_lock);
+ 	}
++	return NULL;
+ }
+ 
+-static void svc_clear_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
++static void svc_clean_up_xprts(struct svc_serv *serv, struct net *net)
+ {
+ 	struct svc_xprt *xprt;
+-	struct svc_xprt *tmp;
+-	LIST_HEAD(victims);
+-
+-	spin_lock(&serv->sv_lock);
+-	list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
+-		if (xprt->xpt_net != net)
+-			continue;
+-		list_move(&xprt->xpt_list, &victims);
+-	}
+-	spin_unlock(&serv->sv_lock);
+ 
+-	list_for_each_entry_safe(xprt, tmp, &victims, xpt_list)
++	while ((xprt = svc_dequeue_net(serv, net))) {
++		set_bit(XPT_CLOSE, &xprt->xpt_flags);
+ 		svc_delete_xprt(xprt);
++	}
+ }
+ 
++/*
++ * Server threads may still be running (especially in the case where the
++ * service is still running in other network namespaces).
++ *
++ * So we shut down sockets the same way we would on a running server, by
++ * setting XPT_CLOSE, enqueuing, and letting a thread pick it up to do
++ * the close.  In the case where there are no such other threads
++ * running, svc_clean_up_xprts() does a simple version of a
++ * server's main event loop, and in the case where there are other
++ * threads, we may need to wait a little while and then check again to
++ * see if they're done.
++ */
+ void svc_close_net(struct svc_serv *serv, struct net *net)
+ {
+-	svc_close_list(serv, &serv->sv_tempsocks, net);
+-	svc_close_list(serv, &serv->sv_permsocks, net);
++	int delay = 0;
+ 
+-	svc_clear_pools(serv, net);
+-	/*
+-	 * At this point the sp_sockets lists will stay empty, since
+-	 * svc_xprt_enqueue will not add new entries without taking the
+-	 * sp_lock and checking XPT_BUSY.
+-	 */
+-	svc_clear_list(serv, &serv->sv_tempsocks, net);
+-	svc_clear_list(serv, &serv->sv_permsocks, net);
++	while (svc_close_list(serv, &serv->sv_permsocks, net) +
++	       svc_close_list(serv, &serv->sv_tempsocks, net)) {
++
++		svc_clean_up_xprts(serv, net);
++		msleep(delay++);
++	}
+ }
+ 
+ /*
+diff --git a/sound/pci/bt87x.c b/sound/pci/bt87x.c
+index cdd100d..9febe55 100644
+--- a/sound/pci/bt87x.c
++++ b/sound/pci/bt87x.c
+@@ -836,6 +836,8 @@ static struct {
+ 	{0x7063, 0x2000}, /* pcHDTV HD-2000 TV */
+ };
+ 
++static struct pci_driver driver;
++
+ /* return the id of the card, or a negative value if it's blacklisted */
+ static int snd_bt87x_detect_card(struct pci_dev *pci)
+ {
+@@ -962,11 +964,24 @@ static DEFINE_PCI_DEVICE_TABLE(snd_bt87x_default_ids) = {
+ 	{ }
+ };
+ 
+-static struct pci_driver bt87x_driver = {
++static struct pci_driver driver = {
+ 	.name = KBUILD_MODNAME,
+ 	.id_table = snd_bt87x_ids,
+ 	.probe = snd_bt87x_probe,
+ 	.remove = snd_bt87x_remove,
+ };
+ 
+-module_pci_driver(bt87x_driver);
++static int __init alsa_card_bt87x_init(void)
++{
++	if (load_all)
++		driver.id_table = snd_bt87x_default_ids;
++	return pci_register_driver(&driver);
++}
++
++static void __exit alsa_card_bt87x_exit(void)
++{
++	pci_unregister_driver(&driver);
++}
++
++module_init(alsa_card_bt87x_init)
++module_exit(alsa_card_bt87x_exit)
+diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c
+index a7c296a..e6b0166 100644
+--- a/sound/pci/emu10k1/emu10k1_main.c
++++ b/sound/pci/emu10k1/emu10k1_main.c
+@@ -862,6 +862,12 @@ static int snd_emu10k1_emu1010_init(struct snd_emu10k1 *emu)
+ 			   filename, emu->firmware->size);
+ 	}
+ 
++	err = snd_emu1010_load_firmware(emu);
++	if (err != 0) {
++		snd_printk(KERN_INFO "emu1010: Loading Firmware failed\n");
++		return err;
++	}
++
+ 	/* ID, should read & 0x7f = 0x55 when FPGA programmed. */
+ 	snd_emu1010_fpga_read(emu, EMU_HANA_ID, &reg);
+ 	if ((reg & 0x3f) != 0x15) {
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index b14813d..c690b2a 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -1573,6 +1573,9 @@ static int generic_hdmi_build_jack(struct hda_codec *codec, int pin_idx)
+ 
+ 	if (pcmdev > 0)
+ 		sprintf(hdmi_str + strlen(hdmi_str), ",pcm=%d", pcmdev);
++	if (!is_jack_detectable(codec, per_pin->pin_nid))
++		strncat(hdmi_str, " Phantom",
++			sizeof(hdmi_str) - strlen(hdmi_str) - 1);
+ 
+ 	return snd_hda_jack_add_kctl(codec, per_pin->pin_nid, hdmi_str, 0);
+ }

diff --git a/3.8.2/4420_grsecurity-2.9.1-3.8.1-201303012255.patch b/3.8.2/4420_grsecurity-2.9.1-3.8.2-201303041742.patch
similarity index 99%
rename from 3.8.2/4420_grsecurity-2.9.1-3.8.1-201303012255.patch
rename to 3.8.2/4420_grsecurity-2.9.1-3.8.2-201303041742.patch
index b69296b..c57c85d 100644
--- a/3.8.2/4420_grsecurity-2.9.1-3.8.1-201303012255.patch
+++ b/3.8.2/4420_grsecurity-2.9.1-3.8.2-201303041742.patch
@@ -223,10 +223,10 @@ index b89a739..dba90c5 100644
 +zconf.lex.c
  zoffset.h
 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
-index 6c72381..2fe9ae4 100644
+index 986614d..0afd461 100644
 --- a/Documentation/kernel-parameters.txt
 +++ b/Documentation/kernel-parameters.txt
-@@ -917,6 +917,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+@@ -922,6 +922,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
  			Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
  			Default: 1024
  
@@ -237,7 +237,7 @@ index 6c72381..2fe9ae4 100644
  	hashdist=	[KNL,NUMA] Large hashes allocated during boot
  			are distributed across NUMA nodes.  Defaults on
  			for 64-bit NUMA, off otherwise.
-@@ -2116,6 +2120,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+@@ -2121,6 +2125,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
  			the specified number of seconds.  This is to be used if
  			your oopses keep scrolling off the screen.
  
@@ -252,7 +252,7 @@ index 6c72381..2fe9ae4 100644
  
  	pcd.		[PARIDE]
 diff --git a/Makefile b/Makefile
-index 746c856..c014cfa 100644
+index 20d5318..19c7540 100644
 --- a/Makefile
 +++ b/Makefile
 @@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -2307,20 +2307,22 @@ index 96ee092..37f1844 100644
  #define PSR_ENDIAN_MASK	0x00000200	/* Endianness state mask */
  
 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
-index 60d3b73..9168db0 100644
+index 60d3b73..d27ee09 100644
 --- a/arch/arm/kernel/armksyms.c
 +++ b/arch/arm/kernel/armksyms.c
-@@ -89,8 +89,8 @@ EXPORT_SYMBOL(__memzero);
+@@ -89,9 +89,9 @@ EXPORT_SYMBOL(__memzero);
  #ifdef CONFIG_MMU
  EXPORT_SYMBOL(copy_page);
  
 -EXPORT_SYMBOL(__copy_from_user);
 -EXPORT_SYMBOL(__copy_to_user);
+-EXPORT_SYMBOL(__clear_user);
 +EXPORT_SYMBOL(___copy_from_user);
 +EXPORT_SYMBOL(___copy_to_user);
- EXPORT_SYMBOL(__clear_user);
++EXPORT_SYMBOL(___clear_user);
  
  EXPORT_SYMBOL(__get_user_1);
+ EXPORT_SYMBOL(__get_user_2);
 diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
 index 0f82098..3dbd3ee 100644
 --- a/arch/arm/kernel/entry-armv.S
@@ -3267,6 +3269,30 @@ index 4653efb..8c60bf7 100644
  
  /* omap_hwmod_list contains all registered struct omap_hwmods */
  static LIST_HEAD(omap_hwmod_list);
+diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
+index 7c2b4ed..b2ea51f 100644
+--- a/arch/arm/mach-omap2/wd_timer.c
++++ b/arch/arm/mach-omap2/wd_timer.c
+@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
+ 	struct omap_hwmod *oh;
+ 	char *oh_name = "wd_timer2";
+ 	char *dev_name = "omap_wdt";
+-	struct omap_wd_timer_platform_data pdata;
++	static struct omap_wd_timer_platform_data pdata = {
++		.read_reset_sources = prm_read_reset_sources
++	};
+ 
+ 	if (!cpu_class_is_omap2() || of_have_populated_dt())
+ 		return 0;
+@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
+ 		return -EINVAL;
+ 	}
+ 
+-	pdata.read_reset_sources = prm_read_reset_sources;
+-
+ 	pdev = omap_device_build(dev_name, id, oh, &pdata,
+ 				 sizeof(struct omap_wd_timer_platform_data),
+ 				 NULL, 0, 0);
 diff --git a/arch/arm/mach-ux500/include/mach/setup.h b/arch/arm/mach-ux500/include/mach/setup.h
 index 6be4c4d..32ac32a 100644
 --- a/arch/arm/mach-ux500/include/mach/setup.h
@@ -10070,10 +10096,10 @@ index 8a84501..b2d165f 100644
  KBUILD_AFLAGS  := $(KBUILD_CFLAGS) -D__ASSEMBLY__
  GCOV_PROFILE := n
 diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
-index f8fa411..c570c53 100644
+index c205035..5853587 100644
 --- a/arch/x86/boot/compressed/eboot.c
 +++ b/arch/x86/boot/compressed/eboot.c
-@@ -145,7 +145,6 @@ again:
+@@ -150,7 +150,6 @@ again:
  		*addr = max_addr;
  	}
  
@@ -10081,7 +10107,7 @@ index f8fa411..c570c53 100644
  	efi_call_phys1(sys_table->boottime->free_pool, map);
  
  fail:
-@@ -209,7 +208,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align,
+@@ -214,7 +213,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align,
  	if (i == map_size / desc_size)
  		status = EFI_NOT_FOUND;
  
@@ -16204,18 +16230,9 @@ index ef5ccca..bd83949 100644
  }
  
 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
-index b994cc8..812b537 100644
+index cbf5121..812b537 100644
 --- a/arch/x86/kernel/apic/apic.c
 +++ b/arch/x86/kernel/apic/apic.c
-@@ -131,7 +131,7 @@ static int __init parse_lapic(char *arg)
- {
- 	if (config_enabled(CONFIG_X86_32) && !arg)
- 		force_enable_local_apic = 1;
--	else if (!strncmp(arg, "notscdeadline", 13))
-+	else if (arg && !strncmp(arg, "notscdeadline", 13))
- 		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
- 	return 0;
- }
 @@ -189,7 +189,7 @@ int first_system_vector = 0xfe;
  /*
   * Debug level, exported for io_apic.c
@@ -19375,91 +19392,6 @@ index 1d41402..af9a46a 100644
  	if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
  		return -EFAULT;
  
-diff --git a/arch/x86/kernel/head.c b/arch/x86/kernel/head.c
-index 48d9d4e..992f442 100644
---- a/arch/x86/kernel/head.c
-+++ b/arch/x86/kernel/head.c
-@@ -5,8 +5,6 @@
- #include <asm/setup.h>
- #include <asm/bios_ebda.h>
- 
--#define BIOS_LOWMEM_KILOBYTES 0x413
--
- /*
-  * The BIOS places the EBDA/XBDA at the top of conventional
-  * memory, and usually decreases the reported amount of
-@@ -16,17 +14,30 @@
-  * chipset: reserve a page before VGA to prevent PCI prefetch
-  * into it (errata #56). Usually the page is reserved anyways,
-  * unless you have no PS/2 mouse plugged in.
-+ *
-+ * This functions is deliberately very conservative.  Losing
-+ * memory in the bottom megabyte is rarely a problem, as long
-+ * as we have enough memory to install the trampoline.  Using
-+ * memory that is in use by the BIOS or by some DMA device
-+ * the BIOS didn't shut down *is* a big problem.
-  */
-+
-+#define BIOS_LOWMEM_KILOBYTES	0x413
-+#define LOWMEM_CAP		0x9f000U	/* Absolute maximum */
-+#define INSANE_CUTOFF		0x20000U	/* Less than this = insane */
-+
- void __init reserve_ebda_region(void)
- {
- 	unsigned int lowmem, ebda_addr;
- 
--	/* To determine the position of the EBDA and the */
--	/* end of conventional memory, we need to look at */
--	/* the BIOS data area. In a paravirtual environment */
--	/* that area is absent. We'll just have to assume */
--	/* that the paravirt case can handle memory setup */
--	/* correctly, without our help. */
-+	/*
-+	 * To determine the position of the EBDA and the
-+	 * end of conventional memory, we need to look at
-+	 * the BIOS data area. In a paravirtual environment
-+	 * that area is absent. We'll just have to assume
-+	 * that the paravirt case can handle memory setup
-+	 * correctly, without our help.
-+	 */
- 	if (paravirt_enabled())
- 		return;
- 
-@@ -37,19 +48,23 @@ void __init reserve_ebda_region(void)
- 	/* start of EBDA area */
- 	ebda_addr = get_bios_ebda();
- 
--	/* Fixup: bios puts an EBDA in the top 64K segment */
--	/* of conventional memory, but does not adjust lowmem. */
--	if ((lowmem - ebda_addr) <= 0x10000)
--		lowmem = ebda_addr;
-+	/*
-+	 * Note: some old Dells seem to need 4k EBDA without
-+	 * reporting so, so just consider the memory above 0x9f000
-+	 * to be off limits (bugzilla 2990).
-+	 */
- 
--	/* Fixup: bios does not report an EBDA at all. */
--	/* Some old Dells seem to need 4k anyhow (bugzilla 2990) */
--	if ((ebda_addr == 0) && (lowmem >= 0x9f000))
--		lowmem = 0x9f000;
-+	/* If the EBDA address is below 128K, assume it is bogus */
-+	if (ebda_addr < INSANE_CUTOFF)
-+		ebda_addr = LOWMEM_CAP;
- 
--	/* Paranoia: should never happen, but... */
--	if ((lowmem == 0) || (lowmem >= 0x100000))
--		lowmem = 0x9f000;
-+	/* If lowmem is less than 128K, assume it is bogus */
-+	if (lowmem < INSANE_CUTOFF)
-+		lowmem = LOWMEM_CAP;
-+
-+	/* Use the lower of the lowmem and EBDA markers as the cutoff */
-+	lowmem = min(lowmem, ebda_addr);
-+	lowmem = min(lowmem, LOWMEM_CAP); /* Absolute cap */
- 
- 	/* reserve all memory between lowmem and the 1MB mark */
- 	memblock_reserve(lowmem, 0x100000 - lowmem);
 diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
 index c18f59d..9c0c9f6 100644
 --- a/arch/x86/kernel/head32.c
@@ -33418,10 +33350,10 @@ index 982f1f5..d21e5da 100644
  	iounmap(buf);
  	return 0;
 diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
-index f5596db..9355ce6 100644
+index bcb201c..f9782e5 100644
 --- a/drivers/firmware/efivars.c
 +++ b/drivers/firmware/efivars.c
-@@ -132,7 +132,7 @@ struct efivar_attribute {
+@@ -133,7 +133,7 @@ struct efivar_attribute {
  };
  
  static struct efivars __efivars;
@@ -34535,10 +34467,10 @@ index 8a8725c..afed796 100644
  			marker = list_first_entry(&queue->head,
  						 struct vmw_marker, head);
 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
-index eb2ee11..6cc50ab 100644
+index ceb3040..6160c5c 100644
 --- a/drivers/hid/hid-core.c
 +++ b/drivers/hid/hid-core.c
-@@ -2240,7 +2240,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
+@@ -2242,7 +2242,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
  
  int hid_add_device(struct hid_device *hdev)
  {
@@ -34547,7 +34479,7 @@ index eb2ee11..6cc50ab 100644
  	int ret;
  
  	if (WARN_ON(hdev->status & HID_STAT_ADDED))
-@@ -2274,7 +2274,7 @@ int hid_add_device(struct hid_device *hdev)
+@@ -2276,7 +2276,7 @@ int hid_add_device(struct hid_device *hdev)
  	/* XXX hack, any other cleaner solution after the driver core
  	 * is converted to allow more than 20 bytes as the device name? */
  	dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
@@ -36416,7 +36348,7 @@ index 404f63a..4796533 100644
  #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
  extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
 diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
-index 35cc526..9d90d83 100644
+index 8e9a668..78d6310 100644
 --- a/drivers/media/platform/omap/omap_vout.c
 +++ b/drivers/media/platform/omap/omap_vout.c
 @@ -63,7 +63,6 @@ enum omap_vout_channels {
@@ -36427,7 +36359,7 @@ index 35cc526..9d90d83 100644
  /* Variables configurable through module params*/
  static u32 video1_numbuffers = 3;
  static u32 video2_numbuffers = 3;
-@@ -1010,6 +1009,12 @@ static int omap_vout_open(struct file *file)
+@@ -1012,6 +1011,12 @@ static int omap_vout_open(struct file *file)
  {
  	struct videobuf_queue *q;
  	struct omap_vout_device *vout = NULL;
@@ -36440,7 +36372,7 @@ index 35cc526..9d90d83 100644
  
  	vout = video_drvdata(file);
  	v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
-@@ -1027,10 +1032,6 @@ static int omap_vout_open(struct file *file)
+@@ -1029,10 +1034,6 @@ static int omap_vout_open(struct file *file)
  	vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
  
  	q = &vout->vbq;
@@ -40439,10 +40371,10 @@ index 0d4aa82..f7832d4 100644
  
  /* core tmem accessor functions */
 diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
-index f2aa754..11337b1 100644
+index 96f4981..4daaa7e 100644
 --- a/drivers/target/target_core_device.c
 +++ b/drivers/target/target_core_device.c
-@@ -1375,7 +1375,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
+@@ -1370,7 +1370,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
  	spin_lock_init(&dev->se_port_lock);
  	spin_lock_init(&dev->se_tmr_lock);
  	spin_lock_init(&dev->qf_cmd_lock);
@@ -47537,7 +47469,7 @@ index b2a34a1..162fa69 100644
  	return rc;
  }
 diff --git a/fs/exec.c b/fs/exec.c
-index 20df02c..1dff97d 100644
+index 20df02c..5af5d91 100644
 --- a/fs/exec.c
 +++ b/fs/exec.c
 @@ -55,6 +55,17 @@
@@ -47606,8 +47538,8 @@ index 20df02c..1dff97d 100644
 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
 +		// only allow 512KB for argv+env on suid/sgid binaries
 +		// to prevent easy ASLR exhaustion
-+		if (((bprm->cred->euid != current_euid()) ||
-+		     (bprm->cred->egid != current_egid())) &&
++		if (((!uid_eq(bprm->cred->euid, current_euid())) ||
++		     (!gid_eq(bprm->cred->egid, current_egid()))) &&
 +		    (size > (512 * 1024))) {
 +			put_page(page);
 +			return NULL;
@@ -47930,7 +47862,7 @@ index 20df02c..1dff97d 100644
 +	/* limit suid stack to 8MB
 +	 * we saved the old limits above and will restore them if this exec fails
 +	 */
-+	if (((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid())) &&
++	if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
 +	    (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
 +		current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
 +#endif
@@ -48289,10 +48221,10 @@ index 22548f5..41521d8 100644
  	}
  	return 1;
 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
-index cf18217..8f6b9c3 100644
+index 2f2e0da..89b113a 100644
 --- a/fs/ext4/balloc.c
 +++ b/fs/ext4/balloc.c
-@@ -498,8 +498,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
+@@ -505,8 +505,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
  	/* Hm, nope.  Are (enough) root reserved clusters available? */
  	if (uid_eq(sbi->s_resuid, current_fsuid()) ||
  	    (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
@@ -48338,7 +48270,7 @@ index 8462eb3..4a71af6 100644
  
  	/* locality groups */
 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
-index 1bf6fe7..1a5bdef 100644
+index 061727a..7622abf 100644
 --- a/fs/ext4/mballoc.c
 +++ b/fs/ext4/mballoc.c
 @@ -1747,7 +1747,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
@@ -50125,10 +50057,10 @@ index e83351a..41e3c9c 100644
  		if (!ret)
  			ret = -EPIPE;
 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
-index b7c09f9..3eff736 100644
+index 315e1f8..91f890c 100644
 --- a/fs/fuse/dir.c
 +++ b/fs/fuse/dir.c
-@@ -1226,7 +1226,7 @@ static char *read_link(struct dentry *dentry)
+@@ -1233,7 +1233,7 @@ static char *read_link(struct dentry *dentry)
  	return link;
  }
  
@@ -51152,7 +51084,7 @@ index d355e6e..578d905 100644
  
  enum ocfs2_local_alloc_state
 diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
-index f169da4..9112253 100644
+index b7e74b5..19c6536 100644
 --- a/fs/ocfs2/suballoc.c
 +++ b/fs/ocfs2/suballoc.c
 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
@@ -51164,7 +51096,7 @@ index f169da4..9112253 100644
  
  		/* You should never ask for this much metadata */
  		BUG_ON(bits_wanted >
-@@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
+@@ -2007,7 +2007,7 @@ int ocfs2_claim_metadata(handle_t *handle,
  		mlog_errno(status);
  		goto bail;
  	}
@@ -51173,7 +51105,7 @@ index f169da4..9112253 100644
  
  	*suballoc_loc = res.sr_bg_blkno;
  	*suballoc_bit_start = res.sr_bit_offset;
-@@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
+@@ -2171,7 +2171,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
  	trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
  					   res->sr_bits);
  
@@ -51182,7 +51114,7 @@ index f169da4..9112253 100644
  
  	BUG_ON(res->sr_bits != 1);
  
-@@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
+@@ -2213,7 +2213,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
  		mlog_errno(status);
  		goto bail;
  	}
@@ -51191,7 +51123,7 @@ index f169da4..9112253 100644
  
  	BUG_ON(res.sr_bits != 1);
  
-@@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
+@@ -2317,7 +2317,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
  						      cluster_start,
  						      num_clusters);
  		if (!status)
@@ -51200,7 +51132,7 @@ index f169da4..9112253 100644
  	} else {
  		if (min_clusters > (osb->bitmap_cpg - 1)) {
  			/* The only paths asking for contiguousness
-@@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
+@@ -2343,7 +2343,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
  				ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
  								 res.sr_bg_blkno,
  								 res.sr_bit_offset);
@@ -51672,7 +51604,7 @@ index 6a91e6f..e54dbc14 100644
  static struct pid *
  get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
 diff --git a/fs/proc/base.c b/fs/proc/base.c
-index 9b43ff77..3d6a99f 100644
+index 9b43ff77..ba3e990 100644
 --- a/fs/proc/base.c
 +++ b/fs/proc/base.c
 @@ -111,6 +111,14 @@ struct pid_entry {
@@ -51790,7 +51722,7 @@ index 9b43ff77..3d6a99f 100644
 +		const struct cred *tmpcred = current_cred();
 +		const struct cred *cred = __task_cred(task);
 +
-+		if (!tmpcred->uid || (tmpcred->uid == cred->uid)
++		if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
 +			|| in_group_p(grsec_proc_gid)
 +#endif
@@ -52294,7 +52226,7 @@ index b1822dd..df622cb 100644
  
  	seq_putc(m, '\n');
 diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
-index fe72cd0..cb9b67d 100644
+index fe72cd0..21b52ff 100644
 --- a/fs/proc/proc_net.c
 +++ b/fs/proc/proc_net.c
 @@ -23,6 +23,7 @@
@@ -52314,10 +52246,10 @@ index fe72cd0..cb9b67d 100644
 +#endif
 +
 +#ifdef CONFIG_GRKERNSEC_PROC_USER
-+	if (cred->fsuid)
++	if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
 +		return net;
 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
-+	if (cred->fsuid && !in_group_p(grsec_proc_gid))
++	if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
 +		return net;
 +#endif
  
@@ -53336,10 +53268,10 @@ index 9fbea87..6b19972 100644
  	struct posix_acl *acl;
  	struct posix_acl_entry *acl_e;
 diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
-index cdb2d33..704ce7f 100644
+index 572a858..12a9b0d 100644
 --- a/fs/xfs/xfs_bmap.c
 +++ b/fs/xfs/xfs_bmap.c
-@@ -189,7 +189,7 @@ xfs_bmap_validate_ret(
+@@ -192,7 +192,7 @@ xfs_bmap_validate_ret(
  	int			nmap,
  	int			ret_nmap);
  #else
@@ -54468,10 +54400,10 @@ index 0000000..1b9afa9
 +endif
 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
 new file mode 100644
-index 0000000..69e1320
+index 0000000..6b7b8f7
 --- /dev/null
 +++ b/grsecurity/gracl.c
-@@ -0,0 +1,4019 @@
+@@ -0,0 +1,4067 @@
 +#include <linux/kernel.h>
 +#include <linux/module.h>
 +#include <linux/sched.h>
@@ -56513,7 +56445,7 @@ index 0000000..69e1320
 +	const struct cred *cred = current_cred();
 +
 +	security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
-+		       cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
++		       GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
 +		       task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
 +		       1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
 +
@@ -56521,16 +56453,29 @@ index 0000000..69e1320
 +}
 +
 +static void
-+gr_log_learn_id_change(const char type, const unsigned int real, 
-+		       const unsigned int effective, const unsigned int fs)
++gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
 +{
 +	struct task_struct *task = current;
 +	const struct cred *cred = current_cred();
 +
 +	security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
-+		       cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
++		       GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
 +		       task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
-+		       type, real, effective, fs, &task->signal->saved_ip);
++		       'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
++
++	return;
++}
++
++static void
++gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
++{
++	struct task_struct *task = current;
++	const struct cred *cred = current_cred();
++
++	security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
++		       GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
++		       task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
++		       'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
 +
 +	return;
 +}
@@ -56803,23 +56748,28 @@ index 0000000..69e1320
 +extern int __gr_process_user_ban(struct user_struct *user);
 +
 +int
-+gr_check_user_change(int real, int effective, int fs)
++gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
 +{
 +	unsigned int i;
 +	__u16 num;
 +	uid_t *uidlist;
-+	int curuid;
++	uid_t curuid;
 +	int realok = 0;
 +	int effectiveok = 0;
 +	int fsok = 0;
++	uid_t globalreal, globaleffective, globalfs;
 +
 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
 +	struct user_struct *user;
 +
-+	if (real == -1)
++	if (!uid_valid(real))
 +		goto skipit;
 +
-+	user = find_user(real);
++	/* find user based on global namespace */
++
++	globalreal = GR_GLOBAL_UID(real);
++
++	user = find_user(make_kuid(&init_user_ns, globalreal));
 +	if (user == NULL)
 +		goto skipit;
 +
@@ -56839,7 +56789,7 @@ index 0000000..69e1320
 +		return 0;
 +
 +	if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
-+		gr_log_learn_id_change('u', real, effective, fs);
++		gr_log_learn_uid_change(real, effective, fs);
 +
 +	num = current->acl->user_trans_num;
 +	uidlist = current->acl->user_transitions;
@@ -56847,31 +56797,43 @@ index 0000000..69e1320
 +	if (uidlist == NULL)
 +		return 0;
 +
-+	if (real == -1)
++	if (!uid_valid(real)) {
 +		realok = 1;
-+	if (effective == -1)
++		globalreal = (uid_t)-1;		
++	} else {
++		globalreal = GR_GLOBAL_UID(real);		
++	}
++	if (!uid_valid(effective)) {
 +		effectiveok = 1;
-+	if (fs == -1)
++		globaleffective = (uid_t)-1;
++	} else {
++		globaleffective = GR_GLOBAL_UID(effective);
++	}
++	if (!uid_valid(fs)) {
 +		fsok = 1;
++		globalfs = (uid_t)-1;
++	} else {
++		globalfs = GR_GLOBAL_UID(fs);
++	}
 +
 +	if (current->acl->user_trans_type & GR_ID_ALLOW) {
 +		for (i = 0; i < num; i++) {
-+			curuid = (int)uidlist[i];
-+			if (real == curuid)
++			curuid = uidlist[i];
++			if (globalreal == curuid)
 +				realok = 1;
-+			if (effective == curuid)
++			if (globaleffective == curuid)
 +				effectiveok = 1;
-+			if (fs == curuid)
++			if (globalfs == curuid)
 +				fsok = 1;
 +		}
 +	} else if (current->acl->user_trans_type & GR_ID_DENY) {
 +		for (i = 0; i < num; i++) {
-+			curuid = (int)uidlist[i];
-+			if (real == curuid)
++			curuid = uidlist[i];
++			if (globalreal == curuid)
 +				break;
-+			if (effective == curuid)
++			if (globaleffective == curuid)
 +				break;
-+			if (fs == curuid)
++			if (globalfs == curuid)
 +				break;
 +		}
 +		/* not in deny list */
@@ -56885,27 +56847,28 @@ index 0000000..69e1320
 +	if (realok && effectiveok && fsok)
 +		return 0;
 +	else {
-+		gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
++		gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
 +		return 1;
 +	}
 +}
 +
 +int
-+gr_check_group_change(int real, int effective, int fs)
++gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
 +{
 +	unsigned int i;
 +	__u16 num;
 +	gid_t *gidlist;
-+	int curgid;
++	gid_t curgid;
 +	int realok = 0;
 +	int effectiveok = 0;
 +	int fsok = 0;
++	gid_t globalreal, globaleffective, globalfs;
 +
 +	if (unlikely(!(gr_status & GR_READY)))
 +		return 0;
 +
 +	if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
-+		gr_log_learn_id_change('g', real, effective, fs);
++		gr_log_learn_gid_change(real, effective, fs);
 +
 +	num = current->acl->group_trans_num;
 +	gidlist = current->acl->group_transitions;
@@ -56913,31 +56876,43 @@ index 0000000..69e1320
 +	if (gidlist == NULL)
 +		return 0;
 +
-+	if (real == -1)
++	if (!gid_valid(real)) {
 +		realok = 1;
-+	if (effective == -1)
++		globalreal = (gid_t)-1;		
++	} else {
++		globalreal = GR_GLOBAL_GID(real);
++	}
++	if (!gid_valid(effective)) {
 +		effectiveok = 1;
-+	if (fs == -1)
++		globaleffective = (gid_t)-1;		
++	} else {
++		globaleffective = GR_GLOBAL_GID(effective);
++	}
++	if (!gid_valid(fs)) {
 +		fsok = 1;
++		globalfs = (gid_t)-1;		
++	} else {
++		globalfs = GR_GLOBAL_GID(fs);
++	}
 +
 +	if (current->acl->group_trans_type & GR_ID_ALLOW) {
 +		for (i = 0; i < num; i++) {
-+			curgid = (int)gidlist[i];
-+			if (real == curgid)
++			curgid = gidlist[i];
++			if (globalreal == curgid)
 +				realok = 1;
-+			if (effective == curgid)
++			if (globaleffective == curgid)
 +				effectiveok = 1;
-+			if (fs == curgid)
++			if (globalfs == curgid)
 +				fsok = 1;
 +		}
 +	} else if (current->acl->group_trans_type & GR_ID_DENY) {
 +		for (i = 0; i < num; i++) {
-+			curgid = (int)gidlist[i];
-+			if (real == curgid)
++			curgid = gidlist[i];
++			if (globalreal == curgid)
 +				break;
-+			if (effective == curgid)
++			if (globaleffective == curgid)
 +				break;
-+			if (fs == curgid)
++			if (globalfs == curgid)
 +				break;
 +		}
 +		/* not in deny list */
@@ -56951,7 +56926,7 @@ index 0000000..69e1320
 +	if (realok && effectiveok && fsok)
 +		return 0;
 +	else {
-+		gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
++		gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
 +		return 1;
 +	}
 +}
@@ -56959,16 +56934,21 @@ index 0000000..69e1320
 +extern int gr_acl_is_capable(const int cap);
 +
 +void
-+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
++gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
 +{
 +	struct acl_role_label *role = task->role;
 +	struct acl_subject_label *subj = NULL;
 +	struct acl_object_label *obj;
 +	struct file *filp;
++	uid_t uid;
++	gid_t gid;
 +
 +	if (unlikely(!(gr_status & GR_READY)))
 +		return;
 +
++	uid = GR_GLOBAL_UID(kuid);
++	gid = GR_GLOBAL_GID(kgid);
++
 +	filp = task->exec_file;
 +
 +	/* kernel process, we'll give them the kernel role */
@@ -57922,7 +57902,7 @@ index 0000000..69e1320
 +
 +		if (task->exec_file) {
 +			cred = __task_cred(task);
-+			task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
++			task->role = lookup_acl_role_label(task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
 +			ret = gr_apply_subject_to_task(task);
 +			if (ret) {
 +				read_unlock(&grsec_exec_file_lock);
@@ -58005,7 +57985,7 @@ index 0000000..69e1320
 +		rcu_read_lock();
 +		cred = __task_cred(task);
 +		security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
-+			       task->role->roletype, cred->uid, cred->gid, acl->filename,
++			       task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
 +			       acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
 +			       "", (unsigned long) res, &task->signal->saved_ip);
 +		rcu_read_unlock();
@@ -58604,7 +58584,7 @@ index 0000000..34fefda
 +}
 diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
 new file mode 100644
-index 0000000..6d21049
+index 0000000..bdd51ea
 --- /dev/null
 +++ b/grsecurity/gracl_cap.c
 @@ -0,0 +1,110 @@
@@ -58659,8 +58639,8 @@ index 0000000..6d21049
 +	if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
 +	    && cap_raised(cred->cap_effective, cap)) {
 +		security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
-+			       task->role->roletype, cred->uid,
-+			       cred->gid, task->exec_file ?
++			       task->role->roletype, GR_GLOBAL_UID(cred->uid),
++			       GR_GLOBAL_GID(cred->gid), task->exec_file ?
 +			       gr_to_filename(task->exec_file->f_path.dentry,
 +			       task->exec_file->f_path.mnt) : curracl->filename,
 +			       curracl->filename, 0UL,
@@ -59157,7 +59137,7 @@ index 0000000..a340c17
 +}
 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
 new file mode 100644
-index 0000000..58800a7
+index 0000000..4699807
 --- /dev/null
 +++ b/grsecurity/gracl_ip.c
 @@ -0,0 +1,384 @@
@@ -59277,8 +59257,8 @@ index 0000000..58800a7
 +		if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
 +			__u32 fakeip = 0;
 +			security_learn(GR_IP_LEARN_MSG, current->role->rolename,
-+				       current->role->roletype, cred->uid,
-+				       cred->gid, current->exec_file ?
++				       current->role->roletype, GR_GLOBAL_UID(cred->uid),
++				       GR_GLOBAL_GID(cred->gid), current->exec_file ?
 +				       gr_to_filename(current->exec_file->f_path.dentry,
 +				       current->exec_file->f_path.mnt) :
 +				       curr->filename, curr->filename,
@@ -59305,8 +59285,8 @@ index 0000000..58800a7
 +		if (type == SOCK_RAW || type == SOCK_PACKET) {
 +			__u32 fakeip = 0;
 +			security_learn(GR_IP_LEARN_MSG, current->role->rolename,
-+				       current->role->roletype, cred->uid,
-+				       cred->gid, current->exec_file ?
++				       current->role->roletype, GR_GLOBAL_UID(cred->uid),
++				       GR_GLOBAL_GID(cred->gid), current->exec_file ?
 +				       gr_to_filename(current->exec_file->f_path.dentry,
 +				       current->exec_file->f_path.mnt) :
 +				       curr->filename, curr->filename,
@@ -59315,8 +59295,8 @@ index 0000000..58800a7
 +		} else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
 +			__u32 fakeip = 0;
 +			security_learn(GR_IP_LEARN_MSG, current->role->rolename,
-+				       current->role->roletype, cred->uid,
-+				       cred->gid, current->exec_file ?
++				       current->role->roletype, GR_GLOBAL_UID(cred->uid),
++				       GR_GLOBAL_GID(cred->gid), current->exec_file ?
 +				       gr_to_filename(current->exec_file->f_path.dentry,
 +				       current->exec_file->f_path.mnt) :
 +				       curr->filename, curr->filename,
@@ -59412,8 +59392,8 @@ index 0000000..58800a7
 +
 +	if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
 +		security_learn(GR_IP_LEARN_MSG, current->role->rolename,
-+			       current->role->roletype, cred->uid,
-+			       cred->gid, current->exec_file ?
++			       current->role->roletype, GR_GLOBAL_UID(cred->uid),
++			       GR_GLOBAL_GID(cred->gid), current->exec_file ?
 +			       gr_to_filename(current->exec_file->f_path.dentry,
 +			       current->exec_file->f_path.mnt) :
 +			       curr->filename, curr->filename,
@@ -59834,10 +59814,10 @@ index 0000000..39645c9
 +}
 diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
 new file mode 100644
-index 0000000..25197e9
+index 0000000..10398db
 --- /dev/null
 +++ b/grsecurity/gracl_segv.c
-@@ -0,0 +1,299 @@
+@@ -0,0 +1,303 @@
 +#include <linux/kernel.h>
 +#include <linux/mm.h>
 +#include <asm/uaccess.h>
@@ -59939,9 +59919,10 @@ index 0000000..25197e9
 +}
 +
 +static __inline__ void
-+gr_insert_uid(const uid_t uid, const unsigned long expires)
++gr_insert_uid(const kuid_t kuid, const unsigned long expires)
 +{
 +	int loc;
++	uid_t uid = GR_GLOBAL_UID(kuid);
 +
 +	if (uid_used == GR_UIDTABLE_MAX)
 +		return;
@@ -59976,14 +59957,17 @@ index 0000000..25197e9
 +}
 +
 +int
-+gr_check_crash_uid(const uid_t uid)
++gr_check_crash_uid(const kuid_t kuid)
 +{
 +	int loc;
 +	int ret = 0;
++	uid_t uid;
 +
 +	if (unlikely(!gr_acl_is_enabled()))
 +		return 0;
 +
++	uid = GR_GLOBAL_UID(kuid);
++
 +	spin_lock(&gr_uid_lock);
 +	loc = gr_find_uid(uid);
 +
@@ -60006,8 +59990,8 @@ index 0000000..25197e9
 +	if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
 +	    !uid_eq(cred->uid, cred->fsuid))
 +		return 1;
-+	if (!uid_eq(cred->gid, cred->egid) || !uid_eq(cred->gid, cred->sgid) ||
-+	    !uid_eq(cred->gid, cred->fsgid))
++	if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
++	    !gid_eq(cred->gid, cred->fsgid))
 +		return 1;
 +
 +	return 0;
@@ -60139,7 +60123,7 @@ index 0000000..25197e9
 +}
 diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
 new file mode 100644
-index 0000000..9d83a69
+index 0000000..120978a
 --- /dev/null
 +++ b/grsecurity/gracl_shm.c
 @@ -0,0 +1,40 @@
@@ -60154,7 +60138,7 @@ index 0000000..9d83a69
 +
 +int
 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
-+		const time_t shm_createtime, const uid_t cuid, const int shmid)
++		const time_t shm_createtime, const kuid_t cuid, const int shmid)
 +{
 +	struct task_struct *task;
 +
@@ -60175,7 +60159,7 @@ index 0000000..9d83a69
 +		     (task->acl != current->acl))) {
 +		read_unlock(&tasklist_lock);
 +		rcu_read_unlock();
-+		gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
++		gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
 +		return 0;
 +	}
 +	read_unlock(&tasklist_lock);
@@ -60573,7 +60557,7 @@ index 0000000..70fe0ae
 +}
 diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
 new file mode 100644
-index 0000000..e6796b3
+index 0000000..207d409
 --- /dev/null
 +++ b/grsecurity/grsec_disabled.c
 @@ -0,0 +1,434 @@
@@ -60716,7 +60700,7 @@ index 0000000..e6796b3
 +}
 +
 +int
-+gr_check_crash_uid(const uid_t uid)
++gr_check_crash_uid(const kuid_t uid)
 +{
 +	return 0;
 +}
@@ -60893,7 +60877,7 @@ index 0000000..e6796b3
 +
 +int
 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
-+		const time_t shm_createtime, const uid_t cuid, const int shmid)
++		const time_t shm_createtime, const kuid_t cuid, const int shmid)
 +{
 +	return 1;
 +}
@@ -60950,7 +60934,7 @@ index 0000000..e6796b3
 +}
 +
 +void
-+gr_set_role_label(const uid_t uid, const gid_t gid)
++gr_set_role_label(const kuid_t uid, const kgid_t gid)
 +{
 +	return;
 +}
@@ -60980,13 +60964,13 @@ index 0000000..e6796b3
 +}
 +
 +int
-+gr_check_user_change(int real, int effective, int fs)
++gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
 +{
 +	return 0;
 +}
 +
 +int
-+gr_check_group_change(int real, int effective, int fs)
++gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
 +{
 +	return 0;
 +}
@@ -61193,7 +61177,7 @@ index 0000000..abfa971
 +EXPORT_SYMBOL(gr_task_is_capable_nolog);
 diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
 new file mode 100644
-index 0000000..d3ee748
+index 0000000..06cc6ea
 --- /dev/null
 +++ b/grsecurity/grsec_fifo.c
 @@ -0,0 +1,24 @@
@@ -61212,10 +61196,10 @@ index 0000000..d3ee748
 +
 +	if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
 +	    !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
-+	    (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
-+	    (cred->fsuid != dentry->d_inode->i_uid)) {
++	    !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
++	    !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
 +		if (!inode_permission(dentry->d_inode, acc_mode))
-+			gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
++			gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
 +		return -EACCES;
 +	}
 +#endif
@@ -61252,7 +61236,7 @@ index 0000000..8ca18bf
 +}
 diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
 new file mode 100644
-index 0000000..05a6015
+index 0000000..a862e9f
 --- /dev/null
 +++ b/grsecurity/grsec_init.c
 @@ -0,0 +1,283 @@
@@ -61268,7 +61252,7 @@ index 0000000..05a6015
 +int grsec_enable_ptrace_readexec;
 +int grsec_enable_setxid;
 +int grsec_enable_symlinkown;
-+int grsec_symlinkown_gid;
++kgid_t grsec_symlinkown_gid;
 +int grsec_enable_brute;
 +int grsec_enable_link;
 +int grsec_enable_dmesg;
@@ -61281,7 +61265,7 @@ index 0000000..05a6015
 +int grsec_enable_time;
 +int grsec_enable_audit_textrel;
 +int grsec_enable_group;
-+int grsec_audit_gid;
++kgid_t grsec_audit_gid;
 +int grsec_enable_chdir;
 +int grsec_enable_mount;
 +int grsec_enable_rofs;
@@ -61300,7 +61284,7 @@ index 0000000..05a6015
 +int grsec_enable_chroot_sysctl;
 +int grsec_enable_chroot_unix;
 +int grsec_enable_tpe;
-+int grsec_tpe_gid;
++kgid_t grsec_tpe_gid;
 +int grsec_enable_blackhole;
 +#ifdef CONFIG_IPV6_MODULE
 +EXPORT_SYMBOL(grsec_enable_blackhole);
@@ -61309,11 +61293,11 @@ index 0000000..05a6015
 +int grsec_enable_tpe_all;
 +int grsec_enable_tpe_invert;
 +int grsec_enable_socket_all;
-+int grsec_socket_all_gid;
++kgid_t grsec_socket_all_gid;
 +int grsec_enable_socket_client;
-+int grsec_socket_client_gid;
++kgid_t grsec_socket_client_gid;
 +int grsec_enable_socket_server;
-+int grsec_socket_server_gid;
++kgid_t grsec_socket_server_gid;
 +int grsec_resource_logging;
 +int grsec_disable_privio;
 +int grsec_enable_log_rwxmaps;
@@ -61419,7 +61403,7 @@ index 0000000..05a6015
 +#endif
 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
 +	grsec_enable_group = 1;
-+	grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
++	grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
 +#endif
 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
 +	grsec_enable_ptrace_readexec = 1;
@@ -61514,26 +61498,26 @@ index 0000000..05a6015
 +#endif
 +#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
 +	grsec_enable_symlinkown = 1;
-+	grsec_symlinkown_gid = CONFIG_GRKERNSEC_SYMLINKOWN_GID;
++	grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
 +#endif
 +#ifdef CONFIG_GRKERNSEC_TPE
 +	grsec_enable_tpe = 1;
-+	grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
++	grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
 +	grsec_enable_tpe_all = 1;
 +#endif
 +#endif
 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
 +	grsec_enable_socket_all = 1;
-+	grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
++	grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
 +#endif
 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
 +	grsec_enable_socket_client = 1;
-+	grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
++	grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
 +#endif
 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
 +	grsec_enable_socket_server = 1;
-+	grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
++	grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
 +#endif
 +#endif
 +
@@ -61605,10 +61589,10 @@ index 0000000..6095407
 +}
 diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
 new file mode 100644
-index 0000000..7bd6c2b
+index 0000000..7c06085
 --- /dev/null
 +++ b/grsecurity/grsec_log.c
-@@ -0,0 +1,329 @@
+@@ -0,0 +1,326 @@
 +#include <linux/kernel.h>
 +#include <linux/sched.h>
 +#include <linux/file.h>
@@ -61624,9 +61608,6 @@ index 0000000..7bd6c2b
 +#define ENABLE_PREEMPT()
 +#endif
 +
-+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
-+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
-+
 +#define BEGIN_LOCKS(x) \
 +	DISABLE_PREEMPT(); \
 +	rcu_read_lock(); \
@@ -63107,7 +63088,7 @@ index 0000000..0dc13c3
 +EXPORT_SYMBOL(gr_log_timechange);
 diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
 new file mode 100644
-index 0000000..07e0dc0
+index 0000000..ac20d7f
 --- /dev/null
 +++ b/grsecurity/grsec_tpe.c
 @@ -0,0 +1,73 @@
@@ -63129,7 +63110,7 @@ index 0000000..07e0dc0
 +	char *msg2 = NULL;
 +
 +	// never restrict root
-+	if (!cred->uid)
++	if (uid_eq(cred->uid, GLOBAL_ROOT_UID))
 +		return 1;
 +
 +	if (grsec_enable_tpe) {
@@ -63150,7 +63131,7 @@ index 0000000..07e0dc0
 +	if (!msg)
 +		goto next_check;
 +
-+	if (inode->i_uid)
++	if (!uid_eq(inode->i_uid, GLOBAL_ROOT_UID))
 +		msg2 = "file in non-root-owned directory";
 +	else if (inode->i_mode & S_IWOTH)
 +		msg2 = "file in world-writable directory";
@@ -63169,7 +63150,7 @@ index 0000000..07e0dc0
 +	if (!grsec_enable_tpe || !grsec_enable_tpe_all)
 +		return 1;
 +
-+	if (inode->i_uid && (inode->i_uid != cred->uid))
++	if (!uid_eq(inode->i_uid, GLOBAL_ROOT_UID) && !uid_eq(inode->i_uid, cred->uid))
 +		msg = "directory not owned by user";
 +	else if (inode->i_mode & S_IWOTH)
 +		msg = "file in world-writable directory";
@@ -65046,7 +65027,7 @@ index 0000000..be66033
 +#endif
 diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
 new file mode 100644
-index 0000000..baa6e96
+index 0000000..9bb6662
 --- /dev/null
 +++ b/include/linux/grinternal.h
 @@ -0,0 +1,215 @@
@@ -65112,18 +65093,18 @@ index 0000000..baa6e96
 +extern int grsec_enable_chroot_sysctl;
 +extern int grsec_enable_chroot_unix;
 +extern int grsec_enable_symlinkown;
-+extern int grsec_symlinkown_gid;
++extern kgid_t grsec_symlinkown_gid;
 +extern int grsec_enable_tpe;
-+extern int grsec_tpe_gid;
++extern kgid_t grsec_tpe_gid;
 +extern int grsec_enable_tpe_all;
 +extern int grsec_enable_tpe_invert;
 +extern int grsec_enable_socket_all;
-+extern int grsec_socket_all_gid;
++extern kgid_t grsec_socket_all_gid;
 +extern int grsec_enable_socket_client;
-+extern int grsec_socket_client_gid;
++extern kgid_t grsec_socket_client_gid;
 +extern int grsec_enable_socket_server;
-+extern int grsec_socket_server_gid;
-+extern int grsec_audit_gid;
++extern kgid_t grsec_socket_server_gid;
++extern kgid_t grsec_audit_gid;
 +extern int grsec_enable_group;
 +extern int grsec_enable_audit_textrel;
 +extern int grsec_enable_log_rwxmaps;
@@ -65384,7 +65365,7 @@ index 0000000..2bd4c8d
 +#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds.  Please investigate the crash report for "
 diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
 new file mode 100644
-index 0000000..c5e5913
+index 0000000..1ae241a
 --- /dev/null
 +++ b/include/linux/grsecurity.h
 @@ -0,0 +1,257 @@
@@ -65432,8 +65413,8 @@ index 0000000..c5e5913
 +
 +int gr_acl_enable_at_secure(void);
 +
-+int gr_check_user_change(int real, int effective, int fs);
-+int gr_check_group_change(int real, int effective, int fs);
++int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
++int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
 +
 +void gr_del_task_from_ip_table(struct task_struct *p);
 +
@@ -65505,7 +65486,7 @@ index 0000000..c5e5913
 +void gr_copy_label(struct task_struct *tsk);
 +void gr_handle_crash(struct task_struct *task, const int sig);
 +int gr_handle_signal(const struct task_struct *p, const int sig);
-+int gr_check_crash_uid(const uid_t uid);
++int gr_check_crash_uid(const kuid_t uid);
 +int gr_check_protected_task(const struct task_struct *task);
 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
 +int gr_acl_handle_mmap(const struct file *file,
@@ -65532,8 +65513,8 @@ index 0000000..c5e5913
 +int gr_check_crash_exec(const struct file *filp);
 +int gr_acl_is_enabled(void);
 +void gr_set_kernel_label(struct task_struct *task);
-+void gr_set_role_label(struct task_struct *task, const uid_t uid,
-+			      const gid_t gid);
++void gr_set_role_label(struct task_struct *task, const kuid_t uid,
++			      const kgid_t gid);
 +int gr_set_proc_label(const struct dentry *dentry,
 +			const struct vfsmount *mnt,
 +			const int unsafe_flags);
@@ -65633,7 +65614,7 @@ index 0000000..c5e5913
 +extern int grsec_disable_privio;
 +
 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
-+extern int grsec_proc_gid;
++extern kgid_t grsec_proc_gid;
 +#endif
 +
 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
@@ -67652,6 +67633,18 @@ index 5ca0951..ab496a5 100644
  		ret;					\
  	})
  
+diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
+index 8e522cbc..1b67af5 100644
+--- a/include/linux/uidgid.h
++++ b/include/linux/uidgid.h
+@@ -197,4 +197,7 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
+ 
+ #endif /* CONFIG_USER_NS */
+ 
++#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
++#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
++
+ #endif /* _LINUX_UIDGID_H */
 diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
 index 99c1b4d..bb94261 100644
 --- a/include/linux/unaligned/access_ok.h
@@ -68732,7 +68725,7 @@ index 0993a22..32ba2fe 100644
  	void *pmi_pal;
  	u8 *vbe_state_orig;		/*
 diff --git a/init/Kconfig b/init/Kconfig
-index be8b7f5..b13cb62 100644
+index be8b7f5..1eeca9b 100644
 --- a/init/Kconfig
 +++ b/init/Kconfig
 @@ -990,6 +990,7 @@ endif # CGROUPS
@@ -68743,16 +68736,7 @@ index be8b7f5..b13cb62 100644
  	default n
  	help
  	  Enables additional kernel features in a sake of checkpoint/restore.
-@@ -1079,6 +1080,8 @@ config UIDGID_CONVERTED
- 	depends on OCFS2_FS = n
- 	depends on XFS_FS = n
- 
-+	depends on GRKERNSEC = n
-+
- config UIDGID_STRICT_TYPE_CHECKS
- 	bool "Require conversions between uid/gids and their internal representation"
- 	depends on UIDGID_CONVERTED
-@@ -1468,7 +1471,7 @@ config SLUB_DEBUG
+@@ -1468,7 +1469,7 @@ config SLUB_DEBUG
  
  config COMPAT_BRK
  	bool "Disable heap randomization"
@@ -68761,7 +68745,7 @@ index be8b7f5..b13cb62 100644
  	help
  	  Randomizing heap placement makes heap exploits harder, but it
  	  also breaks ancient binaries (including anything libc5 based).
-@@ -1711,7 +1714,7 @@ config INIT_ALL_POSSIBLE
+@@ -1711,7 +1712,7 @@ config INIT_ALL_POSSIBLE
  config STOP_MACHINE
  	bool
  	default y
@@ -69091,7 +69075,7 @@ index 84c6bf1..8899338 100644
  	next_state = Reset;
  	return 0;
 diff --git a/init/main.c b/init/main.c
-index cee4b5c..47f445e 100644
+index cee4b5c..9c267d9 100644
 --- a/init/main.c
 +++ b/init/main.c
 @@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
@@ -69108,10 +69092,10 @@ index cee4b5c..47f445e 100644
  __setup("reset_devices", set_reset_devices);
  
 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
-+int grsec_proc_gid = CONFIG_GRKERNSEC_PROC_GID;
++kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
 +static int __init setup_grsec_proc_gid(char *str)
 +{
-+	grsec_proc_gid = (int)simple_strtol(str, NULL, 0);
++	grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
 +	return 1;
 +}
 +__setup("grsec_proc_gid=", setup_grsec_proc_gid);
@@ -69334,7 +69318,7 @@ index 58d31f1..cce7a55 100644
  	sem_params.flg = semflg;
  	sem_params.u.nsems = nsems;
 diff --git a/ipc/shm.c b/ipc/shm.c
-index 4fa6d8f..38dfd0c 100644
+index 4fa6d8f..55cff14 100644
 --- a/ipc/shm.c
 +++ b/ipc/shm.c
 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
@@ -69343,7 +69327,7 @@ index 4fa6d8f..38dfd0c 100644
  
 +#ifdef CONFIG_GRKERNSEC
 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
-+			   const time_t shm_createtime, const uid_t cuid,
++			   const time_t shm_createtime, const kuid_t cuid,
 +			   const int shmid);
 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
 +			   const time_t shm_createtime);
@@ -69599,10 +69583,10 @@ index 493d972..f87dfbd 100644
 +	return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
 +}
 diff --git a/kernel/cgroup.c b/kernel/cgroup.c
-index 4855892..30d23b4 100644
+index 1e23664..570a83d 100644
 --- a/kernel/cgroup.c
 +++ b/kernel/cgroup.c
-@@ -5535,7 +5535,7 @@ static int cgroup_css_links_read(struct cgroup *cont,
+@@ -5543,7 +5543,7 @@ static int cgroup_css_links_read(struct cgroup *cont,
  		struct css_set *cg = link->cg;
  		struct task_struct *task;
  		int count = 0;
@@ -69794,7 +69778,7 @@ index 42e8fa0..9e7406b 100644
  		return -ENOMEM;
  
 diff --git a/kernel/cred.c b/kernel/cred.c
-index e0573a4..eefe488 100644
+index e0573a4..3874e41 100644
 --- a/kernel/cred.c
 +++ b/kernel/cred.c
 @@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
@@ -69832,7 +69816,7 @@ index e0573a4..eefe488 100644
  	/* dumpability changes */
  	if (!uid_eq(old->euid, new->euid) ||
  	    !gid_eq(old->egid, new->egid) ||
-@@ -479,6 +491,101 @@ int commit_creds(struct cred *new)
+@@ -479,6 +491,102 @@ int commit_creds(struct cred *new)
  	put_cred(old);
  	return 0;
  }
@@ -69846,7 +69830,7 @@ index e0573a4..eefe488 100644
 +
 +	current->delayed_cred = NULL;
 +
-+	if (current_uid() && new != NULL) {
++	if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
 +		// from doing get_cred on it when queueing this
 +		put_cred(new);
 +		return;
@@ -69907,7 +69891,8 @@ index e0573a4..eefe488 100644
 +	   init_cred
 +	*/
 +	if (grsec_enable_setxid && !current_is_single_threaded() &&
-+	    !current_uid() && new->uid) {
++	    uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
++	    !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
 +		schedule_it = 1;
 +	}
 +	ret = __commit_creds(new);
@@ -70639,7 +70624,7 @@ index 60f48fa..7f3a770 100644
  
  static int
 diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
-index 2169fee..45c017a 100644
+index 2169fee..706ccca 100644
 --- a/kernel/kallsyms.c
 +++ b/kernel/kallsyms.c
 @@ -11,6 +11,9 @@
@@ -70728,7 +70713,7 @@ index 2169fee..45c017a 100644
  	struct kallsym_iter *iter = m->private;
  
 +#ifdef CONFIG_GRKERNSEC_HIDESYM
-+	if (current_uid())
++	if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
 +		return 0;
 +#endif
 +
@@ -70782,7 +70767,7 @@ index 5e4bd78..00c5b91 100644
  
  	/* Don't allow clients that don't understand the native
 diff --git a/kernel/kmod.c b/kernel/kmod.c
-index 0023a87..3fe3781 100644
+index 0023a87..b893e79 100644
 --- a/kernel/kmod.c
 +++ b/kernel/kmod.c
 @@ -74,7 +74,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
@@ -70840,7 +70825,7 @@ index 0023a87..3fe3781 100644
  		return ret;
  
 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
-+	if (!current_uid()) {
++	if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
 +		/* hack to workaround consolekit/udisks stupidity */
 +		read_lock(&tasklist_lock);
 +		if (!strcmp(current->comm, "mount") &&
@@ -70885,12 +70870,12 @@ index 0023a87..3fe3781 100644
 +	int ret;
 +
 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
-+	if (current_uid()) {
++	if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
 +		char module_param[MODULE_NAME_LEN];
 +
 +		memset(module_param, 0, sizeof(module_param));
 +
-+		snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
++		snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
 +
 +		va_start(args, fmt);
 +		ret = ____request_module(wait, module_param, fmt, args);
@@ -72087,7 +72072,7 @@ index 942ca27..111e609 100644
  		.clock_get	= thread_cpu_clock_get,
  		.timer_create	= thread_cpu_timer_create,
 diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
-index 69185ae..cc2847a 100644
+index e885be1..380fe76 100644
 --- a/kernel/posix-timers.c
 +++ b/kernel/posix-timers.c
 @@ -43,6 +43,7 @@
@@ -72170,7 +72155,7 @@ index 69185ae..cc2847a 100644
  }
  
  static int common_timer_create(struct k_itimer *new_timer)
-@@ -959,6 +960,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
+@@ -966,6 +967,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
  	if (copy_from_user(&new_tp, tp, sizeof (*tp)))
  		return -EFAULT;
  
@@ -73541,7 +73526,7 @@ index 2f194e9..2c05ea9 100644
  	.priority	= 10,
  };
 diff --git a/kernel/sys.c b/kernel/sys.c
-index 265b376..b0cd50d 100644
+index 265b376..4e42ef5 100644
 --- a/kernel/sys.c
 +++ b/kernel/sys.c
 @@ -157,6 +157,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
@@ -73561,7 +73546,7 @@ index 265b376..b0cd50d 100644
  			goto error;
  	}
  
-+	if (gr_check_group_change(new->gid, new->egid, -1))
++	if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
 +		goto error;
 +
  	if (rgid != (gid_t) -1 ||
@@ -73591,7 +73576,7 @@ index 265b376..b0cd50d 100644
  			goto error;
  	}
  
-+	if (gr_check_user_change(new->uid, new->euid, -1))
++	if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
 +		goto error;
 +
  	if (!uid_eq(new->uid, old->uid)) {
@@ -73614,7 +73599,7 @@ index 265b376..b0cd50d 100644
  			goto error;
  	}
  
-+	if (gr_check_user_change(kruid, keuid, -1))
++	if (gr_check_user_change(kruid, keuid, INVALID_UID))
 +		goto error;
 +
  	if (ruid != (uid_t) -1) {
@@ -73624,7 +73609,7 @@ index 265b376..b0cd50d 100644
  			goto error;
  	}
  
-+	if (gr_check_group_change(krgid, kegid, -1))
++	if (gr_check_group_change(krgid, kegid, INVALID_GID))
 +		goto error;
 +
  	if (rgid != (gid_t) -1)
@@ -73634,7 +73619,7 @@ index 265b376..b0cd50d 100644
  	if (!uid_valid(kuid))
  		return old_fsuid;
  
-+	if (gr_check_user_change(-1, -1, kuid))
++	if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
 +		goto error;
 +
  	new = prepare_creds();
@@ -73652,7 +73637,7 @@ index 265b376..b0cd50d 100644
  	if (gid_eq(kgid, old->gid)  || gid_eq(kgid, old->egid)  ||
  	    gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
  	    nsown_capable(CAP_SETGID)) {
-+		if (gr_check_group_change(-1, -1, kgid))
++		if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
 +			goto error;
 +
  		if (!gid_eq(kgid, old->fsgid)) {
@@ -73896,7 +73881,7 @@ index c88878d..99d321b 100644
  EXPORT_SYMBOL(proc_doulongvec_minmax);
  EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
 diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
-index 5a63844..a199f50 100644
+index 0ddf3a0..a199f50 100644
 --- a/kernel/sysctl_binary.c
 +++ b/kernel/sysctl_binary.c
 @@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
@@ -73953,19 +73938,7 @@ index 5a63844..a199f50 100644
  		set_fs(old_fs);
  		if (result < 0)
  			goto out;
-@@ -1194,9 +1194,10 @@ static ssize_t bin_dn_node_address(struct file *file,
- 
- 		/* Convert the decnet address to binary */
- 		result = -EIO;
--		nodep = strchr(buf, '.') + 1;
-+		nodep = strchr(buf, '.');
- 		if (!nodep)
- 			goto out;
-+		++nodep;
- 
- 		area = simple_strtoul(buf, NULL, 10);
- 		node = simple_strtoul(nodep, NULL, 10);
-@@ -1233,7 +1234,7 @@ static ssize_t bin_dn_node_address(struct file *file,
+@@ -1234,7 +1234,7 @@ static ssize_t bin_dn_node_address(struct file *file,
  				le16_to_cpu(dnaddr) & 0x3ff);
  
  		set_fs(KERNEL_DS);
@@ -74236,7 +74209,7 @@ index c0bd030..62a1927 100644
  	ret = -EIO;
  	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
-index 41473b4..325fcfc 100644
+index 43defd1..76da436 100644
 --- a/kernel/trace/ftrace.c
 +++ b/kernel/trace/ftrace.c
 @@ -1874,12 +1874,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
@@ -74279,7 +74252,7 @@ index 41473b4..325fcfc 100644
  
  	start_pg = ftrace_allocate_pages(count);
  	if (!start_pg)
-@@ -4541,8 +4548,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
+@@ -4559,8 +4566,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
  #ifdef CONFIG_FUNCTION_GRAPH_TRACER
  
  static int ftrace_graph_active;
@@ -74288,7 +74261,7 @@ index 41473b4..325fcfc 100644
  int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
  {
  	return 0;
-@@ -4686,6 +4691,10 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
+@@ -4704,6 +4709,10 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
  	return NOTIFY_DONE;
  }
  
@@ -74299,7 +74272,7 @@ index 41473b4..325fcfc 100644
  int register_ftrace_graph(trace_func_graph_ret_t retfunc,
  			trace_func_graph_ent_t entryfunc)
  {
-@@ -4699,7 +4708,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+@@ -4717,7 +4726,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
  		goto out;
  	}
  
@@ -74955,26 +74928,6 @@ index 5e396ac..58d5de1 100644
  		err_printk(dev, NULL, "DMA-API: device driver maps memory from"
  				"stack [addr=%p]\n", addr);
  }
-diff --git a/lib/idr.c b/lib/idr.c
-index 6482390..ca5aa00 100644
---- a/lib/idr.c
-+++ b/lib/idr.c
-@@ -625,7 +625,14 @@ void *idr_get_next(struct idr *idp, int *nextidp)
- 			return p;
- 		}
- 
--		id += 1 << n;
-+		/*
-+		 * Proceed to the next layer at the current level.  Unlike
-+		 * idr_for_each(), @id isn't guaranteed to be aligned to
-+		 * layer boundary at this point and adding 1 << n may
-+		 * incorrectly skip IDs.  Make sure we jump to the
-+		 * beginning of the next layer using round_up().
-+		 */
-+		id = round_up(id + 1, 1 << n);
- 		while (n < fls(id)) {
- 			n += IDR_BITS;
- 			p = *--paa;
 diff --git a/lib/inflate.c b/lib/inflate.c
 index 013a761..c28f3fc 100644
 --- a/lib/inflate.c
@@ -76555,7 +76508,7 @@ index c9bd528..da8d069 100644
  	    capable(CAP_IPC_LOCK))
  		ret = do_mlockall(flags);
 diff --git a/mm/mmap.c b/mm/mmap.c
-index d1e4124..7d36e4f 100644
+index 8832b87..7d36e4f 100644
 --- a/mm/mmap.c
 +++ b/mm/mmap.c
 @@ -32,6 +32,7 @@
@@ -77301,51 +77254,7 @@ index d1e4124..7d36e4f 100644
  				spin_unlock(&vma->vm_mm->page_table_lock);
  
  				perf_event_mmap(vma);
-@@ -2169,9 +2477,28 @@ int expand_downwards(struct vm_area_struct *vma,
- 	return error;
- }
- 
-+/*
-+ * Note how expand_stack() refuses to expand the stack all the way to
-+ * abut the next virtual mapping, *unless* that mapping itself is also
-+ * a stack mapping. We want to leave room for a guard page, after all
-+ * (the guard page itself is not added here, that is done by the
-+ * actual page faulting logic)
-+ *
-+ * This matches the behavior of the guard page logic (see mm/memory.c:
-+ * check_stack_guard_page()), which only allows the guard page to be
-+ * removed under these circumstances.
-+ */
- #ifdef CONFIG_STACK_GROWSUP
- int expand_stack(struct vm_area_struct *vma, unsigned long address)
- {
-+	struct vm_area_struct *next;
-+
-+	address &= PAGE_MASK;
-+	next = vma->vm_next;
-+	if (next && next->vm_start == address + PAGE_SIZE) {
-+		if (!(next->vm_flags & VM_GROWSUP))
-+			return -ENOMEM;
-+	}
- 	return expand_upwards(vma, address);
- }
- 
-@@ -2194,6 +2521,14 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
- #else
- int expand_stack(struct vm_area_struct *vma, unsigned long address)
- {
-+	struct vm_area_struct *prev;
-+
-+	address &= PAGE_MASK;
-+	prev = vma->vm_prev;
-+	if (prev && prev->vm_end == address) {
-+		if (!(prev->vm_flags & VM_GROWSDOWN))
-+			return -ENOMEM;
-+	}
- 	return expand_downwards(vma, address);
- }
- 
-@@ -2236,6 +2571,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
+@@ -2263,6 +2571,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
  	do {
  		long nrpages = vma_pages(vma);
  
@@ -77359,7 +77268,7 @@ index d1e4124..7d36e4f 100644
  		if (vma->vm_flags & VM_ACCOUNT)
  			nr_accounted += nrpages;
  		vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
-@@ -2281,6 +2623,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2308,6 +2623,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
  	insertion_point = (prev ? &prev->vm_next : &mm->mmap);
  	vma->vm_prev = NULL;
  	do {
@@ -77376,7 +77285,7 @@ index d1e4124..7d36e4f 100644
  		vma_rb_erase(vma, &mm->mm_rb);
  		mm->map_count--;
  		tail_vma = vma;
-@@ -2312,14 +2664,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2339,14 +2664,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
  	struct vm_area_struct *new;
  	int err = -ENOMEM;
  
@@ -77410,7 +77319,7 @@ index d1e4124..7d36e4f 100644
  	/* most fields are the same, copy all, and then fixup */
  	*new = *vma;
  
-@@ -2332,6 +2703,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2359,6 +2703,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
  		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
  	}
  
@@ -77433,7 +77342,7 @@ index d1e4124..7d36e4f 100644
  	pol = mpol_dup(vma_policy(vma));
  	if (IS_ERR(pol)) {
  		err = PTR_ERR(pol);
-@@ -2354,6 +2741,36 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2381,6 +2741,36 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
  	else
  		err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
  
@@ -77470,7 +77379,7 @@ index d1e4124..7d36e4f 100644
  	/* Success. */
  	if (!err)
  		return 0;
-@@ -2363,10 +2780,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2390,10 +2780,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
  		new->vm_ops->close(new);
  	if (new->vm_file)
  		fput(new->vm_file);
@@ -77490,7 +77399,7 @@ index d1e4124..7d36e4f 100644
  	kmem_cache_free(vm_area_cachep, new);
   out_err:
  	return err;
-@@ -2379,6 +2804,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+@@ -2406,6 +2804,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
  int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
  	      unsigned long addr, int new_below)
  {
@@ -77506,7 +77415,7 @@ index d1e4124..7d36e4f 100644
  	if (mm->map_count >= sysctl_max_map_count)
  		return -ENOMEM;
  
-@@ -2390,11 +2824,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+@@ -2417,11 +2824,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
   * work.  This now handles partial unmappings.
   * Jeremy Fitzhardinge <jeremy@goop.org>
   */
@@ -77537,7 +77446,7 @@ index d1e4124..7d36e4f 100644
  	if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
  		return -EINVAL;
  
-@@ -2469,6 +2922,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
+@@ -2496,6 +2922,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
  	/* Fix up all other VM information */
  	remove_vma_list(mm, vma);
  
@@ -77546,7 +77455,7 @@ index d1e4124..7d36e4f 100644
  	return 0;
  }
  
-@@ -2477,6 +2932,13 @@ int vm_munmap(unsigned long start, size_t len)
+@@ -2504,6 +2932,13 @@ int vm_munmap(unsigned long start, size_t len)
  	int ret;
  	struct mm_struct *mm = current->mm;
  
@@ -77560,7 +77469,7 @@ index d1e4124..7d36e4f 100644
  	down_write(&mm->mmap_sem);
  	ret = do_munmap(mm, start, len);
  	up_write(&mm->mmap_sem);
-@@ -2490,16 +2952,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
+@@ -2517,16 +2952,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
  	return vm_munmap(addr, len);
  }
  
@@ -77577,7 +77486,7 @@ index d1e4124..7d36e4f 100644
  /*
   *  this is really a simplified "do_mmap".  it only handles
   *  anonymous maps.  eventually we may be able to do some
-@@ -2513,6 +2965,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2540,6 +2965,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
  	struct rb_node ** rb_link, * rb_parent;
  	pgoff_t pgoff = addr >> PAGE_SHIFT;
  	int error;
@@ -77585,7 +77494,7 @@ index d1e4124..7d36e4f 100644
  
  	len = PAGE_ALIGN(len);
  	if (!len)
-@@ -2520,16 +2973,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2547,16 +2973,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
  
  	flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
  
@@ -77617,7 +77526,7 @@ index d1e4124..7d36e4f 100644
  		locked += mm->locked_vm;
  		lock_limit = rlimit(RLIMIT_MEMLOCK);
  		lock_limit >>= PAGE_SHIFT;
-@@ -2546,21 +3013,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2573,21 +3013,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
  	/*
  	 * Clear old maps.  this also does some error checking for us
  	 */
@@ -77642,7 +77551,7 @@ index d1e4124..7d36e4f 100644
  		return -ENOMEM;
  
  	/* Can we just expand an old private anonymous mapping? */
-@@ -2574,7 +3040,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2601,7 +3040,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
  	 */
  	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
  	if (!vma) {
@@ -77651,7 +77560,7 @@ index d1e4124..7d36e4f 100644
  		return -ENOMEM;
  	}
  
-@@ -2588,11 +3054,12 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
+@@ -2615,11 +3054,12 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
  	vma_link(mm, vma, prev, rb_link, rb_parent);
  out:
  	perf_event_mmap(vma);
@@ -77666,7 +77575,7 @@ index d1e4124..7d36e4f 100644
  	return addr;
  }
  
-@@ -2650,6 +3117,7 @@ void exit_mmap(struct mm_struct *mm)
+@@ -2677,6 +3117,7 @@ void exit_mmap(struct mm_struct *mm)
  	while (vma) {
  		if (vma->vm_flags & VM_ACCOUNT)
  			nr_accounted += vma_pages(vma);
@@ -77674,7 +77583,7 @@ index d1e4124..7d36e4f 100644
  		vma = remove_vma(vma);
  	}
  	vm_unacct_memory(nr_accounted);
-@@ -2666,6 +3134,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
+@@ -2693,6 +3134,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
  	struct vm_area_struct *prev;
  	struct rb_node **rb_link, *rb_parent;
  
@@ -77688,7 +77597,7 @@ index d1e4124..7d36e4f 100644
  	/*
  	 * The vm_pgoff of a purely anonymous vma should be irrelevant
  	 * until its first write fault, when page's anon_vma and index
-@@ -2689,7 +3164,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
+@@ -2716,7 +3164,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
  	     security_vm_enough_memory_mm(mm, vma_pages(vma)))
  		return -ENOMEM;
  
@@ -77710,7 +77619,7 @@ index d1e4124..7d36e4f 100644
  	return 0;
  }
  
-@@ -2709,6 +3198,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+@@ -2736,6 +3198,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
  	struct mempolicy *pol;
  	bool faulted_in_anon_vma = true;
  
@@ -77719,7 +77628,7 @@ index d1e4124..7d36e4f 100644
  	/*
  	 * If anonymous vma has not yet been faulted, update new pgoff
  	 * to match new location, to increase its chance of merging.
-@@ -2775,6 +3266,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+@@ -2802,6 +3266,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
  	return NULL;
  }
  
@@ -77759,7 +77668,7 @@ index d1e4124..7d36e4f 100644
  /*
   * Return true if the calling process may expand its vm space by the passed
   * number of pages
-@@ -2786,6 +3310,12 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
+@@ -2813,6 +3310,12 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
  
  	lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
  
@@ -77772,7 +77681,7 @@ index d1e4124..7d36e4f 100644
  	if (cur + npages > lim)
  		return 0;
  	return 1;
-@@ -2856,6 +3386,22 @@ int install_special_mapping(struct mm_struct *mm,
+@@ -2883,6 +3386,22 @@ int install_special_mapping(struct mm_struct *mm,
  	vma->vm_start = addr;
  	vma->vm_end = addr + len;
  
@@ -85392,6 +85301,21 @@ index 38be92c..21f49ee 100644
  	.name =				"smack",
  
  	.ptrace_access_check =		smack_ptrace_access_check,
+diff --git a/security/tomoyo/mount.c b/security/tomoyo/mount.c
+index 390c646..f2f8db3 100644
+--- a/security/tomoyo/mount.c
++++ b/security/tomoyo/mount.c
+@@ -118,6 +118,10 @@ static int tomoyo_mount_acl(struct tomoyo_request_info *r,
+ 		   type == tomoyo_mounts[TOMOYO_MOUNT_MOVE]) {
+ 		need_dev = -1; /* dev_name is a directory */
+ 	} else {
++		if (!capable(CAP_SYS_ADMIN)) {
++			error = -EPERM;
++			goto out;
++		}
+ 		fstype = get_fs_type(type);
+ 		if (!fstype) {
+ 			error = -ENODEV;
 diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
 index a2ee362..5754f34 100644
 --- a/security/tomoyo/tomoyo.c

