public inbox for gentoo-commits@lists.gentoo.org
* [gentoo-commits] proj/hardened-patchset:master commit in: 2.6.32/, 3.3.6/, 3.2.17/
From: Anthony G. Basile @ 2012-05-21 14:40 UTC
  To: gentoo-commits

commit:     154077e80521647e6e280dfe21534f7b01803a6a
Author:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
AuthorDate: Mon May 21 14:40:21 2012 +0000
Commit:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
CommitDate: Mon May 21 14:40:21 2012 +0000
URL:        http://git.overlays.gentoo.org/gitweb/?p=proj/hardened-patchset.git;a=commit;h=154077e8

Grsec/PaX: 2.9-{2.6.32.59,3.2.17,3.3.6}-201205191125

---
 2.6.32/0000_README                                 |    2 +-
 ...20_grsecurity-2.9-2.6.32.59-201205191123.patch} |  131 +++++++-
 3.2.17/0000_README                                 |    2 +-
 ... 4420_grsecurity-2.9-3.2.17-201205191125.patch} |  313 ++++++++++++++++++-
 3.3.6/0000_README                                  |    2 +-
 ...> 4420_grsecurity-2.9-3.3.6-201205191125.patch} |  326 +++++++++++++++++++-
 6 files changed, 734 insertions(+), 42 deletions(-)

diff --git a/2.6.32/0000_README b/2.6.32/0000_README
index 3655217..fc95969 100644
--- a/2.6.32/0000_README
+++ b/2.6.32/0000_README
@@ -30,7 +30,7 @@ Patch:	1058_linux-2.6.32.59.patch
 From:	http://www.kernel.org
 Desc:	Linux 2.6.32.59
 
-Patch:	4420_grsecurity-2.9-2.6.32.59-201205131656.patch
+Patch:	4420_grsecurity-2.9-2.6.32.59-201205191123.patch
 From:	http://www.grsecurity.net
 Desc:	hardened-sources base patch from upstream grsecurity
 

diff --git a/2.6.32/4420_grsecurity-2.9-2.6.32.59-201205131656.patch b/2.6.32/4420_grsecurity-2.9-2.6.32.59-201205191123.patch
similarity index 99%
rename from 2.6.32/4420_grsecurity-2.9-2.6.32.59-201205131656.patch
rename to 2.6.32/4420_grsecurity-2.9-2.6.32.59-201205191123.patch
index d324f88..2e42506 100644
--- a/2.6.32/4420_grsecurity-2.9-2.6.32.59-201205131656.patch
+++ b/2.6.32/4420_grsecurity-2.9-2.6.32.59-201205191123.patch
@@ -27069,10 +27069,18 @@ index f46c3407..f7e72b0 100644
  	}
  	if (mm->get_unmapped_area == arch_get_unmapped_area)
 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
-index 73ffd55..f61c2a7 100644
+index 73ffd55..10ae23f 100644
 --- a/arch/x86/mm/init.c
 +++ b/arch/x86/mm/init.c
-@@ -69,11 +69,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
+@@ -13,6 +13,7 @@
+ #include <asm/tlbflush.h>
+ #include <asm/tlb.h>
+ #include <asm/proto.h>
++#include <asm/bios_ebda.h>
+ 
+ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+ 
+@@ -69,11 +70,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
  	 * cause a hotspot and fill up ZONE_DMA. The page tables
  	 * need roughly 0.5KB per GB.
  	 */
@@ -27085,7 +27093,7 @@ index 73ffd55..f61c2a7 100644
  	e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
  					tables, PAGE_SIZE);
  	if (e820_table_start == -1UL)
-@@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
+@@ -147,7 +144,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
  #endif
  
  	set_nx();
@@ -27094,11 +27102,16 @@ index 73ffd55..f61c2a7 100644
  		printk(KERN_INFO "NX (Execute Disable) protection: active\n");
  
  	/* Enable PSE if available */
-@@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
+@@ -329,10 +326,32 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
   * Access has to be given to non-kernel-ram areas as well, these contain the PCI
   * mmio resources as well as potential bios/acpi data regions.
   */
 +
++#ifdef CONFIG_GRKERNSEC_KMEM
++static unsigned int ebda_start __read_only;
++static unsigned int ebda_end __read_only;
++#endif
++
  int devmem_is_allowed(unsigned long pagenr)
  {
 +#ifdef CONFIG_GRKERNSEC_KMEM
@@ -27106,7 +27119,7 @@ index 73ffd55..f61c2a7 100644
 +	if (!pagenr)
 +		return 1;
 +	/* allow EBDA */
-+	if ((0x9f000 >> PAGE_SHIFT) == pagenr)
++	if (pagenr >= ebda_start && pagenr < ebda_end)
 +		return 1;
 +	/* allow ISA/video mem */
 +	if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
@@ -27122,18 +27135,48 @@ index 73ffd55..f61c2a7 100644
  	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
  		return 0;
  	if (!page_is_ram(pagenr))
-@@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
+@@ -377,8 +396,116 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
+ #endif
+ }
  
++#ifdef CONFIG_GRKERNSEC_KMEM
++static inline void gr_init_ebda(void)
++{
++	unsigned int ebda_addr;
++	unsigned int ebda_size = 0;
++
++	ebda_addr = get_bios_ebda();
++	if (ebda_addr) {
++		ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
++		ebda_size <<= 10;
++	}
++	if (ebda_addr && ebda_size) {
++		ebda_start = ebda_addr >> PAGE_SHIFT;
++		ebda_end = min(PAGE_ALIGN(ebda_addr + ebda_size), 0xa0000) >> PAGE_SHIFT;
++	} else {
++		ebda_start = 0x9f000 >> PAGE_SHIFT;
++		ebda_end = 0xa0000 >> PAGE_SHIFT;
++	}
++}
++#else
++static inline void gr_init_ebda(void) { }
++#endif
++
  void free_initmem(void)
  {
-+
 +#ifdef CONFIG_PAX_KERNEXEC
 +#ifdef CONFIG_X86_32
 +	/* PaX: limit KERNEL_CS to actual size */
 +	unsigned long addr, limit;
 +	struct desc_struct d;
 +	int cpu;
++#endif
++#endif
++
++	gr_init_ebda();
 +
++#ifdef CONFIG_PAX_KERNEXEC
++#ifdef CONFIG_X86_32
 +	limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
 +	limit = (limit - 1UL) >> PAGE_SHIFT;
 +
@@ -38580,6 +38623,19 @@ index 818b617..4656e38 100644
  		if (!*param->name) {
  			DMWARN("name not supplied when creating device");
  			return -EINVAL;
+diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
+index f1c8cae..59f0822 100644
+--- a/drivers/md/dm-log-userspace-transfer.c
++++ b/drivers/md/dm-log-userspace-transfer.c
+@@ -133,7 +133,7 @@ static void cn_ulog_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
+ {
+ 	struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1);
+ 
+-	if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN))
++	if (!capable(CAP_SYS_ADMIN))
+ 		return;
+ 
+ 	spin_lock(&receiving_list_lock);
 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
 index 6021d0a..a878643 100644
 --- a/drivers/md/dm-raid1.c
@@ -53290,7 +53346,7 @@ index 7fa7a90..fef924d 100644
  		return -EOPNOTSUPP;
  	}
 diff --git a/drivers/net/dl2k.h b/drivers/net/dl2k.h
-index 266ec87..b7e1172 100644
+index 266ec87..466b2f5 100644
 --- a/drivers/net/dl2k.h
 +++ b/drivers/net/dl2k.h
 @@ -471,13 +471,6 @@ struct ioctl_data {
@@ -53307,6 +53363,14 @@ index 266ec87..b7e1172 100644
  /* The Rx and Tx buffer descriptors. */
  struct netdev_desc {
  	__le64 next_desc;
+@@ -551,4 +544,7 @@ MODULE_DEVICE_TABLE (pci, rio_pci_tbl);
+ #define DEFAULT_RXT		750
+ #define DEFAULT_TXC		1
+ #define MAX_TXC			8
++
++#include <linux/mii.h>
++
+ #endif				/* __DL2K_H__ */
 diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
 index d1e0563..b9e129c 100644
 --- a/drivers/net/e1000e/82571.c
@@ -61542,7 +61606,7 @@ index cd3910b..ff053d3 100644
  	.open	 = rpcrouter_open,
  	.release = rpcrouter_release,
 diff --git a/drivers/staging/dst/dcore.c b/drivers/staging/dst/dcore.c
-index c24e4e0..07665be 100644
+index c24e4e0..629999b 100644
 --- a/drivers/staging/dst/dcore.c
 +++ b/drivers/staging/dst/dcore.c
 @@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendisk *disk, fmode_t mode)
@@ -61563,6 +61627,15 @@ index c24e4e0..07665be 100644
  	snprintf(n->name, sizeof(n->name), "%s", ctl->name);
  
  	err = dst_node_sysfs_init(n);
+@@ -855,7 +855,7 @@ static void cn_dst_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
+ 	struct dst_node *n = NULL, *tmp;
+ 	unsigned int hash;
+ 
+-	if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) {
++	if (!capable(CAP_SYS_ADMIN)) {
+ 		err = -EPERM;
+ 		goto out;
+ 	}
 diff --git a/drivers/staging/dst/trans.c b/drivers/staging/dst/trans.c
 index 557d372..8d84422 100644
 --- a/drivers/staging/dst/trans.c
@@ -61842,6 +61915,19 @@ index 2eb8e3d..57616a7 100644
  	.owner = THIS_MODULE,
  	.open = poch_open,
  	.release = poch_release,
+diff --git a/drivers/staging/pohmelfs/config.c b/drivers/staging/pohmelfs/config.c
+index 5d04bf5..d4baff2 100644
+--- a/drivers/staging/pohmelfs/config.c
++++ b/drivers/staging/pohmelfs/config.c
+@@ -531,7 +531,7 @@ static void pohmelfs_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *n
+ {
+ 	int err;
+ 
+-	if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN))
++	if (!capable(CAP_SYS_ADMIN))
+ 		return;
+ 
+ 	switch (msg->flags) {
 diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
 index c94de31..19402bc 100644
 --- a/drivers/staging/pohmelfs/inode.c
@@ -65596,7 +65682,7 @@ index d94c57f..912984c 100644
  	.update_status	= riva_bl_update_status,
  };
 diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
-index 54fbb29..2c108fc 100644
+index 54fbb29..37bab4b 100644
 --- a/drivers/video/uvesafb.c
 +++ b/drivers/video/uvesafb.c
 @@ -18,6 +18,7 @@
@@ -65607,6 +65693,15 @@ index 54fbb29..2c108fc 100644
  #include <video/edid.h>
  #include <video/uvesafb.h>
  #ifdef CONFIG_X86
+@@ -72,7 +73,7 @@ static void uvesafb_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *ns
+ 	struct uvesafb_task *utask;
+ 	struct uvesafb_ktask *task;
+ 
+-	if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN))
++	if (!capable(CAP_SYS_ADMIN))
+ 		return;
+ 
+ 	if (msg->seq >= UVESAFB_TASKS_MAX)
 @@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
  		NULL,
  	};
@@ -102188,10 +102283,20 @@ index 58f141b..b759702 100644
  			(jiffies ^ (jiffies >> 7))));
  
 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
-index f095659..adc892a 100644
+index f095659..537313b 100644
 --- a/net/ipv4/tcp.c
 +++ b/net/ipv4/tcp.c
-@@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
+@@ -838,8 +838,7 @@ new_segment:
+ wait_for_sndbuf:
+ 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ wait_for_memory:
+-		if (copied)
+-			tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
++		tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
+ 
+ 		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
+ 			goto do_error;
+@@ -2085,6 +2084,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
  	int val;
  	int err = 0;
  
@@ -102200,7 +102305,7 @@ index f095659..adc892a 100644
  	/* This is a string value all the others are int's */
  	if (optname == TCP_CONGESTION) {
  		char name[TCP_CA_NAME_MAX];
-@@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
+@@ -2355,6 +2356,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
  	struct tcp_sock *tp = tcp_sk(sk);
  	int val, len;
  

diff --git a/3.2.17/0000_README b/3.2.17/0000_README
index d74a42e..bf0e817 100644
--- a/3.2.17/0000_README
+++ b/3.2.17/0000_README
@@ -6,7 +6,7 @@ Patch:	1016_linux-3.2.17.patch
 From:	http://www.kernel.org
 Desc:	Linux 3.2.17
 
-Patch:	4420_grsecurity-2.9-3.2.17-201205131657.patch
+Patch:	4420_grsecurity-2.9-3.2.17-201205191125.patch
 From:	http://www.grsecurity.net
 Desc:	hardened-sources base patch from upstream grsecurity
 

diff --git a/3.2.17/4420_grsecurity-2.9-3.2.17-201205131657.patch b/3.2.17/4420_grsecurity-2.9-3.2.17-201205191125.patch
similarity index 99%
rename from 3.2.17/4420_grsecurity-2.9-3.2.17-201205131657.patch
rename to 3.2.17/4420_grsecurity-2.9-3.2.17-201205191125.patch
index 8ddeecb..23a68ae 100644
--- a/3.2.17/4420_grsecurity-2.9-3.2.17-201205131657.patch
+++ b/3.2.17/4420_grsecurity-2.9-3.2.17-201205191125.patch
@@ -11436,10 +11436,67 @@ index 98391db..8f6984e 100644
  
  static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
 diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
-index effff47..f9e4035 100644
+index effff47..bbb8295 100644
 --- a/arch/x86/include/asm/pgtable-3level.h
 +++ b/arch/x86/include/asm/pgtable-3level.h
-@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
+@@ -31,6 +31,56 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte)
+ 	ptep->pte_low = pte.pte_low;
+ }
+ 
++#define  __HAVE_ARCH_READ_PMD_ATOMIC
++/*
++ * pte_offset_map_lock on 32bit PAE kernels was reading the pmd_t with
++ * a "*pmdp" dereference done by gcc. Problem is, in certain places
++ * where pte_offset_map_lock is called, concurrent page faults are
++ * allowed, if the mmap_sem is hold for reading. An example is mincore
++ * vs page faults vs MADV_DONTNEED. On the page fault side
++ * pmd_populate rightfully does a set_64bit, but if we're reading the
++ * pmd_t with a "*pmdp" on the mincore side, a SMP race can happen
++ * because gcc will not read the 64bit of the pmd atomically. To fix
++ * this all places running pmd_offset_map_lock() while holding the
++ * mmap_sem in read mode, shall read the pmdp pointer using this
++ * function to know if the pmd is null nor not, and in turn to know if
++ * they can run pmd_offset_map_lock or pmd_trans_huge or other pmd
++ * operations.
++ *
++ * Without THP if the mmap_sem is hold for reading, the
++ * pmd can only transition from null to not null while read_pmd_atomic runs.
++ * So there's no need of literally reading it atomically.
++ *
++ * With THP if the mmap_sem is hold for reading, the pmd can become
++ * THP or null or point to a pte (and in turn become "stable") at any
++ * time under read_pmd_atomic, so it's mandatory to read it atomically
++ * with cmpxchg8b.
++ */
++#ifndef CONFIG_TRANSPARENT_HUGEPAGE
++static inline pmd_t read_pmd_atomic(pmd_t *pmdp)
++{
++	pmdval_t ret;
++	u32 *tmp = (u32 *)pmdp;
++
++	ret = (pmdval_t) (*tmp);
++	if (ret) {
++		/*
++		 * If the low part is null, we must not read the high part
++		 * or we can end up with a partial pmd.
++		 */
++		smp_rmb();
++		ret |= ((pmdval_t)*(tmp + 1)) << 32;
++	}
++
++	return __pmd(ret);
++}
++#else /* CONFIG_TRANSPARENT_HUGEPAGE */
++static inline pmd_t read_pmd_atomic(pmd_t *pmdp)
++{
++	return __pmd(atomic64_read((atomic64_t *)pmdp));
++}
++#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
++
+ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
+ {
+ 	set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
+@@ -38,12 +88,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
  
  static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
  {
@@ -25048,18 +25105,19 @@ index f581a18..a269cab 100644
  	}
  	if (mm->get_unmapped_area == arch_get_unmapped_area)
 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
-index 87488b9..399f416 100644
+index 87488b9..6371e97 100644
 --- a/arch/x86/mm/init.c
 +++ b/arch/x86/mm/init.c
-@@ -15,6 +15,7 @@
+@@ -15,6 +15,8 @@
  #include <asm/tlbflush.h>
  #include <asm/tlb.h>
  #include <asm/proto.h>
 +#include <asm/desc.h>
++#include <asm/bios_ebda.h>
  
  unsigned long __initdata pgt_buf_start;
  unsigned long __meminitdata pgt_buf_end;
-@@ -31,7 +32,7 @@ int direct_gbpages
+@@ -31,7 +33,7 @@ int direct_gbpages
  static void __init find_early_table_space(unsigned long end, int use_pse,
  					  int use_gbpages)
  {
@@ -25068,8 +25126,16 @@ index 87488b9..399f416 100644
  	phys_addr_t base;
  
  	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
-@@ -312,8 +313,29 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
+@@ -310,10 +312,37 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
+  * Access has to be given to non-kernel-ram areas as well, these contain the PCI
+  * mmio resources as well as potential bios/acpi data regions.
   */
++
++#ifdef CONFIG_GRKERNSEC_KMEM
++static unsigned int ebda_start __read_only;
++static unsigned int ebda_end __read_only;
++#endif
++
  int devmem_is_allowed(unsigned long pagenr)
  {
 +#ifdef CONFIG_GRKERNSEC_KMEM
@@ -25077,7 +25143,7 @@ index 87488b9..399f416 100644
 +	if (!pagenr)
 +		return 1;
 +	/* allow EBDA */
-+	if ((0x9f000 >> PAGE_SHIFT) == pagenr)
++	if (pagenr >= ebda_start && pagenr < ebda_end)
 +		return 1;
 +#else
 +	if (!pagenr)
@@ -25099,18 +25165,48 @@ index 87488b9..399f416 100644
  	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
  		return 0;
  	if (!page_is_ram(pagenr))
-@@ -372,6 +394,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
+@@ -370,8 +399,116 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
+ #endif
+ }
  
++#ifdef CONFIG_GRKERNSEC_KMEM
++static inline void gr_init_ebda(void)
++{
++	unsigned int ebda_addr;
++	unsigned int ebda_size = 0;
++
++	ebda_addr = get_bios_ebda();
++	if (ebda_addr) {
++		ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
++		ebda_size <<= 10;
++	}
++	if (ebda_addr && ebda_size) {
++		ebda_start = ebda_addr >> PAGE_SHIFT;
++		ebda_end = min(PAGE_ALIGN(ebda_addr + ebda_size), 0xa0000) >> PAGE_SHIFT;
++	} else {
++		ebda_start = 0x9f000 >> PAGE_SHIFT;
++		ebda_end = 0xa0000 >> PAGE_SHIFT;
++	}
++}
++#else
++static inline void gr_init_ebda(void) { }
++#endif
++
  void free_initmem(void)
  {
-+
 +#ifdef CONFIG_PAX_KERNEXEC
 +#ifdef CONFIG_X86_32
 +	/* PaX: limit KERNEL_CS to actual size */
 +	unsigned long addr, limit;
 +	struct desc_struct d;
 +	int cpu;
++#endif
++#endif
 +
++	gr_init_ebda();
++
++#ifdef CONFIG_PAX_KERNEXEC
++#ifdef CONFIG_X86_32
 +	limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
 +	limit = (limit - 1UL) >> PAGE_SHIFT;
 +
@@ -29545,9 +29641,18 @@ index 0358e55..bc33689 100644
  	mdev->bm_writ_cnt  =
  	mdev->read_cnt     =
 diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
-index af2a250..219c74b 100644
+index af2a250..0fdeb75 100644
 --- a/drivers/block/drbd/drbd_nl.c
 +++ b/drivers/block/drbd/drbd_nl.c
+@@ -2297,7 +2297,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
+ 		return;
+ 	}
+ 
+-	if (!cap_raised(current_cap(), CAP_SYS_ADMIN)) {
++	if (!capable(CAP_SYS_ADMIN)) {
+ 		retcode = ERR_PERM;
+ 		goto fail;
+ 	}
 @@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
  	module_put(THIS_MODULE);
  }
@@ -33943,6 +34048,19 @@ index 1ce84ed..0fdd40a 100644
  		if (!*param->name) {
  			DMWARN("name not supplied when creating device");
  			return -EINVAL;
+diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
+index 1f23e04..08d9a20 100644
+--- a/drivers/md/dm-log-userspace-transfer.c
++++ b/drivers/md/dm-log-userspace-transfer.c
+@@ -134,7 +134,7 @@ static void cn_ulog_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
+ {
+ 	struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1);
+ 
+-	if (!cap_raised(current_cap(), CAP_SYS_ADMIN))
++	if (!capable(CAP_SYS_ADMIN))
+ 		return;
+ 
+ 	spin_lock(&receiving_list_lock);
 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
 index 9bfd057..01180bc 100644
 --- a/drivers/md/dm-raid1.c
@@ -35672,6 +35790,23 @@ index 61d2bdd..7f1154a 100644
   	{ "100/10M Ethernet PCI Adapter",	HAS_MII_XCVR },
  	{ "100/10M Ethernet PCI Adapter",	HAS_CHIP_XCVR },
  	{ "1000/100/10M Ethernet PCI Adapter",	HAS_MII_XCVR },
+diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
+index cf480b5..de00805 100644
+--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
++++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
+@@ -494,7 +494,11 @@ out:
+ static void e1000_down_and_stop(struct e1000_adapter *adapter)
+ {
+ 	set_bit(__E1000_DOWN, &adapter->flags);
+-	cancel_work_sync(&adapter->reset_task);
++
++	/* Only kill reset task if adapter is not resetting */
++	if (!test_bit(__E1000_RESETTING, &adapter->flags))
++		cancel_work_sync(&adapter->reset_task);
++
+ 	cancel_delayed_work_sync(&adapter->watchdog_task);
+ 	cancel_delayed_work_sync(&adapter->phy_info_task);
+ 	cancel_delayed_work_sync(&adapter->fifo_stall_task);
 diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
 index e1159e5..e18684d 100644
 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -42615,7 +42750,7 @@ index 41746bb..febcb44 100644
  	return count;
  }
 diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
-index 8813588..1c6b358 100644
+index 8813588..7265ef8 100644
 --- a/drivers/video/uvesafb.c
 +++ b/drivers/video/uvesafb.c
 @@ -19,6 +19,7 @@
@@ -42626,6 +42761,15 @@ index 8813588..1c6b358 100644
  #include <video/edid.h>
  #include <video/uvesafb.h>
  #ifdef CONFIG_X86
+@@ -73,7 +74,7 @@ static void uvesafb_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *ns
+ 	struct uvesafb_task *utask;
+ 	struct uvesafb_ktask *task;
+ 
+-	if (!cap_raised(current_cap(), CAP_SYS_ADMIN))
++	if (!capable(CAP_SYS_ADMIN))
+ 		return;
+ 
+ 	if (msg->seq >= UVESAFB_TASKS_MAX)
 @@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
  		NULL,
  	};
@@ -48370,6 +48514,96 @@ index 50a15fa..ca113f9 100644
  }
  
  void nfs_fattr_init(struct nfs_fattr *fattr)
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 51f6a40..fb4bb6f 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -3581,19 +3581,23 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
+ 	if (npages == 0)
+ 		npages = 1;
+ 
++	/* Add an extra page to handle the bitmap returned */
++	npages++;
++
+ 	for (i = 0; i < npages; i++) {
+ 		pages[i] = alloc_page(GFP_KERNEL);
+ 		if (!pages[i])
+ 			goto out_free;
+ 	}
+-	if (npages > 1) {
+-		/* for decoding across pages */
+-		res.acl_scratch = alloc_page(GFP_KERNEL);
+-		if (!res.acl_scratch)
+-			goto out_free;
+-	}
++
++	/* for decoding across pages */
++	res.acl_scratch = alloc_page(GFP_KERNEL);
++	if (!res.acl_scratch)
++		goto out_free;
++
+ 	args.acl_len = npages * PAGE_SIZE;
+ 	args.acl_pgbase = 0;
++
+ 	/* Let decode_getfacl know not to fail if the ACL data is larger than
+ 	 * the page we send as a guess */
+ 	if (buf == NULL)
+diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
+index 68adab4..d7e6f7b 100644
+--- a/fs/nfs/nfs4xdr.c
++++ b/fs/nfs/nfs4xdr.c
+@@ -4965,11 +4965,19 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
+ 		 bitmap[3] = {0};
+ 	struct kvec *iov = req->rq_rcv_buf.head;
+ 	int status;
++	size_t page_len = xdr->buf->page_len;
+ 
+ 	res->acl_len = 0;
+ 	if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
+ 		goto out;
++
+ 	bm_p = xdr->p;
++	res->acl_data_offset = be32_to_cpup(bm_p) + 2;
++	res->acl_data_offset <<= 2;
++	/* Check if the acl data starts beyond the allocated buffer */
++	if (res->acl_data_offset > page_len)
++		return -ERANGE;
++
+ 	if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
+ 		goto out;
+ 	if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
+@@ -4979,28 +4987,24 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
+ 		return -EIO;
+ 	if (likely(bitmap[0] & FATTR4_WORD0_ACL)) {
+ 		size_t hdrlen;
+-		u32 recvd;
+ 
+ 		/* The bitmap (xdr len + bitmaps) and the attr xdr len words
+ 		 * are stored with the acl data to handle the problem of
+ 		 * variable length bitmaps.*/
+ 		xdr->p = bm_p;
+-		res->acl_data_offset = be32_to_cpup(bm_p) + 2;
+-		res->acl_data_offset <<= 2;
+ 
+ 		/* We ignore &savep and don't do consistency checks on
+ 		 * the attr length.  Let userspace figure it out.... */
+ 		hdrlen = (u8 *)xdr->p - (u8 *)iov->iov_base;
+ 		attrlen += res->acl_data_offset;
+-		recvd = req->rq_rcv_buf.len - hdrlen;
+-		if (attrlen > recvd) {
++		if (attrlen > page_len) {
+ 			if (res->acl_flags & NFS4_ACL_LEN_REQUEST) {
+ 				/* getxattr interface called with a NULL buf */
+ 				res->acl_len = attrlen;
+ 				goto out;
+ 			}
+-			dprintk("NFS: acl reply: attrlen %u > recvd %u\n",
+-					attrlen, recvd);
++			dprintk("NFS: acl reply: attrlen %zu > page_len %u\n",
++					attrlen, page_len);
+ 			return -EINVAL;
+ 		}
+ 		xdr_read_pages(xdr, attrlen);
 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
 index 5c3cd82..ed535e5 100644
 --- a/fs/nfsd/vfs.c
@@ -61170,10 +61404,49 @@ index 810431d..ccc3638 100644
   * The "pgd_xxx()" functions here are trivial for a folded two-level
   * setup: the pud is never bad, and a pud always exists (as it's folded
 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
-index a03c098..7e5b223 100644
+index a03c098..19751cf 100644
 --- a/include/asm-generic/pgtable.h
 +++ b/include/asm-generic/pgtable.h
-@@ -502,6 +502,14 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
+@@ -445,6 +445,18 @@ static inline int pmd_write(pmd_t pmd)
+ #endif /* __HAVE_ARCH_PMD_WRITE */
+ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+ 
++#ifndef  __HAVE_ARCH_READ_PMD_ATOMIC
++static inline pmd_t read_pmd_atomic(pmd_t *pmdp)
++{
++	/*
++	 * Depend on compiler for an atomic pmd read. NOTE: this is
++	 * only going to work, if the pmdval_t isn't larger than
++	 * an unsigned long.
++	 */
++	return *pmdp;
++}
++#endif /* __HAVE_ARCH_READ_PMD_ATOMIC */
++
+ /*
+  * This function is meant to be used by sites walking pagetables with
+  * the mmap_sem hold in read mode to protect against MADV_DONTNEED and
+@@ -458,11 +470,17 @@ static inline int pmd_write(pmd_t pmd)
+  * undefined so behaving like if the pmd was none is safe (because it
+  * can return none anyway). The compiler level barrier() is critically
+  * important to compute the two checks atomically on the same pmdval.
++ *
++ * For 32bit kernels with a 64bit large pmd_t this automatically takes
++ * care of reading the pmd atomically to avoid SMP race conditions
++ * against pmd_populate() when the mmap_sem is hold for reading by the
++ * caller (a special atomic read not done by "gcc" as in the generic
++ * version above, is also needed when THP is disabled because the page
++ * fault can populate the pmd from under us).
+  */
+ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
+ {
+-	/* depend on compiler for an atomic pmd read */
+-	pmd_t pmdval = *pmd;
++	pmd_t pmdval = read_pmd_atomic(pmd);
+ 	/*
+ 	 * The barrier will stabilize the pmdval in a register or on
+ 	 * the stack so that it will stop changing under the code.
+@@ -502,6 +520,14 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
  #endif
  }
  
@@ -77251,6 +77524,20 @@ index 94cdbc5..0cb0063 100644
  		if (peer->tcp_ts_stamp) {
  			ts = peer->tcp_ts;
  			tsage = get_seconds() - peer->tcp_ts_stamp;
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 7904db4..0f12572 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -851,8 +851,7 @@ new_segment:
+ wait_for_sndbuf:
+ 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ wait_for_memory:
+-		if (copied)
+-			tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
++		tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
+ 
+ 		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
+ 			goto do_error;
 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
 index de69cec..74908e1 100644
 --- a/net/ipv4/tcp_ipv4.c

diff --git a/3.3.6/0000_README b/3.3.6/0000_README
index f827d9b..f465286 100644
--- a/3.3.6/0000_README
+++ b/3.3.6/0000_README
@@ -6,7 +6,7 @@ Patch:	1005_linux-3.3.6.patch
 From:	http://www.kernel.org
 Desc:	Linux 3.3.6
 
-Patch:	4420_grsecurity-2.9-3.3.6-201205131658.patch
+Patch:	4420_grsecurity-2.9-3.3.6-201205191125.patch
 From:	http://www.grsecurity.net
 Desc:	hardened-sources base patch from upstream grsecurity
 

diff --git a/3.3.6/4420_grsecurity-2.9-3.3.6-201205131658.patch b/3.3.6/4420_grsecurity-2.9-3.3.6-201205191125.patch
similarity index 99%
rename from 3.3.6/4420_grsecurity-2.9-3.3.6-201205131658.patch
rename to 3.3.6/4420_grsecurity-2.9-3.3.6-201205191125.patch
index 0bad506..bfd5849 100644
--- a/3.3.6/4420_grsecurity-2.9-3.3.6-201205131658.patch
+++ b/3.3.6/4420_grsecurity-2.9-3.3.6-201205191125.patch
@@ -11386,10 +11386,67 @@ index 98391db..8f6984e 100644
  
  static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
 diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
-index effff47..f9e4035 100644
+index effff47..bbb8295 100644
 --- a/arch/x86/include/asm/pgtable-3level.h
 +++ b/arch/x86/include/asm/pgtable-3level.h
-@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
+@@ -31,6 +31,56 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte)
+ 	ptep->pte_low = pte.pte_low;
+ }
+ 
++#define  __HAVE_ARCH_READ_PMD_ATOMIC
++/*
++ * pte_offset_map_lock on 32bit PAE kernels was reading the pmd_t with
++ * a "*pmdp" dereference done by gcc. Problem is, in certain places
++ * where pte_offset_map_lock is called, concurrent page faults are
++ * allowed, if the mmap_sem is hold for reading. An example is mincore
++ * vs page faults vs MADV_DONTNEED. On the page fault side
++ * pmd_populate rightfully does a set_64bit, but if we're reading the
++ * pmd_t with a "*pmdp" on the mincore side, a SMP race can happen
++ * because gcc will not read the 64bit of the pmd atomically. To fix
++ * this all places running pmd_offset_map_lock() while holding the
++ * mmap_sem in read mode, shall read the pmdp pointer using this
++ * function to know if the pmd is null nor not, and in turn to know if
++ * they can run pmd_offset_map_lock or pmd_trans_huge or other pmd
++ * operations.
++ *
++ * Without THP if the mmap_sem is hold for reading, the
++ * pmd can only transition from null to not null while read_pmd_atomic runs.
++ * So there's no need of literally reading it atomically.
++ *
++ * With THP if the mmap_sem is hold for reading, the pmd can become
++ * THP or null or point to a pte (and in turn become "stable") at any
++ * time under read_pmd_atomic, so it's mandatory to read it atomically
++ * with cmpxchg8b.
++ */
++#ifndef CONFIG_TRANSPARENT_HUGEPAGE
++static inline pmd_t read_pmd_atomic(pmd_t *pmdp)
++{
++	pmdval_t ret;
++	u32 *tmp = (u32 *)pmdp;
++
++	ret = (pmdval_t) (*tmp);
++	if (ret) {
++		/*
++		 * If the low part is null, we must not read the high part
++		 * or we can end up with a partial pmd.
++		 */
++		smp_rmb();
++		ret |= ((pmdval_t)*(tmp + 1)) << 32;
++	}
++
++	return __pmd(ret);
++}
++#else /* CONFIG_TRANSPARENT_HUGEPAGE */
++static inline pmd_t read_pmd_atomic(pmd_t *pmdp)
++{
++	return __pmd(atomic64_read((atomic64_t *)pmdp));
++}
++#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
++
+ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
+ {
+ 	set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
+@@ -38,12 +88,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
  
  static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
  {
@@ -24949,18 +25006,19 @@ index 8ecbb4b..a269cab 100644
  	}
  	if (mm->get_unmapped_area == arch_get_unmapped_area)
 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
-index 6cabf65..77e9c1c 100644
+index 6cabf65..00139c4 100644
 --- a/arch/x86/mm/init.c
 +++ b/arch/x86/mm/init.c
-@@ -17,6 +17,7 @@
+@@ -17,6 +17,8 @@
  #include <asm/tlb.h>
  #include <asm/proto.h>
  #include <asm/dma.h>		/* for MAX_DMA_PFN */
 +#include <asm/desc.h>
++#include <asm/bios_ebda.h>
  
  unsigned long __initdata pgt_buf_start;
  unsigned long __meminitdata pgt_buf_end;
-@@ -33,7 +34,7 @@ int direct_gbpages
+@@ -33,7 +35,7 @@ int direct_gbpages
  static void __init find_early_table_space(unsigned long end, int use_pse,
  					  int use_gbpages)
  {
@@ -24969,8 +25027,16 @@ index 6cabf65..77e9c1c 100644
  	phys_addr_t base;
  
  	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
-@@ -314,8 +315,29 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
+@@ -312,10 +314,37 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
+  * Access has to be given to non-kernel-ram areas as well, these contain the PCI
+  * mmio resources as well as potential bios/acpi data regions.
   */
++
++#ifdef CONFIG_GRKERNSEC_KMEM
++static unsigned int ebda_start __read_only;
++static unsigned int ebda_end __read_only;
++#endif
++
  int devmem_is_allowed(unsigned long pagenr)
  {
 +#ifdef CONFIG_GRKERNSEC_KMEM
@@ -24978,7 +25044,7 @@ index 6cabf65..77e9c1c 100644
 +	if (!pagenr)
 +		return 1;
 +	/* allow EBDA */
-+	if ((0x9f000 >> PAGE_SHIFT) == pagenr)
++	if (pagenr >= ebda_start && pagenr < ebda_end)
 +		return 1;
 +#else
 +	if (!pagenr)
@@ -25000,18 +25066,48 @@ index 6cabf65..77e9c1c 100644
  	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
  		return 0;
  	if (!page_is_ram(pagenr))
-@@ -374,6 +396,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
+@@ -372,8 +401,116 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
+ #endif
+ }
  
++#ifdef CONFIG_GRKERNSEC_KMEM
++static inline void gr_init_ebda(void)
++{
++	unsigned int ebda_addr;
++	unsigned int ebda_size = 0;
++
++	ebda_addr = get_bios_ebda();
++	if (ebda_addr) {
++		ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
++		ebda_size <<= 10;
++	}
++	if (ebda_addr && ebda_size) {
++		ebda_start = ebda_addr >> PAGE_SHIFT;
++		ebda_end = min(PAGE_ALIGN(ebda_addr + ebda_size), 0xa0000) >> PAGE_SHIFT;
++	} else {
++		ebda_start = 0x9f000 >> PAGE_SHIFT;
++		ebda_end = 0xa0000 >> PAGE_SHIFT;
++	}
++}
++#else
++static inline void gr_init_ebda(void) { }
++#endif
++
  void free_initmem(void)
  {
-+
 +#ifdef CONFIG_PAX_KERNEXEC
 +#ifdef CONFIG_X86_32
 +	/* PaX: limit KERNEL_CS to actual size */
 +	unsigned long addr, limit;
 +	struct desc_struct d;
 +	int cpu;
++#endif
++#endif
++
++	gr_init_ebda();
 +
++#ifdef CONFIG_PAX_KERNEXEC
++#ifdef CONFIG_X86_32
 +	limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
 +	limit = (limit - 1UL) >> PAGE_SHIFT;
 +
@@ -29479,9 +29575,18 @@ index 211fc44..c5116f1 100644
  	mdev->bm_writ_cnt  =
  	mdev->read_cnt     =
 diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
-index af2a250..219c74b 100644
+index af2a250..0fdeb75 100644
 --- a/drivers/block/drbd/drbd_nl.c
 +++ b/drivers/block/drbd/drbd_nl.c
+@@ -2297,7 +2297,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
+ 		return;
+ 	}
+ 
+-	if (!cap_raised(current_cap(), CAP_SYS_ADMIN)) {
++	if (!capable(CAP_SYS_ADMIN)) {
+ 		retcode = ERR_PERM;
+ 		goto fail;
+ 	}
 @@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
  	module_put(THIS_MODULE);
  }
@@ -33774,6 +33879,19 @@ index 1ce84ed..0fdd40a 100644
  		if (!*param->name) {
  			DMWARN("name not supplied when creating device");
  			return -EINVAL;
+diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
+index 1f23e04..08d9a20 100644
+--- a/drivers/md/dm-log-userspace-transfer.c
++++ b/drivers/md/dm-log-userspace-transfer.c
+@@ -134,7 +134,7 @@ static void cn_ulog_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
+ {
+ 	struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1);
+ 
+-	if (!cap_raised(current_cap(), CAP_SYS_ADMIN))
++	if (!capable(CAP_SYS_ADMIN))
+ 		return;
+ 
+ 	spin_lock(&receiving_list_lock);
 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
 index 9bfd057..5373ff3 100644
 --- a/drivers/md/dm-raid1.c
@@ -35603,6 +35721,23 @@ index c82d444..0007fb4 100644
   	{ "100/10M Ethernet PCI Adapter",	HAS_MII_XCVR },
  	{ "100/10M Ethernet PCI Adapter",	HAS_CHIP_XCVR },
  	{ "1000/100/10M Ethernet PCI Adapter",	HAS_MII_XCVR },
+diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
+index b444f21..b72d976 100644
+--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
++++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
+@@ -492,7 +492,11 @@ out:
+ static void e1000_down_and_stop(struct e1000_adapter *adapter)
+ {
+ 	set_bit(__E1000_DOWN, &adapter->flags);
+-	cancel_work_sync(&adapter->reset_task);
++
++	/* Only kill reset task if adapter is not resetting */
++	if (!test_bit(__E1000_RESETTING, &adapter->flags))
++		cancel_work_sync(&adapter->reset_task);
++
+ 	cancel_delayed_work_sync(&adapter->watchdog_task);
+ 	cancel_delayed_work_sync(&adapter->phy_info_task);
+ 	cancel_delayed_work_sync(&adapter->fifo_stall_task);
 diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
 index e1159e5..e18684d 100644
 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -42391,7 +42526,7 @@ index a40c05e..785c583 100644
  	return count;
  }
 diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
-index 8408543..357841c 100644
+index 8408543..d6f20f1 100644
 --- a/drivers/video/uvesafb.c
 +++ b/drivers/video/uvesafb.c
 @@ -19,6 +19,7 @@
@@ -42402,6 +42537,15 @@ index 8408543..357841c 100644
  #include <video/edid.h>
  #include <video/uvesafb.h>
  #ifdef CONFIG_X86
+@@ -73,7 +74,7 @@ static void uvesafb_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *ns
+ 	struct uvesafb_task *utask;
+ 	struct uvesafb_ktask *task;
+ 
+-	if (!cap_raised(current_cap(), CAP_SYS_ADMIN))
++	if (!capable(CAP_SYS_ADMIN))
+ 		return;
+ 
+ 	if (msg->seq >= UVESAFB_TASKS_MAX)
 @@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
  		NULL,
  	};
@@ -48031,6 +48175,96 @@ index f649fba..236bf92 100644
  }
  
  void nfs_fattr_init(struct nfs_fattr *fattr)
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 2612223..e0ab779 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -3588,19 +3588,23 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
+ 	if (npages == 0)
+ 		npages = 1;
+ 
++	/* Add an extra page to handle the bitmap returned */
++	npages++;
++
+ 	for (i = 0; i < npages; i++) {
+ 		pages[i] = alloc_page(GFP_KERNEL);
+ 		if (!pages[i])
+ 			goto out_free;
+ 	}
+-	if (npages > 1) {
+-		/* for decoding across pages */
+-		res.acl_scratch = alloc_page(GFP_KERNEL);
+-		if (!res.acl_scratch)
+-			goto out_free;
+-	}
++
++	/* for decoding across pages */
++	res.acl_scratch = alloc_page(GFP_KERNEL);
++	if (!res.acl_scratch)
++		goto out_free;
++
+ 	args.acl_len = npages * PAGE_SIZE;
+ 	args.acl_pgbase = 0;
++
+ 	/* Let decode_getfacl know not to fail if the ACL data is larger than
+ 	 * the page we send as a guess */
+ 	if (buf == NULL)
+diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
+index 33bd8d0..9b26eaf 100644
+--- a/fs/nfs/nfs4xdr.c
++++ b/fs/nfs/nfs4xdr.c
+@@ -4975,11 +4975,19 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
+ 		 bitmap[3] = {0};
+ 	struct kvec *iov = req->rq_rcv_buf.head;
+ 	int status;
++	size_t page_len = xdr->buf->page_len;
+ 
+ 	res->acl_len = 0;
+ 	if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
+ 		goto out;
++
+ 	bm_p = xdr->p;
++	res->acl_data_offset = be32_to_cpup(bm_p) + 2;
++	res->acl_data_offset <<= 2;
++	/* Check if the acl data starts beyond the allocated buffer */
++	if (res->acl_data_offset > page_len)
++		return -ERANGE;
++
+ 	if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
+ 		goto out;
+ 	if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
+@@ -4989,28 +4997,24 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
+ 		return -EIO;
+ 	if (likely(bitmap[0] & FATTR4_WORD0_ACL)) {
+ 		size_t hdrlen;
+-		u32 recvd;
+ 
+ 		/* The bitmap (xdr len + bitmaps) and the attr xdr len words
+ 		 * are stored with the acl data to handle the problem of
+ 		 * variable length bitmaps.*/
+ 		xdr->p = bm_p;
+-		res->acl_data_offset = be32_to_cpup(bm_p) + 2;
+-		res->acl_data_offset <<= 2;
+ 
+ 		/* We ignore &savep and don't do consistency checks on
+ 		 * the attr length.  Let userspace figure it out.... */
+ 		hdrlen = (u8 *)xdr->p - (u8 *)iov->iov_base;
+ 		attrlen += res->acl_data_offset;
+-		recvd = req->rq_rcv_buf.len - hdrlen;
+-		if (attrlen > recvd) {
++		if (attrlen > page_len) {
+ 			if (res->acl_flags & NFS4_ACL_LEN_REQUEST) {
+ 				/* getxattr interface called with a NULL buf */
+ 				res->acl_len = attrlen;
+ 				goto out;
+ 			}
+-			dprintk("NFS: acl reply: attrlen %u > recvd %u\n",
+-					attrlen, recvd);
++			dprintk("NFS: acl reply: attrlen %zu > page_len %u\n",
++					attrlen, page_len);
+ 			return -EINVAL;
+ 		}
+ 		xdr_read_pages(xdr, attrlen);
 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
 index b96fe94..a4dbece 100644
 --- a/fs/nfsd/vfs.c
@@ -60785,10 +61019,49 @@ index 810431d..ccc3638 100644
   * The "pgd_xxx()" functions here are trivial for a folded two-level
   * setup: the pud is never bad, and a pud always exists (as it's folded
 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
-index a03c098..7e5b223 100644
+index a03c098..19751cf 100644
 --- a/include/asm-generic/pgtable.h
 +++ b/include/asm-generic/pgtable.h
-@@ -502,6 +502,14 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
+@@ -445,6 +445,18 @@ static inline int pmd_write(pmd_t pmd)
+ #endif /* __HAVE_ARCH_PMD_WRITE */
+ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+ 
++#ifndef  __HAVE_ARCH_READ_PMD_ATOMIC
++static inline pmd_t read_pmd_atomic(pmd_t *pmdp)
++{
++	/*
++	 * Depend on compiler for an atomic pmd read. NOTE: this is
++	 * only going to work, if the pmdval_t isn't larger than
++	 * an unsigned long.
++	 */
++	return *pmdp;
++}
++#endif /* __HAVE_ARCH_READ_PMD_ATOMIC */
++
+ /*
+  * This function is meant to be used by sites walking pagetables with
+  * the mmap_sem hold in read mode to protect against MADV_DONTNEED and
+@@ -458,11 +470,17 @@ static inline int pmd_write(pmd_t pmd)
+  * undefined so behaving like if the pmd was none is safe (because it
+  * can return none anyway). The compiler level barrier() is critically
+  * important to compute the two checks atomically on the same pmdval.
++ *
++ * For 32bit kernels with a 64bit large pmd_t this automatically takes
++ * care of reading the pmd atomically to avoid SMP race conditions
++ * against pmd_populate() when the mmap_sem is hold for reading by the
++ * caller (a special atomic read not done by "gcc" as in the generic
++ * version above, is also needed when THP is disabled because the page
++ * fault can populate the pmd from under us).
+  */
+ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
+ {
+-	/* depend on compiler for an atomic pmd read */
+-	pmd_t pmdval = *pmd;
++	pmd_t pmdval = read_pmd_atomic(pmd);
+ 	/*
+ 	 * The barrier will stabilize the pmdval in a register or on
+ 	 * the stack so that it will stop changing under the code.
+@@ -502,6 +520,14 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
  #endif
  }
  
@@ -76614,6 +76887,20 @@ index 0197747..7adb0dc 100644
  		if (peer->tcp_ts_stamp) {
  			ts = peer->tcp_ts;
  			tsage = get_seconds() - peer->tcp_ts_stamp;
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index e2327db..bf29e7c 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -849,8 +849,7 @@ new_segment:
+ wait_for_sndbuf:
+ 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ wait_for_memory:
+-		if (copied)
+-			tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
++		tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
+ 
+ 		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
+ 			goto do_error;
 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
 index fd54c5f..96d6407 100644
 --- a/net/ipv4/tcp_ipv4.c
@@ -77911,6 +78198,19 @@ index 7dab229..212156f 100644
  		sax->fsa_ax25.sax25_call   = nr->source_addr;
  		*uaddr_len = sizeof(struct sockaddr_ax25);
  	}
+diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
+index 2c03050..5cf68c1 100644
+--- a/net/openvswitch/datapath.c
++++ b/net/openvswitch/datapath.c
+@@ -322,7 +322,7 @@ static int queue_userspace_packet(int dp_ifindex, struct sk_buff *skb,
+ 			return -ENOMEM;
+ 
+ 		nskb = __vlan_put_tag(nskb, vlan_tx_tag_get(nskb));
+-		if (!skb)
++		if (!nskb)
+ 			return -ENOMEM;
+ 
+ 		nskb->vlan_tci = 0;
 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
 index 2dbb32b..a1b4722 100644
 --- a/net/packet/af_packet.c


