public inbox for gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/hardened-patchset:master commit in: 3.14.5/, 3.2.59/
From: Anthony G. Basile @ 2014-06-05 22:22 UTC
  To: gentoo-commits

commit:     60daeb8123a7055c17ab82f08a70679460278546
Author:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
AuthorDate: Thu Jun  5 22:27:14 2014 +0000
Commit:     Anthony G. Basile <blueness <AT> gentoo <DOT> org>
CommitDate: Thu Jun  5 22:27:14 2014 +0000
URL:        http://git.overlays.gentoo.org/gitweb/?p=proj/hardened-patchset.git;a=commit;h=60daeb81

Grsec/PaX: 3.0-{3.2.59,3.14.5}-201406051310, CVE-2014-3153

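The headline change is CVE-2014-3153, the futex vulnerability popularized by the "towelroot" exploit: FUTEX_CMP_REQUEUE_PI could requeue a futex onto itself, leaving a waiter blocked on an rt_mutex it nominally owns and corrupting kernel locking state. The futex hunks below add uaddr1 == uaddr2 and matching-key rejection in futex_requeue() and futex_wait_requeue_pi(), and harden the pi_state lookup and unlock paths. As an illustrative userspace sketch only (not part of the patch), the self-requeue pattern at the core of the exploit is now refused up front:

	#include <linux/futex.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		unsigned int f = 0;

		/* Requeue the futex onto itself: uaddr1 == uaddr2.
		 * With this patch applied the kernel fails the call
		 * with EINVAL instead of proceeding. */
		return syscall(SYS_futex, &f, FUTEX_CMP_REQUEUE_PI,
			       1 /* nr_wake */, 1 /* nr_requeue */,
			       &f, f /* expected value */);
	}
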
---
 3.14.5/0000_README                                 |   2 +-
 ... 4420_grsecurity-3.0-3.14.5-201406051310.patch} | 567 ++++++++++++++++++++-
 3.2.59/0000_README                                 |   2 +-
 ... 4420_grsecurity-3.0-3.2.59-201406051309.patch} | 480 ++++++++++++++++-
 4 files changed, 1016 insertions(+), 35 deletions(-)

diff --git a/3.14.5/0000_README b/3.14.5/0000_README
index 287174d..d423279 100644
--- a/3.14.5/0000_README
+++ b/3.14.5/0000_README
@@ -2,7 +2,7 @@ README
 -----------------------------------------------------------------------------
 Individual Patch Descriptions:
 -----------------------------------------------------------------------------
-Patch:	4420_grsecurity-3.0-3.14.5-201406021708.patch
+Patch:	4420_grsecurity-3.0-3.14.5-201406051310.patch
 From:	http://www.grsecurity.net
 Desc:	hardened-sources base patch from upstream grsecurity
 

diff --git a/3.14.5/4420_grsecurity-3.0-3.14.5-201406021708.patch b/3.14.5/4420_grsecurity-3.0-3.14.5-201406051310.patch
similarity index 99%
rename from 3.14.5/4420_grsecurity-3.0-3.14.5-201406021708.patch
rename to 3.14.5/4420_grsecurity-3.0-3.14.5-201406051310.patch
index 400f193..311f637 100644
--- a/3.14.5/4420_grsecurity-3.0-3.14.5-201406021708.patch
+++ b/3.14.5/4420_grsecurity-3.0-3.14.5-201406051310.patch
@@ -46525,6 +46525,18 @@ index be7d7a6..a8983f8 100644
  		break;
  	default:
  		dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
+index 7d4f549..3e46c89 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
+@@ -1022,6 +1022,7 @@ static int qlcnic_dcb_peer_app_info(struct net_device *netdev,
+ 	struct qlcnic_dcb_cee *peer;
+ 	int i;
+ 
++	memset(info, 0, sizeof(*info));
+ 	*app_count = 0;
+ 
+ 	if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
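The qlcnic hunk above is a routine infoleak fix: the DCB peer-app-info structure is copied out to user space, and without the added memset() any fields the function does not explicitly fill would carry stale kernel stack bytes. A hedged, purely hypothetical sketch of the same pattern (names are not from this patch):

	struct reply {
		u32  count;
		char name[16];			/* only partly written */
	};

	static long hypothetical_get_info(void __user *arg)
	{
		struct reply r;

		memset(&r, 0, sizeof(r));	/* zero everything,
						 * padding included */
		r.count = 1;
		return copy_to_user(arg, &r, sizeof(r)) ? -EFAULT : 0;
	}
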
 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
 index 7763962..c3499a7 100644
 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -58815,7 +58827,7 @@ index e4141f2..d8263e8 100644
  		i += packet_length_size;
  		if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
 diff --git a/fs/exec.c b/fs/exec.c
-index 3d78fcc..75b208f 100644
+index 3d78fcc..6b2fd70 100644
 --- a/fs/exec.c
 +++ b/fs/exec.c
 @@ -55,8 +55,20 @@
@@ -59561,7 +59573,7 @@ index 3d78fcc..75b208f 100644
 +#endif
 +
 +#ifndef CONFIG_STACK_GROWSUP
-+	const void * stackstart = task_stack_page(current);
++	unsigned long stackstart = (unsigned long)task_stack_page(current);
 +	if (unlikely(current_stack_pointer < stackstart + 512 ||
 +		     current_stack_pointer >= stackstart + THREAD_SIZE))
 +		BUG();
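The stackstart change (mirrored in the 3.2.59 patch below) is a type correction: current_stack_pointer is an unsigned long, so carrying out the bounds arithmetic in unsigned long avoids arithmetic on void * (a GCC extension) and pointer/integer comparison warnings. The logic itself is unchanged; in sketch form:

	unsigned long sp    = current_stack_pointer;
	unsigned long start = (unsigned long)task_stack_page(current);

	/* stacks grow down: trap if sp has left the stack page(s) or
	 * is within 512 bytes of overflowing them */
	if (unlikely(sp < start + 512 || sp >= start + THREAD_SIZE))
		BUG();
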
@@ -81417,6 +81429,36 @@ index 0000000..33f4af8
 +};
 +
 +#endif
+diff --git a/include/linux/netlink.h b/include/linux/netlink.h
+index aad8eea..034cda7 100644
+--- a/include/linux/netlink.h
++++ b/include/linux/netlink.h
+@@ -16,9 +16,10 @@ static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
+ }
+ 
+ enum netlink_skb_flags {
+-	NETLINK_SKB_MMAPED	= 0x1,		/* Packet data is mmaped */
+-	NETLINK_SKB_TX		= 0x2,		/* Packet was sent by userspace */
+-	NETLINK_SKB_DELIVERED	= 0x4,		/* Packet was delivered */
++	NETLINK_SKB_MMAPED	= 0x1,	/* Packet data is mmaped */
++	NETLINK_SKB_TX		= 0x2,	/* Packet was sent by userspace */
++	NETLINK_SKB_DELIVERED	= 0x4,	/* Packet was delivered */
++	NETLINK_SKB_DST		= 0x8,	/* Dst set in sendto or sendmsg */
+ };
+ 
+ struct netlink_skb_parms {
+@@ -169,4 +170,11 @@ struct netlink_tap {
+ extern int netlink_add_tap(struct netlink_tap *nt);
+ extern int netlink_remove_tap(struct netlink_tap *nt);
+ 
++bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
++			  struct user_namespace *ns, int cap);
++bool netlink_ns_capable(const struct sk_buff *skb,
++			struct user_namespace *ns, int cap);
++bool netlink_capable(const struct sk_buff *skb, int cap);
++bool netlink_net_capable(const struct sk_buff *skb, int cap);
++
+ #endif	/* __LINUX_NETLINK_H */
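These declarations are the interface half of the upstream netlink capability rework (the fix series for CVE-2014-0181); the implementations appear in the net/netlink/af_netlink.c hunk further down. The point is to authorize requests against the opener of the socket as well as the sender, rather than trusting current alone. A hedged usage sketch with a hypothetical handler name (not taken from this patch):

	static int hypothetical_doit(struct sk_buff *skb,
				     struct nlmsghdr *nlh)
	{
		/* CAP_NET_ADMIN must be held, in the socket's network
		 * namespace, by the socket's opener as well */
		if (!netlink_net_capable(skb, CAP_NET_ADMIN))
			return -EPERM;

		/* ... handle the request ... */
		return 0;
	}
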
 diff --git a/include/linux/nls.h b/include/linux/nls.h
 index 520681b..1d67ed2 100644
 --- a/include/linux/nls.h
@@ -81508,6 +81550,37 @@ index 5f2e559..7d59314 100644
  
  /**
   * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
+diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
+index 95961f0..0afb48f 100644
+--- a/include/linux/percpu-refcount.h
++++ b/include/linux/percpu-refcount.h
+@@ -110,7 +110,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
+ 	pcpu_count = ACCESS_ONCE(ref->pcpu_count);
+ 
+ 	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR))
+-		__this_cpu_inc(*pcpu_count);
++		this_cpu_inc(*pcpu_count);
+ 	else
+ 		atomic_inc(&ref->count);
+ 
+@@ -139,7 +139,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
+ 	pcpu_count = ACCESS_ONCE(ref->pcpu_count);
+ 
+ 	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) {
+-		__this_cpu_inc(*pcpu_count);
++		this_cpu_inc(*pcpu_count);
+ 		ret = true;
+ 	}
+ 
+@@ -164,7 +164,7 @@ static inline void percpu_ref_put(struct percpu_ref *ref)
+ 	pcpu_count = ACCESS_ONCE(ref->pcpu_count);
+ 
+ 	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR))
+-		__this_cpu_dec(*pcpu_count);
++		this_cpu_dec(*pcpu_count);
+ 	else if (unlikely(atomic_dec_and_test(&ref->count)))
+ 		ref->release(ref);
+ 
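The percpu-refcount hunk swaps the __this_cpu_*() operations for this_cpu_*(): the double-underscore forms assume preemption is already disabled, but percpu_ref_get()/percpu_ref_tryget()/percpu_ref_put() may run in preemptible context, where a migration between locating this CPU's slot and updating it corrupts another CPU's counter. A rough sketch of the hazard (pseudocode, not from this patch):

	/*
	 * __this_cpu_inc(*pcpu_count) can decompose into two steps on
	 * architectures without a single-instruction form:
	 *
	 *	ptr = <this CPU's slot of pcpu_count>;
	 *	<-- preemption + migration to another CPU -->
	 *	(*ptr)++;	// updates the old CPU's counter
	 *
	 * this_cpu_inc(*pcpu_count) performs the whole read-modify-write
	 * preempt-safely (e.g. one %gs-relative add on x86).
	 */
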
 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
 index e56b07f..aef789b 100644
 --- a/include/linux/perf_event.h
@@ -83816,7 +83889,7 @@ index a61b98c..aade1eb 100644
  int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb);
  int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
 diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
-index 567c681..cd73ac0 100644
+index 567c681..cd73ac02 100644
 --- a/include/net/llc_s_st.h
 +++ b/include/net/llc_s_st.h
 @@ -20,7 +20,7 @@ struct llc_sap_state_trans {
@@ -86824,7 +86897,7 @@ index a17621c..d9e4b37 100644
  			else
  				new_fs = fs;
 diff --git a/kernel/futex.c b/kernel/futex.c
-index 6801b37..bb6becca 100644
+index 6801b37..c0f67cf 100644
 --- a/kernel/futex.c
 +++ b/kernel/futex.c
 @@ -54,6 +54,7 @@
@@ -86835,6 +86908,24 @@ index 6801b37..bb6becca 100644
  #include <linux/signal.h>
  #include <linux/export.h>
  #include <linux/magic.h>
+@@ -188,7 +189,7 @@ struct futex_pi_state {
+ 	atomic_t refcount;
+ 
+ 	union futex_key key;
+-};
++} __randomize_layout;
+ 
+ /**
+  * struct futex_q - The hashed futex queue entry, one per waiting task
+@@ -222,7 +223,7 @@ struct futex_q {
+ 	struct rt_mutex_waiter *rt_waiter;
+ 	union futex_key *requeue_pi_key;
+ 	u32 bitset;
+-};
++} __randomize_layout;
+ 
+ static const struct futex_q futex_q_init = {
+ 	/* list gets initialized in queue_me()*/
 @@ -380,6 +381,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
  	struct page *page, *page_head;
  	int err, ro = 0;
@@ -86856,7 +86947,326 @@ index 6801b37..bb6becca 100644
  
  	pagefault_disable();
  	ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
-@@ -2886,6 +2892,7 @@ static void __init futex_detect_cmpxchg(void)
+@@ -729,6 +735,55 @@ void exit_pi_state_list(struct task_struct *curr)
+ 	raw_spin_unlock_irq(&curr->pi_lock);
+ }
+ 
++/*
++ * We need to check the following states:
++ *
++ *      Waiter | pi_state | pi->owner | uTID      | uODIED | ?
++ *
++ * [1]  NULL   | ---      | ---       | 0         | 0/1    | Valid
++ * [2]  NULL   | ---      | ---       | >0        | 0/1    | Valid
++ *
++ * [3]  Found  | NULL     | --        | Any       | 0/1    | Invalid
++ *
++ * [4]  Found  | Found    | NULL      | 0         | 1      | Valid
++ * [5]  Found  | Found    | NULL      | >0        | 1      | Invalid
++ *
++ * [6]  Found  | Found    | task      | 0         | 1      | Valid
++ *
++ * [7]  Found  | Found    | NULL      | Any       | 0      | Invalid
++ *
++ * [8]  Found  | Found    | task      | ==taskTID | 0/1    | Valid
++ * [9]  Found  | Found    | task      | 0         | 0      | Invalid
++ * [10] Found  | Found    | task      | !=taskTID | 0/1    | Invalid
++ *
++ * [1]	Indicates that the kernel can acquire the futex atomically. We
++ *	came came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
++ *
++ * [2]	Valid, if TID does not belong to a kernel thread. If no matching
++ *      thread is found then it indicates that the owner TID has died.
++ *
++ * [3]	Invalid. The waiter is queued on a non PI futex
++ *
++ * [4]	Valid state after exit_robust_list(), which sets the user space
++ *	value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
++ *
++ * [5]	The user space value got manipulated between exit_robust_list()
++ *	and exit_pi_state_list()
++ *
++ * [6]	Valid state after exit_pi_state_list() which sets the new owner in
++ *	the pi_state but cannot access the user space value.
++ *
++ * [7]	pi_state->owner can only be NULL when the OWNER_DIED bit is set.
++ *
++ * [8]	Owner and user space value match
++ *
++ * [9]	There is no transient state which sets the user space TID to 0
++ *	except exit_robust_list(), but this is indicated by the
++ *	FUTEX_OWNER_DIED bit. See [4]
++ *
++ * [10] There is no transient state which leaves owner and user space
++ *	TID out of sync.
++ */
+ static int
+ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
+ 		union futex_key *key, struct futex_pi_state **ps)
+@@ -741,12 +796,13 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
+ 	plist_for_each_entry_safe(this, next, &hb->chain, list) {
+ 		if (match_futex(&this->key, key)) {
+ 			/*
+-			 * Another waiter already exists - bump up
+-			 * the refcount and return its pi_state:
++			 * Sanity check the waiter before increasing
++			 * the refcount and attaching to it.
+ 			 */
+ 			pi_state = this->pi_state;
+ 			/*
+-			 * Userspace might have messed up non-PI and PI futexes
++			 * Userspace might have messed up non-PI and
++			 * PI futexes [3]
+ 			 */
+ 			if (unlikely(!pi_state))
+ 				return -EINVAL;
+@@ -754,34 +810,70 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
+ 			WARN_ON(!atomic_read(&pi_state->refcount));
+ 
+ 			/*
+-			 * When pi_state->owner is NULL then the owner died
+-			 * and another waiter is on the fly. pi_state->owner
+-			 * is fixed up by the task which acquires
+-			 * pi_state->rt_mutex.
+-			 *
+-			 * We do not check for pid == 0 which can happen when
+-			 * the owner died and robust_list_exit() cleared the
+-			 * TID.
++			 * Handle the owner died case:
+ 			 */
+-			if (pid && pi_state->owner) {
++			if (uval & FUTEX_OWNER_DIED) {
+ 				/*
+-				 * Bail out if user space manipulated the
+-				 * futex value.
++				 * exit_pi_state_list sets owner to NULL and
++				 * wakes the topmost waiter. The task which
++				 * acquires the pi_state->rt_mutex will fixup
++				 * owner.
+ 				 */
+-				if (pid != task_pid_vnr(pi_state->owner))
++				if (!pi_state->owner) {
++					/*
++					 * No pi state owner, but the user
++					 * space TID is not 0. Inconsistent
++					 * state. [5]
++					 */
++					if (pid)
++						return -EINVAL;
++					/*
++					 * Take a ref on the state and
++					 * return. [4]
++					 */
++					goto out_state;
++				}
++
++				/*
++				 * If TID is 0, then either the dying owner
++				 * has not yet executed exit_pi_state_list()
++				 * or some waiter acquired the rtmutex in the
++				 * pi state, but did not yet fixup the TID in
++				 * user space.
++				 *
++				 * Take a ref on the state and return. [6]
++				 */
++				if (!pid)
++					goto out_state;
++			} else {
++				/*
++				 * If the owner died bit is not set,
++				 * then the pi_state must have an
++				 * owner. [7]
++				 */
++				if (!pi_state->owner)
+ 					return -EINVAL;
+ 			}
+ 
++			/*
++			 * Bail out if user space manipulated the
++			 * futex value. If pi state exists then the
++			 * owner TID must be the same as the user
++			 * space TID. [9/10]
++			 */
++			if (pid != task_pid_vnr(pi_state->owner))
++				return -EINVAL;
++
++		out_state:
+ 			atomic_inc(&pi_state->refcount);
+ 			*ps = pi_state;
+-
+ 			return 0;
+ 		}
+ 	}
+ 
+ 	/*
+ 	 * We are the first waiter - try to look up the real owner and attach
+-	 * the new pi_state to it, but bail out when TID = 0
++	 * the new pi_state to it, but bail out when TID = 0 [1]
+ 	 */
+ 	if (!pid)
+ 		return -ESRCH;
+@@ -789,6 +881,11 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
+ 	if (!p)
+ 		return -ESRCH;
+ 
++	if (!p->mm) {
++		put_task_struct(p);
++		return -EPERM;
++	}
++
+ 	/*
+ 	 * We need to look at the task state flags to figure out,
+ 	 * whether the task is exiting. To protect against the do_exit
+@@ -809,6 +906,9 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
+ 		return ret;
+ 	}
+ 
++	/*
++	 * No existing pi state. First waiter. [2]
++	 */
+ 	pi_state = alloc_pi_state();
+ 
+ 	/*
+@@ -880,10 +980,18 @@ retry:
+ 		return -EDEADLK;
+ 
+ 	/*
+-	 * Surprise - we got the lock. Just return to userspace:
++	 * Surprise - we got the lock, but we do not trust user space at all.
+ 	 */
+-	if (unlikely(!curval))
+-		return 1;
++	if (unlikely(!curval)) {
++		/*
++		 * We verify whether there is kernel state for this
++		 * futex. If not, we can safely assume, that the 0 ->
++		 * TID transition is correct. If state exists, we do
++		 * not bother to fixup the user space state as it was
++		 * corrupted already.
++		 */
++		return futex_top_waiter(hb, key) ? -EINVAL : 1;
++	}
+ 
+ 	uval = curval;
+ 
+@@ -1014,6 +1122,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
+ 	struct task_struct *new_owner;
+ 	struct futex_pi_state *pi_state = this->pi_state;
+ 	u32 uninitialized_var(curval), newval;
++	int ret = 0;
+ 
+ 	if (!pi_state)
+ 		return -EINVAL;
+@@ -1037,23 +1146,19 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
+ 		new_owner = this->task;
+ 
+ 	/*
+-	 * We pass it to the next owner. (The WAITERS bit is always
+-	 * kept enabled while there is PI state around. We must also
+-	 * preserve the owner died bit.)
++	 * We pass it to the next owner. The WAITERS bit is always
++	 * kept enabled while there is PI state around. We cleanup the
++	 * owner died bit, because we are the owner.
+ 	 */
+-	if (!(uval & FUTEX_OWNER_DIED)) {
+-		int ret = 0;
++	newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
+ 
+-		newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
+-
+-		if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
+-			ret = -EFAULT;
+-		else if (curval != uval)
+-			ret = -EINVAL;
+-		if (ret) {
+-			raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
+-			return ret;
+-		}
++	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
++		ret = -EFAULT;
++	else if (curval != uval)
++		ret = -EINVAL;
++	if (ret) {
++		raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
++		return ret;
+ 	}
+ 
+ 	raw_spin_lock_irq(&pi_state->owner->pi_lock);
+@@ -1411,6 +1516,13 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
+ 
+ 	if (requeue_pi) {
+ 		/*
++		 * Requeue PI only works on two distinct uaddrs. This
++		 * check is only valid for private futexes. See below.
++		 */
++		if (uaddr1 == uaddr2)
++			return -EINVAL;
++
++		/*
+ 		 * requeue_pi requires a pi_state, try to allocate it now
+ 		 * without any locks in case it fails.
+ 		 */
+@@ -1448,6 +1560,15 @@ retry:
+ 	if (unlikely(ret != 0))
+ 		goto out_put_key1;
+ 
++	/*
++	 * The check above which compares uaddrs is not sufficient for
++	 * shared futexes. We need to compare the keys:
++	 */
++	if (requeue_pi && match_futex(&key1, &key2)) {
++		ret = -EINVAL;
++		goto out_put_keys;
++	}
++
+ 	hb1 = hash_futex(&key1);
+ 	hb2 = hash_futex(&key2);
+ 
+@@ -2287,9 +2408,10 @@ retry:
+ 	/*
+ 	 * To avoid races, try to do the TID -> 0 atomic transition
+ 	 * again. If it succeeds then we can return without waking
+-	 * anyone else up:
++	 * anyone else up. We only try this if neither the waiters nor
++	 * the owner died bit are set.
+ 	 */
+-	if (!(uval & FUTEX_OWNER_DIED) &&
++	if (!(uval & ~FUTEX_TID_MASK) &&
+ 	    cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0))
+ 		goto pi_faulted;
+ 	/*
+@@ -2319,11 +2441,9 @@ retry:
+ 	/*
+ 	 * No waiters - kernel unlocks the futex:
+ 	 */
+-	if (!(uval & FUTEX_OWNER_DIED)) {
+-		ret = unlock_futex_pi(uaddr, uval);
+-		if (ret == -EFAULT)
+-			goto pi_faulted;
+-	}
++	ret = unlock_futex_pi(uaddr, uval);
++	if (ret == -EFAULT)
++		goto pi_faulted;
+ 
+ out_unlock:
+ 	spin_unlock(&hb->lock);
+@@ -2485,6 +2605,15 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ 	if (ret)
+ 		goto out_key2;
+ 
++	/*
++	 * The check above which compares uaddrs is not sufficient for
++	 * shared futexes. We need to compare the keys:
++	 */
++	if (match_futex(&q.key, &key2)) {
++		ret = -EINVAL;
++		goto out_put_keys;
++	}
++
+ 	/* Queue the futex_q, drop the hb lock, wait for wakeup. */
+ 	futex_wait_queue_me(hb, &q, to);
+ 
+@@ -2886,6 +3015,7 @@ static void __init futex_detect_cmpxchg(void)
  {
  #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
  	u32 curval;
@@ -86864,7 +87274,7 @@ index 6801b37..bb6becca 100644
  
  	/*
  	 * This will fail and we want it. Some arch implementations do
-@@ -2897,8 +2904,11 @@ static void __init futex_detect_cmpxchg(void)
+@@ -2897,8 +3027,11 @@ static void __init futex_detect_cmpxchg(void)
  	 * implementation, the non-functional ones will return
  	 * -ENOSYS.
  	 */
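Beyond the state table quoted above, the futex unlock fast path now refuses the TID -> 0 shortcut whenever any flag bit is set: uval & ~FUTEX_TID_MASK clears the owner TID and tests FUTEX_WAITERS and FUTEX_OWNER_DIED at once, where the old test checked only FUTEX_OWNER_DIED. A small self-contained check of that mask arithmetic (constants from the futex UAPI header):

	#include <stdio.h>

	#define FUTEX_WAITERS		0x80000000
	#define FUTEX_OWNER_DIED	0x40000000
	#define FUTEX_TID_MASK		0x3fffffff

	int main(void)
	{
		unsigned int tid_only = 0x000004d2; /* TID 1234          */
		unsigned int waiters  = 0x800004d2; /* TID 1234, WAITERS */

		/* any set flag bit forces the slow path */
		printf("%d\n", (tid_only & ~FUTEX_TID_MASK) == 0); /* 1 */
		printf("%d\n", (waiters  & ~FUTEX_TID_MASK) == 0); /* 0 */
		return 0;
	}
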
@@ -100950,7 +101360,7 @@ index b9ac598..f88cc56 100644
  		return;
  
 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
-index c4b7218..3e83259 100644
+index c4b7218..c7e9f14 100644
 --- a/net/iucv/af_iucv.c
 +++ b/net/iucv/af_iucv.c
 @@ -773,10 +773,10 @@ static int iucv_sock_autobind(struct sock *sk)
@@ -100966,6 +101376,15 @@ index c4b7218..3e83259 100644
  	}
  
  	write_unlock_bh(&iucv_sk_list.lock);
+@@ -1829,7 +1829,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
+ 		spin_lock_irqsave(&list->lock, flags);
+ 
+ 		while (list_skb != (struct sk_buff *)list) {
+-			if (msg->tag != IUCV_SKB_CB(list_skb)->tag) {
++			if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
+ 				this = list_skb;
+ 				break;
+ 			}
 diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
 index cd5b8ec..f205e6b 100644
 --- a/net/iucv/iucv.c
@@ -101849,7 +102268,7 @@ index 11de55e..f25e448 100644
  	return 0;
  }
 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
-index 04748ab6..ca8f86f 100644
+index 04748ab6..c72ef1f 100644
 --- a/net/netlink/af_netlink.c
 +++ b/net/netlink/af_netlink.c
 @@ -257,7 +257,7 @@ static void netlink_overrun(struct sock *sk)
@@ -101861,7 +102280,137 @@ index 04748ab6..ca8f86f 100644
  }
  
  static void netlink_rcv_wake(struct sock *sk)
-@@ -2933,7 +2933,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
+@@ -1360,7 +1360,74 @@ retry:
+ 	return err;
+ }
+ 
+-static inline int netlink_capable(const struct socket *sock, unsigned int flag)
++/**
++ * __netlink_ns_capable - General netlink message capability test
++ * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace.
++ * @user_ns: The user namespace of the capability to use
++ * @cap: The capability to use
++ *
++ * Test to see if the opener of the socket we received the message
++ * from had when the netlink socket was created and the sender of the
++ * message has has the capability @cap in the user namespace @user_ns.
++ */
++bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
++			struct user_namespace *user_ns, int cap)
++{
++	return ((nsp->flags & NETLINK_SKB_DST) ||
++		file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
++		ns_capable(user_ns, cap);
++}
++EXPORT_SYMBOL(__netlink_ns_capable);
++
++/**
++ * netlink_ns_capable - General netlink message capability test
++ * @skb: socket buffer holding a netlink command from userspace
++ * @user_ns: The user namespace of the capability to use
++ * @cap: The capability to use
++ *
++ * Test to see if the opener of the socket we received the message
++ * from had when the netlink socket was created and the sender of the
++ * message has has the capability @cap in the user namespace @user_ns.
++ */
++bool netlink_ns_capable(const struct sk_buff *skb,
++			struct user_namespace *user_ns, int cap)
++{
++	return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
++}
++EXPORT_SYMBOL(netlink_ns_capable);
++
++/**
++ * netlink_capable - Netlink global message capability test
++ * @skb: socket buffer holding a netlink command from userspace
++ * @cap: The capability to use
++ *
++ * Test to see if the opener of the socket we received the message
++ * from had when the netlink socket was created and the sender of the
++ * message has has the capability @cap in all user namespaces.
++ */
++bool netlink_capable(const struct sk_buff *skb, int cap)
++{
++	return netlink_ns_capable(skb, &init_user_ns, cap);
++}
++EXPORT_SYMBOL(netlink_capable);
++
++/**
++ * netlink_net_capable - Netlink network namespace message capability test
++ * @skb: socket buffer holding a netlink command from userspace
++ * @cap: The capability to use
++ *
++ * Test to see if the opener of the socket we received the message
++ * from had when the netlink socket was created and the sender of the
++ * message has has the capability @cap over the network namespace of
++ * the socket we received the message from.
++ */
++bool netlink_net_capable(const struct sk_buff *skb, int cap)
++{
++	return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
++}
++EXPORT_SYMBOL(netlink_net_capable);
++
++static inline int netlink_allowed(const struct socket *sock, unsigned int flag)
+ {
+ 	return (nl_table[sock->sk->sk_protocol].flags & flag) ||
+ 		ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
+@@ -1428,7 +1495,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
+ 
+ 	/* Only superuser is allowed to listen multicasts */
+ 	if (nladdr->nl_groups) {
+-		if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV))
++		if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
+ 			return -EPERM;
+ 		err = netlink_realloc_groups(sk);
+ 		if (err)
+@@ -1490,7 +1557,7 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
+ 		return -EINVAL;
+ 
+ 	if ((nladdr->nl_groups || nladdr->nl_pid) &&
+-	    !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
++	    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
+ 		return -EPERM;
+ 
+ 	if (!nlk->portid)
+@@ -2096,7 +2163,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
+ 		break;
+ 	case NETLINK_ADD_MEMBERSHIP:
+ 	case NETLINK_DROP_MEMBERSHIP: {
+-		if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV))
++		if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
+ 			return -EPERM;
+ 		err = netlink_realloc_groups(sk);
+ 		if (err)
+@@ -2228,6 +2295,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
+ 	struct sk_buff *skb;
+ 	int err;
+ 	struct scm_cookie scm;
++	u32 netlink_skb_flags = 0;
+ 
+ 	if (msg->msg_flags&MSG_OOB)
+ 		return -EOPNOTSUPP;
+@@ -2247,8 +2315,9 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
+ 		dst_group = ffs(addr->nl_groups);
+ 		err =  -EPERM;
+ 		if ((dst_group || dst_portid) &&
+-		    !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
++		    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
+ 			goto out;
++		netlink_skb_flags |= NETLINK_SKB_DST;
+ 	} else {
+ 		dst_portid = nlk->dst_portid;
+ 		dst_group = nlk->dst_group;
+@@ -2278,6 +2347,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
+ 	NETLINK_CB(skb).portid	= nlk->portid;
+ 	NETLINK_CB(skb).dst_group = dst_group;
+ 	NETLINK_CB(skb).creds	= siocb->scm->creds;
++	NETLINK_CB(skb).flags	= netlink_skb_flags;
+ 
+ 	err = -EFAULT;
+ 	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
+@@ -2933,7 +3003,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
  			   sk_wmem_alloc_get(s),
  			   nlk->cb_running,
  			   atomic_read(&s->sk_refcnt),

diff --git a/3.2.59/0000_README b/3.2.59/0000_README
index 71c8053..d9b8617 100644
--- a/3.2.59/0000_README
+++ b/3.2.59/0000_README
@@ -154,7 +154,7 @@ Patch:	1058_linux-3.2.59.patch
 From:	http://www.kernel.org
 Desc:	Linux 3.2.59
 
-Patch:	4420_grsecurity-3.0-3.2.59-201406030716.patch
+Patch:	4420_grsecurity-3.0-3.2.59-201406051309.patch
 From:	http://www.grsecurity.net
 Desc:	hardened-sources base patch from upstream grsecurity
 

diff --git a/3.2.59/4420_grsecurity-3.0-3.2.59-201406030716.patch b/3.2.59/4420_grsecurity-3.0-3.2.59-201406051309.patch
similarity index 99%
rename from 3.2.59/4420_grsecurity-3.0-3.2.59-201406030716.patch
rename to 3.2.59/4420_grsecurity-3.0-3.2.59-201406051309.patch
index fc192d4..ff8e72f 100644
--- a/3.2.59/4420_grsecurity-3.0-3.2.59-201406030716.patch
+++ b/3.2.59/4420_grsecurity-3.0-3.2.59-201406051309.patch
@@ -37650,7 +37650,7 @@ index 632ae24..244cf4a 100644
  	if (drm_lock_free(&master->lock, lock->context)) {
  		/* FIXME: Should really bail out here. */
 diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
-index 0f9ef9b..48bd695 100644
+index 0f9ef9b..48bd6956 100644
 --- a/drivers/gpu/drm/drm_sysfs.c
 +++ b/drivers/gpu/drm/drm_sysfs.c
 @@ -495,7 +495,7 @@ EXPORT_SYMBOL(drm_sysfs_hotplug_event);
@@ -56707,7 +56707,7 @@ index 451b9b8..12e5a03 100644
  
  out_free_fd:
 diff --git a/fs/exec.c b/fs/exec.c
-index 78199eb..7ff0dd8 100644
+index 78199eb..38c4c00 100644
 --- a/fs/exec.c
 +++ b/fs/exec.c
 @@ -55,12 +55,35 @@
@@ -57576,7 +57576,7 @@ index 78199eb..7ff0dd8 100644
 +#endif
 +
 +#ifndef CONFIG_STACK_GROWSUP
-+	const void * stackstart = task_stack_page(current);
++	unsigned long stackstart = (unsigned long)task_stack_page(current);
 +	if (unlikely(current_stack_pointer < stackstart + 512 ||
 +		     current_stack_pointer >= stackstart + THREAD_SIZE))
 +		BUG();
@@ -64870,7 +64870,7 @@ index 8a89949..6776861 100644
  xfs_init_zones(void)
 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
 new file mode 100644
-index 0000000..2255157
+index 0000000..ddeec00
 --- /dev/null
 +++ b/grsecurity/Kconfig
 @@ -0,0 +1,1160 @@
@@ -65016,7 +65016,7 @@ index 0000000..2255157
 +config GRKERNSEC_KSTACKOVERFLOW
 +	bool "Prevent kernel stack overflows"
 +	default y if GRKERNSEC_CONFIG_AUTO
-+	depends on !IA64 && 64BIT && BROKEN
++	depends on !IA64 && 64BIT
 +	help
 +	  If you say Y here, the kernel's process stacks will be allocated
 +	  with vmalloc instead of the kernel's default allocator.  This
@@ -83699,7 +83699,7 @@ index 37a3bbd..55a4241 100644
  extern int llc_sap_action_unitdata_ind(struct llc_sap *sap,
  				       struct sk_buff *skb);
 diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
-index 567c681..cd73ac0 100644
+index 567c681..cd73ac02 100644
 --- a/include/net/llc_s_st.h
 +++ b/include/net/llc_s_st.h
 @@ -20,7 +20,7 @@ struct llc_sap_state_trans {
@@ -86873,7 +86873,7 @@ index ce0c182..b8e5b18 100644
  			else
  				new_fs = fs;
 diff --git a/kernel/futex.c b/kernel/futex.c
-index 8888815..9a6f6fb 100644
+index 8888815..36459d8 100644
 --- a/kernel/futex.c
 +++ b/kernel/futex.c
 @@ -54,6 +54,7 @@
@@ -86884,6 +86884,24 @@ index 8888815..9a6f6fb 100644
  #include <linux/signal.h>
  #include <linux/export.h>
  #include <linux/magic.h>
+@@ -97,7 +98,7 @@ struct futex_pi_state {
+ 	atomic_t refcount;
+ 
+ 	union futex_key key;
+-};
++} __randomize_layout;
+ 
+ /**
+  * struct futex_q - The hashed futex queue entry, one per waiting task
+@@ -131,7 +132,7 @@ struct futex_q {
+ 	struct rt_mutex_waiter *rt_waiter;
+ 	union futex_key *requeue_pi_key;
+ 	u32 bitset;
+-};
++} __randomize_layout;
+ 
+ static const struct futex_q futex_q_init = {
+ 	/* list gets initialized in queue_me()*/
 @@ -240,6 +241,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
  	struct page *page, *page_head;
  	int err, ro = 0;
@@ -86905,7 +86923,326 @@ index 8888815..9a6f6fb 100644
  
  	pagefault_disable();
  	ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
-@@ -2744,6 +2750,7 @@ static int __init futex_init(void)
+@@ -588,6 +594,55 @@ void exit_pi_state_list(struct task_struct *curr)
+ 	raw_spin_unlock_irq(&curr->pi_lock);
+ }
+ 
++/*
++ * We need to check the following states:
++ *
++ *      Waiter | pi_state | pi->owner | uTID      | uODIED | ?
++ *
++ * [1]  NULL   | ---      | ---       | 0         | 0/1    | Valid
++ * [2]  NULL   | ---      | ---       | >0        | 0/1    | Valid
++ *
++ * [3]  Found  | NULL     | --        | Any       | 0/1    | Invalid
++ *
++ * [4]  Found  | Found    | NULL      | 0         | 1      | Valid
++ * [5]  Found  | Found    | NULL      | >0        | 1      | Invalid
++ *
++ * [6]  Found  | Found    | task      | 0         | 1      | Valid
++ *
++ * [7]  Found  | Found    | NULL      | Any       | 0      | Invalid
++ *
++ * [8]  Found  | Found    | task      | ==taskTID | 0/1    | Valid
++ * [9]  Found  | Found    | task      | 0         | 0      | Invalid
++ * [10] Found  | Found    | task      | !=taskTID | 0/1    | Invalid
++ *
++ * [1]	Indicates that the kernel can acquire the futex atomically. We
++ *	came came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
++ *
++ * [2]	Valid, if TID does not belong to a kernel thread. If no matching
++ *      thread is found then it indicates that the owner TID has died.
++ *
++ * [3]	Invalid. The waiter is queued on a non PI futex
++ *
++ * [4]	Valid state after exit_robust_list(), which sets the user space
++ *	value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
++ *
++ * [5]	The user space value got manipulated between exit_robust_list()
++ *	and exit_pi_state_list()
++ *
++ * [6]	Valid state after exit_pi_state_list() which sets the new owner in
++ *	the pi_state but cannot access the user space value.
++ *
++ * [7]	pi_state->owner can only be NULL when the OWNER_DIED bit is set.
++ *
++ * [8]	Owner and user space value match
++ *
++ * [9]	There is no transient state which sets the user space TID to 0
++ *	except exit_robust_list(), but this is indicated by the
++ *	FUTEX_OWNER_DIED bit. See [4]
++ *
++ * [10] There is no transient state which leaves owner and user space
++ *	TID out of sync.
++ */
+ static int
+ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
+ 		union futex_key *key, struct futex_pi_state **ps)
+@@ -603,12 +658,13 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
+ 	plist_for_each_entry_safe(this, next, head, list) {
+ 		if (match_futex(&this->key, key)) {
+ 			/*
+-			 * Another waiter already exists - bump up
+-			 * the refcount and return its pi_state:
++			 * Sanity check the waiter before increasing
++			 * the refcount and attaching to it.
+ 			 */
+ 			pi_state = this->pi_state;
+ 			/*
+-			 * Userspace might have messed up non-PI and PI futexes
++			 * Userspace might have messed up non-PI and
++			 * PI futexes [3]
+ 			 */
+ 			if (unlikely(!pi_state))
+ 				return -EINVAL;
+@@ -616,34 +672,70 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
+ 			WARN_ON(!atomic_read(&pi_state->refcount));
+ 
+ 			/*
+-			 * When pi_state->owner is NULL then the owner died
+-			 * and another waiter is on the fly. pi_state->owner
+-			 * is fixed up by the task which acquires
+-			 * pi_state->rt_mutex.
+-			 *
+-			 * We do not check for pid == 0 which can happen when
+-			 * the owner died and robust_list_exit() cleared the
+-			 * TID.
++			 * Handle the owner died case:
+ 			 */
+-			if (pid && pi_state->owner) {
++			if (uval & FUTEX_OWNER_DIED) {
+ 				/*
+-				 * Bail out if user space manipulated the
+-				 * futex value.
++				 * exit_pi_state_list sets owner to NULL and
++				 * wakes the topmost waiter. The task which
++				 * acquires the pi_state->rt_mutex will fixup
++				 * owner.
+ 				 */
+-				if (pid != task_pid_vnr(pi_state->owner))
++				if (!pi_state->owner) {
++					/*
++					 * No pi state owner, but the user
++					 * space TID is not 0. Inconsistent
++					 * state. [5]
++					 */
++					if (pid)
++						return -EINVAL;
++					/*
++					 * Take a ref on the state and
++					 * return. [4]
++					 */
++					goto out_state;
++				}
++
++				/*
++				 * If TID is 0, then either the dying owner
++				 * has not yet executed exit_pi_state_list()
++				 * or some waiter acquired the rtmutex in the
++				 * pi state, but did not yet fixup the TID in
++				 * user space.
++				 *
++				 * Take a ref on the state and return. [6]
++				 */
++				if (!pid)
++					goto out_state;
++			} else {
++				/*
++				 * If the owner died bit is not set,
++				 * then the pi_state must have an
++				 * owner. [7]
++				 */
++				if (!pi_state->owner)
+ 					return -EINVAL;
+ 			}
+ 
++			/*
++			 * Bail out if user space manipulated the
++			 * futex value. If pi state exists then the
++			 * owner TID must be the same as the user
++			 * space TID. [9/10]
++			 */
++			if (pid != task_pid_vnr(pi_state->owner))
++				return -EINVAL;
++
++		out_state:
+ 			atomic_inc(&pi_state->refcount);
+ 			*ps = pi_state;
+-
+ 			return 0;
+ 		}
+ 	}
+ 
+ 	/*
+ 	 * We are the first waiter - try to look up the real owner and attach
+-	 * the new pi_state to it, but bail out when TID = 0
++	 * the new pi_state to it, but bail out when TID = 0 [1]
+ 	 */
+ 	if (!pid)
+ 		return -ESRCH;
+@@ -651,6 +743,11 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
+ 	if (!p)
+ 		return -ESRCH;
+ 
++	if (!p->mm) {
++		put_task_struct(p);
++		return -EPERM;
++	}
++
+ 	/*
+ 	 * We need to look at the task state flags to figure out,
+ 	 * whether the task is exiting. To protect against the do_exit
+@@ -671,6 +768,9 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
+ 		return ret;
+ 	}
+ 
++	/*
++	 * No existing pi state. First waiter. [2]
++	 */
+ 	pi_state = alloc_pi_state();
+ 
+ 	/*
+@@ -742,10 +842,18 @@ retry:
+ 		return -EDEADLK;
+ 
+ 	/*
+-	 * Surprise - we got the lock. Just return to userspace:
++	 * Surprise - we got the lock, but we do not trust user space at all.
+ 	 */
+-	if (unlikely(!curval))
+-		return 1;
++	if (unlikely(!curval)) {
++		/*
++		 * We verify whether there is kernel state for this
++		 * futex. If not, we can safely assume, that the 0 ->
++		 * TID transition is correct. If state exists, we do
++		 * not bother to fixup the user space state as it was
++		 * corrupted already.
++		 */
++		return futex_top_waiter(hb, key) ? -EINVAL : 1;
++	}
+ 
+ 	uval = curval;
+ 
+@@ -875,6 +983,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
+ 	struct task_struct *new_owner;
+ 	struct futex_pi_state *pi_state = this->pi_state;
+ 	u32 uninitialized_var(curval), newval;
++	int ret = 0;
+ 
+ 	if (!pi_state)
+ 		return -EINVAL;
+@@ -898,23 +1007,19 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
+ 		new_owner = this->task;
+ 
+ 	/*
+-	 * We pass it to the next owner. (The WAITERS bit is always
+-	 * kept enabled while there is PI state around. We must also
+-	 * preserve the owner died bit.)
++	 * We pass it to the next owner. The WAITERS bit is always
++	 * kept enabled while there is PI state around. We cleanup the
++	 * owner died bit, because we are the owner.
+ 	 */
+-	if (!(uval & FUTEX_OWNER_DIED)) {
+-		int ret = 0;
++	newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
+ 
+-		newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
+-
+-		if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
+-			ret = -EFAULT;
+-		else if (curval != uval)
+-			ret = -EINVAL;
+-		if (ret) {
+-			raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
+-			return ret;
+-		}
++	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
++		ret = -EFAULT;
++	else if (curval != uval)
++		ret = -EINVAL;
++	if (ret) {
++		raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
++		return ret;
+ 	}
+ 
+ 	raw_spin_lock_irq(&pi_state->owner->pi_lock);
+@@ -1272,6 +1377,13 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
+ 
+ 	if (requeue_pi) {
+ 		/*
++		 * Requeue PI only works on two distinct uaddrs. This
++		 * check is only valid for private futexes. See below.
++		 */
++		if (uaddr1 == uaddr2)
++			return -EINVAL;
++
++		/*
+ 		 * requeue_pi requires a pi_state, try to allocate it now
+ 		 * without any locks in case it fails.
+ 		 */
+@@ -1309,6 +1421,15 @@ retry:
+ 	if (unlikely(ret != 0))
+ 		goto out_put_key1;
+ 
++	/*
++	 * The check above which compares uaddrs is not sufficient for
++	 * shared futexes. We need to compare the keys:
++	 */
++	if (requeue_pi && match_futex(&key1, &key2)) {
++		ret = -EINVAL;
++		goto out_put_keys;
++	}
++
+ 	hb1 = hash_futex(&key1);
+ 	hb2 = hash_futex(&key2);
+ 
+@@ -2133,9 +2254,10 @@ retry:
+ 	/*
+ 	 * To avoid races, try to do the TID -> 0 atomic transition
+ 	 * again. If it succeeds then we can return without waking
+-	 * anyone else up:
++	 * anyone else up. We only try this if neither the waiters nor
++	 * the owner died bit are set.
+ 	 */
+-	if (!(uval & FUTEX_OWNER_DIED) &&
++	if (!(uval & ~FUTEX_TID_MASK) &&
+ 	    cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0))
+ 		goto pi_faulted;
+ 	/*
+@@ -2167,11 +2289,9 @@ retry:
+ 	/*
+ 	 * No waiters - kernel unlocks the futex:
+ 	 */
+-	if (!(uval & FUTEX_OWNER_DIED)) {
+-		ret = unlock_futex_pi(uaddr, uval);
+-		if (ret == -EFAULT)
+-			goto pi_faulted;
+-	}
++	ret = unlock_futex_pi(uaddr, uval);
++	if (ret == -EFAULT)
++		goto pi_faulted;
+ 
+ out_unlock:
+ 	spin_unlock(&hb->lock);
+@@ -2331,6 +2451,15 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ 	if (ret)
+ 		goto out_key2;
+ 
++	/*
++	 * The check above which compares uaddrs is not sufficient for
++	 * shared futexes. We need to compare the keys:
++	 */
++	if (match_futex(&q.key, &key2)) {
++		ret = -EINVAL;
++		goto out_put_keys;
++	}
++
+ 	/* Queue the futex_q, drop the hb lock, wait for wakeup. */
+ 	futex_wait_queue_me(hb, &q, to);
+ 
+@@ -2744,6 +2873,7 @@ static int __init futex_init(void)
  {
  	u32 curval;
  	int i;
@@ -86913,7 +87250,7 @@ index 8888815..9a6f6fb 100644
  
  	/*
  	 * This will fail and we want it. Some arch implementations do
-@@ -2755,8 +2762,11 @@ static int __init futex_init(void)
+@@ -2755,8 +2885,11 @@ static int __init futex_init(void)
  	 * implementation, the non-functional ones will return
  	 * -ENOSYS.
  	 */
@@ -98293,10 +98630,41 @@ index 136ac4f..f917fa9 100644
  	mm->unmap_area = arch_unmap_area;
  }
 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
-index eeba3bb..5fc3323 100644
+index eeba3bb..0c8633f 100644
 --- a/mm/vmalloc.c
 +++ b/mm/vmalloc.c
-@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
+@@ -27,10 +27,30 @@
+ #include <linux/pfn.h>
+ #include <linux/kmemleak.h>
+ #include <linux/atomic.h>
++#include <linux/llist.h>
+ #include <asm/uaccess.h>
+ #include <asm/tlbflush.h>
+ #include <asm/shmparam.h>
+ 
++struct vfree_deferred {
++	struct llist_head list;
++	struct work_struct wq;
++};
++static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
++
++static void __vunmap(const void *, int);
++
++static void free_work(struct work_struct *w)
++{
++	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
++	struct llist_node *llnode = llist_del_all(&p->list);
++	while (llnode) {
++		void *p = llnode;
++		llnode = llist_next(llnode);
++		__vunmap(p, 1);
++	}
++}
++
+ /*** Page table manipulation functions ***/
+ 
+ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
+@@ -39,8 +59,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
  
  	pte = pte_offset_kernel(pmd, addr);
  	do {
@@ -98318,7 +98686,7 @@ index eeba3bb..5fc3323 100644
  	} while (pte++, addr += PAGE_SIZE, addr != end);
  }
  
-@@ -100,16 +111,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
+@@ -100,16 +131,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
  	pte = pte_alloc_kernel(pmd, addr);
  	if (!pte)
  		return -ENOMEM;
@@ -98350,7 +98718,7 @@ index eeba3bb..5fc3323 100644
  	return 0;
  }
  
-@@ -119,7 +143,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
+@@ -119,7 +163,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
  	pmd_t *pmd;
  	unsigned long next;
  
@@ -98359,7 +98727,7 @@ index eeba3bb..5fc3323 100644
  	if (!pmd)
  		return -ENOMEM;
  	do {
-@@ -136,7 +160,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
+@@ -136,7 +180,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
  	pud_t *pud;
  	unsigned long next;
  
@@ -98368,7 +98736,7 @@ index eeba3bb..5fc3323 100644
  	if (!pud)
  		return -ENOMEM;
  	do {
-@@ -196,6 +220,12 @@ int is_vmalloc_or_module_addr(const void *x)
+@@ -196,6 +240,12 @@ int is_vmalloc_or_module_addr(const void *x)
  	if (addr >= MODULES_VADDR && addr < MODULES_END)
  		return 1;
  #endif
@@ -98381,7 +98749,7 @@ index eeba3bb..5fc3323 100644
  	return is_vmalloc_addr(x);
  }
  
-@@ -216,8 +246,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
+@@ -216,8 +266,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
  
  	if (!pgd_none(*pgd)) {
  		pud_t *pud = pud_offset(pgd, addr);
@@ -98396,7 +98764,22 @@ index eeba3bb..5fc3323 100644
  			if (!pmd_none(*pmd)) {
  				pte_t *ptep, pte;
  
-@@ -1295,6 +1331,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
+@@ -1151,10 +1207,14 @@ void __init vmalloc_init(void)
+ 
+ 	for_each_possible_cpu(i) {
+ 		struct vmap_block_queue *vbq;
++		struct vfree_deferred *p;
+ 
+ 		vbq = &per_cpu(vmap_block_queue, i);
+ 		spin_lock_init(&vbq->lock);
+ 		INIT_LIST_HEAD(&vbq->free);
++		p = &per_cpu(vfree_deferred, i);
++		init_llist_head(&p->list);
++		INIT_WORK(&p->wq, free_work);
+ 	}
+ 
+ 	/* Import existing vmlist entries. */
+@@ -1295,6 +1355,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
  	struct vm_struct *area;
  
  	BUG_ON(in_interrupt());
@@ -98413,7 +98796,56 @@ index eeba3bb..5fc3323 100644
  	if (flags & VM_IOREMAP) {
  		int bit = fls(size);
  
-@@ -1527,6 +1573,11 @@ void *vmap(struct page **pages, unsigned int count,
+@@ -1469,7 +1539,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
+ 	kfree(area);
+ 	return;
+ }
+-
++ 
+ /**
+  *	vfree  -  release memory allocated by vmalloc()
+  *	@addr:		memory base address
+@@ -1478,15 +1548,26 @@ static void __vunmap(const void *addr, int deallocate_pages)
+  *	obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
+  *	NULL, no operation is performed.
+  *
+- *	Must not be called in interrupt context.
++ *	Must not be called in NMI context (strictly speaking, only if we don't
++ *	have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
++ *	conventions for vfree() arch-depenedent would be a really bad idea)
++ *
++ *	NOTE: assumes that the object at *addr has a size >= sizeof(llist_node)
+  */
+ void vfree(const void *addr)
+ {
+-	BUG_ON(in_interrupt());
++	BUG_ON(in_nmi());
+ 
+ 	kmemleak_free(addr);
+ 
+-	__vunmap(addr, 1);
++	if (!addr)
++		return;
++	if (unlikely(in_interrupt())) {
++		struct vfree_deferred *p = &__get_cpu_var(vfree_deferred);
++		if (llist_add((struct llist_node *)addr, &p->list))
++			schedule_work(&p->wq);
++	} else
++		__vunmap(addr, 1);
+ }
+ EXPORT_SYMBOL(vfree);
+ 
+@@ -1503,7 +1584,8 @@ void vunmap(const void *addr)
+ {
+ 	BUG_ON(in_interrupt());
+ 	might_sleep();
+-	__vunmap(addr, 0);
++	if (addr)
++		__vunmap(addr, 0);
+ }
+ EXPORT_SYMBOL(vunmap);
+ 
+@@ -1527,6 +1609,11 @@ void *vmap(struct page **pages, unsigned int count,
  	if (count > totalram_pages)
  		return NULL;
  
@@ -98425,7 +98857,7 @@ index eeba3bb..5fc3323 100644
  	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
  					__builtin_return_address(0));
  	if (!area)
-@@ -1628,6 +1679,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
+@@ -1628,6 +1715,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
  	if (!size || (size >> PAGE_SHIFT) > totalram_pages)
  		goto fail;
  
@@ -98439,7 +98871,7 @@ index eeba3bb..5fc3323 100644
  	area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
  				  start, end, node, gfp_mask, caller);
  	if (!area)
-@@ -1694,6 +1752,18 @@ static inline void *__vmalloc_node_flags(unsigned long size,
+@@ -1694,6 +1788,18 @@ static inline void *__vmalloc_node_flags(unsigned long size,
  					node, __builtin_return_address(0));
  }
  
@@ -98458,7 +98890,7 @@ index eeba3bb..5fc3323 100644
  /**
   *	vmalloc  -  allocate virtually contiguous memory
   *	@size:		allocation size
-@@ -1801,10 +1871,9 @@ EXPORT_SYMBOL(vzalloc_node);
+@@ -1801,10 +1907,9 @@ EXPORT_SYMBOL(vzalloc_node);
   *	For tight control over page level allocator and protection flags
   *	use __vmalloc() instead.
   */
@@ -98470,7 +98902,7 @@ index eeba3bb..5fc3323 100644
  			      -1, __builtin_return_address(0));
  }
  
-@@ -2099,6 +2168,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
+@@ -2099,6 +2204,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
  	unsigned long uaddr = vma->vm_start;
  	unsigned long usize = vma->vm_end - vma->vm_start;
  
@@ -98479,7 +98911,7 @@ index eeba3bb..5fc3323 100644
  	if ((PAGE_SIZE-1) & (unsigned long)addr)
  		return -EINVAL;
  
-@@ -2351,8 +2422,8 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
+@@ -2351,8 +2458,8 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
  		return NULL;
  	}
  
@@ -98490,7 +98922,7 @@ index eeba3bb..5fc3323 100644
  	if (!vas || !vms)
  		goto err_free;
  
-@@ -2536,11 +2607,15 @@ static int s_show(struct seq_file *m, void *p)
+@@ -2536,11 +2643,15 @@ static int s_show(struct seq_file *m, void *p)
  {
  	struct vm_struct *v = p;
  

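The mm/vmalloc.c hunks above backport the mainline deferred-vfree rework that GRKERNSEC_KSTACKOVERFLOW relies on (which is why BROKEN was dropped from its Kconfig dependency earlier in this patch): a vfree() issued from interrupt context is pushed onto a per-CPU lock-free llist, reusing the freed object's own first bytes as the list node, and __vunmap() runs later from a workqueue. The same shape in a minimal, self-contained userspace sketch (names are mine, not the kernel's):

	#include <stdatomic.h>
	#include <stdlib.h>

	struct node { struct node *next; };

	static _Atomic(struct node *) deferred_head;

	/* the "vfree from interrupt" side: O(1), lock-free, and no
	 * allocation -- the object itself carries the link */
	static void defer_free(void *obj)
	{
		struct node *n = obj;

		n->next = atomic_load(&deferred_head);
		while (!atomic_compare_exchange_weak(&deferred_head,
						     &n->next, n))
			;	/* n->next was refreshed; retry */
	}

	/* the "free_work" side: detach the whole list atomically,
	 * then release each entry for real */
	static void drain_deferred(void)
	{
		struct node *n = atomic_exchange(&deferred_head, NULL);

		while (n) {
			struct node *next = n->next;

			free(n);
			n = next;
		}
	}
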
