Subject: [gentoo-commits] gentoo-x86 commit in sys-kernel/spl/files: spl-0.6.0_rc12-linux-3.7-compat.patch spl-0.6.0_rc12-fix-race-in-slabs.patch spl-0.6.0_rc12-fix-3.6-compat-regression.patch
From: Richard Yao (ryao)
Date: 2012-12-11 19:42 UTC
To: gentoo-commits@lists.gentoo.org

ryao        12/12/11 19:42:28

  Added:                spl-0.6.0_rc12-linux-3.7-compat.patch
                        spl-0.6.0_rc12-fix-race-in-slabs.patch
                        spl-0.6.0_rc12-fix-3.6-compat-regression.patch
  Log:
  Linux 3.7 Support; Linux 3.6 Stability Fix
  
  (Portage version: 2.1.11.31/cvs/Linux x86_64, unsigned Manifest commit)

Revision  Changes    Path
1.1                  sys-kernel/spl/files/spl-0.6.0_rc12-linux-3.7-compat.patch

file : http://sources.gentoo.org/viewvc.cgi/gentoo-x86/sys-kernel/spl/files/spl-0.6.0_rc12-linux-3.7-compat.patch?rev=1.1&view=markup
plain: http://sources.gentoo.org/viewvc.cgi/gentoo-x86/sys-kernel/spl/files/spl-0.6.0_rc12-linux-3.7-compat.patch?rev=1.1&content-type=text/plain

Index: spl-0.6.0_rc12-linux-3.7-compat.patch
===================================================================
From b84412a6e8a1d22780a8203e00049c6e43efacac Mon Sep 17 00:00:00 2001
From: Brian Behlendorf <behlendorf1@llnl.gov>
Date: Fri, 30 Nov 2012 15:46:30 -0800
Subject: [PATCH] Linux compat 3.7, kernel_thread()

The preferred kernel interface for creating threads has been
kthread_create() for a long time now.  However, several of the
SPLAT tests still use the legacy kernel_thread() function which
has finally been dropped (mostly).

Update the condvar and rwlock SPLAT tests to use the modern
interface.  Frankly this is something we should have done a
long time ago.

Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #194
---
 module/splat/splat-condvar.c |   70 +++++++++++++++++++++---------------------
 module/splat/splat-rwlock.c  |   57 ++++++++++++++++------------------
 2 files changed, 61 insertions(+), 66 deletions(-)

diff --git a/module/splat/splat-condvar.c b/module/splat/splat-condvar.c
index 69fefc9..1fe306c 100644
--- a/module/splat/splat-condvar.c
+++ b/module/splat/splat-condvar.c
@@ -24,6 +24,7 @@
  *  Solaris Porting LAyer Tests (SPLAT) Condition Variable Tests.
 \*****************************************************************************/
 
+#include <linux/kthread.h>
 #include <sys/condvar.h>
 #include "splat-internal.h"
 
@@ -51,20 +52,20 @@
 #define SPLAT_CONDVAR_TEST5_DESC	"Timeout thread, cv_wait_timeout()"
 
 #define SPLAT_CONDVAR_TEST_MAGIC	0x115599DDUL
-#define SPLAT_CONDVAR_TEST_NAME		"condvar_test"
+#define SPLAT_CONDVAR_TEST_NAME		"condvar"
 #define SPLAT_CONDVAR_TEST_COUNT	8
 
 typedef struct condvar_priv {
-        unsigned long cv_magic;
-        struct file *cv_file;
+	unsigned long cv_magic;
+	struct file *cv_file;
 	kcondvar_t cv_condvar;
 	kmutex_t cv_mtx;
 } condvar_priv_t;
 
 typedef struct condvar_thr {
-	int ct_id;
 	const char *ct_name;
 	condvar_priv_t *ct_cvp;
+	struct task_struct *ct_thread;
 	int ct_rc;
 } condvar_thr_t;
 
@@ -73,20 +74,17 @@
 {
 	condvar_thr_t *ct = (condvar_thr_t *)arg;
 	condvar_priv_t *cv = ct->ct_cvp;
-	char name[16];
 
 	ASSERT(cv->cv_magic == SPLAT_CONDVAR_TEST_MAGIC);
-        snprintf(name, sizeof(name),"%s%d",SPLAT_CONDVAR_TEST_NAME,ct->ct_id);
-	daemonize(name);
 
 	mutex_enter(&cv->cv_mtx);
 	splat_vprint(cv->cv_file, ct->ct_name,
-	           "%s thread sleeping with %d waiters\n",
-		   name, atomic_read(&cv->cv_condvar.cv_waiters));
+	    "%s thread sleeping with %d waiters\n",
+	    ct->ct_thread->comm, atomic_read(&cv->cv_condvar.cv_waiters));
 	cv_wait(&cv->cv_condvar, &cv->cv_mtx);
 	splat_vprint(cv->cv_file, ct->ct_name,
-	           "%s thread woken %d waiters remain\n",
-		   name, atomic_read(&cv->cv_condvar.cv_waiters));
+	    "%s thread woken %d waiters remain\n",
+	    ct->ct_thread->comm, atomic_read(&cv->cv_condvar.cv_waiters));
 	mutex_exit(&cv->cv_mtx);
 
 	return 0;
@@ -96,7 +94,6 @@
 splat_condvar_test1(struct file *file, void *arg)
 {
 	int i, count = 0, rc = 0;
-	long pids[SPLAT_CONDVAR_TEST_COUNT];
 	condvar_thr_t ct[SPLAT_CONDVAR_TEST_COUNT];
 	condvar_priv_t cv;
 
@@ -109,13 +106,15 @@
 	 * long as we know how many we managed to create and should expect. */
 	for (i = 0; i < SPLAT_CONDVAR_TEST_COUNT; i++) {
 		ct[i].ct_cvp = &cv;
-		ct[i].ct_id = i;
 		ct[i].ct_name = SPLAT_CONDVAR_TEST1_NAME;
 		ct[i].ct_rc = 0;
+		ct[i].ct_thread = kthread_create(splat_condvar_test12_thread,
+		    &ct[i], "%s/%d", SPLAT_CONDVAR_TEST_NAME, i);
 
-		pids[i] = kernel_thread(splat_condvar_test12_thread, &ct[i], 0);
-		if (pids[i] >= 0)
+		if (!IS_ERR(ct[i].ct_thread)) {
+			wake_up_process(ct[i].ct_thread);
 			count++;
+		}
 	}
 
 	/* Wait until all threads are waiting on the condition variable */
@@ -160,7 +159,6 @@
 splat_condvar_test2(struct file *file, void *arg)
 {
 	int i, count = 0, rc = 0;
-	long pids[SPLAT_CONDVAR_TEST_COUNT];
 	condvar_thr_t ct[SPLAT_CONDVAR_TEST_COUNT];
 	condvar_priv_t cv;
 
@@ -173,13 +171,15 @@
 	 * long as we know how many we managed to create and should expect. */
 	for (i = 0; i < SPLAT_CONDVAR_TEST_COUNT; i++) {
 		ct[i].ct_cvp = &cv;
-		ct[i].ct_id = i;
 		ct[i].ct_name = SPLAT_CONDVAR_TEST2_NAME;
 		ct[i].ct_rc = 0;
+		ct[i].ct_thread = kthread_create(splat_condvar_test12_thread,
+		    &ct[i], "%s/%d", SPLAT_CONDVAR_TEST_NAME, i);
 
-		pids[i] = kernel_thread(splat_condvar_test12_thread, &ct[i], 0);
-		if (pids[i] > 0)
+		if (!IS_ERR(ct[i].ct_thread)) {
+			wake_up_process(ct[i].ct_thread);
 			count++;
+		}
 	}
 
 	/* Wait until all threads are waiting on the condition variable */
@@ -208,17 +208,14 @@
 {
 	condvar_thr_t *ct = (condvar_thr_t *)arg;
 	condvar_priv_t *cv = ct->ct_cvp;
-	char name[16];
 	clock_t rc;
 
 	ASSERT(cv->cv_magic == SPLAT_CONDVAR_TEST_MAGIC);
-        snprintf(name, sizeof(name), "%s%d", SPLAT_CONDVAR_TEST_NAME, ct->ct_id);
-	daemonize(name);
 
 	mutex_enter(&cv->cv_mtx);
 	splat_vprint(cv->cv_file, ct->ct_name,
-	           "%s thread sleeping with %d waiters\n",
-		   name, atomic_read(&cv->cv_condvar.cv_waiters));
+	    "%s thread sleeping with %d waiters\n",
+	    ct->ct_thread->comm, atomic_read(&cv->cv_condvar.cv_waiters));
 
 	/* Sleep no longer than 3 seconds, for this test we should
 	 * actually never sleep that long without being woken up. */
@@ -226,11 +223,12 @@
 	if (rc == -1) {
 		ct->ct_rc = -ETIMEDOUT;
 		splat_vprint(cv->cv_file, ct->ct_name, "%s thread timed out, "
-		           "should have been woken\n", name);
+		    "should have been woken\n", ct->ct_thread->comm);
 	} else {
 		splat_vprint(cv->cv_file, ct->ct_name,
-		           "%s thread woken %d waiters remain\n",
-			   name, atomic_read(&cv->cv_condvar.cv_waiters));
+		    "%s thread woken %d waiters remain\n",
+		    ct->ct_thread->comm,
+		    atomic_read(&cv->cv_condvar.cv_waiters));
 	}
 
 	mutex_exit(&cv->cv_mtx);
@@ -242,7 +240,6 @@
 splat_condvar_test3(struct file *file, void *arg)
 {
 	int i, count = 0, rc = 0;
-	long pids[SPLAT_CONDVAR_TEST_COUNT];
 	condvar_thr_t ct[SPLAT_CONDVAR_TEST_COUNT];
 	condvar_priv_t cv;
 
@@ -255,13 +252,15 @@
 	 * long as we know how many we managed to create and should expect. */
 	for (i = 0; i < SPLAT_CONDVAR_TEST_COUNT; i++) {
 		ct[i].ct_cvp = &cv;
-		ct[i].ct_id = i;
 		ct[i].ct_name = SPLAT_CONDVAR_TEST3_NAME;
 		ct[i].ct_rc = 0;
+		ct[i].ct_thread = kthread_create(splat_condvar_test34_thread,
+		    &ct[i], "%s/%d", SPLAT_CONDVAR_TEST_NAME, i);
 
-		pids[i] = kernel_thread(splat_condvar_test34_thread, &ct[i], 0);
-		if (pids[i] >= 0)
+		if (!IS_ERR(ct[i].ct_thread)) {
+			wake_up_process(ct[i].ct_thread);
 			count++;
+		}
 	}
 
 	/* Wait until all threads are waiting on the condition variable */
@@ -311,7 +310,6 @@
 splat_condvar_test4(struct file *file, void *arg)
 {
 	int i, count = 0, rc = 0;
-	long pids[SPLAT_CONDVAR_TEST_COUNT];
 	condvar_thr_t ct[SPLAT_CONDVAR_TEST_COUNT];
 	condvar_priv_t cv;
 
@@ -324,13 +322,15 @@
 	 * long as we know how many we managed to create and should expect. */
 	for (i = 0; i < SPLAT_CONDVAR_TEST_COUNT; i++) {
 		ct[i].ct_cvp = &cv;
-		ct[i].ct_id = i;
 		ct[i].ct_name = SPLAT_CONDVAR_TEST3_NAME;
 		ct[i].ct_rc = 0;
+		ct[i].ct_thread = kthread_create(splat_condvar_test34_thread,
+		    &ct[i], "%s/%d", SPLAT_CONDVAR_TEST_NAME, i);
 
-		pids[i] = kernel_thread(splat_condvar_test34_thread, &ct[i], 0);
-		if (pids[i] >= 0)
+		if (!IS_ERR(ct[i].ct_thread)) {
+			wake_up_process(ct[i].ct_thread);
 			count++;
+		}
 	}
 
 	/* Wait until all threads are waiting on the condition variable */
diff --git a/module/splat/splat-rwlock.c b/module/splat/splat-rwlock.c
index 9e335d7..f4a0109 100644
--- a/module/splat/splat-rwlock.c
+++ b/module/splat/splat-rwlock.c
@@ -82,7 +82,7 @@
 typedef struct rw_thr {
 	const char *rwt_name;
 	rw_priv_t *rwt_rwp;
-	int rwt_id;
+	struct task_struct *rwt_thread;
 } rw_thr_t;
 
 void splat_init_rw_priv(rw_priv_t *rwp, struct file *file)
@@ -106,17 +106,15 @@ void splat_init_rw_priv(rw_priv_t *rwp, struct file *file)
 	rw_thr_t *rwt = (rw_thr_t *)arg;
 	rw_priv_t *rwp = rwt->rwt_rwp;
 	uint8_t rnd;
-	char name[16];
 
 	ASSERT(rwp->rw_magic == SPLAT_RWLOCK_TEST_MAGIC);
-	snprintf(name, sizeof(name), "rwlock_wr_thr%d", rwt->rwt_id);
-	daemonize(name);
+
 	get_random_bytes((void *)&rnd, 1);
 	msleep((unsigned int)rnd);
 
 	splat_vprint(rwp->rw_file, rwt->rwt_name,
-		     "%s trying to acquire rwlock (%d holding/%d waiting)\n",
-		     name, rwp->rw_holders, rwp->rw_waiters);
+	    "%s trying to acquire rwlock (%d holding/%d waiting)\n",
+	    rwt->rwt_thread->comm, rwp->rw_holders, rwp->rw_waiters);
 	spin_lock(&rwp->rw_lock);
 	rwp->rw_waiters++;
 	spin_unlock(&rwp->rw_lock);
@@ -127,20 +125,20 @@ void splat_init_rw_priv(rw_priv_t *rwp, struct file *file)
 	rwp->rw_holders++;
 	spin_unlock(&rwp->rw_lock);
 	splat_vprint(rwp->rw_file, rwt->rwt_name,
-		     "%s acquired rwlock (%d holding/%d waiting)\n",
-		     name, rwp->rw_holders, rwp->rw_waiters);
+	    "%s acquired rwlock (%d holding/%d waiting)\n",
+	    rwt->rwt_thread->comm, rwp->rw_holders, rwp->rw_waiters);
 
 	/* Wait for control thread to signal we can release the write lock */
 	wait_event_interruptible(rwp->rw_waitq, splat_locked_test(&rwp->rw_lock,
-				 rwp->rw_release == SPLAT_RWLOCK_RELEASE_WR));
+	    rwp->rw_release == SPLAT_RWLOCK_RELEASE_WR));
 
 	spin_lock(&rwp->rw_lock);
 	rwp->rw_completed++;
 	rwp->rw_holders--;
 	spin_unlock(&rwp->rw_lock);
 	splat_vprint(rwp->rw_file, rwt->rwt_name,
-		   "%s dropped rwlock (%d holding/%d waiting)\n",
-		   name, rwp->rw_holders, rwp->rw_waiters);
+	    "%s dropped rwlock (%d holding/%d waiting)\n",
+	    rwt->rwt_thread->comm, rwp->rw_holders, rwp->rw_waiters);
 
 	rw_exit(&rwp->rw_rwlock);
 
@@ -153,21 +151,19 @@ void splat_init_rw_priv(rw_priv_t *rwp, struct file *file)
 	rw_thr_t *rwt = (rw_thr_t *)arg;
 	rw_priv_t *rwp = rwt->rwt_rwp;
 	uint8_t rnd;
-	char name[16];
 
 	ASSERT(rwp->rw_magic == SPLAT_RWLOCK_TEST_MAGIC);
-	snprintf(name, sizeof(name), "rwlock_rd_thr%d", rwt->rwt_id);
-	daemonize(name);
+
 	get_random_bytes((void *)&rnd, 1);
 	msleep((unsigned int)rnd);
 
 	/* Don't try and take the semaphore until after someone has it */
-	wait_event_interruptible(rwp->rw_waitq, splat_locked_test(&rwp->rw_lock,
-				 rwp->rw_holders > 0));
+	wait_event_interruptible(rwp->rw_waitq,
+	    splat_locked_test(&rwp->rw_lock, rwp->rw_holders > 0));
 
 	splat_vprint(rwp->rw_file, rwt->rwt_name,
-		     "%s trying to acquire rwlock (%d holding/%d waiting)\n",
-		     name, rwp->rw_holders, rwp->rw_waiters);
+	    "%s trying to acquire rwlock (%d holding/%d waiting)\n",
+	    rwt->rwt_thread->comm, rwp->rw_holders, rwp->rw_waiters);
 	spin_lock(&rwp->rw_lock);
 	rwp->rw_waiters++;
 	spin_unlock(&rwp->rw_lock);
@@ -178,20 +174,20 @@ void splat_init_rw_priv(rw_priv_t *rwp, struct file *file)
 	rwp->rw_holders++;
 	spin_unlock(&rwp->rw_lock);
 	splat_vprint(rwp->rw_file, rwt->rwt_name,
-		     "%s acquired rwlock (%d holding/%d waiting)\n",
-		     name, rwp->rw_holders, rwp->rw_waiters);
+	    "%s acquired rwlock (%d holding/%d waiting)\n",
+	    rwt->rwt_thread->comm, rwp->rw_holders, rwp->rw_waiters);
 
 	/* Wait for control thread to signal we can release the read lock */
 	wait_event_interruptible(rwp->rw_waitq, splat_locked_test(&rwp->rw_lock,
-				 rwp->rw_release == SPLAT_RWLOCK_RELEASE_RD));
+	    rwp->rw_release == SPLAT_RWLOCK_RELEASE_RD));
 
 	spin_lock(&rwp->rw_lock);
 	rwp->rw_completed++;
 	rwp->rw_holders--;
 	spin_unlock(&rwp->rw_lock);
 	splat_vprint(rwp->rw_file, rwt->rwt_name,
-		     "%s dropped rwlock (%d holding/%d waiting)\n",
-		     name, rwp->rw_holders, rwp->rw_waiters);
+	    "%s dropped rwlock (%d holding/%d waiting)\n",
+	    rwt->rwt_thread->comm, rwp->rw_holders, rwp->rw_waiters);
 
 	rw_exit(&rwp->rw_rwlock);
 
@@ -202,7 +198,6 @@ void splat_init_rw_priv(rw_priv_t *rwp, struct file *file)
 splat_rwlock_test1(struct file *file, void *arg)
 {
 	int i, count = 0, rc = 0;
-	long pids[SPLAT_RWLOCK_TEST_COUNT];
 	rw_thr_t rwt[SPLAT_RWLOCK_TEST_COUNT];
 	rw_priv_t *rwp;
 
@@ -214,22 +209,22 @@ void splat_init_rw_priv(rw_priv_t *rwp, struct file *file)
 
 	/* Create some threads, the exact number isn't important just as
 	 * long as we know how many we managed to create and should expect. */
-
-
-
 	for (i = 0; i < SPLAT_RWLOCK_TEST_COUNT; i++) {
 		rwt[i].rwt_rwp = rwp;
-		rwt[i].rwt_id = i;
 		rwt[i].rwt_name = SPLAT_RWLOCK_TEST1_NAME;
 
 		/* The first thread will be the writer */
 		if (i == 0)
-			pids[i] = kernel_thread(splat_rwlock_wr_thr, &rwt[i], 0);
+			rwt[i].rwt_thread = kthread_create(splat_rwlock_wr_thr,
+			    &rwt[i], "%s/%d", SPLAT_RWLOCK_TEST_NAME, i);
 		else
-			pids[i] = kernel_thread(splat_rwlock_rd_thr, &rwt[i], 0);
+			rwt[i].rwt_thread = kthread_create(splat_rwlock_rd_thr,
+			    &rwt[i], "%s/%d", SPLAT_RWLOCK_TEST_NAME, i);
 
-		if (pids[i] >= 0)
+		if (!IS_ERR(rwt[i].rwt_thread)) {
+			wake_up_process(rwt[i].rwt_thread);
 			count++;
+		}
 	}
 
 	/* Wait for the writer */
-- 
1.7.10
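
As a minimal sketch (hypothetical module code, not part of the patch
above), the kthread_create()/wake_up_process() pattern the patch adopts
looks like this: kthread_create() returns a task_struct pointer, or an
ERR_PTR() value on failure, and the new thread only begins running once
it is explicitly woken, replacing the PID-returning kernel_thread()
call checked in the old SPLAT code.

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int example_thread_fn(void *arg)
{
	/* Thread body; 'arg' is the pointer handed to kthread_create(). */
	return 0;
}

static int example_spawn(void *arg, int id)
{
	struct task_struct *tsk;

	/* Created in a stopped state; the thread name is "example/<id>". */
	tsk = kthread_create(example_thread_fn, arg, "example/%d", id);
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);	/* creation failed, nothing to wake */

	wake_up_process(tsk);		/* the thread starts running here */
	return 0;
}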




1.1                  sys-kernel/spl/files/spl-0.6.0_rc12-fix-race-in-slabs.patch

file : http://sources.gentoo.org/viewvc.cgi/gentoo-x86/sys-kernel/spl/files/spl-0.6.0_rc12-fix-race-in-slabs.patch?rev=1.1&view=markup
plain: http://sources.gentoo.org/viewvc.cgi/gentoo-x86/sys-kernel/spl/files/spl-0.6.0_rc12-fix-race-in-slabs.patch?rev=1.1&content-type=text/plain

Index: spl-0.6.0_rc12-fix-race-in-slabs.patch
===================================================================
From 043f9b57247eafb8e28e7b9465470ece87090228 Mon Sep 17 00:00:00 2001
From: Brian Behlendorf <behlendorf1@llnl.gov>
Date: Mon, 26 Nov 2012 16:52:28 -0800
Subject: [PATCH] Disable FS reclaim when allocating new slabs

Allowing the spl_cache_grow_work() function to reclaim inodes
can lead to two unlikely deadlocks.  Therefore, we clear __GFP_FS
for these allocations.  The two deadlocks are:

* While holding the ZFS_OBJ_HOLD_ENTER(zsb, obj1) lock a function
  calls kmem_cache_alloc() which happens to need to allocate a
  new slab.  To allocate the new slab we enter FS level reclaim
  and attempt to evict several inodes.  To evict these inodes we
  need to take the ZFS_OBJ_HOLD_ENTER(zsb, obj2) lock and it
  just happens that obj1 and obj2 use the same hashed lock.

* Similar to the first case however instead of getting blocked
  on the hash lock we block in txg_wait_open() which is waiting
  for the next txg which isn't coming because the txg_sync
  thread is blocked in kmem_cache_alloc().

Note this isn't a 100% fix because vmalloc() won't strictly
honor __GFP_FS.  However, in practice this is sufficient because
several very unlikely things must all occur concurrently.

Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Issue zfsonlinux/zfs#1101
---
 module/spl/spl-kmem.c |    2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/module/spl/spl-kmem.c b/module/spl/spl-kmem.c
index f3113e0..b171d44 100644
--- a/module/spl/spl-kmem.c
+++ b/module/spl/spl-kmem.c
@@ -1773,7 +1773,7 @@ static int spl_cache_flush(spl_kmem_cache_t *skc,
 
 		atomic_inc(&skc->skc_ref);
 		ska->ska_cache = skc;
-		ska->ska_flags = flags;
+		ska->ska_flags = flags & ~__GFP_FS;
 		spl_init_delayed_work(&ska->ska_work, spl_cache_grow_work, ska);
 		schedule_delayed_work(&ska->ska_work, 0);
 	}
-- 
1.7.10
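
A minimal sketch (hypothetical code, not the SPL implementation) of the
idea behind the one-line change above: mask __GFP_FS out of the
allocation flags used for the asynchronous slab grow, so that any
reclaim triggered by the allocation cannot re-enter filesystem code and
deadlock on locks the original caller may already hold.

#include <linux/gfp.h>
#include <linux/slab.h>

static void *grow_alloc_no_fs_reclaim(size_t size, gfp_t caller_flags)
{
	/*
	 * Clear __GFP_FS: reclaim may still run for this allocation, but it
	 * will not call back into filesystem code (inode eviction, dirty
	 * page writeback), which is what sets up the deadlocks above.
	 */
	gfp_t flags = caller_flags & ~__GFP_FS;

	return kmalloc(size, flags);
}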




1.1                  sys-kernel/spl/files/spl-0.6.0_rc12-fix-3.6-compat-regression.patch

file : http://sources.gentoo.org/viewvc.cgi/gentoo-x86/sys-kernel/spl/files/spl-0.6.0_rc12-fix-3.6-compat-regression.patch?rev=1.1&view=markup
plain: http://sources.gentoo.org/viewvc.cgi/gentoo-x86/sys-kernel/spl/files/spl-0.6.0_rc12-fix-3.6-compat-regression.patch?rev=1.1&content-type=text/plain

Index: spl-0.6.0_rc12-fix-3.6-compat-regression.patch
===================================================================
From 053678f3b0572db442551dfa547dea939f45c96b Mon Sep 17 00:00:00 2001
From: Brian Behlendorf <behlendorf1@llnl.gov>
Date: Mon, 3 Dec 2012 12:01:28 -0800
Subject: [PATCH] Handle errors from spl_kern_path_locked()

When the Linux 3.6 KERN_PATH_LOCKED compatibility code was added
by commit bcb1589, an entirely new vn_remove() implementation was
introduced.  That function did not properly handle an error from
spl_kern_path_locked(), which would result in a panic.  This
patch addresses the issue by returning the error to the caller.

Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #187
---
 module/spl/spl-vnode.c |    2 ++
 1 file changed, 2 insertions(+)

diff --git a/module/spl/spl-vnode.c b/module/spl/spl-vnode.c
index a0fed32..0ecd9ad 100644
--- a/module/spl/spl-vnode.c
+++ b/module/spl/spl-vnode.c
@@ -332,6 +332,8 @@
 		rc = vfs_unlink(parent.dentry->d_inode, dentry);
 exit1:
 		dput(dentry);
+	} else {
+		return (-rc);
 	}
 
 	spl_inode_unlock(parent.dentry->d_inode);
-- 
1.7.10
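
A minimal sketch (hypothetical code built on the stock
kern_path_locked() interface rather than the SPL wrapper) of the error
handling the patch above enforces: the locked path lookup may return an
ERR_PTR() value, and that error must be propagated to the caller
instead of falling through to code that assumes the lookup succeeded.

#include <linux/dcache.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/namei.h>
#include <linux/path.h>

static int example_remove(const char *name)
{
	struct path parent;
	struct dentry *dentry;

	/* kern_path_locked() locks the parent directory on success. */
	dentry = kern_path_locked(name, &parent);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);	/* propagate, nothing was locked */

	/* ... unlink using 'dentry' and 'parent.dentry->d_inode' ... */

	dput(dentry);
	mutex_unlock(&parent.dentry->d_inode->i_mutex);	/* 3.x-era unlock */
	path_put(&parent);
	return 0;
}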





