From: "Sam James" <sam@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] repo/gentoo:master commit in: net-misc/asterisk/files/, net-misc/asterisk/
Date: Sat, 22 May 2021 15:26:52 +0000 (UTC)
Message-ID: <1621697200.64969b9652f34bea460cddebde40c14569427052.sam@gentoo>

commit:     64969b9652f34bea460cddebde40c14569427052
Author:     Jaco Kroon <jaco <AT> uls <DOT> co <DOT> za>
AuthorDate: Sat May 22 14:58:46 2021 +0000
Commit:     Sam James <sam <AT> gentoo <DOT> org>
CommitDate: Sat May 22 15:26:40 2021 +0000
URL:        https://gitweb.gentoo.org/repo/gentoo.git/commit/?id=64969b96

net-misc/asterisk: fix shutdown deadlock (16.18.0-r1 revision bump).

Introduce combined patch of:

https://gerrit.asterisk.org/c/asterisk/+/15942 (CLI: locks show)
https://gerrit.asterisk.org/c/asterisk/+/15943 (unload memory corruption)
https://gerrit.asterisk.org/c/asterisk/+/15944 (error path ref counting)
https://gerrit.asterisk.org/c/asterisk/+/15945 (ast_module_ref usage)

Package-Manager: Portage-3.0.18, Repoman-3.0.2
Signed-off-by: Jaco Kroon <jaco <AT> uls.co.za>
Signed-off-by: Sam James <sam <AT> gentoo.org>

 ...k-16.18.0.ebuild => asterisk-16.18.0-r1.ebuild} |   3 +-
 .../asterisk-16.18.0-r1-func_lock-fix-races.patch  | 177 +++++++++++++++++++++
 2 files changed, 179 insertions(+), 1 deletion(-)

diff --git a/net-misc/asterisk/asterisk-16.18.0.ebuild b/net-misc/asterisk/asterisk-16.18.0-r1.ebuild
similarity index 98%
rename from net-misc/asterisk/asterisk-16.18.0.ebuild
rename to net-misc/asterisk/asterisk-16.18.0-r1.ebuild
index fd51937b846..520bcc0b156 100644
--- a/net-misc/asterisk/asterisk-16.18.0.ebuild
+++ b/net-misc/asterisk/asterisk-16.18.0-r1.ebuild
@@ -28,7 +28,8 @@ REQUIRED_USE="gtalk? ( xmpp )
 "
 
 PATCHES=(
-	"${FILESDIR}/asterisk-16.16.2-no-var-run-install.patch"
+	"${FILESDIR}/${PN}-16.16.2-no-var-run-install.patch"
+	"${FILESDIR}/${PN}-16.18.0-r1-func_lock-fix-races.patch"
 )
 
 DEPEND="acct-user/asterisk

diff --git a/net-misc/asterisk/files/asterisk-16.18.0-r1-func_lock-fix-races.patch b/net-misc/asterisk/files/asterisk-16.18.0-r1-func_lock-fix-races.patch
new file mode 100644
index 00000000000..a18ef34d499
--- /dev/null
+++ b/net-misc/asterisk/files/asterisk-16.18.0-r1-func_lock-fix-races.patch
@@ -0,0 +1,177 @@
+Combined patch from upstream (all authored by me):
+
+https://gerrit.asterisk.org/c/asterisk/+/15942 (CLI: locks show)
+https://gerrit.asterisk.org/c/asterisk/+/15943 (unload memory corruption)
+https://gerrit.asterisk.org/c/asterisk/+/15944 (error path ref counting)
+https://gerrit.asterisk.org/c/asterisk/+/15945 (ast_module_ref usage)
+
+The cause of my nightmares was the unload memory corruption; the other two
+fixes, whilst much less likely to trigger, are just as serious.
+
+The fixes have all been well tested, and the individual patches are quite small.
+
+Signed-off-by: Jaco Kroon <jaco@uls.co.za>
+---
+diff --git a/funcs/func_lock.c b/funcs/func_lock.c
+index 072640751e..31a7fcda29 100644
+--- a/funcs/func_lock.c
++++ b/funcs/func_lock.c
+@@ -42,6 +42,7 @@
+ #include "asterisk/linkedlists.h"
+ #include "asterisk/astobj2.h"
+ #include "asterisk/utils.h"
++#include "asterisk/cli.h"
+ 
+ /*** DOCUMENTATION
+ 	<function name="LOCK" language="en_US">
+@@ -157,6 +158,8 @@ static void lock_free(void *data)
+ 	AST_LIST_UNLOCK(oldlist);
+ 	AST_LIST_HEAD_DESTROY(oldlist);
+ 	ast_free(oldlist);
++
++	ast_module_unref(ast_module_info->self);
+ }
+ 
+ static void lock_fixup(void *data, struct ast_channel *oldchan, struct ast_channel *newchan)
+@@ -191,7 +194,12 @@ static int get_lock(struct ast_channel *chan, char *lockname, int trylock)
+ 	struct timeval now;
+ 
+ 	if (!lock_store) {
+-		ast_debug(1, "Channel %s has no lock datastore, so we're allocating one.\n", ast_channel_name(chan));
++		if (unloading) {
++			ast_log(LOG_ERROR, "%sLOCK has no datastore and func_lock is unloading, failing.\n",
++					trylock ? "TRY" : "");
++			return -1;
++		}
++
+ 		lock_store = ast_datastore_alloc(&lock_info, NULL);
+ 		if (!lock_store) {
+ 			ast_log(LOG_ERROR, "Unable to allocate new datastore.  No locks will be obtained.\n");
+@@ -210,6 +218,9 @@ static int get_lock(struct ast_channel *chan, char *lockname, int trylock)
+ 		lock_store->data = list;
+ 		AST_LIST_HEAD_INIT(list);
+ 		ast_channel_datastore_add(chan, lock_store);
++
++		/* We cannot unload until this channel has released the lock_store */
++		ast_module_ref(ast_module_info->self);
+ 	} else
+ 		list = lock_store->data;
+ 
+@@ -223,6 +234,9 @@ static int get_lock(struct ast_channel *chan, char *lockname, int trylock)
+ 
+ 	if (!current) {
+ 		if (unloading) {
++			ast_log(LOG_ERROR,
++				"Lock doesn't exist whilst unloading.  %sLOCK will fail.\n",
++				trylock ? "TRY" : "");
+ 			/* Don't bother */
+ 			AST_LIST_UNLOCK(&locklist);
+ 			return -1;
+@@ -249,7 +263,6 @@ static int get_lock(struct ast_channel *chan, char *lockname, int trylock)
+ 			AST_LIST_UNLOCK(&locklist);
+ 			return -1;
+ 		}
+-		current->requesters = 0;
+ 		AST_LIST_INSERT_TAIL(&locklist, current, entries);
+ 	}
+ 	/* Add to requester list */
+@@ -268,7 +281,13 @@ static int get_lock(struct ast_channel *chan, char *lockname, int trylock)
+ 
+ 	if (!clframe) {
+ 		if (unloading) {
++			ast_log(LOG_ERROR,
++				"Busy unloading.  %sLOCK will fail.\n",
++				trylock ? "TRY" : "");
+ 			/* Don't bother */
++			ast_mutex_lock(&current->mutex);
++			current->requesters--;
++			ast_mutex_unlock(&current->mutex);
+ 			AST_LIST_UNLOCK(list);
+ 			return -1;
+ 		}
+@@ -277,6 +296,9 @@ static int get_lock(struct ast_channel *chan, char *lockname, int trylock)
+ 			ast_log(LOG_ERROR,
+ 				"Unable to allocate channel lock frame.  %sLOCK will fail.\n",
+ 				trylock ? "TRY" : "");
++			ast_mutex_lock(&current->mutex);
++			current->requesters--;
++			ast_mutex_unlock(&current->mutex);
+ 			AST_LIST_UNLOCK(list);
+ 			return -1;
+ 		}
+@@ -409,6 +431,37 @@ static int trylock_read(struct ast_channel *chan, const char *cmd, char *data, c
+ 	return 0;
+ }
+ 
++static char *handle_cli_locks_show(struct ast_cli_entry *e, int cmd, struct ast_cli_args *a)
++{
++	int c = 0;
++	struct lock_frame* current;
++	switch (cmd) {
++	case CLI_INIT:
++		e->command = "locks show";
++		e->usage =
++			"Usage: locks show\n"
++			"       List all locks known to func_lock, along with their current status.\n";
++		return NULL;
++	case CLI_GENERATE:
++		return NULL;
++	}
++
++	ast_cli(a->fd, "func_lock locks:\n");
++	ast_cli(a->fd, "%-40s Requesters Owner\n", "Name");
++	AST_LIST_LOCK(&locklist);
++	AST_LIST_TRAVERSE(&locklist, current, entries) {
++		ast_mutex_lock(&current->mutex);
++		ast_cli(a->fd, "%-40s %-10d %s\n", current->name, current->requesters,
++				current->owner ? ast_channel_name(current->owner) : "(unlocked)");
++		ast_mutex_unlock(&current->mutex);
++		c++;
++	}
++	AST_LIST_UNLOCK(&locklist);
++	ast_cli(a->fd, "%d total locks listed.\n", c);
++
++	return 0;
++}
++
+ static struct ast_custom_function lock_function = {
+ 	.name = "LOCK",
+ 	.read = lock_read,
+@@ -427,6 +480,8 @@ static struct ast_custom_function unlock_function = {
+ 	.read_max = 2,
+ };
+ 
++static struct ast_cli_entry cli_locks_show = AST_CLI_DEFINE(handle_cli_locks_show, "List func_lock locks.");
++
+ static int unload_module(void)
+ {
+ 	struct lock_frame *current;
+@@ -439,10 +494,19 @@ static int unload_module(void)
+ 	ast_custom_function_unregister(&lock_function);
+ 	ast_custom_function_unregister(&trylock_function);
+ 
++	ast_cli_unregister(&cli_locks_show);
++
+ 	AST_LIST_LOCK(&locklist);
+-	AST_LIST_TRAVERSE(&locklist, current, entries) {
++	while ((current = AST_LIST_REMOVE_HEAD(&locklist, entries))) {
++		int warned = 0;
+ 		ast_mutex_lock(&current->mutex);
+ 		while (current->owner || current->requesters) {
++			if (!warned) {
++				ast_log(LOG_WARNING, "Waiting for %d requesters for %s lock %s.\n",
++						current->requesters, current->owner ? "locked" : "unlocked",
++						current->name);
++				warned = 1;
++			}
+ 			/* either the mutex is locked, or other parties are currently in get_lock,
+ 			 * we need to wait for all of those to clear first */
+ 			ast_cond_wait(&current->cond, &current->mutex);
+@@ -470,6 +534,7 @@ static int load_module(void)
+ 	int res = ast_custom_function_register_escalating(&lock_function, AST_CFE_READ);
+ 	res |= ast_custom_function_register_escalating(&trylock_function, AST_CFE_READ);
+ 	res |= ast_custom_function_register_escalating(&unlock_function, AST_CFE_READ);
++	res |= ast_cli_register(&cli_locks_show);
+ 
+ 	return res;
+ }
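
To make the concurrency reasoning in the patch description easier to follow, here is a minimal, self-contained sketch of the refcount-guarded teardown pattern that the func_lock changes above apply. It is not Asterisk code: it uses plain pthreads instead of the ast_module_ref/ast_cond_wait machinery, and every demo_* name is invented purely for illustration.

/*
 * Standalone sketch (plain C + pthreads, NOT Asterisk code) of the
 * pattern the combined patch applies in funcs/func_lock.c: track how
 * many requesters are inside get_lock(), refuse new work once unload
 * starts, and let unload wait until owner and requesters are both gone.
 * All demo_* names are made up for illustration.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct demo_lock {
	pthread_mutex_t mutex;
	pthread_cond_t cond;
	int requesters;   /* threads currently inside demo_acquire() */
	int owned;        /* 1 while a "channel" holds the lock */
	int unloading;    /* set once teardown starts; new requests fail */
};

static struct demo_lock lock = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, 0, 0
};

/* Roughly mirrors get_lock(): bail out if unloading, otherwise register
 * as a requester and balance the count on every exit path (the bug
 * class addressed by the error-path ref-counting change). */
static int demo_acquire(void)
{
	pthread_mutex_lock(&lock.mutex);
	if (lock.unloading) {
		pthread_mutex_unlock(&lock.mutex);
		return -1;
	}
	lock.requesters++;
	while (lock.owned) {
		pthread_cond_wait(&lock.cond, &lock.mutex);
	}
	lock.owned = 1;
	lock.requesters--;
	pthread_mutex_unlock(&lock.mutex);
	return 0;
}

static void demo_release(void)
{
	pthread_mutex_lock(&lock.mutex);
	lock.owned = 0;
	pthread_cond_broadcast(&lock.cond);
	pthread_mutex_unlock(&lock.mutex);
}

/* Roughly mirrors the reworked unload_module(): refuse new work, then
 * wait until nobody owns the lock and nobody is mid-request before the
 * lock state may be torn down. */
static void demo_unload(void)
{
	pthread_mutex_lock(&lock.mutex);
	lock.unloading = 1;
	while (lock.owned || lock.requesters) {
		printf("waiting for %d requester(s), owned=%d\n",
		       lock.requesters, lock.owned);
		pthread_cond_wait(&lock.cond, &lock.mutex);
	}
	pthread_mutex_unlock(&lock.mutex);
	printf("safe to free lock state now\n");
}

static void *worker(void *arg)
{
	(void)arg;
	if (demo_acquire() == 0) {
		usleep(100 * 1000);  /* pretend to do channel work */
		demo_release();
	}
	return NULL;
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, worker, NULL);
	usleep(10 * 1000);  /* give the worker a head start */
	demo_unload();      /* blocks until the worker is done */
	pthread_join(t, NULL);
	return 0;
}

Build with "cc -pthread demo.c". The invariants are the ones the combined patch enforces: new requests fail once unloading is set, every early-exit path undoes its requester increment, and teardown waits on the condition variable until both the owner and the requester count are gone before any state is freed.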

