* [gentoo-commits] repo/gentoo:master commit in: media-video/pipewire/files/1.2.0/, media-video/pipewire/
@ 2024-07-06 5:41 Sam James
From: Sam James @ 2024-07-06 5:41 UTC
To: gentoo-commits
commit: a58e3aa65a34cf2f8be43eaeb9c1414308b1a347
Author: Sam James <sam <AT> gentoo <DOT> org>
AuthorDate: Sat Jul 6 05:40:51 2024 +0000
Commit: Sam James <sam <AT> gentoo <DOT> org>
CommitDate: Sat Jul 6 05:41:12 2024 +0000
URL: https://gitweb.gentoo.org/repo/gentoo.git/commit/?id=a58e3aa6
media-video/pipewire: backport recommended 1.2.x fixes
Upstream recommend:
"""
b8d07e40 fix KODI no sound
e6c0014f fix ardour export
525360d7 fix plasma thumbnail
"""
Take some other fixes on the 1.2 branch from 1.2.0..HEAD too.
Signed-off-by: Sam James <sam <AT> gentoo.org>
...fix-required-state-for-async-driver-nodes.patch | 82 ++++++++++++
...odule-raop-only-set-softVolume-when-valid.patch | 30 +++++
...-node-collect-with-groups-and-sync-enable.patch | 149 +++++++++++++++++++++
...-impl-node-disable-async-for-driver-nodes.patch | 112 ++++++++++++++++
...05-impl-node-set-INACTIVE-state-on-server.patch | 30 +++++
...re-1.2.0-r1.ebuild => pipewire-1.2.0-r2.ebuild} | 0
6 files changed, 403 insertions(+)
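Note that the ebuild itself is only renamed for the revision bump, with no content changes, which implies it already applies everything under files/${PV}/. As a hedged illustration of that Gentoo convention (not the actual contents of pipewire-1.2.0-r2.ebuild), pointing PATCHES at the versioned directory lets eapply pick up any newly added *.patch files automatically:

	# hypothetical excerpt; the real ebuild may instead list each patch explicitly
	PATCHES=(
		"${FILESDIR}"/${PV}
	)

With that layout, dropping the five patches above into files/1.2.0/ and bumping the revision is enough to ship the backports.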
diff --git a/media-video/pipewire/files/1.2.0/0001-impl-node-fix-required-state-for-async-driver-nodes.patch b/media-video/pipewire/files/1.2.0/0001-impl-node-fix-required-state-for-async-driver-nodes.patch
new file mode 100644
index 000000000000..105c8dd1676a
--- /dev/null
+++ b/media-video/pipewire/files/1.2.0/0001-impl-node-fix-required-state-for-async-driver-nodes.patch
@@ -0,0 +1,82 @@
+From b8d07e40d66f12ac28aab710cfeb181bf25bc59a Mon Sep 17 00:00:00 2001
+From: Wim Taymans <wtaymans@redhat.com>
+Date: Mon, 1 Jul 2024 10:36:09 +0200
+Subject: [PATCH 1/5] impl-node: fix required state for async driver nodes
+
+When the node activation.required was incremented because it was a
+driver, only decrement it in that case, regardless of the current driver
+state of the node.
+
+This fixes the case of KODI where the required field gets out of sync
+and things become unschedulable.
+
+Fixes #4087
+---
+ src/pipewire/impl-node.c | 22 ++++++++++++++--------
+ src/pipewire/private.h | 1 +
+ 2 files changed, 15 insertions(+), 8 deletions(-)
+
+diff --git a/src/pipewire/impl-node.c b/src/pipewire/impl-node.c
+index 12629ee64..4def52897 100644
+--- a/src/pipewire/impl-node.c
++++ b/src/pipewire/impl-node.c
+@@ -112,13 +112,17 @@ static inline void activate_target(struct pw_impl_node *node, struct pw_node_tar
+ {
+ struct pw_node_activation_state *state = &t->activation->state[0];
+ if (!t->active) {
+- if ((!node->async || node->driving) && !node->exported) {
+- SPA_ATOMIC_INC(state->required);
+- SPA_ATOMIC_INC(state->pending);
++ if (!node->async || node->driving) {
++ if (!node->exported) {
++ SPA_ATOMIC_INC(state->required);
++ SPA_ATOMIC_INC(state->pending);
++ }
+ }
++ t->active_driving = node->driving;
+ t->active = true;
+- pw_log_debug("%p: target state:%p id:%d pending:%d/%d",
+- node, state, t->id, state->pending, state->required);
++ pw_log_debug("%p: target state:%p id:%d pending:%d/%d %d:%d:%d",
++ node, state, t->id, state->pending, state->required,
++ node->async, node->driving, node->exported);
+ }
+ }
+
+@@ -126,7 +130,7 @@ static inline void deactivate_target(struct pw_impl_node *node, struct pw_node_t
+ {
+ if (t->active) {
+ struct pw_node_activation_state *state = &t->activation->state[0];
+- if (!node->async || node->driving) {
++ if (!node->async || t->active_driving) {
+ /* the driver copies the required to the pending state
+ * so first try to resume the node and then decrement the
+ * required state. This way we either resume with the old value
+@@ -137,8 +141,10 @@ static inline void deactivate_target(struct pw_impl_node *node, struct pw_node_t
+ SPA_ATOMIC_DEC(state->required);
+ }
+ t->active = false;
+- pw_log_debug("%p: target state:%p id:%d pending:%d/%d trigger:%"PRIu64,
+- node, state, t->id, state->pending, state->required, trigger);
++ t->active_driving = false;
++ pw_log_debug("%p: target state:%p id:%d pending:%d/%d %d:%d:%d trigger:%"PRIu64,
++ node, state, t->id, state->pending, state->required,
++ node->async, node->driving, node->exported, trigger);
+ }
+ }
+
+diff --git a/src/pipewire/private.h b/src/pipewire/private.h
+index 8c01fe8d5..25af677ac 100644
+--- a/src/pipewire/private.h
++++ b/src/pipewire/private.h
+@@ -541,6 +541,7 @@ struct pw_node_target {
+ int fd;
+ void (*trigger)(struct pw_node_target *t, uint64_t nsec);
+ unsigned int active:1;
++ unsigned int active_driving:1;
+ unsigned int added:1;
+ };
+
+--
+2.45.2
+
diff --git a/media-video/pipewire/files/1.2.0/0002-module-raop-only-set-softVolume-when-valid.patch b/media-video/pipewire/files/1.2.0/0002-module-raop-only-set-softVolume-when-valid.patch
new file mode 100644
index 000000000000..343b42dfdc8a
--- /dev/null
+++ b/media-video/pipewire/files/1.2.0/0002-module-raop-only-set-softVolume-when-valid.patch
@@ -0,0 +1,30 @@
+From 82b9fa118f2fa009b5eb2891378fe003e2573bbe Mon Sep 17 00:00:00 2001
+From: Wim Taymans <wtaymans@redhat.com>
+Date: Mon, 1 Jul 2024 11:27:17 +0200
+Subject: [PATCH 2/5] module-raop: only set softVolume when valid
+
+---
+ src/modules/module-raop-sink.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/src/modules/module-raop-sink.c b/src/modules/module-raop-sink.c
+index 05e467d24..8ad28693f 100644
+--- a/src/modules/module-raop-sink.c
++++ b/src/modules/module-raop-sink.c
+@@ -1643,10 +1643,10 @@ static void stream_props_changed(struct impl *impl, uint32_t id, const struct sp
+ impl->volume = volume;
+
+ rtsp_send_volume(impl);
++ spa_pod_builder_prop(&b, SPA_PROP_softVolumes, 0);
++ spa_pod_builder_array(&b, sizeof(float), SPA_TYPE_Float,
++ n_vols, soft_vols);
+ }
+- spa_pod_builder_prop(&b, SPA_PROP_softVolumes, 0);
+- spa_pod_builder_array(&b, sizeof(float), SPA_TYPE_Float,
+- n_vols, soft_vols);
+ spa_pod_builder_raw_padded(&b, prop, SPA_POD_PROP_SIZE(prop));
+ break;
+ }
+--
+2.45.2
+
diff --git a/media-video/pipewire/files/1.2.0/0003-context-Fix-node-collect-with-groups-and-sync-enable.patch b/media-video/pipewire/files/1.2.0/0003-context-Fix-node-collect-with-groups-and-sync-enable.patch
new file mode 100644
index 000000000000..5e18550bf193
--- /dev/null
+++ b/media-video/pipewire/files/1.2.0/0003-context-Fix-node-collect-with-groups-and-sync-enable.patch
@@ -0,0 +1,149 @@
+From e6c0014f94e995e49b72bea7ae56b960416e6b29 Mon Sep 17 00:00:00 2001
+From: Wim Taymans <wtaymans@redhat.com>
+Date: Mon, 1 Jul 2024 14:50:34 +0200
+Subject: [PATCH 3/5] context: Fix node collect with groups and sync enabled
+
+Keep track of the sync nodes we added to a driver and bring in the other
+nodes from the same sync group, group or link groups. This makes it
+possible to have disjoint sync groups each with their own driver.
+
+Fixes export in ardour8
+
+Fixes #4083
+---
+ src/pipewire/context.c | 49 +++++++++++++++++++++---------------------
+ 1 file changed, 25 insertions(+), 24 deletions(-)
+
+diff --git a/src/pipewire/context.c b/src/pipewire/context.c
+index 686dd5eee..f3e1b4d76 100644
+--- a/src/pipewire/context.c
++++ b/src/pipewire/context.c
+@@ -1163,13 +1163,14 @@ static inline int run_nodes(struct pw_context *context, struct pw_impl_node *nod
+ * This ensures that we only activate the paths from the runnable nodes to the
+ * driver nodes and leave the other nodes idle.
+ */
+-static int collect_nodes(struct pw_context *context, struct pw_impl_node *node, struct spa_list *collect,
+- char **sync)
++static int collect_nodes(struct pw_context *context, struct pw_impl_node *node, struct spa_list *collect)
+ {
+ struct spa_list queue;
+ struct pw_impl_node *n, *t;
+ struct pw_impl_port *p;
+ struct pw_impl_link *l;
++ uint32_t n_sync;
++ char *sync[MAX_SYNC+1];
+
+ pw_log_debug("node %p: '%s'", node, node->name);
+
+@@ -1178,20 +1179,30 @@ static int collect_nodes(struct pw_context *context, struct pw_impl_node *node,
+ spa_list_append(&queue, &node->sort_link);
+ node->visited = true;
+
++ n_sync = 0;
++ sync[0] = NULL;
++
+ /* now follow all the links from the nodes in the queue
+ * and add the peers to the queue. */
+ spa_list_consume(n, &queue, sort_link) {
+ spa_list_remove(&n->sort_link);
+ spa_list_append(collect, &n->sort_link);
+
+- pw_log_debug(" next node %p: '%s' runnable:%u", n, n->name, n->runnable);
++ pw_log_debug(" next node %p: '%s' runnable:%u active:%d",
++ n, n->name, n->runnable, n->active);
+
+ if (!n->active)
+ continue;
+
+- if (sync[0] != NULL) {
+- if (pw_strv_find_common(n->sync_groups, sync) < 0)
+- continue;
++ if (n->sync) {
++ for (uint32_t i = 0; n->sync_groups[i]; i++) {
++ if (n_sync >= MAX_SYNC)
++ break;
++ if (pw_strv_find(sync, n->sync_groups[i]) >= 0)
++ continue;
++ sync[n_sync++] = n->sync_groups[i];
++ sync[n_sync] = NULL;
++ }
+ }
+
+ spa_list_for_each(p, &n->input_ports, link) {
+@@ -1242,6 +1253,8 @@ static int collect_nodes(struct pw_context *context, struct pw_impl_node *node,
+ spa_list_for_each(t, &context->node_list, link) {
+ if (t->exported || !t->active || t->visited)
+ continue;
++ /* the other node will be scheduled with this one if it's in
++ * the same group or link group */
+ if (pw_strv_find_common(t->groups, n->groups) < 0 &&
+ pw_strv_find_common(t->link_groups, n->link_groups) < 0 &&
+ pw_strv_find_common(t->sync_groups, sync) < 0)
+@@ -1253,7 +1266,8 @@ static int collect_nodes(struct pw_context *context, struct pw_impl_node *node,
+ spa_list_append(&queue, &t->sort_link);
+ }
+ }
+- pw_log_debug(" next node %p: '%s' runnable:%u", n, n->name, n->runnable);
++ pw_log_debug(" next node %p: '%s' runnable:%u %p %p %p", n, n->name, n->runnable,
++ n->groups, n->link_groups, sync);
+ }
+ spa_list_for_each(n, collect, sort_link)
+ if (!n->driving && n->runnable) {
+@@ -1497,10 +1511,9 @@ int pw_context_recalc_graph(struct pw_context *context, const char *reason)
+ struct pw_impl_node *n, *s, *target, *fallback;
+ const uint32_t *rates;
+ uint32_t max_quantum, min_quantum, def_quantum, rate_quantum, floor_quantum, ceil_quantum;
+- uint32_t n_rates, def_rate, n_sync;
++ uint32_t n_rates, def_rate;
+ bool freewheel, global_force_rate, global_force_quantum, transport_start;
+ struct spa_list collect;
+- char *sync[MAX_SYNC+1];
+
+ pw_log_info("%p: busy:%d reason:%s", context, impl->recalc, reason);
+
+@@ -1514,23 +1527,11 @@ again:
+ freewheel = false;
+ transport_start = false;
+
+- /* clean up the flags first and collect sync */
+- n_sync = 0;
+- sync[0] = NULL;
++ /* clean up the flags first */
+ spa_list_for_each(n, &context->node_list, link) {
+ n->visited = false;
+ n->checked = 0;
+ n->runnable = n->always_process && n->active;
+- if (n->sync) {
+- for (uint32_t i = 0; n->sync_groups[i]; i++) {
+- if (n_sync >= MAX_SYNC)
+- break;
+- if (pw_strv_find(sync, n->sync_groups[i]) >= 0)
+- continue;
+- sync[n_sync++] = n->sync_groups[i];
+- sync[n_sync] = NULL;
+- }
+- }
+ }
+
+ get_quantums(context, &def_quantum, &min_quantum, &max_quantum, &rate_quantum,
+@@ -1551,7 +1552,7 @@ again:
+
+ if (!n->visited) {
+ spa_list_init(&collect);
+- collect_nodes(context, n, &collect, sync);
++ collect_nodes(context, n, &collect);
+ move_to_driver(context, &collect, n);
+ }
+ /* from now on we are only interested in active driving nodes
+@@ -1605,7 +1606,7 @@ again:
+
+ /* collect all nodes in this group */
+ spa_list_init(&collect);
+- collect_nodes(context, n, &collect, sync);
++ collect_nodes(context, n, &collect);
+
+ driver = NULL;
+ spa_list_for_each(t, &collect, sort_link) {
+--
+2.45.2
+
diff --git a/media-video/pipewire/files/1.2.0/0004-impl-node-disable-async-for-driver-nodes.patch b/media-video/pipewire/files/1.2.0/0004-impl-node-disable-async-for-driver-nodes.patch
new file mode 100644
index 000000000000..a7528249f1e3
--- /dev/null
+++ b/media-video/pipewire/files/1.2.0/0004-impl-node-disable-async-for-driver-nodes.patch
@@ -0,0 +1,112 @@
+From 525360d70ab1698afaaaf20f7e58002b8756353f Mon Sep 17 00:00:00 2001
+From: Wim Taymans <wtaymans@redhat.com>
+Date: Wed, 3 Jul 2024 13:31:24 +0200
+Subject: [PATCH 4/5] impl-node: disable async for driver nodes
+
+Make it so that a driver node can never be scheduled async. It could
+possibly make sense when the driver node is not currently driving the
+graph but when it drives the graph it always needs to be sync. This
+also simplifies the target activation because we can simply check the
+async state and ignore if the node is driving or not.
+
+Also make sure that we never make an async link with a driver output port.
+This does not make sense because the driver node will always be
+triggered sync first and before the async node so we can simply make
+a sync link.
+
+This fixes the modified (only generate 1 buffer) video-src -> video-play
+case where the buffer never arrives in video-play because of the
+useless async link.
+
+Fixes #4092
+---
+ src/pipewire/impl-link.c | 8 +++++---
+ src/pipewire/impl-node.c | 7 +++----
+ src/pipewire/private.h | 1 -
+ 3 files changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/src/pipewire/impl-link.c b/src/pipewire/impl-link.c
+index 39e9bd17d..6dc2e1a59 100644
+--- a/src/pipewire/impl-link.c
++++ b/src/pipewire/impl-link.c
+@@ -1321,7 +1321,8 @@ struct pw_impl_link *pw_context_create_link(struct pw_context *context,
+ if (this->passive && str == NULL)
+ pw_properties_set(properties, PW_KEY_LINK_PASSIVE, "true");
+
+- impl->async = (output_node->async || input_node->async) &&
++ impl->async = !output_node->driver &&
++ (output_node->async || input_node->async) &&
+ SPA_FLAG_IS_SET(output->flags, PW_IMPL_PORT_FLAG_ASYNC) &&
+ SPA_FLAG_IS_SET(input->flags, PW_IMPL_PORT_FLAG_ASYNC);
+
+@@ -1375,8 +1376,9 @@ struct pw_impl_link *pw_context_create_link(struct pw_context *context,
+ this->name = spa_aprintf("%d.%d.%d -> %d.%d.%d",
+ output_node->info.id, output->port_id, this->rt.out_mix.port.port_id,
+ input_node->info.id, input->port_id, this->rt.in_mix.port.port_id);
+- pw_log_info("(%s) (%s) -> (%s) async:%04x:%04x:%d", this->name, output_node->name,
+- input_node->name, output->flags, input->flags, impl->async);
++ pw_log_info("(%s) (%s) -> (%s) async:%d:%04x:%04x:%d", this->name, output_node->name,
++ input_node->name, output_node->driving,
++ output->flags, input->flags, impl->async);
+
+ pw_impl_port_emit_link_added(output, this);
+ pw_impl_port_emit_link_added(input, this);
+diff --git a/src/pipewire/impl-node.c b/src/pipewire/impl-node.c
+index 4def52897..c75e5793e 100644
+--- a/src/pipewire/impl-node.c
++++ b/src/pipewire/impl-node.c
+@@ -112,13 +112,12 @@ static inline void activate_target(struct pw_impl_node *node, struct pw_node_tar
+ {
+ struct pw_node_activation_state *state = &t->activation->state[0];
+ if (!t->active) {
+- if (!node->async || node->driving) {
++ if (!node->async) {
+ if (!node->exported) {
+ SPA_ATOMIC_INC(state->required);
+ SPA_ATOMIC_INC(state->pending);
+ }
+ }
+- t->active_driving = node->driving;
+ t->active = true;
+ pw_log_debug("%p: target state:%p id:%d pending:%d/%d %d:%d:%d",
+ node, state, t->id, state->pending, state->required,
+@@ -130,7 +129,7 @@ static inline void deactivate_target(struct pw_impl_node *node, struct pw_node_t
+ {
+ if (t->active) {
+ struct pw_node_activation_state *state = &t->activation->state[0];
+- if (!node->async || t->active_driving) {
++ if (!node->async) {
+ /* the driver copies the required to the pending state
+ * so first try to resume the node and then decrement the
+ * required state. This way we either resume with the old value
+@@ -141,7 +140,6 @@ static inline void deactivate_target(struct pw_impl_node *node, struct pw_node_t
+ SPA_ATOMIC_DEC(state->required);
+ }
+ t->active = false;
+- t->active_driving = false;
+ pw_log_debug("%p: target state:%p id:%d pending:%d/%d %d:%d:%d trigger:%"PRIu64,
+ node, state, t->id, state->pending, state->required,
+ node->async, node->driving, node->exported, trigger);
+@@ -1202,6 +1200,7 @@ static void check_properties(struct pw_impl_node *node)
+ recalc_reason = "transport changed";
+ }
+ async = pw_properties_get_bool(node->properties, PW_KEY_NODE_ASYNC, false);
++ async &= !node->driver;
+ if (async != node->async) {
+ pw_log_info("%p: async %d -> %d", node, node->async, async);
+ node->async = async;
+diff --git a/src/pipewire/private.h b/src/pipewire/private.h
+index 25af677ac..8c01fe8d5 100644
+--- a/src/pipewire/private.h
++++ b/src/pipewire/private.h
+@@ -541,7 +541,6 @@ struct pw_node_target {
+ int fd;
+ void (*trigger)(struct pw_node_target *t, uint64_t nsec);
+ unsigned int active:1;
+- unsigned int active_driving:1;
+ unsigned int added:1;
+ };
+
+--
+2.45.2
+
diff --git a/media-video/pipewire/files/1.2.0/0005-impl-node-set-INACTIVE-state-on-server.patch b/media-video/pipewire/files/1.2.0/0005-impl-node-set-INACTIVE-state-on-server.patch
new file mode 100644
index 000000000000..9821c151c97b
--- /dev/null
+++ b/media-video/pipewire/files/1.2.0/0005-impl-node-set-INACTIVE-state-on-server.patch
@@ -0,0 +1,30 @@
+From d08df293a95ce976df1cc8c3ec367a8d5d84db35 Mon Sep 17 00:00:00 2001
+From: Wim Taymans <wtaymans@redhat.com>
+Date: Wed, 3 Jul 2024 17:42:39 +0200
+Subject: [PATCH 5/5] impl-node: set INACTIVE state on server
+
+Don't wait for the client to set the INACTIVE state, do it on the
+server. We already decremented the target required so we don't want to
+schedule the node anymore.
+
+Fixes some xruns when removing nodes in a stress test.
+---
+ src/pipewire/impl-node.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/pipewire/impl-node.c b/src/pipewire/impl-node.c
+index c75e5793e..be25aa83e 100644
+--- a/src/pipewire/impl-node.c
++++ b/src/pipewire/impl-node.c
+@@ -221,7 +221,7 @@ do_node_unprepare(struct spa_loop *loop, bool async, uint32_t seq,
+ if (!this->rt.prepared)
+ return 0;
+
+- if (!this->remote || this->rt.target.activation->client_version < 1) {
++ if (!this->exported) {
+ /* We mark ourself as finished now, this will avoid going further into the process loop
+ * in case our fd was ready (removing ourselfs from the loop should avoid that as well).
+ * If we were supposed to be scheduled make sure we continue the graph for the peers we
+--
+2.45.2
+
diff --git a/media-video/pipewire/pipewire-1.2.0-r1.ebuild b/media-video/pipewire/pipewire-1.2.0-r2.ebuild
similarity index 100%
rename from media-video/pipewire/pipewire-1.2.0-r1.ebuild
rename to media-video/pipewire/pipewire-1.2.0-r2.ebuild