From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:5.12 commit in: /
Date: Fri, 18 Jun 2021 12:21:01 +0000 (UTC)
Message-ID: <1624018831.b221d7caaa2b0581ee90c76956e47540959508ca.mpagano@gentoo>

commit:     b221d7caaa2b0581ee90c76956e47540959508ca
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Jun 18 12:20:31 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Jun 18 12:20:31 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b221d7ca

Update BMQ to -r1 and separate out the compilation fix

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 ...=> 5020_BMQ-and-PDS-io-scheduler-v5.12-r1.patch | 149 ++++++++-------------
 5022_BMQ-and-PDS-compilation-fix.patch             |  33 +++++
 2 files changed, 88 insertions(+), 94 deletions(-)
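
For context: the substance of the r1 update is a rework of migrate_disable()/migrate_enable() in alt_core.c. Where r0 routed both paths through __do_set_cpus_allowed() with SCA_MIGRATE_DISABLE/SCA_MIGRATE_ENABLE flag bits, r1 drops those flags and adds a dedicated __do_set_cpus_ptr() helper that temporarily repoints p->cpus_ptr at a single-CPU mask and later restores it to &p->cpus_mask, leaving the task's real affinity in cpus_mask untouched. Below is a minimal userspace sketch of that pointer-swap idea; the types and the cpumask_of() stand-in are simplified for illustration and are not the kernel's:

/*
 * sketch.c: pin-by-pointer-swap, as adopted by the r1 patch.
 * Build: gcc -o sketch sketch.c
 */
#include <stdio.h>

struct cpumask { unsigned long bits; };

struct task {
	struct cpumask cpus_mask;       /* the task's real affinity set */
	const struct cpumask *cpus_ptr; /* what placement decisions consult */
	int migration_disabled;         /* nesting counter */
};

/* one single-CPU mask per CPU, standing in for the kernel's cpumask_of() */
static const struct cpumask cpu_masks[4] = {
	{ 1UL << 0 }, { 1UL << 1 }, { 1UL << 2 }, { 1UL << 3 }
};

static void migrate_disable(struct task *p, int this_cpu)
{
	if (p->migration_disabled++)
		return;                             /* already pinned; just nest */
	if (p->cpus_ptr == &p->cpus_mask)
		p->cpus_ptr = &cpu_masks[this_cpu]; /* pin to the current CPU */
}

static void migrate_enable(struct task *p)
{
	if (p->migration_disabled > 1) {
		p->migration_disabled--;            /* still nested */
		return;
	}
	/* restore cpus_ptr before clearing the flag, matching the ordering
	 * the patch comment insists on for racing observers */
	p->cpus_ptr = &p->cpus_mask;
	p->migration_disabled = 0;
}

int main(void)
{
	struct task t = { .cpus_mask = { 0xf } }; /* allowed on CPUs 0-3 */

	t.cpus_ptr = &t.cpus_mask;
	migrate_disable(&t, 2);
	printf("pinned:   %#lx\n", t.cpus_ptr->bits); /* 0x4 */
	migrate_enable(&t);
	printf("restored: %#lx\n", t.cpus_ptr->bits); /* 0xf */
	return 0;
}

The point of the sketch: pinning is a single pointer assignment rather than a mask copy, which is what lets migrate_enable() undo it exactly and why the patch can drop the SCA_MIGRATE_* plumbing.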

diff --git a/5020_BMQ-and-PDS-io-scheduler-v5.12-r0.patch b/5020_BMQ-and-PDS-io-scheduler-v5.12-r1.patch
similarity index 99%
rename from 5020_BMQ-and-PDS-io-scheduler-v5.12-r0.patch
rename to 5020_BMQ-and-PDS-io-scheduler-v5.12-r1.patch
index 7e92738..1060af5 100644
--- a/5020_BMQ-and-PDS-io-scheduler-v5.12-r0.patch
+++ b/5020_BMQ-and-PDS-io-scheduler-v5.12-r1.patch
@@ -831,10 +831,10 @@ index 5fc9c9b70862..eb6d7d87779f 100644
  obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
 diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
 new file mode 100644
-index 000000000000..f69ed4d89395
+index 000000000000..c85e3ccf9302
 --- /dev/null
 +++ b/kernel/sched/alt_core.c
-@@ -0,0 +1,7149 @@
+@@ -0,0 +1,7138 @@
 +/*
 + *  kernel/sched/alt_core.c
 + *
@@ -889,7 +889,7 @@ index 000000000000..f69ed4d89395
 + */
 +EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
 +
-+#define ALT_SCHED_VERSION "v5.11-r3"
++#define ALT_SCHED_VERSION "v5.12-r1"
 +
 +/* rt_prio(prio) defined in include/linux/sched/rt.h */
 +#define rt_task(p)		rt_prio((p)->prio)
@@ -1934,8 +1934,6 @@ index 000000000000..f69ed4d89395
 +}
 +
 +#define SCA_CHECK		0x01
-+#define SCA_MIGRATE_DISABLE	0x02
-+#define SCA_MIGRATE_ENABLE	0x04
 +
 +#ifdef CONFIG_SMP
 +
@@ -1975,23 +1973,31 @@ index 000000000000..f69ed4d89395
 +	__set_task_cpu(p, new_cpu);
 +}
 +
-+static inline bool is_per_cpu_kthread(struct task_struct *p)
-+{
-+	return ((p->flags & PF_KTHREAD) && (1 == p->nr_cpus_allowed));
-+}
-+
 +#define MDF_FORCE_ENABLED	0x80
 +
 +static void
-+__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags);
-+
-+static int __set_cpus_allowed_ptr(struct task_struct *p,
-+				  const struct cpumask *new_mask,
-+				  u32 flags);
++__do_set_cpus_ptr(struct task_struct *p, const struct cpumask *new_mask)
++{
++	/*
++	 * This here violates the locking rules for affinity, since we're only
++	 * supposed to change these variables while holding both rq->lock and
++	 * p->pi_lock.
++	 *
++	 * HOWEVER, it magically works, because ttwu() is the only code that
++	 * accesses these variables under p->pi_lock and only does so after
++	 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule()
++	 * before finish_task().
++	 *
++	 * XXX do further audits, this smells like something putrid.
++	 */
++	SCHED_WARN_ON(!p->on_cpu);
++	p->cpus_ptr = new_mask;
++}
 +
 +void migrate_disable(void)
 +{
 +	struct task_struct *p = current;
++	int cpu;
 +
 +	if (p->migration_disabled) {
 +		p->migration_disabled++;
@@ -1999,16 +2005,18 @@ index 000000000000..f69ed4d89395
 +	}
 +
 +	preempt_disable();
-+	this_rq()->nr_pinned++;
-+	p->migration_disabled = 1;
-+	p->migration_flags &= ~MDF_FORCE_ENABLED;
-+
-+	/*
-+	 * Violates locking rules! see comment in __do_set_cpus_allowed().
-+	 */
-+	if (p->cpus_ptr == &p->cpus_mask)
-+		__do_set_cpus_allowed(p, cpumask_of(smp_processor_id()), SCA_MIGRATE_DISABLE);
++	cpu = smp_processor_id();
++	if (cpumask_test_cpu(cpu, &p->cpus_mask)) {
++		cpu_rq(cpu)->nr_pinned++;
++		p->migration_disabled = 1;
++		p->migration_flags &= ~MDF_FORCE_ENABLED;
 +
++		/*
++		 * Violates locking rules! see comment in __do_set_cpus_ptr().
++		 */
++		if (p->cpus_ptr == &p->cpus_mask)
++			__do_set_cpus_ptr(p, cpumask_of(cpu));
++	}
 +	preempt_enable();
 +}
 +EXPORT_SYMBOL_GPL(migrate_disable);
@@ -2035,7 +2043,7 @@ index 000000000000..f69ed4d89395
 +	 */
 +	WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &p->cpus_mask));
 +	if (p->cpus_ptr != &p->cpus_mask)
-+		__do_set_cpus_allowed(p, &p->cpus_mask, SCA_MIGRATE_ENABLE);
++		__do_set_cpus_ptr(p, &p->cpus_mask);
 +	/*
 +	 * Mustn't clear migration_disabled() until cpus_ptr points back at the
 +	 * regular cpus_mask, otherwise things that race (eg.
@@ -2188,43 +2196,22 @@ index 000000000000..f69ed4d89395
 +}
 +
 +static inline void
-+set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags)
++set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
 +{
-+	if (flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) {
-+		p->cpus_ptr = new_mask;
-+		return;
-+	}
-+
 +	cpumask_copy(&p->cpus_mask, new_mask);
 +	p->nr_cpus_allowed = cpumask_weight(new_mask);
 +}
 +
 +static void
-+__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags)
++__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 +{
-+	/*
-+	 * This here violates the locking rules for affinity, since we're only
-+	 * supposed to change these variables while holding both rq->lock and
-+	 * p->pi_lock.
-+	 *
-+	 * HOWEVER, it magically works, because ttwu() is the only code that
-+	 * accesses these variables under p->pi_lock and only does so after
-+	 * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule()
-+	 * before finish_task().
-+	 *
-+	 * XXX do further audits, this smells like something putrid.
-+	 */
-+	if (flags & (SCA_MIGRATE_DISABLE | SCA_MIGRATE_ENABLE))
-+		SCHED_WARN_ON(!p->on_cpu);
-+	else
-+		lockdep_assert_held(&p->pi_lock);
-+
-+	set_cpus_allowed_common(p, new_mask, flags);
++	lockdep_assert_held(&p->pi_lock);
++	set_cpus_allowed_common(p, new_mask);
 +}
 +
 +void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 +{
-+	__do_set_cpus_allowed(p, new_mask, 0);
++	__do_set_cpus_allowed(p, new_mask);
 +}
 +
 +#endif
@@ -2469,7 +2456,7 @@ index 000000000000..f69ed4d89395
 +{
 +	cpumask_t chk_mask, tmp;
 +
-+	if (unlikely(!cpumask_and(&chk_mask, p->cpus_ptr, cpu_online_mask)))
++	if (unlikely(!cpumask_and(&chk_mask, p->cpus_ptr, cpu_active_mask)))
 +		return select_fallback_rq(task_cpu(p), p);
 +
 +	if (
@@ -2583,15 +2570,15 @@ index 000000000000..f69ed4d89395
 +		goto out;
 +	}
 +
-+	__do_set_cpus_allowed(p, new_mask, flags);
++	__do_set_cpus_allowed(p, new_mask);
 +
 +	/* Can the task run on the task's current CPU? If so, we're done */
 +	if (cpumask_test_cpu(task_cpu(p), new_mask))
 +		goto out;
 +
 +	if (p->migration_disabled) {
-+		if (p->cpus_ptr != &p->cpus_mask)
-+			__do_set_cpus_allowed(p, &p->cpus_mask, SCA_MIGRATE_ENABLE);
++		if (likely(p->cpus_ptr != &p->cpus_mask))
++			__do_set_cpus_ptr(p, &p->cpus_mask);
 +		p->migration_disabled = 0;
 +		p->migration_flags |= MDF_FORCE_ENABLED;
 +		/* When p is migrate_disabled, rq->lock should be held */
@@ -4270,6 +4257,10 @@ index 000000000000..f69ed4d89395
 +	if (cpumask_empty(&sched_sg_idle_mask))
 +		return;
 +
++	/* exit when cpu is offline */
++	if (unlikely(!rq->online))
++		return;
++
 +	cpu = cpu_of(rq);
 +	/*
 +	 * Only cpu in sibling idle group will do the checking and then
@@ -4653,15 +4644,13 @@ index 000000000000..f69ed4d89395
 +
 +			if ((nr_migrated = migrate_pending_tasks(src_rq, rq, cpu))) {
 +				src_rq->nr_running -= nr_migrated;
-+#ifdef CONFIG_SMP
 +				if (src_rq->nr_running < 2)
 +					cpumask_clear_cpu(i, &sched_rq_pending_mask);
-+#endif
++
 +				rq->nr_running += nr_migrated;
-+#ifdef CONFIG_SMP
 +				if (rq->nr_running > 1)
 +					cpumask_set_cpu(cpu, &sched_rq_pending_mask);
-+#endif
++
 +				update_sched_rq_watermark(rq);
 +				cpufreq_update_util(rq, 0);
 +
@@ -6921,7 +6910,7 @@ index 000000000000..f69ed4d89395
 +	 *
 +	 * And since this is boot we can forgo the serialisation.
 +	 */
-+	set_cpus_allowed_common(idle, cpumask_of(cpu), 0);
++	set_cpus_allowed_common(idle, cpumask_of(cpu));
 +#endif
 +
 +	/* Silence PROVE_RCU */
@@ -8943,7 +8932,7 @@ index 000000000000..7c71f1141d00
 +		boost_task(p);
 +}
 diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
-index 50cbad89f7fa..fb703fd370fd 100644
+index 50cbad89f7fa..41946f19468b 100644
 --- a/kernel/sched/cpufreq_schedutil.c
 +++ b/kernel/sched/cpufreq_schedutil.c
 @@ -57,6 +57,13 @@ struct sugov_cpu {
@@ -9063,25 +9052,16 @@ index 50cbad89f7fa..fb703fd370fd 100644
  	if (ret) {
  		kthread_stop(thread);
  		pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
-@@ -835,6 +903,7 @@ struct cpufreq_governor *cpufreq_default_governor(void)
- cpufreq_governor_init(schedutil_gov);
- 
+@@ -837,7 +905,9 @@ cpufreq_governor_init(schedutil_gov);
  #ifdef CONFIG_ENERGY_MODEL
-+#ifndef CONFIG_SCHED_ALT
  static void rebuild_sd_workfn(struct work_struct *work)
  {
++#ifndef CONFIG_SCHED_ALT
  	rebuild_sched_domains_energy();
-@@ -858,4 +927,10 @@ void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
- 	}
- 
++#endif /* CONFIG_SCHED_ALT */
  }
-+#else /* CONFIG_SCHED_ALT */
-+void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
-+				  struct cpufreq_governor *old_gov)
-+{
-+}
-+#endif
- #endif
+ static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
+ 
 diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
 index 5f611658eeab..631276f56ba0 100644
 --- a/kernel/sched/cputime.c
@@ -9802,23 +9782,4 @@ index 73ef12092250..24bf8ef1249a 100644
 +#endif
  	};
  	struct wakeup_test_data *x = data;
-diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
-index bc722a476..26a33f76b 100644
---- a/kernel/sched/pelt.h
-+++ b/kernel/sched/pelt.h
-@@ -44,6 +44,7 @@ static inline u32 get_pelt_divider(struct sched_avg *avg)
- 	return LOAD_AVG_MAX - 1024 + avg->period_contrib;
- }
  
-+#ifndef CONFIG_SCHED_ALT
- static inline void cfs_se_util_change(struct sched_avg *avg)
- {
- 	unsigned int enqueued;
-@@ -61,7 +62,6 @@ static inline void cfs_se_util_change(struct sched_avg *avg)
- 	WRITE_ONCE(avg->util_est.enqueued, enqueued);
- }
- 
--#ifndef CONFIG_SCHED_ALT
- /*
-  * The clock_pelt scales the time to reflect the effective amount of
-  * computation done during the running delta time but then sync back to
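
An aside on the cpufreq_schedutil.c hunk earlier in this patch: r0 compiled the CONFIG_ENERGY_MODEL helpers out wholesale under CONFIG_SCHED_ALT and supplied an empty sched_cpufreq_governor_change() stub, while r1 keeps rebuild_sd_workfn() defined in every configuration and guards only its body, so the DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn) initializer that follows it always has a function to reference. A compilable schematic of that "keep the symbol, guard the body" pattern; the rebuild routine here is a stand-in stub and the work_struct parameter is omitted:

/* guarded-body schematic; rebuild_sched_domains_energy() below is a
 * local stub, not the real kernel routine */
static void rebuild_sched_domains_energy(void)
{
	/* the CFS-only sched-domain rebuild would happen here */
}

static void rebuild_sd_workfn(void)
{
#ifndef CONFIG_SCHED_ALT
	rebuild_sched_domains_energy();   /* compiled out for BMQ/PDS builds */
#endif
}

int main(void)
{
	rebuild_sd_workfn();              /* callable under either config */
	return 0;
}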

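The new 5022 patch below ships, as a standalone file, the same pelt.h change that the trailing hunk above drops from 5020: the #ifndef CONFIG_SCHED_ALT guard is opened one function earlier, so that cfs_se_util_change(), which depends on CFS-side util_est machinery not available in SCHED_ALT builds, is compiled out as well. A compilable schematic of the guard move, with stand-in definitions rather than the real kernel ones:

/* guard.c: why moving the #ifndef up fixes the SCHED_ALT build.
 * Try: gcc -c guard.c && gcc -DCONFIG_SCHED_ALT -c guard.c */
struct sched_avg {
#ifndef CONFIG_SCHED_ALT
	unsigned int util_est_enqueued;  /* present only in default builds */
#endif
	unsigned long period_contrib;
};

#ifndef CONFIG_SCHED_ALT                 /* the fix opens the guard here... */
static inline void cfs_se_util_change(struct sched_avg *avg)
{
	avg->util_est_enqueued = 0;      /* would not compile under
	                                  * CONFIG_SCHED_ALT if this sat
	                                  * outside the guard */
}
#endif                                   /* ...so this helper vanishes too */

int main(void)
{
	struct sched_avg avg = { 0 };
#ifndef CONFIG_SCHED_ALT
	cfs_se_util_change(&avg);
#endif
	return (int)avg.period_contrib;
}

With the guard in its old, lower position the second gcc invocation fails on the missing util_est field, which is the class of error the split-out fix addresses.
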
diff --git a/5022_BMQ-and-PDS-compilation-fix.patch b/5022_BMQ-and-PDS-compilation-fix.patch
new file mode 100644
index 0000000..f59ed5c
--- /dev/null
+++ b/5022_BMQ-and-PDS-compilation-fix.patch
@@ -0,0 +1,33 @@
+From b2dc217bab541a5e737b52137f1bcce0b1cc2ed5 Mon Sep 17 00:00:00 2001
+From: Piotr Gorski <lucjan.lucjanov@gmail.com>
+Date: Mon, 14 Jun 2021 15:46:03 +0200
+Subject: [PATCH] prjc: fix compilation error
+
+Signed-off-by: Piotr Gorski <lucjan.lucjanov@gmail.com>
+---
+ kernel/sched/pelt.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
+index bc722a476..26a33f76b 100644
+--- a/kernel/sched/pelt.h
++++ b/kernel/sched/pelt.h
+@@ -44,6 +44,7 @@ static inline u32 get_pelt_divider(struct sched_avg *avg)
+ 	return LOAD_AVG_MAX - 1024 + avg->period_contrib;
+ }
+ 
++#ifndef CONFIG_SCHED_ALT
+ static inline void cfs_se_util_change(struct sched_avg *avg)
+ {
+ 	unsigned int enqueued;
+@@ -61,7 +62,6 @@ static inline void cfs_se_util_change(struct sched_avg *avg)
+ 	WRITE_ONCE(avg->util_est.enqueued, enqueued);
+ }
+ 
+-#ifndef CONFIG_SCHED_ALT
+ /*
+  * The clock_pelt scales the time to reflect the effective amount of
+  * computation done during the running delta time but then sync back to
+-- 
+2.32.0
+

