Commit 713a2e21 authored by Waiman Long's avatar Waiman Long Committed by Peter Zijlstra

sched: Introduce affinity_context

In order to prepare for passing additional data through the
affinity call-chains, convert the mask and flags arguments into a
structure.
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20220922180041.1768141-5-longman@redhat.com
parent 5584e8ac
This diff is collapsed.
...@@ -2485,8 +2485,7 @@ static void task_woken_dl(struct rq *rq, struct task_struct *p) ...@@ -2485,8 +2485,7 @@ static void task_woken_dl(struct rq *rq, struct task_struct *p)
} }
static void set_cpus_allowed_dl(struct task_struct *p, static void set_cpus_allowed_dl(struct task_struct *p,
const struct cpumask *new_mask, struct affinity_context *ctx)
u32 flags)
{ {
struct root_domain *src_rd; struct root_domain *src_rd;
struct rq *rq; struct rq *rq;
...@@ -2501,7 +2500,7 @@ static void set_cpus_allowed_dl(struct task_struct *p, ...@@ -2501,7 +2500,7 @@ static void set_cpus_allowed_dl(struct task_struct *p,
* update. We already made space for us in the destination * update. We already made space for us in the destination
* domain (see cpuset_can_attach()). * domain (see cpuset_can_attach()).
*/ */
if (!cpumask_intersects(src_rd->span, new_mask)) { if (!cpumask_intersects(src_rd->span, ctx->new_mask)) {
struct dl_bw *src_dl_b; struct dl_bw *src_dl_b;
src_dl_b = dl_bw_of(cpu_of(rq)); src_dl_b = dl_bw_of(cpu_of(rq));
...@@ -2515,7 +2514,7 @@ static void set_cpus_allowed_dl(struct task_struct *p, ...@@ -2515,7 +2514,7 @@ static void set_cpus_allowed_dl(struct task_struct *p,
raw_spin_unlock(&src_dl_b->lock); raw_spin_unlock(&src_dl_b->lock);
} }
set_cpus_allowed_common(p, new_mask, flags); set_cpus_allowed_common(p, ctx);
} }
/* Assumes rq->lock is held */ /* Assumes rq->lock is held */
......
...@@ -2145,6 +2145,11 @@ extern const u32 sched_prio_to_wmult[40]; ...@@ -2145,6 +2145,11 @@ extern const u32 sched_prio_to_wmult[40];
#define RETRY_TASK ((void *)-1UL) #define RETRY_TASK ((void *)-1UL)
/*
 * Bundles the arguments of a task-affinity change into one object so
 * additional data can later be threaded through the set_cpus_allowed
 * call-chain without growing every function signature (see the
 * sched_class ->set_cpus_allowed hook and set_cpus_allowed_common()).
 */
struct affinity_context {
const struct cpumask *new_mask; /* requested set of CPUs the task may run on */
unsigned int flags; /* affinity-change modifier flags — presumably SCA_* values; confirm against callers */
};
struct sched_class { struct sched_class {
#ifdef CONFIG_UCLAMP_TASK #ifdef CONFIG_UCLAMP_TASK
...@@ -2173,9 +2178,7 @@ struct sched_class { ...@@ -2173,9 +2178,7 @@ struct sched_class {
void (*task_woken)(struct rq *this_rq, struct task_struct *task); void (*task_woken)(struct rq *this_rq, struct task_struct *task);
void (*set_cpus_allowed)(struct task_struct *p, void (*set_cpus_allowed)(struct task_struct *p, struct affinity_context *ctx);
const struct cpumask *newmask,
u32 flags);
void (*rq_online)(struct rq *rq); void (*rq_online)(struct rq *rq);
void (*rq_offline)(struct rq *rq); void (*rq_offline)(struct rq *rq);
...@@ -2286,7 +2289,7 @@ extern void update_group_capacity(struct sched_domain *sd, int cpu); ...@@ -2286,7 +2289,7 @@ extern void update_group_capacity(struct sched_domain *sd, int cpu);
extern void trigger_load_balance(struct rq *rq); extern void trigger_load_balance(struct rq *rq);
extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags); extern void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx);
static inline struct task_struct *get_push_task(struct rq *rq) static inline struct task_struct *get_push_task(struct rq *rq)
{ {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment