Commit 904cbab7 authored by Matthew Wilcox (Oracle), committed by Peter Zijlstra

sched: Make const-safe

With a modified container_of() that preserves constness, the compiler
finds some pointers which should have been marked as const.  task_of()
also needs to become const-preserving for the !FAIR_GROUP_SCHED case so
that cfs_rq_of() can take a const argument.  No change to generated code.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20221212144946.2657785-1-willy@infradead.org
parent b344b8f2
...@@ -152,7 +152,7 @@ __read_mostly int scheduler_running; ...@@ -152,7 +152,7 @@ __read_mostly int scheduler_running;
DEFINE_STATIC_KEY_FALSE(__sched_core_enabled); DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);
/* kernel prio, less is more */ /* kernel prio, less is more */
static inline int __task_prio(struct task_struct *p) static inline int __task_prio(const struct task_struct *p)
{ {
if (p->sched_class == &stop_sched_class) /* trumps deadline */ if (p->sched_class == &stop_sched_class) /* trumps deadline */
return -2; return -2;
...@@ -174,7 +174,8 @@ static inline int __task_prio(struct task_struct *p) ...@@ -174,7 +174,8 @@ static inline int __task_prio(struct task_struct *p)
*/ */
/* real prio, less is less */ /* real prio, less is less */
static inline bool prio_less(struct task_struct *a, struct task_struct *b, bool in_fi) static inline bool prio_less(const struct task_struct *a,
const struct task_struct *b, bool in_fi)
{ {
int pa = __task_prio(a), pb = __task_prio(b); int pa = __task_prio(a), pb = __task_prio(b);
...@@ -194,7 +195,8 @@ static inline bool prio_less(struct task_struct *a, struct task_struct *b, bool ...@@ -194,7 +195,8 @@ static inline bool prio_less(struct task_struct *a, struct task_struct *b, bool
return false; return false;
} }
static inline bool __sched_core_less(struct task_struct *a, struct task_struct *b) static inline bool __sched_core_less(const struct task_struct *a,
const struct task_struct *b)
{ {
if (a->core_cookie < b->core_cookie) if (a->core_cookie < b->core_cookie)
return true; return true;
......
...@@ -468,7 +468,7 @@ is_same_group(struct sched_entity *se, struct sched_entity *pse) ...@@ -468,7 +468,7 @@ is_same_group(struct sched_entity *se, struct sched_entity *pse)
return NULL; return NULL;
} }
static inline struct sched_entity *parent_entity(struct sched_entity *se) static inline struct sched_entity *parent_entity(const struct sched_entity *se)
{ {
return se->parent; return se->parent;
} }
...@@ -595,8 +595,8 @@ static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime) ...@@ -595,8 +595,8 @@ static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
return min_vruntime; return min_vruntime;
} }
static inline bool entity_before(struct sched_entity *a, static inline bool entity_before(const struct sched_entity *a,
struct sched_entity *b) const struct sched_entity *b)
{ {
return (s64)(a->vruntime - b->vruntime) < 0; return (s64)(a->vruntime - b->vruntime) < 0;
} }
...@@ -11852,7 +11852,8 @@ static inline void task_tick_core(struct rq *rq, struct task_struct *curr) ...@@ -11852,7 +11852,8 @@ static inline void task_tick_core(struct rq *rq, struct task_struct *curr)
/* /*
* se_fi_update - Update the cfs_rq->min_vruntime_fi in a CFS hierarchy if needed. * se_fi_update - Update the cfs_rq->min_vruntime_fi in a CFS hierarchy if needed.
*/ */
static void se_fi_update(struct sched_entity *se, unsigned int fi_seq, bool forceidle) static void se_fi_update(const struct sched_entity *se, unsigned int fi_seq,
bool forceidle)
{ {
for_each_sched_entity(se) { for_each_sched_entity(se) {
struct cfs_rq *cfs_rq = cfs_rq_of(se); struct cfs_rq *cfs_rq = cfs_rq_of(se);
...@@ -11877,11 +11878,12 @@ void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi) ...@@ -11877,11 +11878,12 @@ void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi)
se_fi_update(se, rq->core->core_forceidle_seq, in_fi); se_fi_update(se, rq->core->core_forceidle_seq, in_fi);
} }
bool cfs_prio_less(struct task_struct *a, struct task_struct *b, bool in_fi) bool cfs_prio_less(const struct task_struct *a, const struct task_struct *b,
bool in_fi)
{ {
struct rq *rq = task_rq(a); struct rq *rq = task_rq(a);
struct sched_entity *sea = &a->se; const struct sched_entity *sea = &a->se;
struct sched_entity *seb = &b->se; const struct sched_entity *seb = &b->se;
struct cfs_rq *cfs_rqa; struct cfs_rq *cfs_rqa;
struct cfs_rq *cfs_rqb; struct cfs_rq *cfs_rqb;
s64 delta; s64 delta;
......
...@@ -248,7 +248,7 @@ static inline void update_avg(u64 *avg, u64 sample) ...@@ -248,7 +248,7 @@ static inline void update_avg(u64 *avg, u64 sample)
#define SCHED_DL_FLAGS (SCHED_FLAG_RECLAIM | SCHED_FLAG_DL_OVERRUN | SCHED_FLAG_SUGOV) #define SCHED_DL_FLAGS (SCHED_FLAG_RECLAIM | SCHED_FLAG_DL_OVERRUN | SCHED_FLAG_SUGOV)
static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se) static inline bool dl_entity_is_special(const struct sched_dl_entity *dl_se)
{ {
#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL #ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
return unlikely(dl_se->flags & SCHED_FLAG_SUGOV); return unlikely(dl_se->flags & SCHED_FLAG_SUGOV);
...@@ -260,8 +260,8 @@ static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se) ...@@ -260,8 +260,8 @@ static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se)
/* /*
* Tells if entity @a should preempt entity @b. * Tells if entity @a should preempt entity @b.
*/ */
static inline bool static inline bool dl_entity_preempt(const struct sched_dl_entity *a,
dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b) const struct sched_dl_entity *b)
{ {
return dl_entity_is_special(a) || return dl_entity_is_special(a) ||
dl_time_before(a->deadline, b->deadline); dl_time_before(a->deadline, b->deadline);
...@@ -1244,7 +1244,8 @@ static inline raw_spinlock_t *__rq_lockp(struct rq *rq) ...@@ -1244,7 +1244,8 @@ static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
return &rq->__lock; return &rq->__lock;
} }
/* Core-scheduling priority comparison; neither task is modified. */
bool cfs_prio_less(const struct task_struct *a, const struct task_struct *b,
		   bool fi);
/* /*
* Helpers to check if the CPU's core cookie matches with the task's cookie * Helpers to check if the CPU's core cookie matches with the task's cookie
...@@ -1423,7 +1424,7 @@ static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) ...@@ -1423,7 +1424,7 @@ static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
} }
/* runqueue on which this entity is (to be) queued */ /* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) static inline struct cfs_rq *cfs_rq_of(const struct sched_entity *se)
{ {
return se->cfs_rq; return se->cfs_rq;
} }
...@@ -1436,19 +1437,16 @@ static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) ...@@ -1436,19 +1437,16 @@ static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
#else #else
/*
 * Without CONFIG_FAIR_GROUP_SCHED every sched_entity is embedded directly
 * in its task_struct.  A const-preserving container_of() macro replaces
 * the old inline function so that cfs_rq_of() can take a const argument:
 * a const *_se yields a const task_struct pointer, a non-const one does not.
 */
#define task_of(_se)	container_of(_se, struct task_struct, se)
static inline struct cfs_rq *task_cfs_rq(struct task_struct *p) static inline struct cfs_rq *task_cfs_rq(const struct task_struct *p)
{ {
return &task_rq(p)->cfs; return &task_rq(p)->cfs;
} }
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se) static inline struct cfs_rq *cfs_rq_of(const struct sched_entity *se)
{ {
struct task_struct *p = task_of(se); const struct task_struct *p = task_of(se);
struct rq *rq = task_rq(p); struct rq *rq = task_rq(p);
return &rq->cfs; return &rq->cfs;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment