Commit b0fb2938 authored by Linus Torvalds

Merge tag 'x86_cache_for_v5.12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 resource control updates from Borislav Petkov:
 "Avoid IPI-ing a task in certain cases and prevent load/store tearing
  when accessing a task's resctrl fields concurrently"

* tag 'x86_cache_for_v5.12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/resctrl: Apply READ_ONCE/WRITE_ONCE to task_struct.{rmid,closid}
  x86/resctrl: Use task_curr() instead of task_struct->on_cpu to prevent unnecessary IPI
  x86/resctrl: Add printf attribute to log function
parents 0570b693 6d3b47dd
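
A note on the READ_ONCE/WRITE_ONCE patch below: task_struct's closid/rmid are written by one CPU while another CPU reads them locklessly at context switch, so plain accesses let the compiler tear, fuse, or re-read them. A minimal kernel-style sketch of the pattern (the struct and function names here are hypothetical, not resctrl's):

    #include <linux/compiler.h>	/* READ_ONCE()/WRITE_ONCE() */
    #include <linux/types.h>

    /* Hypothetical fields written by one CPU, read locklessly by another. */
    struct demo_ids {
    	u32 closid;
    	u32 rmid;
    };

    /* Writer: WRITE_ONCE() keeps the compiler from splitting the store
     * into smaller writes or deferring it in a register. */
    static void demo_set(struct demo_ids *d, u32 closid, u32 rmid)
    {
    	WRITE_ONCE(d->closid, closid);
    	WRITE_ONCE(d->rmid, rmid);
    }

    /* Reader: READ_ONCE() samples the field exactly once, as one load,
     * so a concurrent demo_set() can never yield a half-updated value. */
    static u32 demo_get_closid(struct demo_ids *d)
    {
    	return READ_ONCE(d->closid);
    }
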
@@ -56,19 +56,22 @@ static void __resctrl_sched_in(void)
 	struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);
 	u32 closid = state->default_closid;
 	u32 rmid = state->default_rmid;
+	u32 tmp;
 
 	/*
 	 * If this task has a closid/rmid assigned, use it.
 	 * Else use the closid/rmid assigned to this cpu.
 	 */
 	if (static_branch_likely(&rdt_alloc_enable_key)) {
-		if (current->closid)
-			closid = current->closid;
+		tmp = READ_ONCE(current->closid);
+		if (tmp)
+			closid = tmp;
 	}
 
 	if (static_branch_likely(&rdt_mon_enable_key)) {
-		if (current->rmid)
-			rmid = current->rmid;
+		tmp = READ_ONCE(current->rmid);
+		if (tmp)
+			rmid = tmp;
 	}
 
 	if (closid != state->cur_closid || rmid != state->cur_rmid) {
...
@@ -572,6 +572,7 @@ union cpuid_0x10_x_edx {
 void rdt_last_cmd_clear(void);
 void rdt_last_cmd_puts(const char *s);
+__printf(1, 2)
 void rdt_last_cmd_printf(const char *fmt, ...);
 void rdt_ctrl_update(void *arg);
...
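
The __printf() added above is the kernel's shorthand for GCC's format attribute: argument 1 is the printf-style format string, checked against the variadic arguments starting at argument 2. A hedged sketch with a hypothetical logger:

    #include <linux/compiler_attributes.h>	/* __printf(fmt_idx, va_idx) */

    /* Hypothetical logger; the attribute turns on -Wformat checking of
     * callers, so e.g. demo_log("%s", 42) now warns at compile time. */
    __printf(1, 2)
    void demo_log(const char *fmt, ...);
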
@@ -563,11 +563,11 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
 	 */
 	if (rdtgrp->type == RDTCTRL_GROUP) {
-		tsk->closid = rdtgrp->closid;
-		tsk->rmid = rdtgrp->mon.rmid;
+		WRITE_ONCE(tsk->closid, rdtgrp->closid);
+		WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid);
 	} else if (rdtgrp->type == RDTMON_GROUP) {
 		if (rdtgrp->mon.parent->closid == tsk->closid) {
-			tsk->rmid = rdtgrp->mon.rmid;
+			WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid);
 		} else {
 			rdt_last_cmd_puts("Can't move task to different control group\n");
 			return -EINVAL;
...
@@ -2310,22 +2310,18 @@ static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
 	for_each_process_thread(p, t) {
 		if (!from || is_closid_match(t, from) ||
 		    is_rmid_match(t, from)) {
-			t->closid = to->closid;
-			t->rmid = to->mon.rmid;
+			WRITE_ONCE(t->closid, to->closid);
+			WRITE_ONCE(t->rmid, to->mon.rmid);
 
-#ifdef CONFIG_SMP
 			/*
-			 * This is safe on x86 w/o barriers as the ordering
-			 * of writing to task_cpu() and t->on_cpu is
-			 * reverse to the reading here. The detection is
-			 * inaccurate as tasks might move or schedule
-			 * before the smp function call takes place. In
-			 * such a case the function call is pointless, but
+			 * If the task is on a CPU, set the CPU in the mask.
+			 * The detection is inaccurate as tasks might move or
+			 * schedule before the smp function call takes place.
+			 * In such a case the function call is pointless, but
 			 * there is no other side effect.
 			 */
-			if (mask && t->on_cpu)
+			if (IS_ENABLED(CONFIG_SMP) && mask && task_curr(t))
 				cpumask_set_cpu(task_cpu(t), mask);
-#endif
 		}
 	}
 	read_unlock(&tasklist_lock);
...
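
The rdt_move_group_tasks() hunk swaps the open-coded t->on_cpu test for task_curr(), which is true only while the task is actually the running task of its CPU; t->on_cpu also stays set while a task is being scheduled out, so testing it could IPI CPUs needlessly. Since task_curr() is defined on !SMP builds too, the #ifdef goes away. A sketch of the collect-then-IPI pattern (demo_update_cpu and demo_kick_running are hypothetical names; the kernel APIs are real):

    #include <linux/cpumask.h>
    #include <linux/sched.h>		/* task_curr(), task_cpu() */
    #include <linux/sched/signal.h>	/* for_each_process_thread() */
    #include <linux/sched/task.h>	/* tasklist_lock */
    #include <linux/smp.h>		/* on_each_cpu_mask() */

    /* Hypothetical per-CPU callback; resctrl's real one re-loads the
     * CPU's closid/rmid into the PQR_ASSOC MSR. */
    static void demo_update_cpu(void *info)
    {
    	/* per-CPU state update runs here, on each targeted CPU */
    }

    /* Collect the CPUs whose *currently running* task matters, then IPI
     * only those. The snapshot can go stale before the IPIs land, which
     * is tolerated: a spurious callback is pointless but harmless. */
    static void demo_kick_running(struct cpumask *mask)
    {
    	struct task_struct *p, *t;

    	read_lock(&tasklist_lock);
    	for_each_process_thread(p, t) {
    		/* task_curr() compiles on !SMP too, so no #ifdef needed */
    		if (IS_ENABLED(CONFIG_SMP) && task_curr(t))
    			cpumask_set_cpu(task_cpu(t), mask);
    	}
    	read_unlock(&tasklist_lock);

    	on_each_cpu_mask(mask, demo_update_cpu, NULL, 1);
    }
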