Commit a9fcf862 authored by Vikas Shivappa's avatar Vikas Shivappa Committed by Thomas Gleixner

x86/intel_rdt/cqm: Add cpus file support

The cpus file is extended to support resource monitoring. This is used
to override the RMID of the default group when running on specific
CPUs. It works similarly to resource control. The "cpus" and
"cpus_list" files are present in the default group, ctrl_mon groups and
monitor groups.

Each "cpus" or "cpus_list" file reads a cpumask or list showing which
CPUs belong to the resource group. By default all online cpus belong to
the default root group. A CPU can be present in one "ctrl_mon" and one
"monitor" group simultaneously. They can be added to a resource group by
writing the CPU to the file. When a CPU is added to a ctrl_mon group it
is automatically removed from the previous ctrl_mon group. A CPU can be
added to a monitor group only if it is present in the parent ctrl_mon
group and when a CPU is added to a monitor group, it is automatically
removed from the previous monitor group. When CPUs go offline, they are
automatically removed from the ctrl_mon and monitor groups.
Signed-off-by: default avatarVikas Shivappa <vikas.shivappa@linux.intel.com>
Signed-off-by: default avatarThomas Gleixner <tglx@linutronix.de>
Cc: ravi.v.shankar@intel.com
Cc: tony.luck@intel.com
Cc: fenghua.yu@intel.com
Cc: peterz@infradead.org
Cc: eranian@google.com
Cc: vikas.shivappa@intel.com
Cc: ak@linux.intel.com
Cc: davidcc@google.com
Cc: reinette.chatre@intel.com
Link: http://lkml.kernel.org/r/1501017287-28083-18-git-send-email-vikas.shivappa@linux.intel.com
parent b09d981b
...@@ -181,15 +181,17 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of, ...@@ -181,15 +181,17 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of,
/* /*
* This is safe against intel_rdt_sched_in() called from __switch_to() * This is safe against intel_rdt_sched_in() called from __switch_to()
* because __switch_to() is executed with interrupts disabled. A local call * because __switch_to() is executed with interrupts disabled. A local call
 * from update_closid() is protected against __switch_to() because * from update_closid_rmid() is protected against __switch_to() because
* preemption is disabled. * preemption is disabled.
*/ */
static void update_cpu_closid(void *info) static void update_cpu_closid_rmid(void *info)
{ {
struct rdtgroup *r = info; struct rdtgroup *r = info;
if (r) if (r) {
this_cpu_write(rdt_cpu_default.closid, r->closid); this_cpu_write(rdt_cpu_default.closid, r->closid);
this_cpu_write(rdt_cpu_default.rmid, r->mon.rmid);
}
/* /*
* We cannot unconditionally write the MSR because the current * We cannot unconditionally write the MSR because the current
...@@ -205,20 +207,72 @@ static void update_cpu_closid(void *info) ...@@ -205,20 +207,72 @@ static void update_cpu_closid(void *info)
* Per task closids/rmids must have been set up before calling this function. * Per task closids/rmids must have been set up before calling this function.
*/ */
static void static void
update_closid(const struct cpumask *cpu_mask, struct rdtgroup *r) update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r)
{ {
int cpu = get_cpu(); int cpu = get_cpu();
if (cpumask_test_cpu(cpu, cpu_mask)) if (cpumask_test_cpu(cpu, cpu_mask))
update_cpu_closid(r); update_cpu_closid_rmid(r);
smp_call_function_many(cpu_mask, update_cpu_closid, r, 1); smp_call_function_many(cpu_mask, update_cpu_closid_rmid, r, 1);
put_cpu(); put_cpu();
} }
static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask, static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
cpumask_var_t tmpmask) cpumask_var_t tmpmask)
{ {
struct rdtgroup *r; struct rdtgroup *prgrp = rdtgrp->mon.parent, *crgrp;
struct list_head *head;
/* Check whether cpus belong to parent ctrl group */
cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask);
if (cpumask_weight(tmpmask))
return -EINVAL;
/* Check whether cpus are dropped from this group */
cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
if (cpumask_weight(tmpmask)) {
/* Give any dropped cpus to parent rdtgroup */
cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask);
update_closid_rmid(tmpmask, prgrp);
}
/*
* If we added cpus, remove them from previous group that owned them
* and update per-cpu rmid
*/
cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
if (cpumask_weight(tmpmask)) {
head = &prgrp->mon.crdtgrp_list;
list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
if (crgrp == rdtgrp)
continue;
cpumask_andnot(&crgrp->cpu_mask, &crgrp->cpu_mask,
tmpmask);
}
update_closid_rmid(tmpmask, rdtgrp);
}
/* Done pushing/pulling - update this group with new mask */
cpumask_copy(&rdtgrp->cpu_mask, newmask);
return 0;
}
static void cpumask_rdtgrp_clear(struct rdtgroup *r, struct cpumask *m)
{
struct rdtgroup *crgrp;
cpumask_andnot(&r->cpu_mask, &r->cpu_mask, m);
/* update the child mon group masks as well*/
list_for_each_entry(crgrp, &r->mon.crdtgrp_list, mon.crdtgrp_list)
cpumask_and(&crgrp->cpu_mask, &r->cpu_mask, &crgrp->cpu_mask);
}
static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
cpumask_var_t tmpmask, cpumask_var_t tmpmask1)
{
struct rdtgroup *r, *crgrp;
struct list_head *head;
/* Check whether cpus are dropped from this group */ /* Check whether cpus are dropped from this group */
cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask); cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
...@@ -230,33 +284,47 @@ static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask, ...@@ -230,33 +284,47 @@ static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
/* Give any dropped cpus to rdtgroup_default */ /* Give any dropped cpus to rdtgroup_default */
cpumask_or(&rdtgroup_default.cpu_mask, cpumask_or(&rdtgroup_default.cpu_mask,
&rdtgroup_default.cpu_mask, tmpmask); &rdtgroup_default.cpu_mask, tmpmask);
update_closid(tmpmask, &rdtgroup_default); update_closid_rmid(tmpmask, &rdtgroup_default);
} }
/* /*
* If we added cpus, remove them from previous group that owned them * If we added cpus, remove them from previous group and
* and update per-cpu closid * the prev group's child groups that owned them
* and update per-cpu closid/rmid.
*/ */
cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask); cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
if (cpumask_weight(tmpmask)) { if (cpumask_weight(tmpmask)) {
list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) { list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) {
if (r == rdtgrp) if (r == rdtgrp)
continue; continue;
cpumask_andnot(&r->cpu_mask, &r->cpu_mask, tmpmask); cpumask_and(tmpmask1, &r->cpu_mask, tmpmask);
if (cpumask_weight(tmpmask1))
cpumask_rdtgrp_clear(r, tmpmask1);
} }
update_closid(tmpmask, rdtgrp); update_closid_rmid(tmpmask, rdtgrp);
} }
/* Done pushing/pulling - update this group with new mask */ /* Done pushing/pulling - update this group with new mask */
cpumask_copy(&rdtgrp->cpu_mask, newmask); cpumask_copy(&rdtgrp->cpu_mask, newmask);
/*
* Clear child mon group masks since there is a new parent mask
* now and update the rmid for the cpus the child lost.
*/
head = &rdtgrp->mon.crdtgrp_list;
list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
cpumask_and(tmpmask, &rdtgrp->cpu_mask, &crgrp->cpu_mask);
update_closid_rmid(tmpmask, rdtgrp);
cpumask_clear(&crgrp->cpu_mask);
}
return 0; return 0;
} }
static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of, static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off) char *buf, size_t nbytes, loff_t off)
{ {
cpumask_var_t tmpmask, newmask; cpumask_var_t tmpmask, newmask, tmpmask1;
struct rdtgroup *rdtgrp; struct rdtgroup *rdtgrp;
int ret; int ret;
...@@ -269,6 +337,11 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of, ...@@ -269,6 +337,11 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
free_cpumask_var(tmpmask); free_cpumask_var(tmpmask);
return -ENOMEM; return -ENOMEM;
} }
if (!zalloc_cpumask_var(&tmpmask1, GFP_KERNEL)) {
free_cpumask_var(tmpmask);
free_cpumask_var(newmask);
return -ENOMEM;
}
rdtgrp = rdtgroup_kn_lock_live(of->kn); rdtgrp = rdtgroup_kn_lock_live(of->kn);
if (!rdtgrp) { if (!rdtgrp) {
...@@ -292,7 +365,9 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of, ...@@ -292,7 +365,9 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
} }
if (rdtgrp->type == RDTCTRL_GROUP) if (rdtgrp->type == RDTCTRL_GROUP)
ret = cpus_ctrl_write(rdtgrp, newmask, tmpmask); ret = cpus_ctrl_write(rdtgrp, newmask, tmpmask, tmpmask1);
else if (rdtgrp->type == RDTMON_GROUP)
ret = cpus_mon_write(rdtgrp, newmask, tmpmask);
else else
ret = -EINVAL; ret = -EINVAL;
...@@ -300,6 +375,7 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of, ...@@ -300,6 +375,7 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
rdtgroup_kn_unlock(of->kn); rdtgroup_kn_unlock(of->kn);
free_cpumask_var(tmpmask); free_cpumask_var(tmpmask);
free_cpumask_var(newmask); free_cpumask_var(newmask);
free_cpumask_var(tmpmask1);
return ret ?: nbytes; return ret ?: nbytes;
} }
...@@ -1113,7 +1189,7 @@ static void rmdir_all_sub(void) ...@@ -1113,7 +1189,7 @@ static void rmdir_all_sub(void)
} }
/* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */ /* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */
get_online_cpus(); get_online_cpus();
update_closid(cpu_online_mask, &rdtgroup_default); update_closid_rmid(cpu_online_mask, &rdtgroup_default);
put_online_cpus(); put_online_cpus();
kernfs_remove(kn_info); kernfs_remove(kn_info);
...@@ -1374,7 +1450,7 @@ static int rdtgroup_rmdir(struct kernfs_node *kn) ...@@ -1374,7 +1450,7 @@ static int rdtgroup_rmdir(struct kernfs_node *kn)
* task running on them. * task running on them.
*/ */
cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask); cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
update_closid(tmpmask, NULL); update_closid_rmid(tmpmask, NULL);
rdtgrp->flags = RDT_DELETED; rdtgrp->flags = RDT_DELETED;
closid_free(rdtgrp->closid); closid_free(rdtgrp->closid);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment