Commit a52fb43a authored by Linus Torvalds

Merge branch 'x86-cache-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 cache control updates from Borislav Petkov:

 - The generalization of the RDT code to accommodate the addition of
   AMD's very similar implementation of the cache monitoring feature.

   This entails a subsystem move into a separate and generic
   arch/x86/kernel/cpu/resctrl/ directory along with adding
   vendor-specific initialization and feature detection helpers.

   On top of that is the unification of user-visible strings, both in the
   resctrl filesystem error handling and Kconfig.

   Provided by Babu Moger and Sherry Hurwitz.

 - Code simplifications and error handling improvements by Reinette
   Chatre.

* 'x86-cache-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/resctrl: Fix rdt_find_domain() return value and checks
  x86/resctrl: Remove unnecessary check for cbm_validate()
  x86/resctrl: Use rdt_last_cmd_puts() where possible
  MAINTAINERS: Update resctrl filename patterns
  Documentation: Rename and update intel_rdt_ui.txt to resctrl_ui.txt
  x86/resctrl: Introduce AMD QOS feature
  x86/resctrl: Fixup the user-visible strings
  x86/resctrl: Add AMD's X86_FEATURE_MBA to the scattered CPUID features
  x86/resctrl: Rename the config option INTEL_RDT to RESCTRL
  x86/resctrl: Add vendor check for the MBA software controller
  x86/resctrl: Bring cbm_validate() into the resource structure
  x86/resctrl: Initialize the vendor-specific resource functions
  x86/resctrl: Move all the macros to resctrl/internal.h
  x86/resctrl: Re-arrange the RDT init code
  x86/resctrl: Rename the RDT functions and definitions
  x86/resctrl: Rename and move rdt files to a separate directory
parents 42b00f12 52eb7433
-User Interface for Resource Allocation in Intel Resource Director Technology
+User Interface for Resource Control feature
+Intel refers to this feature as Intel Resource Director Technology(Intel(R) RDT).
+AMD refers to this feature as AMD Platform Quality of Service(AMD QoS).
Copyright (C) 2016 Intel Corporation
@@ -6,8 +9,8 @@ Fenghua Yu <fenghua.yu@intel.com>
Tony Luck <tony.luck@intel.com>
Vikas Shivappa <vikas.shivappa@intel.com>
-This feature is enabled by the CONFIG_INTEL_RDT Kconfig and the
-X86 /proc/cpuinfo flag bits:
+This feature is enabled by the CONFIG_RESCTRL and the X86 /proc/cpuinfo
+flag bits:
RDT (Resource Director Technology) Allocation - "rdt_a"
CAT (Cache Allocation Technology) - "cat_l3", "cat_l2"
CDP (Code and Data Prioritization ) - "cdp_l3", "cdp_l2"
......
@@ -12717,9 +12717,9 @@ M: Fenghua Yu <fenghua.yu@intel.com>
M: Reinette Chatre <reinette.chatre@intel.com>
L: linux-kernel@vger.kernel.org
S: Supported
-F: arch/x86/kernel/cpu/intel_rdt*
-F: arch/x86/include/asm/intel_rdt_sched.h
-F: Documentation/x86/intel_rdt*
+F: arch/x86/kernel/cpu/resctrl/
+F: arch/x86/include/asm/resctrl_sched.h
+F: Documentation/x86/resctrl*
READ-COPY UPDATE (RCU)
M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
......
@@ -444,15 +444,23 @@ config RETPOLINE
branches. Requires a compiler with -mindirect-branch=thunk-extern
support for full protection. The kernel may run slower.
-config INTEL_RDT
-bool "Intel Resource Director Technology support"
-depends on X86 && CPU_SUP_INTEL
+config RESCTRL
+bool "Resource Control support"
+depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD)
select KERNFS
help
-Select to enable resource allocation and monitoring which are
-sub-features of Intel Resource Director Technology(RDT). More
-information about RDT can be found in the Intel x86
-Architecture Software Developer Manual.
+Enable Resource Control support.
+Provide support for the allocation and monitoring of system resources
+usage by the CPU.
+Intel calls this Intel Resource Director Technology
+(Intel(R) RDT). More information about RDT can be found in the
+Intel x86 Architecture Software Developer Manual.
+AMD calls this AMD Platform Quality of Service (AMD QoS).
+More information about AMD QoS can be found in the AMD64 Technology
+Platform Quality of Service Extensions manual.
Say N if unsure.
......
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_INTEL_RDT_SCHED_H
-#define _ASM_X86_INTEL_RDT_SCHED_H
+#ifndef _ASM_X86_RESCTRL_SCHED_H
+#define _ASM_X86_RESCTRL_SCHED_H
-#ifdef CONFIG_INTEL_RDT
+#ifdef CONFIG_RESCTRL
#include <linux/sched.h>
#include <linux/jump_label.h>
@@ -10,7 +10,7 @@
#define IA32_PQR_ASSOC 0x0c8f
/**
-* struct intel_pqr_state - State cache for the PQR MSR
+* struct resctrl_pqr_state - State cache for the PQR MSR
* @cur_rmid: The cached Resource Monitoring ID
* @cur_closid: The cached Class Of Service ID
* @default_rmid: The user assigned Resource Monitoring ID
@@ -24,21 +24,21 @@
* The cache also helps to avoid pointless updates if the value does
* not change.
*/
-struct intel_pqr_state {
+struct resctrl_pqr_state {
u32 cur_rmid;
u32 cur_closid;
u32 default_rmid;
u32 default_closid;
};
-DECLARE_PER_CPU(struct intel_pqr_state, pqr_state);
+DECLARE_PER_CPU(struct resctrl_pqr_state, pqr_state);
DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);
/*
-* __intel_rdt_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR
+* __resctrl_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR
*
* Following considerations are made so that this has minimal impact
* on scheduler hot path:
@@ -51,9 +51,9 @@ DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);
* simple as possible.
* Must be called with preemption disabled.
*/
-static void __intel_rdt_sched_in(void)
+static void __resctrl_sched_in(void)
{
-struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
+struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);
u32 closid = state->default_closid;
u32 rmid = state->default_rmid;
@@ -78,16 +78,16 @@ static void __intel_rdt_sched_in(void)
}
}
-static inline void intel_rdt_sched_in(void)
+static inline void resctrl_sched_in(void)
{
if (static_branch_likely(&rdt_enable_key))
-__intel_rdt_sched_in();
+__resctrl_sched_in();
}
#else
-static inline void intel_rdt_sched_in(void) {}
+static inline void resctrl_sched_in(void) {}
-#endif /* CONFIG_INTEL_RDT */
+#endif /* CONFIG_RESCTRL */
-#endif /* _ASM_X86_INTEL_RDT_SCHED_H */
+#endif /* _ASM_X86_RESCTRL_SCHED_H */
@@ -36,13 +36,10 @@ obj-$(CONFIG_CPU_SUP_CENTAUR) += centaur.o
obj-$(CONFIG_CPU_SUP_TRANSMETA_32) += transmeta.o
obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o
-obj-$(CONFIG_INTEL_RDT) += intel_rdt.o intel_rdt_rdtgroup.o intel_rdt_monitor.o
-obj-$(CONFIG_INTEL_RDT) += intel_rdt_ctrlmondata.o intel_rdt_pseudo_lock.o
-CFLAGS_intel_rdt_pseudo_lock.o = -I$(src)
obj-$(CONFIG_X86_MCE) += mcheck/
obj-$(CONFIG_MTRR) += mtrr/
obj-$(CONFIG_MICROCODE) += microcode/
+obj-$(CONFIG_RESCTRL) += resctrl/
obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o
......
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_RESCTRL) += core.o rdtgroup.o monitor.o
+obj-$(CONFIG_RESCTRL) += ctrlmondata.o pseudo_lock.o
+CFLAGS_pseudo_lock.o = -I$(src)
@@ -27,7 +27,54 @@
#include <linux/kernfs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
-#include "intel_rdt.h"
+#include "internal.h"
+/*
+ * Check whether MBA bandwidth percentage value is correct. The value is
+ * checked against the minimum and maximum bandwidth values specified by
+ * the hardware. The allocated bandwidth percentage is rounded to the next
+ * control step available on the hardware.
+ */
+static bool bw_validate_amd(char *buf, unsigned long *data,
+struct rdt_resource *r)
+{
+unsigned long bw;
+int ret;
+ret = kstrtoul(buf, 10, &bw);
+if (ret) {
+rdt_last_cmd_printf("Non-decimal digit in MB value %s\n", buf);
+return false;
+}
+if (bw < r->membw.min_bw || bw > r->default_ctrl) {
+rdt_last_cmd_printf("MB value %ld out of range [%d,%d]\n", bw,
+r->membw.min_bw, r->default_ctrl);
+return false;
+}
+*data = roundup(bw, (unsigned long)r->membw.bw_gran);
+return true;
+}
+int parse_bw_amd(struct rdt_parse_data *data, struct rdt_resource *r,
+struct rdt_domain *d)
+{
+unsigned long bw_val;
+if (d->have_new_ctrl) {
+rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
+return -EINVAL;
+}
+if (!bw_validate_amd(data->buf, &bw_val, r))
+return -EINVAL;
+d->new_ctrl = bw_val;
+d->have_new_ctrl = true;
+return 0;
+}
/*
* Check whether MBA bandwidth percentage value is correct. The value is
@@ -65,13 +112,13 @@ static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
return true;
}
-int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r,
+int parse_bw_intel(struct rdt_parse_data *data, struct rdt_resource *r,
struct rdt_domain *d)
{
unsigned long bw_val;
if (d->have_new_ctrl) {
-rdt_last_cmd_printf("duplicate domain %d\n", d->id);
+rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
return -EINVAL;
}
@@ -89,7 +136,7 @@ int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r,
* are allowed (e.g. FFFFH, 0FF0H, 003CH, etc.).
* Additionally Haswell requires at least two bits set.
*/
-static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
+bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r)
{
unsigned long first_bit, zero_bit, val;
unsigned int cbm_len = r->cache.cbm_len;
@@ -97,12 +144,12 @@ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
ret = kstrtoul(buf, 16, &val);
if (ret) {
-rdt_last_cmd_printf("non-hex character in mask %s\n", buf);
+rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf);
return false;
}
if (val == 0 || val > r->default_ctrl) {
-rdt_last_cmd_puts("mask out of range\n");
+rdt_last_cmd_puts("Mask out of range\n");
return false;
}
@@ -110,12 +157,12 @@ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);
if (find_next_bit(&val, cbm_len, zero_bit) < cbm_len) {
-rdt_last_cmd_printf("mask %lx has non-consecutive 1-bits\n", val);
+rdt_last_cmd_printf("The mask %lx has non-consecutive 1-bits\n", val);
return false;
}
if ((zero_bit - first_bit) < r->cache.min_cbm_bits) {
-rdt_last_cmd_printf("Need at least %d bits in mask\n",
+rdt_last_cmd_printf("Need at least %d bits in the mask\n",
r->cache.min_cbm_bits);
return false;
}
@@ -124,6 +171,30 @@ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
return true;
}
+/*
+ * Check whether a cache bit mask is valid. AMD allows non-contiguous
+ * bitmasks
+ */
+bool cbm_validate_amd(char *buf, u32 *data, struct rdt_resource *r)
+{
+unsigned long val;
+int ret;
+ret = kstrtoul(buf, 16, &val);
+if (ret) {
+rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf);
+return false;
+}
+if (val > r->default_ctrl) {
+rdt_last_cmd_puts("Mask out of range\n");
+return false;
+}
+*data = val;
+return true;
+}
/*
* Read one cache bit mask (hex). Check that it is valid for the current
* resource type.
@@ -135,7 +206,7 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
u32 cbm_val;
if (d->have_new_ctrl) {
-rdt_last_cmd_printf("duplicate domain %d\n", d->id);
+rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
return -EINVAL;
}
@@ -145,17 +216,17 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
*/
if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
rdtgroup_pseudo_locked_in_hierarchy(d)) {
-rdt_last_cmd_printf("pseudo-locked region in hierarchy\n");
+rdt_last_cmd_puts("Pseudo-locked region in hierarchy\n");
return -EINVAL;
}
-if (!cbm_validate(data->buf, &cbm_val, r))
+if (!r->cbm_validate(data->buf, &cbm_val, r))
return -EINVAL;
if ((rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
rdtgrp->mode == RDT_MODE_SHAREABLE) &&
rdtgroup_cbm_overlaps_pseudo_locked(d, cbm_val)) {
-rdt_last_cmd_printf("CBM overlaps with pseudo-locked region\n");
+rdt_last_cmd_puts("CBM overlaps with pseudo-locked region\n");
return -EINVAL;
}
@@ -164,14 +235,14 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
* either is exclusive.
*/
if (rdtgroup_cbm_overlaps(r, d, cbm_val, rdtgrp->closid, true)) {
-rdt_last_cmd_printf("overlaps with exclusive group\n");
+rdt_last_cmd_puts("Overlaps with exclusive group\n");
return -EINVAL;
}
if (rdtgroup_cbm_overlaps(r, d, cbm_val, rdtgrp->closid, false)) {
if (rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
-rdt_last_cmd_printf("overlaps with other group\n");
+rdt_last_cmd_puts("Overlaps with other group\n");
return -EINVAL;
}
}
@@ -293,7 +364,7 @@ static int rdtgroup_parse_resource(char *resname, char *tok,
if (!strcmp(resname, r->name) && rdtgrp->closid < r->num_closid)
return parse_line(tok, r, rdtgrp);
}
-rdt_last_cmd_printf("unknown/unsupported resource name '%s'\n", resname);
+rdt_last_cmd_printf("Unknown or unsupported resource name '%s'\n", resname);
return -EINVAL;
}
@@ -326,7 +397,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
*/
if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
ret = -EINVAL;
-rdt_last_cmd_puts("resource group is pseudo-locked\n");
+rdt_last_cmd_puts("Resource group is pseudo-locked\n");
goto out;
}
@@ -467,7 +538,7 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg)
r = &rdt_resources_all[resid];
d = rdt_find_domain(r, domid, NULL);
-if (!d) {
+if (IS_ERR_OR_NULL(d)) {
ret = -ENOENT;
goto out;
}
......
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_INTEL_RDT_H
-#define _ASM_X86_INTEL_RDT_H
+#ifndef _ASM_X86_RESCTRL_INTERNAL_H
+#define _ASM_X86_RESCTRL_INTERNAL_H
#include <linux/sched.h>
#include <linux/kernfs.h>
#include <linux/jump_label.h>
-#define IA32_L3_QOS_CFG 0xc81
-#define IA32_L2_QOS_CFG 0xc82
-#define IA32_L3_CBM_BASE 0xc90
-#define IA32_L2_CBM_BASE 0xd10
-#define IA32_MBA_THRTL_BASE 0xd50
+#define MSR_IA32_L3_QOS_CFG 0xc81
+#define MSR_IA32_L2_QOS_CFG 0xc82
+#define MSR_IA32_L3_CBM_BASE 0xc90
+#define MSR_IA32_L2_CBM_BASE 0xd10
+#define MSR_IA32_MBA_THRTL_BASE 0xd50
+#define MSR_IA32_MBA_BW_BASE 0xc0000200
+#define MSR_IA32_QM_CTR 0x0c8e
+#define MSR_IA32_QM_EVTSEL 0x0c8d
#define L3_QOS_CDP_ENABLE 0x01ULL
@@ -29,6 +33,9 @@
#define MBM_CNTR_WIDTH 24
#define MBM_OVERFLOW_INTERVAL 1000
#define MAX_MBA_BW 100u
+#define MBA_IS_LINEAR 0x4
+#define MBA_MAX_MBPS U32_MAX
+#define MAX_MBA_BW_AMD 0x800
#define RMID_VAL_ERROR BIT_ULL(63)
#define RMID_VAL_UNAVAIL BIT_ULL(62)
@@ -69,7 +76,7 @@ struct rmid_read {
u64 val;
};
-extern unsigned int intel_cqm_threshold;
+extern unsigned int resctrl_cqm_threshold;
extern bool rdt_alloc_capable;
extern bool rdt_mon_capable;
extern unsigned int rdt_mon_features;
@@ -405,6 +412,7 @@ struct rdt_parse_data {
* @cache: Cache allocation related data
* @format_str: Per resource format string to show domain value
* @parse_ctrlval: Per resource function pointer to parse control values
+* @cbm_validate Cache bitmask validate function
* @evt_list: List of monitoring events
* @num_rmid: Number of RMIDs available
* @mon_scale: cqm counter * mon_scale = occupancy in bytes
@@ -431,6 +439,7 @@ struct rdt_resource {
int (*parse_ctrlval)(struct rdt_parse_data *data,
struct rdt_resource *r,
struct rdt_domain *d);
+bool (*cbm_validate)(char *buf, u32 *data, struct rdt_resource *r);
struct list_head evt_list;
int num_rmid;
unsigned int mon_scale;
@@ -439,7 +448,9 @@ struct rdt_resource {
int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
struct rdt_domain *d);
-int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r,
+int parse_bw_intel(struct rdt_parse_data *data, struct rdt_resource *r,
+struct rdt_domain *d);
+int parse_bw_amd(struct rdt_parse_data *data, struct rdt_resource *r,
struct rdt_domain *d);
extern struct mutex rdtgroup_mutex;
@@ -463,6 +474,10 @@ enum {
RDT_NUM_RESOURCES,
};
+#define for_each_rdt_resource(r) \
+for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
+r++)
#define for_each_capable_rdt_resource(r) \
for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
r++) \
@@ -567,5 +582,7 @@ void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms);
void cqm_handle_limbo(struct work_struct *work);
bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d);
void __check_limbo(struct rdt_domain *d, bool force_free);
+bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r);
+bool cbm_validate_amd(char *buf, u32 *data, struct rdt_resource *r);
-#endif /* _ASM_X86_INTEL_RDT_H */
+#endif /* _ASM_X86_RESCTRL_INTERNAL_H */
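The hooks declared above (the cbm_validate() member next to parse_ctrlval(), plus the parse_bw_intel()/parse_bw_amd() and cbm_validate_intel()/cbm_validate_amd() prototypes) are what the "Initialize the vendor-specific resource functions" change fills in at boot, so that callers such as parse_cbm() only ever go through r->cbm_validate(). The sketch below condenses that idea rather than reproducing the patch; the helper name rdt_init_res_defs(), the use of RDT_RESOURCE_MBA and the exact set of resources touched are assumptions made for illustration.

/*
 * Illustrative sketch only (not the literal patch content): select the
 * vendor-specific parse/validate callbacks for each resource at boot, so
 * the filesystem code stays vendor-agnostic and simply calls
 * r->parse_ctrlval() / r->cbm_validate().
 */
static void __init rdt_init_res_defs(void)
{
	struct rdt_resource *r;

	for_each_rdt_resource(r) {
		if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
			if (r->rid == RDT_RESOURCE_MBA)	/* RDT_RESOURCE_MBA assumed */
				r->parse_ctrlval = parse_bw_intel;
			else
				r->cbm_validate = cbm_validate_intel;
		} else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
			if (r->rid == RDT_RESOURCE_MBA)
				r->parse_ctrlval = parse_bw_amd;
			else
				r->cbm_validate = cbm_validate_amd;
		}
	}
}

With this wiring in place, the schemata parsing path needs no vendor checks of its own: the same write to the resctrl schemata file ends up in either the Intel or the AMD validator depending on what was installed here.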
@@ -26,10 +26,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/cpu_device_id.h>
-#include "intel_rdt.h"
+#include "internal.h"
-#define MSR_IA32_QM_CTR 0x0c8e
-#define MSR_IA32_QM_EVTSEL 0x0c8d
struct rmid_entry {
u32 rmid;
@@ -73,7 +70,7 @@ unsigned int rdt_mon_features;
* This is the threshold cache occupancy at which we will consider an
* RMID available for re-allocation.
*/
-unsigned int intel_cqm_threshold;
+unsigned int resctrl_cqm_threshold;
static inline struct rmid_entry *__rmid_entry(u32 rmid)
{
@@ -107,7 +104,7 @@ static bool rmid_dirty(struct rmid_entry *entry)
{
u64 val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID);
-return val >= intel_cqm_threshold;
+return val >= resctrl_cqm_threshold;
}
/*
@@ -187,7 +184,7 @@ static void add_rmid_to_limbo(struct rmid_entry *entry)
list_for_each_entry(d, &r->domains, list) {
if (cpumask_test_cpu(cpu, &d->cpu_mask)) {
val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID);
-if (val <= intel_cqm_threshold)
+if (val <= resctrl_cqm_threshold)
continue;
}
@@ -625,6 +622,7 @@ static void l3_mon_evt_init(struct rdt_resource *r)
int rdt_get_mon_l3_config(struct rdt_resource *r)
{
+unsigned int cl_size = boot_cpu_data.x86_cache_size;
int ret;
r->mon_scale = boot_cpu_data.x86_cache_occ_scale;
@@ -637,10 +635,10 @@ int rdt_get_mon_l3_config(struct rdt_resource *r)
*
* For a 35MB LLC and 56 RMIDs, this is ~1.8% of the LLC.
*/
-intel_cqm_threshold = boot_cpu_data.x86_cache_size * 1024 / r->num_rmid;
+resctrl_cqm_threshold = cl_size * 1024 / r->num_rmid;
/* h/w works in units of "boot_cpu_data.x86_cache_occ_scale" */
-intel_cqm_threshold /= r->mon_scale;
+resctrl_cqm_threshold /= r->mon_scale;
ret = dom_data_init(r);
if (ret)
......
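A quick worked version of the arithmetic in rdt_get_mon_l3_config() above, using the numbers from its own comment (a 35MB LLC and 56 RMIDs): x86_cache_size is in KB, so 35840 KB * 1024 / 56 is 655360 bytes per RMID, roughly 1.8% of the cache, and the final division by mon_scale converts bytes into hardware counter units. The stand-alone C below just replays that computation; the occupancy scale of 64 bytes per counter unit is an assumed example value, not something taken from the patch.

#include <stdio.h>

/*
 * Stand-alone illustration of the resctrl_cqm_threshold calculation.
 * Inputs mirror the example in the kernel comment (35MB LLC, 56 RMIDs);
 * mon_scale = 64 is an assumed example value for x86_cache_occ_scale.
 */
int main(void)
{
	unsigned int cache_size_kb = 35 * 1024;	/* boot_cpu_data.x86_cache_size is in KB */
	unsigned int num_rmid = 56;
	unsigned int mon_scale = 64;		/* assumed x86_cache_occ_scale */

	unsigned int threshold_bytes = cache_size_kb * 1024 / num_rmid;
	unsigned int threshold_units = threshold_bytes / mon_scale;

	printf("threshold: %u bytes (%u counter units), %.1f%% of the LLC\n",
	       threshold_bytes, threshold_units,
	       100.0 * threshold_bytes / (cache_size_kb * 1024.0));
	return 0;
}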
@@ -24,14 +24,14 @@
#include <asm/cacheflush.h>
#include <asm/intel-family.h>
-#include <asm/intel_rdt_sched.h>
+#include <asm/resctrl_sched.h>
#include <asm/perf_event.h>
#include "../../events/perf_event.h" /* For X86_CONFIG() */
-#include "intel_rdt.h"
+#include "internal.h"
#define CREATE_TRACE_POINTS
-#include "intel_rdt_pseudo_lock_event.h"
+#include "pseudo_lock_event.h"
/*
* MSR_MISC_FEATURE_CONTROL register enables the modification of hardware
@@ -213,7 +213,7 @@ static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr)
for_each_cpu(cpu, &plr->d->cpu_mask) {
pm_req = kzalloc(sizeof(*pm_req), GFP_KERNEL);
if (!pm_req) {
-rdt_last_cmd_puts("fail allocating mem for PM QoS\n");
+rdt_last_cmd_puts("Failure to allocate memory for PM QoS\n");
ret = -ENOMEM;
goto out_err;
}
@@ -222,7 +222,7 @@ static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr)
DEV_PM_QOS_RESUME_LATENCY,
30);
if (ret < 0) {
-rdt_last_cmd_printf("fail to add latency req cpu%d\n",
+rdt_last_cmd_printf("Failed to add latency req CPU%d\n",
cpu);
kfree(pm_req);
ret = -1;
@@ -289,7 +289,7 @@ static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
plr->cpu = cpumask_first(&plr->d->cpu_mask);
if (!cpu_online(plr->cpu)) {
-rdt_last_cmd_printf("cpu %u associated with cache not online\n",
+rdt_last_cmd_printf("CPU %u associated with cache not online\n",
plr->cpu);
ret = -ENODEV;
goto out_region;
@@ -307,7 +307,7 @@ static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
}
ret = -1;
-rdt_last_cmd_puts("unable to determine cache line size\n");
+rdt_last_cmd_puts("Unable to determine cache line size\n");
out_region:
pseudo_lock_region_clear(plr);
return ret;
@@ -361,14 +361,14 @@ static int pseudo_lock_region_alloc(struct pseudo_lock_region *plr)
* KMALLOC_MAX_SIZE.
*/
if (plr->size > KMALLOC_MAX_SIZE) {
-rdt_last_cmd_puts("requested region exceeds maximum size\n");
+rdt_last_cmd_puts("Requested region exceeds maximum size\n");
ret = -E2BIG;
goto out_region;
}
plr->kmem = kzalloc(plr->size, GFP_KERNEL);
if (!plr->kmem) {
-rdt_last_cmd_puts("unable to allocate memory\n");
+rdt_last_cmd_puts("Unable to allocate memory\n");
ret = -ENOMEM;
goto out_region;
}
@@ -665,7 +665,7 @@ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp)
* default closid associated with it.
*/
if (rdtgrp == &rdtgroup_default) {
-rdt_last_cmd_puts("cannot pseudo-lock default group\n");
+rdt_last_cmd_puts("Cannot pseudo-lock default group\n");
return -EINVAL;
}
@@ -707,17 +707,17 @@ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp)
*/
prefetch_disable_bits = get_prefetch_disable_bits();
if (prefetch_disable_bits == 0) {
-rdt_last_cmd_puts("pseudo-locking not supported\n");
+rdt_last_cmd_puts("Pseudo-locking not supported\n");
return -EINVAL;
}
if (rdtgroup_monitor_in_progress(rdtgrp)) {
-rdt_last_cmd_puts("monitoring in progress\n");
+rdt_last_cmd_puts("Monitoring in progress\n");
return -EINVAL;
}
if (rdtgroup_tasks_assigned(rdtgrp)) {
-rdt_last_cmd_puts("tasks assigned to resource group\n");
+rdt_last_cmd_puts("Tasks assigned to resource group\n");
return -EINVAL;
}
@@ -727,13 +727,13 @@ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp)
}
if (rdtgroup_locksetup_user_restrict(rdtgrp)) {
-rdt_last_cmd_puts("unable to modify resctrl permissions\n");
+rdt_last_cmd_puts("Unable to modify resctrl permissions\n");
return -EIO;
}
ret = pseudo_lock_init(rdtgrp);
if (ret) {
-rdt_last_cmd_puts("unable to init pseudo-lock region\n");
+rdt_last_cmd_puts("Unable to init pseudo-lock region\n");
goto out_release;
}
@@ -770,7 +770,7 @@ int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp)
if (rdt_mon_capable) {
ret = alloc_rmid();
if (ret < 0) {
-rdt_last_cmd_puts("out of RMIDs\n");
+rdt_last_cmd_puts("Out of RMIDs\n");
return ret;
}
rdtgrp->mon.rmid = ret;
@@ -1304,7 +1304,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
"pseudo_lock/%u", plr->cpu);
if (IS_ERR(thread)) {
ret = PTR_ERR(thread);
-rdt_last_cmd_printf("locking thread returned error %d\n", ret);
+rdt_last_cmd_printf("Locking thread returned error %d\n", ret);
goto out_cstates;
}
@@ -1322,13 +1322,13 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
* the cleared, but not freed, plr struct resulting in an
* empty pseudo-locking loop.
*/
-rdt_last_cmd_puts("locking thread interrupted\n");
+rdt_last_cmd_puts("Locking thread interrupted\n");
goto out_cstates;
}
ret = pseudo_lock_minor_get(&new_minor);
if (ret < 0) {
-rdt_last_cmd_puts("unable to obtain a new minor number\n");
+rdt_last_cmd_puts("Unable to obtain a new minor number\n");
goto out_cstates;
}
@@ -1360,7 +1360,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
if (IS_ERR(dev)) {
ret = PTR_ERR(dev);
-rdt_last_cmd_printf("failed to create character device: %d\n",
+rdt_last_cmd_printf("Failed to create character device: %d\n",
ret);
goto out_debugfs;
}
......
@@ -39,5 +39,5 @@ TRACE_EVENT(pseudo_lock_l3,
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE intel_rdt_pseudo_lock_event
+#define TRACE_INCLUDE_FILE pseudo_lock_event
#include <trace/define_trace.h>
@@ -35,8 +35,8 @@
#include <uapi/linux/magic.h>
-#include <asm/intel_rdt_sched.h>
+#include <asm/resctrl_sched.h>
-#include "intel_rdt.h"
+#include "internal.h"
DEFINE_STATIC_KEY_FALSE(rdt_enable_key);
DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key);
@@ -298,7 +298,7 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of,
}
/*
-* This is safe against intel_rdt_sched_in() called from __switch_to()
+* This is safe against resctrl_sched_in() called from __switch_to()
* because __switch_to() is executed with interrupts disabled. A local call
* from update_closid_rmid() is proteced against __switch_to() because
* preemption is disabled.
@@ -317,7 +317,7 @@ static void update_cpu_closid_rmid(void *info)
* executing task might have its own closid selected. Just reuse
* the context switch code.
*/
-intel_rdt_sched_in();
+resctrl_sched_in();
}
/*
@@ -345,7 +345,7 @@ static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
/* Check whether cpus belong to parent ctrl group */
cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask);
if (cpumask_weight(tmpmask)) {
-rdt_last_cmd_puts("can only add CPUs to mongroup that belong to parent\n");
+rdt_last_cmd_puts("Can only add CPUs to mongroup that belong to parent\n");
return -EINVAL;
}
@@ -470,14 +470,14 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
rdt_last_cmd_clear();
if (!rdtgrp) {
ret = -ENOENT;
-rdt_last_cmd_puts("directory was removed\n");
+rdt_last_cmd_puts("Directory was removed\n");
goto unlock;
}
if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
ret = -EINVAL;
-rdt_last_cmd_puts("pseudo-locking in progress\n");
+rdt_last_cmd_puts("Pseudo-locking in progress\n");
goto unlock;
}
@@ -487,7 +487,7 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
ret = cpumask_parse(buf, newmask);
if (ret) {
-rdt_last_cmd_puts("bad cpu list/mask\n");
+rdt_last_cmd_puts("Bad CPU list/mask\n");
goto unlock;
}
@@ -495,7 +495,7 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
cpumask_andnot(tmpmask, newmask, cpu_online_mask);
if (cpumask_weight(tmpmask)) {
ret = -EINVAL;
-rdt_last_cmd_puts("can only assign online cpus\n");
+rdt_last_cmd_puts("Can only assign online CPUs\n");
goto unlock;
}
@@ -542,7 +542,7 @@ static void move_myself(struct callback_head *head)
preempt_disable();
/* update PQR_ASSOC MSR to make resource group go into effect */
-intel_rdt_sched_in();
+resctrl_sched_in();
preempt_enable();
kfree(callback);
@@ -574,7 +574,7 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
*/
atomic_dec(&rdtgrp->waitcount);
kfree(callback);
-rdt_last_cmd_puts("task exited\n");
+rdt_last_cmd_puts("Task exited\n");
} else {
/*
* For ctrl_mon groups move both closid and rmid.
@@ -692,7 +692,7 @@ static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
ret = -EINVAL;
-rdt_last_cmd_puts("pseudo-locking in progress\n");
+rdt_last_cmd_puts("Pseudo-locking in progress\n");
goto unlock;
}
@@ -926,7 +926,7 @@ static int max_threshold_occ_show(struct kernfs_open_file *of,
{
struct rdt_resource *r = of->kn->parent->priv;
-seq_printf(seq, "%u\n", intel_cqm_threshold * r->mon_scale);
+seq_printf(seq, "%u\n", resctrl_cqm_threshold * r->mon_scale);
return 0;
}
@@ -945,7 +945,7 @@ static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
if (bytes > (boot_cpu_data.x86_cache_size * 1024))
return -EINVAL;
-intel_cqm_threshold = bytes / r->mon_scale;
+resctrl_cqm_threshold = bytes / r->mon_scale;
return nbytes;
}
@@ -1029,7 +1029,7 @@ static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d,
* peer RDT CDP resource. Hence the WARN.
*/
_d_cdp = rdt_find_domain(_r_cdp, d->id, NULL);
-if (WARN_ON(!_d_cdp)) {
+if (WARN_ON(IS_ERR_OR_NULL(_d_cdp))) {
_r_cdp = NULL;
ret = -EINVAL;
}
@@ -1158,14 +1158,14 @@ static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
list_for_each_entry(d, &r->domains, list) {
if (rdtgroup_cbm_overlaps(r, d, d->ctrl_val[closid],
rdtgrp->closid, false)) {
-rdt_last_cmd_puts("schemata overlaps\n");
+rdt_last_cmd_puts("Schemata overlaps\n");
return false;
}
}
}
if (!has_cache) {
-rdt_last_cmd_puts("cannot be exclusive without CAT/CDP\n");
+rdt_last_cmd_puts("Cannot be exclusive without CAT/CDP\n");
return false;
}
@@ -1206,7 +1206,7 @@ static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
goto out;
if (mode == RDT_MODE_PSEUDO_LOCKED) {
-rdt_last_cmd_printf("cannot change pseudo-locked group\n");
+rdt_last_cmd_puts("Cannot change pseudo-locked group\n");
ret = -EINVAL;
goto out;
}
@@ -1235,7 +1235,7 @@ static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
goto out;
rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP;
} else {
-rdt_last_cmd_printf("unknown/unsupported mode\n");
+rdt_last_cmd_puts("Unknown or unsupported mode\n");
ret = -EINVAL;
}
@@ -1722,14 +1722,14 @@ static void l3_qos_cfg_update(void *arg)
{
bool *enable = arg;
-wrmsrl(IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL);
+wrmsrl(MSR_IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL);
}
static void l2_qos_cfg_update(void *arg)
{
bool *enable = arg;
-wrmsrl(IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL);
+wrmsrl(MSR_IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL);
}
static inline bool is_mba_linear(void)
@@ -1878,7 +1878,10 @@ static int parse_rdtgroupfs_options(char *data)
if (ret)
goto out;
} else if (!strcmp(token, "mba_MBps")) {
+if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
ret = set_mba_sc(true);
+else
+ret = -EINVAL;
if (ret)
goto out;
} else {
@@ -2540,7 +2543,7 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
tmp_cbm = d->new_ctrl;
if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) <
r->cache.min_cbm_bits) {
-rdt_last_cmd_printf("no space on %s:%d\n",
+rdt_last_cmd_printf("No space on %s:%d\n",
r->name, d->id);
return -ENOSPC;
}
@@ -2557,7 +2560,7 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
continue;
ret = update_domains(r, rdtgrp->closid);
if (ret < 0) {
-rdt_last_cmd_puts("failed to initialize allocations\n");
+rdt_last_cmd_puts("Failed to initialize allocations\n");
return ret;
}
rdtgrp->mode = RDT_MODE_SHAREABLE;
@@ -2580,7 +2583,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
rdt_last_cmd_clear();
if (!prdtgrp) {
ret = -ENODEV;
-rdt_last_cmd_puts("directory was removed\n");
+rdt_last_cmd_puts("Directory was removed\n");
goto out_unlock;
}
@@ -2588,7 +2591,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
(prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) {
ret = -EINVAL;
-rdt_last_cmd_puts("pseudo-locking in progress\n");
+rdt_last_cmd_puts("Pseudo-locking in progress\n");
goto out_unlock;
}
@@ -2596,7 +2599,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL);
if (!rdtgrp) {
ret = -ENOSPC;
-rdt_last_cmd_puts("kernel out of memory\n");
+rdt_last_cmd_puts("Kernel out of memory\n");
goto out_unlock;
}
*r = rdtgrp;
@@ -2637,7 +2640,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
if (rdt_mon_capable) {
ret = alloc_rmid();
if (ret < 0) {
-rdt_last_cmd_puts("out of RMIDs\n");
+rdt_last_cmd_puts("Out of RMIDs\n");
goto out_destroy;
}
rdtgrp->mon.rmid = ret;
@@ -2725,7 +2728,7 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
kn = rdtgrp->kn;
ret = closid_alloc();
if (ret < 0) {
-rdt_last_cmd_puts("out of CLOSIDs\n");
+rdt_last_cmd_puts("Out of CLOSIDs\n");
goto out_common_fail;
}
closid = ret;
......
@@ -17,7 +17,11 @@ struct cpuid_bit {
u32 sub_leaf;
};
-/* Please keep the leaf sorted by cpuid_bit.level for faster search. */
+/*
+ * Please keep the leaf sorted by cpuid_bit.level for faster search.
+ * X86_FEATURE_MBA is supported by both Intel and AMD. But the CPUID
+ * levels are different and there is a separate entry for each.
+ */
static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_APERFMPERF, CPUID_ECX, 0, 0x00000006, 0 },
{ X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 },
@@ -29,6 +33,7 @@ static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_HW_PSTATE, CPUID_EDX, 7, 0x80000007, 0 },
{ X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 },
{ X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 },
+{ X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 },
{ X86_FEATURE_SME, CPUID_EAX, 0, 0x8000001f, 0 },
{ X86_FEATURE_SEV, CPUID_EAX, 1, 0x8000001f, 0 },
{ 0, 0, 0, 0, 0 }
......
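For reference, the scattered-feature entry just added maps AMD's memory bandwidth allocation enumeration (CPUID leaf 0x80000008, EBX bit 6) onto the synthetic X86_FEATURE_MBA flag, which is what the resctrl code tests via boot_cpu_has(); Intel enumerates MBA through its own CPUID leaves and keeps its existing feature entry. The small user-space sketch below only reads the raw AMD bit to illustrate the mapping; it is an aside, not part of the patch.

#include <stdio.h>
#include <cpuid.h>

/*
 * Illustration of the AMD enumeration added above: CPUID leaf 0x80000008,
 * EBX bit 6 advertises memory bandwidth allocation (MBA). Inside the kernel
 * the scattered.c entry turns this bit into the synthetic X86_FEATURE_MBA
 * flag instead of reading CPUID directly.
 */
int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid_count(0x80000008, 0, &eax, &ebx, &ecx, &edx)) {
		printf("CPUID leaf 0x80000008 not available\n");
		return 1;
	}

	printf("AMD MBA (CPUID 0x80000008 EBX bit 6): %s\n",
	       (ebx & (1u << 6)) ? "supported" : "not supported");
	return 0;
}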
@@ -56,7 +56,7 @@
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/vm86.h>
-#include <asm/intel_rdt_sched.h>
+#include <asm/resctrl_sched.h>
#include <asm/proto.h>
#include "process.h"
@@ -298,7 +298,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
this_cpu_write(current_task, next_p);
/* Load the Intel cache allocation PQR MSR. */
-intel_rdt_sched_in();
+resctrl_sched_in();
return prev_p;
}
......
@@ -52,7 +52,7 @@
#include <asm/switch_to.h>
#include <asm/xen/hypervisor.h>
#include <asm/vdso.h>
-#include <asm/intel_rdt_sched.h>
+#include <asm/resctrl_sched.h>
#include <asm/unistd.h>
#include <asm/fsgsbase.h>
#ifdef CONFIG_IA32_EMULATION
@@ -622,7 +622,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
}
/* Load the Intel cache allocation PQR MSR. */
-intel_rdt_sched_in();
+resctrl_sched_in();
return prev_p;
}
......
@@ -993,7 +993,7 @@ struct task_struct {
/* cg_list protected by css_set_lock and tsk->alloc_lock: */
struct list_head cg_list;
#endif
-#ifdef CONFIG_INTEL_RDT
+#ifdef CONFIG_RESCTRL
u32 closid;
u32 rmid;
#endif
......