Commit 295dad10 authored by Linus Torvalds

Merge tag 'x86_urgent_for_v6.0_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Borislav Petkov:
 "As usually the case, right after a major release, the tip urgent
  branches accumulate a couple more fixes than normal. And here is the
  x86, a bit bigger, urgent pile.

   - Use the correct CPU capability clearing function on the error path
     in Intel perf LBR

   - A CFI fix to ftrace along with a simplification

   - Adjust handling of zero capacity bit mask for resctrl cache
     allocation on AMD

   - A fix to the AMD microcode loader to attempt patch application on
     every logical thread

   - A couple of topology fixes to handle CPUID leaf 0x1f enumeration
     info properly

   - Drop a -mabi=ms compiler option check as both compilers support it
     now anyway

   - A couple of fixes to how the initial, statically allocated FPU
     buffer state is set up and its interaction with dynamic states at
     runtime"

* tag 'x86_urgent_for_v6.0_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/fpu: Fix copy_xstate_to_uabi() to copy init states correctly
  perf/x86/intel/lbr: Use setup_clear_cpu_cap() instead of clear_cpu_cap()
  ftrace,kcfi: Separate ftrace_stub() and ftrace_stub_graph()
  x86/ftrace: Remove ftrace_epilogue()
  x86/resctrl: Fix min_cbm_bits for AMD
  x86/microcode/AMD: Apply the patch early on every logical thread
  x86/topology: Fix duplicated core ID within a package
  x86/topology: Fix multiple packages shown on a single-package system
  hwmon/coretemp: Handle large core ID value
  x86/Kconfig: Drop check for -mabi=ms for CONFIG_EFI_STUB
  x86/fpu: Exclude dynamic states from init_fpstate
  x86/fpu: Fix the init_fpstate size check with the actual size
  x86/fpu: Configure init_fpstate attributes orderly
parents 942e01ab 471f0aa7
@@ -7,6 +7,7 @@
  */
 #include <linux/linkage.h>
+#include <linux/cfi_types.h>
 #include <asm/asm-offsets.h>
 #include <asm/assembler.h>
 #include <asm/ftrace.h>
@@ -294,10 +295,14 @@ SYM_FUNC_END(ftrace_graph_caller)
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
 
-SYM_FUNC_START(ftrace_stub)
+SYM_TYPED_FUNC_START(ftrace_stub)
 	ret
 SYM_FUNC_END(ftrace_stub)
 
+SYM_TYPED_FUNC_START(ftrace_stub_graph)
+	ret
+SYM_FUNC_END(ftrace_stub_graph)
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 /*
  * void return_to_handler(void)
......
@@ -1973,7 +1973,6 @@ config EFI
 config EFI_STUB
 	bool "EFI stub support"
 	depends on EFI
-	depends on $(cc-option,-mabi=ms) || X86_32
 	select RELOCATABLE
 	help
 	  This kernel feature allows a bzImage to be loaded directly
......
@@ -1596,7 +1596,7 @@ void __init intel_pmu_arch_lbr_init(void)
 	return;
 
 clear_arch_lbr:
-	clear_cpu_cap(&boot_cpu_data, X86_FEATURE_ARCH_LBR);
+	setup_clear_cpu_cap(X86_FEATURE_ARCH_LBR);
 }
 
 /**
......
@@ -440,7 +440,13 @@ apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size, bool save_p
 		return ret;
 
 	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
-	if (rev >= mc->hdr.patch_id)
+
+	/*
+	 * Allow application of the same revision to pick up SMT-specific
+	 * changes even if the revision of the other SMT thread is already
+	 * up-to-date.
+	 */
+	if (rev > mc->hdr.patch_id)
 		return ret;
 
 	if (!__apply_microcode_amd(mc)) {
@@ -528,8 +534,12 @@ void load_ucode_amd_ap(unsigned int cpuid_1_eax)
 	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
 
-	/* Check whether we have saved a new patch already: */
-	if (*new_rev && rev < mc->hdr.patch_id) {
+	/*
+	 * Check whether a new patch has been saved already. Also, allow application of
+	 * the same revision in order to pick up SMT-thread-specific configuration even
+	 * if the sibling SMT thread already has an up-to-date revision.
+	 */
+	if (*new_rev && rev <= mc->hdr.patch_id) {
 		if (!__apply_microcode_amd(mc)) {
 			*new_rev = mc->hdr.patch_id;
 			return;
......
@@ -66,9 +66,6 @@ struct rdt_hw_resource rdt_resources_all[] = {
 			.rid = RDT_RESOURCE_L3,
 			.name = "L3",
 			.cache_level = 3,
-			.cache = {
-				.min_cbm_bits = 1,
-			},
 			.domains = domain_init(RDT_RESOURCE_L3),
 			.parse_ctrlval = parse_cbm,
 			.format_str = "%d=%0*x",
@@ -83,9 +80,6 @@ struct rdt_hw_resource rdt_resources_all[] = {
 			.rid = RDT_RESOURCE_L2,
 			.name = "L2",
 			.cache_level = 2,
-			.cache = {
-				.min_cbm_bits = 1,
-			},
 			.domains = domain_init(RDT_RESOURCE_L2),
 			.parse_ctrlval = parse_cbm,
 			.format_str = "%d=%0*x",
@@ -836,6 +830,7 @@ static __init void rdt_init_res_defs_intel(void)
 			r->cache.arch_has_sparse_bitmaps = false;
 			r->cache.arch_has_empty_bitmaps = false;
 			r->cache.arch_has_per_cpu_cfg = false;
+			r->cache.min_cbm_bits = 1;
 		} else if (r->rid == RDT_RESOURCE_MBA) {
 			hw_res->msr_base = MSR_IA32_MBA_THRTL_BASE;
 			hw_res->msr_update = mba_wrmsr_intel;
@@ -856,6 +851,7 @@ static __init void rdt_init_res_defs_amd(void)
 			r->cache.arch_has_sparse_bitmaps = true;
 			r->cache.arch_has_empty_bitmaps = true;
 			r->cache.arch_has_per_cpu_cfg = true;
+			r->cache.min_cbm_bits = 0;
 		} else if (r->rid == RDT_RESOURCE_MBA) {
 			hw_res->msr_base = MSR_IA32_MBA_BW_BASE;
 			hw_res->msr_update = mba_wrmsr_amd;
......
@@ -96,6 +96,7 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
 	unsigned int ht_mask_width, core_plus_mask_width, die_plus_mask_width;
 	unsigned int core_select_mask, core_level_siblings;
 	unsigned int die_select_mask, die_level_siblings;
+	unsigned int pkg_mask_width;
 	bool die_level_present = false;
 	int leaf;
@@ -111,10 +112,10 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
 	core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
 	core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
 	die_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
-	die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
+	pkg_mask_width = die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
 
 	sub_index = 1;
-	do {
+	while (true) {
 		cpuid_count(leaf, sub_index, &eax, &ebx, &ecx, &edx);
 
 		/*
@@ -132,10 +133,15 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
 			die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
 		}
 
+		if (LEAFB_SUBTYPE(ecx) != INVALID_TYPE)
+			pkg_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
+		else
+			break;
+
 		sub_index++;
-	} while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE);
+	}
 
-	core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width;
+	core_select_mask = (~(-1 << pkg_mask_width)) >> ht_mask_width;
 	die_select_mask = (~(-1 << die_plus_mask_width)) >>
 				core_plus_mask_width;
@@ -148,7 +154,7 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
 	}
 
 	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid,
-				die_plus_mask_width);
+				pkg_mask_width);
 
 	/*
 	 * Reinit the apicid, now that we have extended initial_apicid.
 	 */
......
@@ -210,13 +210,6 @@ static void __init fpu__init_system_xstate_size_legacy(void)
 	fpstate_reset(&current->thread.fpu);
 }
 
-static void __init fpu__init_init_fpstate(void)
-{
-	/* Bring init_fpstate size and features up to date */
-	init_fpstate.size = fpu_kernel_cfg.max_size;
-	init_fpstate.xfeatures = fpu_kernel_cfg.max_features;
-}
-
 /*
  * Called on the boot CPU once per system bootup, to set up the initial
  * FPU state that is later cloned into all processes:
@@ -236,5 +229,4 @@ void __init fpu__init_system(struct cpuinfo_x86 *c)
 	fpu__init_system_xstate_size_legacy();
 	fpu__init_system_xstate(fpu_kernel_cfg.max_size);
 	fpu__init_task_struct_size();
-	fpu__init_init_fpstate();
 }
@@ -360,7 +360,7 @@ static void __init setup_init_fpu_buf(void)
 	print_xstate_features();
 
-	xstate_init_xcomp_bv(&init_fpstate.regs.xsave, fpu_kernel_cfg.max_features);
+	xstate_init_xcomp_bv(&init_fpstate.regs.xsave, init_fpstate.xfeatures);
 
 	/*
 	 * Init all the features state with header.xfeatures being 0x0
@@ -678,20 +678,6 @@ static unsigned int __init get_xsave_size_user(void)
 	return ebx;
 }
 
-/*
- * Will the runtime-enumerated 'xstate_size' fit in the init
- * task's statically-allocated buffer?
- */
-static bool __init is_supported_xstate_size(unsigned int test_xstate_size)
-{
-	if (test_xstate_size <= sizeof(init_fpstate.regs))
-		return true;
-	pr_warn("x86/fpu: xstate buffer too small (%zu < %d), disabling xsave\n",
-		sizeof(init_fpstate.regs), test_xstate_size);
-	return false;
-}
-
 static int __init init_xstate_size(void)
 {
 	/* Recompute the context size for enabled features: */
@@ -717,10 +703,6 @@ static int __init init_xstate_size(void)
 	kernel_default_size =
 		xstate_calculate_size(fpu_kernel_cfg.default_features, compacted);
 
-	/* Ensure we have the space to store all default enabled features. */
-	if (!is_supported_xstate_size(kernel_default_size))
-		return -EINVAL;
-
 	if (!paranoid_xstate_size_valid(kernel_size))
 		return -EINVAL;
@@ -875,6 +857,19 @@ void __init fpu__init_system_xstate(unsigned int legacy_size)
 	update_regset_xstate_info(fpu_user_cfg.max_size,
 				  fpu_user_cfg.max_features);
 
+	/*
+	 * init_fpstate excludes dynamic states as they are large but init
+	 * state is zero.
+	 */
+	init_fpstate.size = fpu_kernel_cfg.default_size;
+	init_fpstate.xfeatures = fpu_kernel_cfg.default_features;
+
+	if (init_fpstate.size > sizeof(init_fpstate.regs)) {
+		pr_warn("x86/fpu: init_fpstate buffer too small (%zu < %d), disabling XSAVE\n",
+			sizeof(init_fpstate.regs), init_fpstate.size);
+		goto out_disable;
+	}
+
 	setup_init_fpu_buf();
 
 	/*
@@ -1130,6 +1125,15 @@ void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
 	 */
 	mask = fpstate->user_xfeatures;
 
+	/*
+	 * Dynamic features are not present in init_fpstate. When they are
+	 * in an all zeros init state, remove those from 'mask' to zero
+	 * those features in the user buffer instead of retrieving them
+	 * from init_fpstate.
+	 */
+	if (fpu_state_size_dynamic())
+		mask &= (header.xfeatures | xinit->header.xcomp_bv);
+
 	for_each_extended_xfeature(i, mask) {
 		/*
 		 * If there was a feature or alignment gap, zero the space
......
@@ -4,6 +4,7 @@
  */
 #include <linux/linkage.h>
+#include <linux/cfi_types.h>
 #include <asm/ptrace.h>
 #include <asm/ftrace.h>
 #include <asm/export.h>
@@ -129,6 +130,14 @@
 .endm
 
+SYM_TYPED_FUNC_START(ftrace_stub)
+	RET
+SYM_FUNC_END(ftrace_stub)
+
+SYM_TYPED_FUNC_START(ftrace_stub_graph)
+	RET
+SYM_FUNC_END(ftrace_stub_graph)
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 SYM_FUNC_START(__fentry__)
@@ -172,21 +181,10 @@ SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
 	 */
 SYM_INNER_LABEL(ftrace_caller_end, SYM_L_GLOBAL)
 	ANNOTATE_NOENDBR
-
-	jmp ftrace_epilogue
+	RET
 SYM_FUNC_END(ftrace_caller);
 STACK_FRAME_NON_STANDARD_FP(ftrace_caller)
 
-SYM_FUNC_START(ftrace_epilogue)
-/*
- * This is weak to keep gas from relaxing the jumps.
- */
-SYM_INNER_LABEL_ALIGN(ftrace_stub, SYM_L_WEAK)
-	UNWIND_HINT_FUNC
-	ENDBR
-	RET
-SYM_FUNC_END(ftrace_epilogue)
-
 SYM_FUNC_START(ftrace_regs_caller)
 	/* Save the current flags before any operations that can change them */
 	pushfq
@@ -262,14 +260,11 @@ SYM_INNER_LABEL(ftrace_regs_caller_jmp, SYM_L_GLOBAL)
 	popfq
 
 	/*
-	 * As this jmp to ftrace_epilogue can be a short jump
-	 * it must not be copied into the trampoline.
-	 * The trampoline will add the code to jump
-	 * to the return.
+	 * The trampoline will add the return.
 	 */
 SYM_INNER_LABEL(ftrace_regs_caller_end, SYM_L_GLOBAL)
 	ANNOTATE_NOENDBR
-	jmp ftrace_epilogue
+	RET
 
 	/* Swap the flags with orig_rax */
 1:	movq MCOUNT_REG_SIZE(%rsp), %rdi
@@ -280,7 +275,7 @@ SYM_INNER_LABEL(ftrace_regs_caller_end, SYM_L_GLOBAL)
 	/* Restore flags */
 	popfq
 	UNWIND_HINT_FUNC
-	jmp ftrace_epilogue
+	RET
 
 SYM_FUNC_END(ftrace_regs_caller)
 STACK_FRAME_NON_STANDARD_FP(ftrace_regs_caller)
@@ -291,9 +286,6 @@ STACK_FRAME_NON_STANDARD_FP(ftrace_regs_caller)
 SYM_FUNC_START(__fentry__)
 	cmpq $ftrace_stub, ftrace_trace_function
 	jnz trace
-
-SYM_INNER_LABEL(ftrace_stub, SYM_L_GLOBAL)
-	ENDBR
 	RET
 
 trace:
......
@@ -46,9 +46,6 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
 #define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
 #define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
 
-#define TO_CORE_ID(cpu) (cpu_data(cpu).cpu_core_id)
-#define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
-
 #ifdef CONFIG_SMP
 #define for_each_sibling(i, cpu) \
 	for_each_cpu(i, topology_sibling_cpumask(cpu))
@@ -91,6 +88,8 @@ struct temp_data {
 struct platform_data {
 	struct device *hwmon_dev;
 	u16 pkg_id;
+	u16 cpu_map[NUM_REAL_CORES];
+	struct ida ida;
 	struct cpumask cpumask;
 	struct temp_data *core_data[MAX_CORE_DATA];
 	struct device_attribute name_attr;
@@ -441,7 +440,7 @@ static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag)
 			MSR_IA32_THERM_STATUS;
 	tdata->is_pkg_data = pkg_flag;
 	tdata->cpu = cpu;
-	tdata->cpu_core_id = TO_CORE_ID(cpu);
+	tdata->cpu_core_id = topology_core_id(cpu);
 	tdata->attr_size = MAX_CORE_ATTRS;
 	mutex_init(&tdata->update_lock);
 	return tdata;
@@ -454,7 +453,7 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
 	struct platform_data *pdata = platform_get_drvdata(pdev);
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	u32 eax, edx;
-	int err, attr_no;
+	int err, index, attr_no;
 
 	/*
 	 * Find attr number for sysfs:
@@ -462,14 +461,26 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
 	 * The attr number is always core id + 2
 	 * The Pkgtemp will always show up as temp1_*, if available
 	 */
-	attr_no = pkg_flag ? PKG_SYSFS_ATTR_NO : TO_ATTR_NO(cpu);
+	if (pkg_flag) {
+		attr_no = PKG_SYSFS_ATTR_NO;
+	} else {
+		index = ida_alloc(&pdata->ida, GFP_KERNEL);
+		if (index < 0)
+			return index;
+		pdata->cpu_map[index] = topology_core_id(cpu);
+		attr_no = index + BASE_SYSFS_ATTR_NO;
+	}
 
-	if (attr_no > MAX_CORE_DATA - 1)
-		return -ERANGE;
+	if (attr_no > MAX_CORE_DATA - 1) {
+		err = -ERANGE;
+		goto ida_free;
+	}
 
 	tdata = init_temp_data(cpu, pkg_flag);
-	if (!tdata)
-		return -ENOMEM;
+	if (!tdata) {
+		err = -ENOMEM;
+		goto ida_free;
+	}
 
 	/* Test if we can access the status register */
 	err = rdmsr_safe_on_cpu(cpu, tdata->status_reg, &eax, &edx);
@@ -505,6 +516,9 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
 exit_free:
 	pdata->core_data[attr_no] = NULL;
 	kfree(tdata);
+ida_free:
+	if (!pkg_flag)
+		ida_free(&pdata->ida, index);
 	return err;
 }
@@ -524,6 +538,9 @@ static void coretemp_remove_core(struct platform_data *pdata, int indx)
 	kfree(pdata->core_data[indx]);
 	pdata->core_data[indx] = NULL;
+
+	if (indx >= BASE_SYSFS_ATTR_NO)
+		ida_free(&pdata->ida, indx - BASE_SYSFS_ATTR_NO);
 }
 
 static int coretemp_probe(struct platform_device *pdev)
@@ -537,6 +554,7 @@ static int coretemp_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	pdata->pkg_id = pdev->id;
+	ida_init(&pdata->ida);
 	platform_set_drvdata(pdev, pdata);
 
 	pdata->hwmon_dev = devm_hwmon_device_register_with_groups(dev, DRVNAME,
@@ -553,6 +571,7 @@ static int coretemp_remove(struct platform_device *pdev)
 		if (pdata->core_data[i])
 			coretemp_remove_core(pdata, i);
 
+	ida_destroy(&pdata->ida);
 	return 0;
 }
@@ -647,7 +666,7 @@ static int coretemp_cpu_offline(unsigned int cpu)
 	struct platform_device *pdev = coretemp_get_pdev(cpu);
 	struct platform_data *pd;
 	struct temp_data *tdata;
-	int indx, target;
+	int i, indx = -1, target;
 
 	/*
 	 * Don't execute this on suspend as the device remove locks
@@ -660,12 +679,19 @@ static int coretemp_cpu_offline(unsigned int cpu)
 	if (!pdev)
 		return 0;
 
-	/* The core id is too big, just return */
-	indx = TO_ATTR_NO(cpu);
-	if (indx > MAX_CORE_DATA - 1)
+	pd = platform_get_drvdata(pdev);
+
+	for (i = 0; i < NUM_REAL_CORES; i++) {
+		if (pd->cpu_map[i] == topology_core_id(cpu)) {
+			indx = i + BASE_SYSFS_ATTR_NO;
+			break;
+		}
+	}
+
+	/* Too many cores and this core is not populated, just return */
+	if (indx < 0)
 		return 0;
 
-	pd = platform_get_drvdata(pdev);
 	tdata = pd->core_data[indx];
 
 	cpumask_clear_cpu(cpu, &pd->cpumask);
......
@@ -162,6 +162,16 @@
 #define PATCHABLE_DISCARDS	*(__patchable_function_entries)
 #endif
 
+#ifndef CONFIG_ARCH_SUPPORTS_CFI_CLANG
+/*
+ * Simply points to ftrace_stub, but with the proper protocol.
+ * Defined by the linker script in linux/vmlinux.lds.h
+ */
+#define FTRACE_STUB_HACK	ftrace_stub_graph = ftrace_stub;
+#else
+#define FTRACE_STUB_HACK
+#endif
+
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
 /*
  * The ftrace call sites are logged to a section whose name depends on the
@@ -169,10 +179,6 @@
 * FTRACE_CALLSITE_SECTION. We capture all of them here to avoid header
 * dependencies for FTRACE_CALLSITE_SECTION's definition.
 *
- * Need to also make ftrace_stub_graph point to ftrace_stub
- * so that the same stub location may have different protocols
- * and not mess up with C verifiers.
- *
 * ftrace_ops_list_func will be defined as arch_ftrace_ops_list_func
 * as some archs will have a different prototype for that function
 * but ftrace_ops_list_func() will have a single prototype.
@@ -182,11 +188,11 @@
 		KEEP(*(__mcount_loc))					\
 		KEEP_PATCHABLE						\
 		__stop_mcount_loc = .;					\
-		ftrace_stub_graph = ftrace_stub;			\
+		FTRACE_STUB_HACK					\
 		ftrace_ops_list_func = arch_ftrace_ops_list_func;
 #else
 # ifdef CONFIG_FUNCTION_TRACER
-# define MCOUNT_REC()	ftrace_stub_graph = ftrace_stub;	\
+# define MCOUNT_REC()	FTRACE_STUB_HACK			\
 			ftrace_ops_list_func = arch_ftrace_ops_list_func;
 # else
 # define MCOUNT_REC()
......