Commit 9fe30842 authored by Linus Torvalds

Merge tag 'x86-urgent-2024-04-07' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar:

 - Fix MCE timer reinit locking

 - Fix/improve CoCo guest random entropy pool init

 - Fix SEV-SNP late disable bugs

 - Fix false positive objtool build warning

 - Fix header dependency bug

 - Fix resctrl CPU offlining bug

* tag 'x86-urgent-2024-04-07' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/retpoline: Add NOENDBR annotation to the SRSO dummy return thunk
  x86/mce: Make sure to grab mce_sysfs_mutex in set_bank()
  x86/CPU/AMD: Track SNP host status with cc_platform_*()
  x86/cc: Add cc_platform_set/_clear() helpers
  x86/kvm/Kconfig: Have KVM_AMD_SEV select ARCH_HAS_CC_PLATFORM
  x86/coco: Require seeding RNG with RDRAND on CoCo systems
  x86/numa/32: Include missing <asm/pgtable_areas.h>
  x86/resctrl: Fix uninitialized memory read when last CPU of domain goes offline
parents 3520c35e b377c66a
...
@@ -3,19 +3,28 @@
  * Confidential Computing Platform Capability checks
  *
  * Copyright (C) 2021 Advanced Micro Devices, Inc.
+ * Copyright (C) 2024 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
  *
  * Author: Tom Lendacky <thomas.lendacky@amd.com>
  */

 #include <linux/export.h>
 #include <linux/cc_platform.h>
+#include <linux/string.h>
+#include <linux/random.h>

+#include <asm/archrandom.h>
 #include <asm/coco.h>
 #include <asm/processor.h>

 enum cc_vendor cc_vendor __ro_after_init = CC_VENDOR_NONE;

 u64 cc_mask __ro_after_init;

+static struct cc_attr_flags {
+	__u64 host_sev_snp	: 1,
+	      __resv		: 63;
+} cc_flags;
+
 static bool noinstr intel_cc_platform_has(enum cc_attr attr)
 {
 	switch (attr) {
...
@@ -89,6 +98,9 @@ static bool noinstr amd_cc_platform_has(enum cc_attr attr)
 	case CC_ATTR_GUEST_SEV_SNP:
 		return sev_status & MSR_AMD64_SEV_SNP_ENABLED;

+	case CC_ATTR_HOST_SEV_SNP:
+		return cc_flags.host_sev_snp;
+
 	default:
 		return false;
 	}
...
@@ -148,3 +160,84 @@ u64 cc_mkdec(u64 val)
 	}
 }
 EXPORT_SYMBOL_GPL(cc_mkdec);
+
+static void amd_cc_platform_clear(enum cc_attr attr)
+{
+	switch (attr) {
+	case CC_ATTR_HOST_SEV_SNP:
+		cc_flags.host_sev_snp = 0;
+		break;
+	default:
+		break;
+	}
+}
+
+void cc_platform_clear(enum cc_attr attr)
+{
+	switch (cc_vendor) {
+	case CC_VENDOR_AMD:
+		amd_cc_platform_clear(attr);
+		break;
+	default:
+		break;
+	}
+}
+
+static void amd_cc_platform_set(enum cc_attr attr)
+{
+	switch (attr) {
+	case CC_ATTR_HOST_SEV_SNP:
+		cc_flags.host_sev_snp = 1;
+		break;
+	default:
+		break;
+	}
+}
+
+void cc_platform_set(enum cc_attr attr)
+{
+	switch (cc_vendor) {
+	case CC_VENDOR_AMD:
+		amd_cc_platform_set(attr);
+		break;
+	default:
+		break;
+	}
+}
+
+__init void cc_random_init(void)
+{
+	/*
+	 * The seed is 32 bytes (in units of longs), which is 256 bits, which
+	 * is the security level that the RNG is targeting.
+	 */
+	unsigned long rng_seed[32 / sizeof(long)];
+	size_t i, longs;
+
+	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
+		return;
+
+	/*
+	 * Since the CoCo threat model includes the host, the only reliable
+	 * source of entropy that can be neither observed nor manipulated is
+	 * RDRAND. Usually, RDRAND failure is considered tolerable, but since
+	 * CoCo guests have no other unobservable source of entropy, it's
+	 * important to at least ensure the RNG gets some initial random seeds.
+	 */
+	for (i = 0; i < ARRAY_SIZE(rng_seed); i += longs) {
+		longs = arch_get_random_longs(&rng_seed[i], ARRAY_SIZE(rng_seed) - i);
+
+		/*
+		 * A zero return value means that the guest doesn't have RDRAND
+		 * or the CPU is physically broken, and in both cases that
+		 * means most crypto inside of the CoCo instance will be
+		 * broken, defeating the purpose of CoCo in the first place. So
+		 * just panic here because it's absolutely unsafe to continue
+		 * executing.
+		 */
+		if (longs == 0)
+			panic("RDRAND is defective.");
+	}
+	add_device_randomness(rng_seed, sizeof(rng_seed));
+	memzero_explicit(rng_seed, sizeof(rng_seed));
+}
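
To see the shape of the seeding loop in isolation: a self-contained userspace sketch of the same retry pattern, where `get_random_longs()` is a mock standing in for `arch_get_random_longs()` (which may return fewer longs than requested, or 0 when RDRAND fails). This is only an illustration, not kernel code:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

/* Mock of arch_get_random_longs(): fills up to @len longs and returns
 * how many it produced; 0 would mean RDRAND is absent or broken. */
static size_t get_random_longs(unsigned long *buf, size_t len)
{
	/* Pretend the hardware hands out at most 2 longs per call. */
	size_t n = len < 2 ? len : 2;
	for (size_t i = 0; i < n; i++)
		buf[i] = 0x1234567890abcdefUL ^ (unsigned long)rand();
	return n;
}

int main(void)
{
	/* 32 bytes = 256 bits, the RNG's target security level. */
	unsigned long rng_seed[32 / sizeof(long)];
	size_t i, longs;

	for (i = 0; i < ARRAY_SIZE(rng_seed); i += longs) {
		longs = get_random_longs(&rng_seed[i], ARRAY_SIZE(rng_seed) - i);
		if (longs == 0) {	/* the kernel panics here instead */
			fprintf(stderr, "RDRAND is defective.\n");
			return 1;
		}
	}
	printf("collected %zu seed bytes\n", sizeof(rng_seed));
	memset(rng_seed, 0, sizeof(rng_seed)); /* memzero_explicit() in-kernel */
	return 0;
}
```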
...
@@ -22,6 +22,7 @@ static inline void cc_set_mask(u64 mask)

 u64 cc_mkenc(u64 val);
 u64 cc_mkdec(u64 val);
+void cc_random_init(void);
 #else
 #define cc_vendor (CC_VENDOR_NONE)
...
@@ -34,6 +35,7 @@ static inline u64 cc_mkdec(u64 val)
 {
 	return val;
 }
+static inline void cc_random_init(void) { }
 #endif

 #endif /* _ASM_X86_COCO_H */
...
@@ -228,7 +228,6 @@ int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct sn
 void snp_accept_memory(phys_addr_t start, phys_addr_t end);
 u64 snp_get_unsupported_features(u64 status);
 u64 sev_get_status(void);
-void kdump_sev_callback(void);
 void sev_show_status(void);
 #else
 static inline void sev_es_ist_enter(struct pt_regs *regs) { }
...
@@ -258,7 +257,6 @@ static inline int snp_issue_guest_request(u64 exit_code, struct snp_req_data *in
 static inline void snp_accept_memory(phys_addr_t start, phys_addr_t end) { }
 static inline u64 snp_get_unsupported_features(u64 status) { return 0; }
 static inline u64 sev_get_status(void) { return 0; }
-static inline void kdump_sev_callback(void) { }
 static inline void sev_show_status(void) { }
 #endif
...
@@ -270,6 +268,7 @@ int psmash(u64 pfn);
 int rmp_make_private(u64 pfn, u64 gpa, enum pg_level level, u32 asid, bool immutable);
 int rmp_make_shared(u64 pfn, enum pg_level level);
 void snp_leak_pages(u64 pfn, unsigned int npages);
+void kdump_sev_callback(void);
 #else
 static inline bool snp_probe_rmptable_info(void) { return false; }
 static inline int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level) { return -ENODEV; }
...
@@ -282,6 +281,7 @@ static inline int rmp_make_private(u64 pfn, u64 gpa, enum pg_level level, u32 as
 }
 static inline int rmp_make_shared(u64 pfn, enum pg_level level) { return -ENODEV; }
 static inline void snp_leak_pages(u64 pfn, unsigned int npages) {}
+static inline void kdump_sev_callback(void) { }
 #endif

 #endif
...
@@ -345,6 +345,28 @@ static void srat_detect_node(struct cpuinfo_x86 *c)
 #endif
 }

+static void bsp_determine_snp(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_ARCH_HAS_CC_PLATFORM
+	cc_vendor = CC_VENDOR_AMD;
+
+	if (cpu_has(c, X86_FEATURE_SEV_SNP)) {
+		/*
+		 * RMP table entry format is not architectural and is defined by the
+		 * per-processor PPR. Restrict SNP support on the known CPU models
+		 * for which the RMP table entry format is currently defined for.
+		 */
+		if (!cpu_has(c, X86_FEATURE_HYPERVISOR) &&
+		    c->x86 >= 0x19 && snp_probe_rmptable_info()) {
+			cc_platform_set(CC_ATTR_HOST_SEV_SNP);
+		} else {
+			setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
+			cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
+		}
+	}
+#endif
+}
+
 static void bsp_init_amd(struct cpuinfo_x86 *c)
 {
 	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
...
@@ -452,21 +474,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
 		break;
 	}

-	if (cpu_has(c, X86_FEATURE_SEV_SNP)) {
-		/*
-		 * RMP table entry format is not architectural and it can vary by processor
-		 * and is defined by the per-processor PPR. Restrict SNP support on the
-		 * known CPU model and family for which the RMP table entry format is
-		 * currently defined for.
-		 */
-		if (!boot_cpu_has(X86_FEATURE_ZEN3) &&
-		    !boot_cpu_has(X86_FEATURE_ZEN4) &&
-		    !boot_cpu_has(X86_FEATURE_ZEN5))
-			setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
-		else if (!snp_probe_rmptable_info())
-			setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
-	}
+	bsp_determine_snp(c);
 	return;

 warn:
...
@@ -2500,12 +2500,14 @@ static ssize_t set_bank(struct device *s, struct device_attribute *attr,
 		return -EINVAL;

 	b = &per_cpu(mce_banks_array, s->id)[bank];
 	if (!b->init)
 		return -ENODEV;

 	b->ctl = new;
+	mutex_lock(&mce_sysfs_mutex);
 	mce_restart();
+	mutex_unlock(&mce_sysfs_mutex);

 	return size;
 }
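
The two added lines give set_bank() the same serialization used elsewhere in the MCE sysfs code: mce_restart() re-arms per-CPU timer state and must not race with itself. A minimal pthread model of that discipline follows; every name in it is a stand-in for illustration, not kernel API:

```c
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

static pthread_mutex_t sysfs_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned long bank_ctl;	/* stands in for b->ctl */
static int restarts;		/* stands in for the timer re-init work */

/* Model of mce_restart(): touches shared state, so callers serialize. */
static void restart(void)
{
	restarts++;
}

/* Model of set_bank(): the restart must happen under the mutex,
 * which is exactly what the patch adds. */
static void *writer(void *arg)
{
	bank_ctl = (unsigned long)(size_t)arg;

	pthread_mutex_lock(&sysfs_mutex);
	restart();
	pthread_mutex_unlock(&sysfs_mutex);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, writer, (void *)1);
	pthread_create(&b, NULL, writer, (void *)2);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("ctl=%lu, restarts=%d\n", bank_ctl, restarts);
	return 0;
}
```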
...
@@ -108,7 +108,7 @@ static inline void k8_check_syscfg_dram_mod_en(void)
 	      (boot_cpu_data.x86 >= 0x0f)))
 		return;

-	if (cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+	if (cc_platform_has(CC_ATTR_HOST_SEV_SNP))
 		return;

 	rdmsr(MSR_AMD64_SYSCFG, lo, hi);
...
@@ -78,7 +78,8 @@ cpumask_any_housekeeping(const struct cpumask *mask, int exclude_cpu)
 	else
 		cpu = cpumask_any_but(mask, exclude_cpu);

-	if (!IS_ENABLED(CONFIG_NO_HZ_FULL))
+	/* Only continue if tick_nohz_full_mask has been initialized. */
+	if (!tick_nohz_full_enabled())
 		return cpu;

 	/* If the CPU picked isn't marked nohz_full nothing more needs doing. */
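
Behind this one-line fix: IS_ENABLED(CONFIG_NO_HZ_FULL) is a compile-time constant, so a kernel built with nohz_full support but booted without nohz_full= would go on to consult tick_nohz_full_mask even though it was never allocated, while tick_nohz_full_enabled() also checks the runtime state. A compilable model of the distinction (names mirror the kernel's, but this is a sketch, not the kernel code):

```c
#include <stdbool.h>
#include <stdio.h>

#define IS_ENABLED_CONFIG_NO_HZ_FULL 1	/* support compiled in */

/* Stays NULL unless boot actually enabled nohz_full. */
static unsigned long *tick_nohz_full_mask;

static bool tick_nohz_full_enabled(void)
{
	/* True only when support is built in AND the boot enabled it. */
	return IS_ENABLED_CONFIG_NO_HZ_FULL && tick_nohz_full_mask != NULL;
}

int main(void)
{
	/* Old gate: falls through to mask accesses on any NO_HZ_FULL build. */
	bool old_gate_continues = IS_ENABLED_CONFIG_NO_HZ_FULL;

	/* New gate: only continues once the mask actually exists. */
	bool new_gate_continues = tick_nohz_full_enabled();

	printf("old gate: %d, new gate: %d\n",
	       old_gate_continues, new_gate_continues);
	return 0;
}
```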
...
@@ -35,6 +35,7 @@
 #include <asm/bios_ebda.h>
 #include <asm/bugs.h>
 #include <asm/cacheinfo.h>
+#include <asm/coco.h>
 #include <asm/cpu.h>
 #include <asm/efi.h>
 #include <asm/gart.h>
...
@@ -991,6 +992,7 @@ void __init setup_arch(char **cmdline_p)
 	 * memory size.
 	 */
 	mem_encrypt_setup_arch();
+	cc_random_init();

 	efi_fake_memmap();
 	efi_find_mirror();
...
@@ -2284,16 +2284,6 @@ static int __init snp_init_platform_device(void)
 }
 device_initcall(snp_init_platform_device);

-void kdump_sev_callback(void)
-{
-	/*
-	 * Do wbinvd() on remote CPUs when SNP is enabled in order to
-	 * safely do SNP_SHUTDOWN on the local CPU.
-	 */
-	if (cpu_feature_enabled(X86_FEATURE_SEV_SNP))
-		wbinvd();
-}
-
 void sev_show_status(void)
 {
 	int i;
...
@@ -122,6 +122,7 @@ config KVM_AMD_SEV
 	default y
 	depends on KVM_AMD && X86_64
 	depends on CRYPTO_DEV_SP_PSP && !(KVM_AMD=y && CRYPTO_DEV_CCP_DD=m)
+	select ARCH_HAS_CC_PLATFORM
 	help
 	  Provides support for launching Encrypted VMs (SEV) and Encrypted VMs
 	  with Encrypted State (SEV-ES) on AMD processors.
...
@@ -3184,7 +3184,7 @@ struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu)
 	unsigned long pfn;
 	struct page *p;

-	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
 		return alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);

 	/*
...
@@ -229,6 +229,7 @@ SYM_CODE_END(srso_return_thunk)
 /* Dummy for the alternative in CALL_UNTRAIN_RET. */
 SYM_CODE_START(srso_alias_untrain_ret)
 	ANNOTATE_UNRET_SAFE
+	ANNOTATE_NOENDBR
 	ret
 	int3
 SYM_FUNC_END(srso_alias_untrain_ret)
...
@@ -24,6 +24,7 @@
 #include <linux/memblock.h>
 #include <linux/init.h>

+#include <asm/pgtable_areas.h>
 #include "numa_internal.h"
...
@@ -77,7 +77,7 @@ static int __mfd_enable(unsigned int cpu)
 {
 	u64 val;

-	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
 		return 0;

 	rdmsrl(MSR_AMD64_SYSCFG, val);
...
@@ -98,7 +98,7 @@ static int __snp_enable(unsigned int cpu)
 {
 	u64 val;

-	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
 		return 0;

 	rdmsrl(MSR_AMD64_SYSCFG, val);
...
@@ -174,11 +174,11 @@ static int __init snp_rmptable_init(void)
 	u64 rmptable_size;
 	u64 val;

-	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
 		return 0;

 	if (!amd_iommu_snp_en)
-		return 0;
+		goto nosnp;

 	if (!probed_rmp_size)
 		goto nosnp;
...
@@ -225,7 +225,7 @@ static int __init snp_rmptable_init(void)
 	return 0;

 nosnp:
-	setup_clear_cpu_cap(X86_FEATURE_SEV_SNP);
+	cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
 	return -ENOSYS;
 }
...
@@ -246,7 +246,7 @@ static struct rmpentry *__snp_lookup_rmpentry(u64 pfn, int *level)
 {
 	struct rmpentry *large_entry, *entry;

-	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
 		return ERR_PTR(-ENODEV);

 	entry = get_rmpentry(pfn);
...
@@ -363,7 +363,7 @@ int psmash(u64 pfn)
 	unsigned long paddr = pfn << PAGE_SHIFT;
 	int ret;

-	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
 		return -ENODEV;

 	if (!pfn_valid(pfn))
...
@@ -472,7 +472,7 @@ static int rmpupdate(u64 pfn, struct rmp_state *state)
 	unsigned long paddr = pfn << PAGE_SHIFT;
 	int ret, level;

-	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
 		return -ENODEV;

 	level = RMP_TO_PG_LEVEL(state->pagesize);
...
@@ -558,3 +558,13 @@ void snp_leak_pages(u64 pfn, unsigned int npages)
 	spin_unlock(&snp_leaked_pages_list_lock);
 }
 EXPORT_SYMBOL_GPL(snp_leak_pages);
+
+void kdump_sev_callback(void)
+{
+	/*
+	 * Do wbinvd() on remote CPUs when SNP is enabled in order to
+	 * safely do SNP_SHUTDOWN on the local CPU.
+	 */
+	if (cc_platform_has(CC_ATTR_HOST_SEV_SNP))
+		wbinvd();
+}
...
@@ -1090,7 +1090,7 @@ static int __sev_snp_init_locked(int *error)
 	void *arg = &data;
 	int cmd, rc = 0;

-	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
 		return -ENODEV;

 	sev = psp->sev_data;
...
@@ -3228,7 +3228,7 @@ static bool __init detect_ivrs(void)
 static void iommu_snp_enable(void)
 {
 #ifdef CONFIG_KVM_AMD_SEV
-	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
 		return;
 	/*
 	 * The SNP support requires that IOMMU must be enabled, and is
...
@@ -3236,12 +3236,14 @@ static void iommu_snp_enable(void)
 	 */
 	if (no_iommu || iommu_default_passthrough()) {
 		pr_err("SNP: IOMMU disabled or configured in passthrough mode, SNP cannot be supported.\n");
+		cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
 		return;
 	}

 	amd_iommu_snp_en = check_feature(FEATURE_SNP);
 	if (!amd_iommu_snp_en) {
 		pr_err("SNP: IOMMU SNP feature not enabled, SNP cannot be supported.\n");
+		cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
 		return;
 	}
...
@@ -90,6 +90,14 @@ enum cc_attr {
 	 * Examples include TDX Guest.
 	 */
 	CC_ATTR_HOTPLUG_DISABLED,
+
+	/**
+	 * @CC_ATTR_HOST_SEV_SNP: AMD SNP enabled on the host.
+	 *
+	 * The host kernel is running with the necessary features
+	 * enabled to run SEV-SNP guests.
+	 */
+	CC_ATTR_HOST_SEV_SNP,
 };

 #ifdef CONFIG_ARCH_HAS_CC_PLATFORM
...
@@ -107,10 +115,14 @@ enum cc_attr {
  * * FALSE - Specified Confidential Computing attribute is not active
  */
 bool cc_platform_has(enum cc_attr attr);
+void cc_platform_set(enum cc_attr attr);
+void cc_platform_clear(enum cc_attr attr);

 #else	/* !CONFIG_ARCH_HAS_CC_PLATFORM */

 static inline bool cc_platform_has(enum cc_attr attr) { return false; }
+static inline void cc_platform_set(enum cc_attr attr) { }
+static inline void cc_platform_clear(enum cc_attr attr) { }

 #endif	/* CONFIG_ARCH_HAS_CC_PLATFORM */
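
These declarations pair with the AMD implementation earlier in this diff to give host code a single source of truth for SNP readiness: the BSP sets the attribute once probing succeeds, and any later veto (IOMMU off, RMP table missing) clears it. A self-contained model of the resulting set/clear/has flow, written as an ordinary C program that mirrors the kernel names but is not kernel code:

```c
#include <stdbool.h>
#include <stdio.h>

enum cc_attr { CC_ATTR_HOST_SEV_SNP };

/* Mirrors the cc_attr_flags bitfield added in the coco core above. */
static struct {
	unsigned long long host_sev_snp : 1;
} cc_flags;

static void cc_platform_set(enum cc_attr attr)
{
	if (attr == CC_ATTR_HOST_SEV_SNP)
		cc_flags.host_sev_snp = 1;
}

static void cc_platform_clear(enum cc_attr attr)
{
	if (attr == CC_ATTR_HOST_SEV_SNP)
		cc_flags.host_sev_snp = 0;
}

static bool cc_platform_has(enum cc_attr attr)
{
	return attr == CC_ATTR_HOST_SEV_SNP && cc_flags.host_sev_snp;
}

int main(void)
{
	cc_platform_set(CC_ATTR_HOST_SEV_SNP);		/* BSP probe succeeded */
	printf("SNP host: %d\n", cc_platform_has(CC_ATTR_HOST_SEV_SNP));

	cc_platform_clear(CC_ATTR_HOST_SEV_SNP);	/* e.g. IOMMU vetoed SNP */
	printf("SNP host: %d\n", cc_platform_has(CC_ATTR_HOST_SEV_SNP));
	return 0;
}
```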