Commit 2da68a77 authored by Linus Torvalds

Merge tag 'x86_sgx_for_6.2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 sgx updates from Dave Hansen:
 "The biggest deal in this series is support for a new hardware feature
  that allows enclaves to detect and mitigate single-stepping attacks.

  There's also a minor performance tweak and a little piece of the
  kmap_atomic() -> kmap_local() transition.

  Summary:

   - Introduce a new SGX feature (Asynchronous Exit Notification) for
     bare-metal enclaves and KVM guests to mitigate single-step attacks

   - Increase batching to speed up enclave release

   - Replace kmap/kunmap_atomic() calls"

* tag 'x86_sgx_for_6.2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/sgx: Replace kmap/kunmap_atomic() calls
  KVM/VMX: Allow exposing EDECCSSA user leaf function to KVM guest
  x86/sgx: Allow enclaves to use Asynchronous Exit Notification
  x86/sgx: Reduce delay and interference of enclave release
parents c1f0fcd8 89e927bb
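
For context on how the new attribute is consumed: an enclave opts in to AEX-Notify by setting bit 10 of the SECS ATTRIBUTES field before enclave creation, and the kernel now allows that bit for ordinary enclaves via SGX_ATTR_UNPRIV_MASK (see the sgx.h hunk below). A minimal userspace sketch, not part of this series; the helper name is illustrative:

#include <stdint.h>

/* Bit 10 of SECS.ATTRIBUTES, matching SGX_ATTR_ASYNC_EXIT_NOTIFY below. */
#define SGX_ATTR_ASYNC_EXIT_NOTIFY	(1ULL << 10)

/* Illustrative helper: flag the SECS an enclave runtime is about to pass
 * to SGX_IOC_ENCLAVE_CREATE. No extra privilege is needed because the
 * kernel includes this bit in SGX_ATTR_UNPRIV_MASK. */
static void secs_enable_aex_notify(uint64_t *secs_attributes)
{
	*secs_attributes |= SGX_ATTR_ASYNC_EXIT_NOTIFY;
}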
@@ -304,6 +304,7 @@
 #define X86_FEATURE_UNRET		(11*32+15) /* "" AMD BTB untrain return */
 #define X86_FEATURE_USE_IBPB_FW		(11*32+16) /* "" Use IBPB during runtime firmware calls */
 #define X86_FEATURE_RSB_VMEXIT_LITE	(11*32+17) /* "" Fill RSB on VM exit when EIBRS is enabled */
+#define X86_FEATURE_SGX_EDECCSSA	(11*32+18) /* "" SGX EDECCSSA user leaf function */
 #define X86_FEATURE_MSR_TSX_CTRL	(11*32+20) /* "" MSR IA32_TSX_CTRL (Intel) implemented */
...
@@ -115,17 +115,36 @@ enum sgx_miscselect {
  * %SGX_ATTR_EINITTOKENKEY:	Allow to use token signing key that is used to
  *				sign cryptographic tokens that can be passed to
  *				EINIT as an authorization to run an enclave.
+ * %SGX_ATTR_ASYNC_EXIT_NOTIFY:	Allow enclaves to be notified after an
+ *				asynchronous exit has occurred.
  */
 enum sgx_attribute {
 	SGX_ATTR_INIT		   = BIT(0),
 	SGX_ATTR_DEBUG		   = BIT(1),
 	SGX_ATTR_MODE64BIT	   = BIT(2),
+	/* BIT(3) is reserved */
 	SGX_ATTR_PROVISIONKEY	   = BIT(4),
 	SGX_ATTR_EINITTOKENKEY	   = BIT(5),
+	/* BIT(6) is for CET */
 	SGX_ATTR_KSS		   = BIT(7),
+	/* BIT(8) is reserved */
+	/* BIT(9) is reserved */
+	SGX_ATTR_ASYNC_EXIT_NOTIFY = BIT(10),
 };

-#define SGX_ATTR_RESERVED_MASK	(BIT_ULL(3) | BIT_ULL(6) | GENMASK_ULL(63, 8))
+#define SGX_ATTR_RESERVED_MASK	(BIT_ULL(3) | \
+				 BIT_ULL(6) | \
+				 BIT_ULL(8) | \
+				 BIT_ULL(9) | \
+				 GENMASK_ULL(63, 11))
+
+#define SGX_ATTR_UNPRIV_MASK	(SGX_ATTR_DEBUG | \
+				 SGX_ATTR_MODE64BIT | \
+				 SGX_ATTR_KSS | \
+				 SGX_ATTR_ASYNC_EXIT_NOTIFY)
+
+#define SGX_ATTR_PRIV_MASK	(SGX_ATTR_PROVISIONKEY | \
+				 SGX_ATTR_EINITTOKENKEY)

 /**
  * struct sgx_secs - SGX Enclave Control Structure (SECS)
...
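
As a quick cross-check of the new mask layout (a standalone C11 sketch, not from the commit, with the bit helpers re-derived locally): the three masks are pairwise disjoint, and together they cover every attribute bit except SGX_ATTR_INIT (bit 0).

/* Standalone sanity sketch: assert the masks partition bits 63:1. */
#define BIT_ULL(n)		(1ULL << (n))
#define GENMASK_ULL(h, l)	(((~0ULL) >> (63 - (h))) & ((~0ULL) << (l)))

#define SGX_ATTR_RESERVED_MASK	(BIT_ULL(3) | BIT_ULL(6) | BIT_ULL(8) | \
				 BIT_ULL(9) | GENMASK_ULL(63, 11))
#define SGX_ATTR_UNPRIV_MASK	(BIT_ULL(1) | BIT_ULL(2) | BIT_ULL(7) | \
				 BIT_ULL(10))
#define SGX_ATTR_PRIV_MASK	(BIT_ULL(4) | BIT_ULL(5))

_Static_assert((SGX_ATTR_UNPRIV_MASK & SGX_ATTR_PRIV_MASK) == 0,
	       "priv/unpriv masks overlap");
_Static_assert(((SGX_ATTR_UNPRIV_MASK | SGX_ATTR_PRIV_MASK) &
		SGX_ATTR_RESERVED_MASK) == 0,
	       "an allowed attribute landed in the reserved mask");
_Static_assert((SGX_ATTR_UNPRIV_MASK | SGX_ATTR_PRIV_MASK |
		SGX_ATTR_RESERVED_MASK) == GENMASK_ULL(63, 1),
	       "masks plus SGX_ATTR_INIT must cover all 64 bits");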
@@ -75,6 +75,7 @@ static const struct cpuid_dep cpuid_deps[] = {
 	{ X86_FEATURE_SGX_LC,		X86_FEATURE_SGX	      },
 	{ X86_FEATURE_SGX1,		X86_FEATURE_SGX	      },
 	{ X86_FEATURE_SGX2,		X86_FEATURE_SGX1      },
+	{ X86_FEATURE_SGX_EDECCSSA,	X86_FEATURE_SGX1      },
 	{ X86_FEATURE_XFD,		X86_FEATURE_XSAVES    },
 	{ X86_FEATURE_XFD,		X86_FEATURE_XGETBV1   },
 	{ X86_FEATURE_AMX_TILE,		X86_FEATURE_XFD	      },
...
@@ -40,6 +40,7 @@ static const struct cpuid_bit cpuid_bits[] = {
 	{ X86_FEATURE_PER_THREAD_MBA,	CPUID_ECX,  0, 0x00000010, 3 },
 	{ X86_FEATURE_SGX1,		CPUID_EAX,  0, 0x00000012, 0 },
 	{ X86_FEATURE_SGX2,		CPUID_EAX,  1, 0x00000012, 0 },
+	{ X86_FEATURE_SGX_EDECCSSA,	CPUID_EAX, 11, 0x00000012, 0 },
 	{ X86_FEATURE_HW_PSTATE,	CPUID_EDX,  7, 0x80000007, 0 },
 	{ X86_FEATURE_CPB,		CPUID_EDX,  9, 0x80000007, 0 },
 	{ X86_FEATURE_PROC_FEEDBACK,	CPUID_EDX, 11, 0x80000007, 0 },
...
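
The table entry above maps the scattered feature bit to CPUID.(EAX=0x12, ECX=0):EAX[11]. A userspace probe of the same bit could look like this (an illustrative sketch, assuming GCC/Clang on x86 and that leaf 0x12 is meaningful, i.e. SGX is enumerated):

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* CPUID leaf 0x12, sub-leaf 0: SGX capability enumeration. */
	if (!__get_cpuid_count(0x12, 0, &eax, &ebx, &ecx, &edx))
		return 1;	/* leaf not available on this CPU */

	/* EAX bit 11 is the EDECCSSA user leaf function, per scattered.c. */
	printf("EDECCSSA: %s\n", (eax & (1u << 11)) ? "supported" : "not supported");
	return 0;
}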
@@ -160,8 +160,8 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
 		return ret;

 	pginfo.addr = encl_page->desc & PAGE_MASK;
-	pginfo.contents = (unsigned long)kmap_atomic(b.contents);
-	pcmd_page = kmap_atomic(b.pcmd);
+	pginfo.contents = (unsigned long)kmap_local_page(b.contents);
+	pcmd_page = kmap_local_page(b.pcmd);
 	pginfo.metadata = (unsigned long)pcmd_page + b.pcmd_offset;

 	if (secs_page)
@@ -187,8 +187,8 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
 	 */
 	pcmd_page_empty = !memchr_inv(pcmd_page, 0, PAGE_SIZE);

-	kunmap_atomic(pcmd_page);
-	kunmap_atomic((void *)(unsigned long)pginfo.contents);
+	kunmap_local(pcmd_page);
+	kunmap_local((void *)(unsigned long)pginfo.contents);

 	get_page(b.pcmd);
 	sgx_encl_put_backing(&b);
@@ -197,10 +197,10 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
 	if (pcmd_page_empty && !reclaimer_writing_to_pcmd(encl, pcmd_first_page)) {
 		sgx_encl_truncate_backing_page(encl, PFN_DOWN(page_pcmd_off));
-		pcmd_page = kmap_atomic(b.pcmd);
+		pcmd_page = kmap_local_page(b.pcmd);
 		if (memchr_inv(pcmd_page, 0, PAGE_SIZE))
 			pr_warn("PCMD page not empty after truncate.\n");
-		kunmap_atomic(pcmd_page);
+		kunmap_local(pcmd_page);
 	}

 	put_page(b.pcmd);
@@ -680,11 +680,15 @@ const struct vm_operations_struct sgx_vm_ops = {
 void sgx_encl_release(struct kref *ref)
 {
 	struct sgx_encl *encl = container_of(ref, struct sgx_encl, refcount);
+	unsigned long max_page_index = PFN_DOWN(encl->base + encl->size - 1);
 	struct sgx_va_page *va_page;
 	struct sgx_encl_page *entry;
-	unsigned long index;
+	unsigned long count = 0;
+	XA_STATE(xas, &encl->page_array, PFN_DOWN(encl->base));

-	xa_for_each(&encl->page_array, index, entry) {
+	xas_lock(&xas);
+	xas_for_each(&xas, entry, max_page_index) {
 		if (entry->epc_page) {
 			/*
 			 * The page and its radix tree entry cannot be freed
@@ -699,9 +703,20 @@ void sgx_encl_release(struct kref *ref)
 		}

 		kfree(entry);
-		/* Invoke scheduler to prevent soft lockups. */
-		cond_resched();
+		/*
+		 * Invoke scheduler on every XA_CHECK_SCHED iteration
+		 * to prevent soft lockups.
+		 */
+		if (!(++count % XA_CHECK_SCHED)) {
+			xas_pause(&xas);
+			xas_unlock(&xas);
+			cond_resched();
+			xas_lock(&xas);
+		}
 	}
+	xas_unlock(&xas);

 	xa_destroy(&encl->page_array);
...
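
Distilled from the sgx_encl_release() hunk above, the batching pattern is: walk the XArray under its lock, and every XA_CHECK_SCHED entries pause the iterator, drop the lock, and yield. A schematic sketch of just that pattern (release_one() is a stand-in for the per-entry teardown, not a real kernel function):

	XA_STATE(xas, &encl->page_array, PFN_DOWN(encl->base));
	unsigned long count = 0;

	xas_lock(&xas);
	xas_for_each(&xas, entry, max_page_index) {
		release_one(entry);		/* stand-in for per-entry teardown */

		if (!(++count % XA_CHECK_SCHED)) {
			xas_pause(&xas);	/* iteration restarts safely later */
			xas_unlock(&xas);	/* never schedule with the lock held */
			cond_resched();
			xas_lock(&xas);
		}
	}
	xas_unlock(&xas);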
@@ -111,7 +111,7 @@ static int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs)
 	encl->base = secs->base;
 	encl->size = secs->size;
 	encl->attributes = secs->attributes;
-	encl->attributes_mask = SGX_ATTR_DEBUG | SGX_ATTR_MODE64BIT | SGX_ATTR_KSS;
+	encl->attributes_mask = SGX_ATTR_UNPRIV_MASK;

 	/* Set only after completion, as encl->lock has not been taken. */
 	set_bit(SGX_ENCL_CREATED, &encl->flags);
@@ -221,11 +221,11 @@ static int __sgx_encl_add_page(struct sgx_encl *encl,
 	pginfo.secs = (unsigned long)sgx_get_epc_virt_addr(encl->secs.epc_page);
 	pginfo.addr = encl_page->desc & PAGE_MASK;
 	pginfo.metadata = (unsigned long)secinfo;
-	pginfo.contents = (unsigned long)kmap_atomic(src_page);
+	pginfo.contents = (unsigned long)kmap_local_page(src_page);

 	ret = __eadd(&pginfo, sgx_get_epc_virt_addr(epc_page));

-	kunmap_atomic((void *)pginfo.contents);
+	kunmap_local((void *)pginfo.contents);
 	put_page(src_page);

 	return ret ? -EIO : 0;
...
@@ -165,17 +165,17 @@ static int __sgx_encl_ewb(struct sgx_epc_page *epc_page, void *va_slot,
 	pginfo.addr = 0;
 	pginfo.secs = 0;

-	pginfo.contents = (unsigned long)kmap_atomic(backing->contents);
-	pginfo.metadata = (unsigned long)kmap_atomic(backing->pcmd) +
+	pginfo.contents = (unsigned long)kmap_local_page(backing->contents);
+	pginfo.metadata = (unsigned long)kmap_local_page(backing->pcmd) +
 			  backing->pcmd_offset;

 	ret = __ewb(&pginfo, sgx_get_epc_virt_addr(epc_page), va_slot);

 	set_page_dirty(backing->pcmd);
 	set_page_dirty(backing->contents);

-	kunmap_atomic((void *)(unsigned long)(pginfo.metadata -
+	kunmap_local((void *)(unsigned long)(pginfo.metadata -
 					      backing->pcmd_offset));
-	kunmap_atomic((void *)(unsigned long)pginfo.contents);
+	kunmap_local((void *)(unsigned long)pginfo.contents);

 	return ret;
 }
...
@@ -665,7 +665,7 @@ void kvm_set_cpu_caps(void)
 	);

 	kvm_cpu_cap_init_scattered(CPUID_12_EAX,
-		SF(SGX1) | SF(SGX2)
+		SF(SGX1) | SF(SGX2) | SF(SGX_EDECCSSA)
 	);

 	kvm_cpu_cap_mask(CPUID_8000_0001_ECX,
@@ -1047,9 +1047,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
 		 * userspace. ATTRIBUTES.XFRM is not adjusted as userspace is
 		 * expected to derive it from supported XCR0.
 		 */
-		entry->eax &= SGX_ATTR_DEBUG | SGX_ATTR_MODE64BIT |
-			      SGX_ATTR_PROVISIONKEY | SGX_ATTR_EINITTOKENKEY |
-			      SGX_ATTR_KSS;
+		entry->eax &= SGX_ATTR_PRIV_MASK | SGX_ATTR_UNPRIV_MASK;
 		entry->ebx &= 0;
 		break;
 	/* Intel PT */
...
@@ -23,6 +23,7 @@ enum kvm_only_cpuid_leafs {
 /* Intel-defined SGX sub-features, CPUID level 0x12 (EAX). */
 #define KVM_X86_FEATURE_SGX1		KVM_X86_FEATURE(CPUID_12_EAX, 0)
 #define KVM_X86_FEATURE_SGX2		KVM_X86_FEATURE(CPUID_12_EAX, 1)
+#define KVM_X86_FEATURE_SGX_EDECCSSA	KVM_X86_FEATURE(CPUID_12_EAX, 11)

 struct cpuid_reg {
 	u32 function;
@@ -78,6 +79,8 @@ static __always_inline u32 __feature_translate(int x86_feature)
 		return KVM_X86_FEATURE_SGX1;
 	else if (x86_feature == X86_FEATURE_SGX2)
 		return KVM_X86_FEATURE_SGX2;
+	else if (x86_feature == X86_FEATURE_SGX_EDECCSSA)
+		return KVM_X86_FEATURE_SGX_EDECCSSA;

 	return x86_feature;
 }
...