Commit a1375562 authored by Linus Torvalds

Merge tag 'x86_urgent_for_v6.0-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Dave Hansen:

 - A performance fix for recent large AMD systems that avoids an ancient
   cpu idle hardware workaround

 - A new Intel model number. Folks like to get these upstream as soon as
   possible so that each developer doing feature development doesn't
   need to carry their own #define

 - SGX fixes for a userspace crash and a rare kernel warning

* tag 'x86_urgent_for_v6.0-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  ACPI: processor idle: Practically limit "Dummy wait" workaround to old Intel systems
  x86/sgx: Handle VA page allocation failure for EAUG on PF.
  x86/sgx: Do not fail on incomplete sanitization on premature stop of ksgxd
  x86/cpu: Add CPU model numbers for Meteor Lake
parents 3800a713 e400ad8b
@@ -115,6 +115,9 @@
 #define INTEL_FAM6_RAPTORLAKE_P	0xBA
 #define INTEL_FAM6_RAPTORLAKE_S	0xBF
 
+#define INTEL_FAM6_METEORLAKE		0xAC
+#define INTEL_FAM6_METEORLAKE_L	0xAA
+
 /* "Small Core" Processors (Atom) */
 
 #define INTEL_FAM6_ATOM_BONNELL	0x1C /* Diamondville, Pineview */
...
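For context, a hedged, stand-alone sketch (not part of this commit): the INTEL_FAM6_* values above name the CPUID leaf 1 display model on family-6 parts, and kernel-side consumers normally match them through x86_match_cpu() tables rather than open-coding the decode. The user-space program below decodes the same display model, assuming GCC/Clang's cpuid.h on x86.

/* Hedged sketch: decode the CPUID leaf 1 display model that the
 * INTEL_FAM6_* defines name. Illustration only, not kernel code. */
#include <stdio.h>
#include <cpuid.h>

#define INTEL_FAM6_METEORLAKE	0xAC
#define INTEL_FAM6_METEORLAKE_L	0xAA

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int family, model;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;

	family = (eax >> 8) & 0xf;
	model  = (eax >> 4) & 0xf;
	if (family == 0x6 || family == 0xf)
		model |= ((eax >> 16) & 0xf) << 4;	/* extended model bits */

	if (family == 0x6 &&
	    (model == INTEL_FAM6_METEORLAKE || model == INTEL_FAM6_METEORLAKE_L))
		printf("Meteor Lake, model 0x%x\n", model);
	else
		printf("family 0x%x, model 0x%x\n", family, model);
	return 0;
}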
@@ -344,8 +344,11 @@ static vm_fault_t sgx_encl_eaug_page(struct vm_area_struct *vma,
 	}
 
 	va_page = sgx_encl_grow(encl, false);
-	if (IS_ERR(va_page))
+	if (IS_ERR(va_page)) {
+		if (PTR_ERR(va_page) == -EBUSY)
+			vmret = VM_FAULT_NOPAGE;
 		goto err_out_epc;
+	}
 
 	if (va_page)
 		list_add(&va_page->list, &encl->va_pages);
...
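A side note on the hunk above: sgx_encl_grow() reports failure through the kernel's error-pointer convention, and the fix maps -EBUSY to VM_FAULT_NOPAGE so the page fault is simply retried instead of crashing the enclave. Below is a minimal user-space sketch of that ERR_PTR/IS_ERR/PTR_ERR convention, mirroring (not copying) include/linux/err.h; the VM_FAULT_NOPAGE outcome is only simulated with a printf.

/* Hedged sketch of the kernel's error-pointer convention: an errno value is
 * encoded in the last page of the address space. Illustration only. */
#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	/* Simulate sgx_encl_grow() failing because no VA page is free yet. */
	void *va_page = ERR_PTR(-EBUSY);

	if (IS_ERR(va_page)) {
		if (PTR_ERR(va_page) == -EBUSY)
			printf("would return VM_FAULT_NOPAGE: fault gets retried\n");
		return 1;
	}
	return 0;
}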
@@ -49,9 +49,13 @@ static LIST_HEAD(sgx_dirty_page_list);
  * Reset post-kexec EPC pages to the uninitialized state. The pages are removed
  * from the input list, and made available for the page allocator. SECS pages
  * prepending their children in the input list are left intact.
+ *
+ * Return 0 when sanitization was successful or kthread was stopped, and the
+ * number of unsanitized pages otherwise.
  */
-static void __sgx_sanitize_pages(struct list_head *dirty_page_list)
+static unsigned long __sgx_sanitize_pages(struct list_head *dirty_page_list)
 {
+	unsigned long left_dirty = 0;
 	struct sgx_epc_page *page;
 	LIST_HEAD(dirty);
 	int ret;
@@ -59,7 +63,7 @@ static void __sgx_sanitize_pages(struct list_head *dirty_page_list)
 	/* dirty_page_list is thread-local, no need for a lock: */
 	while (!list_empty(dirty_page_list)) {
 		if (kthread_should_stop())
-			return;
+			return 0;
 
 		page = list_first_entry(dirty_page_list, struct sgx_epc_page, list);
@@ -92,12 +96,14 @@ static void __sgx_sanitize_pages(struct list_head *dirty_page_list)
 		} else {
 			/* The page is not yet clean - move to the dirty list. */
 			list_move_tail(&page->list, &dirty);
+			left_dirty++;
 		}
 
 		cond_resched();
 	}
 
 	list_splice(&dirty, dirty_page_list);
+	return left_dirty;
 }
 
 static bool sgx_reclaimer_age(struct sgx_epc_page *epc_page)
@@ -395,10 +401,7 @@ static int ksgxd(void *p)
 	 * required for SECS pages, whose child pages blocked EREMOVE.
 	 */
 	__sgx_sanitize_pages(&sgx_dirty_page_list);
-	__sgx_sanitize_pages(&sgx_dirty_page_list);
-
-	/* sanity check: */
-	WARN_ON(!list_empty(&sgx_dirty_page_list));
+	WARN_ON(__sgx_sanitize_pages(&sgx_dirty_page_list));
 
 	while (!kthread_should_stop()) {
 		if (try_to_freeze())
...
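The net effect of the hunks above: __sgx_sanitize_pages() now reports how many pages it could not clean, returning 0 when the kthread is asked to stop early, so ksgxd() only warns when the second pass genuinely leaves work behind. Below is a hedged user-space sketch of that contract; the EPC page list, the retried-EREMOVE case, and the kthread stop condition are all simulated with hypothetical names (sanitize_pass, should_stop).

/* Hedged sketch: a pass returns the number of entries it could not clean
 * (0 when asked to stop early); only the second pass's leftovers warn. */
#include <stdio.h>
#include <stdbool.h>

static bool should_stop;	/* stands in for kthread_should_stop() */

/* One sanitization pass; returns the number of entries still dirty. */
static unsigned long sanitize_pass(int *needs_retry, int n)
{
	unsigned long left_dirty = 0;

	for (int i = 0; i < n; i++) {
		if (should_stop)
			return 0;	/* premature stop is not an error */
		if (needs_retry[i]) {
			needs_retry[i]--;	/* e.g. SECS page blocked by children */
			left_dirty++;
		}
	}
	return left_dirty;
}

int main(void)
{
	int pages[] = { 0, 1, 0, 1 };	/* 1 = removal must be retried once */

	sanitize_pass(pages, 4);	/* first pass */
	if (sanitize_pass(pages, 4))	/* second pass, WARN_ON() equivalent */
		fprintf(stderr, "WARN: unsanitized pages remain\n");
	return 0;
}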
@@ -531,10 +531,27 @@ static void wait_for_freeze(void)
 	/* No delay is needed if we are in guest */
 	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
 		return;
+	/*
+	 * Modern (>=Nehalem) Intel systems use ACPI via intel_idle,
+	 * not this code. Assume that any Intel systems using this
+	 * are ancient and may need the dummy wait. This also assumes
+	 * that the motivating chipset issue was Intel-only.
+	 */
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+		return;
 #endif
-	/* Dummy wait op - must do something useless after P_LVL2 read
-	   because chipsets cannot guarantee that STPCLK# signal
-	   gets asserted in time to freeze execution properly. */
+	/*
+	 * Dummy wait op - must do something useless after P_LVL2 read
+	 * because chipsets cannot guarantee that STPCLK# signal gets
+	 * asserted in time to freeze execution properly
+	 *
+	 * This workaround has been in place since the original ACPI
+	 * implementation was merged, circa 2002.
+	 *
+	 * If a profile is pointing to this instruction, please first
+	 * consider moving your system to a more modern idle
+	 * mechanism.
+	 */
 	inl(acpi_gbl_FADT.xpm_timer_block.address);
 }
...
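For reference, the new vendor check keys off boot_cpu_data.x86_vendor, which the kernel ultimately derives from CPUID leaf 0's vendor string ("GenuineIntel", "AuthenticAMD", and so on). A hedged, stand-alone sketch reading that same vendor string, assuming GCC/Clang's cpuid.h on x86; the skip/keep messages only mirror the decision made in wait_for_freeze(), they are not kernel behavior.

/* Hedged sketch: read the CPUID leaf 0 vendor string that the new
 * x86_vendor check is based on. Illustration only. */
#include <stdio.h>
#include <string.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	char vendor[13] = { 0 };

	if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
		return 1;

	/* Vendor string is stored in EBX, EDX, ECX order. */
	memcpy(vendor + 0, &ebx, 4);
	memcpy(vendor + 4, &edx, 4);
	memcpy(vendor + 8, &ecx, 4);

	if (strcmp(vendor, "GenuineIntel") != 0)
		printf("%s: dummy wait would now be skipped\n", vendor);
	else
		printf("%s: dummy wait kept (possibly an old chipset)\n", vendor);
	return 0;
}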