Commit 6e74b125 authored by Alexander Shishkin, committed by Ingo Molnar

x86/sev: Move sev_setup_arch() to mem_encrypt.c

Since commit:

  4d96f910 ("x86/sev: Replace occurrences of sev_active() with cc_platform_has()")

... the SWIOTLB bounce buffer size adjustment and restricted virtio memory
setting also inadvertently apply to TDX: the code is using
cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) as a gatekeeping condition,
which is also true for TDX, and this is also what we want.

To reflect this, move the corresponding code to generic mem_encrypt.c.

No functional changes intended.
Signed-off-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
Link: https://lore.kernel.org/r/20231010145220.3960055-2-alexander.shishkin@linux.intel.com
parent c9babd5d
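
For context on the gating condition named in the commit message: CC_ATTR_GUEST_MEM_ENCRYPT is reported as true by both SEV and TDX guests, so anything behind this check runs on both. A minimal sketch of such a gate, not taken from the patch (the function name is made up for illustration):

#include <linux/cc_platform.h>

static void __init example_confidential_guest_setup(void)
{
        /* Plain hosts and unencrypted guests: nothing to do. */
        if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
                return;

        /* Reached on SEV and TDX guests alike; encrypted-guest-only setup goes here. */
}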
@@ -19,8 +19,10 @@
 #ifdef CONFIG_X86_MEM_ENCRYPT
 void __init mem_encrypt_init(void);
+void __init mem_encrypt_setup_arch(void);
 #else
 static inline void mem_encrypt_init(void) { }
+static inline void __init mem_encrypt_setup_arch(void) { }
 #endif
 
 #ifdef CONFIG_AMD_MEM_ENCRYPT
@@ -43,7 +45,6 @@ void __init sme_map_bootdata(char *real_mode_data);
 void __init sme_unmap_bootdata(char *real_mode_data);
 void __init sme_early_init(void);
-void __init sev_setup_arch(void);
 void __init sme_encrypt_kernel(struct boot_params *bp);
 void __init sme_enable(struct boot_params *bp);
@@ -73,7 +74,6 @@ static inline void __init sme_map_bootdata(char *real_mode_data) { }
 static inline void __init sme_unmap_bootdata(char *real_mode_data) { }
 static inline void __init sme_early_init(void) { }
-static inline void __init sev_setup_arch(void) { }
 static inline void __init sme_encrypt_kernel(struct boot_params *bp) { }
 static inline void __init sme_enable(struct boot_params *bp) { }
@@ -1124,7 +1124,7 @@ void __init setup_arch(char **cmdline_p)
         * Needs to run after memblock setup because it needs the physical
         * memory size.
         */
-       sev_setup_arch();
+       mem_encrypt_setup_arch();
 
        efi_fake_memmap();
        efi_find_mirror();
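
The ordering comment in the hunk above is the key constraint: memblock_phys_mem_size() only reports a meaningful total once memblock has been populated; called earlier, the SWIOTLB size would not reflect the full memory map and would simply fall back to the 64MB floor. A simplified sketch of the intended call order, not the actual setup_arch() body (it assumes e820__memblock_setup() is the step that fills memblock on x86):

static void __init setup_arch_ordering_sketch(void)
{
        e820__memblock_setup();         /* memblock now knows physical memory */
        mem_encrypt_setup_arch();       /* sizes SWIOTLB from memblock_phys_mem_size() */
        efi_fake_memmap();              /* ...the rest of setup_arch() follows */
}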
@@ -12,6 +12,7 @@
 #include <linux/swiotlb.h>
 #include <linux/cc_platform.h>
 #include <linux/mem_encrypt.h>
+#include <linux/virtio_anchor.h>
 
 /* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
 bool force_dma_unencrypted(struct device *dev)
@@ -86,3 +87,36 @@ void __init mem_encrypt_init(void)
        print_mem_encrypt_feature_info();
 }
+
+void __init mem_encrypt_setup_arch(void)
+{
+       phys_addr_t total_mem = memblock_phys_mem_size();
+       unsigned long size;
+
+       if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
+               return;
+
+       /*
+        * For SEV and TDX, all DMA has to occur via shared/unencrypted pages.
+        * Kernel uses SWIOTLB to make this happen without changing device
+        * drivers. However, depending on the workload being run, the
+        * default 64MB of SWIOTLB may not be enough and SWIOTLB may
+        * run out of buffers for DMA, resulting in I/O errors and/or
+        * performance degradation especially with high I/O workloads.
+        *
+        * Adjust the default size of SWIOTLB using a percentage of guest
+        * memory for SWIOTLB buffers. Also, as the SWIOTLB bounce buffer
+        * memory is allocated from low memory, ensure that the adjusted size
+        * is within the limits of low available memory.
+        *
+        * The percentage of guest memory used here for SWIOTLB buffers
+        * is more of an approximation of the static adjustment which
+        * 64MB for <1G, and ~128M to 256M for 1G-to-4G, i.e., the 6%
+        */
+       size = total_mem * 6 / 100;
+       size = clamp_val(size, IO_TLB_DEFAULT_SIZE, SZ_1G);
+       swiotlb_adjust_size(size);
+
+       /* Set restricted memory access for virtio. */
+       virtio_set_mem_acc_cb(virtio_require_restricted_mem_acc);
+}
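
To make the 6% heuristic and the clamp in the new function concrete: a 1GB guest stays at the 64MB default, a 4GB guest gets roughly 245MB, and a 32GB guest is capped at 1GB. Below is a standalone user-space sketch (not kernel code) that reproduces the calculation for those guest sizes, assuming the 64MB lower bound of IO_TLB_DEFAULT_SIZE and the SZ_1G cap:

#include <stdio.h>

int main(void)
{
        const unsigned long long MB = 1ULL << 20, GB = 1ULL << 30;
        const unsigned long long lo = 64 * MB;  /* mirrors IO_TLB_DEFAULT_SIZE */
        const unsigned long long hi = 1 * GB;   /* mirrors SZ_1G */
        const unsigned long long guests[] = { 1 * GB, 4 * GB, 32 * GB };

        for (int i = 0; i < 3; i++) {
                unsigned long long size = guests[i] * 6 / 100;

                /* Same bounds clamp_val() applies in mem_encrypt_setup_arch(). */
                if (size < lo)
                        size = lo;
                if (size > hi)
                        size = hi;
                printf("guest %6llu MB -> SWIOTLB %4llu MB\n",
                       guests[i] / MB, size / MB);
        }
        return 0;
}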
@@ -20,7 +20,6 @@
 #include <linux/bitops.h>
 #include <linux/dma-mapping.h>
 #include <linux/virtio_config.h>
-#include <linux/virtio_anchor.h>
 #include <linux/cc_platform.h>
 
 #include <asm/tlbflush.h>
@@ -215,40 +214,6 @@ void __init sme_map_bootdata(char *real_mode_data)
        __sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, true);
 }
 
-void __init sev_setup_arch(void)
-{
-       phys_addr_t total_mem = memblock_phys_mem_size();
-       unsigned long size;
-
-       if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
-               return;
-
-       /*
-        * For SEV, all DMA has to occur via shared/unencrypted pages.
-        * SEV uses SWIOTLB to make this happen without changing device
-        * drivers. However, depending on the workload being run, the
-        * default 64MB of SWIOTLB may not be enough and SWIOTLB may
-        * run out of buffers for DMA, resulting in I/O errors and/or
-        * performance degradation especially with high I/O workloads.
-        *
-        * Adjust the default size of SWIOTLB for SEV guests using
-        * a percentage of guest memory for SWIOTLB buffers.
-        * Also, as the SWIOTLB bounce buffer memory is allocated
-        * from low memory, ensure that the adjusted size is within
-        * the limits of low available memory.
-        *
-        * The percentage of guest memory used here for SWIOTLB buffers
-        * is more of an approximation of the static adjustment which
-        * 64MB for <1G, and ~128M to 256M for 1G-to-4G, i.e., the 6%
-        */
-       size = total_mem * 6 / 100;
-       size = clamp_val(size, IO_TLB_DEFAULT_SIZE, SZ_1G);
-       swiotlb_adjust_size(size);
-
-       /* Set restricted memory access for virtio. */
-       virtio_set_mem_acc_cb(virtio_require_restricted_mem_acc);
-}
-
 static unsigned long pg_level_to_pfn(int level, pte_t *kpte, pgprot_t *ret_prot)
 {
        unsigned long pfn = 0;