Commit 0459ff48 authored by Michael Kelley, committed by Wei Liu

swiotlb: Remove bounce buffer remapping for Hyper-V

With changes to how Hyper-V guest VMs flip memory between private
(encrypted) and shared (decrypted), creating a second kernel virtual
mapping for shared memory is no longer necessary. Everything needed
for the transition to shared is handled by set_memory_decrypted().

As such, remove swiotlb_unencrypted_base and the associated
code.
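
For illustration, a simplified sketch of the two schemes (distilled
from the hunks below; not literal kernel code):

	/* Old scheme (removed by this patch): the shared pages were
	 * reached through a second virtual mapping of the bounce
	 * buffer at its alias above the shared GPA boundary.
	 */
	phys_addr_t paddr = mem->start + swiotlb_unencrypted_base;
	mem->vaddr = memremap(paddr, bytes, MEMREMAP_WB);

	/* New scheme: set_memory_decrypted() marks the pages shared,
	 * after which the existing direct-map address remains usable,
	 * so the default mem->vaddr is used as-is and no remap is
	 * needed.
	 */
	set_memory_decrypted((unsigned long)mem->vaddr, bytes >> PAGE_SHIFT);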
Signed-off-by: Michael Kelley <mikelley@microsoft.com>
Acked-by: Christoph Hellwig <hch@lst.de>
Acked-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/1679838727-87310-8-git-send-email-mikelley@microsoft.com
Signed-off-by: Wei Liu <wei.liu@kernel.org>
parent 21eb596f
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -18,7 +18,6 @@
 #include <linux/kexec.h>
 #include <linux/i8253.h>
 #include <linux/random.h>
-#include <linux/swiotlb.h>
 #include <asm/processor.h>
 #include <asm/hypervisor.h>
 #include <asm/hyperv-tlfs.h>
@@ -408,12 +407,8 @@ static void __init ms_hyperv_init_platform(void)
 		pr_info("Hyper-V: Isolation Config: Group A 0x%x, Group B 0x%x\n",
 			ms_hyperv.isolation_config_a, ms_hyperv.isolation_config_b);
 
-		if (hv_get_isolation_type() == HV_ISOLATION_TYPE_SNP) {
+		if (hv_get_isolation_type() == HV_ISOLATION_TYPE_SNP)
 			static_branch_enable(&isolation_type_snp);
-#ifdef CONFIG_SWIOTLB
-			swiotlb_unencrypted_base = ms_hyperv.shared_gpa_boundary;
-#endif
-		}
 	}
 
 	if (hv_max_functions_eax >= HYPERV_CPUID_NESTED_FEATURES) {
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -180,6 +180,4 @@ static inline bool is_swiotlb_for_alloc(struct device *dev)
 }
 #endif /* CONFIG_DMA_RESTRICTED_POOL */
 
-extern phys_addr_t swiotlb_unencrypted_base;
-
 #endif /* __LINUX_SWIOTLB_H */
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -73,8 +73,6 @@ static bool swiotlb_force_disable;
 
 struct io_tlb_mem io_tlb_default_mem;
 
-phys_addr_t swiotlb_unencrypted_base;
-
 static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT;
 static unsigned long default_nareas;
 
@@ -201,34 +199,6 @@ static inline unsigned long nr_slots(u64 val)
 	return DIV_ROUND_UP(val, IO_TLB_SIZE);
 }
 
-/*
- * Remap swioltb memory in the unencrypted physical address space
- * when swiotlb_unencrypted_base is set. (e.g. for Hyper-V AMD SEV-SNP
- * Isolation VMs).
- */
-#ifdef CONFIG_HAS_IOMEM
-static void *swiotlb_mem_remap(struct io_tlb_mem *mem, unsigned long bytes)
-{
-	void *vaddr = NULL;
-
-	if (swiotlb_unencrypted_base) {
-		phys_addr_t paddr = mem->start + swiotlb_unencrypted_base;
-
-		vaddr = memremap(paddr, bytes, MEMREMAP_WB);
-		if (!vaddr)
-			pr_err("Failed to map the unencrypted memory %pa size %lx.\n",
-			       &paddr, bytes);
-	}
-
-	return vaddr;
-}
-#else
-static void *swiotlb_mem_remap(struct io_tlb_mem *mem, unsigned long bytes)
-{
-	return NULL;
-}
-#endif
-
 /*
  * Early SWIOTLB allocation may be too early to allow an architecture to
  * perform the desired operations.  This function allows the architecture to
@@ -238,18 +208,12 @@ static void *swiotlb_mem_remap(struct io_tlb_mem *mem, unsigned long bytes)
 void __init swiotlb_update_mem_attributes(void)
 {
 	struct io_tlb_mem *mem = &io_tlb_default_mem;
-	void *vaddr;
 	unsigned long bytes;
 
 	if (!mem->nslabs || mem->late_alloc)
 		return;
 
-	vaddr = phys_to_virt(mem->start);
 	bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT);
-	set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
-
-	mem->vaddr = swiotlb_mem_remap(mem, bytes);
-	if (!mem->vaddr)
-		mem->vaddr = vaddr;
+	set_memory_decrypted((unsigned long)mem->vaddr, bytes >> PAGE_SHIFT);
 }
 
@@ -280,13 +244,6 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
 		mem->slots[i].alloc_size = 0;
 	}
 
-	/*
-	 * If swiotlb_unencrypted_base is set, the bounce buffer memory will
-	 * be remapped and cleared in swiotlb_update_mem_attributes.
-	 */
-	if (swiotlb_unencrypted_base)
-		return;
-
 	memset(vaddr, 0, bytes);
 	mem->vaddr = vaddr;
 	return;