Commit 07aa1e78 authored by Michael Ellerman

Merge branch 'topic/mem-encrypt' into next

This branch has some cross-arch patches that are a prerequisite for the
SVM work. They're in a topic branch in case any of the other arch
maintainers want to merge them to resolve conflicts.
parents bc605cd7 5cbdaeef
@@ -925,6 +925,9 @@ config LOCK_EVENT_COUNTS
           the chance of application behavior change because of timing
           differences. The counts are reported via debugfs.
 
+config ARCH_HAS_MEM_ENCRYPT
+        bool
+
 source "kernel/gcov/Kconfig"
 
 source "scripts/gcc-plugins/Kconfig"
...
 # SPDX-License-Identifier: GPL-2.0
-config ARCH_HAS_MEM_ENCRYPT
-        def_bool y
-
 config MMU
         def_bool y
@@ -68,6 +65,7 @@ config S390
         select ARCH_HAS_GCOV_PROFILE_ALL
         select ARCH_HAS_GIGANTIC_PAGE
         select ARCH_HAS_KCOV
+        select ARCH_HAS_MEM_ENCRYPT
         select ARCH_HAS_PTE_SPECIAL
         select ARCH_HAS_SET_MEMORY
         select ARCH_HAS_STRICT_KERNEL_RWX
...
@@ -4,10 +4,7 @@
 #ifndef __ASSEMBLY__
 
-#define sme_me_mask        0ULL
-
-static inline bool sme_active(void) { return false; }
-extern bool sev_active(void);
+static inline bool mem_encrypt_active(void) { return false; }
 
 int set_memory_encrypted(unsigned long addr, int numpages);
 int set_memory_decrypted(unsigned long addr, int numpages);
...
@@ -156,14 +156,9 @@ int set_memory_decrypted(unsigned long addr, int numpages)
 }
 
 /* are we a protected virtualization guest? */
-bool sev_active(void)
-{
-        return is_prot_virt_guest();
-}
-
 bool force_dma_unencrypted(struct device *dev)
 {
-        return sev_active();
+        return is_prot_virt_guest();
 }
 
 /* protected virtualization */
...
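Note: the s390 stub above makes mem_encrypt_active() return false because, from the guest's own point of view, its memory is not encrypted; what Protected Virtualization needs is for DMA buffers to be shared (decrypted) before a device sees them, and force_dma_unencrypted() is the hook the generic DMA code uses for that decision. Below is a minimal sketch of such a caller, assuming the 5.3-era dma-direct and set_memory interfaces; the helper name is hypothetical and not code from this series.

/*
 * Illustrative sketch only (not part of this diff): roughly how a generic
 * dma-direct-style path consults force_dma_unencrypted() when handing a
 * freshly allocated buffer to a device.
 */
#include <linux/dma-direct.h>
#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/set_memory.h>

static void *share_buffer_with_device(struct device *dev, struct page *page,
                                      size_t size)
{
        void *vaddr = page_address(page);

        /* On a protected virtualization guest this now returns true. */
        if (force_dma_unencrypted(dev))
                set_memory_decrypted((unsigned long)vaddr, PFN_UP(size));

        return vaddr;
}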
@@ -68,6 +68,7 @@ config X86
         select ARCH_HAS_FORTIFY_SOURCE
         select ARCH_HAS_GCOV_PROFILE_ALL
         select ARCH_HAS_KCOV                    if X86_64
+        select ARCH_HAS_MEM_ENCRYPT
         select ARCH_HAS_MEMBARRIER_SYNC_CORE
         select ARCH_HAS_PMEM_API                if X86_64
         select ARCH_HAS_PTE_DEVMAP              if X86_64
@@ -1518,9 +1519,6 @@ config X86_CPA_STATISTICS
           helps to determine the effectiveness of preserving large and huge
           page mappings when mapping protections are changed.
 
-config ARCH_HAS_MEM_ENCRYPT
-        def_bool y
-
 config AMD_MEM_ENCRYPT
         bool "AMD Secure Memory Encryption (SME) support"
         depends on X86_64 && CPU_SUP_AMD
...
@@ -92,6 +92,16 @@ early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0;
 
 extern char __start_bss_decrypted[], __end_bss_decrypted[], __start_bss_decrypted_unused[];
 
+static inline bool mem_encrypt_active(void)
+{
+        return sme_me_mask;
+}
+
+static inline u64 sme_get_me_mask(void)
+{
+        return sme_me_mask;
+}
+
 #endif  /* __ASSEMBLY__ */
 
 #endif  /* __X86_MEM_ENCRYPT_H__ */
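mem_encrypt_active() and sme_get_me_mask() are defined in terms of sme_me_mask, which exists only on x86, so they move into the x86 asm header; arch-independent callers keep getting mem_encrypt_active() through <linux/mem_encrypt.h> (see the generic-header hunk further down). As a hedged illustration, x86-only code can still fold the encryption bit into an early mapping like the sketch below; the helper is hypothetical and not part of this series.

/*
 * Illustrative sketch only (not part of this diff): sme_get_me_mask()
 * evaluates to 0 when SME/SEV is not enabled, so unconditionally OR-ing
 * it into an early page-table entry is safe on x86.
 */
#include <asm/mem_encrypt.h>

static unsigned long early_pte_value(unsigned long paddr, unsigned long prot)
{
        return paddr | prot | sme_get_me_mask();
}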
@@ -70,3 +70,8 @@ ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
 {
         return __copy_oldmem_page(pfn, buf, csize, offset, userbuf, true);
 }
+
+ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
+{
+        return read_from_oldmem(buf, count, ppos, 0, sev_active());
+}
@@ -344,13 +344,11 @@ bool sme_active(void)
 {
         return sme_me_mask && !sev_enabled;
 }
-EXPORT_SYMBOL(sme_active);
 
 bool sev_active(void)
 {
         return sme_me_mask && sev_enabled;
 }
-EXPORT_SYMBOL(sev_active);
 
 /* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
 bool force_dma_unencrypted(struct device *dev)
...
@@ -104,7 +104,7 @@ static int pfn_is_ram(unsigned long pfn)
 }
 
 /* Reads a page from the oldmem device from given offset. */
-static ssize_t read_from_oldmem(char *buf, size_t count,
+ssize_t read_from_oldmem(char *buf, size_t count,
                         u64 *ppos, int userbuf,
                         bool encrypted)
 {
@@ -170,7 +170,7 @@ void __weak elfcorehdr_free(unsigned long long addr)
  */
 ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
 {
-        return read_from_oldmem(buf, count, ppos, 0, sev_active());
+        return read_from_oldmem(buf, count, ppos, 0, false);
 }
 
 /*
...
@@ -115,4 +115,18 @@ static inline int vmcore_add_device_dump(struct vmcoredd_data *data)
         return -EOPNOTSUPP;
 }
 #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
+
+#ifdef CONFIG_PROC_VMCORE
+ssize_t read_from_oldmem(char *buf, size_t count,
+                         u64 *ppos, int userbuf,
+                         bool encrypted);
+#else
+static inline ssize_t read_from_oldmem(char *buf, size_t count,
+                                       u64 *ppos, int userbuf,
+                                       bool encrypted)
+{
+        return -EOPNOTSUPP;
+}
+#endif /* CONFIG_PROC_VMCORE */
+
 #endif /* LINUX_CRASHDUMP_H */
@@ -18,23 +18,10 @@
 
 #else  /* !CONFIG_ARCH_HAS_MEM_ENCRYPT */
 
-#define sme_me_mask        0ULL
-
-static inline bool sme_active(void) { return false; }
-static inline bool sev_active(void) { return false; }
+static inline bool mem_encrypt_active(void) { return false; }
 
 #endif /* CONFIG_ARCH_HAS_MEM_ENCRYPT */
 
-static inline bool mem_encrypt_active(void)
-{
-        return sme_me_mask;
-}
-
-static inline u64 sme_get_me_mask(void)
-{
-        return sme_me_mask;
-}
-
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 /*
  * The __sme_set() and __sme_clr() macros are useful for adding or removing
...
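With this hunk, mem_encrypt_active() is the only memory-encryption query left visible to arch-independent code; sme_active(), sev_active(), sme_me_mask and sme_get_me_mask() become x86-internal. Paraphrased rather than verbatim, the dispatch in the generic header now looks roughly like this:

/*
 * Rough shape of <linux/mem_encrypt.h> after this change (paraphrased):
 * the arch header is pulled in only when the architecture selected
 * ARCH_HAS_MEM_ENCRYPT; otherwise a "no encryption" stub is provided,
 * so callers never need their own #ifdefs.
 */
#ifdef CONFIG_ARCH_HAS_MEM_ENCRYPT
#include <asm/mem_encrypt.h>    /* arch supplies mem_encrypt_active() */
#else
static inline bool mem_encrypt_active(void) { return false; }
#endif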
@@ -291,12 +291,6 @@ void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 }
 EXPORT_SYMBOL(dma_free_attrs);
 
-static inline void dma_check_mask(struct device *dev, u64 mask)
-{
-        if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
-                dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
-}
-
 int dma_supported(struct device *dev, u64 mask)
 {
         const struct dma_map_ops *ops = get_dma_ops(dev);
@@ -327,7 +321,6 @@ int dma_set_mask(struct device *dev, u64 mask)
                 return -EIO;
 
         arch_dma_set_mask(dev, mask);
-        dma_check_mask(dev, mask);
         *dev->dma_mask = mask;
         return 0;
 }
@@ -345,7 +338,6 @@ int dma_set_coherent_mask(struct device *dev, u64 mask)
         if (!dma_supported(dev, mask))
                 return -EIO;
 
-        dma_check_mask(dev, mask);
         dev->coherent_dma_mask = mask;
         return 0;
 }
...
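The removed dma_check_mask() compared the device's DMA mask against an all-ones mask covering the SME encryption bit and everything below it, which is why it cannot stay in arch-independent code once sme_active() and sme_get_me_mask() become x86-only. As a hedged restatement of that predicate (not code from this diff, helper name made up):

/*
 * Illustrative restatement of the check the removed dma_check_mask() made:
 * me_mask has a single bit set, so ((me_mask << 1) - 1) is an all-ones
 * mask up to and including that bit; the old warning fired when the
 * device mask could not address it.
 */
static bool device_mask_covers_encryption_bit(u64 dma_mask, u64 me_mask)
{
        return dma_mask >= ((me_mask << 1) - 1);
}

Keeping that logic in kernel/dma would require the x86-only symbols in generic code, which is exactly what this series removes; any equivalent handling now belongs on the x86 side.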
@@ -461,8 +461,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
                 panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
 
         if (mem_encrypt_active())
-                pr_warn_once("%s is active and system is using DMA bounce buffers\n",
-                             sme_active() ? "SME" : "SEV");
+                pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");
 
         mask = dma_get_seg_boundary(hwdev);
...