Commit 04340249 authored by Linus Torvalds

Merge tag 'integrity-v6.0' of git://git.kernel.org/pub/scm/linux/kernel/git/zohar/linux-integrity

Pull integrity updates from Mimi Zohar:
 "Aside from the one EVM cleanup patch, all the other changes are kexec
  related.

  On different architectures, different keyrings are used to verify the
  kexec'ed kernel image signature. Here are a number of preparatory
  cleanup patches, followed by the patches that make the keyrings -
  builtin_trusted_keyring, .machine, .secondary_trusted_keyring, and
  .platform - consistent across the different architectures"

* tag 'integrity-v6.0' of git://git.kernel.org/pub/scm/linux/kernel/git/zohar/linux-integrity:
  kexec, KEYS, s390: Make use of built-in and secondary keyring for signature verification
  arm64: kexec_file: use more system keyrings to verify kernel image signature
  kexec, KEYS: make the code in bzImage64_verify_sig generic
  kexec: clean up arch_kexec_kernel_verify_sig
  kexec: drop weak attribute from functions
  kexec_file: drop weak attribute from functions
  evm: Use IS_ENABLED to initialize .enabled
parents 87fe1adb 88b61b13
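
The centerpiece of the series is a common PE signature-verification helper,
kexec_kernel_verify_pe_sig(), now shared by the x86 bzImage64 and arm64 Image
loaders. Because the diff below shows it split across hunks with indentation
lost, here is the added helper assembled in one piece as it lands in
kernel/kexec_file.c (the comment on the fallback is added here for explanation
and is not part of the patch):

int kexec_kernel_verify_pe_sig(const char *kernel, unsigned long kernel_len)
{
	int ret;

	ret = verify_pefile_signature(kernel, kernel_len,
				      VERIFY_USE_SECONDARY_KEYRING,
				      VERIFYING_KEXEC_PE_SIGNATURE);
	if (ret == -ENOKEY && IS_ENABLED(CONFIG_INTEGRITY_PLATFORM_KEYRING)) {
		/*
		 * No matching key in the built-in or secondary trusted
		 * keyrings; retry against the platform keyring.
		 */
		ret = verify_pefile_signature(kernel, kernel_len,
					      VERIFY_USE_PLATFORM_KEYRING,
					      VERIFYING_KEXEC_PE_SIGNATURE);
	}
	return ret;
}
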
@@ -84,16 +84,30 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
extern bool crash_is_nosave(unsigned long pfn);
extern void crash_prepare_suspend(void);
extern void crash_post_resume(void);
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
#define crash_free_reserved_phys_range crash_free_reserved_phys_range
#else
static inline bool crash_is_nosave(unsigned long pfn) {return false; }
static inline void crash_prepare_suspend(void) {}
static inline void crash_post_resume(void) {}
#endif
struct kimage;
#if defined(CONFIG_KEXEC_CORE)
void cpu_soft_restart(unsigned long el2_switch, unsigned long entry,
		      unsigned long arg0, unsigned long arg1,
		      unsigned long arg2);
int machine_kexec_post_load(struct kimage *image);
#define machine_kexec_post_load machine_kexec_post_load
void arch_kexec_protect_crashkres(void);
#define arch_kexec_protect_crashkres arch_kexec_protect_crashkres
void arch_kexec_unprotect_crashkres(void);
#define arch_kexec_unprotect_crashkres arch_kexec_unprotect_crashkres
#endif
#define ARCH_HAS_KIMAGE_ARCH
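
The hunk above is the arch-side half of the convention that the two "drop
weak attribute" patches adopt: an architecture that overrides a hook declares
its implementation and defines a same-named macro, and the generic header
(see the include/linux/kexec.h hunks further down) only supplies a static
inline default when no such macro is visible. A condensed sketch of the two
halves side by side, using machine_kexec_post_load() as the example:

/* arch/<arch>/include/asm/kexec.h -- the architecture opts in: */
int machine_kexec_post_load(struct kimage *image);
#define machine_kexec_post_load machine_kexec_post_load

/* include/linux/kexec.h -- the default replaces the old __weak symbol
 * and is compiled only when no architecture macro is defined: */
#ifndef machine_kexec_post_load
static inline int machine_kexec_post_load(struct kimage *image) { return 0; }
#endif
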
@@ -113,9 +127,9 @@ struct kimage_arch {
#ifdef CONFIG_KEXEC_FILE
extern const struct kexec_file_ops kexec_image_ops;
struct kimage;
int arch_kimage_file_post_load_cleanup(struct kimage *image);
#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup
extern int arch_kimage_file_post_load_cleanup(struct kimage *image);
extern int load_other_segments(struct kimage *image,
			       unsigned long kernel_load_addr, unsigned long kernel_size,
			       char *initrd, unsigned long initrd_len,
@@ -14,7 +14,6 @@
#include <linux/kexec.h>
#include <linux/pe.h>
#include <linux/string.h>
#include <linux/verification.h>
#include <asm/byteorder.h>
#include <asm/cpufeature.h>
#include <asm/image.h>
@@ -130,18 +129,10 @@ static void *image_load(struct kimage *image,
return NULL;
}
#ifdef CONFIG_KEXEC_IMAGE_VERIFY_SIG
static int image_verify_sig(const char *kernel, unsigned long kernel_len)
{
	return verify_pefile_signature(kernel, kernel_len, NULL,
				       VERIFYING_KEXEC_PE_SIGNATURE);
}
#endif
const struct kexec_file_ops kexec_image_ops = {
	.probe = image_probe,
	.load = image_load,
#ifdef CONFIG_KEXEC_IMAGE_VERIFY_SIG
	.verify_sig = image_verify_sig,
	.verify_sig = kexec_kernel_verify_pe_sig,
#endif
};
@@ -98,6 +98,11 @@ void relocate_new_kernel(unsigned long indirection_page, unsigned long reboot_co
void kexec_copy_flush(struct kimage *image);
#if defined(CONFIG_CRASH_DUMP) && defined(CONFIG_PPC_RTAS)
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
#define crash_free_reserved_phys_range crash_free_reserved_phys_range
#endif
#ifdef CONFIG_KEXEC_FILE
extern const struct kexec_file_ops kexec_elf64_ops;
@@ -120,6 +125,15 @@ int setup_purgatory(struct kimage *image, const void *slave_code,
#ifdef CONFIG_PPC64
struct kexec_buf;
int arch_kexec_kernel_image_probe(struct kimage *image, void *buf, unsigned long buf_len);
#define arch_kexec_kernel_image_probe arch_kexec_kernel_image_probe
int arch_kimage_file_post_load_cleanup(struct kimage *image);
#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup
int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf);
#define arch_kexec_locate_mem_hole arch_kexec_locate_mem_hole
int load_crashdump_segments_ppc64(struct kimage *image,
				  struct kexec_buf *kbuf);
int setup_purgatory_ppc64(struct kimage *image, const void *slave_code,
@@ -85,6 +85,17 @@ struct kimage_arch {
extern const struct kexec_file_ops s390_kexec_image_ops;
extern const struct kexec_file_ops s390_kexec_elf_ops;
#ifdef CONFIG_CRASH_DUMP
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
#define crash_free_reserved_phys_range crash_free_reserved_phys_range
void arch_kexec_protect_crashkres(void);
#define arch_kexec_protect_crashkres arch_kexec_protect_crashkres
void arch_kexec_unprotect_crashkres(void);
#define arch_kexec_unprotect_crashkres arch_kexec_unprotect_crashkres
#endif
#ifdef CONFIG_KEXEC_FILE
struct purgatory_info;
int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
@@ -92,5 +103,8 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
				     const Elf_Shdr *relsec,
				     const Elf_Shdr *symtab);
#define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add
int arch_kimage_file_post_load_cleanup(struct kimage *image);
#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup
#endif
#endif /*_S390_KEXEC_H */
@@ -31,6 +31,7 @@ int s390_verify_sig(const char *kernel, unsigned long kernel_len)
	const unsigned long marker_len = sizeof(MODULE_SIG_STRING) - 1;
	struct module_signature *ms;
	unsigned long sig_len;
	int ret;

	/* Skip signature verification when not secure IPLed. */
	if (!ipl_secure_flag)
@@ -65,11 +66,18 @@ int s390_verify_sig(const char *kernel, unsigned long kernel_len)
		return -EBADMSG;
	}

	return verify_pkcs7_signature(kernel, kernel_len,
				      kernel + kernel_len, sig_len,
				      VERIFY_USE_PLATFORM_KEYRING,
				      VERIFYING_MODULE_SIGNATURE,
				      NULL, NULL);
	ret = verify_pkcs7_signature(kernel, kernel_len,
				     kernel + kernel_len, sig_len,
				     VERIFY_USE_SECONDARY_KEYRING,
				     VERIFYING_MODULE_SIGNATURE,
				     NULL, NULL);
	if (ret == -ENOKEY && IS_ENABLED(CONFIG_INTEGRITY_PLATFORM_KEYRING))
		ret = verify_pkcs7_signature(kernel, kernel_len,
					     kernel + kernel_len, sig_len,
					     VERIFY_USE_PLATFORM_KEYRING,
					     VERIFYING_MODULE_SIGNATURE,
					     NULL, NULL);
	return ret;
}
#endif /* CONFIG_KEXEC_SIG */
@@ -186,6 +186,12 @@ extern int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages,
extern void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages);
#define arch_kexec_pre_free_pages arch_kexec_pre_free_pages
void arch_kexec_protect_crashkres(void);
#define arch_kexec_protect_crashkres arch_kexec_protect_crashkres
void arch_kexec_unprotect_crashkres(void);
#define arch_kexec_unprotect_crashkres arch_kexec_unprotect_crashkres
#ifdef CONFIG_KEXEC_FILE
struct purgatory_info;
int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
@@ -193,6 +199,12 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
				     const Elf_Shdr *relsec,
				     const Elf_Shdr *symtab);
#define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add
void *arch_kexec_kernel_image_load(struct kimage *image);
#define arch_kexec_kernel_image_load arch_kexec_kernel_image_load
int arch_kimage_file_post_load_cleanup(struct kimage *image);
#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup
#endif
#endif
@@ -17,7 +17,6 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/efi.h>
#include <linux/verification.h>
#include <linux/random.h>
#include <asm/bootparam.h>
@@ -596,28 +595,11 @@ static int bzImage64_cleanup(void *loader_data)
	return 0;
}
#ifdef CONFIG_KEXEC_BZIMAGE_VERIFY_SIG
static int bzImage64_verify_sig(const char *kernel, unsigned long kernel_len)
{
	int ret;

	ret = verify_pefile_signature(kernel, kernel_len,
				      VERIFY_USE_SECONDARY_KEYRING,
				      VERIFYING_KEXEC_PE_SIGNATURE);
	if (ret == -ENOKEY && IS_ENABLED(CONFIG_INTEGRITY_PLATFORM_KEYRING)) {
		ret = verify_pefile_signature(kernel, kernel_len,
					      VERIFY_USE_PLATFORM_KEYRING,
					      VERIFYING_KEXEC_PE_SIGNATURE);
	}
	return ret;
}
#endif
const struct kexec_file_ops kexec_bzImage64_ops = {
	.probe = bzImage64_probe,
	.load = bzImage64_load,
	.cleanup = bzImage64_cleanup,
#ifdef CONFIG_KEXEC_BZIMAGE_VERIFY_SIG
	.verify_sig = bzImage64_verify_sig,
	.verify_sig = kexec_kernel_verify_pe_sig,
#endif
};
@@ -19,6 +19,7 @@
#include <asm/io.h>
#include <uapi/linux/kexec.h>
#include <linux/verification.h>
/* Location of a reserved region to hold the crash kernel.
 */
@@ -188,21 +189,54 @@ int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
				   void *buf, unsigned int size,
				   bool get_value);
void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name);
void *kexec_image_load_default(struct kimage *image);
#ifndef arch_kexec_kernel_image_probe
static inline int
arch_kexec_kernel_image_probe(struct kimage *image, void *buf, unsigned long buf_len)
{
	return kexec_image_probe_default(image, buf, buf_len);
}
#endif

#ifndef arch_kimage_file_post_load_cleanup
static inline int arch_kimage_file_post_load_cleanup(struct kimage *image)
{
	return kexec_image_post_load_cleanup_default(image);
}
#endif

#ifndef arch_kexec_kernel_image_load
static inline void *arch_kexec_kernel_image_load(struct kimage *image)
{
	return kexec_image_load_default(image);
}
#endif
/* Architectures may override the below functions */
int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
				  unsigned long buf_len);
void *arch_kexec_kernel_image_load(struct kimage *image);
int arch_kimage_file_post_load_cleanup(struct kimage *image);
#ifdef CONFIG_KEXEC_SIG
int arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
				 unsigned long buf_len);
#ifdef CONFIG_SIGNED_PE_FILE_VERIFICATION
int kexec_kernel_verify_pe_sig(const char *kernel, unsigned long kernel_len);
#endif
#endif
int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf);
extern int kexec_add_buffer(struct kexec_buf *kbuf);
int kexec_locate_mem_hole(struct kexec_buf *kbuf);
#ifndef arch_kexec_locate_mem_hole
/**
 * arch_kexec_locate_mem_hole - Find free memory to place the segments.
 * @kbuf: Parameters for the memory search.
 *
 * On success, kbuf->mem will have the start address of the memory region found.
 *
 * Return: 0 on success, negative errno on error.
 */
static inline int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf)
{
	return kexec_locate_mem_hole(kbuf);
}
#endif
/* Alignment required for elf header segment */
#define ELF_CORE_HEADER_ALIGN 4096
@@ -358,7 +392,10 @@ extern void machine_kexec_cleanup(struct kimage *image);
extern int kernel_kexec(void);
extern struct page *kimage_alloc_control_pages(struct kimage *image,
					       unsigned int order);
int machine_kexec_post_load(struct kimage *image);
#ifndef machine_kexec_post_load
static inline int machine_kexec_post_load(struct kimage *image) { return 0; }
#endif
extern void __crash_kexec(struct pt_regs *);
extern void crash_kexec(struct pt_regs *);
@@ -391,10 +428,21 @@ extern bool kexec_in_progress;
int crash_shrink_memory(unsigned long new_size);
size_t crash_get_memory_size(void);
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
void arch_kexec_protect_crashkres(void);
void arch_kexec_unprotect_crashkres(void);
#ifndef arch_kexec_protect_crashkres
/*
 * Protection mechanism for crashkernel reserved memory after
 * the kdump kernel is loaded.
 *
 * Provide an empty default implementation here -- architecture
 * code may override this
 */
static inline void arch_kexec_protect_crashkres(void) { }
#endif
#ifndef arch_kexec_unprotect_crashkres
static inline void arch_kexec_unprotect_crashkres(void) { }
#endif
#ifndef page_to_boot_pfn
static inline unsigned long page_to_boot_pfn(struct page *page)
@@ -424,6 +472,16 @@ static inline phys_addr_t boot_phys_to_phys(unsigned long boot_phys)
}
#endif
#ifndef crash_free_reserved_phys_range
static inline void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
{
	unsigned long addr;

	for (addr = begin; addr < end; addr += PAGE_SIZE)
		free_reserved_page(boot_pfn_to_page(addr >> PAGE_SHIFT));
}
#endif
static inline unsigned long virt_to_boot_phys(void *addr)
{
	return phys_to_boot_phys(__pa((unsigned long)addr));
@@ -591,11 +591,6 @@ static void kimage_free_extra_pages(struct kimage *image)
}
int __weak machine_kexec_post_load(struct kimage *image)
{
	return 0;
}
void kimage_terminate(struct kimage *image)
{
	if (*image->entry != 0)
@@ -1020,15 +1015,6 @@ size_t crash_get_memory_size(void)
	return size;
}
void __weak crash_free_reserved_phys_range(unsigned long begin,
					   unsigned long end)
{
	unsigned long addr;

	for (addr = begin; addr < end; addr += PAGE_SIZE)
		free_reserved_page(boot_pfn_to_page(addr >> PAGE_SHIFT));
}
int crash_shrink_memory(unsigned long new_size)
{
	int ret = 0;
@@ -1225,16 +1211,3 @@ int kernel_kexec(void)
	mutex_unlock(&kexec_mutex);
	return error;
}
/*
 * Protection mechanism for crashkernel reserved memory after
 * the kdump kernel is loaded.
 *
 * Provide an empty default implementation here -- architecture
 * code may override this
 */
void __weak arch_kexec_protect_crashkres(void)
{}
void __weak arch_kexec_unprotect_crashkres(void)
{}
@@ -62,14 +62,7 @@ int kexec_image_probe_default(struct kimage *image, void *buf,
	return ret;
}
/* Architectures can provide this probe function */
int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
					 unsigned long buf_len)
{
	return kexec_image_probe_default(image, buf, buf_len);
}
static void *kexec_image_load_default(struct kimage *image)
void *kexec_image_load_default(struct kimage *image)
{
	if (!image->fops || !image->fops->load)
		return ERR_PTR(-ENOEXEC);
@@ -80,11 +73,6 @@ static void *kexec_image_load_default(struct kimage *image)
				 image->cmdline_buf_len);
}
void * __weak arch_kexec_kernel_image_load(struct kimage *image)
{
	return kexec_image_load_default(image);
}
int kexec_image_post_load_cleanup_default(struct kimage *image)
{
	if (!image->fops || !image->fops->cleanup)
@@ -93,30 +81,6 @@ int kexec_image_post_load_cleanup_default(struct kimage *image)
	return image->fops->cleanup(image->image_loader_data);
}
int __weak arch_kimage_file_post_load_cleanup(struct kimage *image)
{
	return kexec_image_post_load_cleanup_default(image);
}
#ifdef CONFIG_KEXEC_SIG
static int kexec_image_verify_sig_default(struct kimage *image, void *buf,
					  unsigned long buf_len)
{
	if (!image->fops || !image->fops->verify_sig) {
		pr_debug("kernel loader does not support signature verification.\n");
		return -EKEYREJECTED;
	}

	return image->fops->verify_sig(buf, buf_len);
}
int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
					unsigned long buf_len)
{
	return kexec_image_verify_sig_default(image, buf, buf_len);
}
#endif
/*
 * Free up memory used by kernel, initrd, and command line. This is temporary
 * memory allocation which is not needed any more after these buffers have
@@ -159,13 +123,41 @@ void kimage_file_post_load_cleanup(struct kimage *image)
}
#ifdef CONFIG_KEXEC_SIG
#ifdef CONFIG_SIGNED_PE_FILE_VERIFICATION
int kexec_kernel_verify_pe_sig(const char *kernel, unsigned long kernel_len)
{
	int ret;

	ret = verify_pefile_signature(kernel, kernel_len,
				      VERIFY_USE_SECONDARY_KEYRING,
				      VERIFYING_KEXEC_PE_SIGNATURE);
	if (ret == -ENOKEY && IS_ENABLED(CONFIG_INTEGRITY_PLATFORM_KEYRING)) {
		ret = verify_pefile_signature(kernel, kernel_len,
					      VERIFY_USE_PLATFORM_KEYRING,
					      VERIFYING_KEXEC_PE_SIGNATURE);
	}
	return ret;
}
#endif
static int kexec_image_verify_sig(struct kimage *image, void *buf,
				  unsigned long buf_len)
{
	if (!image->fops || !image->fops->verify_sig) {
		pr_debug("kernel loader does not support signature verification.\n");
		return -EKEYREJECTED;
	}

	return image->fops->verify_sig(buf, buf_len);
}
static int
kimage_validate_signature(struct kimage *image)
{
	int ret;

	ret = arch_kexec_kernel_verify_sig(image, image->kernel_buf,
					   image->kernel_buf_len);
	ret = kexec_image_verify_sig(image, image->kernel_buf,
				     image->kernel_buf_len);
	if (ret) {
		if (sig_enforce) {
@@ -621,19 +613,6 @@ int kexec_locate_mem_hole(struct kexec_buf *kbuf)
	return ret == 1 ? 0 : -EADDRNOTAVAIL;
}
/**
 * arch_kexec_locate_mem_hole - Find free memory to place the segments.
 * @kbuf: Parameters for the memory search.
 *
 * On success, kbuf->mem will have the start address of the memory region found.
 *
 * Return: 0 on success, negative errno on error.
 */
int __weak arch_kexec_locate_mem_hole(struct kexec_buf *kbuf)
{
	return kexec_locate_mem_hole(kbuf);
}
/**
 * kexec_add_buffer - place a buffer in a kexec segment
 * @kbuf: Buffer contents and memory parameters.
@@ -36,42 +36,36 @@ static const char * const integrity_status_msg[] = {
int evm_hmac_attrs;
static struct xattr_list evm_config_default_xattrnames[] = {
	{.name = XATTR_NAME_SELINUX,
#ifdef CONFIG_SECURITY_SELINUX
	 .enabled = true
#endif
	{
	 .name = XATTR_NAME_SELINUX,
	 .enabled = IS_ENABLED(CONFIG_SECURITY_SELINUX)
	},
	{.name = XATTR_NAME_SMACK,
#ifdef CONFIG_SECURITY_SMACK
	 .enabled = true
#endif
	{
	 .name = XATTR_NAME_SMACK,
	 .enabled = IS_ENABLED(CONFIG_SECURITY_SMACK)
	},
	{.name = XATTR_NAME_SMACKEXEC,
#ifdef CONFIG_EVM_EXTRA_SMACK_XATTRS
	 .enabled = true
#endif
	{
	 .name = XATTR_NAME_SMACKEXEC,
	 .enabled = IS_ENABLED(CONFIG_EVM_EXTRA_SMACK_XATTRS)
	},
	{.name = XATTR_NAME_SMACKTRANSMUTE,
#ifdef CONFIG_EVM_EXTRA_SMACK_XATTRS
	 .enabled = true
#endif
	{
	 .name = XATTR_NAME_SMACKTRANSMUTE,
	 .enabled = IS_ENABLED(CONFIG_EVM_EXTRA_SMACK_XATTRS)
	},
	{.name = XATTR_NAME_SMACKMMAP,
#ifdef CONFIG_EVM_EXTRA_SMACK_XATTRS
	 .enabled = true
#endif
	{
	 .name = XATTR_NAME_SMACKMMAP,
	 .enabled = IS_ENABLED(CONFIG_EVM_EXTRA_SMACK_XATTRS)
	},
	{.name = XATTR_NAME_APPARMOR,
#ifdef CONFIG_SECURITY_APPARMOR
	 .enabled = true
#endif
	{
	 .name = XATTR_NAME_APPARMOR,
	 .enabled = IS_ENABLED(CONFIG_SECURITY_APPARMOR)
	},
	{.name = XATTR_NAME_IMA,
#ifdef CONFIG_IMA_APPRAISE
	 .enabled = true
#endif
	{
	 .name = XATTR_NAME_IMA,
	 .enabled = IS_ENABLED(CONFIG_IMA_APPRAISE)
	},
	{.name = XATTR_NAME_CAPS,
	{
	 .name = XATTR_NAME_CAPS,
	 .enabled = true
	},
};
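
The EVM cleanup works because IS_ENABLED() expands to a constant 1 or 0, so it
is legal inside a static initializer, unlike the #ifdef blocks it replaces. A
minimal self-contained sketch of the same trick (the macros below are a
simplified stand-in for <linux/kconfig.h>, which additionally checks the
CONFIG_FOO_MODULE variant; the xattr_list struct here is likewise reduced):

#include <stdio.h>

/* Enabled Kconfig options are defined to 1 in the generated autoconf.h;
 * these macros turn "defined to 1" into 1 and "not defined" into 0. */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)
#define IS_ENABLED(option) __is_defined(option)

#define CONFIG_SECURITY_SELINUX 1	/* pretend this option is set */
/* CONFIG_SECURITY_SMACK deliberately left undefined */

struct xattr_list {
	const char *name;
	int enabled;
};

/* No #ifdef blocks: .enabled is resolved at compile time. */
static struct xattr_list xattrs[] = {
	{ .name = "security.selinux", .enabled = IS_ENABLED(CONFIG_SECURITY_SELINUX) },
	{ .name = "security.SMACK64", .enabled = IS_ENABLED(CONFIG_SECURITY_SMACK) },
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(xattrs) / sizeof(xattrs[0]); i++)
		printf("%s -> %d\n", xattrs[i].name, xattrs[i].enabled);
	return 0;
}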