Commit 2a3c17ed authored by Linus Torvalds

Merge tag 'riscv-for-linus-6.5-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux

Pull RISC-V fixes from Palmer Dabbelt:

 - Fixes for a pair of kexec_file_load() failures

 - A fix to ensure the direct mapping is PMD-aligned

 - A fix for CPU feature detection on SMP=n

 - The MMIO ordering fences have been strengthened to ensure ordering
   with respect to delay() (see the sketch after this list)

 - Fixes for a pair of -Wmissing-variable-declarations warnings

 - A fix to avoid PUD mappings in vmap on sv39

 - flush_cache_vmap() now flushes the TLB to avoid issues on systems
   that cache invalid mappings
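
For context on the MMIO item: memory-barriers.txt ("Kernel I/O barrier effects")
requires that a readX() complete before a subsequent delay() loop begins on the
same CPU, so two writes to a device can be paced by a read-back plus a delay.
The canonical pattern (device register names are illustrative):

	writel(42, DEVICE_REGISTER_0);	/* arrives at the device */
	readl(DEVICE_REGISTER_1);	/* must complete before the delay starts */
	udelay(1);			/* device-mandated pacing */
	writel(42, DEVICE_REGISTER_2);

The old "fence i,r" did not order the MMIO read against the delay() loop; the
strengthened "fence i,ir" in the diff below restores this guarantee.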

* tag 'riscv-for-linus-6.5-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux:
  riscv: Implement flush_cache_vmap()
  riscv: Do not allow vmap pud mappings for 3-level page table
  riscv: mm: fix 2 instances of -Wmissing-variable-declarations
  riscv,mmio: Fix readX()-to-delay() ordering
  riscv: Fix CPU feature detection with SMP disabled
  riscv: Start of DRAM should at least be aligned on PMD size for the direct mapping
  riscv/kexec: load initrd high in available memory
  riscv/kexec: handle R_RISCV_CALL_PLT relocation type
parents feb0eee9 7e381152
arch/riscv/include/asm/cacheflush.h
@@ -37,6 +37,10 @@ static inline void flush_dcache_page(struct page *page)
 #define flush_icache_user_page(vma, pg, addr, len) \
 	flush_icache_mm(vma->vm_mm, 0)
 
+#ifdef CONFIG_64BIT
+#define flush_cache_vmap(start, end)	flush_tlb_kernel_range(start, end)
+#endif
+
 #ifndef CONFIG_SMP
 #define flush_icache_all() local_flush_icache_all()
...
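
The generic vmap code calls this hook after installing new kernel page tables;
defining it as flush_tlb_kernel_range() evicts translations that were cached
while the PTEs were still invalid. A rough sketch of the calling sequence
(simplified from mm/vmalloc.c):

	/* map the pages, then invoke the arch hook */
	vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
	flush_cache_vmap(addr, end);	/* riscv/64: flush_tlb_kernel_range() */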
arch/riscv/include/asm/mmio.h
@@ -101,9 +101,9 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
  * Relaxed I/O memory access primitives. These follow the Device memory
  * ordering rules but do not guarantee any ordering relative to Normal memory
  * accesses. These are defined to order the indicated access (either a read or
- * write) with all other I/O memory accesses. Since the platform specification
- * defines that all I/O regions are strongly ordered on channel 2, no explicit
- * fences are required to enforce this ordering.
+ * write) with all other I/O memory accesses to the same peripheral. Since the
+ * platform specification defines that all I/O regions are strongly ordered on
+ * channel 0, no explicit fences are required to enforce this ordering.
  */
 
 /* FIXME: These are now the same as asm-generic */
 #define __io_rbr()		do {} while (0)
@@ -125,14 +125,14 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
 #endif
 
 /*
- * I/O memory access primitives. Reads are ordered relative to any
- * following Normal memory access. Writes are ordered relative to any prior
- * Normal memory access. The memory barriers here are necessary as RISC-V
+ * I/O memory access primitives. Reads are ordered relative to any following
+ * Normal memory read and delay() loop. Writes are ordered relative to any
+ * prior Normal memory write. The memory barriers here are necessary as RISC-V
  * doesn't define any ordering between the memory space and the I/O space.
  */
 #define __io_br()	do {} while (0)
-#define __io_ar(v)	__asm__ __volatile__ ("fence i,r" : : : "memory")
-#define __io_bw()	__asm__ __volatile__ ("fence w,o" : : : "memory")
+#define __io_ar(v)	({ __asm__ __volatile__ ("fence i,ir" : : : "memory"); })
+#define __io_bw()	({ __asm__ __volatile__ ("fence w,o" : : : "memory"); })
 #define __io_aw()	mmiowb_set_pending()
 
 #define readb(c)	({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
...
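
Expanding the readb() definition above shows where the strengthened fence
sits; roughly:

	u8 v = readb(c);
	/* expands to approximately: */
	u8 __v;
	__io_br();			/* no-op: nothing to order before the read */
	__v = readb_cpu(c);		/* the actual MMIO load */
	__asm__ __volatile__ ("fence i,ir" : : : "memory");	/* __io_ar() */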
arch/riscv/include/asm/pgtable.h
@@ -188,6 +188,8 @@ extern struct pt_alloc_ops pt_ops __initdata;
 #define PAGE_KERNEL_IO		__pgprot(_PAGE_IOREMAP)
 
 extern pgd_t swapper_pg_dir[];
+extern pgd_t trampoline_pg_dir[];
+extern pgd_t early_pg_dir[];
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static inline int pmd_present(pmd_t pmd)
...
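
These two externs are the fix for the -Wmissing-variable-declarations warnings
named in the log: clang warns when a non-static variable is defined with no
prior declaration in scope. Roughly:

	/* with no extern declaration visible from a header: */
	pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
	/* ^ -Wmissing-variable-declarations fires here; the extern in
	 *   <asm/pgtable.h> above silences it */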
arch/riscv/include/asm/vmalloc.h
@@ -3,12 +3,14 @@
 
 #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
 
+extern bool pgtable_l4_enabled, pgtable_l5_enabled;
+
 #define IOREMAP_MAX_ORDER (PUD_SHIFT)
 
 #define arch_vmap_pud_supported arch_vmap_pud_supported
 static inline bool arch_vmap_pud_supported(pgprot_t prot)
 {
-	return true;
+	return pgtable_l4_enabled || pgtable_l5_enabled;
 }
 
 #define arch_vmap_pmd_supported	arch_vmap_pmd_supported
...
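
On sv39 the kernel runs with a 3-level page table, so there is no real PUD
level for huge-vmap to map at; the hook now reports PUD support only when a
4- or 5-level table is in use. The generic huge-vmap path consults it before
attempting a PUD-sized mapping, along the lines of:

	/* sketch of the check the generic huge-vmap path performs */
	if (!arch_vmap_pud_supported(prot))
		return false;	/* sv39: fall back to PMD/PTE mappings */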
arch/riscv/kernel/cpu.c
@@ -17,6 +17,11 @@
 #include <asm/smp.h>
 #include <asm/pgtable.h>
 
+bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
+{
+	return phys_id == cpuid_to_hartid_map(cpu);
+}
+
 /*
  * Returns the hart ID of the given device tree node, or -ENODEV if the node
  * isn't an enabled and valid RISC-V hart node.
...
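
arch_match_cpu_phys_id() overrides a __weak helper that the device tree code
uses to match CPU nodes to logical CPUs; the generic fallback is roughly:

	bool __weak arch_match_cpu_phys_id(int cpu, u64 phys_id)
	{
		return (u32)phys_id == cpu;	/* assumes phys ids equal logical ids */
	}

With SMP=n the riscv override previously lived in smp.c and was not built, so
hartids got compared against logical CPU numbers and CPU feature detection
went wrong; moving it here makes it unconditional (the smp.c copy is removed
below).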
arch/riscv/kernel/elf_kexec.c
@@ -281,7 +281,7 @@ static void *elf_kexec_load(struct kimage *image, char *kernel_buf,
 		kbuf.buffer = initrd;
 		kbuf.bufsz = kbuf.memsz = initrd_len;
 		kbuf.buf_align = PAGE_SIZE;
-		kbuf.top_down = false;
+		kbuf.top_down = true;
 		kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
 		ret = kexec_add_buffer(&kbuf);
 		if (ret)
@@ -425,6 +425,7 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
 		 * sym, instead of searching the whole relsec.
 		 */
		case R_RISCV_PCREL_HI20:
+		case R_RISCV_CALL_PLT:
		case R_RISCV_CALL:
			*(u64 *)loc = CLEAN_IMM(UITYPE, *(u64 *)loc) |
				ENCODE_UJTYPE_IMM(val - addr);
...
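
R_RISCV_CALL and R_RISCV_CALL_PLT both mark an auipc+jalr pair, and newer
toolchains emit the _PLT form even for direct local calls; since purgatory is
linked with no PLT, the new case can take exactly the same fixup. The
immediate split the fixup performs on the PC-relative offset is the standard
hi20/lo12 one; illustratively (not the kernel's macros):

	int32_t off  = val - addr;		/* pc-relative offset, assumed to fit */
	int32_t hi20 = (off + 0x800) & ~0xfff;	/* immediate for the auipc */
	int32_t lo12 = off - hi20;		/* immediate for the jalr */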
arch/riscv/kernel/smp.c
@@ -61,11 +61,6 @@ int riscv_hartid_to_cpuid(unsigned long hartid)
 	return -ENOENT;
 }
 
-bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
-{
-	return phys_id == cpuid_to_hartid_map(cpu);
-}
-
 static void ipi_stop(void)
 {
 	set_cpu_online(smp_processor_id(), false);
...
arch/riscv/mm/init.c
@@ -26,12 +26,13 @@
 #include <linux/kfence.h>
 
 #include <asm/fixmap.h>
-#include <asm/tlbflush.h>
-#include <asm/sections.h>
-#include <asm/soc.h>
 #include <asm/io.h>
-#include <asm/ptdump.h>
 #include <asm/numa.h>
+#include <asm/pgtable.h>
+#include <asm/ptdump.h>
+#include <asm/sections.h>
+#include <asm/soc.h>
+#include <asm/tlbflush.h>
 
 #include "../kernel/head.h"
@@ -214,8 +215,13 @@ static void __init setup_bootmem(void)
 	memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
 
 	phys_ram_end = memblock_end_of_DRAM();
+
+	/*
+	 * Make sure we align the start of the memory on a PMD boundary so that
+	 * at worst, we map the linear mapping with PMD mappings.
+	 */
 	if (!IS_ENABLED(CONFIG_XIP_KERNEL))
-		phys_ram_base = memblock_start_of_DRAM();
+		phys_ram_base = memblock_start_of_DRAM() & PMD_MASK;
 
 	/*
 	 * In 64-bit, any use of __va/__pa before this point is wrong as we
...
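
Masking with PMD_MASK rounds the recorded DRAM base down to a PMD boundary
(2 MiB with 4 KiB pages), so the linear mapping never has to drop to 4 KiB
pages just because firmware handed over an oddly aligned start. Worked
example with hypothetical bases:

	/* PMD_SIZE = 0x200000, PMD_MASK = ~0x1fffff */
	0x80280000UL & PMD_MASK		/* -> 0x80200000, rounded down */
	0x80200000UL & PMD_MASK		/* -> 0x80200000, already aligned */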
arch/riscv/mm/kasan_init.c
@@ -22,7 +22,6 @@
  * region is not and then we have to go down to the PUD level.
  */
 
-extern pgd_t early_pg_dir[PTRS_PER_PGD];
 pgd_t tmp_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
 p4d_t tmp_p4d[PTRS_PER_P4D] __page_aligned_bss;
 pud_t tmp_pud[PTRS_PER_PUD] __page_aligned_bss;
...