Commit c1eb8f6c authored by Linus Torvalds

Merge tag 'for-5.17/parisc-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux

Pull parisc architecture updates from Helge Deller:

 - Fix lpa and lpa_user defines (John David Anglin)

 - Fix symbol lookup of init functions with an __is_kernel() fix (Helge
   Deller)

 - Fix wrong pdc_toc_pim_11 and pdc_toc_pim_20 definitions (Helge
   Deller)

 - Add lws_atomic_xchg and lws_atomic_store syscalls (John David Anglin)

 - Rewrite light-weight syscall and futex code (John David Anglin);
   see the calling-convention sketch after the commit list below

 - Enable TOC (transfer of control) feature unconditionally (Helge
   Deller)

 - Improve fault handler messages (John David Anglin)

 - Improve build process (Masahiro Yamada)

 - Reduce kernel code footprint of user access functions (Helge Deller)

 - Fix build error due to outX() macros (Bart Van Assche)

 - Use default_groups in kobj_type in pdc_stable (Greg Kroah-Hartman)

 - Default to 16 CPUs on 32-bit kernel (Helge Deller)

* tag 'for-5.17/parisc-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
  parisc: Default to 16 CPUs on 32-bit kernel
  sections: Fix __is_kernel() to include init ranges
  parisc: Re-use toc_stack as hpmc_stack
  parisc: Enable TOC (transfer of control) feature unconditionally
  parisc: io: Improve the outb(), outw() and outl() macros
  parisc: pdc_stable: use default_groups in kobj_type
  parisc: Add kgdb io_module to read chars via PDC
  parisc: Fix pdc_toc_pim_11 and pdc_toc_pim_20 definitions
  parisc: Add lws_atomic_xchg and lws_atomic_store syscalls
  parisc: Rewrite light-weight syscall and futex code
  parisc: Enhance page fault termination message
  parisc: Don't call faulthandler_disabled() in do_page_fault()
  parisc: Switch user access functions to signal errors in r29 instead of r8
  parisc: Avoid calling faulthandler_disabled() twice
  parisc: Fix lpa and lpa_user defines
  parisc: Define depi_safe macro
  parisc: decompressor: do not copy source files while building
parents f18e2d87 68d247ad
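
Before the diff, some orientation on how userspace reaches these light-weight syscalls (LWS). A hedged sketch modeled on the documentation in arch/parisc/kernel/syscall.S and on glibc's LWS_CAS usage; the register roles are the documented ones, but the error handling is illustrative, and the function numbers of the new xchg/store entries are deliberately not assumed here:

```c
#include <errno.h>

/*
 * Hedged sketch of a 32-bit userspace compare-and-swap via the parisc
 * LWS gateway: branch to 0xb0(%sr2, %r0) with the LWS number in %r20
 * (loaded in the branch delay slot), arguments in %r26-%r24, the old
 * value returned in %r28 and an error code in %r21. LWS_CAS is 0.
 */
static inline int lws_cas32(volatile unsigned int *mem,
			    unsigned int oldval, unsigned int newval)
{
	register unsigned long r26 __asm__("r26") = (unsigned long)mem;
	register unsigned long r25 __asm__("r25") = oldval;
	register unsigned long r24 __asm__("r24") = newval;
	register unsigned long ret __asm__("r28");
	register long err __asm__("r21");

	__asm__ __volatile__(
		"ble	0xb0(%%sr2, %%r0)\n\t"
		"ldi	0, %%r20"		/* delay slot: %r20 = LWS_CAS */
		: "=r" (ret), "=r" (err)
		: "r" (r26), "r" (r25), "r" (r24)
		: "r1", "r20", "r22", "r23", "r29", "r31", "memory");

	if (err)
		return (int)err;		/* e.g. -EFAULT */

	return ret == oldval ? 0 : -EAGAIN;	/* -EAGAIN: value changed */
}
```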
@@ -287,20 +287,6 @@ config SMP
If you don't know what to do here, say N.
config TOC
bool "Support TOC switch"
default y if 64BIT || !SMP
help
Most PA-RISC machines have either a switch at the back of the machine
or a command in BMC to trigger a TOC interrupt. If you say Y here a
handler will be installed which will either show a backtrace on all
CPUs, or enter a possible configured debugger like kgdb/kdb.
Note that with this option enabled, the kernel will use an additional 16KB
per possible CPU as a special stack for the TOC handler.
If you don't want to debug the Kernel, say N.
config PARISC_CPU_TOPOLOGY
bool "Support cpu topology definition"
depends on SMP
@@ -370,7 +356,8 @@ config NR_CPUS
int "Maximum number of CPUs (2-32)"
range 2 32
depends on SMP
default "4"
default "4" if 64BIT
default "16"
config KEXEC
bool "Kexec system call"
......
# SPDX-License-Identifier: GPL-2.0-only
firmware.c
real2.S
sizes.h
vmlinux
vmlinux.lds
@@ -13,7 +13,6 @@ OBJECTS := head.o real2.o firmware.o misc.o piggy.o
targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
targets += $(OBJECTS) sizes.h
targets += real2.S firmware.c
KBUILD_CFLAGS := -D__KERNEL__ -O2 -DBOOTLOADER
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
@@ -42,14 +41,7 @@ $(obj)/head.o: $(obj)/sizes.h
CFLAGS_misc.o += -I$(objtree)/$(obj)
$(obj)/misc.o: $(obj)/sizes.h
$(obj)/firmware.o: $(obj)/firmware.c
$(obj)/firmware.c: $(srctree)/arch/$(SRCARCH)/kernel/firmware.c
$(call cmd,shipped)
AFLAGS_real2.o += -DBOOTLOADER
$(obj)/real2.o: $(obj)/real2.S
$(obj)/real2.S: $(srctree)/arch/$(SRCARCH)/kernel/real2.S
$(call cmd,shipped)
CPPFLAGS_vmlinux.lds += -I$(objtree)/$(obj) -DBOOTLOADER
$(obj)/vmlinux.lds: $(obj)/sizes.h
......
// SPDX-License-Identifier: GPL-2.0-only
#include "../../kernel/firmware.c"
/* SPDX-License-Identifier: GPL-2.0-only */
#include "../../kernel/real2.S"
@@ -158,6 +158,16 @@
#endif
.endm
/* The depi instruction leaves the most significant 32 bits of the
* target register in an undefined state on PA 2.0 systems. */
.macro depi_safe i, p, len, t
#ifdef CONFIG_64BIT
depdi \i, 32+(\p), \len, \t
#else
depi \i, \p, \len, \t
#endif
.endm
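
For readers new to PA-RISC bit numbering, a small C model (illustrative only, not part of the patch) of what the macro guards against: positions count from the most significant bit, and depi/depdi take the position of the rightmost bit of the field, so a field at 32-bit position p sits at position 32+p in a 64-bit register, which is exactly the adjustment depi_safe applies.

```c
#include <stdint.h>

/* Illustrative model of PA-RISC deposit-immediate semantics. */
static uint64_t deposit(uint64_t t, uint64_t i, int pos, int len, int width)
{
	int shift = width - 1 - pos;		/* field distance from the LSB */
	uint64_t mask = ((1ULL << len) - 1) << shift;

	return (t & ~mask) | ((i << shift) & mask);
}

/*
 * deposit(t, i, p, len, 32) == deposit(t, i, 32 + p, len, 64) for any p,
 * since 32-1-p == 64-1-(32+p). Plain depi on a PA 2.0 machine would
 * additionally leave bits 32..63 of the target undefined, hence the macro.
 */
```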
/* load 32-bit 'value' into 'reg' compensating for the ldil
* sign-extension when running in wide mode.
* WARNING!! neither 'value' nor 'reg' can be expressions
......
@@ -8,39 +8,47 @@
#include <asm/errno.h>
/* The following has to match the LWS code in syscall.S. We have
sixteen four-word locks. */
* 256 four-word locks. We use bits 20-27 of the futex virtual
* address for the hash index.
*/
static inline unsigned long _futex_hash_index(unsigned long ua)
{
return (ua >> 2) & 0x3fc;
}
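
A quick worked example of the hash, using the layout the new comment describes (256 four-word locks); the sample addresses are arbitrary:

```c
/*
 * lws_lock_start[] is an array of u32. Each lock occupies four words
 * (16 bytes), so valid indices are multiples of 4, and
 * (ua >> 2) & 0x3fc keeps address bits 4-11 (bits 20-27 in PA
 * big-endian numbering), i.e. 256 distinct buckets:
 *
 *	_futex_hash_index(0x00000010) == 4	(second lock)
 *	_futex_hash_index(0x00000ff0) == 1020	(last lock)
 *	_futex_hash_index(0x00001000) == 0	(wraps to the first lock)
 */
```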
static inline void
_futex_spin_lock(u32 __user *uaddr)
_futex_spin_lock_irqsave(arch_spinlock_t *s, unsigned long *flags)
{
extern u32 lws_lock_start[];
long index = ((long)uaddr & 0x7f8) >> 1;
arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
preempt_disable();
local_irq_save(*flags);
arch_spin_lock(s);
}
static inline void
_futex_spin_unlock(u32 __user *uaddr)
_futex_spin_unlock_irqrestore(arch_spinlock_t *s, unsigned long *flags)
{
extern u32 lws_lock_start[];
long index = ((long)uaddr & 0x7f8) >> 1;
arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
arch_spin_unlock(s);
preempt_enable();
local_irq_restore(*flags);
}
static inline int
arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
extern u32 lws_lock_start[];
unsigned long ua = (unsigned long)uaddr;
arch_spinlock_t *s;
unsigned long flags;
int oldval, ret;
u32 tmp;
ret = -EFAULT;
s = (arch_spinlock_t *)&lws_lock_start[_futex_hash_index(ua)];
_futex_spin_lock_irqsave(s, &flags);
_futex_spin_lock(uaddr);
if (unlikely(get_user(oldval, uaddr) != 0))
/* Return -EFAULT if we encounter a page fault or COW break */
if (unlikely(get_user(oldval, uaddr) != 0)) {
ret = -EFAULT;
goto out_pagefault_enable;
}
ret = 0;
tmp = oldval;
@@ -63,13 +71,14 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
break;
default:
ret = -ENOSYS;
goto out_pagefault_enable;
}
if (ret == 0 && unlikely(put_user(tmp, uaddr) != 0))
if (unlikely(put_user(tmp, uaddr) != 0))
ret = -EFAULT;
out_pagefault_enable:
_futex_spin_unlock(uaddr);
_futex_spin_unlock_irqrestore(s, &flags);
if (!ret)
*oval = oldval;
@@ -81,7 +90,11 @@ static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
extern u32 lws_lock_start[];
unsigned long ua = (unsigned long)uaddr;
arch_spinlock_t *s;
u32 val;
unsigned long flags;
/* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is
* our gateway page, and causes no end of trouble...
@@ -94,23 +107,25 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
/* HPPA has no cmpxchg in hardware and therefore the
* best we can do here is use an array of locks. The
* lock selected is based on a hash of the userspace
* address. This should scale to a couple of CPUs.
* lock selected is based on a hash of the virtual
* address of the futex. This should scale to a couple
* of CPUs.
*/
_futex_spin_lock(uaddr);
s = (arch_spinlock_t *)&lws_lock_start[_futex_hash_index(ua)];
_futex_spin_lock_irqsave(s, &flags);
if (unlikely(get_user(val, uaddr) != 0)) {
_futex_spin_unlock(uaddr);
_futex_spin_unlock_irqrestore(s, &flags);
return -EFAULT;
}
if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) {
_futex_spin_unlock(uaddr);
_futex_spin_unlock_irqrestore(s, &flags);
return -EFAULT;
}
*uval = val;
_futex_spin_unlock(uaddr);
_futex_spin_unlock_irqrestore(s, &flags);
return 0;
}
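
For context, the generic futex code drives this primitive in a read, compare, retry loop. A simplified sketch (not the kernel's exact code; pagefault discipline omitted):

```c
/*
 * Atomically decrement a positive futex word, retrying on races.
 * Returns 1 if decremented, 0 if the word was not positive, or a
 * negative error on fault. Illustration only.
 */
static int futex_dec_if_positive(u32 __user *uaddr)
{
	u32 cur, old, newval;
	int ret;

	if (get_user(cur, uaddr))
		return -EFAULT;

	do {
		old = cur;
		if ((int)old <= 0)
			return 0;
		newval = old - 1;

		ret = futex_atomic_cmpxchg_inatomic(&cur, uaddr, old, newval);
		if (ret)
			return ret;		/* -EFAULT from the fixup path */
	} while (cur != old);			/* lost a race: retry */

	return 1;
}
```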
......
@@ -273,9 +273,9 @@ static inline int inl(unsigned long addr)
return -1;
}
#define outb(x, y) BUG()
#define outw(x, y) BUG()
#define outl(x, y) BUG()
#define outb(x, y) ({(void)(x); (void)(y); BUG(); 0;})
#define outw(x, y) ({(void)(x); (void)(y); BUG(); 0;})
#define outl(x, y) ({(void)(x); (void)(y); BUG(); 0;})
#endif
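
The gain from the statement-expression form is easiest to see at a call site. The old macros discarded their arguments, so callers tripped -Wunused-but-set-variable and outb() could not stand where an expression is expected; a small illustrative caller:

```c
static void write_reg(unsigned long port)
{
	unsigned char val = 0x80;

	/*
	 * Old form (#define outb(x, y) BUG()): "val set but not used",
	 * and unusable in expression position. New form: both arguments
	 * are evaluated and the expression yields 0, so this compiles
	 * cleanly even though the port space does not exist.
	 */
	outb(val, port);
}
```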
/*
......
@@ -5,9 +5,11 @@
#define lpa(va) ({ \
unsigned long pa; \
__asm__ __volatile__( \
"copy %%r0,%0\n\t" \
"lpa %%r0(%1),%0" \
: "=r" (pa) \
"copy %%r0,%0\n" \
"8:\tlpa %%r0(%1),%0\n" \
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY(8b, 9b) \
: "=&r" (pa) \
: "r" (va) \
: "memory" \
); \
@@ -17,9 +19,11 @@
#define lpa_user(va) ({ \
unsigned long pa; \
__asm__ __volatile__( \
"copy %%r0,%0\n\t" \
"lpa %%r0(%%sr3,%1),%0" \
: "=r" (pa) \
"copy %%r0,%0\n" \
"8:\tlpa %%r0(%%sr3,%1),%0\n" \
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY(8b, 9b) \
: "=&r" (pa) \
: "r" (va) \
: "memory" \
); \
......
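
With the exception-table entries in place, both macros become safe probes: if the address has no translation, lpa traps, the fixup branches to the 9: label, and the result keeps the zero preloaded by the copy. A hedged usage sketch (the fallback helper is hypothetical):

```c
unsigned long pa = lpa_user(uaddr);	/* physical address, or 0 */

if (!pa)
	return handle_unmapped(uaddr);	/* hypothetical slow path */
```

The constraint change from "=r" to "=&r" matters as well: the output register is written (zeroed) before the input is read, so the earlyclobber stops the compiler from assigning pa and va to the same register.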
@@ -53,15 +53,18 @@ struct exception_table_entry {
/*
* ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
* (with lowest bit set) for which the fault handler in fixup_exception() will
* load -EFAULT into %r8 for a read or write fault, and zeroes the target
* load -EFAULT into %r29 for a read or write fault, and zeroes the target
* register in case of a read fault in get_user().
*/
#define ASM_EXCEPTIONTABLE_REG 29
#define ASM_EXCEPTIONTABLE_VAR(__variable) \
register long __variable __asm__ ("r29") = 0
#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)
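
The `except_addr + 1` plants a flag in the low bit of the relative fixup offset. A condensed view of how the handler consumes it (compare the fixup_exception() hunk further down; instruction-address low bits are masked on parisc, so the flag costs nothing):

```c
if (fix->fixup & 1)
	regs->gr[ASM_EXCEPTIONTABLE_REG] = -EFAULT;	/* report the fault */

/* branch to the fixup routine; the flag bit is stripped by the mask */
regs->iaoq[0] = ((unsigned long)&fix->fixup + fix->fixup) & ~3UL;
```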
#define __get_user_internal(sr, val, ptr) \
({ \
register long __gu_err __asm__ ("r8") = 0; \
ASM_EXCEPTIONTABLE_VAR(__gu_err); \
\
switch (sizeof(*(ptr))) { \
case 1: __get_user_asm(sr, val, "ldb", ptr); break; \
@@ -131,7 +134,7 @@ struct exception_table_entry {
#define __put_user_internal(sr, x, ptr) \
({ \
register long __pu_err __asm__ ("r8") = 0; \
ASM_EXCEPTIONTABLE_VAR(__pu_err); \
__typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x); \
\
switch (sizeof(*(ptr))) { \
@@ -168,7 +171,8 @@ struct exception_table_entry {
* gcc knows about, so there are no aliasing issues. These macros must
* also be aware that fixups are executed in the context of the fault,
* and any registers used there must be listed as clobbers.
* r8 is already listed as err.
* The register holding the possible EFAULT error (ASM_EXCEPTIONTABLE_REG)
* is already listed as input and output register.
*/
#define __put_user_asm(sr, stx, x, ptr) \
......
@@ -403,7 +403,7 @@ struct zeropage {
int vec_pad1[6];
/* [0x040] reserved processor dependent */
int pad0[112];
int pad0[112]; /* in QEMU pad0[0] holds "SeaBIOS\0" */
/* [0x200] reserved */
int pad1[84];
@@ -691,6 +691,22 @@ struct pdc_hpmc_pim_20 { /* PDC_PIM */
unsigned long long fr[32];
};
struct pim_cpu_state_cf {
union {
unsigned int
iqv : 1, /* IIA queue Valid */
iqf : 1, /* IIA queue Failure */
ipv : 1, /* IPRs Valid */
grv : 1, /* GRs Valid */
crv : 1, /* CRs Valid */
srv : 1, /* SRs Valid */
trv : 1, /* CR24 through CR31 valid */
pad : 24, /* reserved */
td : 1; /* TOC did not cause any damage to the system state */
unsigned int val;
};
};
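
Replacing the bare `unsigned int cpu_state` with this union lets callers test individual validity bits or the whole word at once. A hedged sketch of a consumer (the helper name is illustrative):

```c
/* Are the general registers in the PIM dump trustworthy? */
static bool pim_gr_valid(struct pdc_toc_pim_20 *toc)
{
	if (toc->cpu_state.val == 0 || toc->cpu_state.val == ~0U)
		return false;	/* firmware gave us nothing useful */

	return toc->cpu_state.grv;
}
```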
struct pdc_toc_pim_11 {
unsigned int gr[32];
unsigned int cr[32];
@@ -698,8 +714,7 @@ struct pdc_toc_pim_11 {
unsigned int iasq_back;
unsigned int iaoq_back;
unsigned int check_type;
unsigned int hversion;
unsigned int cpu_state;
struct pim_cpu_state_cf cpu_state;
};
struct pdc_toc_pim_20 {
@@ -709,8 +724,7 @@ struct pdc_toc_pim_20 {
unsigned long long iasq_back;
unsigned long long iaoq_back;
unsigned int check_type;
unsigned int hversion;
unsigned int cpu_state;
struct pim_cpu_state_cf cpu_state;
};
#endif /* !defined(__ASSEMBLY__) */
......
@@ -10,7 +10,7 @@ obj-y := cache.o pacache.o setup.o pdt.o traps.o time.o irq.o \
ptrace.o hardware.o inventory.o drivers.o alternative.o \
signal.o hpmc.o real2.o parisc_ksyms.o unaligned.o \
process.o processor.o pdc_cons.o pdc_chassis.o unwind.o \
patch.o
patch.o toc.o toc_asm.o
ifdef CONFIG_FUNCTION_TRACER
# Do not profile debug and lowlevel utilities
@@ -39,4 +39,3 @@ obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_KEXEC_CORE) += kexec.o relocate_kernel.o
obj-$(CONFIG_KEXEC_FILE) += kexec_file.o
obj-$(CONFIG_TOC) += toc.o toc_asm.o
@@ -36,7 +36,11 @@
int main(void)
{
DEFINE(TASK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags));
#ifdef CONFIG_SMP
DEFINE(TASK_TI_CPU, offsetof(struct task_struct, thread_info.cpu));
#endif
DEFINE(TASK_STACK, offsetof(struct task_struct, stack));
DEFINE(TASK_PAGEFAULT_DISABLED, offsetof(struct task_struct, pagefault_disabled));
BLANK();
DEFINE(TASK_REGS, offsetof(struct task_struct, thread.regs));
DEFINE(TASK_PT_PSW, offsetof(struct task_struct, thread.regs.gr[ 0]));
......
@@ -43,10 +43,8 @@
* IODC requires 7K byte stack. That leaves 1K byte for os_hpmc.
*/
__PAGE_ALIGNED_BSS
.align 4096
hpmc_stack:
.block 16384
.import toc_stack,data
#define hpmc_stack toc_stack /* re-use the TOC stack */
#define HPMC_IODC_BUF_SIZE 0x8000
......
@@ -3,6 +3,7 @@
* PA-RISC KGDB support
*
* Copyright (c) 2019 Sven Schnelle <svens@stackframe.org>
* Copyright (c) 2022 Helge Deller <deller@gmx.de>
*
*/
@@ -207,3 +208,23 @@ int kgdb_arch_handle_exception(int trap, int signo,
}
return -1;
}
/* KGDB console driver which uses PDC to read chars from keyboard */
static void kgdb_pdc_write_char(u8 chr)
{
/* no need to print char. kgdb will do it. */
}
static struct kgdb_io kgdb_pdc_io_ops = {
.name = "kgdb_pdc",
.read_char = pdc_iodc_getc,
.write_char = kgdb_pdc_write_char,
};
static int __init kgdb_pdc_init(void)
{
kgdb_register_io_module(&kgdb_pdc_io_ops);
return 0;
}
early_initcall(kgdb_pdc_init);
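
A simplified sketch of how the debug core pulls input through the registered backend (the consumer name is hypothetical; the real plumbing lives in kernel/debug/):

```c
/*
 * Illustration only: kgdb ends up polling the registered ops, which
 * here means waiting in PDC firmware for a keypress.
 */
int c = kgdb_pdc_io_ops.read_char();	/* calls pdc_iodc_getc() */
if (c >= 0)
	feed_gdb_protocol(c);		/* hypothetical consumer */
```

This is useful on machines whose only console is the firmware one: booting with kgdbwait then breaks into the debugger using nothing but PDC.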
@@ -9,8 +9,10 @@
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
#include <asm/ldcw.h>
unsigned int __aligned(16) toc_lock = 1;
static unsigned int __aligned(16) toc_lock = 1;
DEFINE_PER_CPU_PAGE_ALIGNED(char [16384], toc_stack);
static void toc20_to_pt_regs(struct pt_regs *regs, struct pdc_toc_pim_20 *toc)
{
@@ -63,7 +65,8 @@ void notrace __noreturn __cold toc_intr(struct pt_regs *regs)
struct pdc_toc_pim_20 pim_data20;
struct pdc_toc_pim_11 pim_data11;
nmi_enter();
/* verify we wrote regs to the correct stack */
BUG_ON(regs != (struct pt_regs *)&per_cpu(toc_stack, raw_smp_processor_id()));
if (boot_cpu_data.cpu_type >= pcxu) {
if (pdc_pim_toc20(&pim_data20))
@@ -76,14 +79,25 @@ void notrace __noreturn __cold toc_intr(struct pt_regs *regs)
}
#ifdef CONFIG_KGDB
nmi_enter();
if (atomic_read(&kgdb_active) != -1)
kgdb_nmicallback(raw_smp_processor_id(), regs);
kgdb_handle_exception(9, SIGTRAP, 0, regs);
#endif
/* serialize output, otherwise all CPUs write backtrace at once */
while (__ldcw(&toc_lock) == 0)
; /* wait */
show_regs(regs);
toc_lock = 1; /* release lock for next CPU */
if (raw_smp_processor_id() != 0)
while (1) ; /* all but the monarch CPU will wait endlessly. */
/* give other CPUs time to show their backtrace */
mdelay(2000);
machine_restart("TOC");
/* should never reach this */
......
@@ -5,34 +5,25 @@
.level 1.1
#include <asm/assembly.h>
#include <asm/psw.h>
#include <linux/threads.h>
#include <linux/linkage.h>
.text
.import toc_intr,code
.import toc_lock,data
.import toc_stack,data
.align 16
ENTRY_CFI(toc_handler)
/*
* synchronize CPUs and obtain offset
* for stack setup.
*/
load32 PA(toc_lock),%r1
0: ldcw,co 0(%r1),%r2
cmpib,= 0,%r2,0b
nop
addi 1,%r2,%r4
stw %r4,0(%r1)
addi -1,%r2,%r4
load32 PA(toc_stack),%sp
/*
* deposit CPU number into stack address,
* so every CPU will have its own stack.
*/
SHLREG %r4,14,%r4
#ifdef CONFIG_SMP
/* get per-cpu toc_stack address. */
mfctl %cr30, %r1
tophys %r1,%r2 /* task_struct */
LDREG TASK_TI_CPU(%r2),%r4 /* cpu */
load32 PA(__per_cpu_offset),%r1
LDREGX %r4(%r1),%r4
add %r4,%sp,%sp
#endif
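
In C terms, the assembly above computes the canonical per-CPU address (a sketch of the equivalence, not code from the patch):

```c
/*
 * LDREG TASK_TI_CPU(%r2)        ->  cpu = current thread_info.cpu
 * LDREGX %r4(%r1); add %r4,%sp  ->  stack += __per_cpu_offset[cpu]
 */
char *stack = (char *)&toc_stack + __per_cpu_offset[cpu];
/* i.e. &per_cpu(toc_stack, cpu), matching the BUG_ON() check in toc.c */
```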
/*
* setup pt_regs on stack and save the
@@ -82,7 +73,3 @@ ENDPROC_CFI(toc_handler)
*/
SYM_DATA(toc_handler_csum, .long 0)
SYM_DATA(toc_handler_size, .long . - toc_handler)
__PAGE_ALIGNED_BSS
.align 64
SYM_DATA(toc_stack, .block 16384*NR_CPUS)
@@ -785,7 +785,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
* unless pagefault_disable() was called before.
*/
if (fault_space == 0 && !faulthandler_disabled())
if (faulthandler_disabled() || fault_space == 0)
{
/* Clean up and return if in exception table. */
if (fixup_exception(regs))
......
@@ -148,11 +148,11 @@ int fixup_exception(struct pt_regs *regs)
* Fix up get_user() and put_user().
* ASM_EXCEPTIONTABLE_ENTRY_EFAULT() sets the least-significant
* bit in the relative address of the fixup routine to indicate
* that %r8 should be loaded with -EFAULT to report a userspace
* access error.
* that gr[ASM_EXCEPTIONTABLE_REG] should be loaded with
* -EFAULT to report a userspace access error.
*/
if (fix->fixup & 1) {
regs->gr[8] = -EFAULT;
regs->gr[ASM_EXCEPTIONTABLE_REG] = -EFAULT;
/* zero target register for get_user() */
if (parisc_acctyp(0, regs->iir) == VM_READ) {
@@ -266,14 +266,14 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
unsigned long acc_type;
vm_fault_t fault = 0;
unsigned int flags;
if (faulthandler_disabled())
goto no_context;
char *msg;
tsk = current;
mm = tsk->mm;
if (!mm)
if (!mm) {
msg = "Page fault: no context";
goto no_context;
}
flags = FAULT_FLAG_DEFAULT;
if (user_mode(regs))
@@ -409,6 +409,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
force_sig_fault(signo, si_code, (void __user *) address);
return;
}
msg = "Page fault: bad address";
no_context:
@@ -416,11 +417,13 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
return;
}
parisc_terminate("Bad Address (null pointer deref?)", regs, code, address);
parisc_terminate(msg, regs, code, address);
out_of_memory:
mmap_read_unlock(mm);
if (!user_mode(regs))
if (!user_mode(regs)) {
msg = "Page fault: out of memory";
goto no_context;
}
pagefault_out_of_memory();
}
@@ -482,11 +482,12 @@ static struct attribute *paths_subsys_attrs[] = {
&paths_attr_layer.attr,
NULL,
};
ATTRIBUTE_GROUPS(paths_subsys);
/* Specific kobject type for our PDC paths */
static struct kobj_type ktype_pdcspath = {
.sysfs_ops = &pdcspath_attr_ops,
.default_attrs = paths_subsys_attrs,
.default_groups = paths_subsys_groups,
};
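
ATTRIBUTE_GROUPS(paths_subsys) expands to roughly the following (per include/linux/sysfs.h; shown for orientation), which is what .default_groups now points at:

```c
static const struct attribute_group paths_subsys_group = {
	.attrs = paths_subsys_attrs,
};

static const struct attribute_group *paths_subsys_groups[] = {
	&paths_subsys_group,
	NULL,
};
```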
/* We hard define the 4 types of path we expect to find */
......
@@ -199,12 +199,16 @@ static inline bool __is_kernel_text(unsigned long addr)
* @addr: address to check
*
* Returns: true if the address is located in the kernel range, false otherwise.
* Note: an internal helper, only check the range of _stext to _end.
* Note: an internal helper, check the range of _stext to _end,
* and range from __init_begin to __init_end, which can be outside
* of the _stext to _end range.
*/
static inline bool __is_kernel(unsigned long addr)
{
return addr >= (unsigned long)_stext &&
addr < (unsigned long)_end;
return ((addr >= (unsigned long)_stext &&
addr < (unsigned long)_end) ||
(addr >= (unsigned long)__init_begin &&
addr < (unsigned long)__init_end));
}
#endif /* _ASM_GENERIC_SECTIONS_H_ */
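
The practical effect is easiest to see with the predicate split in two (illustrative only). On architectures such as parisc that place the init sections outside _stext.._end, the old check rejected init addresses, so kallsyms printed raw pointers instead of init function names:

```c
bool core = addr >= (unsigned long)_stext       && addr < (unsigned long)_end;
bool init = addr >= (unsigned long)__init_begin && addr < (unsigned long)__init_end;

/*
 * old __is_kernel(): core only    -> init symbols fell through
 * new __is_kernel(): core || init -> init symbols resolve again
 */
```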
@@ -33,4 +33,9 @@ if [ -n "${building_out_of_srctree}" ]; then
do
rm -f arch/mips/boot/compressed/${f}
done
for f in firmware.c real2.S
do
rm -f arch/parisc/boot/compressed/${f}
done
fi