Commit 7f9f4430 authored by Linus Torvalds

Merge tag 'cris-for-4.1' of git://git.kernel.org/pub/scm/linux/kernel/git/jesper/cris

Pull arch/cris updates from Jesper Nilsson:
 "Some much needed love for the CRIS-port.

  There's a bunch of changes this time, giving the CRISv32 port a bit of
  modern makeover with device-tree, irq domain and gpiolib support, and
  more switchover to generic frameworks.

  Some small fixes and removal of the theoretical SMP support brings up
  the rear"

* tag 'cris-for-4.1' of git://git.kernel.org/pub/scm/linux/kernel/git/jesper/cris:
  cris: fix integer overflow in ELF_ET_DYN_BASE
  CRISv32: use GENERIC_SCHED_CLOCK
  CRISv32: use MMIO clocksource
  CRISv32: use generic clockevents
  CRIS: use generic headers via Kbuild
  CRIS: use generic cmpxchg.h
  CRIS: use generic atomic.h
  CRIS: use generic atomic bitops
  CRISv10: remove redundant macros from system.h
  CRIS: remove SMP code
  CRISv32: don't enable irqs in INIT_THREAD
  CRISv32: handle multiple signals
  CRISv32: prevent bogus restarts on sigreturn
  CRISv32: don't attempt syscall restart on irq exit
  Add binding documentation for CRIS
  CRIS: add Axis 88 board device tree
  CRISv32: add device tree support
  CRISv32: add irq domains support
  CRIS: enable GPIOLIB
parents 63905bba d939b52a
Axis Communications AB
ARTPEC series SoC Device Tree Bindings
CRISv32 based SoCs are ETRAX FS and ARTPEC-3:
- compatible = "axis,crisv32";
Boards based on the CRIS SoCs:
Required root node properties:
- compatible = should be one or more of the following:
- "axis,dev88" - for Axis devboard 88 with ETRAX FS
Optional:
* CRISv32 Interrupt Controller
Interrupt controller for the CRISv32 SoCs.
Main node required properties:
- compatible : should be:
"axis,crisv32-intc"
- interrupt-controller : Identifies the node as an interrupt controller
- #interrupt-cells : Specifies the number of cells needed to encode an
interrupt source. The type shall be a <u32> and the value shall be 1.
- reg: physical base address and size of the intc registers map.
Example:
intc: interrupt-controller {
compatible = "axis,crisv32-intc";
reg = <0xb001c000 0x1000>;
interrupt-controller;
#interrupt-cells = <1>;
};
@@ -46,12 +46,18 @@ config CRIS
select ARCH_WANT_IPC_PARSE_VERSION
select GENERIC_IRQ_SHOW
select GENERIC_IOMAP
select GENERIC_SMP_IDLE_THREAD if ETRAX_ARCH_V32
select GENERIC_CMOS_UPDATE
select MODULES_USE_ELF_RELA
select CLONE_BACKWARDS2
select OLD_SIGSUSPEND
select OLD_SIGACTION
select ARCH_REQUIRE_GPIOLIB
select IRQ_DOMAIN if ETRAX_ARCH_V32
select OF if ETRAX_ARCH_V32
select OF_EARLY_FLATTREE if ETRAX_ARCH_V32
select CLKSRC_MMIO if ETRAX_ARCH_V32
select GENERIC_CLOCKEVENTS if ETRAX_ARCH_V32
select GENERIC_SCHED_CLOCK if ETRAX_ARCH_V32
config HZ
int
@@ -61,6 +67,10 @@ config NR_CPUS
int
default "1"
config BUILTIN_DTB
string "DTB to build into the kernel image"
depends on OF
source "init/Kconfig" source "init/Kconfig"
source "kernel/Kconfig.freezer" source "kernel/Kconfig.freezer"
......
...@@ -40,6 +40,10 @@ else ...@@ -40,6 +40,10 @@ else
MACH := MACH :=
endif endif
ifneq ($(CONFIG_BUILTIN_DTB),"")
core-$(CONFIG_OF) += arch/cris/boot/dts/
endif
LD = $(CROSS_COMPILE)ld -mcrislinux
OBJCOPYFLAGS := -O binary -R .note -R .comment -S
...
@@ -9,7 +9,6 @@ obj-y := entry.o traps.o irq.o debugport.o \
process.o ptrace.o setup.o signal.o traps.o time.o \
cache.o cacheflush.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_ETRAX_KGDB) += kgdb.o kgdb_asm.o
obj-$(CONFIG_ETRAX_FAST_TIMER) += fasttimer.o
obj-$(CONFIG_MODULES) += crisksyms.o
...
@@ -99,6 +99,8 @@ ret_from_kernel_thread:
.type ret_from_intr,@function
ret_from_intr:
moveq 0, $r9 ; not a syscall
;; Check for resched if preemptive kernel, or if we're going back to
;; user-mode. This test matches the user_regs(regs) macro. Don't simply
;; test CCS since that doesn't necessarily reflect what mode we'll
@@ -145,7 +147,7 @@ system_call:
;; Stack-frame similar to the irq heads, which is reversed in
;; ret_from_sys_call.
sub.d 92, $sp ; Skip EXS and EDA.
sub.d 92, $sp ; Skip EDA.
movem $r13, [$sp]
move.d $sp, $r8
addq 14*4, $r8
@@ -156,8 +158,9 @@ system_call:
move $ccs, $r4
move $srp, $r5
move $erp, $r6
move.d $r9, $r7 ; Store syscall number in EXS
subq 4, $sp
movem $r6, [$r8]
movem $r7, [$r8]
ei ; Enable interrupts while processing syscalls.
move.d $r10, [$sp]
@@ -277,44 +280,15 @@ _syscall_exit_work:
.type _work_pending,@function
_work_pending:
addoq +TI_flags, $r0, $acr
move.d [$acr], $r10
btstq TIF_NEED_RESCHED, $r10 ; Need resched?
bpl _work_notifysig ; No, must be signal/notify.
nop
.size _work_pending, . - _work_pending
.type _work_resched,@function
_work_resched:
move.d $r9, $r1 ; Preserve R9.
jsr schedule
nop
move.d $r1, $r9
di
addoq +TI_flags, $r0, $acr
move.d [$acr], $r1
and.d _TIF_WORK_MASK, $r1 ; Ignore sycall trace counter.
beq _Rexit
nop
btstq TIF_NEED_RESCHED, $r1
bmi _work_resched ; current->work.need_resched.
nop
.size _work_resched, . - _work_resched
.type _work_notifysig,@function
_work_notifysig:
;; Deal with pending signals and notify-resume requests.
addoq +TI_flags, $r0, $acr
move.d [$acr], $r12 ; The thread_info_flags parameter.
move.d $sp, $r11 ; The regs param.
jsr do_notify_resume
jsr do_work_pending
move.d $r9, $r10 ; do_notify_resume syscall/irq param.
move.d $r9, $r10 ; The syscall/irq param.
ba _Rexit
nop
.size _work_notifysig, . - _work_notifysig
.size _work_pending, . - _work_pending
;; We get here as a sidetrack when we've entered a syscall with the
;; trace-bit set. We need to call do_syscall_trace and then continue
...
@@ -52,11 +52,6 @@ tstart:
GIO_INIT
#ifdef CONFIG_SMP
secondary_cpu_entry: /* Entry point for secondary CPUs */
di
#endif
;; Setup and enable the MMU. Use same configuration for both the data
;; and the instruction MMU.
;;
@@ -164,33 +159,6 @@ secondary_cpu_entry: /* Entry point for secondary CPUs */
nop
nop
#ifdef CONFIG_SMP
;; Read CPU ID
move 0, $srs
nop
nop
nop
move $s12, $r0
cmpq 0, $r0
beq master_cpu
nop
slave_cpu:
; Time to boot-up. Get stack location provided by master CPU.
move.d smp_init_current_idle_thread, $r1
move.d [$r1], $sp
add.d 8192, $sp
move.d ebp_start, $r0 ; Defined in linker-script.
move $r0, $ebp
jsr smp_callin
nop
master_cpu:
/* Set up entry point for secondary CPUs. The boot ROM has set up
* EBP at start of internal memory. The CPU will get there
* later when we issue an IPI to them... */
move.d MEM_INTMEM_START + IPI_INTR_VECT * 4, $r0
move.d secondary_cpu_entry, $r1
move.d $r1, [$r0]
#endif
; Check if starting from DRAM (network->RAM boot or unpacked
; compressed kernel), or directly from flash.
lapcq ., $r0
...
@@ -10,6 +10,8 @@
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/threads.h>
@@ -56,9 +58,6 @@ struct cris_irq_allocation irq_allocations[NR_REAL_IRQS] =
static unsigned long irq_regs[NR_CPUS] =
{
regi_irq,
#ifdef CONFIG_SMP
regi_irq2,
#endif
};
#if NR_REAL_IRQS > 32
@@ -431,6 +430,19 @@ crisv32_do_multiple(struct pt_regs* regs)
irq_exit();
}
static int crisv32_irq_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw_irq_num)
{
irq_set_chip_and_handler(virq, &crisv32_irq_type, handle_simple_irq);
return 0;
}
static struct irq_domain_ops crisv32_irq_ops = {
.map = crisv32_irq_map,
.xlate = irq_domain_xlate_onecell,
};
/*
* This is called by start_kernel. It fixes the IRQ masks and setup the
* interrupt vector table to point to bad_interrupt pointers.
@@ -441,6 +453,8 @@ init_IRQ(void)
int i;
int j;
reg_intr_vect_rw_mask vect_mask = {0};
struct device_node *np;
struct irq_domain *domain;
/* Clear all interrupts masks. */
for (i = 0; i < NBR_REGS; i++)
@@ -449,10 +463,15 @@ init_IRQ(void)
for (i = 0; i < 256; i++)
etrax_irv->v[i] = weird_irq;
/* Point all IRQ's to bad handlers. */
np = of_find_compatible_node(NULL, NULL, "axis,crisv32-intc");
domain = irq_domain_add_legacy(np, NR_IRQS - FIRST_IRQ,
FIRST_IRQ, FIRST_IRQ,
&crisv32_irq_ops, NULL);
BUG_ON(!domain);
irq_set_default_host(domain);
of_node_put(np);
for (i = FIRST_IRQ, j = 0; j < NR_IRQS; i++, j++) {
irq_set_chip_and_handler(j, &crisv32_irq_type,
handle_simple_irq);
set_exception_vector(i, interrupt[j]);
}
...
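For orientation (an editor's aside, not part of the commit): with the legacy domain above, irq_domain_xlate_onecell() decodes the single interrupt cell defined by the binding as the hardware irq number, and irq_domain_add_legacy(np, NR_IRQS - FIRST_IRQ, FIRST_IRQ, FIRST_IRQ, ...) maps each hwirq 1:1 onto the same virq. A hypothetical consumer (the example_* names are invented) would resolve and claim an interrupt roughly like this, e.g. the uart node's interrupts = <68> in the device tree further down:

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_irq.h>

static irqreturn_t example_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_attach(struct device_node *np)
{
	/* Cell 0 holds the hwirq; with the 1:1 legacy mapping this
	 * returns virq == hwirq (68 for the uart node). */
	unsigned int virq = irq_of_parse_and_map(np, 0);

	if (!virq)
		return -EINVAL;
	return request_irq(virq, example_isr, 0, "example", NULL);
}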
@@ -63,11 +63,6 @@ int show_cpuinfo(struct seq_file *m, void *v)
info = &cpinfo[ARRAY_SIZE(cpinfo) - 1];
#ifdef CONFIG_SMP
if (!cpu_online(cpu))
return 0;
#endif
revision = rdvr();
for (i = 0; i < ARRAY_SIZE(cpinfo); i++) {
...
@@ -72,6 +72,9 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
/* Make that the user-mode flag is set. */
regs->ccs |= (1 << (U_CCS_BITNR + CCS_SHIFT));
/* Don't perform syscall restarting */
regs->exs = -1;
/* Restore the old USP. */
err |= __get_user(old_usp, &sc->usp);
wrusp(old_usp);
@@ -425,6 +428,8 @@ do_signal(int canrestart, struct pt_regs *regs)
{
struct ksignal ksig;
canrestart = canrestart && ((int)regs->exs >= 0);
/*
* The common case should go fast, which is why this point is
* reached from kernel-mode. If that's the case, just return
...
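The entry.S and signal.c hunks above combine into one rule: regs->exs now carries the syscall number, an irq entry stores -1 there, and restore_sigcontext() poisons it on sigreturn, so only a frame that really is an interrupted syscall can be rewound for -ERESTARTSYS and friends. A small userspace illustration of that decision (fake_regs is a stand-in for pt_regs, not kernel code):

#include <stdio.h>

struct fake_regs { unsigned long exs; };	/* stand-in for pt_regs */

static int may_restart(int canrestart, struct fake_regs *regs)
{
	/* Same test as do_signal() above: restart only if the frame
	 * holds a real syscall number (exs >= 0). */
	return canrestart && ((int)regs->exs >= 0);
}

int main(void)
{
	struct fake_regs sys = { .exs = 4 };			/* syscall frame */
	struct fake_regs irq = { .exs = (unsigned long)-1 };	/* irq or sigreturn frame */

	printf("syscall: %d, irq: %d\n",
	       may_restart(1, &sys), may_restart(1, &irq));
	return 0;
}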
#include <linux/types.h>
#include <asm/delay.h>
#include <irq.h>
#include <hwregs/intr_vect.h>
#include <hwregs/intr_vect_defs.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <hwregs/asm/mmu_defs_asm.h>
#include <hwregs/supp_reg.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#define IPI_SCHEDULE 1
#define IPI_CALL 2
#define IPI_FLUSH_TLB 4
#define IPI_BOOT 8
#define FLUSH_ALL (void*)0xffffffff
/* Vector of locks used for various atomic operations */
spinlock_t cris_atomic_locks[] = {
[0 ... LOCK_COUNT - 1] = __SPIN_LOCK_UNLOCKED(cris_atomic_locks)
};
/* CPU masks */
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
EXPORT_SYMBOL(phys_cpu_present_map);
/* Variables used during SMP boot */
volatile int cpu_now_booting = 0;
volatile struct thread_info *smp_init_current_idle_thread;
/* Variables used during IPI */
static DEFINE_SPINLOCK(call_lock);
static DEFINE_SPINLOCK(tlbstate_lock);
struct call_data_struct {
void (*func) (void *info);
void *info;
int wait;
};
static struct call_data_struct * call_data;
static struct mm_struct* flush_mm;
static struct vm_area_struct* flush_vma;
static unsigned long flush_addr;
/* Mode registers */
static unsigned long irq_regs[NR_CPUS] = {
regi_irq,
regi_irq2
};
static irqreturn_t crisv32_ipi_interrupt(int irq, void *dev_id);
static int send_ipi(int vector, int wait, cpumask_t cpu_mask);
static struct irqaction irq_ipi = {
.handler = crisv32_ipi_interrupt,
.flags = 0,
.name = "ipi",
};
extern void cris_mmu_init(void);
extern void cris_timer_init(void);
/* SMP initialization */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
int i;
/* From now on we can expect IPIs so set them up */
setup_irq(IPI_INTR_VECT, &irq_ipi);
/* Mark all possible CPUs as present */
for (i = 0; i < max_cpus; i++)
cpumask_set_cpu(i, &phys_cpu_present_map);
}
void smp_prepare_boot_cpu(void)
{
/* PGD pointer has moved after per_cpu initialization so
* update the MMU.
*/
pgd_t **pgd;
pgd = (pgd_t**)&per_cpu(current_pgd, smp_processor_id());
SUPP_BANK_SEL(1);
SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
SUPP_BANK_SEL(2);
SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
set_cpu_online(0, true);
cpumask_set_cpu(0, &phys_cpu_present_map);
set_cpu_possible(0, true);
}
void __init smp_cpus_done(unsigned int max_cpus)
{
}
/* Bring one cpu online.*/
static int __init
smp_boot_one_cpu(int cpuid, struct task_struct idle)
{
unsigned timeout;
cpumask_t cpu_mask;
cpumask_clear(&cpu_mask);
task_thread_info(idle)->cpu = cpuid;
/* Information to the CPU that is about to boot */
smp_init_current_idle_thread = task_thread_info(idle);
cpu_now_booting = cpuid;
/* Kick it */
set_cpu_online(cpuid, true);
cpumask_set_cpu(cpuid, &cpu_mask);
send_ipi(IPI_BOOT, 0, cpu_mask);
set_cpu_online(cpuid, false);
/* Wait for CPU to come online */
for (timeout = 0; timeout < 10000; timeout++) {
if(cpu_online(cpuid)) {
cpu_now_booting = 0;
smp_init_current_idle_thread = NULL;
return 0; /* CPU online */
}
udelay(100);
barrier();
}
printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
return -1;
}
/* Secondary CPUs starts using C here. Here we need to setup CPU
* specific stuff such as the local timer and the MMU. */
void __init smp_callin(void)
{
int cpu = cpu_now_booting;
reg_intr_vect_rw_mask vect_mask = {0};
/* Initialise the idle task for this CPU */
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
/* Set up MMU */
cris_mmu_init();
__flush_tlb_all();
/* Setup local timer. */
cris_timer_init();
/* Enable IRQ and idle */
REG_WR(intr_vect, irq_regs[cpu], rw_mask, vect_mask);
crisv32_unmask_irq(IPI_INTR_VECT);
crisv32_unmask_irq(TIMER0_INTR_VECT);
preempt_disable();
notify_cpu_starting(cpu);
local_irq_enable();
set_cpu_online(cpu, true);
cpu_startup_entry(CPUHP_ONLINE);
}
/* Stop execution on this CPU.*/
void stop_this_cpu(void* dummy)
{
local_irq_disable();
asm volatile("halt");
}
/* Other calls */
void smp_send_stop(void)
{
smp_call_function(stop_this_cpu, NULL, 0);
}
int setup_profiling_timer(unsigned int multiplier)
{
return -EINVAL;
}
/* cache_decay_ticks is used by the scheduler to decide if a process
* is "hot" on one CPU. A higher value means a higher penalty to move
* a process to another CPU. Our cache is rather small so we report
* 1 tick.
*/
unsigned long cache_decay_ticks = 1;
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
smp_boot_one_cpu(cpu, tidle);
return cpu_online(cpu) ? 0 : -ENOSYS;
}
void smp_send_reschedule(int cpu)
{
cpumask_t cpu_mask;
cpumask_clear(&cpu_mask);
cpumask_set_cpu(cpu, &cpu_mask);
send_ipi(IPI_SCHEDULE, 0, cpu_mask);
}
/* TLB flushing
*
* Flush needs to be done on the local CPU and on any other CPU that
* may have the same mapping. The mm->cpu_vm_mask is used to keep track
* of which CPUs that a specific process has been executed on.
*/
void flush_tlb_common(struct mm_struct* mm, struct vm_area_struct* vma, unsigned long addr)
{
unsigned long flags;
cpumask_t cpu_mask;
spin_lock_irqsave(&tlbstate_lock, flags);
cpu_mask = (mm == FLUSH_ALL ? cpu_all_mask : *mm_cpumask(mm));
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
flush_mm = mm;
flush_vma = vma;
flush_addr = addr;
send_ipi(IPI_FLUSH_TLB, 1, cpu_mask);
spin_unlock_irqrestore(&tlbstate_lock, flags);
}
void flush_tlb_all(void)
{
__flush_tlb_all();
flush_tlb_common(FLUSH_ALL, FLUSH_ALL, 0);
}
void flush_tlb_mm(struct mm_struct *mm)
{
__flush_tlb_mm(mm);
flush_tlb_common(mm, FLUSH_ALL, 0);
/* No more mappings in other CPUs */
cpumask_clear(mm_cpumask(mm));
cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
}
void flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr)
{
__flush_tlb_page(vma, addr);
flush_tlb_common(vma->vm_mm, vma, addr);
}
/* Inter processor interrupts
*
* The IPIs are used for:
* * Force a schedule on a CPU
* * FLush TLB on other CPUs
* * Call a function on other CPUs
*/
int send_ipi(int vector, int wait, cpumask_t cpu_mask)
{
int i = 0;
reg_intr_vect_rw_ipi ipi = REG_RD(intr_vect, irq_regs[i], rw_ipi);
int ret = 0;
/* Calculate CPUs to send to. */
cpumask_and(&cpu_mask, &cpu_mask, cpu_online_mask);
/* Send the IPI. */
for_each_cpu(i, &cpu_mask)
{
ipi.vector |= vector;
REG_WR(intr_vect, irq_regs[i], rw_ipi, ipi);
}
/* Wait for IPI to finish on other CPUS */
if (wait) {
for_each_cpu(i, &cpu_mask) {
int j;
for (j = 0 ; j < 1000; j++) {
ipi = REG_RD(intr_vect, irq_regs[i], rw_ipi);
if (!ipi.vector)
break;
udelay(100);
}
/* Timeout? */
if (ipi.vector) {
printk("SMP call timeout from %d to %d\n", smp_processor_id(), i);
ret = -ETIMEDOUT;
dump_stack();
}
}
}
return ret;
}
/*
* You must not call this function with disabled interrupts or from a
* hardware interrupt handler or from a bottom half handler.
*/
int smp_call_function(void (*func)(void *info), void *info, int wait)
{
cpumask_t cpu_mask;
struct call_data_struct data;
int ret;
cpumask_setall(&cpu_mask);
cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
WARN_ON(irqs_disabled());
data.func = func;
data.info = info;
data.wait = wait;
spin_lock(&call_lock);
call_data = &data;
ret = send_ipi(IPI_CALL, wait, cpu_mask);
spin_unlock(&call_lock);
return ret;
}
irqreturn_t crisv32_ipi_interrupt(int irq, void *dev_id)
{
void (*func) (void *info) = call_data->func;
void *info = call_data->info;
reg_intr_vect_rw_ipi ipi;
ipi = REG_RD(intr_vect, irq_regs[smp_processor_id()], rw_ipi);
if (ipi.vector & IPI_SCHEDULE) {
scheduler_ipi();
}
if (ipi.vector & IPI_CALL) {
func(info);
}
if (ipi.vector & IPI_FLUSH_TLB) {
if (flush_mm == FLUSH_ALL)
__flush_tlb_all();
else if (flush_vma == FLUSH_ALL)
__flush_tlb_mm(flush_mm);
else
__flush_tlb_page(flush_vma, flush_addr);
}
ipi.vector = 0;
REG_WR(intr_vect, irq_regs[smp_processor_id()], rw_ipi, ipi);
return IRQ_HANDLED;
}
@@ -8,12 +8,14 @@
#include <linux/timex.h>
#include <linux/time.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/swap.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/threads.h>
#include <linux/cpufreq.h>
#include <linux/sched_clock.h>
#include <linux/mm.h>
#include <asm/types.h>
#include <asm/signal.h>
@@ -36,33 +38,11 @@
/* Number of 763 counts before watchdog bites */
#define ETRAX_WD_CNT ((2*ETRAX_WD_HZ)/HZ + 1)
#define CRISV32_TIMER_FREQ (100000000lu)
/* Register the continuos readonly timer available in FS and ARTPEC-3. */
static cycle_t read_cont_rotime(struct clocksource *cs)
{
return (u32)REG_RD(timer, regi_timer0, r_time);
}
static struct clocksource cont_rotime = {
.name = "crisv32_rotime",
.rating = 300,
.read = read_cont_rotime,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static int __init etrax_init_cont_rotime(void)
{
clocksource_register_khz(&cont_rotime, 100000);
return 0;
}
arch_initcall(etrax_init_cont_rotime);
unsigned long timer_regs[NR_CPUS] =
{
regi_timer0,
#ifdef CONFIG_SMP
regi_timer2
#endif
};
extern int set_rtc_mmss(unsigned long nowtime);
@@ -189,81 +169,104 @@ void handle_watchdog_bite(struct pt_regs *regs)
#endif
}
/*
 * timer_interrupt() needs to keep up the real-time clock,
 * as well as call the "xtime_update()" routine every clocktick.
 */
extern void cris_do_profile(struct pt_regs *regs);
static inline irqreturn_t timer_interrupt(int irq, void *dev_id)
{
struct pt_regs *regs = get_irq_regs();
int cpu = smp_processor_id();
reg_timer_r_masked_intr masked_intr;
reg_timer_rw_ack_intr ack_intr = { 0 };
/* Check if the timer interrupt is for us (a tmr0 int) */
masked_intr = REG_RD(timer, timer_regs[cpu], r_masked_intr);
if (!masked_intr.tmr0)
return IRQ_NONE;
/* Acknowledge the timer irq. */
ack_intr.tmr0 = 1;
REG_WR(timer, timer_regs[cpu], rw_ack_intr, ack_intr);
/* Reset watchdog otherwise it resets us! */
reset_watchdog();
/* Update statistics. */
update_process_times(user_mode(regs));
cris_do_profile(regs); /* Save profiling information */
/* The master CPU is responsible for the time keeping. */
if (cpu != 0)
return IRQ_HANDLED;
/* Call the real timer interrupt handler */
xtime_update(1);
return IRQ_HANDLED;
}
extern void cris_profile_sample(struct pt_regs *regs);
static void __iomem *timer_base;
static void crisv32_clkevt_mode(enum clock_event_mode mode,
struct clock_event_device *dev)
{
reg_timer_rw_tmr0_ctrl ctrl = {
.op = regk_timer_hold,
.freq = regk_timer_f100,
};
REG_WR(timer, timer_base, rw_tmr0_ctrl, ctrl);
}
static int crisv32_clkevt_next_event(unsigned long evt,
struct clock_event_device *dev)
{
reg_timer_rw_tmr0_ctrl ctrl = {
.op = regk_timer_ld,
.freq = regk_timer_f100,
};
REG_WR(timer, timer_base, rw_tmr0_div, evt);
REG_WR(timer, timer_base, rw_tmr0_ctrl, ctrl);
ctrl.op = regk_timer_run;
REG_WR(timer, timer_base, rw_tmr0_ctrl, ctrl);
return 0;
}
static irqreturn_t crisv32_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
reg_timer_rw_tmr0_ctrl ctrl = {
.op = regk_timer_hold,
.freq = regk_timer_f100,
};
reg_timer_rw_ack_intr ack = { .tmr0 = 1 };
reg_timer_r_masked_intr intr;
intr = REG_RD(timer, timer_base, r_masked_intr);
if (!intr.tmr0)
return IRQ_NONE;
REG_WR(timer, timer_base, rw_tmr0_ctrl, ctrl);
REG_WR(timer, timer_base, rw_ack_intr, ack);
reset_watchdog();
#ifdef CONFIG_SYSTEM_PROFILER
cris_profile_sample(get_irq_regs());
#endif
evt->event_handler(evt);
return IRQ_HANDLED;
}
static struct clock_event_device crisv32_clockevent = {
.name = "crisv32-timer",
.rating = 300,
.features = CLOCK_EVT_FEAT_ONESHOT,
.set_mode = crisv32_clkevt_mode,
.set_next_event = crisv32_clkevt_next_event,
};
/* Timer is IRQF_SHARED so drivers can add stuff to the timer irq chain. */
static struct irqaction irq_timer = {
.handler = timer_interrupt,
.flags = IRQF_SHARED,
.name = "timer"
};
static struct irqaction irq_timer = {
.handler = crisv32_timer_interrupt,
.flags = IRQF_TIMER | IRQF_SHARED,
.name = "crisv32-timer",
.dev_id = &crisv32_clockevent,
};
void __init cris_timer_init(void)
{
int cpu = smp_processor_id();
reg_timer_rw_tmr0_ctrl tmr0_ctrl = { 0 };
reg_timer_rw_tmr0_div tmr0_div = TIMER0_DIV;
reg_timer_rw_intr_mask timer_intr_mask;
/* Setup the etrax timers.
 * Base frequency is 100MHz, divider 1000000 -> 100 HZ
 * We use timer0, so timer1 is free.
 * The trig timer is used by the fasttimer API if enabled.
 */
tmr0_ctrl.op = regk_timer_ld;
tmr0_ctrl.freq = regk_timer_f100;
REG_WR(timer, timer_regs[cpu], rw_tmr0_div, tmr0_div);
REG_WR(timer, timer_regs[cpu], rw_tmr0_ctrl, tmr0_ctrl); /* Load */
tmr0_ctrl.op = regk_timer_run;
REG_WR(timer, timer_regs[cpu], rw_tmr0_ctrl, tmr0_ctrl); /* Start */
/* Enable the timer irq. */
timer_intr_mask = REG_RD(timer, timer_regs[cpu], rw_intr_mask);
timer_intr_mask.tmr0 = 1;
REG_WR(timer, timer_regs[cpu], rw_intr_mask, timer_intr_mask);
}
static u64 notrace crisv32_timer_sched_clock(void)
{
return REG_RD(timer, timer_base, r_time);
}
static void __init crisv32_timer_init(void)
{
reg_timer_rw_intr_mask timer_intr_mask;
reg_timer_rw_tmr0_ctrl ctrl = {
.op = regk_timer_hold,
.freq = regk_timer_f100,
};
REG_WR(timer, timer_base, rw_tmr0_ctrl, ctrl);
timer_intr_mask = REG_RD(timer, timer_base, rw_intr_mask);
timer_intr_mask.tmr0 = 1;
REG_WR(timer, timer_base, rw_intr_mask, timer_intr_mask);
}
void __init time_init(void)
{
reg_intr_vect_rw_mask intr_mask;
int irq;
int ret;
/* Probe for the RTC and read it if it exists.
 * Before the RTC can be probed the loops_per_usec variable needs
@@ -273,17 +276,28 @@ void __init time_init(void)
 */
loops_per_usec = 50;
/* Start CPU local timer. */
cris_timer_init();
/* Enable the timer irq in global config. */
intr_mask = REG_RD_VECT(intr_vect, regi_irq, rw_mask, 1);
intr_mask.timer0 = 1;
REG_WR_VECT(intr_vect, regi_irq, rw_mask, 1, intr_mask);
/* Now actually register the timer irq handler that calls
 * timer_interrupt(). */
setup_irq(TIMER0_INTR_VECT, &irq_timer);
irq = TIMER0_INTR_VECT;
timer_base = (void __iomem *) regi_timer0;
crisv32_timer_init();
sched_clock_register(crisv32_timer_sched_clock, 32,
CRISV32_TIMER_FREQ);
clocksource_mmio_init(timer_base + REG_RD_ADDR_timer_r_time,
"crisv32-timer", CRISV32_TIMER_FREQ,
300, 32, clocksource_mmio_readl_up);
crisv32_clockevent.cpumask = cpu_possible_mask;
crisv32_clockevent.irq = irq;
ret = setup_irq(irq, &irq_timer);
if (ret)
pr_warn("failed to setup irq %d\n", irq);
clockevents_config_and_register(&crisv32_clockevent,
CRISV32_TIMER_FREQ,
2, 0xffffffff);
/* Enable watchdog if we should use one. */
...
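Some sanity arithmetic on the constants above (an editor's sketch, not commit content): the free-running counter ticks at CRISV32_TIMER_FREQ = 100 MHz with 32 bits, so the clocksource and sched_clock get 10 ns resolution and the raw counter wraps after about 42.9 s (the GENERIC_SCHED_CLOCK core extends the value to 64 bits across wraps), and the clockevent's one-shot range of 2 to 0xffffffff ticks spans 20 ns up to that same 42.9 s:

#include <stdio.h>

int main(void)
{
	const double freq = 100000000.0;	/* CRISV32_TIMER_FREQ */

	/* clocksource/sched_clock: 10 ns per tick, 32-bit wrap. */
	printf("resolution: %.0f ns\n", 1e9 / freq);
	printf("counter wraps after %.1f s\n", 4294967296.0 / freq);

	/* clockevents_config_and_register(..., 2, 0xffffffff):
	 * min one-shot delta is 2 ticks (20 ns), max is 0xffffffff. */
	printf("max one-shot: %.1f s\n", 4294967295.0 / freq);
	return 0;
}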
@@ -3,5 +3,5 @@
#
lib-y = checksum.o checksumcopy.o string.o usercopy.o memset.o \
csumcpfruser.o spinlock.o delay.o strcmp.o
csumcpfruser.o delay.o strcmp.o
;; Core of the spinlock implementation
;;
;; Copyright (C) 2004 Axis Communications AB.
;;
;; Author: Mikael Starvik
.global cris_spin_lock
.type cris_spin_lock,@function
.global cris_spin_trylock
.type cris_spin_trylock,@function
.text
cris_spin_lock:
clearf p
1: test.b [$r10]
beq 1b
clearf p
ax
clear.b [$r10]
bcs 1b
clearf p
ret
nop
.size cris_spin_lock, . - cris_spin_lock
cris_spin_trylock:
clearf p
1: move.b [$r10], $r11
ax
clear.b [$r10]
bcs 1b
clearf p
ret
movu.b $r11,$r10
.size cris_spin_trylock, . - cris_spin_trylock
@@ -40,17 +40,6 @@ void __init cris_mmu_init(void)
*/
per_cpu(current_pgd, smp_processor_id()) = init_mm.pgd;
#ifdef CONFIG_SMP
{
pgd_t **pgd;
pgd = (pgd_t**)&per_cpu(current_pgd, smp_processor_id());
SUPP_BANK_SEL(1);
SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
SUPP_BANK_SEL(2);
SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
}
#endif
/* Initialise the TLB. Function found in tlb.c. */
tlb_init();
...
@@ -115,11 +115,7 @@
move.d $r0, [$r1] ; last_refill_cause = rw_mm_cause
3: ; Probably not in a loop, continue normal processing
#ifdef CONFIG_SMP
move $s7, $acr ; PGD
#else
move.d current_pgd, $acr ; PGD
#endif
; Look up PMD in PGD
lsrq 24, $r0 ; Get PMD index into PGD (bit 24-31)
move.d [$acr], $acr ; PGD for the current process
...
BUILTIN_DTB := $(patsubst "%",%,$(CONFIG_BUILTIN_DTB)).dtb.o
ifneq ($(CONFIG_BUILTIN_DTB),"")
obj-$(CONFIG_OF) += $(BUILTIN_DTB)
endif
clean-files := *.dtb.S
/dts-v1/;
/include/ "etraxfs.dtsi"
/ {
model = "Axis 88 Developer Board";
compatible = "axis,dev88";
aliases {
serial0 = &uart0;
};
soc {
uart0: serial@b00260000 {
status = "okay";
};
};
};
/ {
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&intc>;
cpus {
#address-cells = <1>;
#size-cells = <0>;
cpu@0 {
device_type = "cpu";
model = "axis,crisv32";
reg = <0>;
};
};
soc {
compatible = "simple-bus";
model = "etraxfs";
#address-cells = <1>;
#size-cells = <1>;
ranges;
intc: interrupt-controller {
compatible = "axis,crisv32-intc";
reg = <0xb001c000 0x1000>;
interrupt-controller;
#interrupt-cells = <1>;
};
serial@b00260000 {
compatible = "axis,etraxfs-uart";
reg = <0xb0026000 0x1000>;
interrupts = <68>;
status = "disabled";
};
};
};
#ifndef __ASM_CRIS_ARCH_ATOMIC__
#define __ASM_CRIS_ARCH_ATOMIC__
#define cris_atomic_save(addr, flags) local_irq_save(flags);
#define cris_atomic_restore(addr, flags) local_irq_restore(flags);
#endif
@@ -36,12 +36,4 @@ static inline unsigned long _get_base(char * addr)
return 0;
}
#define nop() __asm__ __volatile__ ("nop");
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))
#endif
#ifndef __ASM_CRIS_ARCH_ATOMIC__
#define __ASM_CRIS_ARCH_ATOMIC__
#include <linux/spinlock_types.h>
extern void cris_spin_unlock(void *l, int val);
extern void cris_spin_lock(void *l);
extern int cris_spin_trylock(void* l);
#ifndef CONFIG_SMP
#define cris_atomic_save(addr, flags) local_irq_save(flags);
#define cris_atomic_restore(addr, flags) local_irq_restore(flags);
#else
extern spinlock_t cris_atomic_locks[];
#define LOCK_COUNT 128
#define HASH_ADDR(a) (((int)a) & 127)
#define cris_atomic_save(addr, flags) \
local_irq_save(flags); \
cris_spin_lock((void *)&cris_atomic_locks[HASH_ADDR(addr)].raw_lock.slock);
#define cris_atomic_restore(addr, flags) \
{ \
spinlock_t *lock = (void*)&cris_atomic_locks[HASH_ADDR(addr)]; \
__asm__ volatile ("move.d %1,%0" \
: "=m" (lock->raw_lock.slock) \
: "r" (1) \
: "memory"); \
local_irq_restore(flags); \
}
#endif
#endif
@@ -25,8 +25,7 @@ struct thread_struct {
*/
#define TASK_SIZE (0xB0000000UL)
/* CCS I=1, enable interrupts. */
#define INIT_THREAD { 0, 0, (1 << I_CCS_BITNR) }
#define INIT_THREAD { }
#define KSTK_EIP(tsk) \
({ \
...
#ifndef __ASM_ARCH_SPINLOCK_H
#define __ASM_ARCH_SPINLOCK_H
#include <linux/spinlock_types.h>
#define RW_LOCK_BIAS 0x01000000
extern void cris_spin_unlock(void *l, int val);
extern void cris_spin_lock(void *l);
extern int cris_spin_trylock(void *l);
static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
return *(volatile signed char *)(&(x)->slock) <= 0;
}
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
__asm__ volatile ("move.d %1,%0" \
: "=m" (lock->slock) \
: "r" (1) \
: "memory");
}
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
while (arch_spin_is_locked(lock))
cpu_relax();
}
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
return cris_spin_trylock((void *)&lock->slock);
}
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
cris_spin_lock((void *)&lock->slock);
}
static inline void
arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
arch_spin_lock(lock);
}
/*
* Read-write spinlocks, allowing multiple readers
* but only one writer.
*
* NOTE! it is quite common to have readers in interrupts
* but no interrupt writers. For those circumstances we
* can "mix" irq-safe locks - any writer needs to get a
* irq-safe write-lock, but readers can get non-irqsafe
* read-locks.
*
*/
static inline int arch_read_can_lock(arch_rwlock_t *x)
{
return (int)(x)->lock > 0;
}
static inline int arch_write_can_lock(arch_rwlock_t *x)
{
return (x)->lock == RW_LOCK_BIAS;
}
static inline void arch_read_lock(arch_rwlock_t *rw)
{
arch_spin_lock(&rw->slock);
while (rw->lock == 0);
rw->lock--;
arch_spin_unlock(&rw->slock);
}
static inline void arch_write_lock(arch_rwlock_t *rw)
{
arch_spin_lock(&rw->slock);
while (rw->lock != RW_LOCK_BIAS);
rw->lock = 0;
arch_spin_unlock(&rw->slock);
}
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
arch_spin_lock(&rw->slock);
rw->lock++;
arch_spin_unlock(&rw->slock);
}
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
arch_spin_lock(&rw->slock);
while (rw->lock != RW_LOCK_BIAS);
rw->lock = RW_LOCK_BIAS;
arch_spin_unlock(&rw->slock);
}
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
int ret = 0;
arch_spin_lock(&rw->slock);
if (rw->lock != 0) {
rw->lock--;
ret = 1;
}
arch_spin_unlock(&rw->slock);
return ret;
}
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
int ret = 0;
arch_spin_lock(&rw->slock);
if (rw->lock == RW_LOCK_BIAS) {
rw->lock = 0;
ret = 1;
}
arch_spin_unlock(&rw->slock);
return ret;
}
#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()
#endif /* __ASM_ARCH_SPINLOCK_H */
generic-y += atomic.h
generic-y += barrier.h
generic-y += clkdev.h
generic-y += cmpxchg.h
generic-y += cputime.h
generic-y += device.h
generic-y += div64.h
generic-y += exec.h
generic-y += emergency-restart.h
generic-y += futex.h
generic-y += hardirq.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += kvm_para.h
generic-y += linkage.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += module.h
generic-y += percpu.h
generic-y += preempt.h
generic-y += scatterlist.h
generic-y += sections.h
generic-y += topology.h
generic-y += trace_clock.h
generic-y += vga.h
generic-y += xor.h
/* $Id: atomic.h,v 1.3 2001/07/25 16:15:19 bjornw Exp $ */
#ifndef __ASM_CRIS_ATOMIC__
#define __ASM_CRIS_ATOMIC__
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <arch/atomic.h>
#include <arch/system.h>
#include <asm/barrier.h>
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc..
*/
#define ATOMIC_INIT(i) { (i) }
#define atomic_read(v) ACCESS_ONCE((v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))
/* These should be written in asm but we do it in C for now. */
#define ATOMIC_OP(op, c_op) \
static inline void atomic_##op(int i, volatile atomic_t *v) \
{ \
unsigned long flags; \
cris_atomic_save(v, flags); \
v->counter c_op i; \
cris_atomic_restore(v, flags); \
} \
#define ATOMIC_OP_RETURN(op, c_op) \
static inline int atomic_##op##_return(int i, volatile atomic_t *v) \
{ \
unsigned long flags; \
int retval; \
cris_atomic_save(v, flags); \
retval = (v->counter c_op i); \
cris_atomic_restore(v, flags); \
return retval; \
}
#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)
ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
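To make the macro block above concrete, this is what ATOMIC_OPS(add, +=) expanded to (an editor's illustration of the preprocessor output, not extra file content):

static inline void atomic_add(int i, volatile atomic_t *v)
{
	unsigned long flags;
	cris_atomic_save(v, flags);	/* irq-off (and spinlock on SMP) */
	v->counter += i;
	cris_atomic_restore(v, flags);
}

static inline int atomic_add_return(int i, volatile atomic_t *v)
{
	unsigned long flags;
	int retval;
	cris_atomic_save(v, flags);
	retval = (v->counter += i);
	cris_atomic_restore(v, flags);
	return retval;
}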
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
static inline int atomic_sub_and_test(int i, volatile atomic_t *v)
{
int retval;
unsigned long flags;
cris_atomic_save(v, flags);
retval = (v->counter -= i) == 0;
cris_atomic_restore(v, flags);
return retval;
}
static inline void atomic_inc(volatile atomic_t *v)
{
unsigned long flags;
cris_atomic_save(v, flags);
(v->counter)++;
cris_atomic_restore(v, flags);
}
static inline void atomic_dec(volatile atomic_t *v)
{
unsigned long flags;
cris_atomic_save(v, flags);
(v->counter)--;
cris_atomic_restore(v, flags);
}
static inline int atomic_inc_return(volatile atomic_t *v)
{
unsigned long flags;
int retval;
cris_atomic_save(v, flags);
retval = ++(v->counter);
cris_atomic_restore(v, flags);
return retval;
}
static inline int atomic_dec_return(volatile atomic_t *v)
{
unsigned long flags;
int retval;
cris_atomic_save(v, flags);
retval = --(v->counter);
cris_atomic_restore(v, flags);
return retval;
}
static inline int atomic_dec_and_test(volatile atomic_t *v)
{
int retval;
unsigned long flags;
cris_atomic_save(v, flags);
retval = --(v->counter) == 0;
cris_atomic_restore(v, flags);
return retval;
}
static inline int atomic_inc_and_test(volatile atomic_t *v)
{
int retval;
unsigned long flags;
cris_atomic_save(v, flags);
retval = ++(v->counter) == 0;
cris_atomic_restore(v, flags);
return retval;
}
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
int ret;
unsigned long flags;
cris_atomic_save(v, flags);
ret = v->counter;
if (likely(ret == old))
v->counter = new;
cris_atomic_restore(v, flags);
return ret;
}
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
int ret;
unsigned long flags;
cris_atomic_save(v, flags);
ret = v->counter;
if (ret != u)
v->counter += a;
cris_atomic_restore(v, flags);
return ret;
}
#endif
@@ -19,119 +19,10 @@
#endif
#include <arch/bitops.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <asm/barrier.h>
#include <asm-generic/bitops/atomic.h>
/*
* set_bit - Atomically set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
* This function is atomic and may not be reordered. See __set_bit()
* if you do not require the atomic guarantees.
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
#define set_bit(nr, addr) (void)test_and_set_bit(nr, addr)
/*
* clear_bit - Clears a bit in memory
* @nr: Bit to clear
* @addr: Address to start counting from
*
* clear_bit() is atomic and may not be reordered. However, it does
* not contain a memory barrier, so if it is used for locking purposes,
* you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
* in order to ensure changes are visible on other processors.
*/
#define clear_bit(nr, addr) (void)test_and_clear_bit(nr, addr)
/*
* change_bit - Toggle a bit in memory
* @nr: Bit to change
* @addr: Address to start counting from
*
* change_bit() is atomic and may not be reordered.
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
#define change_bit(nr, addr) (void)test_and_change_bit(nr, addr)
/**
* test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
unsigned int mask, retval;
unsigned long flags;
unsigned int *adr = (unsigned int *)addr;
adr += nr >> 5;
mask = 1 << (nr & 0x1f);
cris_atomic_save(addr, flags);
retval = (mask & *adr) != 0;
*adr |= mask;
cris_atomic_restore(addr, flags);
return retval;
}
/**
* test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
unsigned int mask, retval;
unsigned long flags;
unsigned int *adr = (unsigned int *)addr;
adr += nr >> 5;
mask = 1 << (nr & 0x1f);
cris_atomic_save(addr, flags);
retval = (mask & *adr) != 0;
*adr &= ~mask;
cris_atomic_restore(addr, flags);
return retval;
}
/**
* test_and_change_bit - Change a bit and return its old value
* @nr: Bit to change
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
unsigned int mask, retval;
unsigned long flags;
unsigned int *adr = (unsigned int *)addr;
adr += nr >> 5;
mask = 1 << (nr & 0x1f);
cris_atomic_save(addr, flags);
retval = (mask & *adr) != 0;
*adr ^= mask;
cris_atomic_restore(addr, flags);
return retval;
}
#include <asm-generic/bitops/non-atomic.h> #include <asm-generic/bitops/non-atomic.h>
/*
...
#ifndef __ASM_CRIS_CMPXCHG__
#define __ASM_CRIS_CMPXCHG__
#include <linux/irqflags.h>
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
/* since Etrax doesn't have any atomic xchg instructions, we need to disable
irq's (if enabled) and do it with move.d's */
unsigned long flags,temp;
local_irq_save(flags); /* save flags, including irq enable bit and shut off irqs */
switch (size) {
case 1:
*((unsigned char *)&temp) = x;
x = *(unsigned char *)ptr;
*(unsigned char *)ptr = *((unsigned char *)&temp);
break;
case 2:
*((unsigned short *)&temp) = x;
x = *(unsigned short *)ptr;
*(unsigned short *)ptr = *((unsigned short *)&temp);
break;
case 4:
temp = x;
x = *(unsigned long *)ptr;
*(unsigned long *)ptr = temp;
break;
}
local_irq_restore(flags); /* restore irq enable bit */
return x;
}
#define xchg(ptr,x) \
((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))
#include <asm-generic/cmpxchg-local.h>
/*
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
*/
#define cmpxchg_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
#endif
#endif /* __ASM_CRIS_CMPXCHG__ */
/*
* Arch specific extensions to struct device
*
* This file is released under the GPLv2
*/
#include <asm-generic/device.h>
#include <asm-generic/div64.h>
@@ -71,7 +71,7 @@ typedef unsigned long elf_fpregset_t;
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. This could be done in user space,
...
#ifndef _ASM_EMERGENCY_RESTART_H
#define _ASM_EMERGENCY_RESTART_H
#include <asm-generic/emergency-restart.h>
#endif /* _ASM_EMERGENCY_RESTART_H */
#ifndef _ASM_FUTEX_H
#define _ASM_FUTEX_H
#include <asm-generic/futex.h>
#endif
#ifndef __ASM_HARDIRQ_H
#define __ASM_HARDIRQ_H
#include <asm/irq.h>
#include <asm-generic/hardirq.h>
#endif /* __ASM_HARDIRQ_H */
#include <asm-generic/irq_regs.h>
#include <asm-generic/kdebug.h>
#ifndef _ASM_KMAP_TYPES_H
#define _ASM_KMAP_TYPES_H
/* Dummy header just to define km_type. None of this
* is actually used on cris.
*/
#include <asm-generic/kmap_types.h>
#endif
#include <asm-generic/local.h>
#include <asm-generic/local64.h>
#ifndef _CRIS_PERCPU_H
#define _CRIS_PERCPU_H
#include <asm-generic/percpu.h>
#endif /* _CRIS_PERCPU_H */
#ifndef __ASM_SMP_H
#define __ASM_SMP_H
#include <linux/cpumask.h>
extern cpumask_t phys_cpu_present_map;
#define raw_smp_processor_id() (current_thread_info()->cpu)
#endif
@@ -22,16 +22,9 @@ extern void __flush_tlb_mm(struct mm_struct *mm);
extern void __flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr);
#ifdef CONFIG_SMP
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr);
#else
#define flush_tlb_all __flush_tlb_all
#define flush_tlb_mm __flush_tlb_mm
#define flush_tlb_page __flush_tlb_page
#endif
static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
{
...
#ifndef _ASM_CRIS_TOPOLOGY_H
#define _ASM_CRIS_TOPOLOGY_H
#include <asm-generic/topology.h>
#endif /* _ASM_CRIS_TOPOLOGY_H */
@@ -7,6 +7,7 @@ CPPFLAGS_vmlinux.lds := -DDRAM_VIRTUAL_BASE=0x$(CONFIG_ETRAX_DRAM_VIRTUAL_BASE)
extra-y := vmlinux.lds
obj-y := process.o traps.o irq.o ptrace.o setup.o time.o sys_cris.o
obj-y += devicetree.o
obj-$(CONFIG_MODULES) += crisksyms.o
obj-$(CONFIG_MODULES) += module.o
...
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/printk.h>
void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
pr_err("%s(%llx, %llx)\n",
__func__, base, size);
}
void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
{
return alloc_bootmem_align(size, align);
}
@@ -42,3 +42,26 @@ void do_notify_resume(int canrestart, struct pt_regs *regs,
tracehook_notify_resume(regs);
}
}
void do_work_pending(int syscall, struct pt_regs *regs,
unsigned int thread_flags)
{
do {
if (likely(thread_flags & _TIF_NEED_RESCHED)) {
schedule();
} else {
if (unlikely(!user_mode(regs)))
return;
local_irq_enable();
if (thread_flags & _TIF_SIGPENDING) {
do_signal(syscall, regs);
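/* Editor's note: only the first signal handled in this loop sees the
 * syscall marker; clearing it below keeps a second signal from
 * restarting the syscall again ("CRISv32: handle multiple signals"). */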
syscall = 0;
} else {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
}
}
local_irq_disable();
thread_flags = current_thread_info()->flags;
} while (thread_flags & _TIF_WORK_MASK);
}
@@ -19,6 +19,9 @@
#include <linux/utsname.h>
#include <linux/pfn.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <asm/setup.h>
#include <arch/system.h>
@@ -64,6 +67,10 @@ void __init setup_arch(char **cmdline_p)
unsigned long start_pfn, max_pfn;
unsigned long memory_start;
#ifdef CONFIG_OF
early_init_dt_scan(__dtb_start);
#endif
/* register an initial console printing routine for printk's */
init_etrax_debug();
@@ -141,6 +148,8 @@ void __init setup_arch(char **cmdline_p)
reserve_bootmem(PFN_PHYS(start_pfn), bootmap_size, BOOTMEM_DEFAULT);
unflatten_and_copy_device_tree();
/* paging_init() sets up the MMU and marks all pages as reserved */
paging_init();
@@ -204,3 +213,9 @@ static int __init topology_init(void)
subsys_initcall(topology_init);
static int __init cris_of_init(void)
{
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
return 0;
}
core_initcall(cris_of_init);
@@ -79,11 +79,13 @@ cris_do_profile(struct pt_regs* regs)
#endif
}
#ifndef CONFIG_GENERIC_SCHED_CLOCK
unsigned long long sched_clock(void)
{
return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ) +
get_ns_in_jiffie();
}
#endif
static int
__init init_udelay(void)
...