Commit 455a70cb authored by Linus Torvalds

Merge tag 'arc-4.10-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc

Pull ARC fixes from Vineet Gupta:

 - more intc updates [Yuriy]

 - fix module build when unwinder is turned off

 - IO Coherency Programming model updates

 - other miscellaneous

* tag 'arc-4.10-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc:
  ARC: Revert "ARC: mm: IOC: Don't enable IOC by default"
  ARC: mm: split arc_cache_init to allow __init reaping of bulk
  ARCv2: IOC: Use actual memory size to setup aperture size
  ARCv2: IOC: Adhere to progamming model guidelines to avoid DMA corruption
  ARCv2: IOC: refactor the IOC and SLC operations into own functions
  ARC: module: Fix !CONFIG_ARC_DW2_UNWIND builds
  ARCv2: save r30 on kernel entry as gcc uses it for code-gen
  ARCv2: IRQ: Call entry/exit functions for chained handlers in MCIP
  ARC: IRQ: Use hwirq instead of virq in mask/unmask
  ARC: mmu: clarify the MMUv3 programming model
parents 83fd57a7 d0e73e2a
@@ -29,7 +29,7 @@ config ARC
select HAVE_KPROBES
select HAVE_KRETPROBES
select HAVE_MEMBLOCK
-select HAVE_MOD_ARCH_SPECIFIC if ARC_DW2_UNWIND
+select HAVE_MOD_ARCH_SPECIFIC
select HAVE_OPROFILE
select HAVE_PERF_EVENTS
select HANDLE_DOMAIN_IRQ
@@ -67,7 +67,7 @@ extern unsigned long perip_base, perip_end;
#define ARC_REG_IC_PTAG_HI 0x1F
/* Bit val in IC_CTRL */
-#define IC_CTRL_CACHE_DISABLE 0x1
+#define IC_CTRL_DIS 0x1
/* Data cache related Auxiliary registers */
#define ARC_REG_DC_BCR 0x72 /* Build Config reg */
@@ -80,7 +80,8 @@ extern unsigned long perip_base, perip_end;
#define ARC_REG_DC_PTAG_HI 0x5F
/* Bit val in DC_CTRL */
-#define DC_CTRL_INV_MODE_FLUSH 0x40
+#define DC_CTRL_DIS 0x001
+#define DC_CTRL_INV_MODE_FLUSH 0x040
#define DC_CTRL_FLUSH_STATUS 0x100
/*System-level cache (L2 cache) related Auxiliary registers */
@@ -92,8 +93,8 @@ extern unsigned long perip_base, perip_end;
#define ARC_REG_SLC_RGN_END 0x916
/* Bit val in SLC_CONTROL */
+#define SLC_CTRL_DIS 0x001
#define SLC_CTRL_IM 0x040
-#define SLC_CTRL_DISABLE 0x001
#define SLC_CTRL_BUSY 0x100
#define SLC_CTRL_RGN_OP_INV 0x200
@@ -16,6 +16,7 @@
;
; Now manually save: r12, sp, fp, gp, r25
+PUSH r30
PUSH r12
; Saving pt_regs->sp correctly requires some extra work due to the way
@@ -72,6 +73,7 @@
POPAX AUX_USER_SP
1:
POP r12
+POP r30
.endm
@@ -14,13 +14,13 @@
#include <asm-generic/module.h>
-#ifdef CONFIG_ARC_DW2_UNWIND
struct mod_arch_specific {
+#ifdef CONFIG_ARC_DW2_UNWIND
void *unw_info;
int unw_sec_idx;
+#endif
const char *secstr;
};
-#endif
#define MODULE_PROC_FAMILY "ARC700"
@@ -84,7 +84,7 @@ struct pt_regs {
unsigned long fp;
unsigned long sp; /* user/kernel sp depending on where we came from */
-unsigned long r12;
+unsigned long r12, r30;
/*------- Below list auto saved by h/w -----------*/
unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
@@ -31,6 +31,7 @@ extern int root_mountflags, end_mem;
void setup_processor(void);
void __init setup_arch_memory(void);
+long __init arc_get_mem_sz(void);
/* Helpers used in arc_*_mumbojumbo routines */
#define IS_AVAIL1(v, s) ((v) ? s : "")
@@ -77,20 +77,20 @@ void arc_init_IRQ(void)
static void arcv2_irq_mask(struct irq_data *data)
{
-write_aux_reg(AUX_IRQ_SELECT, data->irq);
+write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
write_aux_reg(AUX_IRQ_ENABLE, 0);
}
static void arcv2_irq_unmask(struct irq_data *data)
{
-write_aux_reg(AUX_IRQ_SELECT, data->irq);
+write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
write_aux_reg(AUX_IRQ_ENABLE, 1);
}
void arcv2_irq_enable(struct irq_data *data)
{
/* set default priority */
-write_aux_reg(AUX_IRQ_SELECT, data->irq);
+write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO);
/*
@@ -57,7 +57,7 @@ static void arc_irq_mask(struct irq_data *data)
unsigned int ienb;
ienb = read_aux_reg(AUX_IENABLE);
-ienb &= ~(1 << data->irq);
+ienb &= ~(1 << data->hwirq);
write_aux_reg(AUX_IENABLE, ienb);
}
@@ -66,7 +66,7 @@ static void arc_irq_unmask(struct irq_data *data)
unsigned int ienb;
ienb = read_aux_reg(AUX_IENABLE);
-ienb |= (1 << data->irq);
+ienb |= (1 << data->hwirq);
write_aux_reg(AUX_IENABLE, ienb);
}
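The two intc hunks above replace data->irq with data->hwirq. The distinction: data->irq is the Linux virtual IRQ number handed out through the irqdomain layer, while data->hwirq is the line number the controller itself decodes, and AUX_IRQ_SELECT / the AUX_IENABLE bitmask are indexed by the latter. The two values only coincide while the mapping happens to be 1:1, so mask/unmask must use the hardware number. A minimal sketch of the idea, not taken from this merge (demo_irq_mask and demo_hw_mask_line are invented names; struct irq_data and irqd_to_hwirq() are the real kernel interfaces):

#include <linux/irq.h>

/* Hypothetical controller poke, standing in for the AUX register writes */
static void demo_hw_mask_line(irq_hw_number_t line)
{
        /* e.g. select 'line' in the controller, then clear its enable bit */
}

static void demo_irq_mask(struct irq_data *data)
{
        /* data->irq (the virq) only means something to Linux; hand the
         * hardware the domain-translated line number instead. */
        demo_hw_mask_line(irqd_to_hwirq(data));
}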
@@ -10,6 +10,7 @@
#include <linux/smp.h>
#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/spinlock.h>
#include <soc/arc/mcip.h>
#include <asm/irqflags-arcv2.h>
@@ -221,10 +222,13 @@ static irq_hw_number_t idu_first_hwirq;
static void idu_cascade_isr(struct irq_desc *desc)
{
struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
+struct irq_chip *core_chip = irq_desc_get_chip(desc);
irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
irq_hw_number_t idu_hwirq = core_hwirq - idu_first_hwirq;
+chained_irq_enter(core_chip, desc);
generic_handle_irq(irq_find_mapping(idu_domain, idu_hwirq));
+chained_irq_exit(core_chip, desc);
}
static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
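For context on the hunk above: idu_cascade_isr() runs from the handler of the parent (core) interrupt, and the added chained_irq_enter()/chained_irq_exit() calls perform the parent chip's mask/ack and eoi/unmask steps around the nested dispatch, steps a bare generic_handle_irq() call never does. The sketch below is not part of this merge (the demo_ names are invented) and only illustrates the usual shape of a chained handler plus how the handler data read back with irq_desc_get_handler_data() gets attached in the first place:

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>

static void demo_cascade_isr(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);           /* parent chip */
        struct irq_domain *dom = irq_desc_get_handler_data(desc);  /* set at registration */

        chained_irq_enter(chip, desc);                  /* mask/ack parent as its flow requires */
        generic_handle_irq(irq_find_mapping(dom, 0));   /* dispatch child hwirq 0 */
        chained_irq_exit(chip, desc);                   /* eoi/unmask parent */
}

static void demo_register_cascade(unsigned int parent_virq, struct irq_domain *dom)
{
        irq_set_chained_handler_and_data(parent_virq, demo_cascade_isr, dom);
}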
@@ -32,8 +32,8 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
#ifdef CONFIG_ARC_DW2_UNWIND
mod->arch.unw_sec_idx = 0;
mod->arch.unw_info = NULL;
-mod->arch.secstr = secstr;
#endif
+mod->arch.secstr = secstr;
return 0;
}
@@ -113,8 +113,10 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
}
+#ifdef CONFIG_ARC_DW2_UNWIND
if (strcmp(module->arch.secstr+sechdrs[tgtsec].sh_name, ".eh_frame") == 0)
module->arch.unw_sec_idx = tgtsec;
+#endif
return 0;
@@ -23,7 +23,7 @@
static int l2_line_sz;
static int ioc_exists;
-int slc_enable = 1, ioc_enable = 0;
+int slc_enable = 1, ioc_enable = 1;
unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
unsigned long perip_end = 0xFFFFFFFF; /* legacy value */
@@ -271,7 +271,11 @@ void __cache_line_loop_v2(phys_addr_t paddr, unsigned long vaddr,
/*
* For ARC700 MMUv3 I-cache and D-cache flushes
-* Also reused for HS38 aliasing I-cache configuration
+* - ARC700 programming model requires paddr and vaddr be passed in seperate
+*   AUX registers (*_IV*L and *_PTAG respectively) irrespective of whether the
+*   caches actually alias or not.
+* - For HS38, only the aliasing I-cache configuration uses the PTAG reg
+*   (non aliasing I-cache version doesn't; while D-cache can't possibly alias)
*/
static inline
void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
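Spelled out, the reworded comment above says that on these configurations a line operation is a two-register sequence per cache line: the physical tag is written to the *_PTAG auxiliary register and the virtual line address to the matching *_IV*L / *_FL*L command register. A rough sketch of that loop follows; it loosely mirrors __cache_line_loop_v3 but is not a drop-in copy (demo_dc_inv_lines_v3 is an invented name, and the real routine additionally handles I-cache vs D-cache selection and aliasing details that this omits):

#include <linux/types.h>
#include <linux/cache.h>
#include <asm/arcregs.h>
#include <asm/cache.h>

static void demo_dc_inv_lines_v3(phys_addr_t paddr, unsigned long vaddr, int num_lines)
{
        while (num_lines-- > 0) {
                write_aux_reg(ARC_REG_DC_PTAG, paddr);  /* physical tag first */
                write_aux_reg(ARC_REG_DC_IVDL, vaddr);  /* then invalidate by virtual line */
                paddr += L1_CACHE_BYTES;
                vaddr += L1_CACHE_BYTES;
        }
}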
@@ -458,6 +462,21 @@ static inline void __dc_entire_op(const int op)
__after_dc_op(op);
}
+static inline void __dc_disable(void)
+{
+const int r = ARC_REG_DC_CTRL;
+__dc_entire_op(OP_FLUSH_N_INV);
+write_aux_reg(r, read_aux_reg(r) | DC_CTRL_DIS);
+}
+static void __dc_enable(void)
+{
+const int r = ARC_REG_DC_CTRL;
+write_aux_reg(r, read_aux_reg(r) & ~DC_CTRL_DIS);
+}
/* For kernel mappings cache operation: index is same as paddr */
#define __dc_line_op_k(p, sz, op) __dc_line_op(p, p, sz, op)
@@ -483,6 +502,8 @@ static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
#else
#define __dc_entire_op(op)
+#define __dc_disable()
+#define __dc_enable()
#define __dc_line_op(paddr, vaddr, sz, op)
#define __dc_line_op_k(paddr, sz, op)
@@ -597,6 +618,40 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
#endif
}
+noinline static void slc_entire_op(const int op)
+{
+unsigned int ctrl, r = ARC_REG_SLC_CTRL;
+ctrl = read_aux_reg(r);
+if (!(op & OP_FLUSH)) /* i.e. OP_INV */
+ctrl &= ~SLC_CTRL_IM; /* clear IM: Disable flush before Inv */
+else
+ctrl |= SLC_CTRL_IM;
+write_aux_reg(r, ctrl);
+write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);
+/* Important to wait for flush to complete */
+while (read_aux_reg(r) & SLC_CTRL_BUSY);
+}
+static inline void arc_slc_disable(void)
+{
+const int r = ARC_REG_SLC_CTRL;
+slc_entire_op(OP_FLUSH_N_INV);
+write_aux_reg(r, read_aux_reg(r) | SLC_CTRL_DIS);
+}
+static inline void arc_slc_enable(void)
+{
+const int r = ARC_REG_SLC_CTRL;
+write_aux_reg(r, read_aux_reg(r) & ~SLC_CTRL_DIS);
+}
/***********************************************************
* Exported APIs
*/
@@ -923,21 +978,54 @@ SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
return 0;
}
-void arc_cache_init(void)
+/*
+* IO-Coherency (IOC) setup rules:
+*
+* 1. Needs to be at system level, so only once by Master core
+* Non-Masters need not be accessing caches at that time
+* - They are either HALT_ON_RESET and kick started much later or
+* - if run on reset, need to ensure that arc_platform_smp_wait_to_boot()
+* doesn't perturb caches or coherency unit
+*
+* 2. caches (L1 and SLC) need to be purged (flush+inv) before setting up IOC,
+* otherwise any straggler data might behave strangely post IOC enabling
+*
+* 3. All Caches need to be disabled when setting up IOC to elide any in-flight
+* Coherency transactions
+*/
+noinline void __init arc_ioc_setup(void)
{
-unsigned int __maybe_unused cpu = smp_processor_id();
-char str[256];
+unsigned int ap_sz;
-printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
+/* Flush + invalidate + disable L1 dcache */
+__dc_disable();
+/* Flush + invalidate SLC */
+if (read_aux_reg(ARC_REG_SLC_BCR))
+slc_entire_op(OP_FLUSH_N_INV);
+/* IOC Aperture start: TDB: handle non default CONFIG_LINUX_LINK_BASE */
+write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
/*
-* Only master CPU needs to execute rest of function:
-* - Assume SMP so all cores will have same cache config so
-* any geomtry checks will be same for all
-* - IOC setup / dma callbacks only need to be setup once
+* IOC Aperture size:
+* decoded as 2 ^ (SIZE + 2) KB: so setting 0x11 implies 512M
+* TBD: fix for PGU + 1GB of low mem
+* TBD: fix for PAE
*/
-if (cpu)
-return;
+ap_sz = order_base_2(arc_get_mem_sz()/1024) - 2;
+write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, ap_sz);
+write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
+write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);
+/* Re-enable L1 dcache */
+__dc_enable();
+}
+void __init arc_cache_init_master(void)
+{
+unsigned int __maybe_unused cpu = smp_processor_id();
if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
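A quick sanity check on the aperture-size encoding used by arc_ioc_setup() above: the SIZE field is decoded by hardware as 2 ^ (SIZE + 2) KB, and ap_sz is now derived from the detected memory size rather than the old hard-coded 0x11. The standalone sketch below (ordinary user-space C, not kernel code; order_base_2_demo is a local stand-in for the kernel's round-up order_base_2() helper) reproduces the arithmetic for a 512 MB system:

#include <stdio.h>

/* Round-up log2, mimicking what order_base_2() is used for here */
static unsigned int order_base_2_demo(unsigned long n)
{
        unsigned int order = 0;

        while ((1UL << order) < n)
                order++;
        return order;
}

int main(void)
{
        unsigned long mem_bytes = 512UL << 20;  /* 512 MB of low mem */
        unsigned int ap_sz = order_base_2_demo(mem_bytes / 1024) - 2;

        /* Prints ap_sz = 0x11 -> aperture = 524288 KB (i.e. 512 MB) */
        printf("ap_sz = 0x%x -> aperture = %lu KB\n", ap_sz, 1UL << (ap_sz + 2));
        return 0;
}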
@@ -985,30 +1073,14 @@ void arc_cache_init(void)
}
}
-if (is_isa_arcv2() && l2_line_sz && !slc_enable) {
+/* Note that SLC disable not formally supported till HS 3.0 */
+if (is_isa_arcv2() && l2_line_sz && !slc_enable)
+arc_slc_disable();
-/* IM set : flush before invalidate */
-write_aux_reg(ARC_REG_SLC_CTRL,
-read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_IM);
-write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);
-/* Important to wait for flush to complete */
-while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
-write_aux_reg(ARC_REG_SLC_CTRL,
-read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_DISABLE);
-}
+if (is_isa_arcv2() && ioc_enable)
+arc_ioc_setup();
if (is_isa_arcv2() && ioc_enable) {
-/* IO coherency base - 0x8z */
-write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
-/* IO coherency aperture size - 512Mb: 0x8z-0xAz */
-write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, 0x11);
-/* Enable partial writes */
-write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
-/* Enable IO coherency */
-write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);
__dma_cache_wback_inv = __dma_cache_wback_inv_ioc;
__dma_cache_inv = __dma_cache_inv_ioc;
__dma_cache_wback = __dma_cache_wback_ioc;
@@ -1022,3 +1094,20 @@ void arc_cache_init(void)
__dma_cache_wback = __dma_cache_wback_l1;
}
}
+void __ref arc_cache_init(void)
+{
+unsigned int __maybe_unused cpu = smp_processor_id();
+char str[256];
+printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
+/*
+* Only master CPU needs to execute rest of function:
+* - Assume SMP so all cores will have same cache config so
+* any geomtry checks will be same for all
+* - IOC setup / dma callbacks only need to be setup once
+*/
+if (!cpu)
+arc_cache_init_master();
+}
@@ -40,6 +40,11 @@ struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);
#endif
+long __init arc_get_mem_sz(void)
+{
+return low_mem_sz;
+}
/* User can over-ride above with "mem=nnn[KkMm]" in cmdline */
static int __init setup_mem_sz(char *str)
{