Commit 99ef7c2a authored by James Hogan

metag: Cache/TLB handling

Add cache and TLB handling code for metag, including the required
callbacks used by MM switches and DMA operations. Caches can be
partitioned between the hardware threads and the global space, however
this is usually configured by the bootloader so Linux doesn't make any
changes to this configuration. TLBs aren't configurable, so only need
consideration to flush them.

On Meta1 the L1 cache was VIVT which required a full flush on MM switch.
Meta2 has a VIPT L1 cache so it doesn't require the full flush on MM
switch. Meta2 can also have a writeback L2 with hardware prefetch which
requires some special handling. Support is optional, and the L2 can be
detected and initialised by Linux.
Signed-off-by: James Hogan <james.hogan@imgtec.com>
parent 027f891f
#ifndef __ASM_METAG_CACHE_H
#define __ASM_METAG_CACHE_H
/* L1 cache line size (64 bytes) */
#define L1_CACHE_SHIFT 6
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
/* Meta requires large data items to be 8 byte aligned. */
#define ARCH_SLAB_MINALIGN 8
/*
* With an L2 cache, we may invalidate dirty lines, so we need to ensure DMA
* buffers have cache line alignment.
*/
#ifdef CONFIG_METAG_L2C
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
#else
#define ARCH_DMA_MINALIGN 8
#endif
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
#endif
#ifndef _METAG_CACHEFLUSH_H
#define _METAG_CACHEFLUSH_H
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <asm/l2cache.h>
#include <asm/metag_isa.h>
#include <asm/metag_mem.h>
void metag_cache_probe(void);
void metag_data_cache_flush_all(const void *start);
void metag_code_cache_flush_all(const void *start);
/*
* Routines to flush physical cache lines that may be used to cache data or code
* normally accessed via the linear address range supplied. The region flushed
* must either lie in local or global address space determined by the top bit of
* the pStart address. If Bytes is >= 4K then the whole of the related cache
* state will be flushed rather than a limited range.
*/
void metag_data_cache_flush(const void *start, int bytes);
void metag_code_cache_flush(const void *start, int bytes);
#ifdef CONFIG_METAG_META12
/* Write through, virtually tagged, split I/D cache. */
/* Flush both L1 caches over the entire kernel linear mapping. */
static inline void __flush_cache_all(void)
{
	void *base = (void *)PAGE_OFFSET;

	metag_code_cache_flush_all(base);
	metag_data_cache_flush_all(base);
}
#define flush_cache_all() __flush_cache_all()
/* flush the entire user address space referenced in this mm structure */
/* Flush the caches for an mm; only the running mm can have live lines. */
static inline void flush_cache_mm(struct mm_struct *mm)
{
	if (mm != current->mm)
		return;
	__flush_cache_all();
}
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
/* flush a range of addresses from this mm */
/* No ranged flush is available on Meta1; fall back to flushing the mm. */
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	flush_cache_mm(mm);
}
/* Single-page flush: delegate to the whole-mm flush (no finer primitive). */
static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr, unsigned long pfn)
{
	struct mm_struct *mm = vma->vm_mm;

	flush_cache_mm(mm);
}
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
/* VIVT data cache: conservatively flush the whole cache (page is ignored). */
static inline void flush_dcache_page(struct page *page)
{
	metag_data_cache_flush_all((void *) PAGE_OFFSET);
}
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
/* Flush any cached code for the page via its kernel-linear mapping. */
static inline void flush_icache_page(struct vm_area_struct *vma,
				     struct page *page)
{
	metag_code_cache_flush(page_to_virt(page), PAGE_SIZE);
}
/* VIVT cache: conservatively flush the whole data cache (range ignored). */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	metag_data_cache_flush_all((void *) PAGE_OFFSET);
}
/* VIVT cache: conservatively flush the whole data cache (range ignored). */
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	metag_data_cache_flush_all((void *) PAGE_OFFSET);
}
#else
/* Write through, physically tagged, split I/D cache. */
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_dup_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_page(vma, pg) do { } while (0)
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
/* Make the instruction cache coherent with a page that may contain code. */
static inline void flush_dcache_page(struct page *page)
{
	/* FIXME: We can do better than this. All we are trying to do is
	 * make the i-cache coherent, we should use the PG_arch_1 bit like
	 * e.g. powerpc.
	 */
#ifdef CONFIG_SMP
	/* SMP: use the system icache flush register */
	metag_out32(1, SYSC_ICACHE_FLUSH);
#else
	metag_code_cache_flush_all((void *) PAGE_OFFSET);
#endif
}
#endif
/* Push n pages at kernel virtual address and clear the icache */
/*
 * @address/@endaddr: [start, end) byte range of kernel virtual addresses
 * whose cached instructions must be discarded.
 */
static inline void flush_icache_range(unsigned long address,
				      unsigned long endaddr)
{
#ifdef CONFIG_SMP
	/* SMP: no ranged primitive; use the system icache flush register */
	metag_out32(1, SYSC_ICACHE_FLUSH);
#else
	metag_code_cache_flush((void *) address, endaddr - address);
#endif
}
/* @addr/@size: location of a just-written signal trampoline. */
static inline void flush_cache_sigtramp(unsigned long addr, int size)
{
	/*
	 * Flush the icache in case there was previously some code
	 * fetched from this address, perhaps a previous sigtramp.
	 *
	 * We don't need to flush the dcache, it's write through and
	 * we just wrote the sigtramp code through it.
	 */
#ifdef CONFIG_SMP
	/* SMP: use the system icache flush register */
	metag_out32(1, SYSC_ICACHE_FLUSH);
#else
	metag_code_cache_flush((void *) addr, size);
#endif
}
#ifdef CONFIG_METAG_L2C
/*
* Perform a single specific CACHEWD operation on an address, masking lower bits
* of address first.
*/
/*
 * Issue one CACHEWD operation on the 64-byte cache line containing @addr.
 * The low 6 address bits are masked off before the operation.
 */
static inline void cachewd_line(void *addr, unsigned int data)
{
	unsigned long line = (unsigned long)addr & ~0x3fUL;

	__builtin_meta2_cachewd((void *)line, data);
}
/* Perform a certain CACHEW op on each cache line in a range */
/*
 * @start/@size: byte range; start is aligned down to a 64-byte line and
 * size grown to compensate, so every touched line gets exactly one op.
 * NOTE(review): a zero size would underflow (size - 1) — callers appear
 * to pass non-zero sizes; confirm before reusing elsewhere.
 */
static inline void cachew_region_op(void *start, unsigned long size,
				    unsigned int op)
{
	unsigned long offset = (unsigned long)start & 0x3f;
	int i;

	if (offset) {
		size += offset;
		start -= offset;
	}
	/* number of 64-byte lines, minus one (do-while runs i+1 times) */
	i = (size - 1) >> 6;
	do {
		__builtin_meta2_cachewd(start, op);
		start += 0x40;
	} while (i--);
}
/* prevent write fence and flushbacks being reordered in L2 */
/*
 * Fence against a preceding flush-back of @addr's cache line: force the
 * L2 to finish the flush before continuing (e.g. before starting DMA).
 */
static inline void l2c_fence_flush(void *addr)
{
	/*
	 * Synchronise by reading back and re-flushing.
	 * It is assumed this access will miss, as the caller should have just
	 * flushed the cache line.
	 *
	 * Note the dereference: a bare cast of the pointer would not perform
	 * a memory access at all and could not order anything.
	 */
	(void)*(volatile u8 *)addr;
	cachewd_line(addr, CACHEW_FLUSH_L1D_L2);
}
/* prevent write fence and writebacks being reordered in L2 */
/* @addr: an address within the last cache line written back. */
static inline void l2c_fence(void *addr)
{
	/*
	 * A write back has occurred, but not necessarily an invalidate, so the
	 * readback in l2c_fence_flush() would hit in the cache and have no
	 * effect. Therefore fully flush the line first.
	 */
	cachewd_line(addr, CACHEW_FLUSH_L1D_L2);
	l2c_fence_flush(addr);
}
/* Used to keep memory consistent when doing DMA. */
/* Flush (write back + invalidate) @size bytes at @start through L1 and L2. */
static inline void flush_dcache_region(void *start, unsigned long size)
{
	/* metag_data_cache_flush won't flush L2 cache lines if size >= 4096 */
	if (meta_l2c_is_enabled()) {
		cachew_region_op(start, size, CACHEW_FLUSH_L1D_L2);
		if (meta_l2c_is_writeback())
			/* ensure the flush has drained before returning */
			l2c_fence_flush(start + size - 1);
	} else {
		metag_data_cache_flush(start, size);
	}
}
/* Write back dirty lines to memory (or do nothing if no writeback caches) */
static inline void writeback_dcache_region(void *start, unsigned long size)
{
	/* only a writeback L2 can hold dirty lines that memory lacks */
	if (meta_l2c_is_enabled() && meta_l2c_is_writeback()) {
		cachew_region_op(start, size, CACHEW_WRITEBACK_L1D_L2);
		l2c_fence(start + size - 1);
	}
}
/* Invalidate (may also write back if necessary) */
static inline void invalidate_dcache_region(void *start, unsigned long size)
{
	if (meta_l2c_is_enabled())
		cachew_region_op(start, size, CACHEW_INVALIDATE_L1D_L2);
	else
		/* no L2: fall back to the L1 range flush */
		metag_data_cache_flush(start, size);
}
#else
#define flush_dcache_region(s, l) metag_data_cache_flush((s), (l))
#define writeback_dcache_region(s, l) do {} while (0)
#define invalidate_dcache_region(s, l) flush_dcache_region((s), (l))
#endif
/*
 * Copy data into a user page's kernel mapping at @dst, then flush the
 * icache over the destination in case the bytes are executable code.
 */
static inline void copy_to_user_page(struct vm_area_struct *vma,
				     struct page *page, unsigned long vaddr,
				     void *dst, const void *src,
				     unsigned long len)
{
	memcpy(dst, src, len);
	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
}
/* Copy data out of a user page; reads require no cache maintenance here. */
static inline void copy_from_user_page(struct vm_area_struct *vma,
				       struct page *page, unsigned long vaddr,
				       void *dst, const void *src,
				       unsigned long len)
{
	memcpy(dst, src, len);
}
#endif /* _METAG_CACHEFLUSH_H */
#ifndef _METAG_L2CACHE_H
#define _METAG_L2CACHE_H
#ifdef CONFIG_METAG_L2C
#include <asm/global_lock.h>
#include <asm/io.h>
/*
* Store the last known value of pfenable (we don't want prefetch enabled while
* L2 is off).
*/
extern int l2c_pfenable;
/* defined in arch/metag/drivers/core-sysfs.c */
extern struct sysdev_class cache_sysclass;
static inline void wr_fence(void);
/*
* Functions for reading of L2 cache configuration.
*/
/* Get raw L2 config register (CORE_CONFIG3) */
static inline unsigned int meta_l2c_config(void)
{
	/* memory-mapped read of the core configuration register */
	const unsigned int *corecfg3 = (const unsigned int *)METAC_CORE_CONFIG3;
	return *corecfg3;
}
/* Get whether the L2 is present */
/* Non-zero iff CORE_CONFIG3 reports an L2 cache. */
static inline int meta_l2c_is_present(void)
{
	unsigned int cfg = meta_l2c_config();

	return cfg & METAC_CORECFG3_L2C_HAVE_L2C_BIT;
}
/* Get whether the L2 is configured for write-back instead of write-through */
/* Returns the raw mode bit (non-zero = write-back). */
static inline int meta_l2c_is_writeback(void)
{
	return meta_l2c_config() & METAC_CORECFG3_L2C_MODE_BIT;
}
/* Get whether the L2 is unified instead of separated code/data */
/* Returns the raw unified bit (non-zero = unified). */
static inline int meta_l2c_is_unified(void)
{
	return meta_l2c_config() & METAC_CORECFG3_L2C_UNIFIED_BIT;
}
/* Get the L2 cache size in bytes */
/* Get the L2 cache size in bytes (0 when no L2 is present). */
static inline unsigned int meta_l2c_size(void)
{
	unsigned int cfg;

	if (!meta_l2c_is_present())
		return 0;

	cfg = meta_l2c_config();
	/* L2CSIZE encodes the size as a power-of-two multiple of 1 KiB */
	return 1024u << ((cfg & METAC_CORECFG3_L2C_SIZE_BITS)
			 >> METAC_CORECFG3_L2C_SIZE_S);
}
/* Get the number of ways in the L2 cache */
/* Get the number of ways in the L2 cache (0 when no L2 is present). */
static inline unsigned int meta_l2c_ways(void)
{
	unsigned int cfg;

	if (!meta_l2c_is_present())
		return 0;

	cfg = meta_l2c_config();
	/* the way count is encoded as a power of two */
	return 1u << ((cfg & METAC_CORECFG3_L2C_NUM_WAYS_BITS)
		      >> METAC_CORECFG3_L2C_NUM_WAYS_S);
}
/* Get the line size of the L2 cache */
/* Returns 0 when no L2 is present or the encoding is unrecognised. */
static inline unsigned int meta_l2c_linesize(void)
{
	unsigned int line_size;

	if (!meta_l2c_is_present())
		return 0;

	line_size = (meta_l2c_config() & METAC_CORECFG3_L2C_LINE_SIZE_BITS)
			>> METAC_CORECFG3_L2C_LINE_SIZE_S;

	/* only the 64-byte encoding is recognised */
	switch (line_size) {
	case METAC_CORECFG3_L2C_LINE_SIZE_64B:
		return 64;
	default:
		return 0;
	}
}
/* Get the revision ID of the L2 cache */
/* NOTE: unlike the size/ways helpers, this does not check for presence. */
static inline unsigned int meta_l2c_revision(void)
{
	return (meta_l2c_config() & METAC_CORECFG3_L2C_REV_ID_BITS)
		>> METAC_CORECFG3_L2C_REV_ID_S;
}
/*
 * Start an initialisation of the L2 cachelines and wait for completion.
 * This should only be done in a LOCK1 or LOCK2 critical section while the L2
 * is disabled.
 */
static inline void _meta_l2c_init(void)
{
	metag_out32(SYSC_L2C_INIT_INIT, SYSC_L2C_INIT);
	/* busy-wait until the hardware clears the in-progress status */
	while (metag_in32(SYSC_L2C_INIT) == SYSC_L2C_INIT_IN_PROGRESS)
		/* do nothing */;
}
/*
 * Start a writeback of dirty L2 cachelines and wait for completion.
 * This should only be done in a LOCK1 or LOCK2 critical section.
 */
static inline void _meta_l2c_purge(void)
{
	metag_out32(SYSC_L2C_PURGE_PURGE, SYSC_L2C_PURGE);
	/* busy-wait until the hardware clears the in-progress status */
	while (metag_in32(SYSC_L2C_PURGE) == SYSC_L2C_PURGE_IN_PROGRESS)
		/* do nothing */;
}
/* Set whether the L2 cache is enabled. */
/* Raw read-modify-write of the L2 enable bit; caller handles locking. */
static inline void _meta_l2c_enable(int enabled)
{
	unsigned int reg = metag_in32(SYSC_L2C_ENABLE);

	if (enabled)
		reg |= SYSC_L2C_ENABLE_ENABLE_BIT;
	else
		reg &= ~SYSC_L2C_ENABLE_ENABLE_BIT;
	metag_out32(reg, SYSC_L2C_ENABLE);
}
/* Set whether the L2 cache prefetch is enabled. */
/* Raw read-modify-write of the L2 prefetch enable bit; caller locks. */
static inline void _meta_l2c_pf_enable(int pfenabled)
{
	unsigned int reg = metag_in32(SYSC_L2C_ENABLE);

	if (pfenabled)
		reg |= SYSC_L2C_ENABLE_PFENABLE_BIT;
	else
		reg &= ~SYSC_L2C_ENABLE_PFENABLE_BIT;
	metag_out32(reg, SYSC_L2C_ENABLE);
}
/* Return whether the L2 cache is enabled */
/* Raw register read; non-zero means enabled. */
static inline int _meta_l2c_is_enabled(void)
{
	return metag_in32(SYSC_L2C_ENABLE) & SYSC_L2C_ENABLE_ENABLE_BIT;
}
/* Return whether the L2 cache prefetch is enabled */
/* Raw register read; non-zero means prefetch enabled. */
static inline int _meta_l2c_pf_is_enabled(void)
{
	return metag_in32(SYSC_L2C_ENABLE) & SYSC_L2C_ENABLE_PFENABLE_BIT;
}
/* Return whether the L2 cache is enabled */
/* Return whether the L2 cache is enabled (lock-free read, see below). */
static inline int meta_l2c_is_enabled(void)
{
	/*
	 * There is no need to lock at the moment, as the enable bit is never
	 * intermediately changed, so we will never see an intermediate result.
	 */
	return _meta_l2c_is_enabled();
}
/*
* Ensure the L2 cache is disabled.
* Return whether the L2 was previously disabled.
*/
int meta_l2c_disable(void);
/*
* Ensure the L2 cache is enabled.
* Return whether the L2 was previously enabled.
*/
int meta_l2c_enable(void);
/* Return whether the L2 cache prefetch is enabled */
static inline int meta_l2c_pf_is_enabled(void)
{
	/* cached software state; tracked even while the L2 is disabled */
	return l2c_pfenable;
}
/*
* Set whether the L2 cache prefetch is enabled.
* Return whether the L2 prefetch was previously enabled.
*/
int meta_l2c_pf_enable(int pfenable);
/*
* Flush the L2 cache.
* Return 1 if the L2 is disabled.
*/
int meta_l2c_flush(void);
/*
 * Write back all dirty cache lines in the L2 cache.
 * Return 1 if the L2 is disabled or there isn't any writeback.
 */
static inline int meta_l2c_writeback(void)
{
	unsigned long flags;
	int en;

	/* no need to purge if it's not a writeback cache */
	if (!meta_l2c_is_writeback())
		return 1;

	/*
	 * Purge only works if the L2 is enabled, and involves reading back to
	 * detect completion, so keep this operation atomic with other threads.
	 */
	__global_lock1(flags);
	en = meta_l2c_is_enabled();
	if (likely(en)) {
		/* drain pending writes into the L2 before purging */
		wr_fence();
		_meta_l2c_purge();
	}
	__global_unlock1(flags);

	return !en;
}
#else /* CONFIG_METAG_L2C */
#define meta_l2c_config() 0
#define meta_l2c_is_present() 0
#define meta_l2c_is_writeback() 0
#define meta_l2c_is_unified() 0
#define meta_l2c_size() 0
#define meta_l2c_ways() 0
#define meta_l2c_linesize() 0
#define meta_l2c_revision() 0
#define meta_l2c_is_enabled() 0
#define _meta_l2c_pf_is_enabled() 0
#define meta_l2c_pf_is_enabled() 0
#define meta_l2c_disable() 1
#define meta_l2c_enable() 0
#define meta_l2c_pf_enable(X) 0
static inline int meta_l2c_flush(void)
{
return 1;
}
static inline int meta_l2c_writeback(void)
{
return 1;
}
#endif /* CONFIG_METAG_L2C */
#endif /* _METAG_L2CACHE_H */
#ifndef __ASM_METAG_TLB_H
#define __ASM_METAG_TLB_H
#include <asm/cacheflush.h>
#include <asm/page.h>
/* Note, read http://lkml.org/lkml/2004/1/15/6 */
#ifdef CONFIG_METAG_META12
#define tlb_start_vma(tlb, vma) \
do { \
if (!tlb->fullmm) \
flush_cache_range(vma, vma->vm_start, vma->vm_end); \
} while (0)
#define tlb_end_vma(tlb, vma) \
do { \
if (!tlb->fullmm) \
flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
} while (0)
#else
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#endif
#define __tlb_remove_tlb_entry(tlb, pte, addr) do { } while (0)
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
#include <asm-generic/tlb.h>
#endif
#ifndef __ASM_METAG_TLBFLUSH_H
#define __ASM_METAG_TLBFLUSH_H
#include <linux/io.h>
#include <linux/sched.h>
#include <asm/metag_mem.h>
#include <asm/pgalloc.h>
/*
* TLB flushing:
*
* - flush_tlb() flushes the current mm struct TLBs
* - flush_tlb_all() flushes all processes TLBs
* - flush_tlb_mm(mm) flushes the specified mm context TLB's
* - flush_tlb_page(vma, vmaddr) flushes one page
* - flush_tlb_range(mm, start, end) flushes a range of pages
* - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
* - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
*
* FIXME: Meta 2 can flush single TLB entries.
*
*/
#if defined(CONFIG_METAG_META21) && !defined(CONFIG_SMP)
/* Meta2 UP build: each hardware thread has its own MMCU flush register. */
static inline void __flush_tlb(void)
{
	/* flush TLB entries for just the current hardware thread */
	int thread = hard_processor_id();
	metag_out32(0, (LINSYSCFLUSH_TxMMCU_BASE +
			LINSYSCFLUSH_TxMMCU_STRIDE * thread));
}
#else
/* Meta1 or SMP build: flush the MMU TLBs of every hardware thread. */
static inline void __flush_tlb(void)
{
	/* flush TLB entries for all hardware threads */
	metag_out32(0, LINSYSCFLUSH_MMCU);
}
#endif /* defined(CONFIG_METAG_META21) && !defined(CONFIG_SMP) */
#define flush_tlb() __flush_tlb()
#define flush_tlb_all() __flush_tlb()
#define local_flush_tlb_all() __flush_tlb()
/* Flush an mm's TLB entries; only the active mm can be resident. */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm != current->active_mm)
		return;
	__flush_tlb();
}
/* No single-entry flush used here (see FIXME above): flush the whole mm. */
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	flush_tlb_mm(vma->vm_mm);
}
/* No ranged flush available: flush the whole mm's TLB. */
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}
/* Page-table range flush: delegate to the whole-mm TLB flush. */
static inline void flush_tlb_pgtables(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
	flush_tlb_mm(mm);
}
/* No ranged kernel flush: flush everything. */
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	flush_tlb_all();
}
#endif /* __ASM_METAG_TLBFLUSH_H */
/*
* arch/metag/mm/cache.c
*
* Copyright (C) 2001, 2002, 2005, 2007, 2012 Imagination Technologies.
*
* This program is free software; you can redistribute it and/or modify it under
* the terms of the GNU General Public License version 2 as published by the
* Free Software Foundation.
*
* Cache control code
*/
#include <linux/export.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include <asm/core_reg.h>
#include <asm/metag_isa.h>
#include <asm/metag_mem.h>
#include <asm/metag_regs.h>
#define DEFAULT_CACHE_WAYS_LOG2 2
/*
* Size of a set in the caches. Initialised for default 16K stride, adjusted
* according to values passed through TBI global heap segment via LDLK (on ATP)
* or config registers (on HTP/MTP)
*/
static int dcache_set_shift = METAG_TBI_CACHE_SIZE_BASE_LOG2
- DEFAULT_CACHE_WAYS_LOG2;
static int icache_set_shift = METAG_TBI_CACHE_SIZE_BASE_LOG2
- DEFAULT_CACHE_WAYS_LOG2;
/*
* The number of sets in the caches. Initialised for HTP/ATP, adjusted
* according to NOMMU setting in config registers
*/
static unsigned char dcache_sets_log2 = DEFAULT_CACHE_WAYS_LOG2;
static unsigned char icache_sets_log2 = DEFAULT_CACHE_WAYS_LOG2;
/**
 * metag_cache_probe() - Probe L1 cache configuration.
 *
 * Probe the L1 cache configuration to aid the L1 physical cache flushing
 * functions, filling in the file-local {i,d}cache_set_shift and
 * {i,d}cache_sets_log2 variables.
 */
void metag_cache_probe(void)
{
#ifndef CONFIG_METAG_META12
	/* HTP/MTP: geometry comes from the core ID/config registers */
	int coreid = metag_in32(METAC_CORE_ID);
	int config = metag_in32(METAC_CORE_CONFIG2);
	int cfgcache = coreid & METAC_COREID_CFGCACHE_BITS;

	/* these cache types have 2 sets (log2 = 1) instead of 4 */
	if (cfgcache == METAC_COREID_CFGCACHE_TYPE0 ||
	    cfgcache == METAC_COREID_CFGCACHE_PRIVNOMMU) {
		icache_sets_log2 = 1;
		dcache_sets_log2 = 1;
	}

	/* For normal size caches, the smallest size is 4Kb.
	   For small caches, the smallest size is 64b */
	icache_set_shift = (config & METAC_CORECFG2_ICSMALL_BIT)
				? 6 : 12;
	icache_set_shift += (config & METAC_CORE_C2ICSZ_BITS)
				>> METAC_CORE_C2ICSZ_S;
	icache_set_shift -= icache_sets_log2;

	dcache_set_shift = (config & METAC_CORECFG2_DCSMALL_BIT)
				? 6 : 12;
	dcache_set_shift += (config & METAC_CORECFG2_DCSZ_BITS)
				>> METAC_CORECFG2_DCSZ_S;
	dcache_set_shift -= dcache_sets_log2;
#else
	/* Extract cache sizes from global heap segment */
	unsigned long val, u;
	int width, shift, addend;
	PTBISEG seg;

	seg = __TBIFindSeg(NULL, TBID_SEG(TBID_THREAD_GLOBAL,
					  TBID_SEGSCOPE_GLOBAL,
					  TBID_SEGTYPE_HEAP));
	if (seg != NULL) {
		val = seg->Data[1];

		/* Work out width of I-cache size bit-field */
		u = ((unsigned long) METAG_TBI_ICACHE_SIZE_BITS)
			>> METAG_TBI_ICACHE_SIZE_S;
		width = 0;
		while (u & 1) {
			width++;
			u >>= 1;
		}

		/* Extract sign-extended size addend value */
		shift = 32 - (METAG_TBI_ICACHE_SIZE_S + width);
		addend = (long) ((val & METAG_TBI_ICACHE_SIZE_BITS)
				 << shift)
				>> (shift + METAG_TBI_ICACHE_SIZE_S);

		/* Now calculate I-cache set size */
		icache_set_shift = (METAG_TBI_CACHE_SIZE_BASE_LOG2
				    - DEFAULT_CACHE_WAYS_LOG2)
					+ addend;

		/* Similarly for D-cache */
		u = ((unsigned long) METAG_TBI_DCACHE_SIZE_BITS)
			>> METAG_TBI_DCACHE_SIZE_S;
		width = 0;
		while (u & 1) {
			width++;
			u >>= 1;
		}
		shift = 32 - (METAG_TBI_DCACHE_SIZE_S + width);
		addend = (long) ((val & METAG_TBI_DCACHE_SIZE_BITS)
				 << shift)
				>> (shift + METAG_TBI_DCACHE_SIZE_S);
		dcache_set_shift = (METAG_TBI_CACHE_SIZE_BASE_LOG2
				    - DEFAULT_CACHE_WAYS_LOG2)
					+ addend;
	}
#endif
}
/*
 * Flush this thread's partition of the L1 data cache by writing into the
 * physical cache flush region.
 *
 * @start: only its top (sign) bit is used, selecting the global (set) or
 *         local (clear) cache partition.
 */
static void metag_phys_data_cache_flush(const void *start)
{
	unsigned long flush0, flush1, flush2, flush3;
	int loops, step;
	int thread;
	int part, offset;
	int set_shift;

	/* Use a sequence of writes to flush the cache region requested */
	thread = (__core_reg_get(TXENABLE) & TXENABLE_THREAD_BITS)
		 >> TXENABLE_THREAD_S;

	/* Cache is broken into sets which lie in contiguous RAMs */
	set_shift = dcache_set_shift;

	/* Move to the base of the physical cache flush region */
	flush0 = LINSYSCFLUSH_DCACHE_LINE;
	step = 64;

	/* Get partition data for this thread */
	part = metag_in32(SYSC_DCPART0 +
			  (SYSC_xCPARTn_STRIDE * thread));

	if ((int)start < 0)
		/* Access Global vs Local partition */
		part >>= SYSC_xCPARTG_AND_S
			 - SYSC_xCPARTL_AND_S;

	/* Extract offset and move SetOff */
	offset = (part & SYSC_xCPARTL_OR_BITS)
		 >> SYSC_xCPARTL_OR_S;
	flush0 += (offset << (set_shift - 4));

	/* Shrink size */
	part = (part & SYSC_xCPARTL_AND_BITS)
	       >> SYSC_xCPARTL_AND_S;
	loops = ((part + 1) << (set_shift - 4));

	/* Reduce loops by step of cache line size */
	loops /= step;

	/* one flush pointer per set RAM */
	flush1 = flush0 + (1 << set_shift);
	flush2 = flush0 + (2 << set_shift);
	flush3 = flush0 + (3 << set_shift);

	if (dcache_sets_log2 == 1) {
		/* 2 sets: interleave pointers, double step, halve loops */
		flush2 = flush1;
		flush3 = flush1 + step;
		flush1 = flush0 + step;
		step <<= 1;
		loops >>= 1;
	}

	/* Clear loops ways in cache */
	while (loops-- != 0) {
		/* Clear the ways. */
#if 0
		/*
		 * GCC doesn't generate very good code for this so we
		 * provide inline assembly instead.
		 */
		metag_out8(0, flush0);
		metag_out8(0, flush1);
		metag_out8(0, flush2);
		metag_out8(0, flush3);

		flush0 += step;
		flush1 += step;
		flush2 += step;
		flush3 += step;
#else
		asm volatile (
			"SETB\t[%0+%4++],%5\n"
			"SETB\t[%1+%4++],%5\n"
			"SETB\t[%2+%4++],%5\n"
			"SETB\t[%3+%4++],%5\n"
			: "+e" (flush0),
			  "+e" (flush1),
			  "+e" (flush2),
			  "+e" (flush3)
			: "e" (step), "a" (0));
#endif
	}
}
/* Flush the whole data cache partition selected by @start's top bit. */
void metag_data_cache_flush_all(const void *start)
{
	/* nothing to do while the data cache is switched off */
	if (!(metag_in32(SYSC_CACHE_MMU_CONFIG) & SYSC_CMMUCFG_DC_ON_BIT))
		return;

	metag_phys_data_cache_flush(start);
}
/*
 * metag_data_cache_flush() - flush a linear range of the data cache.
 * @start: linear start address.
 * @bytes: length; if >= 4096 the whole related cache partition state is
 *         flushed instead of the limited range.
 */
void metag_data_cache_flush(const void *start, int bytes)
{
	unsigned long flush0;
	int loops, step;

	if ((metag_in32(SYSC_CACHE_MMU_CONFIG) & SYSC_CMMUCFG_DC_ON_BIT) == 0)
		/* No need to flush the data cache it's not actually enabled */
		return;

	if (bytes >= 4096) {
		metag_phys_data_cache_flush(start);
		return;
	}

	/* Use linear cache flush mechanism on META IP */
	flush0 = (int)start;
	/* number of cache lines covering [start, start+bytes) */
	loops = ((int)start & (DCACHE_LINE_BYTES - 1)) + bytes +
		(DCACHE_LINE_BYTES - 1);
	loops >>= DCACHE_LINE_S;

#define PRIM_FLUSH(addr, offset) do { \
	int __addr = ((int) (addr)) + ((offset) * 64); \
	__builtin_dcache_flush((void *)(__addr)); \
	} while (0)

#define LOOP_INC (4*64)

	/* unrolled four lines per iteration via switch fallthrough */
	do {
		/* By default stop */
		step = 0;

		switch (loops) {
		/* Drop Thru Cases! */
		default:
			PRIM_FLUSH(flush0, 3);
			loops -= 4;
			step = 1;
		case 3:
			PRIM_FLUSH(flush0, 2);
		case 2:
			PRIM_FLUSH(flush0, 1);
		case 1:
			PRIM_FLUSH(flush0, 0);
			flush0 += LOOP_INC;
		case 0:
			break;
		}
	} while (step);
}
EXPORT_SYMBOL(metag_data_cache_flush);
/*
 * Flush this thread's partition of the L1 code cache via the physical
 * cache flush region.
 *
 * @start: only its top (sign) bit selects global vs local partition
 *         (on Meta1 the low bits may also restrict the targeted sets).
 * @bytes: size hint; small values restrict the flush on Meta1 only.
 */
static void metag_phys_code_cache_flush(const void *start, int bytes)
{
	unsigned long flush0, flush1, flush2, flush3, end_set;
	int loops, step;
	int thread;
	int set_shift, set_size;
	int part, offset;

	/* Use a sequence of writes to flush the cache region requested */
	thread = (__core_reg_get(TXENABLE) & TXENABLE_THREAD_BITS)
		 >> TXENABLE_THREAD_S;
	set_shift = icache_set_shift;

	/* Move to the base of the physical cache flush region */
	flush0 = LINSYSCFLUSH_ICACHE_LINE;
	step = 64;

	/* Get partition code for this thread */
	part = metag_in32(SYSC_ICPART0 +
			  (SYSC_xCPARTn_STRIDE * thread));

	if ((int)start < 0)
		/* Access Global vs Local partition */
		part >>= SYSC_xCPARTG_AND_S-SYSC_xCPARTL_AND_S;

	/* Extract offset and move SetOff */
	offset = (part & SYSC_xCPARTL_OR_BITS)
		 >> SYSC_xCPARTL_OR_S;
	flush0 += (offset << (set_shift - 4));

	/* Shrink size */
	part = (part & SYSC_xCPARTL_AND_BITS)
	       >> SYSC_xCPARTL_AND_S;
	loops = ((part + 1) << (set_shift - 4));

	/* Where does the Set end? */
	end_set = flush0 + loops;
	set_size = loops;

#ifdef CONFIG_METAG_META12
	if ((bytes < 4096) && (bytes < loops)) {
		/* Unreachable on HTP/MTP */
		/* Only target the sets that could be relavent */
		flush0 += (loops - step) & ((int) start);
		loops = (((int) start) & (step-1)) + bytes + step - 1;
	}
#endif

	/* Reduce loops by step of cache line size */
	loops /= step;

	/* one flush pointer per set RAM */
	flush1 = flush0 + (1<<set_shift);
	flush2 = flush0 + (2<<set_shift);
	flush3 = flush0 + (3<<set_shift);

	if (icache_sets_log2 == 1) {
		/* 2 sets: interleave pointers, double step, halve loops */
		flush2 = flush1;
		flush3 = flush1 + step;
		flush1 = flush0 + step;
#if 0
		/* flush0 will stop one line early in this case
		 * (flush1 will do the final line).
		 * However we don't correct end_set here at the moment
		 * because it will never wrap on HTP/MTP
		 */
		end_set -= step;
#endif
		step <<= 1;
		loops >>= 1;
	}

	/* Clear loops ways in cache */
	while (loops-- != 0) {
#if 0
		/*
		 * GCC doesn't generate very good code for this so we
		 * provide inline assembly instead.
		 */
		/* Clear the ways */
		metag_out8(0, flush0);
		metag_out8(0, flush1);
		metag_out8(0, flush2);
		metag_out8(0, flush3);

		flush0 += step;
		flush1 += step;
		flush2 += step;
		flush3 += step;
#else
		asm volatile (
			"SETB\t[%0+%4++],%5\n"
			"SETB\t[%1+%4++],%5\n"
			"SETB\t[%2+%4++],%5\n"
			"SETB\t[%3+%4++],%5\n"
			: "+e" (flush0),
			  "+e" (flush1),
			  "+e" (flush2),
			  "+e" (flush3)
			: "e" (step), "a" (0));
#endif

		if (flush0 == end_set) {
			/* Wrap within Set 0 */
			flush0 -= set_size;
			flush1 -= set_size;
			flush2 -= set_size;
			flush3 -= set_size;
		}
	}
}
/* Flush the whole code cache partition selected by @start's top bit. */
void metag_code_cache_flush_all(const void *start)
{
	/* nothing to do while the code cache is switched off */
	if (!(metag_in32(SYSC_CACHE_MMU_CONFIG) & SYSC_CMMUCFG_IC_ON_BIT))
		return;

	metag_phys_code_cache_flush(start, 4096);
}
/*
 * metag_code_cache_flush() - flush a linear range of the code cache.
 * @start: linear start address.
 * @bytes: length; the full partition is flushed when >= 4096, and
 *         always on Meta1 (no CACHEWD instruction there).
 */
void metag_code_cache_flush(const void *start, int bytes)
{
#ifndef CONFIG_METAG_META12
	void *flush;
	int loops, step;
#endif /* !CONFIG_METAG_META12 */

	if ((metag_in32(SYSC_CACHE_MMU_CONFIG) & SYSC_CMMUCFG_IC_ON_BIT) == 0)
		/* No need to flush the code cache it's not actually enabled */
		return;

#ifdef CONFIG_METAG_META12
	/* CACHEWD isn't available on Meta1, so always do full cache flush */
	metag_phys_code_cache_flush(start, bytes);
#else /* CONFIG_METAG_META12 */
	/* If large size do full physical cache flush */
	if (bytes >= 4096) {
		metag_phys_code_cache_flush(start, bytes);
		return;
	}

	/* Use linear cache flush mechanism on META IP */
	flush = (void *)((int)start & ~(ICACHE_LINE_BYTES-1));
	/* number of cache lines covering [start, start+bytes) */
	loops = ((int)start & (ICACHE_LINE_BYTES-1)) + bytes +
		(ICACHE_LINE_BYTES-1);
	loops >>= ICACHE_LINE_S;

#define PRIM_IFLUSH(addr, offset) \
	__builtin_meta2_cachewd(((addr) + ((offset) * 64)), CACHEW_ICACHE_BIT)

#define LOOP_INC (4*64)

	/* unrolled four lines per iteration via switch fallthrough */
	do {
		/* By default stop */
		step = 0;

		switch (loops) {
		/* Drop Thru Cases! */
		default:
			PRIM_IFLUSH(flush, 3);
			loops -= 4;
			step = 1;
		case 3:
			PRIM_IFLUSH(flush, 2);
		case 2:
			PRIM_IFLUSH(flush, 1);
		case 1:
			PRIM_IFLUSH(flush, 0);
			flush += LOOP_INC;
		case 0:
			break;
		}
	} while (step);
#endif /* !CONFIG_METAG_META12 */
}
EXPORT_SYMBOL(metag_code_cache_flush);
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <asm/l2cache.h>
#include <asm/metag_isa.h>
/* If non-0, then initialise the L2 cache */
static int l2cache_init = 1;
/* If non-0, then initialise the L2 cache prefetch */
static int l2cache_init_pf = 1;
int l2c_pfenable;
static volatile u32 l2c_testdata[16] __initdata __aligned(64);
/* Parse the "l2cache=" early parameter into l2cache_init. */
static int __init parse_l2cache(char *p)
{
	char *opt = p;

	if (get_option(&opt, &l2cache_init) == 1)
		return 0;

	pr_err("Bad l2cache parameter (%s)\n", p);
	return 1;
}
early_param("l2cache", parse_l2cache);
/* Parse the "l2cache_pf=" early parameter into l2cache_init_pf. */
static int __init parse_l2cache_pf(char *p)
{
	char *opt = p;

	if (get_option(&opt, &l2cache_init_pf) == 1)
		return 0;

	pr_err("Bad l2cache_pf parameter (%s)\n", p);
	return 1;
}
early_param("l2cache_pf", parse_l2cache_pf);
/*
 * meta_l2c_setup() - boot-time L2 cache initialisation.
 *
 * Records the bootloader's prefetch setting, then enables the L2 cache
 * and its prefetch unless disabled via "l2cache"/"l2cache_pf".
 */
static int __init meta_l2c_setup(void)
{
	/*
	 * If the L2 cache isn't even present, don't do anything, but say so in
	 * the log.
	 */
	if (!meta_l2c_is_present()) {
		pr_info("L2 Cache: Not present\n");
		return 0;
	}

	/*
	 * Check whether the line size is recognised.
	 */
	if (!meta_l2c_linesize()) {
		pr_warn_once("L2 Cache: unknown line size id (config=0x%08x)\n",
			     meta_l2c_config());
	}

	/*
	 * Initialise state.
	 */
	l2c_pfenable = _meta_l2c_pf_is_enabled();

	/*
	 * Enable the L2 cache and print to log whether it was already enabled
	 * by the bootloader.
	 */
	if (l2cache_init) {
		pr_info("L2 Cache: Enabling... ");
		if (meta_l2c_enable())
			pr_cont("already enabled\n");
		else
			pr_cont("done\n");
	} else {
		pr_info("L2 Cache: Not enabling\n");
	}

	/*
	 * Enable L2 cache prefetch.
	 */
	if (l2cache_init_pf) {
		pr_info("L2 Cache: Enabling prefetch... ");
		if (meta_l2c_pf_enable(1))
			pr_cont("already enabled\n");
		else
			pr_cont("done\n");
	} else {
		pr_info("L2 Cache: Not enabling prefetch\n");
	}

	return 0;
}
core_initcall(meta_l2c_setup);
/*
 * Ensure the L2 cache is disabled.
 * Returns 1 if the L2 was already disabled (or absent), 0 otherwise.
 */
int meta_l2c_disable(void)
{
	unsigned long flags;
	int en;

	if (!meta_l2c_is_present())
		return 1;

	/*
	 * Prevent other threads writing during the writeback, otherwise the
	 * writes will get "lost" when the L2 is disabled.
	 */
	__global_lock2(flags);
	en = meta_l2c_is_enabled();
	if (likely(en)) {
		/* stop prefetch, drain writes, purge dirty lines, disable */
		_meta_l2c_pf_enable(0);
		wr_fence();
		_meta_l2c_purge();
		_meta_l2c_enable(0);
	}
	__global_unlock2(flags);

	return !en;
}
/*
 * Ensure the L2 cache is enabled.
 * Returns 1 if the L2 was already enabled, 0 otherwise (or if absent).
 */
int meta_l2c_enable(void)
{
	unsigned long flags;
	int en;

	if (!meta_l2c_is_present())
		return 0;

	/*
	 * Init (clearing the L2) can happen while the L2 is disabled, so other
	 * threads are safe to continue executing, however we must not init the
	 * cache if it's already enabled (dirty lines would be discarded), so
	 * this operation should still be atomic with other threads.
	 */
	__global_lock1(flags);
	en = meta_l2c_is_enabled();
	if (likely(!en)) {
		_meta_l2c_init();
		_meta_l2c_enable(1);
		/* restore the last known prefetch setting */
		_meta_l2c_pf_enable(l2c_pfenable);
	}
	__global_unlock1(flags);

	return en;
}
/*
 * Set whether the L2 cache prefetch is enabled.
 * Returns whether the L2 prefetch was previously enabled.
 *
 * Fix: the previous code initialised 'en' from l2c_pfenable outside the
 * global lock and then unconditionally reassigned it inside the lock —
 * a dead (and unsynchronised) store, now removed.
 */
int meta_l2c_pf_enable(int pfenable)
{
	unsigned long flags;
	int en;

	if (!meta_l2c_is_present())
		return 0;

	/*
	 * We read modify write the enable register, so this operation must be
	 * atomic with other threads.
	 */
	__global_lock1(flags);
	en = l2c_pfenable;
	l2c_pfenable = pfenable;
	/* only touch the hardware bit while the L2 is actually enabled */
	if (meta_l2c_is_enabled())
		_meta_l2c_pf_enable(pfenable);
	__global_unlock1(flags);

	return en;
}
/*
 * Flush the L2 cache by purging dirty lines, disabling, re-initialising
 * and re-enabling it. Returns 1 if the L2 was disabled (nothing done).
 */
int meta_l2c_flush(void)
{
	unsigned long flags;
	int en;

	/*
	 * Prevent other threads writing during the writeback. This also
	 * involves read modify writes.
	 */
	__global_lock2(flags);
	en = meta_l2c_is_enabled();
	if (likely(en)) {
		/* quiesce prefetch, drain writes, purge, then reinit clean */
		_meta_l2c_pf_enable(0);
		wr_fence();
		_meta_l2c_purge();
		_meta_l2c_enable(0);
		_meta_l2c_init();
		_meta_l2c_enable(1);
		_meta_l2c_pf_enable(l2c_pfenable);
	}
	__global_unlock2(flags);

	return !en;
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment