Commit 4e76ae44 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/parisc-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/parisc-2.6:
  [PARISC] Convert to new irq_chip functions
  [PARISC] fix per-cpu flag problem in the cpu affinity checkers
  [PARISC] fix vmap flush/invalidate
  eliminate special FLUSH flag from page table
  parisc: flush pages through tmpalias space
parents f19ade4d 1c0f6476
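The series touches five areas: the genirq conversion, a fix for the per-cpu affinity checks, vmap flush/invalidate, removal of the software FLUSH page-table bit, and cache flushing through the tmpalias window. The recurring mechanical change is the genirq one: the old irq_chip callbacks took a bare interrupt number, the new ones take a struct irq_data pointer. A minimal sketch of the two shapes (the "demo" chip is hypothetical, and only members this merge touches are shown):

    /* Old style: callbacks receive the irq number. */
    static void demo_mask(unsigned int irq)
    {
            /* per-chip state came from get_irq_chip_data(irq) */
    }

    /* New style: callbacks receive struct irq_data, which carries the
     * number (d->irq), the chip data, and the affinity mask. */
    static void demo_irq_mask(struct irq_data *d)
    {
            /* per-chip state comes from irq_data_get_irq_chip_data(d) */
    }

    static struct irq_chip demo_chip = {
            .name     = "DEMO",
            .irq_mask = demo_irq_mask,      /* was: .mask = demo_mask */
    };

Selecting GENERIC_HARDIRQS_NO_DEPRECATED in the Kconfig hunk below asserts that no old-style callbacks remain in the architecture.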
@@ -15,6 +15,7 @@ config PARISC
select HAVE_GENERIC_HARDIRQS
select GENERIC_IRQ_PROBE
select IRQ_PER_CPU
select GENERIC_HARDIRQS_NO_DEPRECATED
help
The PA-RISC microprocessor is designed by Hewlett-Packard and used
@@ -26,8 +26,6 @@ void flush_user_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_page_asm(void *);
void flush_kernel_icache_page(void *);
void flush_user_dcache_page(unsigned long);
void flush_user_icache_page(unsigned long);
void flush_user_dcache_range(unsigned long, unsigned long);
void flush_user_icache_range(unsigned long, unsigned long);
@@ -37,6 +35,13 @@ void flush_cache_all_local(void);
void flush_cache_all(void);
void flush_cache_mm(struct mm_struct *mm);
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
void flush_kernel_dcache_page_addr(void *addr);
static inline void flush_kernel_dcache_page(struct page *page)
{
flush_kernel_dcache_page_addr(page_address(page));
}
#define flush_kernel_dcache_range(start,size) \
flush_kernel_dcache_range_asm((start), (start)+(size));
/* vmap range flushes and invalidates. Architecturally, we don't need
@@ -50,6 +55,16 @@ static inline void flush_kernel_vmap_range(void *vaddr, int size)
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
unsigned long start = (unsigned long)vaddr;
void *cursor = vaddr;
for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) {
struct page *page = vmalloc_to_page(cursor);
if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
flush_kernel_dcache_page(page);
}
flush_kernel_dcache_range_asm(start, start + size);
}
#define flush_cache_vmap(start, end) flush_cache_all()
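flush_kernel_vmap_range() and invalidate_kernel_vmap_range() exist for callers that perform I/O directly on vmalloc()/vmap() memory, where the kernel alias and the page's other mappings may live in different cache colors. The fix in this hunk makes the parisc invalidate side first write back any page still flagged PG_dcache_dirty, then discard the alias's cache lines. A typical call pattern, as a hedged sketch (start_device_dma_from() is a stand-in for a real driver's machinery):

    void *buf = vmalloc(len);

    /* Device is about to read the buffer: push dirty lines for the
     * vmap alias out to the physical pages first. */
    flush_kernel_vmap_range(buf, len);
    start_device_dma_from(buf, len);        /* hypothetical */

    /* Device has written the buffer: drop whatever was cached through
     * the vmap alias while the DMA was in flight. */
    invalidate_kernel_vmap_range(buf, len);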
@@ -90,19 +105,15 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned
void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
/* defined in pacache.S exported in cache.c used by flush_anon_page */
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void
flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
if (PageAnon(page))
flush_user_dcache_page(vmaddr);
}
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
void flush_kernel_dcache_page_addr(void *addr);
static inline void flush_kernel_dcache_page(struct page *page)
{
flush_kernel_dcache_page_addr(page_address(page));
flush_dcache_page_asm(page_to_phys(page), vmaddr);
}
#ifdef CONFIG_DEBUG_RODATA
@@ -32,15 +32,10 @@ static __inline__ int irq_canonicalize(int irq)
}
struct irq_chip;
struct irq_data;
/*
* Some useful "we don't have to do anything here" handlers. Should
* probably be provided by the generic code.
*/
void no_ack_irq(unsigned int irq);
void no_end_irq(unsigned int irq);
void cpu_ack_irq(unsigned int irq);
void cpu_eoi_irq(unsigned int irq);
void cpu_ack_irq(struct irq_data *d);
void cpu_eoi_irq(struct irq_data *d);
extern int txn_alloc_irq(unsigned int nbits);
extern int txn_claim_irq(int);
@@ -49,7 +44,7 @@ extern unsigned long txn_alloc_addr(unsigned int);
extern unsigned long txn_affinity_addr(unsigned int irq, int cpu);
extern int cpu_claim_irq(unsigned int irq, struct irq_chip *, void *);
extern int cpu_check_affinity(unsigned int irq, const struct cpumask *dest);
extern int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest);
/* soft power switch support (power.c) */
extern struct tasklet_struct power_tasklet;
@@ -138,8 +138,7 @@ struct vm_area_struct;
#define _PAGE_NO_CACHE_BIT 24 /* (0x080) Uncached Page (U bit) */
#define _PAGE_ACCESSED_BIT 23 /* (0x100) Software: Page Accessed */
#define _PAGE_PRESENT_BIT 22 /* (0x200) Software: translation valid */
#define _PAGE_FLUSH_BIT 21 /* (0x400) Software: translation valid */
/* for cache flushing only */
/* bit 21 was formerly the FLUSH bit but is now unused */
#define _PAGE_USER_BIT 20 /* (0x800) Software: User accessible page */
/* N.B. The bits are defined in terms of a 32 bit word above, so the */
@@ -173,7 +172,6 @@ struct vm_area_struct;
#define _PAGE_NO_CACHE (1 << xlate_pabit(_PAGE_NO_CACHE_BIT))
#define _PAGE_ACCESSED (1 << xlate_pabit(_PAGE_ACCESSED_BIT))
#define _PAGE_PRESENT (1 << xlate_pabit(_PAGE_PRESENT_BIT))
#define _PAGE_FLUSH (1 << xlate_pabit(_PAGE_FLUSH_BIT))
#define _PAGE_USER (1 << xlate_pabit(_PAGE_USER_BIT))
#define _PAGE_FILE (1 << xlate_pabit(_PAGE_FILE_BIT))
@@ -213,7 +211,6 @@ struct vm_area_struct;
#define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
#define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
#define PAGE_GATEWAY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_GATEWAY| _PAGE_READ)
#define PAGE_FLUSH __pgprot(_PAGE_FLUSH)
/*
@@ -261,7 +258,7 @@ extern unsigned long *empty_zero_page;
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#define pte_none(x) ((pte_val(x) == 0) || (pte_val(x) & _PAGE_FLUSH))
#define pte_none(x) (pte_val(x) == 0)
#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm,addr,xp) do { pte_val(*(xp)) = 0; } while (0)
@@ -444,13 +441,10 @@ struct mm_struct;
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
pte_t old_pte;
pte_t pte;
spin_lock(&pa_dbit_lock);
pte = old_pte = *ptep;
pte_val(pte) &= ~_PAGE_PRESENT;
pte_val(pte) |= _PAGE_FLUSH;
set_pte_at(mm,addr,ptep,pte);
old_pte = *ptep;
pte_clear(mm,addr,ptep);
spin_unlock(&pa_dbit_lock);
return old_pte;
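The _PAGE_FLUSH machinery removed above existed so that a cleared pte still left the TLB miss handlers a "flush only" translation to insert when fdc/fic later touched the unmapped address. Once the non-access miss handlers simply nullify flushes that find no translation (the entry.S changes below), the pte can be zeroed outright. Side by side, as a sketch:

    /* Old scheme: downgrade the pte to a flush-only translation. */
    pte_val(pte) &= ~_PAGE_PRESENT;
    pte_val(pte) |= _PAGE_FLUSH;
    set_pte_at(mm, addr, ptep, pte);

    /* New scheme: clear it; a later fdc/fic on this address takes a
     * non-access miss and is nullified rather than serviced. */
    pte_clear(mm, addr, ptep);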
@@ -27,12 +27,17 @@
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>
int split_tlb __read_mostly;
int dcache_stride __read_mostly;
int icache_stride __read_mostly;
EXPORT_SYMBOL(dcache_stride);
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);
/* On some machines (e.g. ones with the Merced bus), there can be
* only a single PxTLB broadcast at a time; this must be guaranteed
@@ -259,81 +264,13 @@ void disable_sr_hashing(void)
panic("SpaceID hashing is still on!\n");
}
/* Simple function to work out if we have an existing address translation
* for a user space vma. */
static inline int translation_exists(struct vm_area_struct *vma,
unsigned long addr, unsigned long pfn)
{
pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
pmd_t *pmd;
pte_t pte;
if(pgd_none(*pgd))
return 0;
pmd = pmd_offset(pgd, addr);
if(pmd_none(*pmd) || pmd_bad(*pmd))
return 0;
/* We cannot take the pte lock here: flush_cache_page is usually
* called with pte lock already held. Whereas flush_dcache_page
* takes flush_dcache_mmap_lock, which is lower in the hierarchy:
* the vma itself is secure, but the pte might come or go racily.
*/
pte = *pte_offset_map(pmd, addr);
/* But pte_unmap() does nothing on this architecture */
/* Filter out coincidental file entries and swap entries */
if (!(pte_val(pte) & (_PAGE_FLUSH|_PAGE_PRESENT)))
return 0;
return pte_pfn(pte) == pfn;
}
/* Private function to flush a page from the cache of a non-current
* process. cr25 contains the Page Directory of the current user
* process; we're going to hijack both it and the user space %sr3 to
* temporarily make the non-current process current. We have to do
* this because cache flushing may cause a non-access tlb miss which
* the handlers have to fill in from the pgd of the non-current
* process. */
static inline void
flush_user_cache_page_non_current(struct vm_area_struct *vma,
unsigned long vmaddr)
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
unsigned long physaddr)
{
/* save the current process space and pgd */
unsigned long space = mfsp(3), pgd = mfctl(25);
/* we don't mind taking interrupts since they may not
* do anything with user space, but we can't
* be preempted here */
preempt_disable();
/* make us current */
mtctl(__pa(vma->vm_mm->pgd), 25);
mtsp(vma->vm_mm->context, 3);
flush_user_dcache_page(vmaddr);
if(vma->vm_flags & VM_EXEC)
flush_user_icache_page(vmaddr);
/* put the old current process back */
mtsp(space, 3);
mtctl(pgd, 25);
preempt_enable();
}
static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
if (likely(vma->vm_mm->context == mfsp(3))) {
flush_user_dcache_page(vmaddr);
if (vma->vm_flags & VM_EXEC)
flush_user_icache_page(vmaddr);
} else {
flush_user_cache_page_non_current(vma, vmaddr);
}
flush_dcache_page_asm(physaddr, vmaddr);
if (vma->vm_flags & VM_EXEC)
flush_icache_page_asm(physaddr, vmaddr);
}
void flush_dcache_page(struct page *page)
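The rewrite above is the heart of "flush pages through tmpalias space": instead of hijacking %cr25 and %sr3 to impersonate the owning process (and needing translation_exists() to avoid faulting on a missing pte), __flush_cache_page() now takes the physical address and flushes through a kernel alias in the tmpalias window that is congruent with the user mapping, i.e. shares its cache color. Roughly what flush_dcache_page_asm() computes for that alias, as a C sketch assuming 4 kB pages and the parisc 4 MB SHMLBA (the real arithmetic is the depd/depwi sequence in pacache.S below):

    static unsigned long tmpalias_vaddr(unsigned long user_vaddr)
    {
            /* Keep the user address bits below SHMLBA so the alias has
             * the same cache color; clear the in-page offset.  The
             * physical page is attached by the TLB entry that the
             * alias fault path in entry.S inserts on demand. */
            return TMPALIAS_MAP_START |
                   (user_vaddr & (SHMLBA - 1) & PAGE_MASK);
    }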
@@ -342,10 +279,8 @@ void flush_dcache_page(struct page *page)
struct vm_area_struct *mpnt;
struct prio_tree_iter iter;
unsigned long offset;
unsigned long addr;
unsigned long addr, old_addr = 0;
pgoff_t pgoff;
unsigned long pfn = page_to_pfn(page);
if (mapping && !mapping_mapped(mapping)) {
set_bit(PG_dcache_dirty, &page->flags);
@@ -369,20 +304,11 @@ void flush_dcache_page(struct page *page)
offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
addr = mpnt->vm_start + offset;
/* Flush instructions produce non access tlb misses.
* On PA, we nullify these instructions rather than
* taking a page fault if the pte doesn't exist.
* This is just for speed. If the page translation
* isn't there, there's no point exciting the
* nadtlb handler into a nullification frenzy.
*
* Make sure we really have this page: the private
* mappings may cover this area but have COW'd this
* particular page.
*/
if (translation_exists(mpnt, addr, pfn)) {
__flush_cache_page(mpnt, addr);
break;
if (old_addr == 0 || (old_addr & (SHMLBA - 1)) != (addr & (SHMLBA - 1))) {
__flush_cache_page(mpnt, addr, page_to_phys(page));
if (old_addr)
printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n", old_addr, addr, mpnt->vm_file ? mpnt->vm_file->f_path.dentry->d_name.name : "(null)");
old_addr = addr;
}
}
flush_dcache_mmap_unlock(mapping);
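The loop above replaces the per-mapping translation_exists() test with a cache-color argument: user mappings of a shared page must be congruent modulo SHMLBA, and congruent addresses hit the same cache lines, so one tmpalias flush covers them all. A second flush, plus the INEQUIVALENT ALIASES report (which indicates a kernel bug), happens only when a mapping is not congruent with the previous one. Illustrative numbers, assuming SHMLBA is 4 MB (0x400000):

    unsigned long a = 0x10123000, b = 0x20523000;

    /* (a & (SHMLBA - 1)) == (b & (SHMLBA - 1)) == 0x123000: the two
     * mappings are congruent, so flushing via one covers the other. */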
@@ -573,7 +499,6 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
{
BUG_ON(!vma->vm_mm->context);
if (likely(translation_exists(vma, vmaddr, pfn)))
__flush_cache_page(vma, vmaddr);
__flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn)));
}
@@ -225,22 +225,13 @@
#ifndef CONFIG_64BIT
/*
* naitlb miss interruption handler (parisc 1.1 - 32 bit)
*
* Note: naitlb misses will be treated
* as an ordinary itlb miss for now.
* However, note that naitlb misses
* have the faulting address in the
* IOR/ISR.
*/
.macro naitlb_11 code
mfctl %isr,spc
b itlb_miss_11
b naitlb_miss_11
mfctl %ior,va
/* FIXME: If user causes a naitlb miss, the priv level may not be in
* lower bits of va, where the itlb miss handler is expecting them
*/
.align 32
.endm
@@ -248,26 +239,17 @@
/*
* naitlb miss interruption handler (parisc 2.0)
*
* Note: naitlb misses will be treated
* as an ordinary itlb miss for now.
* However, note that naitlb misses
* have the faulting address in the
* IOR/ISR.
*/
.macro naitlb_20 code
mfctl %isr,spc
#ifdef CONFIG_64BIT
b itlb_miss_20w
b naitlb_miss_20w
#else
b itlb_miss_20
b naitlb_miss_20
#endif
mfctl %ior,va
/* FIXME: If user causes a naitlb miss, the priv level may not be in
* lower bits of va, where the itlb miss handler is expecting them
*/
.align 32
.endm
@@ -581,7 +563,24 @@
copy \va,\tmp1
depi 0,31,23,\tmp1
cmpb,COND(<>),n \tmp,\tmp1,\fault
ldi (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),\prot
mfctl %cr19,\tmp /* iir */
/* get the opcode (first six bits) into \tmp */
extrw,u \tmp,5,6,\tmp
/*
* Only setting the T bit prevents data cache movein
* Setting access rights to zero prevents instruction cache movein
*
* Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
* to type field and _PAGE_READ goes to top bit of PL1
*/
ldi (_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
/*
* so if the opcode is one (i.e. this is a memory management
* instruction) nullify the next load so \prot is only T.
* Otherwise this is a normal data operation
*/
cmpiclr,= 0x01,\tmp,%r0
ldi (_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
depd,z \prot,8,7,\prot
/*
* OK, it is in the temp alias region, check whether "from" or "to".
@@ -631,11 +630,7 @@ ENTRY(fault_vector_20)
def 13
def 14
dtlb_20 15
#if 0
naitlb_20 16
#else
def 16
#endif
nadtlb_20 17
def 18
def 19
@@ -678,11 +673,7 @@ ENTRY(fault_vector_11)
def 13
def 14
dtlb_11 15
#if 0
naitlb_11 16
#else
def 16
#endif
nadtlb_11 17
def 18
def 19
@@ -1203,7 +1194,7 @@ nadtlb_miss_20w:
get_pgd spc,ptp
space_check spc,t0,nadtlb_fault
L3_ptep ptp,pte,t0,va,nadtlb_check_flush_20w
L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w
update_ptep ptp,pte,t0,t1
@@ -1214,16 +1205,8 @@ nadtlb_miss_20w:
rfir
nop
nadtlb_check_flush_20w:
bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate
/* Insert a "flush only" translation */
depdi,z 7,7,3,prot
depdi 1,10,1,prot
/* Drop prot bits from pte and convert to page addr for idtlbt */
convert_for_tlb_insert20 pte
nadtlb_check_alias_20w:
do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate
idtlbt pte,prot
@@ -1255,25 +1238,7 @@ dtlb_miss_11:
nop
dtlb_check_alias_11:
/* Check to see if fault is in the temporary alias region */
cmpib,<>,n 0,spc,dtlb_fault /* forward */
ldil L%(TMPALIAS_MAP_START),t0
copy va,t1
depwi 0,31,23,t1
cmpb,<>,n t0,t1,dtlb_fault /* forward */
ldi (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),prot
depw,z prot,8,7,prot
/*
* OK, it is in the temp alias region, check whether "from" or "to".
* Check "subtle" note in pacache.S re: r23/r26.
*/
extrw,u,= va,9,1,r0
or,tr %r23,%r0,pte /* If "from" use "from" page */
or %r26,%r0,pte /* else "to", use "to" page */
do_alias spc,t0,t1,va,pte,prot,dtlb_fault
idtlba pte,(va)
idtlbp prot,(va)
@@ -1286,7 +1251,7 @@ nadtlb_miss_11:
space_check spc,t0,nadtlb_fault
L2_ptep ptp,pte,t0,va,nadtlb_check_flush_11
L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11
update_ptep ptp,pte,t0,t1
@@ -1304,26 +1269,11 @@ nadtlb_miss_11:
rfir
nop
nadtlb_check_flush_11:
bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate
/* Insert a "flush only" translation */
zdepi 7,7,3,prot
depi 1,10,1,prot
nadtlb_check_alias_11:
do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate
/* Get rid of prot bits and convert to page addr for idtlba */
depi 0,31,ASM_PFN_PTE_SHIFT,pte
SHRREG pte,(ASM_PFN_PTE_SHIFT-(31-26)),pte
mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
mtsp spc,%sr1
idtlba pte,(%sr1,va)
idtlbp prot,(%sr1,va)
mtsp t0, %sr1 /* Restore sr1 */
idtlba pte,(va)
idtlbp prot,(va)
rfir
nop
@@ -1359,7 +1309,7 @@ nadtlb_miss_20:
space_check spc,t0,nadtlb_fault
L2_ptep ptp,pte,t0,va,nadtlb_check_flush_20
L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20
update_ptep ptp,pte,t0,t1
@@ -1372,21 +1322,14 @@ nadtlb_miss_20:
rfir
nop
nadtlb_check_flush_20:
bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate
/* Insert a "flush only" translation */
depdi,z 7,7,3,prot
depdi 1,10,1,prot
/* Drop prot bits from pte and convert to page addr for idtlbt */
convert_for_tlb_insert20 pte
nadtlb_check_alias_20:
do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate
idtlbt pte,prot
rfir
nop
#endif
nadtlb_emulate:
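With the FLUSH bit gone, the nadtlb_check_flush_* blocks that used to insert "flush only" translations collapse into nadtlb_check_alias_* blocks built on the shared do_alias macro. In rough C-like pseudocode (every identifier here is illustrative; the real logic is the assembly above), a non-access data TLB miss is now handled as:

    if (pte_present(pte)) {
            insert_dtlb_entry(va, pte);     /* ordinary translation */
    } else if (in_tmpalias_window(va)) {
            insert_alias_entry(va);         /* do_alias: back the kernel
                                               alias with the right page */
    } else {
            nullify_insn();                 /* nadtlb_emulate: fdc/fic/pdc
                                               on an unmapped page becomes
                                               a no-op */
    }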
@@ -1484,6 +1427,36 @@ itlb_miss_20w:
rfir
nop
naitlb_miss_20w:
/*
* I miss is a little different, since we allow users to fault
* on the gateway page which is in the kernel address space.
*/
space_adjust spc,va,t0
get_pgd spc,ptp
space_check spc,t0,naitlb_fault
L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w
update_ptep ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
iitlbt pte,prot
rfir
nop
naitlb_check_alias_20w:
do_alias spc,t0,t1,va,pte,prot,naitlb_fault
iitlbt pte,prot
rfir
nop
#else
itlb_miss_11:
@@ -1508,6 +1481,38 @@ itlb_miss_11:
rfir
nop
naitlb_miss_11:
get_pgd spc,ptp
space_check spc,t0,naitlb_fault
L2_ptep ptp,pte,t0,va,naitlb_check_alias_11
update_ptep ptp,pte,t0,t1
make_insert_tlb_11 spc,pte,prot
mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
mtsp spc,%sr1
iitlba pte,(%sr1,va)
iitlbp prot,(%sr1,va)
mtsp t0, %sr1 /* Restore sr1 */
rfir
nop
naitlb_check_alias_11:
do_alias spc,t0,t1,va,pte,prot,itlb_fault
iitlba pte,(%sr0, va)
iitlbp prot,(%sr0, va)
rfir
nop
itlb_miss_20:
get_pgd spc,ptp
@@ -1526,6 +1531,32 @@ itlb_miss_20:
rfir
nop
naitlb_miss_20:
get_pgd spc,ptp
space_check spc,t0,naitlb_fault
L2_ptep ptp,pte,t0,va,naitlb_check_alias_20
update_ptep ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
f_extend pte,t0
iitlbt pte,prot
rfir
nop
naitlb_check_alias_20:
do_alias spc,t0,t1,va,pte,prot,naitlb_fault
iitlbt pte,prot
rfir
nop
#endif
#ifdef CONFIG_64BIT
@@ -1662,6 +1693,10 @@ nadtlb_fault:
b intr_save
ldi 17,%r8
naitlb_fault:
b intr_save
ldi 16,%r8
dtlb_fault:
b intr_save
ldi 15,%r8
@@ -52,9 +52,9 @@ static volatile unsigned long cpu_eiem = 0;
*/
static DEFINE_PER_CPU(unsigned long, local_ack_eiem) = ~0UL;
static void cpu_mask_irq(unsigned int irq)
static void cpu_mask_irq(struct irq_data *d)
{
unsigned long eirr_bit = EIEM_MASK(irq);
unsigned long eirr_bit = EIEM_MASK(d->irq);
cpu_eiem &= ~eirr_bit;
/* Do nothing on the other CPUs. If they get this interrupt,
@@ -63,7 +63,7 @@ static void cpu_mask_irq(unsigned int irq)
* then gets disabled */
}
static void cpu_unmask_irq(unsigned int irq)
static void __cpu_unmask_irq(unsigned int irq)
{
unsigned long eirr_bit = EIEM_MASK(irq);
@@ -75,9 +75,14 @@ static void cpu_unmask_irq(unsigned int irq)
smp_send_all_nop();
}
void cpu_ack_irq(unsigned int irq)
static void cpu_unmask_irq(struct irq_data *d)
{
__cpu_unmask_irq(d->irq);
}
void cpu_ack_irq(struct irq_data *d)
{
unsigned long mask = EIEM_MASK(irq);
unsigned long mask = EIEM_MASK(d->irq);
int cpu = smp_processor_id();
/* Clear in EIEM so we can no longer process */
@@ -90,9 +95,9 @@ void cpu_ack_irq(unsigned int irq)
mtctl(mask, 23);
}
void cpu_eoi_irq(unsigned int irq)
void cpu_eoi_irq(struct irq_data *d)
{
unsigned long mask = EIEM_MASK(irq);
unsigned long mask = EIEM_MASK(d->irq);
int cpu = smp_processor_id();
/* set it in the eiems---it's no longer in process */
@@ -103,15 +108,16 @@ void cpu_eoi_irq(unsigned int irq)
}
#ifdef CONFIG_SMP
int cpu_check_affinity(unsigned int irq, const struct cpumask *dest)
int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest)
{
int cpu_dest;
/* timer and ipi have to always be received on all CPUs */
if (CHECK_IRQ_PER_CPU(irq)) {
if (CHECK_IRQ_PER_CPU(irq_to_desc(d->irq)->status)) {
/* Bad linux design decision. The mask has already
* been set; we must reset it */
cpumask_setall(irq_desc[irq].affinity);
* been set; we must reset it. Will fix - tglx
*/
cpumask_setall(d->affinity);
return -EINVAL;
}
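This hunk is the "per-cpu flag problem" fix from the merge summary. CHECK_IRQ_PER_CPU() tests a status bitmask, but the old code handed it the raw interrupt number, so the per-CPU check was testing the wrong value entirely. Assuming the macro's usual definition in include/linux/irq.h of this era:

    #define CHECK_IRQ_PER_CPU(var)  ((var) & IRQ_PER_CPU)

    CHECK_IRQ_PER_CPU(irq);                          /* old: wrong operand */
    CHECK_IRQ_PER_CPU(irq_to_desc(d->irq)->status);  /* new: the flags word */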
@@ -121,33 +127,34 @@ int cpu_check_affinity(unsigned int irq, const struct cpumask *dest)
return cpu_dest;
}
static int cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
static int cpu_set_affinity_irq(struct irq_data *d, const struct cpumask *dest,
bool force)
{
int cpu_dest;
cpu_dest = cpu_check_affinity(irq, dest);
cpu_dest = cpu_check_affinity(d, dest);
if (cpu_dest < 0)
return -1;
cpumask_copy(irq_desc[irq].affinity, dest);
cpumask_copy(d->affinity, dest);
return 0;
}
#endif
static struct irq_chip cpu_interrupt_type = {
.name = "CPU",
.mask = cpu_mask_irq,
.unmask = cpu_unmask_irq,
.ack = cpu_ack_irq,
.eoi = cpu_eoi_irq,
.name = "CPU",
.irq_mask = cpu_mask_irq,
.irq_unmask = cpu_unmask_irq,
.irq_ack = cpu_ack_irq,
.irq_eoi = cpu_eoi_irq,
#ifdef CONFIG_SMP
.set_affinity = cpu_set_affinity_irq,
.irq_set_affinity = cpu_set_affinity_irq,
#endif
/* XXX: Needs to be written. We managed without it so far, but
* we really ought to write it.
*/
.retrigger = NULL,
.irq_retrigger = NULL,
};
int show_interrupts(struct seq_file *p, void *v)
@@ -181,7 +188,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_printf(p, "%10u ", kstat_irqs(i));
#endif
seq_printf(p, " %14s", irq_desc[i].chip->name);
seq_printf(p, " %14s", irq_desc[i].irq_data.chip->name);
#ifndef PARISC_IRQ_CR16_COUNTS
seq_printf(p, " %s", action->name);
@@ -233,14 +240,14 @@ int cpu_claim_irq(unsigned int irq, struct irq_chip *type, void *data)
{
if (irq_desc[irq].action)
return -EBUSY;
if (irq_desc[irq].chip != &cpu_interrupt_type)
if (get_irq_chip(irq) != &cpu_interrupt_type)
return -EBUSY;
/* for iosapic interrupts */
if (type) {
set_irq_chip_and_handler(irq, type, handle_percpu_irq);
set_irq_chip_data(irq, data);
cpu_unmask_irq(irq);
__cpu_unmask_irq(irq);
}
return 0;
}
@@ -289,7 +296,8 @@ int txn_alloc_irq(unsigned int bits_wide)
unsigned long txn_affinity_addr(unsigned int irq, int cpu)
{
#ifdef CONFIG_SMP
cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
struct irq_data *d = irq_get_irq_data(irq);
cpumask_copy(d->affinity, cpumask_of(cpu));
#endif
return per_cpu(cpu_data, cpu).txn_addr;
@@ -333,6 +341,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
unsigned long eirr_val;
int irq, cpu = smp_processor_id();
#ifdef CONFIG_SMP
struct irq_desc *desc;
cpumask_t dest;
#endif
@@ -346,8 +355,9 @@ void do_cpu_irq_mask(struct pt_regs *regs)
irq = eirr_to_irq(eirr_val);
#ifdef CONFIG_SMP
cpumask_copy(&dest, irq_desc[irq].affinity);
if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) &&
desc = irq_to_desc(irq);
cpumask_copy(&dest, desc->irq_data.affinity);
if (CHECK_IRQ_PER_CPU(desc->status) &&
!cpu_isset(smp_processor_id(), dest)) {
int cpu = first_cpu(dest);
@@ -608,93 +608,131 @@ ENTRY(__clear_user_page_asm)
.procend
ENDPROC(__clear_user_page_asm)
ENTRY(flush_kernel_dcache_page_asm)
ENTRY(flush_dcache_page_asm)
.proc
.callinfo NO_CALLS
.entry
ldil L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
depdi 0, 31,32, %r28 /* clear any sign extension */
/* FIXME: page size dependent */
#endif
extrd,u %r26, 56,32, %r26 /* convert phys addr to tlb insert format */
depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */
depdi 0, 63,12, %r28 /* Clear any offset bits */
#else
extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */
depwi 0, 31,12, %r28 /* Clear any offset bits */
#endif
/* Purge any old translation */
pdtlb 0(%r28)
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
ldw R%dcache_stride(%r1), %r1
#ifdef CONFIG_64BIT
depdi,z 1, 63-PAGE_SHIFT,1, %r25
#else
depwi,z 1, 31-PAGE_SHIFT,1, %r25
#endif
add %r26, %r25, %r25
sub %r25, %r23, %r25
1: fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
cmpb,COND(<<) %r26, %r25,1b
fdc,m %r23(%r26)
add %r28, %r25, %r25
sub %r25, %r1, %r25
1: fdc,m %r1(%r28)
fdc,m %r1(%r28)
fdc,m %r1(%r28)
fdc,m %r1(%r28)
fdc,m %r1(%r28)
fdc,m %r1(%r28)
fdc,m %r1(%r28)
fdc,m %r1(%r28)
fdc,m %r1(%r28)
fdc,m %r1(%r28)
fdc,m %r1(%r28)
fdc,m %r1(%r28)
fdc,m %r1(%r28)
fdc,m %r1(%r28)
fdc,m %r1(%r28)
cmpb,COND(<<) %r28, %r25,1b
fdc,m %r1(%r28)
sync
bv %r0(%r2)
nop
pdtlb (%r25)
.exit
.procend
ENDPROC(flush_kernel_dcache_page_asm)
ENTRY(flush_user_dcache_page)
ENDPROC(flush_dcache_page_asm)
ENTRY(flush_icache_page_asm)
.proc
.callinfo NO_CALLS
.entry
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
ldil L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
depdi,z 1,63-PAGE_SHIFT,1, %r25
#if (TMPALIAS_MAP_START >= 0x80000000)
depdi 0, 31,32, %r28 /* clear any sign extension */
/* FIXME: page size dependent */
#endif
extrd,u %r26, 56,32, %r26 /* convert phys addr to tlb insert format */
depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */
depdi 0, 63,12, %r28 /* Clear any offset bits */
#else
depwi,z 1,31-PAGE_SHIFT,1, %r25
extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */
depwi 0, 31,12, %r28 /* Clear any offset bits */
#endif
add %r26, %r25, %r25
sub %r25, %r23, %r25
/* Purge any old translation */
1: fdc,m %r23(%sr3, %r26)
fdc,m %r23(%sr3, %r26)
fdc,m %r23(%sr3, %r26)
fdc,m %r23(%sr3, %r26)
fdc,m %r23(%sr3, %r26)
fdc,m %r23(%sr3, %r26)
fdc,m %r23(%sr3, %r26)
fdc,m %r23(%sr3, %r26)
fdc,m %r23(%sr3, %r26)
fdc,m %r23(%sr3, %r26)
fdc,m %r23(%sr3, %r26)
fdc,m %r23(%sr3, %r26)
fdc,m %r23(%sr3, %r26)
fdc,m %r23(%sr3, %r26)
fdc,m %r23(%sr3, %r26)
cmpb,COND(<<) %r26, %r25,1b
fdc,m %r23(%sr3, %r26)
pitlb (%sr0,%r28)
ldil L%icache_stride, %r1
ldw R%icache_stride(%r1), %r1
#ifdef CONFIG_64BIT
depdi,z 1, 63-PAGE_SHIFT,1, %r25
#else
depwi,z 1, 31-PAGE_SHIFT,1, %r25
#endif
add %r28, %r25, %r25
sub %r25, %r1, %r25
1: fic,m %r1(%r28)
fic,m %r1(%r28)
fic,m %r1(%r28)
fic,m %r1(%r28)
fic,m %r1(%r28)
fic,m %r1(%r28)
fic,m %r1(%r28)
fic,m %r1(%r28)
fic,m %r1(%r28)
fic,m %r1(%r28)
fic,m %r1(%r28)
fic,m %r1(%r28)
fic,m %r1(%r28)
fic,m %r1(%r28)
fic,m %r1(%r28)
cmpb,COND(<<) %r28, %r25,1b
fic,m %r1(%r28)
sync
bv %r0(%r2)
nop
pitlb (%sr0,%r25)
.exit
.procend
ENDPROC(flush_user_dcache_page)
ENDPROC(flush_icache_page_asm)
ENTRY(flush_user_icache_page)
ENTRY(flush_kernel_dcache_page_asm)
.proc
.callinfo NO_CALLS
.entry
@@ -711,23 +749,23 @@ ENTRY(flush_user_icache_page)
sub %r25, %r23, %r25
1: fic,m %r23(%sr3, %r26)
fic,m %r23(%sr3, %r26)
fic,m %r23(%sr3, %r26)
fic,m %r23(%sr3, %r26)
fic,m %r23(%sr3, %r26)
fic,m %r23(%sr3, %r26)
fic,m %r23(%sr3, %r26)
fic,m %r23(%sr3, %r26)
fic,m %r23(%sr3, %r26)
fic,m %r23(%sr3, %r26)
fic,m %r23(%sr3, %r26)
fic,m %r23(%sr3, %r26)
fic,m %r23(%sr3, %r26)
fic,m %r23(%sr3, %r26)
fic,m %r23(%sr3, %r26)
1: fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
fdc,m %r23(%r26)
cmpb,COND(<<) %r26, %r25,1b
fic,m %r23(%sr3, %r26)
fdc,m %r23(%r26)
sync
bv %r0(%r2)
@@ -735,8 +773,7 @@ ENTRY(flush_user_icache_page)
.exit
.procend
ENDPROC(flush_user_icache_page)
ENDPROC(flush_kernel_dcache_page_asm)
ENTRY(purge_kernel_dcache_page)
.proc
@@ -780,69 +817,6 @@ ENTRY(purge_kernel_dcache_page)
.procend
ENDPROC(purge_kernel_dcache_page)
#if 0
/* Currently not used, but it still is a possible alternate
* solution.
*/
ENTRY(flush_alias_page)
.proc
.callinfo NO_CALLS
.entry
tophys_r1 %r26
ldil L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
extrd,u %r26, 56,32, %r26 /* convert phys addr to tlb insert format */
depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */
depdi 0, 63,12, %r28 /* Clear any offset bits */
#else
extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */
depwi 0, 31,12, %r28 /* Clear any offset bits */
#endif
/* Purge any old translation */
pdtlb 0(%r28)
ldil L%dcache_stride, %r1
ldw R%dcache_stride(%r1), %r23
#ifdef CONFIG_64BIT
depdi,z 1, 63-PAGE_SHIFT,1, %r29
#else
depwi,z 1, 31-PAGE_SHIFT,1, %r29
#endif
add %r28, %r29, %r29
sub %r29, %r23, %r29
1: fdc,m %r23(%r28)
fdc,m %r23(%r28)
fdc,m %r23(%r28)
fdc,m %r23(%r28)
fdc,m %r23(%r28)
fdc,m %r23(%r28)
fdc,m %r23(%r28)
fdc,m %r23(%r28)
fdc,m %r23(%r28)
fdc,m %r23(%r28)
fdc,m %r23(%r28)
fdc,m %r23(%r28)
fdc,m %r23(%r28)
fdc,m %r23(%r28)
fdc,m %r23(%r28)
cmpb,COND(<<) %r28, %r29, 1b
fdc,m %r23(%r28)
sync
bv %r0(%r2)
nop
.exit
.procend
#endif
.export flush_user_dcache_range_asm
@@ -865,7 +839,6 @@ flush_user_dcache_range_asm:
.exit
.procend
ENDPROC(flush_alias_page)
ENTRY(flush_kernel_dcache_range_asm)
.proc
@@ -296,25 +296,25 @@ static struct pci_port_ops dino_port_ops = {
.outl = dino_out32
};
static void dino_mask_irq(unsigned int irq)
static void dino_mask_irq(struct irq_data *d)
{
struct dino_device *dino_dev = get_irq_chip_data(irq);
int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
struct dino_device *dino_dev = irq_data_get_irq_chip_data(d);
int local_irq = gsc_find_local_irq(d->irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, irq);
DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, d->irq);
/* Clear the matching bit in the IMR register */
dino_dev->imr &= ~(DINO_MASK_IRQ(local_irq));
__raw_writel(dino_dev->imr, dino_dev->hba.base_addr+DINO_IMR);
}
static void dino_unmask_irq(unsigned int irq)
static void dino_unmask_irq(struct irq_data *d)
{
struct dino_device *dino_dev = get_irq_chip_data(irq);
int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
struct dino_device *dino_dev = irq_data_get_irq_chip_data(d);
int local_irq = gsc_find_local_irq(d->irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
u32 tmp;
DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, irq);
DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, d->irq);
/*
** clear pending IRQ bits
@@ -346,9 +346,9 @@ static void dino_unmask_irq(unsigned int irq)
}
static struct irq_chip dino_interrupt_type = {
.name = "GSC-PCI",
.unmask = dino_unmask_irq,
.mask = dino_mask_irq,
.name = "GSC-PCI",
.irq_unmask = dino_unmask_irq,
.irq_mask = dino_mask_irq,
};
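The driver conversions from here on (dino, eisa, gsc, iosapic, superio) all follow one mechanical pattern: the callback parameter becomes struct irq_data, chip data comes from irq_data_get_irq_chip_data(), anything that still needs the number reads d->irq, and the irq_chip members gain the irq_ prefix. Condensed into a before/after sketch (foo_* is hypothetical):

    /* before */
    static void foo_mask_irq(unsigned int irq)
    {
            struct foo_device *dev = get_irq_chip_data(irq);
            /* ... clear the bit for irq in dev's mask register ... */
    }

    /* after */
    static void foo_mask_irq(struct irq_data *d)
    {
            struct foo_device *dev = irq_data_get_irq_chip_data(d);
            /* ... clear the bit for d->irq in dev's mask register ... */
    }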
@@ -144,8 +144,9 @@ static unsigned int eisa_irq_level __read_mostly; /* default to edge triggered */
/* called by free irq */
static void eisa_mask_irq(unsigned int irq)
static void eisa_mask_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
unsigned long flags;
EISA_DBG("disable irq %d\n", irq);
@@ -164,8 +165,9 @@ static void eisa_mask_irq(unsigned int irq)
}
/* called by request irq */
static void eisa_unmask_irq(unsigned int irq)
static void eisa_unmask_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
unsigned long flags;
EISA_DBG("enable irq %d\n", irq);
@@ -183,9 +185,9 @@ static void eisa_unmask_irq(unsigned int irq)
}
static struct irq_chip eisa_interrupt_type = {
.name = "EISA",
.unmask = eisa_unmask_irq,
.mask = eisa_mask_irq,
.name = "EISA",
.irq_unmask = eisa_unmask_irq,
.irq_mask = eisa_mask_irq,
};
static irqreturn_t eisa_irq(int wax_irq, void *intr_dev)
@@ -105,13 +105,13 @@ int gsc_find_local_irq(unsigned int irq, int *global_irqs, int limit)
return NO_IRQ;
}
static void gsc_asic_mask_irq(unsigned int irq)
static void gsc_asic_mask_irq(struct irq_data *d)
{
struct gsc_asic *irq_dev = get_irq_chip_data(irq);
int local_irq = gsc_find_local_irq(irq, irq_dev->global_irq, 32);
struct gsc_asic *irq_dev = irq_data_get_irq_chip_data(d);
int local_irq = gsc_find_local_irq(d->irq, irq_dev->global_irq, 32);
u32 imr;
DEBPRINTK(KERN_DEBUG "%s(%d) %s: IMR 0x%x\n", __func__, irq,
DEBPRINTK(KERN_DEBUG "%s(%d) %s: IMR 0x%x\n", __func__, d->irq,
irq_dev->name, imr);
/* Disable the IRQ line by clearing the bit in the IMR */
@@ -120,13 +120,13 @@ static void gsc_asic_mask_irq(unsigned int irq)
gsc_writel(imr, irq_dev->hpa + OFFSET_IMR);
}
static void gsc_asic_unmask_irq(unsigned int irq)
static void gsc_asic_unmask_irq(struct irq_data *d)
{
struct gsc_asic *irq_dev = get_irq_chip_data(irq);
int local_irq = gsc_find_local_irq(irq, irq_dev->global_irq, 32);
struct gsc_asic *irq_dev = irq_data_get_irq_chip_data(d);
int local_irq = gsc_find_local_irq(d->irq, irq_dev->global_irq, 32);
u32 imr;
DEBPRINTK(KERN_DEBUG "%s(%d) %s: IMR 0x%x\n", __func__, irq,
DEBPRINTK(KERN_DEBUG "%s(%d) %s: IMR 0x%x\n", __func__, d->irq,
irq_dev->name, imr);
/* Enable the IRQ line by setting the bit in the IMR */
@@ -140,9 +140,9 @@ static void gsc_asic_unmask_irq(unsigned int irq)
}
static struct irq_chip gsc_asic_interrupt_type = {
.name = "GSC-ASIC",
.unmask = gsc_asic_unmask_irq,
.mask = gsc_asic_mask_irq,
.name = "GSC-ASIC",
.irq_unmask = gsc_asic_unmask_irq,
.irq_mask = gsc_asic_mask_irq,
};
int gsc_assign_irq(struct irq_chip *type, void *data)
@@ -615,10 +615,10 @@ iosapic_set_irt_data( struct vector_info *vi, u32 *dp0, u32 *dp1)
}
static void iosapic_mask_irq(unsigned int irq)
static void iosapic_mask_irq(struct irq_data *d)
{
unsigned long flags;
struct vector_info *vi = get_irq_chip_data(irq);
struct vector_info *vi = irq_data_get_irq_chip_data(d);
u32 d0, d1;
spin_lock_irqsave(&iosapic_lock, flags);
@@ -628,9 +628,9 @@ static void iosapic_mask_irq(unsigned int irq)
spin_unlock_irqrestore(&iosapic_lock, flags);
}
static void iosapic_unmask_irq(unsigned int irq)
static void iosapic_unmask_irq(struct irq_data *d)
{
struct vector_info *vi = get_irq_chip_data(irq);
struct vector_info *vi = irq_data_get_irq_chip_data(d);
u32 d0, d1;
/* data is initialized by fixup_irq */
@@ -666,34 +666,34 @@ printk("\n");
* enables their IRQ. It can lead to "interesting" race conditions
* in the driver initialization sequence.
*/
DBG(KERN_DEBUG "enable_irq(%d): eoi(%p, 0x%x)\n", irq,
DBG(KERN_DEBUG "enable_irq(%d): eoi(%p, 0x%x)\n", d->irq,
vi->eoi_addr, vi->eoi_data);
iosapic_eoi(vi->eoi_addr, vi->eoi_data);
}
static void iosapic_eoi_irq(unsigned int irq)
static void iosapic_eoi_irq(struct irq_data *d)
{
struct vector_info *vi = get_irq_chip_data(irq);
struct vector_info *vi = irq_data_get_irq_chip_data(d);
iosapic_eoi(vi->eoi_addr, vi->eoi_data);
cpu_eoi_irq(irq);
cpu_eoi_irq(d);
}
#ifdef CONFIG_SMP
static int iosapic_set_affinity_irq(unsigned int irq,
const struct cpumask *dest)
static int iosapic_set_affinity_irq(struct irq_data *d,
const struct cpumask *dest, bool force)
{
struct vector_info *vi = get_irq_chip_data(irq);
struct vector_info *vi = irq_data_get_irq_chip_data(d);
u32 d0, d1, dummy_d0;
unsigned long flags;
int dest_cpu;
dest_cpu = cpu_check_affinity(irq, dest);
dest_cpu = cpu_check_affinity(d, dest);
if (dest_cpu < 0)
return -1;
cpumask_copy(irq_desc[irq].affinity, cpumask_of(dest_cpu));
vi->txn_addr = txn_affinity_addr(irq, dest_cpu);
cpumask_copy(d->affinity, cpumask_of(dest_cpu));
vi->txn_addr = txn_affinity_addr(d->irq, dest_cpu);
spin_lock_irqsave(&iosapic_lock, flags);
/* d1 contains the destination CPU, so only want to set that
@@ -708,13 +708,13 @@ static int iosapic_set_affinity_irq(unsigned int irq,
#endif
static struct irq_chip iosapic_interrupt_type = {
.name = "IO-SAPIC-level",
.unmask = iosapic_unmask_irq,
.mask = iosapic_mask_irq,
.ack = cpu_ack_irq,
.eoi = iosapic_eoi_irq,
.name = "IO-SAPIC-level",
.irq_unmask = iosapic_unmask_irq,
.irq_mask = iosapic_mask_irq,
.irq_ack = cpu_ack_irq,
.irq_eoi = iosapic_eoi_irq,
#ifdef CONFIG_SMP
.set_affinity = iosapic_set_affinity_irq,
.irq_set_affinity = iosapic_set_affinity_irq,
#endif
};
@@ -286,8 +286,9 @@ superio_init(struct pci_dev *pcidev)
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87560_LIO, superio_init);
static void superio_mask_irq(unsigned int irq)
static void superio_mask_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
u8 r8;
if ((irq < 1) || (irq == 2) || (irq > 7)) {
@@ -303,8 +304,9 @@ static void superio_mask_irq(unsigned int irq)
outb (r8,IC_PIC1+1);
}
static void superio_unmask_irq(unsigned int irq)
static void superio_unmask_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
u8 r8;
if ((irq < 1) || (irq == 2) || (irq > 7)) {
@@ -320,9 +322,9 @@ static void superio_unmask_irq(unsigned int irq)
}
static struct irq_chip superio_interrupt_type = {
.name = SUPERIO,
.unmask = superio_unmask_irq,
.mask = superio_mask_irq,
.name = SUPERIO,
.irq_unmask = superio_unmask_irq,
.irq_mask = superio_mask_irq,
};
#ifdef DEBUG_SUPERIO_INIT