Commit 13fd7aeb authored by Paul Mundt

Merge branches 'sh/dwarf-unwinder', 'sh/g3-prep' and 'sh/stable-updates'

@@ -243,16 +243,13 @@ struct dwarf_cie {
unsigned long cie_pointer;
struct list_head link;
unsigned long flags;
#define DWARF_CIE_Z_AUGMENTATION (1 << 0)
/*
* 'mod' will be non-NULL if this CIE came from a module's
* .eh_frame section.
*/
struct module *mod;
/* linked-list entry if this CIE is from a module */
struct list_head link;
struct rb_node node;
};
/**
@@ -266,13 +263,11 @@ struct dwarf_fde {
unsigned long address_range;
unsigned char *instructions;
unsigned char *end;
/* linked-list entry if this FDE is from a module */
struct list_head link;
/*
* 'mod' will be non-NULL if this FDE came from a module's
* .eh_frame section.
*/
struct module *mod;
struct rb_node node;
};
/**
......
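The reworked CIE/FDE structures above replace the `mod` back-pointer with two pieces of bookkeeping: an rb_node that links each entry into a global red-black lookup tree, and a list_head that, for entries parsed out of a module's .eh_frame, threads the entry onto that module's private list so unload can free them without scanning a global list. A minimal sketch of that embedding pattern, assuming only the generic <linux/rbtree.h>/<linux/list.h> helpers; frame_entry and its fields are hypothetical names, not part of this commit:

#include <linux/list.h>
#include <linux/rbtree.h>

/* Hypothetical entry mirroring the dwarf_cie/dwarf_fde layout. */
struct frame_entry {
	unsigned long key;		/* lookup key, e.g. cie_pointer        */
	struct list_head link;		/* per-module list, if module-supplied */
	struct rb_node node;		/* linkage into the global lookup tree */
};

static struct rb_root entry_root = RB_ROOT;	/* global lookup tree */

/* rb_entry() is container_of(): recover the entry from its embedded node. */
static inline struct frame_entry *to_entry(struct rb_node *n)
{
	return rb_entry(n, struct frame_entry, node);
}

The lookup, insertion and teardown sketches further down reuse this shape.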
#ifndef _ASM_SH_MODULE_H
#define _ASM_SH_MODULE_H
#include <asm-generic/module.h>
struct mod_arch_specific {
#ifdef CONFIG_DWARF_UNWINDER
struct list_head fde_list;
struct list_head cie_list;
#endif
};
#ifdef CONFIG_64BIT
#define Elf_Shdr Elf64_Shdr
#define Elf_Sym Elf64_Sym
#define Elf_Ehdr Elf64_Ehdr
#else
#define Elf_Shdr Elf32_Shdr
#define Elf_Sym Elf32_Sym
#define Elf_Ehdr Elf32_Ehdr
#endif
#ifdef CONFIG_CPU_LITTLE_ENDIAN
# ifdef CONFIG_CPU_SH2
......
@@ -132,7 +132,6 @@ ENTRY(tlb_protection_violation_store)
mov #1, r5
call_handle_tlbmiss:
setup_frame_reg
mov.l 1f, r0
mov r5, r8
mov.l @r0, r6
@@ -365,6 +364,8 @@ handle_exception:
mov.l @k2, k2 ! read out vector and keep in k2
handle_exception_special:
setup_frame_reg
! Setup return address and jump to exception handler
mov.l 7f, r9 ! fetch return address
stc r2_bank, r0 ! k2 (vector)
......
@@ -39,10 +39,10 @@ static mempool_t *dwarf_frame_pool;
static struct kmem_cache *dwarf_reg_cachep;
static mempool_t *dwarf_reg_pool;
static LIST_HEAD(dwarf_cie_list);
static struct rb_root cie_root;
static DEFINE_SPINLOCK(dwarf_cie_lock);
static LIST_HEAD(dwarf_fde_list);
static struct rb_root fde_root;
static DEFINE_SPINLOCK(dwarf_fde_lock);
static struct dwarf_cie *cached_cie;
@@ -301,7 +301,8 @@ static inline int dwarf_entry_len(char *addr, unsigned long *len)
*/
static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
{
struct dwarf_cie *cie;
struct rb_node **rb_node = &cie_root.rb_node;
struct dwarf_cie *cie = NULL;
unsigned long flags;
spin_lock_irqsave(&dwarf_cie_lock, flags);
@@ -315,16 +316,24 @@ static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
goto out;
}
list_for_each_entry(cie, &dwarf_cie_list, link) {
if (cie->cie_pointer == cie_ptr) {
cached_cie = cie;
break;
while (*rb_node) {
struct dwarf_cie *cie_tmp;
cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node);
BUG_ON(!cie_tmp);
if (cie_ptr == cie_tmp->cie_pointer) {
cie = cie_tmp;
cached_cie = cie_tmp;
goto out;
} else {
if (cie_ptr < cie_tmp->cie_pointer)
rb_node = &(*rb_node)->rb_left;
else
rb_node = &(*rb_node)->rb_right;
}
}
/* Couldn't find the entry in the list. */
if (&cie->link == &dwarf_cie_list)
cie = NULL;
out:
spin_unlock_irqrestore(&dwarf_cie_lock, flags);
return cie;
@@ -336,25 +345,34 @@ static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
*/
struct dwarf_fde *dwarf_lookup_fde(unsigned long pc)
{
struct dwarf_fde *fde;
struct rb_node **rb_node = &fde_root.rb_node;
struct dwarf_fde *fde = NULL;
unsigned long flags;
spin_lock_irqsave(&dwarf_fde_lock, flags);
list_for_each_entry(fde, &dwarf_fde_list, link) {
unsigned long start, end;
while (*rb_node) {
struct dwarf_fde *fde_tmp;
unsigned long tmp_start, tmp_end;
start = fde->initial_location;
end = fde->initial_location + fde->address_range;
fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node);
BUG_ON(!fde_tmp);
if (pc >= start && pc < end)
break;
}
tmp_start = fde_tmp->initial_location;
tmp_end = fde_tmp->initial_location + fde_tmp->address_range;
/* Couldn't find the entry in the list. */
if (&fde->link == &dwarf_fde_list)
fde = NULL;
if (pc < tmp_start) {
rb_node = &(*rb_node)->rb_left;
} else {
if (pc < tmp_end) {
fde = fde_tmp;
goto out;
} else
rb_node = &(*rb_node)->rb_right;
}
}
out:
spin_unlock_irqrestore(&dwarf_fde_lock, flags);
return fde;
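dwarf_lookup_fde() now descends the red-black tree keyed on each FDE's [initial_location, initial_location + address_range) interval instead of scanning a list, so finding the FDE covering a pc is O(log n). Because FDE ranges should not overlap, a single three-way comparison per node is enough. A condensed, self-contained sketch of that walk (hypothetical range_entry type; the spinlock taken by the real function is omitted):

#include <linux/rbtree.h>

struct range_entry {
	unsigned long start, len;	/* covers [start, start + len) */
	struct rb_node node;
};

static struct range_entry *range_lookup(struct rb_root *root, unsigned long pc)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct range_entry *e = rb_entry(n, struct range_entry, node);

		if (pc < e->start)
			n = n->rb_left;			/* this entry starts above pc  */
		else if (pc < e->start + e->len)
			return e;			/* pc falls inside this range  */
		else
			n = n->rb_right;		/* this entry ends at/below pc */
	}

	return NULL;	/* no entry covers pc */
}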
@@ -540,6 +558,8 @@ void dwarf_free_frame(struct dwarf_frame *frame)
mempool_free(frame, dwarf_frame_pool);
}
extern void ret_from_irq(void);
/**
* dwarf_unwind_stack - unwind the stack
*
@@ -550,7 +570,7 @@ void dwarf_free_frame(struct dwarf_frame *frame)
* on the callstack. Each of the lower (older) stack frames are
* linked via the "prev" member.
*/
struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
struct dwarf_frame *prev)
{
struct dwarf_frame *frame;
@@ -678,6 +698,24 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
addr = frame->cfa + reg->addr;
frame->return_addr = __raw_readl(addr);
/*
* Ah, the joys of unwinding through interrupts.
*
* Interrupts are tricky - the DWARF info needs to be _really_
* accurate and unfortunately I'm seeing a lot of bogus DWARF
* info. For example, I've seen interrupts occur in epilogues
* just after the frame pointer (r14) had been restored. The
* problem was that the DWARF info claimed that the CFA could be
* reached by using the value of the frame pointer before it was
* restored.
*
* So until the compiler can be trusted to produce reliable
* DWARF info when it really matters, let's stop unwinding once
* we've calculated the function that was interrupted.
*/
if (prev && prev->pc == (unsigned long)ret_from_irq)
frame->return_addr = 0;
return frame;
bail:
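The check above zeroes return_addr once the unwinder has produced the frame that was interrupted (the previous frame's pc is ret_from_irq), and callers treat a zero return address as the end of the trace. The following is only a sketch of how a consumer might honour that convention, not the unwinder's actual callback; it assumes the dwarf_unwind_stack()/dwarf_free_frame() prototypes and struct dwarf_frame from arch/sh's <asm/dwarf.h>, with report() standing in for whatever records each address:

#include <asm/dwarf.h>

static void walk_stack(unsigned long pc, void (*report)(unsigned long addr))
{
	struct dwarf_frame *frame = NULL, *prev = NULL;

	while (1) {
		frame = dwarf_unwind_stack(pc, prev);

		/* The previous frame is only needed while unwinding the next one. */
		if (prev)
			dwarf_free_frame(prev);
		prev = frame;

		/*
		 * NULL means the unwind failed; a zero return address means we
		 * have reached the interrupted function. Stop either way.
		 */
		if (!frame || !frame->return_addr)
			break;

		report(frame->return_addr);
		pc = frame->return_addr;
	}

	if (frame)
		dwarf_free_frame(frame);
}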
@@ -688,6 +726,8 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
unsigned char *end, struct module *mod)
{
struct rb_node **rb_node = &cie_root.rb_node;
struct rb_node *parent;
struct dwarf_cie *cie;
unsigned long flags;
int count;
@@ -782,11 +822,30 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
cie->initial_instructions = p;
cie->instructions_end = end;
cie->mod = mod;
/* Add to list */
spin_lock_irqsave(&dwarf_cie_lock, flags);
list_add_tail(&cie->link, &dwarf_cie_list);
while (*rb_node) {
struct dwarf_cie *cie_tmp;
cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node);
parent = *rb_node;
if (cie->cie_pointer < cie_tmp->cie_pointer)
rb_node = &parent->rb_left;
else if (cie->cie_pointer >= cie_tmp->cie_pointer)
rb_node = &parent->rb_right;
else
WARN_ON(1);
}
rb_link_node(&cie->node, parent, rb_node);
rb_insert_color(&cie->node, &cie_root);
if (mod != NULL)
list_add_tail(&cie->link, &mod->arch.cie_list);
spin_unlock_irqrestore(&dwarf_cie_lock, flags);
return 0;
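dwarf_parse_cie() links the new CIE into the tree with the standard two-step rbtree idiom: walk down comparing keys while remembering the parent, then rb_link_node() to hook the node in and rb_insert_color() to rebalance. A generic, self-contained sketch of that idiom keyed on a single value (hypothetical keyed_entry type; note that parent starts as NULL so an empty tree links the new node at the root, and locking is omitted):

#include <linux/rbtree.h>

struct keyed_entry {
	unsigned long key;
	struct rb_node node;
};

/* Insert 'new' into an rb-tree ordered by key; equal keys go to the right. */
static void keyed_insert(struct rb_root *root, struct keyed_entry *new)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;

	while (*p) {
		struct keyed_entry *cur = rb_entry(*p, struct keyed_entry, node);

		parent = *p;
		if (new->key < cur->key)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	rb_link_node(&new->node, parent, p);	/* attach as a (red) leaf  */
	rb_insert_color(&new->node, root);	/* restore rb-tree balance */
}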
@@ -796,6 +855,8 @@ static int dwarf_parse_fde(void *entry, u32 entry_type,
void *start, unsigned long len,
unsigned char *end, struct module *mod)
{
struct rb_node **rb_node = &fde_root.rb_node;
struct rb_node *parent;
struct dwarf_fde *fde;
struct dwarf_cie *cie;
unsigned long flags;
@@ -843,11 +904,38 @@ static int dwarf_parse_fde(void *entry, u32 entry_type,
fde->instructions = p;
fde->end = end;
fde->mod = mod;
/* Add to list. */
spin_lock_irqsave(&dwarf_fde_lock, flags);
list_add_tail(&fde->link, &dwarf_fde_list);
while (*rb_node) {
struct dwarf_fde *fde_tmp;
unsigned long tmp_start, tmp_end;
unsigned long start, end;
fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node);
start = fde->initial_location;
end = fde->initial_location + fde->address_range;
tmp_start = fde_tmp->initial_location;
tmp_end = fde_tmp->initial_location + fde_tmp->address_range;
parent = *rb_node;
if (start < tmp_start)
rb_node = &parent->rb_left;
else if (start >= tmp_end)
rb_node = &parent->rb_right;
else
WARN_ON(1);
}
rb_link_node(&fde->node, parent, rb_node);
rb_insert_color(&fde->node, &fde_root);
if (mod != NULL)
list_add_tail(&fde->link, &mod->arch.fde_list);
spin_unlock_irqrestore(&dwarf_fde_lock, flags);
return 0;
@@ -892,19 +980,29 @@ static struct unwinder dwarf_unwinder = {
static void dwarf_unwinder_cleanup(void)
{
struct dwarf_cie *cie, *cie_tmp;
struct dwarf_fde *fde, *fde_tmp;
struct rb_node **fde_rb_node = &fde_root.rb_node;
struct rb_node **cie_rb_node = &cie_root.rb_node;
/*
* Deallocate all the memory allocated for the DWARF unwinder.
* Traverse all the FDE/CIE lists and remove and free all the
* memory associated with those data structures.
*/
list_for_each_entry_safe(cie, cie_tmp, &dwarf_cie_list, link)
kfree(cie);
while (*fde_rb_node) {
struct dwarf_fde *fde;
list_for_each_entry_safe(fde, fde_tmp, &dwarf_fde_list, link)
fde = rb_entry(*fde_rb_node, struct dwarf_fde, node);
rb_erase(*fde_rb_node, &fde_root);
kfree(fde);
}
while (*cie_rb_node) {
struct dwarf_cie *cie;
cie = rb_entry(*cie_rb_node, struct dwarf_cie, node);
rb_erase(*cie_rb_node, &cie_root);
kfree(cie);
}
kmem_cache_destroy(dwarf_reg_cachep);
kmem_cache_destroy(dwarf_frame_cachep);
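dwarf_unwinder_cleanup() above drains each tree by repeatedly erasing whatever node currently sits at the root until the tree is empty, freeing the containing entry each time. The same pattern in isolation, again with a hypothetical entry type:

#include <linux/rbtree.h>
#include <linux/slab.h>

struct keyed_entry {
	unsigned long key;
	struct rb_node node;
};

/* Free every entry in the tree by always erasing the current root. */
static void keyed_tree_destroy(struct rb_root *root)
{
	struct rb_node *n;

	while ((n = root->rb_node) != NULL) {
		struct keyed_entry *e = rb_entry(n, struct keyed_entry, node);

		rb_erase(n, root);
		kfree(e);
	}
}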
@@ -1004,6 +1102,8 @@ int module_dwarf_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
/* Did we find the .eh_frame section? */
if (i != hdr->e_shnum) {
INIT_LIST_HEAD(&me->arch.cie_list);
INIT_LIST_HEAD(&me->arch.fde_list);
err = dwarf_parse_section((char *)start, (char *)end, me);
if (err) {
printk(KERN_WARNING "%s: failed to parse DWARF info\n",
@@ -1024,38 +1124,26 @@ int module_dwarf_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
*/
void module_dwarf_cleanup(struct module *mod)
{
struct dwarf_fde *fde;
struct dwarf_cie *cie;
struct dwarf_fde *fde, *ftmp;
struct dwarf_cie *cie, *ctmp;
unsigned long flags;
spin_lock_irqsave(&dwarf_cie_lock, flags);
again_cie:
list_for_each_entry(cie, &dwarf_cie_list, link) {
if (cie->mod == mod)
break;
}
if (&cie->link != &dwarf_cie_list) {
list_for_each_entry_safe(cie, ctmp, &mod->arch.cie_list, link) {
list_del(&cie->link);
rb_erase(&cie->node, &cie_root);
kfree(cie);
goto again_cie;
}
spin_unlock_irqrestore(&dwarf_cie_lock, flags);
spin_lock_irqsave(&dwarf_fde_lock, flags);
again_fde:
list_for_each_entry(fde, &dwarf_fde_list, link) {
if (fde->mod == mod)
break;
}
if (&fde->link != &dwarf_fde_list) {
list_for_each_entry_safe(fde, ftmp, &mod->arch.fde_list, link) {
list_del(&fde->link);
rb_erase(&fde->node, &fde_root);
kfree(fde);
goto again_fde;
}
spin_unlock_irqrestore(&dwarf_fde_lock, flags);
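module_dwarf_cleanup() no longer rescans the global lists with the goto-retry pattern; each module now carries its own cie_list/fde_list, so teardown is a straight list_for_each_entry_safe() walk that unhooks every entry from both the module's list and the global tree. A self-contained sketch of that double unhook, with a hypothetical entry type embedding both a list_head and an rb_node (locking omitted):

#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/slab.h>

struct mod_entry {
	struct list_head link;	/* on the owning module's private list */
	struct rb_node node;	/* in the global lookup tree           */
};

/* Free every entry a module contributed, unhooking it from both structures. */
static void mod_entries_free(struct list_head *mod_list, struct rb_root *root)
{
	struct mod_entry *e, *tmp;

	list_for_each_entry_safe(e, tmp, mod_list, link) {
		list_del(&e->link);
		rb_erase(&e->node, root);
		kfree(e);
	}
}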
@@ -1074,8 +1162,6 @@ void module_dwarf_cleanup(struct module *mod)
static int __init dwarf_unwinder_init(void)
{
int err;
INIT_LIST_HEAD(&dwarf_cie_list);
INIT_LIST_HEAD(&dwarf_fde_list);
dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
sizeof(struct dwarf_frame), 0,
......
@@ -70,8 +70,14 @@ ret_from_exception:
CFI_STARTPROC simple
CFI_DEF_CFA r14, 0
CFI_REL_OFFSET 17, 64
CFI_REL_OFFSET 15, 0
CFI_REL_OFFSET 15, 60
CFI_REL_OFFSET 14, 56
CFI_REL_OFFSET 13, 52
CFI_REL_OFFSET 12, 48
CFI_REL_OFFSET 11, 44
CFI_REL_OFFSET 10, 40
CFI_REL_OFFSET 9, 36
CFI_REL_OFFSET 8, 32
preempt_stop()
ENTRY(ret_from_irq)
!
......
@@ -518,34 +518,6 @@ static inline int sci_rxd_in(struct uart_port *port)
{
if (port->mapbase == 0xfffffe80)
return __raw_readb(SCPDR)&0x01 ? 1 : 0; /* SCI */
if (port->mapbase == 0xa4000150)
return __raw_readb(SCPDR)&0x10 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xa4000140)
return __raw_readb(SCPDR)&0x04 ? 1 : 0; /* IRDA */
return 1;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7705)
static inline int sci_rxd_in(struct uart_port *port)
{
if (port->mapbase == SCIF0)
return __raw_readb(SCPDR)&0x04 ? 1 : 0; /* IRDA */
if (port->mapbase == SCIF2)
return __raw_readb(SCPDR)&0x10 ? 1 : 0; /* SCIF */
return 1;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
static inline int sci_rxd_in(struct uart_port *port)
{
return sci_in(port,SCxSR)&0x0010 ? 1 : 0;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7720) || \
defined(CONFIG_CPU_SUBTYPE_SH7721)
static inline int sci_rxd_in(struct uart_port *port)
{
if (port->mapbase == 0xa4430000)
return sci_in(port, SCxSR) & 0x0003 ? 1 : 0;
else if (port->mapbase == 0xa4438000)
return sci_in(port, SCxSR) & 0x0003 ? 1 : 0;
return 1;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7750) || \
@@ -558,207 +530,17 @@ static inline int sci_rxd_in(struct uart_port *port)
{
if (port->mapbase == 0xffe00000)
return __raw_readb(SCSPTR1)&0x01 ? 1 : 0; /* SCI */
if (port->mapbase == 0xffe80000)
return __raw_readw(SCSPTR2)&0x0001 ? 1 : 0; /* SCIF */
return 1;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH4_202)
static inline int sci_rxd_in(struct uart_port *port)
{
if (port->mapbase == 0xffe80000)
return __raw_readw(SCSPTR2)&0x0001 ? 1 : 0; /* SCIF */
return 1;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
static inline int sci_rxd_in(struct uart_port *port)
{
if (port->mapbase == 0xfe4b0000)
return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0;
if (port->mapbase == 0xfe4c0000)
return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0;
if (port->mapbase == 0xfe4d0000)
return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7760)
static inline int sci_rxd_in(struct uart_port *port)
{
if (port->mapbase == 0xfe600000)
return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xfe610000)
return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xfe620000)
return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
return 1;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7343)
static inline int sci_rxd_in(struct uart_port *port)
{
if (port->mapbase == 0xffe00000)
return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xffe10000)
return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xffe20000)
return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xffe30000)
return __raw_readw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
return 1;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7366)
static inline int sci_rxd_in(struct uart_port *port)
{
if (port->mapbase == 0xffe00000)
return __raw_readb(SCPDR0) & 0x0001 ? 1 : 0; /* SCIF0 */
return 1;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7722)
static inline int sci_rxd_in(struct uart_port *port)
{
if (port->mapbase == 0xffe00000)
return __raw_readb(PSDR) & 0x02 ? 1 : 0; /* SCIF0 */
if (port->mapbase == 0xffe10000)
return __raw_readb(PADR) & 0x40 ? 1 : 0; /* SCIF1 */
if (port->mapbase == 0xffe20000)
return __raw_readb(PWDR) & 0x04 ? 1 : 0; /* SCIF2 */
return 1;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7723)
static inline int sci_rxd_in(struct uart_port *port)
{
if (port->mapbase == 0xffe00000)
return __raw_readb(SCSPTR0) & 0x0008 ? 1 : 0; /* SCIF0 */
if (port->mapbase == 0xffe10000)
return __raw_readb(SCSPTR1) & 0x0020 ? 1 : 0; /* SCIF1 */
if (port->mapbase == 0xffe20000)
return __raw_readb(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF2 */
if (port->mapbase == 0xa4e30000)
return __raw_readb(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF3 */
if (port->mapbase == 0xa4e40000)
return __raw_readb(SCSPTR4) & 0x0001 ? 1 : 0; /* SCIF4 */
if (port->mapbase == 0xa4e50000)
return __raw_readb(SCSPTR5) & 0x0008 ? 1 : 0; /* SCIF5 */
return 1;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7724)
# define SCFSR 0x0010
# define SCASSR 0x0014
static inline int sci_rxd_in(struct uart_port *port)
{
if (port->type == PORT_SCIF)
return __raw_readw((port->mapbase + SCFSR)) & SCIF_BRK ? 1 : 0;
if (port->type == PORT_SCIFA)
return __raw_readw((port->mapbase + SCASSR)) & SCIF_BRK ? 1 : 0;
return 1;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH5_101) || defined(CONFIG_CPU_SUBTYPE_SH5_103)
static inline int sci_rxd_in(struct uart_port *port)
{
return sci_in(port, SCSPTR)&0x0001 ? 1 : 0; /* SCIF */
}
#elif defined(__H8300H__) || defined(__H8300S__)
static inline int sci_rxd_in(struct uart_port *port)
{
int ch = (port->mapbase - SMR0) >> 3;
return (H8300_SCI_DR(ch) & h8300_sci_pins[ch].rx) ? 1 : 0;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
static inline int sci_rxd_in(struct uart_port *port)
{
if (port->mapbase == 0xffe00000)
return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xffe08000)
return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xffe10000)
return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF/IRDA */
return 1;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7770)
static inline int sci_rxd_in(struct uart_port *port)
{
if (port->mapbase == 0xff923000)
return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xff924000)
return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xff925000)
return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
return 1;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7780)
static inline int sci_rxd_in(struct uart_port *port)
{
if (port->mapbase == 0xffe00000)
return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xffe10000)
return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
return 1;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7785) || \
defined(CONFIG_CPU_SUBTYPE_SH7786)
static inline int sci_rxd_in(struct uart_port *port)
{
if (port->mapbase == 0xffea0000)
return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xffeb0000)
return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xffec0000)
return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xffed0000)
return __raw_readw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xffee0000)
return __raw_readw(SCSPTR4) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xffef0000)
return __raw_readw(SCSPTR5) & 0x0001 ? 1 : 0; /* SCIF */
return 1;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7201) || \
defined(CONFIG_CPU_SUBTYPE_SH7203) || \
defined(CONFIG_CPU_SUBTYPE_SH7206) || \
defined(CONFIG_CPU_SUBTYPE_SH7263)
static inline int sci_rxd_in(struct uart_port *port)
{
if (port->mapbase == 0xfffe8000)
return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xfffe8800)
return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xfffe9000)
return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xfffe9800)
return __raw_readw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
#if defined(CONFIG_CPU_SUBTYPE_SH7201)
if (port->mapbase == 0xfffeA000)
return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xfffeA800)
return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xfffeB000)
return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xfffeB800)
return __raw_readw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
#endif
return 1;
}
#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
static inline int sci_rxd_in(struct uart_port *port)
{
if (port->mapbase == 0xf8400000)
return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xf8410000)
return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xf8420000)
return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
return 1;
}
#elif defined(CONFIG_CPU_SUBTYPE_SHX3)
#else /* default case for non-SCI processors */
static inline int sci_rxd_in(struct uart_port *port)
{
if (port->mapbase == 0xffc30000)
return __raw_readw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xffc40000)
return __raw_readw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xffc50000)
return __raw_readw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */
if (port->mapbase == 0xffc60000)
return __raw_readw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */
return 1;
}
#endif
......