Commit 70189e27 authored by David S. Miller

[SPARC64]: Kill some port-specific bloat.

- Uninline down/up/down_trylock/down_interruptible
- Uninline PCI controller cfg space helpers
- Uninline ip_fast_csum
- Uninline some routines in signal32/sys_sparc32/unaligned
- Uninline arch/sparc64/mm/fault.c:get_fault_insn
- NUM_IVECS need only be IMAP_INR + 1
parent 71660e15
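
The pattern used for every uninlined routine below is the standard one: the body moves out of the header into a .c file, and the header keeps only an extern declaration. A minimal sketch with a hypothetical helper, not code from this commit:

/* Before, in some header foo.h (hypothetical): */
static __inline__ int foo_helper(int x)
{
        return x * 2;
}

/* After: foo.h keeps only the declaration... */
extern int foo_helper(int x);

/* ...and foo.c gets the single out-of-line definition. */
int foo_helper(int x)
{
        return x * 2;
}

Every call site then shares one copy of the code instead of expanding it inline, which is the "bloat" the changelog refers to.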
@@ -72,11 +72,135 @@ unsigned char pci_highest_busnum = 0;
*/
int pci_device_reorder = 0;
spinlock_t pci_poke_lock = SPIN_LOCK_UNLOCKED;
volatile int pci_poke_in_progress;
volatile int pci_poke_cpu = -1;
volatile int pci_poke_faulted;
static spinlock_t pci_poke_lock = SPIN_LOCK_UNLOCKED;
void pci_config_read8(u8 *addr, u8 *ret)
{
unsigned long flags;
u8 byte;
spin_lock_irqsave(&pci_poke_lock, flags);
pci_poke_cpu = smp_processor_id();
pci_poke_in_progress = 1;
pci_poke_faulted = 0;
__asm__ __volatile__("membar #Sync\n\t"
"lduba [%1] %2, %0\n\t"
"membar #Sync"
: "=r" (byte)
: "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
: "memory");
pci_poke_in_progress = 0;
pci_poke_cpu = -1;
if (!pci_poke_faulted)
*ret = byte;
spin_unlock_irqrestore(&pci_poke_lock, flags);
}
void pci_config_read16(u16 *addr, u16 *ret)
{
unsigned long flags;
u16 word;
spin_lock_irqsave(&pci_poke_lock, flags);
pci_poke_cpu = smp_processor_id();
pci_poke_in_progress = 1;
pci_poke_faulted = 0;
__asm__ __volatile__("membar #Sync\n\t"
"lduha [%1] %2, %0\n\t"
"membar #Sync"
: "=r" (word)
: "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
: "memory");
pci_poke_in_progress = 0;
pci_poke_cpu = -1;
if (!pci_poke_faulted)
*ret = word;
spin_unlock_irqrestore(&pci_poke_lock, flags);
}
void pci_config_read32(u32 *addr, u32 *ret)
{
unsigned long flags;
u32 dword;
spin_lock_irqsave(&pci_poke_lock, flags);
pci_poke_cpu = smp_processor_id();
pci_poke_in_progress = 1;
pci_poke_faulted = 0;
__asm__ __volatile__("membar #Sync\n\t"
"lduwa [%1] %2, %0\n\t"
"membar #Sync"
: "=r" (dword)
: "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
: "memory");
pci_poke_in_progress = 0;
pci_poke_cpu = -1;
if (!pci_poke_faulted)
*ret = dword;
spin_unlock_irqrestore(&pci_poke_lock, flags);
}
void pci_config_write8(u8 *addr, u8 val)
{
unsigned long flags;
spin_lock_irqsave(&pci_poke_lock, flags);
pci_poke_cpu = smp_processor_id();
pci_poke_in_progress = 1;
pci_poke_faulted = 0;
__asm__ __volatile__("membar #Sync\n\t"
"stba %0, [%1] %2\n\t"
"membar #Sync"
: /* no outputs */
: "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
: "memory");
pci_poke_in_progress = 0;
pci_poke_cpu = -1;
spin_unlock_irqrestore(&pci_poke_lock, flags);
}
void pci_config_write16(u16 *addr, u16 val)
{
unsigned long flags;
spin_lock_irqsave(&pci_poke_lock, flags);
pci_poke_cpu = smp_processor_id();
pci_poke_in_progress = 1;
pci_poke_faulted = 0;
__asm__ __volatile__("membar #Sync\n\t"
"stha %0, [%1] %2\n\t"
"membar #Sync"
: /* no outputs */
: "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
: "memory");
pci_poke_in_progress = 0;
pci_poke_cpu = -1;
spin_unlock_irqrestore(&pci_poke_lock, flags);
}
void pci_config_write32(u32 *addr, u32 val)
{
unsigned long flags;
spin_lock_irqsave(&pci_poke_lock, flags);
pci_poke_cpu = smp_processor_id();
pci_poke_in_progress = 1;
pci_poke_faulted = 0;
__asm__ __volatile__("membar #Sync\n\t"
"stwa %0, [%1] %2\n\t"
"membar #Sync"
: /* no outputs */
: "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
: "memory");
pci_poke_in_progress = 0;
pci_poke_cpu = -1;
spin_unlock_irqrestore(&pci_poke_lock, flags);
}
/* Probe for all PCI controllers in the system. */
extern void sabre_init(int, char *);
extern void psycho_init(int, char *);
......
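
A note on the mechanism above: pci_poke_in_progress/pci_poke_cpu let the trap handler recognize a fault taken during a config-space access and set pci_poke_faulted instead of OOPSing, and a faulted read returns without writing *ret. A hypothetical caller would therefore pre-set a default (a sketch, not from this commit):

/* cfg_addr is a hypothetical pointer into a controller's config space;
 * 0xff mirrors what a master abort conventionally yields.
 */
static u8 read_cfg_byte_or_ff(u8 *cfg_addr)
{
        u8 val = 0xff;  /* left untouched if the access faults */

        pci_config_read8(cfg_addr, &val);
        return val;
}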
@@ -42,132 +42,11 @@ extern void pci_scan_for_master_abort(struct pci_controller_info *, struct pci_p
extern void pci_scan_for_parity_error(struct pci_controller_info *, struct pci_pbm_info *, struct pci_bus *);

/* Configuration space access. */
extern spinlock_t pci_poke_lock;
extern volatile int pci_poke_in_progress;
extern volatile int pci_poke_cpu;
extern volatile int pci_poke_faulted;

extern void pci_config_read8(u8 *addr, u8 *ret);
extern void pci_config_read16(u16 *addr, u16 *ret);
extern void pci_config_read32(u32 *addr, u32 *ret);
extern void pci_config_write8(u8 *addr, u8 val);
extern void pci_config_write16(u16 *addr, u16 val);
extern void pci_config_write32(u32 *addr, u32 val);

static __inline__ void pci_config_read8(u8 *addr, u8 *ret)
{
unsigned long flags;
u8 byte;
spin_lock_irqsave(&pci_poke_lock, flags);
pci_poke_cpu = smp_processor_id();
pci_poke_in_progress = 1;
pci_poke_faulted = 0;
__asm__ __volatile__("membar #Sync\n\t"
"lduba [%1] %2, %0\n\t"
"membar #Sync"
: "=r" (byte)
: "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
: "memory");
pci_poke_in_progress = 0;
pci_poke_cpu = -1;
if (!pci_poke_faulted)
*ret = byte;
spin_unlock_irqrestore(&pci_poke_lock, flags);
}
static __inline__ void pci_config_read16(u16 *addr, u16 *ret)
{
unsigned long flags;
u16 word;
spin_lock_irqsave(&pci_poke_lock, flags);
pci_poke_cpu = smp_processor_id();
pci_poke_in_progress = 1;
pci_poke_faulted = 0;
__asm__ __volatile__("membar #Sync\n\t"
"lduha [%1] %2, %0\n\t"
"membar #Sync"
: "=r" (word)
: "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
: "memory");
pci_poke_in_progress = 0;
pci_poke_cpu = -1;
if (!pci_poke_faulted)
*ret = word;
spin_unlock_irqrestore(&pci_poke_lock, flags);
}
static __inline__ void pci_config_read32(u32 *addr, u32 *ret)
{
unsigned long flags;
u32 dword;
spin_lock_irqsave(&pci_poke_lock, flags);
pci_poke_cpu = smp_processor_id();
pci_poke_in_progress = 1;
pci_poke_faulted = 0;
__asm__ __volatile__("membar #Sync\n\t"
"lduwa [%1] %2, %0\n\t"
"membar #Sync"
: "=r" (dword)
: "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
: "memory");
pci_poke_in_progress = 0;
pci_poke_cpu = -1;
if (!pci_poke_faulted)
*ret = dword;
spin_unlock_irqrestore(&pci_poke_lock, flags);
}
static __inline__ void pci_config_write8(u8 *addr, u8 val)
{
unsigned long flags;
spin_lock_irqsave(&pci_poke_lock, flags);
pci_poke_cpu = smp_processor_id();
pci_poke_in_progress = 1;
pci_poke_faulted = 0;
__asm__ __volatile__("membar #Sync\n\t"
"stba %0, [%1] %2\n\t"
"membar #Sync"
: /* no outputs */
: "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
: "memory");
pci_poke_in_progress = 0;
pci_poke_cpu = -1;
spin_unlock_irqrestore(&pci_poke_lock, flags);
}
static __inline__ void pci_config_write16(u16 *addr, u16 val)
{
unsigned long flags;
spin_lock_irqsave(&pci_poke_lock, flags);
pci_poke_cpu = smp_processor_id();
pci_poke_in_progress = 1;
pci_poke_faulted = 0;
__asm__ __volatile__("membar #Sync\n\t"
"stha %0, [%1] %2\n\t"
"membar #Sync"
: /* no outputs */
: "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
: "memory");
pci_poke_in_progress = 0;
pci_poke_cpu = -1;
spin_unlock_irqrestore(&pci_poke_lock, flags);
}
static __inline__ void pci_config_write32(u32 *addr, u32 val)
{
unsigned long flags;
spin_lock_irqsave(&pci_poke_lock, flags);
pci_poke_cpu = smp_processor_id();
pci_poke_in_progress = 1;
pci_poke_faulted = 0;
__asm__ __volatile__("membar #Sync\n\t"
"stwa %0, [%1] %2\n\t"
"membar #Sync"
: /* no outputs */
: "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
: "memory");
pci_poke_in_progress = 0;
pci_poke_cpu = -1;
spin_unlock_irqrestore(&pci_poke_lock, flags);
}
#endif /* !(PCI_IMPL_H) */
@@ -40,13 +40,57 @@ static __inline__ int __sem_update_count(struct semaphore *sem, int incr)
return old_count;
}

void __up(struct semaphore *sem)
static void __up(struct semaphore *sem)
{
__sem_update_count(sem, 1);
wake_up(&sem->wait);
}

void up(struct semaphore *sem)
{
/* This atomically does:
* old_val = sem->count;
* new_val = sem->count + 1;
* sem->count = new_val;
* if (old_val < 0)
* __up(sem);
*
* The (old_val < 0) test is equivalent to
* the more straightforward (new_val <= 0),
* but it is easier to test the former because
* of how the CAS instruction works.
*/
__asm__ __volatile__("\n"
" ! up sem(%0)\n"
" membar #StoreLoad | #LoadLoad\n"
"1: lduw [%0], %%g5\n"
" add %%g5, 1, %%g7\n"
" cas [%0], %%g5, %%g7\n"
" cmp %%g5, %%g7\n"
" bne,pn %%icc, 1b\n"
" addcc %%g7, 1, %%g0\n"
" ble,pn %%icc, 3f\n"
" membar #StoreLoad | #StoreStore\n"
"2:\n"
" .subsection 2\n"
"3: mov %0, %%g5\n"
" save %%sp, -160, %%sp\n"
" mov %%g1, %%l1\n"
" mov %%g2, %%l2\n"
" mov %%g3, %%l3\n"
" call %1\n"
" mov %%g5, %%o0\n"
" mov %%l1, %%g1\n"
" mov %%l2, %%g2\n"
" ba,pt %%xcc, 2b\n"
" restore %%l3, %%g0, %%g3\n"
" .previous\n"
: : "r" (sem), "i" (__up)
: "g5", "g7", "memory", "cc");
}

void __down(struct semaphore * sem)
static void __down(struct semaphore * sem)
{
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
@@ -64,7 +108,90 @@ void __down(struct semaphore * sem)
wake_up(&sem->wait);
}

void down(struct semaphore *sem)
{
/* This atomically does:
* old_val = sem->count;
* new_val = sem->count - 1;
* sem->count = new_val;
* if (old_val < 1)
* __down(sem);
*
* The (old_val < 1) test is equivalent to
* the more straightforward (new_val < 0),
* but it is easier to test the former because
* of how the CAS instruction works.
*/
__asm__ __volatile__("\n"
" ! down sem(%0)\n"
"1: lduw [%0], %%g5\n"
" sub %%g5, 1, %%g7\n"
" cas [%0], %%g5, %%g7\n"
" cmp %%g5, %%g7\n"
" bne,pn %%icc, 1b\n"
" cmp %%g7, 1\n"
" bl,pn %%icc, 3f\n"
" membar #StoreLoad | #StoreStore\n"
"2:\n"
" .subsection 2\n"
"3: mov %0, %%g5\n"
" save %%sp, -160, %%sp\n"
" mov %%g1, %%l1\n"
" mov %%g2, %%l2\n"
" mov %%g3, %%l3\n"
" call %1\n"
" mov %%g5, %%o0\n"
" mov %%l1, %%g1\n"
" mov %%l2, %%g2\n"
" ba,pt %%xcc, 2b\n"
" restore %%l3, %%g0, %%g3\n"
" .previous\n"
: : "r" (sem), "i" (__down)
: "g5", "g7", "memory", "cc");
}
int down_trylock(struct semaphore *sem)
{
int ret;
/* This atomically does:
* old_val = sem->count;
* new_val = sem->count - 1;
* if (old_val < 1) {
* ret = 1;
* } else {
* sem->count = new_val;
* ret = 0;
* }
*
* The (old_val < 1) test is equivalent to
* the more straightforward (new_val < 0),
* but it is easier to test the former because
* of how the CAS instruction works.
*/
__asm__ __volatile__("\n"
" ! down_trylock sem(%1) ret(%0)\n"
"1: lduw [%1], %%g5\n"
" sub %%g5, 1, %%g7\n"
" cmp %%g5, 1\n"
" bl,pn %%icc, 2f\n"
" mov 1, %0\n"
" cas [%1], %%g5, %%g7\n"
" cmp %%g5, %%g7\n"
" bne,pn %%icc, 1b\n"
" mov 0, %0\n"
" membar #StoreLoad | #StoreStore\n"
"2:\n"
: "=&r" (ret)
: "r" (sem)
: "g5", "g7", "memory", "cc");
return ret;
}

int __down_interruptible(struct semaphore * sem)
static int __down_interruptible(struct semaphore * sem)
{
int retval = 0;
struct task_struct *tsk = current;
@@ -87,3 +214,51 @@ int __down_interruptible(struct semaphore * sem)
wake_up(&sem->wait);
return retval;
}
int down_interruptible(struct semaphore *sem)
{
int ret = 0;
/* This atomically does:
* old_val = sem->count;
* new_val = sem->count - 1;
* sem->count = new_val;
* if (old_val < 1)
* ret = __down_interruptible(sem);
*
* The (old_val < 1) test is equivalent to
* the more straightforward (new_val < 0),
* but it is easier to test the former because
* of how the CAS instruction works.
*/
__asm__ __volatile__("\n"
" ! down_interruptible sem(%2) ret(%0)\n"
"1: lduw [%2], %%g5\n"
" sub %%g5, 1, %%g7\n"
" cas [%2], %%g5, %%g7\n"
" cmp %%g5, %%g7\n"
" bne,pn %%icc, 1b\n"
" cmp %%g7, 1\n"
" bl,pn %%icc, 3f\n"
" membar #StoreLoad | #StoreStore\n"
"2:\n"
" .subsection 2\n"
"3: mov %2, %%g5\n"
" save %%sp, -160, %%sp\n"
" mov %%g1, %%l1\n"
" mov %%g2, %%l2\n"
" mov %%g3, %%l3\n"
" call %3\n"
" mov %%g5, %%o0\n"
" mov %%l1, %%g1\n"
" mov %%l2, %%g2\n"
" mov %%l3, %%g3\n"
" ba,pt %%xcc, 2b\n"
" restore %%o0, %%g0, %0\n"
" .previous\n"
: "=r" (ret)
: "0" (ret), "r" (sem), "i" (__down_interruptible)
: "g5", "g7", "memory", "cc");
return ret;
}
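
All four primitives above share the compare-and-swap fast path the block comments describe. A hedged C sketch of down() alone, using the GCC __sync_val_compare_and_swap() builtin and hypothetical names purely for illustration; the real asm must also preserve %g1-%g3 across the slow-path call and issue the membar barriers:

struct sem_sketch { int count; };

extern void slow_down(struct sem_sketch *sem);  /* stands in for __down() */

static void down_sketch(struct sem_sketch *sem)
{
        int old;

        /* Atomically: old = count; count = old - 1; retry if the CAS loses. */
        do {
                old = sem->count;
        } while (__sync_val_compare_and_swap(&sem->count, old, old - 1) != old);

        if (old < 1)            /* equivalently, the new value went negative */
                slow_down(sem); /* contended: sleep until a matching up() */
}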
@@ -230,7 +230,7 @@ asmlinkage void do_rt_sigsuspend32(u32 uset, size_t sigsetsize, struct pt_regs *
}
}

static inline int restore_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t *fpu)
static int restore_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t *fpu)
{
unsigned long *fpregs = current_thread_info()->fpregs;
unsigned long fprs;
@@ -477,7 +477,7 @@ static int invalid_frame_pointer(void *fp, int fplen)
return 0;
}

static inline void *get_sigframe(struct sigaction *sa, struct pt_regs *regs, unsigned long framesize)
static void *get_sigframe(struct sigaction *sa, struct pt_regs *regs, unsigned long framesize)
{
unsigned long sp;
@@ -645,7 +645,7 @@ setup_frame32(struct sigaction *sa, struct pt_regs *regs, int signr, sigset_t *o
}

static inline int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t *fpu)
static int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t *fpu)
{
unsigned long *fpregs = current_thread_info()->fpregs;
unsigned long fprs;
@@ -665,8 +665,8 @@ static inline int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t *fpu)
return err;
}

static inline void new_setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
static void new_setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
int signo, sigset_t *oldset)
{
struct new_signal_frame32 *sf;
int sigframe_size;
@@ -790,7 +790,7 @@ static inline void new_setup_frame32(struct k_sigaction *ka, struct pt_regs *reg
}

/* Setup a Solaris stack frame */
static inline void
static void
setup_svr4_frame32(struct sigaction *sa, unsigned long pc, unsigned long npc,
struct pt_regs *regs, int signr, sigset_t *oldset)
{
@@ -1089,9 +1089,9 @@ asmlinkage int svr4_setcontext(svr4_ucontext_t *c, struct pt_regs *regs)
do_exit(SIGSEGV);
}

static inline void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
unsigned long signr, sigset_t *oldset,
siginfo_t *info)
{
struct rt_signal_frame32 *sf;
int sigframe_size;
......
@@ -161,9 +161,10 @@ EXPORT_SYMBOL(smp_call_function);
#endif

/* semaphores */
EXPORT_SYMBOL(__down);
EXPORT_SYMBOL(__down_interruptible);
EXPORT_SYMBOL(__up);
EXPORT_SYMBOL(down);
EXPORT_SYMBOL(down_trylock);
EXPORT_SYMBOL(down_interruptible);
EXPORT_SYMBOL(up);
/* Atomic counter implementation. */
EXPORT_SYMBOL(__atomic_add);
@@ -332,6 +333,7 @@ EXPORT_SYMBOL(__strncmp);
EXPORT_SYMBOL(__memmove);
EXPORT_SYMBOL(csum_partial_copy_sparc64);
EXPORT_SYMBOL(ip_fast_csum);
/* Moving data to/from userspace. */
EXPORT_SYMBOL(__copy_to_user);
......
@@ -273,7 +273,7 @@ struct itimerval32
struct timeval32 it_value;
};

static inline long get_tv32(struct timeval *o, struct timeval32 *i)
static long get_tv32(struct timeval *o, struct timeval32 *i)
{
return (!access_ok(VERIFY_READ, tv32, sizeof(*tv32)) ||
(__get_user(o->tv_sec, &i->tv_sec) |
@@ -296,7 +296,7 @@ static inline long get_it32(struct itimerval *o, struct itimerval32 *i)
__get_user(o->it_value.tv_usec, &i->it_value.tv_usec)));
}

static inline long put_it32(struct itimerval32 *o, struct itimerval *i)
static long put_it32(struct itimerval32 *o, struct itimerval *i)
{
return (!access_ok(VERIFY_WRITE, i32, sizeof(*i32)) ||
(__put_user(i->it_interval.tv_sec, &o->it_interval.tv_sec) |
@@ -890,7 +890,7 @@ asmlinkage long sys32_fcntl64(unsigned int fd, unsigned int cmd, unsigned long a
return sys32_fcntl(fd, cmd, arg);
}

static inline int put_statfs (struct statfs32 *ubuf, struct statfs *kbuf)
static int put_statfs (struct statfs32 *ubuf, struct statfs *kbuf)
{
int err;
@@ -1272,8 +1272,7 @@ asmlinkage int sys32_getdents(unsigned int fd, struct linux_dirent32 *dirent, un
* 64-bit unsigned longs.
*/

static inline int
get_fd_set32(unsigned long n, unsigned long *fdset, u32 *ufdset)
static int get_fd_set32(unsigned long n, unsigned long *fdset, u32 *ufdset)
{
if (ufdset) {
unsigned long odd;
@@ -1303,8 +1302,7 @@ get_fd_set32(unsigned long n, unsigned long *fdset, u32 *ufdset)
return 0;
}

static inline void
set_fd_set32(unsigned long n, u32 *ufdset, unsigned long *fdset)
static void set_fd_set32(unsigned long n, u32 *ufdset, unsigned long *fdset)
{
unsigned long odd;
@@ -2217,8 +2215,8 @@ static inline int iov_from_user32_to_kern(struct iovec *kiov,
return tot_len;
}

static inline int msghdr_from_user32_to_kern(struct msghdr *kmsg,
static int msghdr_from_user32_to_kern(struct msghdr *kmsg,
struct msghdr32 *umsg)
{
u32 tmp1, tmp2, tmp3;
int err;
......
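
For context, get_fd_set32()/set_fd_set32() exist because a 32-bit fd_set is an array of 32-bit words while the kernel's is built from 64-bit longs, so words must be paired up and an odd trailing word zero-extended. A hedged sketch of that conversion; the word order within each 64-bit long is an assumption here, and the real helpers also perform user-access checking:

static void get_fd_set32_sketch(unsigned long nwords, unsigned long *fdset,
                                const u32 *ufdset)
{
        unsigned long i;

        /* Pack two 32-bit words per 64-bit long (low word first, by
         * assumption); zero-extend an odd trailing word.
         */
        for (i = 0; i + 1 < nwords; i += 2)
                *fdset++ = ((unsigned long)ufdset[i + 1] << 32) | ufdset[i];
        if (nwords & 1)
                *fdset = ufdset[nwords - 1];
}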
@@ -149,8 +149,8 @@ static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
}
}

static inline unsigned long compute_effective_address(struct pt_regs *regs,
static unsigned long compute_effective_address(struct pt_regs *regs,
unsigned int insn, unsigned int rd)
{
unsigned int rs1 = (insn >> 14) & 0x1f;
unsigned int rs2 = insn & 0x1f;
......
@@ -10,6 +10,6 @@ obj-y := PeeCeeI.o blockops.o debuglocks.o strlen.o strncmp.o \
VIScopy.o VISbzero.o VISmemset.o VIScsum.o VIScsumcopy.o \
VIScsumcopyusr.o VISsave.o atomic.o rwlock.o bitops.o \
dec_and_lock.o U3memcpy.o U3copy_from_user.o U3copy_to_user.o \
U3copy_in_user.o mcount.o
U3copy_in_user.o mcount.o ipcsum.o

include $(TOPDIR)/Rules.make
.text
.align 32
.globl ip_fast_csum
ip_fast_csum: /* %o0 = iph, %o1 = ihl */
sub %o1, 4, %g7
lduw [%o0 + 0x00], %o2
lduw [%o0 + 0x04], %g2
lduw [%o0 + 0x08], %g3
addcc %g2, %o2, %o2
lduw [%o0 + 0x0c], %g2
addccc %g3, %o2, %o2
lduw [%o0 + 0x10], %g3
addccc %g2, %o2, %o2
addc %o2, %g0, %o2
1: addcc %g3, %o2, %o2
add %o0, 4, %o0
addccc %o2, %g0, %o2
subcc %g7, 1, %g7
be,a,pt %icc, 2f
sll %o2, 16, %g2
lduw [%o0 + 0x10], %g3
ba,pt %xcc, 1b
nop
2: addcc %o2, %g2, %g2
srl %g2, 16, %o2
addc %o2, %g0, %o2
xnor %g0, %o2, %o2
set 0xffff, %o1
retl
and %o2, %o1, %o0
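
What the new assembly file above computes is the standard 16-bit ones'-complement checksum over an IP header of ihl 32-bit words. A hedged C equivalent for reference (illustration only; the asm folds carries incrementally with addcc/addccc rather than in a wide accumulator, and relies on the word-aligned iph noted in the checksum header later in this patch):

static unsigned short ip_fast_csum_sketch(const unsigned char *iph,
                                          unsigned int ihl)
{
        const unsigned int *p = (const unsigned int *)iph;
        unsigned long long sum = 0;
        unsigned int i;

        for (i = 0; i < ihl; i++)
                sum += p[i];
        while (sum >> 16)                       /* fold carries into 16 bits */
                sum = (sum & 0xffff) + (sum >> 16);
        return (unsigned short)~sum;
}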
@@ -222,7 +222,7 @@ static void do_fault_siginfo(int code, int sig, unsigned long address)
extern int handle_ldf_stq(u32, struct pt_regs *);
extern int handle_ld_nf(u32, struct pt_regs *);

static inline unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
{
if (!insn) {
if (!regs->tpc || (regs->tpc & 0x3))
......
@@ -78,45 +78,8 @@ csum_and_copy_to_user(const char *src, char *dst, int len,
/* ihl is always 5 or greater, almost always is 5, and iph is word aligned
* the majority of the time.
*/

extern unsigned short ip_fast_csum(__const__ unsigned char *iph,
unsigned int ihl);

static __inline__ unsigned short ip_fast_csum(__const__ unsigned char *iph,
unsigned int ihl)
{
unsigned short sum;
/* Note: We must read %2 before we touch %0 for the first time,
* because GCC can legitimately use the same register for
* both operands.
*/
__asm__ __volatile__(
" sub %2, 4, %%g7 ! IEU0\n"
" lduw [%1 + 0x00], %0 ! Load Group\n"
" lduw [%1 + 0x04], %%g2 ! Load Group\n"
" lduw [%1 + 0x08], %%g3 ! Load Group\n"
" addcc %%g2, %0, %0 ! IEU1 1 Load Bubble + Group\n"
" lduw [%1 + 0x0c], %%g2 ! Load\n"
" addccc %%g3, %0, %0 ! Sngle Group no Bubble\n"
" lduw [%1 + 0x10], %%g3 ! Load Group\n"
" addccc %%g2, %0, %0 ! Sngle Group no Bubble\n"
" addc %0, %%g0, %0 ! Sngle Group\n"
"1: addcc %%g3, %0, %0 ! IEU1 Group no Bubble\n"
" add %1, 4, %1 ! IEU0\n"
" addccc %0, %%g0, %0 ! Sngle Group no Bubble\n"
" subcc %%g7, 1, %%g7 ! IEU1 Group\n"
" be,a,pt %%icc, 2f ! CTI\n"
" sll %0, 16, %%g2 ! IEU0\n"
" lduw [%1 + 0x10], %%g3 ! Load Group\n"
" ba,pt %%xcc, 1b ! CTI\n"
" nop ! IEU0\n"
"2: addcc %0, %%g2, %%g2 ! IEU1 Group\n"
" srl %%g2, 16, %0 ! IEU0 Group regdep XXX Scheisse!\n"
" addc %0, %%g0, %0 ! Sngle Group\n"
" xnor %%g0, %0, %0 ! IEU0 Group\n"
" srl %0, 0, %0 ! IEU0 Group XXX Scheisse!\n"
: "=r" (sum), "=&r" (iph)
: "r" (ihl), "1" (iph)
: "g2", "g3", "g7", "cc");
return sum;
}
/* Fold a partial checksum without adding pseudo headers. */
static __inline__ unsigned short csum_fold(unsigned int sum)
......
@@ -93,7 +93,7 @@ extern unsigned char dma_sync_reg_table_entry;
#define IBF_MULTI 0x08 /* On PCI, indicates shared bucket. */
#define IBF_INPROGRESS 0x10 /* IRQ is being serviced. */

#define NUM_IVECS 8192
#define NUM_IVECS (IMAP_INR + 1)
extern struct ino_bucket ivector_table[NUM_IVECS];

#define __irq_ino(irq) \
......
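
The define change sizes ivector_table from the interrupt-number field rather than a hard-coded 8192 entries. Schematically, with a hypothetical field mask (IMAP_INR is assumed to be the mask of the INR field in an interrupt map register):

#define EXAMPLE_INR_MASK  0x7ff                  /* hypothetical INR field mask */
#define EXAMPLE_NUM_IVECS (EXAMPLE_INR_MASK + 1) /* one bucket per possible ino */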
@@ -47,184 +47,10 @@ static inline void init_MUTEX_LOCKED (struct semaphore *sem)
sema_init(sem, 0);
}

extern void __down(struct semaphore * sem);
extern int __down_interruptible(struct semaphore * sem);
extern void __up(struct semaphore * sem);
extern void up(struct semaphore *sem);
extern void down(struct semaphore *sem);
extern int down_trylock(struct semaphore *sem);
extern int down_interruptible(struct semaphore *sem);
static __inline__ void down(struct semaphore * sem)
{
/* This atomically does:
* old_val = sem->count;
* new_val = sem->count - 1;
* sem->count = new_val;
* if (old_val < 1)
* __down(sem);
*
* The (old_val < 1) test is equivalent to
* the more straightforward (new_val < 0),
* but it is easier to test the former because
* of how the CAS instruction works.
*/
__asm__ __volatile__("\n"
" ! down sem(%0)\n"
"1: lduw [%0], %%g5\n"
" sub %%g5, 1, %%g7\n"
" cas [%0], %%g5, %%g7\n"
" cmp %%g5, %%g7\n"
" bne,pn %%icc, 1b\n"
" cmp %%g7, 1\n"
" bl,pn %%icc, 3f\n"
" membar #StoreLoad | #StoreStore\n"
"2:\n"
" .subsection 2\n"
"3: mov %0, %%g5\n"
" save %%sp, -160, %%sp\n"
" mov %%g1, %%l1\n"
" mov %%g2, %%l2\n"
" mov %%g3, %%l3\n"
" call %1\n"
" mov %%g5, %%o0\n"
" mov %%l1, %%g1\n"
" mov %%l2, %%g2\n"
" ba,pt %%xcc, 2b\n"
" restore %%l3, %%g0, %%g3\n"
" .previous\n"
: : "r" (sem), "i" (__down)
: "g5", "g7", "memory", "cc");
}
static __inline__ int down_interruptible(struct semaphore *sem)
{
int ret = 0;
/* This atomically does:
* old_val = sem->count;
* new_val = sem->count - 1;
* sem->count = new_val;
* if (old_val < 1)
* ret = __down_interruptible(sem);
*
* The (old_val < 1) test is equivalent to
* the more straightforward (new_val < 0),
* but it is easier to test the former because
* of how the CAS instruction works.
*/
__asm__ __volatile__("\n"
" ! down_interruptible sem(%2) ret(%0)\n"
"1: lduw [%2], %%g5\n"
" sub %%g5, 1, %%g7\n"
" cas [%2], %%g5, %%g7\n"
" cmp %%g5, %%g7\n"
" bne,pn %%icc, 1b\n"
" cmp %%g7, 1\n"
" bl,pn %%icc, 3f\n"
" membar #StoreLoad | #StoreStore\n"
"2:\n"
" .subsection 2\n"
"3: mov %2, %%g5\n"
" save %%sp, -160, %%sp\n"
" mov %%g1, %%l1\n"
" mov %%g2, %%l2\n"
" mov %%g3, %%l3\n"
" call %3\n"
" mov %%g5, %%o0\n"
" mov %%l1, %%g1\n"
" mov %%l2, %%g2\n"
" mov %%l3, %%g3\n"
" ba,pt %%xcc, 2b\n"
" restore %%o0, %%g0, %0\n"
" .previous\n"
: "=r" (ret)
: "0" (ret), "r" (sem), "i" (__down_interruptible)
: "g5", "g7", "memory", "cc");
return ret;
}
static __inline__ int down_trylock(struct semaphore *sem)
{
int ret;
/* This atomically does:
* old_val = sem->count;
* new_val = sem->count - 1;
* if (old_val < 1) {
* ret = 1;
* } else {
* sem->count = new_val;
* ret = 0;
* }
*
* The (old_val < 1) test is equivalent to
* the more straightforward (new_val < 0),
* but it is easier to test the former because
* of how the CAS instruction works.
*/
__asm__ __volatile__("\n"
" ! down_trylock sem(%1) ret(%0)\n"
"1: lduw [%1], %%g5\n"
" sub %%g5, 1, %%g7\n"
" cmp %%g5, 1\n"
" bl,pn %%icc, 2f\n"
" mov 1, %0\n"
" cas [%1], %%g5, %%g7\n"
" cmp %%g5, %%g7\n"
" bne,pn %%icc, 1b\n"
" mov 0, %0\n"
" membar #StoreLoad | #StoreStore\n"
"2:\n"
: "=&r" (ret)
: "r" (sem)
: "g5", "g7", "memory", "cc");
return ret;
}
static __inline__ void up(struct semaphore * sem)
{
/* This atomically does:
* old_val = sem->count;
* new_val = sem->count + 1;
* sem->count = new_val;
* if (old_val < 0)
* __up(sem);
*
* The (old_val < 0) test is equivalent to
* the more straightforward (new_val <= 0),
* but it is easier to test the former because
* of how the CAS instruction works.
*/
__asm__ __volatile__("\n"
" ! up sem(%0)\n"
" membar #StoreLoad | #LoadLoad\n"
"1: lduw [%0], %%g5\n"
" add %%g5, 1, %%g7\n"
" cas [%0], %%g5, %%g7\n"
" cmp %%g5, %%g7\n"
" bne,pn %%icc, 1b\n"
" addcc %%g7, 1, %%g0\n"
" ble,pn %%icc, 3f\n"
" membar #StoreLoad | #StoreStore\n"
"2:\n"
" .subsection 2\n"
"3: mov %0, %%g5\n"
" save %%sp, -160, %%sp\n"
" mov %%g1, %%l1\n"
" mov %%g2, %%l2\n"
" mov %%g3, %%l3\n"
" call %1\n"
" mov %%g5, %%o0\n"
" mov %%l1, %%g1\n"
" mov %%l2, %%g2\n"
" ba,pt %%xcc, 2b\n"
" restore %%l3, %%g0, %%g3\n"
" .previous\n"
: : "r" (sem), "i" (__up)
: "g5", "g7", "memory", "cc");
}
#endif /* __KERNEL__ */
......
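
Callers are unaffected by the uninlining; usage of these primitives stays the usual pattern. A sketch with a hypothetical driver lock:

static DECLARE_MUTEX(foo_sem);          /* hypothetical; semaphore initialized to 1 */

static int foo_op(void)
{
        if (down_interruptible(&foo_sem))
                return -EINTR;          /* interrupted by a signal */
        /* ... critical section ... */
        up(&foo_sem);
        return 0;
}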