Commit 4bba626e authored by Linus Torvalds

Merge http://lia64.bkbits.net/to-linus-2.5

into ppc970.osdl.org:/home/torvalds/v2.6/linux
parents 523cc485 f9f0080e
@@ -388,7 +388,7 @@ config IA64_GRANULE_16MB
config IA64_GRANULE_64MB
bool "64MB"
-depends on !(IA64_GENERIC || IA64_HP_ZX1)
+depends on !(IA64_GENERIC || IA64_HP_ZX1 || IA64_SGI_SN2)
endchoice
......
@@ -43,7 +43,8 @@ endif
ifeq ($(GCC_VERSION),3)
ifeq ($(GCC_MINOR_VERSION),4)
-cflags-$(CONFIG_ITANIUM) += -mtune=merced
+# Workaround Itanium 1 bugs in gcc 3.4.
+# cflags-$(CONFIG_ITANIUM) += -mtune=merced
cflags-$(CONFIG_MCKINLEY) += -mtune=mckinley
endif
endif
......
This diff is collapsed.
@@ -44,7 +44,7 @@
GLOBAL_ENTRY(efi_call_phys)
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
-alloc loc1=ar.pfs,8,5,7,0
+alloc loc1=ar.pfs,8,7,7,0
ld8 r2=[in0],8 // load EFI function's entry point
mov loc0=rp
.body
@@ -70,9 +70,13 @@ GLOBAL_ENTRY(efi_call_phys)
mov out3=in4
mov out5=in6
mov out6=in7
+mov loc5=r19
+mov loc6=r20
br.call.sptk.many rp=b6 // call the EFI function
.ret1: mov ar.rsc=0 // put RSE in enforced lazy, LE mode
mov r16=loc3
+mov r19=loc5
+mov r20=loc6
br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
.ret2: mov ar.rsc=loc4 // restore RSE configuration
mov ar.pfs=loc1
......
@@ -182,7 +182,7 @@ GLOBAL_ENTRY(ia64_switch_to)
movl r25=init_task
mov r27=IA64_KR(CURRENT_STACK)
adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
-dep r20=0,in0,61,3 // physical address of "current"
+dep r20=0,in0,61,3 // physical address of "next"
;;
st8 [r22]=sp // save kernel stack pointer of old task
shr.u r26=r20,IA64_GRANULE_SHIFT
@@ -195,7 +195,7 @@ GLOBAL_ENTRY(ia64_switch_to)
(p6) br.cond.dpnt .map
;;
.done:
-(p6) ssm psr.ic // if we we had to map, renable the psr.ic bit FIRST!!!
+(p6) ssm psr.ic // if we had to map, reenable the psr.ic bit FIRST!!!
;;
(p6) srlz.d
ld8 sp=[r21] // load kernel stack pointer of new task
......
@@ -67,7 +67,7 @@ start_ap:
* Initialize kernel region registers:
* rr[5]: VHPT enabled, page size = PAGE_SHIFT
-* rr[6]: VHPT disabled, page size = IA64_GRANULE_SHIFT
+* rr[5]: VHPT disabled, page size = IA64_GRANULE_SHIFT
* rr[7]: VHPT disabled, page size = IA64_GRANULE_SHIFT
*/
mov r16=((ia64_rid(IA64_REGION_ID_KERNEL, (5<<61)) << 8) | (PAGE_SHIFT << 2) | 1)
movl r17=(5<<61)
@@ -154,8 +154,7 @@ start_ap:
#endif
;;
tpa r3=r2 // r3 == phys addr of task struct
-;;
-shr.u r16=r3,IA64_GRANULE_SHIFT
+mov r16=-1
(isBP) br.cond.dpnt .load_current // BP stack is on region 5 --- no need to map it
// load mapping for stack (virtaddr in r2, physaddr in r3)
@@ -169,6 +168,7 @@ start_ap:
dep r2=-1,r3,61,3 // IMVA of task
;;
mov r17=rr[r2]
+shr.u r16=r3,IA64_GRANULE_SHIFT
;;
dep r17=0,r17,8,24
;;
@@ -706,6 +706,9 @@ END(__ia64_init_fpu)
*
* Inputs:
* r16 = new psr to establish
+* Output:
+* r19 = old virtual address of ar.bsp
+* r20 = old virtual address of sp
*
* Note: RSE must already be in enforced lazy mode
*/
@@ -724,12 +727,13 @@ GLOBAL_ENTRY(ia64_switch_mode_phys)
mov cr.ipsr=r16 // set new PSR
add r3=1f-ia64_switch_mode_phys,r15
-mov r17=ar.bsp
+mov r19=ar.bsp
+mov r20=sp
mov r14=rp // get return address into a general register
;;
// going to physical mode, use tpa to translate virt->phys
-tpa r17=r17
+tpa r17=r19
tpa r3=r3
tpa sp=sp
tpa r14=r14
@@ -752,6 +756,8 @@ END(ia64_switch_mode_phys)
*
* Inputs:
* r16 = new psr to establish
+* r19 = new bspstore to establish
+* r20 = new sp to establish
*
* Note: RSE must already be in enforced lazy mode
*/
@@ -770,7 +776,6 @@ GLOBAL_ENTRY(ia64_switch_mode_virt)
mov cr.ipsr=r16 // set new PSR
add r3=1f-ia64_switch_mode_virt,r15
-mov r17=ar.bsp
mov r14=rp // get return address into a general register
;;
@@ -781,15 +786,14 @@ GLOBAL_ENTRY(ia64_switch_mode_virt)
movl r18=KERNEL_START
dep r3=0,r3,KERNEL_TR_PAGE_SHIFT,64-KERNEL_TR_PAGE_SHIFT
dep r14=0,r14,KERNEL_TR_PAGE_SHIFT,64-KERNEL_TR_PAGE_SHIFT
-dep r17=-1,r17,61,3
-dep sp=-1,sp,61,3
+mov sp=r20
;;
or r3=r3,r18
or r14=r14,r18
;;
mov r18=ar.rnat // save ar.rnat
-mov ar.bspstore=r17 // this steps on ar.rnat
+mov ar.bspstore=r19 // this steps on ar.rnat
mov cr.iip=r3
mov cr.ifs=r0
;;
......
@@ -217,10 +217,8 @@ set_rte (unsigned int vector, unsigned int dest, int mask)
spin_lock_irqsave(&iosapic_lock, flags);
{
-writel(IOSAPIC_RTE_HIGH(rte_index), addr + IOSAPIC_REG_SELECT);
-writel(high32, addr + IOSAPIC_WINDOW);
-writel(IOSAPIC_RTE_LOW(rte_index), addr + IOSAPIC_REG_SELECT);
-writel(low32, addr + IOSAPIC_WINDOW);
+iosapic_write(addr, IOSAPIC_RTE_HIGH(rte_index), high32);
+iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
iosapic_intr_info[vector].low32 = low32;
}
spin_unlock_irqrestore(&iosapic_lock, flags);
@@ -249,12 +247,9 @@ mask_irq (unsigned int irq)
spin_lock_irqsave(&iosapic_lock, flags);
{
-writel(IOSAPIC_RTE_LOW(rte_index), addr + IOSAPIC_REG_SELECT);
/* set only the mask bit */
low32 = iosapic_intr_info[vec].low32 |= IOSAPIC_MASK;
-writel(low32, addr + IOSAPIC_WINDOW);
+iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
}
spin_unlock_irqrestore(&iosapic_lock, flags);
}
@@ -275,9 +270,8 @@ unmask_irq (unsigned int irq)
spin_lock_irqsave(&iosapic_lock, flags);
{
-writel(IOSAPIC_RTE_LOW(rte_index), addr + IOSAPIC_REG_SELECT);
low32 = iosapic_intr_info[vec].low32 &= ~IOSAPIC_MASK;
-writel(low32, addr + IOSAPIC_WINDOW);
+iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
}
spin_unlock_irqrestore(&iosapic_lock, flags);
}
@@ -325,10 +319,8 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask)
low32 |= (IOSAPIC_FIXED << IOSAPIC_DELIVERY_SHIFT);
iosapic_intr_info[vec].low32 = low32;
-writel(IOSAPIC_RTE_HIGH(rte_index), addr + IOSAPIC_REG_SELECT);
-writel(high32, addr + IOSAPIC_WINDOW);
-writel(IOSAPIC_RTE_LOW(rte_index), addr + IOSAPIC_REG_SELECT);
-writel(low32, addr + IOSAPIC_WINDOW);
+iosapic_write(addr, IOSAPIC_RTE_HIGH(rte_index), high32);
+iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
}
spin_unlock_irqrestore(&iosapic_lock, flags);
#endif
@@ -351,7 +343,7 @@ iosapic_end_level_irq (unsigned int irq)
ia64_vector vec = irq_to_vector(irq);
move_irq(irq);
-writel(vec, iosapic_intr_info[vec].addr + IOSAPIC_EOI);
+iosapic_eoi(iosapic_intr_info[vec].addr, vec);
}
#define iosapic_shutdown_level_irq mask_irq
@@ -428,8 +420,7 @@ iosapic_version (char *addr)
* unsigned int reserved2 : 8;
* }
*/
-writel(IOSAPIC_VERSION, addr + IOSAPIC_REG_SELECT);
-return readl(IOSAPIC_WINDOW + addr);
+return iosapic_read(addr, IOSAPIC_VERSION);
}
/*
......
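The iosapic_write()/iosapic_read() helpers that the hunks above switch to are the inline functions this commit adds to include/asm-ia64/iosapic.h (shown further down). As a minimal sketch of the indirection they encapsulate, with plain volatile accesses standing in for the kernel's writel()/readl() (illustrative only, not kernel code):

#include <stdint.h>

#define IOSAPIC_REG_SELECT 0x0
#define IOSAPIC_WINDOW 0x10

/* The IOSAPIC exposes an index register and a data window: store a
 * register number into REG_SELECT, then access that register through
 * WINDOW. Every access is a two-step sequence, which is why the
 * callers above perform it with iosapic_lock held. */
static uint32_t iosapic_read_sketch(volatile char *iosapic, uint32_t reg)
{
	*(volatile uint32_t *) (iosapic + IOSAPIC_REG_SELECT) = reg;
	return *(volatile uint32_t *) (iosapic + IOSAPIC_WINDOW);
}

static void iosapic_write_sketch(volatile char *iosapic, uint32_t reg, uint32_t val)
{
	*(volatile uint32_t *) (iosapic + IOSAPIC_REG_SELECT) = reg;
	*(volatile uint32_t *) (iosapic + IOSAPIC_WINDOW) = val;
}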
@@ -256,7 +256,7 @@ ia64_mca_log_sal_error_record(int sal_info_type)
salinfo_log_wakeup(sal_info_type, buffer, size, irq_safe);
if (irq_safe)
-printk(KERN_INFO "CPU %d: SAL log contains %s error record\n",
+IA64_MCA_DEBUG("CPU %d: SAL log contains %s error record\n",
smp_processor_id(),
sal_info_type < ARRAY_SIZE(rec_name) ? rec_name[sal_info_type] : "UNKNOWN");
......
@@ -656,8 +656,26 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
case RV_PCREL:
switch (r_type) {
case R_IA64_PCREL21B:
-/* special because it can cross into other module/kernel-core. */
-if (!is_internal(mod, val))
+if (in_init(mod, val)) {
+/* Calls to init code from core are bad news */
+if (in_core(mod, (uint64_t)location)) {
+printk(KERN_ERR "%s: init symbol 0x%lx used in module code at %p\n",
+mod->name, val, location);
+return -ENOEXEC;
+}
+} else if (in_core(mod, val)) {
+/*
+* Init section may have been allocated far away from core,
+* if the branch won't reach, then allocate a plt for it.
+*/
+if (in_init(mod, (uint64_t)location)) {
+uint64_t delta = ((int64_t)val - (int64_t)location) / 16;
+if (delta + (1 << 20) >= (1 << 21)) {
+val = get_fdesc(mod, val, &ok);
+val = get_plt(mod, location, val, &ok);
+}
+}
+} else
val = get_plt(mod, location, val, &ok);
/* FALL THROUGH */
default:
......
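The new R_IA64_PCREL21B handling above hinges on the reach of an IA-64 PC-relative branch: the immediate is a signed 21-bit count of 16-byte bundles, roughly plus or minus 16MB. A standalone sketch of the same range test (illustrative C, not kernel code):

#include <stdint.h>

/* delta is measured in bundles, exactly as in the hunk above. Adding
 * 1 << 20 shifts the signed range [-2^20, 2^20) onto [0, 2^21), so a
 * single unsigned comparison decides whether the branch reaches or a
 * PLT entry is needed. */
static int pcrel21b_reaches(uint64_t target, uint64_t location)
{
	int64_t delta = ((int64_t) target - (int64_t) location) / 16;

	return (uint64_t) (delta + (1 << 20)) < (1 << 21);
}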
@@ -55,7 +55,7 @@ END(ia64_pal_default_handler)
*/
GLOBAL_ENTRY(ia64_pal_call_static)
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(6)
-alloc loc1 = ar.pfs,6,90,0,0
+alloc loc1 = ar.pfs,5,5,0,0
movl loc2 = pal_entry_point
1: {
mov r28 = in0
@@ -66,7 +66,9 @@ GLOBAL_ENTRY(ia64_pal_call_static)
ld8 loc2 = [loc2] // loc2 <- entry point
tbit.nz p6,p7 = in4, 0
adds r8 = 1f-1b,r8
+mov loc4=ar.rsc // save RSE configuration
;;
+mov ar.rsc=0 // put RSE in enforced lazy, LE mode
mov loc3 = psr
mov loc0 = rp
.body
@@ -82,6 +84,7 @@ GLOBAL_ENTRY(ia64_pal_call_static)
mov rp = r8
br.cond.sptk.many b7
1: mov psr.l = loc3
+mov ar.rsc = loc4 // restore RSE configuration
mov ar.pfs = loc1
mov rp = loc0
;;
@@ -98,7 +101,7 @@ END(ia64_pal_call_static)
*/
GLOBAL_ENTRY(ia64_pal_call_stacked)
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5)
-alloc loc1 = ar.pfs,5,4,87,0
+alloc loc1 = ar.pfs,4,4,4,0
movl loc2 = pal_entry_point
mov r28 = in0 // Index MUST be copied to r28
@@ -145,7 +148,7 @@ END(ia64_pal_call_stacked)
GLOBAL_ENTRY(ia64_pal_call_phys_static)
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(6)
-alloc loc1 = ar.pfs,6,90,0,0
+alloc loc1 = ar.pfs,4,7,0,0
movl loc2 = pal_entry_point
1: {
mov r28 = in0 // copy procedure index
@@ -176,10 +179,14 @@ GLOBAL_ENTRY(ia64_pal_call_phys_static)
andcm r16=loc3,r16 // removes bits to clear from psr
br.call.sptk.many rp=ia64_switch_mode_phys
.ret1: mov rp = r8 // install return address (physical)
+mov loc5 = r19
+mov loc6 = r20
br.cond.sptk.many b7
1:
mov ar.rsc=0 // put RSE in enforced lazy, LE mode
mov r16=loc3 // r16= original psr
+mov r19=loc5
+mov r20=loc6
br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
.ret2:
mov psr.l = loc3 // restore init PSR
@@ -201,7 +208,7 @@ END(ia64_pal_call_phys_static)
*/
GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5)
-alloc loc1 = ar.pfs,5,5,86,0
+alloc loc1 = ar.pfs,5,7,4,0
movl loc2 = pal_entry_point
1: {
mov r28 = in0 // copy procedure index
@@ -230,10 +237,14 @@ GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
andcm r16=loc3,r16 // removes bits to clear from psr
br.call.sptk.many rp=ia64_switch_mode_phys
.ret6:
+mov loc5 = r19
+mov loc6 = r20
br.call.sptk.many rp=b7 // now make the call
.ret7:
mov ar.rsc=0 // put RSE in enforced lazy, LE mode
mov r16=loc3 // r16= original psr
+mov r19=loc5
+mov r20=loc6
br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
.ret8: mov psr.l = loc3 // restore init PSR
......
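The alloc fixes in these PAL stubs shrink register frames that had been claiming essentially the whole stacked register file (ar.pfs,6,90,0,0 is 96 registers for a call that uses a handful). An ia64 alloc takes input, local, output, and rotating counts; a hedged sketch of the constraints involved, with limits as given in the architecture manual (illustrative C, not kernel code):

/* For "alloc rX = ar.pfs, i, l, o, r": the frame holds i + l + o of
 * the 96 stacked registers, and the rotating portion r must be a
 * multiple of 8 that fits inside the frame. Oversized frames are
 * legal but force the RSE to spill and refill registers the routine
 * never touches. */
static int alloc_operands_ok(int in, int local, int out, int rot)
{
	int frame = in + local + out;

	return frame <= 96 && rot % 8 == 0 && rot <= frame;
}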
@@ -277,6 +277,29 @@ setup_serial_legacy (void)
}
#endif
+/**
+* early_console_setup - setup debugging console
+*
+* Consoles started here require little enough setup that we can start using
+* them very early in the boot process, either right after the machine
+* vector initialization, or even before if the drivers can detect their hw.
+*
+* Returns non-zero if a console couldn't be setup.
+*/
+static inline int __init
+early_console_setup (void)
+{
+#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
+{
+extern int sn_serial_console_early_setup(void);
+if(!sn_serial_console_early_setup())
+return 0;
+}
+#endif
+return -1;
+}
void __init
setup_arch (char **cmdline_p)
{
@@ -294,6 +317,12 @@ setup_arch (char **cmdline_p)
machvec_init(acpi_get_sysname());
#endif
+#ifdef CONFIG_SMP
+/* If we register an early console, allow CPU 0 to printk */
+if (!early_console_setup())
+cpu_set(smp_processor_id(), cpu_online_map);
+#endif
#ifdef CONFIG_ACPI_BOOT
/* Initialize the ACPI boot-time table parser */
acpi_table_init();
......
@@ -549,7 +549,7 @@ void call_pernode_memory(unsigned long start, unsigned long len, void *arg)
if (!num_node_memblks) {
/* No SRAT table, so assume one node (node 0) */
if (start < end)
-(*func)(start, len, 0);
+(*func)(start, end - start, 0);
return;
}
......
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
-* Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
+* Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
*/
@@ -199,7 +199,7 @@ bte_error_handler(unsigned long _nodepda)
err_nodepda->bte_if[i].cleanup_active = 0;
BTE_PRINTK(("eh:%p:%d Unlocked %d\n", err_nodepda,
smp_processor_id(), i));
-spin_unlock(&pda->cpu_bte_if[i]->spinlock);
+spin_unlock(&err_nodepda->bte_if[i].spinlock);
}
del_timer(recovery_timer);
......
@@ -7,6 +7,7 @@
*/
#include <linux/config.h>
#include <linux/module.h>
+#include <asm/sn/sgi.h>
#include <asm/sn/nodepda.h>
#include <asm/sn/addrs.h>
@@ -27,10 +28,18 @@
#define L1_CACHE_MASK (L1_CACHE_BYTES - 1)
#endif
-/*
-* The base address for each set of bte registers.
-*/
-static int bte_offsets[] = { IIO_IBLS0, IIO_IBLS1 };
+/* two interfaces on two btes */
+#define MAX_INTERFACES_TO_TRY 4
+static struct bteinfo_s *
+bte_if_on_node(nasid_t nasid, int interface)
+{
+nodepda_t *tmp_nodepda;
+tmp_nodepda = NODEPDA(nasid_to_cnodeid(nasid));
+return &tmp_nodepda->bte_if[interface];
+}
/************************************************************************
@@ -61,11 +70,12 @@
bte_result_t
bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
{
-int bte_to_use;
u64 transfer_size;
struct bteinfo_s *bte;
bte_result_t bte_status;
unsigned long irq_flags;
+struct bteinfo_s *btes_to_try[MAX_INTERFACES_TO_TRY];
+int bte_if_index;
BTE_PRINTK(("bte_copy(0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%p)\n",
@@ -79,17 +89,57 @@ bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
(src & L1_CACHE_MASK) || (dest & L1_CACHE_MASK)));
ASSERT(len < ((BTE_LEN_MASK + 1) << L1_CACHE_SHIFT));
+if (mode & BTE_USE_DEST) {
+/* try remote then local */
+btes_to_try[0] = bte_if_on_node(NASID_GET(dest), 0);
+btes_to_try[1] = bte_if_on_node(NASID_GET(dest), 1);
+if (mode & BTE_USE_ANY) {
+btes_to_try[2] = bte_if_on_node(get_nasid(), 0);
+btes_to_try[3] = bte_if_on_node(get_nasid(), 1);
+} else {
+btes_to_try[2] = NULL;
+btes_to_try[3] = NULL;
+}
+} else {
+/* try local then remote */
+btes_to_try[0] = bte_if_on_node(get_nasid(), 0);
+btes_to_try[1] = bte_if_on_node(get_nasid(), 1);
+if (mode & BTE_USE_ANY) {
+btes_to_try[2] = bte_if_on_node(NASID_GET(dest), 0);
+btes_to_try[3] = bte_if_on_node(NASID_GET(dest), 1);
+} else {
+btes_to_try[2] = NULL;
+btes_to_try[3] = NULL;
+}
+}
do {
local_irq_save(irq_flags);
-bte_to_use = 0;
+bte_if_index = 0;
/* Attempt to lock one of the BTE interfaces. */
-while ((bte_to_use < BTES_PER_NODE) &&
-BTE_LOCK_IF_AVAIL(bte_to_use)) {
-bte_to_use++;
+while (bte_if_index < MAX_INTERFACES_TO_TRY) {
+bte = btes_to_try[bte_if_index++];
+if (bte == NULL) {
+continue;
+}
+if (spin_trylock(&bte->spinlock)) {
+if ((*bte->most_rcnt_na & BTE_ACTIVE) ||
+(BTE_LNSTAT_LOAD(bte) & BTE_ACTIVE)) {
+/* Got the lock but BTE still busy */
+spin_unlock(&bte->spinlock);
+bte = NULL;
+} else {
+/* we got the lock and it's not busy */
+break;
+}
+}
+}
-if (bte_to_use < BTES_PER_NODE) {
+if (bte != NULL) {
break;
}
@@ -100,12 +150,9 @@ bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
}
/* Wait until a bte is available. */
-udelay(10);
+udelay(1);
} while (1);
-bte = pda->cpu_bte_if[bte_to_use];
-BTE_PRINTKV(("Got a lock on bte %d\n", bte_to_use));
if (notification == NULL) {
/* User does not want to be notified. */
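The selection loop above replaces the old fixed per-CPU BTE assignment with an opportunistic scan: walk an ordered candidate list, spin_trylock() each entry, confirm the engine is actually idle, and back off briefly when everything is busy. A user-space analogue of the pattern, with pthreads mutexes standing in for spinlocks (illustrative only):

#include <pthread.h>
#include <stddef.h>
#include <unistd.h>

struct engine {
	pthread_mutex_t lock;
	volatile int busy;	/* the hardware busy bit in the real driver */
};

/* NULL slots are skipped, just like the unused btes_to_try[] entries
 * above; the function returns a locked, idle engine. */
static struct engine *grab_engine(struct engine **candidates, int n)
{
	int i;

	for (;;) {
		for (i = 0; i < n; i++) {
			struct engine *e = candidates[i];

			if (e == NULL)
				continue;
			if (pthread_mutex_trylock(&e->lock) != 0)
				continue;
			if (!e->busy)
				return e;	/* locked and not busy */
			pthread_mutex_unlock(&e->lock);
		}
		usleep(1);	/* the driver waits with udelay(1) */
	}
}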
@@ -121,28 +168,24 @@ bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
*bte->most_rcnt_na = -1L;
/* Set the status reg busy bit and transfer length */
-BTE_PRINTKV(("IBLS - HUB_S(0x%p, 0x%lx)\n",
-BTEREG_LNSTAT_ADDR, IBLS_BUSY | transfer_size));
-HUB_S(BTEREG_LNSTAT_ADDR, (IBLS_BUSY | transfer_size));
+BTE_PRINTKV(("IBLS = 0x%lx\n", IBLS_BUSY | transfer_size));
+BTE_LNSTAT_STORE(bte, IBLS_BUSY | transfer_size);
/* Set the source and destination registers */
-BTE_PRINTKV(("IBSA - HUB_S(0x%p, 0x%lx)\n", BTEREG_SRC_ADDR,
-(TO_PHYS(src))));
-HUB_S(BTEREG_SRC_ADDR, (TO_PHYS(src)));
-BTE_PRINTKV(("IBDA - HUB_S(0x%p, 0x%lx)\n", BTEREG_DEST_ADDR,
-(TO_PHYS(dest))));
-HUB_S(BTEREG_DEST_ADDR, (TO_PHYS(dest)));
+BTE_PRINTKV(("IBSA = 0x%lx)\n", (TO_PHYS(src))));
+BTE_SRC_STORE(bte, TO_PHYS(src));
+BTE_PRINTKV(("IBDA = 0x%lx)\n", (TO_PHYS(dest))));
+BTE_DEST_STORE(bte, TO_PHYS(dest));
/* Set the notification register */
-BTE_PRINTKV(("IBNA - HUB_S(0x%p, 0x%lx)\n", BTEREG_NOTIF_ADDR,
-(TO_PHYS(ia64_tpa((unsigned long)bte->most_rcnt_na)))));
-HUB_S(BTEREG_NOTIF_ADDR, (TO_PHYS(ia64_tpa((unsigned long)bte->most_rcnt_na))));
+BTE_PRINTKV(("IBNA = 0x%lx)\n",
+TO_PHYS(ia64_tpa((unsigned long)bte->most_rcnt_na))));
+BTE_NOTIF_STORE(bte, TO_PHYS(ia64_tpa((unsigned long)bte->most_rcnt_na)));
/* Initiate the transfer */
-BTE_PRINTK(("IBCT - HUB_S(0x%p, 0x%lx)\n", BTEREG_CTRL_ADDR,
-BTE_VALID_MODE(mode)));
-HUB_S(BTEREG_CTRL_ADDR, BTE_VALID_MODE(mode));
+BTE_PRINTK(("IBCT = 0x%lx)\n", BTE_VALID_MODE(mode)));
+BTE_CTRL_STORE(bte, BTE_VALID_MODE(mode));
spin_unlock_irqrestore(&bte->spinlock, irq_flags);
@@ -156,7 +199,7 @@ bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
BTE_PRINTKV((" Delay Done. IBLS = 0x%lx, most_rcnt_na = 0x%lx\n",
-HUB_L(BTEREG_LNSTAT_ADDR), *bte->most_rcnt_na));
+BTE_LNSTAT_LOAD(bte), *bte->most_rcnt_na));
if (*bte->most_rcnt_na & IBLS_ERROR) {
bte_status = *bte->most_rcnt_na & ~IBLS_ERROR;
@@ -165,10 +208,11 @@ bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
bte_status = BTE_SUCCESS;
}
BTE_PRINTK(("Returning status is 0x%lx and most_rcnt_na is 0x%lx\n",
-HUB_L(BTEREG_LNSTAT_ADDR), *bte->most_rcnt_na));
+BTE_LNSTAT_LOAD(bte), *bte->most_rcnt_na));
return bte_status;
}
+EXPORT_SYMBOL(bte_copy);
/*
@@ -201,14 +245,19 @@ bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
u64 footBcopyDest;
u64 footBcopyLen;
bte_result_t rv;
-char *bteBlock;
+char *bteBlock, *bteBlock_unaligned;
if (len == 0) {
return BTE_SUCCESS;
}
-/* temporary buffer used during unaligned transfers */
-bteBlock = pda->cpu_bte_if[0]->scratch_buf;
+bteBlock_unaligned = kmalloc(len + 3 * L1_CACHE_BYTES,
+GFP_KERNEL | GFP_DMA);
+if (bteBlock_unaligned == NULL) {
+return BTEFAIL_NOTAVAIL;
+}
+bteBlock = (char *) L1_CACHE_ALIGN((u64) bteBlock_unaligned);
headBcopySrcOffset = src & L1_CACHE_MASK;
destFirstCacheOffset = dest & L1_CACHE_MASK;
@@ -276,6 +325,7 @@ bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
ia64_tpa((unsigned long)bteBlock),
footBteLen, mode, NULL);
if (rv != BTE_SUCCESS) {
+kfree(bteBlock_unaligned);
return rv;
}
@@ -296,6 +346,7 @@ bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
(len - headBcopyLen -
footBcopyLen), mode, NULL);
if (rv != BTE_SUCCESS) {
+kfree(bteBlock_unaligned);
return rv;
}
@@ -325,6 +376,7 @@ bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
rv = bte_copy(headBteSource,
ia64_tpa((unsigned long)bteBlock), headBteLen, mode, NULL);
if (rv != BTE_SUCCESS) {
+kfree(bteBlock_unaligned);
return rv;
}
@@ -332,8 +384,10 @@ bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
headBcopySrcOffset),
headBcopyLen);
}
+kfree(bteBlock_unaligned);
return BTE_SUCCESS;
}
+EXPORT_SYMBOL(bte_unaligned_copy);
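The kmalloc() path added above trades the old shared per-node scratch buffer for a per-call bounce buffer. Since the BTE needs cache-line-aligned addresses, the code over-allocates and aligns by hand; a sketch of that idiom, with malloc standing in for kmalloc(..., GFP_KERNEL | GFP_DMA) and an assumed line size (illustrative only):

#include <stdint.h>
#include <stdlib.h>

#define L1_CACHE_BYTES 128	/* assumed SN2 cache line size */
#define L1_CACHE_ALIGN(a) \
	(((a) + L1_CACHE_BYTES - 1) & ~(uintptr_t) (L1_CACHE_BYTES - 1))

/* A transfer of len bytes can straddle partial cache lines at both the
 * head and the foot, and the raw pointer itself may be misaligned, so
 * len plus three extra lines always leaves room for an aligned region
 * of the needed size. The caller frees *raw, never the aligned
 * pointer -- matching the kfree(bteBlock_unaligned) calls above. */
static char *alloc_bounce(uint64_t len, char **raw)
{
	*raw = malloc(len + 3 * L1_CACHE_BYTES);
	if (*raw == NULL)
		return NULL;
	return (char *) L1_CACHE_ALIGN((uintptr_t) *raw);
}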
/************************************************************************
@@ -370,9 +424,9 @@ bte_init_node(nodepda_t * mynodepda, cnodeid_t cnode)
mynodepda->bte_recovery_timer.data = (unsigned long) mynodepda;
for (i = 0; i < BTES_PER_NODE; i++) {
-/* >>> Don't know why the 0x1800000L is here. Robin */
-mynodepda->bte_if[i].bte_base_addr =
-(char *) LOCAL_MMR_ADDR(bte_offsets[i] | 0x1800000L);
+mynodepda->bte_if[i].bte_base_addr = (u64 *)
+REMOTE_HUB_ADDR(cnodeid_to_nasid(cnode),
+(i == 0 ? IIO_IBLS0 : IIO_IBLS1));
/*
* Initialize the notification and spinlock
@@ -383,8 +437,6 @@ bte_init_node(nodepda_t * mynodepda, cnodeid_t cnode)
mynodepda->bte_if[i].notify = 0L;
spin_lock_init(&mynodepda->bte_if[i].spinlock);
-mynodepda->bte_if[i].scratch_buf =
-alloc_bootmem_node(NODE_DATA(cnode), BTE_MAX_XFER);
mynodepda->bte_if[i].bte_cnode = cnode;
mynodepda->bte_if[i].bte_error_count = 0;
mynodepda->bte_if[i].bte_num = i;
@@ -393,23 +445,3 @@ bte_init_node(nodepda_t * mynodepda, cnodeid_t cnode)
}
}
-/*
-* bte_init_cpu()
-*
-* Initialize the cpupda structure with pointers to the
-* nodepda bte blocks.
-*
-*/
-void
-bte_init_cpu(void)
-{
-/* Called by setup.c as each cpu is being added to the nodepda */
-if (local_node_data->active_cpu_count & 0x1) {
-pda->cpu_bte_if[0] = &(nodepda->bte_if[0]);
-pda->cpu_bte_if[1] = &(nodepda->bte_if[1]);
-} else {
-pda->cpu_bte_if[0] = &(nodepda->bte_if[1]);
-pda->cpu_bte_if[1] = &(nodepda->bte_if[0]);
-}
-}
@@ -54,7 +54,6 @@ DEFINE_PER_CPU(struct pda_s, pda_percpu);
#define MAX_PHYS_MEMORY (1UL << 49) /* 1 TB */
extern void bte_init_node (nodepda_t *, cnodeid_t);
-extern void bte_init_cpu (void);
extern void sn_timer_init(void);
extern unsigned long last_time_offset;
extern void init_platform_hubinfo(nodepda_t **nodepdaindr);
@@ -496,8 +495,6 @@ sn_cpu_init(void)
buddy_nasid = cnodeid_to_nasid(numa_node_id() == numnodes-1 ? 0 : numa_node_id()+ 1);
pda->pio_shub_war_cam_addr = (volatile unsigned long*)GLOBAL_MMR_ADDR(nasid, SH_PI_CAM_CONTROL);
}
-bte_init_cpu();
}
/*
......
#ifndef __ASM_IA64_IOSAPIC_H
#define __ASM_IA64_IOSAPIC_H
#define IOSAPIC_DEFAULT_ADDR 0xFEC00000
#define IOSAPIC_REG_SELECT 0x0
#define IOSAPIC_WINDOW 0x10
#define IOSAPIC_EOI 0x40
#define IOSAPIC_VERSION 0x1
-#define IOSAPIC_VERSION 0x1
/*
* Redirection table entry
@@ -55,6 +53,23 @@
#define NR_IOSAPICS 256
+static inline unsigned int iosapic_read(char *iosapic, unsigned int reg)
+{
+writel(reg, iosapic + IOSAPIC_REG_SELECT);
+return readl(iosapic + IOSAPIC_WINDOW);
+}
+static inline void iosapic_write(char *iosapic, unsigned int reg, u32 val)
+{
+writel(reg, iosapic + IOSAPIC_REG_SELECT);
+writel(val, iosapic + IOSAPIC_WINDOW);
+}
+static inline void iosapic_eoi(char *iosapic, u32 vector)
+{
+writel(vector, iosapic + IOSAPIC_EOI);
+}
extern void __init iosapic_system_init (int pcat_compat);
extern void __init iosapic_init (unsigned long address,
unsigned int gsi_base);
......
@@ -123,5 +123,9 @@ extern void smp_send_reschedule (int cpu);
extern void lock_ipi_calllock(void);
extern void unlock_ipi_calllock(void);
+#else
+#define cpu_logical_id(cpuid) 0
#endif /* CONFIG_SMP */
#endif /* _ASM_IA64_SMP_H */
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
-* Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
+* Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
*/
@@ -48,35 +48,31 @@
#define BTE_ZERO_FILL (BTE_NOTIFY | IBCT_ZFIL_MODE)
/* Use a reserved bit to let the caller specify a wait for any BTE */
#define BTE_WACQUIRE (0x4000)
+/* Use the BTE on the node with the destination memory */
+#define BTE_USE_DEST (BTE_WACQUIRE << 1)
+/* Use any available BTE interface on any node for the transfer */
+#define BTE_USE_ANY (BTE_USE_DEST << 1)
/* macro to force the IBCT0 value valid */
#define BTE_VALID_MODE(x) ((x) & (IBCT_NOTIFY | IBCT_ZFIL_MODE))
-/*
-* Handle locking of the bte interfaces.
-*
-* All transfers spinlock the interface before setting up the SHUB
-* registers. Sync transfers hold the lock until all processing is
-* complete. Async transfers release the lock as soon as the transfer
-* is initiated.
-*
-* To determine if an interface is available, we must check both the
-* busy bit and the spinlock for that interface.
-*/
-#define BTE_LOCK_IF_AVAIL(_x) (\
-(*pda->cpu_bte_if[_x]->most_rcnt_na & (IBLS_BUSY | IBLS_ERROR)) && \
-(!(spin_trylock(&(pda->cpu_bte_if[_x]->spinlock)))) \
-)
+#define BTE_ACTIVE (IBLS_BUSY | IBLS_ERROR)
/*
* Some macros to simplify reading.
* Start with macros to locate the BTE control registers.
*/
-#define BTEREG_LNSTAT_ADDR ((u64 *)(bte->bte_base_addr))
-#define BTEREG_SRC_ADDR ((u64 *)(bte->bte_base_addr + BTEOFF_SRC))
-#define BTEREG_DEST_ADDR ((u64 *)(bte->bte_base_addr + BTEOFF_DEST))
-#define BTEREG_CTRL_ADDR ((u64 *)(bte->bte_base_addr + BTEOFF_CTRL))
-#define BTEREG_NOTIF_ADDR ((u64 *)(bte->bte_base_addr + BTEOFF_NOTIFY))
+#define BTE_LNSTAT_LOAD(_bte) \
+HUB_L(_bte->bte_base_addr)
+#define BTE_LNSTAT_STORE(_bte, _x) \
+HUB_S(_bte->bte_base_addr, (_x))
+#define BTE_SRC_STORE(_bte, _x) \
+HUB_S(_bte->bte_base_addr + (BTEOFF_SRC/8), (_x))
+#define BTE_DEST_STORE(_bte, _x) \
+HUB_S(_bte->bte_base_addr + (BTEOFF_DEST/8), (_x))
+#define BTE_CTRL_STORE(_bte, _x) \
+HUB_S(_bte->bte_base_addr + (BTEOFF_CTRL/8), (_x))
+#define BTE_NOTIF_STORE(_bte, _x) \
+HUB_S(_bte->bte_base_addr + (BTEOFF_NOTIFY/8), (_x))
/* Possible results from bte_copy and bte_unaligned_copy */
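One detail in the macros above: bte_base_addr changes from char * to u64 * (see the struct change below), so the byte offsets BTEOFF_* are divided by 8 to compensate for C pointer-arithmetic scaling. Both forms below compute the same MMIO address (sketch, illustrative types):

#include <stdint.h>

/* With a u64 * base, base + off / 8 advances off bytes; with a char *
 * base, base + off does. The new BTE_*_STORE macros use the former. */
static volatile uint64_t *reg_from_u64_base(volatile uint64_t *base, uint64_t off)
{
	return base + off / 8;
}

static volatile uint64_t *reg_from_char_base(volatile char *base, uint64_t off)
{
	return (volatile uint64_t *) (base + off);
}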
@@ -110,16 +106,15 @@ typedef enum {
* to work with a BTE.
*/
struct bteinfo_s {
-u64 volatile notify ____cacheline_aligned;
-char *bte_base_addr ____cacheline_aligned;
+volatile u64 notify ____cacheline_aligned;
+u64 *bte_base_addr ____cacheline_aligned;
spinlock_t spinlock;
cnodeid_t bte_cnode; /* cnode */
int bte_error_count; /* Number of errors encountered */
int bte_num; /* 0 --> BTE0, 1 --> BTE1 */
int cleanup_active; /* Interface is locked for cleanup */
volatile bte_result_t bh_error; /* error while processing */
-u64 volatile *most_rcnt_na;
-void *scratch_buf; /* Node local scratch buffer */
+volatile u64 *most_rcnt_na;
};
@@ -130,6 +125,8 @@ extern bte_result_t bte_copy(u64, u64, u64, u64, void *);
extern bte_result_t bte_unaligned_copy(u64, u64, u64, u64);
extern void bte_error_handler(unsigned long);
+#define bte_zero(dest, len, mode, notification) \
+bte_copy(0, dest, len, ((mode) | BTE_ZERO_FILL), notification)
/*
* The following is the prefered way of calling bte_unaligned_copy
......
@@ -49,8 +49,6 @@ typedef struct pda_s {
volatile unsigned long *pio_shub_war_cam_addr;
volatile unsigned long *mem_write_status_addr;
-struct bteinfo_s *cpu_bte_if[BTES_PER_NODE]; /* cpu interface order */
unsigned long sn_soft_irr[4];
unsigned long sn_in_service_ivecs[4];
short cnodeid_to_nasid_table[MAX_NUMNODES];
......
@@ -84,7 +84,6 @@
*/
#ifndef CONFIG_SMP
-#define cpu_logical_id(cpu) 0
#define cpu_physical_id(cpuid) ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff)
#endif
......