nexedi / linux / Commits

Commit 20f6d716, authored May 05, 2003 by David Mosberger

    Merge tiger.hpl.hp.com:/data1/bk/vanilla/linux-2.5
    into tiger.hpl.hp.com:/data1/bk/lia64/to-linus-2.5

parents d210257e 083a5094

Showing 49 changed files with 1760 additions and 1494 deletions (+1760, -1494)
arch/ia64/Makefile                      +11    -3
arch/ia64/hp/common/sba_iommu.c         +808   -528
arch/ia64/hp/sim/simeth.c               +1     -1
arch/ia64/hp/zx1/Makefile               +0     -1
arch/ia64/hp/zx1/hpzx1_misc.c           +0     -348
arch/ia64/ia32/ia32_entry.S             +5     -5
arch/ia64/ia32/ia32_traps.c             +1     -1
arch/ia64/ia32/sys_ia32.c               +31    -172
arch/ia64/kernel/Makefile               +2     -0
arch/ia64/kernel/acpi-ext.c             +76    -46
arch/ia64/kernel/acpi.c                 +1     -130
arch/ia64/kernel/brl_emu.c              +1     -1
arch/ia64/kernel/head.S                 +79    -0
arch/ia64/kernel/iosapic.c              +19    -18
arch/ia64/kernel/irq.c                  +2     -2
arch/ia64/kernel/mca.c                  +169   -16
arch/ia64/kernel/mca_asm.S              +4     -4
arch/ia64/kernel/perfmon.c              +97    -28
arch/ia64/kernel/perfmon_mckinley.h     +2     -5
arch/ia64/kernel/process.c              +6     -6
arch/ia64/kernel/sal.c                  +1     -1
arch/ia64/kernel/setup.c                +8     -1
arch/ia64/kernel/smpboot.c              +37    -12
arch/ia64/kernel/time.c                 +1     -1
arch/ia64/kernel/traps.c                +1     -1
arch/ia64/kernel/unaligned.c            +1     -1
arch/ia64/kernel/unwind.c               +58    -36
arch/ia64/lib/copy_user.S               +1     -1
arch/ia64/lib/do_csum.S                 +1     -1
arch/ia64/lib/io.c                      +1     -1
arch/ia64/lib/swiotlb.c                 +2     -2
arch/ia64/pci/pci.c                     +171   -29
include/asm-ia64/acpi-ext.h             +2     -17
include/asm-ia64/compat.h               +3     -0
include/asm-ia64/intrinsics.h           +16    -14
include/asm-ia64/io.h                   +28    -4
include/asm-ia64/iosapic.h              +1     -0
include/asm-ia64/machvec_init.h         +4     -0
include/asm-ia64/pal.h                  +2     -1
include/asm-ia64/pci.h                  +7     -1
include/asm-ia64/perfmon.h              +2     -1
include/asm-ia64/processor.h            +1     -1
include/asm-ia64/ptrace.h               +2     -0
include/asm-ia64/sal.h                  +1     -1
include/asm-ia64/serial.h               +0     -1
include/asm-ia64/spinlock.h             +82    -48
include/asm-ia64/uaccess.h              +1     -1
include/asm-ia64/unwind.h               +9     -2
include/linux/pci_ids.h                 +1     -0
arch/ia64/Makefile (view file @ 20f6d716)

@@ -23,6 +23,7 @@ cflags-y := -pipe $(EXTRA) -ffixed-r13 -mfixed-range=f10-f15,f32-f127 \
 CFLAGS_KERNEL	:= -mconstant-gp

 GCC_VERSION	= $(shell $(CC) -v 2>&1 | fgrep 'gcc version' | cut -f3 -d ' ' | cut -f1 -d '.')
+GCC_MINOR_VERSION = $(shell $(CC) -v 2>&1 | fgrep 'gcc version' | cut -f3 -d ' ' | cut -f2 -d '.')

 GAS_STATUS	= $(shell arch/ia64/scripts/check-gas $(CC) $(OBJDUMP))

@@ -35,7 +36,14 @@ $(error Sorry, you need a newer version of the assember, one that is built from
 endif

 ifneq ($(GCC_VERSION),2)
-	cflags-y += -frename-registers --param max-inline-insns=5000
+	cflags-$(CONFIG_ITANIUM) += -frename-registers
+endif
+
+ifeq ($(GCC_VERSION),3)
+ ifeq ($(GCC_MINOR_VERSION),4)
+	cflags-$(CONFIG_ITANIUM)  += -mtune=merced
+	cflags-$(CONFIG_MCKINLEY) += -mtune=mckinley
+ endif
 endif

 cflags-$(CONFIG_ITANIUM_BSTEP_SPECIFIC) += -mb-step

@@ -48,14 +56,14 @@ libs-y += arch/ia64/lib/
 core-y				+= arch/ia64/kernel/ arch/ia64/mm/
 core-$(CONFIG_IA32_SUPPORT)	+= arch/ia64/ia32/
 core-$(CONFIG_IA64_DIG)		+= arch/ia64/dig/
-core-$(CONFIG_IA64_GENERIC)	+= arch/ia64/dig/ arch/ia64/hp/common/ arch/ia64/hp/zx1/ \
-				   arch/ia64/hp/sim/
+core-$(CONFIG_IA64_GENERIC)	+= arch/ia64/dig/ arch/ia64/hp/sim/
 core-$(CONFIG_IA64_HP_ZX1)	+= arch/ia64/dig/
 core-$(CONFIG_IA64_SGI_SN)	+= arch/ia64/sn/

 drivers-$(CONFIG_PCI)		+= arch/ia64/pci/
 drivers-$(CONFIG_IA64_HP_SIM)	+= arch/ia64/hp/sim/
 drivers-$(CONFIG_IA64_HP_ZX1)	+= arch/ia64/hp/common/ arch/ia64/hp/zx1/
+drivers-$(CONFIG_IA64_GENERIC)	+= arch/ia64/hp/common/ arch/ia64/hp/zx1/ arch/ia64/hp/sim/

 boot := arch/ia64/boot
 tools := arch/ia64/tools
arch/ia64/hp/common/sba_iommu.c (view file @ 20f6d716)

 /*
 ** IA64 System Bus Adapter (SBA) I/O MMU manager
 **
-** (c) Copyright 2002 Alex Williamson
-** (c) Copyright 2002 Grant Grundler
-** (c) Copyright 2002 Hewlett-Packard Company
+** (c) Copyright 2002-2003 Alex Williamson
+** (c) Copyright 2002-2003 Grant Grundler
+** (c) Copyright 2002-2003 Hewlett-Packard Company
 **
 ** Portions (c) 2000 Grant Grundler (from parisc I/O MMU code)
 ** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code)

@@ -30,17 +30,39 @@
 #include <linux/string.h>
 #include <linux/pci.h>
 #include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/acpi.h>
 #include <linux/efi.h>

 #include <asm/delay.h>		/* ia64_get_itc() */
 #include <asm/io.h>
 #include <asm/page.h>		/* PAGE_OFFSET */
+#include <asm/dma.h>
+#include <asm/system.h>		/* wmb() */
+#include <asm/acpi-ext.h>

-#define DRIVER_NAME "SBA"
+#define PFX "IOC: "

+/*
+** This option allows cards capable of 64bit DMA to bypass the IOMMU. If
+** not defined, all DMA will be 32bit and go through the TLB.
+*/
 #define ALLOW_IOV_BYPASS

+/*
+** If a device prefetches beyond the end of a valid pdir entry, it will cause
+** a hard failure, ie. MCA. Version 3.0 and later of the zx1 LBA should
+** disconnect on 4k boundaries and prevent such issues. If the device is
+** particularly agressive, this option will keep the entire pdir valid such
+** that prefetching will hit a valid address. This could severely impact
+** error containment, and is therefore off by default. The page that is
+** used for spill-over is poisoned, so that should help debugging somewhat.
+*/
+#undef FULL_VALID_PDIR
+
 #define ENABLE_MARK_CLEAN

 /*
 ** The number of debug flags is a clue - this code is fragile.
 */
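An aside on the FULL_VALID_PDIR idea described in the new comment above: instead of leaving unmapped PDIR slots invalid, every slot can be pointed at one poisoned "spill" page, so a device prefetch past a live mapping still resolves to a valid (if garbage) address instead of raising an MCA. A minimal userspace sketch of that scheme, not the kernel's code — names, sizes, and the virt_to_phys() stand-in are all illustrative:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define IOVP_SIZE   4096u
#define PDIR_SLOTS  8
#define VALID_MASK  0x80000000000000FFULL   /* "valid" bits, as in this diff */

int main(void)
{
    /* One spill page, filled with a recognizable poison pattern. */
    char *spill = aligned_alloc(IOVP_SIZE, IOVP_SIZE);
    for (unsigned off = 0; off < IOVP_SIZE; off += 16)
        memcpy(spill + off, "SBAIOMMU POISON", 16);

    /* Toy pdir: every slot starts out valid and aimed at the spill page. */
    uint64_t pdir[PDIR_SLOTS];
    uint64_t spill_pa = (uintptr_t) spill;   /* stand-in for virt_to_phys() */
    for (int i = 0; i < PDIR_SLOTS; i++)
        pdir[i] = VALID_MASK | spill_pa;

    /* "Unmapping" a slot re-points it at the spill page rather than
     * clearing the valid bit — the #else branch of sba_mark_invalid()
     * below does the same thing under FULL_VALID_PDIR. */
    pdir[3] = VALID_MASK | spill_pa;
    printf("slot 3 -> %#llx (still valid)\n", (unsigned long long) pdir[3]);
    free(spill);
    return 0;
}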
@@ -52,6 +74,10 @@
 #undef DEBUG_LARGE_SG_ENTRIES
 #undef DEBUG_BYPASS

+#if defined(FULL_VALID_PDIR) && defined(ASSERT_PDIR_SANITY)
+#error FULL_VALID_PDIR and ASSERT_PDIR_SANITY are mutually exclusive
+#endif
+
 #define SBA_INLINE	__inline__
 /* #define SBA_INLINE */

@@ -96,12 +122,8 @@
 #define ASSERT(expr)
 #endif

-#define KB(x) ((x) * 1024)
-#define MB(x) (KB (KB (x)))
-#define GB(x) (MB (KB (x)))
-
 /*
-** The number of pdir entries to "free" before issueing
+** The number of pdir entries to "free" before issuing
 ** a read to PCOM register to flush out PCOM writes.
 ** Interacts with allocation granularity (ie 4 or 8 entries
 ** allocated and free'd/purged at a time might make this

@@ -109,30 +131,24 @@
 */
 #define DELAYED_RESOURCE_CNT	16

-#define DEFAULT_DMA_HINT_REG(d)	0
+#define DEFAULT_DMA_HINT_REG	0

-#define ZX1_FUNC_ID_VALUE	((PCI_DEVICE_ID_HP_ZX1_SBA << 16) | PCI_VENDOR_ID_HP)
-#define ZX1_MC_ID	((PCI_DEVICE_ID_HP_ZX1_MC << 16) | PCI_VENDOR_ID_HP)
-
-#define SBA_FUNC_ID	0x0000	/* function id */
-#define SBA_FCLASS	0x0008	/* function class, bist, header, rev... */
-
-#define SBA_FUNC_SIZE	0x10000	/* SBA configuration function reg set */
-
-unsigned int __initdata zx1_func_offsets[] = {0x1000, 0x4000, 0x8000,
-					      0x9000, 0xa000, -1};
-
-#define SBA_IOC_OFFSET	0x1000
-
-#define MAX_IOC	1	/* we only have 1 for now*/
-
+#define ZX1_IOC_ID	((PCI_DEVICE_ID_HP_ZX1_IOC << 16) | PCI_VENDOR_ID_HP)
+#define REO_IOC_ID	((PCI_DEVICE_ID_HP_REO_IOC << 16) | PCI_VENDOR_ID_HP)
+#define SX1000_IOC_ID	((PCI_DEVICE_ID_HP_SX1000_IOC << 16) | PCI_VENDOR_ID_HP)
+
+#define ZX1_IOC_OFFSET	0x1000	/* ACPI reports SBA, we want IOC */
+
-#define IOC_FUNC_ID	0x000
-#define IOC_FCLASS	0x008	/* function class, bist, header, rev... */
+#define IOC_FUNC_ID	0x000
+#define IOC_FCLASS	0x008	/* function class, bist, header, rev... */
 #define IOC_IBASE	0x300	/* IO TLB */
 #define IOC_IMASK	0x308
 #define IOC_PCOM	0x310
 #define IOC_TCNFG	0x318
 #define IOC_PDIR_BASE	0x320
+
+#define IOC_IOVA_SPACE_BASE	0x40000000	/* IOVA ranges start at 1GB */
+
+/* AGP GART driver looks for this */
+#define ZX1_SBA_IOMMU_COOKIE	0x0000badbadc0ffeeUL

 /*
 ** IOC supports 4/8/16/64KB page sizes (see TCNFG register)

@@ -152,7 +168,7 @@ unsigned int __initdata zx1_func_offsets[] = {0x1000, 0x4000, 0x8000,
 #define IOVP_MASK	PAGE_MASK

 struct ioc {
-	unsigned long	ioc_hpa;	/* I/O MMU base address */
+	void		*ioc_hpa;	/* I/O MMU base address */
 	char		*res_map;	/* resource map, bit == pdir entry */
 	u64		*pdir_base;	/* physical base address */
 	unsigned long	ibase;		/* pdir IOV Space base */

@@ -193,37 +209,32 @@ struct ioc {
 #endif
 #endif

-	/* STUFF We don't need in performance path */
-	unsigned int	pdir_size;	/* in bytes, determined by IOV Space size */
-};
-
-struct sba_device {
-	struct sba_device	*next;	/* list of SBA's in system */
-	const char		*name;
-	unsigned long		sba_hpa;	/* base address */
-	spinlock_t		sba_lock;
-	unsigned int		flags;	/* state/functionality enabled */
-	unsigned int		hw_rev;	/* HW revision of chip */
-	unsigned int		num_ioc;	/* number of on-board IOC's */
-	struct ioc		ioc[MAX_IOC];
-};
-
-static struct sba_device *sba_list;
-static int sba_count;
-static int reserve_sba_gart = 1;
-static struct pci_dev sac_only_dev;
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
 #define sba_sg_address(sg)	(page_address((sg)->page) + (sg)->offset)
-#define sba_sg_len(sg)		(sg->length)
-#define sba_sg_iova(sg)		(sg->dma_address)
-#define sba_sg_iova_len(sg)	(sg->dma_length)
-#else
-#define sba_sg_address(sg)	((sg)->address ? (sg)->address : \
-	page_address((sg)->page) + (sg)->offset)
-#endif
-
-/* REVISIT - fix me for multiple SBAs/IOCs */
-#define GET_IOC(dev)	(sba_list->ioc)
-#define SBA_SET_AGP(sba_dev)	(sba_dev->flags |= 0x1)
-#define SBA_GET_AGP(sba_dev)	(sba_dev->flags & 0x1)
+	/* Stuff we don't need in performance path */
+	struct ioc	*next;		/* list of IOC's in system */
+	acpi_handle	handle;		/* for multiple IOC's */
+	const char	*name;
+	unsigned int	func_id;
+	unsigned int	rev;		/* HW revision of chip */
+	u32		iov_size;
+	unsigned int	pdir_size;	/* in bytes, determined by IOV Space size */
+	struct pci_dev	*sac_only_dev;
+};
+
+static struct ioc *ioc_list;
+static int reserve_sba_gart = 1;
+
+#ifdef FULL_VALID_PDIR
+static u64 prefetch_spill_page;
+#endif
+
+#define GET_IOC(dev)	((struct ioc *) PCI_CONTROLLER(dev)->iommu)

 /*
 ** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up

@@ -232,10 +243,7 @@ static struct pci_dev sac_only_dev;
 ** rather than the HW. I/O MMU allocation alogorithms can be
 ** faster with smaller size is (to some degree).
 */
-#define DMA_CHUNK_SIZE	(BITS_PER_LONG*IOVP_SIZE)
+#define DMA_CHUNK_SIZE	(BITS_PER_LONG*PAGE_SIZE)

-/* Looks nice and keeps the compiler happy */
-#define SBA_DEV(d)	((struct sba_device *) (d))
-
 #define ROUNDUP(x,y)	((x + ((y)-1)) & ~((y)-1))
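The ROUNDUP macro kept above only works when y is a power of two: adding y-1 and masking with ~(y-1) snaps x up to the next multiple. A quick standalone check (values invented for illustration):

#include <assert.h>
#include <stdio.h>

#define ROUNDUP(x,y) ((x + ((y)-1)) & ~((y)-1))

int main(void)
{
    assert(ROUNDUP(0, 4096)    == 0);
    assert(ROUNDUP(1, 4096)    == 4096);
    assert(ROUNDUP(4096, 4096) == 4096);
    assert(ROUNDUP(4097, 4096) == 8192);
    /* With a non-power-of-two y the mask just drops low bits instead
     * of rounding: ROUNDUP(10, 6) yields 10, not 12. */
    printf("ROUNDUP(10, 6) = %d\n", ROUNDUP(10, 6));
    return 0;
}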
@@ -255,7 +263,7 @@ static struct pci_dev sac_only_dev;
 * sba_dump_tlb - debugging only - print IOMMU operating parameters
 * @hpa: base address of the IOMMU
 *
- * Print the size/location of the IO MMU Pdir.
+ * Print the size/location of the IO MMU PDIR.
 */
 static void
 sba_dump_tlb(char *hpa)

@@ -273,19 +281,19 @@ sba_dump_tlb(char *hpa)
 #ifdef ASSERT_PDIR_SANITY

 /**
- * sba_dump_pdir_entry - debugging only - print one IOMMU Pdir entry
+ * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print ont the output line.
 * @pide: pdir index.
 *
- * Print one entry of the IO MMU Pdir in human readable form.
+ * Print one entry of the IO MMU PDIR in human readable form.
 */
 static void
 sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
 {
 	/* start printing from lowest pde in rval */
-	u64 *ptr = &(ioc->pdir_base[pide & ~(BITS_PER_LONG - 1)]);
-	unsigned long *rptr = (unsigned long *) &(ioc->res_map[(pide >> 3) & ~(sizeof(unsigned long) - 1)]);
+	u64 *ptr = &ioc->pdir_base[pide & ~(BITS_PER_LONG - 1)];
+	unsigned long *rptr = (unsigned long *) &ioc->res_map[(pide >> 3) & -sizeof(unsigned long)];
 	uint rcnt;

 	printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",

@@ -296,7 +304,7 @@ sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
 		printk(KERN_DEBUG "%s %2d %p %016Lx\n",
 		       (rcnt == (pide & (BITS_PER_LONG - 1)))
 		       ? "    -->" : "       ",
-		       rcnt, ptr, *ptr);
+		       rcnt, ptr, (unsigned long long) *ptr);
 		rcnt++;
 		ptr++;
 	}

@@ -359,17 +367,18 @@ sba_check_pdir(struct ioc *ioc, char *msg)
 * print the SG list so we can verify it's correct by hand.
 */
 static void
 sba_dump_sg(struct ioc *ioc, struct scatterlist *startsg, int nents)
 {
 	while (nents-- > 0) {
-		printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents,
-		       (unsigned long) sba_sg_iova(startsg), sba_sg_iova_len(startsg),
-		       sba_sg_address(startsg));
+		printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents,
+		       startsg->dma_address, startsg->dma_length,
+		       sba_sg_address(startsg));
 		startsg++;
 	}
 }

 static void
 sba_check_sg(struct ioc *ioc, struct scatterlist *startsg, int nents)
 {
 	struct scatterlist *the_sg = startsg;
 	int the_nents = nents;

@@ -398,9 +407,11 @@ sba_check_sg(struct ioc *ioc, struct scatterlist *startsg, int nents)
 #define PAGES_PER_RANGE 1	/* could increase this to 4 or 8 if needed */

 /* Convert from IOVP to IOVA and vice versa. */
-#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((ioc->ibase) | (iovp) | (offset) | ((hint_reg)<<(ioc->hint_shift_pdir)))
+#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((ioc->ibase) | (iovp) | (offset) | \
+					    ((hint_reg)<<(ioc->hint_shift_pdir)))
 #define SBA_IOVP(ioc,iova) (((iova) & ioc->hint_mask_pdir) & ~(ioc->ibase))

+/* FIXME : review these macros to verify correctness and usage */
 #define PDIR_INDEX(iovp)   ((iovp)>>IOVP_SHIFT)

 #define RESMAP_MASK(n)    ~(~0UL << (n))
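The SBA_IOVA/SBA_IOVP pair above simply composes and decomposes a DMA address as window-base | page-offset-in-window | byte-offset (the hint field is unused, per the "DMA HINTs not used" notes later in this diff). A toy round-trip with made-up window values and the hint masking simplified away:

#include <stdio.h>
#include <stdint.h>

#define IOVP_SHIFT 12
#define IOVP_MASK  (~((1UL << IOVP_SHIFT) - 1))

int main(void)
{
    uint64_t ibase  = 0x40000000;          /* window base, as from IOC_IBASE */
    uint64_t pide   = 5;                   /* allocated pdir index */
    uint64_t iovp   = pide << IOVP_SHIFT;  /* page-aligned offset in window */
    uint64_t offset = 0x123;               /* byte offset within the page */

    uint64_t iova = ibase | iovp | offset; /* SBA_IOVA(ioc, iovp, offset, 0) */
    printf("iova = %#llx\n", (unsigned long long) iova);

    /* SBA_IOVP inverts it: strip the base, keep the page-aligned part. */
    uint64_t back = (iova & ~ibase) & IOVP_MASK;
    printf("pdir index = %llu\n", (unsigned long long) (back >> IOVP_SHIFT));
    return 0;
}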
@@ -408,7 +419,7 @@ sba_check_sg(struct ioc *ioc, struct scatterlist *startsg, int nents)
 /**
- * sba_search_bitmap - find free space in IO Pdir resource bitmap
+ * sba_search_bitmap - find free space in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @bits_wanted: number of entries we need.
 *

@@ -445,7 +456,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted)
 		** We need the alignment to invalidate I/O TLB using
 		** SBA HW features in the unmap path.
 		*/
-		unsigned long o = 1UL << get_order(bits_wanted << IOVP_SHIFT);
+		unsigned long o = 1 << get_order(bits_wanted << PAGE_SHIFT);
 		uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o);
 		unsigned long mask;

@@ -491,7 +502,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted)
 /**
- * sba_alloc_range - find free bits and mark them in IO Pdir resource bitmap
+ * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @size: number of bytes to create a mapping for
 *

@@ -520,7 +531,8 @@ sba_alloc_range(struct ioc *ioc, size_t size)
 	if (pide >= (ioc->res_size << 3)) {
 		pide = sba_search_bitmap(ioc, pages_needed);
 		if (pide >= (ioc->res_size << 3))
-			panic(__FILE__ ": I/O MMU @ %lx is out of mapping resources\n", ioc->ioc_hpa);
+			panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
+			      ioc->ioc_hpa);
 	}

 #ifdef ASSERT_PDIR_SANITY

@@ -553,7 +565,7 @@ sba_alloc_range(struct ioc *ioc, size_t size)
 /**
- * sba_free_range - unmark bits in IO Pdir resource bitmap
+ * sba_free_range - unmark bits in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova: IO virtual address which was previously allocated.
 * @size: number of bytes to create a mapping for

@@ -600,14 +612,14 @@ sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
 /**
- * sba_io_pdir_entry - fill in one IO Pdir entry
- * @pdir_ptr:  pointer to IO Pdir entry
- * @phys_page: phys CPU address of page to map
+ * sba_io_pdir_entry - fill in one IO PDIR entry
+ * @pdir_ptr:  pointer to IO PDIR entry
+ * @vba: Virtual CPU address of buffer to map
 *
 * SBA Mapping Routine
 *
- * Given a physical address (phys_page, arg1) sba_io_pdir_entry()
- * loads the I/O Pdir entry pointed to by pdir_ptr (arg0).
+ * Given a virtual address (vba, arg1) sba_io_pdir_entry()
+ * loads the I/O PDIR entry pointed to by pdir_ptr (arg0).
 * Each IO Pdir entry consists of 8 bytes as shown below
 * (LSB == bit 0):
 *

@@ -619,12 +631,21 @@ sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
 * V  == Valid Bit
 * U  == Unused
 * PPN == Physical Page Number
+ *
+ * The physical address fields are filled with the results of virt_to_phys()
+ * on the vba.
 */

-#define SBA_VALID_MASK	0x80000000000000FFULL
-#define sba_io_pdir_entry(pdir_ptr, phys_page) *pdir_ptr = (phys_page | SBA_VALID_MASK)
-#define sba_io_page(pdir_ptr) (*pdir_ptr & ~SBA_VALID_MASK)
+#if 1
+#define sba_io_pdir_entry(pdir_ptr, vba) *pdir_ptr = ((vba & ~0xE000000000000FFFULL) \
+						      | 0x8000000000000000ULL)
+#else
+void SBA_INLINE
+sba_io_pdir_entry(u64 *pdir_ptr, unsigned long vba)
+{
+	*pdir_ptr = ((vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL);
+}
+#endif
 #ifdef ENABLE_MARK_CLEAN
 /**

@@ -640,7 +661,7 @@ mark_clean (void *addr, size_t size)
 	pg_addr = PAGE_ALIGN((unsigned long) addr);
 	end = (unsigned long) addr + size;
 	while (pg_addr + PAGE_SIZE <= end) {
-		struct page *page = virt_to_page(pg_addr);
+		struct page *page = virt_to_page((void *)pg_addr);
 		set_bit(PG_arch_1, &page->flags);
 		pg_addr += PAGE_SIZE;
 	}

@@ -648,12 +669,12 @@ mark_clean (void *addr, size_t size)
 #endif

 /**
- * sba_mark_invalid - invalidate one or more IO Pdir entries
+ * sba_mark_invalid - invalidate one or more IO PDIR entries
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova:  IO Virtual Address mapped earlier
 * @byte_cnt:  number of bytes this mapping covers.
 *
- * Marking the IO Pdir entry(ies) as Invalid and invalidate
+ * Marking the IO PDIR entry(ies) as Invalid and invalidate
 * corresponding IO TLB entry. The PCOM (Purge Command Register)
 * is to purge stale entries in the IO TLB when unmapping entries.
 *

@@ -687,15 +708,24 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
 		iovp |= IOVP_SHIFT;	/* set "size" field for PCOM */

+#ifndef FULL_VALID_PDIR
 		/*
-		** clear I/O Pdir entry "valid" bit
+		** clear I/O PDIR entry "valid" bit
 		** Do NOT clear the rest - save it for debugging.
 		** We should only clear bits that have previously
 		** been enabled.
 		*/
-		ioc->pdir_base[off] &= ~SBA_VALID_MASK;
+		ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
+#else
+		/*
+		** If we want to maintain the PDIR as valid, put in
+		** the spill page so devices prefetching won't
+		** cause a hard fail.
+		*/
+		ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
+#endif
 	} else {
-		u32 t = get_order(byte_cnt) + IOVP_SHIFT;
+		u32 t = get_order(byte_cnt) + PAGE_SHIFT;

 		iovp |= t;
 		ASSERT(t <= 31);	/* 2GB! Max value of "size" field */

@@ -703,14 +733,18 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
 		do {
 			/* verify this pdir entry is enabled */
 			ASSERT(ioc->pdir_base[off] >> 63);
+#ifndef FULL_VALID_PDIR
 			/* clear I/O Pdir entry "valid" bit first */
-			ioc->pdir_base[off] &= ~SBA_VALID_MASK;
+			ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
+#else
+			ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
+#endif
 			off++;
 			byte_cnt -= IOVP_SIZE;
 		} while (byte_cnt > 0);
 	}

-	WRITE_REG(iovp, ioc->ioc_hpa + IOC_PCOM);
+	WRITE_REG(iovp | ioc->ibase, ioc->ioc_hpa + IOC_PCOM);
 }

 /**

@@ -732,12 +766,9 @@ sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
 	u64 *pdir_start;
 	int pide;
 #ifdef ALLOW_IOV_BYPASS
-	unsigned long phys_addr = virt_to_phys(addr);
+	unsigned long pci_addr = virt_to_phys(addr);
 #endif

-	if (!sba_list)
-		panic("sba_map_single: no SBA found!\n");
-
 	ioc = GET_IOC(dev);
 	ASSERT(ioc);

@@ -745,7 +776,7 @@ sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
 	/*
 	** Check if the PCI device can DMA to ptr... if so, just return ptr
 	*/
-	if ((phys_addr & ~dev->dma_mask) == 0) {
+	if ((pci_addr & ~dev->dma_mask) == 0) {
 		/*
 		** Device is bit capable of DMA'ing to the buffer...
 		** just return the PCI address of ptr

@@ -756,8 +787,8 @@ sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
 		spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
 		DBG_BYPASS("sba_map_single() bypass mask/addr: 0x%lx/0x%lx\n",
-			   dev->dma_mask, phys_addr);
-		return phys_addr;
+			   dev->dma_mask, pci_addr);
+		return pci_addr;
 	}
 #endif

@@ -790,8 +821,7 @@ sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
 	while (size > 0) {
 		ASSERT(((u8 *)pdir_start)[7] == 0); /* verify availability */
-		sba_io_pdir_entry(pdir_start, virt_to_phys(addr));
+		sba_io_pdir_entry(pdir_start, (unsigned long) addr);

 		DBG_RUN(" pdir 0x%p %lx\n", pdir_start, *pdir_start);

@@ -799,12 +829,15 @@ sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
 		size -= IOVP_SIZE;
 		pdir_start++;
 	}
+	/* force pdir update */
+	wmb();
+
 	/* form complete address */
 #ifdef ASSERT_PDIR_SANITY
 	sba_check_pdir(ioc, "Check after sba_map_single()");
 #endif
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
-	return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG(direction));
+	return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG);
 }
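The bypass path above is a single mask test: if no physical-address bit falls outside the device's dma_mask, the device can reach the buffer directly and no IOMMU entry is needed. A toy standalone version, with invented device masks:

#include <stdio.h>
#include <stdint.h>

static int can_bypass(uint64_t phys, uint64_t dma_mask)
{
    return (phys & ~dma_mask) == 0;
}

int main(void)
{
    uint64_t mask32 = 0xFFFFFFFFull;   /* 32-bit-only device */
    uint64_t mask64 = ~0ull;           /* full 64-bit DMA */

    printf("%d\n", can_bypass(0x00000000F0000000ull, mask32)); /* 1 */
    printf("%d\n", can_bypass(0x0000000100000000ull, mask32)); /* 0 */
    printf("%d\n", can_bypass(0x0000000100000000ull, mask64)); /* 1 */
    return 0;
}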
 /**

@@ -826,9 +859,6 @@ void sba_unmap_single(struct pci_dev *dev, dma_addr_t iova, size_t size,
 	unsigned long flags;
 	dma_addr_t offset;

-	if (!sba_list)
-		panic("sba_map_single: no SBA found!\n");
-
 	ioc = GET_IOC(dev);
 	ASSERT(ioc);

@@ -861,29 +891,6 @@ void sba_unmap_single(struct pci_dev *dev, dma_addr_t iova, size_t size,
 	size += offset;
 	size = ROUNDUP(size, IOVP_SIZE);

-#ifdef ENABLE_MARK_CLEAN
-	/*
-	** Don't need to hold the spinlock while telling VM pages are "clean".
-	** The pages are "busy" in the resource map until we mark them free.
-	** But tell VM pages are clean *before* releasing the resource
-	** in order to avoid race conditions.
-	*/
-	if (direction == PCI_DMA_FROMDEVICE) {
-		u32 iovp = (u32) SBA_IOVP(ioc, iova);
-		unsigned int pide = PDIR_INDEX(iovp);
-		u64 *pdirp = &(ioc->pdir_base[pide]);
-		size_t byte_cnt = size;
-		void *addr;
-
-		do {
-			addr = phys_to_virt(sba_io_page(pdirp));
-			mark_clean(addr, min(byte_cnt, IOVP_SIZE));
-			pdirp++;
-			byte_cnt -= IOVP_SIZE;
-		} while (byte_cnt > 0);
-	}
-#endif
-
 	spin_lock_irqsave(&ioc->res_lock, flags);
 #ifdef CONFIG_PROC_FS
 	ioc->usingle_calls++;

@@ -909,7 +916,40 @@ void sba_unmap_single(struct pci_dev *dev, dma_addr_t iova, size_t size,
 	sba_free_range(ioc, iova, size);
 	READ_REG(ioc->ioc_hpa + IOC_PCOM);	/* flush purges */
 #endif /* DELAYED_RESOURCE_CNT == 0 */
+#ifdef ENABLE_MARK_CLEAN
+	if (direction == PCI_DMA_FROMDEVICE) {
+		u32 iovp = (u32) SBA_IOVP(ioc, iova);
+		int off = PDIR_INDEX(iovp);
+		void *addr;
+
+		if (size <= IOVP_SIZE) {
+			addr = phys_to_virt(ioc->pdir_base[off] &
+					    ~0xE000000000000FFFULL);
+			mark_clean(addr, size);
+		} else {
+			size_t byte_cnt = size;
+
+			do {
+				addr = phys_to_virt(ioc->pdir_base[off] &
+						    ~0xE000000000000FFFULL);
+				mark_clean(addr, min(byte_cnt, IOVP_SIZE));
+				off++;
+				byte_cnt -= IOVP_SIZE;
+			} while (byte_cnt > 0);
+		}
+	}
+#endif
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
+
+	/* XXX REVISIT for 2.5 Linux - need syncdma for zero-copy support.
+	** For Astro based systems this isn't a big deal WRT performance.
+	** As long as 2.4 kernels copyin/copyout data from/to userspace,
+	** we don't need the syncdma. The issue here is I/O MMU cachelines
+	** are *not* coherent in all cases.  May be hwrev dependent.
+	** Need to investigate more.
+	asm volatile("syncdma");
+	*/
 }

@@ -924,6 +964,7 @@ void sba_unmap_single(struct pci_dev *dev, dma_addr_t iova, size_t size,
 void *
 sba_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle)
 {
+	struct ioc *ioc;
 	void *ret;

 	if (!hwdev) {

@@ -941,7 +982,8 @@ sba_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle)
 		 * than dma_mask from the device, this needs to be
 		 * updated.
 		 */
-		*dma_handle = sba_map_single(&sac_only_dev, ret, size, 0);
+		ioc = GET_IOC(hwdev);
+		*dma_handle = sba_map_single(ioc->sac_only_dev, ret, size, 0);
 	}

 	return ret;
@@ -965,109 +1007,238 @@ void sba_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr,
 }

+/*
+** Since 0 is a valid pdir_base index value, can't use that
+** to determine if a value is valid or not. Use a flag to indicate
+** the SG list entry contains a valid pdir index.
+*/
+#define PIDE_FLAG 0x1UL
+
 #ifdef DEBUG_LARGE_SG_ENTRIES
 int dump_run_sg = 0;
 #endif

-#define SG_ENT_VIRT_PAGE(sg) page_address((sg)->page)
-#define SG_ENT_PHYS_PAGE(SG) virt_to_phys(SG_ENT_VIRT_PAGE(SG))
-
+/**
+ * sba_fill_pdir - write allocated SG entries into IO PDIR
+ * @ioc: IO MMU structure which owns the pdir we are interested in.
+ * @startsg:  list of IOVA/size pairs
+ * @nents: number of entries in startsg list
+ *
+ * Take preprocessed SG list and write corresponding entries
+ * in the IO PDIR.
+ */
+static SBA_INLINE int
+sba_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents)
+{
+	struct scatterlist *dma_sg = startsg;	/* pointer to current DMA */
+	int n_mappings = 0;
+	u64 *pdirp = 0;
+	unsigned long dma_offset = 0;
+
+	dma_sg--;
+	while (nents-- > 0) {
+		int cnt = startsg->dma_length;
+		startsg->dma_length = 0;
+
+#ifdef DEBUG_LARGE_SG_ENTRIES
+		if (dump_run_sg)
+			printk(" %2d : %08lx/%05x %p\n",
+			       nents, startsg->dma_address, cnt,
+			       sba_sg_address(startsg));
+#else
+		DBG_RUN_SG(" %d : %08lx/%05x %p\n",
+			   nents, startsg->dma_address, cnt,
+			   sba_sg_address(startsg));
+#endif
+		/*
+		** Look for the start of a new DMA stream
+		*/
+		if (startsg->dma_address & PIDE_FLAG) {
+			u32 pide = startsg->dma_address & ~PIDE_FLAG;
+			dma_offset = (unsigned long) pide & ~IOVP_MASK;
+			startsg->dma_address = 0;
+			dma_sg++;
+			dma_sg->dma_address = pide | ioc->ibase;
+			pdirp = &(ioc->pdir_base[pide >> IOVP_SHIFT]);
+			n_mappings++;
+		}
+
+		/*
+		** Look for a VCONTIG chunk
+		*/
+		if (cnt) {
+			unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
+			ASSERT(pdirp);
+
+			/* Since multiple Vcontig blocks could make up
+			** one DMA stream, *add* cnt to dma_len.
+			*/
+			dma_sg->dma_length += cnt;
+			cnt += dma_offset;
+			dma_offset = 0;	/* only want offset on first chunk */
+			cnt = ROUNDUP(cnt, IOVP_SIZE);
+#ifdef CONFIG_PROC_FS
+			ioc->msg_pages += cnt >> IOVP_SHIFT;
+#endif
+			do {
+				sba_io_pdir_entry(pdirp, vaddr);
+				vaddr += IOVP_SIZE;
+				cnt -= IOVP_SIZE;
+				pdirp++;
+			} while (cnt > 0);
+		}
+		startsg++;
+	}
+	/* force pdir update */
+	wmb();
+
+#ifdef DEBUG_LARGE_SG_ENTRIES
+	dump_run_sg = 0;
+#endif
+	return (n_mappings);
+}
+
+/*
+** Two address ranges are DMA contiguous *iff* "end of prev" and
+** "start of next" are both on a page boundry.
+**
+** (shift left is a quick trick to mask off upper bits)
+*/
+#define DMA_CONTIG(__X, __Y) \
+	(((((unsigned long) __X) | ((unsigned long) __Y)) << (BITS_PER_LONG - PAGE_SHIFT)) == 0UL)
+
 /**
 * sba_coalesce_chunks - preprocess the SG list
 * @ioc: IO MMU structure which owns the pdir we are interested in.
- * @startsg:  input=SG list	output=DMA addr/len pairs filled in
+ * @startsg:  list of IOVA/size pairs
 * @nents: number of entries in startsg list
- * @direction:  R/W or both.
 *
- * Walk the SG list and determine where the breaks are in the DMA stream.
- * Allocate IO Pdir resources and fill them in separate loop.
- * Returns the number of DMA streams used for output IOVA list.
- * Note each DMA stream can consume multiple IO Pdir entries.
- *
- * Code is written assuming some coalescing is possible.
+ * First pass is to walk the SG list and determine where the breaks are
+ * in the DMA stream. Allocates PDIR entries but does not fill them.
+ * Returns the number of DMA chunks.
+ *
+ * Doing the fill seperate from the coalescing/allocation keeps the
+ * code simpler. Future enhancement could make one pass through
+ * the sglist do both.
 */
 static SBA_INLINE int
-sba_coalesce_chunks(struct ioc *ioc, struct scatterlist *startsg,
-		    int nents, int direction)
-{
-	struct scatterlist *dma_sg = startsg;	/* return array */
-	int n_mappings = 0;
-
-	ASSERT(nents > 1);
-
-	do {
-		unsigned int dma_cnt = 1;	/* number of pages in DMA stream */
-		unsigned int pide;	/* index into IO Pdir array */
-		u64 *pdirp;		/* pointer into IO Pdir array */
-		unsigned long dma_offset, dma_len;	/* cumulative DMA stream */
-
-		/*
-		** Prepare for first/next DMA stream
-		*/
-		dma_len = sba_sg_len(startsg);
-		dma_offset = (unsigned long) sba_sg_address(startsg);
-		startsg++;
-		nents--;
-
-		/*
-		** We want to know how many entries can be coalesced
-		** before trying to allocate IO Pdir space.
-		** IOVAs can then be allocated "naturally" aligned
-		** to take advantage of the block IO TLB flush.
-		*/
-		while (nents) {
-			unsigned long end_offset = dma_offset + dma_len;
-
-			/* prev entry must end on a page boundary */
-			if (end_offset & IOVP_MASK)
-				break;
-
-			/* next entry start on a page boundary? */
-			if (startsg->offset)
-				break;
-
-			/*
-			** make sure current dma stream won't exceed
-			** DMA_CHUNK_SIZE if coalescing entries.
-			*/
-			if (((end_offset + startsg->length + ~IOVP_MASK)
-			     & IOVP_MASK) > DMA_CHUNK_SIZE)
-				break;
-
-			dma_len += sba_sg_len(startsg);
-			startsg++;
-			nents--;
-			dma_cnt++;
-		}
-
-		ASSERT(dma_len <= DMA_CHUNK_SIZE);
-
-		/* allocate IO Pdir resource.
-		** returns index into (u64) IO Pdir array.
-		** IOVA is formed from this.
-		*/
-		pide = sba_alloc_range(ioc, dma_cnt << IOVP_SHIFT);
-		pdirp = &(ioc->pdir_base[pide]);
-
-		/* fill_pdir: write stream into IO Pdir */
-		while (dma_cnt--) {
-			sba_io_pdir_entry(pdirp, SG_ENT_PHYS_PAGE(startsg));
-			startsg++;
-			pdirp++;
-		}
-
-		/* "output" IOVA */
-		sba_sg_iova(dma_sg) = SBA_IOVA(ioc,
-					       ((dma_addr_t) pide << IOVP_SHIFT),
-					       dma_offset,
-					       DEFAULT_DMA_HINT_REG(direction));
-		sba_sg_iova_len(dma_sg) = dma_len;
-
-		dma_sg++;
-		n_mappings++;
-	} while (nents);
-
-	return n_mappings;
-}
+sba_coalesce_chunks(struct ioc *ioc,
+		    struct scatterlist *startsg,
+		    int nents)
+{
+	struct scatterlist *vcontig_sg;	   /* VCONTIG chunk head */
+	unsigned long vcontig_len;	   /* len of VCONTIG chunk */
+	unsigned long vcontig_end;
+	struct scatterlist *dma_sg;	   /* next DMA stream head */
+	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
+	int n_mappings = 0;
+
+	while (nents > 0) {
+		unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
+
+		/*
+		** Prepare for first/next DMA stream
+		*/
+		dma_sg = vcontig_sg = startsg;
+		dma_len = vcontig_len = vcontig_end = startsg->length;
+		vcontig_end += vaddr;
+		dma_offset = vaddr & ~IOVP_MASK;
+
+		/* PARANOID: clear entries */
+		startsg->dma_address = startsg->dma_length = 0;
+
+		/*
+		** This loop terminates one iteration "early" since
+		** it's always looking one "ahead".
+		*/
+		while (--nents > 0) {
+			unsigned long vaddr;	/* tmp */
+
+			startsg++;
+
+			/* PARANOID */
+			startsg->dma_address = startsg->dma_length = 0;
+
+			/* catch brokenness in SCSI layer */
+			ASSERT(startsg->length <= DMA_CHUNK_SIZE);
+
+			/*
+			** First make sure current dma stream won't
+			** exceed DMA_CHUNK_SIZE if we coalesce the
+			** next entry.
+			*/
+			if (((dma_len + dma_offset + startsg->length + ~IOVP_MASK) & IOVP_MASK)
+			    > DMA_CHUNK_SIZE)
+				break;
+
+			/*
+			** Then look for virtually contiguous blocks.
+			**
+			** append the next transaction?
+			*/
+			vaddr = (unsigned long) sba_sg_address(startsg);
+			if (vcontig_end == vaddr)
+			{
+				vcontig_len += startsg->length;
+				vcontig_end += startsg->length;
+				dma_len     += startsg->length;
+				continue;
+			}
+
+#ifdef DEBUG_LARGE_SG_ENTRIES
+			dump_run_sg = (vcontig_len > IOVP_SIZE);
+#endif
+
+			/*
+			** Not virtually contigous.
+			** Terminate prev chunk.
+			** Start a new chunk.
+			**
+			** Once we start a new VCONTIG chunk, dma_offset
+			** can't change. And we need the offset from the first
+			** chunk - not the last one. Ergo Successive chunks
+			** must start on page boundaries and dove tail
+			** with it's predecessor.
+			*/
+			vcontig_sg->dma_length = vcontig_len;
+
+			vcontig_sg = startsg;
+			vcontig_len = startsg->length;
+
+			/*
+			** 3) do the entries end/start on page boundaries?
+			**    Don't update vcontig_end until we've checked.
+			*/
+			if (DMA_CONTIG(vcontig_end, vaddr))
+			{
+				vcontig_end = vcontig_len + vaddr;
+				dma_len += vcontig_len;
+				continue;
+			} else {
+				break;
+			}
+		}
+
+		/*
+		** End of DMA Stream
+		** Terminate last VCONTIG block.
+		** Allocate space for DMA stream.
+		*/
+		vcontig_sg->dma_length = vcontig_len;
+		dma_len = (dma_len + dma_offset + ~IOVP_MASK) & IOVP_MASK;
+		ASSERT(dma_len <= DMA_CHUNK_SIZE);
+		dma_sg->dma_address = (dma_addr_t) (PIDE_FLAG
+			| (sba_alloc_range(ioc, dma_len) << IOVP_SHIFT)
+			| dma_offset);
+		n_mappings++;
+	}
+
+	return n_mappings;
+}
@@ -1075,60 +1246,52 @@ sba_coalesce_chunks(struct ioc *ioc, struct scatterlist *startsg,
...
@@ -1075,60 +1246,52 @@ sba_coalesce_chunks(struct ioc *ioc, struct scatterlist *startsg,
/**
/**
* sba_map_sg - map Scatter/Gather list
* sba_map_sg - map Scatter/Gather list
* @dev: instance of PCI
device
owned by the driver that's asking.
* @dev: instance of PCI owned by the driver that's asking.
* @sglist: array of buffer/length pairs
* @sglist: array of buffer/length pairs
* @nents: number of entries in list
* @nents: number of entries in list
* @direction: R/W or both.
* @direction: R/W or both.
*
*
* See Documentation/DMA-mapping.txt
* See Documentation/DMA-mapping.txt
*/
*/
int
sba_map_sg
(
struct
pci_dev
*
dev
,
struct
scatterlist
*
sglist
,
int
nents
,
int
sba_map_sg
(
struct
pci_dev
*
dev
,
struct
scatterlist
*
sglist
,
int
nents
,
int
direction
)
int
direction
)
{
{
struct
ioc
*
ioc
;
struct
ioc
*
ioc
;
int
filled
=
0
;
int
coalesced
,
filled
=
0
;
unsigned
long
flags
;
unsigned
long
flags
;
#ifdef ALLOW_IOV_BYPASS
#ifdef ALLOW_IOV_BYPASS
struct
scatterlist
*
sg
;
struct
scatterlist
*
sg
;
#endif
#endif
DBG_RUN_SG
(
"%s() START %d entries, 0x%p,0x%x
\n
"
,
__FUNCTION__
,
nents
,
DBG_RUN_SG
(
"%s() START %d entries
\n
"
,
__FUNCTION__
,
nents
);
sba_sg_address
(
sglist
),
sba_sg_len
(
sglist
));
if
(
!
sba_list
)
panic
(
"sba_map_single: no SBA found!
\n
"
);
ioc
=
GET_IOC
(
dev
);
ioc
=
GET_IOC
(
dev
);
ASSERT
(
ioc
);
ASSERT
(
ioc
);
#ifdef ALLOW_IOV_BYPASS
#ifdef ALLOW_IOV_BYPASS
if
(
dev
->
dma_mask
>=
ioc
->
dma_mask
)
{
if
(
dev
->
dma_mask
>=
ioc
->
dma_mask
)
{
for
(
sg
=
sglist
;
filled
<
nents
;
filled
++
,
sg
++
)
{
for
(
sg
=
sglist
;
filled
<
nents
;
filled
++
,
sg
++
){
s
ba_sg_iova
(
sg
)
=
virt_to_phys
(
sba_sg_address
(
sg
))
;
s
g
->
dma_length
=
sg
->
length
;
s
ba_sg_iova_len
(
sg
)
=
sba_sg_len
(
sg
);
s
g
->
dma_address
=
virt_to_phys
(
sba_sg_address
(
sg
)
);
}
}
#ifdef CONFIG_PROC_FS
#ifdef CONFIG_PROC_FS
spin_lock_irqsave
(
&
ioc
->
res_lock
,
flags
);
spin_lock_irqsave
(
&
ioc
->
res_lock
,
flags
);
ioc
->
msg_bypass
++
;
ioc
->
msg_bypass
++
;
spin_unlock_irqrestore
(
&
ioc
->
res_lock
,
flags
);
spin_unlock_irqrestore
(
&
ioc
->
res_lock
,
flags
);
#endif
#endif
DBG_RUN_SG
(
"%s() DONE %d mappings bypassed
\n
"
,
__FUNCTION__
,
filled
);
return
filled
;
return
filled
;
}
}
#endif
#endif
/* Fast path single entry scatterlists. */
/* Fast path single entry scatterlists. */
if
(
nents
==
1
)
{
if
(
nents
==
1
)
{
s
ba_sg_iova
(
sglist
)
=
sba_map_single
(
dev
,
s
glist
->
dma_length
=
sglist
->
length
;
(
void
*
)
sba_sg_iova
(
sglist
)
,
sglist
->
dma_address
=
sba_map_single
(
dev
,
sba_sg_len
(
sglist
),
direction
);
sba_sg_address
(
sglist
),
sba_sg_iova_len
(
sglist
)
=
sba_sg_len
(
sglist
);
sglist
->
length
,
direction
);
#ifdef CONFIG_PROC_FS
#ifdef CONFIG_PROC_FS
/*
/*
** Should probably do some stats counting, but trying to
** Should probably do some stats counting, but trying to
** be precise quickly starts wasting CPU time.
** be precise quickly starts wasting CPU time.
*/
*/
#endif
#endif
DBG_RUN_SG
(
"%s() DONE 1 mapping
\n
"
,
__FUNCTION__
);
return
1
;
return
1
;
}
}
...
@@ -1147,9 +1310,24 @@ int sba_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
...
@@ -1147,9 +1310,24 @@ int sba_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
#endif
#endif
/*
/*
** coalesce and program the I/O Pdir
** First coalesce the chunks and allocate I/O pdir space
**
** If this is one DMA stream, we can properly map using the
** correct virtual address associated with each DMA page.
** w/o this association, we wouldn't have coherent DMA!
** Access to the virtual address is what forces a two pass algorithm.
*/
*/
filled
=
sba_coalesce_chunks
(
ioc
,
sglist
,
nents
,
direction
);
coalesced
=
sba_coalesce_chunks
(
ioc
,
sglist
,
nents
);
/*
** Program the I/O Pdir
**
** map the virtual addresses to the I/O Pdir
** o dma_address will contain the pdir index
** o dma_len will contain the number of bytes to map
** o address contains the virtual address.
*/
filled
=
sba_fill_pdir
(
ioc
,
sglist
,
nents
);
#ifdef ASSERT_PDIR_SANITY
#ifdef ASSERT_PDIR_SANITY
if
(
sba_check_pdir
(
ioc
,
"Check after sba_map_sg()"
))
if
(
sba_check_pdir
(
ioc
,
"Check after sba_map_sg()"
))
...
@@ -1161,6 +1339,7 @@ int sba_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
...
@@ -1161,6 +1339,7 @@ int sba_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
spin_unlock_irqrestore
(
&
ioc
->
res_lock
,
flags
);
spin_unlock_irqrestore
(
&
ioc
->
res_lock
,
flags
);
ASSERT
(
coalesced
==
filled
);
DBG_RUN_SG
(
"%s() DONE %d mappings
\n
"
,
__FUNCTION__
,
filled
);
DBG_RUN_SG
(
"%s() DONE %d mappings
\n
"
,
__FUNCTION__
,
filled
);
return
filled
;
return
filled
;
...
@@ -1184,11 +1363,8 @@ void sba_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
...
@@ -1184,11 +1363,8 @@ void sba_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
unsigned
long
flags
;
unsigned
long
flags
;
#endif
#endif
DBG_RUN_SG
(
"%s() START %d entries, 0x%p,0x%x
\n
"
,
DBG_RUN_SG
(
"%s() START %d entries, %p,%x
\n
"
,
__FUNCTION__
,
nents
,
sba_sg_address
(
sglist
),
sba_sg_len
(
sglist
));
__FUNCTION__
,
nents
,
sba_sg_address
(
sglist
),
sglist
->
length
);
if
(
!
sba_list
)
panic
(
"sba_map_single: no SBA found!
\n
"
);
ioc
=
GET_IOC
(
dev
);
ioc
=
GET_IOC
(
dev
);
ASSERT
(
ioc
);
ASSERT
(
ioc
);
...
@@ -1203,10 +1379,10 @@ void sba_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
...
@@ -1203,10 +1379,10 @@ void sba_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
spin_unlock_irqrestore
(
&
ioc
->
res_lock
,
flags
);
spin_unlock_irqrestore
(
&
ioc
->
res_lock
,
flags
);
#endif
#endif
while
(
sba_sg_len
(
sglist
)
&&
nents
--
)
{
while
(
nents
&&
sglist
->
dma_length
)
{
sba_unmap_single
(
dev
,
(
dma_addr_t
)
sba_sg_iova
(
sglist
)
,
sba_unmap_single
(
dev
,
sglist
->
dma_address
,
s
ba_sg_iova_len
(
sglist
)
,
direction
);
s
glist
->
dma_length
,
direction
);
#ifdef CONFIG_PROC_FS
#ifdef CONFIG_PROC_FS
/*
/*
** This leaves inconsistent data in the stats, but we can't
** This leaves inconsistent data in the stats, but we can't
...
@@ -1214,9 +1390,11 @@ void sba_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
...
@@ -1214,9 +1390,11 @@ void sba_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
** were coalesced to a single entry. The stats are fun,
** were coalesced to a single entry. The stats are fun,
** but speed is more important.
** but speed is more important.
*/
*/
ioc
->
usg_pages
+=
(((
u64
)
sba_sg_iova
(
sglist
)
&
~
IOVP_MASK
)
+
sba_sg_len
(
sglist
)
+
IOVP_SIZE
-
1
)
>>
IOVP_SHIFT
;
ioc
->
usg_pages
+=
((
sglist
->
dma_address
&
~
IOVP_MASK
)
+
sglist
->
dma_length
+
IOVP_SIZE
-
1
)
>>
PAGE_SHIFT
;
#endif
#endif
++
sglist
;
sglist
++
;
nents
--
;
}
}
DBG_RUN_SG
(
"%s() DONE (nents %d)
\n
"
,
__FUNCTION__
,
nents
);
DBG_RUN_SG
(
"%s() DONE (nents %d)
\n
"
,
__FUNCTION__
,
nents
);
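With this hunk, sba_map_sg() runs the two passes just reconstructed: coalesce virtually contiguous entries into DMA streams, then fill one PDIR slot per page. A toy two-pass version with plain arrays instead of scatterlists — purely illustrative, only the shape of the algorithm mirrors the driver:

#include <stdio.h>

#define PAGE 4096ul

struct seg { unsigned long addr, len, stream_len; };

int main(void)
{
    struct seg sg[3] = {
        { 0x10000, PAGE, 0 },   /* contiguous with the next entry */
        { 0x11000, PAGE, 0 },
        { 0x40000, PAGE, 0 },   /* starts a new stream */
    };
    int n = 3, streams = 0;

    /* pass 1: coalesce — record the total length on each stream head */
    for (int i = 0; i < n; ) {
        int head = i;
        sg[head].stream_len = sg[i].len;
        for (i++; i < n && sg[i-1].addr + sg[i-1].len == sg[i].addr; i++)
            sg[head].stream_len += sg[i].len;
        streams++;
    }

    /* pass 2: fill — one page-table entry per page of each stream
     * (non-head entries have stream_len == 0, so they emit nothing) */
    for (int i = 0; i < n; i++)
        for (unsigned long off = 0; off < sg[i].stream_len; off += PAGE)
            printf("pdir entry for vaddr %#lx\n", sg[i].addr + off);

    printf("%d streams\n", streams);
    return 0;
}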
...
@@ -1229,87 +1407,76 @@ void sba_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
...
@@ -1229,87 +1407,76 @@ void sba_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
}
}
unsigned
long
sba_dma_address
(
struct
scatterlist
*
sg
)
{
return
((
unsigned
long
)
sba_sg_iova
(
sg
));
}
int
sba_dma_supported
(
struct
pci_dev
*
dev
,
u64
mask
)
{
return
1
;
}
/**************************************************************
/**************************************************************
*
*
* Initialization and claim
* Initialization and claim
*
*
***************************************************************/
***************************************************************/
static
void
__init
static
void
ioc_iova_init
(
struct
ioc
*
ioc
)
sba_ioc_init
(
struct
sba_device
*
sba_dev
,
struct
ioc
*
ioc
,
int
ioc_num
)
{
{
u32
iova_space_size
,
iova_space_mask
;
u32
iova_space_mask
;
void
*
pdir_base
;
int
iov_order
,
tcnfg
;
int
pdir_size
,
iov_order
,
tcnfg
;
int
agp_found
=
0
;
struct
pci_dev
*
device
;
#ifdef FULL_VALID_PDIR
unsigned
long
index
;
#endif
/*
/*
** Firmware programs the maximum IOV space size into the imask reg
** Firmware programs the base and size of a "safe IOVA space"
** (one that doesn't overlap memory or LMMIO space) in the
** IBASE and IMASK registers.
*/
*/
iova_space_size
=
~
(
READ_REG
(
ioc
->
ioc_hpa
+
IOC_IMASK
)
&
0xFFFFFFFFUL
)
+
1
;
ioc
->
ibase
=
READ_REG
(
ioc
->
ioc_hpa
+
IOC_IBASE
)
&
~
0x1UL
;
ioc
->
iov_size
=
~
(
READ_REG
(
ioc
->
ioc_hpa
+
IOC_IMASK
)
&
0xFFFFFFFFUL
)
+
1
;
/*
/*
** iov_order is always based on a 1GB IOVA space since we want to
** iov_order is always based on a 1GB IOVA space since we want to
** turn on the other half for AGP GART.
** turn on the other half for AGP GART.
*/
*/
iov_order
=
get_order
(
io
va_space_size
>>
(
IOVP_SHIFT
-
PAGE_SHIFT
));
iov_order
=
get_order
(
io
c
->
iov_size
>>
(
IOVP_SHIFT
-
PAGE_SHIFT
));
ioc
->
pdir_size
=
pdir_size
=
(
iova_space_size
/
IOVP_SIZE
)
*
sizeof
(
u64
);
ioc
->
pdir_size
=
(
ioc
->
iov_size
/
IOVP_SIZE
)
*
sizeof
(
u64
);
DBG_INIT
(
"%s() hpa
0x%lx IOV %dMB (%d bits) PDIR size 0x%0
x
\n
"
,
DBG_INIT
(
"%s() hpa
%p IOV %dMB (%d bits) PDIR size 0x%
x
\n
"
,
__FUNCTION__
,
ioc
->
ioc_hpa
,
iova_space_size
>>
20
,
__FUNCTION__
,
ioc
->
ioc_hpa
,
ioc
->
iov_size
>>
20
,
iov_order
+
PAGE_SHIFT
,
ioc
->
pdir_size
);
iov_order
+
PAGE_SHIFT
,
ioc
->
pdir_size
);
/*
XXX
DMA HINTs not used */
/*
FIXME :
DMA HINTs not used */
ioc
->
hint_shift_pdir
=
iov_order
+
PAGE_SHIFT
;
ioc
->
hint_shift_pdir
=
iov_order
+
PAGE_SHIFT
;
ioc
->
hint_mask_pdir
=
~
(
0x3
<<
(
iov_order
+
PAGE_SHIFT
));
ioc
->
hint_mask_pdir
=
~
(
0x3
<<
(
iov_order
+
PAGE_SHIFT
));
ioc
->
pdir_base
=
pdir_base
=
ioc
->
pdir_base
=
(
void
*
)
__get_free_pages
(
GFP_KERNEL
,
(
void
*
)
__get_free_pages
(
GFP_KERNEL
,
get_order
(
pdir_size
));
get_order
(
ioc
->
pdir_size
));
if
(
NULL
==
pdir_base
)
if
(
!
ioc
->
pdir_base
)
{
panic
(
PFX
"Couldn't allocate I/O Page Table
\n
"
);
panic
(
__FILE__
":%s() could not allocate I/O Page Table
\n
"
,
__FUNCTION__
);
}
memset
(
ioc
->
pdir_base
,
0
,
ioc
->
pdir_size
);
memset
(
pdir_base
,
0
,
pdir_size
);
DBG_INIT
(
"%s() pdir %p size %x hint_shift_pdir %x hint_mask_pdir %lx
\n
"
,
DBG_INIT
(
"%s() pdir %p size %x hint_shift_pdir %x hint_mask_pdir %lx
\n
"
,
__FUNCTION__
,
pdir_base
,
pdir_size
,
__FUNCTION__
,
ioc
->
pdir_base
,
ioc
->
pdir_size
,
ioc
->
hint_shift_pdir
,
ioc
->
hint_mask_pdir
);
ioc
->
hint_shift_pdir
,
ioc
->
hint_mask_pdir
);
ASSERT
((((
unsigned
long
)
pdir_base
)
&
PAGE_MASK
)
==
(
unsigned
long
)
pdir_base
);
ASSERT
((((
unsigned
long
)
ioc
->
pdir_base
)
&
PAGE_MASK
)
==
(
unsigned
long
)
ioc
->
pdir_base
);
WRITE_REG
(
virt_to_phys
(
pdir_base
),
ioc
->
ioc_hpa
+
IOC_PDIR_BASE
);
WRITE_REG
(
virt_to_phys
(
ioc
->
pdir_base
),
ioc
->
ioc_hpa
+
IOC_PDIR_BASE
);
DBG_INIT
(
" base %p
\n
"
,
pdir_base
);
DBG_INIT
(
" base %p
\n
"
,
ioc
->
pdir_base
);
/* build IMASK for IOC and Elroy */
/* build IMASK for IOC and Elroy */
iova_space_mask
=
0xffffffff
;
iova_space_mask
=
0xffffffff
;
iova_space_mask
<<=
(
iov_order
+
IOVP_SHIFT
);
iova_space_mask
<<=
(
iov_order
+
PAGE_SHIFT
);
ioc
->
imask
=
iova_space_mask
;
ioc
->
ibase
=
READ_REG
(
ioc
->
ioc_hpa
+
IOC_IBASE
)
&
0xFFFFFFFEUL
;
ioc
->
imask
=
iova_space_mask
;
/* save it */
DBG_INIT
(
"%s() IOV base 0x%lx mask 0x%0lx
\n
"
,
DBG_INIT
(
"%s() IOV base 0x%lx mask 0x%0lx
\n
"
,
__FUNCTION__
,
ioc
->
ibase
,
ioc
->
imask
);
__FUNCTION__
,
ioc
->
ibase
,
ioc
->
imask
);
/*
/*
**
XXX DMA HINT
registers are programmed with default hint
**
FIXME: Hint
registers are programmed with default hint
** values during boot, so hints should be sane even if we
** values during boot, so hints should be sane even if we
** can't reprogram them the way drivers want.
** can't reprogram them the way drivers want.
*/
*/
WRITE_REG
(
ioc
->
imask
,
ioc
->
ioc_hpa
+
IOC_IMASK
);
WRITE_REG
(
ioc
->
imask
,
ioc
->
ioc_hpa
+
IOC_IMASK
);
/*
/*
** Setting the upper bits makes checking for bypass addresses
** Setting the upper bits makes checking for bypass addresses
...
@@ -1317,34 +1484,30 @@ sba_ioc_init(struct sba_device *sba_dev, struct ioc *ioc, int ioc_num)
...
@@ -1317,34 +1484,30 @@ sba_ioc_init(struct sba_device *sba_dev, struct ioc *ioc, int ioc_num)
 	*/
 	ioc->imask |= 0xFFFFFFFF00000000UL;

-	/* Set I/O Pdir page size to system page size */
-	switch (IOVP_SHIFT) {
-		case 12: /* 4K */
-			tcnfg = 0;
-			break;
-		case 13: /* 8K */
-			tcnfg = 1;
-			break;
-		case 14: /* 16K */
-			tcnfg = 2;
-			break;
-		case 16: /* 64K */
-			tcnfg = 3;
-			break;
+	/* Set I/O PDIR Page size to system page size */
+	switch (PAGE_SHIFT) {
+		case 12: tcnfg = 0; break;	/*  4K */
+		case 13: tcnfg = 1; break;	/*  8K */
+		case 14: tcnfg = 2; break;	/* 16K */
+		case 16: tcnfg = 3; break;	/* 64K */
+		default:
+			panic(PFX "Unsupported system page size %d",
+				1 << PAGE_SHIFT);
+			break;
 	}
 	WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);
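The value written to IOC_TCNFG is just a two-bit code for the I/O page size. A standalone restatement of that mapping, as a sketch (the helper name is invented; the driver keeps this inline in the switch above):

    /* Hypothetical helper mirroring the switch above: translate a page
     * shift into the IOC's TCNFG encoding. Only 4K/8K/16K/64K pages
     * have encodings; anything else is unsupported. */
    static int page_shift_to_tcnfg(int page_shift)
    {
        switch (page_shift) {
        case 12: return 0;    /*  4K */
        case 13: return 1;    /*  8K */
        case 14: return 2;    /* 16K */
        case 16: return 3;    /* 64K */
        default: return -1;   /* caller panics, as above */
        }
    }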
 	/*
 	** Program the IOC's ibase and enable IOVA translation
 	** Bit zero == enable bit.
 	*/
 	WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);

 	/*
 	** Clear I/O TLB of any possible entries.
 	** (Yes. This is a bit paranoid...but so what)
 	*/
-	WRITE_REG(0 | 31, ioc->ioc_hpa + IOC_PCOM);
+	WRITE_REG(ioc->ibase | (iov_order + PAGE_SHIFT), ioc->ioc_hpa + IOC_PCOM);

 	/*
 	** If an AGP device is present, only use half of the IOV space
...
@@ -1354,160 +1517,241 @@ sba_ioc_init(struct sba_device *sba_dev, struct ioc *ioc, int ioc_num)
 	** We program the next pdir index after we stop w/ a key for
 	** the GART code to handshake on.
 	*/
-	if (SBA_GET_AGP(sba_dev)) {
-		DBG_INIT("%s() AGP Device found, reserving 512MB for GART support\n", __FUNCTION__);
-		ioc->pdir_size /= 2;
-		((u64 *)pdir_base)[PDIR_INDEX(iova_space_size/2)] = 0x0000badbadc0ffeeULL;
-	}
-
-	DBG_INIT("%s() DONE\n", __FUNCTION__);
-}
-
-/**************************************************************************
-**
-** SBA initialization code (HW and SW)
-**
-** o identify SBA chip itself
-** o FIXME: initialize DMA hints for reasonable defaults
-**
-**************************************************************************/
+	pci_for_each_dev(device)
+		agp_found |= pci_find_capability(device, PCI_CAP_ID_AGP);
+
+	if (agp_found && reserve_sba_gart) {
+		DBG_INIT("%s: AGP device found, reserving half of IOVA for GART support\n", __FUNCTION__);
+		ioc->pdir_size /= 2;
+		((u64 *)ioc->pdir_base)[PDIR_INDEX(ioc->iov_size/2)] = ZX1_SBA_IOMMU_COOKIE;
+	}
+
+#ifdef FULL_VALID_PDIR
+	/*
+	** Check to see if the spill page has been allocated, we don't need more than
+	** one across multiple SBAs.
+	*/
+	if (!prefetch_spill_page) {
+		char *spill_poison = "SBAIOMMU POISON";
+		int poison_size = 16;
+		void *poison_addr, *addr;
+
+		addr = (void *)__get_free_pages(GFP_KERNEL, get_order(IOVP_SIZE));
+		if (!addr)
+			panic(PFX "Couldn't allocate PDIR spill page\n");
+
+		poison_addr = addr;
+		for ( ; (u64)poison_addr < addr + IOVP_SIZE; poison_addr += poison_size)
+			memcpy(poison_addr, spill_poison, poison_size);
+
+		prefetch_spill_page = virt_to_phys(addr);
+
+		DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __FUNCTION__, prefetch_spill_page);
+	}
+	/*
+	** Set all the PDIR entries valid w/ the spill page as the target
+	*/
+	for (index = 0; index < (ioc->pdir_size / sizeof(u64)); index++)
+		((u64 *)ioc->pdir_base)[index] = (0x80000000000000FF | prefetch_spill_page);
+#endif
+}
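Throughout these hunks, PDIR_INDEX() converts an I/O virtual page address into an index into the flat page directory. The macro definitions live near the top of sba_iommu.c, outside the hunks shown here, so take this restatement as an assumption rather than a quote:

    /* Assumed definitions (from earlier in sba_iommu.c): one 64-bit
     * pdir entry maps one I/O page, so the index is just the IOVA
     * shifted down by the I/O page size. */
    #define IOVP_SIZE          PAGE_SIZE
    #define IOVP_SHIFT         PAGE_SHIFT
    #define PDIR_INDEX(iovp)   ((iovp) >> IOVP_SHIFT)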
-static void
-sba_hw_init(struct sba_device *sba_dev)
+static void __init
+ioc_resource_init(struct ioc *ioc)
 {
-	int i;
-	int num_ioc;
-	u64 dma_mask;
-	u32 func_id;
-
-	/*
-	** Identify the SBA so we can set the dma_mask. We can make a virtual
-	** dma_mask of the memory subsystem such that devices not implmenting
-	** a full 64bit mask might still be able to bypass efficiently.
-	*/
-	func_id = READ_REG(sba_dev->sba_hpa + SBA_FUNC_ID);
-	if (func_id == ZX1_FUNC_ID_VALUE) {
-		dma_mask = 0xFFFFFFFFFFUL;
-	} else {
-		dma_mask = 0xFFFFFFFFFFFFFFFFUL;
-	}
-
-	DBG_INIT("%s(): ioc->dma_mask == 0x%lx\n", __FUNCTION__, dma_mask);
+	spin_lock_init(&ioc->res_lock);
+
+	/* resource map size dictated by pdir_size */
+	ioc->res_size = ioc->pdir_size / sizeof(u64);  /* entries */
+	ioc->res_size >>= 3;  /* convert bit count to byte count */
+	DBG_INIT("%s() res_size 0x%x\n", __FUNCTION__, ioc->res_size);
+
+	ioc->res_map = (char *) __get_free_pages(GFP_KERNEL,
+						 get_order(ioc->res_size));
+	if (!ioc->res_map)
+		panic(PFX "Couldn't allocate resource map\n");
+
+	memset(ioc->res_map, 0, ioc->res_size);
+	/* next available IOVP - circular search */
+	ioc->res_hint = (unsigned long *) ioc->res_map;
+
+#ifdef ASSERT_PDIR_SANITY
+	/* Mark first bit busy - ie no IOVA 0 */
+	ioc->res_map[0] = 0x1;
+	ioc->pdir_base[0] = 0x8000000000000000ULL | ZX1_SBA_IOMMU_COOKIE;
+#endif
+#ifdef FULL_VALID_PDIR
+	/* Mark the last resource used so we don't prefetch beyond IOVA space */
+	ioc->res_map[ioc->res_size - 1] |= 0x80UL;  /* res_map is chars */
+	ioc->pdir_base[(ioc->pdir_size / sizeof(u64)) - 1] = (0x80000000000000FF
+							      | prefetch_spill_page);
+#endif
+
+	DBG_INIT("%s() res_map %x %p\n", __FUNCTION__,
+		 ioc->res_size, (void *) ioc->res_map);
+}
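The two shifts in ioc_resource_init() pack one bitmap bit per pdir entry. A worked example of the arithmetic, assuming a 1GB IOVA space with 4K I/O pages (the sizes are an assumption for illustration):

    /* Worked example of the sizing above, assuming 1GB IOVA / 4K pages:
     *   pdir entries = 1GB / 4K            = 262144
     *   pdir_size    = 262144 * sizeof(u64) = 2MB
     *   res_size     = 262144 bits >> 3     = 32768 bytes of bitmap
     */
    unsigned int pdir_entries = (1UL << 30) >> 12;   /* 262144 */
    unsigned int res_bytes    = pdir_entries >> 3;   /* 32768  */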
+static void __init
+ioc_sac_init(struct ioc *ioc)
+{
+	struct pci_dev *sac = NULL;
+	struct pci_controller *controller = NULL;
+
 	/*
-	** Leaving in the multiple ioc code from parisc for the future,
-	** currently there are no muli-ioc mckinley sbas
+	 * pci_alloc_consistent() must return a DMA address which is
+	 * SAC (single address cycle) addressable, so allocate a
+	 * pseudo-device to enforce that.
 	*/
-	sba_dev->ioc[0].ioc_hpa = SBA_IOC_OFFSET;
-	num_ioc = 1;
-
-	sba_dev->num_ioc = num_ioc;
-	for (i = 0; i < num_ioc; i++) {
-		sba_dev->ioc[i].dma_mask = dma_mask;
-		sba_dev->ioc[i].ioc_hpa += sba_dev->sba_hpa;
-		sba_ioc_init(sba_dev, &(sba_dev->ioc[i]), i);
-	}
-}
+	sac = kmalloc(sizeof(*sac), GFP_KERNEL);
+	if (!sac)
+		panic(PFX "Couldn't allocate struct pci_dev");
+	memset(sac, 0, sizeof(*sac));
+
+	controller = kmalloc(sizeof(*controller), GFP_KERNEL);
+	if (!controller)
+		panic(PFX "Couldn't allocate struct pci_controller");
+	memset(controller, 0, sizeof(*controller));
+
+	controller->iommu = ioc;
+	sac->sysdata = controller;
+	sac->dma_mask = 0xFFFFFFFFUL;
+	ioc->sac_only_dev = sac;
+}
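ioc_sac_init() builds a throwaway pci_dev purely so the consistent-allocation path has a 32-bit dma_mask to test against; any DMA address that fits that mask can be handed back from pci_alloc_consistent() as a single-address-cycle address. A sketch of the kind of check this enables (an illustrative helper, not from the driver):

    /* Illustrative only: an address is SAC-addressable when it fits
     * within the pseudo-device's 32-bit dma_mask. */
    static inline int sac_addressable(struct pci_dev *sac_only_dev, u64 dma_addr)
    {
        return (dma_addr & ~sac_only_dev->dma_mask) == 0;
    }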
-static void
-sba_common_init(struct sba_device *sba_dev)
-{
-	int i;
-
-	/* add this one to the head of the list (order doesn't matter)
-	** This will be useful for debugging - especially if we get coredumps
-	*/
-	sba_dev->next = sba_list;
-	sba_list = sba_dev;
-	sba_count++;
-
-	for (i = 0; i < sba_dev->num_ioc; i++) {
-		int res_size;
-
-		/* resource map size dictated by pdir_size */
-		res_size = sba_dev->ioc[i].pdir_size / sizeof(u64); /* entries */
-		res_size >>= 3;  /* convert bit count to byte count */
-		DBG_INIT("%s() res_size 0x%x\n", __FUNCTION__, res_size);
-
-		sba_dev->ioc[i].res_size = res_size;
-		sba_dev->ioc[i].res_map = (char *) __get_free_pages(GFP_KERNEL, get_order(res_size));
-
-		if (NULL == sba_dev->ioc[i].res_map)
-		{
-			panic(__FILE__ ":%s() could not allocate resource map\n", __FUNCTION__);
-		}
-
-		memset(sba_dev->ioc[i].res_map, 0, res_size);
-		/* next available IOVP - circular search */
-		if ((sba_dev->hw_rev & 0xFF) >= 0x20) {
-			sba_dev->ioc[i].res_hint = (unsigned long *) sba_dev->ioc[i].res_map;
-		} else {
-			u64 reserved_iov;
-
-			/* Yet another 1.x hack */
-			printk(KERN_DEBUG "zx1 1.x: Starting resource hint offset into "
-			       "IOV space to avoid initial zero value IOVA\n");
-			sba_dev->ioc[i].res_hint = (unsigned long *)
-				&(sba_dev->ioc[i].res_map[L1_CACHE_BYTES]);
-
-			sba_dev->ioc[i].res_map[0] = 0x1;
-			sba_dev->ioc[i].pdir_base[0] = 0x8000badbadc0ffeeULL;
-
-			for (reserved_iov = 0xA0000; reserved_iov < 0xC0000; reserved_iov += IOVP_SIZE) {
-				u64 *res_ptr = (u64 *) sba_dev->ioc[i].res_map;
-				int index = PDIR_INDEX(reserved_iov);
-				int res_word;
-				u64 mask;
-
-				res_word = (int)(index / BITS_PER_LONG);
-				mask = 0x1UL << (index - (res_word * BITS_PER_LONG));
-				res_ptr[res_word] |= mask;
-				sba_dev->ioc[i].pdir_base[PDIR_INDEX(reserved_iov)] = (SBA_VALID_MASK | reserved_iov);
-			}
-		}
-
-#ifdef ASSERT_PDIR_SANITY
-		/* Mark first bit busy - ie no IOVA 0 */
-		sba_dev->ioc[i].res_map[0] = 0x1;
-		sba_dev->ioc[i].pdir_base[0] = 0x8000badbadc0ffeeULL;
-#endif
-
-		DBG_INIT("%s() %d res_map %x %p\n", __FUNCTION__,
-			 i, res_size, (void *) sba_dev->ioc[i].res_map);
-	}
-
-	sba_dev->sba_lock = SPIN_LOCK_UNLOCKED;
-}
+static void __init
+ioc_zx1_init(struct ioc *ioc)
+{
+	if (ioc->rev < 0x20)
+		panic(PFX "IOC 2.0 or later required for IOMMU support\n");
+
+	ioc->dma_mask = 0xFFFFFFFFFFUL;
+}
+
+typedef void (initfunc)(struct ioc *);
+
+struct ioc_iommu {
+	u32 func_id;
+	char *name;
+	initfunc *init;
+};
+
+static struct ioc_iommu ioc_iommu_info[] __initdata = {
+	{ ZX1_IOC_ID, "zx1", ioc_zx1_init },
+	{ REO_IOC_ID, "REO" },
+	{ SX1000_IOC_ID, "sx1000" },
+};
+
+static struct ioc * __init
+ioc_init(u64 hpa, void *handle)
+{
+	struct ioc *ioc;
+	struct ioc_iommu *info;
+
+	ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
+	if (!ioc)
+		return NULL;
+
+	memset(ioc, 0, sizeof(*ioc));
+
+	ioc->next = ioc_list;
+	ioc_list = ioc;
+
+	ioc->handle = handle;
+	ioc->ioc_hpa = ioremap(hpa, 0x1000);
+
+	ioc->func_id = READ_REG(ioc->ioc_hpa + IOC_FUNC_ID);
+	ioc->rev = READ_REG(ioc->ioc_hpa + IOC_FCLASS) & 0xFFUL;
+	ioc->dma_mask = 0xFFFFFFFFFFFFFFFFUL;	/* conservative */
+
+	for (info = ioc_iommu_info; info < ioc_iommu_info + ARRAY_SIZE(ioc_iommu_info); info++) {
+		if (ioc->func_id == info->func_id) {
+			ioc->name = info->name;
+			if (info->init)
+				(info->init)(ioc);
+		}
+	}
+
+	if (!ioc->name) {
+		ioc->name = kmalloc(24, GFP_KERNEL);
+		if (ioc->name)
+			sprintf(ioc->name, "Unknown (%04x:%04x)",
+				ioc->func_id & 0xFFFF, (ioc->func_id >> 16) & 0xFFFF);
+		else
+			ioc->name = "Unknown";
+	}
+
+	ioc_iova_init(ioc);
+	ioc_resource_init(ioc);
+	ioc_sac_init(ioc);
+
+	printk(KERN_INFO PFX
+		"%s %d.%d HPA 0x%lx IOVA space %dMb at 0x%lx\n",
+		ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF,
+		hpa, ioc->iov_size >> 20, ioc->ibase);
+
+	return ioc;
+}
+
 /**************************************************************************
 **
 ** SBA initialization code (HW and SW)
 **
 ** o identify SBA chip itself
 ** o FIXME: initialize DMA hints for reasonable defaults
 **
 **************************************************************************/
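With ioc_iommu_info[], supporting another IOC variant becomes a table entry plus an optional per-chip init hook. A hypothetical extension (FOO_IOC_ID and ioc_foo_init are invented names, shown only to illustrate the pattern):

    static void __init
    ioc_foo_init(struct ioc *ioc)   /* hypothetical per-chip quirk hook */
    {
        ioc->dma_mask = 0xFFFFFFFFFFUL;     /* e.g. clamp the DMA mask */
    }

    static struct ioc_iommu ioc_iommu_info[] __initdata = {
        { ZX1_IOC_ID,    "zx1",    ioc_zx1_init },
        { REO_IOC_ID,    "REO" },
        { SX1000_IOC_ID, "sx1000" },
        { FOO_IOC_ID,    "foo",    ioc_foo_init },  /* invented entry */
    };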
 #ifdef CONFIG_PROC_FS
-static int sba_proc_info(char *buf, char **start, off_t offset, int len)
-{
-	struct sba_device *sba_dev;
-	struct ioc *ioc;
-	int total_pages;
-	unsigned long i = 0, avg = 0, min, max;
-
-	for (sba_dev = sba_list; sba_dev; sba_dev = sba_dev->next) {
-		ioc = &sba_dev->ioc[0];	/* FIXME: Multi-IOC support! */
-		total_pages = (int) (ioc->res_size << 3); /* 8 bits per byte */
-		sprintf(buf, "%s rev %d.%d\n", "Hewlett-Packard zx1 SBA",
-			((sba_dev->hw_rev >> 4) & 0xF), (sba_dev->hw_rev & 0xF));
-		sprintf(buf, "%sIO PDIR size    : %d bytes (%d entries)\n",
-			buf,
-			(int) ((ioc->res_size << 3) * sizeof(u64)), /* 8 bits/byte */
-			total_pages);
-		sprintf(buf, "%sIO PDIR entries : %ld free %ld used (%d%%)\n", buf,
-			total_pages - ioc->used_pages, ioc->used_pages,
-			(int) (ioc->used_pages * 100 / total_pages));
-		sprintf(buf, "%sResource bitmap : %d bytes (%d pages)\n",
-			buf, ioc->res_size, ioc->res_size << 3);  /* 8 bits per byte */
-		min = max = ioc->avg_search[0];
-		for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
+static void *
+ioc_start(struct seq_file *s, loff_t *pos)
+{
+	struct ioc *ioc;
+	loff_t n = *pos;
+
+	for (ioc = ioc_list; ioc; ioc = ioc->next)
+		if (!n--)
+			return ioc;
+
+	return NULL;
+}
+
+static void *
+ioc_next(struct seq_file *s, void *v, loff_t *pos)
+{
+	struct ioc *ioc = v;
+
+	++*pos;
+	return ioc->next;
+}
+
+static void
+ioc_stop(struct seq_file *s, void *v)
+{
+}
+
+static int
+ioc_show(struct seq_file *s, void *v)
+{
+	struct ioc *ioc = v;
+	int total_pages = (int) (ioc->res_size << 3); /* 8 bits per byte */
+	unsigned long i = 0, avg = 0, min, max;
+
+	seq_printf(s, "Hewlett Packard %s IOC rev %d.%d\n",
+		ioc->name, ((ioc->rev >> 4) & 0xF), (ioc->rev & 0xF));
+	seq_printf(s, "IO PDIR size    : %d bytes (%d entries)\n",
+		(int) ((ioc->res_size << 3) * sizeof(u64)), /* 8 bits/byte */
+		total_pages);
+	seq_printf(s, "IO PDIR entries : %ld free %ld used (%d%%)\n",
+		total_pages - ioc->used_pages, ioc->used_pages,
+		(int) (ioc->used_pages * 100 / total_pages));
+	seq_printf(s, "Resource bitmap : %d bytes (%d pages)\n",
+		ioc->res_size, ioc->res_size << 3);  /* 8 bits per byte */
+
+	min = max = ioc->avg_search[0];
+	for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
...
@@ -1516,184 +1760,220 @@ static int sba_proc_info(char *buf, char **start, off_t offset, int len)
 		if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
 	}
 	avg /= SBA_SEARCH_SAMPLE;
-	sprintf(buf, "%s  Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
-		buf, min, avg, max);
+	seq_printf(s, "  Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
+		min, avg, max);

-	sprintf(buf, "%s\npci_map_single(): %12ld calls %12ld pages (avg %d/1000)\n",
-		buf, ioc->msingle_calls, ioc->msingle_pages,
-		(int) ((ioc->msingle_pages * 1000) / ioc->msingle_calls));
+	seq_printf(s, "\npci_map_single(): %12ld calls %12ld pages (avg %d/1000)\n",
+		ioc->msingle_calls, ioc->msingle_pages,
+		(int) ((ioc->msingle_pages * 1000) / ioc->msingle_calls));
 #ifdef ALLOW_IOV_BYPASS
-	sprintf(buf, "%spci_map_single(): %12ld bypasses\n",
-		buf, ioc->msingle_bypass);
+	seq_printf(s, "pci_map_single(): %12ld bypasses\n",
+		ioc->msingle_bypass);
 #endif

-	sprintf(buf, "%s\npci_unmap_single: %12ld calls %12ld pages (avg %d/1000)\n",
-		buf, ioc->usingle_calls, ioc->usingle_pages,
-		(int) ((ioc->usingle_pages * 1000) / ioc->usingle_calls));
+	seq_printf(s, "\npci_unmap_single: %12ld calls %12ld pages (avg %d/1000)\n",
+		ioc->usingle_calls, ioc->usingle_pages,
+		(int) ((ioc->usingle_pages * 1000) / ioc->usingle_calls));
 #ifdef ALLOW_IOV_BYPASS
-	sprintf(buf, "%spci_unmap_single: %12ld bypasses\n",
-		buf, ioc->usingle_bypass);
+	seq_printf(s, "pci_unmap_single: %12ld bypasses\n",
+		ioc->usingle_bypass);
 #endif

-	sprintf(buf, "%s\npci_map_sg()    : %12ld calls %12ld pages (avg %d/1000)\n",
-		buf, ioc->msg_calls, ioc->msg_pages,
-		(int) ((ioc->msg_pages * 1000) / ioc->msg_calls));
+	seq_printf(s, "\npci_map_sg()    : %12ld calls %12ld pages (avg %d/1000)\n",
+		ioc->msg_calls, ioc->msg_pages,
+		(int) ((ioc->msg_pages * 1000) / ioc->msg_calls));
 #ifdef ALLOW_IOV_BYPASS
-	sprintf(buf, "%spci_map_sg()    : %12ld bypasses\n",
-		buf, ioc->msg_bypass);
+	seq_printf(s, "pci_map_sg()    : %12ld bypasses\n",
+		ioc->msg_bypass);
 #endif

-	sprintf(buf, "%spci_unmap_sg()  : %12ld calls %12ld pages (avg %d/1000)\n",
-		buf, ioc->usg_calls, ioc->usg_pages,
-		(int) ((ioc->usg_pages * 1000) / ioc->usg_calls));
-	}
-	return strlen(buf);
+	seq_printf(s, "pci_unmap_sg()  : %12ld calls %12ld pages (avg %d/1000)\n",
+		ioc->usg_calls, ioc->usg_pages,
+		(int) ((ioc->usg_pages * 1000) / ioc->usg_calls));
+
+	return 0;
 }
+
+static struct seq_operations ioc_seq_ops = {
+	.start = ioc_start,
+	.next  = ioc_next,
+	.stop  = ioc_stop,
+	.show  = ioc_show
+};
+
+static int
+ioc_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &ioc_seq_ops);
+}
+
+static struct file_operations ioc_fops = {
+	.open    = ioc_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release
+};
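The proc conversion above follows the stock 2.5 seq_file pattern: a start/next/stop/show quartet plus seq_open() glue inside a file_operations. A minimal sketch of the same wiring for a hypothetical singly-linked "widget" list (all names here are invented for illustration):

    static void *widget_start(struct seq_file *s, loff_t *pos)
    {
        struct widget *w;
        loff_t n = *pos;

        for (w = widget_list; w; w = w->next)   /* linear seek to *pos */
            if (!n--)
                return w;
        return NULL;                            /* iteration finished */
    }

    static void *widget_next(struct seq_file *s, void *v, loff_t *pos)
    {
        ++*pos;
        return ((struct widget *) v)->next;     /* NULL ends the walk */
    }

    static void widget_stop(struct seq_file *s, void *v) { }

    static int widget_show(struct seq_file *s, void *v)
    {
        seq_printf(s, "widget at %p\n", v);
        return 0;
    }

    static struct seq_operations widget_seq_ops = {
        .start = widget_start, .next = widget_next,
        .stop  = widget_stop,  .show = widget_show
    };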
 static int
-sba_resource_map(char *buf, char **start, off_t offset, int len)
+ioc_map_show(struct seq_file *s, void *v)
 {
-	struct ioc *ioc = sba_list->ioc;	/* FIXME: Multi-IOC support! */
-	unsigned int *res_ptr;
+	struct ioc *ioc = v;
+	unsigned int *res_ptr = (unsigned int *)ioc->res_map;
 	int i;

-	if (!ioc)
-		return 0;
-
-	res_ptr = (unsigned int *)ioc->res_map;
-	buf[0] = '\0';
-	for (i = 0; i < (ioc->res_size / sizeof(unsigned int)); ++i, ++res_ptr) {
-		if ((i & 7) == 0)
-			strcat(buf, "\n");
-		sprintf(buf, "%s %08x", buf, *res_ptr);
-	}
-	strcat(buf, "\n");
-
-	return strlen(buf);
+	for (i = 0; i < (ioc->res_size / sizeof(unsigned int)); ++i, ++res_ptr)
+		seq_printf(s, "%s%08x", (i & 7) ? " " : "\n", *res_ptr);
+	seq_printf(s, "\n");
+
+	return 0;
 }
+
+static struct seq_operations ioc_map_ops = {
+	.start = ioc_start,
+	.next  = ioc_next,
+	.stop  = ioc_stop,
+	.show  = ioc_map_show
+};
+
+static int
+ioc_map_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &ioc_map_ops);
+}
+
+static struct file_operations ioc_map_fops = {
+	.open    = ioc_map_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release
+};

-/*
-** Determine if sba should claim this chip (return 0) or not (return 1).
-** If so, initialize the chip and tell other partners in crime they
-** have work to do.
-*/
-void __init sba_init(void)
+static void __init
+ioc_proc_init(void)
 {
-	struct sba_device *sba_dev;
-	u32 func_id, hw_rev;
-	u32 *func_offset = NULL;
-	int i, agp_found = 0;
-	static char sba_rev[6];
-	struct pci_dev *device = NULL;
-	u64 hpa = 0;
-
-	if (!(device = pci_find_device(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_ZX1_SBA, NULL)))
-		return;
-
-	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
-		if (pci_resource_flags(device, i) == IORESOURCE_MEM) {
-			hpa = (u64) ioremap(pci_resource_start(device, i),
-					    pci_resource_len(device, i));
-			break;
-		}
-	}
-
-	func_id = READ_REG(hpa + SBA_FUNC_ID);
-	if (func_id != ZX1_FUNC_ID_VALUE)
-		return;
-
-	strcpy(sba_rev, "zx1");
-	func_offset = zx1_func_offsets;
-
-	/* Read HW Rev First */
-	hw_rev = READ_REG(hpa + SBA_FCLASS) & 0xFFUL;
-
-	/*
-	 * Not all revision registers of the chipset are updated on every
-	 * turn. Must scan through all functions looking for the highest rev
-	 */
-	if (func_offset) {
-		for (i = 0; func_offset[i] != -1; i++) {
-			u32 func_rev;
-
-			func_rev = READ_REG(hpa + SBA_FCLASS + func_offset[i]) & 0xFFUL;
-			DBG_INIT("%s() func offset: 0x%x rev: 0x%x\n",
-				 __FUNCTION__, func_offset[i], func_rev);
-			if (func_rev > hw_rev)
-				hw_rev = func_rev;
-		}
-	}
-
-	printk(KERN_INFO "%s found %s %d.%d at %s, HPA 0x%lx\n", DRIVER_NAME,
-	       sba_rev, ((hw_rev >> 4) & 0xF), (hw_rev & 0xF),
-	       device->slot_name, hpa);
-
-	if ((hw_rev & 0xFF) < 0x20) {
-		printk(KERN_INFO "%s: SBA rev less than 2.0 not supported", DRIVER_NAME);
-		return;
-	}
-
-	sba_dev = kmalloc(sizeof(struct sba_device), GFP_KERNEL);
-	if (NULL == sba_dev) {
-		printk(KERN_ERR DRIVER_NAME " - couldn't alloc sba_device\n");
-		return;
-	}
-
-	memset(sba_dev, 0, sizeof(struct sba_device));
+	if (ioc_list) {
+		struct proc_dir_entry *dir, *entry;
+
+		dir = proc_mkdir("bus/mckinley", 0);
+
+		entry = create_proc_entry(ioc_list->name, 0, dir);
+		if (entry)
+			entry->proc_fops = &ioc_fops;
+
+		entry = create_proc_entry("bitmap", 0, dir);
+		if (entry)
+			entry->proc_fops = &ioc_map_fops;
+	}
+}
+#endif
+
+void
+sba_connect_bus(struct pci_bus *bus)
+{
+	acpi_handle handle, parent;
+	acpi_status status;
+	struct ioc *ioc;
+
+	if (!PCI_CONTROLLER(bus))
+		panic(PFX "no sysdata on bus %d!\n", bus->number);
+
+	if (PCI_CONTROLLER(bus)->iommu)
+		return;
+
+	handle = PCI_CONTROLLER(bus)->acpi_handle;
+	if (!handle)
+		return;
+
+	/*
+	 * The IOC scope encloses PCI root bridges in the ACPI
+	 * namespace, so work our way out until we find an IOC we
+	 * claimed previously.
+	 */
+	do {
+		for (ioc = ioc_list; ioc; ioc = ioc->next)
+			if (ioc->handle == handle) {
+				PCI_CONTROLLER(bus)->iommu = ioc;
+				return;
+			}
+
+		status = acpi_get_parent(handle, &parent);
+		handle = parent;
+	} while (ACPI_SUCCESS(status));
+
+	printk(KERN_WARNING "No IOC for PCI Bus %02x:%02x in ACPI\n",
+	       PCI_SEGMENT(bus), bus->number);
+}
-	for (i = 0; i < MAX_IOC; i++)
-		spin_lock_init(&(sba_dev->ioc[i].res_lock));
-
-	sba_dev->hw_rev = hw_rev;
-	sba_dev->sba_hpa = hpa;
-
-	/*
-	 * We pass this fake device from alloc_consistent to ensure
-	 * we only use SAC for alloc_consistent mappings.
-	 */
-	sac_only_dev.dma_mask = 0xFFFFFFFFUL;
-
-	/*
-	 * We need to check for an AGP device, if we find one, then only
-	 * use part of the IOVA space for PCI DMA, the rest is for GART.
-	 * REVISIT for multiple IOC.
-	 */
-	pci_for_each_dev(device)
-		agp_found |= pci_find_capability(device, PCI_CAP_ID_AGP);
-	if (agp_found && reserve_sba_gart)
-		SBA_SET_AGP(sba_dev);
-
-	sba_hw_init(sba_dev);
-	sba_common_init(sba_dev);
-
-#ifdef CONFIG_PROC_FS
-	{
-		struct proc_dir_entry *proc_mckinley_root;
-
-		proc_mckinley_root = proc_mkdir("bus/mckinley", 0);
-		create_proc_info_entry(sba_rev, 0, proc_mckinley_root, sba_proc_info);
-		create_proc_info_entry("bitmap", 0, proc_mckinley_root, sba_resource_map);
-	}
-#endif
-}
+static int __init
+acpi_sba_ioc_add(struct acpi_device *device)
+{
+	struct ioc *ioc;
+	acpi_status status;
+	u64 hpa, length;
+	struct acpi_device_info dev_info;
+
+	status = hp_acpi_csr_space(device->handle, &hpa, &length);
+	if (ACPI_FAILURE(status))
+		return 1;
+
+	status = acpi_get_object_info(device->handle, &dev_info);
+	if (ACPI_FAILURE(status))
+		return 1;
+
+	/*
+	 * For HWP0001, only SBA appears in ACPI namespace. It encloses the PCI
+	 * root bridges, and its CSR space includes the IOC function.
+	 */
+	if (strncmp("HWP0001", dev_info.hardware_id, 7) == 0)
+		hpa += ZX1_IOC_OFFSET;
+
+	ioc = ioc_init(hpa, device->handle);
+	if (!ioc)
+		return 1;
+
+	return 0;
+}
+
+static struct acpi_driver acpi_sba_ioc_driver = {
+	name:	"IOC IOMMU Driver",
+	ids:	"HWP0001,HWP0004",
+	ops: {
+		add:	acpi_sba_ioc_add,
+	},
+};
+
+static int __init
+sba_init(void)
+{
+	struct pci_bus *b;
+
+	MAX_DMA_ADDRESS = ~0UL;
+
+	acpi_bus_register_driver(&acpi_sba_ioc_driver);
+
+	pci_for_each_bus(b)
+		sba_connect_bus(b);
+
+#ifdef CONFIG_PROC_FS
+	ioc_proc_init();
+#endif
+	return 0;
+}
+
+subsys_initcall(sba_init); /* must be initialized after ACPI etc., but before any drivers... */

 static int __init
 nosbagart(char *str)
 {
 	reserve_sba_gart = 0;
 	return 1;
 }

+int
+sba_dma_supported (struct pci_dev *dev, u64 mask)
+{
+	/* make sure it's at least 32bit capable */
+	return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);
+}
+
 __setup("nosbagart", nosbagart);

-EXPORT_SYMBOL(sba_init);
 EXPORT_SYMBOL(sba_map_single);
 EXPORT_SYMBOL(sba_unmap_single);
 EXPORT_SYMBOL(sba_map_sg);
 EXPORT_SYMBOL(sba_unmap_sg);
-EXPORT_SYMBOL(sba_dma_address);
 EXPORT_SYMBOL(sba_dma_supported);
 EXPORT_SYMBOL(sba_alloc_consistent);
 EXPORT_SYMBOL(sba_free_consistent);
arch/ia64/hp/sim/simeth.c
View file @ 20f6d716
...
@@ -505,7 +505,7 @@ simeth_interrupt(int irq, void *dev_id, struct pt_regs * regs)
 	}

 	/*
-	 * very simple loop because we get interrupts only when receving
+	 * very simple loop because we get interrupts only when receiving
 	 */
 	while (simeth_rx(dev));
 }
...
arch/ia64/hp/zx1/Makefile
View file @ 20f6d716
...
@@ -5,5 +5,4 @@
 # Copyright (C) Alex Williamson (alex_williamson@hp.com)
 #

-obj-y := hpzx1_misc.o
 obj-$(CONFIG_IA64_GENERIC) += hpzx1_machvec.o
arch/ia64/hp/zx1/hpzx1_misc.c
deleted 100644 → 0
View file @ d210257e

/*
 * Misc. support for HP zx1 chipset support
 *
 * Copyright (C) 2002-2003 Hewlett-Packard Co
 *	Alex Williamson <alex_williamson@hp.com>
 *	Bjorn Helgaas <bjorn_helgaas@hp.com>
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/efi.h>

#include <asm/dma.h>
#include <asm/iosapic.h>

extern acpi_status acpi_evaluate_integer (acpi_handle, acpi_string, struct acpi_object_list *, unsigned long *);

#define PFX "hpzx1: "

static int hpzx1_devices;

struct fake_pci_dev {
	struct fake_pci_dev *next;
	struct pci_dev *pci_dev;
	unsigned long csr_base;
	unsigned long csr_size;
	unsigned long mapped_csrs;	// ioremapped
	int sizing;			// in middle of BAR sizing operation?
} *fake_pci_dev_list;

static struct pci_ops *orig_pci_ops;

struct fake_pci_dev *
lookup_fake_dev (struct pci_bus *bus, unsigned int devfn)
{
	struct fake_pci_dev *fake_dev;

	for (fake_dev = fake_pci_dev_list; fake_dev; fake_dev = fake_dev->next)
		if (fake_dev->pci_dev->bus == bus && fake_dev->pci_dev->devfn == devfn)
			return fake_dev;
	return NULL;
}

static int
hp_cfg_read (struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value)
{
	struct fake_pci_dev *fake_dev = lookup_fake_dev(bus, devfn);

	if (!fake_dev)
		return (*orig_pci_ops->read)(bus, devfn, where, size, value);

	if (where == PCI_BASE_ADDRESS_0) {
		if (fake_dev->sizing)
			*value = ~(fake_dev->csr_size - 1);
		else
			*value = ((fake_dev->csr_base & PCI_BASE_ADDRESS_MEM_MASK)
				  | PCI_BASE_ADDRESS_SPACE_MEMORY);
		fake_dev->sizing = 0;
		return PCIBIOS_SUCCESSFUL;
	}
	switch (size) {
	case 1: *value = readb(fake_dev->mapped_csrs + where); break;
	case 2: *value = readw(fake_dev->mapped_csrs + where); break;
	case 4: *value = readl(fake_dev->mapped_csrs + where); break;
	default:
		printk(KERN_WARNING "hp_cfg_read: bad size = %d bytes", size);
		break;
	}
	if (where == PCI_COMMAND)
		*value |= PCI_COMMAND_MEMORY; /* SBA omits this */
	return PCIBIOS_SUCCESSFUL;
}

static int
hp_cfg_write (struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value)
{
	struct fake_pci_dev *fake_dev = lookup_fake_dev(bus, devfn);

	if (!fake_dev)
		return (*orig_pci_ops->write)(bus, devfn, where, size, value);

	if (where == PCI_BASE_ADDRESS_0) {
		if (value == ((1UL << 8*size) - 1))
			fake_dev->sizing = 1;
		return PCIBIOS_SUCCESSFUL;
	}
	switch (size) {
	case 1: writeb(value, fake_dev->mapped_csrs + where); break;
	case 2: writew(value, fake_dev->mapped_csrs + where); break;
	case 4: writel(value, fake_dev->mapped_csrs + where); break;
	default:
		printk(KERN_WARNING "hp_cfg_write: bad size = %d bytes", size);
		break;
	}
	return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops hp_pci_conf = {
	.read =		hp_cfg_read,
	.write =	hp_cfg_write
};

static void
hpzx1_fake_pci_dev(char *name, unsigned int busnum, unsigned long addr, unsigned int size)
{
	struct fake_pci_dev *fake;
	int slot, ret;
	struct pci_dev *dev;
	struct pci_bus *b, *bus = NULL;
	u8 hdr;

	fake = kmalloc(sizeof(*fake), GFP_KERNEL);
	if (!fake) {
		printk(KERN_ERR PFX "No memory for %s (0x%p) sysdata\n",
			name, (void *) addr);
		return;
	}

	memset(fake, 0, sizeof(*fake));
	fake->csr_base = addr;
	fake->csr_size = size;
	fake->mapped_csrs = (unsigned long) ioremap(addr, size);
	fake->sizing = 0;

	pci_for_each_bus(b)
		if (busnum == b->number) {
			bus = b;
			break;
		}

	if (!bus) {
		printk(KERN_ERR PFX "No host bus 0x%02x for %s (0x%p)\n",
			busnum, name, (void *) addr);
		kfree(fake);
		return;
	}

	for (slot = 0x1e; slot; slot--)
		if (!pci_find_slot(busnum, PCI_DEVFN(slot, 0)))
			break;

	if (slot < 0) {
		printk(KERN_ERR PFX "No space for %s (0x%p) on bus 0x%02x\n",
			name, (void *) addr, busnum);
		kfree(fake);
		return;
	}

	dev = kmalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		printk(KERN_ERR PFX "No memory for %s (0x%p)\n",
			name, (void *) addr);
		kfree(fake);
		return;
	}

	bus->ops = &hp_pci_conf;	// replace pci ops for this bus

	fake->pci_dev = dev;
	fake->next = fake_pci_dev_list;
	fake_pci_dev_list = fake;

	memset(dev, 0, sizeof(*dev));
	dev->bus = bus;
	dev->sysdata = fake;
	dev->dev.parent = bus->dev;
	dev->dev.bus = &pci_bus_type;
	dev->devfn = PCI_DEVFN(slot, 0);
	pci_read_config_word(dev, PCI_VENDOR_ID, &dev->vendor);
	pci_read_config_word(dev, PCI_DEVICE_ID, &dev->device);
	pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr);
	dev->hdr_type = hdr & 0x7f;

	pci_setup_device(dev);

	// pci_insert_device() without running /sbin/hotplug
	list_add_tail(&dev->bus_list, &bus->devices);
	list_add_tail(&dev->global_list, &pci_devices);

	strcpy(dev->dev.bus_id, dev->slot_name);
	ret = device_register(&dev->dev);
	if (ret < 0)
		printk(KERN_INFO PFX "fake device registration failed (%d)\n", ret);

	printk(KERN_INFO PFX "%s at 0x%lx; pci dev %s\n", name, addr, dev->slot_name);

	hpzx1_devices++;
}

struct acpi_hp_vendor_long {
	u8	guid_id;
	u8	guid[16];
	u8	csr_base[8];
	u8	csr_length[8];
};

#define HP_CCSR_LENGTH	0x21
#define HP_CCSR_TYPE	0x2
#define HP_CCSR_GUID	EFI_GUID(0x69e9adf9, 0x924f, 0xab5f, \
				 0xf6, 0x4a, 0x24, 0xd2, 0x01, 0x37, 0x0e, 0xad)

extern acpi_status acpi_get_crs(acpi_handle, struct acpi_buffer *);
extern struct acpi_resource *acpi_get_crs_next(struct acpi_buffer *, int *);
extern union acpi_resource_data *acpi_get_crs_type(struct acpi_buffer *, int *, int);
extern void acpi_dispose_crs(struct acpi_buffer *);

static acpi_status
hp_csr_space(acpi_handle obj, u64 *csr_base, u64 *csr_length)
{
	int i, offset = 0;
	acpi_status status;
	struct acpi_buffer buf;
	struct acpi_resource_vendor *res;
	struct acpi_hp_vendor_long *hp_res;
	efi_guid_t vendor_guid;

	*csr_base = 0;
	*csr_length = 0;

	status = acpi_get_crs(obj, &buf);
	if (ACPI_FAILURE(status)) {
		printk(KERN_ERR PFX "Unable to get _CRS data on object\n");
		return status;
	}

	res = (struct acpi_resource_vendor *)acpi_get_crs_type(&buf, &offset, ACPI_RSTYPE_VENDOR);
	if (!res) {
		printk(KERN_ERR PFX "Failed to find config space for device\n");
		acpi_dispose_crs(&buf);
		return AE_NOT_FOUND;
	}

	hp_res = (struct acpi_hp_vendor_long *)(res->reserved);

	if (res->length != HP_CCSR_LENGTH || hp_res->guid_id != HP_CCSR_TYPE) {
		printk(KERN_ERR PFX "Unknown Vendor data\n");
		acpi_dispose_crs(&buf);
		return AE_TYPE; /* Revisit error? */
	}

	memcpy(&vendor_guid, hp_res->guid, sizeof(efi_guid_t));
	if (efi_guidcmp(vendor_guid, HP_CCSR_GUID) != 0) {
		printk(KERN_ERR PFX "Vendor GUID does not match\n");
		acpi_dispose_crs(&buf);
		return AE_TYPE; /* Revisit error? */
	}

	for (i = 0; i < 8; i++) {
		*csr_base |= ((u64)(hp_res->csr_base[i]) << (i * 8));
		*csr_length |= ((u64)(hp_res->csr_length[i]) << (i * 8));
	}

	acpi_dispose_crs(&buf);

	return AE_OK;
}

static acpi_status
hpzx1_sba_probe(acpi_handle obj, u32 depth, void *context, void **ret)
{
	u64 csr_base = 0, csr_length = 0;
	acpi_status status;
	char *name = context;
	char fullname[16];

	status = hp_csr_space(obj, &csr_base, &csr_length);
	if (ACPI_FAILURE(status))
		return status;

	/*
	 * Only SBA shows up in ACPI namespace, so its CSR space
	 * includes both SBA and IOC.  Make SBA and IOC show up
	 * separately in PCI space.
	 */
	sprintf(fullname, "%s SBA", name);
	hpzx1_fake_pci_dev(fullname, 0, csr_base, 0x1000);
	sprintf(fullname, "%s IOC", name);
	hpzx1_fake_pci_dev(fullname, 0, csr_base + 0x1000, 0x1000);

	return AE_OK;
}

static acpi_status
hpzx1_lba_probe(acpi_handle obj, u32 depth, void *context, void **ret)
{
	u64 csr_base = 0, csr_length = 0;
	acpi_status status;
	acpi_native_uint busnum;
	char *name = context;
	char fullname[32];

	status = hp_csr_space(obj, &csr_base, &csr_length);
	if (ACPI_FAILURE(status))
		return status;

	status = acpi_evaluate_integer(obj, METHOD_NAME__BBN, NULL, &busnum);
	if (ACPI_FAILURE(status)) {
		printk(KERN_WARNING PFX "evaluate _BBN fail=0x%x\n", status);
		busnum = 0;	// no _BBN; stick it on bus 0
	}

	sprintf(fullname, "%s _BBN 0x%02x", name, (unsigned int) busnum);
	hpzx1_fake_pci_dev(fullname, busnum, csr_base, csr_length);

	return AE_OK;
}

static void
hpzx1_acpi_dev_init(void)
{
	extern struct pci_ops *pci_root_ops;

	orig_pci_ops = pci_root_ops;

	/*
	 * Make fake PCI devices for the following hardware in the
	 * ACPI namespace.  This makes it more convenient for drivers
	 * because they can claim these devices based on PCI
	 * information, rather than needing to know about ACPI.  The
	 * 64-bit "HPA" space for this hardware is available as BAR
	 * 0/1.
	 *
	 * HWP0001: Single IOC SBA w/o IOC in namespace
	 * HWP0002: LBA device
	 * HWP0003: AGP LBA device
	 */
	acpi_get_devices("HWP0001", hpzx1_sba_probe, "HWP0001", NULL);
	acpi_get_devices("HWP0002", hpzx1_lba_probe, "HWP0002 PCI LBA", NULL);
	acpi_get_devices("HWP0003", hpzx1_lba_probe, "HWP0003 AGP LBA", NULL);
}

extern void sba_init(void);

static int
hpzx1_init (void)
{
	/* zx1 has a hardware I/O TLB which lets us DMA from any device to any address */
	MAX_DMA_ADDRESS = ~0UL;

	hpzx1_acpi_dev_init();
	sba_init();
	return 0;
}

subsys_initcall(hpzx1_init);
arch/ia64/ia32/ia32_entry.S
View file @ 20f6d716
...
@@ -273,9 +273,9 @@ ia32_syscall_table:
 	data8 sys32_sigsuspend
 	data8 compat_sys_sigpending
 	data8 sys_sethostname
-	data8 sys32_setrlimit		/* 75 */
-	data8 sys32_old_getrlimit
-	data8 sys32_getrusage
+	data8 compat_sys_setrlimit	/* 75 */
+	data8 compat_sys_old_getrlimit
+	data8 compat_sys_getrusage
 	data8 sys32_gettimeofday
 	data8 sys32_settimeofday
 	data8 sys32_getgroups16		/* 80 */
...
@@ -312,7 +312,7 @@ ia32_syscall_table:
 	data8 sys_vhangup
 	data8 sys32_ni_syscall		/* used to be sys_idle */
 	data8 sys32_ni_syscall
-	data8 sys32_wait4
+	data8 compat_sys_wait4
 	data8 sys_swapoff		/* 115 */
 	data8 sys32_sysinfo
 	data8 sys32_ipc
...
@@ -389,7 +389,7 @@ ia32_syscall_table:
 	data8 sys32_ni_syscall		/* streams1 */
 	data8 sys32_ni_syscall		/* streams2 */
 	data8 sys32_vfork		/* 190 */
-	data8 sys32_getrlimit
+	data8 compat_sys_getrlimit
 	data8 sys32_mmap2
 	data8 sys32_truncate64
 	data8 sys32_ftruncate64
...
arch/ia64/ia32/ia32_traps.c
View file @ 20f6d716
...
@@ -103,7 +103,7 @@ ia32_exception (struct pt_regs *regs, unsigned long isr)
 		 * C1 reg you need in case of a stack fault, 0x040 is the stack
 		 * fault bit.  We should only be taking one exception at a time,
 		 * so if this combination doesn't produce any single exception,
-		 * then we have a bad program that isn't syncronizing its FPU usage
+		 * then we have a bad program that isn't synchronizing its FPU usage
 		 * and it will suffer the consequences since we won't be able to
 		 * fully reproduce the context of the exception
 		 */
...
arch/ia64/ia32/sys_ia32.c
View file @ 20f6d716
...
@@ -243,8 +243,7 @@ mmap_subpage (struct file *file, unsigned long start, unsigned long end, int pro
 		return -ENOMEM;

 	if (old_prot)
-		if (copy_from_user(page, (void *) PAGE_START(start), PAGE_SIZE))
-			return -EFAULT;
+		copy_from_user(page, (void *) PAGE_START(start), PAGE_SIZE);

 	down_write(&current->mm->mmap_sem);
 	{
...
@@ -1005,77 +1004,6 @@ sys32_writev (int fd, struct compat_iovec *vector, u32 count)
 	return ret;
 }

-#define RLIM_INFINITY32	0x7fffffff
-#define RESOURCE32(x) ((x > RLIM_INFINITY32) ? RLIM_INFINITY32 : x)
-
-struct rlimit32 {
-	int	rlim_cur;
-	int	rlim_max;
-};
-
-extern asmlinkage long sys_getrlimit (unsigned int resource, struct rlimit *rlim);
-
-asmlinkage long
-sys32_old_getrlimit (unsigned int resource, struct rlimit32 *rlim)
-{
-	mm_segment_t old_fs = get_fs();
-	struct rlimit r;
-	int ret;
-
-	set_fs(KERNEL_DS);
-	ret = sys_getrlimit(resource, &r);
-	set_fs(old_fs);
-	if (!ret) {
-		ret = put_user(RESOURCE32(r.rlim_cur), &rlim->rlim_cur);
-		ret |= put_user(RESOURCE32(r.rlim_max), &rlim->rlim_max);
-	}
-	return ret;
-}
-
-asmlinkage long
-sys32_getrlimit (unsigned int resource, struct rlimit32 *rlim)
-{
-	mm_segment_t old_fs = get_fs();
-	struct rlimit r;
-	int ret;
-
-	set_fs(KERNEL_DS);
-	ret = sys_getrlimit(resource, &r);
-	set_fs(old_fs);
-	if (!ret) {
-		if (r.rlim_cur >= 0xffffffff)
-			r.rlim_cur = 0xffffffff;
-		if (r.rlim_max >= 0xffffffff)
-			r.rlim_max = 0xffffffff;
-		ret = put_user(r.rlim_cur, &rlim->rlim_cur);
-		ret |= put_user(r.rlim_max, &rlim->rlim_max);
-	}
-	return ret;
-}
-
-extern asmlinkage long sys_setrlimit (unsigned int resource, struct rlimit *rlim);
-
-asmlinkage long
-sys32_setrlimit (unsigned int resource, struct rlimit32 *rlim)
-{
-	struct rlimit r;
-	int ret;
-	mm_segment_t old_fs = get_fs();
-
-	if (resource >= RLIM_NLIMITS)
-		return -EINVAL;
-	if (get_user(r.rlim_cur, &rlim->rlim_cur) || get_user(r.rlim_max, &rlim->rlim_max))
-		return -EFAULT;
-	if (r.rlim_cur == RLIM_INFINITY32)
-		r.rlim_cur = RLIM_INFINITY;
-	if (r.rlim_max == RLIM_INFINITY32)
-		r.rlim_max = RLIM_INFINITY;
-	set_fs(KERNEL_DS);
-	ret = sys_setrlimit(resource, &r);
-	set_fs(old_fs);
-	return ret;
-}
-
 /*
  * sys32_ipc() is the de-multiplexer for the SysV IPC calls in 32bit emulation..
  *
...
@@ -1648,19 +1576,35 @@ shmctl32 (int first, int second, void *uptr)
 	return err;
 }

+extern int sem_ctls[];
+#define sc_semopm	(sem_ctls[2])
+
 static long
-semtimedop32(int semid, struct sembuf *tsems, int nsems,
-	     const struct compat_timespec *timeout32)
+semtimedop32(int semid, struct sembuf *tsops, int nsops,
+	     struct compat_timespec *timeout32)
 {
 	struct timespec t;
-	if (get_user(t.tv_sec, &timeout32->tv_sec) ||
-	    get_user(t.tv_nsec, &timeout32->tv_nsec))
+	mm_segment_t oldfs;
+	long ret;
+
+	/* parameter checking precedence should mirror sys_semtimedop() */
+	if (nsops < 1 || semid < 0)
+		return -EINVAL;
+	if (nsops > sc_semopm)
+		return -E2BIG;
+	if (!access_ok(VERIFY_READ, tsops, nsops * sizeof(struct sembuf)) ||
+	    get_compat_timespec(&t, timeout32))
 		return -EFAULT;
-	return sys_semtimedop(semid, tsems, nsems, &t);
+
+	oldfs = get_fs();
+	set_fs(KERNEL_DS);
+	ret = sys_semtimedop(semid, tsops, nsops, &t);
+	set_fs(oldfs);
+	return ret;
 }
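The rewrite above uses the classic compat-layer idiom: copy the 32-bit argument into a native kernel struct, then temporarily widen the address-space limit so the native syscall will accept a kernel pointer. A generic sketch of that idiom (sys_foo, compat_foo, and get_compat_foo are hypothetical names, shown only to restate the pattern):

    /* Sketch: calling a native syscall from a compat wrapper with a
     * kernel-space argument. KERNEL_DS makes the copy_*_user checks
     * accept kernel pointers for the duration of the call. */
    asmlinkage long compat_sys_foo_sketch(struct compat_foo *arg32)
    {
        struct foo karg;                    /* native-width copy */
        mm_segment_t oldfs;
        long ret;

        if (get_compat_foo(&karg, arg32))   /* hypothetical converter */
            return -EFAULT;

        oldfs = get_fs();
        set_fs(KERNEL_DS);                  /* let sys_foo() take &karg */
        ret = sys_foo(&karg);               /* hypothetical native syscall */
        set_fs(oldfs);                      /* always restore */
        return ret;
    }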
 asmlinkage long
 sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
 {
 	int version;
...
@@ -1668,12 +1612,15 @@ sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
 	call &= 0xffff;

 	switch (call) {
+	      case SEMTIMEDOP:
+		if (fifth)
+			return semtimedop32(first, (struct sembuf *)AA(ptr),
+				second, (struct compat_timespec *)AA(fifth));
+		/* else fall through for normal semop() */
 	      case SEMOP:
 		/* struct sembuf is the same on 32 and 64bit :)) */
-		return sys_semtimedop(first, (struct sembuf *)AA(ptr), second,
-				      NULL);
-	      case SEMTIMEDOP:
-		return semtimedop32(first, (struct sembuf *)AA(ptr), second,
-				    (const struct compat_timespec *)AA(fifth));
+		return sys_semtimedop(first, (struct sembuf *)AA(ptr), second, NULL);
 	      case SEMGET:
 		return sys_semget(first, second, third);
 	      case SEMCTL:
...
@@ -1724,98 +1671,10 @@ sys32_time (int *tloc)
 	return i;
 }

-struct rusage32 {
-	struct compat_timeval ru_utime;
-	struct compat_timeval ru_stime;
-	int    ru_maxrss;
-	int    ru_ixrss;
-	int    ru_idrss;
-	int    ru_isrss;
-	int    ru_minflt;
-	int    ru_majflt;
-	int    ru_nswap;
-	int    ru_inblock;
-	int    ru_oublock;
-	int    ru_msgsnd;
-	int    ru_msgrcv;
-	int    ru_nsignals;
-	int    ru_nvcsw;
-	int    ru_nivcsw;
-};
-
-static int
-put_rusage (struct rusage32 *ru, struct rusage *r)
-{
-	int err;
-
-	if (!access_ok(VERIFY_WRITE, ru, sizeof(*ru)))
-		return -EFAULT;
-
-	err = __put_user(r->ru_utime.tv_sec, &ru->ru_utime.tv_sec);
-	err |= __put_user(r->ru_utime.tv_usec, &ru->ru_utime.tv_usec);
-	err |= __put_user(r->ru_stime.tv_sec, &ru->ru_stime.tv_sec);
-	err |= __put_user(r->ru_stime.tv_usec, &ru->ru_stime.tv_usec);
-	err |= __put_user(r->ru_maxrss, &ru->ru_maxrss);
-	err |= __put_user(r->ru_ixrss, &ru->ru_ixrss);
-	err |= __put_user(r->ru_idrss, &ru->ru_idrss);
-	err |= __put_user(r->ru_isrss, &ru->ru_isrss);
-	err |= __put_user(r->ru_minflt, &ru->ru_minflt);
-	err |= __put_user(r->ru_majflt, &ru->ru_majflt);
-	err |= __put_user(r->ru_nswap, &ru->ru_nswap);
-	err |= __put_user(r->ru_inblock, &ru->ru_inblock);
-	err |= __put_user(r->ru_oublock, &ru->ru_oublock);
-	err |= __put_user(r->ru_msgsnd, &ru->ru_msgsnd);
-	err |= __put_user(r->ru_msgrcv, &ru->ru_msgrcv);
-	err |= __put_user(r->ru_nsignals, &ru->ru_nsignals);
-	err |= __put_user(r->ru_nvcsw, &ru->ru_nvcsw);
-	err |= __put_user(r->ru_nivcsw, &ru->ru_nivcsw);
-
-	return err;
-}
-
-asmlinkage long
-sys32_wait4 (int pid, unsigned int *stat_addr, int options, struct rusage32 *ru)
-{
-	if (!ru)
-		return sys_wait4(pid, stat_addr, options, NULL);
-	else {
-		struct rusage r;
-		int ret;
-		unsigned int status;
-		mm_segment_t old_fs = get_fs();
-
-		set_fs(KERNEL_DS);
-		ret = sys_wait4(pid, stat_addr ? &status : NULL, options, &r);
-		set_fs(old_fs);
-		if (put_rusage(ru, &r))
-			return -EFAULT;
-		if (stat_addr && put_user(status, stat_addr))
-			return -EFAULT;
-		return ret;
-	}
-}
-
 asmlinkage long
 sys32_waitpid (int pid, unsigned int *stat_addr, int options)
 {
-	return sys32_wait4(pid, stat_addr, options, NULL);
+	return compat_sys_wait4(pid, stat_addr, options, NULL);
 }

-extern asmlinkage long sys_getrusage (int who, struct rusage *ru);
-
-asmlinkage long
-sys32_getrusage (int who, struct rusage32 *ru)
-{
-	struct rusage r;
-	int ret;
-	mm_segment_t old_fs = get_fs();
-
-	set_fs(KERNEL_DS);
-	ret = sys_getrusage(who, &r);
-	set_fs(old_fs);
-	if (put_rusage(ru, &r))
-		return -EFAULT;
-	return ret;
-}
-
 static unsigned int
...
arch/ia64/kernel/Makefile
View file @ 20f6d716
...
@@ -11,6 +11,8 @@ obj-y := acpi.o entry.o efi.o efi_stub.o gate.o ia64_ksyms.o irq.o irq_ia64.o ir
 obj-$(CONFIG_EFI_VARS)		+= efivars.o
 obj-$(CONFIG_FSYS)		+= fsys.o
 obj-$(CONFIG_IA64_BRL_EMU)	+= brl_emu.o
+obj-$(CONFIG_IA64_GENERIC)	+= acpi-ext.o
+obj-$(CONFIG_IA64_HP_ZX1)	+= acpi-ext.o
 obj-$(CONFIG_IA64_MCA)		+= mca.o mca_asm.o
 obj-$(CONFIG_IA64_PALINFO)	+= palinfo.o
 obj-$(CONFIG_IOSAPIC)		+= iosapic.o
...
arch/ia64/kernel/acpi-ext.c
View file @ 20f6d716
...
@@ -3,69 +3,99 @@
  *
  * Copyright (C) 2003 Hewlett-Packard
  * Copyright (C) Alex Williamson
+ * Copyright (C) Bjorn Helgaas
  *
- * Vendor specific extensions to ACPI. These are used by both
- * HP and NEC.
+ * Vendor specific extensions to ACPI.
  */

 #include <linux/config.h>
+#include <linux/module.h>
 #include <linux/types.h>
 #include <linux/acpi.h>
 #include <linux/efi.h>

 #include <asm/acpi-ext.h>

-/*
- * Note: Strictly speaking, this is only needed for HP and NEC machines.
- * However, NEC machines identify themselves as DIG-compliant, so there is
- * no easy way to #ifdef this out.
- */
-acpi_status
-hp_acpi_csr_space (acpi_handle obj, u64 *csr_base, u64 *csr_length)
-{
-	int i, offset = 0;
-	acpi_status status;
-	struct acpi_buffer buf;
-	struct acpi_resource_vendor *res;
-	struct acpi_hp_vendor_long *hp_res;
-	efi_guid_t vendor_guid;
-
-	*csr_base = 0;
-	*csr_length = 0;
-
-	status = acpi_get_crs(obj, &buf);
-	if (ACPI_FAILURE(status)) {
-		printk(KERN_ERR PREFIX "Unable to get _CRS data on object\n");
-		return status;
-	}
-
-	res = (struct acpi_resource_vendor *)acpi_get_crs_type(&buf, &offset, ACPI_RSTYPE_VENDOR);
-	if (!res) {
-		printk(KERN_ERR PREFIX "Failed to find config space for device\n");
-		acpi_dispose_crs(&buf);
-		return AE_NOT_FOUND;
-	}
-
-	hp_res = (struct acpi_hp_vendor_long *)(res->reserved);
-
-	if (res->length != HP_CCSR_LENGTH || hp_res->guid_id != HP_CCSR_TYPE) {
-		printk(KERN_ERR PREFIX "Unknown Vendor data\n");
-		acpi_dispose_crs(&buf);
-		return AE_TYPE; /* Revisit error? */
-	}
-
-	memcpy(&vendor_guid, hp_res->guid, sizeof(efi_guid_t));
-	if (efi_guidcmp(vendor_guid, HP_CCSR_GUID) != 0) {
-		printk(KERN_ERR PREFIX "Vendor GUID does not match\n");
-		acpi_dispose_crs(&buf);
-		return AE_TYPE; /* Revisit error? */
-	}
-
-	for (i = 0; i < 8; i++) {
-		*csr_base |= ((u64)(hp_res->csr_base[i]) << (i * 8));
-		*csr_length |= ((u64)(hp_res->csr_length[i]) << (i * 8));
-	}
-
-	acpi_dispose_crs(&buf);
-
-	return AE_OK;
+struct acpi_vendor_descriptor {
+	u8				guid_id;
+	efi_guid_t			guid;
+};
+
+struct acpi_vendor_info {
+	struct acpi_vendor_descriptor	*descriptor;
+	u8				*data;
+	u32				length;
+};
+
+acpi_status
+acpi_vendor_resource_match(struct acpi_resource *resource, void *context)
+{
+	struct acpi_vendor_info *info = (struct acpi_vendor_info *) context;
+	struct acpi_resource_vendor *vendor;
+	struct acpi_vendor_descriptor *descriptor;
+	u32 length;
+
+	if (resource->id != ACPI_RSTYPE_VENDOR)
+		return AE_OK;
+
+	vendor = (struct acpi_resource_vendor *) &resource->data;
+	descriptor = (struct acpi_vendor_descriptor *) vendor->reserved;
+	if (vendor->length <= sizeof(*info->descriptor) ||
+	    descriptor->guid_id != info->descriptor->guid_id ||
+	    efi_guidcmp(descriptor->guid, info->descriptor->guid))
+		return AE_OK;
+
+	length = vendor->length - sizeof(struct acpi_vendor_descriptor);
+	info->data = acpi_os_allocate(length);
+	if (!info->data)
+		return AE_NO_MEMORY;
+
+	memcpy(info->data, vendor->reserved + sizeof(struct acpi_vendor_descriptor), length);
+	info->length = length;
+	return AE_CTRL_TERMINATE;
+}
+
+acpi_status
+acpi_find_vendor_resource(acpi_handle obj, struct acpi_vendor_descriptor *id, u8 **data, u32 *length)
+{
+	struct acpi_vendor_info info;
+
+	info.descriptor = id;
+	info.data = 0;
+
+	acpi_walk_resources(obj, METHOD_NAME__CRS, acpi_vendor_resource_match, &info);
+	if (!info.data)
+		return AE_NOT_FOUND;
+
+	*data = info.data;
+	*length = info.length;
+	return AE_OK;
+}
+
+struct acpi_vendor_descriptor hp_ccsr_descriptor = {
+	.guid_id = 2,
+	.guid    = EFI_GUID(0x69e9adf9, 0x924f, 0xab5f, 0xf6, 0x4a, 0x24, 0xd2, 0x01, 0x37, 0x0e, 0xad)
+};
+
+acpi_status
+hp_acpi_csr_space (acpi_handle obj, u64 *csr_base, u64 *csr_length)
+{
+	acpi_status status;
+	u8 *data;
+	u32 length;
+	int i;
+
+	status = acpi_find_vendor_resource(obj, &hp_ccsr_descriptor, &data, &length);
+
+	if (ACPI_FAILURE(status) || length != 16)
+		return AE_NOT_FOUND;
+
+	memcpy(csr_base, data, sizeof(*csr_base));
+	memcpy(csr_length, data + 8, sizeof(*csr_length));
+	acpi_os_free(data);
+
+	return AE_OK;
 }
+
+EXPORT_SYMBOL(hp_acpi_csr_space);
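For reference, the consumer added in this same commit (sba_iommu.c's acpi_sba_ioc_add()) calls the exported helper like this:

    u64 hpa, length;

    status = hp_acpi_csr_space(device->handle, &hpa, &length);
    if (ACPI_FAILURE(status))
        return 1;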
arch/ia64/kernel/acpi.c
View file @ 20f6d716
...
@@ -115,134 +115,6 @@ acpi_get_sysname (void)
 #endif
 }

-#ifdef CONFIG_ACPI
-
-/**
- * acpi_get_crs - Return the current resource settings for a device
- * obj: A handle for this device
- * buf: A buffer to be populated by this call.
- *
- * Pass a valid handle, typically obtained by walking the namespace and a
- * pointer to an allocated buffer, and this function will fill in the buffer
- * with a list of acpi_resource structures.
- */
-acpi_status
-acpi_get_crs (acpi_handle obj, struct acpi_buffer *buf)
-{
-	acpi_status result;
-	buf->length = 0;
-	buf->pointer = NULL;
-
-	result = acpi_get_current_resources(obj, buf);
-	if (result != AE_BUFFER_OVERFLOW)
-		return result;
-	buf->pointer = kmalloc(buf->length, GFP_KERNEL);
-	if (!buf->pointer)
-		return -ENOMEM;
-
-	return acpi_get_current_resources(obj, buf);
-}
-
-struct acpi_resource *
-acpi_get_crs_next (struct acpi_buffer *buf, int *offset)
-{
-	struct acpi_resource *res;
-
-	if (*offset >= buf->length)
-		return NULL;
-
-	res = buf->pointer + *offset;
-	*offset += res->length;
-	return res;
-}
-
-union acpi_resource_data *
-acpi_get_crs_type (struct acpi_buffer *buf, int *offset, int type)
-{
-	for (;;) {
-		struct acpi_resource *res = acpi_get_crs_next(buf, offset);
-		if (!res)
-			return NULL;
-		if (res->id == type)
-			return &res->data;
-	}
-}
-
-void
-acpi_dispose_crs (struct acpi_buffer *buf)
-{
-	kfree(buf->pointer);
-}
-
-void
-acpi_get_crs_addr (struct acpi_buffer *buf, int type, u64 *base, u64 *size, u64 *tra)
-{
-	int offset = 0;
-	struct acpi_resource_address16 *addr16;
-	struct acpi_resource_address32 *addr32;
-	struct acpi_resource_address64 *addr64;
-
-	for (;;) {
-		struct acpi_resource *res = acpi_get_crs_next(buf, &offset);
-		if (!res)
-			return;
-		switch (res->id) {
-			case ACPI_RSTYPE_ADDRESS16:
-				addr16 = (struct acpi_resource_address16 *) &res->data;
-				if (type == addr16->resource_type) {
-					*base = addr16->min_address_range;
-					*size = addr16->address_length;
-					*tra = addr16->address_translation_offset;
-					return;
-				}
-				break;
-			case ACPI_RSTYPE_ADDRESS32:
-				addr32 = (struct acpi_resource_address32 *) &res->data;
-				if (type == addr32->resource_type) {
-					*base = addr32->min_address_range;
-					*size = addr32->address_length;
-					*tra = addr32->address_translation_offset;
-					return;
-				}
-				break;
-			case ACPI_RSTYPE_ADDRESS64:
-				addr64 = (struct acpi_resource_address64 *) &res->data;
-				if (type == addr64->resource_type) {
-					*base = addr64->min_address_range;
-					*size = addr64->address_length;
-					*tra = addr64->address_translation_offset;
-					return;
-				}
-				break;
-		}
-	}
-}
-
-int
-acpi_get_addr_space (void *obj, u8 type, u64 *base, u64 *length, u64 *tra)
-{
-	acpi_status status;
-	struct acpi_buffer buf;
-
-	*base = 0;
-	*length = 0;
-	*tra = 0;
-
-	status = acpi_get_crs((acpi_handle) obj, &buf);
-	if (ACPI_FAILURE(status)) {
-		printk(KERN_ERR PREFIX "Unable to get _CRS data on object\n");
-		return status;
-	}
-
-	acpi_get_crs_addr(&buf, type, base, length, tra);
-	acpi_dispose_crs(&buf);
-	return AE_OK;
-}
-#endif /* CONFIG_ACPI */
-
 #ifdef CONFIG_ACPI_BOOT

 #define ACPI_MAX_PLATFORM_INTERRUPTS	256
...
@@ -918,8 +790,7 @@ acpi_register_irq (u32 gsi, u32 polarity, u32 trigger)
 		return 0;

 	/* Turn it on */
-	vector = iosapic_register_intr(gsi, polarity ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
-				       trigger ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
+	vector = iosapic_register_intr(gsi, polarity, trigger);
 	return vector;
 }
...
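After this change the raw ACPI polarity and trigger values go straight to iosapic_register_intr(), which presumably now does its own translation; before, the caller open-coded it. A restatement of the old caller-side translation as helpers (the helper names are invented; the constants are the real IOSAPIC ones, and the 1 = active-high/edge reading follows the old conditional):

    /* Invented helpers restating the old caller-side translation. */
    static inline unsigned long acpi_polarity_to_iosapic(u32 polarity)
    {
        return polarity ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW;
    }

    static inline unsigned long acpi_trigger_to_iosapic(u32 trigger)
    {
        return trigger ? IOSAPIC_EDGE : IOSAPIC_LEVEL;
    }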
arch/ia64/kernel/brl_emu.c
View file @ 20f6d716
...
@@ -59,7 +59,7 @@ ia64_emulate_brl (struct pt_regs *regs, unsigned long ar_ec)
 	unsigned long next_ip;
 	struct siginfo siginfo;
 	struct illegal_op_return rv;
-	int tmp_taken, unimplemented_address;
+	long tmp_taken, unimplemented_address;

 	rv.fkt = (unsigned long) -1;
...
arch/ia64/kernel/head.S
View file @ 20f6d716
...
@@ -733,3 +733,82 @@ SET_REG(b4);
 SET_REG(b5);

 #endif /* CONFIG_IA64_BRL_EMU */
+
+#ifdef CONFIG_SMP
+
+	/*
+	 * This routine handles spinlock contention.  It uses a non-standard calling
+	 * convention to avoid converting leaf routines into interior routines.  Because
+	 * of this special convention, there are several restrictions:
+	 *
+	 * - do not use gp relative variables, this code is called from the kernel
+	 *   and from modules, r1 is undefined.
+	 * - do not use stacked registers, the caller owns them.
+	 * - do not use the scratch stack space, the caller owns it.
+	 * - do not use any registers other than the ones listed below
+	 *
+	 * Inputs:
+	 *   ar.pfs - saved CFM of caller
+	 *   ar.ccv - 0 (and available for use)
+	 *   r28    - available for use.
+	 *   r29    - available for use.
+	 *   r30    - available for use.
+	 *   r31    - address of lock, available for use.
+	 *   b7     - return address
+	 *   p14    - available for use.
+	 *
+	 * If you patch this code to use more registers, do not forget to update
+	 * the clobber lists for spin_lock() in include/asm-ia64/spinlock.h.
+	 */
+
+#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 4)
+
+GLOBAL_ENTRY(ia64_spinlock_contention_pre3_4)
+	.prologue
+	.save ar.pfs, r0	// this code effectively has a zero frame size
+	.save rp, r28
+	.body
+	nop 0
+	nop 0
+	.restore sp		// pop existing prologue after next insn
+	mov b6 = r28
+	.prologue
+	.save ar.pfs, r0
+	.altrp b6
+	.body
+.wait:
+	// exponential backoff, kdb, lockmeter etc. go in here
+	hint @pause
+	ld4.bias r30=[r31]
+	nop 0
+	;;
+	cmp4.eq p14,p0=r30,r0
+(p14)	br.cond.sptk.few b6	// lock is now free, try to acquire
+	br.cond.sptk.few .wait
+END(ia64_spinlock_contention_pre3_4)
+
+#else
+
+GLOBAL_ENTRY(ia64_spinlock_contention)
+	.prologue
+	.altrp b6
+	.body
+.wait:
+	// exponential backoff, kdb, lockmeter etc. go in here
+	hint @pause
+	ld4.bias r30=[r31]
+	;;
+	cmp4.ne p14,p0=r30,r0
+	mov r30 = 1
+(p14)	br.cond.sptk.few .wait
+	;;
+	cmpxchg4.acq r30=[r31], r30, ar.ccv
+	;;
+	cmp4.ne p14,p0=r0,r30
+(p14)	br.cond.sptk.few .wait
+	br.ret.sptk.many b6	// lock is now taken
+END(ia64_spinlock_contention)
+
+#endif
+
+#endif /* CONFIG_SMP */
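For readers who don't speak ia64 assembly, the contention path above is, in effect, a test-and-test-and-set loop: spin on plain loads until the lock word reads zero, then attempt the atomic exchange, and go back to spinning if it fails. A C rendering of that logic (illustrative only; __sync_val_compare_and_swap is a modern GCC builtin standing in for cmpxchg4.acq):

    /* Illustrative C equivalent of ia64_spinlock_contention. */
    static void spinlock_contention_sketch(volatile unsigned int *lock)
    {
        for (;;) {
            while (*lock != 0)
                ;                       /* ld4.bias + hint @pause loop */
            if (__sync_val_compare_and_swap(lock, 0, 1) == 0)
                return;                 /* cmpxchg4.acq succeeded */
        }
    }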
arch/ia64/kernel/iosapic.c
@@ -581,9 +581,8 @@ iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
 	register_intr(gsi, vector, IOSAPIC_LOWEST_PRIORITY, polarity, trigger);

 	DBG("ISA: IRQ %u -> GSI 0x%x (%s,%s) -> CPU 0x%04x vector %d\n",
 	    isa_irq, gsi,
 	    polarity == IOSAPIC_POL_HIGH ? "high" : "low",
 	    trigger == IOSAPIC_EDGE ? "edge" : "level",
 	    dest, vector);

 	/* program the IOSAPIC routing table */
 	set_rte(vector, dest);
@@ -635,7 +634,6 @@ iosapic_init (unsigned long phys_addr, unsigned int gsi_base)
 	       (ver & 0xf0) >> 4, (ver & 0x0f), phys_addr, gsi_base, gsi_base + num_rte - 1);

 	if ((gsi_base == 0) && pcat_compat) {
 		/*
 		 * Map the legacy ISA devices into the IOSAPIC data. Some of these may
 		 * get reprogrammed later on with data from the ACPI Interrupt Source
@@ -646,20 +644,11 @@ iosapic_init (unsigned long phys_addr, unsigned int gsi_base)
 	}
 }

-static void __init
-fixup_vector (int vector, unsigned int gsi, const char *pci_id)
+static void
+iosapic_enable_intr (unsigned int vector)
 {
-	struct hw_interrupt_type *irq_type = &irq_type_iosapic_level;
-	irq_desc_t *idesc;
 	unsigned int dest;

-	idesc = irq_desc(vector);
-	if (idesc->handler != irq_type) {
-		if (idesc->handler != &no_irq_type)
-			printk(KERN_INFO "IOSAPIC: changing vector %d from %s to %s\n",
-			       vector, idesc->handler->typename, irq_type->typename);
-		idesc->handler = irq_type;
-	}
 #ifdef CONFIG_SMP
 	/*
 	 * For platforms that do not support interrupt redirect via the XTP interface, we
@@ -687,8 +676,8 @@ fixup_vector (int vector, unsigned int gsi, const char *pci_id)
 #endif
 	set_rte(vector, dest);

-	printk(KERN_INFO "IOSAPIC: %s -> GSI 0x%x -> CPU 0x%04x vector %d\n",
-	       pci_id, gsi, dest, vector);
+	printk(KERN_INFO "IOSAPIC: vector %d -> CPU 0x%04x, enabled\n",
+	       vector, dest);
 }

 void __init
@@ -699,6 +688,8 @@ iosapic_parse_prt (void)
 	unsigned int gsi;
 	int vector;
 	char pci_id[16];
+	struct hw_interrupt_type *irq_type = &irq_type_iosapic_level;
+	irq_desc_t *idesc;

 	list_for_each(node, &acpi_prt.entries) {
 		entry = list_entry(node, struct acpi_prt_entry, node);
@@ -711,6 +702,9 @@ iosapic_parse_prt (void)
 		vector = gsi_to_vector(gsi);
 		if (vector < 0) {
+			if (find_iosapic(gsi) < 0)
+				continue;
+
 			/* allocate a vector for this interrupt line */
 			if (pcat_compat && (gsi < 16))
 				vector = isa_irq_to_vector(gsi);
@@ -723,6 +717,13 @@ iosapic_parse_prt (void)
 		snprintf(pci_id, sizeof(pci_id), "%02x:%02x:%02x[%c]",
 			 entry->id.segment, entry->id.bus, entry->id.device, 'A' + entry->pin);

-		fixup_vector(vector, gsi, pci_id);
+		/*
+		 * If vector was previously initialized to a different
+		 * handler, re-initialize.
+		 */
+		idesc = irq_desc(vector);
+		if (idesc->handler != irq_type)
+			register_intr(gsi, vector, IOSAPIC_LOWEST_PRIORITY,
+				      IOSAPIC_POL_LOW, IOSAPIC_LEVEL);
 	}
 }
arch/ia64/kernel/irq.c
@@ -50,7 +50,7 @@
  * Linux has a controller-independent x86 interrupt architecture.
  * every controller has a 'controller-template', that is used
  * by the main code to do the right thing. Each driver-visible
- * interrupt source is transparently wired to the apropriate
+ * interrupt source is transparently wired to the appropriate
  * controller. Thus drivers need not be aware of the
  * interrupt-controller.
  *
@@ -705,7 +705,7 @@ unsigned int probe_irq_mask(unsigned long val)
  * The interrupt probe logic state is returned to its previous
  * value.
  *
- * BUGS: When used in a module (which arguably shouldnt happen)
+ * BUGS: When used in a module (which arguably shouldn't happen)
  * nothing prevents two IRQ probe callers from overlapping. The
  * results of this are non-optimal.
  */
arch/ia64/kernel/mca.c
@@ -3,6 +3,9 @@
  * Purpose: Generic MCA handling layer
  *
  * Updated for latest kernel
+ * Copyright (C) 2003 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ *
  * Copyright (C) 2002 Dell Computer Corporation
  * Copyright (C) Matt Domsch (Matt_Domsch@dell.com)
  *
@@ -18,6 +21,7 @@
  * Copyright (C) 1999 Silicon Graphics, Inc.
  * Copyright (C) Vijay Chander(vijay@engr.sgi.com)
  *
+ * 03/04/15 D. Mosberger Added INIT backtrace support.
  * 02/03/25 M. Domsch	GUID cleanups
  *
  * 02/01/04 J. Hall	Aligned MCA stack to 16 bytes, added platform vs. CPU
@@ -39,6 +43,7 @@
 #include <linux/sched.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
+#include <linux/kallsyms.h>
 #include <linux/smp_lock.h>
 #include <linux/bootmem.h>
 #include <linux/acpi.h>
@@ -47,6 +52,7 @@
 #include <linux/kernel.h>
 #include <linux/smp.h>

+#include <asm/delay.h>
 #include <asm/machvec.h>
 #include <asm/page.h>
 #include <asm/ptrace.h>
@@ -139,7 +145,7 @@ ia64_mca_log_sal_error_record(int sal_info_type, int called_from_init)
 	/* Get the MCA error record */
 	if (!ia64_log_get(sal_info_type, (prfunc_t)printk))
-		return platform_err;		// no record retrieved
+		return platform_err;		/* no record retrieved */

 	/* TODO:
 	 * 1. analyze error logs to determine recoverability
@@ -176,18 +182,167 @@ ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
 	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE, 0);
 }

-/*
- * This routine will be used to deal with platform specific handling
- * of the init, i.e. drop into the kernel debugger on server machine,
- * or if the processor is part of some parallel machine without a
- * console, then we would call the appropriate debug hooks here.
- */
+static void
+show_min_state (pal_min_state_area_t *minstate)
+{
+	u64 iip = minstate->pmsa_iip + ((struct ia64_psr *)(&minstate->pmsa_ipsr))->ri;
+	u64 xip = minstate->pmsa_xip + ((struct ia64_psr *)(&minstate->pmsa_xpsr))->ri;
+
+	printk("NaT bits\t%016lx\n", minstate->pmsa_nat_bits);
+	printk("pr\t\t%016lx\n", minstate->pmsa_pr);
+	printk("b0\t\t%016lx ", minstate->pmsa_br0); print_symbol("%s\n", minstate->pmsa_br0);
+	printk("ar.rsc\t\t%016lx\n", minstate->pmsa_rsc);
+	printk("cr.iip\t\t%016lx ", iip); print_symbol("%s\n", iip);
+	printk("cr.ipsr\t\t%016lx\n", minstate->pmsa_ipsr);
+	printk("cr.ifs\t\t%016lx\n", minstate->pmsa_ifs);
+	printk("xip\t\t%016lx ", xip); print_symbol("%s\n", xip);
+	printk("xpsr\t\t%016lx\n", minstate->pmsa_xpsr);
+	printk("xfs\t\t%016lx\n", minstate->pmsa_xfs);
+	printk("b1\t\t%016lx ", minstate->pmsa_br1); print_symbol("%s\n", minstate->pmsa_br1);
+
+	printk("\nstatic registers r0-r15:\n");
+	printk(" r0- 3 %016lx %016lx %016lx %016lx\n",
+	       0UL, minstate->pmsa_gr[0], minstate->pmsa_gr[1], minstate->pmsa_gr[2]);
+	printk(" r4- 7 %016lx %016lx %016lx %016lx\n",
+	       minstate->pmsa_gr[3], minstate->pmsa_gr[4],
+	       minstate->pmsa_gr[5], minstate->pmsa_gr[6]);
+	printk(" r8-11 %016lx %016lx %016lx %016lx\n",
+	       minstate->pmsa_gr[7], minstate->pmsa_gr[8],
+	       minstate->pmsa_gr[9], minstate->pmsa_gr[10]);
+	printk("r11-15 %016lx %016lx %016lx %016lx\n",
+	       minstate->pmsa_gr[11], minstate->pmsa_gr[12],
+	       minstate->pmsa_gr[13], minstate->pmsa_gr[14]);
+
+	printk("\nbank 0:\n");
+	printk("r16-19 %016lx %016lx %016lx %016lx\n",
+	       minstate->pmsa_bank0_gr[0], minstate->pmsa_bank0_gr[1],
+	       minstate->pmsa_bank0_gr[2], minstate->pmsa_bank0_gr[3]);
+	printk("r20-23 %016lx %016lx %016lx %016lx\n",
+	       minstate->pmsa_bank0_gr[4], minstate->pmsa_bank0_gr[5],
+	       minstate->pmsa_bank0_gr[6], minstate->pmsa_bank0_gr[7]);
+	printk("r24-27 %016lx %016lx %016lx %016lx\n",
+	       minstate->pmsa_bank0_gr[8], minstate->pmsa_bank0_gr[9],
+	       minstate->pmsa_bank0_gr[10], minstate->pmsa_bank0_gr[11]);
+	printk("r28-31 %016lx %016lx %016lx %016lx\n",
+	       minstate->pmsa_bank0_gr[12], minstate->pmsa_bank0_gr[13],
+	       minstate->pmsa_bank0_gr[14], minstate->pmsa_bank0_gr[15]);
+
+	printk("\nbank 1:\n");
+	printk("r16-19 %016lx %016lx %016lx %016lx\n",
+	       minstate->pmsa_bank1_gr[0], minstate->pmsa_bank1_gr[1],
+	       minstate->pmsa_bank1_gr[2], minstate->pmsa_bank1_gr[3]);
+	printk("r20-23 %016lx %016lx %016lx %016lx\n",
+	       minstate->pmsa_bank1_gr[4], minstate->pmsa_bank1_gr[5],
+	       minstate->pmsa_bank1_gr[6], minstate->pmsa_bank1_gr[7]);
+	printk("r24-27 %016lx %016lx %016lx %016lx\n",
+	       minstate->pmsa_bank1_gr[8], minstate->pmsa_bank1_gr[9],
+	       minstate->pmsa_bank1_gr[10], minstate->pmsa_bank1_gr[11]);
+	printk("r28-31 %016lx %016lx %016lx %016lx\n",
+	       minstate->pmsa_bank1_gr[12], minstate->pmsa_bank1_gr[13],
+	       minstate->pmsa_bank1_gr[14], minstate->pmsa_bank1_gr[15]);
+}
+
+static void
+fetch_min_state (pal_min_state_area_t *ms, struct pt_regs *pt, struct switch_stack *sw)
+{
+	u64 *dst_banked, *src_banked, bit, shift, nat_bits;
+	int i;
+
+	/*
+	 * First, update the pt-regs and switch-stack structures with the contents stored
+	 * in the min-state area:
+	 */
+	if (((struct ia64_psr *) &ms->pmsa_ipsr)->ic == 0) {
+		pt->cr_ipsr = ms->pmsa_xpsr;
+		pt->cr_iip = ms->pmsa_xip;
+		pt->cr_ifs = ms->pmsa_xfs;
+	} else {
+		pt->cr_ipsr = ms->pmsa_ipsr;
+		pt->cr_iip = ms->pmsa_iip;
+		pt->cr_ifs = ms->pmsa_ifs;
+	}
+	pt->ar_rsc = ms->pmsa_rsc;
+	pt->pr = ms->pmsa_pr;
+	pt->r1 = ms->pmsa_gr[0];
+	pt->r2 = ms->pmsa_gr[1];
+	pt->r3 = ms->pmsa_gr[2];
+	sw->r4 = ms->pmsa_gr[3];
+	sw->r5 = ms->pmsa_gr[4];
+	sw->r6 = ms->pmsa_gr[5];
+	sw->r7 = ms->pmsa_gr[6];
+	pt->r8 = ms->pmsa_gr[7];
+	pt->r9 = ms->pmsa_gr[8];
+	pt->r10 = ms->pmsa_gr[9];
+	pt->r11 = ms->pmsa_gr[10];
+	pt->r12 = ms->pmsa_gr[11];
+	pt->r13 = ms->pmsa_gr[12];
+	pt->r14 = ms->pmsa_gr[13];
+	pt->r15 = ms->pmsa_gr[14];
+	dst_banked = &pt->r16;		/* r16-r31 are contiguous in struct pt_regs */
+	src_banked = ms->pmsa_bank1_gr;
+	for (i = 0; i < 16; ++i)
+		dst_banked[i] = src_banked[i];
+	pt->b0 = ms->pmsa_br0;
+	sw->b1 = ms->pmsa_br1;
+
+	/* construct the NaT bits for the pt-regs structure: */
+#	define PUT_NAT_BIT(dst, addr)					\
+	do {								\
+		bit = nat_bits & 1; nat_bits >>= 1;			\
+		shift = ((unsigned long) addr >> 3) & 0x3f;		\
+		dst = ((dst) & ~(1UL << shift)) | (bit << shift);	\
+	} while (0)
+
+	/* Rotate the saved NaT bits such that bit 0 corresponds to pmsa_gr[0]: */
+	shift = ((unsigned long) &ms->pmsa_gr[0] >> 3) & 0x3f;
+	nat_bits = (ms->pmsa_nat_bits >> shift) | (ms->pmsa_nat_bits << (64 - shift));
+
+	PUT_NAT_BIT(sw->caller_unat, &pt->r1);
+	PUT_NAT_BIT(sw->caller_unat, &pt->r2);
+	PUT_NAT_BIT(sw->caller_unat, &pt->r3);
+	PUT_NAT_BIT(sw->ar_unat, &sw->r4);
+	PUT_NAT_BIT(sw->ar_unat, &sw->r5);
+	PUT_NAT_BIT(sw->ar_unat, &sw->r6);
+	PUT_NAT_BIT(sw->ar_unat, &sw->r7);
+	PUT_NAT_BIT(sw->caller_unat, &pt->r8);
+	PUT_NAT_BIT(sw->caller_unat, &pt->r9);
+	PUT_NAT_BIT(sw->caller_unat, &pt->r10);
+	PUT_NAT_BIT(sw->caller_unat, &pt->r11);
+	PUT_NAT_BIT(sw->caller_unat, &pt->r12);
+	PUT_NAT_BIT(sw->caller_unat, &pt->r13);
+	PUT_NAT_BIT(sw->caller_unat, &pt->r14);
+	PUT_NAT_BIT(sw->caller_unat, &pt->r15);
+	nat_bits >>= 16;	/* skip over bank0 NaT bits */
+	PUT_NAT_BIT(sw->caller_unat, &pt->r16);
+	PUT_NAT_BIT(sw->caller_unat, &pt->r17);
+	PUT_NAT_BIT(sw->caller_unat, &pt->r18);
+	PUT_NAT_BIT(sw->caller_unat, &pt->r19);
+	PUT_NAT_BIT(sw->caller_unat, &pt->r20);
+	PUT_NAT_BIT(sw->caller_unat, &pt->r21);
+	PUT_NAT_BIT(sw->caller_unat, &pt->r22);
+	PUT_NAT_BIT(sw->caller_unat, &pt->r23);
+	PUT_NAT_BIT(sw->caller_unat, &pt->r24);
+	PUT_NAT_BIT(sw->caller_unat, &pt->r25);
+	PUT_NAT_BIT(sw->caller_unat, &pt->r26);
+	PUT_NAT_BIT(sw->caller_unat, &pt->r27);
+	PUT_NAT_BIT(sw->caller_unat, &pt->r28);
+	PUT_NAT_BIT(sw->caller_unat, &pt->r29);
+	PUT_NAT_BIT(sw->caller_unat, &pt->r30);
+	PUT_NAT_BIT(sw->caller_unat, &pt->r31);
+}
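Editor's note: PUT_NAT_BIT is dense. The UNAT convention indexes a register's NaT bit by bits 8:3 of the 8-byte-aligned address the register was spilled to, i.e. (addr >> 3) & 0x3f, and fetch_min_state() first rotates pmsa_nat_bits so that bit 0 lines up with pmsa_gr[0]. A small stand-alone C illustration of the bit-placement arithmetic (the spill addresses below are made up):

#include <stdio.h>

/* Place one register's NaT bit into a UNAT-style word.  The bit position is
 * derived from the spill address exactly as in PUT_NAT_BIT above. */
static unsigned long put_nat_bit(unsigned long unat, unsigned long spill_addr,
				 unsigned long nat)
{
	unsigned long shift = (spill_addr >> 3) & 0x3f;	/* bits 8:3 of the address */
	return (unat & ~(1UL << shift)) | (nat << shift);
}

int main(void)
{
	unsigned long unat = 0;
	/* two hypothetical adjacent 8-byte spill slots */
	unat = put_nat_bit(unat, 0x1000, 1);	/* this register was a NaT */
	unat = put_nat_bit(unat, 0x1008, 0);	/* this one held a value   */
	printf("unat = %#lx\n", unat);		/* bit 0 set, bit 1 clear  */
	return 0;
}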
 void
-init_handler_platform (struct pt_regs *regs)
+init_handler_platform (sal_log_processor_info_t *proc_ptr,
+		       struct pt_regs *pt, struct switch_stack *sw)
 {
+	struct unw_frame_info info;
+
 	/* if a kernel debugger is available call it here else just dump the registers */
-	show_regs(regs);		/* dump the state info */
+
+	/*
+	 * Wait for a bit.  On some machines (e.g., HP's zx2000 and zx6000, INIT can be
+	 * generated via the BMC's command-line interface, but since the console is on the
+	 * same serial line, the user will need some time to switch out of the BMC before
+	 * the dump begins.
+	 */
+	printk("Delaying for 5 seconds...\n");
+	udelay(5*1000000);
+	show_min_state(&SAL_LPI_PSI_INFO(proc_ptr)->min_state_area);
+
+	fetch_min_state(&SAL_LPI_PSI_INFO(proc_ptr)->min_state_area, pt, sw);
+	unw_init_from_interruption(&info, current, pt, sw);
+	ia64_do_show_stack(&info, NULL);
+
+	printk("\nINIT dump complete.  Please reboot now.\n");
 	while (1);			/* hang city if no debugger */
 }
@@ -263,7 +418,6 @@ ia64_mca_register_cpev (int cpev)
 /*
  * routine to process and prepare to dump min_state_save
  * information for debugging purposes.
- *
  */
 void
 ia64_process_min_state_save (pal_min_state_area_t *pmss)
@@ -272,8 +426,6 @@ ia64_process_min_state_save (pal_min_state_area_t *pmss)
 	u64 *tpmss_ptr = (u64 *)pmss;
 	u64 *return_min_state_ptr = ia64_mca_min_state_save_info;

-	/* dump out the min_state_area information */
-
 	for (i=0;i<max;i++) {
 		/* copy min-state register info for eventual return to PAL */
@@ -986,7 +1138,7 @@ ia64_mca_cpe_int_caller(void *dummy)
  *  ia64_mca_cpe_poll
  *
  *	Poll for Corrected Platform Errors (CPEs), dynamically adjust
- *	polling interval based on occurance of an event.
+ *	polling interval based on occurrence of an event.
  *
  * Inputs   : dummy(unused)
  * Outputs  : None
@@ -1062,7 +1214,7 @@ device_initcall(ia64_mca_late_init);
  *
  */
 void
-ia64_init_handler (struct pt_regs *regs)
+ia64_init_handler (struct pt_regs *pt, struct switch_stack *sw)
 {
 	sal_log_processor_info_t *proc_ptr;
 	ia64_err_rec_t *plog_ptr;
@@ -1089,7 +1241,7 @@ ia64_init_handler (struct pt_regs *regs)
 	/* Clear the INIT SAL logs now that they have been saved in the OS buffer */
 	ia64_sal_clear_state_info(SAL_INFO_TYPE_INIT);

-	init_handler_platform(regs);			/* call platform specific routines */
+	init_handler_platform(proc_ptr, pt, sw);	/* call platform specific routines */
 }

 /*
@@ -2139,7 +2291,8 @@ ia64_log_print(int sal_info_type, prfunc_t prfunc)
 	switch(sal_info_type) {
 	      case SAL_INFO_TYPE_MCA:
 		prfunc("+BEGIN HARDWARE ERROR STATE AT MCA\n");
 		platform_err = ia64_log_platform_info_print(IA64_LOG_CURR_BUFFER(sal_info_type), prfunc);
 		prfunc("+END HARDWARE ERROR STATE AT MCA\n");
 		break;
 	      case SAL_INFO_TYPE_INIT:
arch/ia64/kernel/mca_asm.S
@@ -766,8 +766,6 @@ GLOBAL_ENTRY(ia64_monarch_init_handler)
 	// stash the information the SAL passed to os
 	SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
 	;;
-	// now we want to save information so we can dump registers
 	SAVE_MIN_WITH_COVER
 	;;
 	mov r8=cr.ifa
@@ -798,10 +796,12 @@ IVirtual_Switch:
 	//
 	// Let's call the C handler to get the rest of the state info
 	//
-	alloc r14=ar.pfs,0,0,1,0	// now it's safe (must be first in insn group!)
+	alloc r14=ar.pfs,0,0,2,0	// now it's safe (must be first in insn group!)
 	;;				//
 	adds out0=16,sp			// out0 = pointer to pt_regs
 	;;
+	DO_SAVE_SWITCH_STACK
+	adds out1=16,sp			// out0 = pointer to switch_stack
 	br.call.sptk.many rp=ia64_init_handler
.ret1:
arch/ia64/kernel/perfmon.c
@@ -2,7 +2,7 @@
  * This file implements the perfmon subsystem which is used
  * to program the IA-64 Performance Monitoring Unit (PMU).
  *
- * Originaly Written by Ganesh Venkitachalam, IBM Corp.
+ * Originally Written by Ganesh Venkitachalam, IBM Corp.
  * Copyright (C) 1999 Ganesh Venkitachalam <venkitac@us.ibm.com>
  *
  * Modifications by Stephane Eranian, Hewlett-Packard Co.
@@ -224,8 +224,9 @@ typedef struct {
 	unsigned int protected:1;	/* allow access to creator of context only */
 	unsigned int using_dbreg:1;	/* using range restrictions (debug registers) */
 	unsigned int excl_idle:1;	/* exclude idle task in system wide session */
+	unsigned int unsecure:1;	/* sp = 0 for non self-monitored task */
 	unsigned int trap_reason:2;	/* reason for going into pfm_block_ovfl_reset() */
-	unsigned int reserved:21;
+	unsigned int reserved:20;
 } pfm_context_flags_t;

 #define PFM_TRAP_REASON_NONE	0x0	/* default value */
@@ -278,6 +279,7 @@ typedef struct pfm_context {
 #define ctx_fl_using_dbreg	ctx_flags.using_dbreg
 #define ctx_fl_excl_idle	ctx_flags.excl_idle
 #define ctx_fl_trap_reason	ctx_flags.trap_reason
+#define ctx_fl_unsecure		ctx_flags.unsecure

 /*
  * global information about all sessions
@@ -646,7 +648,7 @@ pfm_vm_close(struct vm_area_struct *vma)
 /*
  * This function is called from pfm_destroy_context() and also from pfm_inherit()
- * to explicitely remove the sampling buffer mapping from the user level address space.
+ * to explicitly remove the sampling buffer mapping from the user level address space.
  */
 static int
 pfm_remove_smpl_mapping(struct task_struct *task)
@@ -1076,10 +1078,15 @@ pfx_is_sane(struct task_struct *task, pfarg_context_t *pfx)
 		 * and it must be a valid CPU
 		 */
 		cpu = ffz(~pfx->ctx_cpu_mask);
+#ifdef CONFIG_SMP
 		if (cpu_online(cpu) == 0) {
+#else
+		if (cpu != 0) {
+#endif
 			DBprintk(("CPU%d is not online\n", cpu));
 			return -EINVAL;
 		}
+
 		/*
 		 * check for pre-existing pinning, if conflicting reject
 		 */
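Editor's note: the cpu = ffz(~pfx->ctx_cpu_mask) line in the hunk above is the kernel idiom for "lowest-numbered CPU in the mask": ffz() finds the first zero bit, so applying it to the complemented mask yields the first set bit. A stand-alone equivalent, sketched with a GCC builtin rather than the kernel helper:

#include <stdio.h>

/* ffz(~mask) == index of the first set bit in mask.  Outside the kernel the
 * same thing can be written with __builtin_ctzl; both are undefined when the
 * mask is empty, just as ffz(~0UL) is. */
static unsigned long first_cpu(unsigned long mask)
{
	return (unsigned long) __builtin_ctzl(mask);
}

int main(void)
{
	unsigned long ctx_cpu_mask = 0x0c;	/* CPUs 2 and 3 allowed */
	printf("pinned to CPU %lu\n", first_cpu(ctx_cpu_mask));	/* prints 2 */
	return 0;
}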
@@ -1225,6 +1232,7 @@ pfm_context_create(struct task_struct *task, pfm_context_t *ctx, void *req, int
 	ctx->ctx_fl_block = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0;
 	ctx->ctx_fl_system = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1 : 0;
 	ctx->ctx_fl_excl_idle = (ctx_flags & PFM_FL_EXCL_IDLE) ? 1 : 0;
+	ctx->ctx_fl_unsecure = (ctx_flags & PFM_FL_UNSECURE) ? 1 : 0;
 	ctx->ctx_fl_frozen = 0;
 	ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
@@ -1251,9 +1259,11 @@ pfm_context_create(struct task_struct *task, pfm_context_t *ctx, void *req, int
 	DBprintk(("context=%p, pid=%d notify_task=%p\n",
 			(void *)ctx, task->pid, ctx->ctx_notify_task));

-	DBprintk(("context=%p, pid=%d flags=0x%x inherit=%d block=%d system=%d excl_idle=%d\n",
+	DBprintk(("context=%p, pid=%d flags=0x%x inherit=%d block=%d system=%d excl_idle=%d unsecure=%d\n",
 			(void *)ctx, task->pid, ctx_flags, ctx->ctx_fl_inherit,
-			ctx->ctx_fl_block, ctx->ctx_fl_system, ctx->ctx_fl_excl_idle));
+			ctx->ctx_fl_block, ctx->ctx_fl_system, ctx->ctx_fl_excl_idle,
+			ctx->ctx_fl_unsecure));

 	/*
 	 * when no notification is required, we can make this visible at the last moment
@@ -1659,7 +1669,7 @@ pfm_read_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int count
 		if (!PMD_IS_IMPL(cnum)) goto abort_mission;
 		/*
 		 * we can only read the register that we use. That includes
-		 * the one we explicitely initialize AND the one we want included
+		 * the one we explicitly initialize AND the one we want included
 		 * in the sampling buffer (smpl_regs).
 		 *
 		 * Having this restriction allows optimization in the ctxsw routine
@@ -1871,7 +1881,7 @@ pfm_restart(struct task_struct *task, pfm_context_t *ctx, void *arg, int count,
 	 * if blocking, then post the semaphore.
 	 * if non-blocking, then we ensure that the task will go into
 	 * pfm_overflow_must_block() before returning to user mode.
-	 * We cannot explicitely reset another task, it MUST always
+	 * We cannot explicitly reset another task, it MUST always
 	 * be done by the task itself. This works for system wide because
 	 * the tool that is controlling the session is doing "self-monitoring".
 	 *
@@ -1882,7 +1892,10 @@ pfm_restart(struct task_struct *task, pfm_context_t *ctx, void *arg, int count,
 		DBprintk(("unblocking %d \n", task->pid));
 		up(sem);
 	} else {
+		struct thread_info *info = (struct thread_info *) ((char *) task + IA64_TASK_SIZE);
 		task->thread.pfm_ovfl_block_reset = 1;
+		ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET;
+		set_bit(TIF_NOTIFY_RESUME, &info->flags);
 	}
 #if 0
 	/*
@@ -2051,7 +2064,7 @@ pfm_protect_context(struct task_struct *task, pfm_context_t *ctx, void *arg, int
 	/*
 	 * reinforce secure monitoring: cannot toggle psr.up
 	 */
-	ia64_psr(regs)->sp = 1;
+	if (ctx->ctx_fl_unsecure == 0) ia64_psr(regs)->sp = 1;

 	return 0;
 }
@@ -2732,12 +2745,13 @@ pfm_ovfl_block_reset(void)
 	 * again
 	 */
 	th->pfm_ovfl_block_reset = 0;
+	clear_thread_flag(TIF_NOTIFY_RESUME);

 	/*
 	 * do some sanity checks first
 	 */
 	if (!ctx) {
-		printk(KERN_DEBUG "perfmon: [%d] has no PFM context\n", current->pid);
+		printk(KERN_ERR "perfmon: [%d] has no PFM context\n", current->pid);
 		return;
 	}
 	/*
@@ -2899,15 +2913,18 @@ pfm_record_sample(struct task_struct *task, pfm_context_t *ctx, unsigned long ov
 /*
  * main overflow processing routine.
- * it can be called from the interrupt path or explicitely during the context switch code
+ * it can be called from the interrupt path or explicitly during the context switch code
+ * Arguments:
+ *	mode: 0=coming from PMU interrupt, 1=coming from ctxsw
+ *
  * Return:
  *	new value of pmc[0]. if 0x0 then unfreeze, else keep frozen
  */
 static unsigned long
-pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, struct pt_regs *regs)
+pfm_overflow_handler(int mode, struct task_struct *task, pfm_context_t *ctx, u64 pmc0, struct pt_regs *regs)
 {
-	unsigned long mask;
 	struct thread_struct *t;
+	unsigned long mask;
 	unsigned long old_val;
 	unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL;
 	int i;
@@ -2998,10 +3015,10 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
 	/*
 	 * check for sampling buffer
 	 *
-	 * if present, record sample. We propagate notification ONLY when buffer
-	 * becomes full.
+	 * if present, record sample only when a 64-bit counter has overflowed.
+	 * We propagate notification ONLY when buffer becomes full.
 	 */
-	if (CTX_HAS_SMPL(ctx)) {
+	if (CTX_HAS_SMPL(ctx) && ovfl_pmds) {
 		ret = pfm_record_sample(task, ctx, ovfl_pmds, regs);
 		if (ret == 1) {
 			/*
@@ -3046,12 +3063,55 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
 	 * ctx_notify_task could already be NULL, checked in pfm_notify_user()
 	 */
 	if (CTX_OVFL_NOBLOCK(ctx) == 0 && ctx->ctx_notify_task != task) {
-		t->pfm_ovfl_block_reset = 1;	/* will cause blocking */
 		ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCKSIG;
 	} else {
-		t->pfm_ovfl_block_reset = 1;	/* will cause blocking */
 		ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_SIG;
 	}
+
+	/*
+	 * we cannot block in system wide mode and we do not go
+	 * through the PMU ctxsw code. Therefore we can generate
+	 * the notification here. In system wide mode, the current
+	 * task maybe different from the task controlling the session
+	 * on this CPU, therefore owner can be different from current.
+	 *
+	 * In per-process mode, this function gets called from
+	 * the interrupt handler or pfm_load_regs(). The mode argument
+	 * tells where we are coming from. When coming from the interrupt
+	 * handler, it is safe to notify (send signal) right here because
+	 * we do not hold any runqueue locks needed by send_sig_info().
+	 *
+	 * However when coming from ctxsw, we cannot send the signal here.
+	 * It must be deferred until we are sure we do not hold any runqueue
+	 * related locks. The current task maybe different from the owner
+	 * only in UP mode. The deferral is implemented using the
+	 * TIF_NOTIFY_RESUME mechanism. In this case, the pending work
+	 * is checked when the task is about to leave the kernel (see
+	 * entry.S). As of this version of perfmon, a kernel only
+	 * task cannot be monitored in per-process mode. Therefore,
+	 * when this function gets called from pfm_load_regs(), we know
+	 * we have a user level task which will eventually either exit
+	 * or leave the kernel, and thereby go through the checkpoint
+	 * for TIF_*.
+	 */
+	if (ctx->ctx_fl_system || mode == 0) {
+		pfm_notify_user(ctx);
+		ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
+	} else {
+		struct thread_info *info;
+
+		/*
+		 * given that TIF_NOTIFY_RESUME is not specific to
+		 * perfmon, we need to have a second level check to
+		 * verify the source of the notification.
+		 */
+		task->thread.pfm_ovfl_block_reset = 1;
+		/*
+		 * when coming from ctxsw, current still points to the
+		 * previous task, therefore we must work with task and not current.
+		 */
+		info = ((struct thread_info *) ((char *) task + IA64_TASK_SIZE));
+		set_bit(TIF_NOTIFY_RESUME, &info->flags);
+	}
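Editor's note: the long comment above is the heart of this change: a signal may be sent immediately from the interrupt path, but from the context-switch path it must be deferred via TIF_NOTIFY_RESUME until the task is about to leave the kernel, where no runqueue locks can be held. Reduced to a sketch, with types and names that are stand-ins rather than the kernel's:

/* "Notify now if safe, otherwise leave a note for kernel exit" in miniature.
 * notify_pending stands in for the TIF_NOTIFY_RESUME thread flag; the real
 * code additionally records *why* in ctx_fl_trap_reason. */
enum call_site { FROM_INTERRUPT = 0, FROM_CTXSW = 1 };

struct thread_stub {
	int notify_pending;		/* checked on return to user mode */
};

static void send_notification(void)
{
	/* send_sig_info() in the real code; must not run under runqueue locks */
}

static void overflow_notify(int system_wide, enum call_site mode, struct thread_stub *t)
{
	if (system_wide || mode == FROM_INTERRUPT)
		send_notification();	/* no runqueue locks can be held here */
	else
		t->notify_pending = 1;	/* deferred to the kernel-exit checkpoint */
}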
 	/*
 	 * keep the PMU frozen until either pfm_restart() or
@@ -3059,7 +3119,10 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
 	 */
 	ctx->ctx_fl_frozen = 1;

-	DBprintk_ovfl(("return pmc0=0x%x must_block=%ld reason=%d\n",
+	DBprintk_ovfl(("current [%d] owner [%d] mode=%d return pmc0=0x%x must_block=%ld reason=%d\n",
+		current->pid, PMU_OWNER() ? PMU_OWNER()->pid : -1, mode,
 		ctx->ctx_fl_frozen ? 0x1 : 0x0,
 		t->pfm_ovfl_block_reset,
 		ctx->ctx_fl_trap_reason));
@@ -3114,13 +3177,15 @@ pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
 		/*
 		 * assume PMC[0].fr = 1 at this point
 		 */
-		pmc0 = pfm_overflow_handler(task, ctx, pmc0, regs);
+		pmc0 = pfm_overflow_handler(0, task, ctx, pmc0, regs);

 		/*
 		 * we can only update pmc0 when the overflow
-		 * is for the current context. In UP the current
-		 * task may not be the one owning the PMU
+		 * is for the current context or we are in system
+		 * wide mode. In UP (per-task) the current
+		 * task may not be the one owning the PMU,
+		 * same thing for system-wide.
 		 */
-		if (task == current) {
+		if (task == current || ctx->ctx_fl_system) {
 			/*
 			 * We always clear the overflow status bits and either unfreeze
 			 * or keep the PMU frozen.
@@ -3454,7 +3519,7 @@ pfm_load_regs (struct task_struct *task)
 	 * Side effect on ctx_fl_frozen is possible.
 	 */
 	if (t->pmc[0] & ~0x1) {
-		t->pmc[0] = pfm_overflow_handler(task, ctx, t->pmc[0], NULL);
+		t->pmc[0] = pfm_overflow_handler(1, task, ctx, t->pmc[0], NULL);
 	}

 	/*
@@ -3754,16 +3819,20 @@ pfm_inherit(struct task_struct *task, struct pt_regs *regs)
 	preempt_disable();
 	/*
-	 * make sure child cannot mess up the monitoring session
+	 * for secure sessions, make sure child cannot mess up
+	 * the monitoring session.
 	 */
-	ia64_psr(regs)->sp = 1;
-	DBprintk(("enabling psr.sp for [%d]\n", task->pid));
+	if (ctx->ctx_fl_unsecure == 0) {
+		ia64_psr(regs)->sp = 1;
+		DBprintk(("enabling psr.sp for [%d]\n", task->pid));
+	} else {
+		DBprintk(("psr.sp=%d [%d]\n", ia64_psr(regs)->sp, task->pid));
+	}

 	/*
 	 * if there was a virtual mapping for the sampling buffer
 	 * the mapping is NOT inherited across fork() (see VM_DONTCOPY),
-	 * so we don't have to explicitely remove it here.
+	 * so we don't have to explicitly remove it here.
 	 *
 	 * Part of the clearing of fields is also done in
arch/ia64/kernel/perfmon_mckinley.h
@@ -25,8 +25,8 @@ static pfm_reg_desc_t pfm_mck_pmc_desc[PMU_MAX_PMCS]={
 /* pmc5  */ { PFM_REG_COUNTING, 6, 0x0UL, 0xfffff7fUL, NULL, pfm_mck_reserved, {RDEP(5),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
 /* pmc6  */ { PFM_REG_COUNTING, 6, 0x0UL, 0xfffff7fUL, NULL, pfm_mck_reserved, {RDEP(6),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
 /* pmc7  */ { PFM_REG_COUNTING, 6, 0x0UL, 0xfffff7fUL, NULL, pfm_mck_reserved, {RDEP(7),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc8  */ { PFM_REG_CONFIG, 0, 0xffffffff3fffffffUL, 0xffffffff9fffffffUL, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
-/* pmc9  */ { PFM_REG_CONFIG, 0, 0xffffffff3ffffffcUL, 0xffffffff9fffffffUL, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
+/* pmc8  */ { PFM_REG_CONFIG, 0, 0xffffffff3fffffffUL, 0xffffffff3fffffffUL, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
+/* pmc9  */ { PFM_REG_CONFIG, 0, 0xffffffff3ffffffcUL, 0xffffffff3fffffffUL, NULL, pfm_mck_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
 /* pmc10 */ { PFM_REG_MONITOR, 4, 0x0UL, 0xffffUL, NULL, pfm_mck_reserved, {RDEP(0)|RDEP(1),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
 /* pmc11 */ { PFM_REG_MONITOR, 6, 0x0UL, 0x30f01cf, NULL, pfm_mck_reserved, {RDEP(2)|RDEP(3)|RDEP(17),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
 /* pmc12 */ { PFM_REG_MONITOR, 6, 0x0UL, 0xffffUL, NULL, pfm_mck_reserved, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
@@ -143,11 +143,8 @@ pfm_mck_pmc_check(struct task_struct *task, unsigned int cnum, unsigned long *va
 	case  8: val8 = *val;
 		 val13 = th->pmc[13];
 		 val14 = th->pmc[14];
-		 *val |= 1UL << 2;  /* bit 2 must always be 1 */
 		 check_case1 = 1;
 		 break;
-	case  9: *val |= 1UL << 2;  /* bit 2 must always be 1 */
-		 break;
 	case 13: val8 = th->pmc[8];
 		 val13 = *val;
 		 val14 = th->pmc[14];
arch/ia64/kernel/process.c
@@ -43,8 +43,8 @@
 #include "sigframe.h"

-static void
-do_show_stack (struct unw_frame_info *info, void *arg)
+void
+ia64_do_show_stack (struct unw_frame_info *info, void *arg)
 {
 	unsigned long ip, sp, bsp;
 	char buf[80];			/* don't make it so big that it overflows the stack! */
@@ -57,7 +57,7 @@ do_show_stack (struct unw_frame_info *info, void *arg)
 		unw_get_sp(info, &sp);
 		unw_get_bsp(info, &bsp);
-		snprintf(buf, sizeof(buf), " [<%016lx>] %%s sp=0x%016lx bsp=0x%016lx\n",
+		snprintf(buf, sizeof(buf), " [<%016lx>] %%s\n\t\t\t\tsp=%016lx bsp=%016lx\n",
 			 ip, sp, bsp);
 		print_symbol(buf, ip);
 	} while (unw_unwind(info) >= 0);
@@ -73,12 +73,12 @@ void
 show_stack (struct task_struct *task)
 {
 	if (!task)
-		unw_init_running(do_show_stack, 0);
+		unw_init_running(ia64_do_show_stack, 0);
 	else {
 		struct unw_frame_info info;

 		unw_init_from_blocked_task(&info, task);
-		do_show_stack(&info, 0);
+		ia64_do_show_stack(&info, 0);
 	}
 }
@@ -135,7 +135,7 @@ show_regs (struct pt_regs *regs)
 			       ((i == sof - 1) || (i % 3) == 2) ? "\n" : " ");
 		}
 	} else
-		show_stack(0);
+		show_stack(NULL);
 }

 void
arch/ia64/kernel/sal.c
@@ -116,7 +116,7 @@ ia64_sal_init (struct ia64_sal_systab *systab)
 	p = (char *) (systab + 1);
 	for (i = 0; i < systab->entry_count; i++) {
 		/*
-		 * The first byte of each entry type contains the type desciptor.
+		 * The first byte of each entry type contains the type descriptor.
 		 */
 		switch (*p) {
 		      case SAL_DESC_ENTRY_POINT:
arch/ia64/kernel/setup.c
@@ -60,6 +60,8 @@ struct ia64_boot_param *ia64_boot_param;
 struct screen_info screen_info;

 unsigned long ia64_iobase;	/* virtual address for I/O accesses */
+struct io_space io_space[MAX_IO_SPACES];
+unsigned int num_io_spaces;

 unsigned char aux_device_present = 0xaa;	/* XXX remove this when legacy I/O is gone */
@@ -412,6 +414,11 @@ setup_arch (char **cmdline_p)
 	}
 	ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);

+	/* setup legacy IO port space */
+	io_space[0].mmio_base = ia64_iobase;
+	io_space[0].sparse = 1;
+	num_io_spaces = 1;
+
 #ifdef CONFIG_SMP
 	cpu_physical_id(0) = hard_smp_processor_id();
 #endif
@@ -421,7 +428,7 @@ setup_arch (char **cmdline_p)
 #ifdef CONFIG_ACPI_BOOT
 	acpi_boot_init();
 #endif
-#ifdef CONFIG_SERIAL_HCDP
+#ifdef CONFIG_SERIAL_8250_HCDP
 	if (efi.hcdp) {
 		void setup_serial_hcdp(void *);
arch/ia64/kernel/smpboot.c
@@ -279,15 +279,6 @@ smp_callin (void)
 	smp_setup_percpu_timer();

-	if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
-		/*
-		 * Synchronize the ITC with the BP
-		 */
-		Dprintk("Going to syncup ITC with BP.\n");
-		ia64_sync_itc(0);
-	}
-
 	/*
 	 * Get our bogomips.
 	 */
@@ -310,6 +301,27 @@ smp_callin (void)
 	local_irq_enable();
 	calibrate_delay();
 	local_cpu_data->loops_per_jiffy = loops_per_jiffy;
+
+	if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
+		/*
+		 * Synchronize the ITC with the BP.  Need to do this after irqs are
+		 * enabled because ia64_sync_itc() calls smp_call_function_single(), which
+		 * calls spin_unlock_bh(), which calls spin_unlock_bh(), which calls
+		 * local_bh_enable(), which bugs out if irqs are not enabled...
+		 */
+		Dprintk("Going to syncup ITC with BP.\n");
+		ia64_sync_itc(0);
+
+		/*
+		 * Make sure we didn't sync the itc ahead of the next
+		 * timer interrupt, if so, just reset it.
+		 */
+		if (time_after(ia64_get_itc(), local_cpu_data->itm_next)) {
+			Dprintk("oops, jumped a timer.\n");
+			ia64_cpu_local_tick();
+		}
+	}
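Editor's note: beyond moving the sync after local_irq_enable(), the new code guards against the resynchronized ITC having jumped past the already-programmed next timer match, in which case that match would never fire. The pattern, sketched with explicit 64-bit arithmetic (the function names here are ours, not the kernel's):

#include <stdint.h>

/* Wraparound-safe "a is after b" comparison, the shape of time_after(). */
static int after64(uint64_t a, uint64_t b)
{
	return (int64_t)(b - a) < 0;
}

/* After warping a free-running counter, re-arm the compare register if the
 * warp skipped over it; otherwise the programmed match never happens. */
static void resync_and_guard(uint64_t *counter, uint64_t synced_value,
			     uint64_t *next_match,
			     void (*rearm)(uint64_t *, uint64_t *))
{
	*counter = synced_value;		/* the ia64_sync_itc(0) step      */
	if (after64(*counter, *next_match))	/* "oops, jumped a timer"         */
		rearm(counter, next_match);	/* the ia64_cpu_local_tick() step */
}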
 	/*
 	 * Allow the master to continue.
 	 */
@@ -394,13 +406,26 @@ do_boot_cpu (int sapicid, int cpu)
 	return 0;
 }

-unsigned long cache_decay_ticks;	/* # of ticks an idle task is considered cache-hot */
+static int __init
+decay (char *str)
+{
+	int ticks;
+	get_option (&str, &ticks);
+	cache_decay_ticks = ticks;
+	return 1;
+}
+
+__setup("decay=", decay);
+
+/*
+ * # of ticks an idle task is considered cache-hot.  Highly application-dependent.  There
+ * are apps out there which are known to suffer significantly with values >= 4.
+ */
+unsigned long cache_decay_ticks = 10;	/* equal to MIN_TIMESLICE */

 static void
 smp_tune_scheduling (void)
 {
-	cache_decay_ticks = 10;	/* XXX base this on PAL info and cache-bandwidth estimate */
-
 	printk(KERN_INFO "task migration cache decay timeout: %ld msecs.\n",
 	       (cache_decay_ticks + 1) * 1000 / HZ);
 }
arch/ia64/kernel/time.c
@@ -221,7 +221,7 @@ timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 	do {
 		/*
 		 * If we're too close to the next clock tick for comfort, we increase the
-		 * saftey margin by intentionally dropping the next tick(s).  We do NOT update
+		 * safety margin by intentionally dropping the next tick(s).  We do NOT update
 		 * itm.next because that would force us to call do_timer() which in turn would
 		 * let our clock run too fast (with the potentially devastating effect of
 		 * losing monotony of time).
arch/ia64/kernel/traps.c
@@ -94,7 +94,7 @@ die (const char *str, struct pt_regs *regs, long err)
 {
 	static struct {
 		spinlock_t lock;
-		int lock_owner;
+		u32 lock_owner;
 		int lock_owner_depth;
 	} die = {
 		.lock =	SPIN_LOCK_UNLOCKED,
arch/ia64/kernel/unaligned.c
@@ -789,7 +789,7 @@ emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
  *
  * ldX.a (advanced load):
  *	- suppose ldX.a r1=[r3]. If we get to the unaligned trap it's because the
- *	  address doesn't match requested size alignement. This means that we would
+ *	  address doesn't match requested size alignment. This means that we would
  *	  possibly need more than one load to get the result.
  *
  * The load part can be handled just like a normal load, however the difficult
arch/ia64/kernel/unwind.c
@@ -682,7 +682,7 @@ finish_prologue (struct unw_state_record *sr)
 	 * First, resolve implicit register save locations (see Section "11.4.2.3 Rules
 	 * for Using Unwind Descriptors", rule 3):
 	 */
-	for (i = 0; i < (int) sizeof(unw.save_order)/sizeof(unw.save_order[0]); ++i) {
+	for (i = 0; i < (int) (sizeof(unw.save_order)/sizeof(unw.save_order[0])); ++i) {
 		reg = sr->curr.reg + unw.save_order[i];
 		if (reg->where == UNW_WHERE_GR_SAVE) {
 			reg->where = UNW_WHERE_GR;
@@ -698,7 +698,7 @@ finish_prologue (struct unw_state_record *sr)
 	 */
 	if (sr->imask) {
 		unsigned char kind, mask = 0, *cp = sr->imask;
-		unsigned long t;
+		int t;
 		static const unsigned char limit[3] = {
 			UNW_REG_F31, UNW_REG_R7, UNW_REG_B5
 		};
@@ -1888,22 +1888,21 @@ unw_unwind_to_user (struct unw_frame_info *info)
 	return -1;
 }

-void
-unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw)
+static void
+init_frame_info (struct unw_frame_info *info, struct task_struct *t,
+		 struct switch_stack *sw, unsigned long stktop)
 {
-	unsigned long rbslimit, rbstop, stklimit, stktop, sol;
+	unsigned long rbslimit, rbstop, stklimit;
 	STAT(unsigned long start, flags;)

 	STAT(local_irq_save(flags); ++unw.stat.api.inits; start = ia64_get_itc());

 	/*
-	 * Subtle stuff here: we _could_ unwind through the
-	 * switch_stack frame but we don't want to do that because it
-	 * would be slow as each preserved register would have to be
-	 * processed.  Instead, what we do here is zero out the frame
-	 * info and start the unwind process at the function that
-	 * created the switch_stack frame.  When a preserved value in
-	 * switch_stack needs to be accessed, run_script() will
+	 * Subtle stuff here: we _could_ unwind through the switch_stack frame but we
+	 * don't want to do that because it would be slow as each preserved register would
+	 * have to be processed.  Instead, what we do here is zero out the frame info and
+	 * start the unwind process at the function that created the switch_stack frame.
+	 * When a preserved value in switch_stack needs to be accessed, run_script() will
 	 * initialize the appropriate pointer on demand.
 	 */
 	memset(info, 0, sizeof(*info));
@@ -1914,7 +1913,6 @@ unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct
 		rbstop = rbslimit;

 	stklimit = (unsigned long) t + IA64_STK_OFFSET;
-	stktop   = (unsigned long) sw - 16;
 	if (stktop <= rbstop)
 		stktop = rbstop;
@@ -1924,34 +1922,58 @@ unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct
 	info->memstk.top = stktop;
 	info->task = t;
 	info->sw  = sw;
-	info->sp = info->psp = (unsigned long) (sw + 1) - 16;
+	info->sp = info->psp = stktop;
+	info->pt = 0;
 	info->pr = sw->pr;
+	UNW_DPRINT(3, "unwind.%s:\n"
+		   "  task   0x%lx\n"
+		   "  rbs = [0x%lx-0x%lx)\n"
+		   "  stk = [0x%lx-0x%lx)\n"
+		   "  pr     0x%lx\n"
+		   "  sw     0x%lx\n"
+		   "  sp     0x%lx\n",
+		   __FUNCTION__, (unsigned long) t, rbslimit, rbstop, stktop, stklimit,
+		   info->pr, (unsigned long) info->sw, info->sp);
+	STAT(unw.stat.api.init_time += ia64_get_itc() - start; local_irq_restore(flags));
+}
+
+void
+unw_init_from_interruption (struct unw_frame_info *info, struct task_struct *t,
+			    struct pt_regs *pt, struct switch_stack *sw)
+{
+	unsigned long sof;
+
+	init_frame_info(info, t, sw, pt->r12);
+	info->cfm_loc = &pt->cr_ifs;
+	info->unat_loc = &pt->ar_unat;
+	info->pfs_loc = &pt->ar_pfs;
+	sof = *info->cfm_loc & 0x7f;
+	info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sof);
+	info->ip = pt->cr_iip + ia64_psr(pt)->ri;
+	info->pt = (unsigned long) pt;
+	UNW_DPRINT(3, "unwind.%s:\n"
+		   "  bsp    0x%lx\n"
+		   "  sof    0x%lx\n"
+		   "  ip     0x%lx\n",
+		   __FUNCTION__, info->bsp, sof, info->ip);
+	find_save_locs(info);
+}
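Editor's note: both initializers here compute bsp by walking backwards sof (or sol) register slots from the top of the register backing store via ia64_rse_skip_regs(). That is not a plain pointer subtraction, because the RSE stores a NaT-collection word in every 64th doubleword slot. A sketch of the computation, modeled on the helper in include/asm-ia64/rse.h (treat the exact biasing as an assumption of this sketch):

/* Move num_regs register slots through an IA-64 RSE backing store,
 * accounting for the NaT-collection word that occupies slot 0x3f of
 * every 64-slot group. */
static unsigned long *rse_skip_regs(unsigned long *addr, long num_regs)
{
	long slot = ((unsigned long) addr >> 3) & 0x3f;	/* slot number of addr */
	long delta = slot + num_regs;

	if (num_regs < 0)
		delta -= 0x3e;	/* bias so the division truncates downward */
	return addr + num_regs + delta / 0x3f;	/* one extra step per NaT word crossed */
}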
+void
+unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw)
+{
+	unsigned long sol;
+
+	init_frame_info(info, t, sw, (unsigned long) (sw + 1) - 16);
 	info->cfm_loc = &sw->ar_pfs;
 	sol = (*info->cfm_loc >> 7) & 0x7f;
 	info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sol);
 	info->ip = sw->b0;
-	info->pr = sw->pr;
-	UNW_DPRINT(3, "unwind.%s:\n"
-		   "  sp/psp 0x%lx\n"
-		   "  sol    0x%lx\n"
-		   "  bsp    0x%lx\n"
-		   "  ip     0x%lx\n"
-		   "  pr     0x%lx\n",
-		   info->sp, sol, info->bsp, info->ip, info->pr);
+	UNW_DPRINT(3, "unwind.%s\n"
+		   "  bsp    0x%lx\n"
+		   "  sol    0x%lx\n"
+		   "  ip     0x%lx\n",
+		   __FUNCTION__, info->bsp, sol, info->ip);
 	find_save_locs(info);
-	STAT(unw.stat.api.init_time += ia64_get_itc() - start; local_irq_restore(flags));
 }

 void
arch/ia64/lib/copy_user.S
@@ -316,7 +316,7 @@ GLOBAL_ENTRY(__copy_user)
 	// Beginning of long mempcy (i.e. > 16 bytes)
 	//
.long_copy_user:
-	tbit.nz p6,p7=src1,0	// odd alignement
+	tbit.nz p6,p7=src1,0	// odd alignment
 	and tmp=7,tmp
 	;;
 	cmp.eq p10,p8=r0,tmp
arch/ia64/lib/do_csum.S
@@ -137,7 +137,7 @@ GLOBAL_ENTRY(do_csum)
 	mov saved_pr=pr		// preserve predicates (rotation)
(p6)	br.ret.spnt.many rp	// return if zero or negative length
-	mov hmask=-1		// intialize head mask
+	mov hmask=-1		// initialize head mask
 	tbit.nz p15,p0=buf,0	// is buf an odd address?
 	and first1=-8,buf	// 8-byte align down address of first1 element
arch/ia64/lib/io.c
View file @ 20f6d716
...
@@ -108,7 +108,7 @@ ia64_readl (void *addr)
 unsigned long
 ia64_readq (void *addr)
 {
 	return __ia64_readq(addr);
 }
...
arch/ia64/lib/swiotlb.c
View file @ 20f6d716
...
@@ -239,7 +239,7 @@ unmap_single (struct pci_dev *hwdev, char *dma_addr, size_t size, int direction)
 	for (i = index + nslots - 1; i >= index; i--)
 		io_tlb_list[i] = ++count;
 	/*
-	 * Step 2: merge the returned slots with the preceeding slots, if
+	 * Step 2: merge the returned slots with the preceding slots, if
 	 * available (non zero)
 	 */
 	for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) &&
...
@@ -399,7 +399,7 @@ swiotlb_sync_single (struct pci_dev *hwdev, dma_addr_t pci_addr, size_t size, in
 /*
  * Map a set of buffers described by scatterlist in streaming mode for DMA.  This is the
- * scather-gather version of the above swiotlb_map_single interface.  Here the scatter
+ * scatter-gather version of the above swiotlb_map_single interface.  Here the scatter
  * gather list elements are each tagged with the appropriate dma address and length.  They
  * are obtained via sg_dma_{address,length}(SG).
  *
...
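For context on the first hunk: io_tlb_list[] holds, for each slot, the number of contiguous free slots starting there, so freeing walks backwards assigning 1, 2, 3, ... and then extends any free run that precedes the freed range. A standalone sketch of that bookkeeping (simplified: it drops the IO_TLB_SEGSIZE boundary check the kernel also applies, and the names are stand-ins):

#include <stdio.h>

#define SLOTS 16

static int io_tlb_list[SLOTS];

static void free_slots (int index, int nslots)
{
	int count, i;

	/* a free run right after the freed range extends the new run */
	count = (index + nslots < SLOTS) ? io_tlb_list[index + nslots] : 0;

	/* step 1: return the slots, counting the run from the back */
	for (i = index + nslots - 1; i >= index; i--)
		io_tlb_list[i] = ++count;

	/* step 2: merge with the preceding free run, if available (non zero) */
	for (i = index - 1; i >= 0 && io_tlb_list[i]; i--)
		io_tlb_list[i] = ++count;
}

int main (void)
{
	int i;

	free_slots(4, 3);
	for (i = 0; i < SLOTS; i++)
		printf("%d ", io_tlb_list[i]);
	printf("\n");
	return 0;
}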
arch/ia64/pci/pci.c
View file @ 20f6d716
...
@@ -5,6 +5,7 @@
 *
 * Copyright (C) 2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
+ *	Bjorn Helgaas <bjorn_helgaas@hp.com>
 *
 * Note: Above list of copyright holders is incomplete...
 */
...
@@ -116,31 +117,10 @@ pci_acpi_init (void)
 subsys_initcall (pci_acpi_init);

-static void __init
-pcibios_fixup_resource (struct resource *res, u64 offset)
-{
-	res->start += offset;
-	res->end += offset;
-}
-
-void __init
-pcibios_fixup_device_resources (struct pci_dev *dev, struct pci_bus *bus)
-{
-	int i;
-
-	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
-		if (!dev->resource[i].start)
-			continue;
-		if (dev->resource[i].flags & IORESOURCE_MEM)
-			pcibios_fixup_resource(&dev->resource[i], PCI_CONTROLLER(dev)->mem_offset);
-	}
-}
-
 /* Called by ACPI when it finds a new root bus. */

 static struct pci_controller *
 alloc_pci_controller (int seg)
 {
 	struct pci_controller *controller;
...
@@ -153,8 +133,8 @@ alloc_pci_controller(int seg)
 	return controller;
 }

-struct pci_bus *
+static struct pci_bus *
 scan_root_bus (int bus, struct pci_ops *ops, void *sysdata)
 {
 	struct pci_bus *b;
...
@@ -184,23 +164,185 @@ scan_root_bus(int bus, struct pci_ops *ops, void *sysdata)
 	return b;
 }
+static int
+alloc_resource (char *name, struct resource *root, unsigned long start, unsigned long end,
+		unsigned long flags)
+{
+	struct resource *res;
+
+	res = kmalloc(sizeof(*res), GFP_KERNEL);
+	if (!res)
+		return -ENOMEM;
+
+	memset(res, 0, sizeof(*res));
+	res->name = name;
+	res->start = start;
+	res->end = end;
+	res->flags = flags;
+
+	if (request_resource(root, res))
+		return -EBUSY;
+
+	return 0;
+}
+
+static u64
+add_io_space (struct acpi_resource_address64 *addr)
+{
+	u64 offset;
+	int sparse = 0;
+	int i;
+
+	if (addr->address_translation_offset == 0)
+		return IO_SPACE_BASE(0);	/* part of legacy IO space */
+
+	if (addr->attribute.io.translation_attribute == ACPI_SPARSE_TRANSLATION)
+		sparse = 1;
+
+	offset = (u64) ioremap(addr->address_translation_offset, 0);
+	for (i = 0; i < num_io_spaces; i++)
+		if (io_space[i].mmio_base == offset &&
+		    io_space[i].sparse == sparse)
+			return IO_SPACE_BASE(i);
+
+	if (num_io_spaces == MAX_IO_SPACES) {
+		printk("Too many IO port spaces\n");
+		return ~0;
+	}
+
+	i = num_io_spaces++;
+	io_space[i].mmio_base = offset;
+	io_space[i].sparse = sparse;
+
+	return IO_SPACE_BASE(i);
+}
+
+static acpi_status
+count_window (struct acpi_resource *resource, void *data)
+{
+	unsigned int *windows = (unsigned int *) data;
+	struct acpi_resource_address64 addr;
+	acpi_status status;
+
+	status = acpi_resource_to_address64(resource, &addr);
+	if (ACPI_SUCCESS(status))
+		if (addr.resource_type == ACPI_MEMORY_RANGE ||
+		    addr.resource_type == ACPI_IO_RANGE)
+			(*windows)++;
+
+	return AE_OK;
+}
+struct pci_root_info {
+	struct pci_controller *controller;
+	char *name;
+};
+
+static acpi_status
+add_window (struct acpi_resource *res, void *data)
+{
+	struct pci_root_info *info = (struct pci_root_info *) data;
+	struct pci_window *window;
+	struct acpi_resource_address64 addr;
+	acpi_status status;
+	unsigned long flags, offset = 0;
+	struct resource *root;
+
+	status = acpi_resource_to_address64(res, &addr);
+	if (ACPI_SUCCESS(status)) {
+		if (addr.resource_type == ACPI_MEMORY_RANGE) {
+			flags = IORESOURCE_MEM;
+			root = &iomem_resource;
+			offset = addr.address_translation_offset;
+		} else if (addr.resource_type == ACPI_IO_RANGE) {
+			flags = IORESOURCE_IO;
+			root = &ioport_resource;
+			offset = add_io_space(&addr);
+			if (offset == ~0)
+				return AE_OK;
+		} else
+			return AE_OK;
+
+		window = &info->controller->window[info->controller->windows++];
+		window->resource.flags |= flags;
+		window->resource.start = addr.min_address_range;
+		window->resource.end = addr.max_address_range;
+		window->offset = offset;
+
+		if (alloc_resource(info->name, root, addr.min_address_range + offset,
+				   addr.max_address_range + offset, flags))
+			printk(KERN_ERR "alloc 0x%lx-0x%lx from %s for %s failed\n",
+			       addr.min_address_range + offset,
+			       addr.max_address_range + offset, root->name, info->name);
+	}
+
+	return AE_OK;
+}
+
 struct pci_bus *
 pcibios_scan_root (void *handle, int seg, int bus)
 {
+	struct pci_root_info info;
 	struct pci_controller *controller;
-	u64 base, size, offset;
+	unsigned int windows = 0;
+	char *name;

 	printk("PCI: Probing PCI hardware on bus (%02x:%02x)\n", seg, bus);

 	controller = alloc_pci_controller(seg);
 	if (!controller)
-		return NULL;
+		goto out1;

 	controller->acpi_handle = handle;

-	acpi_get_addr_space(handle, ACPI_MEMORY_RANGE, &base, &size, &offset);
-	controller->mem_offset = offset;
+	acpi_walk_resources(handle, METHOD_NAME__CRS, count_window, &windows);
+	controller->window = kmalloc(sizeof(*controller->window) * windows, GFP_KERNEL);
+	if (!controller->window)
+		goto out2;
+
+	name = kmalloc(16, GFP_KERNEL);
+	if (!name)
+		goto out3;
+
+	sprintf(name, "PCI Bus %02x:%02x", seg, bus);
+	info.controller = controller;
+	info.name = name;
+	acpi_walk_resources(handle, METHOD_NAME__CRS, add_window, &info);

 	return scan_root_bus(bus, pci_root_ops, controller);
+
+out3:
+	kfree(controller->window);
+out2:
+	kfree(controller);
+out1:
+	return NULL;
 }
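pcibios_scan_root() now sizes and fills the window array with two acpi_walk_resources() passes, one using count_window() and one using add_window(). A self-contained sketch of that count-then-fill pattern against a mocked resource walker (every name here is a stand-in, not the ACPI API):

#include <stdio.h>
#include <stdlib.h>

struct window { unsigned long start, end, offset; };

typedef void (*walk_fn)(unsigned long start, unsigned long end, void *data);

/* stand-in for acpi_walk_resources(): visits each window of a mock _CRS */
static void walk_crs (walk_fn fn, void *data)
{
	static const unsigned long crs[][2] = {
		{ 0x0000, 0xffff },		/* IO window */
		{ 0x80000000, 0x8fffffff },	/* MMIO window */
	};
	size_t i;

	for (i = 0; i < sizeof(crs) / sizeof(crs[0]); i++)
		fn(crs[i][0], crs[i][1], data);
}

static void count_cb (unsigned long s, unsigned long e, void *data)
{
	(*(unsigned int *) data)++;
}

struct info { struct window *window; unsigned int n; };

static void fill_cb (unsigned long s, unsigned long e, void *data)
{
	struct info *info = data;

	info->window[info->n].start = s;
	info->window[info->n].end = e;
	info->window[info->n].offset = 0;
	info->n++;
}

int main (void)
{
	unsigned int windows = 0;
	struct info info = { NULL, 0 };

	walk_crs(count_cb, &windows);	/* pass 1: count the windows */
	info.window = calloc(windows, sizeof(*info.window));
	if (!info.window)
		return 1;
	walk_crs(fill_cb, &info);	/* pass 2: fill them in */
	printf("%u windows\n", info.n);
	free(info.window);
	return 0;
}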
+void __init
+pcibios_fixup_device_resources (struct pci_dev *dev, struct pci_bus *bus)
+{
+	struct pci_controller *controller = PCI_CONTROLLER(dev);
+	struct pci_window *window;
+	int i, j;
+
+	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+		if (!dev->resource[i].start)
+			continue;
+
+#define contains(win, res)	((res)->start >= (win)->start && \
+				 (res)->end   <= (win)->end)
+
+		for (j = 0; j < controller->windows; j++) {
+			window = &controller->window[j];
+			if (((dev->resource[i].flags & IORESOURCE_MEM &&
+			      window->resource.flags & IORESOURCE_MEM) ||
+			     (dev->resource[i].flags & IORESOURCE_IO &&
+			      window->resource.flags & IORESOURCE_IO)) &&
+			    contains(&window->resource, &dev->resource[i])) {
+				dev->resource[i].start += window->offset;
+				dev->resource[i].end += window->offset;
+			}
+		}
+	}
+}

 /*
...
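The new pcibios_fixup_device_resources() translates each device resource from bus-relative to CPU addresses using whichever window contains it, instead of one flat per-controller mem_offset. A worked standalone example with hypothetical window and BAR values:

#include <stdio.h>

struct resource { unsigned long start, end, flags; };
struct pci_window { struct resource resource; unsigned long offset; };

#define contains(win, res)	((res)->start >= (win)->start && \
				 (res)->end   <= (win)->end)

int main (void)
{
	/* hypothetical bridge window: bus 0x0000-0xffff maps to CPU 0x80000000+ */
	struct pci_window window = { { 0x0000, 0xffff, 1 }, 0x80000000UL };
	struct resource bar = { 0x2000, 0x2fff, 1 };	/* device BAR, bus-relative */

	if ((bar.flags & window.resource.flags) &&
	    contains(&window.resource, &bar)) {
		bar.start += window.offset;	/* shift into CPU address space */
		bar.end += window.offset;
	}
	printf("BAR now 0x%lx-0x%lx\n", bar.start, bar.end);
	return 0;
}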
include/asm-ia64/acpi-ext.h
View file @ 20f6d716
...
@@ -3,30 +3,15 @@
 *
 * Copyright (C) 2003 Hewlett-Packard
 * Copyright (C) Alex Williamson
+ * Copyright (C) Bjorn Helgaas
 *
- * Vendor specific extensions to ACPI.  The HP-specific extensiosn are also used by NEC.
+ * Vendor specific extensions to ACPI.
 */

 #ifndef _ASM_IA64_ACPI_EXT_H
 #define _ASM_IA64_ACPI_EXT_H

 #include <linux/types.h>

+#define HP_CCSR_LENGTH	0x21
+#define HP_CCSR_TYPE	0x2
+#define HP_CCSR_GUID	EFI_GUID(0x69e9adf9, 0x924f, 0xab5f, \
+				 0xf6, 0x4a, 0x24, 0xd2, 0x01, 0x37, 0x0e, 0xad)
+
+struct acpi_hp_vendor_long {
+	u8	guid_id;
+	u8	guid[16];
+	u8	csr_base[8];
+	u8	csr_length[8];
+};
+
 extern acpi_status hp_acpi_csr_space (acpi_handle, u64 *base, u64 *length);
-extern acpi_status acpi_get_crs (acpi_handle, struct acpi_buffer *);
-extern struct acpi_resource *acpi_get_crs_next (struct acpi_buffer *, int *);
-extern union acpi_resource_data *acpi_get_crs_type (struct acpi_buffer *, int *, int);
-extern void acpi_dispose_crs (struct acpi_buffer *);

 #endif /* _ASM_IA64_ACPI_EXT_H */
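The csr_base and csr_length fields above are raw byte arrays, so a consumer such as hp_acpi_csr_space() has to assemble them into 64-bit values. A standalone sketch of that assembly (bytes_to_u64 and the sample bytes are illustrative helpers, not the kernel's code):

#include <stdio.h>

typedef unsigned char u8;
typedef unsigned long long u64;

static u64 bytes_to_u64 (const u8 b[8])
{
	u64 v = 0;
	int i;

	for (i = 7; i >= 0; i--)
		v = (v << 8) | b[i];	/* assemble little-endian bytes */
	return v;
}

int main (void)
{
	u8 csr_base[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xff, 0xff };

	printf("csr_base = 0x%llx\n", bytes_to_u64(csr_base));
	return 0;
}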
include/asm-ia64/compat.h
View file @ 20f6d716
...
@@ -102,6 +102,9 @@ struct compat_statfs {
 	int		f_spare[6];
 };

+#define COMPAT_RLIM_OLD_INFINITY	0x7fffffff
+#define COMPAT_RLIM_INFINITY		0xffffffff
+
 typedef u32		compat_old_sigset_t;	/* at least 32 bits */

 #define _COMPAT_NSIG		64
...
include/asm-ia64/intrinsics.h
View file @ 20f6d716
...
@@ -17,16 +17,16 @@
 extern unsigned long __bad_size_for_ia64_fetch_and_add (void);
 extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);

-#define IA64_FETCHADD(tmp,v,n,sz)						\
+#define IA64_FETCHADD(tmp,v,n,sz,sem)						\
 ({										\
 	switch (sz) {								\
 	      case 4:								\
-		__asm__ __volatile__ ("fetchadd4.rel %0=[%1],%2"		\
+		__asm__ __volatile__ ("fetchadd4."sem" %0=[%1],%2"		\
 				      : "=r"(tmp) : "r"(v), "i"(n) : "memory");	\
 		break;								\
 										\
 	      case 8:								\
-		__asm__ __volatile__ ("fetchadd8.rel %0=[%1],%2"		\
+		__asm__ __volatile__ ("fetchadd8."sem" %0=[%1],%2"		\
 				      : "=r"(tmp) : "r"(v), "i"(n) : "memory");	\
 		break;								\
...
@@ -35,32 +35,34 @@ extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
 	}									\
 })

-#define ia64_fetch_and_add(i,v)							\
+#define ia64_fetchadd(i,v,sem)							\
 ({										\
 	__u64 _tmp;								\
 	volatile __typeof__(*(v)) *_v = (v);					\
 	/* Can't use a switch () here: gcc isn't always smart enough for that... */ \
 	if ((i) == -16)								\
-		IA64_FETCHADD(_tmp, _v, -16, sizeof(*(v)));			\
+		IA64_FETCHADD(_tmp, _v, -16, sizeof(*(v)), sem);		\
 	else if ((i) == -8)							\
-		IA64_FETCHADD(_tmp, _v, -8, sizeof(*(v)));			\
+		IA64_FETCHADD(_tmp, _v, -8, sizeof(*(v)), sem);			\
 	else if ((i) == -4)							\
-		IA64_FETCHADD(_tmp, _v, -4, sizeof(*(v)));			\
+		IA64_FETCHADD(_tmp, _v, -4, sizeof(*(v)), sem);			\
 	else if ((i) == -1)							\
-		IA64_FETCHADD(_tmp, _v, -1, sizeof(*(v)));			\
+		IA64_FETCHADD(_tmp, _v, -1, sizeof(*(v)), sem);			\
 	else if ((i) == 1)							\
-		IA64_FETCHADD(_tmp, _v, 1, sizeof(*(v)));			\
+		IA64_FETCHADD(_tmp, _v, 1, sizeof(*(v)), sem);			\
 	else if ((i) == 4)							\
-		IA64_FETCHADD(_tmp, _v, 4, sizeof(*(v)));			\
+		IA64_FETCHADD(_tmp, _v, 4, sizeof(*(v)), sem);			\
 	else if ((i) == 8)							\
-		IA64_FETCHADD(_tmp, _v, 8, sizeof(*(v)));			\
+		IA64_FETCHADD(_tmp, _v, 8, sizeof(*(v)), sem);			\
 	else if ((i) == 16)							\
-		IA64_FETCHADD(_tmp, _v, 16, sizeof(*(v)));			\
+		IA64_FETCHADD(_tmp, _v, 16, sizeof(*(v)), sem);			\
 	else									\
 		_tmp = __bad_increment_for_ia64_fetch_and_add();		\
-	(__typeof__(*(v))) (_tmp + (i));	/* return new value */		\
+	(__typeof__(*(v))) (_tmp);		/* return old value */		\
 })

+#define ia64_fetch_and_add(i,v)	(ia64_fetchadd(i, v, "rel") + (i))	/* return new value */
+
 /*
  * This function doesn't exist, so you'll get a linker error if
  * something tries to do an invalid xchg().
...
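The behavioural change worth noting in this hunk: ia64_fetchadd() now returns the value before the add, and ia64_fetch_and_add() recovers the new value by adding the increment back in. The same distinction, shown with GCC's portable builtin instead of the ia64 asm (illustrative only):

#include <stdio.h>

static int counter = 5;

int main (void)
{
	int old_val = __sync_fetch_and_add(&counter, 3);	/* returns the old value, 5 */
	int new_val = old_val + 3;				/* the "new value" convention, 8 */

	printf("old=%d new=%d counter=%d\n", old_val, new_val, counter);
	return 0;
}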
include/asm-ia64/io.h
View file @ 20f6d716
...
@@ -32,6 +32,24 @@
 */
 #define IO_SPACE_LIMIT		0xffffffffffffffffUL

+#define MAX_IO_SPACES		16
+#define IO_SPACE_BITS		24
+#define IO_SPACE_SIZE		(1UL << IO_SPACE_BITS)
+
+#define IO_SPACE_NR(port)	((port) >> IO_SPACE_BITS)
+#define IO_SPACE_BASE(space)	((space) << IO_SPACE_BITS)
+#define IO_SPACE_PORT(port)	((port) & (IO_SPACE_SIZE - 1))
+
+#define IO_SPACE_SPARSE_ENCODING(p)	((((p) >> 2) << 12) | (p & 0xfff))
+
+struct io_space {
+	unsigned long mmio_base;	/* base in MMIO space */
+	int sparse;
+};
+
+extern struct io_space io_space[];
+extern unsigned int num_io_spaces;
+
 # ifdef __KERNEL__

 #include <asm/machvec.h>
...
@@ -80,11 +98,17 @@ __ia64_get_io_port_base (void)
 static inline void *
 __ia64_mk_io_addr (unsigned long port)
 {
-	const unsigned long io_base = __ia64_get_io_port_base();
-	unsigned long addr;
+	struct io_space *space;
+	unsigned long offset;
+
+	space = &io_space[IO_SPACE_NR(port)];
+	port = IO_SPACE_PORT(port);
+	if (space->sparse)
+		offset = IO_SPACE_SPARSE_ENCODING(port);
+	else
+		offset = port;

-	addr = io_base | ((port >> 2) << 12) | (port & 0xfff);
-	return (void *) addr;
+	return (void *) (space->mmio_base | offset);
 }

 /*
...
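A small demonstration of how the new macros split a port number into a space index, a space-relative port, and a sparse MMIO offset (standalone, with a made-up port value):

#include <stdio.h>

#define IO_SPACE_BITS			24
#define IO_SPACE_SIZE			(1UL << IO_SPACE_BITS)
#define IO_SPACE_NR(port)		((port) >> IO_SPACE_BITS)
#define IO_SPACE_PORT(port)		((port) & (IO_SPACE_SIZE - 1))
#define IO_SPACE_SPARSE_ENCODING(p)	((((p) >> 2) << 12) | ((p) & 0xfff))

int main (void)
{
	unsigned long port = (2UL << IO_SPACE_BITS) | 0x3f8;	/* "ttyS0" in space 2 */
	unsigned long p = IO_SPACE_PORT(port);

	printf("space %lu, port 0x%lx, sparse offset 0x%lx\n",
	       IO_SPACE_NR(port), p, IO_SPACE_SPARSE_ENCODING(p));
	return 0;
}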
include/asm-ia64/iosapic.h
View file @ 20f6d716
...
@@ -57,6 +57,7 @@ extern void __init iosapic_init (unsigned long address,
 extern int gsi_to_vector (unsigned int gsi);
 extern int gsi_to_irq (unsigned int gsi);
 extern void __init iosapic_parse_prt (void);
+extern void iosapic_enable_intr (unsigned int vector);
 extern int iosapic_register_intr (unsigned int gsi, unsigned long polarity,
 				  unsigned long trigger);
 extern void __init iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
...
include/asm-ia64/machvec_init.h
View file @ 20f6d716
...
@@ -16,6 +16,10 @@ extern ia64_mv_inl_t __ia64_inl;
 extern ia64_mv_outb_t __ia64_outb;
 extern ia64_mv_outw_t __ia64_outw;
 extern ia64_mv_outl_t __ia64_outl;
+extern ia64_mv_readb_t __ia64_readb;
+extern ia64_mv_readw_t __ia64_readw;
+extern ia64_mv_readl_t __ia64_readl;
+extern ia64_mv_readq_t __ia64_readq;

 #define MACHVEC_HELPER(name)									\
 struct ia64_machine_vector machvec_##name __attribute__ ((unused, __section__ (".machvec")))	\
...
include/asm-ia64/pal.h
View file @ 20f6d716
...
@@ -622,7 +622,8 @@ typedef struct pal_min_state_area_s {
 	u64	pmsa_xip;		/* previous iip */
 	u64	pmsa_xpsr;		/* previous psr */
 	u64	pmsa_xfs;		/* previous ifs */
-	u64	pmsa_reserved[71];	/* pal_min_state_area should total to 1KB */
+	u64	pmsa_br1;		/* branch register 1 */
+	u64	pmsa_reserved[70];	/* pal_min_state_area should total to 1KB */
 } pal_min_state_area_t;
...
include/asm-ia64/pci.h
View file @ 20f6d716
...
@@ -97,12 +97,18 @@ extern int pcibios_prep_mwi (struct pci_dev *);
 extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
 				enum pci_mmap_state mmap_state, int write_combine);

+struct pci_window {
+	struct resource resource;
+	u64 offset;
+};
+
 struct pci_controller {
 	void *acpi_handle;
 	void *iommu;
 	int segment;
-	u64 mem_offset;
+
+	unsigned int windows;
+	struct pci_window *window;
 };

 #define PCI_CONTROLLER(busdev) ((struct pci_controller *) busdev->sysdata)
...
include/asm-ia64/perfmon.h
View file @ 20f6d716
...
@@ -41,6 +41,7 @@
 #define PFM_FL_NOTIFY_BLOCK	0x04	/* block task on user level notifications */
 #define PFM_FL_SYSTEM_WIDE	0x08	/* create a system wide context */
 #define PFM_FL_EXCL_IDLE	0x20	/* exclude idle task from system wide session */
+#define PFM_FL_UNSECURE		0x40	/* allow unsecure monitoring for non self-monitoring task */

 /*
  * PMC flags
...
@@ -125,7 +126,7 @@ typedef struct {
  * Define the version numbers for both perfmon as a whole and the sampling buffer format.
  */
 #define PFM_VERSION_MAJ		1U
-#define PFM_VERSION_MIN		3U
+#define PFM_VERSION_MIN		4U
 #define PFM_VERSION		(((PFM_VERSION_MAJ&0xffff)<<16)|(PFM_VERSION_MIN & 0xffff))
 #define PFM_SMPL_VERSION_MAJ	1U
...
include/asm-ia64/processor.h
View file @ 20f6d716
...
@@ -291,7 +291,7 @@ struct thread_struct {
 #define start_thread(regs,new_ip,new_sp) do {							\
 	set_fs(USER_DS);									\
-	regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL | IA64_PSR_SP))	\
+	regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL))		\
			 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS));		\
 	regs->cr_iip = new_ip;									\
 	regs->ar_rsc = 0xf;		/* eager mode, privilege level 3 */			\
...
include/asm-ia64/ptrace.h
View file @ 20f6d716
...
@@ -227,8 +227,10 @@ struct switch_stack {
 })

 struct task_struct;			/* forward decl */
+struct unw_frame_info;			/* forward decl */

 extern void show_regs (struct pt_regs *);
+extern void ia64_do_show_stack (struct unw_frame_info *, void *);
 extern unsigned long ia64_get_user_rbs_end (struct task_struct *, struct pt_regs *,
 					    unsigned long *);
 extern long ia64_peek (struct task_struct *, struct switch_stack *, unsigned long,
...
include/asm-ia64/sal.h
View file @ 20f6d716
...
@@ -226,7 +226,7 @@ enum {
 /* Encodings for machine check parameter types */
 enum {
-	SAL_MC_PARAM_RENDEZ_INT    = 1,	/* Rendezevous interrupt */
+	SAL_MC_PARAM_RENDEZ_INT    = 1,	/* Rendezvous interrupt */
 	SAL_MC_PARAM_RENDEZ_WAKEUP = 2,	/* Wakeup */
 	SAL_MC_PARAM_CPE_INT       = 3	/* Corrected Platform Error Int */
 };
...
include/asm-ia64/serial.h
View file @ 20f6d716
...
@@ -59,7 +59,6 @@
 	{ 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS },	/* ttyS2 */	\
 	{ 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS },	/* ttyS3 */

 #ifdef CONFIG_SERIAL_MANY_PORTS
 #define EXTRA_SERIAL_PORT_DEFNS			\
 	{ 0, BASE_BAUD, 0x1A0, 9, FOURPORT_FLAGS },	/* ttyS4 */	\
...
include/asm-ia64/spinlock.h
View file @ 20f6d716
...
@@ -22,26 +22,72 @@ typedef struct {
 #define SPIN_LOCK_UNLOCKED			(spinlock_t) { 0 }
 #define spin_lock_init(x)			((x)->lock = 0)

-#define DEBUG_SPIN_LOCK	0
-
-#if DEBUG_SPIN_LOCK
-
-#include <ia64intrin.h>
-
-#define _raw_spin_lock(x)								\
-do {											\
-	unsigned long _timeout = 1000000000;						\
-	volatile unsigned int _old = 0, _new = 1, *_ptr = &((x)->lock);			\
-	do {										\
-		if (_timeout-- == 0) {							\
-			extern void dump_stack (void);					\
-			printk("kernel DEADLOCK at %s:%d?\n", __FILE__, __LINE__);	\
-			dump_stack();							\
-		}									\
-	} while (__sync_val_compare_and_swap(_ptr, _old, _new) != _old);		\
-} while (0)
+#define NEW_LOCK
+#ifdef NEW_LOCK
+
+/*
+ * Try to get the lock.  If we fail to get the lock, make a non-standard call to
+ * ia64_spinlock_contention().  We do not use a normal call because that would force all
+ * callers of spin_lock() to be non-leaf routines.  Instead, ia64_spinlock_contention() is
+ * carefully coded to touch only those registers that spin_lock() marks "clobbered".
+ */
+
+#define IA64_SPINLOCK_CLOBBERS "ar.pfs", "p14", "r28", "r29", "r30", "b6", "memory"
+
+static inline void
+_raw_spin_lock (spinlock_t *lock)
+{
+	register volatile unsigned int *ptr asm ("r31") = &lock->lock;
+
+#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 4)
+# ifdef CONFIG_ITANIUM
+	/* don't use brl on Itanium... */
+	asm volatile ("{\n\t"
+		      "  mov ar.ccv = r0\n\t"
+		      "  mov r28 = ip\n\t"
+		      "  mov r30 = 1;;\n\t"
+		      "}\n\t"
+		      "cmpxchg4.acq r30 = [%1], r30, ar.ccv\n\t"
+		      "movl r29 = ia64_spinlock_contention_pre3_4;;\n\t"
+		      "cmp4.ne p14, p0 = r30, r0\n\t"
+		      "mov b6 = r29;;\n"
+		      "(p14) br.cond.spnt.many b6"
+		      : "=r"(ptr) : "r"(ptr) : IA64_SPINLOCK_CLOBBERS);
+# else
+	asm volatile ("{\n\t"
+		      "  mov ar.ccv = r0\n\t"
+		      "  mov r28 = ip\n\t"
+		      "  mov r30 = 1;;\n\t"
+		      "}\n\t"
+		      "cmpxchg4.acq r30 = [%1], r30, ar.ccv;;\n\t"
+		      "cmp4.ne p14, p0 = r30, r0\n"
+		      "(p14) brl.cond.spnt.many ia64_spinlock_contention_pre3_4"
+		      : "=r"(ptr) : "r"(ptr) : IA64_SPINLOCK_CLOBBERS);
+# endif /* CONFIG_MCKINLEY */
+#else
+# ifdef CONFIG_ITANIUM
+	/* don't use brl on Itanium... */
+	/* mis-declare, so we get the entry-point, not it's function descriptor: */
+	asm volatile ("mov r30 = 1\n\t"
+		      "mov ar.ccv = r0;;\n\t"
+		      "cmpxchg4.acq r30 = [%0], r30, ar.ccv\n\t"
+		      "movl r29 = ia64_spinlock_contention;;\n\t"
+		      "cmp4.ne p14, p0 = r30, r0\n\t"
+		      "mov b6 = r29;;\n"
+		      "(p14) br.call.spnt.many b6 = b6"
+		      : "=r"(ptr) : "r"(ptr) : IA64_SPINLOCK_CLOBBERS);
+# else
+	asm volatile ("mov r30 = 1\n\t"
+		      "mov ar.ccv = r0;;\n\t"
+		      "cmpxchg4.acq r30 = [%0], r30, ar.ccv;;\n\t"
+		      "cmp4.ne p14, p0 = r30, r0\n\t"
+		      "(p14) brl.call.spnt.many b6=ia64_spinlock_contention"
+		      : "=r"(ptr) : "r"(ptr) : IA64_SPINLOCK_CLOBBERS);
+# endif /* CONFIG_MCKINLEY */
+#endif
+}
+#else /* !NEW_LOCK */

 /*
  * Streamlined test_and_set_bit(0, (x)).  We use test-and-test-and-set
...
@@ -64,7 +110,7 @@ do { \
 	";;\n"										\
 	:: "r"(&(x)->lock) : "ar.ccv", "p7", "r2", "r29", "memory")

-#endif /* !DEBUG_SPIN_LOCK */
+#endif /* !NEW_LOCK */

 #define spin_is_locked(x)	((x)->lock != 0)
 #define _raw_spin_unlock(x)	do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
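The fast path above is a single cmpxchg4.acq; only on failure does it branch to the out-of-line contention routine, which spins on plain loads before retrying. The same test-and-test-and-set shape in portable C11 atomics (a sketch of the idea, not the kernel's implementation):

#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_int lock; } spinlock_t;

static void spin_lock (spinlock_t *l)
{
	int expected = 0;

	while (!atomic_compare_exchange_weak_explicit(&l->lock, &expected, 1,
						      memory_order_acquire,
						      memory_order_relaxed)) {
		/* contention path: spin on plain loads to limit coherence traffic */
		while (atomic_load_explicit(&l->lock, memory_order_relaxed))
			;
		expected = 0;
	}
}

static void spin_unlock (spinlock_t *l)
{
	atomic_store_explicit(&l->lock, 0, memory_order_release);
}

int main (void)
{
	spinlock_t l = { 0 };

	spin_lock(&l);
	printf("locked\n");
	spin_unlock(&l);
	return 0;
}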
...
@@ -72,8 +118,8 @@ do { \
 #define spin_unlock_wait(x)	do { barrier(); } while ((x)->lock)

 typedef struct {
 	volatile int read_counter	: 31;
 	volatile int write_lock	:  1;
 } rwlock_t;
 #define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }
...
@@ -82,33 +128,21 @@ typedef struct {
 #define _raw_read_lock(rw)								\
 do {											\
-	int __read_lock_tmp = 0;							\
-	__asm__ __volatile__ ("1:\tfetchadd4.acq %0 = [%1], 1\n"			\
-			      ";;\n"							\
-			      "tbit.nz p6,p0 = %0, 31\n"				\
-			      "(p6) br.cond.sptk.few 2f\n"				\
-			      ".section .text.lock,\"ax\"\n"				\
-			      "2:\tfetchadd4.rel %0 = [%1], -1\n"			\
-			      ";;\n"							\
-			      "3:\tld4.acq %0 = [%1]\n"					\
-			      ";;\n"							\
-			      "tbit.nz p6,p0 = %0, 31\n"				\
-			      "(p6) br.cond.sptk.few 3b\n"				\
-			      "br.cond.sptk.few 1b\n"					\
-			      ";;\n"							\
-			      ".previous\n"						\
-			      : "=&r" (__read_lock_tmp)					\
-			      : "r" (rw) : "p6", "memory");				\
-} while(0)
+	rwlock_t *__read_lock_ptr = (rw);						\
+											\
+	while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, "acq") < 0)) {	\
+		ia64_fetchadd(-1, (int *) __read_lock_ptr, "rel");			\
+		while (*(volatile int *)__read_lock_ptr < 0)				\
+			barrier();							\
+	}										\
+} while (0)

 #define _raw_read_unlock(rw)					\
 do {								\
-	int __read_unlock_tmp = 0;				\
-	__asm__ __volatile__ ("fetchadd4.rel %0 = [%1], -1\n"	\
-			      : "=r" (__read_unlock_tmp)	\
-			      : "r" (rw)			\
-			      : "memory");			\
-} while(0)
+	rwlock_t *__read_lock_ptr = (rw);			\
+	ia64_fetchadd(-1, (int *) __read_lock_ptr, "rel");	\
+} while (0)

 #define _raw_write_lock(rw)							\
 do {										\
...
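The new reader path leans on ia64_fetchadd() returning the old value: a negative result means the writer bit (bit 31) was set, so the reader backs its increment out and spins until the writer leaves. The equivalent logic in C11 atomics (illustrative, using the sign bit in place of the write_lock bitfield):

#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_int lock; } rwlock_t;	/* sign bit plays the write_lock role */

static void read_lock (rwlock_t *rw)
{
	/* fetch-add returns the OLD value; negative means a writer is in */
	while (atomic_fetch_add_explicit(&rw->lock, 1, memory_order_acquire) < 0) {
		atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_release);
		while (atomic_load_explicit(&rw->lock, memory_order_relaxed) < 0)
			;
	}
}

static void read_unlock (rwlock_t *rw)
{
	atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_release);
}

int main (void)
{
	rwlock_t rw = { 0 };

	read_lock(&rw);
	printf("readers: %d\n", atomic_load(&rw.lock));
	read_unlock(&rw);
	return 0;
}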
include/asm-ia64/uaccess.h
View file @ 20f6d716
...
@@ -8,7 +8,7 @@
 * addresses.  Thus, we need to be careful not to let the user to
 * trick us into accessing kernel memory that would normally be
 * inaccessible.  This code is also fairly performance sensitive,
- * so we want to spend as little time doing safty checks as
+ * so we want to spend as little time doing safety checks as
 * possible.
 *
 * To make matters a bit more interesting, these macros sometimes also
...
include/asm-ia64/unwind.h
View file @ 20f6d716
...
@@ -2,8 +2,8 @@
 #define _ASM_IA64_UNWIND_H

 /*
- * Copyright (C) 1999-2000 Hewlett-Packard Co
- *	Copyright (C) 1999-2000 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999-2000, 2003 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * A simple API for unwinding kernel stacks.  This is used for
 * debugging and error reporting purposes.  The kernel doesn't need
...
@@ -107,6 +107,13 @@ extern void unw_remove_unwind_table (void *handle);
 */
 extern void unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t);

+/*
+ * Prepare to unwind from interruption.  The pt-regs and switch-stack structures must be
+ * "adjacent" (no state modifications between pt-regs and switch-stack).
+ */
+extern void unw_init_from_interruption (struct unw_frame_info *info, struct task_struct *t,
+					struct pt_regs *pt, struct switch_stack *sw);
+
 extern void unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t,
 				 struct switch_stack *sw);
...
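For reference, a typical in-kernel use of this API (a sketch mirroring what callers like ia64_do_show_stack() do; unw_get_ip() and unw_unwind() are existing functions declared in this header, and this snippet only compiles inside the kernel tree):

/* walk a blocked task's stack, printing the instruction pointer per frame */
static void show_task_ips (struct task_struct *t)
{
	struct unw_frame_info info;
	unsigned long ip;

	unw_init_from_blocked_task(&info, t);
	do {
		unw_get_ip(&info, &ip);
		if (ip == 0)
			break;
		printk("ip=0x%lx\n", ip);
	} while (unw_unwind(&info) >= 0);	/* negative return ends the walk */
}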
include/linux/pci_ids.h
View file @ 20f6d716
...
@@ -605,6 +605,7 @@
 #define PCI_DEVICE_ID_HP_ZX1_SBA	0x1229
 #define PCI_DEVICE_ID_HP_ZX1_IOC	0x122a
 #define PCI_DEVICE_ID_HP_ZX1_LBA	0x122e
+#define PCI_DEVICE_ID_HP_SX1000_IOC	0x127c
 #define PCI_DEVICE_ID_HP_DIVA_EVEREST	0x1282
 #define PCI_DEVICE_ID_HP_DIVA_AUX	0x1290
...