Commit 3c6c3721 authored by Tony Luck

Merge intel.com:/data/home/aegl/BK/Linus

into intel.com:/data/home/aegl/BK/linux-ia64-release-2.6.11
parents 307bc76b 872a9465
......@@ -674,6 +674,10 @@ running once the system is up.
mac53c9x= [HW,SCSI]
Format: <num_esps>,<disconnect>,<nosync>,<can_queue>,<cmd_per_lun>,<sg_tablesize>,<hostid>,<use_tags>
machvec= [IA64]
Force the use of a particular machine-vector (machvec) in a generic
kernel. Example: machvec=hpzx1_swiotlb
mad16= [HW,OSS]
Format: <io>,<irq>,<dma>,<dma16>,<mpu_io>,<mpu_irq>,<joystick>
......
......@@ -64,6 +64,7 @@ config IA64_GENERIC
generic For any supported IA-64 system
DIG-compliant For DIG ("Developer's Interface Guide") compliant systems
HP-zx1/sx1000 For HP systems
HP-zx1/sx1000+swiotlb For HP systems with (broken) DMA-constrained devices.
SGI-SN2 For SGI Altix systems
Ski-simulator For the HP simulator <http://www.hpl.hp.com/research/linux/ski/>
......@@ -78,6 +79,15 @@ config IA64_HP_ZX1
Build a kernel that runs on HP zx1 and sx1000 systems. This adds
support for the HP I/O MMU.
config IA64_HP_ZX1_SWIOTLB
bool "HP-zx1/sx1000 with software I/O TLB"
help
Build a kernel that runs on HP zx1 and sx1000 systems even when they
have broken PCI devices which cannot DMA to full 32 bits. Apart
from support for the HP I/O MMU, this includes support for the software
I/O TLB, which allows supporting the broken devices at the expense of
wasting some kernel memory (about 2MB by default).
config IA64_SGI_SN2
bool "SGI-SN2"
help
......@@ -188,7 +198,7 @@ config HOLES_IN_ZONE
config DISCONTIGMEM
bool "Discontiguous memory support"
depends on (IA64_DIG || IA64_SGI_SN2 || IA64_GENERIC || IA64_HP_ZX1) && NUMA && VIRTUAL_MEM_MAP
depends on (IA64_DIG || IA64_SGI_SN2 || IA64_GENERIC || IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB) && NUMA && VIRTUAL_MEM_MAP
default y if (IA64_SGI_SN2 || IA64_GENERIC) && NUMA
help
Say Y to support efficient handling of discontiguous physical memory,
......@@ -326,7 +336,7 @@ menu "Power management and ACPI"
config PM
bool "Power Management support"
depends on IA64_GENERIC || IA64_DIG || IA64_HP_ZX1
depends on IA64_GENERIC || IA64_DIG || IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB
default y
help
"Power Management" means that parts of your computer are shut
......
......@@ -16,7 +16,7 @@ config IA64_GRANULE_16MB
config IA64_GRANULE_64MB
bool "64MB"
depends on !(IA64_GENERIC || IA64_HP_ZX1 || IA64_SGI_SN2)
depends on !(IA64_GENERIC || IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_SGI_SN2)
endchoice
......
......@@ -57,11 +57,13 @@ core-$(CONFIG_IA32_SUPPORT) += arch/ia64/ia32/
core-$(CONFIG_IA64_DIG) += arch/ia64/dig/
core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/
core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/
core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/
core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/
drivers-$(CONFIG_PCI) += arch/ia64/pci/
drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/
drivers-$(CONFIG_IA64_HP_ZX1) += arch/ia64/hp/common/ arch/ia64/hp/zx1/
drivers-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/hp/common/ arch/ia64/hp/zx1/
drivers-$(CONFIG_IA64_GENERIC) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ arch/ia64/hp/sim/ arch/ia64/sn/
drivers-$(CONFIG_OPROFILE) += arch/ia64/oprofile/
......
......@@ -6,3 +6,5 @@
#
obj-y := sba_iommu.o
obj-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += hwsw_iommu.o
obj-$(CONFIG_IA64_GENERIC) += hwsw_iommu.o
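
For a generic or swiotlb kernel, hwsw_iommu.o provides a combined DMA layer that picks, per device, between the hardware I/O MMU (sba_iommu.o) and the software I/O TLB. The dispatcher itself is not part of this diff; the following standalone model only illustrates the selection idea, and its names, threshold, and structure are assumptions rather than the contents of hwsw_iommu.c.

/*
 * Standalone model (not kernel code) of the hwsw_iommu dispatch idea:
 * a device whose DMA mask the hardware I/O MMU can satisfy uses the
 * hardware path; anything narrower falls back to the software I/O TLB.
 * The names and the 32-bit threshold are illustrative assumptions.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct fake_device {
        const char *name;
        uint64_t dma_mask;
};

/* Assume the hardware I/O MMU handles any 32-bit-or-wider mask. */
static bool hw_iommu_supports(const struct fake_device *dev)
{
        return dev->dma_mask >= 0xffffffffULL;
}

static const char *pick_dma_backend(const struct fake_device *dev)
{
        return hw_iommu_supports(dev) ? "sba_iommu (hardware)"
                                      : "swiotlb (bounce buffers)";
}

int main(void)
{
        struct fake_device healthy = { "well-behaved-nic", 0xffffffffULL };
        struct fake_device broken  = { "legacy-audio", 0x00ffffffULL }; /* 24-bit DMA */

        printf("%s -> %s\n", healthy.name, pick_dma_backend(&healthy));
        printf("%s -> %s\n", broken.name, pick_dma_backend(&broken));
        return 0;
}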
......@@ -5,4 +5,4 @@
# Copyright (C) Alex Williamson (alex_williamson@hp.com)
#
obj-$(CONFIG_IA64_GENERIC) += hpzx1_machvec.o
obj-$(CONFIG_IA64_GENERIC) += hpzx1_machvec.o hpzx1_swiotlb_machvec.o
......@@ -12,6 +12,7 @@ obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o i
obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o
obj-$(CONFIG_IA64_GENERIC) += acpi-ext.o
obj-$(CONFIG_IA64_HP_ZX1) += acpi-ext.o
obj-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += acpi-ext.o
obj-$(CONFIG_IA64_PALINFO) += palinfo.o
obj-$(CONFIG_IOSAPIC) += iosapic.o
obj-$(CONFIG_MODULES) += module.o
......
......@@ -113,6 +113,8 @@ acpi_get_sysname (void)
return "hpsim";
# elif defined (CONFIG_IA64_HP_ZX1)
return "hpzx1";
# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB)
return "hpzx1_swiotlb";
# elif defined (CONFIG_IA64_SGI_SN2)
return "sn2";
# elif defined (CONFIG_IA64_DIG)
......
/*
* Architecture-specific setup.
*
* Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
* Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Stephane Eranian <eranian@hpl.hp.com>
* Copyright (C) 2000, Rohit Seth <rohit.seth@intel.com>
......@@ -312,7 +312,28 @@ setup_arch (char **cmdline_p)
io_port_init();
#ifdef CONFIG_IA64_GENERIC
machvec_init(acpi_get_sysname());
{
const char *mvec_name = strstr (*cmdline_p, "machvec=");
char str[64];
if (mvec_name) {
const char *end;
size_t len;
mvec_name += 8;
end = strchr (mvec_name, ' ');
if (end)
len = end - mvec_name;
else
len = strlen (mvec_name);
len = min(len, sizeof (str) - 1);
strncpy (str, mvec_name, len);
str[len] = '\0';
mvec_name = str;
} else
mvec_name = acpi_get_sysname();
machvec_init(mvec_name);
}
#endif
if (early_console_setup(*cmdline_p) == 0)
......
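
The block added to setup_arch() above extracts the machvec= value with a bounded copy before falling back to acpi_get_sysname(). Below is a minimal userspace sketch of the same extraction logic; the "dig" fallback merely stands in for acpi_get_sysname() and is not a kernel default.

/*
 * Userspace sketch of the "machvec=" extraction done in setup_arch():
 * find the option, copy its space-terminated value into a bounded
 * buffer, and fall back to a default name otherwise.  Illustrative only.
 */
#include <stdio.h>
#include <string.h>

static const char *parse_machvec(const char *cmdline, char *buf, size_t buflen,
                                 const char *fallback)
{
        const char *p = strstr(cmdline, "machvec=");
        const char *end;
        size_t len;

        if (!p)
                return fallback;

        p += strlen("machvec=");
        end = strchr(p, ' ');
        len = end ? (size_t)(end - p) : strlen(p);
        if (len > buflen - 1)
                len = buflen - 1;       /* bound the copy, as the kernel code does */
        memcpy(buf, p, len);
        buf[len] = '\0';
        return buf;
}

int main(void)
{
        char name[64];

        printf("%s\n", parse_machvec("root=/dev/sda2 machvec=hpzx1_swiotlb console=ttyS0",
                                     name, sizeof(name), "dig"));   /* hpzx1_swiotlb */
        printf("%s\n", parse_machvec("root=/dev/sda2 console=ttyS0",
                                     name, sizeof(name), "dig"));   /* dig (fallback) */
        return 0;
}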
......@@ -61,9 +61,8 @@ static char *io_tlb_start, *io_tlb_end;
/*
* The number of IO TLB blocks (in groups of 64) between io_tlb_start and
* io_tlb_end. This is command line adjustable via setup_io_tlb_npages.
* Default to 64MB.
*/
static unsigned long io_tlb_nslabs = 32768;
static unsigned long io_tlb_nslabs;
/*
* When the IOMMU overflows we return a fallback buffer. This sets the size.
......@@ -113,10 +112,15 @@ __setup("swiotlb=", setup_io_tlb_npages);
* structures for the software IO TLB used to implement the PCI DMA API.
*/
void
swiotlb_init(void)
swiotlb_init_with_default_size (size_t default_size)
{
unsigned long i;
if (!io_tlb_nslabs) {
io_tlb_nslabs = (default_size >> PAGE_SHIFT);
io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
}
/*
* Get IO TLB memory from the low pages
*/
......@@ -145,6 +149,12 @@ swiotlb_init(void)
virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end));
}
void
swiotlb_init (void)
{
swiotlb_init_with_default_size(64 * (1<<20)); /* default to 64MB */
}
static inline int
address_needs_mapping(struct device *hwdev, dma_addr_t addr)
{
......
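
The slab-count computation introduced above converts a byte size into IO TLB slabs and rounds the result up to a whole IO_TLB_SEGSIZE group via ALIGN(). A small worked example of that arithmetic follows; PAGE_SHIFT = 14 (16 KB pages) and IO_TLB_SEGSIZE = 128 are assumed values chosen for illustration.

/*
 * Worked example of the io_tlb_nslabs computation above: shift a byte
 * size down to a slab count, then round up to a whole segment group.
 * The kernel's ALIGN() macro rounds up in the same way.
 */
#include <stdio.h>

#define PAGE_SHIFT      14              /* assumed: 16 KB pages */
#define IO_TLB_SEGSIZE  128             /* assumed segment size */
#define ALIGN_UP(x, a)  (((x) + (a) - 1) / (a) * (a))

static unsigned long nslabs_for(unsigned long default_size)
{
        unsigned long nslabs = default_size >> PAGE_SHIFT;

        return ALIGN_UP(nslabs, IO_TLB_SEGSIZE);
}

int main(void)
{
        printf("64 MB -> %lu slabs\n", nslabs_for(64UL << 20));  /* 4096 */
        printf(" 1 MB -> %lu slabs\n", nslabs_for(1UL << 20));   /* 64, rounded up to 128 */
        return 0;
}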
......@@ -6,6 +6,7 @@
* Copyright (C) 2002 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Bjorn Helgaas <bjorn_helgaas@hp.com>
* Copyright (C) 2004 Silicon Graphics, Inc.
*
* Note: Above list of copyright holders is incomplete...
*/
......@@ -526,7 +527,7 @@ pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
* Leave vm_pgoff as-is, the PCI space address is the physical
* address on this platform.
*/
vma->vm_flags |= (VM_SHM | VM_LOCKED | VM_IO);
vma->vm_flags |= (VM_SHM | VM_RESERVED | VM_IO);
if (write_combine)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
......@@ -540,6 +541,117 @@ pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
return 0;
}
/**
* pci_mmap_legacy_page_range - map legacy memory space to userland
* @bus: bus whose legacy space we're mapping
* @vma: vma passed in by mmap
*
* Map legacy memory space for this device back to userspace using a machine
* vector to get the base address.
*/
int
pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma)
{
char *addr;
addr = pci_get_legacy_mem(bus);
if (IS_ERR(addr))
return PTR_ERR(addr);
vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_flags |= (VM_SHM | VM_RESERVED | VM_IO);
if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma->vm_end - vma->vm_start, vma->vm_page_prot))
return -EAGAIN;
return 0;
}
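
pci_mmap_legacy_page_range() above is what ultimately services an mmap of a bus's legacy memory space from userspace. The sketch below assumes the kernel exposes the region as a per-bus legacy_mem file in sysfs; the path, the 0xC0000 offset, and the expectation of a shadowed ROM are illustrative assumptions, not guarantees of this patch.

/*
 * Usage sketch: map 4 KB of legacy memory (the traditional video BIOS
 * shadow at bus address 0xC0000) through a per-bus legacy_mem file.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
        const char *path = "/sys/class/pci_bus/0000:00/legacy_mem"; /* assumed path */
        unsigned char *p;
        int fd = open(path, O_RDONLY);

        if (fd < 0) {
                perror(path);
                return 1;
        }
        p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0xC0000);
        if (p == MAP_FAILED) {
                perror("mmap");
                close(fd);
                return 1;
        }
        /* 0x55 0xaa here would indicate a shadowed option ROM. */
        printf("legacy mem at 0xC0000: %02x %02x\n", p[0], p[1]);
        munmap(p, 4096);
        close(fd);
        return 0;
}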
/**
* ia64_pci_get_legacy_mem - generic legacy mem routine
* @bus: bus to get legacy memory base address for
*
* Find the base of legacy memory for @bus. This is typically the first
* megabyte of bus address space for @bus or is simply 0 on platforms whose
* chipsets support legacy I/O and memory routing. Returns the base address
* or an error pointer if an error occurred.
*
* This is the ia64 generic version of this routine. Other platforms
* are free to override it with a machine vector.
*/
char *ia64_pci_get_legacy_mem(struct pci_bus *bus)
{
return (char *)__IA64_UNCACHED_OFFSET;
}
/**
* ia64_pci_legacy_read - read from legacy I/O space
* @bus: bus to read
* @port: legacy port value
* @val: caller allocated storage for returned value
* @size: number of bytes to read
*
* Simply reads @size bytes from @port and puts the result in @val.
*
* Again, this (and the write routine) are generic versions that can be
* overridden by the platform. This is necessary on platforms that don't
* support legacy I/O routing or that hard fail on legacy I/O timeouts.
*/
int ia64_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
{
int ret = size;
switch (size) {
case 1:
*val = inb(port);
break;
case 2:
*val = inw(port);
break;
case 4:
*val = inl(port);
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
/**
* ia64_pci_legacy_write - perform a legacy I/O write
* @bus: bus pointer
* @port: port to write
* @val: value to write
* @size: number of bytes to write from @val
*
* Simply writes @size bytes of @val to @port.
*/
int ia64_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
{
int ret = 0;
switch (size) {
case 1:
outb(val, port);
break;
case 2:
outw(val, port);
break;
case 4:
outl(val, port);
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
/**
* pci_cacheline_size - determine cacheline size for PCI devices
* @dev: void
......
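
The generic ia64_pci_legacy_read()/ia64_pci_legacy_write() routines above sit behind the platform_pci_legacy_* machine-vector hooks, which the HAVE_PCI_LEGACY infrastructure uses to service userspace accesses to legacy port space. A hedged usage sketch follows; the per-bus legacy_io path and the choice of port 0x61 are assumptions for the demo.

/*
 * Usage sketch: a 1-byte pread() at offset <port> of a per-bus legacy_io
 * file is routed, via the machine vector, to a legacy port read.
 */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        const char *path = "/sys/class/pci_bus/0000:00/legacy_io"; /* assumed path */
        uint8_t val;
        int fd = open(path, O_RDONLY);

        if (fd < 0) {
                perror(path);
                return 1;
        }
        /* Read one byte from legacy port 0x61 (assumed readable for the demo). */
        if (pread(fd, &val, sizeof(val), 0x61) != 1) {
                perror("pread");
                close(fd);
                return 1;
        }
        printf("port 0x61 = 0x%02x\n", val);
        close(fd);
        return 0;
}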
......@@ -475,3 +475,67 @@ EXPORT_SYMBOL(sn_pci_alloc_consistent);
EXPORT_SYMBOL(sn_pci_free_consistent);
EXPORT_SYMBOL(sn_pci_dma_supported);
EXPORT_SYMBOL(sn_dma_mapping_error);
char *sn_pci_get_legacy_mem(struct pci_bus *bus)
{
if (!SN_PCIBUS_BUSSOFT(bus))
return ERR_PTR(-ENODEV);
return (char *)(SN_PCIBUS_BUSSOFT(bus)->bs_legacy_mem | __IA64_UNCACHED_OFFSET);
}
int sn_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
{
unsigned long addr;
int ret;
if (!SN_PCIBUS_BUSSOFT(bus))
return -ENODEV;
addr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
addr += port;
ret = ia64_sn_probe_mem(addr, (long)size, (void *)val);
if (ret == 2)
return -EINVAL;
if (ret == 1)
*val = -1;
return size;
}
int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
{
int ret = size;
unsigned long paddr;
unsigned long *addr;
if (!SN_PCIBUS_BUSSOFT(bus)) {
ret = -ENODEV;
goto out;
}
/* Put the phys addr in uncached space */
paddr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
paddr += port;
addr = (unsigned long *)paddr;
switch (size) {
case 1:
*(volatile u8 *)(addr) = (u8)(val);
break;
case 2:
*(volatile u16 *)(addr) = (u16)(val);
break;
case 4:
*(volatile u32 *)(addr) = (u32)(val);
break;
default:
ret = -EINVAL;
break;
}
out:
return ret;
}
......@@ -144,7 +144,7 @@ config AGP_I460
config AGP_HP_ZX1
tristate "HP ZX1 chipset AGP support"
depends on AGP && (IA64_HP_ZX1 || IA64_GENERIC)
depends on AGP && (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC)
help
This option gives you AGP GART support for the HP ZX1 chipset
for IA64 processors.
......
......@@ -20,6 +20,7 @@ struct scatterlist;
struct irq_desc;
struct page;
struct mm_struct;
struct pci_bus;
typedef void ia64_mv_setup_t (char **);
typedef void ia64_mv_cpu_init_t (void);
......@@ -31,6 +32,11 @@ typedef void ia64_mv_tlb_migrate_finish_t (struct mm_struct *);
typedef struct irq_desc *ia64_mv_irq_desc (unsigned int);
typedef u8 ia64_mv_irq_to_vector (unsigned int);
typedef unsigned int ia64_mv_local_vector_to_irq (u8);
typedef char *ia64_mv_pci_get_legacy_mem_t (struct pci_bus *);
typedef int ia64_mv_pci_legacy_read_t (struct pci_bus *, u16 port, u32 *val,
u8 size);
typedef int ia64_mv_pci_legacy_write_t (struct pci_bus *, u16 port, u32 val,
u8 size);
/* DMA-mapping interface: */
typedef void ia64_mv_dma_init (void);
......@@ -94,6 +100,8 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
# include <asm/machvec_dig.h>
# elif defined (CONFIG_IA64_HP_ZX1)
# include <asm/machvec_hpzx1.h>
# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB)
# include <asm/machvec_hpzx1_swiotlb.h>
# elif defined (CONFIG_IA64_SGI_SN2)
# include <asm/machvec_sn2.h>
# elif defined (CONFIG_IA64_GENERIC)
......@@ -125,6 +133,9 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
# define platform_irq_desc ia64_mv.irq_desc
# define platform_irq_to_vector ia64_mv.irq_to_vector
# define platform_local_vector_to_irq ia64_mv.local_vector_to_irq
# define platform_pci_get_legacy_mem ia64_mv.pci_get_legacy_mem
# define platform_pci_legacy_read ia64_mv.pci_legacy_read
# define platform_pci_legacy_write ia64_mv.pci_legacy_write
# define platform_inb ia64_mv.inb
# define platform_inw ia64_mv.inw
# define platform_inl ia64_mv.inl
......@@ -172,6 +183,9 @@ struct ia64_machine_vector {
ia64_mv_irq_desc *irq_desc;
ia64_mv_irq_to_vector *irq_to_vector;
ia64_mv_local_vector_to_irq *local_vector_to_irq;
ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem;
ia64_mv_pci_legacy_read_t *pci_legacy_read;
ia64_mv_pci_legacy_write_t *pci_legacy_write;
ia64_mv_inb_t *inb;
ia64_mv_inw_t *inw;
ia64_mv_inl_t *inl;
......@@ -215,6 +229,9 @@ struct ia64_machine_vector {
platform_irq_desc, \
platform_irq_to_vector, \
platform_local_vector_to_irq, \
platform_pci_get_legacy_mem, \
platform_pci_legacy_read, \
platform_pci_legacy_write, \
platform_inb, \
platform_inw, \
platform_inl, \
......@@ -330,6 +347,15 @@ extern ia64_mv_dma_supported swiotlb_dma_supported;
#ifndef platform_local_vector_to_irq
# define platform_local_vector_to_irq __ia64_local_vector_to_irq
#endif
#ifndef platform_pci_get_legacy_mem
# define platform_pci_get_legacy_mem ia64_pci_get_legacy_mem
#endif
#ifndef platform_pci_legacy_read
# define platform_pci_legacy_read ia64_pci_legacy_read
#endif
#ifndef platform_pci_legacy_write
# define platform_pci_legacy_write ia64_pci_legacy_write
#endif
#ifndef platform_inb
# define platform_inb __ia64_inb
#endif
......
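
The machvec.h changes above follow the usual machine-vector pattern: a struct of function pointers, per-hook #ifndef defaults, and platform headers that override individual entries. The standalone model below illustrates that dispatch pattern; all names in it are invented for the sketch and are not kernel symbols.

/*
 * Standalone model of machine-vector dispatch: generic defaults in a
 * function-pointer table, overridden per platform, selected at "boot".
 */
#include <stdio.h>

struct machvec {
        const char *name;
        int (*legacy_read)(unsigned short port);
};

/* Generic default, analogous to ia64_pci_legacy_read(). */
static int generic_legacy_read(unsigned short port)
{
        printf("generic inb(0x%x)\n", port);
        return 0;
}

/* Platform override, analogous to sn_pci_legacy_read(). */
static int sn2_legacy_read(unsigned short port)
{
        printf("sn2 firmware-assisted probe of port 0x%x\n", port);
        return 0;
}

static struct machvec mv_generic = { "generic", generic_legacy_read };
static struct machvec mv_sn2     = { "sn2",     sn2_legacy_read };

int main(void)
{
        struct machvec *ia64_mv = &mv_sn2;      /* chosen at boot, e.g. via machvec= */

        ia64_mv->legacy_read(0x61);             /* platform_pci_legacy_read-style indirection */

        ia64_mv = &mv_generic;
        ia64_mv->legacy_read(0x61);
        return 0;
}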
......@@ -5,6 +5,9 @@ extern ia64_mv_global_tlb_purge_t ia64_global_tlb_purge;
extern ia64_mv_irq_desc __ia64_irq_desc;
extern ia64_mv_irq_to_vector __ia64_irq_to_vector;
extern ia64_mv_local_vector_to_irq __ia64_local_vector_to_irq;
extern ia64_mv_pci_get_legacy_mem_t ia64_pci_get_legacy_mem;
extern ia64_mv_pci_legacy_read_t ia64_pci_legacy_read;
extern ia64_mv_pci_legacy_write_t ia64_pci_legacy_write;
extern ia64_mv_inb_t __ia64_inb;
extern ia64_mv_inw_t __ia64_inw;
......
......@@ -43,6 +43,9 @@ extern ia64_mv_tlb_migrate_finish_t sn_tlb_migrate_finish;
extern ia64_mv_irq_desc sn_irq_desc;
extern ia64_mv_irq_to_vector sn_irq_to_vector;
extern ia64_mv_local_vector_to_irq sn_local_vector_to_irq;
extern ia64_mv_pci_get_legacy_mem_t sn_pci_get_legacy_mem;
extern ia64_mv_pci_legacy_read_t sn_pci_legacy_read;
extern ia64_mv_pci_legacy_write_t sn_pci_legacy_write;
extern ia64_mv_inb_t __sn_inb;
extern ia64_mv_inw_t __sn_inw;
extern ia64_mv_inl_t __sn_inl;
......@@ -105,6 +108,9 @@ extern ia64_mv_dma_supported sn_dma_supported;
#define platform_irq_desc sn_irq_desc
#define platform_irq_to_vector sn_irq_to_vector
#define platform_local_vector_to_irq sn_local_vector_to_irq
#define platform_pci_get_legacy_mem sn_pci_get_legacy_mem
#define platform_pci_legacy_read sn_pci_legacy_read
#define platform_pci_legacy_write sn_pci_legacy_write
#define platform_dma_init machvec_noop
#define platform_dma_alloc_coherent sn_dma_alloc_coherent
#define platform_dma_free_coherent sn_dma_free_coherent
......
......@@ -4,7 +4,7 @@
#ifdef CONFIG_IA64_DIG
/* Max 8 Nodes */
#define NODES_SHIFT 3
#elif defined(CONFIG_IA64_HP_ZX1)
#elif defined(CONFIG_IA64_HP_ZX1) || defined(CONFIG_IA64_HP_ZX1_SWIOTLB)
/* Max 32 Nodes */
#define NODES_SHIFT 5
#elif defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
......
......@@ -85,6 +85,20 @@ extern int pcibios_prep_mwi (struct pci_dev *);
#define HAVE_PCI_MMAP
extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine);
#define HAVE_PCI_LEGACY
extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
struct vm_area_struct *vma);
extern ssize_t pci_read_legacy_io(struct kobject *kobj, char *buf, loff_t off,
size_t count);
extern ssize_t pci_write_legacy_io(struct kobject *kobj, char *buf, loff_t off,
size_t count);
extern int pci_mmap_legacy_mem(struct kobject *kobj,
struct bin_attribute *attr,
struct vm_area_struct *vma);
#define pci_get_legacy_mem platform_pci_get_legacy_mem
#define pci_legacy_read platform_pci_legacy_read
#define pci_legacy_write platform_pci_legacy_write
struct pci_window {
struct resource resource;
......
......@@ -474,6 +474,52 @@ ia64_sn_pod_mode(void)
return isrv.v0;
}
/**
* ia64_sn_probe_mem - read from memory safely
* @addr: address to probe
* @size: number of bytes to read (1,2,4,8)
* @data_ptr: address to store value read by probe (-1 returned if probe fails)
*
* Call into the SAL to do a memory read. If the read generates a machine
* check, this routine will recover gracefully and return -1 to the caller.
* @addr is usually a kernel virtual address in uncached space (i.e. the
* address starts with 0xc), but if called in physical mode, @addr should
* be a physical address.
*
* Return values:
* 0 - probe successful
* 1 - probe failed (generated MCA)
* 2 - Bad arg
* <0 - PAL error
*/
static inline u64
ia64_sn_probe_mem(long addr, long size, void *data_ptr)
{
struct ia64_sal_retval isrv;
SAL_CALL(isrv, SN_SAL_PROBE, addr, size, 0, 0, 0, 0, 0);
if (data_ptr) {
switch (size) {
case 1:
*((u8*)data_ptr) = (u8)isrv.v0;
break;
case 2:
*((u16*)data_ptr) = (u16)isrv.v0;
break;
case 4:
*((u32*)data_ptr) = (u32)isrv.v0;
break;
case 8:
*((u64*)data_ptr) = (u64)isrv.v0;
break;
default:
isrv.status = 2;
}
}
return isrv.status;
}
/*
* Retrieve the system serial number as an ASCII string.
*/
......