Commit e57dc450 authored by David Mosberger

ia64: Patch by Alex Williamson: forward port of the 2.4 sba_iommu.

parent 39b18937
/* /*
** IA64 System Bus Adapter (SBA) I/O MMU manager ** IA64 System Bus Adapter (SBA) I/O MMU manager
** **
** (c) Copyright 2002 Alex Williamson ** (c) Copyright 2002-2003 Alex Williamson
** (c) Copyright 2002 Grant Grundler ** (c) Copyright 2002-2003 Grant Grundler
** (c) Copyright 2002 Hewlett-Packard Company ** (c) Copyright 2002-2003 Hewlett-Packard Company
** **
** Portions (c) 2000 Grant Grundler (from parisc I/O MMU code) ** Portions (c) 2000 Grant Grundler (from parisc I/O MMU code)
** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code) ** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code)
...@@ -30,17 +30,38 @@ ...@@ -30,17 +30,38 @@
#include <linux/string.h> #include <linux/string.h>
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/proc_fs.h> #include <linux/proc_fs.h>
#include <linux/acpi.h>
#include <linux/efi.h> #include <linux/efi.h>
#include <asm/delay.h> /* ia64_get_itc() */ #include <asm/delay.h> /* ia64_get_itc() */
#include <asm/io.h> #include <asm/io.h>
#include <asm/page.h> /* PAGE_OFFSET */ #include <asm/page.h> /* PAGE_OFFSET */
#include <asm/dma.h>
#include <asm/system.h> /* wmb() */
#include <asm/acpi-ext.h>
#define DRIVER_NAME "SBA" #define PFX "IOC: "
/*
** This option allows cards capable of 64bit DMA to bypass the IOMMU. If
** not defined, all DMA will be 32bit and go through the TLB.
*/
#define ALLOW_IOV_BYPASS #define ALLOW_IOV_BYPASS
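For context, a minimal standalone sketch of the bypass test this option enables in sba_map_single() further down: a device may skip the IOMMU only if its DMA mask covers every bit of the buffer's physical address. The helper name can_bypass() and the sample addresses are illustrative, not from the patch.

#include <stdio.h>
#include <stdint.h>

/* Bypass is allowed iff no physical-address bit falls outside the device's DMA mask. */
static int can_bypass(uint64_t phys_addr, uint64_t dma_mask)
{
	return (phys_addr & ~dma_mask) == 0;
}

int main(void)
{
	uint64_t buf = 0x1ffff0000ULL;		/* physical address above 4GB */

	printf("32-bit device: %d\n", can_bypass(buf, 0xffffffffULL));	/* 0: must go through the IOMMU */
	printf("64-bit device: %d\n", can_bypass(buf, ~0ULL));		/* 1: may use the physical address directly */
	return 0;
}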
/*
** If a device prefetches beyond the end of a valid pdir entry, it will cause
** a hard failure, i.e. an MCA.  Version 3.0 and later of the zx1 LBA should
** disconnect on 4k boundaries and prevent such issues.  If the device is
** particularly aggressive, this option will keep the entire pdir valid such
** that prefetching will hit a valid address. This could severely impact
** error containment, and is therefore off by default. The page that is
** used for spill-over is poisoned, so that should help debugging somewhat.
*/
#undef FULL_VALID_PDIR
#define ENABLE_MARK_CLEAN #define ENABLE_MARK_CLEAN
/* /*
** The number of debug flags is a clue - this code is fragile. ** The number of debug flags is a clue - this code is fragile.
*/ */
...@@ -52,6 +73,10 @@ ...@@ -52,6 +73,10 @@
#undef DEBUG_LARGE_SG_ENTRIES #undef DEBUG_LARGE_SG_ENTRIES
#undef DEBUG_BYPASS #undef DEBUG_BYPASS
#if defined(FULL_VALID_PDIR) && defined(ASSERT_PDIR_SANITY)
#error FULL_VALID_PDIR and ASSERT_PDIR_SANITY are mutually exclusive
#endif
#define SBA_INLINE __inline__ #define SBA_INLINE __inline__
/* #define SBA_INLINE */ /* #define SBA_INLINE */
...@@ -96,10 +121,6 @@ ...@@ -96,10 +121,6 @@
#define ASSERT(expr) #define ASSERT(expr)
#endif #endif
#define KB(x) ((x) * 1024)
#define MB(x) (KB (KB (x)))
#define GB(x) (MB (KB (x)))
/* /*
** The number of pdir entries to "free" before issueing ** The number of pdir entries to "free" before issueing
** a read to PCOM register to flush out PCOM writes. ** a read to PCOM register to flush out PCOM writes.
...@@ -109,30 +130,23 @@ ...@@ -109,30 +130,23 @@
*/ */
#define DELAYED_RESOURCE_CNT 16 #define DELAYED_RESOURCE_CNT 16
#define DEFAULT_DMA_HINT_REG(d) 0 #define DEFAULT_DMA_HINT_REG 0
#define ZX1_FUNC_ID_VALUE ((PCI_DEVICE_ID_HP_ZX1_SBA << 16) | PCI_VENDOR_ID_HP)
#define ZX1_MC_ID ((PCI_DEVICE_ID_HP_ZX1_MC << 16) | PCI_VENDOR_ID_HP)
#define SBA_FUNC_ID 0x0000 /* function id */ #define ZX1_IOC_ID ((PCI_DEVICE_ID_HP_ZX1_IOC << 16) | PCI_VENDOR_ID_HP)
#define SBA_FCLASS 0x0008 /* function class, bist, header, rev... */ #define REO_IOC_ID ((PCI_DEVICE_ID_HP_REO_IOC << 16) | PCI_VENDOR_ID_HP)
#define SBA_FUNC_SIZE 0x10000 /* SBA configuration function reg set */ #define ZX1_IOC_OFFSET 0x1000 /* ACPI reports SBA, we want IOC */
unsigned int __initdata zx1_func_offsets[] = {0x1000, 0x4000, 0x8000,
0x9000, 0xa000, -1};
#define SBA_IOC_OFFSET 0x1000
#define MAX_IOC 1 /* we only have 1 for now*/
#define IOC_FUNC_ID 0x000
#define IOC_FCLASS 0x008 /* function class, bist, header, rev... */
#define IOC_IBASE 0x300 /* IO TLB */ #define IOC_IBASE 0x300 /* IO TLB */
#define IOC_IMASK 0x308 #define IOC_IMASK 0x308
#define IOC_PCOM 0x310 #define IOC_PCOM 0x310
#define IOC_TCNFG 0x318 #define IOC_TCNFG 0x318
#define IOC_PDIR_BASE 0x320 #define IOC_PDIR_BASE 0x320
#define IOC_IOVA_SPACE_BASE 0x40000000 /* IOVA ranges start at 1GB */ /* AGP GART driver looks for this */
#define ZX1_SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL
/* /*
** IOC supports 4/8/16/64KB page sizes (see TCNFG register) ** IOC supports 4/8/16/64KB page sizes (see TCNFG register)
...@@ -152,7 +166,7 @@ unsigned int __initdata zx1_func_offsets[] = {0x1000, 0x4000, 0x8000, ...@@ -152,7 +166,7 @@ unsigned int __initdata zx1_func_offsets[] = {0x1000, 0x4000, 0x8000,
#define IOVP_MASK PAGE_MASK #define IOVP_MASK PAGE_MASK
struct ioc { struct ioc {
unsigned long ioc_hpa; /* I/O MMU base address */ void *ioc_hpa; /* I/O MMU base address */
char *res_map; /* resource map, bit == pdir entry */ char *res_map; /* resource map, bit == pdir entry */
u64 *pdir_base; /* physical base address */ u64 *pdir_base; /* physical base address */
unsigned long ibase; /* pdir IOV Space base */ unsigned long ibase; /* pdir IOV Space base */
...@@ -193,37 +207,32 @@ struct ioc { ...@@ -193,37 +207,32 @@ struct ioc {
#endif #endif
#endif #endif
/* STUFF We don't need in performance path */ /* Stuff we don't need in performance path */
struct ioc *next; /* list of IOC's in system */
acpi_handle handle; /* for multiple IOC's */
const char *name;
unsigned int func_id;
unsigned int rev; /* HW revision of chip */
u32 iov_size;
unsigned int pdir_size; /* in bytes, determined by IOV Space size */ unsigned int pdir_size; /* in bytes, determined by IOV Space size */
struct pci_dev *sac_only_dev;
}; };
struct sba_device { static struct ioc *ioc_list;
struct sba_device *next; /* list of SBA's in system */
const char *name;
unsigned long sba_hpa; /* base address */
spinlock_t sba_lock;
unsigned int flags; /* state/functionality enabled */
unsigned int hw_rev; /* HW revision of chip */
unsigned int num_ioc; /* number of on-board IOC's */
struct ioc ioc[MAX_IOC];
};
static struct sba_device *sba_list;
static int sba_count;
static int reserve_sba_gart = 1; static int reserve_sba_gart = 1;
static struct pci_dev sac_only_dev;
#define sba_sg_address(sg) (page_address((sg)->page) + (sg)->offset) #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
#define sba_sg_len(sg) (sg->length) #define sba_sg_address(sg) (page_address((sg)->page) + (sg)->offset)
#define sba_sg_iova(sg) (sg->dma_address) #else
#define sba_sg_iova_len(sg) (sg->dma_length) #define sba_sg_address(sg) ((sg)->address ? (sg)->address : \
page_address((sg)->page) + (sg)->offset)
#endif
#ifdef FULL_VALID_PDIR
static u64 prefetch_spill_page;
#endif
/* REVISIT - fix me for multiple SBAs/IOCs */ #define GET_IOC(dev) ((struct ioc *) PCI_CONTROLLER(dev)->iommu)
#define GET_IOC(dev) (sba_list->ioc)
#define SBA_SET_AGP(sba_dev) (sba_dev->flags |= 0x1)
#define SBA_GET_AGP(sba_dev) (sba_dev->flags & 0x1)
/* /*
** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up ** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
...@@ -232,10 +241,7 @@ static struct pci_dev sac_only_dev; ...@@ -232,10 +241,7 @@ static struct pci_dev sac_only_dev;
** rather than the HW. I/O MMU allocation alogorithms can be ** rather than the HW. I/O MMU allocation alogorithms can be
** faster with smaller size is (to some degree). ** faster with smaller size is (to some degree).
*/ */
#define DMA_CHUNK_SIZE (BITS_PER_LONG*IOVP_SIZE) #define DMA_CHUNK_SIZE (BITS_PER_LONG*PAGE_SIZE)
/* Looks nice and keeps the compiler happy */
#define SBA_DEV(d) ((struct sba_device *) (d))
#define ROUNDUP(x,y) ((x + ((y)-1)) & ~((y)-1)) #define ROUNDUP(x,y) ((x + ((y)-1)) & ~((y)-1))
...@@ -255,7 +261,7 @@ static struct pci_dev sac_only_dev; ...@@ -255,7 +261,7 @@ static struct pci_dev sac_only_dev;
* sba_dump_tlb - debugging only - print IOMMU operating parameters * sba_dump_tlb - debugging only - print IOMMU operating parameters
* @hpa: base address of the IOMMU * @hpa: base address of the IOMMU
* *
* Print the size/location of the IO MMU Pdir. * Print the size/location of the IO MMU PDIR.
*/ */
static void static void
sba_dump_tlb(char *hpa) sba_dump_tlb(char *hpa)
...@@ -273,19 +279,19 @@ sba_dump_tlb(char *hpa) ...@@ -273,19 +279,19 @@ sba_dump_tlb(char *hpa)
#ifdef ASSERT_PDIR_SANITY #ifdef ASSERT_PDIR_SANITY
/** /**
* sba_dump_pdir_entry - debugging only - print one IOMMU Pdir entry * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry
* @ioc: IO MMU structure which owns the pdir we are interested in. * @ioc: IO MMU structure which owns the pdir we are interested in.
* @msg: text to print on the output line. * @msg: text to print on the output line.
* @pide: pdir index. * @pide: pdir index.
* *
* Print one entry of the IO MMU Pdir in human readable form. * Print one entry of the IO MMU PDIR in human readable form.
*/ */
static void static void
sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide) sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
{ {
/* start printing from lowest pde in rval */ /* start printing from lowest pde in rval */
u64 *ptr = &(ioc->pdir_base[pide & ~(BITS_PER_LONG - 1)]); u64 *ptr = &ioc->pdir_base[pide & ~(BITS_PER_LONG - 1)];
unsigned long *rptr = (unsigned long *) &(ioc->res_map[(pide >>3) & ~(sizeof(unsigned long) - 1)]); unsigned long *rptr = (unsigned long *) &ioc->res_map[(pide >>3) & -sizeof(unsigned long)];
uint rcnt; uint rcnt;
printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n", printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
...@@ -359,17 +365,18 @@ sba_check_pdir(struct ioc *ioc, char *msg) ...@@ -359,17 +365,18 @@ sba_check_pdir(struct ioc *ioc, char *msg)
* print the SG list so we can verify it's correct by hand. * print the SG list so we can verify it's correct by hand.
*/ */
static void static void
sba_dump_sg(struct ioc *ioc, struct scatterlist *startsg, int nents) sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
{ {
while (nents-- > 0) { while (nents-- > 0) {
printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents, printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents,
(unsigned long) sba_sg_iova(startsg), sba_sg_iova_len(startsg), startsg->dma_address, startsg->dma_length,
sba_sg_address(startsg)); sba_sg_address(startsg));
startsg++; startsg++;
} }
} }
static void static void
sba_check_sg(struct ioc *ioc, struct scatterlist *startsg, int nents) sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
{ {
struct scatterlist *the_sg = startsg; struct scatterlist *the_sg = startsg;
int the_nents = nents; int the_nents = nents;
...@@ -398,9 +405,11 @@ sba_check_sg(struct ioc *ioc, struct scatterlist *startsg, int nents) ...@@ -398,9 +405,11 @@ sba_check_sg(struct ioc *ioc, struct scatterlist *startsg, int nents)
#define PAGES_PER_RANGE 1 /* could increase this to 4 or 8 if needed */ #define PAGES_PER_RANGE 1 /* could increase this to 4 or 8 if needed */
/* Convert from IOVP to IOVA and vice versa. */ /* Convert from IOVP to IOVA and vice versa. */
#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((ioc->ibase) | (iovp) | (offset) | ((hint_reg)<<(ioc->hint_shift_pdir))) #define SBA_IOVA(ioc,iovp,offset,hint_reg) ((ioc->ibase) | (iovp) | (offset) | \
((hint_reg)<<(ioc->hint_shift_pdir)))
#define SBA_IOVP(ioc,iova) (((iova) & ioc->hint_mask_pdir) & ~(ioc->ibase)) #define SBA_IOVP(ioc,iova) (((iova) & ioc->hint_mask_pdir) & ~(ioc->ibase))
/* FIXME : review these macros to verify correctness and usage */
#define PDIR_INDEX(iovp) ((iovp)>>IOVP_SHIFT) #define PDIR_INDEX(iovp) ((iovp)>>IOVP_SHIFT)
#define RESMAP_MASK(n) ~(~0UL << (n)) #define RESMAP_MASK(n) ~(~0UL << (n))
...@@ -408,7 +417,7 @@ sba_check_sg(struct ioc *ioc, struct scatterlist *startsg, int nents) ...@@ -408,7 +417,7 @@ sba_check_sg(struct ioc *ioc, struct scatterlist *startsg, int nents)
/** /**
* sba_search_bitmap - find free space in IO Pdir resource bitmap * sba_search_bitmap - find free space in IO PDIR resource bitmap
* @ioc: IO MMU structure which owns the pdir we are interested in. * @ioc: IO MMU structure which owns the pdir we are interested in.
* @bits_wanted: number of entries we need. * @bits_wanted: number of entries we need.
* *
...@@ -445,7 +454,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted) ...@@ -445,7 +454,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted)
** We need the alignment to invalidate I/O TLB using ** We need the alignment to invalidate I/O TLB using
** SBA HW features in the unmap path. ** SBA HW features in the unmap path.
*/ */
unsigned long o = 1UL << get_order(bits_wanted << IOVP_SHIFT); unsigned long o = 1 << get_order(bits_wanted << PAGE_SHIFT);
uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o); uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o);
unsigned long mask; unsigned long mask;
...@@ -491,7 +500,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted) ...@@ -491,7 +500,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted)
/** /**
* sba_alloc_range - find free bits and mark them in IO Pdir resource bitmap * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
* @ioc: IO MMU structure which owns the pdir we are interested in. * @ioc: IO MMU structure which owns the pdir we are interested in.
* @size: number of bytes to create a mapping for * @size: number of bytes to create a mapping for
* *
...@@ -520,7 +529,8 @@ sba_alloc_range(struct ioc *ioc, size_t size) ...@@ -520,7 +529,8 @@ sba_alloc_range(struct ioc *ioc, size_t size)
if (pide >= (ioc->res_size << 3)) { if (pide >= (ioc->res_size << 3)) {
pide = sba_search_bitmap(ioc, pages_needed); pide = sba_search_bitmap(ioc, pages_needed);
if (pide >= (ioc->res_size << 3)) if (pide >= (ioc->res_size << 3))
panic(__FILE__ ": I/O MMU @ %lx is out of mapping resources\n", ioc->ioc_hpa); panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
ioc->ioc_hpa);
} }
#ifdef ASSERT_PDIR_SANITY #ifdef ASSERT_PDIR_SANITY
...@@ -553,7 +563,7 @@ sba_alloc_range(struct ioc *ioc, size_t size) ...@@ -553,7 +563,7 @@ sba_alloc_range(struct ioc *ioc, size_t size)
/** /**
* sba_free_range - unmark bits in IO Pdir resource bitmap * sba_free_range - unmark bits in IO PDIR resource bitmap
* @ioc: IO MMU structure which owns the pdir we are interested in. * @ioc: IO MMU structure which owns the pdir we are interested in.
* @iova: IO virtual address which was previously allocated. * @iova: IO virtual address which was previously allocated.
* @size: number of bytes to create a mapping for * @size: number of bytes to create a mapping for
...@@ -600,14 +610,14 @@ sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size) ...@@ -600,14 +610,14 @@ sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
/** /**
* sba_io_pdir_entry - fill in one IO Pdir entry * sba_io_pdir_entry - fill in one IO PDIR entry
* @pdir_ptr: pointer to IO Pdir entry * @pdir_ptr: pointer to IO PDIR entry
* @phys_page: phys CPU address of page to map * @vba: Virtual CPU address of buffer to map
* *
* SBA Mapping Routine * SBA Mapping Routine
* *
* Given a physical address (phys_page, arg1) sba_io_pdir_entry() * Given a virtual address (vba, arg1) sba_io_pdir_entry()
* loads the I/O Pdir entry pointed to by pdir_ptr (arg0). * loads the I/O PDIR entry pointed to by pdir_ptr (arg0).
* Each IO Pdir entry consists of 8 bytes as shown below * Each IO Pdir entry consists of 8 bytes as shown below
* (LSB == bit 0): * (LSB == bit 0):
* *
...@@ -619,12 +629,21 @@ sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size) ...@@ -619,12 +629,21 @@ sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
* V == Valid Bit * V == Valid Bit
* U == Unused * U == Unused
* PPN == Physical Page Number * PPN == Physical Page Number
*
* The physical address fields are filled with the results of virt_to_phys()
* on the vba.
*/ */
#define SBA_VALID_MASK 0x80000000000000FFULL #if 1
#define sba_io_pdir_entry(pdir_ptr, phys_page) *pdir_ptr = (phys_page | SBA_VALID_MASK) #define sba_io_pdir_entry(pdir_ptr, vba) *pdir_ptr = ((vba & ~0xE000000000000FFFULL) \
#define sba_io_page(pdir_ptr) (*pdir_ptr & ~SBA_VALID_MASK) | 0x8000000000000000ULL)
#else
void SBA_INLINE
sba_io_pdir_entry(u64 *pdir_ptr, unsigned long vba)
{
*pdir_ptr = ((vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL);
}
#endif
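As a worked reference (editorial sketch, not part of the patch), the entry construction both variants above perform reduces to clearing the low 12 offset bits and the top region bits of the identity-mapped virtual address and setting the valid bit; the sample address below is arbitrary.

#include <stdio.h>
#include <stdint.h>

/* Build a PDIR entry the way the macro form above does: keep the page-number
 * bits of the virtual address, clear the low 12 offset bits and the top three
 * region bits, then set bit 63 (the valid bit). */
static uint64_t pdir_entry(uint64_t vba)
{
	return (vba & ~0xE000000000000FFFULL) | 0x8000000000000000ULL;
}

int main(void)
{
	uint64_t vba = 0xE000000004321ABCULL;	/* arbitrary region-7 style address */
	uint64_t ent = pdir_entry(vba);

	printf("entry     = 0x%016llx\n", (unsigned long long) ent);		/* 0x8000000004321000 */
	printf("valid bit = %llu\n", (unsigned long long) (ent >> 63));		/* 1 */
	printf("page off  = 0x%llx\n", (unsigned long long) (ent & 0xFFFULL));	/* 0 */
	return 0;
}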
#ifdef ENABLE_MARK_CLEAN #ifdef ENABLE_MARK_CLEAN
/** /**
...@@ -640,7 +659,7 @@ mark_clean (void *addr, size_t size) ...@@ -640,7 +659,7 @@ mark_clean (void *addr, size_t size)
pg_addr = PAGE_ALIGN((unsigned long) addr); pg_addr = PAGE_ALIGN((unsigned long) addr);
end = (unsigned long) addr + size; end = (unsigned long) addr + size;
while (pg_addr + PAGE_SIZE <= end) { while (pg_addr + PAGE_SIZE <= end) {
struct page *page = virt_to_page(pg_addr); struct page *page = virt_to_page((void *)pg_addr);
set_bit(PG_arch_1, &page->flags); set_bit(PG_arch_1, &page->flags);
pg_addr += PAGE_SIZE; pg_addr += PAGE_SIZE;
} }
...@@ -648,12 +667,12 @@ mark_clean (void *addr, size_t size) ...@@ -648,12 +667,12 @@ mark_clean (void *addr, size_t size)
#endif #endif
/** /**
* sba_mark_invalid - invalidate one or more IO Pdir entries * sba_mark_invalid - invalidate one or more IO PDIR entries
* @ioc: IO MMU structure which owns the pdir we are interested in. * @ioc: IO MMU structure which owns the pdir we are interested in.
* @iova: IO Virtual Address mapped earlier * @iova: IO Virtual Address mapped earlier
* @byte_cnt: number of bytes this mapping covers. * @byte_cnt: number of bytes this mapping covers.
* *
* Marking the IO Pdir entry(ies) as Invalid and invalidate * Marking the IO PDIR entry(ies) as Invalid and invalidate
* corresponding IO TLB entry. The PCOM (Purge Command Register) * corresponding IO TLB entry. The PCOM (Purge Command Register)
* is to purge stale entries in the IO TLB when unmapping entries. * is to purge stale entries in the IO TLB when unmapping entries.
* *
...@@ -687,15 +706,24 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt) ...@@ -687,15 +706,24 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
iovp |= IOVP_SHIFT; /* set "size" field for PCOM */ iovp |= IOVP_SHIFT; /* set "size" field for PCOM */
#ifndef FULL_VALID_PDIR
/* /*
** clear I/O Pdir entry "valid" bit ** clear I/O PDIR entry "valid" bit
** Do NOT clear the rest - save it for debugging. ** Do NOT clear the rest - save it for debugging.
** We should only clear bits that have previously ** We should only clear bits that have previously
** been enabled. ** been enabled.
*/ */
ioc->pdir_base[off] &= ~SBA_VALID_MASK; ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
#else
/*
** If we want to maintain the PDIR as valid, put in
** the spill page so that prefetching devices won't
** cause a hard failure.
*/
ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
#endif
} else { } else {
u32 t = get_order(byte_cnt) + IOVP_SHIFT; u32 t = get_order(byte_cnt) + PAGE_SHIFT;
iovp |= t; iovp |= t;
ASSERT(t <= 31); /* 2GB! Max value of "size" field */ ASSERT(t <= 31); /* 2GB! Max value of "size" field */
...@@ -703,14 +731,18 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt) ...@@ -703,14 +731,18 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
do { do {
/* verify this pdir entry is enabled */ /* verify this pdir entry is enabled */
ASSERT(ioc->pdir_base[off] >> 63); ASSERT(ioc->pdir_base[off] >> 63);
#ifndef FULL_VALID_PDIR
/* clear I/O Pdir entry "valid" bit first */ /* clear I/O Pdir entry "valid" bit first */
ioc->pdir_base[off] &= ~SBA_VALID_MASK; ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
#else
ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
#endif
off++; off++;
byte_cnt -= IOVP_SIZE; byte_cnt -= IOVP_SIZE;
} while (byte_cnt > 0); } while (byte_cnt > 0);
} }
WRITE_REG(iovp, ioc->ioc_hpa+IOC_PCOM); WRITE_REG(iovp | ioc->ibase, ioc->ioc_hpa+IOC_PCOM);
} }
/** /**
...@@ -726,18 +758,15 @@ dma_addr_t ...@@ -726,18 +758,15 @@ dma_addr_t
sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction) sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
{ {
struct ioc *ioc; struct ioc *ioc;
unsigned long flags; unsigned long flags;
dma_addr_t iovp; dma_addr_t iovp;
dma_addr_t offset; dma_addr_t offset;
u64 *pdir_start; u64 *pdir_start;
int pide; int pide;
#ifdef ALLOW_IOV_BYPASS #ifdef ALLOW_IOV_BYPASS
unsigned long phys_addr = virt_to_phys(addr); unsigned long pci_addr = virt_to_phys(addr);
#endif #endif
if (!sba_list)
panic("sba_map_single: no SBA found!\n");
ioc = GET_IOC(dev); ioc = GET_IOC(dev);
ASSERT(ioc); ASSERT(ioc);
...@@ -745,7 +774,7 @@ sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction) ...@@ -745,7 +774,7 @@ sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
/* /*
** Check if the PCI device can DMA to ptr... if so, just return ptr ** Check if the PCI device can DMA to ptr... if so, just return ptr
*/ */
if ((phys_addr & ~dev->dma_mask) == 0) { if ((pci_addr & ~dev->dma_mask) == 0) {
/* /*
** Device is bit capable of DMA'ing to the buffer... ** Device is bit capable of DMA'ing to the buffer...
** just return the PCI address of ptr ** just return the PCI address of ptr
...@@ -756,8 +785,8 @@ sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction) ...@@ -756,8 +785,8 @@ sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
spin_unlock_irqrestore(&ioc->res_lock, flags); spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif #endif
DBG_BYPASS("sba_map_single() bypass mask/addr: 0x%lx/0x%lx\n", DBG_BYPASS("sba_map_single() bypass mask/addr: 0x%lx/0x%lx\n",
dev->dma_mask, phys_addr); dev->dma_mask, pci_addr);
return phys_addr; return pci_addr;
} }
#endif #endif
...@@ -790,8 +819,7 @@ sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction) ...@@ -790,8 +819,7 @@ sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
while (size > 0) { while (size > 0) {
ASSERT(((u8 *)pdir_start)[7] == 0); /* verify availability */ ASSERT(((u8 *)pdir_start)[7] == 0); /* verify availability */
sba_io_pdir_entry(pdir_start, (unsigned long) addr);
sba_io_pdir_entry(pdir_start, virt_to_phys(addr));
DBG_RUN(" pdir 0x%p %lx\n", pdir_start, *pdir_start); DBG_RUN(" pdir 0x%p %lx\n", pdir_start, *pdir_start);
...@@ -799,12 +827,15 @@ sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction) ...@@ -799,12 +827,15 @@ sba_map_single(struct pci_dev *dev, void *addr, size_t size, int direction)
size -= IOVP_SIZE; size -= IOVP_SIZE;
pdir_start++; pdir_start++;
} }
/* force pdir update */
wmb();
/* form complete address */ /* form complete address */
#ifdef ASSERT_PDIR_SANITY #ifdef ASSERT_PDIR_SANITY
sba_check_pdir(ioc,"Check after sba_map_single()"); sba_check_pdir(ioc,"Check after sba_map_single()");
#endif #endif
spin_unlock_irqrestore(&ioc->res_lock, flags); spin_unlock_irqrestore(&ioc->res_lock, flags);
return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG(direction)); return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG);
} }
/** /**
...@@ -823,12 +854,9 @@ void sba_unmap_single(struct pci_dev *dev, dma_addr_t iova, size_t size, ...@@ -823,12 +854,9 @@ void sba_unmap_single(struct pci_dev *dev, dma_addr_t iova, size_t size,
#if DELAYED_RESOURCE_CNT > 0 #if DELAYED_RESOURCE_CNT > 0
struct sba_dma_pair *d; struct sba_dma_pair *d;
#endif #endif
unsigned long flags; unsigned long flags;
dma_addr_t offset; dma_addr_t offset;
if (!sba_list)
panic("sba_map_single: no SBA found!\n");
ioc = GET_IOC(dev); ioc = GET_IOC(dev);
ASSERT(ioc); ASSERT(ioc);
...@@ -861,29 +889,6 @@ void sba_unmap_single(struct pci_dev *dev, dma_addr_t iova, size_t size, ...@@ -861,29 +889,6 @@ void sba_unmap_single(struct pci_dev *dev, dma_addr_t iova, size_t size,
size += offset; size += offset;
size = ROUNDUP(size, IOVP_SIZE); size = ROUNDUP(size, IOVP_SIZE);
#ifdef ENABLE_MARK_CLEAN
/*
** Don't need to hold the spinlock while telling VM pages are "clean".
** The pages are "busy" in the resource map until we mark them free.
** But tell VM pages are clean *before* releasing the resource
** in order to avoid race conditions.
*/
if (direction == PCI_DMA_FROMDEVICE) {
u32 iovp = (u32) SBA_IOVP(ioc,iova);
unsigned int pide = PDIR_INDEX(iovp);
u64 *pdirp = &(ioc->pdir_base[pide]);
size_t byte_cnt = size;
void *addr;
do {
addr = phys_to_virt(sba_io_page(pdirp));
mark_clean(addr, min(byte_cnt, IOVP_SIZE));
pdirp++;
byte_cnt -= IOVP_SIZE;
} while (byte_cnt > 0);
}
#endif
spin_lock_irqsave(&ioc->res_lock, flags); spin_lock_irqsave(&ioc->res_lock, flags);
#ifdef CONFIG_PROC_FS #ifdef CONFIG_PROC_FS
ioc->usingle_calls++; ioc->usingle_calls++;
...@@ -909,7 +914,40 @@ void sba_unmap_single(struct pci_dev *dev, dma_addr_t iova, size_t size, ...@@ -909,7 +914,40 @@ void sba_unmap_single(struct pci_dev *dev, dma_addr_t iova, size_t size,
sba_free_range(ioc, iova, size); sba_free_range(ioc, iova, size);
READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */ READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
#endif /* DELAYED_RESOURCE_CNT == 0 */ #endif /* DELAYED_RESOURCE_CNT == 0 */
#ifdef ENABLE_MARK_CLEAN
if (direction == PCI_DMA_FROMDEVICE) {
u32 iovp = (u32) SBA_IOVP(ioc,iova);
int off = PDIR_INDEX(iovp);
void *addr;
if (size <= IOVP_SIZE) {
addr = phys_to_virt(ioc->pdir_base[off] &
~0xE000000000000FFFULL);
mark_clean(addr, size);
} else {
size_t byte_cnt = size;
do {
addr = phys_to_virt(ioc->pdir_base[off] &
~0xE000000000000FFFULL);
mark_clean(addr, min(byte_cnt, IOVP_SIZE));
off++;
byte_cnt -= IOVP_SIZE;
} while (byte_cnt > 0);
}
}
#endif
spin_unlock_irqrestore(&ioc->res_lock, flags); spin_unlock_irqrestore(&ioc->res_lock, flags);
/* XXX REVISIT for 2.5 Linux - need syncdma for zero-copy support.
** For Astro based systems this isn't a big deal WRT performance.
** As long as 2.4 kernels copyin/copyout data from/to userspace,
** we don't need the syncdma. The issue here is I/O MMU cachelines
** are *not* coherent in all cases. May be hwrev dependent.
** Need to investigate more.
asm volatile("syncdma");
*/
} }
...@@ -924,6 +962,7 @@ void sba_unmap_single(struct pci_dev *dev, dma_addr_t iova, size_t size, ...@@ -924,6 +962,7 @@ void sba_unmap_single(struct pci_dev *dev, dma_addr_t iova, size_t size,
void * void *
sba_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle) sba_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle)
{ {
struct ioc *ioc;
void *ret; void *ret;
if (!hwdev) { if (!hwdev) {
...@@ -941,7 +980,8 @@ sba_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle) ...@@ -941,7 +980,8 @@ sba_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle)
* than dma_mask from the device, this needs to be * than dma_mask from the device, this needs to be
* updated. * updated.
*/ */
*dma_handle = sba_map_single(&sac_only_dev, ret, size, 0); ioc = GET_IOC(hwdev);
*dma_handle = sba_map_single(ioc->sac_only_dev, ret, size, 0);
} }
return ret; return ret;
...@@ -965,109 +1005,238 @@ void sba_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, ...@@ -965,109 +1005,238 @@ void sba_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr,
} }
/*
** Since 0 is a valid pdir_base index value, can't use that
** to determine if a value is valid or not. Use a flag to indicate
** the SG list entry contains a valid pdir index.
*/
#define PIDE_FLAG 0x1UL
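A tiny standalone sketch (editorial, not part of the patch) of the round trip this flag makes possible: sba_coalesce_chunks() below stores PIDE_FLAG | (pide << IOVP_SHIFT) | offset in dma_address, and sba_fill_pdir() strips the flag again. A 4KB I/O page and an even byte offset are assumed so the flag bit cannot collide with the offset bits.

#include <stdio.h>

#define IOVP_SHIFT	12		/* assume 4KB I/O pages */
#define IOVP_MASK	(~((1UL << IOVP_SHIFT) - 1))
#define PIDE_FLAG	0x1UL

int main(void)
{
	unsigned long pide = 42, offset = 0x1a8;	/* offset kept even, see above */

	/* what sba_coalesce_chunks() leaves in sg->dma_address */
	unsigned long encoded = PIDE_FLAG | (pide << IOVP_SHIFT) | offset;

	/* what sba_fill_pdir() recovers */
	unsigned long decoded = encoded & ~PIDE_FLAG;

	printf("pide=%lu offset=0x%lx\n",
	       decoded >> IOVP_SHIFT, decoded & ~IOVP_MASK);	/* pide=42 offset=0x1a8 */
	return 0;
}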
#ifdef DEBUG_LARGE_SG_ENTRIES #ifdef DEBUG_LARGE_SG_ENTRIES
int dump_run_sg = 0; int dump_run_sg = 0;
#endif #endif
#define SG_ENT_VIRT_PAGE(sg) page_address((sg)->page)
#define SG_ENT_PHYS_PAGE(SG) virt_to_phys(SG_ENT_VIRT_PAGE(SG)) /**
* sba_fill_pdir - write allocated SG entries into IO PDIR
* @ioc: IO MMU structure which owns the pdir we are interested in.
* @startsg: list of IOVA/size pairs
* @nents: number of entries in startsg list
*
* Take preprocessed SG list and write corresponding entries
* in the IO PDIR.
*/
static SBA_INLINE int
sba_fill_pdir(
struct ioc *ioc,
struct scatterlist *startsg,
int nents)
{
struct scatterlist *dma_sg = startsg; /* pointer to current DMA */
int n_mappings = 0;
u64 *pdirp = 0;
unsigned long dma_offset = 0;
dma_sg--;
while (nents-- > 0) {
int cnt = startsg->dma_length;
startsg->dma_length = 0;
#ifdef DEBUG_LARGE_SG_ENTRIES
if (dump_run_sg)
printk(" %2d : %08lx/%05x %p\n",
nents, startsg->dma_address, cnt,
sba_sg_address(startsg));
#else
DBG_RUN_SG(" %d : %08lx/%05x %p\n",
nents, startsg->dma_address, cnt,
sba_sg_address(startsg));
#endif
/*
** Look for the start of a new DMA stream
*/
if (startsg->dma_address & PIDE_FLAG) {
u32 pide = startsg->dma_address & ~PIDE_FLAG;
dma_offset = (unsigned long) pide & ~IOVP_MASK;
startsg->dma_address = 0;
dma_sg++;
dma_sg->dma_address = pide | ioc->ibase;
pdirp = &(ioc->pdir_base[pide >> IOVP_SHIFT]);
n_mappings++;
}
/*
** Look for a VCONTIG chunk
*/
if (cnt) {
unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
ASSERT(pdirp);
/* Since multiple Vcontig blocks could make up
** one DMA stream, *add* cnt to dma_len.
*/
dma_sg->dma_length += cnt;
cnt += dma_offset;
dma_offset=0; /* only want offset on first chunk */
cnt = ROUNDUP(cnt, IOVP_SIZE);
#ifdef CONFIG_PROC_FS
ioc->msg_pages += cnt >> IOVP_SHIFT;
#endif
do {
sba_io_pdir_entry(pdirp, vaddr);
vaddr += IOVP_SIZE;
cnt -= IOVP_SIZE;
pdirp++;
} while (cnt > 0);
}
startsg++;
}
/* force pdir update */
wmb();
#ifdef DEBUG_LARGE_SG_ENTRIES
dump_run_sg = 0;
#endif
return(n_mappings);
}
/*
** Two address ranges are DMA contiguous *iff* "end of prev" and
** "start of next" are both on a page boundary.
**
** (shift left is a quick trick to mask off upper bits)
*/
#define DMA_CONTIG(__X, __Y) \
(((((unsigned long) __X) | ((unsigned long) __Y)) << (BITS_PER_LONG - PAGE_SHIFT)) == 0UL)
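A worked example (standalone, not from the patch) of the shift trick used by DMA_CONTIG above: ORing the two addresses and shifting left by BITS_PER_LONG - PAGE_SHIFT discards everything except the page-offset bits, so the result is zero exactly when both addresses are page aligned. 4KB pages and 64-bit longs are assumed.

#include <stdio.h>

#define PAGE_SHIFT	12	/* assume 4KB pages */
#define BITS_PER_LONG	64	/* assume a 64-bit machine */

#define DMA_CONTIG(__X, __Y) \
	(((((unsigned long) __X) | ((unsigned long) __Y)) << (BITS_PER_LONG - PAGE_SHIFT)) == 0UL)

int main(void)
{
	/* end of previous chunk vs. start of next chunk */
	printf("%d\n", DMA_CONTIG(0x10003000UL, 0x20000000UL));	/* 1: both on a page boundary */
	printf("%d\n", DMA_CONTIG(0x10003000UL, 0x20000080UL));	/* 0: next chunk starts mid-page */
	return 0;
}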
/** /**
* sba_coalesce_chunks - preprocess the SG list * sba_coalesce_chunks - preprocess the SG list
* @ioc: IO MMU structure which owns the pdir we are interested in. * @ioc: IO MMU structure which owns the pdir we are interested in.
* @startsg: input=SG list output=DMA addr/len pairs filled in * @startsg: list of IOVA/size pairs
* @nents: number of entries in startsg list * @nents: number of entries in startsg list
* @direction: R/W or both.
* *
* Walk the SG list and determine where the breaks are in the DMA stream. * First pass is to walk the SG list and determine where the breaks are
* Allocate IO Pdir resources and fill them in separate loop. * in the DMA stream. Allocates PDIR entries but does not fill them.
* Returns the number of DMA streams used for output IOVA list. * Returns the number of DMA chunks.
* Note each DMA stream can consume multiple IO Pdir entries.
* *
* Code is written assuming some coalescing is possible. * Doing the fill separate from the coalescing/allocation keeps the
* code simpler. Future enhancement could make one pass through
* the sglist do both.
*/ */
static SBA_INLINE int static SBA_INLINE int
sba_coalesce_chunks(struct ioc *ioc, struct scatterlist *startsg, sba_coalesce_chunks( struct ioc *ioc,
int nents, int direction) struct scatterlist *startsg,
int nents)
{ {
struct scatterlist *dma_sg = startsg; /* return array */ struct scatterlist *vcontig_sg; /* VCONTIG chunk head */
unsigned long vcontig_len; /* len of VCONTIG chunk */
unsigned long vcontig_end;
struct scatterlist *dma_sg; /* next DMA stream head */
unsigned long dma_offset, dma_len; /* start/len of DMA stream */
int n_mappings = 0; int n_mappings = 0;
ASSERT(nents > 1); while (nents > 0) {
unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
do {
unsigned int dma_cnt = 1; /* number of pages in DMA stream */
unsigned int pide; /* index into IO Pdir array */
u64 *pdirp; /* pointer into IO Pdir array */
unsigned long dma_offset, dma_len; /* cumulative DMA stream */
/* /*
** Prepare for first/next DMA stream ** Prepare for first/next DMA stream
*/ */
dma_len = sba_sg_len(startsg); dma_sg = vcontig_sg = startsg;
dma_offset = (unsigned long) sba_sg_address(startsg); dma_len = vcontig_len = vcontig_end = startsg->length;
startsg++; vcontig_end += vaddr;
nents--; dma_offset = vaddr & ~IOVP_MASK;
/* PARANOID: clear entries */
startsg->dma_address = startsg->dma_length = 0;
/* /*
** We want to know how many entries can be coalesced ** This loop terminates one iteration "early" since
** before trying to allocate IO Pdir space. ** it's always looking one "ahead".
** IOVAs can then be allocated "naturally" aligned
** to take advantage of the block IO TLB flush.
*/ */
while (nents) { while (--nents > 0) {
unsigned long end_offset = dma_offset + dma_len; unsigned long vaddr; /* tmp */
/* prev entry must end on a page boundary */ startsg++;
if (end_offset & IOVP_MASK)
break;
/* next entry start on a page boundary? */ /* PARANOID */
if (startsg->offset) startsg->dma_address = startsg->dma_length = 0;
break;
/* catch brokenness in SCSI layer */
ASSERT(startsg->length <= DMA_CHUNK_SIZE);
/* /*
** make sure current dma stream won't exceed ** First make sure current dma stream won't
** DMA_CHUNK_SIZE if coalescing entries. ** exceed DMA_CHUNK_SIZE if we coalesce the
** next entry.
*/ */
if (((end_offset + startsg->length + ~IOVP_MASK) if (((dma_len + dma_offset + startsg->length + ~IOVP_MASK) & IOVP_MASK)
& IOVP_MASK) > DMA_CHUNK_SIZE)
> DMA_CHUNK_SIZE)
break; break;
dma_len += sba_sg_len(startsg); /*
startsg++; ** Then look for virtually contiguous blocks.
nents--; **
dma_cnt++; ** append the next transaction?
} */
vaddr = (unsigned long) sba_sg_address(startsg);
if (vcontig_end == vaddr)
{
vcontig_len += startsg->length;
vcontig_end += startsg->length;
dma_len += startsg->length;
continue;
}
ASSERT(dma_len <= DMA_CHUNK_SIZE); #ifdef DEBUG_LARGE_SG_ENTRIES
dump_run_sg = (vcontig_len > IOVP_SIZE);
#endif
/* allocate IO Pdir resource. /*
** returns index into (u64) IO Pdir array. ** Not virtually contiguous.
** IOVA is formed from this. ** Terminate prev chunk.
*/ ** Start a new chunk.
pide = sba_alloc_range(ioc, dma_cnt << IOVP_SHIFT); **
pdirp = &(ioc->pdir_base[pide]); ** Once we start a new VCONTIG chunk, dma_offset
** can't change. And we need the offset from the first
** chunk - not the last one. Ergo, each successive chunk
** must start on a page boundary and dovetail
** with its predecessor.
*/
vcontig_sg->dma_length = vcontig_len;
/* fill_pdir: write stream into IO Pdir */ vcontig_sg = startsg;
while (dma_cnt--) { vcontig_len = startsg->length;
sba_io_pdir_entry(pdirp, SG_ENT_PHYS_PAGE(startsg));
startsg++;
pdirp++;
}
/* "output" IOVA */ /*
sba_sg_iova(dma_sg) = SBA_IOVA(ioc, ** 3) do the entries end/start on page boundaries?
((dma_addr_t) pide << IOVP_SHIFT), ** Don't update vcontig_end until we've checked.
dma_offset, */
DEFAULT_DMA_HINT_REG(direction)); if (DMA_CONTIG(vcontig_end, vaddr))
sba_sg_iova_len(dma_sg) = dma_len; {
vcontig_end = vcontig_len + vaddr;
dma_len += vcontig_len;
continue;
} else {
break;
}
}
dma_sg++; /*
** End of DMA Stream
** Terminate last VCONTIG block.
** Allocate space for DMA stream.
*/
vcontig_sg->dma_length = vcontig_len;
dma_len = (dma_len + dma_offset + ~IOVP_MASK) & IOVP_MASK;
ASSERT(dma_len <= DMA_CHUNK_SIZE);
dma_sg->dma_address = (dma_addr_t) (PIDE_FLAG
| (sba_alloc_range(ioc, dma_len) << IOVP_SHIFT)
| dma_offset);
n_mappings++; n_mappings++;
} while (nents); }
return n_mappings; return n_mappings;
} }
...@@ -1075,60 +1244,52 @@ sba_coalesce_chunks(struct ioc *ioc, struct scatterlist *startsg, ...@@ -1075,60 +1244,52 @@ sba_coalesce_chunks(struct ioc *ioc, struct scatterlist *startsg,
/** /**
* sba_map_sg - map Scatter/Gather list * sba_map_sg - map Scatter/Gather list
* @dev: instance of PCI device owned by the driver that's asking. * @dev: instance of PCI owned by the driver that's asking.
* @sglist: array of buffer/length pairs * @sglist: array of buffer/length pairs
* @nents: number of entries in list * @nents: number of entries in list
* @direction: R/W or both. * @direction: R/W or both.
* *
* See Documentation/DMA-mapping.txt * See Documentation/DMA-mapping.txt
*/ */
int sba_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents, int sba_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents, int direction)
int direction)
{ {
struct ioc *ioc; struct ioc *ioc;
int filled = 0; int coalesced, filled = 0;
unsigned long flags; unsigned long flags;
#ifdef ALLOW_IOV_BYPASS #ifdef ALLOW_IOV_BYPASS
struct scatterlist *sg; struct scatterlist *sg;
#endif #endif
DBG_RUN_SG("%s() START %d entries, 0x%p,0x%x\n", __FUNCTION__, nents, DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents);
sba_sg_address(sglist), sba_sg_len(sglist));
if (!sba_list)
panic("sba_map_single: no SBA found!\n");
ioc = GET_IOC(dev); ioc = GET_IOC(dev);
ASSERT(ioc); ASSERT(ioc);
#ifdef ALLOW_IOV_BYPASS #ifdef ALLOW_IOV_BYPASS
if (dev->dma_mask >= ioc->dma_mask) { if (dev->dma_mask >= ioc->dma_mask) {
for (sg = sglist ; filled < nents ; filled++, sg++) { for (sg = sglist ; filled < nents ; filled++, sg++){
sba_sg_iova(sg) = virt_to_phys(sba_sg_address(sg)); sg->dma_length = sg->length;
sba_sg_iova_len(sg) = sba_sg_len(sg); sg->dma_address = virt_to_phys(sba_sg_address(sg));
} }
#ifdef CONFIG_PROC_FS #ifdef CONFIG_PROC_FS
spin_lock_irqsave(&ioc->res_lock, flags); spin_lock_irqsave(&ioc->res_lock, flags);
ioc->msg_bypass++; ioc->msg_bypass++;
spin_unlock_irqrestore(&ioc->res_lock, flags); spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif #endif
DBG_RUN_SG("%s() DONE %d mappings bypassed\n", __FUNCTION__, filled);
return filled; return filled;
} }
#endif #endif
/* Fast path single entry scatterlists. */ /* Fast path single entry scatterlists. */
if (nents == 1) { if (nents == 1) {
sba_sg_iova(sglist) = sba_map_single(dev, sglist->dma_length = sglist->length;
(void *) sba_sg_iova(sglist), sglist->dma_address = sba_map_single(dev,
sba_sg_len(sglist), direction); sba_sg_address(sglist),
sba_sg_iova_len(sglist) = sba_sg_len(sglist); sglist->length, direction);
#ifdef CONFIG_PROC_FS #ifdef CONFIG_PROC_FS
/* /*
** Should probably do some stats counting, but trying to ** Should probably do some stats counting, but trying to
** be precise quickly starts wasting CPU time. ** be precise quickly starts wasting CPU time.
*/ */
#endif #endif
DBG_RUN_SG("%s() DONE 1 mapping\n", __FUNCTION__);
return 1; return 1;
} }
...@@ -1145,11 +1306,26 @@ int sba_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents, ...@@ -1145,11 +1306,26 @@ int sba_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
#ifdef CONFIG_PROC_FS #ifdef CONFIG_PROC_FS
ioc->msg_calls++; ioc->msg_calls++;
#endif #endif
/*
** First coalesce the chunks and allocate I/O pdir space
**
** If this is one DMA stream, we can properly map using the
** correct virtual address associated with each DMA page.
** w/o this association, we wouldn't have coherent DMA!
** Access to the virtual address is what forces a two pass algorithm.
*/
coalesced = sba_coalesce_chunks(ioc, sglist, nents);
/* /*
** coalesce and program the I/O Pdir ** Program the I/O Pdir
**
** map the virtual addresses to the I/O Pdir
** o dma_address will contain the pdir index
** o dma_len will contain the number of bytes to map
** o address contains the virtual address.
*/ */
filled = sba_coalesce_chunks(ioc, sglist, nents, direction); filled = sba_fill_pdir(ioc, sglist, nents);
#ifdef ASSERT_PDIR_SANITY #ifdef ASSERT_PDIR_SANITY
if (sba_check_pdir(ioc,"Check after sba_map_sg()")) if (sba_check_pdir(ioc,"Check after sba_map_sg()"))
...@@ -1161,6 +1337,7 @@ int sba_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents, ...@@ -1161,6 +1337,7 @@ int sba_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
spin_unlock_irqrestore(&ioc->res_lock, flags); spin_unlock_irqrestore(&ioc->res_lock, flags);
ASSERT(coalesced == filled);
DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled); DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled);
return filled; return filled;
...@@ -1184,11 +1361,8 @@ void sba_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents, ...@@ -1184,11 +1361,8 @@ void sba_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
unsigned long flags; unsigned long flags;
#endif #endif
DBG_RUN_SG("%s() START %d entries, 0x%p,0x%x\n", DBG_RUN_SG("%s() START %d entries, %p,%x\n",
__FUNCTION__, nents, sba_sg_address(sglist), sba_sg_len(sglist)); __FUNCTION__, nents, sba_sg_address(sglist), sglist->length);
if (!sba_list)
panic("sba_map_single: no SBA found!\n");
ioc = GET_IOC(dev); ioc = GET_IOC(dev);
ASSERT(ioc); ASSERT(ioc);
...@@ -1203,10 +1377,10 @@ void sba_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents, ...@@ -1203,10 +1377,10 @@ void sba_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
spin_unlock_irqrestore(&ioc->res_lock, flags); spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif #endif
while (sba_sg_len(sglist) && nents--) { while (nents && sglist->dma_length) {
sba_unmap_single(dev, (dma_addr_t)sba_sg_iova(sglist), sba_unmap_single(dev, sglist->dma_address,
sba_sg_iova_len(sglist), direction); sglist->dma_length, direction);
#ifdef CONFIG_PROC_FS #ifdef CONFIG_PROC_FS
/* /*
** This leaves inconsistent data in the stats, but we can't ** This leaves inconsistent data in the stats, but we can't
...@@ -1214,9 +1388,11 @@ void sba_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents, ...@@ -1214,9 +1388,11 @@ void sba_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
** were coalesced to a single entry. The stats are fun, ** were coalesced to a single entry. The stats are fun,
** but speed is more important. ** but speed is more important.
*/ */
ioc->usg_pages += (((u64)sba_sg_iova(sglist) & ~IOVP_MASK) + sba_sg_len(sglist) + IOVP_SIZE - 1) >> IOVP_SHIFT; ioc->usg_pages += ((sglist->dma_address & ~IOVP_MASK) + sglist->dma_length
+ IOVP_SIZE - 1) >> PAGE_SHIFT;
#endif #endif
++sglist; sglist++;
nents--;
} }
DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__, nents); DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__, nents);
...@@ -1229,87 +1405,90 @@ void sba_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents, ...@@ -1229,87 +1405,90 @@ void sba_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents,
} }
unsigned long
sba_dma_address (struct scatterlist *sg)
{
return ((unsigned long)sba_sg_iova(sg));
}
int
sba_dma_supported (struct pci_dev *dev, u64 mask)
{
return 1;
}
/************************************************************** /**************************************************************
* *
* Initialization and claim * Initialization and claim
* *
***************************************************************/ ***************************************************************/
static void __init
static void ioc_iova_init(struct ioc *ioc)
sba_ioc_init(struct sba_device *sba_dev, struct ioc *ioc, int ioc_num)
{ {
u32 iova_space_size, iova_space_mask; u32 iova_space_mask;
void * pdir_base; int iov_order, tcnfg;
int pdir_size, iov_order, tcnfg; int agp_found = 0;
struct pci_dev *device;
#ifdef FULL_VALID_PDIR
unsigned long index;
#endif
/* /*
** Firmware programs the maximum IOV space size into the imask reg ** Firmware programs the base and size of a "safe IOVA space"
** (one that doesn't overlap memory or LMMIO space) in the
** IBASE and IMASK registers.
*/ */
iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1; ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1UL;
ioc->iov_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1;
if (ioc->ibase == 0) {
if (((unsigned long) ioc->ioc_hpa & 0x3000UL) == 0x2000)
ioc->ibase = 0xc0000000;
else
ioc->ibase = 0x80000000;
printk("WARNING: IBASE is zero; setting to 0x%lx\n", ioc->ibase);
}
if (ioc->ibase < 0xfed00000UL && ioc->ibase + ioc->iov_size >= 0xfee00000UL) {
printk("WARNING: IOV space overlaps local config and interrupt message, "
"truncating\n");
ioc->iov_size /= 2;
}
/* /*
** iov_order is always based on a 1GB IOVA space since we want to ** iov_order is always based on a 1GB IOVA space since we want to
** turn on the other half for AGP GART. ** turn on the other half for AGP GART.
*/ */
iov_order = get_order(iova_space_size >> (IOVP_SHIFT-PAGE_SHIFT)); iov_order = get_order(ioc->iov_size >> (IOVP_SHIFT - PAGE_SHIFT));
ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64); ioc->pdir_size = (ioc->iov_size / IOVP_SIZE) * sizeof(u64);
DBG_INIT("%s() hpa 0x%lx IOV %dMB (%d bits) PDIR size 0x%0x\n", DBG_INIT("%s() hpa 0x%lx IOV %dMB (%d bits) PDIR size 0x%0x\n",
__FUNCTION__, ioc->ioc_hpa, iova_space_size>>20, __FUNCTION__, ioc->ioc_hpa, ioc->iov_size >> 20,
iov_order + PAGE_SHIFT, ioc->pdir_size); iov_order + PAGE_SHIFT, ioc->pdir_size);
/* XXX DMA HINTs not used */ /* FIXME : DMA HINTs not used */
ioc->hint_shift_pdir = iov_order + PAGE_SHIFT; ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT)); ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));
ioc->pdir_base = pdir_base = ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
(void *) __get_free_pages(GFP_KERNEL, get_order(pdir_size)); get_order(ioc->pdir_size));
if (NULL == pdir_base) if (!ioc->pdir_base)
{ panic(PFX "Couldn't allocate I/O Page Table\n");
panic(__FILE__ ":%s() could not allocate I/O Page Table\n", __FUNCTION__);
} memset(ioc->pdir_base, 0, ioc->pdir_size);
memset(pdir_base, 0, pdir_size);
DBG_INIT("%s() pdir %p size %x hint_shift_pdir %x hint_mask_pdir %lx\n", DBG_INIT("%s() pdir %p size %x hint_shift_pdir %x hint_mask_pdir %lx\n",
__FUNCTION__, pdir_base, pdir_size, __FUNCTION__, ioc->pdir_base, ioc->pdir_size,
ioc->hint_shift_pdir, ioc->hint_mask_pdir); ioc->hint_shift_pdir, ioc->hint_mask_pdir);
ASSERT((((unsigned long) pdir_base) & PAGE_MASK) == (unsigned long) pdir_base); ASSERT((((unsigned long) ioc->pdir_base) & PAGE_MASK) == (unsigned long) ioc->pdir_base);
WRITE_REG(virt_to_phys(pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE); WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
DBG_INIT(" base %p\n", pdir_base); DBG_INIT(" base %p\n", ioc->pdir_base);
/* build IMASK for IOC and Elroy */ /* build IMASK for IOC and Elroy */
iova_space_mask = 0xffffffff; iova_space_mask = 0xffffffff;
iova_space_mask <<= (iov_order + IOVP_SHIFT); iova_space_mask <<= (iov_order + PAGE_SHIFT);
ioc->imask = iova_space_mask;
ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & 0xFFFFFFFEUL;
ioc->imask = iova_space_mask; /* save it */
DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n", DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n",
__FUNCTION__, ioc->ibase, ioc->imask); __FUNCTION__, ioc->ibase, ioc->imask);
/* /*
** XXX DMA HINT registers are programmed with default hint ** FIXME: Hint registers are programmed with default hint
** values during boot, so hints should be sane even if we ** values during boot, so hints should be sane even if we
** can't reprogram them the way drivers want. ** can't reprogram them the way drivers want.
*/ */
WRITE_REG(ioc->imask, ioc->ioc_hpa + IOC_IMASK);
WRITE_REG(ioc->imask, ioc->ioc_hpa+IOC_IMASK);
/* /*
** Setting the upper bits makes checking for bypass addresses ** Setting the upper bits makes checking for bypass addresses
...@@ -1317,34 +1496,30 @@ sba_ioc_init(struct sba_device *sba_dev, struct ioc *ioc, int ioc_num) ...@@ -1317,34 +1496,30 @@ sba_ioc_init(struct sba_device *sba_dev, struct ioc *ioc, int ioc_num)
*/ */
ioc->imask |= 0xFFFFFFFF00000000UL; ioc->imask |= 0xFFFFFFFF00000000UL;
/* Set I/O Pdir page size to system page size */ /* Set I/O PDIR Page size to system page size */
switch (IOVP_SHIFT) { switch (PAGE_SHIFT) {
case 12: /* 4K */ case 12: tcnfg = 0; break; /* 4K */
tcnfg = 0; case 13: tcnfg = 1; break; /* 8K */
break; case 14: tcnfg = 2; break; /* 16K */
case 13: /* 8K */ case 16: tcnfg = 3; break; /* 64K */
tcnfg = 1; default:
break; panic(PFX "Unsupported system page size %d",
case 14: /* 16K */ 1 << PAGE_SHIFT);
tcnfg = 2;
break;
case 16: /* 64K */
tcnfg = 3;
break; break;
} }
WRITE_REG(tcnfg, ioc->ioc_hpa+IOC_TCNFG); WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);
/* /*
** Program the IOC's ibase and enable IOVA translation ** Program the IOC's ibase and enable IOVA translation
** Bit zero == enable bit. ** Bit zero == enable bit.
*/ */
WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa+IOC_IBASE); WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);
/* /*
** Clear I/O TLB of any possible entries. ** Clear I/O TLB of any possible entries.
** (Yes. This is a bit paranoid...but so what) ** (Yes. This is a bit paranoid...but so what)
*/ */
WRITE_REG(0 | 31, ioc->ioc_hpa+IOC_PCOM); WRITE_REG(ioc->ibase | (iov_order+PAGE_SHIFT), ioc->ioc_hpa + IOC_PCOM);
/* /*
** If an AGP device is present, only use half of the IOV space ** If an AGP device is present, only use half of the IOV space
...@@ -1354,213 +1529,267 @@ sba_ioc_init(struct sba_device *sba_dev, struct ioc *ioc, int ioc_num) ...@@ -1354,213 +1529,267 @@ sba_ioc_init(struct sba_device *sba_dev, struct ioc *ioc, int ioc_num)
** We program the next pdir index after we stop w/ a key for ** We program the next pdir index after we stop w/ a key for
** the GART code to handshake on. ** the GART code to handshake on.
*/ */
if (SBA_GET_AGP(sba_dev)) { pci_for_each_dev(device)
DBG_INIT("%s() AGP Device found, reserving 512MB for GART support\n", __FUNCTION__); agp_found |= pci_find_capability(device, PCI_CAP_ID_AGP);
ioc->pdir_size /= 2;
((u64 *)pdir_base)[PDIR_INDEX(iova_space_size/2)] = 0x0000badbadc0ffeeULL; if (agp_found && reserve_sba_gart) {
DBG_INIT("%s: AGP device found, reserving half of IOVA for GART support\n",
__FUNCTION__);
ioc->pdir_size /= 2;
((u64 *)ioc->pdir_base)[PDIR_INDEX(ioc->iov_size/2)] = ZX1_SBA_IOMMU_COOKIE;
}
#ifdef FULL_VALID_PDIR
/*
** Check to see if the spill page has been allocated; we don't need more than
** one across multiple SBAs.
*/
if (!prefetch_spill_page) {
char *spill_poison = "SBAIOMMU POISON";
int poison_size = 16;
void *poison_addr, *addr;
addr = (void *)__get_free_pages(GFP_KERNEL, get_order(IOVP_SIZE));
if (!addr)
panic(PFX "Couldn't allocate PDIR spill page\n");
poison_addr = addr;
for ( ; poison_addr < addr + IOVP_SIZE; poison_addr += poison_size)
memcpy(poison_addr, spill_poison, poison_size);
prefetch_spill_page = virt_to_phys(addr);
DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __FUNCTION__, prefetch_spill_page);
} }
/*
** Set all the PDIR entries valid w/ the spill page as the target
*/
for (index = 0 ; index < (ioc->pdir_size / sizeof(u64)) ; index++)
((u64 *)ioc->pdir_base)[index] = (0x80000000000000FF | prefetch_spill_page);
#endif
DBG_INIT("%s() DONE\n", __FUNCTION__);
} }
static void __init
ioc_resource_init(struct ioc *ioc)
{
spin_lock_init(&ioc->res_lock);
/* resource map size dictated by pdir_size */
ioc->res_size = ioc->pdir_size / sizeof(u64); /* entries */
ioc->res_size >>= 3; /* convert bit count to byte count */
DBG_INIT("%s() res_size 0x%x\n", __FUNCTION__, ioc->res_size);
/************************************************************************** ioc->res_map = (char *) __get_free_pages(GFP_KERNEL,
** get_order(ioc->res_size));
** SBA initialization code (HW and SW) if (!ioc->res_map)
** panic(PFX "Couldn't allocate resource map\n");
** o identify SBA chip itself
** o FIXME: initialize DMA hints for reasonable defaults
**
**************************************************************************/
static void memset(ioc->res_map, 0, ioc->res_size);
sba_hw_init(struct sba_device *sba_dev) /* next available IOVP - circular search */
{ ioc->res_hint = (unsigned long *) ioc->res_map;
int i;
int num_ioc;
u64 dma_mask;
u32 func_id;
/* #ifdef ASSERT_PDIR_SANITY
** Identify the SBA so we can set the dma_mask. We can make a virtual /* Mark first bit busy - ie no IOVA 0 */
** dma_mask of the memory subsystem such that devices not implmenting ioc->res_map[0] = 0x1;
** a full 64bit mask might still be able to bypass efficiently. ioc->pdir_base[0] = 0x8000000000000000ULL | ZX1_SBA_IOMMU_COOKIE;
*/ #endif
func_id = READ_REG(sba_dev->sba_hpa + SBA_FUNC_ID); #ifdef FULL_VALID_PDIR
/* Mark the last resource used so we don't prefetch beyond IOVA space */
ioc->res_map[ioc->res_size - 1] |= 0x80UL; /* res_map is chars */
ioc->pdir_base[(ioc->pdir_size / sizeof(u64)) - 1] = (0x80000000000000FF
| prefetch_spill_page);
#endif
if (func_id == ZX1_FUNC_ID_VALUE) { DBG_INIT("%s() res_map %x %p\n", __FUNCTION__,
dma_mask = 0xFFFFFFFFFFUL; ioc->res_size, (void *) ioc->res_map);
} else { }
dma_mask = 0xFFFFFFFFFFFFFFFFUL;
} static void __init
ioc_sac_init(struct ioc *ioc)
{
struct pci_dev *sac = NULL;
struct pci_controller *controller = NULL;
DBG_INIT("%s(): ioc->dma_mask == 0x%lx\n", __FUNCTION__, dma_mask);
/* /*
** Leaving in the multiple ioc code from parisc for the future, * pci_alloc_consistent() must return a DMA address which is
** currently there are no muli-ioc mckinley sbas * SAC (single address cycle) addressable, so allocate a
*/ * pseudo-device to enforce that.
sba_dev->ioc[0].ioc_hpa = SBA_IOC_OFFSET; */
num_ioc = 1; sac = kmalloc(sizeof(*sac), GFP_KERNEL);
if (!sac)
sba_dev->num_ioc = num_ioc; panic(PFX "Couldn't allocate struct pci_dev");
for (i = 0; i < num_ioc; i++) { memset(sac, 0, sizeof(*sac));
sba_dev->ioc[i].dma_mask = dma_mask;
sba_dev->ioc[i].ioc_hpa += sba_dev->sba_hpa; controller = kmalloc(sizeof(*controller), GFP_KERNEL);
sba_ioc_init(sba_dev, &(sba_dev->ioc[i]), i); if (!controller)
} panic(PFX "Couldn't allocate struct pci_controller");
memset(controller, 0, sizeof(*controller));
controller->iommu = ioc;
sac->sysdata = controller;
sac->dma_mask = 0xFFFFFFFFUL;
ioc->sac_only_dev = sac;
} }
static void __init
ioc_zx1_init(struct ioc *ioc)
{
	if (ioc->rev < 0x20)
		panic(PFX "IOC 2.0 or later required for IOMMU support\n");

	ioc->dma_mask = 0xFFFFFFFFFFUL;
}

typedef void (initfunc)(struct ioc *);

struct ioc_iommu {
	u32 func_id;
	char *name;
	initfunc *init;
};

static struct ioc_iommu ioc_iommu_info[] __initdata = {
	{ ZX1_IOC_ID, "zx1", ioc_zx1_init },
	{ REO_IOC_ID, "REO" },
};

static struct ioc * __init
ioc_init(u64 hpa, void *handle)
{
	struct ioc *ioc;
	struct ioc_iommu *info;

	ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
	if (!ioc)
		return NULL;
	memset(ioc, 0, sizeof(*ioc));

	ioc->next = ioc_list;
	ioc_list = ioc;

	ioc->handle = handle;
	ioc->ioc_hpa = ioremap(hpa, 0x1000);

	ioc->func_id = READ_REG(ioc->ioc_hpa + IOC_FUNC_ID);
	ioc->rev = READ_REG(ioc->ioc_hpa + IOC_FCLASS) & 0xFFUL;
	ioc->dma_mask = 0xFFFFFFFFFFFFFFFFUL;	/* conservative */

	for (info = ioc_iommu_info; info < ioc_iommu_info + ARRAY_SIZE(ioc_iommu_info); info++) {
		if (ioc->func_id == info->func_id) {
			ioc->name = info->name;
			if (info->init)
				(info->init)(ioc);
		}
	}

	if (!ioc->name)
		ioc->name = "Unknown";

	ioc_iova_init(ioc);
	ioc_resource_init(ioc);
	ioc_sac_init(ioc);

	printk(KERN_INFO PFX
		"Found %s IOC %d.%d HPA 0x%lx IOVA space %dMb at 0x%lx\n",
		ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF,
		hpa, ioc->iov_size >> 20, ioc->ibase);

	return ioc;
}
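ioc_init() is table-driven: the func_id read back from the chip selects a row of ioc_iommu_info[], which supplies the printable name and an optional per-chip fixup. A sketch of how one more IOC flavor could be wired in; SX2000_IOC_ID and ioc_sx2000_init() below are hypothetical placeholders, not part of this patch:

/* Hypothetical example only -- neither SX2000_IOC_ID nor ioc_sx2000_init()
** exists in this patch; they stand in for whatever a future chip needs.
*/
static void __init
ioc_sx2000_init(struct ioc *ioc)
{
	/* per-chip quirk, e.g. clamp the usable DMA mask */
	ioc->dma_mask = 0xFFFFFFFFFFUL;
}

static struct ioc_iommu ioc_iommu_info[] __initdata = {
	{ ZX1_IOC_ID,    "zx1",    ioc_zx1_init },
	{ REO_IOC_ID,    "REO" },
	{ SX2000_IOC_ID, "sx2000", ioc_sx2000_init },	/* hypothetical new row */
};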
/**************************************************************************
**
**   SBA initialization code (HW and SW)
**
**   o identify SBA chip itself
**   o FIXME: initialize DMA hints for reasonable defaults
**
**************************************************************************/

#ifdef CONFIG_PROC_FS
static int
sba_proc_info_one(char *buf, struct ioc *ioc)
{
	int total_pages = (int) (ioc->res_size << 3); /* 8 bits per byte */
	unsigned long i = 0, avg = 0, min, max;

	sprintf(buf, "Hewlett Packard %s IOC rev %d.%d\n",
		ioc->name, ((ioc->rev >> 4) & 0xF), (ioc->rev & 0xF));
	sprintf(buf, "%sIO PDIR size    : %d bytes (%d entries)\n",
		buf,
		(int) ((ioc->res_size << 3) * sizeof(u64)), /* 8 bits/byte */
		total_pages);

	sprintf(buf, "%sIO PDIR entries : %ld free  %ld used (%d%%)\n", buf,
		total_pages - ioc->used_pages, ioc->used_pages,
		(int) (ioc->used_pages * 100 / total_pages));

	sprintf(buf, "%sResource bitmap : %d bytes (%d pages)\n",
		buf, ioc->res_size, ioc->res_size << 3);   /* 8 bits per byte */

	min = max = ioc->avg_search[0];
	for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
		avg += ioc->avg_search[i];
		if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
		if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
	}
	avg /= SBA_SEARCH_SAMPLE;
	sprintf(buf, "%s  Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
		buf, min, avg, max);
	sprintf(buf, "%spci_map_single(): %12ld calls  %12ld pages (avg %d/1000)\n",
		buf, ioc->msingle_calls, ioc->msingle_pages,
		(int) ((ioc->msingle_pages * 1000)/ioc->msingle_calls));
#ifdef ALLOW_IOV_BYPASS
	sprintf(buf, "%spci_map_single(): %12ld bypasses\n",
		buf, ioc->msingle_bypass);
#endif

	sprintf(buf, "%spci_unmap_single: %12ld calls  %12ld pages (avg %d/1000)\n",
		buf, ioc->usingle_calls, ioc->usingle_pages,
		(int) ((ioc->usingle_pages * 1000)/ioc->usingle_calls));
#ifdef ALLOW_IOV_BYPASS
	sprintf(buf, "%spci_unmap_single: %12ld bypasses\n",
		buf, ioc->usingle_bypass);
#endif

	sprintf(buf, "%spci_map_sg()    : %12ld calls  %12ld pages (avg %d/1000)\n",
		buf, ioc->msg_calls, ioc->msg_pages,
		(int) ((ioc->msg_pages * 1000)/ioc->msg_calls));
#ifdef ALLOW_IOV_BYPASS
	sprintf(buf, "%spci_map_sg()    : %12ld bypasses\n",
		buf, ioc->msg_bypass);
#endif

	sprintf(buf, "%spci_unmap_sg()  : %12ld calls  %12ld pages (avg %d/1000)\n",
		buf, ioc->usg_calls, ioc->usg_pages,
		(int) ((ioc->usg_pages * 1000)/ioc->usg_calls));

	return strlen(buf);
}
static int
sba_proc_info(char *buf, char **start, off_t offset, int len)
{
	struct ioc *ioc;
	char *base = buf;

	for (ioc = ioc_list; ioc; ioc = ioc->next) {
		buf += sba_proc_info_one(buf, ioc);
	}

	return strlen(base);
}

static int
sba_resource_map_one(char *buf, struct ioc *ioc)
{
	unsigned int *res_ptr = (unsigned int *)ioc->res_map;
	int i;

	buf[0] = '\0';
	for(i = 0; i < (ioc->res_size / sizeof(unsigned int)); ++i, ++res_ptr) {
		if ((i & 7) == 0)
@@ -1571,129 +1800,147 @@ sba_resource_map(char *buf, char **start, off_t offset, int len)
	return strlen(buf);
}
static int
sba_resource_map(char *buf, char **start, off_t offset, int len)
{
	struct ioc *ioc;
	char *base = buf;

	for (ioc = ioc_list; ioc; ioc = ioc->next) {
		buf += sba_resource_map_one(buf, ioc);
	}

	return strlen(base);
}
static void __init
sba_proc_init(void)
{
	if (ioc_list) {
		struct proc_dir_entry * proc_mckinley_root;

		proc_mckinley_root = proc_mkdir("bus/mckinley", 0);
		create_proc_info_entry(ioc_list->name, 0, proc_mckinley_root, sba_proc_info);
		create_proc_info_entry("bitmap", 0, proc_mckinley_root, sba_resource_map);
	}
}
#endif
void
sba_connect_bus(struct pci_bus *bus)
{
	acpi_handle handle, parent;
	acpi_status status;
	struct ioc *ioc;

	if (!PCI_CONTROLLER(bus))
		panic(PFX "no sysdata on bus %d!\n", bus->number);

	if (PCI_CONTROLLER(bus)->iommu)
		return;

	handle = PCI_CONTROLLER(bus)->acpi_handle;
	if (!handle)
		return;

	/*
	 * The IOC scope encloses PCI root bridges in the ACPI
	 * namespace, so work our way out until we find an IOC we
	 * claimed previously.
	 */
	do {
		for (ioc = ioc_list; ioc; ioc = ioc->next)
			if (ioc->handle == handle) {
				PCI_CONTROLLER(bus)->iommu = ioc;
				return;
			}

		status = acpi_get_parent(handle, &parent);
		handle = parent;
	} while (ACPI_SUCCESS(status));

	printk("No IOC for PCI Bus %d in ACPI\n", bus->number);
}
static int __init
acpi_sba_ioc_add(struct acpi_device *device)
{
	struct ioc *ioc;
	acpi_status status;
	u64 hpa, length;
	struct acpi_device_info dev_info;

	/*
	 * Only SBA appears in ACPI namespace.  It encloses the PCI
	 * root bridges, and its CSR space includes the IOC function.
	 */
	status = hp_acpi_csr_space(device->handle, &hpa, &length);
	if (ACPI_FAILURE(status))
		return 1;

	status = acpi_get_object_info(device->handle, &dev_info);
	if (ACPI_FAILURE(status))
		return 1;

	if (strncmp("HWP0001", dev_info.hardware_id, 7) == 0)
		hpa += ZX1_IOC_OFFSET;

	ioc = ioc_init(hpa, device->handle);
	if (!ioc)
		return 1;

	return 0;
}

static struct acpi_driver acpi_sba_ioc_driver = {
	name:	"IOC IOMMU Driver",
	ids:	"HWP0001,HWP0004",
	ops: {
		add:	acpi_sba_ioc_add,
	},
};
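Registering acpi_sba_ioc_driver (done in sba_init() below) runs acpi_sba_ioc_add() once per HWP0001/HWP0004 device, and sba_connect_bus() then records the owning IOC in each bus's pci_controller. A minimal sketch of how per-device code could read that association back, assuming only the sysdata layout set up above; the helper name is made up for illustration:

/* Illustrative only: fetch the IOC that sba_connect_bus() attached to the
** bus this device hangs off.  "example_get_ioc" is a hypothetical name.
*/
static inline struct ioc *
example_get_ioc(struct pci_dev *dev)
{
	return (struct ioc *) PCI_CONTROLLER(dev->bus)->iommu;
}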
static int __init
sba_init(void)
{
	struct pci_bus *b;

	MAX_DMA_ADDRESS = ~0UL;

	acpi_bus_register_driver(&acpi_sba_ioc_driver);

	pci_for_each_bus(b)
		sba_connect_bus(b);

#ifdef CONFIG_PROC_FS
	sba_proc_init();
#endif

	return 0;
}

device_initcall(sba_init);
static int __init
nosbagart(char *str)
{
	reserve_sba_gart = 0;
	return 1;
}

int
sba_dma_supported (struct pci_dev *dev, u64 mask)
{
	/* make sure it's at least 32bit capable */
	return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);
}

__setup("nosbagart", nosbagart);
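sba_dma_supported() only accepts masks that cover the full 32-bit range, so a driver negotiating DMA on this platform either ends up with at least 32-bit DMA or fails. A rough, hypothetical sketch of the driver side, assuming the machine vector routes the generic pci_set_dma_mask()/pci_dma_supported() path to this routine:

/* Illustrative only: a driver probe helper negotiating its DMA mask.  The
** function name is hypothetical and the routing through the machine vector
** is assumed, not shown in this patch.
*/
static int
mydrv_setup_dma(struct pci_dev *pdev)
{
	if (pci_set_dma_mask(pdev, 0xffffffffUL))
		return -EIO;	/* anything short of 32-bit capable is refused */
	return 0;
}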
EXPORT_SYMBOL(sba_map_single);
EXPORT_SYMBOL(sba_unmap_single);
EXPORT_SYMBOL(sba_map_sg);
EXPORT_SYMBOL(sba_unmap_sg);
EXPORT_SYMBOL(sba_dma_supported);
EXPORT_SYMBOL(sba_alloc_consistent);
EXPORT_SYMBOL(sba_free_consistent);
@@ -5,5 +5,4 @@
 # Copyright (C) Alex Williamson (alex_williamson@hp.com)
 #
-obj-y := hpzx1_misc.o
 obj-$(CONFIG_IA64_GENERIC) += hpzx1_machvec.o
@@ -11,6 +11,8 @@ obj-y := acpi.o entry.o efi.o efi_stub.o gate.o ia64_ksyms.o irq.o irq_ia64.o ir
 obj-$(CONFIG_EFI_VARS) += efivars.o
 obj-$(CONFIG_FSYS) += fsys.o
 obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o
+obj-$(CONFIG_IA64_GENERIC) += acpi-ext.o
+obj-$(CONFIG_IA64_HP_ZX1) += acpi-ext.o
 obj-$(CONFIG_IA64_MCA) += mca.o mca_asm.o
 obj-$(CONFIG_IA64_PALINFO) += palinfo.o
 obj-$(CONFIG_IOSAPIC) += iosapic.o