Commit 19ea7178 authored by Linus Torvalds

Merge bk://linux-dj.bkbits.net/agpgart

into ppc970.osdl.org:/home/torvalds/v2.6/linux
parents 23139076 6750ed12
......@@ -141,16 +141,6 @@ struct agp_bridge_data {
char minor_version;
};
#define OUTREG64(mmap, addr, val) __raw_writeq((val), (mmap)+(addr))
#define OUTREG32(mmap, addr, val) __raw_writel((val), (mmap)+(addr))
#define OUTREG16(mmap, addr, val) __raw_writew((val), (mmap)+(addr))
#define OUTREG8(mmap, addr, val) __raw_writeb((val), (mmap)+(addr))
#define INREG64(mmap, addr) __raw_readq((mmap)+(addr))
#define INREG32(mmap, addr) __raw_readl((mmap)+(addr))
#define INREG16(mmap, addr) __raw_readw((mmap)+(addr))
#define INREG8(mmap, addr) __raw_readb((mmap)+(addr))
#define KB(x) ((x) * 1024)
#define MB(x) (KB (KB (x)))
#define GB(x) (MB (KB (x)))
......
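The OUTREG*/INREG* wrapper macros removed above are superseded throughout the drivers below by the kernel's standard MMIO accessors (writel()/readl() and friends), with a read-back after each write so the posted PCI write actually reaches the device before the driver continues. A minimal sketch of that read-back idiom, where regs is an ioremap()ed base and EXAMPLE_REG a hypothetical register offset:

	writel(val, regs + EXAMPLE_REG);	/* the write may be posted on the bus */
	readl(regs + EXAMPLE_REG);		/* PCI posting: read back to force it out */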
......@@ -277,6 +277,15 @@ static struct agp_device_ids ali_agp_device_ids[] __devinitdata =
.device_id = PCI_DEVICE_ID_AL_M1671,
.chipset_name = "M1671",
},
{
.device_id = PCI_DEVICE_ID_AL_M1681,
.chipset_name = "M1681",
},
{
.device_id = PCI_DEVICE_ID_AL_M1683,
.chipset_name = "M1683",
},
{ }, /* dummy final entry, always present */
};
......@@ -387,6 +396,8 @@ static struct pci_driver agp_ali_pci_driver = {
static int __init agp_ali_init(void)
{
if (agp_off)
return -EINVAL;
return pci_module_init(&agp_ali_pci_driver);
}
......
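The same guard is repeated in every backend's module init below: bail out with -EINVAL when AGP support has been disabled at boot (agp=off), before registering the PCI driver. A sketch of the shared pattern, with a hypothetical driver name:

	static int __init agp_foo_init(void)
	{
		if (agp_off)				/* AGP disabled on the command line */
			return -EINVAL;
		return pci_module_init(&agp_foo_pci_driver);
	}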
......@@ -195,6 +195,8 @@ alpha_core_agp_setup(void)
static int __init agp_alpha_core_init(void)
{
if (agp_off)
return -EINVAL;
if (alpha_mv.agp_info)
return alpha_core_agp_setup();
return -ENODEV;
......
......@@ -53,8 +53,10 @@ static int amd_create_page_map(struct amd_page_map *page_map)
}
global_cache_flush();
for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++)
for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
writel(agp_bridge->scratch_page, page_map->remapped+i);
readl(page_map->remapped+i); /* PCI Posting. */
}
return 0;
}
......@@ -167,6 +169,7 @@ static int amd_create_gatt_table(void)
for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
writel(virt_to_phys(amd_irongate_private.gatt_pages[i]->real) | 1,
page_dir.remapped+GET_PAGE_DIR_OFF(addr));
readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */
}
return 0;
......@@ -220,8 +223,8 @@ static int amd_irongate_configure(void)
amd_irongate_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096);
/* Write out the address of the gatt table */
OUTREG32(amd_irongate_private.registers, AMD_ATTBASE,
agp_bridge->gatt_bus_addr);
writel(agp_bridge->gatt_bus_addr, amd_irongate_private.registers+AMD_ATTBASE);
readl(amd_irongate_private.registers+AMD_ATTBASE); /* PCI Posting. */
/* Write the Sync register */
pci_write_config_byte(agp_bridge->dev, AMD_MODECNTL, 0x80);
......@@ -230,19 +233,19 @@ static int amd_irongate_configure(void)
pci_write_config_byte(agp_bridge->dev, AMD_MODECNTL2, 0x00);
/* Write the enable register */
enable_reg = INREG16(amd_irongate_private.registers, AMD_GARTENABLE);
enable_reg = readw(amd_irongate_private.registers+AMD_GARTENABLE);
enable_reg = (enable_reg | 0x0004);
OUTREG16(amd_irongate_private.registers, AMD_GARTENABLE, enable_reg);
writew(enable_reg, amd_irongate_private.registers+AMD_GARTENABLE);
readw(amd_irongate_private.registers+AMD_GARTENABLE); /* PCI Posting. */
/* Write out the size register */
pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp);
temp = (((temp & ~(0x0000000e)) | current_size->size_value)
| 0x00000001);
temp = (((temp & ~(0x0000000e)) | current_size->size_value) | 1);
pci_write_config_dword(agp_bridge->dev, AMD_APSIZE, temp);
/* Flush the tlb */
OUTREG32(amd_irongate_private.registers, AMD_TLBFLUSH, 0x00000001);
writel(1, amd_irongate_private.registers+AMD_TLBFLUSH);
readl(amd_irongate_private.registers+AMD_TLBFLUSH); /* PCI Posting.*/
return 0;
}
......@@ -254,9 +257,10 @@ static void amd_irongate_cleanup(void)
previous_size = A_SIZE_LVL2(agp_bridge->previous_size);
enable_reg = INREG16(amd_irongate_private.registers, AMD_GARTENABLE);
enable_reg = readw(amd_irongate_private.registers+AMD_GARTENABLE);
enable_reg = (enable_reg & ~(0x0004));
OUTREG16(amd_irongate_private.registers, AMD_GARTENABLE, enable_reg);
writew(enable_reg, amd_irongate_private.registers+AMD_GARTENABLE);
readw(amd_irongate_private.registers+AMD_GARTENABLE); /* PCI Posting. */
/* Write back the previous size and disable gart translation */
pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp);
......@@ -275,7 +279,8 @@ static void amd_irongate_cleanup(void)
static void amd_irongate_tlbflush(struct agp_memory *temp)
{
OUTREG32(amd_irongate_private.registers, AMD_TLBFLUSH, 0x00000001);
writel(1, amd_irongate_private.registers+AMD_TLBFLUSH);
readl(amd_irongate_private.registers+AMD_TLBFLUSH); /* PCI Posting. */
}
static int amd_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
......@@ -310,6 +315,7 @@ static int amd_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
cur_gatt = GET_GATT(addr);
writel(agp_generic_mask_memory(mem->memory[i], mem->type), cur_gatt+GET_GATT_OFF(addr));
readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */
}
amd_irongate_tlbflush(mem);
return 0;
......@@ -328,6 +334,7 @@ static int amd_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
cur_gatt = GET_GATT(addr);
writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */
}
amd_irongate_tlbflush(mem);
......@@ -471,6 +478,8 @@ static struct pci_driver agp_amdk7_pci_driver = {
static int __init agp_amdk7_init(void)
{
if (agp_off)
return -EINVAL;
return pci_module_init(&agp_amdk7_pci_driver);
}
......
......@@ -46,6 +46,11 @@
#define NVIDIA_X86_64_1_APBASE2 0xd8
#define NVIDIA_X86_64_1_APLIMIT2 0xdc
/* ULi K8 registers */
#define ULI_X86_64_BASE_ADDR 0x10
#define ULI_X86_64_HTT_FEA_REG 0x50
#define ULI_X86_64_ENU_SCR_REG 0x54
static int nr_garts;
static struct pci_dev * hammers[MAX_HAMMER_GARTS];
......@@ -109,6 +114,7 @@ static int amd64_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
pte |= GPTE_VALID | GPTE_COHERENT;
writel(pte, agp_bridge->gatt_table+j);
readl(agp_bridge->gatt_table+j); /* PCI Posting. */
}
amd64_tlbflush(mem);
return 0;
......@@ -355,7 +361,7 @@ static __devinit int cache_nbs (struct pci_dev *pdev, u32 cap_ptr)
int i = 0;
/* cache pci_devs of northbridges. */
while ((loop_dev = pci_find_device(PCI_VENDOR_ID_AMD, 0x1103, loop_dev))
while ((loop_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1103, loop_dev))
!= NULL) {
if (i == MAX_HAMMER_GARTS) {
printk(KERN_ERR PFX "Too many northbridges for AGP\n");
......@@ -405,6 +411,61 @@ static void __devinit amd8151_init(struct pci_dev *pdev, struct agp_bridge_data
}
}
static struct aper_size_info_32 uli_sizes[7] =
{
{256, 65536, 6, 10},
{128, 32768, 5, 9},
{64, 16384, 4, 8},
{32, 8192, 3, 7},
{16, 4096, 2, 6},
{8, 2048, 1, 4},
{4, 1024, 0, 3}
};
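Assuming the usual aper_size_info_32 layout from agp.h, each uli_sizes[] row reads {aperture size in MB, GATT entries, page order, size_value programmed into the chipset}. The first row checks out arithmetically: a 256 MB aperture split into 4 KB pages needs 256 MB / 4 KB = 65536 GATT entries, i.e. a 65536 * 4 byte = 256 KB table, which is 64 = 2^6 pages and matches the page order of 6.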
static int __devinit uli_agp_init(struct pci_dev *pdev)
{
u32 httfea,baseaddr,enuscr;
struct pci_dev *dev1;
int i;
unsigned size = amd64_fetch_size();
printk(KERN_INFO "Setting up ULi AGP. \n");
dev1 = pci_find_slot ((unsigned int)pdev->bus->number,PCI_DEVFN(0,0));
if (dev1 == NULL) {
printk(KERN_INFO PFX "Detected a ULi chipset, "
"but could not fine the secondary device.\n");
return -ENODEV;
}
for (i = 0; i < ARRAY_SIZE(uli_sizes); i++)
if (uli_sizes[i].size == size)
break;
if (i == ARRAY_SIZE(uli_sizes)) {
printk(KERN_INFO PFX "No ULi size found for %d\n", size);
return -ENODEV;
}
/* shadow x86-64 registers into ULi registers */
pci_read_config_dword (hammers[0], AMD64_GARTAPERTUREBASE, &httfea);
/* if x86-64 aperture base is beyond 4G, exit here */
if ((httfea & 0x7fff) >> (32 - 25))
return -ENODEV;
httfea = (httfea& 0x7fff) << 25;
pci_read_config_dword(pdev, ULI_X86_64_BASE_ADDR, &baseaddr);
baseaddr&= ~PCI_BASE_ADDRESS_MEM_MASK;
baseaddr|= httfea;
pci_write_config_dword(pdev, ULI_X86_64_BASE_ADDR, baseaddr);
enuscr= httfea+ (size * 1024 * 1024) - 1;
pci_write_config_dword(dev1, ULI_X86_64_HTT_FEA_REG, httfea);
pci_write_config_dword(dev1, ULI_X86_64_ENU_SCR_REG, enuscr);
return 0;
}
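uli_agp_init() mirrors the K8 northbridge aperture into the ULi bridge. Reading the register layout off the code: AMD64_GARTAPERTUREBASE keeps the aperture base in 32 MB units in its low 15 bits, so (httfea & 0x7fff) << 25 recovers the physical base address, and the (httfea & 0x7fff) >> (32 - 25) test rejects any base of 4 GB or above, which would not fit in the 32-bit ULi registers. A worked example under that reading: a register value of 0x40 gives a base of 0x40 << 25 = 0x80000000 (2 GB), and with a 64 MB aperture the ENU_SCR limit becomes 0x80000000 + 64 * 1024 * 1024 - 1 = 0x83ffffff.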
static struct aper_size_info_32 nforce3_sizes[5] =
{
{512, 131072, 7, 0x00000000 },
......@@ -513,6 +574,14 @@ static int __devinit agp_amd64_probe(struct pci_dev *pdev,
}
}
if (pdev->vendor == PCI_VENDOR_ID_AL) {
int ret = uli_agp_init(pdev);
if (ret) {
agp_put_bridge(bridge);
return ret;
}
}
pci_set_drvdata(pdev, bridge);
return agp_add_bridge(bridge);
}
......@@ -536,6 +605,15 @@ static struct pci_device_id agp_amd64_pci_table[] = {
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
/* ULi M1689 */
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_AL,
.device = PCI_DEVICE_ID_AL_M1689,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
/* VIA K8T800Pro */
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
......@@ -581,7 +659,6 @@ static struct pci_device_id agp_amd64_pci_table[] = {
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
/* NForce3 */
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
......@@ -625,6 +702,11 @@ static struct pci_driver agp_amd64_pci_driver = {
int __init agp_amd64_init(void)
{
int err = 0;
static struct pci_device_id amd64nb[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) },
{ },
};
if (agp_off)
return -EINVAL;
if (pci_module_init(&agp_amd64_pci_driver) > 0) {
......@@ -640,13 +722,13 @@ int __init agp_amd64_init(void)
}
/* First check that we have at least one AMD64 NB */
if (!pci_find_device(PCI_VENDOR_ID_AMD, 0x1103, NULL))
if (!pci_dev_present(amd64nb))
return -ENODEV;
/* Look for any AGP bridge */
dev = NULL;
err = -ENODEV;
while ((dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, dev))) {
for_each_pci_dev(dev) {
if (!pci_find_capability(dev, PCI_CAP_ID_AGP))
continue;
/* Only one bridge supported right now */
......
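The pci_find_device() scans replaced in this file (and in the generic and isochronous code further down) give way to reference-aware helpers: pci_get_device()/for_each_pci_dev() hold a reference on the device they hand back, and pci_dev_present() simply answers whether any device matching a sentinel-terminated id table exists. A minimal usage sketch, assuming dev is a struct pci_dev pointer declared earlier and the id table is hypothetical:

	static struct pci_device_id k8_nb_ids[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) },	/* AMD K8 northbridge */
		{ },						/* terminating entry */
	};

	if (!pci_dev_present(k8_nb_ids))
		return -ENODEV;			/* no such device in the system */

	for_each_pci_dev(dev) {			/* iterates with a reference held on dev */
		if (!pci_find_capability(dev, PCI_CAP_ID_AGP))
			continue;
		/* ... dev has an AGP capability ... */
	}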
......@@ -61,9 +61,6 @@ static int ati_create_page_map(ati_page_map *page_map)
SetPageReserved(virt_to_page(page_map->real));
err = map_page_into_agp(virt_to_page(page_map->real));
/* CACHE_FLUSH(); */
global_cache_flush();
page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
PAGE_SIZE);
if (page_map->remapped == NULL || err) {
......@@ -75,8 +72,10 @@ static int ati_create_page_map(ati_page_map *page_map)
/*CACHE_FLUSH();*/
global_cache_flush();
for(i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++)
for(i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
writel(agp_bridge->scratch_page, page_map->remapped+i);
readl(page_map->remapped+i); /* PCI Posting. */
}
return 0;
}
......@@ -186,7 +185,8 @@ static int ati_fetch_size(void)
static void ati_tlbflush(struct agp_memory * mem)
{
OUTREG32(ati_generic_private.registers, ATI_GART_CACHE_CNTRL, 1);
writel(1, ati_generic_private.registers+ATI_GART_CACHE_CNTRL);
readl(ati_generic_private.registers+ATI_GART_CACHE_CNTRL); /* PCI Posting. */
}
static void ati_cleanup(void)
......@@ -230,15 +230,16 @@ static int ati_configure(void)
agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
printk(KERN_INFO PFX "IGP320 gart_bus_addr: %x\n", agp_bridge.gart_bus_addr);
*/
OUTREG32(ati_generic_private.registers, ATI_GART_FEATURE_ID, 0x60000);
writel(0x60000, ati_generic_private.registers+ATI_GART_FEATURE_ID);
readl(ati_generic_private.registers+ATI_GART_FEATURE_ID); /* PCI Posting.*/
/* SIGNALED_SYSTEM_ERROR @ NB_STATUS */
pci_read_config_dword(agp_bridge->dev, 4, &temp);
pci_write_config_dword(agp_bridge->dev, 4, temp | (1<<14));
/* Write out the address of the gatt table */
OUTREG32(ati_generic_private.registers, ATI_GART_BASE,
agp_bridge->gatt_bus_addr);
writel(agp_bridge->gatt_bus_addr, ati_generic_private.registers+ATI_GART_BASE);
readl(ati_generic_private.registers+ATI_GART_BASE); /* PCI Posting. */
return 0;
}
......@@ -291,6 +292,7 @@ static int ati_insert_memory(struct agp_memory * mem,
addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
cur_gatt = GET_GATT(addr);
writel(agp_bridge->driver->mask_memory(mem->memory[i], mem->type), cur_gatt+GET_GATT_OFF(addr));
readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */
}
agp_bridge->driver->tlb_flush(mem);
return 0;
......@@ -310,6 +312,7 @@ static int ati_remove_memory(struct agp_memory * mem, off_t pg_start,
addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
cur_gatt = GET_GATT(addr);
writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */
}
agp_bridge->driver->tlb_flush(mem);
......@@ -371,6 +374,7 @@ static int ati_create_gatt_table(void)
for(i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
writel(virt_to_bus(ati_generic_private.gatt_pages[i]->real) | 1,
page_dir.remapped+GET_PAGE_DIR_OFF(addr));
readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */
}
return 0;
......@@ -525,6 +529,8 @@ static struct pci_driver agp_ati_pci_driver = {
static int __init agp_ati_init(void)
{
if (agp_off)
return -EINVAL;
return pci_module_init(&agp_ati_pci_driver);
}
......
......@@ -375,7 +375,7 @@ static int __devinit agp_efficeon_probe(struct pci_dev *pdev,
if (!r->start && r->end) {
if(pci_assign_resource(pdev, 0)) {
printk(KERN_ERR PFX "could not assign resource 0\n");
return (-ENODEV);
return -ENODEV;
}
}
......@@ -386,7 +386,7 @@ static int __devinit agp_efficeon_probe(struct pci_dev *pdev,
*/
if (pci_enable_device(pdev)) {
printk(KERN_ERR PFX "Unable to Enable PCI device\n");
return (-ENODEV);
return -ENODEV;
}
/* Fill in the mode register */
......@@ -441,6 +441,9 @@ static int __init agp_efficeon_init(void)
{
static int agp_initialised=0;
if (agp_off)
return -EINVAL;
if (agp_initialised == 1)
return 0;
agp_initialised=1;
......
......@@ -35,7 +35,10 @@
#include <linux/pm.h>
#include <linux/agp_backend.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include "agp.h"
__u32 *agp_gatt_table;
......@@ -47,6 +50,26 @@ int agp_memory_reserved;
*/
EXPORT_SYMBOL_GPL(agp_memory_reserved);
#if defined(CONFIG_X86)
int map_page_into_agp(struct page *page)
{
int i;
i = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
global_flush_tlb();
return i;
}
EXPORT_SYMBOL_GPL(map_page_into_agp);
int unmap_page_from_agp(struct page *page)
{
int i;
i = change_page_attr(page, 1, PAGE_KERNEL);
global_flush_tlb();
return i;
}
EXPORT_SYMBOL_GPL(unmap_page_from_agp);
#endif
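The new out-of-line map_page_into_agp()/unmap_page_from_agp() above (exported for the backends, replacing the header macros changed at the end of this diff) switch a page between cached and uncached kernel mappings around its life as a GATT page. A minimal usage sketch, where table is a hypothetical CPU-side pointer to a freshly allocated GATT page:

	int err;
	struct page *page = virt_to_page(table);

	err = map_page_into_agp(page);		/* change_page_attr() to uncached + global_flush_tlb() */
	if (err)
		return err;
	/* ... the chipset walks this page as part of the GATT ... */
	unmap_page_from_agp(page);		/* restore the cached PAGE_KERNEL mapping */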
/*
* Generic routines for handling agp_memory structures -
* They use the basic page allocation routines to do the brunt of the work.
......@@ -181,8 +204,7 @@ struct agp_memory *agp_allocate_memory(size_t page_count, u32 type)
agp_free_memory(new);
return NULL;
}
new->memory[i] =
agp_bridge->driver->mask_memory(virt_to_phys(addr), type);
new->memory[i] = virt_to_phys(addr);
new->page_count++;
}
......@@ -507,7 +529,7 @@ u32 agp_collect_device_status(u32 mode, u32 cmd)
u32 tmp;
u32 agp3;
while ((device = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, device)) != NULL) {
for_each_pci_dev(device) {
cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
if (!cap_ptr)
continue;
......@@ -551,7 +573,7 @@ void agp_device_command(u32 command, int agp_v3)
if (agp_v3)
mode *= 4;
while ((device = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, device)) != NULL) {
for_each_pci_dev(device) {
u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
if (!agp)
continue;
......@@ -737,8 +759,10 @@ int agp_generic_create_gatt_table(void)
agp_bridge->gatt_bus_addr = virt_to_phys(agp_bridge->gatt_table_real);
/* AK: bogus, should encode addresses > 4GB */
for (i = 0; i < num_entries; i++)
for (i = 0; i < num_entries; i++) {
writel(agp_bridge->scratch_page, agp_bridge->gatt_table+i);
readl(agp_bridge->gatt_table+i); /* PCI Posting. */
}
return 0;
}
......@@ -854,8 +878,10 @@ int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
mem->is_flushed = TRUE;
}
for (i = 0, j = pg_start; i < mem->page_count; i++, j++)
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
writel(agp_bridge->driver->mask_memory(mem->memory[i], mem->type), agp_bridge->gatt_table+j);
readl(agp_bridge->gatt_table+j); /* PCI Posting. */
}
agp_bridge->driver->tlb_flush(mem);
return 0;
......@@ -873,9 +899,12 @@ int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
}
/* AK: bogus, should encode addresses > 4GB */
for (i = pg_start; i < (mem->page_count + pg_start); i++)
for (i = pg_start; i < (mem->page_count + pg_start); i++) {
writel(agp_bridge->scratch_page, agp_bridge->gatt_table+i);
readl(agp_bridge->gatt_table+i); /* PCI Posting. */
}
global_cache_flush();
agp_bridge->driver->tlb_flush(mem);
return 0;
}
......@@ -958,21 +987,15 @@ void agp_enable(u32 mode)
EXPORT_SYMBOL(agp_enable);
#ifdef CONFIG_SMP
static void ipi_handler(void *null)
{
flush_agp_cache();
}
#endif
void global_cache_flush(void)
{
#ifdef CONFIG_SMP
if (on_each_cpu(ipi_handler, NULL, 1, 1) != 0)
panic(PFX "timed out waiting for the other CPUs!\n");
#else
flush_agp_cache();
#endif
}
EXPORT_SYMBOL(global_cache_flush);
......
......@@ -88,7 +88,7 @@ static int __init hp_zx1_ioc_shared(void)
* - IOVA space is 1Gb in size
* - first 512Mb is IOMMU, second 512Mb is GART
*/
hp->io_tlb_ps = INREG64(hp->ioc_regs, HP_ZX1_TCNFG);
hp->io_tlb_ps = readq(hp->ioc_regs+HP_ZX1_TCNFG);
switch (hp->io_tlb_ps) {
case 0: hp->io_tlb_shift = 12; break;
case 1: hp->io_tlb_shift = 13; break;
......@@ -104,13 +104,13 @@ static int __init hp_zx1_ioc_shared(void)
hp->io_page_size = 1 << hp->io_tlb_shift;
hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size;
hp->iova_base = INREG64(hp->ioc_regs, HP_ZX1_IBASE) & ~0x1;
hp->iova_base = readq(hp->ioc_regs+HP_ZX1_IBASE) & ~0x1;
hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - HP_ZX1_GART_SIZE;
hp->gart_size = HP_ZX1_GART_SIZE;
hp->gatt_entries = hp->gart_size / hp->io_page_size;
hp->io_pdir = phys_to_virt(INREG64(hp->ioc_regs, HP_ZX1_PDIR_BASE));
hp->io_pdir = phys_to_virt(readq(hp->ioc_regs+HP_ZX1_PDIR_BASE));
hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) {
......@@ -174,7 +174,7 @@ hp_zx1_ioc_init (u64 hpa)
* If the IOTLB is currently disabled, we can take it over.
* Otherwise, we have to share with sba_iommu.
*/
hp->io_pdir_owner = (INREG64(hp->ioc_regs, HP_ZX1_IBASE) & 0x1) == 0;
hp->io_pdir_owner = (readq(hp->ioc_regs+HP_ZX1_IBASE) & 0x1) == 0;
if (hp->io_pdir_owner)
return hp_zx1_ioc_owner();
......@@ -189,18 +189,18 @@ hp_zx1_lba_find_capability (volatile u8 __iomem *hpa, int cap)
u8 pos, id;
int ttl = 48;
status = INREG16(hpa, PCI_STATUS);
status = readw(hpa+PCI_STATUS);
if (!(status & PCI_STATUS_CAP_LIST))
return 0;
pos = INREG8(hpa, PCI_CAPABILITY_LIST);
pos = readb(hpa+PCI_CAPABILITY_LIST);
while (ttl-- && pos >= 0x40) {
pos &= ~3;
id = INREG8(hpa, pos + PCI_CAP_LIST_ID);
id = readb(hpa+pos+PCI_CAP_LIST_ID);
if (id == 0xff)
break;
if (id == cap)
return pos;
pos = INREG8(hpa, pos + PCI_CAP_LIST_NEXT);
pos = readb(hpa+pos+PCI_CAP_LIST_NEXT);
}
return 0;
}
......@@ -217,7 +217,7 @@ hp_zx1_lba_init (u64 hpa)
hp->lba_cap_offset = hp_zx1_lba_find_capability(hp->lba_regs, PCI_CAP_ID_AGP);
cap = INREG32(hp->lba_regs, hp->lba_cap_offset) & 0xff;
cap = readl(hp->lba_regs+hp->lba_cap_offset) & 0xff;
if (cap != PCI_CAP_ID_AGP) {
printk(KERN_ERR PFX "Invalid capability ID 0x%02x at 0x%x\n",
cap, hp->lba_cap_offset);
......@@ -245,15 +245,19 @@ hp_zx1_configure (void)
agp_bridge->gart_bus_addr = hp->gart_base;
agp_bridge->capndx = hp->lba_cap_offset;
agp_bridge->mode = INREG32(hp->lba_regs, hp->lba_cap_offset + PCI_AGP_STATUS);
agp_bridge->mode = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS);
if (hp->io_pdir_owner) {
OUTREG64(hp->ioc_regs, HP_ZX1_PDIR_BASE, virt_to_phys(hp->io_pdir));
OUTREG64(hp->ioc_regs, HP_ZX1_TCNFG, hp->io_tlb_ps);
OUTREG64(hp->ioc_regs, HP_ZX1_IMASK, ~(HP_ZX1_IOVA_SIZE - 1));
OUTREG64(hp->ioc_regs, HP_ZX1_IBASE, hp->iova_base | 0x1);
OUTREG64(hp->ioc_regs, HP_ZX1_PCOM, hp->iova_base | log2(HP_ZX1_IOVA_SIZE));
INREG64(hp->ioc_regs, HP_ZX1_PCOM);
writel(virt_to_phys(hp->io_pdir), hp->ioc_regs+HP_ZX1_PDIR_BASE);
readl(hp->ioc_regs+HP_ZX1_PDIR_BASE);
writel(hp->io_tlb_ps, hp->ioc_regs+HP_ZX1_TCNFG);
readl(hp->ioc_regs+HP_ZX1_TCNFG);
writel(~(HP_ZX1_IOVA_SIZE-1), hp->ioc_regs+HP_ZX1_IMASK);
readl(hp->ioc_regs+HP_ZX1_IMASK);
writel(hp->iova_base|1, hp->ioc_regs+HP_ZX1_IBASE);
readl(hp->ioc_regs+HP_ZX1_IBASE);
writel(hp->iova_base|log2(HP_ZX1_IOVA_SIZE), hp->ioc_regs+HP_ZX1_PCOM);
readl(hp->ioc_regs+HP_ZX1_PCOM);
}
return 0;
......@@ -265,8 +269,10 @@ hp_zx1_cleanup (void)
struct _hp_private *hp = &hp_private;
if (hp->ioc_regs) {
if (hp->io_pdir_owner)
OUTREG64(hp->ioc_regs, HP_ZX1_IBASE, 0);
if (hp->io_pdir_owner) {
writeq(0, hp->ioc_regs+HP_ZX1_IBASE);
readq(hp->ioc_regs+HP_ZX1_IBASE);
}
iounmap(hp->ioc_regs);
}
if (hp->lba_regs)
......@@ -278,8 +284,8 @@ hp_zx1_tlbflush (struct agp_memory *mem)
{
struct _hp_private *hp = &hp_private;
OUTREG64(hp->ioc_regs, HP_ZX1_PCOM, hp->gart_base | log2(hp->gart_size));
INREG64(hp->ioc_regs, HP_ZX1_PCOM);
writeq(hp->gart_base | log2(hp->gart_size), hp->ioc_regs+HP_ZX1_PCOM);
readq(hp->ioc_regs+HP_ZX1_PCOM);
}
static int
......@@ -401,12 +407,11 @@ hp_zx1_enable (u32 mode)
struct _hp_private *hp = &hp_private;
u32 command;
command = INREG32(hp->lba_regs, hp->lba_cap_offset + PCI_AGP_STATUS);
command = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS);
command = agp_collect_device_status(mode, command);
command |= 0x00000100;
OUTREG32(hp->lba_regs, hp->lba_cap_offset + PCI_AGP_COMMAND, command);
writel(command, hp->lba_regs+hp->lba_cap_offset+PCI_AGP_COMMAND);
agp_device_command(command, (mode & AGP8X_MODE) != 0);
}
......@@ -519,6 +524,8 @@ zx1_gart_probe (acpi_handle obj, u32 depth, void *context, void **ret)
static int __init
agp_hp_init (void)
{
if (agp_off)
return -EINVAL;
acpi_get_devices("HWP0003", zx1_gart_probe, "HWP0003", NULL);
if (hp_zx1_gart_found)
......
......@@ -532,8 +532,8 @@ static void i460_destroy_page (void *page)
static unsigned long i460_mask_memory (unsigned long addr, int type)
{
/* Make sure the returned address is a valid GATT entry */
return (agp_bridge->driver->masks[0].mask
| (((addr & ~((1 << I460_IO_PAGE_SHIFT) - 1)) & 0xffffff000) >> 12));
return agp_bridge->driver->masks[0].mask
| (((addr & ~((1 << I460_IO_PAGE_SHIFT) - 1)) & 0xffffff000) >> 12);
}
struct agp_bridge_driver intel_i460_driver = {
......@@ -585,6 +585,8 @@ static int __devinit agp_intel_i460_probe(struct pci_dev *pdev,
bridge->dev = pdev;
bridge->capndx = cap_ptr;
printk(KERN_INFO PFX "Detected Intel 460GX chipset\n");
pci_set_drvdata(pdev, bridge);
return agp_add_bridge(bridge);
}
......@@ -620,6 +622,8 @@ static struct pci_driver agp_intel_i460_pci_driver = {
static int __init agp_intel_i460_init(void)
{
if (agp_off)
return -EINVAL;
return pci_module_init(&agp_intel_i460_pci_driver);
}
......
This diff is collapsed.
......@@ -51,7 +51,7 @@ static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
if (new == NULL)
return NULL;
new->memory[0] = agp_bridge->driver->mask_memory(virt_to_phys(addr), type);
new->memory[0] = virt_to_phys(addr);
new->page_count = 1;
new->num_scratch_pages = 1;
new->type = AGP_PHYS_MEMORY;
......@@ -111,8 +111,7 @@ static void intel_i830_init_gtt_entries(void)
gtt_entries = MB(8) - KB(132);
break;
case I830_GMCH_GMS_LOCAL:
rdct = INREG8(intel_i830_private.registers,
I830_RDRAM_CHANNEL_TYPE);
rdct = readb(intel_i830_private.registers+I830_RDRAM_CHANNEL_TYPE);
gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
MB(ddt[I830_RDRAM_DDT(rdct)]);
local = 1;
......@@ -174,10 +173,10 @@ static int intel_i830_create_gatt_table(void)
intel_i830_private.registers = (volatile u8 __iomem*) ioremap(temp,128 * 4096);
if (!intel_i830_private.registers)
return (-ENOMEM);
return -ENOMEM;
temp = INREG32(intel_i830_private.registers,I810_PGETBL_CTL) & 0xfffff000;
global_cache_flush();
temp = readl(intel_i830_private.registers+I810_PGETBL_CTL) & 0xfffff000;
global_cache_flush(); /* FIXME: ?? */
/* we have to call this as early as possible after the MMIO base address is known */
intel_i830_init_gtt_entries();
......@@ -186,7 +185,7 @@ static int intel_i830_create_gatt_table(void)
agp_bridge->gatt_bus_addr = temp;
return(0);
return 0;
}
/* Return the gatt table to a sane state. Use the top of stolen
......@@ -194,7 +193,7 @@ static int intel_i830_create_gatt_table(void)
*/
static int intel_i830_free_gatt_table(void)
{
return(0);
return 0;
}
static int intel_i830_fetch_size(void)
......@@ -209,7 +208,7 @@ static int intel_i830_fetch_size(void)
/* 855GM/852GM/865G has 128MB aperture size */
agp_bridge->previous_size = agp_bridge->current_size = (void *) values;
agp_bridge->aperture_size_idx = 0;
return(values[0].size);
return values[0].size;
}
pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl);
......@@ -217,14 +216,14 @@ static int intel_i830_fetch_size(void)
if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
agp_bridge->previous_size = agp_bridge->current_size = (void *) values;
agp_bridge->aperture_size_idx = 0;
return(values[0].size);
return values[0].size;
} else {
agp_bridge->previous_size = agp_bridge->current_size = (void *) values;
agp_bridge->aperture_size_idx = 1;
return(values[1].size);
return values[1].size;
}
return(0);
return 0;
}
static int intel_i830_configure(void)
......@@ -243,14 +242,17 @@ static int intel_i830_configure(void)
gmch_ctrl |= I830_GMCH_ENABLED;
pci_write_config_word(agp_bridge->dev,I830_GMCH_CTRL,gmch_ctrl);
OUTREG32(intel_i830_private.registers,I810_PGETBL_CTL,agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED);
global_cache_flush();
writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_i830_private.registers+I810_PGETBL_CTL);
readl(intel_i830_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
if (agp_bridge->driver->needs_scratch_page)
for (i = intel_i830_private.gtt_entries; i < current_size->num_entries; i++)
OUTREG32(intel_i830_private.registers,I810_PTE_BASE + (i * 4),agp_bridge->scratch_page);
return (0);
if (agp_bridge->driver->needs_scratch_page) {
for (i = intel_i830_private.gtt_entries; i < current_size->num_entries; i++) {
writel(agp_bridge->scratch_page, intel_i830_private.registers+I810_PTE_BASE+(i*4));
readl(intel_i830_private.registers+I810_PTE_BASE+(i*4)); /* PCI Posting. */
}
}
global_cache_flush();
return 0;
}
static void intel_i830_cleanup(void)
......@@ -272,11 +274,11 @@ static int intel_i830_insert_entries(struct agp_memory *mem,off_t pg_start,
pg_start,intel_i830_private.gtt_entries);
printk (KERN_INFO PFX "Trying to insert into local/stolen memory\n");
return (-EINVAL);
return -EINVAL;
}
if ((pg_start + mem->page_count) > num_entries)
return (-EINVAL);
return -EINVAL;
/* The i830 can't check the GTT for entries since its read only,
* depend on the caller to make the correct offset decisions.
......@@ -284,19 +286,21 @@ static int intel_i830_insert_entries(struct agp_memory *mem,off_t pg_start,
if ((type != 0 && type != AGP_PHYS_MEMORY) ||
(mem->type != 0 && mem->type != AGP_PHYS_MEMORY))
return (-EINVAL);
return -EINVAL;
global_cache_flush();
global_cache_flush(); /* FIXME: ?? */
for (i = 0, j = pg_start; i < mem->page_count; i++, j++)
OUTREG32(intel_i830_private.registers,I810_PTE_BASE + (j * 4),
agp_bridge->driver->mask_memory(mem->memory[i], mem->type));
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
writel(agp_bridge->driver->mask_memory(mem->memory[i], mem->type),
intel_i830_private.registers+I810_PTE_BASE+(j*4));
readl(intel_i830_private.registers+I810_PTE_BASE+(j*4)); /* PCI Posting. */
}
global_cache_flush();
agp_bridge->driver->tlb_flush(mem);
return(0);
return 0;
}
static int intel_i830_remove_entries(struct agp_memory *mem,off_t pg_start,
......@@ -308,26 +312,26 @@ static int intel_i830_remove_entries(struct agp_memory *mem,off_t pg_start,
if (pg_start < intel_i830_private.gtt_entries) {
printk (KERN_INFO PFX "Trying to disable local/stolen memory\n");
return (-EINVAL);
return -EINVAL;
}
for (i = pg_start; i < (mem->page_count + pg_start); i++)
OUTREG32(intel_i830_private.registers,I810_PTE_BASE + (i * 4),agp_bridge->scratch_page);
for (i = pg_start; i < (mem->page_count + pg_start); i++) {
writel(agp_bridge->scratch_page, intel_i830_private.registers+I810_PTE_BASE+(i*4));
readl(intel_i830_private.registers+I810_PTE_BASE+(i*4)); /* PCI Posting. */
}
global_cache_flush();
agp_bridge->driver->tlb_flush(mem);
return (0);
return 0;
}
static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count,int type)
{
if (type == AGP_PHYS_MEMORY)
return(alloc_agpphysmem_i8xx(pg_count, type));
return alloc_agpphysmem_i8xx(pg_count, type);
/* always return NULL for other allocation types for now */
return(NULL);
return NULL;
}
static int intel_8xx_fetch_size(void)
......@@ -470,9 +474,9 @@ static int find_i830(u16 device)
{
struct pci_dev *i830_dev;
i830_dev = pci_find_device(PCI_VENDOR_ID_INTEL, device, NULL);
i830_dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
if (i830_dev && PCI_FUNC(i830_dev->devfn) != 0) {
i830_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
i830_dev = pci_get_device(PCI_VENDOR_ID_INTEL,
device, i830_dev);
}
......@@ -536,7 +540,7 @@ static int __devinit agp_intelmch_probe(struct pci_dev *pdev,
if (!r->start && r->end) {
if(pci_assign_resource(pdev, 0)) {
printk(KERN_ERR PFX "could not assign resource 0\n");
return (-ENODEV);
return -ENODEV;
}
}
......@@ -547,7 +551,7 @@ static int __devinit agp_intelmch_probe(struct pci_dev *pdev,
*/
if (pci_enable_device(pdev)) {
printk(KERN_ERR PFX "Unable to Enable PCI device\n");
return (-ENODEV);
return -ENODEV;
}
/* Fill in the mode register */
......@@ -565,6 +569,7 @@ static void __devexit agp_intelmch_remove(struct pci_dev *pdev)
{
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
pci_dev_put(pdev);
agp_remove_bridge(bridge);
agp_put_bridge(bridge);
}
......
......@@ -347,7 +347,7 @@ int agp_3_5_enable(struct agp_bridge_data *bridge)
INIT_LIST_HEAD(head);
/* Find all AGP devices, and add them to dev_list. */
while ((dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
for_each_pci_dev(dev) {
mcapndx = pci_find_capability(dev, PCI_CAP_ID_AGP);
if (mcapndx == 0)
continue;
......
......@@ -214,9 +214,11 @@ static int nvidia_insert_memory(struct agp_memory *mem, off_t pg_start, int type
global_cache_flush();
mem->is_flushed = TRUE;
}
for (i = 0, j = pg_start; i < mem->page_count; i++, j++)
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
writel(agp_bridge->driver->mask_memory(mem->memory[i], mem->type),
agp_bridge->gatt_table+nvidia_private.pg_offset+j);
readl(agp_bridge->gatt_table+nvidia_private.pg_offset+j); /* PCI Posting. */
}
agp_bridge->driver->tlb_flush(mem);
return 0;
}
......@@ -403,6 +405,8 @@ static struct pci_driver agp_nvidia_pci_driver = {
static int __init agp_nvidia_init(void)
{
if (agp_off)
return -EINVAL;
return pci_module_init(&agp_nvidia_pci_driver);
}
......
......@@ -340,6 +340,8 @@ static struct pci_driver agp_sis_pci_driver = {
static int __init agp_sis_init(void)
{
if (agp_off)
return -EINVAL;
return pci_module_init(&agp_sis_pci_driver);
}
......
......@@ -242,12 +242,12 @@ static int serverworks_fetch_size(void)
*/
static void serverworks_tlbflush(struct agp_memory *temp)
{
OUTREG8(serverworks_private.registers, SVWRKS_POSTFLUSH, 1);
while(INREG8(serverworks_private.registers, SVWRKS_POSTFLUSH) == 1)
writeb(1, serverworks_private.registers+SVWRKS_POSTFLUSH);
while (readb(serverworks_private.registers+SVWRKS_POSTFLUSH) == 1)
cpu_relax();
OUTREG32(serverworks_private.registers, SVWRKS_DIRFLUSH, 1);
while(INREG32(serverworks_private.registers, SVWRKS_DIRFLUSH) == 1)
writel(1, serverworks_private.registers+SVWRKS_DIRFLUSH);
while(readl(serverworks_private.registers+SVWRKS_DIRFLUSH) == 1)
cpu_relax();
}
......@@ -269,21 +269,21 @@ static int serverworks_configure(void)
return -ENOMEM;
}
OUTREG8(serverworks_private.registers, SVWRKS_GART_CACHE, 0x0a);
writeb(0xA, serverworks_private.registers+SVWRKS_GART_CACHE);
readb(serverworks_private.registers+SVWRKS_GART_CACHE); /* PCI Posting. */
OUTREG32(serverworks_private.registers, SVWRKS_GATTBASE,
agp_bridge->gatt_bus_addr);
writel(agp_bridge->gatt_bus_addr, serverworks_private.registers+SVWRKS_GATTBASE);
readl(serverworks_private.registers+SVWRKS_GATTBASE); /* PCI Posting. */
cap_reg = INREG16(serverworks_private.registers, SVWRKS_COMMAND);
cap_reg = readw(serverworks_private.registers+SVWRKS_COMMAND);
cap_reg &= ~0x0007;
cap_reg |= 0x4;
OUTREG16(serverworks_private.registers, SVWRKS_COMMAND, cap_reg);
writew(cap_reg, serverworks_private.registers+SVWRKS_COMMAND);
readw(serverworks_private.registers+SVWRKS_COMMAND);
pci_read_config_byte(serverworks_private.svrwrks_dev,
SVWRKS_AGP_ENABLE, &enable_reg);
pci_read_config_byte(serverworks_private.svrwrks_dev,SVWRKS_AGP_ENABLE, &enable_reg);
enable_reg |= 0x1; /* Agp Enable bit */
pci_write_config_byte(serverworks_private.svrwrks_dev,
SVWRKS_AGP_ENABLE, enable_reg);
pci_write_config_byte(serverworks_private.svrwrks_dev,SVWRKS_AGP_ENABLE, enable_reg);
serverworks_tlbflush(NULL);
agp_bridge->capndx = pci_find_capability(serverworks_private.svrwrks_dev, PCI_CAP_ID_AGP);
......@@ -539,6 +539,8 @@ static struct pci_driver agp_serverworks_pci_driver = {
static int __init agp_serverworks_init(void)
{
if (agp_off)
return -EINVAL;
return pci_module_init(&agp_serverworks_pci_driver);
}
......
......@@ -373,6 +373,8 @@ static struct pci_driver agp_uninorth_pci_driver = {
static int __init agp_uninorth_init(void)
{
if (agp_off)
return -EINVAL;
return pci_module_init(&agp_uninorth_pci_driver);
}
......
......@@ -523,6 +523,8 @@ static struct pci_driver agp_via_pci_driver = {
static int __init agp_via_init(void)
{
if (agp_off)
return -EINVAL;
return pci_module_init(&agp_via_pci_driver);
}
......
......@@ -12,8 +12,8 @@
* data corruption on some CPUs.
*/
#define map_page_into_agp(page) change_page_attr(page, 1, PAGE_KERNEL_NOCACHE)
#define unmap_page_from_agp(page) change_page_attr(page, 1, PAGE_KERNEL)
int map_page_into_agp(struct page *page);
int unmap_page_from_agp(struct page *page);
#define flush_agp_mappings() global_flush_tlb()
/* Could use CLFLUSH here if the cpu supports it. But then it would
......
......@@ -10,9 +10,8 @@
* with different cachability attributes for the same page.
*/
#define map_page_into_agp(page) \
change_page_attr(page, 1, PAGE_KERNEL_NOCACHE)
#define unmap_page_from_agp(page) change_page_attr(page, 1, PAGE_KERNEL)
int map_page_into_agp(struct page *page);
int unmap_page_from_agp(struct page *page);
#define flush_agp_mappings() global_flush_tlb()
/* Could use CLFLUSH here if the cpu supports it. But then it would
......
......@@ -1017,6 +1017,7 @@
#define PCI_DEVICE_ID_AL_M1531 0x1531
#define PCI_DEVICE_ID_AL_M1533 0x1533
#define PCI_DEVICE_ID_AL_M1541 0x1541
#define PCI_DEVICE_ID_AL_M1543 0x1543
#define PCI_DEVICE_ID_AL_M1563 0x1563
#define PCI_DEVICE_ID_AL_M1621 0x1621
#define PCI_DEVICE_ID_AL_M1631 0x1631
......@@ -1026,7 +1027,9 @@
#define PCI_DEVICE_ID_AL_M1647 0x1647
#define PCI_DEVICE_ID_AL_M1651 0x1651
#define PCI_DEVICE_ID_AL_M1671 0x1671
#define PCI_DEVICE_ID_AL_M1543 0x1543
#define PCI_DEVICE_ID_AL_M1681 0x1681
#define PCI_DEVICE_ID_AL_M1683 0x1683
#define PCI_DEVICE_ID_AL_M1689 0x1689
#define PCI_DEVICE_ID_AL_M3307 0x3307
#define PCI_DEVICE_ID_AL_M4803 0x5215
#define PCI_DEVICE_ID_AL_M5219 0x5219
......