Commit c9a1be96 authored by Jerome Glisse's avatar Jerome Glisse Committed by Dave Airlie

drm/radeon/kms: consolidate GART code, fix segfault after GPU lockup V2

After GPU lockup the VRAM GART table is unpinned and thus its pointer
becomes invalid. This patch moves the unpin code to a common helper
function and sets the pointer to NULL so that the page update code can
check whether it should update the GPU page table or not. That way BOs
still bound to the GART can be unbound properly (pci_unmap_page for all
their pages) while there is no need to update the GPU page table.

V2: move the test for a NULL GART pointer out of the loop, a small optimization
Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent 0e2c978e
...@@ -894,7 +894,7 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev) ...@@ -894,7 +894,7 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
u32 tmp; u32 tmp;
int r; int r;
if (rdev->gart.table.vram.robj == NULL) { if (rdev->gart.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL; return -EINVAL;
} }
...@@ -946,7 +946,6 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev) ...@@ -946,7 +946,6 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
void evergreen_pcie_gart_disable(struct radeon_device *rdev) void evergreen_pcie_gart_disable(struct radeon_device *rdev)
{ {
u32 tmp; u32 tmp;
int r;
/* Disable all tables */ /* Disable all tables */
WREG32(VM_CONTEXT0_CNTL, 0); WREG32(VM_CONTEXT0_CNTL, 0);
...@@ -966,14 +965,7 @@ void evergreen_pcie_gart_disable(struct radeon_device *rdev) ...@@ -966,14 +965,7 @@ void evergreen_pcie_gart_disable(struct radeon_device *rdev)
WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
if (rdev->gart.table.vram.robj) { radeon_gart_table_vram_unpin(rdev);
r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
if (likely(r == 0)) {
radeon_bo_kunmap(rdev->gart.table.vram.robj);
radeon_bo_unpin(rdev->gart.table.vram.robj);
radeon_bo_unreserve(rdev->gart.table.vram.robj);
}
}
} }
void evergreen_pcie_gart_fini(struct radeon_device *rdev) void evergreen_pcie_gart_fini(struct radeon_device *rdev)
......
...@@ -935,7 +935,7 @@ int cayman_pcie_gart_enable(struct radeon_device *rdev) ...@@ -935,7 +935,7 @@ int cayman_pcie_gart_enable(struct radeon_device *rdev)
{ {
int r; int r;
if (rdev->gart.table.vram.robj == NULL) { if (rdev->gart.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL; return -EINVAL;
} }
...@@ -980,8 +980,6 @@ int cayman_pcie_gart_enable(struct radeon_device *rdev) ...@@ -980,8 +980,6 @@ int cayman_pcie_gart_enable(struct radeon_device *rdev)
void cayman_pcie_gart_disable(struct radeon_device *rdev) void cayman_pcie_gart_disable(struct radeon_device *rdev)
{ {
int r;
/* Disable all tables */ /* Disable all tables */
WREG32(VM_CONTEXT0_CNTL, 0); WREG32(VM_CONTEXT0_CNTL, 0);
WREG32(VM_CONTEXT1_CNTL, 0); WREG32(VM_CONTEXT1_CNTL, 0);
...@@ -997,14 +995,7 @@ void cayman_pcie_gart_disable(struct radeon_device *rdev) ...@@ -997,14 +995,7 @@ void cayman_pcie_gart_disable(struct radeon_device *rdev)
WREG32(VM_L2_CNTL2, 0); WREG32(VM_L2_CNTL2, 0);
WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY | WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
L2_CACHE_BIGK_FRAGMENT_SIZE(6)); L2_CACHE_BIGK_FRAGMENT_SIZE(6));
if (rdev->gart.table.vram.robj) { radeon_gart_table_vram_unpin(rdev);
r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
if (likely(r == 0)) {
radeon_bo_kunmap(rdev->gart.table.vram.robj);
radeon_bo_unpin(rdev->gart.table.vram.robj);
radeon_bo_unreserve(rdev->gart.table.vram.robj);
}
}
} }
void cayman_pcie_gart_fini(struct radeon_device *rdev) void cayman_pcie_gart_fini(struct radeon_device *rdev)
......
...@@ -577,7 +577,7 @@ int r100_pci_gart_init(struct radeon_device *rdev) ...@@ -577,7 +577,7 @@ int r100_pci_gart_init(struct radeon_device *rdev)
{ {
int r; int r;
if (rdev->gart.table.ram.ptr) { if (rdev->gart.ptr) {
WARN(1, "R100 PCI GART already initialized\n"); WARN(1, "R100 PCI GART already initialized\n");
return 0; return 0;
} }
...@@ -636,10 +636,12 @@ void r100_pci_gart_disable(struct radeon_device *rdev) ...@@ -636,10 +636,12 @@ void r100_pci_gart_disable(struct radeon_device *rdev)
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{ {
u32 *gtt = rdev->gart.ptr;
if (i < 0 || i > rdev->gart.num_gpu_pages) { if (i < 0 || i > rdev->gart.num_gpu_pages) {
return -EINVAL; return -EINVAL;
} }
rdev->gart.table.ram.ptr[i] = cpu_to_le32(lower_32_bits(addr)); gtt[i] = cpu_to_le32(lower_32_bits(addr));
return 0; return 0;
} }
......
...@@ -74,7 +74,7 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev) ...@@ -74,7 +74,7 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{ {
void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; void __iomem *ptr = rdev->gart.ptr;
if (i < 0 || i > rdev->gart.num_gpu_pages) { if (i < 0 || i > rdev->gart.num_gpu_pages) {
return -EINVAL; return -EINVAL;
...@@ -93,7 +93,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev) ...@@ -93,7 +93,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev)
{ {
int r; int r;
if (rdev->gart.table.vram.robj) { if (rdev->gart.robj) {
WARN(1, "RV370 PCIE GART already initialized\n"); WARN(1, "RV370 PCIE GART already initialized\n");
return 0; return 0;
} }
...@@ -116,7 +116,7 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev) ...@@ -116,7 +116,7 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
uint32_t tmp; uint32_t tmp;
int r; int r;
if (rdev->gart.table.vram.robj == NULL) { if (rdev->gart.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL; return -EINVAL;
} }
...@@ -154,7 +154,6 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev) ...@@ -154,7 +154,6 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
void rv370_pcie_gart_disable(struct radeon_device *rdev) void rv370_pcie_gart_disable(struct radeon_device *rdev)
{ {
u32 tmp; u32 tmp;
int r;
WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0); WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0);
WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0); WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0);
...@@ -163,14 +162,7 @@ void rv370_pcie_gart_disable(struct radeon_device *rdev) ...@@ -163,14 +162,7 @@ void rv370_pcie_gart_disable(struct radeon_device *rdev)
tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN); WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
if (rdev->gart.table.vram.robj) { radeon_gart_table_vram_unpin(rdev);
r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
if (likely(r == 0)) {
radeon_bo_kunmap(rdev->gart.table.vram.robj);
radeon_bo_unpin(rdev->gart.table.vram.robj);
radeon_bo_unreserve(rdev->gart.table.vram.robj);
}
}
} }
void rv370_pcie_gart_fini(struct radeon_device *rdev) void rv370_pcie_gart_fini(struct radeon_device *rdev)
......
...@@ -895,7 +895,7 @@ void r600_pcie_gart_tlb_flush(struct radeon_device *rdev) ...@@ -895,7 +895,7 @@ void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
/* flush hdp cache so updates hit vram */ /* flush hdp cache so updates hit vram */
if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) && if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
!(rdev->flags & RADEON_IS_AGP)) { !(rdev->flags & RADEON_IS_AGP)) {
void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; void __iomem *ptr = (void *)rdev->gart.ptr;
u32 tmp; u32 tmp;
/* r7xx hw bug. write to HDP_DEBUG1 followed by fb read /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
...@@ -930,7 +930,7 @@ int r600_pcie_gart_init(struct radeon_device *rdev) ...@@ -930,7 +930,7 @@ int r600_pcie_gart_init(struct radeon_device *rdev)
{ {
int r; int r;
if (rdev->gart.table.vram.robj) { if (rdev->gart.robj) {
WARN(1, "R600 PCIE GART already initialized\n"); WARN(1, "R600 PCIE GART already initialized\n");
return 0; return 0;
} }
...@@ -947,7 +947,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev) ...@@ -947,7 +947,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
u32 tmp; u32 tmp;
int r, i; int r, i;
if (rdev->gart.table.vram.robj == NULL) { if (rdev->gart.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL; return -EINVAL;
} }
...@@ -1002,7 +1002,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev) ...@@ -1002,7 +1002,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
void r600_pcie_gart_disable(struct radeon_device *rdev) void r600_pcie_gart_disable(struct radeon_device *rdev)
{ {
u32 tmp; u32 tmp;
int i, r; int i;
/* Disable all tables */ /* Disable all tables */
for (i = 0; i < 7; i++) for (i = 0; i < 7; i++)
...@@ -1029,14 +1029,7 @@ void r600_pcie_gart_disable(struct radeon_device *rdev) ...@@ -1029,14 +1029,7 @@ void r600_pcie_gart_disable(struct radeon_device *rdev)
WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp); WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp); WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp); WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
if (rdev->gart.table.vram.robj) { radeon_gart_table_vram_unpin(rdev);
r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
if (likely(r == 0)) {
radeon_bo_kunmap(rdev->gart.table.vram.robj);
radeon_bo_unpin(rdev->gart.table.vram.robj);
radeon_bo_unreserve(rdev->gart.table.vram.robj);
}
}
} }
void r600_pcie_gart_fini(struct radeon_device *rdev) void r600_pcie_gart_fini(struct radeon_device *rdev)
......
...@@ -307,30 +307,17 @@ int radeon_mode_dumb_destroy(struct drm_file *file_priv, ...@@ -307,30 +307,17 @@ int radeon_mode_dumb_destroy(struct drm_file *file_priv,
*/ */
struct radeon_mc; struct radeon_mc;
struct radeon_gart_table_ram {
volatile uint32_t *ptr;
};
struct radeon_gart_table_vram {
struct radeon_bo *robj;
volatile uint32_t *ptr;
};
union radeon_gart_table {
struct radeon_gart_table_ram ram;
struct radeon_gart_table_vram vram;
};
#define RADEON_GPU_PAGE_SIZE 4096 #define RADEON_GPU_PAGE_SIZE 4096
#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1) #define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
#define RADEON_GPU_PAGE_SHIFT 12 #define RADEON_GPU_PAGE_SHIFT 12
struct radeon_gart { struct radeon_gart {
dma_addr_t table_addr; dma_addr_t table_addr;
struct radeon_bo *robj;
void *ptr;
unsigned num_gpu_pages; unsigned num_gpu_pages;
unsigned num_cpu_pages; unsigned num_cpu_pages;
unsigned table_size; unsigned table_size;
union radeon_gart_table table;
struct page **pages; struct page **pages;
dma_addr_t *pages_addr; dma_addr_t *pages_addr;
bool *ttm_alloced; bool *ttm_alloced;
...@@ -341,6 +328,8 @@ int radeon_gart_table_ram_alloc(struct radeon_device *rdev); ...@@ -341,6 +328,8 @@ int radeon_gart_table_ram_alloc(struct radeon_device *rdev);
void radeon_gart_table_ram_free(struct radeon_device *rdev); void radeon_gart_table_ram_free(struct radeon_device *rdev);
int radeon_gart_table_vram_alloc(struct radeon_device *rdev); int radeon_gart_table_vram_alloc(struct radeon_device *rdev);
void radeon_gart_table_vram_free(struct radeon_device *rdev); void radeon_gart_table_vram_free(struct radeon_device *rdev);
int radeon_gart_table_vram_pin(struct radeon_device *rdev);
void radeon_gart_table_vram_unpin(struct radeon_device *rdev);
int radeon_gart_init(struct radeon_device *rdev); int radeon_gart_init(struct radeon_device *rdev);
void radeon_gart_fini(struct radeon_device *rdev); void radeon_gart_fini(struct radeon_device *rdev);
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
...@@ -348,6 +337,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, ...@@ -348,6 +337,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
int pages, struct page **pagelist, int pages, struct page **pagelist,
dma_addr_t *dma_addr); dma_addr_t *dma_addr);
void radeon_gart_restore(struct radeon_device *rdev);
/* /*
...@@ -1445,8 +1435,6 @@ void radeon_ring_write(struct radeon_device *rdev, uint32_t v); ...@@ -1445,8 +1435,6 @@ void radeon_ring_write(struct radeon_device *rdev, uint32_t v);
/* AGP */ /* AGP */
extern int radeon_gpu_reset(struct radeon_device *rdev); extern int radeon_gpu_reset(struct radeon_device *rdev);
extern void radeon_agp_disable(struct radeon_device *rdev); extern void radeon_agp_disable(struct radeon_device *rdev);
extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
extern void radeon_gart_restore(struct radeon_device *rdev);
extern int radeon_modeset_init(struct radeon_device *rdev); extern int radeon_modeset_init(struct radeon_device *rdev);
extern void radeon_modeset_fini(struct radeon_device *rdev); extern void radeon_modeset_fini(struct radeon_device *rdev);
extern bool radeon_card_posted(struct radeon_device *rdev); extern bool radeon_card_posted(struct radeon_device *rdev);
......
...@@ -49,27 +49,27 @@ int radeon_gart_table_ram_alloc(struct radeon_device *rdev) ...@@ -49,27 +49,27 @@ int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
rdev->gart.table_size >> PAGE_SHIFT); rdev->gart.table_size >> PAGE_SHIFT);
} }
#endif #endif
rdev->gart.table.ram.ptr = ptr; rdev->gart.ptr = ptr;
memset((void *)rdev->gart.table.ram.ptr, 0, rdev->gart.table_size); memset((void *)rdev->gart.ptr, 0, rdev->gart.table_size);
return 0; return 0;
} }
void radeon_gart_table_ram_free(struct radeon_device *rdev) void radeon_gart_table_ram_free(struct radeon_device *rdev)
{ {
if (rdev->gart.table.ram.ptr == NULL) { if (rdev->gart.ptr == NULL) {
return; return;
} }
#ifdef CONFIG_X86 #ifdef CONFIG_X86
if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 || if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) { rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
set_memory_wb((unsigned long)rdev->gart.table.ram.ptr, set_memory_wb((unsigned long)rdev->gart.ptr,
rdev->gart.table_size >> PAGE_SHIFT); rdev->gart.table_size >> PAGE_SHIFT);
} }
#endif #endif
pci_free_consistent(rdev->pdev, rdev->gart.table_size, pci_free_consistent(rdev->pdev, rdev->gart.table_size,
(void *)rdev->gart.table.ram.ptr, (void *)rdev->gart.ptr,
rdev->gart.table_addr); rdev->gart.table_addr);
rdev->gart.table.ram.ptr = NULL; rdev->gart.ptr = NULL;
rdev->gart.table_addr = 0; rdev->gart.table_addr = 0;
} }
...@@ -77,10 +77,10 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev) ...@@ -77,10 +77,10 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
{ {
int r; int r;
if (rdev->gart.table.vram.robj == NULL) { if (rdev->gart.robj == NULL) {
r = radeon_bo_create(rdev, rdev->gart.table_size, r = radeon_bo_create(rdev, rdev->gart.table_size,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
&rdev->gart.table.vram.robj); &rdev->gart.robj);
if (r) { if (r) {
return r; return r;
} }
...@@ -93,38 +93,46 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev) ...@@ -93,38 +93,46 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev)
uint64_t gpu_addr; uint64_t gpu_addr;
int r; int r;
r = radeon_bo_reserve(rdev->gart.table.vram.robj, false); r = radeon_bo_reserve(rdev->gart.robj, false);
if (unlikely(r != 0)) if (unlikely(r != 0))
return r; return r;
r = radeon_bo_pin(rdev->gart.table.vram.robj, r = radeon_bo_pin(rdev->gart.robj,
RADEON_GEM_DOMAIN_VRAM, &gpu_addr); RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
if (r) { if (r) {
radeon_bo_unreserve(rdev->gart.table.vram.robj); radeon_bo_unreserve(rdev->gart.robj);
return r; return r;
} }
r = radeon_bo_kmap(rdev->gart.table.vram.robj, r = radeon_bo_kmap(rdev->gart.robj, &rdev->gart.ptr);
(void **)&rdev->gart.table.vram.ptr);
if (r) if (r)
radeon_bo_unpin(rdev->gart.table.vram.robj); radeon_bo_unpin(rdev->gart.robj);
radeon_bo_unreserve(rdev->gart.table.vram.robj); radeon_bo_unreserve(rdev->gart.robj);
rdev->gart.table_addr = gpu_addr; rdev->gart.table_addr = gpu_addr;
return r; return r;
} }
void radeon_gart_table_vram_free(struct radeon_device *rdev) void radeon_gart_table_vram_unpin(struct radeon_device *rdev)
{ {
int r; int r;
if (rdev->gart.table.vram.robj == NULL) { if (rdev->gart.robj == NULL) {
return; return;
} }
r = radeon_bo_reserve(rdev->gart.table.vram.robj, false); r = radeon_bo_reserve(rdev->gart.robj, false);
if (likely(r == 0)) { if (likely(r == 0)) {
radeon_bo_kunmap(rdev->gart.table.vram.robj); radeon_bo_kunmap(rdev->gart.robj);
radeon_bo_unpin(rdev->gart.table.vram.robj); radeon_bo_unpin(rdev->gart.robj);
radeon_bo_unreserve(rdev->gart.table.vram.robj); radeon_bo_unreserve(rdev->gart.robj);
rdev->gart.ptr = NULL;
} }
radeon_bo_unref(&rdev->gart.table.vram.robj); }
void radeon_gart_table_vram_free(struct radeon_device *rdev)
{
if (rdev->gart.robj == NULL) {
return;
}
radeon_gart_table_vram_unpin(rdev);
radeon_bo_unref(&rdev->gart.robj);
} }
...@@ -151,12 +159,14 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, ...@@ -151,12 +159,14 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
if (rdev->gart.pages[p]) { if (rdev->gart.pages[p]) {
if (!rdev->gart.ttm_alloced[p]) if (!rdev->gart.ttm_alloced[p])
pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p], pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
rdev->gart.pages[p] = NULL; rdev->gart.pages[p] = NULL;
rdev->gart.pages_addr[p] = rdev->dummy_page.addr; rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
page_base = rdev->gart.pages_addr[p]; page_base = rdev->gart.pages_addr[p];
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
radeon_gart_set_page(rdev, t, page_base); if (rdev->gart.ptr) {
radeon_gart_set_page(rdev, t, page_base);
}
page_base += RADEON_GPU_PAGE_SIZE; page_base += RADEON_GPU_PAGE_SIZE;
} }
} }
...@@ -199,10 +209,12 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, ...@@ -199,10 +209,12 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
} }
} }
rdev->gart.pages[p] = pagelist[i]; rdev->gart.pages[p] = pagelist[i];
page_base = rdev->gart.pages_addr[p]; if (rdev->gart.ptr) {
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { page_base = rdev->gart.pages_addr[p];
radeon_gart_set_page(rdev, t, page_base); for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
page_base += RADEON_GPU_PAGE_SIZE; radeon_gart_set_page(rdev, t, page_base);
page_base += RADEON_GPU_PAGE_SIZE;
}
} }
} }
mb(); mb();
...@@ -215,6 +227,9 @@ void radeon_gart_restore(struct radeon_device *rdev) ...@@ -215,6 +227,9 @@ void radeon_gart_restore(struct radeon_device *rdev)
int i, j, t; int i, j, t;
u64 page_base; u64 page_base;
if (!rdev->gart.ptr) {
return;
}
for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) { for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
page_base = rdev->gart.pages_addr[i]; page_base = rdev->gart.pages_addr[i];
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
......
...@@ -77,7 +77,7 @@ int rs400_gart_init(struct radeon_device *rdev) ...@@ -77,7 +77,7 @@ int rs400_gart_init(struct radeon_device *rdev)
{ {
int r; int r;
if (rdev->gart.table.ram.ptr) { if (rdev->gart.ptr) {
WARN(1, "RS400 GART already initialized\n"); WARN(1, "RS400 GART already initialized\n");
return 0; return 0;
} }
...@@ -212,6 +212,7 @@ void rs400_gart_fini(struct radeon_device *rdev) ...@@ -212,6 +212,7 @@ void rs400_gart_fini(struct radeon_device *rdev)
int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{ {
uint32_t entry; uint32_t entry;
u32 *gtt = rdev->gart.ptr;
if (i < 0 || i > rdev->gart.num_gpu_pages) { if (i < 0 || i > rdev->gart.num_gpu_pages) {
return -EINVAL; return -EINVAL;
...@@ -221,7 +222,7 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) ...@@ -221,7 +222,7 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
((upper_32_bits(addr) & 0xff) << 4) | ((upper_32_bits(addr) & 0xff) << 4) |
RS400_PTE_WRITEABLE | RS400_PTE_READABLE; RS400_PTE_WRITEABLE | RS400_PTE_READABLE;
entry = cpu_to_le32(entry); entry = cpu_to_le32(entry);
rdev->gart.table.ram.ptr[i] = entry; gtt[i] = entry;
return 0; return 0;
} }
......
...@@ -414,7 +414,7 @@ int rs600_gart_init(struct radeon_device *rdev) ...@@ -414,7 +414,7 @@ int rs600_gart_init(struct radeon_device *rdev)
{ {
int r; int r;
if (rdev->gart.table.vram.robj) { if (rdev->gart.robj) {
WARN(1, "RS600 GART already initialized\n"); WARN(1, "RS600 GART already initialized\n");
return 0; return 0;
} }
...@@ -432,7 +432,7 @@ static int rs600_gart_enable(struct radeon_device *rdev) ...@@ -432,7 +432,7 @@ static int rs600_gart_enable(struct radeon_device *rdev)
u32 tmp; u32 tmp;
int r, i; int r, i;
if (rdev->gart.table.vram.robj == NULL) { if (rdev->gart.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL; return -EINVAL;
} }
...@@ -495,20 +495,12 @@ static int rs600_gart_enable(struct radeon_device *rdev) ...@@ -495,20 +495,12 @@ static int rs600_gart_enable(struct radeon_device *rdev)
void rs600_gart_disable(struct radeon_device *rdev) void rs600_gart_disable(struct radeon_device *rdev)
{ {
u32 tmp; u32 tmp;
int r;
/* FIXME: disable out of gart access */ /* FIXME: disable out of gart access */
WREG32_MC(R_000100_MC_PT0_CNTL, 0); WREG32_MC(R_000100_MC_PT0_CNTL, 0);
tmp = RREG32_MC(R_000009_MC_CNTL1); tmp = RREG32_MC(R_000009_MC_CNTL1);
WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES); WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
if (rdev->gart.table.vram.robj) { radeon_gart_table_vram_unpin(rdev);
r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
if (r == 0) {
radeon_bo_kunmap(rdev->gart.table.vram.robj);
radeon_bo_unpin(rdev->gart.table.vram.robj);
radeon_bo_unreserve(rdev->gart.table.vram.robj);
}
}
} }
void rs600_gart_fini(struct radeon_device *rdev) void rs600_gart_fini(struct radeon_device *rdev)
...@@ -526,7 +518,7 @@ void rs600_gart_fini(struct radeon_device *rdev) ...@@ -526,7 +518,7 @@ void rs600_gart_fini(struct radeon_device *rdev)
int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{ {
void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; void __iomem *ptr = (void *)rdev->gart.ptr;
if (i < 0 || i > rdev->gart.num_gpu_pages) { if (i < 0 || i > rdev->gart.num_gpu_pages) {
return -EINVAL; return -EINVAL;
......
...@@ -124,7 +124,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev) ...@@ -124,7 +124,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
u32 tmp; u32 tmp;
int r, i; int r, i;
if (rdev->gart.table.vram.robj == NULL) { if (rdev->gart.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL; return -EINVAL;
} }
...@@ -171,7 +171,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev) ...@@ -171,7 +171,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
void rv770_pcie_gart_disable(struct radeon_device *rdev) void rv770_pcie_gart_disable(struct radeon_device *rdev)
{ {
u32 tmp; u32 tmp;
int i, r; int i;
/* Disable all tables */ /* Disable all tables */
for (i = 0; i < 7; i++) for (i = 0; i < 7; i++)
...@@ -191,14 +191,7 @@ void rv770_pcie_gart_disable(struct radeon_device *rdev) ...@@ -191,14 +191,7 @@ void rv770_pcie_gart_disable(struct radeon_device *rdev)
WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
if (rdev->gart.table.vram.robj) { radeon_gart_table_vram_unpin(rdev);
r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
if (likely(r == 0)) {
radeon_bo_kunmap(rdev->gart.table.vram.robj);
radeon_bo_unpin(rdev->gart.table.vram.robj);
radeon_bo_unreserve(rdev->gart.table.vram.robj);
}
}
} }
void rv770_pcie_gart_fini(struct radeon_device *rdev) void rv770_pcie_gart_fini(struct radeon_device *rdev)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment