Commit 652a1876 authored by Dave Airlie

Merge branch 'drm-next-3.8' of git://people.freedesktop.org/~agd5f/linux into drm-next

Fix a regression and some locking races, and add CS support
for the async DMA engines.

* 'drm-next-3.8' of git://people.freedesktop.org/~agd5f/linux:
  radeon: fix regression with eviction since evict caching changes
  drm/radeon: add more pedantic checks in the CP DMA checker
  drm/radeon: bump version for CS ioctl support for async DMA
  drm/radeon: enable the async DMA rings in the CS ioctl
  drm/radeon: add VM CS parser support for async DMA on cayman/TN/SI
  drm/radeon/kms: add evergreen/cayman CS parser for async DMA (v2)
  drm/radeon/kms: add 6xx/7xx CS parser for async DMA (v2)
  drm/radeon: fix htile buffer size computation for command stream checker
  drm/radeon: fix fence locking in the pageflip callback
  drm/radeon: make indirect register access concurrency-safe
  drm/radeon: add W|RREG32_IDX for MM_INDEX|DATA based mmio access
parents 9add1ac3 dd54fee7
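The locking fix targets the classic index/data register pair: a write to MM_INDEX selects a register and MM_DATA then reads or writes it, so two unserialized callers can clobber each other's index. A minimal sketch of the pattern the series adopts, simplified from the r100_mm_rreg() change below (illustrative only, not the exact kernel code):

```c
/* Illustrative only: serialize the MM_INDEX/MM_DATA pair so a concurrent
 * caller cannot change MM_INDEX between selecting and reading a register. */
static uint32_t indirect_read(struct radeon_device *rdev, uint32_t reg)
{
    unsigned long flags;
    uint32_t val;

    spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
    writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
    val = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
    spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);

    return val;
}
```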
@@ -4135,23 +4135,36 @@ int r100_init(struct radeon_device *rdev)
 	return 0;
 }
 
-uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
+uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
+		      bool always_indirect)
 {
-	if (reg < rdev->rmmio_size)
+	if (reg < rdev->rmmio_size && !always_indirect)
 		return readl(((void __iomem *)rdev->rmmio) + reg);
 	else {
+		unsigned long flags;
+		uint32_t ret;
+
+		spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
 		writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
-		return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+		ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+		spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
+
+		return ret;
 	}
 }
 
-void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
+		  bool always_indirect)
 {
-	if (reg < rdev->rmmio_size)
+	if (reg < rdev->rmmio_size && !always_indirect)
 		writel(v, ((void __iomem *)rdev->rmmio) + reg);
 	else {
+		unsigned long flags;
+
+		spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
 		writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
 		writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+		spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
 	}
 }
......
@@ -657,87 +657,30 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
 		/* nby is npipes htiles aligned == npipes * 8 pixel aligned */
 		nby = round_up(nby, track->npipes * 8);
 	} else {
-		/* htile widht & nby (8 or 4) make 2 bits number */
-		tmp = track->htile_surface & 3;
+		/* always assume 8x8 htile */
 		/* align is htile align * 8, htile align vary according to
 		 * number of pipe and tile width and nby
 		 */
 		switch (track->npipes) {
 		case 8:
-			switch (tmp) {
-			case 3:	/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
-				nbx = round_up(nbx, 64 * 8);
-				nby = round_up(nby, 64 * 8);
-				break;
-			case 2:	/* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
-			case 1:	/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
-				nbx = round_up(nbx, 64 * 8);
-				nby = round_up(nby, 32 * 8);
-				break;
-			case 0:	/* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
-				nbx = round_up(nbx, 32 * 8);
-				nby = round_up(nby, 32 * 8);
-				break;
-			default:
-				return -EINVAL;
-			}
+			/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+			nbx = round_up(nbx, 64 * 8);
+			nby = round_up(nby, 64 * 8);
 			break;
 		case 4:
-			switch (tmp) {
-			case 3:	/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
-				nbx = round_up(nbx, 64 * 8);
-				nby = round_up(nby, 32 * 8);
-				break;
-			case 2:	/* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
-			case 1:	/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
-				nbx = round_up(nbx, 32 * 8);
-				nby = round_up(nby, 32 * 8);
-				break;
-			case 0:	/* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
-				nbx = round_up(nbx, 32 * 8);
-				nby = round_up(nby, 16 * 8);
-				break;
-			default:
-				return -EINVAL;
-			}
+			/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+			nbx = round_up(nbx, 64 * 8);
+			nby = round_up(nby, 32 * 8);
 			break;
 		case 2:
-			switch (tmp) {
-			case 3:	/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
-				nbx = round_up(nbx, 32 * 8);
-				nby = round_up(nby, 32 * 8);
-				break;
-			case 2:	/* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
-			case 1:	/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
-				nbx = round_up(nbx, 32 * 8);
-				nby = round_up(nby, 16 * 8);
-				break;
-			case 0:	/* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
-				nbx = round_up(nbx, 16 * 8);
-				nby = round_up(nby, 16 * 8);
-				break;
-			default:
-				return -EINVAL;
-			}
+			/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+			nbx = round_up(nbx, 32 * 8);
+			nby = round_up(nby, 32 * 8);
 			break;
 		case 1:
-			switch (tmp) {
-			case 3:	/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
-				nbx = round_up(nbx, 32 * 8);
-				nby = round_up(nby, 16 * 8);
-				break;
-			case 2:	/* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
-			case 1:	/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
-				nbx = round_up(nbx, 16 * 8);
-				nby = round_up(nby, 16 * 8);
-				break;
-			case 0:	/* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
-				nbx = round_up(nbx, 16 * 8);
-				nby = round_up(nby, 8 * 8);
-				break;
-			default:
-				return -EINVAL;
-			}
+			/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
+			nbx = round_up(nbx, 32 * 8);
+			nby = round_up(nby, 16 * 8);
 			break;
 		default:
 			dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
@@ -746,9 +689,10 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
 		}
 	}
 
 	/* compute number of htile */
-	nbx = G_028D24_HTILE_WIDTH(track->htile_surface) ? nbx / 8 : nbx / 4;
-	nby = G_028D24_HTILE_HEIGHT(track->htile_surface) ? nby / 8 : nby / 4;
-	size = nbx * nby * 4;
+	nbx = nbx >> 3;
+	nby = nby >> 3;
+	/* size must be aligned on npipes * 2K boundary */
+	size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
 	size += track->htile_offset;
 	if (size > radeon_bo_size(track->htile_bo)) {
@@ -1492,6 +1436,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 		break;
 	case DB_HTILE_SURFACE:
 		track->htile_surface = radeon_get_ib_value(p, idx);
+		/* force 8x8 htile width and height */
+		ib[idx] |= 3;
 		track->db_dirty = true;
 		break;
 	case SQ_PGM_START_FS:
@@ -2568,3 +2514,196 @@ void r600_cs_legacy_init(void)
{
	r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm;
}
/*
* DMA
*/
/**
* r600_dma_cs_next_reloc() - parse next reloc
* @p: parser structure holding parsing context.
* @cs_reloc: reloc information
*
* Return the next reloc, do bo validation and compute
* GPU offset using the provided start.
**/
int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
struct radeon_cs_reloc **cs_reloc)
{
struct radeon_cs_chunk *relocs_chunk;
unsigned idx;
if (p->chunk_relocs_idx == -1) {
DRM_ERROR("No relocation chunk !\n");
return -EINVAL;
}
*cs_reloc = NULL;
relocs_chunk = &p->chunks[p->chunk_relocs_idx];
idx = p->dma_reloc_idx;
if (idx >= relocs_chunk->length_dw) {
DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
idx, relocs_chunk->length_dw);
return -EINVAL;
}
*cs_reloc = p->relocs_ptr[idx];
p->dma_reloc_idx++;
return 0;
}
#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
#define GET_DMA_COUNT(h) ((h) & 0x0000ffff)
#define GET_DMA_T(h) (((h) & 0x00800000) >> 23)
/**
* r600_dma_cs_parse() - parse the DMA IB
* @p: parser structure holding parsing context.
*
* Parses the DMA IB from the CS ioctl and updates
* the GPU addresses based on the reloc information and
* checks for errors. (R6xx-R7xx)
* Returns 0 for success and an error on failure.
**/
int r600_dma_cs_parse(struct radeon_cs_parser *p)
{
struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
struct radeon_cs_reloc *src_reloc, *dst_reloc;
u32 header, cmd, count, tiled;
volatile u32 *ib = p->ib.ptr;
u32 idx, idx_value;
u64 src_offset, dst_offset;
int r;
do {
if (p->idx >= ib_chunk->length_dw) {
DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
p->idx, ib_chunk->length_dw);
return -EINVAL;
}
idx = p->idx;
header = radeon_get_ib_value(p, idx);
cmd = GET_DMA_CMD(header);
count = GET_DMA_COUNT(header);
tiled = GET_DMA_T(header);
switch (cmd) {
case DMA_PACKET_WRITE:
r = r600_dma_cs_next_reloc(p, &dst_reloc);
if (r) {
DRM_ERROR("bad DMA_PACKET_WRITE\n");
return -EINVAL;
}
if (tiled) {
dst_offset = ib[idx+1];
dst_offset <<= 8;
ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
p->idx += count + 5;
} else {
dst_offset = ib[idx+1];
dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32;
ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
p->idx += count + 3;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
break;
case DMA_PACKET_COPY:
r = r600_dma_cs_next_reloc(p, &src_reloc);
if (r) {
DRM_ERROR("bad DMA_PACKET_COPY\n");
return -EINVAL;
}
r = r600_dma_cs_next_reloc(p, &dst_reloc);
if (r) {
DRM_ERROR("bad DMA_PACKET_COPY\n");
return -EINVAL;
}
if (tiled) {
idx_value = radeon_get_ib_value(p, idx + 2);
/* detile bit */
if (idx_value & (1 << 31)) {
/* tiled src, linear dst */
src_offset = ib[idx+1];
src_offset <<= 8;
ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
dst_offset = ib[idx+5];
dst_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
} else {
/* linear src, tiled dst */
src_offset = ib[idx+5];
src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
dst_offset = ib[idx+1];
dst_offset <<= 8;
ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
}
p->idx += 7;
} else {
src_offset = ib[idx+2];
src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
dst_offset = ib[idx+1];
dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
p->idx += 5;
}
if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
dev_warn(p->dev, "DMA copy src buffer too small (%llu %lu)\n",
src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
return -EINVAL;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
dev_warn(p->dev, "DMA write dst buffer too small (%llu %lu)\n",
dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
break;
case DMA_PACKET_CONSTANT_FILL:
if (p->family < CHIP_RV770) {
DRM_ERROR("Constant Fill is 7xx only !\n");
return -EINVAL;
}
r = r600_dma_cs_next_reloc(p, &dst_reloc);
if (r) {
DRM_ERROR("bad DMA_PACKET_WRITE\n");
return -EINVAL;
}
dst_offset = ib[idx+1];
dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16;
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
p->idx += 4;
break;
case DMA_PACKET_NOP:
p->idx += 1;
break;
default:
DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
return -EINVAL;
}
} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
for (r = 0; r < p->ib->length_dw; r++) {
printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
mdelay(1);
}
#endif
return 0;
}
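For orientation, the async DMA packet header that r600_dma_cs_parse() decodes above packs the packet type, a tiling flag, and a dword count into one dword, exactly as the GET_DMA_CMD/GET_DMA_T/GET_DMA_COUNT macros slice it. A small standalone sketch (the header value here is hypothetical, chosen only to exercise the bit fields):

```c
#include <stdint.h>
#include <stdio.h>

/* Same bit layout as the macros in the parser above. */
#define GET_DMA_CMD(h)   (((h) & 0xf0000000) >> 28) /* bits 31:28: packet type */
#define GET_DMA_T(h)     (((h) & 0x00800000) >> 23) /* bit 23: tiled flag      */
#define GET_DMA_COUNT(h) ((h) & 0x0000ffff)         /* bits 15:0: dword count  */

int main(void)
{
    uint32_t header = 0x40800010; /* hypothetical header for illustration */

    /* prints: cmd=4 tiled=1 count=16 */
    printf("cmd=%u tiled=%u count=%u\n",
           (unsigned)GET_DMA_CMD(header),
           (unsigned)GET_DMA_T(header),
           (unsigned)GET_DMA_COUNT(header));
    return 0;
}
```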
@@ -839,6 +839,7 @@ struct radeon_cs_parser {
 	struct radeon_cs_reloc *relocs;
 	struct radeon_cs_reloc **relocs_ptr;
 	struct list_head validated;
+	unsigned dma_reloc_idx;
 	/* indices of various chunks */
 	int chunk_ib_idx;
 	int chunk_relocs_idx;
@@ -1556,6 +1557,8 @@ struct radeon_device {
 	/* Register mmio */
 	resource_size_t rmmio_base;
 	resource_size_t rmmio_size;
+	/* protects concurrent MM_INDEX/DATA based register access */
+	spinlock_t mmio_idx_lock;
 	void __iomem *rmmio;
 	radeon_rreg_t mc_rreg;
 	radeon_wreg_t mc_wreg;
@@ -1631,8 +1634,10 @@ int radeon_device_init(struct radeon_device *rdev,
 void radeon_device_fini(struct radeon_device *rdev);
 int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
-uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
-void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
+		      bool always_indirect);
+void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
+		  bool always_indirect);
 u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
 void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
@@ -1648,9 +1653,11 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
 #define WREG8(reg, v) writeb(v, (rdev->rmmio) + (reg))
 #define RREG16(reg) readw((rdev->rmmio) + (reg))
 #define WREG16(reg, v) writew(v, (rdev->rmmio) + (reg))
-#define RREG32(reg) r100_mm_rreg(rdev, (reg))
-#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg)))
-#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v))
+#define RREG32(reg) r100_mm_rreg(rdev, (reg), false)
+#define RREG32_IDX(reg) r100_mm_rreg(rdev, (reg), true)
+#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg), false))
+#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v), false)
+#define WREG32_IDX(reg, v) r100_mm_wreg(rdev, (reg), (v), true)
 #define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
 #define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
 #define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg))
@@ -1675,7 +1682,7 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
 		tmp_ |= ((val) & ~(mask));	\
 		WREG32_PLL(reg, tmp_);		\
 	} while (0)
-#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg)))
+#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg), false))
 #define RREG32_IO(reg) r100_io_rreg(rdev, (reg))
 #define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v))
......
@@ -952,7 +952,7 @@ static struct radeon_asic r600_asic = {
 		.ib_execute = &r600_dma_ring_ib_execute,
 		.emit_fence = &r600_dma_fence_ring_emit,
 		.emit_semaphore = &r600_dma_semaphore_ring_emit,
-		.cs_parse = NULL,
+		.cs_parse = &r600_dma_cs_parse,
 		.ring_test = &r600_dma_ring_test,
 		.ib_test = &r600_dma_ib_test,
 		.is_lockup = &r600_dma_is_lockup,
@@ -1036,7 +1036,7 @@ static struct radeon_asic rs780_asic = {
 		.ib_execute = &r600_dma_ring_ib_execute,
 		.emit_fence = &r600_dma_fence_ring_emit,
 		.emit_semaphore = &r600_dma_semaphore_ring_emit,
-		.cs_parse = NULL,
+		.cs_parse = &r600_dma_cs_parse,
 		.ring_test = &r600_dma_ring_test,
 		.ib_test = &r600_dma_ib_test,
 		.is_lockup = &r600_dma_is_lockup,
@@ -1120,7 +1120,7 @@ static struct radeon_asic rv770_asic = {
 		.ib_execute = &r600_dma_ring_ib_execute,
 		.emit_fence = &r600_dma_fence_ring_emit,
 		.emit_semaphore = &r600_dma_semaphore_ring_emit,
-		.cs_parse = NULL,
+		.cs_parse = &r600_dma_cs_parse,
 		.ring_test = &r600_dma_ring_test,
 		.ib_test = &r600_dma_ib_test,
 		.is_lockup = &r600_dma_is_lockup,
@@ -1204,7 +1204,7 @@ static struct radeon_asic evergreen_asic = {
 		.ib_execute = &evergreen_dma_ring_ib_execute,
 		.emit_fence = &evergreen_dma_fence_ring_emit,
 		.emit_semaphore = &r600_dma_semaphore_ring_emit,
-		.cs_parse = NULL,
+		.cs_parse = &evergreen_dma_cs_parse,
 		.ring_test = &r600_dma_ring_test,
 		.ib_test = &r600_dma_ib_test,
 		.is_lockup = &r600_dma_is_lockup,
@@ -1288,7 +1288,7 @@ static struct radeon_asic sumo_asic = {
 		.ib_execute = &evergreen_dma_ring_ib_execute,
 		.emit_fence = &evergreen_dma_fence_ring_emit,
 		.emit_semaphore = &r600_dma_semaphore_ring_emit,
-		.cs_parse = NULL,
+		.cs_parse = &evergreen_dma_cs_parse,
 		.ring_test = &r600_dma_ring_test,
 		.ib_test = &r600_dma_ib_test,
 		.is_lockup = &r600_dma_is_lockup,
@@ -1372,7 +1372,7 @@ static struct radeon_asic btc_asic = {
 		.ib_execute = &evergreen_dma_ring_ib_execute,
 		.emit_fence = &evergreen_dma_fence_ring_emit,
 		.emit_semaphore = &r600_dma_semaphore_ring_emit,
-		.cs_parse = NULL,
+		.cs_parse = &evergreen_dma_cs_parse,
 		.ring_test = &r600_dma_ring_test,
 		.ib_test = &r600_dma_ib_test,
 		.is_lockup = &r600_dma_is_lockup,
@@ -1484,9 +1484,10 @@ static struct radeon_asic cayman_asic = {
 	},
 	[R600_RING_TYPE_DMA_INDEX] = {
 		.ib_execute = &cayman_dma_ring_ib_execute,
+		.ib_parse = &evergreen_dma_ib_parse,
 		.emit_fence = &evergreen_dma_fence_ring_emit,
 		.emit_semaphore = &r600_dma_semaphore_ring_emit,
-		.cs_parse = NULL,
+		.cs_parse = &evergreen_dma_cs_parse,
 		.ring_test = &r600_dma_ring_test,
 		.ib_test = &r600_dma_ib_test,
 		.is_lockup = &cayman_dma_is_lockup,
@@ -1494,9 +1495,10 @@ static struct radeon_asic cayman_asic = {
 	},
 	[CAYMAN_RING_TYPE_DMA1_INDEX] = {
 		.ib_execute = &cayman_dma_ring_ib_execute,
+		.ib_parse = &evergreen_dma_ib_parse,
 		.emit_fence = &evergreen_dma_fence_ring_emit,
 		.emit_semaphore = &r600_dma_semaphore_ring_emit,
-		.cs_parse = NULL,
+		.cs_parse = &evergreen_dma_cs_parse,
 		.ring_test = &r600_dma_ring_test,
 		.ib_test = &r600_dma_ib_test,
 		.is_lockup = &cayman_dma_is_lockup,
@@ -1609,9 +1611,10 @@ static struct radeon_asic trinity_asic = {
 	},
 	[R600_RING_TYPE_DMA_INDEX] = {
 		.ib_execute = &cayman_dma_ring_ib_execute,
+		.ib_parse = &evergreen_dma_ib_parse,
 		.emit_fence = &evergreen_dma_fence_ring_emit,
 		.emit_semaphore = &r600_dma_semaphore_ring_emit,
-		.cs_parse = NULL,
+		.cs_parse = &evergreen_dma_cs_parse,
 		.ring_test = &r600_dma_ring_test,
 		.ib_test = &r600_dma_ib_test,
 		.is_lockup = &cayman_dma_is_lockup,
@@ -1619,9 +1622,10 @@ static struct radeon_asic trinity_asic = {
 	},
 	[CAYMAN_RING_TYPE_DMA1_INDEX] = {
 		.ib_execute = &cayman_dma_ring_ib_execute,
+		.ib_parse = &evergreen_dma_ib_parse,
 		.emit_fence = &evergreen_dma_fence_ring_emit,
 		.emit_semaphore = &r600_dma_semaphore_ring_emit,
-		.cs_parse = NULL,
+		.cs_parse = &evergreen_dma_cs_parse,
 		.ring_test = &r600_dma_ring_test,
 		.ib_test = &r600_dma_ib_test,
 		.is_lockup = &cayman_dma_is_lockup,
@@ -1734,6 +1738,7 @@ static struct radeon_asic si_asic = {
 	},
 	[R600_RING_TYPE_DMA_INDEX] = {
 		.ib_execute = &cayman_dma_ring_ib_execute,
+		.ib_parse = &evergreen_dma_ib_parse,
 		.emit_fence = &evergreen_dma_fence_ring_emit,
 		.emit_semaphore = &r600_dma_semaphore_ring_emit,
 		.cs_parse = NULL,
@@ -1744,6 +1749,7 @@ static struct radeon_asic si_asic = {
 	},
 	[CAYMAN_RING_TYPE_DMA1_INDEX] = {
 		.ib_execute = &cayman_dma_ring_ib_execute,
+		.ib_parse = &evergreen_dma_ib_parse,
 		.emit_fence = &evergreen_dma_fence_ring_emit,
 		.emit_semaphore = &r600_dma_semaphore_ring_emit,
 		.cs_parse = NULL,
......
@@ -304,6 +304,7 @@ void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
 uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
 void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 int r600_cs_parse(struct radeon_cs_parser *p);
+int r600_dma_cs_parse(struct radeon_cs_parser *p);
 void r600_fence_ring_emit(struct radeon_device *rdev,
 			  struct radeon_fence *fence);
 void r600_semaphore_ring_emit(struct radeon_device *rdev,
@@ -430,6 +431,7 @@ u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc);
 int evergreen_irq_set(struct radeon_device *rdev);
 int evergreen_irq_process(struct radeon_device *rdev);
 extern int evergreen_cs_parse(struct radeon_cs_parser *p);
+extern int evergreen_dma_cs_parse(struct radeon_cs_parser *p);
 extern void evergreen_pm_misc(struct radeon_device *rdev);
 extern void evergreen_pm_prepare(struct radeon_device *rdev);
 extern void evergreen_pm_finish(struct radeon_device *rdev);
@@ -471,6 +473,7 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
 			uint64_t addr, unsigned count,
 			uint32_t incr, uint32_t flags);
 int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
+int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
 				struct radeon_ib *ib);
 bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
......
@@ -3246,11 +3246,9 @@ static uint32_t combios_detect_ram(struct drm_device *dev, int ram,
 	while (ram--) {
 		addr = ram * 1024 * 1024;
 		/* write to each page */
-		WREG32(RADEON_MM_INDEX, (addr) | RADEON_MM_APER);
-		WREG32(RADEON_MM_DATA, 0xdeadbeef);
+		WREG32_IDX((addr) | RADEON_MM_APER, 0xdeadbeef);
 		/* read back and verify */
-		WREG32(RADEON_MM_INDEX, (addr) | RADEON_MM_APER);
-		if (RREG32(RADEON_MM_DATA) != 0xdeadbeef)
+		if (RREG32_IDX((addr) | RADEON_MM_APER) != 0xdeadbeef)
 			return 0;
 	}
......
@@ -116,20 +116,6 @@ u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index)
 	}
 }
 
-u32 RADEON_READ_MM(drm_radeon_private_t *dev_priv, int addr)
-{
-	u32 ret;
-
-	if (addr < 0x10000)
-		ret = DRM_READ32(dev_priv->mmio, addr);
-	else {
-		DRM_WRITE32(dev_priv->mmio, RADEON_MM_INDEX, addr);
-		ret = DRM_READ32(dev_priv->mmio, RADEON_MM_DATA);
-	}
-
-	return ret;
-}
-
 static u32 R500_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
 {
 	u32 ret;
......
@@ -43,6 +43,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 		return 0;
 	}
 	chunk = &p->chunks[p->chunk_relocs_idx];
+	p->dma_reloc_idx = 0;
 	/* FIXME: we assume that each relocs use 4 dwords */
 	p->nrelocs = chunk->length_dw / 4;
 	p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
@@ -111,6 +112,18 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
 		} else
 			p->ring = RADEON_RING_TYPE_GFX_INDEX;
 		break;
+	case RADEON_CS_RING_DMA:
+		if (p->rdev->family >= CHIP_CAYMAN) {
+			if (p->priority > 0)
+				p->ring = R600_RING_TYPE_DMA_INDEX;
+			else
+				p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
+		} else if (p->rdev->family >= CHIP_R600) {
+			p->ring = R600_RING_TYPE_DMA_INDEX;
+		} else {
+			return -EINVAL;
+		}
+		break;
 	}
 	return 0;
 }
......
@@ -66,24 +66,25 @@ static void radeon_hide_cursor(struct drm_crtc *crtc)
 	struct radeon_device *rdev = crtc->dev->dev_private;
 
 	if (ASIC_IS_DCE4(rdev)) {
-		WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
-		WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
-		       EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
+		WREG32_IDX(EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset,
+			   EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
+			   EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
 	} else if (ASIC_IS_AVIVO(rdev)) {
-		WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
-		WREG32(RADEON_MM_DATA, (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
+		WREG32_IDX(AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset,
+			   (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
 	} else {
+		u32 reg;
 		switch (radeon_crtc->crtc_id) {
 		case 0:
-			WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL);
+			reg = RADEON_CRTC_GEN_CNTL;
 			break;
 		case 1:
-			WREG32(RADEON_MM_INDEX, RADEON_CRTC2_GEN_CNTL);
+			reg = RADEON_CRTC2_GEN_CNTL;
 			break;
 		default:
 			return;
 		}
-		WREG32_P(RADEON_MM_DATA, 0, ~RADEON_CRTC_CUR_EN);
+		WREG32_IDX(reg, RREG32_IDX(reg) & ~RADEON_CRTC_CUR_EN);
 	}
 }
......
@@ -1059,6 +1059,7 @@ int radeon_device_init(struct radeon_device *rdev,
 	/* Registers mapping */
 	/* TODO: block userspace mapping of io register */
+	spin_lock_init(&rdev->mmio_idx_lock);
 	rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
 	rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
 	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
......
@@ -378,8 +378,12 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
 	work->old_rbo = rbo;
 	obj = new_radeon_fb->obj;
 	rbo = gem_to_radeon_bo(obj);
+	spin_lock(&rbo->tbo.bdev->fence_lock);
 	if (rbo->tbo.sync_obj)
 		work->fence = radeon_fence_ref(rbo->tbo.sync_obj);
+	spin_unlock(&rbo->tbo.bdev->fence_lock);
+
 	INIT_WORK(&work->work, radeon_unpin_work_func);
 	/* We borrow the event spin lock for protecting unpin_work */
......
@@ -66,9 +66,11 @@
  * 2.23.0 - allow STRMOUT_BASE_UPDATE on RS780 and RS880
  * 2.24.0 - eg only: allow MIP_ADDRESS=0 for MSAA textures
  * 2.25.0 - eg+: new info request for num SE and num SH
+ * 2.26.0 - r600-eg: fix htile size computation
+ * 2.27.0 - r600-SI: Add CS ioctl support for async DMA
  */
 #define KMS_DRIVER_MAJOR	2
-#define KMS_DRIVER_MINOR	25
+#define KMS_DRIVER_MINOR	27
 #define KMS_DRIVER_PATCHLEVEL	0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
......
@@ -366,7 +366,6 @@ extern int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file
 extern u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv);
 extern void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc);
 extern void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base);
-extern u32 RADEON_READ_MM(drm_radeon_private_t *dev_priv, int addr);
 extern void radeon_freelist_reset(struct drm_device * dev);
 extern struct drm_buf *radeon_freelist_get(struct drm_device * dev);
......
@@ -96,9 +96,9 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
 	}
 	if (domain & RADEON_GEM_DOMAIN_CPU) {
 		if (rbo->rdev->flags & RADEON_IS_AGP) {
-			rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
+			rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM;
 		} else {
-			rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
+			rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
 		}
 	}
 	if (!c)
......
@@ -917,6 +917,7 @@ struct drm_radeon_gem_va {
 /* The second dword of RADEON_CHUNK_ID_FLAGS is a uint32 that sets the ring type */
 #define RADEON_CS_RING_GFX          0
 #define RADEON_CS_RING_COMPUTE      1
+#define RADEON_CS_RING_DMA          2
 /* The third dword of RADEON_CHUNK_ID_FLAGS is a sint32 that sets the priority */
 /* 0 = normal, + = higher priority, - = lower priority */
......
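On the userspace side, the new ring is selected through the flags chunk of the CS ioctl: per the comments above, the second dword of RADEON_CHUNK_ID_FLAGS chooses the ring and the third sets the priority. A hedged sketch of just that chunk's payload (chunk wiring and submission are omitted; the defines come from the UAPI header changed above, whose include path may vary by setup):

```c
#include <stdint.h>
#include <drm/radeon_drm.h> /* UAPI header providing RADEON_CS_RING_DMA */

/* Payload for a RADEON_CHUNK_ID_FLAGS chunk targeting the async DMA ring. */
static const uint32_t cs_flags[3] = {
    0,                  /* dword 0: CS flags (none set here) */
    RADEON_CS_RING_DMA, /* dword 1: ring type */
    0,                  /* dword 2: priority (0 = normal) */
};
```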