Commit 01175474 authored by Srinivasan Shanmugam, committed by Alex Deucher

drm/radeon: Cleanup radeon/radeon_fence.c

Fixes the following:

WARNING: Possible repeated word: 'Fences'
WARNING: Missing a blank line after declarations
WARNING: braces {} are not necessary for single statement blocks
WARNING: braces {} are not necessary for any arm of this statement
WARNING: Prefer 'unsigned int' to bare use of 'unsigned'
WARNING: quoted string split across lines
WARNING: Block comments use * on subsequent lines
WARNING: Block comments use a trailing */ on a separate line
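
For readers who don't run checkpatch, here is a minimal before/after sketch of the brace, bare-'unsigned' and split-string classes above (hypothetical userspace code, not taken from radeon_fence.c):

#include <stdio.h>

/* Before: triggers "Prefer 'unsigned int'", "braces {} are not
 * necessary for single statement blocks" and "quoted string split
 * across lines".
 */
static unsigned clamp_rings(unsigned n)
{
	if (n > 8) {
		n = 8;
	}
	printf("clamped to "
	       "%u rings\n", n);
	return n;
}

/* After: the checkpatch-clean equivalent. */
static unsigned int clamp_rings_clean(unsigned int n)
{
	if (n > 8)
		n = 8;

	printf("clamped to %u rings\n", n);
	return n;
}

int main(void)
{
	return clamp_rings(12) == clamp_rings_clean(12) ? 0 : 1;
}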

Cc: Guchun Chen <guchun.chen@amd.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: "Pan, Xinhui" <Xinhui.Pan@amd.com>
Signed-off-by: Srinivasan Shanmugam <srinivasan.shanmugam@amd.com>
Reviewed-by: Guchun Chen <guchun.chen@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 176b9dd2
@@ -45,7 +45,6 @@
 #include "radeon_trace.h"
 
 /*
- * Fences
  * Fences mark an event in the GPUs pipeline and are used
  * for GPU/CPU synchronization. When the fence is written,
  * it is expected that all buffers associated with that fence
@@ -67,10 +66,10 @@
 static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
 {
 	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
+
 	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
-		if (drv->cpu_addr) {
+		if (drv->cpu_addr)
 			*drv->cpu_addr = cpu_to_le32(seq);
-		}
 	} else {
 		WREG32(drv->scratch_reg, seq);
 	}
@@ -91,11 +90,10 @@ static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
 	u32 seq = 0;
 
 	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
-		if (drv->cpu_addr) {
+		if (drv->cpu_addr)
 			seq = le32_to_cpu(*drv->cpu_addr);
-		} else {
+		else
 			seq = lower_32_bits(atomic64_read(&drv->last_seq));
-		}
 	} else {
 		seq = RREG32(drv->scratch_reg);
 	}
@@ -139,9 +137,9 @@ int radeon_fence_emit(struct radeon_device *rdev,
 	/* we are protected by the ring emission mutex */
 	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
-	if ((*fence) == NULL) {
+	if ((*fence) == NULL)
 		return -ENOMEM;
-	}
+
 	(*fence)->rdev = rdev;
 	(*fence)->seq = seq = ++rdev->fence_drv[ring].sync_seq[ring];
 	(*fence)->ring = ring;
@@ -163,7 +161,8 @@ int radeon_fence_emit(struct radeon_device *rdev,
  * for the fence locking itself, so unlocked variants are used for
  * fence_signal, and remove_wait_queue.
  */
-static int radeon_fence_check_signaled(wait_queue_entry_t *wait, unsigned mode, int flags, void *key)
+static int radeon_fence_check_signaled(wait_queue_entry_t *wait,
+				       unsigned int mode, int flags, void *key)
 {
 	struct radeon_fence *fence;
 	u64 seq;
@@ -197,7 +196,7 @@ static int radeon_fence_check_signaled(wait_queue_entry_t *wait, unsigned mode,
 static bool radeon_fence_activity(struct radeon_device *rdev, int ring)
 {
 	uint64_t seq, last_seq, last_emitted;
-	unsigned count_loop = 0;
+	unsigned int count_loop = 0;
 	bool wake = false;
 
 	/* Note there is a scenario here for an infinite loop but it's
@@ -231,9 +230,9 @@ static bool radeon_fence_activity(struct radeon_device *rdev, int ring)
 			seq |= last_emitted & 0xffffffff00000000LL;
 		}
 
-		if (seq <= last_seq || seq > last_emitted) {
+		if (seq <= last_seq || seq > last_emitted)
 			break;
-		}
+
 		/* If we loop over we don't want to return without
 		 * checking if a fence is signaled as it means that the
 		 * seq we just read is different from the previous on.
@@ -296,8 +295,7 @@ static void radeon_fence_check_lockup(struct work_struct *work)
 	else if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
 		/* good news we believe it's a lockup */
-		dev_warn(rdev->dev, "GPU lockup (current fence id "
-			 "0x%016llx last fence id 0x%016llx on ring %d)\n",
+		dev_warn(rdev->dev, "GPU lockup (current fence id 0x%016llx last fence id 0x%016llx on ring %d)\n",
 			 (uint64_t)atomic64_read(&fence_drv->last_seq),
 			 fence_drv->sync_seq[ring], ring);
@@ -338,16 +336,16 @@ void radeon_fence_process(struct radeon_device *rdev, int ring)
 * radeon_fence_signaled().
 */
 static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
-				      u64 seq, unsigned ring)
+				      u64 seq, unsigned int ring)
 {
-	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
+	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
 		return true;
-	}
+
 	/* poll new last sequence at least once */
 	radeon_fence_process(rdev, ring);
-	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
+	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
 		return true;
-	}
+
 	return false;
 }
@@ -355,20 +353,18 @@ static bool radeon_fence_is_signaled(struct dma_fence *f)
 {
 	struct radeon_fence *fence = to_radeon_fence(f);
 	struct radeon_device *rdev = fence->rdev;
-	unsigned ring = fence->ring;
+	unsigned int ring = fence->ring;
 	u64 seq = fence->seq;
 
-	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
+	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
 		return true;
-	}
 
 	if (down_read_trylock(&rdev->exclusive_lock)) {
 		radeon_fence_process(rdev, ring);
 		up_read(&rdev->exclusive_lock);
 
-		if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
+		if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
 			return true;
-		}
 	}
+
 	return false;
 }
@@ -451,7 +447,7 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
 */
 static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
 {
-	unsigned i;
+	unsigned int i;
 
 	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
 		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i))
@@ -549,9 +545,8 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeo
 	seq[fence->ring] = fence->seq;
 	r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, timeout);
-	if (r <= 0) {
+	if (r <= 0)
 		return r;
-	}
 
 	dma_fence_signal(&fence->base);
 	return r;
@@ -571,11 +566,11 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeo
 int radeon_fence_wait(struct radeon_fence *fence, bool intr)
 {
 	long r = radeon_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
-	if (r > 0) {
+
+	if (r > 0)
 		return 0;
-	} else {
+	else
 		return r;
-	}
 }
 
 /**
@@ -596,15 +591,14 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
 			  bool intr)
 {
 	uint64_t seq[RADEON_NUM_RINGS];
-	unsigned i, num_rings = 0;
+	unsigned int i, num_rings = 0;
 	long r;
 
 	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
 		seq[i] = 0;
 
-		if (!fences[i]) {
+		if (!fences[i])
 			continue;
-		}
 
 		seq[i] = fences[i]->seq;
 		++num_rings;
@@ -615,9 +609,9 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
 		return -ENOENT;
 
 	r = radeon_fence_wait_seq_timeout(rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
-	if (r < 0) {
+	if (r < 0)
 		return r;
-	}
+
 	return 0;
 }
@@ -638,13 +632,16 @@ int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
 	seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
 	if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
-		/* nothing to wait for, last_seq is
-		   already the last emited fence */
+		/* nothing to wait for, last_seq is already
+		 * the last emited fence
+		 */
 		return -ENOENT;
 	}
+
 	r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
 	if (r < 0)
 		return r;
+
 	return 0;
 }
@@ -704,9 +701,8 @@ void radeon_fence_unref(struct radeon_fence **fence)
 	struct radeon_fence *tmp = *fence;
 
 	*fence = NULL;
-	if (tmp) {
+	if (tmp)
 		dma_fence_put(&tmp->base);
-	}
 }
 
 /**
@@ -719,7 +715,7 @@ void radeon_fence_unref(struct radeon_fence **fence)
 * Returns the number of emitted fences on the ring. Used by the
 * dynpm code to ring track activity.
 */
-unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
+unsigned int radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
 {
 	uint64_t emitted;
@@ -730,10 +726,10 @@ unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
 	emitted = rdev->fence_drv[ring].sync_seq[ring]
 		- atomic64_read(&rdev->fence_drv[ring].last_seq);
 	/* to avoid 32bits warp around */
-	if (emitted > 0x10000000) {
+	if (emitted > 0x10000000)
 		emitted = 0x10000000;
-	}
-	return (unsigned)emitted;
+
+	return (unsigned int)emitted;
 }
 
 /**
@@ -751,19 +747,16 @@ bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
 {
 	struct radeon_fence_driver *fdrv;
 
-	if (!fence) {
+	if (!fence)
 		return false;
-	}
 
-	if (fence->ring == dst_ring) {
+	if (fence->ring == dst_ring)
 		return false;
-	}
 
 	/* we are protected by the ring mutex */
 	fdrv = &fence->rdev->fence_drv[dst_ring];
-	if (fence->seq <= fdrv->sync_seq[fence->ring]) {
+	if (fence->seq <= fdrv->sync_seq[fence->ring])
 		return false;
-	}
 
 	return true;
 }
@@ -780,23 +773,21 @@ bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
 void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
 {
 	struct radeon_fence_driver *dst, *src;
-	unsigned i;
+	unsigned int i;
 
-	if (!fence) {
+	if (!fence)
 		return;
-	}
 
-	if (fence->ring == dst_ring) {
+	if (fence->ring == dst_ring)
 		return;
-	}
 
 	/* we are protected by the ring mutex */
 	src = &fence->rdev->fence_drv[fence->ring];
 	dst = &fence->rdev->fence_drv[dst_ring];
 	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-		if (i == dst_ring) {
+		if (i == dst_ring)
 			continue;
-		}
+
 		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
 	}
 }
@@ -895,9 +886,8 @@ void radeon_fence_driver_init(struct radeon_device *rdev)
 	int ring;
 
 	init_waitqueue_head(&rdev->fence_queue);
-	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
+	for (ring = 0; ring < RADEON_NUM_RINGS; ring++)
 		radeon_fence_driver_init_ring(rdev, ring);
-	}
 
 	radeon_debugfs_fence_init(rdev);
 }
@@ -1023,6 +1013,7 @@ static const char *radeon_fence_get_driver_name(struct dma_fence *fence)
 static const char *radeon_fence_get_timeline_name(struct dma_fence *f)
 {
 	struct radeon_fence *fence = to_radeon_fence(f);
+
 	switch (fence->ring) {
 	case RADEON_RING_TYPE_GFX_INDEX: return "radeon.gfx";
 	case CAYMAN_RING_TYPE_CP1_INDEX: return "radeon.cp1";
@@ -1032,7 +1023,9 @@ static const char *radeon_fence_get_timeline_name(struct dma_fence *f)
 	case R600_RING_TYPE_UVD_INDEX: return "radeon.uvd";
 	case TN_RING_TYPE_VCE1_INDEX: return "radeon.vce1";
 	case TN_RING_TYPE_VCE2_INDEX: return "radeon.vce2";
-	default: WARN_ON_ONCE(1); return "radeon.unk";
+	default:
+		WARN_ON_ONCE(1);
+		return "radeon.unk";
 	}
 }