Commit c5bc1c93 authored by Linus Torvalds

Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull nouveau and radeon fixes from Dave Airlie:
 "Just some nouveau and radeon/amdgpu fixes.

  The nouveau fixes look large as the firmware context files are
  regenerated, but the actual change is quite small"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux:
  drm/radeon: make some dpm errors debug only
  drm/nouveau/volt/pwm/gk104: fix an off-by-one resulting in the voltage not being set
  drm/nouveau/nvif: allow userspace access to its own client object
  drm/nouveau/gr/gf100-: fix oops when calling zbc methods
  drm/nouveau/gr/gf117-: assume no PPC if NV_PGRAPH_GPC_GPM_PD_PES_TPC_ID_MASK is zero
  drm/nouveau/gr/gf117-: read NV_PGRAPH_GPC_GPM_PD_PES_TPC_ID_MASK from correct GPC
  drm/nouveau/gr/gf100-: split out per-gpc address calculation macro
  drm/nouveau/bios: return actual size of the buffer retrieved via _ROM
  drm/nouveau/instmem: protect instobj list with a spinlock
  drm/nouveau/pci: enable c800 magic for some unknown Samsung laptop
  drm/nouveau/pci: enable c800 magic for Clevo P157SM
  drm/radeon: make rv770_set_sw_state failures non-fatal
  drm/amdgpu: move dependency handling out of atomic section v2
  drm/amdgpu: optimize scheduler fence handling
  drm/amdgpu: remove vm->mutex
  drm/amdgpu: add mutex for ba_va->valids/invalids
  drm/amdgpu: adapt vce session create interface changes
  drm/amdgpu: vce use multiple cache surface starting from stoney
  drm/amdgpu: reset vce trap interrupt flag
parents 818aba30 8c14f72b
@@ -496,6 +496,7 @@ struct amdgpu_bo_va_mapping {
 /* bo virtual addresses in a specific vm */
 struct amdgpu_bo_va {
+    struct mutex mutex;
     /* protected by bo being reserved */
     struct list_head bo_list;
     struct fence *last_pt_update;
@@ -928,8 +929,6 @@ struct amdgpu_vm_id {
 };
 struct amdgpu_vm {
-    struct mutex mutex;
     struct rb_root va;
     /* protecting invalidated */
......
@@ -784,8 +784,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
     struct amdgpu_device *adev = dev->dev_private;
     union drm_amdgpu_cs *cs = data;
-    struct amdgpu_fpriv *fpriv = filp->driver_priv;
-    struct amdgpu_vm *vm = &fpriv->vm;
     struct amdgpu_cs_parser parser = {};
     bool reserved_buffers = false;
     int i, r;
@@ -803,7 +801,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
         r = amdgpu_cs_handle_lockup(adev, r);
         return r;
     }
-    mutex_lock(&vm->mutex);
     r = amdgpu_cs_parser_relocs(&parser);
     if (r == -ENOMEM)
         DRM_ERROR("Not enough memory for command submission!\n");
@@ -888,7 +885,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 out:
     amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
-    mutex_unlock(&vm->mutex);
     r = amdgpu_cs_handle_lockup(adev, r);
     return r;
 }
......
@@ -115,12 +115,9 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
     struct amdgpu_vm *vm = &fpriv->vm;
     struct amdgpu_bo_va *bo_va;
     int r;
-    mutex_lock(&vm->mutex);
     r = amdgpu_bo_reserve(rbo, false);
-    if (r) {
-        mutex_unlock(&vm->mutex);
+    if (r)
         return r;
-    }
     bo_va = amdgpu_vm_bo_find(vm, rbo);
     if (!bo_va) {
@@ -129,7 +126,6 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
         ++bo_va->ref_count;
     }
     amdgpu_bo_unreserve(rbo);
-    mutex_unlock(&vm->mutex);
     return 0;
 }
@@ -142,10 +138,8 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
     struct amdgpu_vm *vm = &fpriv->vm;
     struct amdgpu_bo_va *bo_va;
     int r;
-    mutex_lock(&vm->mutex);
     r = amdgpu_bo_reserve(rbo, true);
     if (r) {
-        mutex_unlock(&vm->mutex);
         dev_err(adev->dev, "leaking bo va because "
             "we fail to reserve bo (%d)\n", r);
         return;
@@ -157,7 +151,6 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
         }
     }
     amdgpu_bo_unreserve(rbo);
-    mutex_unlock(&vm->mutex);
 }
 static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
@@ -553,7 +546,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
     gobj = drm_gem_object_lookup(dev, filp, args->handle);
     if (gobj == NULL)
         return -ENOENT;
-    mutex_lock(&fpriv->vm.mutex);
     rbo = gem_to_amdgpu_bo(gobj);
     INIT_LIST_HEAD(&list);
     INIT_LIST_HEAD(&duplicates);
@@ -568,7 +560,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
     }
     r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
     if (r) {
-        mutex_unlock(&fpriv->vm.mutex);
         drm_gem_object_unreference_unlocked(gobj);
         return r;
     }
@@ -577,7 +568,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
     if (!bo_va) {
         ttm_eu_backoff_reservation(&ticket, &list);
         drm_gem_object_unreference_unlocked(gobj);
-        mutex_unlock(&fpriv->vm.mutex);
         return -ENOENT;
     }
@@ -602,7 +592,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
     ttm_eu_backoff_reservation(&ticket, &list);
     if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
         amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
-    mutex_unlock(&fpriv->vm.mutex);
     drm_gem_object_unreference_unlocked(gobj);
     return r;
 }
......
@@ -392,7 +392,10 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
     ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
     ib->ptr[ib->length_dw++] = handle;
-    ib->ptr[ib->length_dw++] = 0x00000030; /* len */
+    if ((ring->adev->vce.fw_version >> 24) >= 52)
+        ib->ptr[ib->length_dw++] = 0x00000040; /* len */
+    else
+        ib->ptr[ib->length_dw++] = 0x00000030; /* len */
     ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
     ib->ptr[ib->length_dw++] = 0x00000000;
     ib->ptr[ib->length_dw++] = 0x00000042;
@@ -404,6 +407,12 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
     ib->ptr[ib->length_dw++] = 0x00000100;
     ib->ptr[ib->length_dw++] = 0x0000000c;
     ib->ptr[ib->length_dw++] = 0x00000000;
+    if ((ring->adev->vce.fw_version >> 24) >= 52) {
+        ib->ptr[ib->length_dw++] = 0x00000000;
+        ib->ptr[ib->length_dw++] = 0x00000000;
+        ib->ptr[ib->length_dw++] = 0x00000000;
+        ib->ptr[ib->length_dw++] = 0x00000000;
+    }
     ib->ptr[ib->length_dw++] = 0x00000014; /* len */
     ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
......
@@ -922,8 +922,9 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
         bo_va = list_first_entry(&vm->invalidated,
             struct amdgpu_bo_va, vm_status);
         spin_unlock(&vm->status_lock);
+        mutex_lock(&bo_va->mutex);
         r = amdgpu_vm_bo_update(adev, bo_va, NULL);
+        mutex_unlock(&bo_va->mutex);
         if (r)
             return r;
@@ -967,7 +968,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
     INIT_LIST_HEAD(&bo_va->valids);
     INIT_LIST_HEAD(&bo_va->invalids);
     INIT_LIST_HEAD(&bo_va->vm_status);
+    mutex_init(&bo_va->mutex);
     list_add_tail(&bo_va->bo_list, &bo->va);
     return bo_va;
@@ -1045,7 +1046,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
     mapping->offset = offset;
     mapping->flags = flags;
+    mutex_lock(&bo_va->mutex);
     list_add(&mapping->list, &bo_va->invalids);
+    mutex_unlock(&bo_va->mutex);
     spin_lock(&vm->it_lock);
     interval_tree_insert(&mapping->it, &vm->va);
     spin_unlock(&vm->it_lock);
@@ -1121,7 +1124,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
     bool valid = true;
     saddr /= AMDGPU_GPU_PAGE_SIZE;
+    mutex_lock(&bo_va->mutex);
     list_for_each_entry(mapping, &bo_va->valids, list) {
         if (mapping->it.start == saddr)
             break;
@@ -1135,10 +1138,12 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
                 break;
         }
-        if (&mapping->list == &bo_va->invalids)
+        if (&mapping->list == &bo_va->invalids) {
+            mutex_unlock(&bo_va->mutex);
             return -ENOENT;
+        }
     }
+    mutex_unlock(&bo_va->mutex);
     list_del(&mapping->list);
     spin_lock(&vm->it_lock);
     interval_tree_remove(&mapping->it, &vm->va);
@@ -1190,8 +1195,8 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
         spin_unlock(&vm->it_lock);
         kfree(mapping);
     }
     fence_put(bo_va->last_pt_update);
+    mutex_destroy(&bo_va->mutex);
     kfree(bo_va);
 }
@@ -1236,7 +1241,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
         vm->ids[i].id = 0;
         vm->ids[i].flushed_updates = NULL;
     }
-    mutex_init(&vm->mutex);
     vm->va = RB_ROOT;
     spin_lock_init(&vm->status_lock);
     INIT_LIST_HEAD(&vm->invalidated);
@@ -1320,7 +1324,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
         fence_put(vm->ids[i].flushed_updates);
     }
-    mutex_destroy(&vm->mutex);
 }
 /**
......
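Taken together, the amdgpu changes above ("remove vm->mutex" plus "add mutex for ba_va->valids/invalids") trade one per-VM mutex for a per-mapping mutex, alongside the existing it_lock/status_lock spinlocks, so ioctls that touch different buffer mappings no longer serialize on the whole VM. Below is a minimal, hypothetical user-space sketch of that pattern using pthreads; the bo_va name is borrowed from the patch, but the structure and functions are made up for illustration and are not driver code.

/* Illustrative only, not part of the patch: each bo_va carries its own lock
 * instead of sharing a single vm-wide mutex, so two mappings lock
 * independently.  Single-threaded here just to show the shape. */
#include <pthread.h>
#include <stdio.h>

struct bo_va {
    pthread_mutex_t mutex;   /* was: one mutex in the containing vm */
    int valids, invalids;
};

static void bo_va_map(struct bo_va *v)
{
    pthread_mutex_lock(&v->mutex);   /* serializes only this mapping */
    v->invalids++;
    pthread_mutex_unlock(&v->mutex);
}

int main(void)
{
    struct bo_va a = { .mutex = PTHREAD_MUTEX_INITIALIZER };
    struct bo_va b = { .mutex = PTHREAD_MUTEX_INITIALIZER };
    bo_va_map(&a);   /* a and b no longer contend on a shared lock */
    bo_va_map(&b);
    printf("%d %d\n", a.invalids, b.invalids);
    return 0;
}

Built with -pthread, this prints "1 1"; the only point is that the two mappings take different locks.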
@@ -40,6 +40,9 @@
 #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04
 #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10
+#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616
+#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617
+#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618
 #define VCE_V3_0_FW_SIZE (384 * 1024)
 #define VCE_V3_0_STACK_SIZE (64 * 1024)
@@ -130,9 +133,11 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
     /* set BUSY flag */
     WREG32_P(mmVCE_STATUS, 1, ~1);
-    WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK,
-        ~VCE_VCPU_CNTL__CLK_EN_MASK);
+    if (adev->asic_type >= CHIP_STONEY)
+        WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
+    else
+        WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK,
+            ~VCE_VCPU_CNTL__CLK_EN_MASK);
     WREG32_P(mmVCE_SOFT_RESET,
         VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
@@ -391,8 +396,12 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
     WREG32(mmVCE_LMI_SWAP_CNTL, 0);
     WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
     WREG32(mmVCE_LMI_VM_CTRL, 0);
-    WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));
+    if (adev->asic_type >= CHIP_STONEY) {
+        WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR0, (adev->vce.gpu_addr >> 8));
+        WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR1, (adev->vce.gpu_addr >> 8));
+        WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR2, (adev->vce.gpu_addr >> 8));
+    } else
+        WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));
     offset = AMDGPU_VCE_FIRMWARE_OFFSET;
     size = VCE_V3_0_FW_SIZE;
     WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
@@ -576,6 +585,11 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
                       struct amdgpu_iv_entry *entry)
 {
     DRM_DEBUG("IH: VCE\n");
+    WREG32_P(mmVCE_SYS_INT_STATUS,
+        VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK,
+        ~VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK);
     switch (entry->src_data) {
     case 0:
         amdgpu_fence_process(&adev->vce.ring[0]);
......
@@ -30,8 +30,7 @@
 #define CREATE_TRACE_POINTS
 #include "gpu_sched_trace.h"
-static struct amd_sched_job *
-amd_sched_entity_pop_job(struct amd_sched_entity *entity);
+static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
 static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
 struct kmem_cache *sched_fence_slab;
@@ -64,36 +63,36 @@ static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
 }
 /**
- * Select next job from a specified run queue with round robin policy.
- * Return NULL if nothing available.
+ * Select an entity which could provide a job to run
+ *
+ * @rq The run queue to check.
+ *
+ * Try to find a ready entity, returns NULL if none found.
  */
-static struct amd_sched_job *
-amd_sched_rq_select_job(struct amd_sched_rq *rq)
+static struct amd_sched_entity *
+amd_sched_rq_select_entity(struct amd_sched_rq *rq)
 {
     struct amd_sched_entity *entity;
-    struct amd_sched_job *sched_job;
     spin_lock(&rq->lock);
     entity = rq->current_entity;
     if (entity) {
         list_for_each_entry_continue(entity, &rq->entities, list) {
-            sched_job = amd_sched_entity_pop_job(entity);
-            if (sched_job) {
+            if (amd_sched_entity_is_ready(entity)) {
                 rq->current_entity = entity;
                 spin_unlock(&rq->lock);
-                return sched_job;
+                return entity;
             }
         }
     }
     list_for_each_entry(entity, &rq->entities, list) {
-        sched_job = amd_sched_entity_pop_job(entity);
-        if (sched_job) {
+        if (amd_sched_entity_is_ready(entity)) {
             rq->current_entity = entity;
             spin_unlock(&rq->lock);
-            return sched_job;
+            return entity;
         }
         if (entity == rq->current_entity)
@@ -176,6 +175,24 @@ static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
     return false;
 }
+/**
+ * Check if entity is ready
+ *
+ * @entity The pointer to a valid scheduler entity
+ *
+ * Return true if entity could provide a job.
+ */
+static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
+{
+    if (kfifo_is_empty(&entity->job_queue))
+        return false;
+    if (ACCESS_ONCE(entity->dependency))
+        return false;
+    return true;
+}
 /**
  * Destroy a context entity
  *
@@ -211,32 +228,53 @@ static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
     amd_sched_wakeup(entity->sched);
 }
+static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
+{
+    struct amd_gpu_scheduler *sched = entity->sched;
+    struct fence *fence = entity->dependency;
+    struct amd_sched_fence *s_fence;
+    if (fence->context == entity->fence_context) {
+        /* We can ignore fences from ourself */
+        fence_put(entity->dependency);
+        return false;
+    }
+    s_fence = to_amd_sched_fence(fence);
+    if (s_fence && s_fence->sched == sched) {
+        /* Fence is from the same scheduler */
+        if (test_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &fence->flags)) {
+            /* Ignore it when it is already scheduled */
+            fence_put(entity->dependency);
+            return false;
+        }
+        /* Wait for fence to be scheduled */
+        entity->cb.func = amd_sched_entity_wakeup;
+        list_add_tail(&entity->cb.node, &s_fence->scheduled_cb);
+        return true;
+    }
+    if (!fence_add_callback(entity->dependency, &entity->cb,
+                amd_sched_entity_wakeup))
+        return true;
+    fence_put(entity->dependency);
+    return false;
+}
 static struct amd_sched_job *
 amd_sched_entity_pop_job(struct amd_sched_entity *entity)
 {
     struct amd_gpu_scheduler *sched = entity->sched;
     struct amd_sched_job *sched_job;
-    if (ACCESS_ONCE(entity->dependency))
-        return NULL;
     if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
         return NULL;
-    while ((entity->dependency = sched->ops->dependency(sched_job))) {
-        if (entity->dependency->context == entity->fence_context) {
-            /* We can ignore fences from ourself */
-            fence_put(entity->dependency);
-            continue;
-        }
-        if (fence_add_callback(entity->dependency, &entity->cb,
-                    amd_sched_entity_wakeup))
-            fence_put(entity->dependency);
-        else
+    while ((entity->dependency = sched->ops->dependency(sched_job)))
+        if (amd_sched_entity_add_dependency_cb(entity))
             return NULL;
-    }
     return sched_job;
 }
@@ -304,22 +342,22 @@ static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
 }
 /**
- * Select next to run
+ * Select next entity to process
  */
-static struct amd_sched_job *
-amd_sched_select_job(struct amd_gpu_scheduler *sched)
+static struct amd_sched_entity *
+amd_sched_select_entity(struct amd_gpu_scheduler *sched)
 {
-    struct amd_sched_job *sched_job;
+    struct amd_sched_entity *entity;
     if (!amd_sched_ready(sched))
         return NULL;
     /* Kernel run queue has higher priority than normal run queue*/
-    sched_job = amd_sched_rq_select_job(&sched->kernel_rq);
-    if (sched_job == NULL)
-        sched_job = amd_sched_rq_select_job(&sched->sched_rq);
-    return sched_job;
+    entity = amd_sched_rq_select_entity(&sched->kernel_rq);
+    if (entity == NULL)
+        entity = amd_sched_rq_select_entity(&sched->sched_rq);
+    return entity;
 }
 static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
@@ -381,13 +419,16 @@ static int amd_sched_main(void *param)
         unsigned long flags;
         wait_event_interruptible(sched->wake_up_worker,
-            kthread_should_stop() ||
-            (sched_job = amd_sched_select_job(sched)));
+            (entity = amd_sched_select_entity(sched)) ||
+            kthread_should_stop());
+        if (!entity)
+            continue;
+        sched_job = amd_sched_entity_pop_job(entity);
         if (!sched_job)
             continue;
-        entity = sched_job->s_entity;
         s_fence = sched_job->s_fence;
         if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
@@ -400,6 +441,7 @@ static int amd_sched_main(void *param)
         atomic_inc(&sched->hw_rq_count);
         fence = sched->ops->run_job(sched_job);
+        amd_sched_fence_scheduled(s_fence);
         if (fence) {
             r = fence_add_callback(fence, &s_fence->cb,
                         amd_sched_process_job);
......
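In the scheduler rework above, job selection is split into two steps: under the run-queue spinlock only a cheap readiness test runs (amd_sched_entity_is_ready), and the job is actually popped and its dependencies resolved afterwards in amd_sched_main, outside the atomic section. Dependency handling moves into amd_sched_entity_add_dependency_cb, where a dependency fence from the same scheduler only needs to wait until the earlier job has been scheduled to the hardware ring (signalled via amd_sched_fence_scheduled) rather than until it completes. The standalone toy model below sketches the select-entity-then-pop-job flow; the names echo the patch, but everything else is a simplification and not the driver's code.

/* Illustrative only, not driver code: scan a run queue for a *ready* entity
 * (non-empty queue, no unresolved dependency), then pop the job from that
 * entity afterwards.  Locking is deliberately omitted in this sketch. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct job { int id; };

struct entity {
    struct job *job_queue[4];   /* stand-in for the kfifo */
    int head, tail;
    bool dependency;            /* stand-in for entity->dependency */
};

static bool entity_is_ready(const struct entity *e)
{
    return e->head != e->tail && !e->dependency;
}

static struct job *entity_pop_job(struct entity *e)
{
    return e->head != e->tail ? e->job_queue[e->head++ & 3] : NULL;
}

/* Selection returns the entity; the caller pops the job later. */
static struct entity *rq_select_entity(struct entity *rq, size_t n)
{
    for (size_t i = 0; i < n; i++)
        if (entity_is_ready(&rq[i]))
            return &rq[i];
    return NULL;
}

int main(void)
{
    struct job j = { .id = 42 };
    struct entity rq[2] = {
        { .dependency = true },                 /* blocked entity */
        { .job_queue = { &j }, .tail = 1 },     /* ready entity */
    };
    struct entity *e = rq_select_entity(rq, 2);
    if (e)
        printf("run job %d\n", entity_pop_job(e)->id);
    return 0;
}

Running it prints "run job 42": the blocked entity is skipped during selection, and the expensive pop happens only for the entity that was already known to be ready.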
@@ -27,6 +27,8 @@
 #include <linux/kfifo.h>
 #include <linux/fence.h>
+#define AMD_SCHED_FENCE_SCHEDULED_BIT FENCE_FLAG_USER_BITS
 struct amd_gpu_scheduler;
 struct amd_sched_rq;
@@ -68,6 +70,7 @@ struct amd_sched_rq {
 struct amd_sched_fence {
     struct fence base;
     struct fence_cb cb;
+    struct list_head scheduled_cb;
     struct amd_gpu_scheduler *sched;
     spinlock_t lock;
     void *owner;
@@ -134,7 +137,7 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
 struct amd_sched_fence *amd_sched_fence_create(
     struct amd_sched_entity *s_entity, void *owner);
+void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
 void amd_sched_fence_signal(struct amd_sched_fence *fence);
 #endif
@@ -35,6 +35,8 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity
     fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
     if (fence == NULL)
         return NULL;
+    INIT_LIST_HEAD(&fence->scheduled_cb);
     fence->owner = owner;
     fence->sched = s_entity->sched;
     spin_lock_init(&fence->lock);
@@ -55,6 +57,17 @@ void amd_sched_fence_signal(struct amd_sched_fence *fence)
         FENCE_TRACE(&fence->base, "was already signaled\n");
 }
+void amd_sched_fence_scheduled(struct amd_sched_fence *s_fence)
+{
+    struct fence_cb *cur, *tmp;
+    set_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &s_fence->base.flags);
+    list_for_each_entry_safe(cur, tmp, &s_fence->scheduled_cb, node) {
+        list_del_init(&cur->node);
+        cur->func(&s_fence->base, cur);
+    }
+}
 static const char *amd_sched_fence_get_driver_name(struct fence *fence)
 {
     return "amd_sched";
......
@@ -7,6 +7,7 @@ struct nvkm_instmem {
     const struct nvkm_instmem_func *func;
     struct nvkm_subdev subdev;
+    spinlock_t lock;
     struct list_head list;
     u32 reserved;
......
@@ -367,6 +367,7 @@ static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios,
         return -ENODEV;
     }
     obj = (union acpi_object *)buffer.pointer;
+    len = min(len, (int)obj->buffer.length);
     memcpy(bios+offset, obj->buffer.pointer, len);
     kfree(buffer.pointer);
     return len;
......
@@ -39,6 +39,7 @@
 #include <nvif/client.h>
 #include <nvif/device.h>
+#include <nvif/ioctl.h>
 #include <drmP.h>
@@ -65,9 +66,10 @@ struct nouveau_drm_tile {
 };
 enum nouveau_drm_object_route {
-    NVDRM_OBJECT_NVIF = 0,
+    NVDRM_OBJECT_NVIF = NVIF_IOCTL_V0_OWNER_NVIF,
     NVDRM_OBJECT_USIF,
     NVDRM_OBJECT_ABI16,
+    NVDRM_OBJECT_ANY = NVIF_IOCTL_V0_OWNER_ANY,
 };
 enum nouveau_drm_notify_route {
......
@@ -313,7 +313,10 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
     if (nvif_unpack(argv->v0, 0, 0, true)) {
         /* block access to objects not created via this interface */
         owner = argv->v0.owner;
-        argv->v0.owner = NVDRM_OBJECT_USIF;
+        if (argv->v0.object == 0ULL)
+            argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */
+        else
+            argv->v0.owner = NVDRM_OBJECT_USIF;
     } else
         goto done;
......
@@ -278,6 +278,12 @@ nvkm_device_pci_10de_0fe3[] = {
     {}
 };
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_0fe4[] = {
+    { 0x144d, 0xc740, NULL, { .War00C800_0 = true } },
+    {}
+};
 static const struct nvkm_device_pci_vendor
 nvkm_device_pci_10de_104b[] = {
     { 0x1043, 0x844c, "GeForce GT 625" },
@@ -688,6 +694,12 @@ nvkm_device_pci_10de_1199[] = {
     {}
 };
+static const struct nvkm_device_pci_vendor
+nvkm_device_pci_10de_11e0[] = {
+    { 0x1558, 0x5106, NULL, { .War00C800_0 = true } },
+    {}
+};
 static const struct nvkm_device_pci_vendor
 nvkm_device_pci_10de_11e3[] = {
     { 0x17aa, 0x3683, "GeForce GTX 760A" },
@@ -1370,7 +1382,7 @@ nvkm_device_pci_10de[] = {
     { 0x0fe1, "GeForce GT 730M" },
     { 0x0fe2, "GeForce GT 745M" },
     { 0x0fe3, "GeForce GT 745M", nvkm_device_pci_10de_0fe3 },
-    { 0x0fe4, "GeForce GT 750M" },
+    { 0x0fe4, "GeForce GT 750M", nvkm_device_pci_10de_0fe4 },
     { 0x0fe9, "GeForce GT 750M" },
     { 0x0fea, "GeForce GT 755M" },
     { 0x0fec, "GeForce 710A" },
@@ -1485,7 +1497,7 @@ nvkm_device_pci_10de[] = {
     { 0x11c6, "GeForce GTX 650 Ti" },
     { 0x11c8, "GeForce GTX 650" },
     { 0x11cb, "GeForce GT 740" },
-    { 0x11e0, "GeForce GTX 770M" },
+    { 0x11e0, "GeForce GTX 770M", nvkm_device_pci_10de_11e0 },
     { 0x11e1, "GeForce GTX 765M" },
     { 0x11e2, "GeForce GTX 765M" },
     { 0x11e3, "GeForce GTX 760M", nvkm_device_pci_10de_11e3 },
......
@@ -207,6 +207,8 @@ gf117_grctx_generate_attrib(struct gf100_grctx *info)
             const u32 b = beta * gr->ppc_tpc_nr[gpc][ppc];
             const u32 t = timeslice_mode;
             const u32 o = PPC_UNIT(gpc, ppc, 0);
+            if (!(gr->ppc_mask[gpc] & (1 << ppc)))
+                continue;
             mmio_skip(info, o + 0xc0, (t << 28) | (b << 16) | ++bo);
             mmio_wr32(info, o + 0xc0, (t << 28) | (b << 16) | --bo);
             bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc];
......
@@ -52,10 +52,12 @@ mmio_list_base:
 #endif
 #ifdef INCLUDE_CODE
+#define gpc_addr(reg,addr) /*
+*/    imm32(reg,addr) /*
+*/    or reg NV_PGRAPH_GPCX_GPCCS_MMIO_CTRL_BASE_ENABLE
 #define gpc_wr32(addr,reg) /*
+*/    gpc_addr($r14,addr) /*
 */    mov b32 $r15 reg /*
-*/    imm32($r14, addr) /*
-*/    or $r14 NV_PGRAPH_GPCX_GPCCS_MMIO_CTRL_BASE_ENABLE /*
 */    call(nv_wr32)
 // reports an exception to the host
@@ -161,7 +163,7 @@ init:
 #if NV_PGRAPH_GPCX_UNK__SIZE > 0
     // figure out which, and how many, UNKs are actually present
-    imm32($r14, 0x500c30)
+    gpc_addr($r14, 0x500c30)
     clear b32 $r2
     clear b32 $r3
     clear b32 $r4
......
@@ -314,7 +314,7 @@ uint32_t gf117_grgpc_code[] = {
0x03f01200, 0x03f01200,
0x0002d000, 0x0002d000,
0x17f104bd, 0x17f104bd,
0x10fe0542, 0x10fe0545,
0x0007f100, 0x0007f100,
0x0003f007, 0x0003f007,
0xbd0000d0, 0xbd0000d0,
@@ -338,184 +338,184 @@ uint32_t gf117_grgpc_code[] = {
0x02d00103, 0x02d00103,
0xf104bd00, 0xf104bd00,
0xf00c30e7, 0xf00c30e7,
0x24bd50e3, 0xe5f050e3,
0x44bd34bd, 0xbd24bd01,
/* 0x0430: init_unk_loop */ /* 0x0433: init_unk_loop */
0xb06821f4, 0xf444bd34,
0x0bf400f6, 0xf6b06821,
0x01f7f00f, 0x0f0bf400,
0xfd04f2bb, 0xbb01f7f0,
0x30b6054f, 0x4ffd04f2,
/* 0x0445: init_unk_next */ 0x0130b605,
0x0120b601, /* 0x0448: init_unk_next */
0xb004e0b6, 0xb60120b6,
0x1bf40126, 0x26b004e0,
/* 0x0451: init_unk_done */ 0xe21bf401,
0x070380e2, /* 0x0454: init_unk_done */
0xf1080480, 0x80070380,
0xf0010027, 0x27f10804,
0x22cf0223, 0x23f00100,
0x9534bd00, 0x0022cf02,
0x07f10825, 0x259534bd,
0x03f0c000, 0x0007f108,
0x0005d001, 0x0103f0c0,
0x07f104bd, 0xbd0005d0,
0x03f0c100, 0x0007f104,
0x0005d001, 0x0103f0c1,
0x0e9804bd, 0xbd0005d0,
0x010f9800, 0x000e9804,
0x015021f5, 0xf5010f98,
0xbb002fbb, 0xbb015021,
0x0e98003f, 0x3fbb002f,
0x020f9801, 0x010e9800,
0x015021f5, 0xf5020f98,
0xfd050e98, 0x98015021,
0x2ebb00ef, 0xeffd050e,
0x003ebb00, 0x002ebb00,
0x98020e98, 0x98003ebb,
0x21f5030f, 0x0f98020e,
0x0e980150, 0x5021f503,
0x00effd07, 0x070e9801,
0xbb002ebb, 0xbb00effd,
0x35b6003e, 0x3ebb002e,
0x0007f102, 0x0235b600,
0x0103f0d3, 0xd30007f1,
0xbd0003d0, 0xd00103f0,
0x0825b604,
0xb60635b6,
0x30b60120,
0x0824b601,
0xb90834b6,
0x21f5022f,
0x2fbb02d3,
0x003fbb00,
0x010007f1,
0xd00203f0,
0x04bd0003, 0x04bd0003,
0x29f024bd, 0xb60825b6,
0x0007f11f, 0x20b60635,
0x0203f008, 0x0130b601,
0xbd0002d0, 0xb60824b6,
/* 0x0505: main */ 0x2fb90834,
0x0031f404, 0xd321f502,
0xf00028f4, 0x002fbb02,
0x21f424d7, 0xf1003fbb,
0xf401f439, 0xf0010007,
0xf404e4b0, 0x03d00203,
0x81fe1e18, 0xbd04bd00,
0x0627f001, 0x1f29f024,
0x12fd20bd, 0x080007f1,
0x01e4b604, 0xd00203f0,
0xfe051efd, 0x04bd0002,
0x21f50018, /* 0x0508: main */
0x0ef405fa, 0xf40031f4,
/* 0x0535: main_not_ctx_xfer */ 0xd7f00028,
0x10ef94d3, 0x3921f424,
0xf501f5f0, 0xb0f401f4,
0xf4037e21, 0x18f404e4,
/* 0x0542: ih */ 0x0181fe1e,
0x80f9c60e, 0xbd0627f0,
0xf90188fe, 0x0412fd20,
0xf990f980, 0xfd01e4b6,
0xf9b0f9a0, 0x18fe051e,
0xf9e0f9d0, 0xfd21f500,
0xf104bdf0, 0xd30ef405,
0xf00200a7, /* 0x0538: main_not_ctx_xfer */
0xaacf00a3, 0xf010ef94,
0x04abc400, 0x21f501f5,
0xf02c0bf4, 0x0ef4037e,
0xe7f124d7, /* 0x0545: ih */
0xe3f01a00, 0xfe80f9c6,
0x00eecf00, 0x80f90188,
0x1900f7f1, 0xa0f990f9,
0xcf00f3f0, 0xd0f9b0f9,
0x21f400ff, 0xf0f9e0f9,
0x01e7f004, 0xa7f104bd,
0x1d0007f1, 0xa3f00200,
0xd00003f0, 0x00aacf00,
0x04bd000e, 0xf404abc4,
/* 0x0590: ih_no_fifo */ 0xd7f02c0b,
0x010007f1, 0x00e7f124,
0xd00003f0, 0x00e3f01a,
0x04bd000a, 0xf100eecf,
0xe0fcf0fc, 0xf01900f7,
0xb0fcd0fc, 0xffcf00f3,
0x90fca0fc, 0x0421f400,
0x88fe80fc, 0xf101e7f0,
0xf480fc00, 0xf01d0007,
0x01f80032, 0x0ed00003,
/* 0x05b4: hub_barrier_done */ /* 0x0593: ih_no_fifo */
0x9801f7f0, 0xf104bd00,
0xfebb040e, 0xf0010007,
0x02ffb904, 0x0ad00003,
0x9418e7f1, 0xfc04bd00,
0xf440e3f0, 0xfce0fcf0,
0x00f89d21, 0xfcb0fcd0,
/* 0x05cc: ctx_redswitch */ 0xfc90fca0,
0xf120f7f0, 0x0088fe80,
0x32f480fc,
/* 0x05b7: hub_barrier_done */
0xf001f800,
0x0e9801f7,
0x04febb04,
0xf102ffb9,
0xf09418e7,
0x21f440e3,
/* 0x05cf: ctx_redswitch */
0xf000f89d,
0x07f120f7,
0x03f08500,
0x000fd001,
0xe7f004bd,
/* 0x05e1: ctx_redswitch_delay */
0x01e2b608,
0xf1fd1bf4,
0xf10800f5,
0xf10200f5,
0xf0850007, 0xf0850007,
0x0fd00103, 0x0fd00103,
0xf004bd00, 0xf804bd00,
/* 0x05de: ctx_redswitch_delay */ /* 0x05fd: ctx_xfer */
0xe2b608e7, 0x0007f100,
0xfd1bf401, 0x0203f081,
0x0800f5f1, 0xbd000fd0,
0x0200f5f1, 0x0711f404,
0x850007f1, 0x05cf21f5,
0xd00103f0, /* 0x0610: ctx_xfer_not_load */
0x04bd000f, 0x026a21f5,
/* 0x05fa: ctx_xfer */ 0x07f124bd,
0x07f100f8, 0x03f047fc,
0x03f08100, 0x0002d002,
0x000fd002, 0x2cf004bd,
0x11f404bd, 0x0320b601,
0xcc21f507, 0x4afc07f1,
/* 0x060d: ctx_xfer_not_load */ 0xd00203f0,
0x6a21f505, 0x04bd0002,
0xf124bd02,
0xf047fc07,
0x02d00203,
0xf004bd00,
0x20b6012c,
0xfc07f103,
0x0203f04a,
0xbd0002d0,
0x01acf004,
0xf102a5f0,
0xf00000b7,
0x0c9850b3,
0x0fc4b604,
0x9800bcbb,
0x0d98000c,
0x00e7f001,
0x016f21f5,
0xf101acf0,
0xf04000b7,
0x0c9850b3,
0x0fc4b604,
0x9800bcbb,
0x0d98010c,
0x060f9802,
0x0800e7f1,
0x016f21f5,
0xf001acf0, 0xf001acf0,
0xb7f104a5, 0xb7f102a5,
0xb3f03000, 0xb3f00000,
0x040c9850, 0x040c9850,
0xbb0fc4b6, 0xbb0fc4b6,
0x0c9800bc, 0x0c9800bc,
0x030d9802, 0x010d9800,
0xf1080f98, 0xf500e7f0,
0xf50200e7, 0xf0016f21,
0xf5016f21, 0xb7f101ac,
0xf4025e21, 0xb3f04000,
0x12f40601, 0x040c9850,
/* 0x06a9: ctx_xfer_post */ 0xbb0fc4b6,
0x7f21f507, 0x0c9800bc,
/* 0x06ad: ctx_xfer_done */ 0x020d9801,
0xb421f502, 0xf1060f98,
0x0000f805, 0xf50800e7,
0x00000000, 0xf0016f21,
0xa5f001ac,
0x00b7f104,
0x50b3f030,
0xb6040c98,
0xbcbb0fc4,
0x020c9800,
0x98030d98,
0xe7f1080f,
0x21f50200,
0x21f5016f,
0x01f4025e,
0x0712f406,
/* 0x06ac: ctx_xfer_post */
0x027f21f5,
/* 0x06b0: ctx_xfer_done */
0x05b721f5,
0x000000f8,
0x00000000, 0x00000000,
0x00000000, 0x00000000,
0x00000000, 0x00000000,
......
@@ -314,7 +314,7 @@ uint32_t gk104_grgpc_code[] = {
0x03f01200, 0x03f01200,
0x0002d000, 0x0002d000,
0x17f104bd, 0x17f104bd,
0x10fe0542, 0x10fe0545,
0x0007f100, 0x0007f100,
0x0003f007, 0x0003f007,
0xbd0000d0, 0xbd0000d0,
@@ -338,184 +338,184 @@ uint32_t gk104_grgpc_code[] = {
0x02d00103, 0x02d00103,
0xf104bd00, 0xf104bd00,
0xf00c30e7, 0xf00c30e7,
0x24bd50e3, 0xe5f050e3,
0x44bd34bd, 0xbd24bd01,
/* 0x0430: init_unk_loop */ /* 0x0433: init_unk_loop */
0xb06821f4, 0xf444bd34,
0x0bf400f6, 0xf6b06821,
0x01f7f00f, 0x0f0bf400,
0xfd04f2bb, 0xbb01f7f0,
0x30b6054f, 0x4ffd04f2,
/* 0x0445: init_unk_next */ 0x0130b605,
0x0120b601, /* 0x0448: init_unk_next */
0xb004e0b6, 0xb60120b6,
0x1bf40126, 0x26b004e0,
/* 0x0451: init_unk_done */ 0xe21bf401,
0x070380e2, /* 0x0454: init_unk_done */
0xf1080480, 0x80070380,
0xf0010027, 0x27f10804,
0x22cf0223, 0x23f00100,
0x9534bd00, 0x0022cf02,
0x07f10825, 0x259534bd,
0x03f0c000, 0x0007f108,
0x0005d001, 0x0103f0c0,
0x07f104bd, 0xbd0005d0,
0x03f0c100, 0x0007f104,
0x0005d001, 0x0103f0c1,
0x0e9804bd, 0xbd0005d0,
0x010f9800, 0x000e9804,
0x015021f5, 0xf5010f98,
0xbb002fbb, 0xbb015021,
0x0e98003f, 0x3fbb002f,
0x020f9801, 0x010e9800,
0x015021f5, 0xf5020f98,
0xfd050e98, 0x98015021,
0x2ebb00ef, 0xeffd050e,
0x003ebb00, 0x002ebb00,
0x98020e98, 0x98003ebb,
0x21f5030f, 0x0f98020e,
0x0e980150, 0x5021f503,
0x00effd07, 0x070e9801,
0xbb002ebb, 0xbb00effd,
0x35b6003e, 0x3ebb002e,
0x0007f102, 0x0235b600,
0x0103f0d3, 0xd30007f1,
0xbd0003d0, 0xd00103f0,
0x0825b604,
0xb60635b6,
0x30b60120,
0x0824b601,
0xb90834b6,
0x21f5022f,
0x2fbb02d3,
0x003fbb00,
0x010007f1,
0xd00203f0,
0x04bd0003, 0x04bd0003,
0x29f024bd, 0xb60825b6,
0x0007f11f, 0x20b60635,
0x0203f008, 0x0130b601,
0xbd0002d0, 0xb60824b6,
/* 0x0505: main */ 0x2fb90834,
0x0031f404, 0xd321f502,
0xf00028f4, 0x002fbb02,
0x21f424d7, 0xf1003fbb,
0xf401f439, 0xf0010007,
0xf404e4b0, 0x03d00203,
0x81fe1e18, 0xbd04bd00,
0x0627f001, 0x1f29f024,
0x12fd20bd, 0x080007f1,
0x01e4b604, 0xd00203f0,
0xfe051efd, 0x04bd0002,
0x21f50018, /* 0x0508: main */
0x0ef405fa, 0xf40031f4,
/* 0x0535: main_not_ctx_xfer */ 0xd7f00028,
0x10ef94d3, 0x3921f424,
0xf501f5f0, 0xb0f401f4,
0xf4037e21, 0x18f404e4,
/* 0x0542: ih */ 0x0181fe1e,
0x80f9c60e, 0xbd0627f0,
0xf90188fe, 0x0412fd20,
0xf990f980, 0xfd01e4b6,
0xf9b0f9a0, 0x18fe051e,
0xf9e0f9d0, 0xfd21f500,
0xf104bdf0, 0xd30ef405,
0xf00200a7, /* 0x0538: main_not_ctx_xfer */
0xaacf00a3, 0xf010ef94,
0x04abc400, 0x21f501f5,
0xf02c0bf4, 0x0ef4037e,
0xe7f124d7, /* 0x0545: ih */
0xe3f01a00, 0xfe80f9c6,
0x00eecf00, 0x80f90188,
0x1900f7f1, 0xa0f990f9,
0xcf00f3f0, 0xd0f9b0f9,
0x21f400ff, 0xf0f9e0f9,
0x01e7f004, 0xa7f104bd,
0x1d0007f1, 0xa3f00200,
0xd00003f0, 0x00aacf00,
0x04bd000e, 0xf404abc4,
/* 0x0590: ih_no_fifo */ 0xd7f02c0b,
0x010007f1, 0x00e7f124,
0xd00003f0, 0x00e3f01a,
0x04bd000a, 0xf100eecf,
0xe0fcf0fc, 0xf01900f7,
0xb0fcd0fc, 0xffcf00f3,
0x90fca0fc, 0x0421f400,
0x88fe80fc, 0xf101e7f0,
0xf480fc00, 0xf01d0007,
0x01f80032, 0x0ed00003,
/* 0x05b4: hub_barrier_done */ /* 0x0593: ih_no_fifo */
0x9801f7f0, 0xf104bd00,
0xfebb040e, 0xf0010007,
0x02ffb904, 0x0ad00003,
0x9418e7f1, 0xfc04bd00,
0xf440e3f0, 0xfce0fcf0,
0x00f89d21, 0xfcb0fcd0,
/* 0x05cc: ctx_redswitch */ 0xfc90fca0,
0xf120f7f0, 0x0088fe80,
0x32f480fc,
/* 0x05b7: hub_barrier_done */
0xf001f800,
0x0e9801f7,
0x04febb04,
0xf102ffb9,
0xf09418e7,
0x21f440e3,
/* 0x05cf: ctx_redswitch */
0xf000f89d,
0x07f120f7,
0x03f08500,
0x000fd001,
0xe7f004bd,
/* 0x05e1: ctx_redswitch_delay */
0x01e2b608,
0xf1fd1bf4,
0xf10800f5,
0xf10200f5,
0xf0850007, 0xf0850007,
0x0fd00103, 0x0fd00103,
0xf004bd00, 0xf804bd00,
/* 0x05de: ctx_redswitch_delay */ /* 0x05fd: ctx_xfer */
0xe2b608e7, 0x0007f100,
0xfd1bf401, 0x0203f081,
0x0800f5f1, 0xbd000fd0,
0x0200f5f1, 0x0711f404,
0x850007f1, 0x05cf21f5,
0xd00103f0, /* 0x0610: ctx_xfer_not_load */
0x04bd000f, 0x026a21f5,
/* 0x05fa: ctx_xfer */ 0x07f124bd,
0x07f100f8, 0x03f047fc,
0x03f08100, 0x0002d002,
0x000fd002, 0x2cf004bd,
0x11f404bd, 0x0320b601,
0xcc21f507, 0x4afc07f1,
/* 0x060d: ctx_xfer_not_load */ 0xd00203f0,
0x6a21f505, 0x04bd0002,
0xf124bd02,
0xf047fc07,
0x02d00203,
0xf004bd00,
0x20b6012c,
0xfc07f103,
0x0203f04a,
0xbd0002d0,
0x01acf004,
0xf102a5f0,
0xf00000b7,
0x0c9850b3,
0x0fc4b604,
0x9800bcbb,
0x0d98000c,
0x00e7f001,
0x016f21f5,
0xf101acf0,
0xf04000b7,
0x0c9850b3,
0x0fc4b604,
0x9800bcbb,
0x0d98010c,
0x060f9802,
0x0800e7f1,
0x016f21f5,
0xf001acf0, 0xf001acf0,
0xb7f104a5, 0xb7f102a5,
0xb3f03000, 0xb3f00000,
0x040c9850, 0x040c9850,
0xbb0fc4b6, 0xbb0fc4b6,
0x0c9800bc, 0x0c9800bc,
0x030d9802, 0x010d9800,
0xf1080f98, 0xf500e7f0,
0xf50200e7, 0xf0016f21,
0xf5016f21, 0xb7f101ac,
0xf4025e21, 0xb3f04000,
0x12f40601, 0x040c9850,
/* 0x06a9: ctx_xfer_post */ 0xbb0fc4b6,
0x7f21f507, 0x0c9800bc,
/* 0x06ad: ctx_xfer_done */ 0x020d9801,
0xb421f502, 0xf1060f98,
0x0000f805, 0xf50800e7,
0x00000000, 0xf0016f21,
0xa5f001ac,
0x00b7f104,
0x50b3f030,
0xb6040c98,
0xbcbb0fc4,
0x020c9800,
0x98030d98,
0xe7f1080f,
0x21f50200,
0x21f5016f,
0x01f4025e,
0x0712f406,
/* 0x06ac: ctx_xfer_post */
0x027f21f5,
/* 0x06b0: ctx_xfer_done */
0x05b721f5,
0x000000f8,
0x00000000, 0x00000000,
0x00000000, 0x00000000,
0x00000000, 0x00000000,
......
@@ -314,7 +314,7 @@ uint32_t gk110_grgpc_code[] = {
0x03f01200, 0x03f01200,
0x0002d000, 0x0002d000,
0x17f104bd, 0x17f104bd,
0x10fe0542, 0x10fe0545,
0x0007f100, 0x0007f100,
0x0003f007, 0x0003f007,
0xbd0000d0, 0xbd0000d0,
@@ -338,184 +338,184 @@ uint32_t gk110_grgpc_code[] = {
0x02d00103, 0x02d00103,
0xf104bd00, 0xf104bd00,
0xf00c30e7, 0xf00c30e7,
0x24bd50e3, 0xe5f050e3,
0x44bd34bd, 0xbd24bd01,
/* 0x0430: init_unk_loop */ /* 0x0433: init_unk_loop */
0xb06821f4, 0xf444bd34,
0x0bf400f6, 0xf6b06821,
0x01f7f00f, 0x0f0bf400,
0xfd04f2bb, 0xbb01f7f0,
0x30b6054f, 0x4ffd04f2,
/* 0x0445: init_unk_next */ 0x0130b605,
0x0120b601, /* 0x0448: init_unk_next */
0xb004e0b6, 0xb60120b6,
0x1bf40226, 0x26b004e0,
/* 0x0451: init_unk_done */ 0xe21bf402,
0x070380e2, /* 0x0454: init_unk_done */
0xf1080480, 0x80070380,
0xf0010027, 0x27f10804,
0x22cf0223, 0x23f00100,
0x9534bd00, 0x0022cf02,
0x07f10825, 0x259534bd,
0x03f0c000, 0x0007f108,
0x0005d001, 0x0103f0c0,
0x07f104bd, 0xbd0005d0,
0x03f0c100, 0x0007f104,
0x0005d001, 0x0103f0c1,
0x0e9804bd, 0xbd0005d0,
0x010f9800, 0x000e9804,
0x015021f5, 0xf5010f98,
0xbb002fbb, 0xbb015021,
0x0e98003f, 0x3fbb002f,
0x020f9801, 0x010e9800,
0x015021f5, 0xf5020f98,
0xfd050e98, 0x98015021,
0x2ebb00ef, 0xeffd050e,
0x003ebb00, 0x002ebb00,
0x98020e98, 0x98003ebb,
0x21f5030f, 0x0f98020e,
0x0e980150, 0x5021f503,
0x00effd07, 0x070e9801,
0xbb002ebb, 0xbb00effd,
0x35b6003e, 0x3ebb002e,
0x0007f102, 0x0235b600,
0x0103f0d3, 0xd30007f1,
0xbd0003d0, 0xd00103f0,
0x0825b604,
0xb60635b6,
0x30b60120,
0x0824b601,
0xb90834b6,
0x21f5022f,
0x2fbb02d3,
0x003fbb00,
0x010007f1,
0xd00203f0,
0x04bd0003, 0x04bd0003,
0x29f024bd, 0xb60825b6,
0x0007f11f, 0x20b60635,
0x0203f030, 0x0130b601,
0xbd0002d0, 0xb60824b6,
/* 0x0505: main */ 0x2fb90834,
0x0031f404, 0xd321f502,
0xf00028f4, 0x002fbb02,
0x21f424d7, 0xf1003fbb,
0xf401f439, 0xf0010007,
0xf404e4b0, 0x03d00203,
0x81fe1e18, 0xbd04bd00,
0x0627f001, 0x1f29f024,
0x12fd20bd, 0x300007f1,
0x01e4b604, 0xd00203f0,
0xfe051efd, 0x04bd0002,
0x21f50018, /* 0x0508: main */
0x0ef405fa, 0xf40031f4,
/* 0x0535: main_not_ctx_xfer */ 0xd7f00028,
0x10ef94d3, 0x3921f424,
0xf501f5f0, 0xb0f401f4,
0xf4037e21, 0x18f404e4,
/* 0x0542: ih */ 0x0181fe1e,
0x80f9c60e, 0xbd0627f0,
0xf90188fe, 0x0412fd20,
0xf990f980, 0xfd01e4b6,
0xf9b0f9a0, 0x18fe051e,
0xf9e0f9d0, 0xfd21f500,
0xf104bdf0, 0xd30ef405,
0xf00200a7, /* 0x0538: main_not_ctx_xfer */
0xaacf00a3, 0xf010ef94,
0x04abc400, 0x21f501f5,
0xf02c0bf4, 0x0ef4037e,
0xe7f124d7, /* 0x0545: ih */
0xe3f01a00, 0xfe80f9c6,
0x00eecf00, 0x80f90188,
0x1900f7f1, 0xa0f990f9,
0xcf00f3f0, 0xd0f9b0f9,
0x21f400ff, 0xf0f9e0f9,
0x01e7f004, 0xa7f104bd,
0x1d0007f1, 0xa3f00200,
0xd00003f0, 0x00aacf00,
0x04bd000e, 0xf404abc4,
/* 0x0590: ih_no_fifo */ 0xd7f02c0b,
0x010007f1, 0x00e7f124,
0xd00003f0, 0x00e3f01a,
0x04bd000a, 0xf100eecf,
0xe0fcf0fc, 0xf01900f7,
0xb0fcd0fc, 0xffcf00f3,
0x90fca0fc, 0x0421f400,
0x88fe80fc, 0xf101e7f0,
0xf480fc00, 0xf01d0007,
0x01f80032, 0x0ed00003,
/* 0x05b4: hub_barrier_done */ /* 0x0593: ih_no_fifo */
0x9801f7f0, 0xf104bd00,
0xfebb040e, 0xf0010007,
0x02ffb904, 0x0ad00003,
0x9418e7f1, 0xfc04bd00,
0xf440e3f0, 0xfce0fcf0,
0x00f89d21, 0xfcb0fcd0,
/* 0x05cc: ctx_redswitch */ 0xfc90fca0,
0xf120f7f0, 0x0088fe80,
0x32f480fc,
/* 0x05b7: hub_barrier_done */
0xf001f800,
0x0e9801f7,
0x04febb04,
0xf102ffb9,
0xf09418e7,
0x21f440e3,
/* 0x05cf: ctx_redswitch */
0xf000f89d,
0x07f120f7,
0x03f08500,
0x000fd001,
0xe7f004bd,
/* 0x05e1: ctx_redswitch_delay */
0x01e2b608,
0xf1fd1bf4,
0xf10800f5,
0xf10200f5,
0xf0850007, 0xf0850007,
0x0fd00103, 0x0fd00103,
0xf004bd00, 0xf804bd00,
/* 0x05de: ctx_redswitch_delay */ /* 0x05fd: ctx_xfer */
0xe2b608e7, 0x0007f100,
0xfd1bf401, 0x0203f081,
0x0800f5f1, 0xbd000fd0,
0x0200f5f1, 0x0711f404,
0x850007f1, 0x05cf21f5,
0xd00103f0, /* 0x0610: ctx_xfer_not_load */
0x04bd000f, 0x026a21f5,
/* 0x05fa: ctx_xfer */ 0x07f124bd,
0x07f100f8, 0x03f047fc,
0x03f08100, 0x0002d002,
0x000fd002, 0x2cf004bd,
0x11f404bd, 0x0320b601,
0xcc21f507, 0x4afc07f1,
/* 0x060d: ctx_xfer_not_load */ 0xd00203f0,
0x6a21f505, 0x04bd0002,
0xf124bd02,
0xf047fc07,
0x02d00203,
0xf004bd00,
0x20b6012c,
0xfc07f103,
0x0203f04a,
0xbd0002d0,
0x01acf004,
0xf102a5f0,
0xf00000b7,
0x0c9850b3,
0x0fc4b604,
0x9800bcbb,
0x0d98000c,
0x00e7f001,
0x016f21f5,
0xf101acf0,
0xf04000b7,
0x0c9850b3,
0x0fc4b604,
0x9800bcbb,
0x0d98010c,
0x060f9802,
0x0800e7f1,
0x016f21f5,
0xf001acf0, 0xf001acf0,
0xb7f104a5, 0xb7f102a5,
0xb3f03000, 0xb3f00000,
0x040c9850, 0x040c9850,
0xbb0fc4b6, 0xbb0fc4b6,
0x0c9800bc, 0x0c9800bc,
0x030d9802, 0x010d9800,
0xf1080f98, 0xf500e7f0,
0xf50200e7, 0xf0016f21,
0xf5016f21, 0xb7f101ac,
0xf4025e21, 0xb3f04000,
0x12f40601, 0x040c9850,
/* 0x06a9: ctx_xfer_post */ 0xbb0fc4b6,
0x7f21f507, 0x0c9800bc,
/* 0x06ad: ctx_xfer_done */ 0x020d9801,
0xb421f502, 0xf1060f98,
0x0000f805, 0xf50800e7,
0x00000000, 0xf0016f21,
0xa5f001ac,
0x00b7f104,
0x50b3f030,
0xb6040c98,
0xbcbb0fc4,
0x020c9800,
0x98030d98,
0xe7f1080f,
0x21f50200,
0x21f5016f,
0x01f4025e,
0x0712f406,
/* 0x06ac: ctx_xfer_post */
0x027f21f5,
/* 0x06b0: ctx_xfer_done */
0x05b721f5,
0x000000f8,
0x00000000, 0x00000000,
0x00000000, 0x00000000,
0x00000000, 0x00000000,
......
@@ -276,7 +276,7 @@ uint32_t gk208_grgpc_code[] = {
0x02020014, 0x02020014,
0xf6120040, 0xf6120040,
0x04bd0002, 0x04bd0002,
0xfe048141, 0xfe048441,
0x00400010, 0x00400010,
0x0000f607, 0x0000f607,
0x040204bd, 0x040204bd,
@@ -295,165 +295,165 @@ uint32_t gk208_grgpc_code[] = {
0x01c90080, 0x01c90080,
0xbd0002f6, 0xbd0002f6,
0x0c308e04, 0x0c308e04,
0xbd24bd50, 0x01e5f050,
/* 0x0383: init_unk_loop */ 0x34bd24bd,
0x7e44bd34, /* 0x0386: init_unk_loop */
0xb0000065, 0x657e44bd,
0x0bf400f6, 0xf6b00000,
0xbb010f0e, 0x0e0bf400,
0x4ffd04f2, 0xf2bb010f,
0x0130b605, 0x054ffd04,
/* 0x0398: init_unk_next */ /* 0x039b: init_unk_next */
0xb60120b6, 0xb60130b6,
0x26b004e0, 0xe0b60120,
0xe21bf401, 0x0126b004,
/* 0x03a4: init_unk_done */ /* 0x03a7: init_unk_done */
0xb50703b5, 0xb5e21bf4,
0x00820804, 0x04b50703,
0x22cf0201, 0x01008208,
0x9534bd00, 0x0022cf02,
0x00800825, 0x259534bd,
0x05f601c0, 0xc0008008,
0x8004bd00, 0x0005f601,
0xf601c100, 0x008004bd,
0x04bd0005, 0x05f601c1,
0x98000e98, 0x9804bd00,
0x207e010f, 0x0f98000e,
0x2fbb0001, 0x01207e01,
0x003fbb00, 0x002fbb00,
0x98010e98, 0x98003fbb,
0x207e020f, 0x0f98010e,
0x0e980001, 0x01207e02,
0x00effd05, 0x050e9800,
0xbb002ebb, 0xbb00effd,
0x0e98003e, 0x3ebb002e,
0x030f9802, 0x020e9800,
0x0001207e, 0x7e030f98,
0xfd070e98, 0x98000120,
0x2ebb00ef, 0xeffd070e,
0x003ebb00, 0x002ebb00,
0x800235b6, 0xb6003ebb,
0xf601d300, 0x00800235,
0x04bd0003, 0x03f601d3,
0xb60825b6, 0xb604bd00,
0x20b60635, 0x35b60825,
0x0130b601, 0x0120b606,
0xb60824b6, 0xb60130b6,
0x2fb20834, 0x34b60824,
0x0002687e, 0x7e2fb208,
0xbb002fbb, 0xbb000268,
0x0080003f, 0x3fbb002f,
0x03f60201, 0x01008000,
0xbd04bd00, 0x0003f602,
0x1f29f024, 0x24bd04bd,
0x02300080, 0x801f29f0,
0xbd0002f6, 0xf6023000,
/* 0x0445: main */ 0x04bd0002,
0x0031f404, /* 0x0448: main */
0x0d0028f4, 0xf40031f4,
0x00377e24, 0x240d0028,
0xf401f400, 0x0000377e,
0xf404e4b0, 0xb0f401f4,
0x81fe1d18, 0x18f404e4,
0xbd060201, 0x0181fe1d,
0x0412fd20, 0x20bd0602,
0xfd01e4b6, 0xb60412fd,
0x18fe051e, 0x1efd01e4,
0x05187e00, 0x0018fe05,
0xd40ef400, 0x00051b7e,
/* 0x0474: main_not_ctx_xfer */ /* 0x0477: main_not_ctx_xfer */
0xf010ef94, 0x94d40ef4,
0xf87e01f5, 0xf5f010ef,
0x0ef40002, 0x02f87e01,
/* 0x0481: ih */ 0xc70ef400,
0xfe80f9c7, /* 0x0484: ih */
0x80f90188, 0x88fe80f9,
0xa0f990f9, 0xf980f901,
0xd0f9b0f9, 0xf9a0f990,
0xf0f9e0f9, 0xf9d0f9b0,
0x004a04bd, 0xbdf0f9e0,
0x00aacf02, 0x02004a04,
0xf404abc4, 0xc400aacf,
0x240d1f0b, 0x0bf404ab,
0xcf1a004e, 0x4e240d1f,
0x004f00ee, 0xeecf1a00,
0x00ffcf19, 0x19004f00,
0x0000047e, 0x7e00ffcf,
0x0040010e, 0x0e000004,
0x000ef61d, 0x1d004001,
/* 0x04be: ih_no_fifo */ 0xbd000ef6,
0x004004bd, /* 0x04c1: ih_no_fifo */
0x000af601, 0x01004004,
0xf0fc04bd, 0xbd000af6,
0xd0fce0fc, 0xfcf0fc04,
0xa0fcb0fc, 0xfcd0fce0,
0x80fc90fc, 0xfca0fcb0,
0xfc0088fe, 0xfe80fc90,
0x0032f480, 0x80fc0088,
/* 0x04de: hub_barrier_done */ 0xf80032f4,
0x010f01f8, /* 0x04e1: hub_barrier_done */
0xbb040e98, 0x98010f01,
0xffb204fe, 0xfebb040e,
0x4094188e, 0x8effb204,
0x00008f7e, 0x7e409418,
/* 0x04f2: ctx_redswitch */ 0xf800008f,
0x200f00f8, /* 0x04f5: ctx_redswitch */
0x80200f00,
0xf6018500,
0x04bd000f,
/* 0x0502: ctx_redswitch_delay */
0xe2b6080e,
0xfd1bf401,
0x0800f5f1,
0x0200f5f1,
0x01850080, 0x01850080,
0xbd000ff6, 0xbd000ff6,
/* 0x04ff: ctx_redswitch_delay */ /* 0x051b: ctx_xfer */
0xb6080e04, 0x8000f804,
0x1bf401e2, 0xf6028100,
0x00f5f1fd, 0x04bd000f,
0x00f5f108, 0x7e0711f4,
0x85008002, /* 0x052b: ctx_xfer_not_load */
0x000ff601, 0x7e0004f5,
0x00f804bd, 0xbd000216,
/* 0x0518: ctx_xfer */ 0x47fc8024,
0x02810080,
0xbd000ff6,
0x0711f404,
0x0004f27e,
/* 0x0528: ctx_xfer_not_load */
0x0002167e,
0xfc8024bd,
0x02f60247,
0xf004bd00,
0x20b6012c,
0x4afc8003,
0x0002f602, 0x0002f602,
0xacf004bd, 0x2cf004bd,
0x02a5f001, 0x0320b601,
0x5000008b, 0x024afc80,
0xb6040c98, 0xbd0002f6,
0xbcbb0fc4, 0x01acf004,
0x000c9800, 0x8b02a5f0,
0x0e010d98, 0x98500000,
0x013d7e00,
0x01acf000,
0x5040008b,
0xb6040c98,
0xbcbb0fc4,
0x010c9800,
0x98020d98,
0x004e060f,
0x013d7e08,
0x01acf000,
0x8b04a5f0,
0x98503000,
0xc4b6040c, 0xc4b6040c,
0x00bcbb0f, 0x00bcbb0f,
0x98020c98, 0x98000c98,
0x0f98030d, 0x000e010d,
0x02004e08,
0x00013d7e, 0x00013d7e,
0x00020a7e, 0x8b01acf0,
0xf40601f4, 0x98504000,
/* 0x05b2: ctx_xfer_post */ 0xc4b6040c,
0x277e0712, 0x00bcbb0f,
/* 0x05b6: ctx_xfer_done */ 0x98010c98,
0xde7e0002, 0x0f98020d,
0x00f80004, 0x08004e06,
0x00000000, 0x00013d7e,
0xf001acf0,
0x008b04a5,
0x0c985030,
0x0fc4b604,
0x9800bcbb,
0x0d98020c,
0x080f9803,
0x7e02004e,
0x7e00013d,
0xf400020a,
0x12f40601,
/* 0x05b5: ctx_xfer_post */
0x02277e07,
/* 0x05b9: ctx_xfer_done */
0x04e17e00,
0x0000f800,
0x00000000, 0x00000000,
0x00000000, 0x00000000,
0x00000000, 0x00000000,
......
@@ -289,7 +289,7 @@ uint32_t gm107_grgpc_code[] = {
0x020014fe, 0x020014fe,
0x12004002, 0x12004002,
0xbd0002f6, 0xbd0002f6,
0x05b04104, 0x05b34104,
0x400010fe, 0x400010fe,
0x00f60700, 0x00f60700,
0x0204bd00, 0x0204bd00,
@@ -308,259 +308,259 @@ uint32_t gm107_grgpc_code[] = {
0xc900800f, 0xc900800f,
0x0002f601, 0x0002f601,
0x308e04bd, 0x308e04bd,
0x24bd500c, 0xe5f0500c,
0x44bd34bd, 0xbd24bd01,
/* 0x03b0: init_unk_loop */ /* 0x03b3: init_unk_loop */
0x0000657e, 0x7e44bd34,
0xf400f6b0, 0xb0000065,
0x010f0e0b, 0x0bf400f6,
0xfd04f2bb, 0xbb010f0e,
0x30b6054f, 0x4ffd04f2,
/* 0x03c5: init_unk_next */ 0x0130b605,
0x0120b601, /* 0x03c8: init_unk_next */
0xb004e0b6, 0xb60120b6,
0x1bf40226, 0x26b004e0,
/* 0x03d1: init_unk_done */ 0xe21bf402,
0x0703b5e2, /* 0x03d4: init_unk_done */
0x820804b5, 0xb50703b5,
0xcf020100, 0x00820804,
0x34bd0022, 0x22cf0201,
0x80082595, 0x9534bd00,
0xf601c000, 0x00800825,
0x05f601c0,
0x8004bd00,
0xf601c100,
0x04bd0005, 0x04bd0005,
0x01c10080, 0x98000e98,
0xbd0005f6, 0x207e010f,
0x000e9804, 0x2fbb0001,
0x7e010f98, 0x003fbb00,
0xbb000120, 0x98010e98,
0x3fbb002f, 0x207e020f,
0x010e9800, 0x0e980001,
0x7e020f98, 0x00effd05,
0x98000120, 0xbb002ebb,
0xeffd050e, 0x0e98003e,
0x002ebb00, 0x030f9802,
0x98003ebb, 0x0001207e,
0x0f98020e, 0xfd070e98,
0x01207e03, 0x2ebb00ef,
0x070e9800, 0x003ebb00,
0xbb00effd, 0x800235b6,
0x3ebb002e, 0xf601d300,
0x0235b600, 0x04bd0003,
0x01d30080, 0xb60825b6,
0xbd0003f6, 0x20b60635,
0x0825b604, 0x0130b601,
0xb60635b6, 0xb60824b6,
0x30b60120, 0x2fb20834,
0x0824b601, 0x0002687e,
0xb20834b6, 0xbb002fbb,
0x02687e2f, 0x3f0f003f,
0x002fbb00, 0x501d608e,
0x0f003fbb, 0xb201e5f0,
0x8effb23f, 0x008f7eff,
0xf0501d60, 0x8e0c0f00,
0x8f7e01e5,
0x0c0f0000,
0xa88effb2,
0xe5f0501d,
0x008f7e01,
0x03147e00,
0xb23f0f00,
0x1d608eff,
0x01e5f050,
0x00008f7e,
0xffb2000f,
0x501d9c8e,
0x7e01e5f0,
0x0f00008f,
0x03147e01,
0x8effb200,
0xf0501da8, 0xf0501da8,
0x8f7e01e5, 0xffb201e5,
0xff0f0000, 0x00008f7e,
0x988effb2, 0x0003147e,
0x608e3f0f,
0xe5f0501d, 0xe5f0501d,
0x008f7e01, 0x7effb201,
0xb2020f00, 0x0f00008f,
0x1da88eff, 0x1d9c8e00,
0x01e5f050, 0x01e5f050,
0x00008f7e, 0x8f7effb2,
0x010f0000,
0x0003147e, 0x0003147e,
0x85050498, 0x501da88e,
0x98504000, 0xb201e5f0,
0x64b60406, 0x008f7eff,
0x0056bb0f, 0x8eff0f00,
/* 0x04e0: tpc_strand_init_tpc_loop */ 0xf0501d98,
0x05705eb8, 0xffb201e5,
0x00657e00,
0xbdf6b200,
/* 0x04ed: tpc_strand_init_idx_loop */
0x605eb874,
0x7fb20005,
0x00008f7e,
0x05885eb8,
0x082f9500,
0x00008f7e,
0x058c5eb8,
0x082f9500,
0x00008f7e, 0x00008f7e,
0x05905eb8, 0xa88e020f,
0x00657e00,
0x06f5b600,
0xb601f0b6,
0x2fbb08f4,
0x003fbb00,
0xb60170b6,
0x1bf40162,
0x0050b7bf,
0x0142b608,
0x0fa81bf4,
0x8effb23f,
0xf0501d60,
0x8f7e01e5,
0x0d0f0000,
0xa88effb2,
0xe5f0501d, 0xe5f0501d,
0x008f7e01, 0x7effb201,
0x03147e00, 0x7e00008f,
0x01008000, 0x98000314,
0x0003f602, 0x00850504,
0x24bd04bd, 0x06985040,
0x801f29f0, 0x0f64b604,
0xf6023000, /* 0x04e3: tpc_strand_init_tpc_loop */
0x04bd0002, 0xb80056bb,
/* 0x0574: main */ 0x0005705e,
0xf40031f4, 0x0000657e,
0x240d0028, 0x74bdf6b2,
0x0000377e, /* 0x04f0: tpc_strand_init_idx_loop */
0xb0f401f4, 0x05605eb8,
0x18f404e4, 0x7e7fb200,
0x0181fe1d, 0xb800008f,
0x20bd0602, 0x0005885e,
0xb60412fd, 0x7e082f95,
0x1efd01e4, 0xb800008f,
0x0018fe05, 0x00058c5e,
0x0006477e, 0x7e082f95,
/* 0x05a3: main_not_ctx_xfer */ 0xb800008f,
0x94d40ef4, 0x0005905e,
0xf5f010ef, 0x0000657e,
0x02f87e01, 0xb606f5b6,
0xc70ef400, 0xf4b601f0,
/* 0x05b0: ih */ 0x002fbb08,
0x88fe80f9, 0xb6003fbb,
0xf980f901, 0x62b60170,
0xf9a0f990, 0xbf1bf401,
0xf9d0f9b0, 0x080050b7,
0xbdf0f9e0, 0xf40142b6,
0x02004a04, 0x3f0fa81b,
0xc400aacf, 0x501d608e,
0x0bf404ab, 0xb201e5f0,
0x4e240d1f, 0x008f7eff,
0xeecf1a00, 0x8e0d0f00,
0x19004f00, 0xf0501da8,
0x7e00ffcf, 0xffb201e5,
0x0e000004, 0x00008f7e,
0x1d004001, 0x0003147e,
0xbd000ef6, 0x02010080,
/* 0x05ed: ih_no_fifo */ 0xbd0003f6,
0x01004004, 0xf024bd04,
0xbd000af6, 0x00801f29,
0xfcf0fc04, 0x02f60230,
0xfcd0fce0, /* 0x0577: main */
0xfca0fcb0, 0xf404bd00,
0xfe80fc90, 0x28f40031,
0x80fc0088, 0x7e240d00,
0xf80032f4, 0xf4000037,
/* 0x060d: hub_barrier_done */ 0xe4b0f401,
0x98010f01, 0x1d18f404,
0xfebb040e, 0x020181fe,
0x8effb204, 0xfd20bd06,
0x7e409418, 0xe4b60412,
0xf800008f, 0x051efd01,
/* 0x0621: ctx_redswitch */ 0x7e0018fe,
0x80200f00, 0xf400064a,
/* 0x05a6: main_not_ctx_xfer */
0xef94d40e,
0x01f5f010,
0x0002f87e,
/* 0x05b3: ih */
0xf9c70ef4,
0x0188fe80,
0x90f980f9,
0xb0f9a0f9,
0xe0f9d0f9,
0x04bdf0f9,
0xcf02004a,
0xabc400aa,
0x1f0bf404,
0x004e240d,
0x00eecf1a,
0xcf19004f,
0x047e00ff,
0x010e0000,
0xf61d0040,
0x04bd000e,
/* 0x05f0: ih_no_fifo */
0xf6010040,
0x04bd000a,
0xe0fcf0fc,
0xb0fcd0fc,
0x90fca0fc,
0x88fe80fc,
0xf480fc00,
0x01f80032,
/* 0x0610: hub_barrier_done */
0x0e98010f,
0x04febb04,
0x188effb2,
0x8f7e4094,
0x00f80000,
/* 0x0624: ctx_redswitch */
0x0080200f,
0x0ff60185,
0x0e04bd00,
/* 0x0631: ctx_redswitch_delay */
0x01e2b608,
0xf1fd1bf4,
0xf10800f5,
0x800200f5,
0xf6018500, 0xf6018500,
0x04bd000f, 0x04bd000f,
/* 0x062e: ctx_redswitch_delay */ /* 0x064a: ctx_xfer */
0xe2b6080e, 0x008000f8,
0xfd1bf401, 0x0ff60281,
0x0800f5f1, 0x8e04bd00,
0x0200f5f1, 0xf0501dc4,
0x01850080, 0xffb201e5,
0xbd000ff6, 0x00008f7e,
/* 0x0647: ctx_xfer */ 0x7e0711f4,
0x8000f804, /* 0x0667: ctx_xfer_not_load */
0xf6028100, 0x7e000624,
0x04bd000f, 0xbd000216,
0xc48effb2, 0x47fc8024,
0xe5f0501d,
0x008f7e01,
0x0711f400,
0x0006217e,
/* 0x0664: ctx_xfer_not_load */
0x0002167e,
0xfc8024bd,
0x02f60247,
0xf004bd00,
0x20b6012c,
0x4afc8003,
0x0002f602, 0x0002f602,
0x0c0f04bd, 0x2cf004bd,
0xa88effb2, 0x0320b601,
0xe5f0501d, 0x024afc80,
0x008f7e01, 0xbd0002f6,
0x03147e00, 0x8e0c0f04,
0xb23f0f00, 0xf0501da8,
0x1d608eff, 0xffb201e5,
0x01e5f050,
0x00008f7e, 0x00008f7e,
0xffb2000f, 0x0003147e,
0x501d9c8e, 0x608e3f0f,
0x7e01e5f0, 0xe5f0501d,
0x7effb201,
0x0f00008f, 0x0f00008f,
0x03147e01, 0x1d9c8e00,
0x01fcf000,
0xb203f0b6,
0x1da88eff,
0x01e5f050, 0x01e5f050,
0x00008f7e, 0x8f7effb2,
0xf001acf0, 0x010f0000,
0x008b02a5, 0x0003147e,
0x0c985000, 0xb601fcf0,
0x0fc4b604, 0xa88e03f0,
0x9800bcbb, 0xe5f0501d,
0x0d98000c, 0x7effb201,
0x7e000e01, 0xf000008f,
0xf000013d,
0x008b01ac,
0x0c985040,
0x0fc4b604,
0x9800bcbb,
0x0d98010c,
0x060f9802,
0x7e08004e,
0xf000013d,
0xa5f001ac, 0xa5f001ac,
0x30008b04, 0x00008b02,
0x040c9850, 0x040c9850,
0xbb0fc4b6, 0xbb0fc4b6,
0x0c9800bc, 0x0c9800bc,
0x030d9802, 0x010d9800,
0x4e080f98, 0x3d7e000e,
0x3d7e0200, 0xacf00001,
0x0a7e0001, 0x40008b01,
0x147e0002, 0x040c9850,
0x01f40003, 0xbb0fc4b6,
0x1a12f406, 0x0c9800bc,
/* 0x073c: ctx_xfer_post */ 0x020d9801,
0x0002277e, 0x4e060f98,
0xffb20d0f, 0x3d7e0800,
0x501da88e, 0xacf00001,
0x7e01e5f0, 0x04a5f001,
0x7e00008f, 0x5030008b,
/* 0x0753: ctx_xfer_done */ 0xb6040c98,
0x7e000314, 0xbcbb0fc4,
0xf800060d, 0x020c9800,
0x00000000, 0x98030d98,
0x004e080f,
0x013d7e02,
0x020a7e00,
0x03147e00,
0x0601f400,
/* 0x073f: ctx_xfer_post */
0x7e1a12f4,
0x0f000227,
0x1da88e0d,
0x01e5f050,
0x8f7effb2,
0x147e0000,
/* 0x0756: ctx_xfer_done */
0x107e0003,
0x00f80006,
0x00000000, 0x00000000,
0x00000000, 0x00000000,
0x00000000, 0x00000000,
......
...@@ -143,7 +143,7 @@ gf100_gr_zbc_depth_get(struct gf100_gr *gr, int format, ...@@ -143,7 +143,7 @@ gf100_gr_zbc_depth_get(struct gf100_gr *gr, int format,
static int static int
gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size) gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size)
{ {
struct gf100_gr *gr = (void *)object->engine; struct gf100_gr *gr = gf100_gr(nvkm_gr(object->engine));
union { union {
struct fermi_a_zbc_color_v0 v0; struct fermi_a_zbc_color_v0 v0;
} *args = data; } *args = data;
...@@ -189,7 +189,7 @@ gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size) ...@@ -189,7 +189,7 @@ gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size)
static int static int
gf100_fermi_mthd_zbc_depth(struct nvkm_object *object, void *data, u32 size) gf100_fermi_mthd_zbc_depth(struct nvkm_object *object, void *data, u32 size)
{ {
struct gf100_gr *gr = (void *)object->engine; struct gf100_gr *gr = gf100_gr(nvkm_gr(object->engine));
union { union {
struct fermi_a_zbc_depth_v0 v0; struct fermi_a_zbc_depth_v0 v0;
} *args = data; } *args = data;
...@@ -1530,6 +1530,8 @@ gf100_gr_oneinit(struct nvkm_gr *base) ...@@ -1530,6 +1530,8 @@ gf100_gr_oneinit(struct nvkm_gr *base)
gr->ppc_nr[i] = gr->func->ppc_nr; gr->ppc_nr[i] = gr->func->ppc_nr;
for (j = 0; j < gr->ppc_nr[i]; j++) { for (j = 0; j < gr->ppc_nr[i]; j++) {
u8 mask = nvkm_rd32(device, GPC_UNIT(i, 0x0c30 + (j * 4))); u8 mask = nvkm_rd32(device, GPC_UNIT(i, 0x0c30 + (j * 4)));
if (mask)
gr->ppc_mask[i] |= (1 << j);
gr->ppc_tpc_nr[i][j] = hweight8(mask); gr->ppc_tpc_nr[i][j] = hweight8(mask);
} }
} }
......
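The gf100_gr_oneinit hunk above only marks a PPC unit as present when its per-GPC TPC mask register reads non-zero, and counts the TPCs it owns. A minimal, self-contained C sketch of that bookkeeping follows; the read_pes_tpc_mask() helper and its sample values stand in for nvkm_rd32(device, GPC_UNIT(gpc, 0x0c30 + ppc * 4)), and __builtin_popcount() stands in for the kernel's hweight8(). None of these stand-ins are part of the driver.

```c
#include <stdint.h>
#include <stdio.h>

#define GPC_MAX 32

/* Illustrative stand-in for the per-GPC/per-PPC mask register read. */
static uint8_t read_pes_tpc_mask(int gpc, int ppc)
{
	static const uint8_t sample[2][2] = { { 0x0f, 0x00 }, { 0x03, 0x0c } };
	return sample[gpc][ppc];
}

int main(void)
{
	uint8_t ppc_mask[GPC_MAX] = { 0 };
	uint8_t ppc_tpc_nr[GPC_MAX][4] = { { 0 } };
	int gpc, ppc;

	for (gpc = 0; gpc < 2; gpc++) {
		for (ppc = 0; ppc < 2; ppc++) {
			uint8_t mask = read_pes_tpc_mask(gpc, ppc);

			/* Only record the PPC as present if it owns at least one TPC. */
			if (mask)
				ppc_mask[gpc] |= 1 << ppc;
			ppc_tpc_nr[gpc][ppc] = __builtin_popcount(mask);
		}
		printf("gpc %d: ppc_mask=0x%02x\n", gpc, ppc_mask[gpc]);
	}
	return 0;
}
```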
...@@ -97,6 +97,7 @@ struct gf100_gr { ...@@ -97,6 +97,7 @@ struct gf100_gr {
u8 tpc_nr[GPC_MAX]; u8 tpc_nr[GPC_MAX];
u8 tpc_total; u8 tpc_total;
u8 ppc_nr[GPC_MAX]; u8 ppc_nr[GPC_MAX];
u8 ppc_mask[GPC_MAX];
u8 ppc_tpc_nr[GPC_MAX][4]; u8 ppc_tpc_nr[GPC_MAX][4];
struct nvkm_memory *unk4188b4; struct nvkm_memory *unk4188b4;
......
...@@ -97,7 +97,9 @@ static void * ...@@ -97,7 +97,9 @@ static void *
nvkm_instobj_dtor(struct nvkm_memory *memory) nvkm_instobj_dtor(struct nvkm_memory *memory)
{ {
struct nvkm_instobj *iobj = nvkm_instobj(memory); struct nvkm_instobj *iobj = nvkm_instobj(memory);
spin_lock(&iobj->imem->lock);
list_del(&iobj->head); list_del(&iobj->head);
spin_unlock(&iobj->imem->lock);
nvkm_memory_del(&iobj->parent); nvkm_memory_del(&iobj->parent);
return iobj; return iobj;
} }
...@@ -190,7 +192,9 @@ nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero, ...@@ -190,7 +192,9 @@ nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
nvkm_memory_ctor(&nvkm_instobj_func_slow, &iobj->memory); nvkm_memory_ctor(&nvkm_instobj_func_slow, &iobj->memory);
iobj->parent = memory; iobj->parent = memory;
iobj->imem = imem; iobj->imem = imem;
spin_lock(&iobj->imem->lock);
list_add_tail(&iobj->head, &imem->list); list_add_tail(&iobj->head, &imem->list);
spin_unlock(&iobj->imem->lock);
memory = &iobj->memory; memory = &iobj->memory;
} }
...@@ -309,5 +313,6 @@ nvkm_instmem_ctor(const struct nvkm_instmem_func *func, ...@@ -309,5 +313,6 @@ nvkm_instmem_ctor(const struct nvkm_instmem_func *func,
{ {
nvkm_subdev_ctor(&nvkm_instmem, device, index, 0, &imem->subdev); nvkm_subdev_ctor(&nvkm_instmem, device, index, 0, &imem->subdev);
imem->func = func; imem->func = func;
spin_lock_init(&imem->lock);
INIT_LIST_HEAD(&imem->list); INIT_LIST_HEAD(&imem->list);
} }
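The instmem hunks above take a spinlock around every insertion into and removal from imem->list, so instance objects can be created and destroyed concurrently without corrupting the list. A kernel-style sketch of the same pattern is below; the demo_* struct and function names are illustrative, not the real nvkm types.

```c
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_imem {
	spinlock_t lock;		/* protects @list */
	struct list_head list;
};

struct demo_obj {
	struct list_head head;		/* linked into demo_imem::list */
	struct demo_imem *imem;
};

static void demo_imem_init(struct demo_imem *imem)
{
	spin_lock_init(&imem->lock);
	INIT_LIST_HEAD(&imem->list);
}

static struct demo_obj *demo_obj_new(struct demo_imem *imem)
{
	struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	obj->imem = imem;

	/* Link under the lock so concurrent walkers never see a
	 * half-inserted node. */
	spin_lock(&imem->lock);
	list_add_tail(&obj->head, &imem->list);
	spin_unlock(&imem->lock);
	return obj;
}

static void demo_obj_del(struct demo_obj *obj)
{
	/* Unlink under the same lock before freeing. */
	spin_lock(&obj->imem->lock);
	list_del(&obj->head);
	spin_unlock(&obj->imem->lock);
	kfree(obj);
}
```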
...@@ -59,7 +59,7 @@ gk104_volt_set(struct nvkm_volt *base, u32 uv) ...@@ -59,7 +59,7 @@ gk104_volt_set(struct nvkm_volt *base, u32 uv)
duty = (uv - bios->base) * div / bios->pwm_range; duty = (uv - bios->base) * div / bios->pwm_range;
nvkm_wr32(device, 0x20340, div); nvkm_wr32(device, 0x20340, div);
nvkm_wr32(device, 0x20344, 0x8000000 | duty); nvkm_wr32(device, 0x20344, 0x80000000 | duty);
return 0; return 0;
} }
......
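The gk104_volt_set hunk above fixes the constant written to register 0x20344: the new value sets bit 31 (0x80000000), where the old 0x8000000 set bit 27, so the requested duty cycle was written without the output ever being enabled. A small standalone sketch of the duty computation and register value follows; it assumes, as the fix implies, that bit 31 is the PWM enable bit, and the base/pwm_range/div numbers are made-up examples rather than VBIOS values.

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t base = 600000;      /* microvolts at 0% duty (example value) */
	uint32_t pwm_range = 400000; /* microvolt span covered by the PWM (example) */
	uint32_t div = 2560;         /* PWM divider (example value) */
	uint32_t uv = 900000;        /* requested voltage in microvolts */

	uint32_t duty = (uint32_t)((uint64_t)(uv - base) * div / pwm_range);

	/* Bit 31 enables the PWM output; 0x8000000 (bit 27) would leave it off. */
	uint32_t reg = 0x80000000u | duty;

	printf("duty=%u reg=0x%08x\n", duty, reg);
	return 0;
}
```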
...@@ -464,7 +464,7 @@ void rv730_stop_dpm(struct radeon_device *rdev) ...@@ -464,7 +464,7 @@ void rv730_stop_dpm(struct radeon_device *rdev)
result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_TwoLevelsDisabled); result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_TwoLevelsDisabled);
if (result != PPSMC_Result_OK) if (result != PPSMC_Result_OK)
DRM_ERROR("Could not force DPM to low\n"); DRM_DEBUG("Could not force DPM to low\n");
WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN); WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
......
...@@ -193,7 +193,7 @@ void rv770_stop_dpm(struct radeon_device *rdev) ...@@ -193,7 +193,7 @@ void rv770_stop_dpm(struct radeon_device *rdev)
result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_TwoLevelsDisabled); result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_TwoLevelsDisabled);
if (result != PPSMC_Result_OK) if (result != PPSMC_Result_OK)
DRM_ERROR("Could not force DPM to low.\n"); DRM_DEBUG("Could not force DPM to low.\n");
WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN); WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
...@@ -1418,7 +1418,7 @@ int rv770_resume_smc(struct radeon_device *rdev) ...@@ -1418,7 +1418,7 @@ int rv770_resume_smc(struct radeon_device *rdev)
int rv770_set_sw_state(struct radeon_device *rdev) int rv770_set_sw_state(struct radeon_device *rdev)
{ {
if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToSwState) != PPSMC_Result_OK) if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToSwState) != PPSMC_Result_OK)
return -EINVAL; DRM_DEBUG("rv770_set_sw_state failed\n");
return 0; return 0;
} }
......
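The rv730/rv770 hunks above downgrade failed SMC messages from hard errors to debug output, so a missed acknowledgement no longer aborts the surrounding DPM sequence. A minimal sketch of that non-fatal pattern is below; send_msg_to_smc() is a placeholder that always fails, not the radeon rv770_send_msg_to_smc() API.

```c
#include <stdbool.h>
#include <stdio.h>

/* Placeholder SMC call; returns false to simulate a failed message. */
static bool send_msg_to_smc(const char *msg)
{
	printf("SMC <- %s\n", msg);
	return false;
}

/* Old behaviour: return -EINVAL and abort the caller on failure.
 * New behaviour: log at debug level and report success anyway. */
static int set_sw_state(void)
{
	if (!send_msg_to_smc("SwitchToSwState"))
		fprintf(stderr, "debug: set_sw_state failed, continuing\n");
	return 0;
}

int main(void)
{
	return set_sw_state();
}
```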