Commit 62ec7d38 authored by Lijo Lazar, committed by Alex Deucher

drm/amdkfd: Use device based logging for errors

Convert some pr_* calls to the corresponding dev_* APIs so that log messages identify the originating device.
Signed-off-by: Lijo Lazar <lijo.lazar@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent d3dbccac
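
The pattern is the same throughout the patch: the dev_* logging helpers take a struct device, which KFD reaches through the kfd_node's amdgpu back-pointer (->adev->dev), so each message is prefixed with the emitting GPU in the kernel log. A minimal before/after sketch of the conversion, drawn from the hunks below (the dmesg prefix shown is illustrative):

	/* Before: the log line does not say which GPU reported the error. */
	pr_err("Failed to create process device data\n");

	/*
	 * After: dev_err() routes the message through the device, so dmesg
	 * reads something like
	 *   "amdgpu 0000:03:00.0: Failed to create process device data"
	 * (PCI address illustrative; dev is a struct kfd_node * here).
	 */
	dev_err(dev->adev->dev, "Failed to create process device data\n");

	/*
	 * Functions that log several times hoist the lookup into locals once,
	 * renaming the kfd_node pointer to node so dev can hold the device:
	 */
	struct kfd_node *node = pm->dqm->dev;
	struct device *dev = node->adev->dev;

	dev_dbg(dev, "runlist ib size %d\n", *rlib_size);
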
@@ -380,7 +380,8 @@ int kfd_init_apertures(struct kfd_process *process)
 		pdd = kfd_create_process_device_data(dev, process);
 		if (!pdd) {
-			pr_err("Failed to create process device data\n");
+			dev_err(dev->adev->dev,
+				"Failed to create process device data\n");
 			return -ENOMEM;
 		}
 
 		/*
...
@@ -69,7 +69,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_node *dev,
 		kq->mqd_mgr = dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ];
 		break;
 	default:
-		pr_err("Invalid queue type %d\n", type);
+		dev_err(dev->adev->dev, "Invalid queue type %d\n", type);
 		return false;
 	}
 
@@ -79,13 +79,14 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_node *dev,
 	prop.doorbell_ptr = kfd_get_kernel_doorbell(dev->kfd, &prop.doorbell_off);
 	if (!prop.doorbell_ptr) {
-		pr_err("Failed to initialize doorbell");
+		dev_err(dev->adev->dev, "Failed to initialize doorbell");
 		goto err_get_kernel_doorbell;
 	}
 
 	retval = kfd_gtt_sa_allocate(dev, queue_size, &kq->pq);
 	if (retval != 0) {
-		pr_err("Failed to init pq queues size %d\n", queue_size);
+		dev_err(dev->adev->dev, "Failed to init pq queues size %d\n",
+			queue_size);
 		goto err_pq_allocate_vidmem;
 	}
 
@@ -341,7 +342,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_node *dev,
 	if (kq_initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE))
 		return kq;
 
-	pr_err("Failed to init kernel queue\n");
+	dev_err(dev->adev->dev, "Failed to init kernel queue\n");
 	kfree(kq);
 	return NULL;
@@ -360,26 +361,26 @@ static __attribute__((unused)) void test_kq(struct kfd_node *dev)
 	uint32_t *buffer, i;
 	int retval;
 
-	pr_err("Starting kernel queue test\n");
+	dev_err(dev->adev->dev, "Starting kernel queue test\n");
 
 	kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ);
 	if (unlikely(!kq)) {
-		pr_err(" Failed to initialize HIQ\n");
-		pr_err("Kernel queue test failed\n");
+		dev_err(dev->adev->dev, " Failed to initialize HIQ\n");
+		dev_err(dev->adev->dev, "Kernel queue test failed\n");
 		return;
 	}
 
 	retval = kq_acquire_packet_buffer(kq, 5, &buffer);
 	if (unlikely(retval != 0)) {
-		pr_err(" Failed to acquire packet buffer\n");
-		pr_err("Kernel queue test failed\n");
+		dev_err(dev->adev->dev, " Failed to acquire packet buffer\n");
+		dev_err(dev->adev->dev, "Kernel queue test failed\n");
 		return;
 	}
 
 	for (i = 0; i < 5; i++)
 		buffer[i] = kq->nop_packet;
 	kq_submit_packet(kq);
 
-	pr_err("Ending kernel queue test\n");
+	dev_err(dev->adev->dev, "Ending kernel queue test\n");
 }
@@ -118,12 +118,14 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
 	 * attention grabbing.
 	 */
 	if (gfx_info->max_shader_engines > KFD_MAX_NUM_SE) {
-		pr_err("Exceeded KFD_MAX_NUM_SE, chip reports %d\n",
-			gfx_info->max_shader_engines);
+		dev_err(mm->dev->adev->dev,
+			"Exceeded KFD_MAX_NUM_SE, chip reports %d\n",
+			gfx_info->max_shader_engines);
 		return;
 	}
 
 	if (gfx_info->max_sh_per_se > KFD_MAX_NUM_SH_PER_SE) {
-		pr_err("Exceeded KFD_MAX_NUM_SH, chip reports %d\n",
+		dev_err(mm->dev->adev->dev,
+			"Exceeded KFD_MAX_NUM_SH, chip reports %d\n",
 			gfx_info->max_sh_per_se * gfx_info->max_shader_engines);
 		return;
 	}
...
@@ -45,7 +45,8 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
 	unsigned int process_count, queue_count, compute_queue_count, gws_queue_count;
 	unsigned int map_queue_size;
 	unsigned int max_proc_per_quantum = 1;
-	struct kfd_node *dev = pm->dqm->dev;
+	struct kfd_node *node = pm->dqm->dev;
+	struct device *dev = node->adev->dev;
 
 	process_count = pm->dqm->processes_count;
 	queue_count = pm->dqm->active_queue_count;
@@ -59,14 +60,14 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
 	 */
 	*over_subscription = false;
 
-	if (dev->max_proc_per_quantum > 1)
-		max_proc_per_quantum = dev->max_proc_per_quantum;
+	if (node->max_proc_per_quantum > 1)
+		max_proc_per_quantum = node->max_proc_per_quantum;
 
 	if ((process_count > max_proc_per_quantum) ||
 	    compute_queue_count > get_cp_queues_num(pm->dqm) ||
 	    gws_queue_count > 1) {
 		*over_subscription = true;
-		pr_debug("Over subscribed runlist\n");
+		dev_dbg(dev, "Over subscribed runlist\n");
 	}
 
 	map_queue_size = pm->pmf->map_queues_size;
@@ -81,7 +82,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
 	if (*over_subscription)
 		*rlib_size += pm->pmf->runlist_size;
 
-	pr_debug("runlist ib size %d\n", *rlib_size);
+	dev_dbg(dev, "runlist ib size %d\n", *rlib_size);
 }
 
 static int pm_allocate_runlist_ib(struct packet_manager *pm,
@@ -90,6 +91,8 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
 				unsigned int *rl_buffer_size,
 				bool *is_over_subscription)
 {
+	struct kfd_node *node = pm->dqm->dev;
+	struct device *dev = node->adev->dev;
 	int retval;
 
 	if (WARN_ON(pm->allocated))
@@ -99,11 +102,10 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
 	mutex_lock(&pm->lock);
 
-	retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size,
-					&pm->ib_buffer_obj);
+	retval = kfd_gtt_sa_allocate(node, *rl_buffer_size, &pm->ib_buffer_obj);
 
 	if (retval) {
-		pr_err("Failed to allocate runlist IB\n");
+		dev_err(dev, "Failed to allocate runlist IB\n");
 		goto out;
 	}
@@ -125,6 +127,8 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
 {
 	unsigned int alloc_size_bytes;
 	unsigned int *rl_buffer, rl_wptr, i;
+	struct kfd_node *node = pm->dqm->dev;
+	struct device *dev = node->adev->dev;
 	int retval, processes_mapped;
 	struct device_process_node *cur;
 	struct qcm_process_device *qpd;
@@ -142,7 +146,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
 	*rl_size_bytes = alloc_size_bytes;
 	pm->ib_size_bytes = alloc_size_bytes;
 
-	pr_debug("Building runlist ib process count: %d queues count %d\n",
+	dev_dbg(dev, "Building runlist ib process count: %d queues count %d\n",
 		pm->dqm->processes_count, pm->dqm->active_queue_count);
 
 	/* build the run list ib packet */
@@ -150,7 +154,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
 		qpd = cur->qpd;
 		/* build map process packet */
 		if (processes_mapped >= pm->dqm->processes_count) {
-			pr_debug("Not enough space left in runlist IB\n");
+			dev_dbg(dev, "Not enough space left in runlist IB\n");
 			pm_release_ib(pm);
 			return -ENOMEM;
 		}
@@ -167,7 +171,8 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
 			if (!kq->queue->properties.is_active)
 				continue;
 
-			pr_debug("static_queue, mapping kernel q %d, is debug status %d\n",
+			dev_dbg(dev,
+				"static_queue, mapping kernel q %d, is debug status %d\n",
 				kq->queue->queue, qpd->is_debug);
 
 			retval = pm->pmf->map_queues(pm,
@@ -186,7 +191,8 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
 			if (!q->properties.is_active)
 				continue;
 
-			pr_debug("static_queue, mapping user queue %d, is debug status %d\n",
+			dev_dbg(dev,
+				"static_queue, mapping user queue %d, is debug status %d\n",
 				q->queue, qpd->is_debug);
 
 			retval = pm->pmf->map_queues(pm,
@@ -203,11 +209,13 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
 		}
 	}
 
-	pr_debug("Finished map process and queues to runlist\n");
+	dev_dbg(dev, "Finished map process and queues to runlist\n");
 
 	if (is_over_subscription) {
 		if (!pm->is_over_subscription)
-			pr_warn("Runlist is getting oversubscribed. Expect reduced ROCm performance.\n");
+			dev_warn(
+				dev,
+				"Runlist is getting oversubscribed. Expect reduced ROCm performance.\n");
 		retval = pm->pmf->runlist(pm, &rl_buffer[rl_wptr],
 					*rl_gpu_addr,
 					alloc_size_bytes / sizeof(uint32_t),
@@ -273,6 +281,8 @@ void pm_uninit(struct packet_manager *pm)
 int pm_send_set_resources(struct packet_manager *pm,
 				struct scheduling_resources *res)
 {
+	struct kfd_node *node = pm->dqm->dev;
+	struct device *dev = node->adev->dev;
 	uint32_t *buffer, size;
 	int retval = 0;
@@ -282,7 +292,7 @@ int pm_send_set_resources(struct packet_manager *pm,
 					size / sizeof(uint32_t),
 					(unsigned int **)&buffer);
 	if (!buffer) {
-		pr_err("Failed to allocate buffer on kernel queue\n");
+		dev_err(dev, "Failed to allocate buffer on kernel queue\n");
 		retval = -ENOMEM;
 		goto out;
 	}
@@ -344,6 +354,8 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
 int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
 			uint64_t fence_value)
 {
+	struct kfd_node *node = pm->dqm->dev;
+	struct device *dev = node->adev->dev;
 	uint32_t *buffer, size;
 	int retval = 0;
@@ -355,7 +367,7 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
 	kq_acquire_packet_buffer(pm->priv_queue,
 			size / sizeof(uint32_t), (unsigned int **)&buffer);
 	if (!buffer) {
-		pr_err("Failed to allocate buffer on kernel queue\n");
+		dev_err(dev, "Failed to allocate buffer on kernel queue\n");
 		retval = -ENOMEM;
 		goto out;
 	}
@@ -373,6 +385,8 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
 int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period)
 {
+	struct kfd_node *node = pm->dqm->dev;
+	struct device *dev = node->adev->dev;
 	int retval = 0;
 	uint32_t *buffer, size;
@@ -386,7 +400,8 @@ int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period)
 				(unsigned int **)&buffer);
 		if (!buffer) {
-			pr_err("Failed to allocate buffer on kernel queue\n");
+			dev_err(dev,
+				"Failed to allocate buffer on kernel queue\n");
 			retval = -ENOMEM;
 			goto out;
 		}
@@ -407,6 +422,8 @@ int pm_send_unmap_queue(struct packet_manager *pm,
 			enum kfd_unmap_queues_filter filter,
 			uint32_t filter_param, bool reset)
 {
+	struct kfd_node *node = pm->dqm->dev;
+	struct device *dev = node->adev->dev;
 	uint32_t *buffer, size;
 	int retval = 0;
@@ -415,7 +432,7 @@ int pm_send_unmap_queue(struct packet_manager *pm,
 	kq_acquire_packet_buffer(pm->priv_queue,
 			size / sizeof(uint32_t), (unsigned int **)&buffer);
 	if (!buffer) {
-		pr_err("Failed to allocate buffer on kernel queue\n");
+		dev_err(dev, "Failed to allocate buffer on kernel queue\n");
 		retval = -ENOMEM;
 		goto out;
 	}
@@ -464,6 +481,8 @@ int pm_debugfs_runlist(struct seq_file *m, void *data)
 int pm_debugfs_hang_hws(struct packet_manager *pm)
 {
+	struct kfd_node *node = pm->dqm->dev;
+	struct device *dev = node->adev->dev;
 	uint32_t *buffer, size;
 	int r = 0;
@@ -475,16 +494,16 @@ int pm_debugfs_hang_hws(struct packet_manager *pm)
 	kq_acquire_packet_buffer(pm->priv_queue,
 			size / sizeof(uint32_t), (unsigned int **)&buffer);
 	if (!buffer) {
-		pr_err("Failed to allocate buffer on kernel queue\n");
+		dev_err(dev, "Failed to allocate buffer on kernel queue\n");
 		r = -ENOMEM;
 		goto out;
 	}
 	memset(buffer, 0x55, size);
 	kq_submit_packet(pm->priv_queue);
 
-	pr_info("Submitting %x %x %x %x %x %x %x to HIQ to hang the HWS.",
-		buffer[0], buffer[1], buffer[2], buffer[3],
-		buffer[4], buffer[5], buffer[6]);
+	dev_info(dev, "Submitting %x %x %x %x %x %x %x to HIQ to hang the HWS.",
+		 buffer[0], buffer[1], buffer[2], buffer[3], buffer[4],
+		 buffer[5], buffer[6]);
 out:
 	mutex_unlock(&pm->lock);
 	return r;
...
@@ -1311,7 +1311,8 @@ int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
 		if (IS_ERR_VALUE(qpd->tba_addr)) {
 			int err = qpd->tba_addr;
 
-			pr_err("Failure to set tba address. error %d.\n", err);
+			dev_err(dev->adev->dev,
+				"Failure to set tba address. error %d.\n", err);
 			qpd->tba_addr = 0;
 			qpd->cwsr_kaddr = NULL;
 			return err;
@@ -1611,7 +1612,8 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
 						&pdd->proc_ctx_cpu_ptr,
 						false);
 		if (retval) {
-			pr_err("failed to allocate process context bo\n");
+			dev_err(dev->adev->dev,
+				"failed to allocate process context bo\n");
 			goto err_free_pdd;
 		}
 		memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
@@ -1676,7 +1678,7 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
 						&p->kgd_process_info,
 						&ef);
 	if (ret) {
-		pr_err("Failed to create process VM object\n");
+		dev_err(dev->adev->dev, "Failed to create process VM object\n");
 		return ret;
 	}
 	RCU_INIT_POINTER(p->ef, ef);
@@ -1723,7 +1725,7 @@ struct kfd_process_device *kfd_bind_process_to_device(struct kfd_node *dev,
 	pdd = kfd_get_process_device_data(dev, p);
 	if (!pdd) {
-		pr_err("Process device data doesn't exist\n");
+		dev_err(dev->adev->dev, "Process device data doesn't exist\n");
 		return ERR_PTR(-ENOMEM);
 	}
@@ -1833,6 +1835,7 @@ int kfd_process_evict_queues(struct kfd_process *p, uint32_t trigger)
 	for (i = 0; i < p->n_pdds; i++) {
 		struct kfd_process_device *pdd = p->pdds[i];
+		struct device *dev = pdd->dev->adev->dev;
 
 		kfd_smi_event_queue_eviction(pdd->dev, p->lead_thread->pid,
 					     trigger);
@@ -1844,7 +1847,7 @@ int kfd_process_evict_queues(struct kfd_process *p, uint32_t trigger)
 		 * them been add back since they actually not be saved right now.
 		 */
 		if (r && r != -EIO) {
-			pr_err("Failed to evict process queues\n");
+			dev_err(dev, "Failed to evict process queues\n");
 			goto fail;
 		}
 		n_evicted++;
@@ -1866,7 +1869,8 @@ int kfd_process_evict_queues(struct kfd_process *p, uint32_t trigger)
 		if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
 							      &pdd->qpd))
-			pr_err("Failed to restore queues\n");
+			dev_err(pdd->dev->adev->dev,
+				"Failed to restore queues\n");
 
 		n_evicted--;
 	}
@@ -1882,13 +1886,14 @@ int kfd_process_restore_queues(struct kfd_process *p)
 	for (i = 0; i < p->n_pdds; i++) {
 		struct kfd_process_device *pdd = p->pdds[i];
+		struct device *dev = pdd->dev->adev->dev;
 
 		kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid);
 
 		r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
 							      &pdd->qpd);
 		if (r) {
-			pr_err("Failed to restore process queues\n");
+			dev_err(dev, "Failed to restore process queues\n");
 			if (!ret)
 				ret = r;
 		}
@@ -2065,7 +2070,7 @@ int kfd_reserved_mem_mmap(struct kfd_node *dev, struct kfd_process *process,
 	struct qcm_process_device *qpd;
 
 	if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
-		pr_err("Incorrect CWSR mapping size.\n");
+		dev_err(dev->adev->dev, "Incorrect CWSR mapping size.\n");
 		return -EINVAL;
 	}
@@ -2077,7 +2082,8 @@ int kfd_reserved_mem_mmap(struct kfd_node *dev, struct kfd_process *process,
 	qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
 					get_order(KFD_CWSR_TBA_TMA_SIZE));
 	if (!qpd->cwsr_kaddr) {
-		pr_err("Error allocating per process CWSR buffer.\n");
+		dev_err(dev->adev->dev,
+			"Error allocating per process CWSR buffer.\n");
 		return -ENOMEM;
 	}
...