Commit 4eacc26b authored by Kent Russell, committed by Oded Gabbay

drm/amdkfd: Change x==NULL/false references to !x

Upstream prefers the !x notation to x == NULL or x == false. Along those lines,
change the == true and != NULL references as well, and make the existing !x
references consistent by dropping the surrounding parentheses for readability.
Signed-off-by: Kent Russell <kent.russell@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
parent 79775b62
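For illustration, the whole patch reduces to one mechanical pattern. Here is a minimal standalone C sketch of that pattern (the check_device() helper is hypothetical, not code from this patch):

```c
#include <stddef.h>
#include <stdbool.h>

/*
 * Hypothetical check_device() helper, for illustration only: each
 * condition shows the upstream-preferred form, with the equivalent
 * old-style comparison in the comment above it.
 */
static int check_device(void *dev, bool ready, int status)
{
	/* was: if (dev == NULL) */
	if (!dev)
		return -1;

	/* was: if (ready == false) */
	if (!ready)
		return -1;

	/* was: if (status != 0) */
	if (status)
		return status;

	return 0;
}
```

For pointers, booleans, and integer status codes the two spellings are equivalent; checkpatch.pl flags the explicit comparisons ("Comparison to NULL could be written '!dev'"), which is why the shorter form is preferred upstream.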
@@ -265,7 +265,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
 	pr_debug("Looking for gpu id 0x%x\n", args->gpu_id);
 	dev = kfd_device_by_id(args->gpu_id);
-	if (dev == NULL) {
+	if (!dev) {
 		pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
 		return -EINVAL;
 	}
@@ -400,7 +400,7 @@ static int kfd_ioctl_set_memory_policy(struct file *filep,
 	}
 	dev = kfd_device_by_id(args->gpu_id);
-	if (dev == NULL)
+	if (!dev)
 		return -EINVAL;
 	mutex_lock(&p->mutex);
@@ -443,7 +443,7 @@ static int kfd_ioctl_dbg_register(struct file *filep,
 	long status = 0;
 	dev = kfd_device_by_id(args->gpu_id);
-	if (dev == NULL)
+	if (!dev)
 		return -EINVAL;
 	if (dev->device_info->asic_family == CHIP_CARRIZO) {
@@ -465,7 +465,7 @@ static int kfd_ioctl_dbg_register(struct file *filep,
 		return PTR_ERR(pdd);
 	}
-	if (dev->dbgmgr == NULL) {
+	if (!dev->dbgmgr) {
 		/* In case of a legal call, we have no dbgmgr yet */
 		create_ok = kfd_dbgmgr_create(&dbgmgr_ptr, dev);
 		if (create_ok) {
@@ -494,7 +494,7 @@ static int kfd_ioctl_dbg_unregister(struct file *filep,
 	long status;
 	dev = kfd_device_by_id(args->gpu_id);
-	if (dev == NULL)
+	if (!dev)
 		return -EINVAL;
 	if (dev->device_info->asic_family == CHIP_CARRIZO) {
@@ -505,7 +505,7 @@ static int kfd_ioctl_dbg_unregister(struct file *filep,
 	mutex_lock(kfd_get_dbgmgr_mutex());
 	status = kfd_dbgmgr_unregister(dev->dbgmgr, p);
-	if (status == 0) {
+	if (!status) {
 		kfd_dbgmgr_destroy(dev->dbgmgr);
 		dev->dbgmgr = NULL;
 	}
@@ -539,7 +539,7 @@ static int kfd_ioctl_dbg_address_watch(struct file *filep,
 	memset((void *) &aw_info, 0, sizeof(struct dbg_address_watch_info));
 	dev = kfd_device_by_id(args->gpu_id);
-	if (dev == NULL)
+	if (!dev)
 		return -EINVAL;
 	if (dev->device_info->asic_family == CHIP_CARRIZO) {
@@ -646,7 +646,7 @@ static int kfd_ioctl_dbg_wave_control(struct file *filep,
 			sizeof(wac_info.trapId);
 	dev = kfd_device_by_id(args->gpu_id);
-	if (dev == NULL)
+	if (!dev)
 		return -EINVAL;
 	if (dev->device_info->asic_family == CHIP_CARRIZO) {
@@ -782,9 +782,9 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
 				"scratch_limit %llX\n", pdd->scratch_limit);
 			args->num_of_nodes++;
-		} while ((pdd = kfd_get_next_process_device_data(p, pdd)) !=
-				NULL &&
-			(args->num_of_nodes < NUM_OF_SUPPORTED_GPUS));
+			pdd = kfd_get_next_process_device_data(p, pdd);
+		} while (pdd && (args->num_of_nodes < NUM_OF_SUPPORTED_GPUS));
 	}
 	mutex_unlock(&p->mutex);
...
@@ -77,7 +77,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
 	status = kq->ops.acquire_packet_buffer(kq,
 			pq_packets_size_in_bytes / sizeof(uint32_t),
 			&ib_packet_buff);
-	if (status != 0) {
+	if (status) {
 		pr_err("acquire_packet_buffer failed\n");
 		return status;
 	}
@@ -115,7 +115,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
 	status = kfd_gtt_sa_allocate(dbgdev->dev, sizeof(uint64_t),
 					&mem_obj);
-	if (status != 0) {
+	if (status) {
 		pr_err("Failed to allocate GART memory\n");
 		kq->ops.rollback_packet(kq);
 		return status;
@@ -202,7 +202,7 @@ static int dbgdev_register_diq(struct kfd_dbgdev *dbgdev)
 	kq = pqm_get_kernel_queue(dbgdev->pqm, qid);
-	if (kq == NULL) {
+	if (!kq) {
 		pr_err("Error getting DIQ\n");
 		pqm_destroy_queue(dbgdev->pqm, qid);
 		return -EFAULT;
@@ -252,7 +252,7 @@ static void dbgdev_address_watch_set_registers(
 	addrLo->u32All = 0;
 	cntl->u32All = 0;
-	if (adw_info->watch_mask != NULL)
+	if (adw_info->watch_mask)
 		cntl->bitfields.mask =
 			(uint32_t) (adw_info->watch_mask[index] &
 				ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK);
@@ -307,8 +307,7 @@ static int dbgdev_address_watch_nodiq(struct kfd_dbgdev *dbgdev,
 		return -EINVAL;
 	}
-	if ((adw_info->watch_mode == NULL) ||
-		(adw_info->watch_address == NULL)) {
+	if (!adw_info->watch_mode || !adw_info->watch_address) {
 		pr_err("adw_info fields are not valid\n");
 		return -EINVAL;
 	}
@@ -375,15 +374,14 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev,
 		return -EINVAL;
 	}
-	if ((NULL == adw_info->watch_mode) ||
-		(NULL == adw_info->watch_address)) {
+	if (!adw_info->watch_mode || !adw_info->watch_address) {
 		pr_err("adw_info fields are not valid\n");
 		return -EINVAL;
 	}
 	status = kfd_gtt_sa_allocate(dbgdev->dev, ib_size, &mem_obj);
-	if (status != 0) {
+	if (status) {
 		pr_err("Failed to allocate GART memory\n");
 		return status;
 	}
@@ -490,7 +488,7 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev,
 				packet_buff_uint,
 				ib_size);
-	if (status != 0) {
+	if (status) {
 		pr_err("Failed to submit IB to DIQ\n");
 		break;
 	}
@@ -711,7 +709,7 @@ static int dbgdev_wave_control_diq(struct kfd_dbgdev *dbgdev,
 				packet_buff_uint,
 				ib_size);
-	if (status != 0)
+	if (status)
 		pr_err("Failed to submit IB to DIQ\n");
 	kfd_gtt_sa_free(dbgdev->dev, mem_obj);
...
@@ -55,7 +55,7 @@ static void kfd_dbgmgr_uninitialize(struct kfd_dbgmgr *pmgr)
 void kfd_dbgmgr_destroy(struct kfd_dbgmgr *pmgr)
 {
-	if (pmgr != NULL) {
+	if (pmgr) {
 		kfd_dbgmgr_uninitialize(pmgr);
 		kfree(pmgr);
 	}
@@ -66,7 +66,7 @@ bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev)
 	enum DBGDEV_TYPE type = DBGDEV_TYPE_DIQ;
 	struct kfd_dbgmgr *new_buff;
-	BUG_ON(pdev == NULL);
+	BUG_ON(!pdev);
 	BUG_ON(!pdev->init_complete);
 	new_buff = kfd_alloc_struct(new_buff);
...
@@ -98,7 +98,7 @@ static const struct kfd_device_info *lookup_device_info(unsigned short did)
 	for (i = 0; i < ARRAY_SIZE(supported_devices); i++) {
 		if (supported_devices[i].did == did) {
-			BUG_ON(supported_devices[i].device_info == NULL);
+			BUG_ON(!supported_devices[i].device_info);
 			return supported_devices[i].device_info;
 		}
 	}
@@ -212,7 +212,7 @@ static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid,
 			flags);
 	dev = kfd_device_by_pci_dev(pdev);
-	BUG_ON(dev == NULL);
+	BUG_ON(!dev);
 	kfd_signal_iommu_event(dev, pasid, address,
 			flags & PPR_FAULT_WRITE, flags & PPR_FAULT_EXEC);
@@ -262,7 +262,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 	kfd_doorbell_init(kfd);
-	if (kfd_topology_add_device(kfd) != 0) {
+	if (kfd_topology_add_device(kfd)) {
 		dev_err(kfd_device, "Error adding device to topology\n");
 		goto kfd_topology_add_device_error;
 	}
@@ -288,7 +288,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 		goto device_queue_manager_error;
 	}
-	if (kfd->dqm->ops.start(kfd->dqm) != 0) {
+	if (kfd->dqm->ops.start(kfd->dqm)) {
 		dev_err(kfd_device,
 			"Error starting queue manager for device %x:%x\n",
 			kfd->pdev->vendor, kfd->pdev->device);
@@ -341,7 +341,7 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
 void kgd2kfd_suspend(struct kfd_dev *kfd)
 {
-	BUG_ON(kfd == NULL);
+	BUG_ON(!kfd);
 	if (kfd->init_complete) {
 		kfd->dqm->ops.stop(kfd->dqm);
...
@@ -167,7 +167,7 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
 	if (list_empty(&qpd->queues_list)) {
 		retval = allocate_vmid(dqm, qpd, q);
-		if (retval != 0) {
+		if (retval) {
 			mutex_unlock(&dqm->lock);
 			return retval;
 		}
@@ -180,7 +180,7 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
 	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
 		retval = create_sdma_queue_nocpsch(dqm, q, qpd);
-	if (retval != 0) {
+	if (retval) {
 		if (list_empty(&qpd->queues_list)) {
 			deallocate_vmid(dqm, qpd, q);
 			*allocated_vmid = 0;
@@ -262,16 +262,16 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
 	BUG_ON(!dqm || !q || !qpd);
 	mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
-	if (mqd == NULL)
+	if (!mqd)
 		return -ENOMEM;
 	retval = allocate_hqd(dqm, q);
-	if (retval != 0)
+	if (retval)
 		return retval;
 	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
 				&q->gart_mqd_addr, &q->properties);
-	if (retval != 0) {
+	if (retval) {
 		deallocate_hqd(dqm, q);
 		return retval;
 	}
@@ -281,7 +281,7 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
 	retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
 			q->queue, (uint32_t __user *) q->properties.write_ptr);
-	if (retval != 0) {
+	if (retval) {
 		deallocate_hqd(dqm, q);
 		mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
 		return retval;
@@ -330,7 +330,7 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
 				QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
 				q->pipe, q->queue);
-	if (retval != 0)
+	if (retval)
 		goto out;
 	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
@@ -365,7 +365,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
 	mutex_lock(&dqm->lock);
 	mqd = dqm->ops.get_mqd_manager(dqm,
 			get_mqd_type_from_queue_type(q->properties.type));
-	if (mqd == NULL) {
+	if (!mqd) {
 		mutex_unlock(&dqm->lock);
 		return -ENOMEM;
 	}
@@ -381,7 +381,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
 	retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
 	if ((q->properties.is_active) && (!prev_active))
 		dqm->queue_count++;
-	else if ((!q->properties.is_active) && (prev_active))
+	else if (!q->properties.is_active && prev_active)
 		dqm->queue_count--;
 	if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
@@ -403,7 +403,7 @@ static struct mqd_manager *get_mqd_manager_nocpsch(
 	mqd = dqm->mqds[type];
 	if (!mqd) {
 		mqd = mqd_manager_init(type, dqm->dev);
-		if (mqd == NULL)
+		if (!mqd)
 			pr_err("mqd manager is NULL");
 		dqm->mqds[type] = mqd;
 	}
@@ -485,7 +485,7 @@ static void init_interrupts(struct device_queue_manager *dqm)
 {
 	unsigned int i;
-	BUG_ON(dqm == NULL);
+	BUG_ON(!dqm);
 	for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++)
 		if (is_pipe_enabled(dqm, 0, i))
@@ -589,7 +589,7 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
 		return -ENOMEM;
 	retval = allocate_sdma_queue(dqm, &q->sdma_id);
-	if (retval != 0)
+	if (retval)
 		return retval;
 	q->properties.sdma_queue_id = q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
@@ -602,14 +602,14 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
 	dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd);
 	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
 				&q->gart_mqd_addr, &q->properties);
-	if (retval != 0) {
+	if (retval) {
 		deallocate_sdma_queue(dqm, q->sdma_id);
 		return retval;
 	}
 	retval = mqd->load_mqd(mqd, q->mqd, 0,
 				0, NULL);
-	if (retval != 0) {
+	if (retval) {
 		deallocate_sdma_queue(dqm, q->sdma_id);
 		mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
 		return retval;
@@ -680,7 +680,7 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
 	dqm->sdma_queue_count = 0;
 	dqm->active_runlist = false;
 	retval = dqm->ops_asic_specific.initialize(dqm);
-	if (retval != 0)
+	if (retval)
 		goto fail_init_pipelines;
 	return 0;
@@ -700,11 +700,11 @@ static int start_cpsch(struct device_queue_manager *dqm)
 	retval = 0;
 	retval = pm_init(&dqm->packets, dqm);
-	if (retval != 0)
+	if (retval)
 		goto fail_packet_manager_init;
 	retval = set_sched_resources(dqm);
-	if (retval != 0)
+	if (retval)
 		goto fail_set_sched_resources;
 	pr_debug("Allocating fence memory\n");
@@ -713,7 +713,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
 	retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
 					&dqm->fence_mem);
-	if (retval != 0)
+	if (retval)
 		goto fail_allocate_vidmem;
 	dqm->fence_addr = dqm->fence_mem->cpu_ptr;
@@ -845,7 +845,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
 	mqd = dqm->ops.get_mqd_manager(dqm,
 			get_mqd_type_from_queue_type(q->properties.type));
-	if (mqd == NULL) {
+	if (!mqd) {
 		mutex_unlock(&dqm->lock);
 		return -ENOMEM;
 	}
@@ -853,7 +853,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
 	dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd);
 	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
 				&q->gart_mqd_addr, &q->properties);
-	if (retval != 0)
+	if (retval)
 		goto out;
 	list_add(&q->list, &qpd->queues_list);
@@ -934,7 +934,7 @@ static int destroy_queues_cpsch(struct device_queue_manager *dqm,
 	retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
 			preempt_type, 0, false, 0);
-	if (retval != 0)
+	if (retval)
 		goto out;
 	*dqm->fence_addr = KFD_FENCE_INIT;
@@ -943,7 +943,7 @@ static int destroy_queues_cpsch(struct device_queue_manager *dqm,
 	/* should be timed out */
 	retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
 				QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS);
-	if (retval != 0) {
+	if (retval) {
 		pdd = kfd_get_process_device_data(dqm->dev,
 				kfd_get_process(current));
 		pdd->reset_wavefronts = true;
@@ -968,7 +968,7 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock)
 		mutex_lock(&dqm->lock);
 	retval = destroy_queues_cpsch(dqm, false, false);
-	if (retval != 0) {
+	if (retval) {
 		pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption");
 		goto out;
 	}
@@ -984,7 +984,7 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock)
 	}
 	retval = pm_send_runlist(&dqm->packets, &dqm->queues);
-	if (retval != 0) {
+	if (retval) {
 		pr_err("failed to execute runlist");
 		goto out;
 	}
@@ -1193,7 +1193,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
 		break;
 	}
-	if (dqm->ops.initialize(dqm) != 0) {
+	if (dqm->ops.initialize(dqm)) {
 		kfree(dqm);
 		return NULL;
 	}
...
@@ -131,7 +131,7 @@ int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma)
 	/* Find kfd device according to gpu id */
 	dev = kfd_device_by_id(vma->vm_pgoff);
-	if (dev == NULL)
+	if (!dev)
 		return -EINVAL;
 	/* Calculate physical address of doorbell */
...
@@ -247,7 +247,7 @@ static u32 make_nonsignal_event_id(struct kfd_process *p)
 	for (id = p->next_nonsignal_event_id;
 		id < KFD_LAST_NONSIGNAL_EVENT_ID &&
-		lookup_event_by_id(p, id) != NULL;
+		lookup_event_by_id(p, id);
 		id++)
 		;
@@ -266,7 +266,7 @@ static u32 make_nonsignal_event_id(struct kfd_process *p)
 	for (id = KFD_FIRST_NONSIGNAL_EVENT_ID;
 		id < KFD_LAST_NONSIGNAL_EVENT_ID &&
-		lookup_event_by_id(p, id) != NULL;
+		lookup_event_by_id(p, id);
 		id++)
 		;
@@ -342,7 +342,7 @@ void kfd_event_init_process(struct kfd_process *p)
 static void destroy_event(struct kfd_process *p, struct kfd_event *ev)
 {
-	if (ev->signal_page != NULL) {
+	if (ev->signal_page) {
 		release_event_notification_slot(ev->signal_page,
 						ev->signal_slot_index);
 		p->signal_event_count--;
...
@@ -304,7 +304,7 @@ int kfd_init_apertures(struct kfd_process *process)
 			id < NUM_OF_SUPPORTED_GPUS) {
 		pdd = kfd_create_process_device_data(dev, process);
-		if (pdd == NULL) {
+		if (!pdd) {
 			pr_err("Failed to create process device data\n");
 			return -1;
 		}
...
@@ -67,12 +67,12 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
 		break;
 	}
-	if (kq->mqd == NULL)
+	if (!kq->mqd)
 		return false;
 	prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off);
-	if (prop.doorbell_ptr == NULL) {
+	if (!prop.doorbell_ptr) {
 		pr_err("Failed to initialize doorbell");
 		goto err_get_kernel_doorbell;
 	}
@@ -87,7 +87,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
 	kq->pq_gpu_addr = kq->pq->gpu_addr;
 	retval = kq->ops_asic_specific.initialize(kq, dev, type, queue_size);
-	if (retval == false)
+	if (!retval)
 		goto err_eop_allocate_vidmem;
 	retval = kfd_gtt_sa_allocate(dev, sizeof(*kq->rptr_kernel),
...
@@ -99,7 +99,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
 	m->cp_hqd_iq_rptr = AQL_ENABLE;
 	*mqd = m;
-	if (gart_addr != NULL)
+	if (gart_addr)
 		*gart_addr = addr;
 	retval = mm->update_mqd(mm, m, q);
@@ -127,7 +127,7 @@ static int init_mqd_sdma(struct mqd_manager *mm, void **mqd,
 	memset(m, 0, sizeof(struct cik_sdma_rlc_registers));
 	*mqd = m;
-	if (gart_addr != NULL)
+	if (gart_addr)
 		*gart_addr = (*mqd_mem_obj)->gpu_addr;
 	retval = mm->update_mqd(mm, m, q);
...
@@ -85,7 +85,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
 	m->cp_hqd_iq_rptr = 1;
 	*mqd = m;
-	if (gart_addr != NULL)
+	if (gart_addr)
 		*gart_addr = addr;
 	retval = mm->update_mqd(mm, m, q);
...
@@ -98,14 +98,14 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
 	BUG_ON(!pm);
 	BUG_ON(pm->allocated);
-	BUG_ON(is_over_subscription == NULL);
+	BUG_ON(!is_over_subscription);
 	pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);
 	retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size,
 					&pm->ib_buffer_obj);
-	if (retval != 0) {
+	if (retval) {
 		pr_err("Failed to allocate runlist IB\n");
 		return retval;
 	}
@@ -321,7 +321,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
 	retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
 				&alloc_size_bytes, &is_over_subscription);
-	if (retval != 0)
+	if (retval)
 		return retval;
 	*rl_size_bytes = alloc_size_bytes;
@@ -340,7 +340,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
 		}
 		retval = pm_create_map_process(pm, &rl_buffer[rl_wptr], qpd);
-		if (retval != 0)
+		if (retval)
 			return retval;
 		proccesses_mapped++;
@@ -365,7 +365,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
 					&rl_buffer[rl_wptr],
 					kq->queue,
 					qpd->is_debug);
-			if (retval != 0)
+			if (retval)
 				return retval;
 			inc_wptr(&rl_wptr,
@@ -392,7 +392,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
 					q,
 					qpd->is_debug);
-			if (retval != 0)
+			if (retval)
 				return retval;
 			inc_wptr(&rl_wptr,
@@ -421,7 +421,7 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
 	pm->dqm = dqm;
 	mutex_init(&pm->lock);
 	pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
-	if (pm->priv_queue == NULL) {
+	if (!pm->priv_queue) {
 		mutex_destroy(&pm->lock);
 		return -ENOMEM;
 	}
@@ -449,7 +449,7 @@ int pm_send_set_resources(struct packet_manager *pm,
 	pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
 					sizeof(*packet) / sizeof(uint32_t),
 					(unsigned int **)&packet);
-	if (packet == NULL) {
+	if (!packet) {
 		mutex_unlock(&pm->lock);
 		pr_err("Failed to allocate buffer on kernel queue\n");
 		return -ENOMEM;
@@ -491,7 +491,7 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
 	retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr,
 					&rl_ib_size);
-	if (retval != 0)
+	if (retval)
 		goto fail_create_runlist_ib;
 	pr_debug("runlist IB address: 0x%llX\n", rl_gpu_ib_addr);
@@ -501,12 +501,12 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
 	retval = pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
 					packet_size_dwords, &rl_buffer);
-	if (retval != 0)
+	if (retval)
 		goto fail_acquire_packet_buffer;
 	retval = pm_create_runlist(pm, rl_buffer, rl_gpu_ib_addr,
 					rl_ib_size / sizeof(uint32_t), false);
-	if (retval != 0)
+	if (retval)
 		goto fail_create_runlist;
 	pm->priv_queue->ops.submit_packet(pm->priv_queue);
@@ -537,7 +537,7 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
 			pm->priv_queue,
 			sizeof(struct pm4_query_status) / sizeof(uint32_t),
 			(unsigned int **)&packet);
-	if (retval != 0)
+	if (retval)
 		goto fail_acquire_packet_buffer;
 	packet->header.u32all = build_pm4_header(IT_QUERY_STATUS,
@@ -580,7 +580,7 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
 			pm->priv_queue,
 			sizeof(struct pm4_unmap_queues) / sizeof(uint32_t),
 			&buffer);
-	if (retval != 0)
+	if (retval)
 		goto err_acquire_packet_buffer;
 	packet = (struct pm4_unmap_queues *)buffer;
...
@@ -81,7 +81,7 @@ struct kfd_process *kfd_create_process(const struct task_struct *thread)
 	BUG_ON(!kfd_process_wq);
-	if (thread->mm == NULL)
+	if (!thread->mm)
 		return ERR_PTR(-EINVAL);
 	/* Only the pthreads threading model is supported. */
@@ -117,7 +117,7 @@ struct kfd_process *kfd_get_process(const struct task_struct *thread)
 {
 	struct kfd_process *process;
-	if (thread->mm == NULL)
+	if (!thread->mm)
 		return ERR_PTR(-EINVAL);
 	/* Only the pthreads threading model is supported. */
@@ -407,7 +407,7 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
 	struct kfd_process *p;
 	struct kfd_process_device *pdd;
-	BUG_ON(dev == NULL);
+	BUG_ON(!dev);
 	/*
 	 * Look for the process that matches the pasid. If there is no such
...
@@ -76,7 +76,7 @@ int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
 	pqm->queue_slot_bitmap =
 			kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
 					BITS_PER_BYTE), GFP_KERNEL);
-	if (pqm->queue_slot_bitmap == NULL)
+	if (!pqm->queue_slot_bitmap)
 		return -ENOMEM;
 	pqm->process = p;
@@ -223,7 +223,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 		break;
 	case KFD_QUEUE_TYPE_DIQ:
 		kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_DIQ);
-		if (kq == NULL) {
+		if (!kq) {
 			retval = -ENOMEM;
 			goto err_create_queue;
 		}
@@ -279,7 +279,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
 	retval = 0;
 	pqn = get_queue_by_qid(pqm, qid);
-	if (pqn == NULL) {
+	if (!pqn) {
 		pr_err("Queue id does not match any known queue\n");
 		return -EINVAL;
 	}
...
@@ -416,7 +416,7 @@ static struct kfd_topology_device *kfd_create_topology_device(void)
 	struct kfd_topology_device *dev;
 	dev = kfd_alloc_struct(dev);
-	if (dev == NULL) {
+	if (!dev) {
 		pr_err("No memory to allocate a topology device");
 		return NULL;
 	}
@@ -957,7 +957,7 @@ static int kfd_topology_update_sysfs(void)
 	int ret;
 	pr_info("Creating topology SYSFS entries\n");
-	if (sys_props.kobj_topology == NULL) {
+	if (!sys_props.kobj_topology) {
 		sys_props.kobj_topology =
 				kfd_alloc_struct(sys_props.kobj_topology);
 		if (!sys_props.kobj_topology)
@@ -1120,7 +1120,7 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
 	BUG_ON(!gpu);
 	list_for_each_entry(dev, &topology_device_list, list)
-		if (dev->gpu == NULL && dev->node_props.simd_count > 0) {
+		if (!dev->gpu && (dev->node_props.simd_count > 0)) {
 			dev->gpu = gpu;
 			out_dev = dev;
 			break;
...