Commit 45c9a5e4 authored by Oded Gabbay

drm/amdkfd: Encapsulate DQM functions in ops structure

This patch reorganizes the device_queue_manager structure: it moves all of the
function pointers out of the structure and into a new structure called
device_queue_manager_ops, then embeds an instance of that structure inside
device_queue_manager.

This re-org prepares the DQM module to support more AMD APUs than just Kaveri.
Signed-off-by: Oded Gabbay <oded.gabbay@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
parent 9216ed29
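
For readers skimming the diff below: the change is the kernel's usual "ops table" idiom. Call sites switch from dqm->fn(...) to dqm->ops.fn(...), so a future ASIC can install a different set of callbacks at init time without touching any caller. A minimal standalone sketch of the pattern follows (illustrative, simplified names only — not the kfd code itself):

#include <stdio.h>

struct dqm;				/* forward declaration for the callbacks */

/* All dispatchable entry points are grouped in one ops structure... */
struct dqm_ops {
	int (*start)(struct dqm *dqm);
	int (*stop)(struct dqm *dqm);
};

/* ...and the manager embeds an instance of it instead of raw pointers. */
struct dqm {
	struct dqm_ops ops;
	const char *asic_name;
};

static int start_kaveri(struct dqm *dqm)
{
	printf("%s: queue manager started\n", dqm->asic_name);
	return 0;
}

static int stop_kaveri(struct dqm *dqm)
{
	printf("%s: queue manager stopped\n", dqm->asic_name);
	return 0;
}

int main(void)
{
	struct dqm dqm = { .asic_name = "Kaveri" };

	/* Wiring happens once, at init time; a second APU would simply
	 * install different callbacks here. Call sites stay unchanged. */
	dqm.ops.start = start_kaveri;
	dqm.ops.stop = stop_kaveri;

	dqm.ops.start(&dqm);	/* was dqm->start(dqm) before the re-org */
	dqm.ops.stop(&dqm);
	return 0;
}
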
@@ -439,7 +439,7 @@ static long kfd_ioctl_set_memory_policy(struct file *filep,
 		(args.alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
 		   ? cache_policy_coherent : cache_policy_noncoherent;
-	if (!dev->dqm->set_cache_memory_policy(dev->dqm,
+	if (!dev->dqm->ops.set_cache_memory_policy(dev->dqm,
 				&pdd->qpd,
 				default_policy,
 				alternate_policy,
...
@@ -253,7 +253,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 		goto device_queue_manager_error;
 	}
-	if (kfd->dqm->start(kfd->dqm) != 0) {
+	if (kfd->dqm->ops.start(kfd->dqm) != 0) {
 		dev_err(kfd_device,
 			"Error starting queuen manager for device (%x:%x)\n",
 			kfd->pdev->vendor, kfd->pdev->device);
@@ -307,7 +307,7 @@ void kgd2kfd_suspend(struct kfd_dev *kfd)
 	BUG_ON(kfd == NULL);
 	if (kfd->init_complete) {
-		kfd->dqm->stop(kfd->dqm);
+		kfd->dqm->ops.stop(kfd->dqm);
 		amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL);
 		amd_iommu_free_device(kfd->pdev);
 	}
@@ -328,7 +328,7 @@ int kgd2kfd_resume(struct kfd_dev *kfd)
 			return -ENXIO;
 		amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
 						iommu_pasid_shutdown_callback);
-		kfd->dqm->start(kfd->dqm);
+		kfd->dqm->ops.start(kfd->dqm);
 	}
 	return 0;
...
@@ -271,7 +271,7 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
 	BUG_ON(!dqm || !q || !qpd);
-	mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
+	mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
 	if (mqd == NULL)
 		return -ENOMEM;
@@ -305,14 +305,14 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
 	mutex_lock(&dqm->lock);
 	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
-		mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
+		mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
 		if (mqd == NULL) {
 			retval = -ENOMEM;
 			goto out;
 		}
 		deallocate_hqd(dqm, q);
 	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
-		mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
+		mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
 		if (mqd == NULL) {
 			retval = -ENOMEM;
 			goto out;
@@ -348,7 +348,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
 	BUG_ON(!dqm || !q || !q->mqd);
 	mutex_lock(&dqm->lock);
-	mqd = dqm->get_mqd_manager(dqm, q->properties.type);
+	mqd = dqm->ops.get_mqd_manager(dqm, q->properties.type);
 	if (mqd == NULL) {
 		mutex_unlock(&dqm->lock);
 		return -ENOMEM;
@@ -515,7 +515,7 @@ static int init_pipelines(struct device_queue_manager *dqm,
 	memset(hpdptr, 0, CIK_HPD_EOP_BYTES * pipes_num);
-	mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
+	mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
 	if (mqd == NULL) {
 		kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
 		return -ENOMEM;
@@ -646,7 +646,7 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
 	struct mqd_manager *mqd;
 	int retval;
-	mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
+	mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
 	if (!mqd)
 		return -ENOMEM;
@@ -849,7 +849,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
 	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
 		select_sdma_engine_id(q);
-	mqd = dqm->get_mqd_manager(dqm,
+	mqd = dqm->ops.get_mqd_manager(dqm,
 			get_mqd_type_from_queue_type(q->properties.type));
 	if (mqd == NULL) {
@@ -994,7 +994,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
 	/* remove queue from list to prevent rescheduling after preemption */
 	mutex_lock(&dqm->lock);
-	mqd = dqm->get_mqd_manager(dqm,
+	mqd = dqm->ops.get_mqd_manager(dqm,
 			get_mqd_type_from_queue_type(q->properties.type));
 	if (!mqd) {
 		retval = -ENOMEM;
@@ -1116,40 +1116,40 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
 	case KFD_SCHED_POLICY_HWS:
 	case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
 		/* initialize dqm for cp scheduling */
-		dqm->create_queue = create_queue_cpsch;
-		dqm->initialize = initialize_cpsch;
-		dqm->start = start_cpsch;
-		dqm->stop = stop_cpsch;
-		dqm->destroy_queue = destroy_queue_cpsch;
-		dqm->update_queue = update_queue;
-		dqm->get_mqd_manager = get_mqd_manager_nocpsch;
-		dqm->register_process = register_process_nocpsch;
-		dqm->unregister_process = unregister_process_nocpsch;
-		dqm->uninitialize = uninitialize_nocpsch;
-		dqm->create_kernel_queue = create_kernel_queue_cpsch;
-		dqm->destroy_kernel_queue = destroy_kernel_queue_cpsch;
-		dqm->set_cache_memory_policy = set_cache_memory_policy;
+		dqm->ops.create_queue = create_queue_cpsch;
+		dqm->ops.initialize = initialize_cpsch;
+		dqm->ops.start = start_cpsch;
+		dqm->ops.stop = stop_cpsch;
+		dqm->ops.destroy_queue = destroy_queue_cpsch;
+		dqm->ops.update_queue = update_queue;
+		dqm->ops.get_mqd_manager = get_mqd_manager_nocpsch;
+		dqm->ops.register_process = register_process_nocpsch;
+		dqm->ops.unregister_process = unregister_process_nocpsch;
+		dqm->ops.uninitialize = uninitialize_nocpsch;
+		dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
+		dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
+		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
 		break;
 	case KFD_SCHED_POLICY_NO_HWS:
 		/* initialize dqm for no cp scheduling */
-		dqm->start = start_nocpsch;
-		dqm->stop = stop_nocpsch;
-		dqm->create_queue = create_queue_nocpsch;
-		dqm->destroy_queue = destroy_queue_nocpsch;
-		dqm->update_queue = update_queue;
-		dqm->get_mqd_manager = get_mqd_manager_nocpsch;
-		dqm->register_process = register_process_nocpsch;
-		dqm->unregister_process = unregister_process_nocpsch;
-		dqm->initialize = initialize_nocpsch;
-		dqm->uninitialize = uninitialize_nocpsch;
-		dqm->set_cache_memory_policy = set_cache_memory_policy;
+		dqm->ops.start = start_nocpsch;
+		dqm->ops.stop = stop_nocpsch;
+		dqm->ops.create_queue = create_queue_nocpsch;
+		dqm->ops.destroy_queue = destroy_queue_nocpsch;
+		dqm->ops.update_queue = update_queue;
+		dqm->ops.get_mqd_manager = get_mqd_manager_nocpsch;
+		dqm->ops.register_process = register_process_nocpsch;
+		dqm->ops.unregister_process = unregister_process_nocpsch;
+		dqm->ops.initialize = initialize_nocpsch;
+		dqm->ops.uninitialize = uninitialize_nocpsch;
+		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
 		break;
 	default:
 		BUG();
 		break;
 	}
-	if (dqm->initialize(dqm) != 0) {
+	if (dqm->ops.initialize(dqm) != 0) {
 		kfree(dqm);
 		return NULL;
 	}
@@ -1161,7 +1161,7 @@ void device_queue_manager_uninit(struct device_queue_manager *dqm)
 {
 	BUG_ON(!dqm);
-	dqm->uninitialize(dqm);
+	dqm->ops.uninitialize(dqm);
 	kfree(dqm);
 }
@@ -46,7 +46,7 @@ struct device_process_node {
 };
 /**
- * struct device_queue_manager
+ * struct device_queue_manager_ops
  *
  * @create_queue: Queue creation routine.
  *
@@ -81,15 +81,9 @@ struct device_process_node {
  * @set_cache_memory_policy: Sets memory policy (cached/ non cached) for the
  * memory apertures.
  *
- * This struct is a base class for the kfd queues scheduler in the
- * device level. The device base class should expose the basic operations
- * for queue creation and queue destruction. This base class hides the
- * scheduling mode of the driver and the specific implementation of the
- * concrete device. This class is the only class in the queues scheduler
- * that configures the H/W.
  */
-struct device_queue_manager {
+struct device_queue_manager_ops {
 	int (*create_queue)(struct device_queue_manager *dqm,
 				struct queue *q,
 				struct qcm_process_device *qpd,
@@ -124,7 +118,22 @@ struct device_queue_manager {
 				enum cache_policy alternate_policy,
 				void __user *alternate_aperture_base,
 				uint64_t alternate_aperture_size);
+};
+
+/**
+ * struct device_queue_manager
+ *
+ * This struct is a base class for the kfd queues scheduler in the
+ * device level. The device base class should expose the basic operations
+ * for queue creation and queue destruction. This base class hides the
+ * scheduling mode of the driver and the specific implementation of the
+ * concrete device. This class is the only class in the queues scheduler
+ * that configures the H/W.
+ *
+ */
+
+struct device_queue_manager {
+	struct device_queue_manager_ops ops;
+
 	struct mqd_manager *mqds[KFD_MQD_TYPE_MAX];
 	struct packet_manager packets;
...
@@ -56,7 +56,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
 	switch (type) {
 	case KFD_QUEUE_TYPE_DIQ:
 	case KFD_QUEUE_TYPE_HIQ:
-		kq->mqd = dev->dqm->get_mqd_manager(dev->dqm,
+		kq->mqd = dev->dqm->ops.get_mqd_manager(dev->dqm,
 						KFD_MQD_TYPE_HIQ);
 		break;
 	default:
...
@@ -178,7 +178,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 	if (list_empty(&pqm->queues)) {
 		pdd->qpd.pqm = pqm;
-		dev->dqm->register_process(dev->dqm, &pdd->qpd);
+		dev->dqm->ops.register_process(dev->dqm, &pdd->qpd);
 	}
 	pqn = kzalloc(sizeof(struct process_queue_node), GFP_KERNEL);
@@ -204,7 +204,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 			goto err_create_queue;
 		pqn->q = q;
 		pqn->kq = NULL;
-		retval = dev->dqm->create_queue(dev->dqm, q, &pdd->qpd,
+		retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd,
 						&q->properties.vmid);
 		print_queue(q);
 		break;
@@ -217,7 +217,8 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 		kq->queue->properties.queue_id = *qid;
 		pqn->kq = kq;
 		pqn->q = NULL;
-		retval = dev->dqm->create_kernel_queue(dev->dqm, kq, &pdd->qpd);
+		retval = dev->dqm->ops.create_kernel_queue(dev->dqm,
+							kq, &pdd->qpd);
 		break;
 	default:
 		BUG();
@@ -285,13 +286,13 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
 	if (pqn->kq) {
 		/* destroy kernel queue (DIQ) */
 		dqm = pqn->kq->dev->dqm;
-		dqm->destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd);
+		dqm->ops.destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd);
 		kernel_queue_uninit(pqn->kq);
 	}
 	if (pqn->q) {
 		dqm = pqn->q->device->dqm;
-		retval = dqm->destroy_queue(dqm, &pdd->qpd, pqn->q);
+		retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
 		if (retval != 0)
 			return retval;
@@ -303,7 +304,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
 	clear_bit(qid, pqm->queue_slot_bitmap);
 	if (list_empty(&pqm->queues))
-		dqm->unregister_process(dqm, &pdd->qpd);
+		dqm->ops.unregister_process(dqm, &pdd->qpd);
 	return retval;
 }
@@ -324,7 +325,8 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
 	pqn->q->properties.queue_percent = p->queue_percent;
 	pqn->q->properties.priority = p->priority;
-	retval = pqn->q->device->dqm->update_queue(pqn->q->device->dqm, pqn->q);
+	retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
+							pqn->q);
 	if (retval != 0)
 		return retval;
...