Commit eff6f4a0 authored by Oded Gabbay, committed by Greg Kroah-Hartman

habanalabs: add command submission module

This patch adds the main flow for the user to submit work to the device.

Each piece of work is described by a command submission object (CS). A CS
contains three arrays of command buffers (CBs): one for execution and two for
context switch (store and restore).

For each CB, the user specifies the queue on which to place it. For an
internal queue, the entry doesn't contain a pointer to the CB but rather the
on-chip memory address at which the CB resides.

The driver parses some of the CBs to enforce security restrictions.

The user receives a sequence number that represents the CS object. The user
can then query the driver regarding the status of the CS, using that
sequence number.

If the CS doesn't finish before the timeout expires, the driver performs a
soft-reset of the device.
Reviewed-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent f8c8c7d5
@@ -5,7 +5,8 @@
obj-m := habanalabs.o
habanalabs-y := habanalabs_drv.o device.o context.o asid.o habanalabs_ioctl.o \
command_buffer.o hw_queue.o irq.o sysfs.o hwmon.o
command_buffer.o hw_queue.o irq.o sysfs.o hwmon.o memory.o \
command_submission.o
include $(src)/goya/Makefile
habanalabs-y += $(HL_GOYA_FILES)
@@ -12,6 +12,18 @@
static void hl_ctx_fini(struct hl_ctx *ctx)
{
struct hl_device *hdev = ctx->hdev;
int i;
/*
* If we arrived here, there are no jobs waiting for this context
* on its queues so we can safely remove it.
* This is because for each CS, we increment the ref count and for
* every CS that was finished we decrement it and we won't arrive
* to this function unless the ref count is 0
*/
for (i = 0 ; i < HL_MAX_PENDING_CS ; i++)
dma_fence_put(ctx->cs_pending[i]);
if (ctx->asid != HL_KERNEL_ASID_ID)
hl_asid_free(hdev, ctx->asid);
@@ -23,8 +35,6 @@ void hl_ctx_do_release(struct kref *ref)
ctx = container_of(ref, struct hl_ctx, refcount);
dev_dbg(ctx->hdev->dev, "Now really releasing context %d\n", ctx->asid);
hl_ctx_fini(ctx);
if (ctx->hpriv)
@@ -90,6 +100,11 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
kref_init(&ctx->refcount);
ctx->cs_sequence = 1;
spin_lock_init(&ctx->cs_lock);
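/*
 * The restore tokens below gate the context-switch ("restore") phase:
 * presumably a single submitting thread grabs thread_restore_token, runs
 * the restore JOBS, and then flips thread_restore_wait_token for the other
 * threads (the consumer of these fields lives in command_submission.c,
 * which is collapsed in this diff).
 */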
atomic_set(&ctx->thread_restore_token, 1);
ctx->thread_restore_wait_token = 0;
if (is_kernel_ctx) {
ctx->asid = HL_KERNEL_ASID_ID; /* KMD gets ASID 0 */
} else {
@@ -100,8 +115,6 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
}
}
dev_dbg(hdev->dev, "Created context with ASID %u\n", ctx->asid);
return 0;
}
@@ -115,6 +128,37 @@ int hl_ctx_put(struct hl_ctx *ctx)
return kref_put(&ctx->refcount, hl_ctx_do_release);
}
struct dma_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
{
struct hl_device *hdev = ctx->hdev;
struct dma_fence *fence;
spin_lock(&ctx->cs_lock);
if (seq >= ctx->cs_sequence) {
dev_notice(hdev->dev,
"Can't wait on seq %llu because current CS is at seq %llu\n",
seq, ctx->cs_sequence);
spin_unlock(&ctx->cs_lock);
return ERR_PTR(-EINVAL);
}
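/*
 * Only the last HL_MAX_PENDING_CS fences are kept per context (see the
 * cs_pending ring indexing below), so an older sequence number is no
 * longer tracked; callers presumably treat the NULL return as a CS that
 * has already completed.
 */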
if (seq + HL_MAX_PENDING_CS < ctx->cs_sequence) {
dev_dbg(hdev->dev,
"Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
seq, ctx->cs_sequence);
spin_unlock(&ctx->cs_lock);
return NULL;
}
fence = dma_fence_get(
ctx->cs_pending[seq & (HL_MAX_PENDING_CS - 1)]);
spin_unlock(&ctx->cs_lock);
return fence;
}
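For orientation, here is a minimal, illustrative sketch (not part of the patch) of how the wait-for-CS path in command_submission.c (collapsed in this diff) would presumably consume hl_ctx_get_fence(); the helper name example_wait_for_cs and its return convention are made up for this example.

static long example_wait_for_cs(struct hl_ctx *ctx, u64 seq,
				long timeout_jiffies)
{
	struct dma_fence *fence;
	long rc;

	fence = hl_ctx_get_fence(ctx, seq);
	if (IS_ERR(fence))
		return PTR_ERR(fence);	/* seq was never handed out */

	if (!fence)
		return 1;		/* fence already retired, CS long done */

	/* interruptible wait; >0 done, 0 timed out, -ERESTARTSYS on signal */
	rc = dma_fence_wait_timeout(fence, true, timeout_jiffies);
	dma_fence_put(fence);

	return rc;
}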
/*
* hl_ctx_mgr_init - initialize the context manager
*
...
@@ -30,6 +30,8 @@ static void hpriv_release(struct kref *ref)
put_pid(hpriv->taskpid);
mutex_destroy(&hpriv->restore_phase_mutex);
kfree(hpriv);
/* Now the FD is really closed */
@@ -208,6 +210,8 @@ static int device_early_init(struct hl_device *hdev)
mutex_init(&hdev->fd_open_cnt_lock);
mutex_init(&hdev->send_cpu_message_lock);
INIT_LIST_HEAD(&hdev->hw_queues_mirror_list);
spin_lock_init(&hdev->hw_queues_mirror_lock);
atomic_set(&hdev->in_reset, 0);
atomic_set(&hdev->fd_open_cnt, 0);
@@ -593,6 +597,9 @@ int hl_device_reset(struct hl_device *hdev, bool hard_reset,
*/
hdev->asic_funcs->halt_engines(hdev, hard_reset);
/* Go over all the queues, release all CS and their jobs */
hl_cs_rollback_all(hdev);
if (hard_reset) {
/* Release kernel context */
if (hl_ctx_put(hdev->kernel_ctx) != 1) {
@@ -616,6 +623,12 @@ int hl_device_reset(struct hl_device *hdev, bool hard_reset,
for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
hl_cq_reset(hdev, &hdev->completion_queue[i]);
/* Make sure the setup phase for the user context will run again */
if (hdev->user_ctx) {
atomic_set(&hdev->user_ctx->thread_restore_token, 1);
hdev->user_ctx->thread_restore_wait_token = 0;
}
/* Finished tear-down, starting to re-initialize */
if (hard_reset) {
@@ -952,6 +965,9 @@ void hl_device_fini(struct hl_device *hdev)
*/
hdev->asic_funcs->halt_engines(hdev, true);
/* Go over all the queues, release all CS and their jobs */
hl_cs_rollback_all(hdev);
hl_cb_pool_fini(hdev);
/* Release kernel context */
...
@@ -24,6 +24,17 @@ static struct class *hl_class;
DEFINE_IDR(hl_devs_idr);
DEFINE_MUTEX(hl_devs_idr_lock);
static int timeout_locked = 5;
static int reset_on_lockup = 1;
module_param(timeout_locked, int, 0444);
MODULE_PARM_DESC(timeout_locked,
"Device lockup timeout in seconds (0 = disabled, default 5s)");
module_param(reset_on_lockup, int, 0444);
MODULE_PARM_DESC(reset_on_lockup,
"Do device reset on lockup (0 = no, 1 = yes, default yes)");
#define PCI_VENDOR_ID_HABANALABS 0x1da3
#define PCI_IDS_GOYA 0x0001
@@ -113,6 +124,7 @@ int hl_device_open(struct inode *inode, struct file *filp)
hpriv->hdev = hdev;
filp->private_data = hpriv;
hpriv->filp = filp;
mutex_init(&hpriv->restore_phase_mutex);
kref_init(&hpriv->refcount);
nonseekable_open(inode, filp);
@@ -140,6 +152,7 @@ int hl_device_open(struct inode *inode, struct file *filp)
filp->private_data = NULL;
hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr);
hl_cb_mgr_fini(hpriv->hdev, &hpriv->cb_mgr);
mutex_destroy(&hpriv->restore_phase_mutex);
kfree(hpriv);
close_device:
@@ -172,8 +185,10 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
return -ENOMEM;
hdev->major = hl_major;
hdev->reset_on_lockup = reset_on_lockup;
/* Parameters for bring-up - set them to defaults */
hdev->mmu_enable = 0;
hdev->cpu_enable = 1;
hdev->reset_pcilink = 0;
hdev->cpu_queues_enable = 1;
@@ -193,6 +208,11 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
if (!hdev->cpu_queues_enable)
hdev->heartbeat = 0;
if (timeout_locked)
hdev->timeout_jiffies = msecs_to_jiffies(timeout_locked * 1000);
else
hdev->timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
hdev->disabled = true;
hdev->pdev = pdev; /* can be NULL in case of simulator device */
...
@@ -16,7 +16,9 @@
[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func}
static const struct hl_ioctl_desc hl_ioctls[] = {
HL_IOCTL_DEF(HL_IOCTL_CB, hl_cb_ioctl)
HL_IOCTL_DEF(HL_IOCTL_CB, hl_cb_ioctl),
HL_IOCTL_DEF(HL_IOCTL_CS, hl_cs_ioctl),
HL_IOCTL_DEF(HL_IOCTL_WAIT_CS, hl_cs_wait_ioctl)
};
#define HL_CORE_IOCTL_COUNT ARRAY_SIZE(hl_ioctls)
...
@@ -34,6 +34,29 @@ static inline int queue_free_slots(struct hl_hw_queue *q, u32 queue_len)
return (abs(delta) - queue_len);
}
void hl_int_hw_queue_update_ci(struct hl_cs *cs)
{
struct hl_device *hdev = cs->ctx->hdev;
struct hl_hw_queue *q;
int i;
hdev->asic_funcs->hw_queues_lock(hdev);
if (hdev->disabled)
goto out;
q = &hdev->kernel_queues[0];
for (i = 0 ; i < HL_MAX_QUEUES ; i++, q++) {
if (q->queue_type == QUEUE_TYPE_INT) {
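/*
 * Internal queues have no completion queue (the device signals
 * completions only for external queues), so the driver advances the
 * consumer index itself by the number of jobs this CS placed on the
 * queue. pi/ci wrap at twice the queue length so queue_free_slots()
 * can tell a full queue from an empty one.
 */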
q->ci += cs->jobs_in_queue_cnt[i];
q->ci &= ((q->int_queue_len << 1) - 1);
}
}
out:
hdev->asic_funcs->hw_queues_unlock(hdev);
}
/*
* ext_queue_submit_bd - Submit a buffer descriptor to an external queue
*
@@ -119,6 +142,37 @@ static int ext_queue_sanity_checks(struct hl_device *hdev,
return 0;
}
/*
* int_queue_sanity_checks - perform some sanity checks on internal queue
*
* @hdev : pointer to hl_device structure
* @q : pointer to hl_hw_queue structure
* @num_of_entries : how many entries to check for space
*
* H/W queues spinlock should be taken before calling this function
*
* Perform the following:
* - Make sure we have enough space in the h/w queue
*
*/
static int int_queue_sanity_checks(struct hl_device *hdev,
struct hl_hw_queue *q,
int num_of_entries)
{
int free_slots_cnt;
/* Check we have enough space in the queue */
free_slots_cnt = queue_free_slots(q, q->int_queue_len);
if (free_slots_cnt < num_of_entries) {
dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n",
q->hw_queue_id, num_of_entries);
return -EAGAIN;
}
return 0;
}
/*
* hl_hw_queue_send_cb_no_cmpl - send a single CB (not a JOB) without completion
*
@@ -165,6 +219,184 @@ int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
return rc;
}
/*
* ext_hw_queue_schedule_job - submit a JOB to an external queue
*
* @job: pointer to the job that needs to be submitted to the queue
*
* This function must be called when the scheduler mutex is taken
*
*/
static void ext_hw_queue_schedule_job(struct hl_cs_job *job)
{
struct hl_device *hdev = job->cs->ctx->hdev;
struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
struct hl_cq_entry cq_pkt;
struct hl_cq *cq;
u64 cq_addr;
struct hl_cb *cb;
u32 ctl;
u32 len;
u64 ptr;
/*
* Update the JOB ID inside the BD CTL so the device would know what
* to write in the completion queue
*/
ctl = ((q->pi << BD_CTL_SHADOW_INDEX_SHIFT) & BD_CTL_SHADOW_INDEX_MASK);
cb = job->patched_cb;
len = job->job_cb_size;
ptr = cb->bus_address;
cq_pkt.data = (q->pi << CQ_ENTRY_SHADOW_INDEX_SHIFT)
& CQ_ENTRY_SHADOW_INDEX_MASK;
cq_pkt.data |= 1 << CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT;
cq_pkt.data |= 1 << CQ_ENTRY_READY_SHIFT;
/*
* No need to protect pi_offset because scheduling to the
* H/W queues is done under the scheduler mutex
*
* No need to check if CQ is full because it was already
* checked in ext_queue_sanity_checks
*/
cq = &hdev->completion_queue[q->hw_queue_id];
cq_addr = cq->bus_address +
hdev->asic_prop.host_phys_base_address;
cq_addr += cq->pi * sizeof(struct hl_cq_entry);
hdev->asic_funcs->add_end_of_cb_packets(cb->kernel_address, len,
cq_addr, cq_pkt.data, q->hw_queue_id);
q->shadow_queue[hl_pi_2_offset(q->pi)] = job;
cq->pi = hl_cq_inc_ptr(cq->pi);
ext_queue_submit_bd(hdev, q, ctl, len, ptr);
}
/*
* int_hw_queue_schedule_job - submit a JOB to an internal queue
*
* @job: pointer to the job that needs to be submitted to the queue
*
* This function must be called when the scheduler mutex is taken
*
*/
static void int_hw_queue_schedule_job(struct hl_cs_job *job)
{
struct hl_device *hdev = job->cs->ctx->hdev;
struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
struct hl_bd bd;
u64 *pi, *pbd = (u64 *) &bd;
bd.ctl = 0;
bd.len = job->job_cb_size;
bd.ptr = (u64) (uintptr_t) job->user_cb;
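/*
 * The BD is assembled on the stack and copied into the queue below as
 * two 64-bit words (the two-word copy implies struct hl_bd, defined in
 * the collapsed habanalabs.h, is 16 bytes). For an internal queue,
 * bd.ptr presumably carries the on-chip address of the CB rather than a
 * host pointer, as described in the commit message.
 */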
pi = (u64 *) (uintptr_t) (q->kernel_address +
((q->pi & (q->int_queue_len - 1)) * sizeof(bd)));
pi[0] = pbd[0];
pi[1] = pbd[1];
q->pi++;
q->pi &= ((q->int_queue_len << 1) - 1);
/* Flush PQ entry write. Relevant only for specific ASICs */
hdev->asic_funcs->flush_pq_write(hdev, pi, pbd[0]);
hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
}
/*
* hl_hw_queue_schedule_cs - schedule a command submission
*
* @cs : pointer to the CS to schedule
*
*/
int hl_hw_queue_schedule_cs(struct hl_cs *cs)
{
struct hl_device *hdev = cs->ctx->hdev;
struct hl_cs_job *job, *tmp;
struct hl_hw_queue *q;
int rc = 0, i, cq_cnt;
hdev->asic_funcs->hw_queues_lock(hdev);
if (hl_device_disabled_or_in_reset(hdev)) {
dev_err(hdev->dev,
"device is disabled or in reset, CS rejected!\n");
rc = -EPERM;
goto out;
}
q = &hdev->kernel_queues[0];
/* This loop assumes all external queues are consecutive */
for (i = 0, cq_cnt = 0 ; i < HL_MAX_QUEUES ; i++, q++) {
if (q->queue_type == QUEUE_TYPE_EXT) {
if (cs->jobs_in_queue_cnt[i]) {
rc = ext_queue_sanity_checks(hdev, q,
cs->jobs_in_queue_cnt[i], true);
if (rc)
goto unroll_cq_resv;
cq_cnt++;
}
} else if (q->queue_type == QUEUE_TYPE_INT) {
if (cs->jobs_in_queue_cnt[i]) {
rc = int_queue_sanity_checks(hdev, q,
cs->jobs_in_queue_cnt[i]);
if (rc)
goto unroll_cq_resv;
}
}
}
spin_lock(&hdev->hw_queues_mirror_lock);
list_add_tail(&cs->mirror_node, &hdev->hw_queues_mirror_list);
/* Queue TDR if the CS is the first entry and if timeout is wanted */
if ((hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT) &&
(list_first_entry(&hdev->hw_queues_mirror_list,
struct hl_cs, mirror_node) == cs)) {
cs->tdr_active = true;
schedule_delayed_work(&cs->work_tdr, hdev->timeout_jiffies);
spin_unlock(&hdev->hw_queues_mirror_lock);
} else {
spin_unlock(&hdev->hw_queues_mirror_lock);
}
list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) {
if (job->ext_queue)
ext_hw_queue_schedule_job(job);
else
int_hw_queue_schedule_job(job);
}
cs->submitted = true;
goto out;
unroll_cq_resv:
/* This loop assumes all external queues are consecutive */
q = &hdev->kernel_queues[0];
for (i = 0 ; (i < HL_MAX_QUEUES) && (cq_cnt > 0) ; i++, q++) {
if ((q->queue_type == QUEUE_TYPE_EXT) &&
(cs->jobs_in_queue_cnt[i])) {
atomic_t *free_slots =
&hdev->completion_queue[i].free_slots_cnt;
atomic_add(cs->jobs_in_queue_cnt[i], free_slots);
cq_cnt--;
}
}
out:
hdev->asic_funcs->hw_queues_unlock(hdev);
return rc;
}
/*
* hl_hw_queue_inc_ci_kernel - increment ci for kernel's queue
*
...
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2016-2019 HabanaLabs, Ltd.
* All Rights Reserved.
*/
#include "habanalabs.h"
#include <linux/uaccess.h>
#include <linux/slab.h>
/*
* hl_pin_host_memory - pins a chunk of host memory
*
* @hdev : pointer to the habanalabs device structure
* @addr : the user-space virtual address of the memory area
* @size : the size of the memory area
* @userptr : pointer to hl_userptr structure
*
* This function does the following:
* - Pins the physical pages
* - Creates an SG list from those pages
*/
int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u32 size,
struct hl_userptr *userptr)
{
u64 start, end;
u32 npages, offset;
int rc;
if (!size) {
dev_err(hdev->dev, "size to pin is invalid - %d\n",
size);
return -EINVAL;
}
if (!access_ok((void __user *) (uintptr_t) addr, size)) {
dev_err(hdev->dev, "user pointer is invalid - 0x%llx\n",
addr);
return -EFAULT;
}
/*
* If the combination of the address and size requested for this memory
* region causes an integer overflow, return error.
*/
if (((addr + size) < addr) ||
PAGE_ALIGN(addr + size) < (addr + size)) {
dev_err(hdev->dev,
"user pointer 0x%llx + %u causes integer overflow\n",
addr, size);
return -EINVAL;
}
start = addr & PAGE_MASK;
offset = addr & ~PAGE_MASK;
end = PAGE_ALIGN(addr + size);
npages = (end - start) >> PAGE_SHIFT;
userptr->size = size;
userptr->addr = addr;
userptr->dma_mapped = false;
INIT_LIST_HEAD(&userptr->job_node);
userptr->vec = frame_vector_create(npages);
if (!userptr->vec) {
dev_err(hdev->dev, "Failed to create frame vector\n");
return -ENOMEM;
}
rc = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
userptr->vec);
if (rc != npages) {
dev_err(hdev->dev,
"Failed to map host memory, user ptr probably wrong\n");
if (rc < 0)
goto destroy_framevec;
rc = -EFAULT;
goto put_framevec;
}
if (frame_vector_to_pages(userptr->vec) < 0) {
dev_err(hdev->dev,
"Failed to translate frame vector to pages\n");
rc = -EFAULT;
goto put_framevec;
}
userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_ATOMIC);
if (!userptr->sgt) {
rc = -ENOMEM;
goto put_framevec;
}
rc = sg_alloc_table_from_pages(userptr->sgt,
frame_vector_pages(userptr->vec),
npages, offset, size, GFP_ATOMIC);
if (rc < 0) {
dev_err(hdev->dev, "failed to create SG table from pages\n");
goto free_sgt;
}
return 0;
free_sgt:
kfree(userptr->sgt);
put_framevec:
put_vaddr_frames(userptr->vec);
destroy_framevec:
frame_vector_destroy(userptr->vec);
return rc;
}
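For illustration only (not part of the patch), a minimal sketch of how a caller such as the CS parser in command_submission.c (collapsed in this diff) might pin a user buffer for a job and track it for later cleanup; the helper name example_pin_for_job and the per-job list parameter are made up.

static int example_pin_for_job(struct hl_device *hdev, u64 user_addr,
				u32 size, struct list_head *job_userptr_list)
{
	struct hl_userptr *userptr;
	int rc;

	userptr = kzalloc(sizeof(*userptr), GFP_KERNEL);
	if (!userptr)
		return -ENOMEM;

	rc = hl_pin_host_memory(hdev, user_addr, size, userptr);
	if (rc) {
		kfree(userptr);
		return rc;
	}

	/*
	 * Track the pinned range on the job's list so that
	 * hl_userptr_delete_list() below can unpin and free it when the
	 * job is released.
	 */
	list_add_tail(&userptr->job_node, job_userptr_list);

	return 0;
}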
/*
* hl_unpin_host_memory - unpins a chunk of host memory
*
* @hdev : pointer to the habanalabs device structure
* @userptr : pointer to hl_userptr structure
*
* This function does the following:
* - Unpins the physical pages related to the host memory
* - Frees the SG list
*/
int hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
{
struct page **pages;
if (userptr->dma_mapped)
hdev->asic_funcs->hl_dma_unmap_sg(hdev,
userptr->sgt->sgl,
userptr->sgt->nents,
userptr->dir);
pages = frame_vector_pages(userptr->vec);
if (!IS_ERR(pages)) {
int i;
for (i = 0; i < frame_vector_count(userptr->vec); i++)
set_page_dirty_lock(pages[i]);
}
put_vaddr_frames(userptr->vec);
frame_vector_destroy(userptr->vec);
list_del(&userptr->job_node);
sg_free_table(userptr->sgt);
kfree(userptr->sgt);
return 0;
}
/*
* hl_userptr_delete_list - clear userptr list
*
* @hdev : pointer to the habanalabs device structure
* @userptr_list : pointer to the list to clear
*
* This function does the following:
* - Iterates over the list and unpins the host memory and frees the userptr
* structure.
*/
void hl_userptr_delete_list(struct hl_device *hdev,
struct list_head *userptr_list)
{
struct hl_userptr *userptr, *tmp;
list_for_each_entry_safe(userptr, tmp, userptr_list, job_node) {
hl_unpin_host_memory(hdev, userptr);
kfree(userptr);
}
INIT_LIST_HEAD(userptr_list);
}
/*
* hl_userptr_is_pinned - returns whether the given userptr is pinned
*
* @hdev : pointer to the habanalabs device structure
* @addr : user-space virtual address of the memory area to look for
* @size : size of the memory area to look for
* @userptr_list : pointer to the list to search in
* @userptr : on a match, set to point at the matching userptr entry
*
* This function does the following:
* - Iterates over the list and checks whether an entry with the given address
*   and size exists in it, which means that memory is pinned. If so, returns
*   true, otherwise returns false.
*/
bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr,
u32 size, struct list_head *userptr_list,
struct hl_userptr **userptr)
{
list_for_each_entry((*userptr), userptr_list, job_node) {
if ((addr == (*userptr)->addr) && (size == (*userptr)->size))
return true;
}
return false;
}
@@ -73,6 +73,95 @@ union hl_cb_args {
struct hl_cb_out out;
};
/*
* This structure size must always be fixed to 64-bytes for backward
* compatibility
*/
struct hl_cs_chunk {
/*
* For external queue, this represents a Handle of CB on the Host
* For internal queue, this represents an SRAM or DRAM address of the
* internal CB
*/
__u64 cb_handle;
/* Index of queue to put the CB on */
__u32 queue_index;
/*
* Size of command buffer with valid packets
* Can be smaller than the actual CB size
*/
__u32 cb_size;
/* HL_CS_CHUNK_FLAGS_* */
__u32 cs_chunk_flags;
/* Align structure to 64 bytes */
__u32 pad[11];
};
#define HL_CS_FLAGS_FORCE_RESTORE 0x1
#define HL_CS_STATUS_SUCCESS 0
struct hl_cs_in {
/* this holds address of array of hl_cs_chunk for restore phase */
__u64 chunks_restore;
/* this holds address of array of hl_cs_chunk for execution phase */
__u64 chunks_execute;
/* this holds address of array of hl_cs_chunk for store phase -
* Currently not in use
*/
__u64 chunks_store;
/* Number of chunks in restore phase array */
__u32 num_chunks_restore;
/* Number of chunks in execution array */
__u32 num_chunks_execute;
/* Number of chunks in store phase array - Currently not in use */
__u32 num_chunks_store;
/* HL_CS_FLAGS_* */
__u32 cs_flags;
/* Context ID - Currently not in use */
__u32 ctx_id;
};
struct hl_cs_out {
/* this holds the sequence number of the CS to pass to wait ioctl */
__u64 seq;
/* HL_CS_STATUS_* */
__u32 status;
__u32 pad;
};
union hl_cs_args {
struct hl_cs_in in;
struct hl_cs_out out;
};
struct hl_wait_cs_in {
/* Command submission sequence number */
__u64 seq;
/* Absolute timeout to wait in microseconds */
__u64 timeout_us;
/* Context ID - Currently not in use */
__u32 ctx_id;
__u32 pad;
};
#define HL_WAIT_CS_STATUS_COMPLETED 0
#define HL_WAIT_CS_STATUS_BUSY 1
#define HL_WAIT_CS_STATUS_TIMEDOUT 2
#define HL_WAIT_CS_STATUS_ABORTED 3
#define HL_WAIT_CS_STATUS_INTERRUPTED 4
struct hl_wait_cs_out {
/* HL_WAIT_CS_STATUS_* */
__u32 status;
__u32 pad;
};
union hl_wait_cs_args {
struct hl_wait_cs_in in;
struct hl_wait_cs_out out;
};
/*
* Command Buffer
* - Request a Command Buffer
@@ -89,7 +178,74 @@ union hl_cb_args {
#define HL_IOCTL_CB \
_IOWR('H', 0x02, union hl_cb_args)
/*
* Command Submission
*
* To submit work to the device, the user needs to call this IOCTL with a set
* of JOBS. That set of JOBS constitutes a CS object.
* Each JOB will be enqueued on a specific queue, according to the user's input.
* There can be more than one JOB per queue.
*
* There are two types of queues - external and internal. External queues
* are DMA queues which transfer data from/to the Host. All other queues are
* internal. The driver will get completion notifications from the device only
* on JOBS which are enqueued in the external queues.
*
* This IOCTL is asynchronous in regard to the actual execution of the CS. This
* means it returns immediately after ALL the JOBS were enqueued on their
* relevant queues. Therefore, the user mustn't assume the CS has been completed
* or has even started to execute.
*
* Upon successful enqueue, the IOCTL returns an opaque handle which the user
* can use with the "Wait for CS" IOCTL to check whether the handle's CS
* external JOBS have been completed. Note that if the CS has internal JOBS
* which can execute AFTER the external JOBS have finished, the driver might
* report that the CS has finished executing BEFORE the internal JOBS have
* actually finished executing.
*
* The CS IOCTL will receive three sets of JOBS. One set is for "restore" phase,
* a second set is for "execution" phase and a third set is for "store" phase.
* The JOBS on the "restore" phase are enqueued only after context-switch
* (or if it's the first CS for this context). The user can also order the
* driver to run the "restore" phase explicitly
*
*/
#define HL_IOCTL_CS \
_IOWR('H', 0x03, union hl_cs_args)
/*
* Wait for Command Submission
*
* The user can call this IOCTL with a handle it received from the CS IOCTL
* to wait until the handle's CS has finished executing. The user will wait
* inside the kernel until the CS has finished or until the user-requested
* timeout has expired.
*
* The return value of the IOCTL is a standard Linux error code. The possible
* values are:
*
* EINTR - Kernel waiting has been interrupted, e.g. due to OS signal
* that the user process received
* ETIMEDOUT - The CS has caused a timeout on the device
* EIO - The CS was aborted (usually because the device was reset)
* ENODEV - The device wants to do a hard-reset (so the user needs to close the FD)
*
* The driver also returns a custom define inside the IOCTL which can be:
*
* HL_WAIT_CS_STATUS_COMPLETED - The CS has been completed successfully (0)
* HL_WAIT_CS_STATUS_BUSY - The CS is still executing (0)
* HL_WAIT_CS_STATUS_TIMEDOUT - The CS has caused a timeout on the device
* (ETIMEDOUT)
* HL_WAIT_CS_STATUS_ABORTED - The CS was aborted, usually because the
* device was reset (EIO)
* HL_WAIT_CS_STATUS_INTERRUPTED - Waiting for the CS was interrupted (EINTR)
*
*/
#define HL_IOCTL_WAIT_CS \
_IOWR('H', 0x04, union hl_wait_cs_args)
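Tying the two IOCTLs together, below is a minimal, illustrative user-space sketch (not part of the patch) of the submit-then-wait flow described above. The helper name submit_and_wait, the one-second timeout and the parameters are made up; cb_handle is assumed to have been obtained earlier via HL_IOCTL_CB, and queue_index is assumed to refer to an external queue.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "habanalabs.h"	/* the uapi header shown in this diff */

static int submit_and_wait(int fd, uint64_t cb_handle, uint32_t cb_size,
			   uint32_t queue_index)
{
	struct hl_cs_chunk execute_chunk;
	union hl_cs_args cs_args;
	union hl_wait_cs_args wait_args;
	int rc;

	/* One JOB in the execution phase, no restore/store chunks */
	memset(&execute_chunk, 0, sizeof(execute_chunk));
	execute_chunk.cb_handle = cb_handle;	/* CB handle (external queue) */
	execute_chunk.queue_index = queue_index;
	execute_chunk.cb_size = cb_size;	/* size of the valid packets */

	memset(&cs_args, 0, sizeof(cs_args));
	cs_args.in.chunks_execute = (uint64_t) (uintptr_t) &execute_chunk;
	cs_args.in.num_chunks_execute = 1;

	rc = ioctl(fd, HL_IOCTL_CS, &cs_args);
	if (rc)
		return rc;	/* enqueue failed */

	/* The returned sequence number is the handle for the wait IOCTL */
	memset(&wait_args, 0, sizeof(wait_args));
	wait_args.in.seq = cs_args.out.seq;
	wait_args.in.timeout_us = 1000000;	/* wait up to one second */

	rc = ioctl(fd, HL_IOCTL_WAIT_CS, &wait_args);
	if (rc)
		return rc;	/* -1 with errno ETIMEDOUT/EIO/ENODEV/EINTR */

	/* COMPLETED means done; BUSY means our timeout expired while it runs */
	return (wait_args.out.status == HL_WAIT_CS_STATUS_COMPLETED) ? 0 : -1;
}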
#define HL_COMMAND_START 0x02
#define HL_COMMAND_END 0x03
#define HL_COMMAND_END 0x05
#endif /* HABANALABS_H_ */