Commit 027d35d0 authored by Oded Gabbay

habanalabs: rename restore to ctx_switch when appropriate

This patch only renames certain variables and structure members, along
with their accompanying comments.

This is done to better reflect the actions these variables and members
represent.

There is no functional change in this patch.
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
parent b2377e03
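For readers unfamiliar with the mechanism behind the renamed fields: the two tokens implement a one-time, per-context gate. The first submitting thread consumes thread_ctx_switch_token via atomic_cmpxchg() and performs the context switch; every other thread waits on thread_ctx_switch_wait_token until that switch has finished. The stand-alone C sketch below (not part of the patch) mimics this pattern with C11 atomics and pthreads; do_context_switch(), the yield-based spin wait, and the thread count are illustrative stand-ins for the driver's asic_funcs->context_switch() callback and hl_poll_timeout_memory().

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

/* Mirrors the two renamed fields of struct hl_ctx; everything else is a stand-in. */
struct toy_ctx {
        atomic_int thread_ctx_switch_token;      /* 1 until one thread claims the switch */
        atomic_int thread_ctx_switch_wait_token; /* becomes 1 once the switch is done */
};

static struct toy_ctx ctx;

/* Stand-in for hdev->asic_funcs->context_switch(hdev, ctx->asid). */
static void do_context_switch(void)
{
        printf("context switch executed exactly once\n");
}

static void *submit_cs(void *arg)
{
        long id = (long)arg;
        int expected = 1;

        /* Only the first thread wins the token and runs the context switch phase. */
        if (atomic_compare_exchange_strong(&ctx.thread_ctx_switch_token,
                                           &expected, 0)) {
                do_context_switch();
                atomic_store(&ctx.thread_ctx_switch_wait_token, 1);
        } else {
                /*
                 * The other threads wait for the switch to finish before moving
                 * to their execution phase. The driver bounds this wait with
                 * hl_poll_timeout_memory(); here we simply yield and retry.
                 */
                while (!atomic_load(&ctx.thread_ctx_switch_wait_token))
                        sched_yield();
        }

        printf("thread %ld proceeding to execution phase\n", id);
        return NULL;
}

int main(void)
{
        pthread_t threads[4];

        atomic_init(&ctx.thread_ctx_switch_token, 1);
        atomic_init(&ctx.thread_ctx_switch_wait_token, 0);

        for (long i = 0; i < 4; i++)
                pthread_create(&threads[i], NULL, submit_cs, (void *)i);
        for (int i = 0; i < 4; i++)
                pthread_join(threads[i], NULL);

        return 0;
}

Built with cc -pthread, only one thread prints the switch message; the rest block until the wait token flips to 1, which is exactly why "ctx_switch" describes these fields better than "restore".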
@@ -601,7 +601,7 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
         void __user *chunks;
         u32 num_chunks;
         u64 cs_seq = ULONG_MAX;
-        int rc, do_restore;
+        int rc, do_ctx_switch;
         bool need_soft_reset = false;

         if (hl_device_disabled_or_in_reset(hdev)) {
@@ -612,9 +612,9 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
                 goto out;
         }

-        do_restore = atomic_cmpxchg(&ctx->thread_restore_token, 1, 0);
+        do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0);

-        if (do_restore || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
+        if (do_ctx_switch || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
                 long ret;

                 chunks = (void __user *)(uintptr_t)args->in.chunks_restore;
@@ -622,7 +622,7 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
                 mutex_lock(&hpriv->restore_phase_mutex);

-                if (do_restore) {
+                if (do_ctx_switch) {
                         rc = hdev->asic_funcs->context_switch(hdev, ctx->asid);
                         if (rc) {
                                 dev_err_ratelimited(hdev->dev,
@@ -678,18 +678,18 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
                         }
                 }

-                ctx->thread_restore_wait_token = 1;
-        } else if (!ctx->thread_restore_wait_token) {
+                ctx->thread_ctx_switch_wait_token = 1;
+        } else if (!ctx->thread_ctx_switch_wait_token) {
                 u32 tmp;

                 rc = hl_poll_timeout_memory(hdev,
-                        (u64) (uintptr_t) &ctx->thread_restore_wait_token,
+                        (u64) (uintptr_t) &ctx->thread_ctx_switch_wait_token,
                         jiffies_to_usecs(hdev->timeout_jiffies),
                         &tmp);

                 if (rc || !tmp) {
                         dev_err(hdev->dev,
-                                "restore phase hasn't finished in time\n");
+                                "context switch phase didn't finish in time\n");
                         rc = -ETIMEDOUT;
                         goto out;
                 }
...
@@ -106,8 +106,8 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
         ctx->cs_sequence = 1;
         spin_lock_init(&ctx->cs_lock);
-        atomic_set(&ctx->thread_restore_token, 1);
-        ctx->thread_restore_wait_token = 0;
+        atomic_set(&ctx->thread_ctx_switch_token, 1);
+        ctx->thread_ctx_switch_wait_token = 0;

         if (is_kernel_ctx) {
                 ctx->asid = HL_KERNEL_ASID_ID; /* KMD gets ASID 0 */
...
@@ -710,10 +710,10 @@ int hl_device_reset(struct hl_device *hdev, bool hard_reset,
         for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
                 hl_cq_reset(hdev, &hdev->completion_queue[i]);

-        /* Make sure the setup phase for the user context will run again */
+        /* Make sure the context switch phase will run again */
         if (hdev->user_ctx) {
-                atomic_set(&hdev->user_ctx->thread_restore_token, 1);
-                hdev->user_ctx->thread_restore_wait_token = 0;
+                atomic_set(&hdev->user_ctx->thread_ctx_switch_token, 1);
+                hdev->user_ctx->thread_ctx_switch_wait_token = 0;
         }

         /* Finished tear-down, starting to re-initialize */
...
@@ -615,12 +615,13 @@ struct hl_va_range {
  *                      DRAM mapping.
  * @cs_lock: spinlock to protect cs_sequence.
  * @dram_phys_mem: amount of used physical DRAM memory by this context.
- * @thread_restore_token: token to prevent multiple threads of the same context
- *                      from running the restore phase. Only one thread
- *                      should run it.
- * @thread_restore_wait_token: token to prevent the threads that didn't run
- *                      the restore phase from moving to their execution
- *                      phase before the restore phase has finished.
+ * @thread_ctx_switch_token: token to prevent multiple threads of the same
+ *                      context from running the context switch phase.
+ *                      Only a single thread should run it.
+ * @thread_ctx_switch_wait_token: token to prevent the threads that didn't run
+ *                      the context switch phase from moving to their
+ *                      execution phase before the context switch phase
+ *                      has finished.
  * @asid: context's unique address space ID in the device's MMU.
  */
 struct hl_ctx {
@@ -640,8 +641,8 @@ struct hl_ctx {
         u64                     *dram_default_hops;
         spinlock_t              cs_lock;
         atomic64_t              dram_phys_mem;
-        atomic_t                thread_restore_token;
-        u32                     thread_restore_wait_token;
+        atomic_t                thread_ctx_switch_token;
+        u32                     thread_ctx_switch_wait_token;
         u32                     asid;
 };
...