Commit cdfad4db authored by Tomasz Rusinowicz, committed by Jacek Lawrynowicz

accel/ivpu: Add NPU profiling support

Implement the time-based Metric Streamer profiling UAPI.

This is a generic mechanism that allows user mode tools to sample
NPU metrics. The metrics are defined by the FW and are passed through
transparently by the driver.

User space can check for this feature via the
DRM_IVPU_CAP_METRIC_STREAMER driver capability.
Signed-off-by: Tomasz Rusinowicz <tomasz.rusinowicz@intel.com>
Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240513120431.3187212-9-jacek.lawrynowicz@linux.intel.com
parent 68ca7b06
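The capability advertised below can be probed from user space before any metric streamer ioctl is issued. A minimal sketch (not part of this patch), assuming the standard /dev/accel/accel0 node path and the existing DRM_IVPU_PARAM_CAPABILITIES query from the ivpu UAPI:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/ivpu_accel.h>

static int npu_has_metric_streamer(void)
{
        struct drm_ivpu_param p = {
                .param = DRM_IVPU_PARAM_CAPABILITIES,
                .index = DRM_IVPU_CAP_METRIC_STREAMER,
        };
        int fd = open("/dev/accel/accel0", O_RDWR); /* assumed node path */
        int ok = 0;

        if (fd < 0)
                return 0;
        if (!ioctl(fd, DRM_IOCTL_IVPU_GET_PARAM, &p))
                ok = (p.value == 1); /* 1 once this patch is applied */
        close(fd);
        return ok;
}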
 # SPDX-License-Identifier: GPL-2.0-only
-# Copyright (C) 2023 Intel Corporation
+# Copyright (C) 2023-2024 Intel Corporation
 
 intel_vpu-y := \
         ivpu_drv.o \
@@ -13,6 +13,7 @@ intel_vpu-y := \
         ivpu_jsm_msg.o \
         ivpu_mmu.o \
         ivpu_mmu_context.o \
+        ivpu_ms.o \
         ivpu_pm.o
 
 intel_vpu-$(CONFIG_DEBUG_FS) += ivpu_debugfs.o
...
@@ -26,6 +26,7 @@
 #include "ivpu_jsm_msg.h"
 #include "ivpu_mmu.h"
 #include "ivpu_mmu_context.h"
+#include "ivpu_ms.h"
 #include "ivpu_pm.h"
 
 #ifndef DRIVER_VERSION_STR
@@ -100,6 +101,7 @@ static void file_priv_release(struct kref *ref)
         mutex_unlock(&vdev->context_list_lock);
 
         pm_runtime_put_autosuspend(vdev->drm.dev);
+        mutex_destroy(&file_priv->ms_lock);
         mutex_destroy(&file_priv->lock);
         kfree(file_priv);
 }
@@ -122,7 +124,7 @@ static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param
 {
         switch (args->index) {
         case DRM_IVPU_CAP_METRIC_STREAMER:
-                args->value = 0;
+                args->value = 1;
                 break;
         case DRM_IVPU_CAP_DMA_MEMORY_RANGE:
                 args->value = 1;
@@ -231,10 +233,13 @@ static int ivpu_open(struct drm_device *dev, struct drm_file *file)
                 goto err_dev_exit;
         }
 
+        INIT_LIST_HEAD(&file_priv->ms_instance_list);
+
         file_priv->vdev = vdev;
         file_priv->bound = true;
         kref_init(&file_priv->ref);
         mutex_init(&file_priv->lock);
+        mutex_init(&file_priv->ms_lock);
 
         mutex_lock(&vdev->context_list_lock);
@@ -263,6 +268,7 @@ static int ivpu_open(struct drm_device *dev, struct drm_file *file)
         xa_erase_irq(&vdev->context_xa, ctx_id);
 err_unlock:
         mutex_unlock(&vdev->context_list_lock);
+        mutex_destroy(&file_priv->ms_lock);
         mutex_destroy(&file_priv->lock);
         kfree(file_priv);
 err_dev_exit:
@@ -278,6 +284,7 @@ static void ivpu_postclose(struct drm_device *dev, struct drm_file *file)
         ivpu_dbg(vdev, FILE, "file_priv close: ctx %u process %s pid %d\n",
                  file_priv->ctx.id, current->comm, task_pid_nr(current));
 
+        ivpu_ms_cleanup(file_priv);
         ivpu_file_priv_put(&file_priv);
 }
@@ -288,6 +295,10 @@ static const struct drm_ioctl_desc ivpu_drm_ioctls[] = {
         DRM_IOCTL_DEF_DRV(IVPU_BO_INFO, ivpu_bo_info_ioctl, 0),
         DRM_IOCTL_DEF_DRV(IVPU_SUBMIT, ivpu_submit_ioctl, 0),
         DRM_IOCTL_DEF_DRV(IVPU_BO_WAIT, ivpu_bo_wait_ioctl, 0),
+        DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_START, ivpu_ms_start_ioctl, 0),
+        DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_GET_DATA, ivpu_ms_get_data_ioctl, 0),
+        DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_STOP, ivpu_ms_stop_ioctl, 0),
+        DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_GET_INFO, ivpu_ms_get_info_ioctl, 0),
 };
 
 static int ivpu_wait_for_ready(struct ivpu_device *vdev)
@@ -638,6 +649,7 @@ static void ivpu_dev_fini(struct ivpu_device *vdev)
         ivpu_prepare_for_reset(vdev);
         ivpu_shutdown(vdev);
 
+        ivpu_ms_cleanup_all(vdev);
         ivpu_jobs_abort_all(vdev);
         ivpu_job_done_consumer_fini(vdev);
         ivpu_pm_cancel_recovery(vdev);
...
@@ -155,6 +155,9 @@ struct ivpu_file_priv {
         struct mutex lock; /* Protects cmdq */
         struct ivpu_cmdq *cmdq[IVPU_NUM_CMDQS_PER_CTX];
         struct ivpu_mmu_context ctx;
+        struct mutex ms_lock; /* Protects ms_instance_list, ms_info_bo */
+        struct list_head ms_instance_list;
+        struct ivpu_bo *ms_info_bo;
         bool has_mmu_faults;
         bool bound;
 };
...
@@ -440,3 +440,101 @@ int ivpu_jsm_hws_setup_priority_bands(struct ivpu_device *vdev)
 
         return ret;
 }
+
+int ivpu_jsm_metric_streamer_start(struct ivpu_device *vdev, u64 metric_group_mask,
+                                   u64 sampling_rate, u64 buffer_addr, u64 buffer_size)
+{
+        struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_START };
+        struct vpu_jsm_msg resp;
+        int ret;
+
+        req.payload.metric_streamer_start.metric_group_mask = metric_group_mask;
+        req.payload.metric_streamer_start.sampling_rate = sampling_rate;
+        req.payload.metric_streamer_start.buffer_addr = buffer_addr;
+        req.payload.metric_streamer_start.buffer_size = buffer_size;
+
+        ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_START_DONE, &resp,
+                                    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+        if (ret) {
+                ivpu_warn_ratelimited(vdev, "Failed to start metric streamer: ret %d\n", ret);
+                return ret;
+        }
+
+        return ret;
+}
+
+int ivpu_jsm_metric_streamer_stop(struct ivpu_device *vdev, u64 metric_group_mask)
+{
+        struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_STOP };
+        struct vpu_jsm_msg resp;
+        int ret;
+
+        req.payload.metric_streamer_stop.metric_group_mask = metric_group_mask;
+
+        ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_STOP_DONE, &resp,
+                                    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+        if (ret)
+                ivpu_warn_ratelimited(vdev, "Failed to stop metric streamer: ret %d\n", ret);
+
+        return ret;
+}
+
+int ivpu_jsm_metric_streamer_update(struct ivpu_device *vdev, u64 metric_group_mask,
+                                    u64 buffer_addr, u64 buffer_size, u64 *bytes_written)
+{
+        struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_UPDATE };
+        struct vpu_jsm_msg resp;
+        int ret;
+
+        req.payload.metric_streamer_update.metric_group_mask = metric_group_mask;
+        req.payload.metric_streamer_update.buffer_addr = buffer_addr;
+        req.payload.metric_streamer_update.buffer_size = buffer_size;
+
+        ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_UPDATE_DONE, &resp,
+                                    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+        if (ret) {
+                ivpu_warn_ratelimited(vdev, "Failed to update metric streamer: ret %d\n", ret);
+                return ret;
+        }
+
+        if (buffer_size && resp.payload.metric_streamer_done.bytes_written > buffer_size) {
+                ivpu_warn_ratelimited(vdev, "MS buffer overflow: bytes_written %#llx > buffer_size %#llx\n",
+                                      resp.payload.metric_streamer_done.bytes_written, buffer_size);
+                return -EOVERFLOW;
+        }
+
+        *bytes_written = resp.payload.metric_streamer_done.bytes_written;
+
+        return ret;
+}
+
+int ivpu_jsm_metric_streamer_info(struct ivpu_device *vdev, u64 metric_group_mask, u64 buffer_addr,
+                                  u64 buffer_size, u32 *sample_size, u64 *info_size)
+{
+        struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_INFO };
+        struct vpu_jsm_msg resp;
+        int ret;
+
+        req.payload.metric_streamer_start.metric_group_mask = metric_group_mask;
+        req.payload.metric_streamer_start.buffer_addr = buffer_addr;
+        req.payload.metric_streamer_start.buffer_size = buffer_size;
+
+        ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_INFO_DONE, &resp,
+                                    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+        if (ret) {
+                ivpu_warn_ratelimited(vdev, "Failed to get metric streamer info: ret %d\n", ret);
+                return ret;
+        }
+
+        if (!resp.payload.metric_streamer_done.sample_size) {
+                ivpu_warn_ratelimited(vdev, "Invalid sample size\n");
+                return -EBADMSG;
+        }
+
+        if (sample_size)
+                *sample_size = resp.payload.metric_streamer_done.sample_size;
+        if (info_size)
+                *info_size = resp.payload.metric_streamer_done.bytes_written;
+
+        return ret;
+}
@@ -34,5 +34,11 @@ int ivpu_jsm_hws_set_context_sched_properties(struct ivpu_device *vdev, u32 ctx_
 int ivpu_jsm_hws_set_scheduling_log(struct ivpu_device *vdev, u32 engine_idx, u32 host_ssid,
                                     u64 vpu_log_buffer_va);
 int ivpu_jsm_hws_setup_priority_bands(struct ivpu_device *vdev);
+int ivpu_jsm_metric_streamer_start(struct ivpu_device *vdev, u64 metric_group_mask,
+                                   u64 sampling_rate, u64 buffer_addr, u64 buffer_size);
+int ivpu_jsm_metric_streamer_stop(struct ivpu_device *vdev, u64 metric_group_mask);
+int ivpu_jsm_metric_streamer_update(struct ivpu_device *vdev, u64 metric_group_mask,
+                                    u64 buffer_addr, u64 buffer_size, u64 *bytes_written);
+int ivpu_jsm_metric_streamer_info(struct ivpu_device *vdev, u64 metric_group_mask, u64 buffer_addr,
+                                  u64 buffer_size, u32 *sample_size, u64 *info_size);
 #endif
// SPDX-License-Identifier: GPL-2.0-only OR MIT
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */

#include <drm/drm_file.h>

#include "ivpu_drv.h"
#include "ivpu_gem.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_ms.h"
#include "ivpu_pm.h"

#define MS_INFO_BUFFER_SIZE       SZ_16K
#define MS_NUM_BUFFERS            2
#define MS_READ_PERIOD_MULTIPLIER 2
#define MS_MIN_SAMPLE_PERIOD_NS   1000000

static struct ivpu_ms_instance *
get_instance_by_mask(struct ivpu_file_priv *file_priv, u64 metric_mask)
{
        struct ivpu_ms_instance *ms;

        lockdep_assert_held(&file_priv->ms_lock);

        list_for_each_entry(ms, &file_priv->ms_instance_list, ms_instance_node)
                if (ms->mask == metric_mask)
                        return ms;

        return NULL;
}
int ivpu_ms_start_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
        struct ivpu_file_priv *file_priv = file->driver_priv;
        struct drm_ivpu_metric_streamer_start *args = data;
        struct ivpu_device *vdev = file_priv->vdev;
        struct ivpu_ms_instance *ms;
        u64 single_buff_size;
        u32 sample_size;
        int ret;

        if (!args->metric_group_mask || !args->read_period_samples ||
            args->sampling_period_ns < MS_MIN_SAMPLE_PERIOD_NS)
                return -EINVAL;

        mutex_lock(&file_priv->ms_lock);

        if (get_instance_by_mask(file_priv, args->metric_group_mask)) {
                ivpu_err(vdev, "Instance already exists (mask %#llx)\n", args->metric_group_mask);
                ret = -EALREADY;
                goto unlock;
        }

        ms = kzalloc(sizeof(*ms), GFP_KERNEL);
        if (!ms) {
                ret = -ENOMEM;
                goto unlock;
        }

        ms->mask = args->metric_group_mask;

        ret = ivpu_jsm_metric_streamer_info(vdev, ms->mask, 0, 0, &sample_size, NULL);
        if (ret)
                goto err_free_ms;

        single_buff_size = sample_size *
                           ((u64)args->read_period_samples * MS_READ_PERIOD_MULTIPLIER);
        ms->bo = ivpu_bo_create_global(vdev, PAGE_ALIGN(single_buff_size * MS_NUM_BUFFERS),
                                       DRM_IVPU_BO_CACHED | DRM_IVPU_BO_MAPPABLE);
        if (!ms->bo) {
                ivpu_err(vdev, "Failed to allocate MS buffer (size %llu)\n", single_buff_size);
                ret = -ENOMEM;
                goto err_free_ms;
        }

        ms->buff_size = ivpu_bo_size(ms->bo) / MS_NUM_BUFFERS;
        ms->active_buff_vpu_addr = ms->bo->vpu_addr;
        ms->inactive_buff_vpu_addr = ms->bo->vpu_addr + ms->buff_size;
        ms->active_buff_ptr = ivpu_bo_vaddr(ms->bo);
        ms->inactive_buff_ptr = ivpu_bo_vaddr(ms->bo) + ms->buff_size;

        ret = ivpu_jsm_metric_streamer_start(vdev, ms->mask, args->sampling_period_ns,
                                             ms->active_buff_vpu_addr, ms->buff_size);
        if (ret)
                goto err_free_bo;

        args->sample_size = sample_size;
        args->max_data_size = ivpu_bo_size(ms->bo);
        list_add_tail(&ms->ms_instance_node, &file_priv->ms_instance_list);
        goto unlock;

err_free_bo:
        ivpu_bo_free(ms->bo);
err_free_ms:
        kfree(ms);
unlock:
        mutex_unlock(&file_priv->ms_lock);
        return ret;
}
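As a sanity check on the sizing logic above, a worked example with illustrative numbers (not from the patch):

/*
 * Assume the FW reports sample_size = 64 bytes and user space requests
 * read_period_samples = 100:
 *
 *   single_buff_size = 64 * (100 * MS_READ_PERIOD_MULTIPLIER) = 12800 bytes
 *   BO size   = PAGE_ALIGN(12800 * MS_NUM_BUFFERS) = PAGE_ALIGN(25600) = 28672
 *   buff_size = 28672 / MS_NUM_BUFFERS = 14336 bytes per half (4 KiB pages)
 *
 * Each half thus holds at least two read periods' worth of samples, giving
 * user space headroom to read a little late without immediately losing data.
 */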
static int
copy_leftover_bytes(struct ivpu_ms_instance *ms,
                    void __user *user_ptr, u64 user_size, u64 *user_bytes_copied)
{
        u64 copy_bytes;

        if (ms->leftover_bytes) {
                copy_bytes = min(user_size - *user_bytes_copied, ms->leftover_bytes);
                if (copy_to_user(user_ptr + *user_bytes_copied, ms->leftover_addr, copy_bytes))
                        return -EFAULT;

                ms->leftover_bytes -= copy_bytes;
                ms->leftover_addr += copy_bytes;
                *user_bytes_copied += copy_bytes;
        }

        return 0;
}

static int
copy_samples_to_user(struct ivpu_device *vdev, struct ivpu_ms_instance *ms,
                     void __user *user_ptr, u64 user_size, u64 *user_bytes_copied)
{
        u64 bytes_written;
        int ret;

        *user_bytes_copied = 0;

        /* Drain whatever is still pending in the inactive half first. */
        ret = copy_leftover_bytes(ms, user_ptr, user_size, user_bytes_copied);
        if (ret)
                return ret;

        if (*user_bytes_copied == user_size)
                return 0;

        /*
         * Point the FW at the (now drained) inactive half; bytes_written
         * reports how much data sits in the previously active half.
         */
        ret = ivpu_jsm_metric_streamer_update(vdev, ms->mask, ms->inactive_buff_vpu_addr,
                                              ms->buff_size, &bytes_written);
        if (ret)
                return ret;

        swap(ms->active_buff_vpu_addr, ms->inactive_buff_vpu_addr);
        swap(ms->active_buff_ptr, ms->inactive_buff_ptr);

        ms->leftover_bytes = bytes_written;
        ms->leftover_addr = ms->inactive_buff_ptr;

        return copy_leftover_bytes(ms, user_ptr, user_size, user_bytes_copied);
}
int ivpu_ms_get_data_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
        struct drm_ivpu_metric_streamer_get_data *args = data;
        struct ivpu_file_priv *file_priv = file->driver_priv;
        struct ivpu_device *vdev = file_priv->vdev;
        struct ivpu_ms_instance *ms;
        u64 bytes_written;
        int ret;

        if (!args->metric_group_mask)
                return -EINVAL;

        mutex_lock(&file_priv->ms_lock);

        ms = get_instance_by_mask(file_priv, args->metric_group_mask);
        if (!ms) {
                ivpu_err(vdev, "Instance doesn't exist for mask: %#llx\n", args->metric_group_mask);
                ret = -EINVAL;
                goto unlock;
        }

        /* A zero buffer_size only queries the number of bytes ready to be copied. */
        if (!args->buffer_size) {
                ret = ivpu_jsm_metric_streamer_update(vdev, ms->mask, 0, 0, &bytes_written);
                if (ret)
                        goto unlock;
                args->data_size = bytes_written + ms->leftover_bytes;
                goto unlock;
        }

        if (!args->buffer_ptr) {
                ret = -EINVAL;
                goto unlock;
        }

        ret = copy_samples_to_user(vdev, ms, u64_to_user_ptr(args->buffer_ptr),
                                   args->buffer_size, &args->data_size);
unlock:
        mutex_unlock(&file_priv->ms_lock);

        return ret;
}
static void free_instance(struct ivpu_file_priv *file_priv, struct ivpu_ms_instance *ms)
{
        lockdep_assert_held(&file_priv->ms_lock);

        list_del(&ms->ms_instance_node);
        ivpu_jsm_metric_streamer_stop(file_priv->vdev, ms->mask);
        ivpu_bo_free(ms->bo);
        kfree(ms);
}

int ivpu_ms_stop_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
        struct ivpu_file_priv *file_priv = file->driver_priv;
        struct drm_ivpu_metric_streamer_stop *args = data;
        struct ivpu_ms_instance *ms;

        if (!args->metric_group_mask)
                return -EINVAL;

        mutex_lock(&file_priv->ms_lock);

        ms = get_instance_by_mask(file_priv, args->metric_group_mask);
        if (ms)
                free_instance(file_priv, ms);

        mutex_unlock(&file_priv->ms_lock);

        return ms ? 0 : -EINVAL;
}

static inline struct ivpu_bo *get_ms_info_bo(struct ivpu_file_priv *file_priv)
{
        lockdep_assert_held(&file_priv->ms_lock);

        if (file_priv->ms_info_bo)
                return file_priv->ms_info_bo;

        file_priv->ms_info_bo = ivpu_bo_create_global(file_priv->vdev, MS_INFO_BUFFER_SIZE,
                                                      DRM_IVPU_BO_CACHED | DRM_IVPU_BO_MAPPABLE);
        return file_priv->ms_info_bo;
}
int ivpu_ms_get_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
        struct drm_ivpu_metric_streamer_get_data *args = data;
        struct ivpu_file_priv *file_priv = file->driver_priv;
        struct ivpu_device *vdev = file_priv->vdev;
        struct ivpu_bo *bo;
        u64 info_size;
        int ret;

        if (!args->metric_group_mask)
                return -EINVAL;

        if (!args->buffer_size)
                return ivpu_jsm_metric_streamer_info(vdev, args->metric_group_mask,
                                                     0, 0, NULL, &args->data_size);
        if (!args->buffer_ptr)
                return -EINVAL;

        mutex_lock(&file_priv->ms_lock);

        bo = get_ms_info_bo(file_priv);
        if (!bo) {
                ret = -ENOMEM;
                goto unlock;
        }

        ret = ivpu_jsm_metric_streamer_info(vdev, args->metric_group_mask, bo->vpu_addr,
                                            ivpu_bo_size(bo), NULL, &info_size);
        if (ret)
                goto unlock;

        if (args->buffer_size < info_size) {
                ret = -ENOSPC;
                goto unlock;
        }

        if (copy_to_user(u64_to_user_ptr(args->buffer_ptr), ivpu_bo_vaddr(bo), info_size))
                ret = -EFAULT;

        args->data_size = info_size;
unlock:
        mutex_unlock(&file_priv->ms_lock);

        return ret;
}
void ivpu_ms_cleanup(struct ivpu_file_priv *file_priv)
{
        struct ivpu_ms_instance *ms, *tmp;

        mutex_lock(&file_priv->ms_lock);

        if (file_priv->ms_info_bo) {
                ivpu_bo_free(file_priv->ms_info_bo);
                file_priv->ms_info_bo = NULL;
        }

        list_for_each_entry_safe(ms, tmp, &file_priv->ms_instance_list, ms_instance_node)
                free_instance(file_priv, ms);

        mutex_unlock(&file_priv->ms_lock);
}

void ivpu_ms_cleanup_all(struct ivpu_device *vdev)
{
        struct ivpu_file_priv *file_priv;
        unsigned long ctx_id;

        mutex_lock(&vdev->context_list_lock);

        xa_for_each(&vdev->context_xa, ctx_id, file_priv)
                ivpu_ms_cleanup(file_priv);

        mutex_unlock(&vdev->context_list_lock);
}
/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */

#ifndef __IVPU_MS_H__
#define __IVPU_MS_H__

#include <linux/list.h>

struct drm_device;
struct drm_file;
struct ivpu_bo;
struct ivpu_device;
struct ivpu_file_priv;

struct ivpu_ms_instance {
        struct ivpu_bo *bo;
        struct list_head ms_instance_node;
        u64 mask;
        u64 buff_size;
        u64 active_buff_vpu_addr;
        u64 inactive_buff_vpu_addr;
        void *active_buff_ptr;
        void *inactive_buff_ptr;
        u64 leftover_bytes;
        void *leftover_addr;
};
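The active/inactive fields above describe a single BO split into two equal halves; a sketch of the double-buffer layout set up by ivpu_ms_start_ioctl():

/*
 * Per-instance BO layout (sketch):
 *
 *   bo->vpu_addr                 bo->vpu_addr + buff_size
 *   |---------- active ----------|---------- inactive ----------|
 *      FW appends samples here      driver drains leftovers here
 *
 * The halves swap roles on every VPU_JSM_MSG_METRIC_STREAMER_UPDATE
 * issued by copy_samples_to_user().
 */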
int ivpu_ms_start_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
int ivpu_ms_stop_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
int ivpu_ms_get_data_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
int ivpu_ms_get_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
void ivpu_ms_cleanup(struct ivpu_file_priv *file_priv);
void ivpu_ms_cleanup_all(struct ivpu_device *vdev);
#endif /* __IVPU_MS_H__ */
@@ -18,6 +18,7 @@
 #include "ivpu_job.h"
 #include "ivpu_jsm_msg.h"
 #include "ivpu_mmu.h"
+#include "ivpu_ms.h"
 #include "ivpu_pm.h"
 
 static bool ivpu_disable_recovery;
@@ -131,6 +132,7 @@ static void ivpu_pm_recovery_work(struct work_struct *work)
         ivpu_suspend(vdev);
         ivpu_pm_prepare_cold_boot(vdev);
         ivpu_jobs_abort_all(vdev);
+        ivpu_ms_cleanup_all(vdev);
 
         ret = ivpu_resume(vdev);
         if (ret)
@@ -333,6 +335,8 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
         ivpu_hw_reset(vdev);
         ivpu_pm_prepare_cold_boot(vdev);
         ivpu_jobs_abort_all(vdev);
+        ivpu_ms_cleanup_all(vdev);
+
         ivpu_dbg(vdev, PM, "Pre-reset done.\n");
 }
...
 /* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
 /*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
  */
 
 #ifndef __UAPI_IVPU_DRM_H__
@@ -21,6 +21,10 @@ extern "C" {
 #define DRM_IVPU_BO_INFO                  0x03
 #define DRM_IVPU_SUBMIT                   0x05
 #define DRM_IVPU_BO_WAIT                  0x06
+#define DRM_IVPU_METRIC_STREAMER_START    0x07
+#define DRM_IVPU_METRIC_STREAMER_STOP     0x08
+#define DRM_IVPU_METRIC_STREAMER_GET_DATA 0x09
+#define DRM_IVPU_METRIC_STREAMER_GET_INFO 0x0a
 
 #define DRM_IOCTL_IVPU_GET_PARAM \
         DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_GET_PARAM, struct drm_ivpu_param)
@@ -40,6 +44,22 @@ extern "C" {
 #define DRM_IOCTL_IVPU_BO_WAIT \
         DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_BO_WAIT, struct drm_ivpu_bo_wait)
 
+#define DRM_IOCTL_IVPU_METRIC_STREAMER_START \
+        DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_METRIC_STREAMER_START, \
+                 struct drm_ivpu_metric_streamer_start)
+
+#define DRM_IOCTL_IVPU_METRIC_STREAMER_STOP \
+        DRM_IOW(DRM_COMMAND_BASE + DRM_IVPU_METRIC_STREAMER_STOP, \
+                struct drm_ivpu_metric_streamer_stop)
+
+#define DRM_IOCTL_IVPU_METRIC_STREAMER_GET_DATA \
+        DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_METRIC_STREAMER_GET_DATA, \
+                 struct drm_ivpu_metric_streamer_get_data)
+
+#define DRM_IOCTL_IVPU_METRIC_STREAMER_GET_INFO \
+        DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_METRIC_STREAMER_GET_INFO, \
+                 struct drm_ivpu_metric_streamer_get_data)
+
 /**
  * DOC: contexts
  *
@@ -336,6 +356,53 @@ struct drm_ivpu_bo_wait {
         __u32 pad;
 };
+
+/**
+ * struct drm_ivpu_metric_streamer_start - Start collecting metric data
+ */
+struct drm_ivpu_metric_streamer_start {
+        /** @metric_group_mask: Indicates metric streamer instance */
+        __u64 metric_group_mask;
+        /** @sampling_period_ns: Sampling period in nanoseconds */
+        __u64 sampling_period_ns;
+        /**
+         * @read_period_samples:
+         *
+         * Number of samples after which user space will try to read the data.
+         * Reading the data after significantly longer period may cause data loss.
+         */
+        __u32 read_period_samples;
+        /** @sample_size: Returned size of a single sample in bytes */
+        __u32 sample_size;
+        /** @max_data_size: Returned max @data_size from %DRM_IOCTL_IVPU_METRIC_STREAMER_GET_DATA */
+        __u32 max_data_size;
+};
+
+/**
+ * struct drm_ivpu_metric_streamer_get_data - Copy collected metric data
+ */
+struct drm_ivpu_metric_streamer_get_data {
+        /** @metric_group_mask: Indicates metric streamer instance */
+        __u64 metric_group_mask;
+        /** @buffer_ptr: A pointer to a destination for the copied data */
+        __u64 buffer_ptr;
+        /** @buffer_size: Size of the destination buffer */
+        __u64 buffer_size;
+        /**
+         * @data_size: Returned size of copied metric data
+         *
+         * If the @buffer_size is zero, returns the amount of data ready to be copied.
+         */
+        __u64 data_size;
+};
+
+/**
+ * struct drm_ivpu_metric_streamer_stop - Stop collecting metric data
+ */
+struct drm_ivpu_metric_streamer_stop {
+        /** @metric_group_mask: Indicates metric streamer instance */
+        __u64 metric_group_mask;
+};
 
 #if defined(__cplusplus)
 }
 #endif
...
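Taken together, a hedged end-to-end sketch of a sampling session built on the ioctls above (the group mask, periods, and buffer handling are illustrative assumptions; a real tool would first enumerate the FW-defined metric groups via DRM_IOCTL_IVPU_METRIC_STREAMER_GET_INFO):

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/ivpu_accel.h>

/* fd: an open NPU device node; metric group 0x1 is an assumed example. */
static int sample_metrics(int fd)
{
        struct drm_ivpu_metric_streamer_start start = {
                .metric_group_mask = 0x1,
                .sampling_period_ns = 10000000, /* 10 ms, above the 1 ms driver minimum */
                .read_period_samples = 100,
        };
        struct drm_ivpu_metric_streamer_get_data get = { .metric_group_mask = 0x1 };
        struct drm_ivpu_metric_streamer_stop stop = { .metric_group_mask = 0x1 };
        void *buf = NULL;
        int ret = -1;

        if (ioctl(fd, DRM_IOCTL_IVPU_METRIC_STREAMER_START, &start))
                return -1;

        buf = malloc(start.max_data_size);
        if (!buf)
                goto out_stop;

        /* With buffer_size == 0, GET_DATA only reports how many bytes are pending. */
        if (ioctl(fd, DRM_IOCTL_IVPU_METRIC_STREAMER_GET_DATA, &get))
                goto out_stop;

        get.buffer_ptr = (uintptr_t)buf;
        get.buffer_size = start.max_data_size;
        if (ioctl(fd, DRM_IOCTL_IVPU_METRIC_STREAMER_GET_DATA, &get))
                goto out_stop;

        /* buf now holds get.data_size bytes of start.sample_size-sized samples. */
        ret = 0;

out_stop:
        ioctl(fd, DRM_IOCTL_IVPU_METRIC_STREAMER_STOP, &stop);
        free(buf);
        return ret;
}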