Commit 8047d36f authored by Wachowski, Karol; committed by Jacek Lawrynowicz

accel/ivpu: Add debug prints for MMU map/unmap operations

It is a common need to be able to see IOVA/physical address to VPU address
mappings, especially when debugging various kinds of memory-related issues.
The lack of such logs forces the user to modify and recompile the KMD manually.

This commit adds those logs under the MMU_MAP debug mask, which can be turned
on dynamically with a module parameter when the KMD is loaded.
Signed-off-by: Wachowski, Karol <karol.wachowski@intel.com>
Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Reviewed-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240115134434.493839-4-jacek.lawrynowicz@linux.intel.com
parent 929acfb9
--- a/drivers/accel/ivpu/ivpu_drv.h
+++ b/drivers/accel/ivpu/ivpu_drv.h
@@ -56,6 +56,7 @@
 #define IVPU_DBG_JSM     BIT(10)
 #define IVPU_DBG_KREF    BIT(11)
 #define IVPU_DBG_RPM     BIT(12)
+#define IVPU_DBG_MMU_MAP BIT(13)
 
 #define ivpu_err(vdev, fmt, ...) \
 	drm_err(&(vdev)->drm, "%s(): " fmt, __func__, ##__VA_ARGS__)
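
The new IVPU_DBG_MMU_MAP bit only takes effect through the driver's ivpu_dbg() gating macro, which emits a message when the corresponding bit is set in the runtime debug mask. Below is a minimal sketch of that gating pattern, assuming the mask is the module-wide ivpu_dbg_mask already used by the other IVPU_DBG_* bits; the exact macro in ivpu_drv.h may differ in detail.

/* Sketch only: print when the given IVPU_DBG_* bit is set in the
 * module-wide debug mask (assumed name: ivpu_dbg_mask), which is
 * exposed as a module parameter and can be set at KMD load time.
 */
#define ivpu_dbg(vdev, type, fmt, ...) do {                                  \
	if (unlikely(IVPU_DBG_##type & ivpu_dbg_mask))                       \
		dev_dbg((vdev)->drm.dev, "[%s] " fmt, #type, ##__VA_ARGS__); \
} while (0)

Since the new bit is BIT(13), enabling only the map/unmap prints corresponds to setting 0x2000 in that mask.
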
--- a/drivers/accel/ivpu/ivpu_mmu_context.c
+++ b/drivers/accel/ivpu/ivpu_mmu_context.c
@@ -355,6 +355,9 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
 		dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
 		size_t size = sg_dma_len(sg) + sg->offset;
 
+		ivpu_dbg(vdev, MMU_MAP, "Map ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %lu\n",
+			 ctx->id, dma_addr, vpu_addr, size);
+
 		ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
 		if (ret) {
 			ivpu_err(vdev, "Failed to map context pages\n");
@@ -366,6 +369,7 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
 
 	/* Ensure page table modifications are flushed from wc buffers to memory */
 	wmb();
+
 	mutex_unlock(&ctx->lock);
 
 	ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
@@ -388,14 +392,19 @@ ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ct
 	mutex_lock(&ctx->lock);
 
 	for_each_sgtable_dma_sg(sgt, sg, i) {
+		dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
 		size_t size = sg_dma_len(sg) + sg->offset;
 
+		ivpu_dbg(vdev, MMU_MAP, "Unmap ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %lu\n",
+			 ctx->id, dma_addr, vpu_addr, size);
+
 		ivpu_mmu_context_unmap_pages(ctx, vpu_addr, size);
 		vpu_addr += size;
 	}
 
 	/* Ensure page table modifications are flushed from wc buffers to memory */
 	wmb();
+
 	mutex_unlock(&ctx->lock);
 
 	ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
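
For readability, the unmap loop from the last hunk is repeated below with comments spelling out the offset arithmetic; the local dma_addr exists only to feed the new print, since ivpu_mmu_context_unmap_pages() does not need it.

	for_each_sgtable_dma_sg(sgt, sg, i) {
		/* The mapping was created from sg_dma_address() stepped back by
		 * the in-page offset, with the size padded by the same offset,
		 * so the print reports the region that was actually mapped
		 * rather than just the start of the payload. */
		dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
		size_t size = sg_dma_len(sg) + sg->offset;

		ivpu_dbg(vdev, MMU_MAP, "Unmap ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %lu\n",
			 ctx->id, dma_addr, vpu_addr, size);

		ivpu_mmu_context_unmap_pages(ctx, vpu_addr, size);
		vpu_addr += size; /* the VPU address advances by the same padded size */
	}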