Commit 392bf222 authored by Ashutosh Dixit

drm/xe/oa/uapi: OA buffer mmap

Allow the OA buffer to be mmap'd to userspace. This is needed for the MMIO
trigger use case. Even outside that use case, with whitelisted OA head/tail
pointer registers, userspace can receive and interpret OA data from the
mmap'd buffer without issuing read()s on the OA stream fd.

v2: Remove unmap_mapping_range from xe_oa_release (Thomas H)
    Use vm_flags_mod (Umesh)
Acked-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Suggested-by: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
Reviewed-by: Umesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
Signed-off-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240618014609.3233427-14-ashutosh.dixit@intel.com
parent dd6b4718
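For illustration, below is a minimal userspace sketch of the uapi this patch adds. It is hedged: the open path for the OA stream fd is not part of this diff, and the 16 MiB size is an assumption standing in for the kernel's XE_OA_BUFFER_SIZE; the kernel side below only enforces that the mapping is read-only, MAP_PRIVATE, and exactly XE_OA_BUFFER_SIZE long.

/*
 * Sketch only: stream_fd is an already-open OA stream fd (open path
 * not shown in this patch); OA_BUFFER_SIZE is assumed to match the
 * kernel's XE_OA_BUFFER_SIZE (16 MiB here).
 */
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>

#define OA_BUFFER_SIZE (16 * 1024 * 1024)

static const uint8_t *map_oa_buffer(int stream_fd)
{
        /* Must be read-only and MAP_PRIVATE, or the kernel returns -EINVAL */
        void *vaddr = mmap(NULL, OA_BUFFER_SIZE, PROT_READ,
                           MAP_PRIVATE, stream_fd, 0);

        if (vaddr == MAP_FAILED) {
                perror("mmap OA buffer");
                return NULL;
        }
        /* OA reports can now be read and parsed in place, without read() */
        return vaddr;
}

The mapping is released with a plain munmap(); per the v2 note above, xe_oa_release no longer calls unmap_mapping_range.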
@@ -824,6 +824,8 @@ static int xe_oa_alloc_oa_buffer(struct xe_oa_stream *stream)
 		return PTR_ERR(bo);
 
 	stream->oa_buffer.bo = bo;
+	/* mmap implementation requires OA buffer to be in system memory */
+	xe_assert(stream->oa->xe, bo->vmap.is_iomem == 0);
 	stream->oa_buffer.vaddr = bo->vmap.vaddr;
 	return 0;
 }
@@ -1125,6 +1127,49 @@ static int xe_oa_release(struct inode *inode, struct file *file)
 	return 0;
 }
 
+static int xe_oa_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct xe_oa_stream *stream = file->private_data;
+	struct xe_bo *bo = stream->oa_buffer.bo;
+	unsigned long start = vma->vm_start;
+	int i, ret;
+
+	if (xe_perf_stream_paranoid && !perfmon_capable()) {
+		drm_dbg(&stream->oa->xe->drm, "Insufficient privilege to map OA buffer\n");
+		return -EACCES;
+	}
+
+	/* Can mmap the entire OA buffer or nothing (no partial OA buffer mmaps) */
+	if (vma->vm_end - vma->vm_start != XE_OA_BUFFER_SIZE) {
+		drm_dbg(&stream->oa->xe->drm, "Wrong mmap size, must be OA buffer size\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Only support VM_READ, enforce MAP_PRIVATE by checking for
+	 * VM_MAYSHARE, don't copy the vma on fork
+	 */
+	if (vma->vm_flags & (VM_WRITE | VM_EXEC | VM_SHARED | VM_MAYSHARE)) {
+		drm_dbg(&stream->oa->xe->drm, "mmap must be read only\n");
+		return -EINVAL;
+	}
+	vm_flags_mod(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY,
+		     VM_MAYWRITE | VM_MAYEXEC);
+
+	xe_assert(stream->oa->xe, bo->ttm.ttm->num_pages ==
+		  (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
+	for (i = 0; i < bo->ttm.ttm->num_pages; i++) {
+		ret = remap_pfn_range(vma, start, page_to_pfn(bo->ttm.ttm->pages[i]),
+				      PAGE_SIZE, vma->vm_page_prot);
+		if (ret)
+			break;
+		start += PAGE_SIZE;
+	}
+
+	return ret;
+}
+
 static const struct file_operations xe_oa_fops = {
 	.owner = THIS_MODULE,
 	.llseek = no_llseek,
@@ -1132,6 +1177,7 @@ static const struct file_operations xe_oa_fops = {
 	.poll = xe_oa_poll,
 	.read = xe_oa_read,
 	.unlocked_ioctl = xe_oa_ioctl,
+	.mmap = xe_oa_mmap,
 };
 
 static bool engine_supports_mi_query(struct xe_hw_engine *hwe)