Commit cbc543c5 authored by Dave Airlie

Merge tag 'drm-misc-fixes-2022-10-20' of git://anongit.freedesktop.org/drm/drm-misc into drm-fixes

drm-misc-fixes for v6.1-rc2:
- Fix a buffer overflow in format_helper_test.
- Set DDC pointer in drmm_connector_init.
- Compiler fixes for panfrost.
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/c4d05683-8ebe-93b8-d24c-d1d2c68f12c4@linux.intel.com
parents a4294d5c 72655fb9
@@ -116,8 +116,15 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
 				   DMA_RESV_USAGE_BOOKKEEP);
 	}
 
-	if (fence && !p->immediate)
+	if (fence && !p->immediate) {
+		/*
+		 * Most hw generations now have a separate queue for page table
+		 * updates, but when the queue is shared with userspace we need
+		 * the extra CPU round trip to correctly flush the TLB.
+		 */
+		set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &f->flags);
 		swap(*fence, f);
+	}
 	dma_fence_put(f);
 	return 0;
 
...
@@ -435,7 +435,7 @@ int drmm_connector_init(struct drm_device *dev,
 	if (drm_WARN_ON(dev, funcs && funcs->destroy))
		return -EINVAL;
 
-	ret = __drm_connector_init(dev, connector, funcs, connector_type, NULL);
+	ret = __drm_connector_init(dev, connector, funcs, connector_type, ddc);
 	if (ret)
 		return ret;
 
...
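For drivers, the practical effect of this one-liner is that the DDC adapter handed to drmm_connector_init() now actually lands in drm_connector.ddc. A minimal hedged sketch of a caller follows; the funcs table, connector type and ddc pointer are chosen purely for illustration and are not taken from this patch:

#include <linux/i2c.h>
#include <drm/drm_connector.h>

/* Illustrative caller: "example_connector_funcs" and "ddc" are placeholders. */
static int example_connector_create(struct drm_device *dev,
				    struct drm_connector *connector,
				    const struct drm_connector_funcs *example_connector_funcs,
				    struct i2c_adapter *ddc)
{
	/* With the fix above, @ddc is forwarded to __drm_connector_init(). */
	return drmm_connector_init(dev, connector, example_connector_funcs,
				   DRM_MODE_CONNECTOR_DPI, ddc);
}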
@@ -63,13 +63,13 @@ static void panfrost_core_dump_header(struct panfrost_dump_iterator *iter,
 {
 	struct panfrost_dump_object_header *hdr = iter->hdr;
 
-	hdr->magic = cpu_to_le32(PANFROSTDUMP_MAGIC);
-	hdr->type = cpu_to_le32(type);
-	hdr->file_offset = cpu_to_le32(iter->data - iter->start);
-	hdr->file_size = cpu_to_le32(data_end - iter->data);
+	hdr->magic = PANFROSTDUMP_MAGIC;
+	hdr->type = type;
+	hdr->file_offset = iter->data - iter->start;
+	hdr->file_size = data_end - iter->data;
 
 	iter->hdr++;
-	iter->data += le32_to_cpu(hdr->file_size);
+	iter->data += hdr->file_size;
 }
 
 static void
@@ -93,8 +93,8 @@ panfrost_core_dump_registers(struct panfrost_dump_iterator *iter,
 
 		reg = panfrost_dump_registers[i] + js_as_offset;
 
-		dumpreg->reg = cpu_to_le32(reg);
-		dumpreg->value = cpu_to_le32(gpu_read(pfdev, reg));
+		dumpreg->reg = reg;
+		dumpreg->value = gpu_read(pfdev, reg);
 	}
 
 	panfrost_core_dump_header(iter, PANFROSTDUMP_BUF_REG, dumpreg);
@@ -106,7 +106,7 @@ void panfrost_core_dump(struct panfrost_job *job)
 	struct panfrost_dump_iterator iter;
 	struct drm_gem_object *dbo;
 	unsigned int n_obj, n_bomap_pages;
-	__le64 *bomap, *bomap_start;
+	u64 *bomap, *bomap_start;
 	size_t file_size;
 	u32 as_nr;
 	int slot;
@@ -177,11 +177,11 @@ void panfrost_core_dump(struct panfrost_job *job)
 	 * For now, we write the job identifier in the register dump header,
 	 * so that we can decode the entire dump later with pandecode
 	 */
-	iter.hdr->reghdr.jc = cpu_to_le64(job->jc);
-	iter.hdr->reghdr.major = cpu_to_le32(PANFROSTDUMP_MAJOR);
-	iter.hdr->reghdr.minor = cpu_to_le32(PANFROSTDUMP_MINOR);
-	iter.hdr->reghdr.gpu_id = cpu_to_le32(pfdev->features.id);
-	iter.hdr->reghdr.nbos = cpu_to_le64(job->bo_count);
+	iter.hdr->reghdr.jc = job->jc;
+	iter.hdr->reghdr.major = PANFROSTDUMP_MAJOR;
+	iter.hdr->reghdr.minor = PANFROSTDUMP_MINOR;
+	iter.hdr->reghdr.gpu_id = pfdev->features.id;
+	iter.hdr->reghdr.nbos = job->bo_count;
 
 	panfrost_core_dump_registers(&iter, pfdev, as_nr, slot);
@@ -218,27 +218,27 @@ void panfrost_core_dump(struct panfrost_job *job)
 
 		WARN_ON(!mapping->active);
 
-		iter.hdr->bomap.data[0] = cpu_to_le32((bomap - bomap_start));
+		iter.hdr->bomap.data[0] = bomap - bomap_start;
 
 		for_each_sgtable_page(bo->base.sgt, &page_iter, 0) {
 			struct page *page = sg_page_iter_page(&page_iter);
 
 			if (!IS_ERR(page)) {
-				*bomap++ = cpu_to_le64(page_to_phys(page));
+				*bomap++ = page_to_phys(page);
 			} else {
 				dev_err(pfdev->dev, "Panfrost Dump: wrong page\n");
-				*bomap++ = ~cpu_to_le64(0);
+				*bomap++ = 0;
 			}
 		}
 
-		iter.hdr->bomap.iova = cpu_to_le64(mapping->mmnode.start << PAGE_SHIFT);
+		iter.hdr->bomap.iova = mapping->mmnode.start << PAGE_SHIFT;
 
 		vaddr = map.vaddr;
 		memcpy(iter.data, vaddr, bo->base.base.size);
 
 		drm_gem_shmem_vunmap(&bo->base, &map);
 
-		iter.hdr->bomap.valid = cpu_to_le32(1);
+		iter.hdr->bomap.valid = 1;
 
 dump_header:	panfrost_core_dump_header(&iter, PANFROSTDUMP_BUF_BO, iter.data +
 				bo->base.base.size);
...
@@ -385,7 +385,8 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
 	}
 
 	s_fence = to_drm_sched_fence(fence);
-	if (s_fence && s_fence->sched == sched) {
+	if (s_fence && s_fence->sched == sched &&
+	    !test_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags)) {
 
 		/*
 		 * Fence is from the same scheduler, only need to wait for
...
@@ -438,7 +438,7 @@ static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test)
 	iosys_map_set_vaddr(&src, xrgb8888);
 
 	drm_fb_xrgb8888_to_xrgb2101010(&dst, &result->dst_pitch, &src, &fb, &params->clip);
-	buf = le32buf_to_cpu(test, buf, TEST_BUF_SIZE);
+	buf = le32buf_to_cpu(test, buf, dst_size / sizeof(u32));
 	KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
 }
 
...
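For context on the overflow: the destination buffer in this test holds dst_size bytes, so converting TEST_BUF_SIZE little-endian words walked past its end. Below is a minimal sketch of the shape le32buf_to_cpu() is assumed to have (allocate, then convert element by element); the body is an assumption for illustration, not a copy of the test file's helper:

/*
 * Assumed shape, for illustration only: convert exactly @buf_size
 * little-endian 32-bit words, so the caller bounds how far the
 * conversion walks.
 */
static u32 *le32buf_to_cpu(struct kunit *test, const __le32 *buf, size_t buf_size)
{
	u32 *dst;
	size_t n;

	dst = kunit_kzalloc(test, sizeof(*dst) * buf_size, GFP_KERNEL);
	if (!dst)
		return NULL;

	for (n = 0; n < buf_size; n++)
		dst[n] = le32_to_cpu(buf[n]);

	return dst;
}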
@@ -32,6 +32,15 @@
 
 #define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
 
+/**
+ * DRM_SCHED_FENCE_DONT_PIPELINE - Prevent dependency pipelining
+ *
+ * Setting this flag on a scheduler fence prevents pipelining of jobs depending
+ * on this fence. In other words we always insert a full CPU round trip before
+ * dependent jobs are pushed to the hw queue.
+ */
+#define DRM_SCHED_FENCE_DONT_PIPELINE	DMA_FENCE_FLAG_USER_BITS
+
 struct drm_gem_object;
 
 struct drm_gpu_scheduler;
...
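Usage note, as a hedged sketch rather than code from this series: a driver that wants the full CPU round trip for everything depending on a fence only has to set this bit on that fence's flags, exactly as the amdgpu hunk above does; the helper name below is invented for illustration:

#include <linux/dma-fence.h>
#include <drm/gpu_scheduler.h>

/* Illustrative helper, not part of the patch. */
static void example_disable_fence_pipelining(struct dma_fence *fence)
{
	/*
	 * With the bit set, drm_sched_entity_add_dependency_cb() no longer
	 * takes the same-scheduler fast path for dependent jobs.
	 */
	set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags);
}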
@@ -235,25 +235,29 @@ struct drm_panfrost_madvise {
 #define PANFROSTDUMP_BUF_BO (PANFROSTDUMP_BUF_BOMAP + 1)
 #define PANFROSTDUMP_BUF_TRAILER (PANFROSTDUMP_BUF_BO + 1)
 
+/*
+ * This structure is written in the native endianness of the dumping machine;
+ * tools can detect the endianness by looking at the value in 'magic'.
+ */
 struct panfrost_dump_object_header {
-	__le32 magic;
-	__le32 type;
-	__le32 file_size;
-	__le32 file_offset;
+	__u32 magic;
+	__u32 type;
+	__u32 file_size;
+	__u32 file_offset;
 
 	union {
-		struct pan_reg_hdr {
-			__le64 jc;
-			__le32 gpu_id;
-			__le32 major;
-			__le32 minor;
-			__le64 nbos;
+		struct {
+			__u64 jc;
+			__u32 gpu_id;
+			__u32 major;
+			__u32 minor;
+			__u64 nbos;
 		} reghdr;
 
 		struct pan_bomap_hdr {
-			__le32 valid;
-			__le64 iova;
-			__le32 data[2];
+			__u32 valid;
+			__u64 iova;
+			__u32 data[2];
 		} bomap;
 
 		/*
@@ -261,14 +265,14 @@ struct panfrost_dump_object_header {
 		 * with new fields and also keep it 512-byte aligned
 		 */
-		__le32 sizer[496];
+		__u32 sizer[496];
 	};
 };
 
 /* Registers object, an array of these */
 struct panfrost_dump_registers {
-	__le32 reg;
-	__le32 value;
+	__u32 reg;
+	__u32 value;
 };
 
 #if defined(__cplusplus)
...
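Because the dump is now written in the dumping machine's native byte order, a decoder has to check the magic both ways, as the new comment says. A hedged userspace sketch of that check (function name invented; assumes the installed uapi header is reachable as <drm/panfrost_drm.h>):

#include <drm/panfrost_drm.h>

/*
 * Illustrative only: 0 = dump matches our byte order, 1 = every
 * multi-byte field needs byte-swapping, -1 = not a panfrost dump.
 */
static int panfrost_dump_needs_bswap(const struct panfrost_dump_object_header *hdr)
{
	if (hdr->magic == PANFROSTDUMP_MAGIC)
		return 0;
	if (hdr->magic == __builtin_bswap32(PANFROSTDUMP_MAGIC))
		return 1;
	return -1;
}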