Commit d23db898 authored by Dave Airlie

Merge tag 'drm-misc-fixes-2023-01-26' of git://anongit.freedesktop.org/drm/drm-misc into drm-fixes

A fix and a preliminary patch to fix a memory leak in i915, and a
use-after-free fix for fbdev deferred I/O.
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Maxime Ripard <maxime@cerno.tech>
Link: https://patchwork.freedesktop.org/patch/msgid/20230126104018.cbrcjxl5wefdbb2f@houat
parents f439a959 d6591da5
...@@ -171,11 +171,6 @@ static const struct fb_ops drm_fbdev_fb_ops = { ...@@ -171,11 +171,6 @@ static const struct fb_ops drm_fbdev_fb_ops = {
.fb_imageblit = drm_fbdev_fb_imageblit, .fb_imageblit = drm_fbdev_fb_imageblit,
}; };
/*
 * Deferred-I/O configuration shared by all generic-fbdev shadow buffers:
 * dirty pages are flushed by drm_fb_helper_deferred_io() after a delay of
 * HZ / 20 jiffies (i.e. roughly 50 ms). Removed by this commit in favour of
 * a per-helper fb_deferred_io instance embedded in struct drm_fb_helper.
 */
static struct fb_deferred_io drm_fbdev_defio = {
.delay = HZ / 20,
.deferred_io = drm_fb_helper_deferred_io,
};
/* /*
* This function uses the client API to create a framebuffer backed by a dumb buffer. * This function uses the client API to create a framebuffer backed by a dumb buffer.
*/ */
...@@ -222,8 +217,14 @@ static int drm_fbdev_fb_probe(struct drm_fb_helper *fb_helper, ...@@ -222,8 +217,14 @@ static int drm_fbdev_fb_probe(struct drm_fb_helper *fb_helper,
return -ENOMEM; return -ENOMEM;
fbi->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST; fbi->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST;
fbi->fbdefio = &drm_fbdev_defio; /* Set a default deferred I/O handler */
fb_deferred_io_init(fbi); fb_helper->fbdefio.delay = HZ / 20;
fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
fbi->fbdefio = &fb_helper->fbdefio;
ret = fb_deferred_io_init(fbi);
if (ret)
return ret;
} else { } else {
/* buffer is mapped for HW framebuffer */ /* buffer is mapped for HW framebuffer */
ret = drm_client_buffer_vmap(fb_helper->buffer, &map); ret = drm_client_buffer_vmap(fb_helper->buffer, &map);
......
...@@ -240,27 +240,8 @@ void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr, ...@@ -240,27 +240,8 @@ void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
} }
EXPORT_SYMBOL(drm_vma_offset_remove); EXPORT_SYMBOL(drm_vma_offset_remove);
/** static int vma_node_allow(struct drm_vma_offset_node *node,
* drm_vma_node_allow - Add open-file to list of allowed users struct drm_file *tag, bool ref_counted)
* @node: Node to modify
* @tag: Tag of file to remove
*
* Add @tag to the list of allowed open-files for this node. If @tag is
* already on this list, the ref-count is incremented.
*
* The list of allowed-users is preserved across drm_vma_offset_add() and
* drm_vma_offset_remove() calls. You may even call it if the node is currently
* not added to any offset-manager.
*
* You must remove all open-files the same number of times as you added them
* before destroying the node. Otherwise, you will leak memory.
*
* This is locked against concurrent access internally.
*
* RETURNS:
* 0 on success, negative error code on internal failure (out-of-mem)
*/
int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
{ {
struct rb_node **iter; struct rb_node **iter;
struct rb_node *parent = NULL; struct rb_node *parent = NULL;
...@@ -282,6 +263,7 @@ int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag) ...@@ -282,6 +263,7 @@ int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb); entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);
if (tag == entry->vm_tag) { if (tag == entry->vm_tag) {
if (ref_counted)
entry->vm_count++; entry->vm_count++;
goto unlock; goto unlock;
} else if (tag > entry->vm_tag) { } else if (tag > entry->vm_tag) {
...@@ -307,8 +289,58 @@ int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag) ...@@ -307,8 +289,58 @@ int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
kfree(new); kfree(new);
return ret; return ret;
} }
/**
 * drm_vma_node_allow - Add open-file to list of allowed users
 * @node: Node to modify
 * @tag: Tag of file to add
 *
 * Add @tag to the list of allowed open-files for this node. If @tag is
 * already on this list, the ref-count is incremented.
 *
 * The list of allowed-users is preserved across drm_vma_offset_add() and
 * drm_vma_offset_remove() calls. You may even call it if the node is currently
 * not added to any offset-manager.
 *
 * You must remove all open-files the same number of times as you added them
 * before destroying the node. Otherwise, you will leak memory.
 *
 * This is locked against concurrent access internally.
 *
 * RETURNS:
 * 0 on success, negative error code on internal failure (out-of-mem)
 */
int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
{
/* ref_counted=true: repeated calls on the same tag bump vm_count */
return vma_node_allow(node, tag, true);
}
EXPORT_SYMBOL(drm_vma_node_allow); EXPORT_SYMBOL(drm_vma_node_allow);
/**
 * drm_vma_node_allow_once - Add open-file to list of allowed users
 * @node: Node to modify
 * @tag: Tag of file to add
 *
 * Add @tag to the list of allowed open-files for this node.
 *
 * The list of allowed-users is preserved across drm_vma_offset_add() and
 * drm_vma_offset_remove() calls. You may even call it if the node is currently
 * not added to any offset-manager.
 *
 * This is not ref-counted unlike drm_vma_node_allow() hence drm_vma_node_revoke()
 * should only be called once after this.
 *
 * This is locked against concurrent access internally.
 *
 * RETURNS:
 * 0 on success, negative error code on internal failure (out-of-mem)
 */
int drm_vma_node_allow_once(struct drm_vma_offset_node *node, struct drm_file *tag)
{
/* ref_counted=false: a second call on an already-allowed tag is a no-op */
return vma_node_allow(node, tag, false);
}
EXPORT_SYMBOL(drm_vma_node_allow_once);
/** /**
* drm_vma_node_revoke - Remove open-file from list of allowed users * drm_vma_node_revoke - Remove open-file from list of allowed users
* @node: Node to modify * @node: Node to modify
......
...@@ -697,7 +697,7 @@ mmap_offset_attach(struct drm_i915_gem_object *obj, ...@@ -697,7 +697,7 @@ mmap_offset_attach(struct drm_i915_gem_object *obj,
GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo); GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
out: out:
if (file) if (file)
drm_vma_node_allow(&mmo->vma_node, file); drm_vma_node_allow_once(&mmo->vma_node, file);
return mmo; return mmo;
err: err:
......
...@@ -208,6 +208,18 @@ struct drm_fb_helper { ...@@ -208,6 +208,18 @@ struct drm_fb_helper {
* the smem_start field should always be cleared to zero. * the smem_start field should always be cleared to zero.
*/ */
bool hint_leak_smem_start; bool hint_leak_smem_start;
#ifdef CONFIG_FB_DEFERRED_IO
/**
* @fbdefio:
*
* Temporary storage for the driver's FB deferred I/O handler. If the
* driver uses the DRM fbdev emulation layer, this is set by the core
* to a generic deferred I/O handler if a driver is preferring to use
* a shadow buffer.
*/
struct fb_deferred_io fbdefio;
#endif
}; };
static inline struct drm_fb_helper * static inline struct drm_fb_helper *
......
...@@ -74,6 +74,7 @@ void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr, ...@@ -74,6 +74,7 @@ void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
struct drm_vma_offset_node *node); struct drm_vma_offset_node *node);
int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag); int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag);
int drm_vma_node_allow_once(struct drm_vma_offset_node *node, struct drm_file *tag);
void drm_vma_node_revoke(struct drm_vma_offset_node *node, void drm_vma_node_revoke(struct drm_vma_offset_node *node,
struct drm_file *tag); struct drm_file *tag);
bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node, bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment