Commit bcd9d56f authored by Dave Airlie

Merge tag 'drm-misc-fixes-2023-03-16' of git://anongit.freedesktop.org/drm/drm-misc into drm-fixes

Short summary of fixes pull:

 * fix info leak in edid
 * build fix for accel/
 * ref-counting fix for fbdev deferred I/O
 * driver fixes
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/20230316143347.GA9246@linux-uq9g
parents eeac8ede 4028cbf8
 # SPDX-License-Identifier: GPL-2.0-only
-obj-y += habanalabs/
-obj-y += ivpu/
+obj-$(CONFIG_DRM_ACCEL_HABANALABS) += habanalabs/
+obj-$(CONFIG_DRM_ACCEL_IVPU) += ivpu/

@@ -2796,7 +2796,7 @@ u32 drm_edid_get_panel_id(struct i2c_adapter *adapter)
	 * the EDID then we'll just return 0.
	 */
-	base_block = kmalloc(EDID_LENGTH, GFP_KERNEL);
+	base_block = kzalloc(EDID_LENGTH, GFP_KERNEL);
	if (!base_block)
		return 0;

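The kmalloc()-to-kzalloc() switch closes the info leak mentioned in the pull summary: if the EDID transfer returns fewer bytes than EDID_LENGTH, the untouched tail of the buffer would otherwise hold stale heap data. A minimal userspace sketch of the same idea, with an illustrative short_read() helper standing in for the EDID block read:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define BLOCK_LEN 128   /* same size as one EDID block */

    /* Simulated transfer that only fills part of the buffer (a short read). */
    static size_t short_read(unsigned char *buf, size_t len)
    {
        size_t filled = len / 2;

        memset(buf, 0xab, filled);      /* only the first half is written */
        return filled;
    }

    int main(void)
    {
        /* calloc() plays the role of kzalloc(): bytes beyond the short read
         * stay zeroed, so consumers of the buffer never see stale heap data. */
        unsigned char *block = calloc(1, BLOCK_LEN);
        size_t got;

        if (!block)
            return 1;

        got = short_read(block, BLOCK_LEN);
        printf("read %zu bytes; first unread byte is %#x (always 0)\n",
               got, block[got]);

        free(block);
        return 0;
    }
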
@@ -1388,10 +1388,13 @@ EXPORT_SYMBOL(drm_gem_lru_move_tail);
  *
  * @lru: The LRU to scan
  * @nr_to_scan: The number of pages to try to reclaim
+ * @remaining: The number of pages left to reclaim, should be initialized by caller
  * @shrink: Callback to try to shrink/reclaim the object.
  */
 unsigned long
-drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
+drm_gem_lru_scan(struct drm_gem_lru *lru,
+		 unsigned int nr_to_scan,
+		 unsigned long *remaining,
 		 bool (*shrink)(struct drm_gem_object *obj))
 {
 	struct drm_gem_lru still_in_lru;
@@ -1430,8 +1433,10 @@ drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
 		 * hit shrinker in response to trying to get backing pages
 		 * for this obj (ie. while it's lock is already held)
 		 */
-		if (!dma_resv_trylock(obj->resv))
+		if (!dma_resv_trylock(obj->resv)) {
+			*remaining += obj->size >> PAGE_SHIFT;
 			goto tail;
+		}
 
 		if (shrink(obj)) {
 			freed += obj->size >> PAGE_SHIFT;

@@ -619,11 +619,14 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
 	int ret;
 
 	if (obj->import_attach) {
-		/* Drop the reference drm_gem_mmap_obj() acquired.*/
-		drm_gem_object_put(obj);
 		vma->vm_private_data = NULL;
+		ret = dma_buf_mmap(obj->dma_buf, vma, 0);
+
+		/* Drop the reference drm_gem_mmap_obj() acquired.*/
+		if (!ret)
+			drm_gem_object_put(obj);
 
-		return dma_buf_mmap(obj->dma_buf, vma, 0);
+		return ret;
 	}
 
 	ret = drm_gem_shmem_get_pages(shmem);

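The extra GEM reference taken by drm_gem_mmap_obj() is now dropped only when dma_buf_mmap() succeeds; on failure the caller's error path is expected to drop it, so putting it here as well would unbalance the refcount. A toy refcount model of that ownership rule (types and helpers are illustrative, not the DRM API):

    #include <stdio.h>

    struct obj { int refcount; };

    static void put(struct obj *o) { o->refcount--; }

    /* Mirrors the fixed logic: release the extra reference only on success;
     * on failure, leave it for the caller's error path to drop exactly once. */
    static int try_map(struct obj *o, int should_fail)
    {
        int ret = should_fail ? -1 : 0;

        if (!ret)
            put(o);
        return ret;
    }

    int main(void)
    {
        struct obj o = { .refcount = 2 };   /* base ref + the ref the mmap helper took */

        if (try_map(&o, /*should_fail=*/1) != 0)
            put(&o);                        /* caller's cleanup drops the extra ref once */

        printf("refcount after failed map: %d\n", o.refcount);   /* 1, not 0 */
        return 0;
    }
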
@@ -718,7 +718,7 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
 	dw_plat_data = &meson_dw_hdmi->dw_plat_data;
 
 	ret = devm_regulator_get_enable_optional(dev, "hdmi");
-	if (ret < 0)
+	if (ret < 0 && ret != -ENODEV)
 		return ret;
 
 	meson_dw_hdmi->hdmitx_apb = devm_reset_control_get_exclusive(dev,

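devm_regulator_get_enable_optional() reports -ENODEV when the "hdmi" supply simply is not described, and for an optional supply that must not abort the bind. A small sketch of the pattern with a hypothetical get_optional_supply() helper:

    #include <stdio.h>
    #include <errno.h>

    /* Hypothetical helper: 0 on success, -ENODEV when the optional resource
     * is absent, another negative errno on a real failure. */
    static int get_optional_supply(int present, int broken)
    {
        if (!present)
            return -ENODEV;
        return broken ? -EIO : 0;
    }

    static int bind(int present, int broken)
    {
        int ret = get_optional_supply(present, broken);

        /* An absent optional supply is fine; only real errors abort the bind. */
        if (ret < 0 && ret != -ENODEV)
            return ret;

        printf("bound (supply %s)\n", ret == -ENODEV ? "absent" : "enabled");
        return 0;
    }

    int main(void)
    {
        return bind(/*present=*/0, /*broken=*/0) ? 1 : 0;
    }
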
@@ -100,6 +100,8 @@ void meson_vpp_init(struct meson_drm *priv)
 			priv->io_base + _REG(VPP_DOLBY_CTRL));
 		writel_relaxed(0x1020080,
 				priv->io_base + _REG(VPP_DUMMY_DATA1));
+		writel_relaxed(0x42020,
+				priv->io_base + _REG(VPP_DUMMY_DATA));
 	} else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
 		writel_relaxed(0xf, priv->io_base + _REG(DOLBY_PATH_CTRL));

@@ -107,6 +107,7 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 		bool (*shrink)(struct drm_gem_object *obj);
 		bool cond;
 		unsigned long freed;
+		unsigned long remaining;
 	} stages[] = {
 		/* Stages of progressively more aggressive/expensive reclaim: */
 		{ &priv->lru.dontneed, purge, true },
@@ -116,14 +117,18 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 	};
 	long nr = sc->nr_to_scan;
 	unsigned long freed = 0;
+	unsigned long remaining = 0;
 
 	for (unsigned i = 0; (nr > 0) && (i < ARRAY_SIZE(stages)); i++) {
 		if (!stages[i].cond)
 			continue;
 		stages[i].freed =
-			drm_gem_lru_scan(stages[i].lru, nr, stages[i].shrink);
+			drm_gem_lru_scan(stages[i].lru, nr,
+					 &stages[i].remaining,
+					 stages[i].shrink);
 		nr -= stages[i].freed;
 		freed += stages[i].freed;
+		remaining += stages[i].remaining;
 	}
 
 	if (freed) {
@@ -132,7 +137,7 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 				    stages[3].freed);
 	}
 
-	return (freed > 0) ? freed : SHRINK_STOP;
+	return (freed > 0 && remaining > 0) ? freed : SHRINK_STOP;
 }
 
 #ifdef CONFIG_DEBUG_FS
@@ -182,10 +187,12 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
 		NULL,
 	};
 	unsigned idx, unmapped = 0;
+	unsigned long remaining = 0;
 
 	for (idx = 0; lrus[idx] && unmapped < vmap_shrink_limit; idx++) {
 		unmapped += drm_gem_lru_scan(lrus[idx],
 				vmap_shrink_limit - unmapped,
				&remaining,
 				vmap_shrink);
 	}

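With the new out-parameter the shrinker can tell "objects were skipped because they were busy" apart from "there is nothing left to reclaim", and it only reports progress while a retry could still make headway. A self-contained sketch of that staged-scan accumulation, with a hypothetical scan_stage() and made-up page counts standing in for the LRUs:

    #include <stdio.h>

    #define SHRINK_STOP (~0UL)

    /* Hypothetical per-stage scan: frees what it can and reports pages it had
     * to skip (e.g. because an object was busy) through *remaining. */
    static unsigned long scan_stage(unsigned long reclaimable, unsigned long busy,
                                    unsigned long nr_to_scan, unsigned long *remaining)
    {
        unsigned long freed = reclaimable < nr_to_scan ? reclaimable : nr_to_scan;

        *remaining += busy;
        return freed;
    }

    static unsigned long shrinker_scan(unsigned long nr_to_scan)
    {
        /* Two illustrative stages with made-up page counts. */
        struct { unsigned long reclaimable, busy; } stages[] = { { 16, 32 }, { 0, 0 } };
        unsigned long freed = 0, remaining = 0;
        long nr = (long)nr_to_scan;
        unsigned i;

        for (i = 0; nr > 0 && i < sizeof(stages) / sizeof(stages[0]); i++) {
            unsigned long stage_remaining = 0;
            unsigned long stage_freed = scan_stage(stages[i].reclaimable, stages[i].busy,
                                                   (unsigned long)nr, &stage_remaining);

            nr -= (long)stage_freed;
            freed += stage_freed;
            remaining += stage_remaining;
        }

        /* Report progress only while skipped pages could still be reclaimed on a
         * retry; otherwise tell the core to stop calling back (SHRINK_STOP). */
        return (freed > 0 && remaining > 0) ? freed : SHRINK_STOP;
    }

    int main(void)
    {
        unsigned long ret = shrinker_scan(64);

        if (ret == SHRINK_STOP)
            printf("SHRINK_STOP\n");
        else
            printf("freed %lu pages\n", ret);
        return 0;
    }
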
@@ -282,7 +282,7 @@ static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
 	if (pm_runtime_active(pfdev->dev))
 		mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);
 
-	pm_runtime_put_sync_autosuspend(pfdev->dev);
+	pm_runtime_put_autosuspend(pfdev->dev);
 }
 
 static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,

@@ -95,12 +95,12 @@ static int sun4i_drv_bind(struct device *dev)
 	/* drm_vblank_init calls kcalloc, which can fail */
 	ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
 	if (ret)
-		goto cleanup_mode_config;
+		goto unbind_all;
 
 	/* Remove early framebuffers (ie. simplefb) */
 	ret = drm_aperture_remove_framebuffers(false, &sun4i_drv_driver);
 	if (ret)
-		goto cleanup_mode_config;
+		goto unbind_all;
 
 	sun4i_framebuffer_init(drm);
@@ -119,6 +119,8 @@ static int sun4i_drv_bind(struct device *dev)
 
 finish_poll:
 	drm_kms_helper_poll_fini(drm);
+unbind_all:
+	component_unbind_all(dev, NULL);
 cleanup_mode_config:
 	drm_mode_config_cleanup(drm);
 	of_reserved_mem_device_release(dev);

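Jumping to the new unbind_all label makes the error path also unbind the already-bound components before tearing down the mode config, keeping the unwind order the reverse of the setup order. A generic sketch of that goto-unwind pattern (the acquire/release names are illustrative stand-ins, not the sun4i functions):

    #include <stdio.h>

    /* Illustrative stand-ins for component_bind_all(), drm_vblank_init(), ... */
    static int acquire(const char *what, int fail)
    {
        printf("acquire %s\n", what);
        return fail ? -1 : 0;
    }

    static void release(const char *what)
    {
        printf("release %s\n", what);
    }

    static int bind(void)
    {
        int ret;

        ret = acquire("mode_config", 0);
        if (ret)
            goto out;
        ret = acquire("components", 0);
        if (ret)
            goto cleanup_mode_config;
        ret = acquire("vblank", 1);        /* fails: everything above must be undone */
        if (ret)
            goto unbind_all;

        return 0;

    unbind_all:
        release("components");             /* labels unwind in reverse setup order */
    cleanup_mode_config:
        release("mode_config");
    out:
        return ret;
    }

    int main(void)
    {
        return bind() ? 1 : 0;
    }
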
@@ -158,7 +158,7 @@ int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
 			struct ttm_buffer_object *bo = res->bo;
 			uint32_t num_pages;
 
-			if (!bo)
+			if (!bo || bo->resource != res)
 				continue;
 
 			num_pages = PFN_UP(bo->base.size);

@@ -604,7 +604,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
 	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
 
 	if (virtio_gpu_is_shmem(bo) && use_dma_api)
-		dma_sync_sgtable_for_device(&vgdev->vdev->dev,
+		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
 					    bo->base.sgt, DMA_TO_DEVICE);
 
 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
@@ -1026,7 +1026,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
 
 	if (virtio_gpu_is_shmem(bo) && use_dma_api)
-		dma_sync_sgtable_for_device(&vgdev->vdev->dev,
+		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
 					    bo->base.sgt, DMA_TO_DEVICE);
 
 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));

@@ -358,16 +358,21 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
 	if (rc)
 		return rc;
 
-	if (pci_enable_device(dp) < 0) {
+	rc = pci_enable_device(dp);
+	if (rc < 0) {
 		dev_err(&dp->dev, "Cannot enable PCI device\n");
 		goto err_out;
 	}
 
-	if ((dp->resource[0].flags & IORESOURCE_MEM) == 0)
+	if ((dp->resource[0].flags & IORESOURCE_MEM) == 0) {
+		rc = -ENODEV;
 		goto err_disable;
+	}
 	addr = pci_resource_start(dp, 0);
-	if (addr == 0)
+	if (addr == 0) {
+		rc = -ENODEV;
 		goto err_disable;
+	}
 
 	p = framebuffer_alloc(0, &dp->dev);
 	if (p == NULL) {
@@ -417,7 +422,8 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
 
 	init_chips(p, addr);
 
-	if (register_framebuffer(p) < 0) {
+	rc = register_framebuffer(p);
+	if (rc < 0) {
 		dev_err(&dp->dev,"C&T 65550 framebuffer failed to register\n");
 		goto err_unmap;
 	}

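Each failing call now stores its error code in rc (or sets an explicit -ENODEV) before jumping to the cleanup labels, so the probe can propagate a real errno rather than a stale value. A minimal sketch of that convention with placeholder enable_device()/register_fb() helpers:

    #include <stdio.h>
    #include <errno.h>

    /* Placeholder helpers standing in for pci_enable_device() and
     * register_framebuffer(): negative errno on failure. */
    static int enable_device(void) { return 0; }
    static int register_fb(void)   { return -ENOMEM; }

    static int probe(void)
    {
        int rc;

        rc = enable_device();          /* capture the return value... */
        if (rc < 0)
            goto err_out;

        rc = register_fb();
        if (rc < 0)
            goto err_disable;

        return 0;

    err_disable:
        printf("disable device\n");    /* undo enable_device() in a real driver */
    err_out:
        return rc;                     /* ...so the caller sees the real errno */
    }

    int main(void)
    {
        printf("probe: %d\n", probe());
        return 0;
    }
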
@@ -305,17 +305,18 @@ void fb_deferred_io_open(struct fb_info *info,
 			 struct inode *inode,
 			 struct file *file)
 {
+	struct fb_deferred_io *fbdefio = info->fbdefio;
+
 	file->f_mapping->a_ops = &fb_deferred_io_aops;
+	fbdefio->open_count++;
 }
 EXPORT_SYMBOL_GPL(fb_deferred_io_open);
 
-void fb_deferred_io_release(struct fb_info *info)
+static void fb_deferred_io_lastclose(struct fb_info *info)
 {
-	struct fb_deferred_io *fbdefio = info->fbdefio;
 	struct page *page;
 	int i;
 
-	BUG_ON(!fbdefio);
 	cancel_delayed_work_sync(&info->deferred_work);
 
 	/* clear out the mapping that we setup */
@@ -324,13 +325,21 @@ void fb_deferred_io_release(struct fb_info *info)
 		page->mapping = NULL;
 	}
 }
+
+void fb_deferred_io_release(struct fb_info *info)
+{
+	struct fb_deferred_io *fbdefio = info->fbdefio;
+
+	if (!--fbdefio->open_count)
+		fb_deferred_io_lastclose(info);
+}
 EXPORT_SYMBOL_GPL(fb_deferred_io_release);
 
 void fb_deferred_io_cleanup(struct fb_info *info)
 {
 	struct fb_deferred_io *fbdefio = info->fbdefio;
 
-	fb_deferred_io_release(info);
+	fb_deferred_io_lastclose(info);
 	kvfree(info->pagerefs);
 	mutex_destroy(&fbdefio->lock);

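This is the ref-counting fix from the pull summary: every open bumps open_count, and release only runs the expensive last-close teardown (cancelling deferred work, clearing page mappings) once the final opener is gone. A toy model of that balance, with simplified types in place of fb_info/fb_deferred_io:

    #include <stdio.h>

    struct defio { int open_count; };

    /* Stands in for the work cancellation and page-mapping cleanup that must
     * only happen once, after the last user has closed the device. */
    static void lastclose(struct defio *d)
    {
        printf("lastclose: tearing down\n");
    }

    static void open_file(struct defio *d)
    {
        d->open_count++;
    }

    static void release_file(struct defio *d)
    {
        if (!--d->open_count)
            lastclose(d);
    }

    int main(void)
    {
        struct defio d = { 0 };

        open_file(&d);
        open_file(&d);      /* a second opener keeps the mapping alive */
        release_file(&d);   /* no teardown yet: one user still holds the device */
        release_file(&d);   /* the last close triggers the teardown exactly once */
        return 0;
    }
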
@@ -423,11 +423,11 @@ struct drm_bridge_funcs {
	 *
	 * The returned array must be allocated with kmalloc() and will be
	 * freed by the caller. If the allocation fails, NULL should be
-	 * returned. num_output_fmts must be set to the returned array size.
+	 * returned. num_input_fmts must be set to the returned array size.
	 * Formats listed in the returned array should be listed in decreasing
	 * preference order (the core will try all formats until it finds one
	 * that works). When the format is not supported NULL should be
-	 * returned and num_output_fmts should be set to 0.
+	 * returned and num_input_fmts should be set to 0.
	 *
	 * This method is called on all elements of the bridge chain as part of
	 * the bus format negotiation process that happens in

@@ -476,7 +476,9 @@ int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
 void drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock);
 void drm_gem_lru_remove(struct drm_gem_object *obj);
 void drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj);
-unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
+unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru,
+			       unsigned int nr_to_scan,
+			       unsigned long *remaining,
 			       bool (*shrink)(struct drm_gem_object *obj));
 
 #endif /* __DRM_GEM_H__ */

@@ -212,6 +212,7 @@ struct fb_deferred_io {
 	/* delay between mkwrite and deferred handler */
 	unsigned long delay;
 	bool sort_pagereflist; /* sort pagelist by offset */
+	int open_count; /* number of opened files; protected by fb_info lock */
 	struct mutex lock; /* mutex that protects the pageref list */
 	struct list_head pagereflist; /* list of pagerefs for touched pages */
 	/* callback */