Commit 47700948 authored by Linus Torvalds

Merge tag 'drm-fixes-2022-06-17' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Regular drm fixes for rc3. Nothing too serious, i915, amdgpu and
  exynos all have a few small driver fixes, and two ttm fixes, and one
  compiler warning.

  atomic:
   - fix spurious compiler warning

  ttm:
   - add NULL ptr check in swapout code
   - fix bulk move handling

  i915:
   - Fix page fault on error state read
   - Fix memory leaks in per-gt sysfs
   - Fix multiple fence handling
   - Remove accidental static from a local variable

  amdgpu:
   - Fix regression in GTT size reporting
   - OLED backlight fix

  exynos:
   - Check a null pointer instead of IS_ERR()
   - Rework initialization code of Exynos MIC driver"

* tag 'drm-fixes-2022-06-17' of git://anongit.freedesktop.org/drm/drm:
  drm/amd/display: Cap OLED brightness per max frame-average luminance
  drm/amdgpu: Fix GTT size reporting in amdgpu_ioctl
  drm/exynos: mic: Rework initialization
  drm/exynos: fix IS_ERR() vs NULL check in probe
  drm/ttm: fix bulk move handling v2
  drm/i915/uc: remove accidental static from a local variable
  drm/i915: Individualize fences before adding to dma_resv obj
  drm/i915/gt: Fix memory leaks in per-gt sysfs
  drm/i915/reset: Fix error_state_read ptr + offset use
  drm/ttm: fix missing NULL check in ttm_device_swapout
  drm/atomic: fix warning of unused variable
parents 0639b599 65cf7c02

drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -642,7 +642,6 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                         atomic64_read(&adev->visible_pin_size),
                         vram_gtt.vram_size);
                 vram_gtt.gtt_size = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT)->size;
-                vram_gtt.gtt_size *= PAGE_SIZE;
                 vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
                 return copy_to_user(out, &vram_gtt,
                                     min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
@@ -675,7 +674,6 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                         mem.cpu_accessible_vram.usable_heap_size * 3 / 4;
                 mem.gtt.total_heap_size = gtt_man->size;
-                mem.gtt.total_heap_size *= PAGE_SIZE;
                 mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
                         atomic64_read(&adev->gart_pin_size);
                 mem.gtt.heap_usage = ttm_resource_manager_usage(gtt_man);
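
The two removed multiplications are the whole fix: ttm_resource_manager::size is tracked in bytes on this kernel, so scaling it by PAGE_SIZE again over-reported the GTT heap by a factor of the page size. A self-contained sketch of the arithmetic; the 3 GiB GTT domain and 4 KiB page size are illustrative assumptions, not values from the patch:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096ULL /* assumed x86 page size, for illustration */

    int main(void)
    {
        /* Hypothetical 3 GiB GTT domain, stored by the manager in bytes. */
        uint64_t man_size = 3ULL << 30;

        uint64_t buggy = man_size * PAGE_SIZE; /* old code: bytes rescaled as if pages */
        uint64_t fixed = man_size;             /* new code: already in bytes */

        printf("buggy report: %llu GiB\n", (unsigned long long)(buggy >> 30));
        printf("fixed report: %llu GiB\n", (unsigned long long)(fixed >> 30));
        return 0;
    }

Built and run, this prints 12288 GiB for the old math and 3 GiB for the new, which is the regression userspace saw.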

drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2812,7 +2812,7 @@ static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
 {
-        u32 max_cll, min_cll, max, min, q, r;
+        u32 max_avg, min_cll, max, min, q, r;
         struct amdgpu_dm_backlight_caps *caps;
         struct amdgpu_display_manager *dm;
         struct drm_connector *conn_base;
@@ -2842,7 +2842,7 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
         caps = &dm->backlight_caps[i];
         caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
         caps->aux_support = false;
-        max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
+        max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
         min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
 
         if (caps->ext_caps->bits.oled == 1 /*||
@@ -2870,8 +2870,8 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
          * The results of the above expressions can be verified at
          * pre_computed_values.
          */
-        q = max_cll >> 5;
-        r = max_cll % 32;
+        q = max_avg >> 5;
+        r = max_avg % 32;
         max = (1 << q) * pre_computed_values[r];
         // min luminance: maxLum * (CV/255)^2 / 100
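
The change caps the backlight at max_fall (maximum frame-average light level) instead of max_cll (peak luminance of a single pixel), which is the level an OLED panel can actually sustain across a frame. The decode itself follows the HDR static metadata coding, luminance = 50 * 2^(CV/32) cd/m2: CV is split into q = CV / 32 and r = CV mod 32 so that 2^(CV/32) = 2^q * 2^(r/32), with 50 * 2^(r/32) kept in the driver's pre_computed_values[] table. A standalone check of that identity; the table entry is recomputed here rather than copied from the driver, and max_avg = 96 is a made-up code value:

    #include <math.h>
    #include <stdio.h>

    /* HDR static metadata luminance coding: L(CV) = 50 * 2^(CV/32) cd/m2 */
    static double luminance(unsigned cv)
    {
        return 50.0 * pow(2.0, cv / 32.0);
    }

    int main(void)
    {
        unsigned max_avg = 96;     /* hypothetical max_fall code value */
        unsigned q = max_avg >> 5; /* CV / 32 */
        unsigned r = max_avg % 32; /* CV mod 32 */

        /* pre_computed_values[r] in the driver caches 50 * 2^(r/32) */
        double pre_computed = 50.0 * pow(2.0, r / 32.0);
        double max = (double)(1u << q) * pre_computed;

        printf("direct: %.2f nits\n", luminance(max_avg)); /* closed form */
        printf("split:  %.2f nits\n", max);                /* driver-style q/r split */
        return 0;
    }

Both lines print 400.00 nits (compile with -lm), confirming the q/r split matches the closed form.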

drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -176,15 +176,15 @@ static struct exynos_drm_driver_info exynos_drm_drivers[] = {
         }, {
                 DRV_PTR(mixer_driver, CONFIG_DRM_EXYNOS_MIXER),
                 DRM_COMPONENT_DRIVER
+        }, {
+                DRV_PTR(mic_driver, CONFIG_DRM_EXYNOS_MIC),
+                DRM_COMPONENT_DRIVER
         }, {
                 DRV_PTR(dp_driver, CONFIG_DRM_EXYNOS_DP),
                 DRM_COMPONENT_DRIVER
         }, {
                 DRV_PTR(dsi_driver, CONFIG_DRM_EXYNOS_DSI),
                 DRM_COMPONENT_DRIVER
-        }, {
-                DRV_PTR(mic_driver, CONFIG_DRM_EXYNOS_MIC),
-                DRM_COMPONENT_DRIVER
         }, {
                 DRV_PTR(hdmi_driver, CONFIG_DRM_EXYNOS_HDMI),
                 DRM_COMPONENT_DRIVER
drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -26,6 +26,7 @@
 #include <drm/drm_print.h>
 
 #include "exynos_drm_drv.h"
+#include "exynos_drm_crtc.h"
 
 /* Sysreg registers for MIC */
 #define DSD_CFG_MUX 0x1004
@@ -100,9 +101,7 @@ struct exynos_mic {
 
         bool i80_mode;
         struct videomode vm;
-        struct drm_encoder *encoder;
         struct drm_bridge bridge;
-        struct drm_bridge *next_bridge;
 
         bool enabled;
 };
@@ -229,8 +228,6 @@ static void mic_set_reg_on(struct exynos_mic *mic, bool enable)
         writel(reg, mic->reg + MIC_OP);
 }
 
-static void mic_disable(struct drm_bridge *bridge) { }
-
 static void mic_post_disable(struct drm_bridge *bridge)
 {
         struct exynos_mic *mic = bridge->driver_private;
@@ -297,34 +294,30 @@ static void mic_pre_enable(struct drm_bridge *bridge)
         mutex_unlock(&mic_mutex);
 }
 
-static void mic_enable(struct drm_bridge *bridge) { }
-
-static int mic_attach(struct drm_bridge *bridge,
-                      enum drm_bridge_attach_flags flags)
-{
-        struct exynos_mic *mic = bridge->driver_private;
-
-        return drm_bridge_attach(bridge->encoder, mic->next_bridge,
-                                 &mic->bridge, flags);
-}
-
 static const struct drm_bridge_funcs mic_bridge_funcs = {
-        .disable = mic_disable,
         .post_disable = mic_post_disable,
         .mode_set = mic_mode_set,
         .pre_enable = mic_pre_enable,
-        .enable = mic_enable,
-        .attach = mic_attach,
 };
 
 static int exynos_mic_bind(struct device *dev, struct device *master,
                            void *data)
 {
         struct exynos_mic *mic = dev_get_drvdata(dev);
+        struct drm_device *drm_dev = data;
+        struct exynos_drm_crtc *crtc = exynos_drm_crtc_get_by_type(drm_dev,
                                                EXYNOS_DISPLAY_TYPE_LCD);
+        struct drm_encoder *e, *encoder = NULL;
+
+        drm_for_each_encoder(e, drm_dev)
+                if (e->possible_crtcs == drm_crtc_mask(&crtc->base))
+                        encoder = e;
+        if (!encoder)
+                return -ENODEV;
 
         mic->bridge.driver_private = mic;
-        return 0;
+        return drm_bridge_attach(encoder, &mic->bridge, NULL, 0);
 }
 
 static void exynos_mic_unbind(struct device *dev, struct device *master,
@@ -388,7 +381,6 @@ static int exynos_mic_probe(struct platform_device *pdev)
 {
         struct device *dev = &pdev->dev;
         struct exynos_mic *mic;
-        struct device_node *remote;
         struct resource res;
         int ret, i;
@@ -432,16 +424,6 @@ static int exynos_mic_probe(struct platform_device *pdev)
                 }
         }
 
-        remote = of_graph_get_remote_node(dev->of_node, 1, 0);
-        mic->next_bridge = of_drm_find_bridge(remote);
-        if (IS_ERR(mic->next_bridge)) {
-                DRM_DEV_ERROR(dev, "mic: Failed to find next bridge\n");
-                ret = PTR_ERR(mic->next_bridge);
-                goto err;
-        }
-        of_node_put(remote);
-
         platform_set_drvdata(pdev, mic);
 
         mic->bridge.funcs = &mic_bridge_funcs;
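
After the rework, the MIC driver no longer resolves a next bridge from the device tree in probe (the removed IS_ERR() check was itself questionable for of_drm_find_bridge(), which returns NULL on failure); instead, bind() scans the encoders and attaches the bridge to the one whose possible_crtcs mask matches the LCD CRTC exactly, which is presumably also why mic_driver now sits ahead of dsi_driver in exynos_drm_drivers[]. A minimal, self-contained illustration of that mask-matching idiom; the encoder table here is invented:

    #include <stdint.h>
    #include <stdio.h>

    struct encoder {
        const char *name;
        uint32_t possible_crtcs; /* bitmask of CRTC indexes this encoder can drive */
    };

    int main(void)
    {
        struct encoder encoders[] = {
            { "hdmi", 1u << 1 },
            { "dsi",  1u << 0 }, /* drives CRTC 0, the LCD path */
        };
        uint32_t lcd_crtc_mask = 1u << 0; /* drm_crtc_mask() is 1 << drm_crtc_index() */
        struct encoder *match = NULL;

        for (unsigned i = 0; i < sizeof(encoders) / sizeof(encoders[0]); i++)
            if (encoders[i].possible_crtcs == lcd_crtc_mask)
                match = &encoders[i];

        printf("attach bridge to: %s\n", match ? match->name : "(none, -ENODEV)");
        return 0;
    }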
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -999,7 +999,8 @@ static int eb_validate_vmas(struct i915_execbuffer *eb)
                         }
                 }
 
-                err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
+                /* Reserve enough slots to accommodate composite fences */
+                err = dma_resv_reserve_fences(vma->obj->base.resv, eb->num_batches);
                 if (err)
                         return err;
drivers/gpu/drm/i915/gt/intel_gt.c
@@ -785,6 +785,7 @@ void intel_gt_driver_unregister(struct intel_gt *gt)
 {
         intel_wakeref_t wakeref;
 
+        intel_gt_sysfs_unregister(gt);
         intel_rps_driver_unregister(&gt->rps);
         intel_gsc_fini(&gt->gsc);
drivers/gpu/drm/i915/gt/intel_gt_sysfs.c
@@ -24,7 +24,7 @@ bool is_object_gt(struct kobject *kobj)
 
 static struct intel_gt *kobj_to_gt(struct kobject *kobj)
 {
-        return container_of(kobj, struct kobj_gt, base)->gt;
+        return container_of(kobj, struct intel_gt, sysfs_gt);
 }
 
 struct intel_gt *intel_gt_sysfs_get_drvdata(struct device *dev,
@@ -72,9 +72,9 @@ static struct attribute *id_attrs[] = {
 };
 ATTRIBUTE_GROUPS(id);
 
+/* A kobject needs a release() method even if it does nothing */
 static void kobj_gt_release(struct kobject *kobj)
 {
-        kfree(kobj);
 }
 
 static struct kobj_type kobj_gt_type = {
@@ -85,8 +85,6 @@ static struct kobj_type kobj_gt_type = {
 void intel_gt_sysfs_register(struct intel_gt *gt)
 {
-        struct kobj_gt *kg;
-
         /*
          * We need to make things right with the
          * ABI compatibility. The files were originally
@@ -98,25 +96,22 @@ void intel_gt_sysfs_register(struct intel_gt *gt)
         if (gt_is_root(gt))
                 intel_gt_sysfs_pm_init(gt, gt_get_parent_obj(gt));
 
-        kg = kzalloc(sizeof(*kg), GFP_KERNEL);
-        if (!kg)
+        /* init and xfer ownership to sysfs tree */
+        if (kobject_init_and_add(&gt->sysfs_gt, &kobj_gt_type,
+                                 gt->i915->sysfs_gt, "gt%d", gt->info.id))
                 goto exit_fail;
 
-        kobject_init(&kg->base, &kobj_gt_type);
-        kg->gt = gt;
-
-        /* xfer ownership to sysfs tree */
-        if (kobject_add(&kg->base, gt->i915->sysfs_gt, "gt%d", gt->info.id))
-                goto exit_kobj_put;
-
-        intel_gt_sysfs_pm_init(gt, &kg->base);
+        intel_gt_sysfs_pm_init(gt, &gt->sysfs_gt);
 
         return;
 
-exit_kobj_put:
-        kobject_put(&kg->base);
-
 exit_fail:
+        kobject_put(&gt->sysfs_gt);
         drm_warn(&gt->i915->drm,
                  "failed to initialize gt%d sysfs root\n", gt->info.id);
 }
+
+void intel_gt_sysfs_unregister(struct intel_gt *gt)
+{
+        kobject_put(&gt->sysfs_gt);
+}
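
The leak fix replaces the separately kzalloc()ed struct kobj_gt wrapper with a kobject embedded directly in struct intel_gt: kobj_to_gt() becomes a plain container_of(), kobject_init_and_add() collapses the two-step setup, and teardown is a single kobject_put() with no orphaned heap object to leak. A self-contained sketch of the embedded-kobject pattern, with the types pared down to the essentials:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct kobject { const char *name; };

    struct gt {
        int id;
        struct kobject sysfs_gt; /* embedded: lifetime follows the gt itself */
    };

    static struct gt *kobj_to_gt(struct kobject *kobj)
    {
        return container_of(kobj, struct gt, sysfs_gt);
    }

    int main(void)
    {
        struct gt gt = { .id = 0, .sysfs_gt = { .name = "gt0" } };

        /* Given only the kobject (all a sysfs callback gets), recover the gt. */
        printf("%s -> gt id %d\n", gt.sysfs_gt.name, kobj_to_gt(&gt.sysfs_gt)->id);
        return 0;
    }

With the kobject embedded, the release() callback legitimately has nothing to free, which is exactly what the new empty kobj_gt_release() expresses.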
drivers/gpu/drm/i915/gt/intel_gt_sysfs.h
@@ -13,11 +13,6 @@
 struct intel_gt;
 
-struct kobj_gt {
-        struct kobject base;
-        struct intel_gt *gt;
-};
-
 bool is_object_gt(struct kobject *kobj);
 
 struct drm_i915_private *kobj_to_i915(struct kobject *kobj);
@@ -28,6 +23,7 @@ intel_gt_create_kobj(struct intel_gt *gt,
                      const char *name);
 
 void intel_gt_sysfs_register(struct intel_gt *gt);
+void intel_gt_sysfs_unregister(struct intel_gt *gt);
 struct intel_gt *intel_gt_sysfs_get_drvdata(struct device *dev,
                                             const char *name);
drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -224,6 +224,9 @@ struct intel_gt {
         } mocs;
 
         struct intel_pxp pxp;
+
+        /* gt/gtN sysfs */
+        struct kobject sysfs_gt;
 };
 
 enum intel_gt_scratch_field {
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -156,7 +156,7 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
                 [INTEL_UC_FW_TYPE_GUC] = { blobs_guc, ARRAY_SIZE(blobs_guc) },
                 [INTEL_UC_FW_TYPE_HUC] = { blobs_huc, ARRAY_SIZE(blobs_huc) },
         };
-        static const struct uc_fw_platform_requirement *fw_blobs;
+        const struct uc_fw_platform_requirement *fw_blobs;
         enum intel_platform p = INTEL_INFO(i915)->platform;
         u32 fw_count;
         u8 rev = INTEL_REVID(i915);
drivers/gpu/drm/i915/i915_sysfs.c
@@ -166,7 +166,14 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
         struct device *kdev = kobj_to_dev(kobj);
         struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
         struct i915_gpu_coredump *gpu;
-        ssize_t ret;
+        ssize_t ret = 0;
+
+        /*
+         * FIXME: Concurrent clients triggering resets and reading + clearing
+         * dumps can cause inconsistent sysfs reads when a user calls in with a
+         * non-zero offset to complete a prior partial read but the
+         * gpu_coredump has been cleared or replaced.
+         */
 
         gpu = i915_first_error_state(i915);
         if (IS_ERR(gpu)) {
@@ -178,8 +185,10 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
                 const char *str = "No error state collected\n";
                 size_t len = strlen(str);
 
-                ret = min_t(size_t, count, len - off);
-                memcpy(buf, str + off, ret);
+                if (off < len) {
+                        ret = min_t(size_t, count, len - off);
+                        memcpy(buf, str + off, ret);
+                }
         }
 
         return ret;
@@ -259,4 +268,6 @@ void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
         device_remove_bin_file(kdev, &dpf_attrs_1);
         device_remove_bin_file(kdev, &dpf_attrs);
+
+        kobject_put(dev_priv->sysfs_gt);
 }
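
In the "No error state collected" branch, len - off is computed in size_t; for a read whose offset lies beyond the string (reachable via lseek, or when a prior partial read raced with the dump being cleared, per the FIXME) the old min_t(size_t, count, len - off) saw a wrapped-around huge value and memcpy() ran off the end of the string. The off < len guard turns such reads into a clean 0-byte EOF. A standalone sketch of the fixed callback logic; the function and names here are illustrative:

    #include <stdio.h>
    #include <string.h>

    /* Mimics the sysfs bin-read contract: return bytes copied, 0 at EOF. */
    static long read_msg(char *buf, size_t count, size_t off)
    {
        const char *str = "No error state collected\n";
        size_t len = strlen(str);
        long ret = 0;

        if (off < len) { /* the fix: never let len - off wrap around */
            size_t n = count < len - off ? count : len - off;
            memcpy(buf, str + off, n);
            ret = (long)n;
        }
        return ret;
    }

    int main(void)
    {
        char buf[64];
        long n = read_msg(buf, sizeof(buf), 0);

        printf("first read: %ld bytes\n", n);
        /* Without the guard, off == 40 would make len - off wrap to ~SIZE_MAX. */
        printf("read past end: %ld bytes\n", read_msg(buf, sizeof(buf), 40));
        return 0;
    }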
drivers/gpu/drm/i915/i915_vma.c
@@ -23,6 +23,7 @@
  */
 
 #include <linux/sched/mm.h>
+#include <linux/dma-fence-array.h>
 #include <drm/drm_gem.h>
 
 #include "display/intel_frontbuffer.h"
@@ -1823,6 +1824,21 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
         if (unlikely(err))
                 return err;
 
+        /*
+         * Reserve fences slot early to prevent an allocation after preparing
+         * the workload and associating fences with dma_resv.
+         */
+        if (fence && !(flags & __EXEC_OBJECT_NO_RESERVE)) {
+                struct dma_fence *curr;
+                int idx;
+
+                dma_fence_array_for_each(curr, idx, fence)
+                        ;
+                err = dma_resv_reserve_fences(vma->obj->base.resv, idx);
+                if (unlikely(err))
+                        return err;
+        }
+
         if (flags & EXEC_OBJECT_WRITE) {
                 struct intel_frontbuffer *front;
@@ -1832,31 +1848,23 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
                                 i915_active_add_request(&front->write, rq);
                         intel_frontbuffer_put(front);
                 }
+        }
 
-                if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
-                        err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
-                        if (unlikely(err))
-                                return err;
-                }
+        if (fence) {
+                struct dma_fence *curr;
+                enum dma_resv_usage usage;
+                int idx;
 
-                if (fence) {
-                        dma_resv_add_fence(vma->obj->base.resv, fence,
-                                           DMA_RESV_USAGE_WRITE);
+                obj->read_domains = 0;
+                if (flags & EXEC_OBJECT_WRITE) {
+                        usage = DMA_RESV_USAGE_WRITE;
                         obj->write_domain = I915_GEM_DOMAIN_RENDER;
-                        obj->read_domains = 0;
-                }
-        } else {
-                if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
-                        err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
-                        if (unlikely(err))
-                                return err;
-                }
-
-                if (fence) {
-                        dma_resv_add_fence(vma->obj->base.resv, fence,
-                                           DMA_RESV_USAGE_READ);
+                } else {
+                        usage = DMA_RESV_USAGE_READ;
                         obj->write_domain = 0;
-                }
         }
 
+                dma_fence_array_for_each(curr, idx, fence)
+                        dma_resv_add_fence(vma->obj->base.resv, curr, usage);
+        }
 
         if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
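
Two idioms in the i915_vma.c change are worth noting: dma_fence_array_for_each() with an empty body is run once purely to count the components of a possibly-composite fence, so that exactly idx reservation slots exist before anything is published, and a second walk then adds each component individually with a single dma_resv usage. A self-contained imitation of the counting idiom; the iterator macro here is a stand-in over a NULL-terminated array, not the kernel's:

    #include <stdio.h>

    struct fence { int seqno; };

    /* Stand-in for dma_fence_array_for_each(curr, idx, head): walks a
     * NULL-terminated array and leaves idx == number of fences visited. */
    #define fence_array_for_each(curr, idx, arr) \
        for ((idx) = 0, (curr) = (arr)[0]; (curr); (curr) = (arr)[++(idx)])

    int main(void)
    {
        struct fence a = {1}, b = {2}, c = {3};
        struct fence *composite[] = { &a, &b, &c, NULL };
        struct fence *curr;
        int idx;

        /* Pass 1: empty body, counting only, as in _i915_vma_move_to_active(). */
        fence_array_for_each(curr, idx, composite)
                ;
        printf("reserve %d slots\n", idx);

        /* Pass 2: add each component individually, all with one usage. */
        fence_array_for_each(curr, idx, composite)
                printf("add fence seqno %d\n", curr->seqno);
        return 0;
    }

Reserving per component is what the execbuffer hunk's eb->num_batches change relies on: a composite fence contributes more than one slot.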
drivers/gpu/drm/ttm/ttm_bo.c
@@ -109,11 +109,11 @@ void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
                 return;
 
         spin_lock(&bo->bdev->lru_lock);
-        if (bo->bulk_move && bo->resource)
-                ttm_lru_bulk_move_del(bo->bulk_move, bo->resource);
+        if (bo->resource)
+                ttm_resource_del_bulk_move(bo->resource, bo);
         bo->bulk_move = bulk;
-        if (bo->bulk_move && bo->resource)
-                ttm_lru_bulk_move_add(bo->bulk_move, bo->resource);
+        if (bo->resource)
+                ttm_resource_add_bulk_move(bo->resource, bo);
         spin_unlock(&bo->bdev->lru_lock);
 }
 EXPORT_SYMBOL(ttm_bo_set_bulk_move);
@@ -689,8 +689,11 @@ void ttm_bo_pin(struct ttm_buffer_object *bo)
 {
         dma_resv_assert_held(bo->base.resv);
         WARN_ON_ONCE(!kref_read(&bo->kref));
-        if (!(bo->pin_count++) && bo->bulk_move && bo->resource)
-                ttm_lru_bulk_move_del(bo->bulk_move, bo->resource);
+        spin_lock(&bo->bdev->lru_lock);
+        if (bo->resource)
+                ttm_resource_del_bulk_move(bo->resource, bo);
+        ++bo->pin_count;
+        spin_unlock(&bo->bdev->lru_lock);
 }
 EXPORT_SYMBOL(ttm_bo_pin);
@@ -707,8 +710,11 @@ void ttm_bo_unpin(struct ttm_buffer_object *bo)
         if (WARN_ON_ONCE(!bo->pin_count))
                 return;
 
-        if (!(--bo->pin_count) && bo->bulk_move && bo->resource)
-                ttm_lru_bulk_move_add(bo->bulk_move, bo->resource);
+        spin_lock(&bo->bdev->lru_lock);
+        --bo->pin_count;
+        if (bo->resource)
+                ttm_resource_add_bulk_move(bo->resource, bo);
+        spin_unlock(&bo->bdev->lru_lock);
 }
 EXPORT_SYMBOL(ttm_bo_unpin);
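
The pin/unpin side of the bulk-move fix moves both the pin count and the bulk-move membership under bdev->lru_lock; previously the count changed outside the lock, so a concurrent LRU walker could observe a pinned BO still sitting inside a bulk move, or the reverse. The essential locking shape in a self-contained sketch, with a pthread mutex standing in for the spinlock and a flag for list membership:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct bo {
        pthread_mutex_t lru_lock; /* stands in for bdev->lru_lock */
        unsigned pin_count;
        bool on_bulk_move;        /* stands in for bulk-move list membership */
    };

    static void bo_pin(struct bo *bo)
    {
        pthread_mutex_lock(&bo->lru_lock);
        bo->on_bulk_move = false; /* leave the bulk move... */
        bo->pin_count++;          /* ...and bump the count under the same lock */
        pthread_mutex_unlock(&bo->lru_lock);
    }

    static void bo_unpin(struct bo *bo)
    {
        pthread_mutex_lock(&bo->lru_lock);
        if (--bo->pin_count == 0)
            bo->on_bulk_move = true; /* rejoin only when the last pin drops */
        pthread_mutex_unlock(&bo->lru_lock);
    }

    int main(void)
    {
        struct bo bo = { PTHREAD_MUTEX_INITIALIZER, 0, true };

        bo_pin(&bo);
        printf("pinned:   count=%u on_bulk_move=%d\n", bo.pin_count, bo.on_bulk_move);
        bo_unpin(&bo);
        printf("unpinned: count=%u on_bulk_move=%d\n", bo.pin_count, bo.on_bulk_move);
        return 0;
    }

Any observer that also takes the lock now sees count and membership change atomically, which is the invariant the real helpers below enforce via the !bo->pin_count check.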
drivers/gpu/drm/ttm/ttm_device.c
@@ -156,8 +156,12 @@ int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
                 ttm_resource_manager_for_each_res(man, &cursor, res) {
                         struct ttm_buffer_object *bo = res->bo;
-                        uint32_t num_pages = PFN_UP(bo->base.size);
+                        uint32_t num_pages;
 
+                        if (!bo)
+                                continue;
+
+                        num_pages = PFN_UP(bo->base.size);
                         ret = ttm_bo_swapout(bo, ctx, gfp_flags);
                         /* ttm_bo_swapout has dropped the lru_lock */
                         if (!ret)
drivers/gpu/drm/ttm/ttm_resource.c
@@ -91,8 +91,8 @@ static void ttm_lru_bulk_move_pos_tail(struct ttm_lru_bulk_move_pos *pos,
 }
 
 /* Add the resource to a bulk_move cursor */
-void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk,
-                           struct ttm_resource *res)
+static void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk,
+                                  struct ttm_resource *res)
 {
         struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);
@@ -105,8 +105,8 @@ void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk,
 }
 
 /* Remove the resource from a bulk_move range */
-void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
-                           struct ttm_resource *res)
+static void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
+                                  struct ttm_resource *res)
 {
         struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res);
@@ -122,6 +122,22 @@ void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
         }
 }
 
+/* Add the resource to a bulk move if the BO is configured for it */
+void ttm_resource_add_bulk_move(struct ttm_resource *res,
+                                struct ttm_buffer_object *bo)
+{
+        if (bo->bulk_move && !bo->pin_count)
+                ttm_lru_bulk_move_add(bo->bulk_move, res);
+}
+
+/* Remove the resource from a bulk move if the BO is configured for it */
+void ttm_resource_del_bulk_move(struct ttm_resource *res,
+                                struct ttm_buffer_object *bo)
+{
+        if (bo->bulk_move && !bo->pin_count)
+                ttm_lru_bulk_move_del(bo->bulk_move, res);
+}
+
 /* Move a resource to the LRU or bulk tail */
 void ttm_resource_move_to_lru_tail(struct ttm_resource *res)
 {
@@ -169,15 +185,14 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
         res->bus.is_iomem = false;
         res->bus.caching = ttm_cached;
         res->bo = bo;
-        INIT_LIST_HEAD(&res->lru);
 
         man = ttm_manager_type(bo->bdev, place->mem_type);
         spin_lock(&bo->bdev->lru_lock);
-        man->usage += res->num_pages << PAGE_SHIFT;
-        if (bo->bulk_move)
-                ttm_lru_bulk_move_add(bo->bulk_move, res);
+        if (bo->pin_count)
+                list_add_tail(&res->lru, &bo->bdev->pinned);
         else
-                ttm_resource_move_to_lru_tail(res);
+                list_add_tail(&res->lru, &man->lru[bo->priority]);
+        man->usage += res->num_pages << PAGE_SHIFT;
         spin_unlock(&bo->bdev->lru_lock);
 }
 EXPORT_SYMBOL(ttm_resource_init);
@@ -210,8 +225,16 @@ int ttm_resource_alloc(struct ttm_buffer_object *bo,
 {
         struct ttm_resource_manager *man =
                 ttm_manager_type(bo->bdev, place->mem_type);
+        int ret;
+
+        ret = man->func->alloc(man, bo, place, res_ptr);
+        if (ret)
+                return ret;
 
-        return man->func->alloc(man, bo, place, res_ptr);
+        spin_lock(&bo->bdev->lru_lock);
+        ttm_resource_add_bulk_move(*res_ptr, bo);
+        spin_unlock(&bo->bdev->lru_lock);
+        return 0;
 }
 
 void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
@@ -221,12 +244,9 @@ void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
         if (!*res)
                 return;
 
-        if (bo->bulk_move) {
-                spin_lock(&bo->bdev->lru_lock);
-                ttm_lru_bulk_move_del(bo->bulk_move, *res);
-                spin_unlock(&bo->bdev->lru_lock);
-        }
-
+        spin_lock(&bo->bdev->lru_lock);
+        ttm_resource_del_bulk_move(*res, bo);
+        spin_unlock(&bo->bdev->lru_lock);
         man = ttm_manager_type(bo->bdev, (*res)->mem_type);
         man->func->free(man, *res);
         *res = NULL;
include/drm/drm_atomic.h
@@ -1022,6 +1022,7 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
         for ((__i) = 0; \
              (__i) < (__state)->num_private_objs && \
              ((obj) = (__state)->private_objs[__i].ptr, \
+              (void)(obj) /* Only to avoid unused-but-set-variable warning */, \
              (new_obj_state) = (__state)->private_objs[__i].new_state, 1); \
              (__i)++)
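
The drm_atomic.h change relies on a classic macro idiom: evaluating a variable in a comma expression and casting the result to (void) counts as a use, so callers of the private-object iterator that never touch obj no longer trip gcc 12's -Wunused-but-set-variable, and the cast generates no code. The idiom in isolation, with a made-up iterator macro rather than the DRM one:

    #include <stdio.h>

    /* Iterate over arr, assigning each element to var; the (void)(var) keeps
     * -Wunused-but-set-variable quiet when the caller ignores var. */
    #define for_each_elem(var, arr, n, i) \
        for ((i) = 0; \
             (i) < (n) && ((var) = (arr)[i], (void)(var), 1); \
             (i)++)

    int main(void)
    {
        int arr[] = { 1, 2, 3 };
        int v, i, count = 0;

        for_each_elem(v, arr, 3, i)
            count++; /* never reads v; still compiles warning-free */

        printf("iterations: %d\n", count);
        return 0;
    }

Built with -Wall -Wextra under gcc 12, this should stay warning-free only while the (void) step is present.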
include/drm/ttm/ttm_resource.h
@@ -311,12 +311,12 @@ ttm_resource_manager_cleanup(struct ttm_resource_manager *man)
 }
 
 void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk);
-void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk,
-                           struct ttm_resource *res);
-void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk,
-                           struct ttm_resource *res);
 void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk);
 
+void ttm_resource_add_bulk_move(struct ttm_resource *res,
+                                struct ttm_buffer_object *bo);
+void ttm_resource_del_bulk_move(struct ttm_resource *res,
+                                struct ttm_buffer_object *bo);
 void ttm_resource_move_to_lru_tail(struct ttm_resource *res);
 
 void ttm_resource_init(struct ttm_buffer_object *bo,