Commit 5c1e34b5 authored by Dave Airlie

Merge tag 'drm-misc-fixes-2019-10-17' of git://anongit.freedesktop.org/drm/drm-misc into drm-fixes

-dma-resv: Change shared_count to post-increment to fix lima crash (Qiang)
-ttm: A couple fixes related to lifetime and restore prefault behavior
 (Christian & Thomas)
-panfrost: Fill in missing feature reg values and fix stopped job timeouts
 (Steven)

Cc: Qiang Yu <yuq825@gmail.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Steven Price <steven.price@arm.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Sean Paul <sean@poorly.run>
Link: https://patchwork.freedesktop.org/patch/msgid/20191017203419.GA142909@art_vandelay
parents 7557d278 5b3ec813
...@@ -471,7 +471,7 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj, ...@@ -471,7 +471,7 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
if (pfence_excl) if (pfence_excl)
*pfence_excl = fence_excl; *pfence_excl = fence_excl;
else if (fence_excl) else if (fence_excl)
shared[++shared_count] = fence_excl; shared[shared_count++] = fence_excl;
if (!shared_count) { if (!shared_count) {
kfree(shared); kfree(shared);
......
...@@ -159,6 +159,9 @@ static const struct edid_quirk { ...@@ -159,6 +159,9 @@ static const struct edid_quirk {
/* Medion MD 30217 PG */ /* Medion MD 30217 PG */
{ "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 }, { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
/* Lenovo G50 */
{ "SDC", 18514, EDID_QUIRK_FORCE_6BPC },
/* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */ /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
{ "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC }, { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
......
...@@ -26,6 +26,8 @@ ...@@ -26,6 +26,8 @@
#include "dsi_cfg.h" #include "dsi_cfg.h"
#include "msm_kms.h" #include "msm_kms.h"
#define DSI_RESET_TOGGLE_DELAY_MS 20
static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor) static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
{ {
u32 ver; u32 ver;
...@@ -986,7 +988,7 @@ static void dsi_sw_reset(struct msm_dsi_host *msm_host) ...@@ -986,7 +988,7 @@ static void dsi_sw_reset(struct msm_dsi_host *msm_host)
wmb(); /* clocks need to be enabled before reset */ wmb(); /* clocks need to be enabled before reset */
dsi_write(msm_host, REG_DSI_RESET, 1); dsi_write(msm_host, REG_DSI_RESET, 1);
wmb(); /* make sure reset happen */ msleep(DSI_RESET_TOGGLE_DELAY_MS); /* make sure reset happen */
dsi_write(msm_host, REG_DSI_RESET, 0); dsi_write(msm_host, REG_DSI_RESET, 0);
} }
...@@ -1396,7 +1398,7 @@ static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host) ...@@ -1396,7 +1398,7 @@ static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host)
/* dsi controller can only be reset while clocks are running */ /* dsi controller can only be reset while clocks are running */
dsi_write(msm_host, REG_DSI_RESET, 1); dsi_write(msm_host, REG_DSI_RESET, 1);
wmb(); /* make sure reset happen */ msleep(DSI_RESET_TOGGLE_DELAY_MS); /* make sure reset happen */
dsi_write(msm_host, REG_DSI_RESET, 0); dsi_write(msm_host, REG_DSI_RESET, 0);
wmb(); /* controller out of reset */ wmb(); /* controller out of reset */
dsi_write(msm_host, REG_DSI_CTRL, data0); dsi_write(msm_host, REG_DSI_CTRL, data0);
......
...@@ -208,6 +208,9 @@ static void panfrost_gpu_init_features(struct panfrost_device *pfdev) ...@@ -208,6 +208,9 @@ static void panfrost_gpu_init_features(struct panfrost_device *pfdev)
pfdev->features.mem_features = gpu_read(pfdev, GPU_MEM_FEATURES); pfdev->features.mem_features = gpu_read(pfdev, GPU_MEM_FEATURES);
pfdev->features.mmu_features = gpu_read(pfdev, GPU_MMU_FEATURES); pfdev->features.mmu_features = gpu_read(pfdev, GPU_MMU_FEATURES);
pfdev->features.thread_features = gpu_read(pfdev, GPU_THREAD_FEATURES); pfdev->features.thread_features = gpu_read(pfdev, GPU_THREAD_FEATURES);
pfdev->features.max_threads = gpu_read(pfdev, GPU_THREAD_MAX_THREADS);
pfdev->features.thread_max_workgroup_sz = gpu_read(pfdev, GPU_THREAD_MAX_WORKGROUP_SIZE);
pfdev->features.thread_max_barrier_sz = gpu_read(pfdev, GPU_THREAD_MAX_BARRIER_SIZE);
pfdev->features.coherency_features = gpu_read(pfdev, GPU_COHERENCY_FEATURES); pfdev->features.coherency_features = gpu_read(pfdev, GPU_COHERENCY_FEATURES);
for (i = 0; i < 4; i++) for (i = 0; i < 4; i++)
pfdev->features.texture_features[i] = gpu_read(pfdev, GPU_TEXTURE_FEATURES(i)); pfdev->features.texture_features[i] = gpu_read(pfdev, GPU_TEXTURE_FEATURES(i));
......
...@@ -381,13 +381,19 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job) ...@@ -381,13 +381,19 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job)
job_read(pfdev, JS_TAIL_LO(js)), job_read(pfdev, JS_TAIL_LO(js)),
sched_job); sched_job);
mutex_lock(&pfdev->reset_lock); if (!mutex_trylock(&pfdev->reset_lock))
return;
for (i = 0; i < NUM_JOB_SLOTS; i++) for (i = 0; i < NUM_JOB_SLOTS; i++) {
drm_sched_stop(&pfdev->js->queue[i].sched, sched_job); struct drm_gpu_scheduler *sched = &pfdev->js->queue[i].sched;
drm_sched_stop(sched, sched_job);
if (js != i)
/* Ensure any timeouts on other slots have finished */
cancel_delayed_work_sync(&sched->work_tdr);
}
if (sched_job) drm_sched_increase_karma(sched_job);
drm_sched_increase_karma(sched_job);
spin_lock_irqsave(&pfdev->js->job_lock, flags); spin_lock_irqsave(&pfdev->js->job_lock, flags);
for (i = 0; i < NUM_JOB_SLOTS; i++) { for (i = 0; i < NUM_JOB_SLOTS; i++) {
......
...@@ -63,7 +63,6 @@ config TINYDRM_REPAPER ...@@ -63,7 +63,6 @@ config TINYDRM_REPAPER
depends on DRM && SPI depends on DRM && SPI
select DRM_KMS_HELPER select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER select DRM_KMS_CMA_HELPER
depends on THERMAL || !THERMAL
help help
DRM driver for the following Pervasive Displays panels: DRM driver for the following Pervasive Displays panels:
1.44" TFT EPD Panel (E1144CS021) 1.44" TFT EPD Panel (E1144CS021)
......
...@@ -185,8 +185,9 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo, ...@@ -185,8 +185,9 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
list_add_tail(&bo->lru, &man->lru[bo->priority]); list_add_tail(&bo->lru, &man->lru[bo->priority]);
kref_get(&bo->list_kref); kref_get(&bo->list_kref);
if (bo->ttm && !(bo->ttm->page_flags & if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm &&
(TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) { !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
TTM_PAGE_FLAG_SWAPPED))) {
list_add_tail(&bo->swap, &bdev->glob->swap_lru[bo->priority]); list_add_tail(&bo->swap, &bdev->glob->swap_lru[bo->priority]);
kref_get(&bo->list_kref); kref_get(&bo->list_kref);
} }
...@@ -878,11 +879,11 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev, ...@@ -878,11 +879,11 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
if (!bo) { if (!bo) {
if (busy_bo) if (busy_bo)
ttm_bo_get(busy_bo); kref_get(&busy_bo->list_kref);
spin_unlock(&glob->lru_lock); spin_unlock(&glob->lru_lock);
ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket); ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
if (busy_bo) if (busy_bo)
ttm_bo_put(busy_bo); kref_put(&busy_bo->list_kref, ttm_bo_release_list);
return ret; return ret;
} }
......
...@@ -278,15 +278,13 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf) ...@@ -278,15 +278,13 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
else else
ret = vmf_insert_pfn(&cvma, address, pfn); ret = vmf_insert_pfn(&cvma, address, pfn);
/* /* Never error on prefaulted PTEs */
* Somebody beat us to this PTE or prefaulting to if (unlikely((ret & VM_FAULT_ERROR))) {
* an already populated PTE, or prefaulting error. if (i == 0)
*/ goto out_io_unlock;
else
if (unlikely((ret == VM_FAULT_NOPAGE && i > 0))) break;
break; }
else if (unlikely(ret & VM_FAULT_ERROR))
goto out_io_unlock;
address += PAGE_SIZE; address += PAGE_SIZE;
if (unlikely(++page_offset >= page_last)) if (unlikely(++page_offset >= page_last))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment