Commit 06f5b920 authored by Dave Airlie

Merge tag 'drm-intel-fixes-2024-08-08' of https://gitlab.freedesktop.org/drm/i915/kernel into drm-fixes

- Correct dual PPS handling for MTL_PCH+ [display] (Dnyaneshwar Bhadane)
- Adjust vma offset for framebuffer mmap offset [gem] (Andi Shyti)
- Fix Virtual Memory mapping boundaries calculation [gem] (Andi Shyti)
- Allow evicting to use the requested placement (David Gow)
- Attempt to get pages without eviction first (David Gow)
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Tvrtko Ursulin <tursulin@igalia.com>
Link: https://patchwork.freedesktop.org/patch/msgid/ZrSFpj20b1LbBhCJ@linux
parents fe0ce0d6 787db3bb
@@ -1449,6 +1449,9 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
 static int cnp_num_backlight_controllers(struct drm_i915_private *i915)
 {
+	if (INTEL_PCH_TYPE(i915) >= PCH_MTL)
+		return 2;
+
 	if (INTEL_PCH_TYPE(i915) >= PCH_DG1)
 		return 1;
@@ -351,6 +351,9 @@ static int intel_num_pps(struct drm_i915_private *i915)
 	if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
 		return 2;
 
+	if (INTEL_PCH_TYPE(i915) >= PCH_MTL)
+		return 2;
+
 	if (INTEL_PCH_TYPE(i915) >= PCH_DG1)
 		return 1;
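Both helpers test the newest PCH generation first, so inserting the MTL_PCH check ahead of the DG1 one is what raises the reported count to two on MTL and later. The snippet below is an illustrative sketch only (the helper name is invented and not part of this series) of how such a per-PCH count is typically consumed to validate an instance index:

/*
 * Illustrative sketch, not from this series: a hypothetical caller
 * validating a PPS instance index against the per-PCH count. On
 * MTL_PCH+ both index 0 and index 1 are now accepted.
 */
static bool example_pps_index_valid(struct drm_i915_private *i915, int pps_idx)
{
	return pps_idx >= 0 && pps_idx < intel_num_pps(i915);
}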
@@ -290,6 +290,41 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
 	return i915_error_to_vmf_fault(err);
 }
 
+static void set_address_limits(struct vm_area_struct *area,
+			       struct i915_vma *vma,
+			       unsigned long obj_offset,
+			       unsigned long *start_vaddr,
+			       unsigned long *end_vaddr)
+{
+	unsigned long vm_start, vm_end, vma_size; /* user's memory parameters */
+	long start, end; /* memory boundaries */
+
+	/*
+	 * Let's move into the ">> PAGE_SHIFT"
+	 * domain to be sure not to lose bits
+	 */
+	vm_start = area->vm_start >> PAGE_SHIFT;
+	vm_end = area->vm_end >> PAGE_SHIFT;
+	vma_size = vma->size >> PAGE_SHIFT;
+
+	/*
+	 * Calculate the memory boundaries by considering the offset
+	 * provided by the user during memory mapping and the offset
+	 * provided for the partial mapping.
+	 */
+	start = vm_start;
+	start -= obj_offset;
+	start += vma->gtt_view.partial.offset;
+	end = start + vma_size;
+
+	start = max_t(long, start, vm_start);
+	end = min_t(long, end, vm_end);
+
+	/* Let's move back into the "<< PAGE_SHIFT" domain */
+	*start_vaddr = (unsigned long)start << PAGE_SHIFT;
+	*end_vaddr = (unsigned long)end << PAGE_SHIFT;
+}
+
 static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
 {
 #define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
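To see what set_address_limits() computes, here is a hedged standalone restatement of the same arithmetic with made-up numbers; it is plain user-space C written only to show how the partial-view window is clamped to the user's mapping, and all values are invented for illustration.

/* Standalone illustration of the clamping done by set_address_limits(). */
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	/*
	 * Example: the user mmaps 16 pages starting at object page 4
	 * (obj_offset = 4); the bound GTT view is a partial view that
	 * starts at object page 8 and spans 32 pages.
	 */
	unsigned long vm_start = 0x7f0000000000UL >> PAGE_SHIFT;
	unsigned long vm_end = vm_start + 16;	/* 16 user pages */
	unsigned long obj_offset = 4;		/* from vm_pgoff */
	unsigned long partial_offset = 8;	/* gtt_view.partial.offset */
	unsigned long vma_size = 32;		/* pages covered by the view */
	long start, end;

	/* Same math as the kernel helper, in the ">> PAGE_SHIFT" domain. */
	start = vm_start - obj_offset + partial_offset;
	end = start + vma_size;

	/* Clamp so we never remap outside the user's VMA. */
	if (start < (long)vm_start)
		start = vm_start;
	if (end > (long)vm_end)
		end = vm_end;

	printf("remap [%#lx, %#lx)\n",
	       (unsigned long)start << PAGE_SHIFT,
	       (unsigned long)end << PAGE_SHIFT);
	return 0;
}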
@@ -302,14 +337,18 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
 	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
 	bool write = area->vm_flags & VM_WRITE;
 	struct i915_gem_ww_ctx ww;
+	unsigned long obj_offset;
+	unsigned long start, end; /* memory boundaries */
 	intel_wakeref_t wakeref;
 	struct i915_vma *vma;
 	pgoff_t page_offset;
+	unsigned long pfn;
 	int srcu;
 	int ret;
 
-	/* We don't use vmf->pgoff since that has the fake offset */
+	obj_offset = area->vm_pgoff - drm_vma_node_start(&mmo->vma_node);
 	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
+	page_offset += obj_offset;
 
 	trace_i915_gem_object_fault(obj, page_offset, true, write);
@@ -402,12 +441,14 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
 	if (ret)
 		goto err_unpin;
 
+	set_address_limits(area, vma, obj_offset, &start, &end);
+
+	pfn = (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT;
+	pfn += (start - area->vm_start) >> PAGE_SHIFT;
+	pfn += obj_offset - vma->gtt_view.partial.offset;
+
 	/* Finally, remap it using the new GTT offset */
-	ret = remap_io_mapping(area,
-			       area->vm_start + (vma->gtt_view.partial.offset << PAGE_SHIFT),
-			       (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT,
-			       min_t(u64, vma->size, area->vm_end - area->vm_start),
-			       &ggtt->iomap);
+	ret = remap_io_mapping(area, start, pfn, end - start, &ggtt->iomap);
 	if (ret)
 		goto err_fence;
@@ -1084,6 +1125,8 @@ int i915_gem_fb_mmap(struct drm_i915_gem_object *obj, struct vm_area_struct *vma
 		mmo = mmap_offset_attach(obj, mmap_type, NULL);
 		if (IS_ERR(mmo))
 			return PTR_ERR(mmo);
+
+		vma->vm_pgoff += drm_vma_node_start(&mmo->vma_node);
 	}
 
 	/*
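The i915_gem_fb_mmap() hunk adds the drm_vma_node offset to vm_pgoff so that framebuffer mappings go through the same obj_offset math as regular mmap-offset mappings. For context, the user-space side of that path looks roughly like the sketch below; it is an illustrative example, not part of the patch, and error handling is trimmed.

/* Hedged user-space sketch: map an i915 GEM object through its fake offset. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>

static void *map_gem_object(int drm_fd, uint32_t handle, size_t size)
{
	struct drm_i915_gem_mmap_offset arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	arg.flags = I915_MMAP_OFFSET_GTT;	/* aperture mapping, served by vm_fault_gtt() */

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg))
		return MAP_FAILED;

	/*
	 * arg.offset is the "fake" offset rooted at the object's
	 * drm_vma_node; it becomes vma->vm_pgoff in the kernel, which is
	 * exactly what the obj_offset handling above subtracts back out.
	 */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    drm_fd, arg.offset);
}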
@@ -165,7 +165,6 @@ i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj,
 	i915_ttm_place_from_region(num_allowed ? obj->mm.placements[0] :
 				   obj->mm.region, &places[0], obj->bo_offset,
 				   obj->base.size, flags);
-	places[0].flags |= TTM_PL_FLAG_DESIRED;
 
 	/* Cache this on object? */
 	for (i = 0; i < num_allowed; ++i) {
@@ -779,13 +778,16 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
 		.interruptible = true,
 		.no_wait_gpu = false,
 	};
-	int real_num_busy;
+	struct ttm_placement initial_placement;
+	struct ttm_place initial_place;
 	int ret;
 
 	/* First try only the requested placement. No eviction. */
-	real_num_busy = placement->num_placement;
-	placement->num_placement = 1;
-	ret = ttm_bo_validate(bo, placement, &ctx);
+	initial_placement.num_placement = 1;
+	memcpy(&initial_place, placement->placement, sizeof(struct ttm_place));
+	initial_place.flags |= TTM_PL_FLAG_DESIRED;
+	initial_placement.placement = &initial_place;
+	ret = ttm_bo_validate(bo, &initial_placement, &ctx);
 	if (ret) {
 		ret = i915_ttm_err_to_gem(ret);
 		/*
@@ -800,7 +802,6 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
 	 * If the initial attempt fails, allow all accepted placements,
 	 * evicting if necessary.
 	 */
-	placement->num_placement = real_num_busy;
 	ret = ttm_bo_validate(bo, placement, &ctx);
 	if (ret)
 		return i915_ttm_err_to_gem(ret);
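The last two hunks stop mutating the caller's placement: instead of temporarily forcing placement->num_placement to 1, the first ttm_bo_validate() pass now uses a local single-entry ttm_placement whose entry is marked TTM_PL_FLAG_DESIRED, so no eviction happens and nothing needs to be restored afterwards. A condensed sketch of that two-pass pattern follows; it assumes current TTM headers and is illustrative rather than a drop-in replacement for __i915_ttm_get_pages().

/*
 * Illustrative two-pass validation mirroring the hunks above:
 * pass 1 tries only the preferred placement without evicting anything,
 * pass 2 falls back to every allowed placement with eviction permitted.
 */
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>

static int try_preferred_then_fallback(struct ttm_buffer_object *bo,
				       struct ttm_placement *placement,
				       struct ttm_operation_ctx *ctx)
{
	struct ttm_placement first = { .num_placement = 1 };
	struct ttm_place place = placement->placement[0];
	int ret;

	/* TTM never evicts to satisfy a DESIRED placement, so this pass cannot evict. */
	place.flags |= TTM_PL_FLAG_DESIRED;
	first.placement = &place;

	ret = ttm_bo_validate(bo, &first, ctx);
	if (!ret)
		return 0;

	/* Second pass: all allowed placements, eviction permitted. */
	return ttm_bo_validate(bo, placement, ctx);
}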