Commit 6062acc1 authored by Matthew Auld's avatar Matthew Auld Committed by Rodrigo Vivi

drm/xe/stolen: don't map stolen on small-bar

The driver should still be functional with small-bar, just that the vram
size is clamped to the BAR size (until we add proper support for tiered
vram). For stolen vram we shouldn't iomap anything if the BAR size
doesn't also contain the stolen portion, since on discrete the stolen
portion is always at the end of normal vram. Stolen should still be
functional, just that allocating CPU visible io memory will always
return an error.

v2 (Lucas)
  - Mention in the commit message that stolen vram is always at the end
    of normal vram, which is why stolen is not mappable on small-bar
    systems.
  - Just make xe_ttm_stolen_inaccessible() return true for such cases.
    Also rename to xe_ttm_stolen_cpu_inaccessible to better describe
    that we are talking about direct CPU access. Plus add some
    kernel-doc.

Closes: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/209
Reported-by: Lucas De Marchi <lucas.demarchi@intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 0d83be77
...@@ -1158,7 +1158,7 @@ struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_gt *gt, ...@@ -1158,7 +1158,7 @@ struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_gt *gt,
u64 end = offset == ~0ull ? offset : start + size; u64 end = offset == ~0ull ? offset : start + size;
if (flags & XE_BO_CREATE_STOLEN_BIT && if (flags & XE_BO_CREATE_STOLEN_BIT &&
xe_ttm_stolen_inaccessible(xe)) xe_ttm_stolen_cpu_inaccessible(xe))
flags |= XE_BO_CREATE_GGTT_BIT; flags |= XE_BO_CREATE_GGTT_BIT;
bo = xe_bo_create_locked_range(xe, gt, vm, size, start, end, type, flags); bo = xe_bo_create_locked_range(xe, gt, vm, size, start, end, type, flags);
......
...@@ -21,11 +21,6 @@ ...@@ -21,11 +21,6 @@
#include "xe_ttm_stolen_mgr.h" #include "xe_ttm_stolen_mgr.h"
#include "xe_ttm_vram_mgr.h" #include "xe_ttm_vram_mgr.h"
bool xe_ttm_stolen_inaccessible(struct xe_device *xe)
{
return !IS_DGFX(xe) && GRAPHICS_VERx100(xe) < 1270;
}
struct xe_ttm_stolen_mgr { struct xe_ttm_stolen_mgr {
struct xe_ttm_vram_mgr base; struct xe_ttm_vram_mgr base;
...@@ -43,6 +38,34 @@ to_stolen_mgr(struct ttm_resource_manager *man) ...@@ -43,6 +38,34 @@ to_stolen_mgr(struct ttm_resource_manager *man)
return container_of(man, struct xe_ttm_stolen_mgr, base.manager); return container_of(man, struct xe_ttm_stolen_mgr, base.manager);
} }
/**
 * xe_ttm_stolen_cpu_inaccessible - Can we directly CPU access stolen memory for
 * this device.
 *
 * @xe: xe device
 *
 * On some integrated platforms we can't directly access stolen via the CPU
 * (like some normal system memory). Also on small-bar systems for discrete,
 * since stolen is always at the end of normal VRAM, and the BAR likely doesn't
 * stretch that far. However CPU access of stolen is generally rare, and at
 * least on discrete should not be needed.
 *
 * If this is indeed inaccessible then we fallback to using the GGTT mappable
 * aperture for CPU access. On discrete platforms we have no such thing, so when
 * later attempting to CPU map the memory an error is instead thrown.
 *
 * Returns: true if stolen memory can't be directly CPU mapped (no stolen
 * manager registered, no CPU-visible IO region, or an integrated/older
 * platform with graphics version below 12.70), false otherwise.
 */
bool xe_ttm_stolen_cpu_inaccessible(struct xe_device *xe)
{
	struct ttm_resource_manager *ttm_mgr =
		ttm_manager_type(&xe->ttm, XE_PL_STOLEN);
	struct xe_ttm_stolen_mgr *mgr;

	/* No stolen manager was ever initialized for this device. */
	if (!ttm_mgr)
		return true;

	mgr = to_stolen_mgr(ttm_mgr);

	/*
	 * io_base == 0 means the BAR doesn't cover stolen (e.g. small-bar
	 * discrete, or stolen detected without a mappable BAR region).
	 */
	return !mgr->io_base || GRAPHICS_VERx100(xe) < 1270;
}
static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr) static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
{ {
struct pci_dev *pdev = to_pci_dev(xe->drm.dev); struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
...@@ -126,7 +149,7 @@ void xe_ttm_stolen_mgr_init(struct xe_device *xe) ...@@ -126,7 +149,7 @@ void xe_ttm_stolen_mgr_init(struct xe_device *xe)
if (IS_DGFX(xe)) if (IS_DGFX(xe))
stolen_size = detect_bar2_dgfx(xe, mgr); stolen_size = detect_bar2_dgfx(xe, mgr);
else if (!xe_ttm_stolen_inaccessible(xe)) else if (GRAPHICS_VERx100(xe) >= 1270)
stolen_size = detect_bar2_integrated(xe, mgr); stolen_size = detect_bar2_integrated(xe, mgr);
else else
stolen_size = detect_stolen(xe, mgr); stolen_size = detect_stolen(xe, mgr);
...@@ -140,7 +163,6 @@ void xe_ttm_stolen_mgr_init(struct xe_device *xe) ...@@ -140,7 +163,6 @@ void xe_ttm_stolen_mgr_init(struct xe_device *xe)
if (pgsize < PAGE_SIZE) if (pgsize < PAGE_SIZE)
pgsize = PAGE_SIZE; pgsize = PAGE_SIZE;
err = __xe_ttm_vram_mgr_init(xe, &mgr->base, XE_PL_STOLEN, stolen_size, pgsize); err = __xe_ttm_vram_mgr_init(xe, &mgr->base, XE_PL_STOLEN, stolen_size, pgsize);
if (err) { if (err) {
drm_dbg_kms(&xe->drm, "Stolen mgr init failed: %i\n", err); drm_dbg_kms(&xe->drm, "Stolen mgr init failed: %i\n", err);
...@@ -150,7 +172,7 @@ void xe_ttm_stolen_mgr_init(struct xe_device *xe) ...@@ -150,7 +172,7 @@ void xe_ttm_stolen_mgr_init(struct xe_device *xe)
drm_dbg_kms(&xe->drm, "Initialized stolen memory support with %llu bytes\n", drm_dbg_kms(&xe->drm, "Initialized stolen memory support with %llu bytes\n",
stolen_size); stolen_size);
if (!xe_ttm_stolen_inaccessible(xe)) if (!xe_ttm_stolen_cpu_inaccessible(xe))
mgr->mapping = devm_ioremap_wc(&pdev->dev, mgr->io_base, stolen_size); mgr->mapping = devm_ioremap_wc(&pdev->dev, mgr->io_base, stolen_size);
} }
...@@ -161,10 +183,9 @@ u64 xe_ttm_stolen_io_offset(struct xe_bo *bo, u32 offset) ...@@ -161,10 +183,9 @@ u64 xe_ttm_stolen_io_offset(struct xe_bo *bo, u32 offset)
struct xe_ttm_stolen_mgr *mgr = to_stolen_mgr(ttm_mgr); struct xe_ttm_stolen_mgr *mgr = to_stolen_mgr(ttm_mgr);
struct xe_res_cursor cur; struct xe_res_cursor cur;
if (!mgr->io_base) XE_BUG_ON(!mgr->io_base);
return 0;
if (!IS_DGFX(xe) && xe_ttm_stolen_inaccessible(xe)) if (!IS_DGFX(xe) && xe_ttm_stolen_cpu_inaccessible(xe))
return mgr->io_base + xe_bo_ggtt_addr(bo) + offset; return mgr->io_base + xe_bo_ggtt_addr(bo) + offset;
xe_res_first(bo->ttm.resource, offset, 4096, &cur); xe_res_first(bo->ttm.resource, offset, 4096, &cur);
...@@ -202,6 +223,8 @@ static int __xe_ttm_stolen_io_mem_reserve_stolen(struct xe_device *xe, ...@@ -202,6 +223,8 @@ static int __xe_ttm_stolen_io_mem_reserve_stolen(struct xe_device *xe,
#ifdef CONFIG_X86 #ifdef CONFIG_X86
struct xe_bo *bo = ttm_to_xe_bo(mem->bo); struct xe_bo *bo = ttm_to_xe_bo(mem->bo);
XE_BUG_ON(IS_DGFX(xe));
/* XXX: Require BO to be mapped to GGTT? */ /* XXX: Require BO to be mapped to GGTT? */
if (drm_WARN_ON(&xe->drm, !(bo->flags & XE_BO_CREATE_GGTT_BIT))) if (drm_WARN_ON(&xe->drm, !(bo->flags & XE_BO_CREATE_GGTT_BIT)))
return -EIO; return -EIO;
...@@ -228,7 +251,7 @@ int xe_ttm_stolen_io_mem_reserve(struct xe_device *xe, struct ttm_resource *mem) ...@@ -228,7 +251,7 @@ int xe_ttm_stolen_io_mem_reserve(struct xe_device *xe, struct ttm_resource *mem)
if (!mgr || !mgr->io_base) if (!mgr || !mgr->io_base)
return -EIO; return -EIO;
if (!xe_ttm_stolen_inaccessible(xe)) if (!xe_ttm_stolen_cpu_inaccessible(xe))
return __xe_ttm_stolen_io_mem_reserve_bar2(xe, mgr, mem); return __xe_ttm_stolen_io_mem_reserve_bar2(xe, mgr, mem);
else else
return __xe_ttm_stolen_io_mem_reserve_stolen(xe, mgr, mem); return __xe_ttm_stolen_io_mem_reserve_stolen(xe, mgr, mem);
......
...@@ -14,7 +14,7 @@ struct xe_device; ...@@ -14,7 +14,7 @@ struct xe_device;
void xe_ttm_stolen_mgr_init(struct xe_device *xe); void xe_ttm_stolen_mgr_init(struct xe_device *xe);
int xe_ttm_stolen_io_mem_reserve(struct xe_device *xe, struct ttm_resource *mem); int xe_ttm_stolen_io_mem_reserve(struct xe_device *xe, struct ttm_resource *mem);
bool xe_ttm_stolen_inaccessible(struct xe_device *xe); bool xe_ttm_stolen_cpu_inaccessible(struct xe_device *xe);
u64 xe_ttm_stolen_io_offset(struct xe_bo *bo, u32 offset); u64 xe_ttm_stolen_io_offset(struct xe_bo *bo, u32 offset);
u64 xe_ttm_stolen_gpu_offset(struct xe_device *xe); u64 xe_ttm_stolen_gpu_offset(struct xe_device *xe);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.