Commit 1a545ed7 authored by Chang, Bruce, committed by Rodrigo Vivi

drm/xe: fix pvc unload issue

Currently, unloading the pvc driver generates a NULL dereference,
with the call stack below.

[ 4850.618000] Call Trace:
[ 4850.620740]  <TASK>
[ 4850.623134]  ttm_bo_cleanup_memtype_use+0x3f/0x50 [ttm]
[ 4850.628661]  ttm_bo_release+0x154/0x2c0 [ttm]
[ 4850.633317]  ? drm_buddy_fini+0x62/0x80 [drm_buddy]
[ 4850.638487]  ? __kmem_cache_free+0x27d/0x2c0
[ 4850.643054]  ttm_bo_put+0x38/0x60 [ttm]
[ 4850.647190]  xe_gem_object_free+0x1f/0x30 [xe]
[ 4850.651945]  drm_gem_object_free+0x1e/0x30 [drm]
[ 4850.656904]  ggtt_fini_noalloc+0x9d/0xe0 [xe]
[ 4850.661574]  drm_managed_release+0xb5/0x150 [drm]
[ 4850.666617]  drm_dev_release+0x30/0x50 [drm]
[ 4850.671209]  devm_drm_dev_init_release+0x3c/0x60 [drm]

There are a couple of issues, but the main one is that TTM has only
one TTM_PL_TT region, while pvc has 2 tiles and tries to set up one
TTM_PL_TT manager per tile, so the second setup overwrites the first.

During unload, the first tile resets the TTM_PL_TT manager, and when
the second tile then tries to free its BOs it hits the NULL
dereference, since the manager has already been reset to 0.

The fix is to use one global TTM_PL_TT manager.
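
For context, the overwrite happens because TTM keeps a single per-device
manager slot for each placement type, so the second tile's registration
for XE_PL_TT simply replaces the first tile's. A simplified sketch of the
TTM helper involved (paraphrased from include/drm/ttm/ttm_device.h, not
the exact upstream body):

static inline void ttm_set_driver_manager(struct ttm_device *bdev, int type,
					  struct ttm_resource_manager *manager)
{
	/* One slot per placement type for the whole device: calling this
	 * twice for XE_PL_TT (once per tile) leaves only the second
	 * registration in place, and the first tile's fini action later
	 * resets that shared state while the other tile's BOs still
	 * reference it. */
	bdev->man_drv[type] = manager;
}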

v2: make gtt mgr global and change the name to sys_mgr

Cc: Stuart Summers <stuart.summers@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Vivi, Rodrigo <rodrigo.vivi@intel.com>
Signed-off-by: Bruce Chang <yu.bruce.chang@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 96578d10
......@@ -88,7 +88,7 @@ xe-y += xe_bb.o \
xe_step.o \
xe_sync.o \
xe_trace.o \
xe_ttm_gtt_mgr.o \
xe_ttm_sys_mgr.o \
xe_ttm_stolen_mgr.o \
xe_ttm_vram_mgr.o \
xe_tuning.o \
......
......@@ -27,6 +27,7 @@
#include "xe_pm.h"
#include "xe_query.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_ttm_sys_mgr.h"
#include "xe_vm.h"
#include "xe_vm_madvise.h"
#include "xe_wait_user_fence.h"
......@@ -262,6 +263,8 @@ int xe_device_probe(struct xe_device *xe)
if (err)
goto err_irq_shutdown;
xe_ttm_sys_mgr_init(xe);
for_each_gt(gt, xe, id) {
err = xe_gt_init_noalloc(gt);
if (err)
......
......@@ -116,4 +116,5 @@ static inline bool xe_device_has_flat_ccs(struct xe_device *xe)
}
u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size);
#endif
......@@ -134,6 +134,8 @@ struct xe_device {
/** @mapping: pointer to VRAM mappable space */
void *__iomem mapping;
} vram;
/** @sys_mgr: system TTM manager */
struct ttm_resource_manager sys_mgr;
} mem;
/** @usm: unified memory state */
......
......@@ -36,7 +36,6 @@
#include "xe_ring_ops.h"
#include "xe_sa.h"
#include "xe_sched_job.h"
#include "xe_ttm_gtt_mgr.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_tuning.h"
#include "xe_uc.h"
......@@ -77,16 +76,11 @@ int xe_gt_alloc(struct xe_device *xe, struct xe_gt *gt)
if (!gt->mem.vram_mgr)
return -ENOMEM;
gt->mem.gtt_mgr = drmm_kzalloc(drm, sizeof(*gt->mem.gtt_mgr),
GFP_KERNEL);
if (!gt->mem.gtt_mgr)
return -ENOMEM;
} else {
struct xe_gt *full_gt = xe_find_full_gt(gt);
gt->mem.ggtt = full_gt->mem.ggtt;
gt->mem.vram_mgr = full_gt->mem.vram_mgr;
gt->mem.gtt_mgr = full_gt->mem.gtt_mgr;
}
gt->ordered_wq = alloc_ordered_workqueue("gt-ordered-wq", 0);
......@@ -98,26 +92,14 @@ static int gt_ttm_mgr_init(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
int err;
struct sysinfo si;
u64 gtt_size;
si_meminfo(&si);
gtt_size = (u64)si.totalram * si.mem_unit * 3/4;
if (gt->mem.vram.size) {
err = xe_ttm_vram_mgr_init(gt, gt->mem.vram_mgr);
if (err)
return err;
gtt_size = min(max((XE_DEFAULT_GTT_SIZE_MB << 20),
(u64)gt->mem.vram.size),
gtt_size);
xe->info.mem_region_mask |= BIT(gt->info.vram_id) << 1;
}
err = xe_ttm_gtt_mgr_init(gt, gt->mem.gtt_mgr, gtt_size);
if (err)
return err;
return 0;
}
......
......@@ -162,8 +162,6 @@ struct xe_gt {
} vram;
/** @vram_mgr: VRAM TTM manager */
struct xe_ttm_vram_mgr *vram_mgr;
/** @gtt_mr: GTT TTM manager */
struct xe_ttm_gtt_mgr *gtt_mgr;
/** @ggtt: Global graphics translation table */
struct xe_ggtt *ggtt;
} mem;
......
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2022 Intel Corporation
*/
#ifndef _XE_TTGM_GTT_MGR_H_
#define _XE_TTGM_GTT_MGR_H_
#include "xe_ttm_gtt_mgr_types.h"
struct xe_gt;
int xe_ttm_gtt_mgr_init(struct xe_gt *gt, struct xe_ttm_gtt_mgr *mgr,
u64 gtt_size);
#endif
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2022 Intel Corporation
*/
#ifndef _XE_TTM_GTT_MGR_TYPES_H_
#define _XE_TTM_GTT_MGR_TYPES_H_
#include <drm/ttm/ttm_device.h>
struct xe_gt;
struct xe_ttm_gtt_mgr {
struct xe_gt *gt;
struct ttm_resource_manager manager;
};
#endif
......@@ -4,6 +4,8 @@
* Copyright (C) 2021-2002 Red Hat
*/
#include "xe_ttm_sys_mgr.h"
#include <drm/drm_managed.h>
#include <drm/ttm/ttm_placement.h>
......@@ -12,31 +14,24 @@
#include "xe_bo.h"
#include "xe_gt.h"
#include "xe_ttm_gtt_mgr.h"
struct xe_ttm_gtt_node {
struct xe_ttm_sys_node {
struct ttm_buffer_object *tbo;
struct ttm_range_mgr_node base;
};
static inline struct xe_ttm_gtt_mgr *
to_gtt_mgr(struct ttm_resource_manager *man)
{
return container_of(man, struct xe_ttm_gtt_mgr, manager);
}
static inline struct xe_ttm_gtt_node *
to_xe_ttm_gtt_node(struct ttm_resource *res)
static inline struct xe_ttm_sys_node *
to_xe_ttm_sys_node(struct ttm_resource *res)
{
return container_of(res, struct xe_ttm_gtt_node, base.base);
return container_of(res, struct xe_ttm_sys_node, base.base);
}
static int xe_ttm_gtt_mgr_new(struct ttm_resource_manager *man,
static int xe_ttm_sys_mgr_new(struct ttm_resource_manager *man,
struct ttm_buffer_object *tbo,
const struct ttm_place *place,
struct ttm_resource **res)
{
struct xe_ttm_gtt_node *node;
struct xe_ttm_sys_node *node;
int r;
node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL);
......@@ -66,32 +61,31 @@ static int xe_ttm_gtt_mgr_new(struct ttm_resource_manager *man,
return r;
}
static void xe_ttm_gtt_mgr_del(struct ttm_resource_manager *man,
static void xe_ttm_sys_mgr_del(struct ttm_resource_manager *man,
struct ttm_resource *res)
{
struct xe_ttm_gtt_node *node = to_xe_ttm_gtt_node(res);
struct xe_ttm_sys_node *node = to_xe_ttm_sys_node(res);
ttm_resource_fini(man, res);
kfree(node);
}
static void xe_ttm_gtt_mgr_debug(struct ttm_resource_manager *man,
static void xe_ttm_sys_mgr_debug(struct ttm_resource_manager *man,
struct drm_printer *printer)
{
}
static const struct ttm_resource_manager_func xe_ttm_gtt_mgr_func = {
.alloc = xe_ttm_gtt_mgr_new,
.free = xe_ttm_gtt_mgr_del,
.debug = xe_ttm_gtt_mgr_debug
static const struct ttm_resource_manager_func xe_ttm_sys_mgr_func = {
.alloc = xe_ttm_sys_mgr_new,
.free = xe_ttm_sys_mgr_del,
.debug = xe_ttm_sys_mgr_debug
};
static void ttm_gtt_mgr_fini(struct drm_device *drm, void *arg)
static void ttm_sys_mgr_fini(struct drm_device *drm, void *arg)
{
struct xe_ttm_gtt_mgr *mgr = arg;
struct xe_device *xe = gt_to_xe(mgr->gt);
struct ttm_resource_manager *man = &mgr->manager;
struct xe_device *xe = (struct xe_device *)arg;
struct ttm_resource_manager *man = &xe->mem.sys_mgr;
int err;
ttm_resource_manager_set_used(man, false);
......@@ -104,27 +98,18 @@ static void ttm_gtt_mgr_fini(struct drm_device *drm, void *arg)
ttm_set_driver_manager(&xe->ttm, XE_PL_TT, NULL);
}
int xe_ttm_gtt_mgr_init(struct xe_gt *gt, struct xe_ttm_gtt_mgr *mgr,
u64 gtt_size)
int xe_ttm_sys_mgr_init(struct xe_device *xe)
{
struct xe_device *xe = gt_to_xe(gt);
struct ttm_resource_manager *man = &mgr->manager;
int err;
XE_BUG_ON(xe_gt_is_media_type(gt));
struct ttm_resource_manager *man = &xe->mem.sys_mgr;
struct sysinfo si;
u64 gtt_size;
mgr->gt = gt;
si_meminfo(&si);
gtt_size = (u64)si.totalram * si.mem_unit * 3/4;
man->use_tt = true;
man->func = &xe_ttm_gtt_mgr_func;
man->func = &xe_ttm_sys_mgr_func;
ttm_resource_manager_init(man, &xe->ttm, gtt_size >> PAGE_SHIFT);
ttm_set_driver_manager(&xe->ttm, XE_PL_TT, &mgr->manager);
ttm_set_driver_manager(&xe->ttm, XE_PL_TT, man);
ttm_resource_manager_set_used(man, true);
err = drmm_add_action_or_reset(&xe->drm, ttm_gtt_mgr_fini, mgr);
if (err)
return err;
return 0;
return drmm_add_action_or_reset(&xe->drm, ttm_sys_mgr_fini, xe);
}
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#ifndef _XE_TTM_SYS_MGR_H_
#define _XE_TTM_SYS_MGR_H_
struct xe_device;
int xe_ttm_sys_mgr_init(struct xe_device *xe);
#endif