Commit ee5d2a8e authored by Christian König

drm/ttm: wire up the new pool as default one v2

Provide the necessary parameters in all drivers and use the new pool
allocator when no driver-specific function is provided.

v2: fix the GEM VRAM helpers
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Dave Airlie <airlied@redhat.com>
Reviewed-by: Madhav Chauhan <madhav.chauhan@amd.com>
Tested-by: Huang Rui <ray.huang@amd.com>
Link: https://patchwork.freedesktop.org/patch/397081/?series=83051&rev=1
parent d099fc8f
...@@ -1914,10 +1914,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) ...@@ -1914,10 +1914,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
mutex_init(&adev->mman.gtt_window_lock); mutex_init(&adev->mman.gtt_window_lock);
/* No others user of address space so set it to 0 */ /* No others user of address space so set it to 0 */
r = ttm_bo_device_init(&adev->mman.bdev, r = ttm_bo_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
&amdgpu_bo_driver,
adev_to_drm(adev)->anon_inode->i_mapping, adev_to_drm(adev)->anon_inode->i_mapping,
adev_to_drm(adev)->vma_offset_manager, adev_to_drm(adev)->vma_offset_manager,
adev->need_swiotlb,
dma_addressing_limited(adev->dev)); dma_addressing_limited(adev->dev));
if (r) { if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r); DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
......
...@@ -1045,10 +1045,10 @@ static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev, ...@@ -1045,10 +1045,10 @@ static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
vmm->vram_base = vram_base; vmm->vram_base = vram_base;
vmm->vram_size = vram_size; vmm->vram_size = vram_size;
ret = ttm_bo_device_init(&vmm->bdev, &bo_driver, ret = ttm_bo_device_init(&vmm->bdev, &bo_driver, dev->dev,
dev->anon_inode->i_mapping, dev->anon_inode->i_mapping,
dev->vma_offset_manager, dev->vma_offset_manager,
true); false, true);
if (ret) if (ret)
return ret; return ret;
......
...@@ -281,6 +281,7 @@ nouveau_ttm_init(struct nouveau_drm *drm) ...@@ -281,6 +281,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
struct nvkm_pci *pci = device->pci; struct nvkm_pci *pci = device->pci;
struct nvif_mmu *mmu = &drm->client.mmu; struct nvif_mmu *mmu = &drm->client.mmu;
struct drm_device *dev = drm->dev; struct drm_device *dev = drm->dev;
bool need_swiotlb = false;
int typei, ret; int typei, ret;
ret = nouveau_ttm_init_host(drm, 0); ret = nouveau_ttm_init_host(drm, 0);
...@@ -315,11 +316,14 @@ nouveau_ttm_init(struct nouveau_drm *drm) ...@@ -315,11 +316,14 @@ nouveau_ttm_init(struct nouveau_drm *drm)
drm->agp.cma = pci->agp.cma; drm->agp.cma = pci->agp.cma;
} }
ret = ttm_bo_device_init(&drm->ttm.bdev, #if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
&nouveau_bo_driver, need_swiotlb = !!swiotlb_nr_tbl();
dev->anon_inode->i_mapping, #endif
dev->vma_offset_manager,
drm->client.mmu.dmabits <= 32 ? true : false); ret = ttm_bo_device_init(&drm->ttm.bdev, &nouveau_bo_driver,
drm->dev->dev, dev->anon_inode->i_mapping,
dev->vma_offset_manager, need_swiotlb,
drm->client.mmu.dmabits <= 32);
if (ret) { if (ret) {
NV_ERROR(drm, "error initialising bo driver, %d\n", ret); NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
return ret; return ret;
......
...@@ -194,11 +194,10 @@ int qxl_ttm_init(struct qxl_device *qdev) ...@@ -194,11 +194,10 @@ int qxl_ttm_init(struct qxl_device *qdev)
int num_io_pages; /* != rom->num_io_pages, we include surface0 */ int num_io_pages; /* != rom->num_io_pages, we include surface0 */
/* No others user of address space so set it to 0 */ /* No others user of address space so set it to 0 */
r = ttm_bo_device_init(&qdev->mman.bdev, r = ttm_bo_device_init(&qdev->mman.bdev, &qxl_bo_driver, NULL,
&qxl_bo_driver,
qdev->ddev.anon_inode->i_mapping, qdev->ddev.anon_inode->i_mapping,
qdev->ddev.vma_offset_manager, qdev->ddev.vma_offset_manager,
false); false, false);
if (r) { if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r); DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
return r; return r;
......
...@@ -846,10 +846,10 @@ int radeon_ttm_init(struct radeon_device *rdev) ...@@ -846,10 +846,10 @@ int radeon_ttm_init(struct radeon_device *rdev)
int r; int r;
/* No others user of address space so set it to 0 */ /* No others user of address space so set it to 0 */
r = ttm_bo_device_init(&rdev->mman.bdev, r = ttm_bo_device_init(&rdev->mman.bdev, &radeon_bo_driver, rdev->dev,
&radeon_bo_driver,
rdev->ddev->anon_inode->i_mapping, rdev->ddev->anon_inode->i_mapping,
rdev->ddev->vma_offset_manager, rdev->ddev->vma_offset_manager,
rdev->need_swiotlb,
dma_addressing_limited(&rdev->pdev->dev)); dma_addressing_limited(&rdev->pdev->dev));
if (r) { if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r); DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
......
...@@ -1283,6 +1283,8 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev) ...@@ -1283,6 +1283,8 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
pr_debug("Swap list %d was clean\n", i); pr_debug("Swap list %d was clean\n", i);
spin_unlock(&glob->lru_lock); spin_unlock(&glob->lru_lock);
ttm_pool_fini(&bdev->pool);
if (!ret) if (!ret)
ttm_bo_global_release(); ttm_bo_global_release();
...@@ -1307,9 +1309,10 @@ static void ttm_bo_init_sysman(struct ttm_bo_device *bdev) ...@@ -1307,9 +1309,10 @@ static void ttm_bo_init_sysman(struct ttm_bo_device *bdev)
int ttm_bo_device_init(struct ttm_bo_device *bdev, int ttm_bo_device_init(struct ttm_bo_device *bdev,
struct ttm_bo_driver *driver, struct ttm_bo_driver *driver,
struct device *dev,
struct address_space *mapping, struct address_space *mapping,
struct drm_vma_offset_manager *vma_manager, struct drm_vma_offset_manager *vma_manager,
bool need_dma32) bool use_dma_alloc, bool use_dma32)
{ {
struct ttm_bo_global *glob = &ttm_bo_glob; struct ttm_bo_global *glob = &ttm_bo_glob;
int ret; int ret;
...@@ -1324,12 +1327,13 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev, ...@@ -1324,12 +1327,13 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
bdev->driver = driver; bdev->driver = driver;
ttm_bo_init_sysman(bdev); ttm_bo_init_sysman(bdev);
ttm_pool_init(&bdev->pool, dev, use_dma_alloc, use_dma32);
bdev->vma_manager = vma_manager; bdev->vma_manager = vma_manager;
INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue); INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
INIT_LIST_HEAD(&bdev->ddestroy); INIT_LIST_HEAD(&bdev->ddestroy);
bdev->dev_mapping = mapping; bdev->dev_mapping = mapping;
bdev->need_dma32 = need_dma32; bdev->need_dma32 = use_dma32;
mutex_lock(&ttm_global_mutex); mutex_lock(&ttm_global_mutex);
list_add_tail(&bdev->device_list, &glob->device_list); list_add_tail(&bdev->device_list, &glob->device_list);
mutex_unlock(&ttm_global_mutex); mutex_unlock(&ttm_global_mutex);
......
...@@ -454,7 +454,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob) ...@@ -454,7 +454,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
} }
ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE)); ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE)); ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
ttm_pool_mgr_init(glob->zone_kernel->max_mem/(2*PAGE_SIZE)); ttm_pool_mgr_init(glob->zone_kernel->max_mem / (2 * PAGE_SIZE));
return 0; return 0;
out_no_zone: out_no_zone:
ttm_mem_global_release(glob); ttm_mem_global_release(glob);
......
...@@ -37,7 +37,6 @@ ...@@ -37,7 +37,6 @@
#include <linux/file.h> #include <linux/file.h>
#include <drm/drm_cache.h> #include <drm/drm_cache.h>
#include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
/** /**
* Allocates a ttm structure for the given BO. * Allocates a ttm structure for the given BO.
...@@ -321,7 +320,7 @@ int ttm_tt_populate(struct ttm_bo_device *bdev, ...@@ -321,7 +320,7 @@ int ttm_tt_populate(struct ttm_bo_device *bdev,
if (bdev->driver->ttm_tt_populate) if (bdev->driver->ttm_tt_populate)
ret = bdev->driver->ttm_tt_populate(bdev, ttm, ctx); ret = bdev->driver->ttm_tt_populate(bdev, ttm, ctx);
else else
ret = ttm_pool_populate(ttm, ctx); ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
if (ret) if (ret)
return ret; return ret;
...@@ -363,6 +362,6 @@ void ttm_tt_unpopulate(struct ttm_bo_device *bdev, ...@@ -363,6 +362,6 @@ void ttm_tt_unpopulate(struct ttm_bo_device *bdev,
if (bdev->driver->ttm_tt_unpopulate) if (bdev->driver->ttm_tt_unpopulate)
bdev->driver->ttm_tt_unpopulate(bdev, ttm); bdev->driver->ttm_tt_unpopulate(bdev, ttm);
else else
ttm_pool_unpopulate(ttm); ttm_pool_free(&bdev->pool, ttm);
ttm->page_flags &= ~TTM_PAGE_FLAG_PRIV_POPULATED; ttm->page_flags &= ~TTM_PAGE_FLAG_PRIV_POPULATED;
} }
...@@ -878,10 +878,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) ...@@ -878,10 +878,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
drm_vma_offset_manager_init(&dev_priv->vma_manager, drm_vma_offset_manager_init(&dev_priv->vma_manager,
DRM_FILE_PAGE_OFFSET_START, DRM_FILE_PAGE_OFFSET_START,
DRM_FILE_PAGE_OFFSET_SIZE); DRM_FILE_PAGE_OFFSET_SIZE);
ret = ttm_bo_device_init(&dev_priv->bdev, ret = ttm_bo_device_init(&dev_priv->bdev, &vmw_bo_driver,
&vmw_bo_driver, dev_priv->dev->dev,
dev->anon_inode->i_mapping, dev->anon_inode->i_mapping,
&dev_priv->vma_manager, &dev_priv->vma_manager,
dev_priv->map_mode == vmw_dma_alloc_coherent,
false); false);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
DRM_ERROR("Failed initializing TTM buffer object driver.\n"); DRM_ERROR("Failed initializing TTM buffer object driver.\n");
......
...@@ -42,6 +42,7 @@ ...@@ -42,6 +42,7 @@
#include "ttm_module.h" #include "ttm_module.h"
#include "ttm_placement.h" #include "ttm_placement.h"
#include "ttm_tt.h" #include "ttm_tt.h"
#include "ttm_pool.h"
/** /**
* struct ttm_bo_driver * struct ttm_bo_driver
...@@ -295,6 +296,7 @@ struct ttm_bo_device { ...@@ -295,6 +296,7 @@ struct ttm_bo_device {
* Protected by internal locks. * Protected by internal locks.
*/ */
struct drm_vma_offset_manager *vma_manager; struct drm_vma_offset_manager *vma_manager;
struct ttm_pool pool;
/* /*
* Protected by the global:lru lock. * Protected by the global:lru lock.
...@@ -395,11 +397,11 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev); ...@@ -395,11 +397,11 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev);
* @bdev: A pointer to a struct ttm_bo_device to initialize. * @bdev: A pointer to a struct ttm_bo_device to initialize.
* @glob: A pointer to an initialized struct ttm_bo_global. * @glob: A pointer to an initialized struct ttm_bo_global.
* @driver: A pointer to a struct ttm_bo_driver set up by the caller. * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
* @dev: The core kernel device pointer for DMA mappings and allocations.
* @mapping: The address space to use for this bo. * @mapping: The address space to use for this bo.
* @vma_manager: A pointer to a vma manager. * @vma_manager: A pointer to a vma manager.
* @file_page_offset: Offset into the device address space that is available * @use_dma_alloc: If coherent DMA allocation API should be used.
* for buffer data. This ensures compatibility with other users of the * @use_dma32: If we should use GFP_DMA32 for device memory allocations.
* address space.
* *
* Initializes a struct ttm_bo_device: * Initializes a struct ttm_bo_device:
* Returns: * Returns:
...@@ -407,9 +409,10 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev); ...@@ -407,9 +409,10 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev);
*/ */
int ttm_bo_device_init(struct ttm_bo_device *bdev, int ttm_bo_device_init(struct ttm_bo_device *bdev,
struct ttm_bo_driver *driver, struct ttm_bo_driver *driver,
struct device *dev,
struct address_space *mapping, struct address_space *mapping,
struct drm_vma_offset_manager *vma_manager, struct drm_vma_offset_manager *vma_manager,
bool need_dma32); bool use_dma_alloc, bool use_dma32);
/** /**
* ttm_bo_unmap_virtual * ttm_bo_unmap_virtual
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment