Commit 49d535d6 authored by Zack Rusin

drm/vmwgfx: Remove explicit transparent hugepages support

Old versions of the svga device used to export virtual vram, the handling
of which was optimized on top of transparent hugepage support. Only very
old devices (OpenGL 2.1 support and earlier) used this code, and at this
point the performance differences are negligible.

Because the code requires very old hardware versions to run, it has
been largely untested and unused for a long time.

Furthermore, the removal of ttm hugepages support in
commit 0d979509 ("drm/ttm: remove ttm_bo_vm_insert_huge()")
broke the coherency mode in vmwgfx when running with hugepages.

Fixes: 0d979509 ("drm/ttm: remove ttm_bo_vm_insert_huge()")
Signed-off-by: Zack Rusin <zackr@vmware.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Martin Krastev <krastevm@vmware.com>
Reviewed-by: Maaz Mombasawala <mombasawalam@vmware.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20211215184147.3688785-2-zack@kde.org
parent 72345114
drivers/gpu/drm/vmwgfx/Makefile
@@ -13,6 +13,5 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_hashtab.o vmwgfx_kms.o vmwgfx_d
	    vmwgfx_gem.o

vmwgfx-$(CONFIG_DRM_FBDEV_EMULATION) += vmwgfx_fb.o
vmwgfx-$(CONFIG_TRANSPARENT_HUGEPAGE) += vmwgfx_thp.o

obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -701,23 +701,15 @@ static int vmw_dma_masks(struct vmw_private *dev_priv)
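/*
 * VRAM manager selection: with CONFIG_TRANSPARENT_HUGEPAGE the driver
 * installs its own huge-page-aligned range manager (vmwgfx_thp.c),
 * otherwise the generic TTM range manager covers VRAM.
 */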
static int vmw_vram_manager_init(struct vmw_private *dev_priv)
{
	int ret;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	ret = vmw_thp_init(dev_priv);
#else
	ret = ttm_range_man_init(&dev_priv->bdev, TTM_PL_VRAM, false,
				 dev_priv->vram_size >> PAGE_SHIFT);
#endif
	ttm_resource_manager_set_used(ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM), false);
	return ret;
}
static void vmw_vram_manager_fini(struct vmw_private *dev_priv)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	vmw_thp_fini(dev_priv);
#else
	ttm_range_man_fini(&dev_priv->bdev, TTM_PL_VRAM);
#endif
}

static int vmw_setup_pci_resources(struct vmw_private *dev,
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1564,11 +1564,6 @@ void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf);
vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf);

/* Transparent hugepage support - vmwgfx_thp.c */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int vmw_thp_init(struct vmw_private *dev_priv);
void vmw_thp_fini(struct vmw_private *dev_priv);
#endif

/**
 * VMW_DEBUG_KMS - Debug output for kernel mode-setting
drivers/gpu/drm/vmwgfx/vmwgfx_thp.c (deleted)
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Huge page-table-entry support for IO memory.
 *
 * Copyright (C) 2007-2019 Vmware, Inc. All rights reserved.
 */

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_range_manager.h>
/**
 * struct vmw_thp_manager - Range manager implementing huge page alignment
 *
 * @manager: TTM resource manager.
 * @mm: The underlying range manager. Protected by @lock.
 * @lock: Manager lock.
 */
struct vmw_thp_manager {
	struct ttm_resource_manager manager;
	struct drm_mm mm;
	spinlock_t lock;
};

static struct vmw_thp_manager *to_thp_manager(struct ttm_resource_manager *man)
{
	return container_of(man, struct vmw_thp_manager, manager);
}

static const struct ttm_resource_manager_func vmw_thp_func;
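
/*
 * Attempt an allocation aligned to align_pages. The huge-page alignment
 * is only usable when it is compatible with the BO's own page_alignment;
 * otherwise return -ENOSPC so the caller can fall back to a smaller
 * alignment.
 */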
static int vmw_thp_insert_aligned(struct ttm_buffer_object *bo,
				  struct drm_mm *mm, struct drm_mm_node *node,
				  unsigned long align_pages,
				  const struct ttm_place *place,
				  struct ttm_resource *mem,
				  unsigned long lpfn,
				  enum drm_mm_insert_mode mode)
{
	if (align_pages >= bo->page_alignment &&
	    (!bo->page_alignment || align_pages % bo->page_alignment == 0)) {
		return drm_mm_insert_node_in_range(mm, node,
						   mem->num_pages,
						   align_pages, 0,
						   place->fpfn, lpfn, mode);
	}

	return -ENOSPC;
}
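
/*
 * Allocate a VRAM range for @bo: try PUD-size alignment first (when the
 * architecture supports PUD-level transparent hugepages), then PMD-size
 * alignment, and finally fall back to the BO's own page_alignment.
 */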
static int vmw_thp_get_node(struct ttm_resource_manager *man,
			    struct ttm_buffer_object *bo,
			    const struct ttm_place *place,
			    struct ttm_resource **res)
{
	struct vmw_thp_manager *rman = to_thp_manager(man);
	struct drm_mm *mm = &rman->mm;
	struct ttm_range_mgr_node *node;
	unsigned long align_pages;
	unsigned long lpfn;
	enum drm_mm_insert_mode mode = DRM_MM_INSERT_BEST;
	int ret;

	node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	ttm_resource_init(bo, place, &node->base);

	lpfn = place->lpfn;
	if (!lpfn)
		lpfn = man->size;

	mode = DRM_MM_INSERT_BEST;
	if (place->flags & TTM_PL_FLAG_TOPDOWN)
		mode = DRM_MM_INSERT_HIGH;

	spin_lock(&rman->lock);
	if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) {
		align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT);
		if (node->base.num_pages >= align_pages) {
			ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
						     align_pages, place,
						     &node->base, lpfn, mode);
			if (!ret)
				goto found_unlock;
		}
	}

	align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT);
	if (node->base.num_pages >= align_pages) {
		ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
					     align_pages, place, &node->base,
					     lpfn, mode);
		if (!ret)
			goto found_unlock;
	}

	ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
					  node->base.num_pages,
					  bo->page_alignment, 0,
					  place->fpfn, lpfn, mode);
found_unlock:
	spin_unlock(&rman->lock);

	if (unlikely(ret)) {
		kfree(node);
	} else {
		node->base.start = node->mm_nodes[0].start;
		*res = &node->base;
	}

	return ret;
}
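
/* Return a range to the underlying drm_mm and free the bookkeeping node. */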
static void vmw_thp_put_node(struct ttm_resource_manager *man,
			     struct ttm_resource *res)
{
	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
	struct vmw_thp_manager *rman = to_thp_manager(man);

	spin_lock(&rman->lock);
	drm_mm_remove_node(&node->mm_nodes[0]);
	spin_unlock(&rman->lock);

	kfree(node);
}
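
/*
 * Create the manager, initialize the underlying drm_mm to cover all of
 * VRAM, and register it with TTM as the TTM_PL_VRAM manager.
 */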
int vmw_thp_init(struct vmw_private *dev_priv)
{
	struct vmw_thp_manager *rman;

	rman = kzalloc(sizeof(*rman), GFP_KERNEL);
	if (!rman)
		return -ENOMEM;

	ttm_resource_manager_init(&rman->manager,
				  dev_priv->vram_size >> PAGE_SHIFT);

	rman->manager.func = &vmw_thp_func;
	drm_mm_init(&rman->mm, 0, rman->manager.size);
	spin_lock_init(&rman->lock);

	ttm_set_driver_manager(&dev_priv->bdev, TTM_PL_VRAM, &rman->manager);
	ttm_resource_manager_set_used(&rman->manager, true);
	return 0;
}
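
/*
 * Tear down: mark the manager unused, evict all buffers from VRAM, then
 * take down the drm_mm and unregister the manager from TTM.
 */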
void vmw_thp_fini(struct vmw_private *dev_priv)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
	struct vmw_thp_manager *rman = to_thp_manager(man);
	struct drm_mm *mm = &rman->mm;
	int ret;

	ttm_resource_manager_set_used(man, false);

	ret = ttm_resource_manager_evict_all(&dev_priv->bdev, man);
	if (ret)
		return;
	spin_lock(&rman->lock);
	drm_mm_clean(mm);
	drm_mm_takedown(mm);
	spin_unlock(&rman->lock);
	ttm_resource_manager_cleanup(man);
	ttm_set_driver_manager(&dev_priv->bdev, TTM_PL_VRAM, NULL);
	kfree(rman);
}
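
/* Dump the drm_mm state to a debugfs printer under the manager lock. */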
static void vmw_thp_debug(struct ttm_resource_manager *man,
			  struct drm_printer *printer)
{
	struct vmw_thp_manager *rman = to_thp_manager(man);

	spin_lock(&rman->lock);
	drm_mm_print(&rman->mm, printer);
	spin_unlock(&rman->lock);
}

static const struct ttm_resource_manager_func vmw_thp_func = {
	.alloc = vmw_thp_get_node,
	.free = vmw_thp_put_node,
	.debug = vmw_thp_debug
};