Commit 37a48adf authored by Thomas Zimmermann's avatar Thomas Zimmermann

drm/vram: Add kmap ref-counting to GEM VRAM objects

The kmap and kunmap operations of GEM VRAM buffers can now be called
in interleaving pairs. The first call to drm_gem_vram_kmap() maps the
buffer's memory to kernel address space and the final call to
drm_gem_vram_kunmap() unmaps the memory. Intermediate calls to these
functions increment or decrement a reference counter.

This change allows for keeping buffer memory mapped for longer and
minimizes the amount of changes to TLB, page tables, etc.

v4:
	* lock in kmap()/kunmap() with ttm_bo_reserve()
Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Reviewed-by: Gerd Hoffmann <kraxel@redhat.com>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Reported-and-tested-by: Davidlohr Bueso <dbueso@suse.de>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Link: https://patchwork.freedesktop.org/patch/msgid/20190906122056.32018-2-tzimmermann@suse.de
parent e5ef909c
...@@ -26,6 +26,9 @@ static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo) ...@@ -26,6 +26,9 @@ static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo)
* TTM buffer object in 'bo' has already been cleaned * TTM buffer object in 'bo' has already been cleaned
* up; only release the GEM object. * up; only release the GEM object.
*/ */
WARN_ON(gbo->kmap_use_count);
drm_gem_object_release(&gbo->bo.base); drm_gem_object_release(&gbo->bo.base);
} }
...@@ -283,6 +286,34 @@ int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo) ...@@ -283,6 +286,34 @@ int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
} }
EXPORT_SYMBOL(drm_gem_vram_unpin); EXPORT_SYMBOL(drm_gem_vram_unpin);
/* Maps the buffer into kernel address space; the caller must hold the
 * BO reservation. Only the first user performs the actual ttm_bo_kmap();
 * subsequent calls merely take another reference on the existing mapping
 * via @kmap_use_count. Returns the kernel virtual address, NULL when the
 * buffer is not mapped (also clearing *is_iomem if provided), or an
 * ERR_PTR-encoded error if ttm_bo_kmap() fails.
 */
static void *drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo,
				      bool map, bool *is_iomem)
{
	struct ttm_bo_kmap_obj *kmap = &gbo->kmap;

	/* Establish the mapping only on first use, and only if the caller
	 * asked for one and none already exists.
	 */
	if (!gbo->kmap_use_count && !kmap->virtual && map) {
		int ret = ttm_bo_kmap(&gbo->bo, 0, gbo->bo.num_pages, kmap);

		if (ret)
			return ERR_PTR(ret);
	}

	if (!kmap->virtual) {
		/* Not mapped; do not take a reference. */
		if (is_iomem)
			*is_iomem = false;
		return NULL;
	}

	++gbo->kmap_use_count;

	if (is_iomem)
		return ttm_kmap_obj_virtual(kmap, is_iomem);
	return kmap->virtual;
}
/** /**
* drm_gem_vram_kmap() - Maps a GEM VRAM object into kernel address space * drm_gem_vram_kmap() - Maps a GEM VRAM object into kernel address space
* @gbo: the GEM VRAM object * @gbo: the GEM VRAM object
...@@ -304,40 +335,48 @@ void *drm_gem_vram_kmap(struct drm_gem_vram_object *gbo, bool map, ...@@ -304,40 +335,48 @@ void *drm_gem_vram_kmap(struct drm_gem_vram_object *gbo, bool map,
bool *is_iomem) bool *is_iomem)
{ {
int ret; int ret;
struct ttm_bo_kmap_obj *kmap = &gbo->kmap; void *virtual;
if (kmap->virtual || !map) ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
goto out;
ret = ttm_bo_kmap(&gbo->bo, 0, gbo->bo.num_pages, kmap);
if (ret) if (ret)
return ERR_PTR(ret); return ERR_PTR(ret);
virtual = drm_gem_vram_kmap_locked(gbo, map, is_iomem);
ttm_bo_unreserve(&gbo->bo);
out: return virtual;
if (!is_iomem)
return kmap->virtual;
if (!kmap->virtual) {
*is_iomem = false;
return NULL;
}
return ttm_kmap_obj_virtual(kmap, is_iomem);
} }
EXPORT_SYMBOL(drm_gem_vram_kmap); EXPORT_SYMBOL(drm_gem_vram_kmap);
/** static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo)
* drm_gem_vram_kunmap() - Unmaps a GEM VRAM object
* @gbo: the GEM VRAM object
*/
void drm_gem_vram_kunmap(struct drm_gem_vram_object *gbo)
{ {
struct ttm_bo_kmap_obj *kmap = &gbo->kmap; struct ttm_bo_kmap_obj *kmap = &gbo->kmap;
if (WARN_ON_ONCE(!gbo->kmap_use_count))
return;
if (--gbo->kmap_use_count > 0)
return;
if (!kmap->virtual) if (!kmap->virtual)
return; return;
ttm_bo_kunmap(kmap); ttm_bo_kunmap(kmap);
kmap->virtual = NULL; kmap->virtual = NULL;
} }
/**
 * drm_gem_vram_kunmap() - Unmaps a GEM VRAM object
 * @gbo: the GEM VRAM object
 */
void drm_gem_vram_kunmap(struct drm_gem_vram_object *gbo)
{
	/* Serialize against concurrent map/unmap with the BO reservation. */
	int ret = ttm_bo_reserve(&gbo->bo, false, false, NULL);

	if (WARN_ONCE(ret, "ttm_bo_reserve_failed(): ret=%d\n", ret))
		return;
	drm_gem_vram_kunmap_locked(gbo);
	ttm_bo_unreserve(&gbo->bo);
}
EXPORT_SYMBOL(drm_gem_vram_kunmap); EXPORT_SYMBOL(drm_gem_vram_kunmap);
/** /**
......
...@@ -34,11 +34,25 @@ struct vm_area_struct; ...@@ -34,11 +34,25 @@ struct vm_area_struct;
* backed by VRAM. It can be used for simple framebuffer devices with * backed by VRAM. It can be used for simple framebuffer devices with
* dedicated memory. The buffer object can be evicted to system memory if * dedicated memory. The buffer object can be evicted to system memory if
* video memory becomes scarce. * video memory becomes scarce.
*
* GEM VRAM objects perform reference counting for pin and mapping
* operations. So a buffer object that has been pinned N times with
* drm_gem_vram_pin() must be unpinned N times with
* drm_gem_vram_unpin(). The same applies to pairs of
* drm_gem_vram_kmap() and drm_gem_vram_kunmap().
*/ */
struct drm_gem_vram_object { struct drm_gem_vram_object {
struct ttm_buffer_object bo; struct ttm_buffer_object bo;
struct ttm_bo_kmap_obj kmap; struct ttm_bo_kmap_obj kmap;
/**
* @kmap_use_count:
*
* Reference count on the virtual address.
 * The address is un-mapped when the count reaches zero.
*/
unsigned int kmap_use_count;
/* Supported placements are %TTM_PL_VRAM and %TTM_PL_SYSTEM */ /* Supported placements are %TTM_PL_VRAM and %TTM_PL_SYSTEM */
struct ttm_placement placement; struct ttm_placement placement;
struct ttm_place placements[2]; struct ttm_place placements[2];
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment