Commit 8d153f71 authored by Thomas Hellstrom, committed by Dave Airlie

drm: update user token hashing and map handles

Keep hashed user tokens, with the following changes:
32-bit physical device addresses are mapped directly to user tokens. No
    duplicate maps are allowed, and the addresses are assumed to be outside
    of the range 0x10000000 through 0x30000000. The user token is identical
    to the 32-bit physical start address of the map.
64-bit physical device addresses are mapped to user tokens in the range
    0x10000000 to 0x30000000 with page-size increments. The user token should
    not be interpreted as an address.
Other map types, like the upcoming TTM maps, are mapped to user tokens in the
    range 0x10000000 to 0x30000000 with page-size increments. The user token
    should not be interpreted as an address.

Implement hashed map lookups.
Signed-off-by: Dave Airlie <airlied@linux.ie>
parent 8669cbc5
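For illustration only (not part of the patch): assuming PAGE_SHIFT == 12, the hashed-token window described above follows from the 32 - PAGE_SHIFT - 3 key bits that drm_map_handle() requests from drm_ht_just_insert_please(), shifted left by PAGE_SHIFT and offset by DRM_MAP_HASH_OFFSET. A minimal standalone sketch of that arithmetic:

#include <stdio.h>

/* Illustrative sketch only; PAGE_SHIFT == 12 is an assumption, and the
 * offset and bit count mirror the drm_map_handle() call in the diff below. */
#define PAGE_SHIFT          12
#define DRM_MAP_HASH_OFFSET 0x10000000UL

int main(void)
{
        unsigned long bits = 32 - PAGE_SHIFT - 3;       /* key bits handed to the hash table */
        unsigned long first = DRM_MAP_HASH_OFFSET;      /* token for key 0 */
        unsigned long last = DRM_MAP_HASH_OFFSET +
                (((1UL << bits) - 1) << PAGE_SHIFT);     /* token for the highest key */

        /* Prints: hashed token range 0x10000000 .. 0x2ffff000, step 0x1000 */
        printf("hashed token range 0x%lx .. 0x%lx, step 0x%lx\n",
               first, last, 1UL << PAGE_SHIFT);
        return 0;
}

Tokens for 32-bit physical addresses bypass this window entirely; they are inserted into the hash table with the address itself as the key.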
@@ -140,6 +140,7 @@
 #define DRM_MEM_HASHTAB 23
 #define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
+#define DRM_MAP_HASH_OFFSET 0x10000000
 /*@}*/
@@ -485,6 +486,7 @@ typedef struct drm_sigdata {
  */
 typedef struct drm_map_list {
         struct list_head head;          /**< list head */
+        drm_hash_item_t hash;
         drm_map_t *map;                 /**< mapping */
         unsigned int user_token;
 } drm_map_list_t;
@@ -662,6 +664,7 @@ typedef struct drm_device {
         /*@{ */
         drm_map_list_t *maplist;        /**< Linked list of regions */
         int map_count;                  /**< Number of mappable regions */
+        drm_open_hash_t map_hash;       /**< User token hash table for maps */
         /** \name Context handle management */
         /*@{ */
@@ -65,43 +65,27 @@ static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
         return NULL;
 }
-/*
- * Used to allocate 32-bit handles for mappings.
- */
-#define START_RANGE 0x10000000
-#define END_RANGE 0x40000000
-#ifdef _LP64
-static __inline__ unsigned int HandleID(unsigned long lhandle,
-                                        drm_device_t *dev)
+int drm_map_handle(drm_device_t *dev, drm_hash_item_t *hash,
+                   unsigned long user_token, int hashed_handle)
 {
-        static unsigned int map32_handle = START_RANGE;
-        unsigned int hash;
-        if (lhandle & 0xffffffff00000000) {
-                hash = map32_handle;
-                map32_handle += PAGE_SIZE;
-                if (map32_handle > END_RANGE)
-                        map32_handle = START_RANGE;
-        } else
-                hash = lhandle;
-        while (1) {
-                drm_map_list_t *_entry;
-                list_for_each_entry(_entry, &dev->maplist->head, head) {
-                        if (_entry->user_token == hash)
-                                break;
-                }
-                if (&_entry->head == &dev->maplist->head)
-                        return hash;
+        int use_hashed_handle;
+#if (BITS_PER_LONG == 64)
+        use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
+#elif (BITS_PER_LONG == 32)
+        use_hashed_handle = hashed_handle;
+#else
+#error Unsupported long size. Neither 64 nor 32 bits.
+#endif
-                hash += PAGE_SIZE;
-                map32_handle += PAGE_SIZE;
+        if (use_hashed_handle) {
+                return drm_ht_just_insert_please(&dev->map_hash, hash,
+                                                 user_token, 32 - PAGE_SHIFT - 3,
+                                                 PAGE_SHIFT, DRM_MAP_HASH_OFFSET);
+        } else {
+                hash->key = user_token;
+                return drm_ht_insert_item(&dev->map_hash, hash);
         }
 }
-#else
-# define HandleID(x,dev) (unsigned int)(x)
-#endif
 /**
  * Ioctl to specify a range of memory that is available for mapping by a non-root process.
@@ -123,6 +107,8 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
         drm_map_t *map;
         drm_map_list_t *list;
         drm_dma_handle_t *dmah;
+        unsigned long user_token;
+        int ret;
         map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
         if (!map)
@@ -257,11 +243,20 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
         mutex_lock(&dev->struct_mutex);
         list_add(&list->head, &dev->maplist->head);
         /* Assign a 32-bit handle */
         /* We do it here so that dev->struct_mutex protects the increment */
-        list->user_token = HandleID(map->type == _DRM_SHM
-                                    ? (unsigned long)map->handle
-                                    : map->offset, dev);
+        user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
+                map->offset;
+        ret = drm_map_handle(dev, &list->hash, user_token, FALSE);
+        if (ret) {
+                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+                drm_free(list, sizeof(*list), DRM_MEM_MAPS);
+                mutex_unlock(&dev->struct_mutex);
+                return ret;
+        }
+        list->user_token = list->hash.key;
         mutex_unlock(&dev->struct_mutex);
         *maplist = list;
@@ -346,6 +341,7 @@ int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
                 if (r_list->map == map) {
                         list_del(list);
+                        drm_ht_remove_key(&dev->map_hash, r_list->user_token);
                         drm_free(list, sizeof(*list), DRM_MEM_MAPS);
                         break;
                 }
@@ -35,7 +35,7 @@
 #ifndef DRM_HASHTAB_H
 #define DRM_HASHTAB_H
-#define drm_hash_entry(_ptr, _type, _member) list_entry(_ptr, _type, _member)
+#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
 typedef struct drm_hash_item{
         struct hlist_node head;
@@ -75,6 +75,10 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
         if (dev->maplist == NULL)
                 return -ENOMEM;
         INIT_LIST_HEAD(&dev->maplist->head);
+        if (drm_ht_create(&dev->map_hash, 12)) {
+                drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
+                return -ENOMEM;
+        }
         /* the DRM has 6 basic counters */
         dev->counters = 6;
@@ -59,7 +59,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
         drm_device_t *dev = priv->head->dev;
         drm_map_t *map = NULL;
         drm_map_list_t *r_list;
-        struct list_head *list;
+        drm_hash_item_t *hash;
         /*
          * Find the right map
@@ -70,14 +70,11 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
         if (!dev->agp || !dev->agp->cant_use_aperture)
                 goto vm_nopage_error;
-        list_for_each(list, &dev->maplist->head) {
-                r_list = list_entry(list, drm_map_list_t, head);
-                map = r_list->map;
-                if (!map)
-                        continue;
-                if (r_list->user_token == (vma->vm_pgoff << PAGE_SHIFT))
-                        break;
-        }
+        if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff << PAGE_SHIFT, &hash))
+                goto vm_nopage_error;
+        r_list = drm_hash_entry(hash, drm_map_list_t, hash);
+        map = r_list->map;
         if (map && map->type == _DRM_AGP) {
                 unsigned long offset = address - vma->vm_start;
@@ -521,9 +518,8 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
         drm_file_t *priv = filp->private_data;
         drm_device_t *dev = priv->head->dev;
         drm_map_t *map = NULL;
-        drm_map_list_t *r_list;
         unsigned long offset = 0;
-        struct list_head *list;
+        drm_hash_item_t *hash;
         DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
                   vma->vm_start, vma->vm_end, vma->vm_pgoff << PAGE_SHIFT);
@@ -543,23 +539,12 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
             )
                 return drm_mmap_dma(filp, vma);
-        /* A sequential search of a linked list is
-           fine here because: 1) there will only be
-           about 5-10 entries in the list and, 2) a
-           DRI client only has to do this mapping
-           once, so it doesn't have to be optimized
-           for performance, even if the list was a
-           bit longer. */
-        list_for_each(list, &dev->maplist->head) {
-                r_list = list_entry(list, drm_map_list_t, head);
-                map = r_list->map;
-                if (!map)
-                        continue;
-                if (r_list->user_token == vma->vm_pgoff << PAGE_SHIFT)
-                        break;
-        }
+        if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff << PAGE_SHIFT, &hash)) {
+                DRM_ERROR("Could not find map\n");
+                return -EINVAL;
+        }
+        map = drm_hash_entry(hash, drm_map_list_t, hash)->map;
         if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
                 return -EPERM;