Commit 92c4eeb0 authored by Thomas Zimmermann

drm/udl: Remove flags field from struct udl_gem_object

The flags field in struct udl_gem_object controls mapping parameters: cached
access for local buffers, write-combined access for imported buffers.

We can drop the field and distinguish both cases by testing whether
struct drm_gem_object.import_attach is NULL.
Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Acked-by: Gerd Hoffmann <kraxel@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191107094307.19870-2-tzimmermann@suse.de
parent 165d3448
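
For orientation, a minimal sketch of the mapping behavior after this change: local buffers keep the default cached protection, while imported buffers (import_attach != NULL) are mapped write-combined. The helper name udl_set_page_prot is hypothetical; the patch itself open-codes this logic directly in udl_drm_gem_mmap(), as shown in the last hunk below.

	/* Hypothetical helper illustrating the logic this patch open-codes in
	 * udl_drm_gem_mmap(): imported buffers (import_attach != NULL) get a
	 * write-combined mapping, local buffers keep the default cached one. */
	static void udl_set_page_prot(struct drm_gem_object *obj,
				      struct vm_area_struct *vma)
	{
		/* default: cached access for locally allocated buffers */
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

		/* imported dma-buf: use write-combined access instead */
		if (obj->import_attach)
			vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	}
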
...
@@ -241,7 +241,6 @@ struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
 		goto fail_unmap;
 
 	uobj->base.import_attach = attach;
-	uobj->flags = UDL_BO_WC;
 
 	return &uobj->base;
...
@@ -29,9 +29,6 @@ struct drm_mode_create_dumb;
 #define DRIVER_MINOR		0
 #define DRIVER_PATCHLEVEL	1
 
-#define UDL_BO_CACHEABLE	(1 << 0)
-#define UDL_BO_WC		(1 << 1)
-
 struct udl_device;
 
 struct urb_node {
@@ -81,7 +78,6 @@ struct udl_gem_object {
 	struct page **pages;
 	void *vmapping;
 	struct sg_table *sg;
-	unsigned int flags;
 };
 
 #define to_udl_bo(x) container_of(x, struct udl_gem_object, base)
...
@@ -25,7 +25,6 @@ struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
 		return NULL;
 	}
 
-	obj->flags = UDL_BO_CACHEABLE;
 	return obj;
 }
@@ -57,23 +56,6 @@ udl_gem_create(struct drm_file *file,
 	return 0;
 }
 
-static void update_vm_cache_attr(struct udl_gem_object *obj,
-				 struct vm_area_struct *vma)
-{
-	DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);
-
-	/* non-cacheable as default. */
-	if (obj->flags & UDL_BO_CACHEABLE) {
-		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-	} else if (obj->flags & UDL_BO_WC) {
-		vma->vm_page_prot =
-			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
-	} else {
-		vma->vm_page_prot =
-			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
-	}
-}
-
 int udl_dumb_create(struct drm_file *file,
 		    struct drm_device *dev,
 		    struct drm_mode_create_dumb *args)
@@ -86,16 +68,21 @@ int udl_dumb_create(struct drm_file *file,
 int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 {
+	struct drm_gem_object *obj;
 	int ret;
 
 	ret = drm_gem_mmap(filp, vma);
 	if (ret)
 		return ret;
 
+	obj = vma->vm_private_data;
+
 	vma->vm_flags &= ~VM_PFNMAP;
 	vma->vm_flags |= VM_MIXEDMAP;
 
-	update_vm_cache_attr(to_udl_bo(vma->vm_private_data), vma);
+	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+	if (obj->import_attach)
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 
 	return ret;
 }
...