Commit 9f07bb0d authored by Lucas Stach

drm/etnaviv: fix get pages error path in etnaviv_gem_vaddr

In case etnaviv_gem_get_pages is unable to get the required
pages, the object mutex needs to be unlocked. Also return NULL in
this case instead of propagating the error, as callers of this
function might not be prepared to handle a pointer error but
expect this call to follow the semantics of a plain vmap and
return NULL on error.
Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
parent 45d16a6d
@@ -216,7 +216,7 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
 		iter.hdr->iova = cpu_to_le64(vram->iova);
 
 		vaddr = etnaviv_gem_vaddr(&obj->base);
-		if (vaddr && !IS_ERR(vaddr))
+		if (vaddr)
 			memcpy(iter.data, vaddr, obj->base.size);
 
 		etnaviv_core_dump_header(&iter, ETDUMP_BUF_BO, iter.data +

@@ -361,8 +361,10 @@ void *etnaviv_gem_vaddr(struct drm_gem_object *obj)
 	if (!etnaviv_obj->vaddr) {
 		struct page **pages = etnaviv_gem_get_pages(etnaviv_obj);
 
-		if (IS_ERR(pages))
-			return ERR_CAST(pages);
+		if (IS_ERR(pages)) {
+			mutex_unlock(&etnaviv_obj->lock);
+			return NULL;
+		}
 
 		etnaviv_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
 				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
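For context, below is a minimal sketch of how etnaviv_gem_vaddr() reads after this patch. Only the error-path lines come from the hunk above; the surrounding mutex_lock()/mutex_unlock() pair, the to_etnaviv_bo() conversion and the function tail are assumptions inferred from the visible context, not part of the diff shown.

/* Sketch of etnaviv_gem_vaddr() after the patch (lock handling and
 * function tail are assumed from context, not shown in the hunks).
 */
void *etnaviv_gem_vaddr(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&etnaviv_obj->lock);
	if (!etnaviv_obj->vaddr) {
		struct page **pages = etnaviv_gem_get_pages(etnaviv_obj);

		if (IS_ERR(pages)) {
			/* drop the object lock before bailing out and
			 * return NULL, matching plain vmap() semantics
			 */
			mutex_unlock(&etnaviv_obj->lock);
			return NULL;
		}

		etnaviv_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	mutex_unlock(&etnaviv_obj->lock);

	return etnaviv_obj->vaddr;
}

With NULL as the only failure value, callers such as etnaviv_core_dump() can simply test "if (vaddr)" instead of also checking IS_ERR(), which is exactly what the first hunk changes.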