Commit 707d561f authored by Gerd Hoffmann's avatar Gerd Hoffmann

drm: allow limiting the scatter list size.

Add drm_device argument to drm_prime_pages_to_sg(), so we can
call dma_max_mapping_size() to figure the segment size limit
and call into __sg_alloc_table_from_pages() with the correct
limit.

This fixes virtio-gpu with sev.  Possibly it'll fix other bugs
too given that drm seems to totally ignore segment size limits
so far ...

v2: place max_segment in drm driver not gem object.
v3: move max_segment next to the other gem fields.
v4: just use dma_max_mapping_size().
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/20200907112425.15610-2-kraxel@redhat.com
parent 04e89ff3
...@@ -302,7 +302,8 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach, ...@@ -302,7 +302,8 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
switch (bo->tbo.mem.mem_type) { switch (bo->tbo.mem.mem_type) {
case TTM_PL_TT: case TTM_PL_TT:
sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages, sgt = drm_prime_pages_to_sg(obj->dev,
bo->tbo.ttm->pages,
bo->tbo.num_pages); bo->tbo.num_pages);
if (IS_ERR(sgt)) if (IS_ERR(sgt))
return sgt; return sgt;
......
...@@ -656,7 +656,7 @@ struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj) ...@@ -656,7 +656,7 @@ struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj)
WARN_ON(shmem->base.import_attach); WARN_ON(shmem->base.import_attach);
return drm_prime_pages_to_sg(shmem->pages, obj->size >> PAGE_SHIFT); return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
} }
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table); EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
......
...@@ -802,9 +802,11 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = { ...@@ -802,9 +802,11 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
* *
* This is useful for implementing &drm_gem_object_funcs.get_sg_table. * This is useful for implementing &drm_gem_object_funcs.get_sg_table.
*/ */
struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages) struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
struct page **pages, unsigned int nr_pages)
{ {
struct sg_table *sg = NULL; struct sg_table *sg = NULL;
size_t max_segment = 0;
int ret; int ret;
sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL); sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
...@@ -813,8 +815,13 @@ struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_page ...@@ -813,8 +815,13 @@ struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_page
goto out; goto out;
} }
ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0, if (dev)
nr_pages << PAGE_SHIFT, GFP_KERNEL); max_segment = dma_max_mapping_size(dev->dev);
if (max_segment == 0 || max_segment > SCATTERLIST_MAX_SEGMENT)
max_segment = SCATTERLIST_MAX_SEGMENT;
ret = __sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
nr_pages << PAGE_SHIFT,
max_segment, GFP_KERNEL);
if (ret) if (ret)
goto out; goto out;
......
...@@ -103,7 +103,8 @@ struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj) ...@@ -103,7 +103,8 @@ struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
int npages = etnaviv_obj->base.size >> PAGE_SHIFT; int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
struct sg_table *sgt; struct sg_table *sgt;
sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages); sgt = drm_prime_pages_to_sg(etnaviv_obj->base.dev,
etnaviv_obj->pages, npages);
if (IS_ERR(sgt)) { if (IS_ERR(sgt)) {
dev_err(dev->dev, "failed to allocate sgt: %ld\n", dev_err(dev->dev, "failed to allocate sgt: %ld\n",
PTR_ERR(sgt)); PTR_ERR(sgt));
......
...@@ -19,7 +19,7 @@ struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj) ...@@ -19,7 +19,7 @@ struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj)
if (WARN_ON(!etnaviv_obj->pages)) /* should have already pinned! */ if (WARN_ON(!etnaviv_obj->pages)) /* should have already pinned! */
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
return drm_prime_pages_to_sg(etnaviv_obj->pages, npages); return drm_prime_pages_to_sg(obj->dev, etnaviv_obj->pages, npages);
} }
void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj) void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj)
......
...@@ -126,7 +126,7 @@ static struct page **get_pages(struct drm_gem_object *obj) ...@@ -126,7 +126,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
msm_obj->pages = p; msm_obj->pages = p;
msm_obj->sgt = drm_prime_pages_to_sg(p, npages); msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
if (IS_ERR(msm_obj->sgt)) { if (IS_ERR(msm_obj->sgt)) {
void *ptr = ERR_CAST(msm_obj->sgt); void *ptr = ERR_CAST(msm_obj->sgt);
......
...@@ -19,7 +19,7 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj) ...@@ -19,7 +19,7 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
if (WARN_ON(!msm_obj->pages)) /* should have already pinned! */ if (WARN_ON(!msm_obj->pages)) /* should have already pinned! */
return NULL; return NULL;
return drm_prime_pages_to_sg(msm_obj->pages, npages); return drm_prime_pages_to_sg(obj->dev, msm_obj->pages, npages);
} }
void *msm_gem_prime_vmap(struct drm_gem_object *obj) void *msm_gem_prime_vmap(struct drm_gem_object *obj)
......
...@@ -32,7 +32,7 @@ struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *obj) ...@@ -32,7 +32,7 @@ struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *obj)
struct nouveau_bo *nvbo = nouveau_gem_object(obj); struct nouveau_bo *nvbo = nouveau_gem_object(obj);
int npages = nvbo->bo.num_pages; int npages = nvbo->bo.num_pages;
return drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages); return drm_prime_pages_to_sg(obj->dev, nvbo->bo.ttm->pages, npages);
} }
void *nouveau_gem_prime_vmap(struct drm_gem_object *obj) void *nouveau_gem_prime_vmap(struct drm_gem_object *obj)
......
...@@ -36,7 +36,7 @@ struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj) ...@@ -36,7 +36,7 @@ struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj)
struct radeon_bo *bo = gem_to_radeon_bo(obj); struct radeon_bo *bo = gem_to_radeon_bo(obj);
int npages = bo->tbo.num_pages; int npages = bo->tbo.num_pages;
return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages); return drm_prime_pages_to_sg(obj->dev, bo->tbo.ttm->pages, npages);
} }
void *radeon_gem_prime_vmap(struct drm_gem_object *obj) void *radeon_gem_prime_vmap(struct drm_gem_object *obj)
......
...@@ -85,7 +85,8 @@ static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj) ...@@ -85,7 +85,8 @@ static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT; rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;
rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages); rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->base.dev,
rk_obj->pages, rk_obj->num_pages);
if (IS_ERR(rk_obj->sgt)) { if (IS_ERR(rk_obj->sgt)) {
ret = PTR_ERR(rk_obj->sgt); ret = PTR_ERR(rk_obj->sgt);
goto err_put_pages; goto err_put_pages;
...@@ -442,7 +443,7 @@ struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj) ...@@ -442,7 +443,7 @@ struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
int ret; int ret;
if (rk_obj->pages) if (rk_obj->pages)
return drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages); return drm_prime_pages_to_sg(obj->dev, rk_obj->pages, rk_obj->num_pages);
sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt) if (!sgt)
......
...@@ -284,7 +284,7 @@ static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo) ...@@ -284,7 +284,7 @@ static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
bo->num_pages = bo->gem.size >> PAGE_SHIFT; bo->num_pages = bo->gem.size >> PAGE_SHIFT;
bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages); bo->sgt = drm_prime_pages_to_sg(bo->gem.dev, bo->pages, bo->num_pages);
if (IS_ERR(bo->sgt)) { if (IS_ERR(bo->sgt)) {
err = PTR_ERR(bo->sgt); err = PTR_ERR(bo->sgt);
goto put_pages; goto put_pages;
......
...@@ -321,7 +321,7 @@ static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj) ...@@ -321,7 +321,7 @@ static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
{ {
struct drm_vgem_gem_object *bo = to_vgem_bo(obj); struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
return drm_prime_pages_to_sg(bo->pages, bo->base.size >> PAGE_SHIFT); return drm_prime_pages_to_sg(obj->dev, bo->pages, bo->base.size >> PAGE_SHIFT);
} }
static struct drm_gem_object* vgem_prime_import(struct drm_device *dev, static struct drm_gem_object* vgem_prime_import(struct drm_device *dev,
......
...@@ -179,7 +179,8 @@ struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj) ...@@ -179,7 +179,8 @@ struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj)
if (!xen_obj->pages) if (!xen_obj->pages)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
return drm_prime_pages_to_sg(xen_obj->pages, xen_obj->num_pages); return drm_prime_pages_to_sg(gem_obj->dev,
xen_obj->pages, xen_obj->num_pages);
} }
struct drm_gem_object * struct drm_gem_object *
......
...@@ -88,7 +88,8 @@ void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr); ...@@ -88,7 +88,8 @@ void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr);
int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma); int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma);
struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages); struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
struct page **pages, unsigned int nr_pages);
struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj, struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
int flags); int flags);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment