Commit e3c92eb4 authored by Somalapuram Amaranath, committed by Christian König

drm/ttm: rework ttm_resource to use size_t type

Change the ttm_resource structure to store its allocation size in bytes (size_t size) instead of a page count (unsigned long num_pages).
v1 -> v2: change PFN_UP(dst_mem->size) to ttm->num_pages
v1 -> v2: change bo->resource->size to bo->base.size at some places
v1 -> v2: remove the local variable
v1 -> v2: cleanup cmp_size_smaller_first()
v2 -> v3: adding missing PFN_UP in ttm_bo_vm_fault_reserved
Signed-off-by: Somalapuram Amaranath <Amaranath.Somalapuram@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221027091237.983582-1-Amaranath.Somalapuram@amd.com
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
parent e1e7bc48
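The conversion is mechanical throughout the diff below: the stored page count becomes a byte size, and call sites that still need whole pages derive them with PFN_UP(), which rounds a byte count up to full pages. The following minimal userspace sketch of that arithmetic is an editorial addition, not part of the patch; PAGE_SHIFT of 12 (4 KiB pages) is an assumption for illustration, and the PFN_UP definition mirrors the kernel's include/linux/pfn.h:

#include <stdio.h>

#define PAGE_SHIFT 12                      /* assumed 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PFN_UP(x)  (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)  /* as in include/linux/pfn.h */

int main(void)
{
	size_t size = (2UL << PAGE_SHIFT) + 1;  /* two pages plus one byte */

	/* Old code read res->num_pages; new code stores res->size in bytes
	 * and computes the page count only at the call sites that need it. */
	printf("size=%zu bytes -> %zu pages\n", size, (size_t)PFN_UP(size));
	return 0;
}

This prints "size=8193 bytes -> 3 pages": a partial page rounds up, preserving the old num_pages semantics.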
@@ -144,7 +144,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 node->base.start = node->mm_nodes[0].start;
 } else {
 node->mm_nodes[0].start = 0;
-node->mm_nodes[0].size = node->base.num_pages;
+node->mm_nodes[0].size = PFN_UP(node->base.size);
 node->base.start = AMDGPU_BO_INVALID_OFFSET;
 }
@@ -542,6 +542,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 /* GWS and OA don't need any alignment. */
 page_align = bp->byte_align;
 size <<= PAGE_SHIFT;
+
 } else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
 /* Both size and alignment must be a multiple of 4. */
 page_align = ALIGN(bp->byte_align, 4);
@@ -776,7 +777,7 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
 return 0;
 }
-r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap);
+r = ttm_bo_kmap(&bo->tbo, 0, PFN_UP(bo->tbo.base.size), &bo->kmap);
 if (r)
 return r;
@@ -62,7 +62,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
 if (!res)
 goto fallback;
-BUG_ON(start + size > res->num_pages << PAGE_SHIFT);
+BUG_ON(start + size > res->size);
 cur->mem_type = res->mem_type;
@@ -110,7 +110,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
 cur->size = size;
 cur->remaining = size;
 cur->node = NULL;
-WARN_ON(res && start + size > res->num_pages << PAGE_SHIFT);
+WARN_ON(res && start + size > res->size);
 return;
 }
@@ -127,7 +127,7 @@ TRACE_EVENT(amdgpu_bo_create,
 TP_fast_assign(
 __entry->bo = bo;
-__entry->pages = bo->tbo.resource->num_pages;
+__entry->pages = PFN_UP(bo->tbo.resource->size);
 __entry->type = bo->tbo.resource->mem_type;
 __entry->prefer = bo->preferred_domains;
 __entry->allow = bo->allowed_domains;
@@ -381,7 +381,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 dst.offset = 0;
 r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
-new_mem->num_pages << PAGE_SHIFT,
+new_mem->size,
 amdgpu_bo_encrypted(abo),
 bo->base.resv, &fence);
 if (r)
@@ -424,7 +424,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 static bool amdgpu_mem_visible(struct amdgpu_device *adev,
 struct ttm_resource *mem)
 {
-u64 mem_size = (u64)mem->num_pages << PAGE_SHIFT;
+u64 mem_size = (u64)mem->size;
 struct amdgpu_res_cursor cursor;
 u64 end;
@@ -568,7 +568,7 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
 struct ttm_resource *mem)
 {
 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
-size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
+size_t bus_size = (size_t)mem->size;
 switch (mem->mem_type) {
 case TTM_PL_SYSTEM:
@@ -439,7 +439,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 /* Allocate blocks in desired range */
 vres->flags |= DRM_BUDDY_RANGE_ALLOCATION;
-remaining_size = (u64)vres->base.num_pages << PAGE_SHIFT;
+remaining_size = (u64)vres->base.size;
 mutex_lock(&mgr->lock);
 while (remaining_size) {
@@ -498,7 +498,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 LIST_HEAD(temp);
 trim_list = &vres->blocks;
-original_size = (u64)vres->base.num_pages << PAGE_SHIFT;
+original_size = (u64)vres->base.size;
 /*
 * If size value is rounded up to min_block_size, trim the last
@@ -533,8 +533,8 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 amdgpu_vram_mgr_block_size(block);
 start >>= PAGE_SHIFT;
-if (start > vres->base.num_pages)
-start -= vres->base.num_pages;
+if (start > PFN_UP(vres->base.size))
+start -= PFN_UP(vres->base.size);
 else
 start = 0;
 vres->base.start = max(vres->base.start, start);
@@ -649,7 +649,7 @@ bool i915_ttm_resource_mappable(struct ttm_resource *res)
 if (!i915_ttm_cpu_maps_iomem(res))
 return true;
-return bman_res->used_visible_size == bman_res->base.num_pages;
+return bman_res->used_visible_size == PFN_UP(bman_res->base.size);
 }
 static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
@@ -158,7 +158,7 @@ struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
 u32 page_alignment)
 {
 struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
-const u64 size = res->num_pages << PAGE_SHIFT;
+const u64 size = res->size;
 const u32 max_segment = round_down(UINT_MAX, page_alignment);
 struct drm_buddy *mm = bman_res->mm;
 struct list_head *blocks = &bman_res->blocks;
@@ -177,7 +177,7 @@ struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
 i915_refct_sgt_init(rsgt, size);
 st = &rsgt->table;
-if (sg_alloc_table(st, res->num_pages, GFP_KERNEL)) {
+if (sg_alloc_table(st, PFN_UP(res->size), GFP_KERNEL)) {
 i915_refct_sgt_put(rsgt);
 return ERR_PTR(-ENOMEM);
 }
@@ -62,8 +62,8 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
 if (place->fpfn || lpfn != man->size)
 bman_res->flags |= DRM_BUDDY_RANGE_ALLOCATION;
-GEM_BUG_ON(!bman_res->base.num_pages);
-size = bman_res->base.num_pages << PAGE_SHIFT;
+GEM_BUG_ON(!bman_res->base.size);
+size = bman_res->base.size;
 min_page_size = bman->default_page_size;
 if (bo->page_alignment)
@@ -72,7 +72,7 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
 GEM_BUG_ON(min_page_size < mm->chunk_size);
 GEM_BUG_ON(!IS_ALIGNED(size, min_page_size));
-if (place->fpfn + bman_res->base.num_pages != place->lpfn &&
+if (place->fpfn + PFN_UP(bman_res->base.size) != place->lpfn &&
 place->flags & TTM_PL_FLAG_CONTIGUOUS) {
 unsigned long pages;
@@ -108,7 +108,7 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
 goto err_free_blocks;
 if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
-u64 original_size = (u64)bman_res->base.num_pages << PAGE_SHIFT;
+u64 original_size = (u64)bman_res->base.size;
 drm_buddy_block_trim(mm,
 original_size,
@@ -116,7 +116,7 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
 }
 if (lpfn <= bman->visible_size) {
-bman_res->used_visible_size = bman_res->base.num_pages;
+bman_res->used_visible_size = PFN_UP(bman_res->base.size);
 } else {
 struct drm_buddy_block *block;
@@ -228,7 +228,7 @@ static bool i915_ttm_buddy_man_compatible(struct ttm_resource_manager *man,
 if (!place->fpfn &&
 place->lpfn == i915_ttm_buddy_man_visible_size(man))
-return bman_res->used_visible_size == res->num_pages;
+return bman_res->used_visible_size == PFN_UP(res->size);
 /* Check each drm buddy block individually */
 list_for_each_entry(block, &bman_res->blocks, link) {
@@ -244,7 +244,7 @@ void intel_region_ttm_resource_free(struct intel_memory_region *mem,
 struct ttm_resource_manager *man = mem->region_private;
 struct ttm_buffer_object mock_bo = {};
-mock_bo.base.size = res->num_pages << PAGE_SHIFT;
+mock_bo.base.size = res->size;
 mock_bo.bdev = &mem->i915->bdev;
 res->bo = &mock_bo;
@@ -532,7 +532,7 @@ nouveau_bo_map(struct nouveau_bo *nvbo)
 if (ret)
 return ret;
-ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.resource->num_pages, &nvbo->kmap);
+ret = ttm_bo_kmap(&nvbo->bo, 0, PFN_UP(nvbo->bo.base.size), &nvbo->kmap);
 ttm_bo_unreserve(&nvbo->bo);
 return ret;
@@ -1236,7 +1236,7 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 } else {
 /* make sure bo is in mappable vram */
 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
-bo->resource->start + bo->resource->num_pages < mappable)
+bo->resource->start + PFN_UP(bo->resource->size) < mappable)
 return 0;
 for (i = 0; i < nvbo->placement.num_placement; ++i) {
@@ -52,7 +52,7 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 u32 src_offset = old_reg->start << PAGE_SHIFT;
 u32 dst_ctxdma = nouveau_bo_mem_ctxdma(bo, chan, new_reg);
 u32 dst_offset = new_reg->start << PAGE_SHIFT;
-u32 page_count = new_reg->num_pages;
+u32 page_count = PFN_UP(new_reg->size);
 int ret;
 ret = PUSH_WAIT(push, 3);
@@ -62,7 +62,7 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 PUSH_MTHD(push, NV039, SET_CONTEXT_DMA_BUFFER_IN, src_ctxdma,
 SET_CONTEXT_DMA_BUFFER_OUT, dst_ctxdma);
-page_count = new_reg->num_pages;
+page_count = PFN_UP(new_reg->size);
 while (page_count) {
 int line_count = (page_count > 2047) ? 2047 : page_count;
@@ -41,7 +41,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 {
 struct nouveau_mem *mem = nouveau_mem(old_reg);
 struct nvif_push *push = chan->chan.push;
-u64 length = (new_reg->num_pages << PAGE_SHIFT);
+u64 length = new_reg->size;
 u64 src_offset = mem->vma[0].addr;
 u64 dst_offset = mem->vma[1].addr;
 int src_tiled = !!mem->kind;
@@ -44,7 +44,7 @@ nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 if (ret)
 return ret;
-PUSH_NVSQ(push, NV74C1, 0x0304, new_reg->num_pages << PAGE_SHIFT,
+PUSH_NVSQ(push, NV74C1, 0x0304, new_reg->size,
 0x0308, upper_32_bits(mem->vma[0].addr),
 0x030c, lower_32_bits(mem->vma[0].addr),
 0x0310, upper_32_bits(mem->vma[1].addr),
@@ -44,10 +44,10 @@ nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 struct nvif_push *push = chan->chan.push;
 u64 src_offset = mem->vma[0].addr;
 u64 dst_offset = mem->vma[1].addr;
-u32 page_count = new_reg->num_pages;
+u32 page_count = PFN_UP(new_reg->size);
 int ret;
-page_count = new_reg->num_pages;
+page_count = PFN_UP(new_reg->size);
 while (page_count) {
 int line_count = (page_count > 8191) ? 8191 : page_count;
@@ -42,10 +42,10 @@ nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 struct nouveau_mem *mem = nouveau_mem(old_reg);
 u64 src_offset = mem->vma[0].addr;
 u64 dst_offset = mem->vma[1].addr;
-u32 page_count = new_reg->num_pages;
+u32 page_count = PFN_UP(new_reg->size);
 int ret;
-page_count = new_reg->num_pages;
+page_count = PFN_UP(new_reg->size);
 while (page_count) {
 int line_count = (page_count > 2047) ? 2047 : page_count;
@@ -37,10 +37,10 @@ nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 struct nvif_push *push = chan->chan.push;
 u64 src_offset = mem->vma[0].addr;
 u64 dst_offset = mem->vma[1].addr;
-u32 page_count = new_reg->num_pages;
+u32 page_count = PFN_UP(new_reg->size);
 int ret;
-page_count = new_reg->num_pages;
+page_count = PFN_UP(new_reg->size);
 while (page_count) {
 int line_count = (page_count > 8191) ? 8191 : page_count;
@@ -58,7 +58,7 @@ nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 PITCH_IN, PAGE_SIZE,
 PITCH_OUT, PAGE_SIZE,
 LINE_LENGTH_IN, PAGE_SIZE,
-LINE_COUNT, new_reg->num_pages);
+LINE_COUNT, PFN_UP(new_reg->size));
 PUSH_IMMD(push, NVA0B5, LAUNCH_DMA,
 NVDEF(NVA0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, NON_PIPELINED) |
@@ -679,7 +679,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
 }
 if (!nvbo->kmap.virtual) {
-ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.resource->num_pages,
+ret = ttm_bo_kmap(&nvbo->bo, 0, PFN_UP(nvbo->bo.base.size),
 &nvbo->kmap);
 if (ret) {
 NV_PRINTK(err, cli, "failed kmap for reloc\n");
@@ -868,8 +868,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 if (unlikely(cmd != req->suffix0)) {
 if (!nvbo->kmap.virtual) {
 ret = ttm_bo_kmap(&nvbo->bo, 0,
-nvbo->bo.resource->
-num_pages,
+PFN_UP(nvbo->bo.base.size),
 &nvbo->kmap);
 if (ret) {
 WIND_RING(chan);
@@ -115,7 +115,7 @@ nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt)
 mutex_lock(&drm->master.lock);
 ret = nvif_mem_ctor_type(mmu, "ttmHostMem", cli->mem->oclass, type, PAGE_SHIFT,
-reg->num_pages << PAGE_SHIFT,
+reg->size,
 &args, sizeof(args), &mem->mem);
 mutex_unlock(&drm->master.lock);
 return ret;
@@ -128,7 +128,7 @@ nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page)
 struct nouveau_cli *cli = mem->cli;
 struct nouveau_drm *drm = cli->drm;
 struct nvif_mmu *mmu = &cli->mmu;
-u64 size = ALIGN(reg->num_pages << PAGE_SHIFT, 1 << page);
+u64 size = ALIGN(reg->size, 1 << page);
 int ret;
 mutex_lock(&drm->master.lock);
@@ -139,7 +139,7 @@ nv04_gart_manager_new(struct ttm_resource_manager *man,
 mem = nouveau_mem(*res);
 ttm_resource_init(bo, place, *res);
 ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
-(long)(*res)->num_pages << PAGE_SHIFT, &mem->vma[0]);
+(long)(*res)->size, &mem->vma[0]);
 if (ret) {
 nouveau_mem_del(man, *res);
 return ret;
@@ -400,8 +400,11 @@ static int cmp_size_smaller_first(void *priv, const struct list_head *a,
 struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);
 /* Sort A before B if A is smaller. */
-return (int)la->robj->tbo.resource->num_pages -
-(int)lb->robj->tbo.resource->num_pages;
+if (la->robj->tbo.base.size > lb->robj->tbo.base.size)
+return 1;
+if (la->robj->tbo.base.size < lb->robj->tbo.base.size)
+return -1;
+return 0;
 }
 /**
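Editorial note on the cmp_size_smaller_first() hunk above: the old comparator subtracted two counts cast to int, which truncates once the values no longer fit in 32 bits, while the new explicit three-way comparison cannot misorder. A small self-contained sketch of the failure mode (the sizes are hypothetical and not taken from the patch; it assumes a 64-bit size_t):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t a = (size_t)1 << 32;  /* 4 GiB buffer (needs 64-bit size_t) */
	size_t b = 4096;             /* one-page buffer */

	/* Subtraction-style compare: (int)a typically truncates to 0, so
	 * the 4 GiB buffer sorts as if it were smaller than one page. */
	int bad  = (int)a - (int)b;
	/* Explicit compare, as in the new code: safe for any size_t. */
	int good = (a > b) ? 1 : (a < b) ? -1 : 0;

	printf("bad=%d good=%d\n", bad, good);  /* typically bad=-4096, good=1 */
	return 0;
}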
@@ -232,7 +232,7 @@ int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
 }
 return 0;
 }
-r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap);
+r = ttm_bo_kmap(&bo->tbo, 0, PFN_UP(bo->tbo.base.size), &bo->kmap);
 if (r) {
 return r;
 }
@@ -737,7 +737,7 @@ vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 if (bo->resource->mem_type != TTM_PL_VRAM)
 return 0;
-size = bo->resource->num_pages << PAGE_SHIFT;
+size = bo->resource->size;
 offset = bo->resource->start << PAGE_SHIFT;
 if ((offset + size) <= rdev->mc.visible_vram_size)
 return 0;
@@ -22,7 +22,7 @@ TRACE_EVENT(radeon_bo_create,
 TP_fast_assign(
 __entry->bo = bo;
-__entry->pages = bo->tbo.resource->num_pages;
+__entry->pages = PFN_UP(bo->tbo.resource->size);
 ),
 TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
 );
@@ -181,7 +181,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
-num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
+num_pages = PFN_UP(new_mem->size) * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
 fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->base.resv);
 if (IS_ERR(fence))
 return PTR_ERR(fence);
@@ -268,7 +268,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
 static int radeon_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
 {
 struct radeon_device *rdev = radeon_get_rdev(bdev);
-size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
+size_t bus_size = (size_t)mem->size;
 switch (mem->mem_type) {
 case TTM_PL_SYSTEM:
@@ -51,9 +51,6 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
 struct ttm_resource_manager *man;
 int i, mem_type;
-drm_printf(&p, "No space for %p (%lu pages, %zuK, %zuM)\n",
-bo, bo->resource->num_pages, bo->base.size >> 10,
-bo->base.size >> 20);
 for (i = 0; i < placement->num_placement; i++) {
 mem_type = placement->placement[i].mem_type;
 drm_printf(&p, " placement[%d]=0x%08X (%d)\n",
@@ -173,7 +173,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm));
 if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))
-ttm_move_memcpy(clear, dst_mem->num_pages, dst_iter, src_iter);
+ttm_move_memcpy(clear, ttm->num_pages, dst_iter, src_iter);
 if (!src_iter->ops->maps_tt)
 ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);
@@ -357,9 +357,9 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
 map->virtual = NULL;
 map->bo = bo;
-if (num_pages > bo->resource->num_pages)
+if (num_pages > PFN_UP(bo->resource->size))
 return -EINVAL;
-if ((start_page + num_pages) > bo->resource->num_pages)
+if ((start_page + num_pages) > PFN_UP(bo->resource->size))
 return -EINVAL;
 ret = ttm_mem_io_reserve(bo->bdev, bo->resource);
@@ -217,7 +217,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 page_last = vma_pages(vma) + vma->vm_pgoff -
 drm_vma_node_start(&bo->base.vma_node);
-if (unlikely(page_offset >= bo->resource->num_pages))
+if (unlikely(page_offset >= PFN_UP(bo->base.size)))
 return VM_FAULT_SIGBUS;
 prot = ttm_io_prot(bo, bo->resource, prot);
@@ -412,7 +412,7 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
 << PAGE_SHIFT);
 int ret;
-if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->resource->num_pages)
+if (len < 1 || (offset + len) > bo->base.size)
 return -EIO;
 ret = ttm_bo_reserve(bo, true, false, NULL);
@@ -83,7 +83,7 @@ static int ttm_range_man_alloc(struct ttm_resource_manager *man,
 spin_lock(&rman->lock);
 ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
-node->base.num_pages,
+PFN_UP(node->base.size),
 bo->page_alignment, 0,
 place->fpfn, lpfn, mode);
 spin_unlock(&rman->lock);
@@ -177,7 +177,7 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
 struct ttm_resource_manager *man;
 res->start = 0;
-res->num_pages = PFN_UP(bo->base.size);
+res->size = bo->base.size;
 res->mem_type = place->mem_type;
 res->placement = place->flags;
 res->bus.addr = NULL;
@@ -192,7 +192,7 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
 list_add_tail(&res->lru, &bo->bdev->pinned);
 else
 list_add_tail(&res->lru, &man->lru[bo->priority]);
-man->usage += res->num_pages << PAGE_SHIFT;
+man->usage += res->size;
 spin_unlock(&bo->bdev->lru_lock);
 }
 EXPORT_SYMBOL(ttm_resource_init);
@@ -214,7 +214,7 @@ void ttm_resource_fini(struct ttm_resource_manager *man,
 spin_lock(&bdev->lru_lock);
 list_del_init(&res->lru);
-man->usage -= res->num_pages << PAGE_SHIFT;
+man->usage -= res->size;
 spin_unlock(&bdev->lru_lock);
 }
 EXPORT_SYMBOL(ttm_resource_fini);
@@ -665,17 +665,15 @@ ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io,
 iosys_map_set_vaddr(&iter_io->dmap, mem->bus.addr);
 iter_io->needs_unmap = false;
 } else {
-size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
-
 iter_io->needs_unmap = true;
 memset(&iter_io->dmap, 0, sizeof(iter_io->dmap));
 if (mem->bus.caching == ttm_write_combined)
 iosys_map_set_vaddr_iomem(&iter_io->dmap,
 ioremap_wc(mem->bus.offset,
-bus_size));
+mem->size));
 else if (mem->bus.caching == ttm_cached)
 iosys_map_set_vaddr(&iter_io->dmap,
-memremap(mem->bus.offset, bus_size,
+memremap(mem->bus.offset, mem->size,
 MEMREMAP_WB |
 MEMREMAP_WT |
 MEMREMAP_WC));
@@ -684,7 +682,7 @@ ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io,
 if (iosys_map_is_null(&iter_io->dmap))
 iosys_map_set_vaddr_iomem(&iter_io->dmap,
 ioremap(mem->bus.offset,
-bus_size));
+mem->size));
 if (iosys_map_is_null(&iter_io->dmap)) {
 ret = -ENOMEM;
@@ -483,8 +483,8 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
 d.src_addr = NULL;
 d.dst_pages = dst->ttm->pages;
 d.src_pages = src->ttm->pages;
-d.dst_num_pages = dst->resource->num_pages;
-d.src_num_pages = src->resource->num_pages;
+d.dst_num_pages = PFN_UP(dst->resource->size);
+d.src_num_pages = PFN_UP(src->resource->size);
 d.dst_prot = ttm_io_prot(dst, dst->resource, PAGE_KERNEL);
 d.src_prot = ttm_io_prot(src, src->resource, PAGE_KERNEL);
 d.diff = diff;
@@ -194,7 +194,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
 int ret = 0;
 place = vmw_vram_placement.placement[0];
-place.lpfn = bo->resource->num_pages;
+place.lpfn = PFN_UP(bo->resource->size);
 placement.num_placement = 1;
 placement.placement = &place;
 placement.num_busy_placement = 1;
@@ -211,7 +211,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
 * that situation.
 */
 if (bo->resource->mem_type == TTM_PL_VRAM &&
-bo->resource->start < bo->resource->num_pages &&
+bo->resource->start < PFN_UP(bo->resource->size) &&
 bo->resource->start > 0 &&
 buf->base.pin_count == 0) {
 ctx.interruptible = false;
@@ -352,7 +352,7 @@ void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
 if (virtual)
 return virtual;
-ret = ttm_bo_kmap(bo, 0, bo->resource->num_pages, &vbo->map);
+ret = ttm_bo_kmap(bo, 0, PFN_UP(bo->base.size), &vbo->map);
 if (ret)
 DRM_ERROR("Buffer object map failed: %d.\n", ret);
@@ -443,7 +443,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
 * Do a page by page copy of COTables. This eliminates slow vmap()s.
 * This should really be a TTM utility.
 */
-for (i = 0; i < old_bo->resource->num_pages; ++i) {
+for (i = 0; i < PFN_UP(old_bo->resource->size); ++i) {
 bool dummy;
 ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
@@ -1047,7 +1047,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
 if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
-if (unlikely(new_query_bo->base.resource->num_pages > 4)) {
+if (unlikely(PFN_UP(new_query_bo->base.resource->size) > 4)) {
 VMW_DEBUG_USER("Query buffer too large.\n");
 return -EINVAL;
 }
@@ -71,7 +71,7 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
 spin_lock(&gman->lock);
 if (gman->max_gmr_pages > 0) {
-gman->used_gmr_pages += (*res)->num_pages;
+gman->used_gmr_pages += PFN_UP((*res)->size);
 /*
 * Because the graphics memory is a soft limit we can try to
 * expand it instead of letting the userspace apps crash.
@@ -114,7 +114,7 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
 return 0;
 nospace:
-gman->used_gmr_pages -= (*res)->num_pages;
+gman->used_gmr_pages -= PFN_UP((*res)->size);
 spin_unlock(&gman->lock);
 ida_free(&gman->gmr_ida, id);
 ttm_resource_fini(man, *res);
@@ -129,7 +129,7 @@ static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man,
 ida_free(&gman->gmr_ida, res->start);
 spin_lock(&gman->lock);
-gman->used_gmr_pages -= res->num_pages;
+gman->used_gmr_pages -= PFN_UP(res->size);
 spin_unlock(&gman->lock);
 ttm_resource_fini(man, res);
 kfree(res);
@@ -230,7 +230,7 @@ void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
 int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
 {
 struct vmw_bo_dirty *dirty = vbo->dirty;
-pgoff_t num_pages = vbo->base.resource->num_pages;
+pgoff_t num_pages = PFN_UP(vbo->base.resource->size);
 size_t size;
 int ret;
@@ -395,7 +395,7 @@ vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf)
 return ret;
 page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node);
-if (unlikely(page_offset >= bo->resource->num_pages)) {
+if (unlikely(page_offset >= PFN_UP(bo->resource->size))) {
 ret = VM_FAULT_SIGBUS;
 goto out_unlock;
 }
@@ -438,7 +438,7 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
 page_offset = vmf->pgoff -
 drm_vma_node_start(&bo->base.vma_node);
-if (page_offset >= bo->resource->num_pages ||
+if (page_offset >= PFN_UP(bo->resource->size) ||
 vmw_resources_clean(vbo, page_offset,
 page_offset + PAGE_SIZE,
 &allowed_prefault)) {
@@ -197,7 +197,7 @@ struct ttm_bus_placement {
 * struct ttm_resource
 *
 * @start: Start of the allocation.
-* @num_pages: Actual size of resource in pages.
+* @size: Actual size of resource in bytes.
 * @mem_type: Resource type of the allocation.
 * @placement: Placement flags.
 * @bus: Placement on io bus accessible to the CPU
@@ -208,7 +208,7 @@ struct ttm_bus_placement {
 */
 struct ttm_resource {
 unsigned long start;
-unsigned long num_pages;
+size_t size;
 uint32_t mem_type;
 uint32_t placement;
 struct ttm_bus_placement bus;