Commit 187685cb authored by Tvrtko Ursulin

drm/i915: Make GEM object alloc/free and stolen object creation take dev_priv

Pass dev_priv where it is more appropriate, and to be consistent with
the general direction of the driver.

v2: Leave out object alloc/free inlining. (Joonas Lahtinen)
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
parent 908764f6
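
For context (a sketch based on the usual i915 layout at the time, not part of this diff): struct drm_i915_private embeds the DRM core's struct drm_device as its "drm" member, so to_i915() is a plain container_of() and &dev_priv->drm recovers the core device. Passing dev_priv directly therefore just drops a per-call conversion; call sites that only have a drm_device, such as the dma-buf import path below, convert once with to_i915(dev).

/*
 * Approximate shape of the helper assumed above; the exact definition
 * lives in i915_drv.h.
 */
static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
	return container_of(dev, struct drm_i915_private, drm);
}
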
@@ -2981,7 +2981,7 @@ void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
 int i915_gem_freeze(struct drm_i915_private *dev_priv);
 int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
-void *i915_gem_object_alloc(struct drm_device *dev);
+void *i915_gem_object_alloc(struct drm_i915_private *dev_priv);
 void i915_gem_object_free(struct drm_i915_gem_object *obj);
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops);
@@ -3366,9 +3366,9 @@ void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
 int i915_gem_init_stolen(struct drm_i915_private *dev_priv);
 void i915_gem_cleanup_stolen(struct drm_device *dev);
 struct drm_i915_gem_object *
-i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
+i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, u32 size);
 struct drm_i915_gem_object *
-i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
+i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
					       u32 stolen_offset,
					       u32 gtt_offset,
					       u32 size);
@@ -622,9 +622,8 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
 	return ret;
 }
-void *i915_gem_object_alloc(struct drm_device *dev)
+void *i915_gem_object_alloc(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
 }
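
As an aside, the hunk above leaves i915_gem_object_alloc() as a thin wrapper around a dedicated slab cache (dev_priv->objects). A minimal standalone sketch of that kmem_cache pattern, with hypothetical names (example_obj, example_cache) used purely for illustration:

#include <linux/slab.h>

struct example_obj {
	unsigned int id;
};

/* One cache for all example_obj allocations, created once at init time. */
static struct kmem_cache *example_cache;

static int example_cache_init(void)
{
	example_cache = KMEM_CACHE(example_obj, SLAB_HWCACHE_ALIGN);
	return example_cache ? 0 : -ENOMEM;
}

static struct example_obj *example_alloc(void)
{
	/* Zeroed allocation from the cache, mirroring kmem_cache_zalloc() above. */
	return kmem_cache_zalloc(example_cache, GFP_KERNEL);
}

static void example_free(struct example_obj *obj)
{
	kmem_cache_free(example_cache, obj);
}
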
@@ -3990,7 +3989,7 @@ i915_gem_object_create(struct drm_device *dev, u64 size)
 	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);
-	obj = i915_gem_object_alloc(dev);
+	obj = i915_gem_object_alloc(dev_priv);
 	if (obj == NULL)
		return ERR_PTR(-ENOMEM);
@@ -278,7 +278,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 	get_dma_buf(dma_buf);
-	obj = i915_gem_object_alloc(dev);
+	obj = i915_gem_object_alloc(to_i915(dev));
 	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
@@ -155,7 +155,7 @@ i915_gem_object_create_internal(struct drm_i915_private *i915,
 {
 	struct drm_i915_gem_object *obj;
-	obj = i915_gem_object_alloc(&i915->drm);
+	obj = i915_gem_object_alloc(i915);
 	if (!obj)
		return ERR_PTR(-ENOMEM);
@@ -579,22 +579,21 @@ static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
 };
 static struct drm_i915_gem_object *
-_i915_gem_object_create_stolen(struct drm_device *dev,
+_i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
			       struct drm_mm_node *stolen)
 {
 	struct drm_i915_gem_object *obj;
-	obj = i915_gem_object_alloc(dev);
+	obj = i915_gem_object_alloc(dev_priv);
 	if (obj == NULL)
		return NULL;
-	drm_gem_private_object_init(dev, &obj->base, stolen->size);
+	drm_gem_private_object_init(&dev_priv->drm, &obj->base, stolen->size);
 	i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
 	obj->stolen = stolen;
 	obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
-	obj->cache_level = HAS_LLC(to_i915(dev)) ?
-			   I915_CACHE_LLC : I915_CACHE_NONE;
+	obj->cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
 	if (i915_gem_object_pin_pages(obj))
		goto cleanup;
@@ -607,9 +606,8 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
 }
 struct drm_i915_gem_object *
-i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
+i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, u32 size)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_object *obj;
 	struct drm_mm_node *stolen;
 	int ret;
@@ -630,7 +628,7 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
		return NULL;
 	}
-	obj = _i915_gem_object_create_stolen(dev, stolen);
+	obj = _i915_gem_object_create_stolen(dev_priv, stolen);
 	if (obj)
		return obj;
@@ -640,12 +638,11 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
 }
 struct drm_i915_gem_object *
-i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
+i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
					       u32 stolen_offset,
					       u32 gtt_offset,
					       u32 size)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct drm_i915_gem_object *obj;
 	struct drm_mm_node *stolen;
@@ -655,7 +652,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;
-	lockdep_assert_held(&dev->struct_mutex);
+	lockdep_assert_held(&dev_priv->drm.struct_mutex);
 	DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
			stolen_offset, gtt_offset, size);
@@ -680,7 +677,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
		return NULL;
 	}
-	obj = _i915_gem_object_create_stolen(dev, stolen);
+	obj = _i915_gem_object_create_stolen(dev_priv, stolen);
 	if (obj == NULL) {
		DRM_DEBUG_KMS("failed to allocate stolen object\n");
		i915_gem_stolen_remove_node(dev_priv, stolen);
@@ -784,7 +784,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
		return -ENODEV;
 	}
-	obj = i915_gem_object_alloc(dev);
+	obj = i915_gem_object_alloc(dev_priv);
 	if (obj == NULL)
		return -ENOMEM;
@@ -2689,7 +2689,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
 	mutex_lock(&dev->struct_mutex);
-	obj = i915_gem_object_create_stolen_for_preallocated(dev,
+	obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
							      base_aligned,
							      base_aligned,
							      size_aligned);
@@ -257,7 +257,7 @@ int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
 	WARN_ON(engine->scratch);
-	obj = i915_gem_object_create_stolen(&engine->i915->drm, size);
+	obj = i915_gem_object_create_stolen(engine->i915, size);
 	if (!obj)
		obj = i915_gem_object_create_internal(engine->i915, size);
 	if (IS_ERR(obj)) {
@@ -145,7 +145,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
	 * important and we should probably use that space with FBC or other
	 * features. */
 	if (size * 2 < ggtt->stolen_usable_size)
-		obj = i915_gem_object_create_stolen(dev, size);
+		obj = i915_gem_object_create_stolen(dev_priv, size);
 	if (obj == NULL)
		obj = i915_gem_object_create(dev, size);
 	if (IS_ERR(obj)) {
@@ -1391,8 +1391,7 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
 	reg_bo = NULL;
 	if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
-		reg_bo = i915_gem_object_create_stolen(&dev_priv->drm,
-						       PAGE_SIZE);
+		reg_bo = i915_gem_object_create_stolen(dev_priv, PAGE_SIZE);
 	if (reg_bo == NULL)
		reg_bo = i915_gem_object_create(&dev_priv->drm, PAGE_SIZE);
 	if (IS_ERR(reg_bo))
@@ -5805,7 +5805,7 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
		int pcbr_offset;
		pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
-		pctx = i915_gem_object_create_stolen_for_preallocated(&dev_priv->drm,
+		pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv,
								       pcbr_offset,
								       I915_GTT_OFFSET_NONE,
								       pctx_size);
@@ -5822,7 +5822,7 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
	 * overlap with other ranges, such as the frame buffer, protected
	 * memory, or any other relevant ranges.
	 */
-	pctx = i915_gem_object_create_stolen(&dev_priv->drm, pctx_size);
+	pctx = i915_gem_object_create_stolen(dev_priv, pctx_size);
 	if (!pctx) {
		DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
		goto out;
@@ -1869,7 +1869,7 @@ intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
 	struct drm_i915_gem_object *obj;
 	struct i915_vma *vma;
-	obj = i915_gem_object_create_stolen(&dev_priv->drm, size);
+	obj = i915_gem_object_create_stolen(dev_priv, size);
 	if (!obj)
		obj = i915_gem_object_create(&dev_priv->drm, size);
 	if (IS_ERR(obj))