Commit a48d4a33 authored by Dave Airlie

Merge tag 'drm-misc-fixes-2020-01-22-1' of git://anongit.freedesktop.org/drm/drm-misc into drm-fixes

- mst: Fix SST branch device handling (Wayne)
- panfrost: Fix mapping of globally visible BOs (Boris)

Cc: Wayne Lin <Wayne.Lin@amd.com>
Cc: Boris Brezillon <boris.brezillon@collabora.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Sean Paul <sean@poorly.run>
Link: https://patchwork.freedesktop.org/patch/msgid/20200122213725.GA22099@art_vandelay
parents def9d278 bdefca2d
drivers/gpu/drm/drm_dp_mst_topology.c

@@ -1916,73 +1916,90 @@ static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
 	return parent_lct + 1;
 }
 
-static int drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt)
+static bool drm_dp_mst_is_dp_mst_end_device(u8 pdt, bool mcs)
+{
+	switch (pdt) {
+	case DP_PEER_DEVICE_DP_LEGACY_CONV:
+	case DP_PEER_DEVICE_SST_SINK:
+		return true;
+	case DP_PEER_DEVICE_MST_BRANCHING:
+		/* For sst branch device */
+		if (!mcs)
+			return true;
+
+		return false;
+	}
+	return true;
+}
+
+static int
+drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
+		    bool new_mcs)
 {
 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
 	struct drm_dp_mst_branch *mstb;
 	u8 rad[8], lct;
 	int ret = 0;
 
-	if (port->pdt == new_pdt)
+	if (port->pdt == new_pdt && port->mcs == new_mcs)
 		return 0;
 
 	/* Teardown the old pdt, if there is one */
-	switch (port->pdt) {
-	case DP_PEER_DEVICE_DP_LEGACY_CONV:
-	case DP_PEER_DEVICE_SST_SINK:
-		/*
-		 * If the new PDT would also have an i2c bus, don't bother
-		 * with reregistering it
-		 */
-		if (new_pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
-		    new_pdt == DP_PEER_DEVICE_SST_SINK) {
-			port->pdt = new_pdt;
-			return 0;
-		}
+	if (port->pdt != DP_PEER_DEVICE_NONE) {
+		if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
+			/*
+			 * If the new PDT would also have an i2c bus,
+			 * don't bother with reregistering it
+			 */
+			if (new_pdt != DP_PEER_DEVICE_NONE &&
+			    drm_dp_mst_is_dp_mst_end_device(new_pdt, new_mcs)) {
+				port->pdt = new_pdt;
+				port->mcs = new_mcs;
+				return 0;
+			}
 
-		/* remove i2c over sideband */
-		drm_dp_mst_unregister_i2c_bus(&port->aux);
-		break;
-	case DP_PEER_DEVICE_MST_BRANCHING:
-		mutex_lock(&mgr->lock);
-		drm_dp_mst_topology_put_mstb(port->mstb);
-		port->mstb = NULL;
-		mutex_unlock(&mgr->lock);
-		break;
+			/* remove i2c over sideband */
+			drm_dp_mst_unregister_i2c_bus(&port->aux);
+		} else {
+			mutex_lock(&mgr->lock);
+			drm_dp_mst_topology_put_mstb(port->mstb);
+			port->mstb = NULL;
+			mutex_unlock(&mgr->lock);
+		}
 	}
 
 	port->pdt = new_pdt;
-	switch (port->pdt) {
-	case DP_PEER_DEVICE_DP_LEGACY_CONV:
-	case DP_PEER_DEVICE_SST_SINK:
-		/* add i2c over sideband */
-		ret = drm_dp_mst_register_i2c_bus(&port->aux);
-		break;
+	port->mcs = new_mcs;
 
-	case DP_PEER_DEVICE_MST_BRANCHING:
-		lct = drm_dp_calculate_rad(port, rad);
-		mstb = drm_dp_add_mst_branch_device(lct, rad);
-		if (!mstb) {
-			ret = -ENOMEM;
-			DRM_ERROR("Failed to create MSTB for port %p", port);
-			goto out;
-		}
+	if (port->pdt != DP_PEER_DEVICE_NONE) {
+		if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
+			/* add i2c over sideband */
+			ret = drm_dp_mst_register_i2c_bus(&port->aux);
+		} else {
+			lct = drm_dp_calculate_rad(port, rad);
+			mstb = drm_dp_add_mst_branch_device(lct, rad);
+			if (!mstb) {
+				ret = -ENOMEM;
+				DRM_ERROR("Failed to create MSTB for port %p",
+					  port);
+				goto out;
+			}
 
-		mutex_lock(&mgr->lock);
-		port->mstb = mstb;
-		mstb->mgr = port->mgr;
-		mstb->port_parent = port;
+			mutex_lock(&mgr->lock);
+			port->mstb = mstb;
+			mstb->mgr = port->mgr;
+			mstb->port_parent = port;
 
-		/*
-		 * Make sure this port's memory allocation stays
-		 * around until its child MSTB releases it
-		 */
-		drm_dp_mst_get_port_malloc(port);
-		mutex_unlock(&mgr->lock);
+			/*
+			 * Make sure this port's memory allocation stays
+			 * around until its child MSTB releases it
+			 */
+			drm_dp_mst_get_port_malloc(port);
+			mutex_unlock(&mgr->lock);
 
-		/* And make sure we send a link address for this */
-		ret = 1;
-		break;
+			/* And make sure we send a link address for this */
+			ret = 1;
+		}
 	}
 
 out:

@@ -2135,9 +2152,8 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
 		goto error;
 	}
 
-	if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
-	     port->pdt == DP_PEER_DEVICE_SST_SINK) &&
-	    port->port_num >= DP_MST_LOGICAL_PORT_0) {
+	if (port->pdt != DP_PEER_DEVICE_NONE &&
+	    drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
 		port->cached_edid = drm_get_edid(port->connector,
 						 &port->aux.ddc);
 		drm_connector_set_tile_property(port->connector);

@@ -2201,6 +2217,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
 	struct drm_dp_mst_port *port;
 	int old_ddps = 0, ret;
 	u8 new_pdt = DP_PEER_DEVICE_NONE;
+	bool new_mcs = 0;
 	bool created = false, send_link_addr = false, changed = false;
 
 	port = drm_dp_get_port(mstb, port_msg->port_number);

@@ -2245,7 +2262,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
 	port->input = port_msg->input_port;
 	if (!port->input)
 		new_pdt = port_msg->peer_device_type;
-	port->mcs = port_msg->mcs;
+	new_mcs = port_msg->mcs;
 	port->ddps = port_msg->ddps;
 	port->ldps = port_msg->legacy_device_plug_status;
 	port->dpcd_rev = port_msg->dpcd_revision;

@@ -2272,7 +2289,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
 		}
 	}
 
-	ret = drm_dp_port_set_pdt(port, new_pdt);
+	ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
 	if (ret == 1) {
 		send_link_addr = true;
 	} else if (ret < 0) {

@@ -2286,7 +2303,8 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
 	 * we're coming out of suspend. In this case, always resend the link
	 * address if there's an MSTB on this port
	 */
-	if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING)
+	if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
+	    port->mcs)
 		send_link_addr = true;
 
 	if (port->connector)

@@ -2323,6 +2341,7 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
 	struct drm_dp_mst_port *port;
 	int old_ddps, old_input, ret, i;
 	u8 new_pdt;
+	bool new_mcs;
 	bool dowork = false, create_connector = false;
 
 	port = drm_dp_get_port(mstb, conn_stat->port_number);

@@ -2354,7 +2373,6 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
 	old_ddps = port->ddps;
 	old_input = port->input;
 	port->input = conn_stat->input_port;
-	port->mcs = conn_stat->message_capability_status;
 	port->ldps = conn_stat->legacy_device_plug_status;
 	port->ddps = conn_stat->displayport_device_plug_status;

@@ -2367,8 +2385,8 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
 	}
 
 	new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
-
-	ret = drm_dp_port_set_pdt(port, new_pdt);
+	new_mcs = conn_stat->message_capability_status;
+	ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
 	if (ret == 1) {
 		dowork = true;
 	} else if (ret < 0) {

@@ -3929,6 +3947,8 @@ drm_dp_mst_detect_port(struct drm_connector *connector,
 	switch (port->pdt) {
 	case DP_PEER_DEVICE_NONE:
 	case DP_PEER_DEVICE_MST_BRANCHING:
+		if (!port->mcs)
+			ret = connector_status_connected;
 		break;
 
 	case DP_PEER_DEVICE_SST_SINK:

@@ -4541,7 +4561,7 @@ drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
 	if (port->connector)
 		port->mgr->cbs->destroy_connector(port->mgr, port->connector);
 
-	drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE);
+	drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
 	drm_dp_mst_put_port_malloc(port);
 }
...
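For readers less familiar with the MST code, the heart of the fix is the new drm_dp_mst_is_dp_mst_end_device() predicate: a branch device that cannot relay sideband messages (mcs == 0) is an SST-only branch and must be treated like an end device. Below is a minimal stand-alone sketch of that classification; the numeric DP_PEER_DEVICE_* values are assumed to mirror include/drm/drm_dp_helper.h, and the test harness is ours, not part of the commit.

#include <stdbool.h>
#include <stdio.h>

/* Values assumed to match include/drm/drm_dp_helper.h. */
#define DP_PEER_DEVICE_NONE		0x0
#define DP_PEER_DEVICE_SOURCE_OR_SST	0x1
#define DP_PEER_DEVICE_MST_BRANCHING	0x2
#define DP_PEER_DEVICE_SST_SINK		0x3
#define DP_PEER_DEVICE_DP_LEGACY_CONV	0x4

/* Same logic as drm_dp_mst_is_dp_mst_end_device() in the hunk above. */
static bool is_dp_mst_end_device(unsigned char pdt, bool mcs)
{
	switch (pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		return true;
	case DP_PEER_DEVICE_MST_BRANCHING:
		/* An SST-only branch device reports mcs == 0. */
		return !mcs;
	}
	return true;
}

int main(void)
{
	/* The case the fix targets: a branch device without message
	 * capability is now treated as an SST end device. */
	printf("branch, mcs=0: end device? %d\n",
	       is_dp_mst_end_device(DP_PEER_DEVICE_MST_BRANCHING, false));
	printf("branch, mcs=1: end device? %d\n",
	       is_dp_mst_end_device(DP_PEER_DEVICE_MST_BRANCHING, true));
	return 0;
}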
drivers/gpu/drm/panfrost/panfrost_drv.c

@@ -78,8 +78,10 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct
 static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
		struct drm_file *file)
 {
+	struct panfrost_file_priv *priv = file->driver_priv;
 	struct panfrost_gem_object *bo;
 	struct drm_panfrost_create_bo *args = data;
+	struct panfrost_gem_mapping *mapping;
 
 	if (!args->size || args->pad ||
 	    (args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP)))

@@ -95,7 +97,14 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
 	if (IS_ERR(bo))
 		return PTR_ERR(bo);
 
-	args->offset = bo->node.start << PAGE_SHIFT;
+	mapping = panfrost_gem_mapping_get(bo, priv);
+	if (!mapping) {
+		drm_gem_object_put_unlocked(&bo->base.base);
+		return -EINVAL;
+	}
+
+	args->offset = mapping->mmnode.start << PAGE_SHIFT;
+	panfrost_gem_mapping_put(mapping);
 
 	return 0;
 }

@@ -119,6 +128,11 @@ panfrost_lookup_bos(struct drm_device *dev,
 		  struct drm_panfrost_submit *args,
 		  struct panfrost_job *job)
 {
+	struct panfrost_file_priv *priv = file_priv->driver_priv;
+	struct panfrost_gem_object *bo;
+	unsigned int i;
+	int ret;
+
 	job->bo_count = args->bo_handle_count;
 
 	if (!job->bo_count)

@@ -130,9 +144,32 @@ panfrost_lookup_bos(struct drm_device *dev,
 	if (!job->implicit_fences)
 		return -ENOMEM;
 
-	return drm_gem_objects_lookup(file_priv,
-				      (void __user *)(uintptr_t)args->bo_handles,
-				      job->bo_count, &job->bos);
+	ret = drm_gem_objects_lookup(file_priv,
+				     (void __user *)(uintptr_t)args->bo_handles,
+				     job->bo_count, &job->bos);
+	if (ret)
+		return ret;
+
+	job->mappings = kvmalloc_array(job->bo_count,
+				       sizeof(struct panfrost_gem_mapping *),
+				       GFP_KERNEL | __GFP_ZERO);
+	if (!job->mappings)
+		return -ENOMEM;
+
+	for (i = 0; i < job->bo_count; i++) {
+		struct panfrost_gem_mapping *mapping;
+
+		bo = to_panfrost_bo(job->bos[i]);
+		mapping = panfrost_gem_mapping_get(bo, priv);
+		if (!mapping) {
+			ret = -EINVAL;
+			break;
+		}
+
+		job->mappings[i] = mapping;
+	}
+
+	return ret;
 }
 
 /**

@@ -320,7 +357,9 @@ static int panfrost_ioctl_mmap_bo(struct drm_device *dev, void *data,
 static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
 {
+	struct panfrost_file_priv *priv = file_priv->driver_priv;
 	struct drm_panfrost_get_bo_offset *args = data;
+	struct panfrost_gem_mapping *mapping;
 	struct drm_gem_object *gem_obj;
 	struct panfrost_gem_object *bo;

@@ -331,18 +370,26 @@ static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
 	}
 
 	bo = to_panfrost_bo(gem_obj);
-	args->offset = bo->node.start << PAGE_SHIFT;
+	mapping = panfrost_gem_mapping_get(bo, priv);
 	drm_gem_object_put_unlocked(gem_obj);
+	if (!mapping)
+		return -EINVAL;
+
+	args->offset = mapping->mmnode.start << PAGE_SHIFT;
+	panfrost_gem_mapping_put(mapping);
 	return 0;
 }
 
 static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
 {
+	struct panfrost_file_priv *priv = file_priv->driver_priv;
 	struct drm_panfrost_madvise *args = data;
 	struct panfrost_device *pfdev = dev->dev_private;
 	struct drm_gem_object *gem_obj;
+	struct panfrost_gem_object *bo;
+	int ret = 0;
 
 	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
 	if (!gem_obj) {

@@ -350,22 +397,48 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
 		return -ENOENT;
 	}
 
+	bo = to_panfrost_bo(gem_obj);
+
 	mutex_lock(&pfdev->shrinker_lock);
+	mutex_lock(&bo->mappings.lock);
+	if (args->madv == PANFROST_MADV_DONTNEED) {
+		struct panfrost_gem_mapping *first;
+
+		first = list_first_entry(&bo->mappings.list,
+					 struct panfrost_gem_mapping,
+					 node);
+
+		/*
+		 * If we want to mark the BO purgeable, there must be only one
+		 * user: the caller FD.
+		 * We could do something smarter and mark the BO purgeable only
+		 * when all its users have marked it purgeable, but globally
+		 * visible/shared BOs are likely to never be marked purgeable
+		 * anyway, so let's not bother.
+		 */
+		if (!list_is_singular(&bo->mappings.list) ||
+		    WARN_ON_ONCE(first->mmu != &priv->mmu)) {
+			ret = -EINVAL;
+			goto out_unlock_mappings;
+		}
+	}
+
 	args->retained = drm_gem_shmem_madvise(gem_obj, args->madv);
 
 	if (args->retained) {
-		struct panfrost_gem_object *bo = to_panfrost_bo(gem_obj);
-
 		if (args->madv == PANFROST_MADV_DONTNEED)
 			list_add_tail(&bo->base.madv_list,
 				      &pfdev->shrinker_list);
 		else if (args->madv == PANFROST_MADV_WILLNEED)
 			list_del_init(&bo->base.madv_list);
 	}
 
+out_unlock_mappings:
+	mutex_unlock(&bo->mappings.lock);
 	mutex_unlock(&pfdev->shrinker_lock);
 
 	drm_gem_object_put_unlocked(gem_obj);
-	return 0;
+	return ret;
 }
 
 int panfrost_unstable_ioctl_check(void)
...
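All three reworked ioctls above share one idea: a BO no longer has a single GPU offset, it has one mapping per file descriptor, so the offset must be resolved through the caller's own mapping. A stand-alone sketch of why (a simplified model with hypothetical names; the real lookup is panfrost_gem_mapping_get() in panfrost_gem.c below):

#include <stdio.h>

struct mapping { unsigned long start; int as; };	/* per-FD GPU mapping */
struct bo { struct mapping *maps[4]; int nmaps; };	/* one BO, many mappings */

/* Find the mapping belonging to a given address space (FD), if any. */
static struct mapping *mapping_get(struct bo *bo, int as)
{
	for (int i = 0; i < bo->nmaps; i++)
		if (bo->maps[i]->as == as)
			return bo->maps[i];
	return NULL;	/* the real driver returns -EINVAL to userspace */
}

int main(void)
{
	struct mapping m1 = { .start = 0x1000, .as = 1 };
	struct mapping m2 = { .start = 0x8000, .as = 2 };	/* same BO, other FD */
	struct bo bo = { .maps = { &m1, &m2 }, .nmaps = 2 };

	/* A single bo->node.start cannot be right for both callers. */
	printf("FD1 sees offset %#lx\n", mapping_get(&bo, 1)->start);
	printf("FD2 sees offset %#lx\n", mapping_get(&bo, 2)->start);
	return 0;
}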
drivers/gpu/drm/panfrost/panfrost_gem.c

@@ -29,6 +29,12 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
 	list_del_init(&bo->base.madv_list);
 	mutex_unlock(&pfdev->shrinker_lock);
 
+	/*
+	 * If we still have mappings attached to the BO, there's a problem in
+	 * our refcounting.
+	 */
+	WARN_ON_ONCE(!list_empty(&bo->mappings.list));
+
 	if (bo->sgts) {
 		int i;
 		int n_sgt = bo->base.base.size / SZ_2M;

@@ -46,6 +52,69 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
 	drm_gem_shmem_free_object(obj);
 }
 
+struct panfrost_gem_mapping *
+panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
+			 struct panfrost_file_priv *priv)
+{
+	struct panfrost_gem_mapping *iter, *mapping = NULL;
+
+	mutex_lock(&bo->mappings.lock);
+	list_for_each_entry(iter, &bo->mappings.list, node) {
+		if (iter->mmu == &priv->mmu) {
+			kref_get(&iter->refcount);
+			mapping = iter;
+			break;
+		}
+	}
+	mutex_unlock(&bo->mappings.lock);
+
+	return mapping;
+}
+
+static void
+panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)
+{
+	struct panfrost_file_priv *priv;
+
+	if (mapping->active)
+		panfrost_mmu_unmap(mapping);
+
+	priv = container_of(mapping->mmu, struct panfrost_file_priv, mmu);
+	spin_lock(&priv->mm_lock);
+	if (drm_mm_node_allocated(&mapping->mmnode))
+		drm_mm_remove_node(&mapping->mmnode);
+	spin_unlock(&priv->mm_lock);
+}
+
+static void panfrost_gem_mapping_release(struct kref *kref)
+{
+	struct panfrost_gem_mapping *mapping;
+
+	mapping = container_of(kref, struct panfrost_gem_mapping, refcount);
+
+	panfrost_gem_teardown_mapping(mapping);
+	drm_gem_object_put_unlocked(&mapping->obj->base.base);
+	kfree(mapping);
+}
+
+void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping)
+{
+	if (!mapping)
+		return;
+
+	kref_put(&mapping->refcount, panfrost_gem_mapping_release);
+}
+
+void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo)
+{
+	struct panfrost_gem_mapping *mapping;
+
+	mutex_lock(&bo->mappings.lock);
+	list_for_each_entry(mapping, &bo->mappings.list, node)
+		panfrost_gem_teardown_mapping(mapping);
+	mutex_unlock(&bo->mappings.lock);
+}
+
 int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
 {
 	int ret;

@@ -54,6 +123,16 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
 	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
 	unsigned long color = bo->noexec ? PANFROST_BO_NOEXEC : 0;
 	struct panfrost_file_priv *priv = file_priv->driver_priv;
+	struct panfrost_gem_mapping *mapping;
+
+	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
+	if (!mapping)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&mapping->node);
+	kref_init(&mapping->refcount);
+	drm_gem_object_get(obj);
+	mapping->obj = bo;
 
 	/*
	 * Executable buffers cannot cross a 16MB boundary as the program

@@ -66,37 +145,48 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
 	else
 		align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;
 
-	bo->mmu = &priv->mmu;
+	mapping->mmu = &priv->mmu;
 	spin_lock(&priv->mm_lock);
-	ret = drm_mm_insert_node_generic(&priv->mm, &bo->node,
+	ret = drm_mm_insert_node_generic(&priv->mm, &mapping->mmnode,
 					 size >> PAGE_SHIFT, align, color, 0);
 	spin_unlock(&priv->mm_lock);
 	if (ret)
-		return ret;
+		goto err;
 
 	if (!bo->is_heap) {
-		ret = panfrost_mmu_map(bo);
-		if (ret) {
-			spin_lock(&priv->mm_lock);
-			drm_mm_remove_node(&bo->node);
-			spin_unlock(&priv->mm_lock);
-		}
+		ret = panfrost_mmu_map(mapping);
+		if (ret)
+			goto err;
 	}
+
+	mutex_lock(&bo->mappings.lock);
+	WARN_ON(bo->base.madv != PANFROST_MADV_WILLNEED);
+	list_add_tail(&mapping->node, &bo->mappings.list);
+	mutex_unlock(&bo->mappings.lock);
+
+err:
+	if (ret)
+		panfrost_gem_mapping_put(mapping);
 	return ret;
 }
 
 void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
 {
-	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
 	struct panfrost_file_priv *priv = file_priv->driver_priv;
+	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+	struct panfrost_gem_mapping *mapping = NULL, *iter;
 
-	if (bo->is_mapped)
-		panfrost_mmu_unmap(bo);
+	mutex_lock(&bo->mappings.lock);
+	list_for_each_entry(iter, &bo->mappings.list, node) {
+		if (iter->mmu == &priv->mmu) {
+			mapping = iter;
+			list_del(&iter->node);
+			break;
+		}
+	}
+	mutex_unlock(&bo->mappings.lock);
 
-	spin_lock(&priv->mm_lock);
-	if (drm_mm_node_allocated(&bo->node))
-		drm_mm_remove_node(&bo->node);
-	spin_unlock(&priv->mm_lock);
+	panfrost_gem_mapping_put(mapping);
 }
 
 static int panfrost_gem_pin(struct drm_gem_object *obj)

@@ -136,6 +226,8 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t
 	if (!obj)
 		return NULL;
 
+	INIT_LIST_HEAD(&obj->mappings.list);
+	mutex_init(&obj->mappings.lock);
 	obj->base.base.funcs = &panfrost_gem_funcs;
 
 	return &obj->base.base;
...
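The lifetime rules implemented above are easier to see in isolation: panfrost_gem_open() creates a mapping with one reference owned by the BO's mappings list, other users (jobs, ioctls, perfcnt) take extra references, and panfrost_gem_close() only unlinks the mapping and drops the list's reference, so the GPU unmap is deferred until the last user, for example a still-running job, is done. A stand-alone model of that behaviour, with simplified types and no locking:

#include <stdio.h>

struct mapping { int refcount; int active; };

static void mapping_get(struct mapping *m) { m->refcount++; }

static void mapping_put(struct mapping *m)
{
	if (--m->refcount)
		return;
	/* Models panfrost_gem_teardown_mapping(): unmap on last put. */
	m->active = 0;
	printf("last ref dropped: GPU mapping torn down\n");
}

int main(void)
{
	struct mapping m = { .refcount = 1, .active = 1 }; /* gem_open */

	mapping_get(&m);	/* a submitted job holds the mapping */
	mapping_put(&m);	/* gem_close: list reference dropped */
	printf("after close: active=%d (job still running)\n", m.active);
	mapping_put(&m);	/* job cleanup: teardown happens here */
	return 0;
}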
drivers/gpu/drm/panfrost/panfrost_gem.h

@@ -13,23 +13,46 @@ struct panfrost_gem_object {
 	struct drm_gem_shmem_object base;
 	struct sg_table *sgts;
 
-	struct panfrost_mmu *mmu;
-	struct drm_mm_node node;
-	bool is_mapped		:1;
+	/*
	 * Use a list for now. If searching a mapping ever becomes the
	 * bottleneck, we should consider using an RB-tree, or even better,
	 * let the core store drm_gem_object_mapping entries (where we
	 * could place driver specific data) instead of drm_gem_object ones
	 * in its drm_file->object_idr table.
	 *
	 * struct drm_gem_object_mapping {
	 *	struct drm_gem_object *obj;
	 *	void *driver_priv;
	 * };
	 */
+	struct {
+		struct list_head list;
+		struct mutex lock;
+	} mappings;
+
 	bool noexec		:1;
 	bool is_heap		:1;
 };
 
+struct panfrost_gem_mapping {
+	struct list_head node;
+	struct kref refcount;
+	struct panfrost_gem_object *obj;
+	struct drm_mm_node mmnode;
+	struct panfrost_mmu *mmu;
+	bool active		:1;
+};
+
 static inline
 struct panfrost_gem_object *to_panfrost_bo(struct drm_gem_object *obj)
 {
 	return container_of(to_drm_gem_shmem_obj(obj), struct panfrost_gem_object, base);
 }
 
-static inline
-struct panfrost_gem_object *drm_mm_node_to_panfrost_bo(struct drm_mm_node *node)
+static inline struct panfrost_gem_mapping *
+drm_mm_node_to_panfrost_mapping(struct drm_mm_node *node)
 {
-	return container_of(node, struct panfrost_gem_object, node);
+	return container_of(node, struct panfrost_gem_mapping, mmnode);
 }
 
 struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size);

@@ -49,6 +72,12 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv);
 void panfrost_gem_close(struct drm_gem_object *obj,
			struct drm_file *file_priv);
 
+struct panfrost_gem_mapping *
+panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
+			 struct panfrost_file_priv *priv);
+void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping);
+void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo);
+
 void panfrost_gem_shrinker_init(struct drm_device *dev);
 void panfrost_gem_shrinker_cleanup(struct drm_device *dev);
...
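The drm_mm_node is embedded in struct panfrost_gem_mapping precisely so the MMU fault handler can go from an address-space node back to the owning mapping with container_of(), which is what drm_mm_node_to_panfrost_mapping() above does. A stand-alone illustration of the trick (the struct below is a simplified stand-in, not the real one):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct drm_mm_node { unsigned long start, size; };

struct mapping_model {		/* simplified panfrost_gem_mapping */
	int refcount;
	struct drm_mm_node mmnode;
};

int main(void)
{
	struct mapping_model m = { .refcount = 1, .mmnode = { 0x1000, 16 } };
	struct drm_mm_node *node = &m.mmnode;	/* as found by drm_mm walk */

	/* What drm_mm_node_to_panfrost_mapping(node) expands to: */
	struct mapping_model *back =
		container_of(node, struct mapping_model, mmnode);

	printf("recovered mapping: refcount=%d start=%#lx\n",
	       back->refcount, back->mmnode.start);
	return 0;
}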
drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c

@@ -39,11 +39,12 @@ panfrost_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc
 static bool panfrost_gem_purge(struct drm_gem_object *obj)
 {
 	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
 
 	if (!mutex_trylock(&shmem->pages_lock))
 		return false;
 
-	panfrost_mmu_unmap(to_panfrost_bo(obj));
+	panfrost_gem_teardown_mappings(bo);
 	drm_gem_shmem_purge_locked(obj);
 
 	mutex_unlock(&shmem->pages_lock);
...
drivers/gpu/drm/panfrost/panfrost_job.c

@@ -268,9 +268,20 @@ static void panfrost_job_cleanup(struct kref *ref)
 	dma_fence_put(job->done_fence);
 	dma_fence_put(job->render_done_fence);
 
-	if (job->bos) {
+	if (job->mappings) {
 		for (i = 0; i < job->bo_count; i++)
+			panfrost_gem_mapping_put(job->mappings[i]);
+		kvfree(job->mappings);
+	}
+
+	if (job->bos) {
+		struct panfrost_gem_object *bo;
+
+		for (i = 0; i < job->bo_count; i++) {
+			bo = to_panfrost_bo(job->bos[i]);
 			drm_gem_object_put_unlocked(job->bos[i]);
+		}
 
 		kvfree(job->bos);
 	}
...
drivers/gpu/drm/panfrost/panfrost_job.h

@@ -32,6 +32,7 @@ struct panfrost_job {
 
 	/* Exclusive fences we have taken from the BOs to wait for */
 	struct dma_fence **implicit_fences;
+	struct panfrost_gem_mapping **mappings;
 	struct drm_gem_object **bos;
 	u32 bo_count;
...
drivers/gpu/drm/panfrost/panfrost_mmu.c

@@ -269,14 +269,15 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
 	return 0;
 }
 
-int panfrost_mmu_map(struct panfrost_gem_object *bo)
+int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
 {
+	struct panfrost_gem_object *bo = mapping->obj;
 	struct drm_gem_object *obj = &bo->base.base;
 	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
 	struct sg_table *sgt;
 	int prot = IOMMU_READ | IOMMU_WRITE;
 
-	if (WARN_ON(bo->is_mapped))
+	if (WARN_ON(mapping->active))
 		return 0;
 
 	if (bo->noexec)

@@ -286,25 +287,28 @@ int panfrost_mmu_map(struct panfrost_gem_object *bo)
 	if (WARN_ON(IS_ERR(sgt)))
 		return PTR_ERR(sgt);
 
-	mmu_map_sg(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, prot, sgt);
-	bo->is_mapped = true;
+	mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
+		   prot, sgt);
+	mapping->active = true;
 
 	return 0;
 }
 
-void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
+void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
 {
+	struct panfrost_gem_object *bo = mapping->obj;
 	struct drm_gem_object *obj = &bo->base.base;
 	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
-	struct io_pgtable_ops *ops = bo->mmu->pgtbl_ops;
-	u64 iova = bo->node.start << PAGE_SHIFT;
-	size_t len = bo->node.size << PAGE_SHIFT;
+	struct io_pgtable_ops *ops = mapping->mmu->pgtbl_ops;
+	u64 iova = mapping->mmnode.start << PAGE_SHIFT;
+	size_t len = mapping->mmnode.size << PAGE_SHIFT;
 	size_t unmapped_len = 0;
 
-	if (WARN_ON(!bo->is_mapped))
+	if (WARN_ON(!mapping->active))
 		return;
 
-	dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx", bo->mmu->as, iova, len);
+	dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx",
+		mapping->mmu->as, iova, len);
 
 	while (unmapped_len < len) {
 		size_t unmapped_page;

@@ -318,8 +322,9 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
 		unmapped_len += pgsize;
 	}
 
-	panfrost_mmu_flush_range(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, len);
-	bo->is_mapped = false;
+	panfrost_mmu_flush_range(pfdev, mapping->mmu,
+				 mapping->mmnode.start << PAGE_SHIFT, len);
+	mapping->active = false;
 }
 
 static void mmu_tlb_inv_context_s1(void *cookie)

@@ -394,10 +399,10 @@ void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv)
 	free_io_pgtable_ops(mmu->pgtbl_ops);
 }
 
-static struct panfrost_gem_object *
-addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr)
+static struct panfrost_gem_mapping *
+addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
 {
-	struct panfrost_gem_object *bo = NULL;
+	struct panfrost_gem_mapping *mapping = NULL;
 	struct panfrost_file_priv *priv;
 	struct drm_mm_node *node;
 	u64 offset = addr >> PAGE_SHIFT;

@@ -418,8 +423,9 @@ addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr)
 	drm_mm_for_each_node(node, &priv->mm) {
 		if (offset >= node->start &&
 		    offset < (node->start + node->size)) {
-			bo = drm_mm_node_to_panfrost_bo(node);
-			drm_gem_object_get(&bo->base.base);
+			mapping = drm_mm_node_to_panfrost_mapping(node);
+
+			kref_get(&mapping->refcount);
 			break;
 		}
 	}

@@ -427,7 +433,7 @@ addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr)
 	spin_unlock(&priv->mm_lock);
 out:
 	spin_unlock(&pfdev->as_lock);
-	return bo;
+	return mapping;
 }
 
 #define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)

@@ -436,28 +442,30 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
		       u64 addr)
 {
 	int ret, i;
+	struct panfrost_gem_mapping *bomapping;
 	struct panfrost_gem_object *bo;
 	struct address_space *mapping;
 	pgoff_t page_offset;
 	struct sg_table *sgt;
 	struct page **pages;
 
-	bo = addr_to_drm_mm_node(pfdev, as, addr);
-	if (!bo)
+	bomapping = addr_to_mapping(pfdev, as, addr);
+	if (!bomapping)
 		return -ENOENT;
 
+	bo = bomapping->obj;
 	if (!bo->is_heap) {
 		dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
-			 bo->node.start << PAGE_SHIFT);
+			 bomapping->mmnode.start << PAGE_SHIFT);
 		ret = -EINVAL;
 		goto err_bo;
 	}
-	WARN_ON(bo->mmu->as != as);
+	WARN_ON(bomapping->mmu->as != as);
 
 	/* Assume 2MB alignment and size multiple */
 	addr &= ~((u64)SZ_2M - 1);
 	page_offset = addr >> PAGE_SHIFT;
-	page_offset -= bo->node.start;
+	page_offset -= bomapping->mmnode.start;
 
 	mutex_lock(&bo->base.pages_lock);

@@ -509,13 +517,14 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
 		goto err_map;
 	}
 
-	mmu_map_sg(pfdev, bo->mmu, addr, IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
+	mmu_map_sg(pfdev, bomapping->mmu, addr,
+		   IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
 
-	bo->is_mapped = true;
+	bomapping->active = true;
 
 	dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
 
-	drm_gem_object_put_unlocked(&bo->base.base);
+	panfrost_gem_mapping_put(bomapping);
 
 	return 0;
...
drivers/gpu/drm/panfrost/panfrost_mmu.h

@@ -4,12 +4,12 @@
 #ifndef __PANFROST_MMU_H__
 #define __PANFROST_MMU_H__
 
-struct panfrost_gem_object;
+struct panfrost_gem_mapping;
 struct panfrost_file_priv;
 struct panfrost_mmu;
 
-int panfrost_mmu_map(struct panfrost_gem_object *bo);
-void panfrost_mmu_unmap(struct panfrost_gem_object *bo);
+int panfrost_mmu_map(struct panfrost_gem_mapping *mapping);
+void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping);
 
 int panfrost_mmu_init(struct panfrost_device *pfdev);
 void panfrost_mmu_fini(struct panfrost_device *pfdev);
...
drivers/gpu/drm/panfrost/panfrost_perfcnt.c

@@ -25,7 +25,7 @@
 #define V4_SHADERS_PER_COREGROUP	4
 
 struct panfrost_perfcnt {
-	struct panfrost_gem_object *bo;
+	struct panfrost_gem_mapping *mapping;
 	size_t bosize;
 	void *buf;
 	struct panfrost_file_priv *user;

@@ -49,7 +49,7 @@ static int panfrost_perfcnt_dump_locked(struct panfrost_device *pfdev)
 	int ret;
 
 	reinit_completion(&pfdev->perfcnt->dump_comp);
-	gpuva = pfdev->perfcnt->bo->node.start << PAGE_SHIFT;
+	gpuva = pfdev->perfcnt->mapping->mmnode.start << PAGE_SHIFT;
 	gpu_write(pfdev, GPU_PERFCNT_BASE_LO, gpuva);
 	gpu_write(pfdev, GPU_PERFCNT_BASE_HI, gpuva >> 32);
 	gpu_write(pfdev, GPU_INT_CLEAR,

@@ -89,17 +89,22 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
 	if (IS_ERR(bo))
 		return PTR_ERR(bo);
 
-	perfcnt->bo = to_panfrost_bo(&bo->base);
-
 	/* Map the perfcnt buf in the address space attached to file_priv. */
-	ret = panfrost_gem_open(&perfcnt->bo->base.base, file_priv);
+	ret = panfrost_gem_open(&bo->base, file_priv);
 	if (ret)
 		goto err_put_bo;
 
+	perfcnt->mapping = panfrost_gem_mapping_get(to_panfrost_bo(&bo->base),
+						    user);
+	if (!perfcnt->mapping) {
+		ret = -EINVAL;
+		goto err_close_bo;
+	}
+
 	perfcnt->buf = drm_gem_shmem_vmap(&bo->base);
 	if (IS_ERR(perfcnt->buf)) {
 		ret = PTR_ERR(perfcnt->buf);
-		goto err_close_bo;
+		goto err_put_mapping;
 	}
 
 	/*

@@ -154,12 +159,17 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
 	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_8186))
 		gpu_write(pfdev, GPU_PRFCNT_TILER_EN, 0xffffffff);
 
+	/* The BO ref is retained by the mapping. */
+	drm_gem_object_put_unlocked(&bo->base);
+
 	return 0;
 
 err_vunmap:
-	drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf);
+	drm_gem_shmem_vunmap(&bo->base, perfcnt->buf);
+err_put_mapping:
+	panfrost_gem_mapping_put(perfcnt->mapping);
 err_close_bo:
-	panfrost_gem_close(&perfcnt->bo->base.base, file_priv);
+	panfrost_gem_close(&bo->base, file_priv);
 err_put_bo:
 	drm_gem_object_put_unlocked(&bo->base);
 	return ret;

@@ -182,11 +192,11 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
		  GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF));
 
 	perfcnt->user = NULL;
-	drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf);
+	drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base.base, perfcnt->buf);
 	perfcnt->buf = NULL;
-	panfrost_gem_close(&perfcnt->bo->base.base, file_priv);
-	drm_gem_object_put_unlocked(&perfcnt->bo->base.base);
-	perfcnt->bo = NULL;
+	panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv);
+	panfrost_gem_mapping_put(perfcnt->mapping);
+	perfcnt->mapping = NULL;
 	pm_runtime_mark_last_busy(pfdev->dev);
 	pm_runtime_put_autosuspend(pfdev->dev);
...