Commit 8e458fe2 authored by Chris Wilson

drm/i915: Generalise the clflush dma-worker

Extract the dma-fence worker used by clflush for wider use, as we
anticipate using workers coupled to dma-fences more frequently.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190821191606.17001-1-chris@chris-wilson.co.uk
parent 829e8def
...@@ -62,6 +62,7 @@ i915-y += \ ...@@ -62,6 +62,7 @@ i915-y += \
i915_memcpy.o \ i915_memcpy.o \
i915_mm.o \ i915_mm.o \
i915_sw_fence.o \ i915_sw_fence.o \
i915_sw_fence_work.o \
i915_syncmap.o \ i915_syncmap.o \
i915_user_extensions.o i915_user_extensions.o
......
...@@ -8,88 +8,67 @@ ...@@ -8,88 +8,67 @@
#include "i915_drv.h" #include "i915_drv.h"
#include "i915_gem_clflush.h" #include "i915_gem_clflush.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h" #include "i915_trace.h"
static DEFINE_SPINLOCK(clflush_lock);
struct clflush { struct clflush {
struct dma_fence dma; /* Must be first for dma_fence_free() */ struct dma_fence_work base;
struct i915_sw_fence wait;
struct work_struct work;
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
}; };
static const char *i915_clflush_get_driver_name(struct dma_fence *fence) static void __do_clflush(struct drm_i915_gem_object *obj)
{
return DRIVER_NAME;
}
static const char *i915_clflush_get_timeline_name(struct dma_fence *fence)
{
return "clflush";
}
static void i915_clflush_release(struct dma_fence *fence)
{
struct clflush *clflush = container_of(fence, typeof(*clflush), dma);
i915_sw_fence_fini(&clflush->wait);
BUILD_BUG_ON(offsetof(typeof(*clflush), dma));
dma_fence_free(&clflush->dma);
}
static const struct dma_fence_ops i915_clflush_ops = {
.get_driver_name = i915_clflush_get_driver_name,
.get_timeline_name = i915_clflush_get_timeline_name,
.release = i915_clflush_release,
};
static void __i915_do_clflush(struct drm_i915_gem_object *obj)
{ {
GEM_BUG_ON(!i915_gem_object_has_pages(obj)); GEM_BUG_ON(!i915_gem_object_has_pages(obj));
drm_clflush_sg(obj->mm.pages); drm_clflush_sg(obj->mm.pages);
intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU); intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
} }
static void i915_clflush_work(struct work_struct *work) static int clflush_work(struct dma_fence_work *base)
{ {
struct clflush *clflush = container_of(work, typeof(*clflush), work); struct clflush *clflush = container_of(base, typeof(*clflush), base);
struct drm_i915_gem_object *obj = clflush->obj; struct drm_i915_gem_object *obj = fetch_and_zero(&clflush->obj);
int err;
if (i915_gem_object_pin_pages(obj)) {
DRM_ERROR("Failed to acquire obj->pages for clflushing\n");
goto out;
}
__i915_do_clflush(obj); err = i915_gem_object_pin_pages(obj);
if (err)
goto put;
__do_clflush(obj);
i915_gem_object_unpin_pages(obj); i915_gem_object_unpin_pages(obj);
out: put:
i915_gem_object_put(obj); i915_gem_object_put(obj);
return err;
}
dma_fence_signal(&clflush->dma); static void clflush_release(struct dma_fence_work *base)
dma_fence_put(&clflush->dma); {
struct clflush *clflush = container_of(base, typeof(*clflush), base);
if (clflush->obj)
i915_gem_object_put(clflush->obj);
} }
static int __i915_sw_fence_call static const struct dma_fence_work_ops clflush_ops = {
i915_clflush_notify(struct i915_sw_fence *fence, .name = "clflush",
enum i915_sw_fence_notify state) .work = clflush_work,
.release = clflush_release,
};
static struct clflush *clflush_work_create(struct drm_i915_gem_object *obj)
{ {
struct clflush *clflush = container_of(fence, typeof(*clflush), wait); struct clflush *clflush;
switch (state) { GEM_BUG_ON(!obj->cache_dirty);
case FENCE_COMPLETE:
schedule_work(&clflush->work);
break;
case FENCE_FREE: clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
dma_fence_put(&clflush->dma); if (!clflush)
break; return NULL;
}
dma_fence_work_init(&clflush->base, &clflush_ops);
clflush->obj = i915_gem_object_get(obj); /* obj <-> clflush cycle */
return NOTIFY_DONE; return clflush;
} }
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
...@@ -127,32 +106,16 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, ...@@ -127,32 +106,16 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
clflush = NULL; clflush = NULL;
if (!(flags & I915_CLFLUSH_SYNC)) if (!(flags & I915_CLFLUSH_SYNC))
clflush = kmalloc(sizeof(*clflush), GFP_KERNEL); clflush = clflush_work_create(obj);
if (clflush) { if (clflush) {
GEM_BUG_ON(!obj->cache_dirty); i915_sw_fence_await_reservation(&clflush->base.chain,
obj->base.resv, NULL, true,
dma_fence_init(&clflush->dma, I915_FENCE_TIMEOUT,
&i915_clflush_ops,
&clflush_lock,
0, 0);
i915_sw_fence_init(&clflush->wait, i915_clflush_notify);
clflush->obj = i915_gem_object_get(obj);
INIT_WORK(&clflush->work, i915_clflush_work);
dma_fence_get(&clflush->dma);
i915_sw_fence_await_reservation(&clflush->wait,
obj->base.resv, NULL,
true, I915_FENCE_TIMEOUT,
I915_FENCE_GFP); I915_FENCE_GFP);
dma_resv_add_excl_fence(obj->base.resv, &clflush->base.dma);
dma_resv_add_excl_fence(obj->base.resv, dma_fence_work_commit(&clflush->base);
&clflush->dma);
i915_sw_fence_commit(&clflush->wait);
} else if (obj->mm.pages) { } else if (obj->mm.pages) {
__i915_do_clflush(obj); __do_clflush(obj);
} else { } else {
GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU); GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
} }
......
// SPDX-License-Identifier: MIT
/*
* Copyright © 2019 Intel Corporation
*/
#include "i915_sw_fence_work.h"
/*
 * Workqueue callback: run the caller-supplied work op for a dma_fence_work,
 * record any error it returns on the embedded dma-fence, then signal the
 * fence and drop the reference taken by fence_notify() when queuing.
 */
static void fence_work(struct work_struct *work)
{
	struct dma_fence_work *f = container_of(work, typeof(*f), work);
	int err;

	err = f->ops->work(f);
	if (err)
		dma_fence_set_error(&f->dma, err);

	/* Signal completion before releasing the queue's reference. */
	dma_fence_signal(&f->dma);
	dma_fence_put(&f->dma);
}
/*
 * i915_sw_fence callback for the prerequisite chain.
 *
 * FENCE_COMPLETE: all prerequisites have fired.  Any error on the chain is
 * propagated to the embedded dma-fence; if the fence carries no error, take
 * a reference for the worker and queue the work, otherwise signal the fence
 * immediately without running ops->work.
 *
 * FENCE_FREE: the sw-fence is being torn down; drop its reference on the
 * embedded dma-fence.
 */
static int __i915_sw_fence_call
fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	struct dma_fence_work *f = container_of(fence, typeof(*f), chain);

	switch (state) {
	case FENCE_COMPLETE:
		if (fence->error)
			dma_fence_set_error(&f->dma, fence->error);

		if (!f->dma.error) {
			/* Reference is dropped by fence_work() after signaling. */
			dma_fence_get(&f->dma);
			queue_work(system_unbound_wq, &f->work);
		} else {
			/* Errored before running: complete without the work. */
			dma_fence_signal(&f->dma);
		}
		break;

	case FENCE_FREE:
		dma_fence_put(&f->dma);
		break;
	}

	return NOTIFY_DONE;
}
/* dma_fence_ops hook: constant driver name reported for these fences. */
static const char *get_driver_name(struct dma_fence *fence)
{
	return "dma-fence";
}
/*
 * dma_fence_ops hook: report the caller-supplied ops name as the timeline
 * name, falling back to "work" when none was given.
 */
static const char *get_timeline_name(struct dma_fence *fence)
{
	struct dma_fence_work *f = container_of(fence, typeof(*f), dma);
	const char *name = f->ops->name;

	return name ? name : "work";
}
/*
 * dma_fence_ops hook invoked on the final reference drop: give the caller
 * a chance to clean up via ops->release (optional), finalise the embedded
 * sw-fence, and free the containing dma_fence_work.
 */
static void fence_release(struct dma_fence *fence)
{
	struct dma_fence_work *f = container_of(fence, typeof(*f), dma);

	if (f->ops->release)
		f->ops->release(f);

	i915_sw_fence_fini(&f->chain);

	/* dma must be the first member so dma_fence_free() frees the whole struct. */
	BUILD_BUG_ON(offsetof(typeof(*f), dma));
	dma_fence_free(&f->dma);
}
/* dma_fence_ops shared by every dma_fence_work. */
static const struct dma_fence_ops fence_ops = {
	.get_driver_name = get_driver_name,
	.get_timeline_name = get_timeline_name,
	.release = fence_release,
};
/*
 * dma_fence_work_init - prepare a dma_fence_work for later commit
 * @f: the work to initialise
 * @ops: the caller's work/release callbacks
 *
 * Sets up the embedded dma-fence, the sw-fence that gates execution on
 * its prerequisites, and the workqueue item that will run @ops->work once
 * the sw-fence is committed and completes.
 */
void dma_fence_work_init(struct dma_fence_work *f,
			 const struct dma_fence_work_ops *ops)
{
	f->ops = ops;
	spin_lock_init(&f->lock);
	dma_fence_init(&f->dma, &fence_ops, &f->lock, 0, 0);
	i915_sw_fence_init(&f->chain, fence_notify);
	INIT_WORK(&f->work, fence_work);
}
/*
 * dma_fence_work_chain - make the work wait upon another fence
 * @f: the work to delay
 * @signal: the fence to wait upon, may be NULL (no-op)
 *
 * Returns the result of the await, or 0 when there is nothing to wait for.
 */
int dma_fence_work_chain(struct dma_fence_work *f, struct dma_fence *signal)
{
	if (signal)
		return __i915_sw_fence_await_dma_fence(&f->chain, signal,
						       &f->cb);

	return 0;
}
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2019 Intel Corporation
*/
#ifndef I915_SW_FENCE_WORK_H
#define I915_SW_FENCE_WORK_H
#include <linux/dma-fence.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include "i915_sw_fence.h"
struct dma_fence_work;
struct dma_fence_work_ops {
	const char *name;	/* reported as the fence's timeline name */
	int (*work)(struct dma_fence_work *f);	/* run from a workqueue; errno on failure */
	void (*release)(struct dma_fence_work *f);	/* optional; called on final fence put */
};
struct dma_fence_work {
	struct dma_fence dma;	/* must be first: freed via dma_fence_free() */
	spinlock_t lock;	/* lock backing the embedded dma-fence */

	struct i915_sw_fence chain;	/* gates execution on prerequisites */
	struct i915_sw_dma_fence_cb cb;	/* callback slot for dma_fence_work_chain() */

	struct work_struct work;	/* deferred execution of ops->work */
	const struct dma_fence_work_ops *ops;
};
void dma_fence_work_init(struct dma_fence_work *f,
const struct dma_fence_work_ops *ops);
int dma_fence_work_chain(struct dma_fence_work *f, struct dma_fence *signal);
/*
 * Commit the work: once every prerequisite chained onto @f has signaled,
 * the work callback is queued for execution.
 */
static inline void dma_fence_work_commit(struct dma_fence_work *f)
{
	i915_sw_fence_commit(&f->chain);
}
#endif /* I915_SW_FENCE_WORK_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment