Commit 55520832 authored by Ben Skeggs

drm/nouveau/fault: switch non-replayable faults to nvkm_event_ntfy

v2: fix flush_work() being called on an uninitialised work item during init
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Reviewed-by: Lyude Paul <lyude@redhat.com>
parent f43e47c0
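In short: the non-replayable fault buffer is no longer drained directly from the notifier; the nvkm_event_ntfy callback only schedules a work item, and gv100_fault_buffer_process() now runs from that work item. Below is a condensed reading aid assembled only from the hunks that follow (gv100 shown, tu102 is analogous); it is not compilable on its own, it omits all surrounding nouveau code, and where the comments tie a line to the v2 note that is my reading rather than something the commit message spells out:

/* gv100_fault_new(): initialise the work item as soon as the object exists,
 * so any later flush_work() never sees an uninitialised item (this appears
 * to be the v2 fix mentioned above). */
INIT_WORK(&(*pfault)->nrpfb_work, gv100_fault_buffer_process);

/* gv100_fault_oneinit(): register the notifier for buffer 0 against the
 * newly named event bit. */
nvkm_event_ntfy_add(&fault->event, 0, NVKM_FAULT_BUFFER_EVENT_PENDING, true,
                    gv100_fault_ntfy_nrpfb, &fault->nrpfb);

/* gv100_fault_init(): allow notifications; gv100_fault_intr() then signals
 * the named bit instead of a bare "1". */
nvkm_event_ntfy_allow(&fault->nrpfb);
nvkm_event_send(&fault->event, NVKM_FAULT_BUFFER_EVENT_PENDING, 0, NULL, 0);

/* gv100_fault_ntfy_nrpfb(): called from the event-send path, so it only
 * kicks the worker; the buffer is drained later in process context. */
schedule_work(&fault->nrpfb_work);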
@@ -2,7 +2,6 @@
 #define __NVKM_FAULT_H__
 #include <core/subdev.h>
 #include <core/event.h>
-#include <core/notify.h>
 struct nvkm_fault {
         const struct nvkm_fault_func *func;
@@ -11,9 +10,11 @@ struct nvkm_fault {
         struct nvkm_fault_buffer *buffer[2];
         int buffer_nr;
+#define NVKM_FAULT_BUFFER_EVENT_PENDING BIT(0)
         struct nvkm_event event;
-        struct nvkm_notify nrpfb;
+        struct nvkm_event_ntfy nrpfb;
+        struct work_struct nrpfb_work;
         struct nvkm_device_oclass user;
 };
@@ -145,7 +145,7 @@ nvkm_fault_dtor(struct nvkm_subdev *subdev)
         struct nvkm_fault *fault = nvkm_fault(subdev);
         int i;
-        nvkm_notify_fini(&fault->nrpfb);
+        nvkm_event_ntfy_del(&fault->nrpfb);
         nvkm_event_fini(&fault->event);
         for (i = 0; i < fault->buffer_nr; i++) {
@@ -65,7 +65,7 @@ gp100_fault_buffer_info(struct nvkm_fault_buffer *buffer)
 void
 gp100_fault_intr(struct nvkm_fault *fault)
 {
-        nvkm_event_send(&fault->event, 1, 0, NULL, 0);
+        nvkm_event_send(&fault->event, NVKM_FAULT_BUFFER_EVENT_PENDING, 0, NULL, 0);
 }
 static const struct nvkm_fault_func
@@ -27,10 +27,12 @@
 #include <nvif/class.h>
-static void
-gv100_fault_buffer_process(struct nvkm_fault_buffer *buffer)
+void
+gv100_fault_buffer_process(struct work_struct *work)
 {
-        struct nvkm_device *device = buffer->fault->subdev.device;
+        struct nvkm_fault *fault = container_of(work, typeof(*fault), nrpfb_work);
+        struct nvkm_fault_buffer *buffer = fault->buffer[0];
+        struct nvkm_device *device = fault->subdev.device;
         struct nvkm_memory *mem = buffer->mem;
         u32 get = nvkm_rd32(device, buffer->get);
         u32 put = nvkm_rd32(device, buffer->put);
@@ -115,11 +117,12 @@ gv100_fault_buffer_info(struct nvkm_fault_buffer *buffer)
 }
 static int
-gv100_fault_ntfy_nrpfb(struct nvkm_notify *notify)
+gv100_fault_ntfy_nrpfb(struct nvkm_event_ntfy *ntfy, u32 bits)
 {
-        struct nvkm_fault *fault = container_of(notify, typeof(*fault), nrpfb);
-        gv100_fault_buffer_process(fault->buffer[0]);
-        return NVKM_NOTIFY_KEEP;
+        struct nvkm_fault *fault = container_of(ntfy, typeof(*fault), nrpfb);
+        schedule_work(&fault->nrpfb_work);
+        return NVKM_EVENT_KEEP;
 }
 static void
@@ -163,14 +166,14 @@ gv100_fault_intr(struct nvkm_fault *fault)
         if (stat & 0x20000000) {
                 if (fault->buffer[0]) {
-                        nvkm_event_send(&fault->event, 1, 0, NULL, 0);
+                        nvkm_event_send(&fault->event, NVKM_FAULT_BUFFER_EVENT_PENDING, 0, NULL, 0);
                         stat &= ~0x20000000;
                 }
         }
         if (stat & 0x08000000) {
                 if (fault->buffer[1]) {
-                        nvkm_event_send(&fault->event, 1, 1, NULL, 0);
+                        nvkm_event_send(&fault->event, NVKM_FAULT_BUFFER_EVENT_PENDING, 1, NULL, 0);
                         stat &= ~0x08000000;
                 }
         }
@@ -183,9 +186,12 @@ gv100_fault_intr(struct nvkm_fault *fault)
 static void
 gv100_fault_fini(struct nvkm_fault *fault)
 {
-        nvkm_notify_put(&fault->nrpfb);
+        nvkm_event_ntfy_block(&fault->nrpfb);
+        flush_work(&fault->nrpfb_work);
         if (fault->buffer[0])
                 fault->func->buffer.fini(fault->buffer[0]);
         nvkm_mask(fault->subdev.device, 0x100a34, 0x80000000, 0x80000000);
 }
@@ -194,15 +200,15 @@ gv100_fault_init(struct nvkm_fault *fault)
 {
         nvkm_mask(fault->subdev.device, 0x100a2c, 0x80000000, 0x80000000);
         fault->func->buffer.init(fault->buffer[0]);
-        nvkm_notify_get(&fault->nrpfb);
+        nvkm_event_ntfy_allow(&fault->nrpfb);
 }
 int
 gv100_fault_oneinit(struct nvkm_fault *fault)
 {
-        return nvkm_notify_init(&fault->buffer[0]->object, &fault->event,
-                                gv100_fault_ntfy_nrpfb, true, NULL, 0, 0,
-                                &fault->nrpfb);
+        nvkm_event_ntfy_add(&fault->event, 0, NVKM_FAULT_BUFFER_EVENT_PENDING, true,
+                            gv100_fault_ntfy_nrpfb, &fault->nrpfb);
+        return 0;
 }
 static const struct nvkm_fault_func
@@ -231,5 +237,10 @@ int
 gv100_fault_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
                 struct nvkm_fault **pfault)
 {
-        return nvkm_fault_new_(&gv100_fault, device, type, inst, pfault);
+        int ret = nvkm_fault_new_(&gv100_fault, device, type, inst, pfault);
+        if (ret)
+                return ret;
+        INIT_WORK(&(*pfault)->nrpfb_work, gv100_fault_buffer_process);
+        return 0;
 }
@@ -46,6 +46,7 @@ void gp100_fault_buffer_fini(struct nvkm_fault_buffer *);
 void gp100_fault_buffer_init(struct nvkm_fault_buffer *);
 u64 gp100_fault_buffer_pin(struct nvkm_fault_buffer *);
 void gp100_fault_buffer_info(struct nvkm_fault_buffer *);
+void gv100_fault_buffer_process(struct work_struct *);
 void gp100_fault_intr(struct nvkm_fault *);
 u64 gp10b_fault_buffer_pin(struct nvkm_fault_buffer *);
@@ -126,7 +126,7 @@ tu102_fault_intr(struct nvkm_fault *fault)
                 nvkm_wr32(device, 0xb81010, 0x10);
                 if (fault->buffer[0]) {
-                        nvkm_event_send(&fault->event, 1, 0, NULL, 0);
+                        nvkm_event_send(&fault->event, NVKM_FAULT_BUFFER_EVENT_PENDING, 0, NULL, 0);
                         stat &= ~0x00000200;
                 }
         }
@@ -137,7 +137,7 @@ tu102_fault_intr(struct nvkm_fault *fault)
                 nvkm_wr32(device, 0xb81008, 0x1);
                 if (fault->buffer[1]) {
-                        nvkm_event_send(&fault->event, 1, 1, NULL, 0);
+                        nvkm_event_send(&fault->event, NVKM_FAULT_BUFFER_EVENT_PENDING, 1, NULL, 0);
                         stat &= ~0x00000100;
                 }
         }
@@ -150,7 +150,9 @@ tu102_fault_intr(struct nvkm_fault *fault)
 static void
 tu102_fault_fini(struct nvkm_fault *fault)
 {
-        nvkm_notify_put(&fault->nrpfb);
+        nvkm_event_ntfy_block(&fault->nrpfb);
+        flush_work(&fault->nrpfb_work);
         if (fault->buffer[0])
                 fault->func->buffer.fini(fault->buffer[0]);
         /*XXX: disable priv faults */
@@ -161,7 +163,7 @@ tu102_fault_init(struct nvkm_fault *fault)
 {
         /*XXX: enable priv faults */
         fault->func->buffer.init(fault->buffer[0]);
-        nvkm_notify_get(&fault->nrpfb);
+        nvkm_event_ntfy_allow(&fault->nrpfb);
 }
 static const struct nvkm_fault_func
@@ -184,5 +186,10 @@ int
 tu102_fault_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
                 struct nvkm_fault **pfault)
 {
-        return nvkm_fault_new_(&tu102_fault, device, type, inst, pfault);
+        int ret = nvkm_fault_new_(&tu102_fault, device, type, inst, pfault);
+        if (ret)
+                return ret;
+        INIT_WORK(&(*pfault)->nrpfb_work, gv100_fault_buffer_process);
+        return 0;
 }
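Teardown mirrors this, again condensed from the hunks above (gv100 and tu102 are identical here): the notifier is blocked and any queued work flushed before the buffer itself is finalised, and the destructor drops the notifier before tearing the event down. The usual reason for putting flush_work() after the block call, rather than before, is to stop a late notification from re-queueing work while the buffer is being finalised:

/* gv100_fault_fini() / tu102_fault_fini() */
nvkm_event_ntfy_block(&fault->nrpfb);   /* stop new notifications         */
flush_work(&fault->nrpfb_work);         /* wait for in-flight draining    */
if (fault->buffer[0])
        fault->func->buffer.fini(fault->buffer[0]);

/* nvkm_fault_dtor() */
nvkm_event_ntfy_del(&fault->nrpfb);
nvkm_event_fini(&fault->event);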