Commit 65dd4ed0 authored by Michal Wajdeczko, committed by Daniel Vetter

drm/i915/guc: Don't receive all G2H messages in irq handler

In the irq handler, try to receive just a single G2H message and let any
remaining messages be received from the tasklet.
Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210603051630.2635-18-matthew.brost@intel.com
parent 2e496ac2
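
[Editor's note] A minimal, self-contained sketch of the receive-one-then-defer
pattern described in the commit message above. The demo_* names and
read_one_msg() are hypothetical stand-ins; only the tasklet_setup(),
tasklet_hi_schedule(), from_tasklet() and tasklet_kill() calls are the real
<linux/interrupt.h> API the patch uses:

	#include <linux/interrupt.h>

	struct demo_ctx {
		struct tasklet_struct receive_tasklet;
	};

	/* hypothetical: consume one message, return >0 if more are pending */
	static int read_one_msg(struct demo_ctx *ctx);

	static void demo_try_receive(struct demo_ctx *ctx)
	{
		/* the irq path handles a single message ... */
		if (read_one_msg(ctx) > 0)
			/* ... and punts the backlog to softirq context */
			tasklet_hi_schedule(&ctx->receive_tasklet);
	}

	static void demo_tasklet_func(struct tasklet_struct *t)
	{
		struct demo_ctx *ctx = from_tasklet(ctx, t, receive_tasklet);

		/* reschedules itself for as long as messages remain */
		demo_try_receive(ctx);
	}

	static void demo_init(struct demo_ctx *ctx)
	{
		tasklet_setup(&ctx->receive_tasklet, demo_tasklet_func);
	}

	static void demo_fini(struct demo_ctx *ctx)
	{
		/* waits for a concurrently running tasklet to finish */
		tasklet_kill(&ctx->receive_tasklet);
	}
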
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -81,6 +81,7 @@ enum { CTB_SEND = 0, CTB_RECV = 1 };
 
 enum { CTB_OWNER_HOST = 0 };
 
+static void ct_receive_tasklet_func(struct tasklet_struct *t);
 static void ct_incoming_request_worker_func(struct work_struct *w);
 
 /**
@@ -95,6 +96,7 @@ void intel_guc_ct_init_early(struct intel_guc_ct *ct)
 	INIT_LIST_HEAD(&ct->requests.pending);
 	INIT_LIST_HEAD(&ct->requests.incoming);
 	INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func);
+	tasklet_setup(&ct->receive_tasklet, ct_receive_tasklet_func);
 }
 
 static inline const char *guc_ct_buffer_type_to_str(u32 type)
@@ -244,6 +246,7 @@ void intel_guc_ct_fini(struct intel_guc_ct *ct)
 {
 	GEM_BUG_ON(ct->enabled);
 
+	tasklet_kill(&ct->receive_tasklet);
 	i915_vma_unpin_and_release(&ct->vma, I915_VMA_RELEASE_MAP);
 	memset(ct, 0, sizeof(*ct));
 }
@@ -651,7 +654,7 @@ static int ct_read(struct intel_guc_ct *ct, u32 *data)
 	CT_DEBUG(ct, "received %*ph\n", 4 * len, data);
 
 	desc->head = head * 4;
-	return 0;
+	return available - len;
 
 corrupted:
 	CT_ERROR(ct, "Corrupted descriptor addr=%#x head=%u tail=%u size=%u\n",
@@ -687,10 +690,10 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
 	u32 status;
 	u32 datalen;
 	struct ct_request *req;
+	unsigned long flags;
 	bool found = false;
 
 	GEM_BUG_ON(!ct_header_is_response(header));
-	GEM_BUG_ON(!in_irq());
 
 	/* Response payload shall at least include fence and status */
 	if (unlikely(len < 2)) {
@@ -710,7 +713,7 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
 
 	CT_DEBUG(ct, "response fence %u status %#x\n", fence, status);
 
-	spin_lock(&ct->requests.lock);
+	spin_lock_irqsave(&ct->requests.lock, flags);
 	list_for_each_entry(req, &ct->requests.pending, link) {
 		if (unlikely(fence != req->fence)) {
 			CT_DEBUG(ct, "request %u awaits response\n",
@@ -729,7 +732,7 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
 		found = true;
 		break;
 	}
-	spin_unlock(&ct->requests.lock);
+	spin_unlock_irqrestore(&ct->requests.lock, flags);
 
 	if (!found)
 		CT_ERROR(ct, "Unsolicited response %*ph\n", msgsize, msg);
@@ -843,31 +846,55 @@ static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
 	return 0;
 }
 
+static int ct_receive(struct intel_guc_ct *ct)
+{
+	u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&ct->ctbs.recv.lock, flags);
+	ret = ct_read(ct, msg);
+	spin_unlock_irqrestore(&ct->ctbs.recv.lock, flags);
+	if (ret < 0)
+		return ret;
+
+	if (ct_header_is_response(msg[0]))
+		ct_handle_response(ct, msg);
+	else
+		ct_handle_request(ct, msg);
+
+	return ret;
+}
+
+static void ct_try_receive_message(struct intel_guc_ct *ct)
+{
+	int ret;
+
+	if (GEM_WARN_ON(!ct->enabled))
+		return;
+
+	ret = ct_receive(ct);
+	if (ret > 0)
+		tasklet_hi_schedule(&ct->receive_tasklet);
+}
+
+static void ct_receive_tasklet_func(struct tasklet_struct *t)
+{
+	struct intel_guc_ct *ct = from_tasklet(ct, t, receive_tasklet);
+
+	ct_try_receive_message(ct);
+}
+
 /*
  * When we're communicating with the GuC over CT, GuC uses events
  * to notify us about new messages being posted on the RECV buffer.
  */
 void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
 {
-	u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
-	unsigned long flags;
-	int err = 0;
-
 	if (unlikely(!ct->enabled)) {
 		WARN(1, "Unexpected GuC event received while CT disabled!\n");
 		return;
 	}
 
-	do {
-		spin_lock_irqsave(&ct->ctbs.recv.lock, flags);
-		err = ct_read(ct, msg);
-		spin_unlock_irqrestore(&ct->ctbs.recv.lock, flags);
-		if (err)
-			break;
-
-		if (ct_header_is_response(msg[0]))
-			err = ct_handle_response(ct, msg);
-		else
-			err = ct_handle_request(ct, msg);
-	} while (!err);
+	ct_try_receive_message(ct);
 }
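
[Editor's note] On the locking change in ct_handle_response() above: previously
it was reached only from the interrupt handler (hence the removed
GEM_BUG_ON(!in_irq()) and the plain spin_lock()), but it can now also run from
the tasklet, i.e. softirq context, so requests.lock must be taken with the
irq-safe variant. A small sketch, with the hypothetical demo_lock standing in
for requests.lock:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);

	static void demo_handle_response(void)
	{
		unsigned long flags;

		/*
		 * spin_lock_irqsave() is correct whether we were entered
		 * from the hardirq handler or from the tasklet: it disables
		 * local interrupts so the irq path cannot re-take the lock
		 * on this CPU, and restores the prior irq state on exit.
		 */
		spin_lock_irqsave(&demo_lock, flags);
		/* ... walk the pending-requests list ... */
		spin_unlock_irqrestore(&demo_lock, flags);
	}
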
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
@@ -6,6 +6,7 @@
 #ifndef _INTEL_GUC_CT_H_
 #define _INTEL_GUC_CT_H_
 
+#include <linux/interrupt.h>
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
 
@@ -55,6 +56,8 @@ struct intel_guc_ct {
 		struct intel_guc_ct_buffer recv;
 	} ctbs;
 
+	struct tasklet_struct receive_tasklet;
+
 	struct {
 		u32 last_fence; /* last fence used to send request */
...
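
[Editor's note] The other load-bearing change is ct_read() returning
"available - len" instead of 0 on success: it now reports how many dwords are
still queued in the receive buffer after one message has been consumed. A
hedged restatement of that contract as inferred from the diff alone, plus a
caller shaped like ct_try_receive_message() above (demo_poll_once is a
hypothetical name):

	/*
	 * Inferred return contract of ct_read()/ct_receive():
	 *   ret < 0   error, nothing was handled
	 *   ret == 0  one message consumed, receive buffer now drained
	 *   ret > 0   one message consumed, ret dwords still queued
	 */
	static void demo_poll_once(struct intel_guc_ct *ct)
	{
		int ret = ct_receive(ct);	/* consumes at most one message */

		if (ret > 0)			/* backlog remains in the CTB */
			tasklet_hi_schedule(&ct->receive_tasklet);
	}
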