Commit 7aaec3a6 authored by Rodrigo Vivi

drm/xe: Let's return last value read on xe_mmio_wait32.

This is already useful because it avoids some extra reads
where registers might have changed after the timeout decision.

But also, it will be important as a step toward eventually removing i915's wait_for macros.
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
parent 86011ae2
......@@ -129,7 +129,7 @@ static int domain_wake_wait(struct xe_gt *gt,
struct xe_force_wake_domain *domain)
{
return xe_mmio_wait32(gt, domain->reg_ack, domain->val, domain->val,
XE_FORCE_WAKE_ACK_TIMEOUT_MS);
XE_FORCE_WAKE_ACK_TIMEOUT_MS, NULL);
}
static void domain_sleep(struct xe_gt *gt, struct xe_force_wake_domain *domain)
......@@ -141,7 +141,7 @@ static int domain_sleep_wait(struct xe_gt *gt,
struct xe_force_wake_domain *domain)
{
return xe_mmio_wait32(gt, domain->reg_ack, 0, domain->val,
XE_FORCE_WAKE_ACK_TIMEOUT_MS);
XE_FORCE_WAKE_ACK_TIMEOUT_MS, NULL);
}
#define for_each_fw_domain_masked(domain__, mask__, fw__, tmp__) \
......
......@@ -599,7 +599,7 @@ int do_gt_reset(struct xe_gt *gt)
int err;
xe_mmio_write32(gt, GEN6_GDRST.reg, GEN11_GRDOM_FULL);
err = xe_mmio_wait32(gt, GEN6_GDRST.reg, 0, GEN11_GRDOM_FULL, 5);
err = xe_mmio_wait32(gt, GEN6_GDRST.reg, 0, GEN11_GRDOM_FULL, 5, NULL);
if (err)
drm_err(&xe->drm,
"GT reset failed to clear GEN11_GRDOM_FULL\n");
......
......@@ -324,17 +324,17 @@ int xe_guc_reset(struct xe_guc *guc)
{
struct xe_device *xe = guc_to_xe(guc);
struct xe_gt *gt = guc_to_gt(guc);
u32 guc_status;
u32 guc_status, gdrst;
int ret;
xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
xe_mmio_write32(gt, GEN6_GDRST.reg, GEN11_GRDOM_GUC);
ret = xe_mmio_wait32(gt, GEN6_GDRST.reg, 0, GEN11_GRDOM_GUC, 5);
ret = xe_mmio_wait32(gt, GEN6_GDRST.reg, 0, GEN11_GRDOM_GUC, 5, &gdrst);
if (ret) {
drm_err(&xe->drm, "GuC reset timed out, GEN6_GDRST=0x%8x\n",
xe_mmio_read32(gt, GEN6_GDRST.reg));
gdrst);
goto err_out;
}
......@@ -654,7 +654,7 @@ int xe_guc_send_mmio(struct xe_guc *guc, const u32 *request, u32 len)
{
struct xe_device *xe = guc_to_xe(guc);
struct xe_gt *gt = guc_to_gt(guc);
u32 header;
u32 header, reply;
u32 reply_reg = xe_gt_is_media_type(gt) ?
MEDIA_SOFT_SCRATCH(0).reg : GEN11_SOFT_SCRATCH(0).reg;
int ret;
......@@ -691,12 +691,11 @@ int xe_guc_send_mmio(struct xe_guc *guc, const u32 *request, u32 len)
ret = xe_mmio_wait32(gt, reply_reg,
FIELD_PREP(GUC_HXG_MSG_0_ORIGIN,
GUC_HXG_ORIGIN_GUC),
GUC_HXG_MSG_0_ORIGIN,
50);
GUC_HXG_MSG_0_ORIGIN, 50, &reply);
if (ret) {
timeout:
drm_err(&xe->drm, "mmio request 0x%08x: no reply 0x%08x\n",
request[0], xe_mmio_read32(gt, reply_reg));
request[0], reply);
return ret;
}
......
......@@ -85,7 +85,7 @@ int xe_huc_auth(struct xe_huc *huc)
ret = xe_mmio_wait32(gt, GEN11_HUC_KERNEL_LOAD_INFO.reg,
HUC_LOAD_SUCCESSFUL,
HUC_LOAD_SUCCESSFUL, 100);
HUC_LOAD_SUCCESSFUL, 100, NULL);
if (ret) {
drm_err(&xe->drm, "HuC: Firmware not verified %d\n", ret);
goto fail;
......
......@@ -82,21 +82,28 @@ static inline int xe_mmio_write32_and_verify(struct xe_gt *gt,
return (reg_val & mask) != eval ? -EINVAL : 0;
}
static inline int xe_mmio_wait32(struct xe_gt *gt,
u32 reg, u32 val,
u32 mask, u32 timeout_ms)
static inline int xe_mmio_wait32(struct xe_gt *gt, u32 reg, u32 val,
u32 mask, u32 timeout_ms, u32 *out_val)
{
ktime_t cur = ktime_get_raw();
const ktime_t end = ktime_add_ms(cur, timeout_ms);
int ret = -ETIMEDOUT;
s64 wait = 10;
u32 read;
for (;;) {
if ((xe_mmio_read32(gt, reg) & mask) == val)
return 0;
read = xe_mmio_read32(gt, reg);
if ((read & mask) == val) {
ret = 0;
break;
}
cur = ktime_get_raw();
if (!ktime_before(cur, end))
return -ETIMEDOUT;
break;
if (ktime_after(ktime_add_us(cur, wait), end))
wait = ktime_us_delta(end, cur);
......@@ -105,7 +112,10 @@ static inline int xe_mmio_wait32(struct xe_gt *gt,
wait <<= 1;
}
return -ETIMEDOUT;
if (out_val)
*out_val = read;
return ret;
}
int xe_mmio_ioctl(struct drm_device *dev, void *data,
......
......@@ -326,7 +326,7 @@ static int uc_fw_xfer(struct xe_uc_fw *uc_fw, u32 offset, u32 dma_flags)
{
struct xe_device *xe = uc_fw_to_xe(uc_fw);
struct xe_gt *gt = uc_fw_to_gt(uc_fw);
u32 src_offset;
u32 src_offset, dma_ctrl;
int ret;
xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
......@@ -352,11 +352,10 @@ static int uc_fw_xfer(struct xe_uc_fw *uc_fw, u32 offset, u32 dma_flags)
_MASKED_BIT_ENABLE(dma_flags | START_DMA));
/* Wait for DMA to finish */
ret = xe_mmio_wait32(gt, DMA_CTRL.reg, 0, START_DMA, 100);
ret = xe_mmio_wait32(gt, DMA_CTRL.reg, 0, START_DMA, 100, &dma_ctrl);
if (ret)
drm_err(&xe->drm, "DMA for %s fw failed, DMA_CTRL=%u\n",
xe_uc_fw_type_repr(uc_fw->type),
xe_mmio_read32(gt, DMA_CTRL.reg));
xe_uc_fw_type_repr(uc_fw->type), dma_ctrl);
/* Disable the bits once DMA is over */
xe_mmio_write32(gt, DMA_CTRL.reg, _MASKED_BIT_DISABLE(dma_flags));
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment