Commit 5c09bd6c authored by Gustavo Sousa, committed by Rodrigo Vivi

drm/xe/mmio: Move xe_mmio_wait32() to xe_mmio.c

This function is big enough; let's move it to a shared compilation unit.

While at it, document it.

Here is the output of running bloat-o-meter on the old and new module
(execution provided by Lucas):

    $ ./scripts/bloat-o-meter build64/drivers/gpu/drm/xe/xe.ko{.old,}
    add/remove: 2/0 grow/shrink: 0/58 up/down: 554/-15645 (-15091)
    (...) # Lines in between omitted
    Total: Before=2181322, After=2166231, chg -0.69%

The overall size reduction is not that significant. Nevertheless,
keeping the function inline arguably does not bring much benefit
either.

As noted by Lucas, we would probably benefit from an inline helper
doing the fast-path check: an optimistic first read before falling back
to the wait logic, which itself would live in a compilation unit. We
might come back and implement this in the future if we have data to
justify it.
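
Just to illustrate the idea, here is a minimal sketch of what such a
fast-path wrapper could look like (purely hypothetical: the names
xe_mmio_wait32_fast and __xe_mmio_wait32 are made up and are not part
of this series):

    static inline int xe_mmio_wait32_fast(struct xe_gt *gt, struct xe_reg reg,
                                          u32 mask, u32 val, u32 timeout_us,
                                          u32 *out_val, bool atomic)
    {
            u32 read = xe_mmio_read32(gt, reg);

            /* Fast path: the register already holds the desired masked value. */
            if ((read & mask) == val) {
                    if (out_val)
                            *out_val = read;
                    return 0;
            }

            /* Slow path: out-of-line wait loop with exponential backoff. */
            return __xe_mmio_wait32(gt, reg, mask, val, timeout_us,
                                    out_val, atomic);
    }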

v2:
  - Add note in documentation for @timeout_us regarding the exponential
    backoff strategy. (Lucas)
  - Share output of bloat-o-meter in the commit message. (Lucas)
Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://lore.kernel.org/r/20231116214000.70573-2-gustavo.sousa@intel.com
Signed-off-by: Gustavo Sousa <gustavo.sousa@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent a6a4ea6d
@@ -489,3 +489,57 @@ u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg)
        return (u64)udw << 32 | ldw;
}

/**
 * xe_mmio_wait32() - Wait for a register to match the desired masked value
 * @gt: MMIO target GT
 * @reg: register to read value from
 * @mask: mask to be applied to the value read from the register
 * @val: desired value after applying the mask
 * @timeout_us: time out after this period of time. Wait logic tries to be
 * smart, applying an exponential backoff until @timeout_us is reached.
 * @out_val: if not NULL, points where to store the last unmasked value
 * @atomic: needs to be true if calling from an atomic context
 *
 * This function polls for the desired masked value and returns zero on success
 * or -ETIMEDOUT if timed out.
 *
 * Note that @timeout_us represents the minimum amount of time to wait before
 * giving up. The actual time taken by this function can be a little more than
 * @timeout_us for different reasons, especially in non-atomic contexts. Thus,
 * it is possible that this function succeeds even after @timeout_us has passed.
 */
int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
                   u32 *out_val, bool atomic)
{
        ktime_t cur = ktime_get_raw();
        const ktime_t end = ktime_add_us(cur, timeout_us);
        int ret = -ETIMEDOUT;
        s64 wait = 10;
        u32 read;

        for (;;) {
                read = xe_mmio_read32(gt, reg);
                if ((read & mask) == val) {
                        ret = 0;
                        break;
                }

                cur = ktime_get_raw();
                if (!ktime_before(cur, end))
                        break;

                if (ktime_after(ktime_add_us(cur, wait), end))
                        wait = ktime_us_delta(end, cur);

                if (atomic)
                        udelay(wait);
                else
                        usleep_range(wait, wait << 1);
                wait <<= 1;
        }

        if (out_val)
                *out_val = read;

        return ret;
}
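
For context (this is not part of the patch), a hedged usage sketch of a
caller polling a register with the new helper; SOME_REG and the wrapper
function below are made up for illustration:

static int wait_for_some_reg_ready(struct xe_gt *gt)
{
        u32 last;
        int err;

        /*
         * Poll until bit 0 of the (hypothetical) SOME_REG reads back as 1,
         * waiting at most 100 usec and sleeping between reads (atomic == false).
         */
        err = xe_mmio_wait32(gt, SOME_REG, BIT(0), BIT(0), 100, &last, false);
        if (err == -ETIMEDOUT)
                /* 'last' holds the last raw (unmasked) value that was read. */
                return err;

        return 0;
}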
@@ -87,43 +87,6 @@ static inline int xe_mmio_write32_and_verify(struct xe_gt *gt,
        return (reg_val & mask) != eval ? -EINVAL : 0;
}

static inline int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask,
                                 u32 val, u32 timeout_us, u32 *out_val,
                                 bool atomic)
{
        ktime_t cur = ktime_get_raw();
        const ktime_t end = ktime_add_us(cur, timeout_us);
        int ret = -ETIMEDOUT;
        s64 wait = 10;
        u32 read;

        for (;;) {
                read = xe_mmio_read32(gt, reg);
                if ((read & mask) == val) {
                        ret = 0;
                        break;
                }

                cur = ktime_get_raw();
                if (!ktime_before(cur, end))
                        break;

                if (ktime_after(ktime_add_us(cur, wait), end))
                        wait = ktime_us_delta(end, cur);

                if (atomic)
                        udelay(wait);
                else
                        usleep_range(wait, wait << 1);
                wait <<= 1;
        }

        if (out_val)
                *out_val = read;

        return ret;
}

static inline bool xe_mmio_in_range(const struct xe_gt *gt,
                                    const struct xe_mmio_range *range,
                                    struct xe_reg reg)
@@ -136,5 +99,7 @@ static inline bool xe_mmio_in_range(const struct xe_gt *gt,
int xe_mmio_probe_vram(struct xe_device *xe);
u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg);
int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
                   u32 *out_val, bool atomic);

#endif