Commit 5e7823c9 authored by Ian Munsie's avatar Ian Munsie Committed by Michael Ellerman

cxl: Fix bug where AFU disable operation had no effect

The AFU disable operation has a bug where it will not clear the enable
bit and therefore will have no effect. To date this has likely been
masked by the fact that we perform an AFU reset before the disable, which
also has the effect of clearing the enable bit, making the following
disable operation effectively a noop on most hardware. This patch
modifies the afu_control function to take a parameter to clear from the
AFU control register so that the disable operation can clear the
appropriate bit.

This bug was uncovered on the Mellanox CX4, which uses an XSL rather
than a PSL. On the XSL the reset operation will not complete while the
AFU is enabled, meaning the enable bit was still set at the start of the
disable and as a result this bug was hit and the disable also timed out.

Because of this difference in behaviour between the PSL and XSL, this
patch now makes the reset dependent on the card using a PSL to avoid
waiting for a timeout on the XSL. It is entirely possible that we may be
able to drop the reset altogether if it turns out we only ever needed it
due to this bug - however I am not willing to drop it without further
regression testing and have added comments to the code explaining the
background.

This also fixes a small issue where the AFU_Cntl register was read
outside of the lock that protects it.
Signed-off-by: Ian Munsie <imunsie@au1.ibm.com>
Reviewed-by: Frederic Barrat <fbarrat@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 2224b671
...@@ -552,6 +552,7 @@ struct cxl_service_layer_ops { ...@@ -552,6 +552,7 @@ struct cxl_service_layer_ops {
void (*write_timebase_ctrl)(struct cxl *adapter); void (*write_timebase_ctrl)(struct cxl *adapter);
u64 (*timebase_read)(struct cxl *adapter); u64 (*timebase_read)(struct cxl *adapter);
int capi_mode; int capi_mode;
bool needs_reset_before_disable;
}; };
struct cxl_native { struct cxl_native {
......
...@@ -21,10 +21,10 @@ ...@@ -21,10 +21,10 @@
#include "cxl.h" #include "cxl.h"
#include "trace.h" #include "trace.h"
static int afu_control(struct cxl_afu *afu, u64 command, static int afu_control(struct cxl_afu *afu, u64 command, u64 clear,
u64 result, u64 mask, bool enabled) u64 result, u64 mask, bool enabled)
{ {
u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An); u64 AFU_Cntl;
unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT); unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
int rc = 0; int rc = 0;
...@@ -33,7 +33,8 @@ static int afu_control(struct cxl_afu *afu, u64 command, ...@@ -33,7 +33,8 @@ static int afu_control(struct cxl_afu *afu, u64 command,
trace_cxl_afu_ctrl(afu, command); trace_cxl_afu_ctrl(afu, command);
cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl | command); AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
cxl_p2n_write(afu, CXL_AFU_Cntl_An, (AFU_Cntl & ~clear) | command);
AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An); AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
while ((AFU_Cntl & mask) != result) { while ((AFU_Cntl & mask) != result) {
...@@ -67,7 +68,7 @@ static int afu_enable(struct cxl_afu *afu) ...@@ -67,7 +68,7 @@ static int afu_enable(struct cxl_afu *afu)
{ {
pr_devel("AFU enable request\n"); pr_devel("AFU enable request\n");
return afu_control(afu, CXL_AFU_Cntl_An_E, return afu_control(afu, CXL_AFU_Cntl_An_E, 0,
CXL_AFU_Cntl_An_ES_Enabled, CXL_AFU_Cntl_An_ES_Enabled,
CXL_AFU_Cntl_An_ES_MASK, true); CXL_AFU_Cntl_An_ES_MASK, true);
} }
...@@ -76,7 +77,8 @@ int cxl_afu_disable(struct cxl_afu *afu) ...@@ -76,7 +77,8 @@ int cxl_afu_disable(struct cxl_afu *afu)
{ {
pr_devel("AFU disable request\n"); pr_devel("AFU disable request\n");
return afu_control(afu, 0, CXL_AFU_Cntl_An_ES_Disabled, return afu_control(afu, 0, CXL_AFU_Cntl_An_E,
CXL_AFU_Cntl_An_ES_Disabled,
CXL_AFU_Cntl_An_ES_MASK, false); CXL_AFU_Cntl_An_ES_MASK, false);
} }
...@@ -85,7 +87,7 @@ static int native_afu_reset(struct cxl_afu *afu) ...@@ -85,7 +87,7 @@ static int native_afu_reset(struct cxl_afu *afu)
{ {
pr_devel("AFU reset request\n"); pr_devel("AFU reset request\n");
return afu_control(afu, CXL_AFU_Cntl_An_RA, return afu_control(afu, CXL_AFU_Cntl_An_RA, 0,
CXL_AFU_Cntl_An_RS_Complete | CXL_AFU_Cntl_An_ES_Disabled, CXL_AFU_Cntl_An_RS_Complete | CXL_AFU_Cntl_An_ES_Disabled,
CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK, CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK,
false); false);
...@@ -595,6 +597,32 @@ static int deactivate_afu_directed(struct cxl_afu *afu) ...@@ -595,6 +597,32 @@ static int deactivate_afu_directed(struct cxl_afu *afu)
cxl_sysfs_afu_m_remove(afu); cxl_sysfs_afu_m_remove(afu);
cxl_chardev_afu_remove(afu); cxl_chardev_afu_remove(afu);
/*
* The CAIA section 2.2.1 indicates that the procedure for starting and
* stopping an AFU in AFU directed mode is AFU specific, which is not
* ideal since this code is generic and with one exception has no
* knowledge of the AFU. This is in contrast to the procedure for
* disabling a dedicated process AFU, which is documented to just
* require a reset. The architecture does indicate that both an AFU
* reset and an AFU disable should result in the AFU being disabled and
* we do both followed by a PSL purge for safety.
*
* Notably we used to have some issues with the disable sequence on PSL
* cards, which is why we ended up using this heavy weight procedure in
* the first place, however a bug was discovered that had rendered the
* disable operation ineffective, so it is conceivable that was the
* sole explanation for those difficulties. Careful regression testing
* is recommended if anyone attempts to remove or reorder these
* operations.
*
* The XSL on the Mellanox CX4 behaves a little differently from the
* PSL based cards and will time out an AFU reset if the AFU is still
* enabled. That card is special in that we do have a means to identify
* it from this code, so in that case we skip the reset and just use a
* disable/purge to avoid the timeout and corresponding noise in the
* kernel log.
*/
if (afu->adapter->native->sl_ops->needs_reset_before_disable)
cxl_ops->afu_reset(afu); cxl_ops->afu_reset(afu);
cxl_afu_disable(afu); cxl_afu_disable(afu);
cxl_psl_purge(afu); cxl_psl_purge(afu);
...@@ -735,6 +763,22 @@ static int native_attach_process(struct cxl_context *ctx, bool kernel, ...@@ -735,6 +763,22 @@ static int native_attach_process(struct cxl_context *ctx, bool kernel,
static inline int detach_process_native_dedicated(struct cxl_context *ctx) static inline int detach_process_native_dedicated(struct cxl_context *ctx)
{ {
/*
* The CAIA section 2.1.1 indicates that we need to do an AFU reset to
* stop the AFU in dedicated mode (we therefore do not make that
* optional like we do in the afu directed path). It does not indicate
* that we need to do an explicit disable (which should occur
* implicitly as part of the reset) or purge, but we do these as well
* to be on the safe side.
*
* Notably we used to have some issues with the disable sequence
* (before the sequence was spelled out in the architecture) which is
* why we were so heavy weight in the first place, however a bug was
* discovered that had rendered the disable operation ineffective, so
* it is conceivable that was the sole explanation for those
* difficulties. Point is, we should be careful and do some regression
* testing if we ever attempt to remove any part of this procedure.
*/
cxl_ops->afu_reset(ctx->afu); cxl_ops->afu_reset(ctx->afu);
cxl_afu_disable(ctx->afu); cxl_afu_disable(ctx->afu);
cxl_psl_purge(ctx->afu); cxl_psl_purge(ctx->afu);
......
...@@ -1309,6 +1309,7 @@ static const struct cxl_service_layer_ops psl_ops = { ...@@ -1309,6 +1309,7 @@ static const struct cxl_service_layer_ops psl_ops = {
.write_timebase_ctrl = write_timebase_ctrl_psl, .write_timebase_ctrl = write_timebase_ctrl_psl,
.timebase_read = timebase_read_psl, .timebase_read = timebase_read_psl,
.capi_mode = OPAL_PHB_CAPI_MODE_CAPI, .capi_mode = OPAL_PHB_CAPI_MODE_CAPI,
.needs_reset_before_disable = true,
}; };
static const struct cxl_service_layer_ops xsl_ops = { static const struct cxl_service_layer_ops xsl_ops = {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment