Commit fef4912b authored by Harsh Jain, committed by Herbert Xu

crypto: chelsio - Handle PCI shutdown event

chcr receives the "CXGB4_STATE_DETACH" event on PCI shutdown. Wait for in-flight requests to finish processing, then mark the device unavailable.
Signed-off-by: Harsh Jain <harsh@chelsio.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent c4f6d44d
(The diff for one file in this commit is collapsed in this view.)
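The change boils down to a quiesce-on-detach pattern: every submitted request bumps an in-flight counter, and on CXGB4_STATE_DETACH the driver stops accepting new work, then polls from delayed work until the counter drains before tearing the device down. A minimal kernel-style sketch of just that pattern (the my_* names are illustrative, not the driver's, and the driver additionally WARNs when retries run out):

/*
 * Quiesce-on-detach sketch, assuming the usual delayed-work plumbing.
 */
#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

#define MY_DETACH_TM	(msecs_to_jiffies(50))
#define MY_RETRY	5

struct my_dev {
	atomic_t inflight;		/* requests posted, not yet completed */
	int wqretry;			/* polls left before giving up */
	struct delayed_work detach_work;
	struct completion detach_comp;
};

static void my_detach_work_fn(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev,
					  detach_work.work);

	/* Re-arm while requests remain and retries are left ... */
	if (atomic_read(&dev->inflight) && --dev->wqretry) {
		schedule_delayed_work(&dev->detach_work, MY_DETACH_TM);
		return;
	}
	/* ... otherwise release the thread blocked in detach. */
	complete(&dev->detach_comp);
}

static void my_dev_init(struct my_dev *dev)
{
	atomic_set(&dev->inflight, 0);
	dev->wqretry = MY_RETRY;
	INIT_DELAYED_WORK(&dev->detach_work, my_detach_work_fn);
	init_completion(&dev->detach_comp);
}

static void my_detach(struct my_dev *dev)
{
	/* Poll until in-flight requests drain, then proceed with teardown. */
	if (atomic_read(&dev->inflight)) {
		schedule_delayed_work(&dev->detach_work, MY_DETACH_TM);
		wait_for_completion(&dev->detach_comp);
	}
}

The commit implements the same dance in detach_work_fn()/chcr_detach_device() below, with the CHCR_DETACH state flag closing the submission path first.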
drivers/crypto/chelsio/chcr_core.c
@@ -26,10 +26,7 @@
 #include "chcr_core.h"
 #include "cxgb4_uld.h"
 
-static LIST_HEAD(uld_ctx_list);
-static DEFINE_MUTEX(dev_mutex);
-static atomic_t dev_count;
-static struct uld_ctx *ctx_rr;
+static struct chcr_driver_data drv_data;
 
 typedef int (*chcr_handler_func)(struct chcr_dev *dev, unsigned char *input);
 static int cpl_fw6_pld_handler(struct chcr_dev *dev, unsigned char *input);
@@ -53,6 +50,29 @@ static struct cxgb4_uld_info chcr_uld_info = {
 #endif /* CONFIG_CHELSIO_IPSEC_INLINE */
 };
 
+static void detach_work_fn(struct work_struct *work)
+{
+	struct chcr_dev *dev;
+
+	dev = container_of(work, struct chcr_dev, detach_work.work);
+	if (atomic_read(&dev->inflight)) {
+		dev->wqretry--;
+		if (dev->wqretry) {
+			pr_debug("Request Inflight Count %d\n",
+				 atomic_read(&dev->inflight));
+
+			schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
+		} else {
+			WARN(1, "CHCR:%d request Still Pending\n",
+			     atomic_read(&dev->inflight));
+			complete(&dev->detach_comp);
+		}
+	} else {
+		complete(&dev->detach_comp);
+	}
+}
+
 struct uld_ctx *assign_chcr_device(void)
 {
 	struct uld_ctx *u_ctx = NULL;
@@ -63,56 +83,70 @@ struct uld_ctx *assign_chcr_device(void)
 	 * Although One session must use the same device to
 	 * maintain request-response ordering.
 	 */
-	mutex_lock(&dev_mutex);
-	if (!list_empty(&uld_ctx_list)) {
-		u_ctx = ctx_rr;
-		if (list_is_last(&ctx_rr->entry, &uld_ctx_list))
-			ctx_rr = list_first_entry(&uld_ctx_list,
-						  struct uld_ctx,
-						  entry);
+	mutex_lock(&drv_data.drv_mutex);
+	if (!list_empty(&drv_data.act_dev)) {
+		u_ctx = drv_data.last_dev;
+		if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
+			drv_data.last_dev = list_first_entry(&drv_data.act_dev,
+						  struct uld_ctx, entry);
 		else
-			ctx_rr = list_next_entry(ctx_rr, entry);
+			drv_data.last_dev =
+				list_next_entry(drv_data.last_dev, entry);
 	}
-	mutex_unlock(&dev_mutex);
+	mutex_unlock(&drv_data.drv_mutex);
 	return u_ctx;
 }
-static int chcr_dev_add(struct uld_ctx *u_ctx)
+static void chcr_dev_add(struct uld_ctx *u_ctx)
 {
 	struct chcr_dev *dev;
 
-	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
-	if (!dev)
-		return -ENXIO;
+	dev = &u_ctx->dev;
+	dev->state = CHCR_ATTACH;
+	atomic_set(&dev->inflight, 0);
+	mutex_lock(&drv_data.drv_mutex);
+	list_move(&u_ctx->entry, &drv_data.act_dev);
+	if (!drv_data.last_dev)
+		drv_data.last_dev = u_ctx;
+	mutex_unlock(&drv_data.drv_mutex);
+}
+
+static void chcr_dev_init(struct uld_ctx *u_ctx)
+{
+	struct chcr_dev *dev;
 
+	dev = &u_ctx->dev;
 	spin_lock_init(&dev->lock_chcr_dev);
-	u_ctx->dev = dev;
-	dev->u_ctx = u_ctx;
-	atomic_inc(&dev_count);
-	mutex_lock(&dev_mutex);
-	list_add_tail(&u_ctx->entry, &uld_ctx_list);
-	if (!ctx_rr)
-		ctx_rr = u_ctx;
-	mutex_unlock(&dev_mutex);
-	return 0;
+	INIT_DELAYED_WORK(&dev->detach_work, detach_work_fn);
+	init_completion(&dev->detach_comp);
+	dev->state = CHCR_INIT;
+	dev->wqretry = WQ_RETRY;
+	atomic_inc(&drv_data.dev_count);
+	atomic_set(&dev->inflight, 0);
+	mutex_lock(&drv_data.drv_mutex);
+	list_add_tail(&u_ctx->entry, &drv_data.inact_dev);
+	if (!drv_data.last_dev)
+		drv_data.last_dev = u_ctx;
+	mutex_unlock(&drv_data.drv_mutex);
 }
-static int chcr_dev_remove(struct uld_ctx *u_ctx)
+static int chcr_dev_move(struct uld_ctx *u_ctx)
 {
-	if (ctx_rr == u_ctx) {
-		if (list_is_last(&ctx_rr->entry, &uld_ctx_list))
-			ctx_rr = list_first_entry(&uld_ctx_list,
-						  struct uld_ctx,
-						  entry);
+	mutex_lock(&drv_data.drv_mutex);
+	if (drv_data.last_dev == u_ctx) {
+		if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
+			drv_data.last_dev = list_first_entry(&drv_data.act_dev,
+						  struct uld_ctx, entry);
 		else
-			ctx_rr = list_next_entry(ctx_rr, entry);
+			drv_data.last_dev =
+				list_next_entry(drv_data.last_dev, entry);
 	}
-	list_del(&u_ctx->entry);
-	if (list_empty(&uld_ctx_list))
-		ctx_rr = NULL;
-	kfree(u_ctx->dev);
-	u_ctx->dev = NULL;
-	atomic_dec(&dev_count);
+	list_move(&u_ctx->entry, &drv_data.inact_dev);
+	if (list_empty(&drv_data.act_dev))
+		drv_data.last_dev = NULL;
+	atomic_dec(&drv_data.dev_count);
+	mutex_unlock(&drv_data.drv_mutex);
 	return 0;
 }
@@ -167,6 +201,7 @@ static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
 		goto out;
 	}
 	u_ctx->lldi = *lld;
+	chcr_dev_init(u_ctx);
 #ifdef CONFIG_CHELSIO_IPSEC_INLINE
 	if (lld->crypto & ULP_CRYPTO_IPSEC_INLINE)
 		chcr_add_xfrmops(lld);
@@ -179,7 +214,7 @@ int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
 			  const struct pkt_gl *pgl)
 {
 	struct uld_ctx *u_ctx = (struct uld_ctx *)handle;
-	struct chcr_dev *dev = u_ctx->dev;
+	struct chcr_dev *dev = &u_ctx->dev;
 	const struct cpl_fw6_pld *rpl = (struct cpl_fw6_pld *)rsp;
 
 	if (rpl->opcode != CPL_FW6_PLD) {
@@ -201,6 +236,28 @@ int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev)
 }
 #endif /* CONFIG_CHELSIO_IPSEC_INLINE */
 
+static void chcr_detach_device(struct uld_ctx *u_ctx)
+{
+	struct chcr_dev *dev = &u_ctx->dev;
+
+	spin_lock_bh(&dev->lock_chcr_dev);
+	if (dev->state == CHCR_DETACH) {
+		spin_unlock_bh(&dev->lock_chcr_dev);
+		pr_debug("Detached Event received for already detach device\n");
+		return;
+	}
+	dev->state = CHCR_DETACH;
+	spin_unlock_bh(&dev->lock_chcr_dev);
+
+	if (atomic_read(&dev->inflight) != 0) {
+		schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
+		wait_for_completion(&dev->detach_comp);
+	}
+
+	// Move u_ctx to inactive_dev list
+	chcr_dev_move(u_ctx);
+}
+
 static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
 {
 	struct uld_ctx *u_ctx = handle;
@@ -208,23 +265,16 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
 	switch (state) {
 	case CXGB4_STATE_UP:
-		if (!u_ctx->dev) {
-			ret = chcr_dev_add(u_ctx);
-			if (ret != 0)
-				return ret;
+		if (u_ctx->dev.state != CHCR_INIT) {
+			// Already initialised.
+			return 0;
 		}
-		if (atomic_read(&dev_count) == 1)
-			ret = start_crypto();
+		chcr_dev_add(u_ctx);
+		ret = start_crypto();
 		break;
 
 	case CXGB4_STATE_DETACH:
-		if (u_ctx->dev) {
-			mutex_lock(&dev_mutex);
-			chcr_dev_remove(u_ctx);
-			mutex_unlock(&dev_mutex);
-		}
-		if (!atomic_read(&dev_count))
-			stop_crypto();
+		chcr_detach_device(u_ctx);
 		break;
 
 	case CXGB4_STATE_START_RECOVERY:
static int __init chcr_crypto_init(void)
{
INIT_LIST_HEAD(&drv_data.act_dev);
INIT_LIST_HEAD(&drv_data.inact_dev);
atomic_set(&drv_data.dev_count, 0);
mutex_init(&drv_data.drv_mutex);
drv_data.last_dev = NULL;
cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info);
return 0;
}
@@ -245,18 +301,20 @@ static void __exit chcr_crypto_exit(void)
 {
 	struct uld_ctx *u_ctx, *tmp;
 
-	if (atomic_read(&dev_count))
-		stop_crypto();
+	stop_crypto();
+	cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
 
 	/* Remove all devices from list */
-	mutex_lock(&dev_mutex);
-	list_for_each_entry_safe(u_ctx, tmp, &uld_ctx_list, entry) {
-		if (u_ctx->dev)
-			chcr_dev_remove(u_ctx);
+	mutex_lock(&drv_data.drv_mutex);
+	list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) {
+		list_del(&u_ctx->entry);
 		kfree(u_ctx);
 	}
-	mutex_unlock(&dev_mutex);
-	cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
+	list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) {
+		list_del(&u_ctx->entry);
+		kfree(u_ctx);
+	}
+	mutex_unlock(&drv_data.drv_mutex);
 }
 
 module_init(chcr_crypto_init);
drivers/crypto/chelsio/chcr_core.h
@@ -47,7 +47,7 @@
 
 #define MAX_PENDING_REQ_TO_HW 20
 #define CHCR_TEST_RESPONSE_TIMEOUT 1000
-
+#define WQ_DETACH_TM	(msecs_to_jiffies(50))
 #define PAD_ERROR_BIT		1
 #define CHK_PAD_ERR_BIT(x)	(((x) >> PAD_ERROR_BIT) & 1)
@@ -61,9 +61,6 @@
 #define HASH_WR_MIN_LEN (sizeof(struct chcr_wr) + \
 			DUMMY_BYTES + \
 			sizeof(struct ulptx_sgl))
-
-#define padap(dev) pci_get_drvdata(dev->u_ctx->lldi.pdev)
-
 struct uld_ctx;
 
 struct _key_ctx {
@@ -121,6 +118,20 @@ struct _key_ctx {
 #define KEYCTX_TX_WR_AUTHIN_G(x) \
 	(((x) >> KEYCTX_TX_WR_AUTHIN_S) & KEYCTX_TX_WR_AUTHIN_M)
 
+#define WQ_RETRY	5
+
+struct chcr_driver_data {
+	struct list_head act_dev;
+	struct list_head inact_dev;
+	atomic_t dev_count;
+	struct mutex drv_mutex;
+	struct uld_ctx *last_dev;
+};
+enum chcr_state {
+	CHCR_INIT = 0,
+	CHCR_ATTACH,
+	CHCR_DETACH,
+};
 struct chcr_wr {
 	struct fw_crypto_lookaside_wr wreq;
 	struct ulp_txpkt ulptx;
@@ -131,14 +142,18 @@ struct chcr_wr {
 
 struct chcr_dev {
 	spinlock_t lock_chcr_dev;
-	struct uld_ctx *u_ctx;
+	enum chcr_state state;
+	atomic_t inflight;
+	int wqretry;
+	struct delayed_work detach_work;
+	struct completion detach_comp;
 	unsigned char tx_channel_id;
 };
 
 struct uld_ctx {
 	struct list_head entry;
 	struct cxgb4_lld_info lldi;
-	struct chcr_dev *dev;
+	struct chcr_dev dev;
 };
 
 struct sge_opaque_hdr {
@@ -189,6 +204,13 @@ static inline unsigned int sgl_len(unsigned int n)
 	return (3 * n) / 2 + (n & 1) + 2;
 }
 
+static inline void *padap(struct chcr_dev *dev)
+{
+	struct uld_ctx *u_ctx = container_of(dev, struct uld_ctx, dev);
+
+	return pci_get_drvdata(u_ctx->lldi.pdev);
+}
+
 struct uld_ctx *assign_chcr_device(void);
 int chcr_send_wr(struct sk_buff *skb);
 int start_crypto(void);
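The request paths that consume these new fields live in the collapsed chcr_algo.c diff: submission is expected to bump dev->inflight and to refuse work once the state reaches CHCR_DETACH, and completion to drop the count again. A sketch of that guard under those assumptions (the helper names are illustrative, since that hunk is not shown here):

static inline int chcr_inc_wrcount(struct chcr_dev *dev)
{
	if (dev->state == CHCR_DETACH)
		return 1;	/* device is quiescing: refuse new work */
	atomic_inc(&dev->inflight);
	return 0;
}

static inline void chcr_dec_wrcount(struct chcr_dev *dev)
{
	atomic_dec(&dev->inflight);
}

With that pairing in place, the inflight count detach_work_fn() polls can only fall, so chcr_detach_device() is guaranteed to make progress once the state flag is set.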