Commit f35fe5f4 authored by Alexander Usyskin, committed by Greg Kroah-Hartman

mei: add a vtag map for each client

The vtag map is a list of (vtag, file pointer) tuples (struct mei_cl_vtag)
associated with a particular ME host client.
Signed-off-by: Alexander Usyskin <alexander.usyskin@intel.com>
Signed-off-by: Tomas Winkler <tomas.winkler@intel.com>
Link: https://lore.kernel.org/r/20200818115147.2567012-8-tomas.winkler@intel.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent d1376f3d
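
For orientation, here is a minimal userspace sketch of the mapping this commit introduces. It is an illustration only, not the driver code: the opaque struct file stand-in, the vtag_entry type, and fp_by_vtag() are hypothetical names mirroring struct mei_cl_vtag and mei_cl_fp_by_vtag() in the diff below.

#include <stdint.h>
#include <stdio.h>

struct file;                            /* opaque stand-in for the kernel's struct file */

/* one (vtag, file pointer) tuple, modeled on struct mei_cl_vtag */
struct vtag_entry {
	struct vtag_entry *next;
	const struct file *fp;
	uint8_t vtag;
};

/* lookup in one direction: vtag -> file pointer (cf. mei_cl_fp_by_vtag) */
static const struct file *fp_by_vtag(const struct vtag_entry *map, uint8_t vtag)
{
	for (; map; map = map->next)
		if (map->vtag == vtag)
			return map->fp;
	return NULL;                    /* the driver returns ERR_PTR(-ENOENT) */
}

int main(void)
{
	int f1, f2;                     /* fake handles, just distinct addresses */
	struct vtag_entry e2 = { NULL, (const struct file *)&f2, 2 };
	struct vtag_entry e1 = { &e2, (const struct file *)&f1, 1 };

	printf("vtag 1 -> %p (expect %p)\n",
	       (void *)fp_by_vtag(&e1, 1), (void *)&f1);
	printf("vtag 5 -> %p (expect nil)\n", (void *)fp_by_vtag(&e1, 5));
	return 0;
}

The reverse lookup (file pointer -> vtag) is the same walk keyed on fp, as mei_cl_vtag_by_fp() does in main.c below.
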
drivers/misc/mei/client.c
@@ -354,6 +354,27 @@ static inline void mei_tx_cb_dequeue(struct mei_cl_cb *cb)
mei_io_cb_free(cb);
}
/**
* mei_cl_set_read_by_fp - set the pending_read flag in the vtag struct for a given fp
*
* Locking: called under "dev->device_lock" lock
*
* @cl: mei client
* @fp: pointer to file structure
*/
static void mei_cl_set_read_by_fp(const struct mei_cl *cl,
const struct file *fp)
{
struct mei_cl_vtag *cl_vtag;
list_for_each_entry(cl_vtag, &cl->vtag_map, list) {
if (cl_vtag->fp == fp) {
cl_vtag->pending_read = true;
return;
}
}
}
/**
* mei_io_cb_init - allocate and initialize io callback
*
@@ -435,6 +456,19 @@ static void mei_io_list_free_fp(struct list_head *head, const struct file *fp)
mei_io_cb_free(cb);
}
/**
* mei_cl_free_pending - free pending cb
*
* @cl: host client
*/
static void mei_cl_free_pending(struct mei_cl *cl)
{
struct mei_cl_cb *cb;
cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
mei_io_cb_free(cb);
}
/**
* mei_cl_alloc_cb - a convenient wrapper for allocating read cb
*
@@ -544,7 +578,9 @@ int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
mei_io_tx_list_free_cl(&cl->dev->write_waiting_list, cl);
mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl);
mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl);
/* free pending cb only in final flush */
if (!fp)
mei_cl_free_pending(cl);
spin_lock(&cl->rd_completed_lock);
mei_io_list_free_fp(&cl->rd_completed, fp);
spin_unlock(&cl->rd_completed_lock);
@@ -565,6 +601,7 @@ static void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
init_waitqueue_head(&cl->rx_wait);
init_waitqueue_head(&cl->tx_wait);
init_waitqueue_head(&cl->ev_wait);
INIT_LIST_HEAD(&cl->vtag_map);
spin_lock_init(&cl->rd_completed_lock);
INIT_LIST_HEAD(&cl->rd_completed);
INIT_LIST_HEAD(&cl->rd_pending);
@@ -1237,8 +1274,117 @@ static int mei_cl_tx_flow_ctrl_creds_reduce(struct mei_cl *cl)
return 0;
}
/**
* mei_cl_vtag_alloc - allocate and fill the vtag structure
*
* @fp: pointer to file structure
* @vtag: vm tag
*
* Return:
* * Pointer to allocated struct - on success
* * ERR_PTR(-ENOMEM) on memory allocation failure
*/
struct mei_cl_vtag *mei_cl_vtag_alloc(struct file *fp, u8 vtag)
{
struct mei_cl_vtag *cl_vtag;
cl_vtag = kzalloc(sizeof(*cl_vtag), GFP_KERNEL);
if (!cl_vtag)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&cl_vtag->list);
cl_vtag->vtag = vtag;
cl_vtag->fp = fp;
return cl_vtag;
}
/**
* mei_cl_fp_by_vtag - obtain the file pointer by vtag
*
* @cl: host client
* @vtag: vm tag
*
* Return:
* * A file pointer - on success
* * ERR_PTR(-ENOENT) if vtag is not found in the client vtag list
*/
const struct file *mei_cl_fp_by_vtag(const struct mei_cl *cl, u8 vtag)
{
struct mei_cl_vtag *vtag_l;
list_for_each_entry(vtag_l, &cl->vtag_map, list)
if (vtag_l->vtag == vtag)
return vtag_l->fp;
return ERR_PTR(-ENOENT);
}
/**
* mei_cl_reset_read_by_vtag - reset pending_read flag by given vtag
*
* @cl: host client
* @vtag: vm tag
*/
static void mei_cl_reset_read_by_vtag(const struct mei_cl *cl, u8 vtag)
{
struct mei_cl_vtag *vtag_l;
list_for_each_entry(vtag_l, &cl->vtag_map, list) {
if (vtag_l->vtag == vtag) {
vtag_l->pending_read = false;
break;
}
}
}
/**
* mei_cl_read_vtag_add_fc - add flow control for next pending reader
* in the vtag list
*
* @cl: host client
*/
static void mei_cl_read_vtag_add_fc(struct mei_cl *cl)
{
struct mei_cl_vtag *cl_vtag;
list_for_each_entry(cl_vtag, &cl->vtag_map, list) {
if (cl_vtag->pending_read) {
if (mei_cl_enqueue_ctrl_wr_cb(cl,
mei_cl_mtu(cl),
MEI_FOP_READ,
cl_vtag->fp))
cl->rx_flow_ctrl_creds++;
break;
}
}
}
/**
* mei_cl_vt_support_check - check if the client supports vtags
*
* @cl: host client
*
* Return:
* * 0 - supported, or not connected at all
* * -EOPNOTSUPP - vtags are not supported by client
*/
int mei_cl_vt_support_check(const struct mei_cl *cl)
{
struct mei_device *dev = cl->dev;
if (!dev->hbm_f_vt_supported)
return -EOPNOTSUPP;
if (!cl->me_cl)
return 0;
return cl->me_cl->props.vt_supported ? 0 : -EOPNOTSUPP;
}
/**
* mei_cl_add_rd_completed - add read completed callback to list with lock
* and vtag check
*
* @cl: host client
* @cb: callback block
@@ -1246,6 +1392,20 @@ static int mei_cl_tx_flow_ctrl_creds_reduce(struct mei_cl *cl)
*/
void mei_cl_add_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb)
{
const struct file *fp;
if (!mei_cl_vt_support_check(cl)) {
fp = mei_cl_fp_by_vtag(cl, cb->vtag);
if (IS_ERR(fp)) {
/* client already disconnected, discarding */
mei_io_cb_free(cb);
return;
}
cb->fp = fp;
mei_cl_reset_read_by_vtag(cl, cb->vtag);
mei_cl_read_vtag_add_fc(cl);
}
spin_lock(&cl->rd_completed_lock);
list_add_tail(&cb->list, &cl->rd_completed);
spin_unlock(&cl->rd_completed_lock);
@@ -1520,13 +1680,17 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
return 0;
/* HW currently supports only one pending read */
if (cl->rx_flow_ctrl_creds) {
mei_cl_set_read_by_fp(cl, fp);
return -EBUSY;
}
cb = mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, fp);
if (!cb)
return -ENOMEM;
mei_cl_set_read_by_fp(cl, fp);
rets = pm_runtime_get(dev->dev);
if (rets < 0 && rets != -EINPROGRESS) {
pm_runtime_put_noidle(dev->dev);
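
The client.c changes above juggle a single hardware read credit among several file handles: a busy reader is only marked pending_read, and the completion path re-arms the next pending one. A compact userspace sketch of that bookkeeping follows; it is an illustration under stated assumptions, not the driver code, with array indices standing in for file pointers and a plain counter for the credit.

#include <stdbool.h>
#include <stdio.h>

#define NREADERS 3

static bool pending_read[NREADERS];     /* per-fp flag, as in struct mei_cl_vtag */
static int rx_flow_ctrl_creds;          /* 0 or 1: one read pending in hardware */

/* cf. mei_cl_read_start(): only one read may be in flight */
static void read_start(int i)
{
	pending_read[i] = true;         /* cf. mei_cl_set_read_by_fp() */
	if (rx_flow_ctrl_creds) {
		printf("reader %d: queued (-EBUSY to caller)\n", i);
		return;
	}
	rx_flow_ctrl_creds++;
	printf("reader %d: read issued\n", i);
}

/* cf. mei_cl_add_rd_completed(): clear the satisfied reader, re-arm the next */
static void read_complete(int i)
{
	rx_flow_ctrl_creds--;
	pending_read[i] = false;        /* cf. mei_cl_reset_read_by_vtag() */
	for (int j = 0; j < NREADERS; j++) {
		if (pending_read[j]) {  /* cf. mei_cl_read_vtag_add_fc() */
			rx_flow_ctrl_creds++;
			printf("reader %d: re-armed\n", j);
			break;
		}
	}
}

int main(void)
{
	read_start(0);                  /* takes the single credit */
	read_start(1);                  /* marked pending, waits */
	read_complete(0);               /* credit handed to reader 1 */
	return 0;
}
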
drivers/misc/mei/client.h
@@ -146,6 +146,9 @@ struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
const struct file *fp);
int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp);
struct mei_cl_vtag *mei_cl_vtag_alloc(struct file *fp, u8 vtag);
const struct file *mei_cl_fp_by_vtag(const struct mei_cl *cl, u8 vtag);
int mei_cl_vt_support_check(const struct mei_cl *cl);
/*
* MEI input output function prototype
*/
drivers/misc/mei/main.c
@@ -80,6 +80,27 @@ static int mei_open(struct inode *inode, struct file *file)
return err;
}
/**
* mei_cl_vtag_remove_by_fp - remove vtag that corresponds to fp from list
*
* @cl: host client
* @fp: pointer to file structure
*
*/
static void mei_cl_vtag_remove_by_fp(const struct mei_cl *cl,
const struct file *fp)
{
struct mei_cl_vtag *vtag_l, *next;
list_for_each_entry_safe(vtag_l, next, &cl->vtag_map, list) {
if (vtag_l->fp == fp) {
list_del(&vtag_l->list);
kfree(vtag_l);
return;
}
}
}
/**
* mei_release - the release function
*
@@ -101,17 +122,35 @@ static int mei_release(struct inode *inode, struct file *file)
mutex_lock(&dev->device_lock);
mei_cl_vtag_remove_by_fp(cl, file);
if (!list_empty(&cl->vtag_map)) {
cl_dbg(dev, cl, "not the last vtag\n");
mei_cl_flush_queues(cl, file);
rets = 0;
goto out;
}
rets = mei_cl_disconnect(cl);
/*
* Check again: This is necessary since disconnect releases the lock
* and another client can connect in the meantime.
*/
if (!list_empty(&cl->vtag_map)) {
cl_dbg(dev, cl, "not the last vtag after disconnect\n");
mei_cl_flush_queues(cl, file);
goto out;
}
mei_cl_flush_queues(cl, NULL);
cl_dbg(dev, cl, "removing\n");
mei_cl_unlink(cl);
kfree(cl);
out:
file->private_data = NULL;
mutex_unlock(&dev->device_lock);
return rets;
}
@@ -237,6 +276,28 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
mutex_unlock(&dev->device_lock);
return rets;
}
/**
* mei_cl_vtag_by_fp - obtain the vtag by file pointer
*
* @cl: host client
* @fp: pointer to file structure
*
* Return: vtag value on success, otherwise 0
*/
static u8 mei_cl_vtag_by_fp(const struct mei_cl *cl, const struct file *fp)
{
struct mei_cl_vtag *cl_vtag;
if (!fp)
return 0;
list_for_each_entry(cl_vtag, &cl->vtag_map, list)
if (cl_vtag->fp == fp)
return cl_vtag->vtag;
return 0;
}
/**
* mei_write - the write function.
*
@@ -314,6 +375,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
rets = -ENOMEM;
goto out;
}
cb->vtag = mei_cl_vtag_by_fp(cl, file);
rets = copy_from_user(cb->buf.data, ubuf, length);
if (rets) {
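
The reworked mei_release() above only tears the client down once the last vtag mapping is gone, and it must re-check the map after mei_cl_disconnect() because that call drops device_lock while waiting for the firmware. A minimal sketch of the pattern, under stated assumptions: a plain counter stands in for the vtag map, a pthread mutex for device_lock, and disconnect() is a hypothetical stand-in that sleeps with the lock released.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t device_lock = PTHREAD_MUTEX_INITIALIZER;
static int vtag_map_len = 2;            /* two open handles share one host client */

/* stand-in for mei_cl_disconnect(): waits for firmware with the lock dropped */
static void disconnect(void)
{
	pthread_mutex_unlock(&device_lock);
	/* ... another thread may add a vtag mapping here ... */
	pthread_mutex_lock(&device_lock);
}

static void release(void)
{
	pthread_mutex_lock(&device_lock);
	vtag_map_len--;                 /* cf. mei_cl_vtag_remove_by_fp() */

	if (vtag_map_len > 0) {         /* not the last vtag: client stays */
		printf("not the last vtag\n");
		goto out;
	}

	disconnect();
	if (vtag_map_len > 0) {         /* re-check: the lock was dropped above */
		printf("not the last vtag after disconnect\n");
		goto out;
	}

	printf("removing client\n");    /* cf. mei_cl_unlink() + kfree(cl) */
out:
	pthread_mutex_unlock(&device_lock);
}

int main(void)
{
	release();                      /* first close: map not yet empty */
	release();                      /* last close: client freed */
	return 0;
}
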
drivers/misc/mei/mei_dev.h
@@ -193,6 +193,21 @@ struct mei_cl_cb {
u32 blocking:1;
};
/**
* struct mei_cl_vtag - file pointer to vtag mapping structure
*
* @list: link in map queue
* @fp: file pointer
* @vtag: corresponding vtag
* @pending_read: the read is pending on this file
*/
struct mei_cl_vtag {
struct list_head list;
const struct file *fp;
u8 vtag;
u8 pending_read:1;
};
/**
* struct mei_cl - me client host representation
* carried in file->private_data
@@ -209,6 +224,7 @@ struct mei_cl_cb {
* @me_cl: fw client connected
* @fp: file associated with client
* @host_client_id: host id
* @vtag_map: vtag map
* @tx_flow_ctrl_creds: transmit flow credentials
* @rx_flow_ctrl_creds: receive flow credentials
* @timer_count: watchdog timer for operation completion
@@ -235,6 +251,7 @@ struct mei_cl {
struct mei_me_client *me_cl;
const struct file *fp;
u8 host_client_id;
struct list_head vtag_map;
u8 tx_flow_ctrl_creds;
u8 rx_flow_ctrl_creds;
u8 timer_count;