Commit 687a2859 authored by Greg Kroah-Hartman

Merge tag 'mhi-for-v6.8' of git://git.kernel.org/pub/scm/linux/kernel/git/mani/mhi into char-misc-next

Manivannan writes:

MHI Host
========

- Added an alignment check for the event ring read pointer to avoid potential
  buffer corruption (see the sketch just after this list).
- Added support for the SDX75 modem, which takes a longer time to enter the
  READY state.
- Added a spinlock to protect against concurrent access while queuing transfer
  ring elements.
- Dropped the read channel lock before invoking the client callback, since the
  client can potentially queue buffers and thus end up with a soft lockup.
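
As a minimal illustration of the first item, the new validity check (shown in
full in the host main.c hunks further down) rejects a device-reported ring
pointer that is either outside the ring or not aligned to the 16-byte ring
element size:

  /* From is_valid_ring_ptr() below: an unaligned read pointer would make
   * the host parse ring elements from a bogus offset, corrupting buffers. */
  static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr)
  {
          return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len &&
                  !(addr & (sizeof(struct mhi_ring_element) - 1));
  }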

MHI Endpoint
============

- Used kzalloc() to allocate event ring elements instead of allocating the
  elements on the stack, as the endpoint controller trying to queue them may not
  be able to use vmalloc memory (e.g., when using DMA).
- Used the slab allocator to allocate memory for objects that are used
  frequently and are of fixed size.
- Added support for the interrupt moderation timer feature, which is used by the
  host to limit the number of interrupts raised by the device for an event ring.
- Added async read/write DMA support for transferring data between the host and
  the endpoint. So far, the MHI EP stack assumed that data will be transferred
  synchronously (i.e., it sends the completion once the transfer APIs return).
  But this hurts throughput if the controller is using DMA to do the transfer.

  So to add async support, the existing sync transfer APIs are renamed to
  {read/write}_sync, and two new APIs, {read/write}_async, are introduced for
  carrying out the async transfer.

  Controllers implementing the async APIs should queue the buffers and return
  immediately without waiting for transfer completion. Once the transfer
  completes later, they should invoke the completion callback so that the MHI EP
  stack can send the completion event to the host (see the controller sketch
  after this list).

  The controller driver patches (PCI EPF) for this async support are also merged
  into the MHI tree with Acks from the PCI maintainers.
- Fixed the DMA channel direction in the error path of the PCI EPF driver.
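
To illustrate the async contract above, a hypothetical controller
implementation might look like the sketch below. epf_dma_submit() and
epf_dma_done() are made-up names standing in for the controller's own DMA
machinery; only the mhi_ep_buf_info callback contract (queue, return, complete
later) is taken from this pull:

  /* Hypothetical async write implementation in a controller driver */
  static void epf_dma_done(void *param)
  {
          struct mhi_ep_buf_info *buf_info = param;

          /* Transfer finished: let the MHI EP stack send the completion
           * event to the host. */
          if (buf_info->cb)
                  buf_info->cb(buf_info);
  }

  static int epf_write_async(struct mhi_ep_cntrl *mhi_cntrl,
                             struct mhi_ep_buf_info *buf_info)
  {
          /* Queue the DMA and return immediately, no waiting here */
          return epf_dma_submit(mhi_cntrl, buf_info->dev_addr,
                                buf_info->host_addr, buf_info->size,
                                epf_dma_done, buf_info);
  }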

* tag 'mhi-for-v6.8' of git://git.kernel.org/pub/scm/linux/kernel/git/mani/mhi:
  bus: mhi: host: Drop chan lock before queuing buffers
  bus: mhi: host: Add spinlock to protect WP access when queueing TREs
  PCI: epf-mhi: Fix the DMA data direction of dma_unmap_single()
  bus: mhi: ep: Add checks for read/write callbacks while registering controllers
  bus: mhi: ep: Add support for async DMA read operation
  bus: mhi: ep: Add support for async DMA write operation
  PCI: epf-mhi: Enable MHI async read/write support
  PCI: epf-mhi: Add support for DMA async read/write operation
  PCI: epf-mhi: Simulate async read/write using iATU
  bus: mhi: ep: Introduce async read/write callbacks
  bus: mhi: ep: Rename read_from_host() and write_to_host() APIs
  bus: mhi: ep: Pass mhi_ep_buf_info struct to read/write APIs
  bus: mhi: ep: Add support for interrupt moderation timer
  bus: mhi: ep: Use slab allocator where applicable
  bus: mhi: host: Add alignment check for event ring read pointer
  bus: mhi: host: pci_generic: Add SDX75 based modem support
  bus: mhi: host: Add a separate timeout parameter for waiting ready
  bus: mhi: ep: Do not allocate event ring element on stack
parents 093976dd 01bd694a
@@ -126,6 +126,7 @@ struct mhi_ep_ring {
 	union mhi_ep_ring_ctx *ring_ctx;
 	struct mhi_ring_element *ring_cache;
 	enum mhi_ep_ring_type type;
+	struct delayed_work intmodt_work;
 	u64 rbase;
 	size_t rd_offset;
 	size_t wr_offset;
@@ -135,7 +136,9 @@ struct mhi_ep_ring {
 	u32 ch_id;
 	u32 er_index;
 	u32 irq_vector;
+	u32 intmodt;
 	bool started;
+	bool irq_pending;
 };
 
 struct mhi_ep_cmd {
@@ -159,6 +162,7 @@ struct mhi_ep_chan {
 	void (*xfer_cb)(struct mhi_ep_device *mhi_dev, struct mhi_result *result);
 	enum mhi_ch_state state;
 	enum dma_data_direction dir;
+	size_t rd_offset;
 	u64 tre_loc;
 	u32 tre_size;
 	u32 tre_bytes_left;
@@ -30,7 +30,8 @@ static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end)
 {
 	struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
-	size_t start, copy_size;
+	struct mhi_ep_buf_info buf_info = {};
+	size_t start;
 	int ret;
 
 	/* Don't proceed in the case of event ring. This happens during mhi_ep_ring_start(). */
@@ -43,30 +44,34 @@ static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end)
 	start = ring->wr_offset;
 	if (start < end) {
-		copy_size = (end - start) * sizeof(struct mhi_ring_element);
-		ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase +
-						(start * sizeof(struct mhi_ring_element)),
-						&ring->ring_cache[start], copy_size);
+		buf_info.size = (end - start) * sizeof(struct mhi_ring_element);
+		buf_info.host_addr = ring->rbase + (start * sizeof(struct mhi_ring_element));
+		buf_info.dev_addr = &ring->ring_cache[start];
+
+		ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info);
 		if (ret < 0)
 			return ret;
 	} else {
-		copy_size = (ring->ring_size - start) * sizeof(struct mhi_ring_element);
-		ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase +
-						(start * sizeof(struct mhi_ring_element)),
-						&ring->ring_cache[start], copy_size);
+		buf_info.size = (ring->ring_size - start) * sizeof(struct mhi_ring_element);
+		buf_info.host_addr = ring->rbase + (start * sizeof(struct mhi_ring_element));
+		buf_info.dev_addr = &ring->ring_cache[start];
+
+		ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info);
 		if (ret < 0)
 			return ret;
 
 		if (end) {
-			ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase,
-							&ring->ring_cache[0],
-							end * sizeof(struct mhi_ring_element));
+			buf_info.host_addr = ring->rbase;
+			buf_info.dev_addr = &ring->ring_cache[0];
+			buf_info.size = end * sizeof(struct mhi_ring_element);
+
+			ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info);
 			if (ret < 0)
 				return ret;
 		}
 	}
 
-	dev_dbg(dev, "Cached ring: start %zu end %zu size %zu\n", start, end, copy_size);
+	dev_dbg(dev, "Cached ring: start %zu end %zu size %zu\n", start, end, buf_info.size);
 
 	return 0;
 }
@@ -102,6 +107,7 @@ int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
 {
 	struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+	struct mhi_ep_buf_info buf_info = {};
 	size_t old_offset = 0;
 	u32 num_free_elem;
 	__le64 rp;
@@ -133,12 +139,11 @@ int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
 	rp = cpu_to_le64(ring->rd_offset * sizeof(*el) + ring->rbase);
 	memcpy_toio((void __iomem *) &ring->ring_ctx->generic.rp, &rp, sizeof(u64));
 
-	ret = mhi_cntrl->write_to_host(mhi_cntrl, el, ring->rbase + (old_offset * sizeof(*el)),
-				       sizeof(*el));
-	if (ret < 0)
-		return ret;
+	buf_info.host_addr = ring->rbase + (old_offset * sizeof(*el));
+	buf_info.dev_addr = el;
+	buf_info.size = sizeof(*el);
 
-	return 0;
+	return mhi_cntrl->write_sync(mhi_cntrl, &buf_info);
 }
 
 void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id)
@@ -157,6 +162,15 @@ void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id)
 	}
 }
 
+static void mhi_ep_raise_irq(struct work_struct *work)
+{
+	struct mhi_ep_ring *ring = container_of(work, struct mhi_ep_ring, intmodt_work.work);
+	struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+
+	mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector);
+	WRITE_ONCE(ring->irq_pending, false);
+}
+
 int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
 		      union mhi_ep_ring_ctx *ctx)
 {
@@ -173,8 +187,13 @@ int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
 	if (ring->type == RING_TYPE_CH)
 		ring->er_index = le32_to_cpu(ring->ring_ctx->ch.erindex);
 
-	if (ring->type == RING_TYPE_ER)
+	if (ring->type == RING_TYPE_ER) {
 		ring->irq_vector = le32_to_cpu(ring->ring_ctx->ev.msivec);
+		ring->intmodt = FIELD_GET(EV_CTX_INTMODT_MASK,
+					  le32_to_cpu(ring->ring_ctx->ev.intmod));
+
+		INIT_DELAYED_WORK(&ring->intmodt_work, mhi_ep_raise_irq);
+	}
 
 	/* During ring init, both rp and wp are equal */
 	memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.rp, sizeof(u64));
@@ -201,6 +220,9 @@ int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
 
 void mhi_ep_ring_reset(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring)
 {
+	if (ring->type == RING_TYPE_ER)
+		cancel_delayed_work_sync(&ring->intmodt_work);
+
 	ring->started = false;
 	kfree(ring->ring_cache);
 	ring->ring_cache = NULL;
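
The code that consumes the intmodt/irq_pending fields above lives in the
collapsed ep/main.c diff, so here is a hedged sketch of the intended pattern,
using only the fields and the mhi_ep_raise_irq() worker added in this hunk;
mhi_ep_kick_irq() is a made-up name for illustration:

/* Sketch: arm the moderation timer once instead of raising an IRQ per event */
static void mhi_ep_kick_irq(struct mhi_ep_ring *ring)
{
	struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;

	if (!ring->intmodt) {
		/* Host requested no moderation: raise the IRQ immediately */
		mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector);
		return;
	}

	/* A moderated IRQ is already scheduled for this ring */
	if (READ_ONCE(ring->irq_pending))
		return;

	WRITE_ONCE(ring->irq_pending, true);
	schedule_delayed_work(&ring->intmodt_work, msecs_to_jiffies(ring->intmodt));
}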
@@ -881,6 +881,7 @@ static int parse_config(struct mhi_controller *mhi_cntrl,
 	if (!mhi_cntrl->timeout_ms)
 		mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS;
 
+	mhi_cntrl->ready_timeout_ms = config->ready_timeout_ms;
 	mhi_cntrl->bounce_buf = config->use_bounce_buf;
 	mhi_cntrl->buffer_len = config->buf_len;
 	if (!mhi_cntrl->buffer_len)
@@ -321,7 +321,7 @@ int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
 				    u32 *out);
 int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl,
 				    void __iomem *base, u32 offset, u32 mask,
-				    u32 val, u32 delayus);
+				    u32 val, u32 delayus, u32 timeout_ms);
 void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
 		   u32 offset, u32 val);
 int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl,
@@ -40,10 +40,11 @@ int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
 int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl,
 				    void __iomem *base, u32 offset,
-				    u32 mask, u32 val, u32 delayus)
+				    u32 mask, u32 val, u32 delayus,
+				    u32 timeout_ms)
 {
 	int ret;
-	u32 out, retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
+	u32 out, retry = (timeout_ms * 1000) / delayus;
 
 	while (retry--) {
 		ret = mhi_read_reg_field(mhi_cntrl, base, offset, mask, &out);
@@ -268,7 +269,8 @@ static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
 
 static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr)
 {
-	return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len;
+	return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len &&
+		!(addr & (sizeof(struct mhi_ring_element) - 1));
 }
 
 int mhi_destroy_device(struct device *dev, void *data)
@@ -642,6 +644,8 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
 			mhi_del_ring_element(mhi_cntrl, tre_ring);
 			local_rp = tre_ring->rp;
 
+			read_unlock_bh(&mhi_chan->lock);
+
 			/* notify client */
 			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
@@ -667,6 +671,8 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
 					kfree(buf_info->cb_buf);
 				}
 			}
+
+			read_lock_bh(&mhi_chan->lock);
 		}
 		break;
 	} /* CC_EOT */
@@ -1122,17 +1128,15 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
 	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
 		return -EIO;
 
-	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
-
 	ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
-	if (unlikely(ret)) {
-		ret = -EAGAIN;
-		goto exit_unlock;
-	}
+	if (unlikely(ret))
+		return -EAGAIN;
 
 	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
 	if (unlikely(ret))
-		goto exit_unlock;
+		return ret;
 
+	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
 	/* Packet is queued, take a usage ref to exit M3 if necessary
 	 * for host->device buffer, balanced put is done on buffer completion
@@ -1152,7 +1156,6 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
 	if (dir == DMA_FROM_DEVICE)
 		mhi_cntrl->runtime_put(mhi_cntrl);
 
-exit_unlock:
 	read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
 
 	return ret;
@@ -1204,6 +1207,9 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
 	int eot, eob, chain, bei;
 	int ret;
 
+	/* Protect accesses for reading and incrementing WP */
+	write_lock_bh(&mhi_chan->lock);
+
 	buf_ring = &mhi_chan->buf_ring;
 	tre_ring = &mhi_chan->tre_ring;
@@ -1221,8 +1227,10 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
 	if (!info->pre_mapped) {
 		ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
-		if (ret)
+		if (ret) {
+			write_unlock_bh(&mhi_chan->lock);
 			return ret;
+		}
 	}
 
 	eob = !!(flags & MHI_EOB);
@@ -1239,6 +1247,8 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
 	mhi_add_ring_element(mhi_cntrl, tre_ring);
 	mhi_add_ring_element(mhi_cntrl, buf_ring);
 
+	write_unlock_bh(&mhi_chan->lock);
+
 	return 0;
 }
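
For context on the two parse_xfer_event() hunks above: the client's xfer_cb()
may itself queue more buffers, and queuing now takes the channel lock for
writing inside mhi_gen_tre(). Dropping the read lock around the callback avoids
this re-entrant path (function names all from this diff):

/*
 * parse_xfer_event()
 *   read_lock_bh(&mhi_chan->lock);
 *   mhi_chan->xfer_cb()                       client callback runs
 *     -> mhi_queue()                          client queues another buffer
 *          -> mhi_gen_tre()
 *               write_lock_bh(&mhi_chan->lock);  spins on the read lock
 *                                                already held -> soft lockup
 */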
@@ -269,6 +269,16 @@ static struct mhi_event_config modem_qcom_v1_mhi_events[] = {
 	MHI_EVENT_CONFIG_HW_DATA(5, 2048, 101)
 };
 
+static const struct mhi_controller_config modem_qcom_v2_mhiv_config = {
+	.max_channels = 128,
+	.timeout_ms = 8000,
+	.ready_timeout_ms = 50000,
+	.num_channels = ARRAY_SIZE(modem_qcom_v1_mhi_channels),
+	.ch_cfg = modem_qcom_v1_mhi_channels,
+	.num_events = ARRAY_SIZE(modem_qcom_v1_mhi_events),
+	.event_cfg = modem_qcom_v1_mhi_events,
+};
+
 static const struct mhi_controller_config modem_qcom_v1_mhiv_config = {
 	.max_channels = 128,
 	.timeout_ms = 8000,
@@ -278,6 +288,16 @@ static const struct mhi_controller_config modem_qcom_v1_mhiv_config = {
 	.event_cfg = modem_qcom_v1_mhi_events,
 };
 
+static const struct mhi_pci_dev_info mhi_qcom_sdx75_info = {
+	.name = "qcom-sdx75m",
+	.fw = "qcom/sdx75m/xbl.elf",
+	.edl = "qcom/sdx75m/edl.mbn",
+	.config = &modem_qcom_v2_mhiv_config,
+	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+	.dma_data_width = 32,
+	.sideband_wake = false,
+};
+
 static const struct mhi_pci_dev_info mhi_qcom_sdx65_info = {
 	.name = "qcom-sdx65m",
 	.fw = "qcom/sdx65m/xbl.elf",
@@ -600,6 +620,8 @@ static const struct pci_device_id mhi_pci_id_table[] = {
 		.driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
 	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308),
 		.driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info },
+	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0309),
+		.driver_data = (kernel_ulong_t) &mhi_qcom_sdx75_info },
 	{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1001), /* EM120R-GL (sdx24) */
 		.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
 	{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1002), /* EM160R-GL (sdx24) */
@@ -163,6 +163,7 @@ int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
 	enum mhi_pm_state cur_state;
 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
 	u32 interval_us = 25000; /* poll register field every 25 milliseconds */
+	u32 timeout_ms;
 	int ret, i;
 
 	/* Check if device entered error state */
@@ -173,14 +174,18 @@ int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
 
 	/* Wait for RESET to be cleared and READY bit to be set by the device */
 	ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
-				 MHICTRL_RESET_MASK, 0, interval_us);
+				 MHICTRL_RESET_MASK, 0, interval_us,
+				 mhi_cntrl->timeout_ms);
 	if (ret) {
 		dev_err(dev, "Device failed to clear MHI Reset\n");
 		return ret;
 	}
 
+	timeout_ms = mhi_cntrl->ready_timeout_ms ?
+		mhi_cntrl->ready_timeout_ms : mhi_cntrl->timeout_ms;
 	ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
-				 MHISTATUS_READY_MASK, 1, interval_us);
+				 MHISTATUS_READY_MASK, 1, interval_us,
+				 timeout_ms);
 	if (ret) {
 		dev_err(dev, "Device failed to enter MHI Ready\n");
 		return ret;
@@ -479,7 +484,7 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
 
 	/* Wait for the reset bit to be cleared by the device */
 	ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
-				 MHICTRL_RESET_MASK, 0, 25000);
+				 MHICTRL_RESET_MASK, 0, 25000, mhi_cntrl->timeout_ms);
 	if (ret)
 		dev_err(dev, "Device failed to clear MHI Reset\n");
@@ -492,8 +497,8 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
 	if (!MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) {
 		/* wait for ready to be set */
 		ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs,
-					 MHISTATUS,
-					 MHISTATUS_READY_MASK, 1, 25000);
+					 MHISTATUS, MHISTATUS_READY_MASK,
+					 1, 25000, mhi_cntrl->timeout_ms);
 		if (ret)
 			dev_err(dev, "Device failed to enter READY state\n");
 	}
@@ -1111,7 +1116,8 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
 	if (state == MHI_STATE_SYS_ERR) {
 		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
 		ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
-					 MHICTRL_RESET_MASK, 0, interval_us);
+					 MHICTRL_RESET_MASK, 0, interval_us,
+					 mhi_cntrl->timeout_ms);
 		if (ret) {
 			dev_info(dev, "Failed to reset MHI due to syserr state\n");
 			goto error_exit;
@@ -1202,14 +1208,18 @@ EXPORT_SYMBOL_GPL(mhi_power_down);
 int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
 {
 	int ret = mhi_async_power_up(mhi_cntrl);
+	u32 timeout_ms;
 
 	if (ret)
 		return ret;
 
+	/* Some devices need more time to set ready during power up */
+	timeout_ms = mhi_cntrl->ready_timeout_ms ?
+		mhi_cntrl->ready_timeout_ms : mhi_cntrl->timeout_ms;
 	wait_event_timeout(mhi_cntrl->state_event,
 			   MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
 			   MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
-			   msecs_to_jiffies(mhi_cntrl->timeout_ms));
+			   msecs_to_jiffies(timeout_ms));
 
 	ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT;
 	if (ret)
@@ -266,6 +266,7 @@ struct mhi_event_config {
  * struct mhi_controller_config - Root MHI controller configuration
  * @max_channels: Maximum number of channels supported
  * @timeout_ms: Timeout value for operations. 0 means use default
+ * @ready_timeout_ms: Timeout value for waiting device to be ready (optional)
  * @buf_len: Size of automatically allocated buffers. 0 means use default
  * @num_channels: Number of channels defined in @ch_cfg
  * @ch_cfg: Array of defined channels
@@ -277,6 +278,7 @@ struct mhi_event_config {
 struct mhi_controller_config {
 	u32 max_channels;
 	u32 timeout_ms;
+	u32 ready_timeout_ms;
 	u32 buf_len;
 	u32 num_channels;
 	const struct mhi_channel_config *ch_cfg;
@@ -330,6 +332,7 @@ struct mhi_controller_config {
  * @pm_mutex: Mutex for suspend/resume operation
  * @pm_lock: Lock for protecting MHI power management state
 * @timeout_ms: Timeout in ms for state transitions
+ * @ready_timeout_ms: Timeout in ms for waiting device to be ready (optional)
 * @pm_state: MHI power management state
 * @db_access: DB access states
 * @ee: MHI device execution environment
@@ -419,6 +422,7 @@ struct mhi_controller {
 	struct mutex pm_mutex;
 	rwlock_t pm_lock;
 	u32 timeout_ms;
+	u32 ready_timeout_ms;
 	u32 pm_state;
 	u32 db_access;
 	enum mhi_ee_type ee;
@@ -49,6 +49,27 @@ struct mhi_ep_db_info {
 	u32 status;
 };
 
+/**
+ * struct mhi_ep_buf_info - MHI Endpoint transfer buffer info
+ * @mhi_dev: MHI device associated with this buffer
+ * @dev_addr: Address of the buffer in endpoint
+ * @host_addr: Address of the buffer in host
+ * @size: Size of the buffer
+ * @code: Transfer completion code
+ * @cb: Callback to be executed by controller drivers after transfer completion (async)
+ * @cb_buf: Opaque buffer to be passed to the callback
+ */
+struct mhi_ep_buf_info {
+	struct mhi_ep_device *mhi_dev;
+	void *dev_addr;
+	u64 host_addr;
+	size_t size;
+
+	int code;
+
+	void (*cb)(struct mhi_ep_buf_info *buf_info);
+	void *cb_buf;
+};
+
 /**
  * struct mhi_ep_cntrl - MHI Endpoint controller structure
  * @cntrl_dev: Pointer to the struct device of physical bus acting as the MHI
@@ -82,8 +103,10 @@ struct mhi_ep_db_info {
  * @raise_irq: CB function for raising IRQ to the host
  * @alloc_map: CB function for allocating memory in endpoint for storing host context and mapping it
  * @unmap_free: CB function to unmap and free the allocated memory in endpoint for storing host context
- * @read_from_host: CB function for reading from host memory from endpoint
- * @write_to_host: CB function for writing to host memory from endpoint
+ * @read_sync: CB function for reading from host memory synchronously
+ * @write_sync: CB function for writing to host memory synchronously
+ * @read_async: CB function for reading from host memory asynchronously
+ * @write_async: CB function for writing to host memory asynchronously
  * @mhi_state: MHI Endpoint state
  * @max_chan: Maximum channels supported by the endpoint controller
  * @mru: MRU (Maximum Receive Unit) value of the endpoint controller
@@ -128,14 +151,19 @@ struct mhi_ep_cntrl {
 	struct work_struct reset_work;
 	struct work_struct cmd_ring_work;
 	struct work_struct ch_ring_work;
+	struct kmem_cache *ring_item_cache;
+	struct kmem_cache *ev_ring_el_cache;
+	struct kmem_cache *tre_buf_cache;
 
 	void (*raise_irq)(struct mhi_ep_cntrl *mhi_cntrl, u32 vector);
 	int (*alloc_map)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t *phys_ptr,
 			 void __iomem **virt, size_t size);
 	void (*unmap_free)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t phys,
 			   void __iomem *virt, size_t size);
-	int (*read_from_host)(struct mhi_ep_cntrl *mhi_cntrl, u64 from, void *to, size_t size);
-	int (*write_to_host)(struct mhi_ep_cntrl *mhi_cntrl, void *from, u64 to, size_t size);
+	int (*read_sync)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
+	int (*write_sync)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
+	int (*read_async)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
+	int (*write_async)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
 
 	enum mhi_state mhi_state;
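
The three kmem_cache pointers added to struct mhi_ep_cntrl above are created by
the EP stack when the controller registers. A rough sketch of that setup for
one of them; the cache name, alignment, and flags here are assumptions for
illustration, not copied from the merged patches:

	/* Event ring elements are small, fixed-size and DMA'able, so carve
	 * them out of a dedicated slab instead of kmalloc'ing each one.
	 * Name, alignment and flags are assumed for this sketch. */
	mhi_cntrl->ev_ring_el_cache = kmem_cache_create("mhi_ep_ev_ring_el",
							sizeof(struct mhi_ring_element),
							0, SLAB_CACHE_DMA, NULL);
	if (!mhi_cntrl->ev_ring_el_cache)
		return -ENOMEM;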