Commit 0307c839 authored by Golan Ben Ami, committed by Luca Coelho

iwlwifi: pcie: support rx structures for 22560 devices

The rfh for 22560 devices has changed: it still follows
the same architecture of used and free lists, but requires
different structures to support them.
Use the new, hw dependent structures to manage the lists.

bd, the free list, uses struct iwl_rx_transfer_desc,
in which the vid is stored in the struct's rbid
field and the page address in the addr field.

used_bd, the used list, uses struct iwl_rx_completion_desc,
in which the vid is stored in the struct's rbid
field.

rb_stts, the hw "write" pointer of rx, is stored in a
__le16 array, in which each entry holds the write
pointer of one queue.
Signed-off-by: Golan Ben Ami <golan.ben.ami@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
parent bfdbe132
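
Both hw dependent structures named in the message are defined in internal.h and only partially exercised by this patch. As orientation, here is a minimal sketch of the shapes the accesses below imply; field order, exact widths and any reserved padding are assumptions, not taken from this commit:

	/* Free-list entry for 22560, written by iwl_pcie_restock_bd() below.
	 * Only type_n_size, addr and rbid are touched by this patch;
	 * everything else about the real layout is assumed here.
	 */
	struct iwl_rx_transfer_desc {
		__le32 type_n_size;	/* IWL_RX_TD_TYPE_MSK | IWL_RX_TD_SIZE_MSK */
		__le64 addr;		/* DMA address of the rx buffer page */
		__le16 rbid;		/* vid: index into the global rxb table */
	} __packed;

	/* Used-list entry for 22560, read by iwl_pcie_rx_handle() below. */
	struct iwl_rx_completion_desc {
		__le16 rbid;		/* vid of the completed buffer */
		__le32 size;		/* bytes used, masked with IWL_RX_CD_SIZE */
	} __packed;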
drivers/net/wireless/intel/iwlwifi/pcie/internal.h

@@ -72,6 +72,7 @@ struct iwl_host_cmd;
  * @page: driver's pointer to the rxb page
  * @invalid: rxb is in driver ownership - not owned by HW
  * @vid: index of this rxb in the global table
+ * @size: size used from the buffer
  */
 struct iwl_rx_mem_buffer {
 	dma_addr_t page_dma;
@@ -79,6 +80,7 @@ struct iwl_rx_mem_buffer {
 	u16 vid;
 	bool invalid;
 	struct list_head list;
+	u32 size;
 };
 
 /**
@@ -159,8 +161,10 @@ enum iwl_completion_desc_wifi_status {
 	IWL_CD_STTS_REPLAY_ERR,
 };
 
-#define IWL_RX_TD_TYPE		0xff000000
-#define IWL_RX_TD_SIZE		0x00ffffff
+#define IWL_RX_TD_TYPE_MSK	0xff000000
+#define IWL_RX_TD_SIZE_MSK	0x00ffffff
+#define IWL_RX_TD_SIZE_2K	BIT(11)
+#define IWL_RX_TD_TYPE		0
 
 /**
  * struct iwl_rx_transfer_desc - transfer descriptor
@@ -204,6 +208,7 @@ struct iwl_rx_completion_desc {
  * @id: queue index
  * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
  *	Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
+ *	In 22560 devices it is a pointer to a list of iwl_rx_transfer_desc's
  * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
  * @ubd: driver's pointer to buffer of used receive buffer descriptors (rbd)
  * @ubd_dma: physical address of buffer of used receive buffer descriptors (rbd)
@@ -230,7 +235,7 @@ struct iwl_rxq {
 	int id;
 	void *bd;
 	dma_addr_t bd_dma;
-	__le32 *used_bd;
+	void *used_bd;
 	dma_addr_t used_bd_dma;
 	__le16 *tr_tail;
 	dma_addr_t tr_tail_dma;
@@ -245,7 +250,7 @@ struct iwl_rxq {
 	struct list_head rx_free;
 	struct list_head rx_used;
 	bool need_update;
-	struct iwl_rb_status *rb_stts;
+	void *rb_stts;
 	dma_addr_t rb_stts_dma;
 	spinlock_t lock;
 	struct napi_struct napi;
@@ -289,6 +294,24 @@ static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
 	return ++index & (trans->cfg->base_params->max_tfd_queue_size - 1);
 }
 
+/**
+ * iwl_get_closed_rb_stts - get closed rb stts from different structs
+ * @rxq - the rxq to get the rb stts from
+ */
+static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
+					    struct iwl_rxq *rxq)
+{
+	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+		__le16 *rb_stts = rxq->rb_stts;
+
+		return READ_ONCE(*rb_stts);
+	} else {
+		struct iwl_rb_status *rb_stts = rxq->rb_stts;
+
+		return READ_ONCE(rb_stts->closed_rb_num);
+	}
+}
+
 /**
  * iwl_queue_dec_wrap - decrement queue index, wrap back to end
  * @index -- current index
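
With rb_stts turned into a void * and the helper above dispatching on device family, callers read the hw write pointer the same way on every device; the rx.c and trans.c changes below all convert to this one pattern, taken verbatim from the patch:

	/* same call on every device family, 12-bit masking unchanged */
	r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;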
drivers/net/wireless/intel/iwlwifi/pcie/rx.c

@@ -242,6 +242,25 @@ static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
 	}
 }
 
+static void iwl_pcie_restock_bd(struct iwl_trans *trans,
+				struct iwl_rxq *rxq,
+				struct iwl_rx_mem_buffer *rxb)
+{
+	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+		struct iwl_rx_transfer_desc *bd = rxq->bd;
+
+		bd[rxq->write].type_n_size =
+			cpu_to_le32((IWL_RX_TD_TYPE & IWL_RX_TD_TYPE_MSK) |
+			((IWL_RX_TD_SIZE_2K >> 8) & IWL_RX_TD_SIZE_MSK));
+		bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
+		bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
+	} else {
+		__le64 *bd = rxq->bd;
+
+		bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
+	}
+}
+
 /*
  * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
  */
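
A side note on the value written above: every operand in type_n_size is a compile-time constant, so the expression collapses to a fixed word. Expanding it by hand (my arithmetic, not part of the patch):

	/* IWL_RX_TD_TYPE = 0, IWL_RX_TD_SIZE_2K = BIT(11) = 0x800 */
	  (IWL_RX_TD_TYPE & IWL_RX_TD_TYPE_MSK)			/* 0x00000000 */
	| ((IWL_RX_TD_SIZE_2K >> 8) & IWL_RX_TD_SIZE_MSK)	/* 0x00000008 */
								/* = cpu_to_le32(0x8) */

The >> 8 shift suggests the hw size field counts 256-byte units (2048 / 256 = 8), though the patch itself doesn't spell that out.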
@@ -263,8 +282,6 @@ static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
 
 	spin_lock(&rxq->lock);
 	while (rxq->free_count) {
-		__le64 *bd = (__le64 *)rxq->bd;
-
 		/* Get next free Rx buffer, remove from free list */
 		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
 				       list);
@@ -273,7 +290,7 @@ static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
 		/* 12 first bits are expected to be empty */
 		WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
 		/* Point to Rx buffer via next RBD in circular buffer */
-		bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
+		iwl_pcie_restock_bd(trans, rxq, rxb);
 		rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
 		rxq->free_count--;
 	}
@@ -617,28 +634,45 @@ void iwl_pcie_rx_allocator_work(struct work_struct *data)
 	iwl_pcie_rx_allocator(trans_pcie->trans);
 }
 
+static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td)
+{
+	struct iwl_rx_transfer_desc *rx_td;
+
+	if (use_rx_td)
+		return sizeof(*rx_td);
+	else
+		return trans->cfg->mq_rx_supported ? sizeof(__le64) :
+			sizeof(__le32);
+}
+
 static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
 				  struct iwl_rxq *rxq)
 {
-	struct device *dev = trans->dev;
-	int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
-					      sizeof(__le32);
+	struct iwl_rx_completion_desc *rx_cd;
+	bool use_rx_td = (trans->cfg->device_family >=
+			  IWL_DEVICE_FAMILY_22560);
+	int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
 
 	if (rxq->bd)
-		dma_free_coherent(dev, free_size * rxq->queue_size,
+		dma_free_coherent(trans->dev,
+				  free_size * rxq->queue_size,
 				  rxq->bd, rxq->bd_dma);
 	rxq->bd_dma = 0;
 	rxq->bd = NULL;
 
 	if (rxq->rb_stts)
 		dma_free_coherent(trans->dev,
+				  use_rx_td ? sizeof(__le16) :
 				  sizeof(struct iwl_rb_status),
 				  rxq->rb_stts, rxq->rb_stts_dma);
 	rxq->rb_stts_dma = 0;
 	rxq->rb_stts = NULL;
 
 	if (rxq->used_bd)
-		dma_free_coherent(dev, sizeof(__le32) * rxq->queue_size,
+		dma_free_coherent(trans->dev,
+				  (use_rx_td ? sizeof(*rx_cd) :
+				   sizeof(__le32)) * rxq->queue_size,
 				  rxq->used_bd, rxq->used_bd_dma);
 	rxq->used_bd_dma = 0;
 	rxq->used_bd = NULL;
@@ -664,9 +698,11 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct device *dev = trans->dev;
+	struct iwl_rx_completion_desc *rx_cd;
 	int i;
-	int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
-					      sizeof(__le32);
+	int free_size;
+	bool use_rx_td = (trans->cfg->device_family >=
+			  IWL_DEVICE_FAMILY_22560);
 
 	spin_lock_init(&rxq->lock);
 	if (trans->cfg->mq_rx_supported)
@@ -674,6 +710,8 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
 	else
 		rxq->queue_size = RX_QUEUE_SIZE;
 
+	free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
+
 	/*
 	 * Allocate the circular buffer of Read Buffer Descriptors
 	 * (RBDs)
@@ -686,7 +724,9 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
 
 	if (trans->cfg->mq_rx_supported) {
 		rxq->used_bd = dma_zalloc_coherent(dev,
-						   sizeof(__le32) *
+						   (use_rx_td ?
+						   sizeof(*rx_cd) :
+						   sizeof(__le32)) *
 						   rxq->queue_size,
 						   &rxq->used_bd_dma,
 						   GFP_KERNEL);
@@ -695,13 +735,15 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
 	}
 
 	/* Allocate the driver's pointer to receive buffer status */
-	rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
+	rxq->rb_stts = dma_zalloc_coherent(dev, use_rx_td ?
+					   sizeof(__le16) :
+					   sizeof(struct iwl_rb_status),
 					   &rxq->rb_stts_dma,
 					   GFP_KERNEL);
 	if (!rxq->rb_stts)
 		goto err;
 
-	if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
+	if (!use_rx_td)
 		return 0;
 
 	/* Allocate the driver's pointer to TR tail */
@@ -717,6 +759,11 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
 					  GFP_KERNEL);
 	if (!rxq->cr_tail)
 		goto err;
+	/*
+	 * W/A 22560 device step Z0 must be non zero bug
+	 * TODO: remove this when stop supporting Z0
+	 */
+	*rxq->cr_tail = cpu_to_le16(500);
 
 	return 0;
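
Summing up the allocation and free paths above, each rx ring element is now sized per device family as follows, read straight from iwl_pcie_free_bd_size() and the dma_zalloc_coherent()/dma_free_coherent() calls:

	element    pre-mq devices          9000 family (mq)        22560 and later
	bd         __le32                  __le64                  struct iwl_rx_transfer_desc
	used_bd    not allocated           __le32                  struct iwl_rx_completion_desc
	rb_stts    struct iwl_rb_status    struct iwl_rb_status    __le16 per queue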
@@ -1000,7 +1047,9 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
 		rxq->read = 0;
 		rxq->write = 0;
 		rxq->write_actual = 0;
-		memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
+		memset(rxq->rb_stts, 0,
+		       (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
+		       sizeof(__le16) : sizeof(struct iwl_rb_status));
 
 		iwl_pcie_rx_init_rxb_lists(rxq);
@@ -1249,6 +1298,8 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 		}
 
 		page_stolen |= rxcb._page_stolen;
+		if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+			break;
 		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
 	}
@@ -1297,7 +1348,7 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
 	spin_lock(&rxq->lock);
 	/* uCode's read index (stored in shared DRAM) indicates the last Rx
 	 * buffer that the driver may process (last buffer filled by ucode). */
-	r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
+	r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
 	i = rxq->read;
 
 	/* W/A 9000 device step A0 wrap-around bug */
@@ -1314,11 +1365,24 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
 			emergency = true;
 
 		if (trans->cfg->mq_rx_supported) {
+			u16 vid;
 			/*
-			 * used_bd is a 32 bit but only 12 are used to retrieve
-			 * the vid
+			 * used_bd is a 32/16 bit but only 12 are used
+			 * to retrieve the vid
 			 */
-			u16 vid = le32_to_cpu(rxq->used_bd[i]) & 0x0FFF;
+			if (trans->cfg->device_family >=
+			    IWL_DEVICE_FAMILY_22560) {
+				struct iwl_rx_completion_desc *rx_cd =
+					&((struct iwl_rx_completion_desc *)
+					  rxq->used_bd)[i];
+
+				vid = le16_to_cpu(rx_cd->rbid) & 0x0FFF;
+			} else {
+				__le32 *used =
+					&((__le32 *)rxq->used_bd)[i];
+
+				vid = le32_to_cpu(*used) & 0x0FFF;
+			}
 
 			if (WARN(!vid ||
 				 vid > ARRAY_SIZE(trans_pcie->global_table),
@@ -1332,6 +1396,16 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
 				iwl_force_nmi(trans);
 				goto out;
 			}
+
+			if (trans->cfg->device_family >=
+			    IWL_DEVICE_FAMILY_22560) {
+				struct iwl_rx_completion_desc *rx_cd =
+					&((struct iwl_rx_completion_desc *)
+					  rxq->used_bd)[i];
+
+				rxb->size = le32_to_cpu(rx_cd->size) &
+					IWL_RX_CD_SIZE;
+			}
 			rxb->invalid = true;
 		} else {
 			rxb = rxq->queue[i];
@@ -1378,6 +1452,9 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
 out:
 	/* Backtrack one entry */
 	rxq->read = i;
+	/* update cr tail with the rxq read pointer */
+	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+		*rxq->cr_tail = cpu_to_le16(r);
 	spin_unlock(&rxq->lock);
 
 	/*
drivers/net/wireless/intel/iwlwifi/pcie/trans.c

@@ -2545,10 +2545,11 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
 		pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n",
 				 rxq->free_count);
 		if (rxq->rb_stts) {
+			u32 r = __le16_to_cpu(iwl_get_closed_rb_stts(trans,
+								     rxq));
 			pos += scnprintf(buf + pos, bufsz - pos,
 					 "\tclosed_rb_num: %u\n",
-					 le16_to_cpu(rxq->rb_stts->closed_rb_num) &
-					 0x0FFF);
+					 r & 0x0FFF);
 		} else {
 			pos += scnprintf(buf + pos, bufsz - pos,
 					 "\tclosed_rb_num: Not Allocated\n");
@@ -2754,7 +2755,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
 
 	spin_lock(&rxq->lock);
 
-	r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
+	r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
 
 	for (i = rxq->read, j = 0;
 	     i != r && j < allocated_rb_nums;
@@ -3039,7 +3040,8 @@ static struct iwl_trans_dump_data
 		/* Dump RBs is supported only for pre-9000 devices (1 queue) */
 		struct iwl_rxq *rxq = &trans_pcie->rxq[0];
 		/* RBs */
-		num_rbs = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num))
-			  & 0x0FFF;
+		num_rbs =
+			le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq))
+			& 0x0FFF;
 		num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
 		len += num_rbs * (sizeof(*data) +