Commit 255ba065 authored by Emmanuel Grumbach

Revert "iwlwifi: pcie: New RBD allocation model"

This reverts commit 5f175703.

This patch introduced high latency in buffer allocation under
extreme load, and that latency caused a firmware crash.
The same scenario works fine with this patch reverted.
Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
parent 8465fe6a
drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -438,6 +438,12 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
 #define RX_QUEUE_MASK                         255
 #define RX_QUEUE_SIZE_LOG                     8

+/*
+ * RX related structures and functions
+ */
+#define RX_FREE_BUFFERS 64
+#define RX_LOW_WATERMARK 8
+
 /**
  * struct iwl_rb_status - reserve buffer status
  *	host memory mapped FH registers
...
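For orientation: RX_FREE_BUFFERS sizes the spare RBD pool on top of the 256-entry RX queue, and RX_LOW_WATERMARK is the threshold below which the restored code treats the queue as starved. A minimal sketch of how such a watermark typically steers refill urgency; this is illustrative only, not the driver source, and rx_refill_sketch and the page-attach step are stand-ins:

/* Illustrative sketch, not the driver source: refill RBDs from rx_used,
 * relaxing the allocation only while the queue is comfortably above the
 * watermark. Locking is elided. */
static void rx_refill_sketch(struct iwl_rxq *rxq, gfp_t priority)
{
	gfp_t gfp_mask = priority;

	/* Well above the watermark, a failed opportunistic allocation is
	 * harmless, so suppress the OOM warning. */
	if (rxq->free_count > RX_LOW_WATERMARK)
		gfp_mask |= __GFP_NOWARN;

	while (!list_empty(&rxq->rx_used)) {
		struct page *page = alloc_pages(gfp_mask, 0);

		if (!page)
			return;
		/* ...attach the page to an RBD taken from rx_used, move
		 * the RBD to rx_free and increment free_count... */
	}
}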
drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -44,15 +44,6 @@
 #include "iwl-io.h"
 #include "iwl-op-mode.h"

-/*
- * RX related structures and functions
- */
-#define RX_NUM_QUEUES 1
-#define RX_POST_REQ_ALLOC 2
-#define RX_CLAIM_REQ_ALLOC 8
-#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES)
-#define RX_LOW_WATERMARK 8
-
 struct iwl_host_cmd;

 /*This file includes the declaration that are internal to the
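For reference, the pool size being deleted works out to RX_POOL_SIZE = (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES = (8 - 2) * 1 = 6, i.e. six spare RBDs per RX queue held by the background allocator.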
@@ -86,29 +77,29 @@ struct isr_statistics {
  * struct iwl_rxq - Rx queue
  * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
  * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
+ * @pool:
+ * @queue:
  * @read: Shared index to newest available Rx buffer
  * @write: Shared index to oldest written Rx packet
  * @free_count: Number of pre-allocated buffers in rx_free
- * @used_count: Number of RBDs handled to allocator to use for allocation
  * @write_actual:
- * @rx_free: list of RBDs with allocated RB ready for use
- * @rx_used: list of RBDs with no RB attached
+ * @rx_free: list of free SKBs for use
+ * @rx_used: List of Rx buffers with no SKB
  * @need_update: flag to indicate we need to update read/write index
  * @rb_stts: driver's pointer to receive buffer status
  * @rb_stts_dma: bus address of receive buffer status
  * @lock:
- * @pool: initial pool of iwl_rx_mem_buffer for the queue
- * @queue: actual rx queue
  *
  * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
  */
 struct iwl_rxq {
 	__le32 *bd;
 	dma_addr_t bd_dma;
+	struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
+	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
 	u32 read;
 	u32 write;
 	u32 free_count;
-	u32 used_count;
 	u32 write_actual;
 	struct list_head rx_free;
 	struct list_head rx_used;
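The net effect of this hunk is to move RBD ownership back into the queue itself: pool[] again holds all 256 + 64 = 320 buffer descriptors, queue[] mirrors the 256 hardware slots, and rx_free/rx_used carry descriptors with and without pages attached. A sketch of how such a layout is typically seeded at init time; illustrative only, and it assumes iwl_rx_mem_buffer embeds a list_head named list, as the FIFO note above implies:

/* Sketch, not driver source: seed the FIFOs from the static pool. */
static void rxq_seed_sketch(struct iwl_rxq *rxq)
{
	int i;

	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	/* Every RBD starts on rx_used with no page attached yet; the
	 * refill path migrates them to rx_free as pages are allocated. */
	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++)
		list_add(&rxq->pool[i].list, &rxq->rx_used);

	rxq->free_count = 0;
}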
@@ -116,32 +107,6 @@ struct iwl_rxq {
 	struct iwl_rb_status *rb_stts;
 	dma_addr_t rb_stts_dma;
 	spinlock_t lock;
-	struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE];
-	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
-};
-
-/**
- * struct iwl_rb_allocator - Rx allocator
- * @pool: initial pool of allocator
- * @req_pending: number of requests the allcator had not processed yet
- * @req_ready: number of requests honored and ready for claiming
- * @rbd_allocated: RBDs with pages allocated and ready to be handled to
- *	the queue. This is a list of &struct iwl_rx_mem_buffer
- * @rbd_empty: RBDs with no page attached for allocator use. This is a list
- *	of &struct iwl_rx_mem_buffer
- * @lock: protects the rbd_allocated and rbd_empty lists
- * @alloc_wq: work queue for background calls
- * @rx_alloc: work struct for background calls
- */
-struct iwl_rb_allocator {
-	struct iwl_rx_mem_buffer pool[RX_POOL_SIZE];
-	atomic_t req_pending;
-	atomic_t req_ready;
-	struct list_head rbd_allocated;
-	struct list_head rbd_empty;
-	spinlock_t lock;
-	struct workqueue_struct *alloc_wq;
-	struct work_struct rx_alloc;
 };

 struct iwl_dma_ptr {
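For the record, the struct being deleted implemented a deferred request/claim protocol: the RX path handed batches of empty RBDs to a background worker and claimed them back once pages were attached. A simplified sketch of that handshake, reconstructed from the fields above rather than taken from the original implementation:

/* Sketch of the removed allocator's handshake, reconstructed from the
 * struct fields above; locking details and error paths elided. */

/* RX-queue side: post a batch of empty RBDs and request allocation. */
static void rba_post_sketch(struct iwl_rb_allocator *rba,
			    struct list_head *empty_rbds)
{
	spin_lock(&rba->lock);
	list_splice_tail_init(empty_rbds, &rba->rbd_empty);
	spin_unlock(&rba->lock);

	atomic_inc(&rba->req_pending);
	queue_work(rba->alloc_wq, &rba->rx_alloc);
}

/* Worker side: serve one pending request, then mark it claimable. */
static void rba_work_sketch(struct work_struct *work)
{
	struct iwl_rb_allocator *rba =
		container_of(work, struct iwl_rb_allocator, rx_alloc);

	while (atomic_read(&rba->req_pending)) {
		/* ...allocate pages (possibly sleeping) for RBDs taken
		 * from rbd_empty and move them to rbd_allocated... */
		atomic_dec(&rba->req_pending);
		atomic_inc(&rba->req_ready);
	}
}

Because the page allocation happens in a workqueue and may sleep, a traffic burst can drain the queue faster than the worker refills it, which is consistent with the latency and firmware crash described in the commit message.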
@@ -285,7 +250,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
 /**
  * struct iwl_trans_pcie - PCIe transport specific data
  * @rxq: all the RX queue data
- * @rba: allocator for RX replenishing
+ * @rx_replenish: work that will be called when buffers need to be allocated
  * @drv - pointer to iwl_drv
  * @trans: pointer to the generic transport area
  * @scd_base_addr: scheduler sram base address in SRAM
@@ -308,7 +273,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
  */
 struct iwl_trans_pcie {
 	struct iwl_rxq rxq;
-	struct iwl_rb_allocator rba;
+	struct work_struct rx_replenish;
 	struct iwl_trans *trans;
 	struct iwl_drv *drv;
...
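The revert thus collapses the allocator back into a single work item on iwl_trans_pcie. A sketch of the restored wiring; illustrative only, with rx_refill_sketch being the hypothetical helper from the watermark sketch above:

/* Sketch, not driver source: the restored single-work replenish path. */
static void rx_replenish_sketch(struct work_struct *work)
{
	struct iwl_trans_pcie *trans_pcie =
		container_of(work, struct iwl_trans_pcie, rx_replenish);

	/* Background refill may sleep; in the restored model the interrupt
	 * path can also refill synchronously with GFP_ATOMIC when it runs
	 * low (see the watermark sketch above). */
	rx_refill_sketch(&trans_pcie->rxq, GFP_KERNEL);
}

static void rx_init_sketch(struct iwl_trans_pcie *trans_pcie)
{
	INIT_WORK(&trans_pcie->rx_replenish, rx_replenish_sketch);
}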
This diff is collapsed.