Commit af1fa210 authored by Arend van Spriel, committed by John W. Linville

brcmfmac: use pre-allocated scatter-gather table for txglomming

Instead of allocating a scatter-gather table for every transmit
reuse a pre-allocated table. The transmit path will be faster by
taking out this allocation.
Reviewed-by: Daniel (Deognyoun) Kim <dekim@broadcom.com>
Reviewed-by: Hante Meuleman <meuleman@broadcom.com>
Reviewed-by: Franky (Zhenhui) Lin <frankyl@broadcom.com>
Reviewed-by: Pieter-Paul Giesberts <pieterpg@broadcom.com>
Signed-off-by: Arend van Spriel <arend@broadcom.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
parent 4aca7a18
...@@ -53,6 +53,12 @@ ...@@ -53,6 +53,12 @@
/* Maximum milliseconds to wait for F2 to come up */ /* Maximum milliseconds to wait for F2 to come up */
#define SDIO_WAIT_F2RDY 3000 #define SDIO_WAIT_F2RDY 3000
#define BRCMF_DEFAULT_TXGLOM_SIZE 32 /* max tx frames in glom chain */
#define BRCMF_DEFAULT_RXGLOM_SIZE 32 /* max rx frames in glom chain */
static int brcmf_sdiod_txglomsz = BRCMF_DEFAULT_TXGLOM_SIZE;
module_param_named(txglomsz, brcmf_sdiod_txglomsz, int, 0);
MODULE_PARM_DESC(txglomsz, "maximum tx packet chain size [SDIO]");
static irqreturn_t brcmf_sdiod_oob_irqhandler(int irq, void *dev_id) static irqreturn_t brcmf_sdiod_oob_irqhandler(int irq, void *dev_id)
{ {
...@@ -487,7 +493,6 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn, ...@@ -487,7 +493,6 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
struct mmc_request mmc_req; struct mmc_request mmc_req;
struct mmc_command mmc_cmd; struct mmc_command mmc_cmd;
struct mmc_data mmc_dat; struct mmc_data mmc_dat;
struct sg_table st;
struct scatterlist *sgl; struct scatterlist *sgl;
int ret = 0; int ret = 0;
...@@ -532,16 +537,11 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn, ...@@ -532,16 +537,11 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
pkt_offset = 0; pkt_offset = 0;
pkt_next = target_list->next; pkt_next = target_list->next;
if (sg_alloc_table(&st, max_seg_cnt, GFP_KERNEL)) {
ret = -ENOMEM;
goto exit;
}
memset(&mmc_req, 0, sizeof(struct mmc_request)); memset(&mmc_req, 0, sizeof(struct mmc_request));
memset(&mmc_cmd, 0, sizeof(struct mmc_command)); memset(&mmc_cmd, 0, sizeof(struct mmc_command));
memset(&mmc_dat, 0, sizeof(struct mmc_data)); memset(&mmc_dat, 0, sizeof(struct mmc_data));
mmc_dat.sg = st.sgl; mmc_dat.sg = sdiodev->sgtable.sgl;
mmc_dat.blksz = func_blk_sz; mmc_dat.blksz = func_blk_sz;
mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ; mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
mmc_cmd.opcode = SD_IO_RW_EXTENDED; mmc_cmd.opcode = SD_IO_RW_EXTENDED;
...@@ -557,7 +557,7 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn, ...@@ -557,7 +557,7 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
while (seg_sz) { while (seg_sz) {
req_sz = 0; req_sz = 0;
sg_cnt = 0; sg_cnt = 0;
sgl = st.sgl; sgl = sdiodev->sgtable.sgl;
/* prep sg table */ /* prep sg table */
while (pkt_next != (struct sk_buff *)target_list) { while (pkt_next != (struct sk_buff *)target_list) {
pkt_data = pkt_next->data + pkt_offset; pkt_data = pkt_next->data + pkt_offset;
...@@ -639,7 +639,7 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn, ...@@ -639,7 +639,7 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
} }
exit: exit:
sg_free_table(&st); sg_init_table(sdiodev->sgtable.sgl, sdiodev->sgtable.orig_nents);
while ((pkt_next = __skb_dequeue(&local_list)) != NULL) while ((pkt_next = __skb_dequeue(&local_list)) != NULL)
brcmu_pkt_buf_free_skb(pkt_next); brcmu_pkt_buf_free_skb(pkt_next);
...@@ -863,6 +863,29 @@ int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn) ...@@ -863,6 +863,29 @@ int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
return 0; return 0;
} }
/* brcmf_sdiod_sgtable_alloc() - pre-allocate the scatter-gather table
 * reused by every glommed transfer, so the hot tx path avoids a
 * per-transfer sg_alloc_table() call.
 *
 * @sdiodev: SDIO device instance owning the table.
 *
 * On allocation failure, scatter-gather support is disabled for the
 * device rather than failing the probe.
 */
static void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
{
	uint nents;
	int err;

	/* Nothing to pre-allocate when the host lacks sg support. */
	if (!sdiodev->sg_support)
		return;

	/* Size for the larger of the rx/tx glom limits, plus extra
	 * entries (~6% + 1) — presumably headroom for packets that
	 * straddle segment boundaries; TODO confirm rationale.
	 */
	nents = max_t(uint, BRCMF_DEFAULT_RXGLOM_SIZE, brcmf_sdiod_txglomsz);
	nents += (nents >> 4) + 1;

	/* Exceeding the host's segment limit would make transfers fail;
	 * warn loudly but proceed.
	 */
	WARN_ON(nents > sdiodev->max_segment_count);

	brcmf_dbg(TRACE, "nents=%d\n", nents);
	err = sg_alloc_table(&sdiodev->sgtable, nents, GFP_KERNEL);
	if (err < 0) {
		/* fix: terminate the printk message with a newline so it
		 * is not merged with, or deferred behind, later output
		 */
		brcmf_err("allocation failed: disable scatter-gather\n");
		sdiodev->sg_support = false;
	}

	sdiodev->txglomsz = brcmf_sdiod_txglomsz;
}
static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev) static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
{ {
if (sdiodev->bus) { if (sdiodev->bus) {
...@@ -880,6 +903,7 @@ static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev) ...@@ -880,6 +903,7 @@ static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
sdio_disable_func(sdiodev->func[1]); sdio_disable_func(sdiodev->func[1]);
sdio_release_host(sdiodev->func[1]); sdio_release_host(sdiodev->func[1]);
sg_free_table(&sdiodev->sgtable);
sdiodev->sbwad = 0; sdiodev->sbwad = 0;
return 0; return 0;
...@@ -935,6 +959,11 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev) ...@@ -935,6 +959,11 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
SG_MAX_SINGLE_ALLOC); SG_MAX_SINGLE_ALLOC);
sdiodev->max_segment_size = host->max_seg_size; sdiodev->max_segment_size = host->max_seg_size;
/* allocate scatter-gather table. sg support
* will be disabled upon allocation failure.
*/
brcmf_sdiod_sgtable_alloc(sdiodev);
/* try to attach to the target device */ /* try to attach to the target device */
sdiodev->bus = brcmf_sdio_probe(sdiodev); sdiodev->bus = brcmf_sdio_probe(sdiodev);
if (!sdiodev->bus) { if (!sdiodev->bus) {
......
...@@ -113,8 +113,6 @@ struct rte_console { ...@@ -113,8 +113,6 @@ struct rte_console {
#define BRCMF_TXBOUND 20 /* Default for max tx frames in #define BRCMF_TXBOUND 20 /* Default for max tx frames in
one scheduling */ one scheduling */
#define BRCMF_DEFAULT_TXGLOM_SIZE 32 /* max tx frames in glom chain */
#define BRCMF_TXMINMAX 1 /* Max tx frames if rx still pending */ #define BRCMF_TXMINMAX 1 /* Max tx frames if rx still pending */
#define MEMBLOCK 2048 /* Block size used for downloading #define MEMBLOCK 2048 /* Block size used for downloading
...@@ -511,10 +509,6 @@ static const uint max_roundup = 512; ...@@ -511,10 +509,6 @@ static const uint max_roundup = 512;
#define ALIGNMENT 4 #define ALIGNMENT 4
static int brcmf_sdio_txglomsz = BRCMF_DEFAULT_TXGLOM_SIZE;
module_param_named(txglomsz, brcmf_sdio_txglomsz, int, 0);
MODULE_PARM_DESC(txglomsz, "maximum tx packet chain size [SDIO]");
enum brcmf_sdio_frmtype { enum brcmf_sdio_frmtype {
BRCMF_SDIO_FT_NORMAL, BRCMF_SDIO_FT_NORMAL,
BRCMF_SDIO_FT_SUPER, BRCMF_SDIO_FT_SUPER,
...@@ -2321,7 +2315,7 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes) ...@@ -2321,7 +2315,7 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
__skb_queue_head_init(&pktq); __skb_queue_head_init(&pktq);
if (bus->txglom) if (bus->txglom)
pkt_num = min_t(u8, bus->tx_max - bus->tx_seq, pkt_num = min_t(u8, bus->tx_max - bus->tx_seq,
brcmf_sdio_txglomsz); bus->sdiodev->txglomsz);
pkt_num = min_t(u32, pkt_num, pkt_num = min_t(u32, pkt_num,
brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol)); brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol));
spin_lock_bh(&bus->txqlock); spin_lock_bh(&bus->txqlock);
......
...@@ -180,6 +180,8 @@ struct brcmf_sdio_dev { ...@@ -180,6 +180,8 @@ struct brcmf_sdio_dev {
uint max_request_size; uint max_request_size;
ushort max_segment_count; ushort max_segment_count;
uint max_segment_size; uint max_segment_size;
uint txglomsz;
struct sg_table sgtable;
}; };
/* sdio core registers */ /* sdio core registers */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment