Commit cd8b1eb4 authored by Raghu Vatsavayi, committed by David S. Miller

liquidio: Common enable irq function

Add a common irq enable function that serves both the
iq (instruction queue) and the oq (output queue).
Signed-off-by: Derek Chickles <derek.chickles@caviumnetworks.com>
Signed-off-by: Satanand Burla <satananda.burla@caviumnetworks.com>
Signed-off-by: Felix Manlunas <felix.manlunas@caviumnetworks.com>
Signed-off-by: Raghu Vatsavayi <raghu.vatsavayi@caviumnetworks.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 83101ce3
@@ -192,6 +192,7 @@ static void octeon_droq_bh(unsigned long pdev)
continue;
reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
MAX_PACKET_BUDGET);
lio_enable_irq(oct->droq[q_no], NULL);
}
if (reschedule)
@@ -846,7 +846,7 @@ struct oct_mdio_cmd {
/* intrmod: max. packets to trigger interrupt */
#define LIO_INTRMOD_RXMAXCNT_TRIGGER 384
/* intrmod: min. packets to trigger interrupt */
#define LIO_INTRMOD_RXMINCNT_TRIGGER 1
#define LIO_INTRMOD_RXMINCNT_TRIGGER 0
/* intrmod: max. time to trigger interrupt */
#define LIO_INTRMOD_RXMAXTMR_TRIGGER 128
/* 66xx:intrmod: min. time to trigger interrupt
@@ -1122,3 +1122,20 @@ int lio_get_device_id(void *dev)
return octeon_dev->octeon_id;
return -1;
}
void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq)
{
/* the whole thing needs to be atomic, ideally */
if (droq) {
spin_lock_bh(&droq->lock);
writel(droq->pkt_count, droq->pkts_sent_reg);
droq->pkt_count = 0;
spin_unlock_bh(&droq->lock);
}
if (iq) {
spin_lock_bh(&iq->lock);
writel(iq->pkt_in_done, iq->inst_cnt_reg);
iq->pkt_in_done = 0;
spin_unlock_bh(&iq->lock);
}
}
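For reference, here is a minimal sketch of how the new helper might be called from a NAPI poll routine once the budget is not exhausted. The poll function, its napi/droq wiring, and the use of container_of() are illustrative assumptions for this example; only lio_enable_irq() itself comes from this commit.

static int example_napi_poll(struct napi_struct *napi, int budget)
{
	/* Hypothetical wrapper: assumes the droq embeds its napi context,
	 * as the liquidio driver does.
	 */
	struct octeon_droq *droq = container_of(napi, struct octeon_droq, napi);
	int work_done;

	work_done = octeon_droq_process_poll_pkts(droq->oct_dev, droq, budget);

	if (work_done < budget) {
		napi_complete(napi);
		/* Write the cached count back to pkts_sent_reg and re-arm
		 * the OQ interrupt; pass NULL for the unused IQ side.
		 */
		lio_enable_irq(droq, NULL);
	}

	return work_done;
}

Passing only one of the two arguments lets the same helper re-arm either the output queue or the instruction queue independently.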
@@ -660,6 +660,8 @@ void *oct_get_config_info(struct octeon_device *oct, u16 card_type);
*/
struct octeon_config *octeon_get_conf(struct octeon_device *oct);
void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq);
/* LiquidIO driver private flags */
enum {
OCT_PRIV_FLAG_TX_BYTES = 0, /* Tx interrupts by pending byte count */
@@ -92,22 +92,25 @@ static inline void *octeon_get_dispatch_arg(struct octeon_device *octeon_dev,
return fn_arg;
}
/** Check for packets on Droq. This function should be called with
* lock held.
/** Check for packets on Droq. This function should be called with lock held.
* @param droq - Droq on which count is checked.
* @return Returns packet count.
*/
u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq)
{
u32 pkt_count = 0;
u32 last_count;
pkt_count = readl(droq->pkts_sent_reg);
if (pkt_count) {
atomic_add(pkt_count, &droq->pkts_pending);
writel(pkt_count, droq->pkts_sent_reg);
}
return pkt_count;
last_count = pkt_count - droq->pkt_count;
droq->pkt_count = pkt_count;
/* we shall write to cnts at napi irq enable or end of droq tasklet */
if (last_count)
atomic_add(last_count, &droq->pkts_pending);
return last_count;
}
static void octeon_droq_compute_max_packet_bufs(struct octeon_droq *droq)
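The helper now treats pkts_sent_reg as a running counter: it caches the raw value in droq->pkt_count, accumulates only the delta into pkts_pending, and leaves the register write-back to lio_enable_irq() (or the end of the droq tasklet). Below is a standalone sketch of that bookkeeping with made-up counter values; it is not driver code.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t hw_count = 5;	/* stands in for readl(pkts_sent_reg) */
	uint32_t cached = 0;	/* droq->pkt_count */
	uint32_t pending = 0;	/* droq->pkts_pending */

	/* first check: 5 new packets since the last read */
	uint32_t last = hw_count - cached;
	cached = hw_count;
	pending += last;

	/* 3 more packets arrive before any write-back happens */
	hw_count = 8;
	last = hw_count - cached;	/* only the 3 new ones are counted */
	cached = hw_count;
	pending += last;

	/* lio_enable_irq() equivalent: write the cached count back,
	 * which re-arms the interrupt, then clear the cache
	 */
	printf("write back %u, pkts_pending %u\n", cached, pending);
	cached = 0;

	return 0;
}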
@@ -735,16 +738,20 @@ octeon_droq_process_packets(struct octeon_device *oct,
u32 pkt_count = 0, pkts_processed = 0;
struct list_head *tmp, *tmp2;
/* Grab the droq lock */
spin_lock(&droq->lock);
octeon_droq_check_hw_for_pkts(droq);
pkt_count = atomic_read(&droq->pkts_pending);
if (!pkt_count)
if (!pkt_count) {
spin_unlock(&droq->lock);
return 0;
}
if (pkt_count > budget)
pkt_count = budget;
/* Grab the droq lock */
spin_lock(&droq->lock);
pkts_processed = octeon_droq_fast_process_packets(oct, droq, pkt_count);
atomic_sub(pkts_processed, &droq->pkts_pending);
@@ -789,6 +796,8 @@ octeon_droq_process_poll_pkts(struct octeon_device *oct,
spin_lock(&droq->lock);
while (total_pkts_processed < budget) {
octeon_droq_check_hw_for_pkts(droq);
pkts_available =
CVM_MIN((budget - total_pkts_processed),
(u32)(atomic_read(&droq->pkts_pending)));
@@ -803,8 +812,6 @@ octeon_droq_process_poll_pkts(struct octeon_device *oct,
atomic_sub(pkts_processed, &droq->pkts_pending);
total_pkts_processed += pkts_processed;
octeon_droq_check_hw_for_pkts(droq);
}
spin_unlock(&droq->lock);
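Putting the two hunks above together: the reworked poll loop refreshes the hardware count at the top of each pass and no longer touches pkts_sent_reg at the bottom; the write-back is deferred to lio_enable_irq(). A simplified view of the resulting loop, condensed from the diff (the driver's exact early-exit checks may differ slightly):

	spin_lock(&droq->lock);

	while (total_pkts_processed < budget) {
		/* refresh pkts_pending from the hardware counter */
		octeon_droq_check_hw_for_pkts(droq);

		pkts_available = CVM_MIN((budget - total_pkts_processed),
					 (u32)(atomic_read(&droq->pkts_pending)));
		if (!pkts_available)
			break;

		pkts_processed = octeon_droq_fast_process_packets(oct, droq,
								  pkts_available);

		atomic_sub(pkts_processed, &droq->pkts_pending);
		total_pkts_processed += pkts_processed;
	}

	spin_unlock(&droq->lock);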
@@ -261,6 +261,8 @@ struct octeon_droq {
u32 q_no;
u32 pkt_count;
struct octeon_droq_ops ops;
struct octeon_device *oct_dev;
@@ -88,6 +88,8 @@ struct octeon_instr_queue {
/** A spinlock to protect while posting on the ring. */
spinlock_t post_lock;
u32 pkt_in_done;
/** A spinlock to protect access to the input ring.*/
spinlock_t iq_flush_running_lock;
@@ -499,6 +499,7 @@ static void __check_db_timeout(struct octeon_device *oct, u64 iq_no)
if (!oct)
return;
iq = oct->instr_queue[iq_no];
if (!iq)
return;
@@ -514,6 +515,8 @@ static void __check_db_timeout(struct octeon_device *oct, u64 iq_no)
/* Flush the instruction queue */
octeon_flush_iq(oct, iq, 1, 0);
lio_enable_irq(NULL, iq);
}
/* Called by the Poll thread at regular intervals to check the instruction