Commit 9355fb6a authored by Ralph Campbell, committed by Roland Dreier

IB/ipath: Add support for 7220 receive queue changes

Newer HCAs have a HW option to write a sequence number to each receive
queue entry and avoid a separate DMA of the tail register to memory.
This patch adds support for these changes.
Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent 2ba3f56e
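
For context, the scheme this patch enables looks roughly like the sketch below: the HCA writes a 4-bit sequence number into each receive header flags (RHF) word, and the driver advances a matching expected counter (cycling 1..13, as in the patch) while it consumes entries, so it never needs a DMA'd copy of the tail register. This is an illustrative standalone sketch, not driver code; the names (struct rx_port, rx_poll) are hypothetical stand-ins for ipath_portdata/ipath_kreceive.

#include <stdint.h>

#define RHF_SEQ_MASK  0xF   /* cf. INFINIPATH_RHF_SEQ_MASK */
#define RHF_SEQ_SHIFT 0     /* cf. INFINIPATH_RHF_SEQ_SHIFT */

struct rx_port {                        /* hypothetical stand-in */
        volatile uint32_t *rcvhdrq;     /* receive header queue */
        uint32_t head;                  /* next entry index to consume */
        uint32_t cnt;                   /* number of entries in the queue */
        uint32_t entsize;               /* entry size in 32-bit words */
        uint32_t rhf_offset;            /* RHF word offset within an entry */
        uint32_t seq_cnt;               /* expected sequence, cycles 1..13 */
};

static int rx_poll(struct rx_port *p)
{
        int handled = 0;

        for (;;) {
                uint32_t rhf = p->rcvhdrq[p->head * p->entsize + p->rhf_offset];
                uint32_t seq = (rhf >> RHF_SEQ_SHIFT) & RHF_SEQ_MASK;

                /* No new entry: HW hasn't written the expected sequence. */
                if (seq != p->seq_cnt)
                        break;

                /* ... process the packet described by entry p->head ... */
                handled++;

                /* Advance head with wrap, and expected sequence 1..13. */
                if (++p->head >= p->cnt)
                        p->head = 0;
                if (++p->seq_cnt > 13)
                        p->seq_cnt = 1;
        }
        return handled;
}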
@@ -198,7 +198,7 @@ typedef enum _ipath_ureg {
 #define IPATH_RUNTIME_FORCE_WC_ORDER 0x4
 #define IPATH_RUNTIME_RCVHDR_COPY 0x8
 #define IPATH_RUNTIME_MASTER 0x10
-/* 0x20 and 0x40 are no longer used, but are reserved for ABI compatibility */
+#define IPATH_RUNTIME_NODMA_RTAIL 0x80
 #define IPATH_RUNTIME_FORCE_PIOAVAIL 0x400
 #define IPATH_RUNTIME_PIO_REGSWAPPED 0x800
@@ -662,8 +662,12 @@ struct infinipath_counters {
 #define INFINIPATH_RHF_LENGTH_SHIFT 0
 #define INFINIPATH_RHF_RCVTYPE_MASK 0x7
 #define INFINIPATH_RHF_RCVTYPE_SHIFT 11
-#define INFINIPATH_RHF_EGRINDEX_MASK 0x7FF
+#define INFINIPATH_RHF_EGRINDEX_MASK 0xFFF
 #define INFINIPATH_RHF_EGRINDEX_SHIFT 16
+#define INFINIPATH_RHF_SEQ_MASK 0xF
+#define INFINIPATH_RHF_SEQ_SHIFT 0
+#define INFINIPATH_RHF_HDRQ_OFFSET_MASK 0x7FF
+#define INFINIPATH_RHF_HDRQ_OFFSET_SHIFT 4
 #define INFINIPATH_RHF_H_ICRCERR 0x80000000
 #define INFINIPATH_RHF_H_VCRCERR 0x40000000
 #define INFINIPATH_RHF_H_PARITYERR 0x20000000
@@ -673,6 +677,8 @@ struct infinipath_counters {
 #define INFINIPATH_RHF_H_TIDERR 0x02000000
 #define INFINIPATH_RHF_H_MKERR 0x01000000
 #define INFINIPATH_RHF_H_IBERR 0x00800000
+#define INFINIPATH_RHF_H_ERR_MASK 0xFF800000
+#define INFINIPATH_RHF_L_USE_EGR 0x80000000
 #define INFINIPATH_RHF_L_SWA 0x00008000
 #define INFINIPATH_RHF_L_SWB 0x00004000
@@ -696,6 +702,7 @@ struct infinipath_counters {
 /* SendPIO per-buffer control */
 #define INFINIPATH_SP_TEST 0x40
 #define INFINIPATH_SP_TESTEBP 0x20
+#define INFINIPATH_SP_TRIGGER_SHIFT 15

 /* SendPIOAvail bits */
 #define INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT 1
@@ -762,6 +769,7 @@ struct ether_header {
 #define IPATH_MSN_MASK 0xFFFFFF
 #define IPATH_QPN_MASK 0xFFFFFF
 #define IPATH_MULTICAST_LID_BASE 0xC000
+#define IPATH_EAGER_TID_ID INFINIPATH_I_TID_MASK
 #define IPATH_MULTICAST_QPN 0xFFFFFF

 /* Receive Header Queue: receive type (from infinipath) */
@@ -781,7 +789,7 @@ struct ether_header {
  */
 static inline __u32 ipath_hdrget_err_flags(const __le32 * rbuf)
 {
-        return __le32_to_cpu(rbuf[1]);
+        return __le32_to_cpu(rbuf[1]) & INFINIPATH_RHF_H_ERR_MASK;
 }

 static inline __u32 ipath_hdrget_rcv_type(const __le32 * rbuf)
@@ -802,6 +810,23 @@ static inline __u32 ipath_hdrget_index(const __le32 * rbuf)
                 & INFINIPATH_RHF_EGRINDEX_MASK;
 }

+static inline __u32 ipath_hdrget_seq(const __le32 *rbuf)
+{
+        return (__le32_to_cpu(rbuf[1]) >> INFINIPATH_RHF_SEQ_SHIFT)
+                & INFINIPATH_RHF_SEQ_MASK;
+}
+
+static inline __u32 ipath_hdrget_offset(const __le32 *rbuf)
+{
+        return (__le32_to_cpu(rbuf[1]) >> INFINIPATH_RHF_HDRQ_OFFSET_SHIFT)
+                & INFINIPATH_RHF_HDRQ_OFFSET_MASK;
+}
+
+static inline __u32 ipath_hdrget_use_egr_buf(const __le32 *rbuf)
+{
+        return __le32_to_cpu(rbuf[0]) & INFINIPATH_RHF_L_USE_EGR;
+}
+
 static inline __u32 ipath_hdrget_ipath_ver(__le32 hdrword)
 {
         return (__le32_to_cpu(hdrword) >> INFINIPATH_I_VERS_SHIFT)
...
@@ -41,7 +41,6 @@
 #include "ipath_kernel.h"
 #include "ipath_verbs.h"
-#include "ipath_common.h"

 static void ipath_update_pio_bufs(struct ipath_devdata *);
@@ -720,6 +719,8 @@ static void __devexit cleanup_device(struct ipath_devdata *dd)
                 tmpp = dd->ipath_pageshadow;
                 dd->ipath_pageshadow = NULL;
                 vfree(tmpp);
+
+                dd->ipath_egrtidbase = NULL;
         }

 /*
@@ -1078,18 +1079,17 @@ static void ipath_rcv_hdrerr(struct ipath_devdata *dd,
                             u32 eflags,
                             u32 l,
                             u32 etail,
-                            u64 *rc)
+                            __le32 *rhf_addr,
+                            struct ipath_message_header *hdr)
 {
         char emsg[128];
-        struct ipath_message_header *hdr;

         get_rhf_errstring(eflags, emsg, sizeof emsg);
-        hdr = (struct ipath_message_header *)&rc[1];
         ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u "
                    "tlen=%x opcode=%x egridx=%x: %s\n",
                    eflags, l,
-                   ipath_hdrget_rcv_type((__le32 *) rc),
-                   ipath_hdrget_length_in_bytes((__le32 *) rc),
+                   ipath_hdrget_rcv_type(rhf_addr),
+                   ipath_hdrget_length_in_bytes(rhf_addr),
                    be32_to_cpu(hdr->bth[0]) >> 24,
                    etail, emsg);
@@ -1114,55 +1114,52 @@ static void ipath_rcv_hdrerr(struct ipath_devdata *dd,
  */
 void ipath_kreceive(struct ipath_portdata *pd)
 {
-        u64 *rc;
         struct ipath_devdata *dd = pd->port_dd;
+        __le32 *rhf_addr;
         void *ebuf;
         const u32 rsize = dd->ipath_rcvhdrentsize; /* words */
         const u32 maxcnt = dd->ipath_rcvhdrcnt * rsize; /* words */
         u32 etail = -1, l, hdrqtail;
         struct ipath_message_header *hdr;
-        u32 eflags, i, etype, tlen, pkttot = 0, updegr=0, reloop=0;
+        u32 eflags, i, etype, tlen, pkttot = 0, updegr = 0, reloop = 0;
         static u64 totcalls; /* stats, may eventually remove */
-
-        if (!dd->ipath_hdrqtailptr) {
-                ipath_dev_err(dd,
-                              "hdrqtailptr not set, can't do receives\n");
-                goto bail;
-        }
+        int last;

         l = pd->port_head;
-        hdrqtail = ipath_get_rcvhdrtail(pd);
-        if (l == hdrqtail)
-                goto bail;
+        rhf_addr = (__le32 *) pd->port_rcvhdrq + l + dd->ipath_rhf_offset;
+        if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
+                u32 seq = ipath_hdrget_seq(rhf_addr);
+
+                if (seq != pd->port_seq_cnt)
+                        goto bail;
+                hdrqtail = 0;
+        } else {
+                hdrqtail = ipath_get_rcvhdrtail(pd);
+                if (l == hdrqtail)
+                        goto bail;
+                smp_rmb();
+        }

 reloop:
-        for (i = 0; l != hdrqtail; i++) {
-                u32 qp;
-                u8 *bthbytes;
-
-                rc = (u64 *) (pd->port_rcvhdrq + (l << 2));
-                hdr = (struct ipath_message_header *)&rc[1];
-                /*
-                 * could make a network order version of IPATH_KD_QP, and
-                 * do the obvious shift before masking to speed this up.
-                 */
-                qp = ntohl(hdr->bth[1]) & 0xffffff;
-                bthbytes = (u8 *) hdr->bth;
-
-                eflags = ipath_hdrget_err_flags((__le32 *) rc);
-                etype = ipath_hdrget_rcv_type((__le32 *) rc);
+        for (last = 0, i = 1; !last; i++) {
+                hdr = dd->ipath_f_get_msgheader(dd, rhf_addr);
+                eflags = ipath_hdrget_err_flags(rhf_addr);
+                etype = ipath_hdrget_rcv_type(rhf_addr);
                 /* total length */
-                tlen = ipath_hdrget_length_in_bytes((__le32 *) rc);
+                tlen = ipath_hdrget_length_in_bytes(rhf_addr);
                 ebuf = NULL;
-                if (etype != RCVHQ_RCV_TYPE_EXPECTED) {
+                if ((dd->ipath_flags & IPATH_NODMA_RTAIL) ?
+                    ipath_hdrget_use_egr_buf(rhf_addr) :
+                    (etype != RCVHQ_RCV_TYPE_EXPECTED)) {
                         /*
-                         * it turns out that the chips uses an eager buffer
+                         * It turns out that the chip uses an eager buffer
                          * for all non-expected packets, whether it "needs"
                          * one or not. So always get the index, but don't
                          * set ebuf (so we try to copy data) unless the
                          * length requires it.
                          */
-                        etail = ipath_hdrget_index((__le32 *) rc);
+                        etail = ipath_hdrget_index(rhf_addr);
+                        updegr = 1;
                         if (tlen > sizeof(*hdr) ||
                             etype == RCVHQ_RCV_TYPE_NON_KD)
                                 ebuf = ipath_get_egrbuf(dd, etail);
@@ -1173,75 +1170,91 @@ void ipath_kreceive(struct ipath_portdata *pd)
                  * packets; only ipathhdrerr should be set.
                  */
-                if (etype != RCVHQ_RCV_TYPE_NON_KD && etype !=
-                    RCVHQ_RCV_TYPE_ERROR && ipath_hdrget_ipath_ver(
-                    hdr->iph.ver_port_tid_offset) !=
-                    IPS_PROTO_VERSION) {
+                if (etype != RCVHQ_RCV_TYPE_NON_KD &&
+                    etype != RCVHQ_RCV_TYPE_ERROR &&
+                    ipath_hdrget_ipath_ver(hdr->iph.ver_port_tid_offset) !=
+                    IPS_PROTO_VERSION)
                         ipath_cdbg(PKT, "Bad InfiniPath protocol version "
                                    "%x\n", etype);
-                }

                 if (unlikely(eflags))
-                        ipath_rcv_hdrerr(dd, eflags, l, etail, rc);
+                        ipath_rcv_hdrerr(dd, eflags, l, etail, rhf_addr, hdr);
                 else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
-                        ipath_ib_rcv(dd->verbs_dev, rc + 1, ebuf, tlen);
+                        ipath_ib_rcv(dd->verbs_dev, (u32 *)hdr, ebuf, tlen);
                         if (dd->ipath_lli_counter)
                                 dd->ipath_lli_counter--;
-                }
-                else if (etype == RCVHQ_RCV_TYPE_EAGER)
+                } else if (etype == RCVHQ_RCV_TYPE_EAGER) {
+                        u8 opcode = be32_to_cpu(hdr->bth[0]) >> 24;
+                        u32 qp = be32_to_cpu(hdr->bth[1]) & 0xffffff;
+
                         ipath_cdbg(PKT, "typ %x, opcode %x (eager, "
                                    "qp=%x), len %x; ignored\n",
-                                   etype, bthbytes[0], qp, tlen);
+                                   etype, opcode, qp, tlen);
+                }
                 else if (etype == RCVHQ_RCV_TYPE_EXPECTED)
                         ipath_dbg("Bug: Expected TID, opcode %x; ignored\n",
-                                  be32_to_cpu(hdr->bth[0]) & 0xff);
+                                  be32_to_cpu(hdr->bth[0]) >> 24);
                 else {
                         /*
                          * error packet, type of error unknown.
                          * Probably type 3, but we don't know, so don't
                          * even try to print the opcode, etc.
+                         * Usually caused by a "bad packet", that has no
+                         * BTH, when the LRH says it should.
                          */
-                        ipath_dbg("Error Pkt, but no eflags! egrbuf %x, "
-                                  "len %x\nhdrq@%lx;hdrq+%x rhf: %llx; "
-                                  "hdr %llx %llx %llx %llx %llx\n",
-                                  etail, tlen, (unsigned long) rc, l,
-                                  (unsigned long long) rc[0],
-                                  (unsigned long long) rc[1],
-                                  (unsigned long long) rc[2],
-                                  (unsigned long long) rc[3],
-                                  (unsigned long long) rc[4],
-                                  (unsigned long long) rc[5]);
+                        ipath_cdbg(ERRPKT, "Error Pkt, but no eflags! egrbuf"
+                                   " %x, len %x hdrq+%x rhf: %Lx\n",
+                                   etail, tlen, l,
+                                   le64_to_cpu(*(__le64 *) rhf_addr));
+                        if (ipath_debug & __IPATH_ERRPKTDBG) {
+                                u32 j, *d, dw = rsize-2;
+                                if (rsize > (tlen>>2))
+                                        dw = tlen>>2;
+                                d = (u32 *)hdr;
+                                printk(KERN_DEBUG "EPkt rcvhdr(%x dw):\n",
+                                       dw);
+                                for (j = 0; j < dw; j++)
+                                        printk(KERN_DEBUG "%8x%s", d[j],
+                                               (j%8) == 7 ? "\n" : " ");
+                                printk(KERN_DEBUG ".\n");
+                        }
                 }
                 l += rsize;
                 if (l >= maxcnt)
                         l = 0;
-                if (etype != RCVHQ_RCV_TYPE_EXPECTED)
-                        updegr = 1;
+                rhf_addr = (__le32 *) pd->port_rcvhdrq +
+                        l + dd->ipath_rhf_offset;
+                if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
+                        u32 seq = ipath_hdrget_seq(rhf_addr);
+
+                        if (++pd->port_seq_cnt > 13)
+                                pd->port_seq_cnt = 1;
+                        if (seq != pd->port_seq_cnt)
+                                last = 1;
+                } else if (l == hdrqtail)
+                        last = 1;
                 /*
                  * update head regs on last packet, and every 16 packets.
                  * Reduce bus traffic, while still trying to prevent
                  * rcvhdrq overflows, for when the queue is nearly full
                  */
-                if (l == hdrqtail || (i && !(i&0xf))) {
-                        u64 lval;
-                        if (l == hdrqtail)
-                                /* request IBA6120 interrupt only on last */
-                                lval = dd->ipath_rhdrhead_intr_off | l;
-                        else
-                                lval = l;
-                        ipath_write_ureg(dd, ur_rcvhdrhead, lval, 0);
+                if (last || !(i & 0xf)) {
+                        u64 lval = l;
+
+                        /* request IBA6120 and 7220 interrupt only on last */
+                        if (last)
+                                lval |= dd->ipath_rhdrhead_intr_off;
+                        ipath_write_ureg(dd, ur_rcvhdrhead, lval,
+                                         pd->port_port);
                         if (updegr) {
                                 ipath_write_ureg(dd, ur_rcvegrindexhead,
-                                                 etail, 0);
+                                                 etail, pd->port_port);
                                 updegr = 0;
                         }
                 }
         }

-        if (!dd->ipath_rhdrhead_intr_off && !reloop) {
+        if (!dd->ipath_rhdrhead_intr_off && !reloop &&
+            !(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
                 /* IBA6110 workaround; we can have a race clearing chip
                  * interrupt with another interrupt about to be delivered,
                  * and can clear it before it is delivered on the GPIO
@@ -1638,19 +1651,27 @@ int ipath_create_rcvhdrq(struct ipath_devdata *dd,
                         ret = -ENOMEM;
                         goto bail;
                 }
-                pd->port_rcvhdrtail_kvaddr = dma_alloc_coherent(
-                        &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail, GFP_KERNEL);
-                if (!pd->port_rcvhdrtail_kvaddr) {
-                        ipath_dev_err(dd, "attempt to allocate 1 page "
-                                      "for port %u rcvhdrqtailaddr failed\n",
-                                      pd->port_port);
-                        ret = -ENOMEM;
-                        dma_free_coherent(&dd->pcidev->dev, amt,
-                                pd->port_rcvhdrq, pd->port_rcvhdrq_phys);
-                        pd->port_rcvhdrq = NULL;
-                        goto bail;
+
+                if (!(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
+                        pd->port_rcvhdrtail_kvaddr = dma_alloc_coherent(
+                                &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
+                                GFP_KERNEL);
+                        if (!pd->port_rcvhdrtail_kvaddr) {
+                                ipath_dev_err(dd, "attempt to allocate 1 page "
+                                              "for port %u rcvhdrqtailaddr "
+                                              "failed\n", pd->port_port);
+                                ret = -ENOMEM;
+                                dma_free_coherent(&dd->pcidev->dev, amt,
+                                                  pd->port_rcvhdrq,
+                                                  pd->port_rcvhdrq_phys);
+                                pd->port_rcvhdrq = NULL;
+                                goto bail;
+                        }
+                        pd->port_rcvhdrqtailaddr_phys = phys_hdrqtail;
+                        ipath_cdbg(VERBOSE, "port %d hdrtailaddr, %llx "
+                                   "physical\n", pd->port_port,
+                                   (unsigned long long) phys_hdrqtail);
                 }
-                pd->port_rcvhdrqtailaddr_phys = phys_hdrqtail;

                 pd->port_rcvhdrq_size = amt;
@@ -1660,10 +1681,6 @@ int ipath_create_rcvhdrq(struct ipath_devdata *dd,
                            (unsigned long) pd->port_rcvhdrq_phys,
                            (unsigned long) pd->port_rcvhdrq_size,
                            pd->port_port);
-
-                ipath_cdbg(VERBOSE, "port %d hdrtailaddr, %llx physical\n",
-                           pd->port_port,
-                           (unsigned long long) phys_hdrqtail);
         }
         else
                 ipath_cdbg(VERBOSE, "reuse port %d rcvhdrq @%p %llx phys; "
@@ -1687,7 +1704,6 @@ int ipath_create_rcvhdrq(struct ipath_devdata *dd,
         ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
                               pd->port_port, pd->port_rcvhdrq_phys);
-        ret = 0;

 bail:
         return ret;
 }
@@ -2222,7 +2238,7 @@ void ipath_free_pddata(struct ipath_devdata *dd, struct ipath_portdata *pd)
                 ipath_cdbg(VERBOSE, "free closed port %d "
                            "ipath_port0_skbinfo @ %p\n", pd->port_port,
                            skbinfo);
-                for (e = 0; e < dd->ipath_rcvegrcnt; e++)
+                for (e = 0; e < dd->ipath_p0_rcvegrcnt; e++)
                         if (skbinfo[e].skb) {
                                 pci_unmap_single(dd->pcidev, skbinfo[e].phys,
                                                  dd->ipath_ibmaxlen,
...
@@ -1930,22 +1930,25 @@ static int ipath_do_user_init(struct file *fp,
         pd->port_hdrqfull_poll = pd->port_hdrqfull;

         /*
-         * now enable the port; the tail registers will be written to memory
-         * by the chip as soon as it sees the write to
-         * dd->ipath_kregs->kr_rcvctrl. The update only happens on
-         * transition from 0 to 1, so clear it first, then set it as part of
-         * enabling the port. This will (very briefly) affect any other
-         * open ports, but it shouldn't be long enough to be an issue.
-         * We explictly set the in-memory copy to 0 beforehand, so we don't
-         * have to wait to be sure the DMA update has happened.
+         * Now enable the port for receive.
+         * For chips that are set to DMA the tail register to memory
+         * when they change (and when the update bit transitions from
+         * 0 to 1.  So for those chips, we turn it off and then back on.
+         * This will (very briefly) affect any other open ports, but the
+         * duration is very short, and therefore isn't an issue.  We
+         * explictly set the in-memory tail copy to 0 beforehand, so we
+         * don't have to wait to be sure the DMA update has happened
+         * (chip resets head/tail to 0 on transition to enable).
          */
-        if (pd->port_rcvhdrtail_kvaddr)
-                ipath_clear_rcvhdrtail(pd);
         set_bit(dd->ipath_r_portenable_shift + pd->port_port,
                 &dd->ipath_rcvctrl);
-        ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
-                         dd->ipath_rcvctrl &
-                         ~(1ULL << dd->ipath_r_tailupd_shift));
+        if (!(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
+                if (pd->port_rcvhdrtail_kvaddr)
+                        ipath_clear_rcvhdrtail(pd);
+                ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
+                                 dd->ipath_rcvctrl &
+                                 ~(1ULL << dd->ipath_r_tailupd_shift));
+        }
         ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
                          dd->ipath_rcvctrl);
         /* Notify any waiting slaves */
@@ -1973,14 +1976,15 @@ static void unlock_expected_tids(struct ipath_portdata *pd)
         ipath_cdbg(VERBOSE, "Port %u unlocking any locked expTID pages\n",
                    pd->port_port);
         for (i = port_tidbase; i < maxtid; i++) {
-                if (!dd->ipath_pageshadow[i])
+                struct page *ps = dd->ipath_pageshadow[i];
+
+                if (!ps)
                         continue;
+                dd->ipath_pageshadow[i] = NULL;
                 pci_unmap_page(dd->pcidev, dd->ipath_physshadow[i],
                                PAGE_SIZE, PCI_DMA_FROMDEVICE);
-                ipath_release_user_pages_on_close(&dd->ipath_pageshadow[i],
-                                                  1);
-                dd->ipath_pageshadow[i] = NULL;
+                ipath_release_user_pages_on_close(&ps, 1);
                 cnt++;
                 ipath_stats.sps_pageunlocks++;
         }
...
@@ -306,7 +306,9 @@ static const struct ipath_cregs ipath_ht_cregs = {

 /* kr_intstatus, kr_intclear, kr_intmask bits */
 #define INFINIPATH_I_RCVURG_MASK ((1U<<9)-1)
+#define INFINIPATH_I_RCVURG_SHIFT 0
 #define INFINIPATH_I_RCVAVAIL_MASK ((1U<<9)-1)
+#define INFINIPATH_I_RCVAVAIL_SHIFT 12

 /* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
 #define INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT 0
...
@@ -316,7 +316,9 @@ static const struct ipath_cregs ipath_pe_cregs = {

 /* kr_intstatus, kr_intclear, kr_intmask bits */
 #define INFINIPATH_I_RCVURG_MASK ((1U<<5)-1)
+#define INFINIPATH_I_RCVURG_SHIFT 0
 #define INFINIPATH_I_RCVAVAIL_MASK ((1U<<5)-1)
+#define INFINIPATH_I_RCVAVAIL_SHIFT 12

 /* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
 #define INFINIPATH_HWE_PCIEMEMPARITYERR_MASK 0x000000000000003fULL
...
@@ -695,8 +695,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
                         struct ipath_portdata *pd = dd->ipath_pd[i];
                         if (i == 0) {
                                 hd = pd->port_head;
-                                tl = (u32) le64_to_cpu(
-                                        *dd->ipath_hdrqtailptr);
+                                tl = ipath_get_hdrqtail(pd);
                         } else if (pd && pd->port_cnt &&
                                    pd->port_rcvhdrtail_kvaddr) {
                                 /*
@@ -732,8 +731,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
                  * vs user)
                  */
                 ipath_stats.sps_etidfull++;
-                if (pd->port_head !=
-                    (u32) le64_to_cpu(*dd->ipath_hdrqtailptr))
+                if (pd->port_head != ipath_get_hdrqtail(pd))
                         chkerrpkts = 1;
         }
@@ -952,7 +950,7 @@ static void handle_layer_pioavail(struct ipath_devdata *dd)
  * process was waiting for a packet to arrive, and didn't want
  * to poll
  */
-static void handle_urcv(struct ipath_devdata *dd, u32 istat)
+static void handle_urcv(struct ipath_devdata *dd, u64 istat)
 {
         u64 portr;
         int i;
@@ -968,9 +966,9 @@ static void handle_urcv(struct ipath_devdata *dd, u32 istat)
          * and ipath_poll_next()...
          */
         rmb();
-        portr = ((istat >> INFINIPATH_I_RCVAVAIL_SHIFT) &
-                 dd->ipath_i_rcvavail_mask)
-                | ((istat >> INFINIPATH_I_RCVURG_SHIFT) &
-                   dd->ipath_i_rcvurg_mask);
+        portr = ((istat >> dd->ipath_i_rcvavail_shift) &
+                 dd->ipath_i_rcvavail_mask) |
+                ((istat >> dd->ipath_i_rcvurg_shift) &
+                 dd->ipath_i_rcvurg_mask);
         for (i = 1; i < dd->ipath_cfgports; i++) {
                 struct ipath_portdata *pd = dd->ipath_pd[i];
@@ -991,7 +989,7 @@ static void handle_urcv(struct ipath_devdata *dd, u32 istat)
         }
         if (rcvdint) {
                 /* only want to take one interrupt, so turn off the rcv
-                 * interrupt for all the ports that we did the wakeup on
+                 * interrupt for all the ports that we set the rcv_waiting
                  * (but never for kernel port)
                  */
                 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
@@ -1006,8 +1004,7 @@ irqreturn_t ipath_intr(int irq, void *data)
         ipath_err_t estat = 0;
         irqreturn_t ret;
         static unsigned unexpected = 0;
-        static const u32 port0rbits = (1U<<INFINIPATH_I_RCVAVAIL_SHIFT) |
-                (1U<<INFINIPATH_I_RCVURG_SHIFT);
+        u64 kportrbits;

         ipath_stats.sps_ints++;
@@ -1076,9 +1073,7 @@ irqreturn_t ipath_intr(int irq, void *data)
                         ipath_dev_err(dd, "Read of error status failed "
                                       "(all bits set); ignoring\n");
                 else
-                        if (handle_errors(dd, estat))
-                                /* force calling ipath_kreceive() */
-                                chk0rcv = 1;
+                        chk0rcv |= handle_errors(dd, estat);
         }

         if (istat & INFINIPATH_I_GPIO) {
@@ -1158,7 +1153,6 @@ irqreturn_t ipath_intr(int irq, void *data)
                                          (u64) to_clear);
                 }
         }
-        chk0rcv |= istat & port0rbits;

         /*
          * Clear the interrupt bits we found set, unless they are receive
@@ -1171,20 +1165,20 @@ irqreturn_t ipath_intr(int irq, void *data)
         ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, istat);

         /*
-         * handle port0 receive before checking for pio buffers available,
-         * since receives can overflow; piobuf waiters can afford a few
-         * extra cycles, since they were waiting anyway, and user's waiting
-         * for receive are at the bottom.
+         * Handle kernel receive queues before checking for pio buffers
+         * available since receives can overflow; piobuf waiters can afford
+         * a few extra cycles, since they were waiting anyway, and user's
+         * waiting for receive are at the bottom.
          */
-        if (chk0rcv) {
+        kportrbits = (1ULL << dd->ipath_i_rcvavail_shift) |
+                (1ULL << dd->ipath_i_rcvurg_shift);
+        if (chk0rcv || (istat & kportrbits)) {
+                istat &= ~kportrbits;
                 ipath_kreceive(dd->ipath_pd[0]);
-                istat &= ~port0rbits;
         }

-        if (istat & ((dd->ipath_i_rcvavail_mask <<
-                      INFINIPATH_I_RCVAVAIL_SHIFT)
-                     | (dd->ipath_i_rcvurg_mask <<
-                        INFINIPATH_I_RCVURG_SHIFT)))
+        if (istat & ((dd->ipath_i_rcvavail_mask << dd->ipath_i_rcvavail_shift) |
+                     (dd->ipath_i_rcvurg_mask << dd->ipath_i_rcvurg_shift)))
                 handle_urcv(dd, istat);

         if (istat & INFINIPATH_I_SPIOBUFAVAIL) {
...
@@ -175,6 +175,8 @@ struct ipath_portdata {
         u16 poll_type;
         /* port rcvhdrq head offset */
         u32 port_head;
+        /* receive packet sequence counter */
+        u32 port_seq_cnt;
 };

 struct sk_buff;
@@ -224,11 +226,6 @@ struct ipath_devdata {
         unsigned long ipath_physaddr;
         /* base of memory alloced for ipath_kregbase, for free */
         u64 *ipath_kregalloc;
-        /*
-         * virtual address where port0 rcvhdrqtail updated for this unit.
-         * only written to by the chip, not the driver.
-         */
-        volatile __le64 *ipath_hdrqtailptr;
         /* ipath_cfgports pointers */
         struct ipath_portdata **ipath_pd;
         /* sk_buffs used by port 0 eager receive queue */
@@ -286,6 +283,7 @@ struct ipath_devdata {
         /* per chip actions needed for IB Link up/down changes */
         int (*ipath_f_ib_updown)(struct ipath_devdata *, int, u64);
+        unsigned ipath_lastegr_idx;
         struct ipath_ibdev *verbs_dev;
         struct timer_list verbs_timer;
         /* total dwords sent (summed from counter) */
@@ -593,14 +591,6 @@ struct ipath_devdata {
         u8 ipath_minrev;
         /* board rev, from ipath_revision */
         u8 ipath_boardrev;
-
-        u8 ipath_r_portenable_shift;
-        u8 ipath_r_intravail_shift;
-        u8 ipath_r_tailupd_shift;
-        u8 ipath_r_portcfg_shift;
-
-        /* unit # of this chip, if present */
-        int ipath_unit;
         /* saved for restore after reset */
         u8 ipath_pci_cacheline;
         /* LID mask control */
@@ -616,6 +606,14 @@ struct ipath_devdata {
         /* Rx Polarity inversion (compensate for ~tx on partner) */
         u8 ipath_rx_pol_inv;

+        u8 ipath_r_portenable_shift;
+        u8 ipath_r_intravail_shift;
+        u8 ipath_r_tailupd_shift;
+        u8 ipath_r_portcfg_shift;
+
+        /* unit # of this chip, if present */
+        int ipath_unit;
+
         /* local link integrity counter */
         u32 ipath_lli_counter;
         /* local link integrity errors */
@@ -645,8 +643,8 @@ struct ipath_devdata {
          * Below should be computable from number of ports,
          * since they are never modified.
          */
-        u32 ipath_i_rcvavail_mask;
-        u32 ipath_i_rcvurg_mask;
+        u64 ipath_i_rcvavail_mask;
+        u64 ipath_i_rcvurg_mask;
         u16 ipath_i_rcvurg_shift;
         u16 ipath_i_rcvavail_shift;
@@ -835,6 +833,8 @@ void ipath_hol_event(unsigned long);
 #define IPATH_LINKUNK 0x400
 /* Write combining flush needed for PIO */
 #define IPATH_PIO_FLUSH_WC 0x1000
+/* DMA Receive tail pointer */
+#define IPATH_NODMA_RTAIL 0x2000
 /* no IB cable, or no device on IB cable */
 #define IPATH_NOCABLE 0x4000
 /* Supports port zero per packet receive interrupts via
@@ -845,9 +845,9 @@ void ipath_hol_event(unsigned long);
 /* packet/word counters are 32 bit, else those 4 counters
  * are 64bit */
 #define IPATH_32BITCOUNTERS 0x20000
-/* can miss port0 rx interrupts */
 /* Interrupt register is 64 bits */
 #define IPATH_INTREG_64 0x40000
+/* can miss port0 rx interrupts */
 #define IPATH_DISABLED 0x80000 /* administratively disabled */
 /* Use GPIO interrupts for new counters */
 #define IPATH_GPIO_ERRINTRS 0x100000
@@ -1035,6 +1035,27 @@ static inline u32 ipath_get_rcvhdrtail(const struct ipath_portdata *pd)
                         pd->port_rcvhdrtail_kvaddr));
 }

+static inline u32 ipath_get_hdrqtail(const struct ipath_portdata *pd)
+{
+        const struct ipath_devdata *dd = pd->port_dd;
+        u32 hdrqtail;
+
+        if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
+                __le32 *rhf_addr;
+                u32 seq;
+
+                rhf_addr = (__le32 *) pd->port_rcvhdrq +
+                        pd->port_head + dd->ipath_rhf_offset;
+                seq = ipath_hdrget_seq(rhf_addr);
+                hdrqtail = pd->port_head;
+                if (seq == pd->port_seq_cnt)
+                        hdrqtail++;
+        } else
+                hdrqtail = ipath_get_rcvhdrtail(pd);
+
+        return hdrqtail;
+}
+
 static inline u64 ipath_read_ireg(const struct ipath_devdata *dd, ipath_kreg r)
 {
         return (dd->ipath_flags & IPATH_INTREG_64) ?
...
@@ -86,8 +86,6 @@
 #define INFINIPATH_R_QPMAP_ENABLE (1ULL << 38)

 /* kr_intstatus, kr_intclear, kr_intmask bits */
-#define INFINIPATH_I_RCVURG_SHIFT 0
-#define INFINIPATH_I_RCVAVAIL_SHIFT 12
 #define INFINIPATH_I_ERROR 0x80000000
 #define INFINIPATH_I_SPIOSENT 0x40000000
 #define INFINIPATH_I_SPIOBUFAVAIL 0x20000000
...
@@ -136,6 +136,7 @@ static void ipath_qcheck(struct ipath_devdata *dd)
         struct ipath_portdata *pd = dd->ipath_pd[0];
         size_t blen = 0;
         char buf[128];
+        u32 hdrqtail;

         *buf = 0;
         if (pd->port_hdrqfull != dd->ipath_p0_hdrqfull) {
@@ -174,17 +175,18 @@ static void ipath_qcheck(struct ipath_devdata *dd)
         if (blen)
                 ipath_dbg("%s\n", buf);

-        if (pd->port_head != (u32)
-            le64_to_cpu(*dd->ipath_hdrqtailptr)) {
+        hdrqtail = ipath_get_hdrqtail(pd);
+        if (pd->port_head != hdrqtail) {
                 if (dd->ipath_lastport0rcv_cnt ==
                     ipath_stats.sps_port0pkts) {
                         ipath_cdbg(PKT, "missing rcv interrupts? "
-                                   "port0 hd=%llx tl=%x; port0pkts %llx\n",
-                                   (unsigned long long)
-                                   le64_to_cpu(*dd->ipath_hdrqtailptr),
-                                   pd->port_head,
+                                   "port0 hd=%x tl=%x; port0pkts %llx; write"
+                                   " hd (w/intr)\n",
+                                   pd->port_head, hdrqtail,
                                    (unsigned long long)
                                    ipath_stats.sps_port0pkts);
+                        ipath_write_ureg(dd, ur_rcvhdrhead, hdrqtail |
+                                dd->ipath_rhdrhead_intr_off, pd->port_port);
                 }
                 dd->ipath_lastport0rcv_cnt = ipath_stats.sps_port0pkts;
         }
...