Commit e467e104 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IPoIB: Fix world-writable child interface control sysfs attributes
  IB/qib: Clean up properly if qib_init() fails
  IB/qib: Completion queue callback needs to be single threaded
  IB/qib: Update 7322 serdes tables
  IB/qib: Clear 6120 hardware error register
  IB/qib: Clear eager buffer memory for each new process
  IB/qib: Mask hardware error during link reset
  IB/qib: Don't mark VL15 bufs as WC to avoid a rare 7322 chip problem
  RDMA/cxgb4: Derive smac_idx from port viid
  RDMA/cxgb4: Avoid false GTS CIDX_INC overflows
  RDMA/cxgb4: Don't call abort_connection() for active connect failures
  RDMA/cxgb4: Use the DMA state API instead of the pci equivalents
parents b9f39959 9e770044
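
Most of the cxgb4 hunks below are a mechanical rename from the old PCI unmap-state helpers to the generic DMA state API. A minimal sketch of the correspondence, using a hypothetical structure (on current kernels the macros live in <linux/dma-mapping.h> and compile away unless CONFIG_NEED_DMA_MAP_STATE is set):

    #include <linux/dma-mapping.h>

    /*
     * DECLARE_PCI_UNMAP_ADDR(x)        ->  DEFINE_DMA_UNMAP_ADDR(x)
     * pci_unmap_addr_set(ptr, x, val)  ->  dma_unmap_addr_set(ptr, x, val)
     * pci_unmap_addr(ptr, x)           ->  dma_unmap_addr(ptr, x)
     */
    struct example_ring {                   /* hypothetical, for illustration */
        void *queue;
        dma_addr_t dma_addr;
        DEFINE_DMA_UNMAP_ADDR(mapping);     /* empty unless the arch needs unmap state */
    };

    static void example_save_mapping(struct example_ring *r)
    {
        /* stores the handle only when CONFIG_NEED_DMA_MAP_STATE is set */
        dma_unmap_addr_set(r, mapping, r->dma_addr);
    }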
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -969,7 +969,8 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
     goto err;
     goto out;
 err:
-    abort_connection(ep, skb, GFP_KERNEL);
+    state_set(&ep->com, ABORTING);
+    send_abort(ep, skb, GFP_KERNEL);
 out:
     connect_reply_upcall(ep, err);
     return;
@@ -1372,7 +1373,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
             pdev, 0);
         mtu = pdev->mtu;
         tx_chan = cxgb4_port_chan(pdev);
-        smac_idx = tx_chan << 1;
+        smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
         step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
         txq_idx = cxgb4_port_idx(pdev) * step;
         step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
@@ -1383,7 +1384,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
             dst->neighbour->dev, 0);
         mtu = dst_mtu(dst);
         tx_chan = cxgb4_port_chan(dst->neighbour->dev);
-        smac_idx = tx_chan << 1;
+        smac_idx = (cxgb4_port_viid(dst->neighbour->dev) & 0x7F) << 1;
         step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
         txq_idx = cxgb4_port_idx(dst->neighbour->dev) * step;
         step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
@@ -1950,7 +1951,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
             pdev, 0);
         ep->mtu = pdev->mtu;
         ep->tx_chan = cxgb4_port_chan(pdev);
-        ep->smac_idx = ep->tx_chan << 1;
+        ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
         step = ep->com.dev->rdev.lldi.ntxq /
             ep->com.dev->rdev.lldi.nchan;
         ep->txq_idx = cxgb4_port_idx(pdev) * step;
@@ -1965,7 +1966,8 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
             ep->dst->neighbour->dev, 0);
         ep->mtu = dst_mtu(ep->dst);
         ep->tx_chan = cxgb4_port_chan(ep->dst->neighbour->dev);
-        ep->smac_idx = ep->tx_chan << 1;
+        ep->smac_idx = (cxgb4_port_viid(ep->dst->neighbour->dev) &
+                0x7F) << 1;
         step = ep->com.dev->rdev.lldi.ntxq /
             ep->com.dev->rdev.lldi.nchan;
         ep->txq_idx = cxgb4_port_idx(ep->dst->neighbour->dev) * step;
...
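All three smac_idx call sites in cm.c change the same way: the source-MAC index is now derived from the port's virtual interface ID instead of the TX channel. Restated as a hypothetical helper (mask and shift copied verbatim from the hunks; the VIID bit layout itself is not documented here):

    /* hypothetical restatement of the repeated expression above */
    static inline unsigned int viid_to_smac_idx(unsigned int viid)
    {
        return (viid & 0x7F) << 1;  /* low 7 bits of the VIID, shifted */
    }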
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -77,7 +77,7 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
     kfree(cq->sw_queue);
     dma_free_coherent(&(rdev->lldi.pdev->dev),
               cq->memsize, cq->queue,
-              pci_unmap_addr(cq, mapping));
+              dma_unmap_addr(cq, mapping));
     c4iw_put_cqid(rdev, cq->cqid, uctx);
     return ret;
 }
@@ -112,7 +112,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
         ret = -ENOMEM;
         goto err3;
     }
-    pci_unmap_addr_set(cq, mapping, cq->dma_addr);
+    dma_unmap_addr_set(cq, mapping, cq->dma_addr);
     memset(cq->queue, 0, cq->memsize);

     /* build fw_ri_res_wr */
@@ -179,7 +179,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
     return 0;
 err4:
     dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
-              pci_unmap_addr(cq, mapping));
+              dma_unmap_addr(cq, mapping));
 err3:
     kfree(cq->sw_queue);
 err2:
@@ -764,7 +764,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
     struct c4iw_create_cq_resp uresp;
     struct c4iw_ucontext *ucontext = NULL;
     int ret;
-    size_t memsize;
+    size_t memsize, hwentries;
     struct c4iw_mm_entry *mm, *mm2;

     PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
@@ -788,14 +788,29 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
      * entries must be multiple of 16 for HW.
      */
     entries = roundup(entries, 16);
-    memsize = entries * sizeof *chp->cq.queue;
+
+    /*
+     * Make actual HW queue 2x to avoid cdix_inc overflows.
+     */
+    hwentries = entries * 2;
+
+    /*
+     * Make HW queue at least 64 entries so GTS updates aren't too
+     * frequent.
+     */
+    if (hwentries < 64)
+        hwentries = 64;
+    memsize = hwentries * sizeof *chp->cq.queue;

     /*
      * memsize must be a multiple of the page size if its a user cq.
      */
-    if (ucontext)
+    if (ucontext) {
         memsize = roundup(memsize, PAGE_SIZE);
-    chp->cq.size = entries;
+        hwentries = memsize / sizeof *chp->cq.queue;
+    }
+    chp->cq.size = hwentries;
     chp->cq.memsize = memsize;

     ret = create_cq(&rhp->rdev, &chp->cq,
@@ -805,7 +820,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
     chp->rhp = rhp;
     chp->cq.size--;             /* status page */
-    chp->ibcq.cqe = chp->cq.size - 1;
+    chp->ibcq.cqe = entries - 2;
     spin_lock_init(&chp->lock);
     atomic_set(&chp->refcnt, 1);
     init_waitqueue_head(&chp->wait);
...
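The sizing logic added to c4iw_create_cq() above doubles the hardware queue relative to the requested depth, enforces a 64-entry floor so GTS updates stay infrequent, and rounds user CQs up to whole pages (growing the entry count to fill the rounded size). The same arithmetic as a stand-alone sketch, with assumed constants for the CQE and page sizes:

    #include <stddef.h>

    #define CQE_SIZE 64     /* assumed sizeof(struct t4_cqe); illustrative */
    #define PAGE_SZ  4096   /* assumed PAGE_SIZE */

    static size_t cq_hw_entries(size_t entries, int is_user_cq, size_t *memsize)
    {
        size_t hwentries;

        entries = (entries + 15) & ~(size_t)15;    /* roundup(entries, 16) */
        hwentries = entries * 2;                   /* 2x absorbs cidx_inc bursts */
        if (hwentries < 64)                        /* floor keeps GTS updates rare */
            hwentries = 64;
        *memsize = hwentries * CQE_SIZE;
        if (is_user_cq) {                          /* user mappings need whole pages */
            *memsize = (*memsize + PAGE_SZ - 1) & ~(size_t)(PAGE_SZ - 1);
            hwentries = *memsize / CQE_SIZE;
        }
        return hwentries;   /* becomes chp->cq.size, before the status-page decrement */
    }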
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -261,7 +261,7 @@ static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)

 struct c4iw_fr_page_list {
     struct ib_fast_reg_page_list ibpl;
-    DECLARE_PCI_UNMAP_ADDR(mapping);
+    DEFINE_DMA_UNMAP_ADDR(mapping);
     dma_addr_t dma_addr;
     struct c4iw_dev *dev;
     int size;
...
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -764,7 +764,7 @@ struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device,
     if (!c4pl)
         return ERR_PTR(-ENOMEM);

-    pci_unmap_addr_set(c4pl, mapping, dma_addr);
+    dma_unmap_addr_set(c4pl, mapping, dma_addr);
     c4pl->dma_addr = dma_addr;
     c4pl->dev = dev;
     c4pl->size = size;
@@ -779,7 +779,7 @@ void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *ibpl)
     struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl);

     dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev, c4pl->size,
-              c4pl, pci_unmap_addr(c4pl, mapping));
+              c4pl, dma_unmap_addr(c4pl, mapping));
 }

 int c4iw_dereg_mr(struct ib_mr *ib_mr)
...
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -40,10 +40,10 @@ static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
      */
     dma_free_coherent(&(rdev->lldi.pdev->dev),
               wq->rq.memsize, wq->rq.queue,
-              pci_unmap_addr(&wq->rq, mapping));
+              dma_unmap_addr(&wq->rq, mapping));
     dma_free_coherent(&(rdev->lldi.pdev->dev),
               wq->sq.memsize, wq->sq.queue,
-              pci_unmap_addr(&wq->sq, mapping));
+              dma_unmap_addr(&wq->sq, mapping));
     c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
     kfree(wq->rq.sw_rq);
     kfree(wq->sq.sw_sq);
@@ -99,7 +99,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
     if (!wq->sq.queue)
         goto err5;
     memset(wq->sq.queue, 0, wq->sq.memsize);
-    pci_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
+    dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);

     wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
                       wq->rq.memsize, &(wq->rq.dma_addr),
@@ -112,7 +112,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
         wq->rq.queue,
         (unsigned long long)virt_to_phys(wq->rq.queue));
     memset(wq->rq.queue, 0, wq->rq.memsize);
-    pci_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
+    dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);

     wq->db = rdev->lldi.db_reg;
     wq->gts = rdev->lldi.gts_reg;
@@ -217,11 +217,11 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 err7:
     dma_free_coherent(&(rdev->lldi.pdev->dev),
               wq->rq.memsize, wq->rq.queue,
-              pci_unmap_addr(&wq->rq, mapping));
+              dma_unmap_addr(&wq->rq, mapping));
 err6:
     dma_free_coherent(&(rdev->lldi.pdev->dev),
               wq->sq.memsize, wq->sq.queue,
-              pci_unmap_addr(&wq->sq, mapping));
+              dma_unmap_addr(&wq->sq, mapping));
 err5:
     c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
 err4:
...
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -279,7 +279,7 @@ struct t4_swsqe {
 struct t4_sq {
     union t4_wr *queue;
     dma_addr_t dma_addr;
-    DECLARE_PCI_UNMAP_ADDR(mapping);
+    DEFINE_DMA_UNMAP_ADDR(mapping);
     struct t4_swsqe *sw_sq;
     struct t4_swsqe *oldest_read;
     u64 udb;
@@ -298,7 +298,7 @@ struct t4_swrqe {
 struct t4_rq {
     union t4_recv_wr *queue;
     dma_addr_t dma_addr;
-    DECLARE_PCI_UNMAP_ADDR(mapping);
+    DEFINE_DMA_UNMAP_ADDR(mapping);
     struct t4_swrqe *sw_rq;
     u64 udb;
     size_t memsize;
@@ -429,7 +429,7 @@ static inline int t4_wq_db_enabled(struct t4_wq *wq)
 struct t4_cq {
     struct t4_cqe *queue;
     dma_addr_t dma_addr;
-    DECLARE_PCI_UNMAP_ADDR(mapping);
+    DEFINE_DMA_UNMAP_ADDR(mapping);
     struct t4_cqe *sw_queue;
     void __iomem *gts;
     struct c4iw_rdev *rdev;
...
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -686,6 +686,7 @@ struct qib_devdata {
     void __iomem *piobase;
     /* mem-mapped pointer to base of user chip regs (if using WC PAT) */
     u64 __iomem *userbase;
+    void __iomem *piovl15base; /* base of VL15 buffers, if not WC */
     /*
      * points to area where PIOavail registers will be DMA'ed.
      * Has to be on a page of it's own, because the page will be
...
--- a/drivers/infiniband/hw/qib/qib_7322_regs.h
+++ b/drivers/infiniband/hw/qib/qib_7322_regs.h
@@ -742,15 +742,15 @@
 #define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_LSB 0xF
 #define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_MSB 0xF
 #define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_RMASK 0x1
-#define QIB_7322_HwErrMask_statusValidNoEopMask_1_LSB 0xE
-#define QIB_7322_HwErrMask_statusValidNoEopMask_1_MSB 0xE
-#define QIB_7322_HwErrMask_statusValidNoEopMask_1_RMASK 0x1
+#define QIB_7322_HwErrMask_IBCBusToSPCParityErrMask_1_LSB 0xE
+#define QIB_7322_HwErrMask_IBCBusToSPCParityErrMask_1_MSB 0xE
+#define QIB_7322_HwErrMask_IBCBusToSPCParityErrMask_1_RMASK 0x1
 #define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_LSB 0xD
 #define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_MSB 0xD
 #define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_RMASK 0x1
-#define QIB_7322_HwErrMask_statusValidNoEopMask_0_LSB 0xC
-#define QIB_7322_HwErrMask_statusValidNoEopMask_0_MSB 0xC
-#define QIB_7322_HwErrMask_statusValidNoEopMask_0_RMASK 0x1
+#define QIB_7322_HwErrMask_statusValidNoEopMask_LSB 0xC
+#define QIB_7322_HwErrMask_statusValidNoEopMask_MSB 0xC
+#define QIB_7322_HwErrMask_statusValidNoEopMask_RMASK 0x1
 #define QIB_7322_HwErrMask_LATriggeredMask_LSB 0xB
 #define QIB_7322_HwErrMask_LATriggeredMask_MSB 0xB
 #define QIB_7322_HwErrMask_LATriggeredMask_RMASK 0x1
@@ -796,15 +796,15 @@
 #define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_LSB 0xF
 #define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_MSB 0xF
 #define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_RMASK 0x1
-#define QIB_7322_HwErrStatus_statusValidNoEop_1_LSB 0xE
-#define QIB_7322_HwErrStatus_statusValidNoEop_1_MSB 0xE
-#define QIB_7322_HwErrStatus_statusValidNoEop_1_RMASK 0x1
+#define QIB_7322_HwErrStatus_IBCBusToSPCParityErr_1_LSB 0xE
+#define QIB_7322_HwErrStatus_IBCBusToSPCParityErr_1_MSB 0xE
+#define QIB_7322_HwErrStatus_IBCBusToSPCParityErr_1_RMASK 0x1
 #define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_LSB 0xD
 #define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_MSB 0xD
 #define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_RMASK 0x1
-#define QIB_7322_HwErrStatus_statusValidNoEop_0_LSB 0xC
-#define QIB_7322_HwErrStatus_statusValidNoEop_0_MSB 0xC
-#define QIB_7322_HwErrStatus_statusValidNoEop_0_RMASK 0x1
+#define QIB_7322_HwErrStatus_statusValidNoEop_LSB 0xC
+#define QIB_7322_HwErrStatus_statusValidNoEop_MSB 0xC
+#define QIB_7322_HwErrStatus_statusValidNoEop_RMASK 0x1
 #define QIB_7322_HwErrStatus_LATriggered_LSB 0xB
 #define QIB_7322_HwErrStatus_LATriggered_MSB 0xB
 #define QIB_7322_HwErrStatus_LATriggered_RMASK 0x1
@@ -850,15 +850,15 @@
 #define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_LSB 0xF
 #define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_MSB 0xF
 #define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_RMASK 0x1
-#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_1_LSB 0xE
-#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_1_MSB 0xE
-#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_1_RMASK 0x1
+#define QIB_7322_HwErrClear_IBCBusToSPCParityErrClear_1_LSB 0xE
+#define QIB_7322_HwErrClear_IBCBusToSPCParityErrClear_1_MSB 0xE
+#define QIB_7322_HwErrClear_IBCBusToSPCParityErrClear_1_RMASK 0x1
 #define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_LSB 0xD
 #define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_MSB 0xD
 #define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_RMASK 0x1
-#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_0_LSB 0xC
-#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_0_MSB 0xC
-#define QIB_7322_HwErrClear_IBCBusToSPCparityErrClear_0_RMASK 0x1
+#define QIB_7322_HwErrClear_statusValidNoEopClear_LSB 0xC
+#define QIB_7322_HwErrClear_statusValidNoEopClear_MSB 0xC
+#define QIB_7322_HwErrClear_statusValidNoEopClear_RMASK 0x1
 #define QIB_7322_HwErrClear_LATriggeredClear_LSB 0xB
 #define QIB_7322_HwErrClear_LATriggeredClear_MSB 0xB
 #define QIB_7322_HwErrClear_LATriggeredClear_RMASK 0x1
@@ -880,15 +880,15 @@
 #define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_LSB 0xF
 #define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_MSB 0xF
 #define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_RMASK 0x1
-#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_1_LSB 0xE
-#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_1_MSB 0xE
-#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_1_RMASK 0x1
+#define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_1_LSB 0xE
+#define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_1_MSB 0xE
+#define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_1_RMASK 0x1
 #define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_LSB 0xD
 #define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_MSB 0xD
 #define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_RMASK 0x1
-#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_0_LSB 0xC
-#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_0_MSB 0xC
-#define QIB_7322_HwDiagCtrl_ForcestatusValidNoEop_0_RMASK 0x1
+#define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_0_LSB 0xC
+#define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_0_MSB 0xC
+#define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_0_RMASK 0x1
 #define QIB_7322_EXTStatus_OFFS 0xC0
 #define QIB_7322_EXTStatus_DEF 0x000000000000X000
...
--- a/drivers/infiniband/hw/qib/qib_diag.c
+++ b/drivers/infiniband/hw/qib/qib_diag.c
@@ -233,6 +233,7 @@ static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
     u32 __iomem *krb32 = (u32 __iomem *)dd->kregbase;
     u32 __iomem *map = NULL;
     u32 cnt = 0;
+    u32 tot4k, offs4k;

     /* First, simplest case, offset is within the first map. */
     kreglen = (dd->kregend - dd->kregbase) * sizeof(u64);
@@ -250,7 +251,8 @@ static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
     if (dd->userbase) {
         /* If user regs mapped, they are after send, so set limit. */
         u32 ulim = (dd->cfgctxts * dd->ureg_align) + dd->uregbase;
-        snd_lim = dd->uregbase;
+        if (!dd->piovl15base)
+            snd_lim = dd->uregbase;
         krb32 = (u32 __iomem *)dd->userbase;
         if (offset >= dd->uregbase && offset < ulim) {
             map = krb32 + (offset - dd->uregbase) / sizeof(u32);
@@ -277,14 +279,14 @@ static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
     /* If 4k buffers exist, account for them by bumping
      * appropriate limit.
      */
+    tot4k = dd->piobcnt4k * dd->align4k;
+    offs4k = dd->piobufbase >> 32;
     if (dd->piobcnt4k) {
-        u32 tot4k = dd->piobcnt4k * dd->align4k;
-        u32 offs4k = dd->piobufbase >> 32;
         if (snd_bottom > offs4k)
             snd_bottom = offs4k;
         else {
             /* 4k above 2k. Bump snd_lim, if needed*/
-            if (!dd->userbase)
+            if (!dd->userbase || dd->piovl15base)
                 snd_lim = offs4k + tot4k;
         }
     }
@@ -298,6 +300,15 @@ static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
             cnt = snd_lim - offset;
         }
     }
+    if (!map && offs4k && dd->piovl15base) {
+        snd_lim = offs4k + tot4k + 2 * dd->align4k;
+        if (offset >= (offs4k + tot4k) && offset < snd_lim) {
+            map = (u32 __iomem *)dd->piovl15base +
+                ((offset - (offs4k + tot4k)) / sizeof(u32));
+            cnt = snd_lim - offset;
+        }
+    }
 mapped:
     if (cntp)
         *cntp = cnt;
...
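The new branch in qib_remap_ioaddr32() above matches offsets that fall in the VL15 send buffers, which start immediately after the tot4k bytes of ordinary 4 KB buffers and are mapped separately; the 2 * dd->align4k bound reflects the two VL15 buffers. The window test in isolation:

    #include <stdbool.h>
    #include <stdint.h>

    /* Two VL15 buffers of align4k bytes each, starting right after the
     * ordinary 4 KB buffers; restatement of the bounds check above. */
    static bool in_vl15_window(uint32_t offset, uint32_t offs4k,
                               uint32_t tot4k, uint32_t align4k)
    {
        uint32_t start = offs4k + tot4k;
        uint32_t limit = start + 2 * align4k;

        return offset >= start && offset < limit;
    }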
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -1355,8 +1355,7 @@ static int qib_6120_bringup_serdes(struct qib_pportdata *ppd)
     hwstat = qib_read_kreg64(dd, kr_hwerrstatus);
     if (hwstat) {
         /* should just have PLL, clear all set, in an case */
-        if (hwstat & ~QLOGIC_IB_HWE_SERDESPLLFAILED)
-            qib_write_kreg(dd, kr_hwerrclear, hwstat);
+        qib_write_kreg(dd, kr_hwerrclear, hwstat);
         qib_write_kreg(dd, kr_errclear, ERR_MASK(HardwareErr));
     }
...
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -543,7 +543,7 @@ struct vendor_txdds_ent {
 static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);

 #define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
-#define TXDDS_EXTRA_SZ 11 /* number of extra tx settings entries */
+#define TXDDS_EXTRA_SZ 13 /* number of extra tx settings entries */
 #define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */

 #define H1_FORCE_VAL 8
@@ -1100,9 +1100,9 @@ static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
     HWE_AUTO_P(SDmaMemReadErr, 1),
     HWE_AUTO_P(SDmaMemReadErr, 0),
     HWE_AUTO_P(IBCBusFromSPCParityErr, 1),
+    HWE_AUTO_P(IBCBusToSPCParityErr, 1),
     HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
-    HWE_AUTO_P(statusValidNoEop, 1),
-    HWE_AUTO_P(statusValidNoEop, 0),
+    HWE_AUTO(statusValidNoEop),
     HWE_AUTO(LATriggered),
     { .mask = 0 }
 };
@@ -4763,6 +4763,8 @@ static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
         SYM_MASK(IBPCSConfig_0, tx_rx_reset);

     val = qib_read_kreg_port(ppd, krp_ib_pcsconfig);
+    qib_write_kreg(dd, kr_hwerrmask,
+               dd->cspec->hwerrmask & ~HWE_MASK(statusValidNoEop));
     qib_write_kreg_port(ppd, krp_ibcctrl_a,
                 ppd->cpspec->ibcctrl_a &
                 ~SYM_MASK(IBCCtrlA_0, IBLinkEn));
@@ -4772,6 +4774,9 @@ static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
     qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits);
     qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
     qib_write_kreg(dd, kr_scratch, 0ULL);
+    qib_write_kreg(dd, kr_hwerrclear,
+               SYM_MASK(HwErrClear, statusValidNoEopClear));
+    qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
 }

 /*
@@ -5624,6 +5629,8 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
         if (ppd->port != port || !ppd->link_speed_supported)
             continue;
         ppd->cpspec->no_eep = val;
+        if (seth1)
+            ppd->cpspec->h1_val = h1;
         /* now change the IBC and serdes, overriding generic */
         init_txdds_table(ppd, 1);
         any++;
@@ -6064,9 +6071,9 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
              * the "cable info" setup here. Can be overridden
              * in adapter-specific routines.
              */
-            if (!(ppd->dd->flags & QIB_HAS_QSFP)) {
-                if (!IS_QMH(ppd->dd) && !IS_QME(ppd->dd))
-                    qib_devinfo(ppd->dd->pcidev, "IB%u:%u: "
+            if (!(dd->flags & QIB_HAS_QSFP)) {
+                if (!IS_QMH(dd) && !IS_QME(dd))
+                    qib_devinfo(dd->pcidev, "IB%u:%u: "
                         "Unknown mezzanine card type\n",
                         dd->unit, ppd->port);
                 cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME;
@@ -6119,9 +6126,25 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
     qib_set_ctxtcnt(dd);

     if (qib_wc_pat) {
-        ret = init_chip_wc_pat(dd, NUM_VL15_BUFS * dd->align4k);
+        resource_size_t vl15off;
+        /*
+         * We do not set WC on the VL15 buffers to avoid
+         * a rare problem with unaligned writes from
+         * interrupt-flushed store buffers, so we need
+         * to map those separately here. We can't solve
+         * this for the rarely used mtrr case.
+         */
+        ret = init_chip_wc_pat(dd, 0);
         if (ret)
             goto bail;
+
+        /* vl15 buffers start just after the 4k buffers */
+        vl15off = dd->physaddr + (dd->piobufbase >> 32) +
+              dd->piobcnt4k * dd->align4k;
+        dd->piovl15base = ioremap_nocache(vl15off,
+                          NUM_VL15_BUFS * dd->align4k);
+        if (!dd->piovl15base)
+            goto bail;
     }
     qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
@@ -6932,6 +6955,8 @@ static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = {
     { 0, 0, 0, 11 },    /* QME7342 backplane settings */
     { 0, 0, 0, 11 },    /* QME7342 backplane settings */
     { 0, 0, 0, 11 },    /* QME7342 backplane settings */
+    { 0, 0, 0, 3 },     /* QMH7342 backplane settings */
+    { 0, 0, 0, 4 },     /* QMH7342 backplane settings */
 };

 static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = {
@@ -6947,6 +6972,8 @@ static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = {
     { 0, 0, 0, 13 },    /* QME7342 backplane settings */
     { 0, 0, 0, 13 },    /* QME7342 backplane settings */
     { 0, 0, 0, 13 },    /* QME7342 backplane settings */
+    { 0, 0, 0, 9 },     /* QMH7342 backplane settings */
+    { 0, 0, 0, 10 },    /* QMH7342 backplane settings */
 };

 static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
@@ -6962,6 +6989,8 @@ static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
     { 0, 1, 12, 6 },    /* QME7342 backplane setting */
     { 0, 1, 12, 7 },    /* QME7342 backplane setting */
     { 0, 1, 12, 8 },    /* QME7342 backplane setting */
+    { 0, 1, 0, 10 },    /* QMH7342 backplane settings */
+    { 0, 1, 0, 12 },    /* QMH7342 backplane settings */
 };

 static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
...
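The two qib_7322_mini_pcs_reset() hunks above bracket the link reset with error masking: statusValidNoEop is expected to fire during the reset, so it is masked beforehand, the latched status is cleared afterwards, and the original mask is restored. The pattern reduced to a sketch, with hypothetical stand-ins for the chip registers:

    #include <stdint.h>

    /* hypothetical shadows of kr_hwerrmask / kr_hwerrclear */
    static volatile uint64_t hwerrmask_reg;
    static volatile uint64_t hwerrclear_reg;

    static void link_reset_masking_expected_error(uint64_t saved_mask,
                                                  uint64_t expected_err,
                                                  void (*toggle_link)(void))
    {
        hwerrmask_reg = saved_mask & ~expected_err; /* 1. hide the expected error */
        toggle_link();                              /* 2. perform the link reset */
        hwerrclear_reg = expected_err;              /* 3. discard the latched status */
        hwerrmask_reg = saved_mask;                 /* 4. restore normal reporting */
    }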
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1059,7 +1059,7 @@ static int __init qlogic_ib_init(void)
         goto bail_dev;
     }

-    qib_cq_wq = create_workqueue("qib_cq");
+    qib_cq_wq = create_singlethread_workqueue("qib_cq");
     if (!qib_cq_wq) {
         ret = -ENOMEM;
         goto bail_wq;
@@ -1289,8 +1289,18 @@ static int __devinit qib_init_one(struct pci_dev *pdev,
     if (qib_mini_init || initfail || ret) {
         qib_stop_timers(dd);
+        flush_scheduled_work();
         for (pidx = 0; pidx < dd->num_pports; ++pidx)
             dd->f_quiet_serdes(dd->pport + pidx);
+        if (qib_mini_init)
+            goto bail;
+        if (!j) {
+            (void) qibfs_remove(dd);
+            qib_device_remove(dd);
+        }
+        if (!ret)
+            qib_unregister_ib_device(dd);
+        qib_postinit_cleanup(dd);
         if (initfail)
             ret = initfail;
         goto bail;
@@ -1472,6 +1482,9 @@ int qib_setup_eagerbufs(struct qib_ctxtdata *rcd)
         dma_addr_t pa = rcd->rcvegrbuf_phys[chunk];
         unsigned i;

+        /* clear for security and sanity on each use */
+        memset(rcd->rcvegrbuf[chunk], 0, size);
+
         for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) {
             dd->f_put_tid(dd, e + egroff +
                       (u64 __iomem *)
@@ -1499,6 +1512,12 @@ int qib_setup_eagerbufs(struct qib_ctxtdata *rcd)
         return -ENOMEM;
     }

+/*
+ * Note: Changes to this routine should be mirrored
+ * for the diagnostics routine qib_remap_ioaddr32().
+ * There is also related code for VL15 buffers in qib_init_7322_variables().
+ * The teardown code that unmaps is in qib_pcie_ddcleanup()
+ */
 int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen)
 {
     u64 __iomem *qib_kregbase = NULL;
...
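The qlogic_ib_init() change above matters because the CQ completion callbacks are not written to run concurrently with each other; a single-threaded workqueue serializes everything queued to it. A minimal sketch of the allocation (same API call as the hunk, illustrative names):

    #include <linux/errno.h>
    #include <linux/init.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *example_cq_wq;

    static int __init example_init(void)
    {
        /* one worker thread: queued completions never run in parallel */
        example_cq_wq = create_singlethread_workqueue("qib_cq");
        if (!example_cq_wq)
            return -ENOMEM;
        return 0;
    }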
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -179,6 +179,8 @@ void qib_pcie_ddcleanup(struct qib_devdata *dd)
         iounmap(dd->piobase);
     if (dd->userbase)
         iounmap(dd->userbase);
+    if (dd->piovl15base)
+        iounmap(dd->piovl15base);

     pci_disable_device(dd->pcidev);
     pci_release_regions(dd->pcidev);
...
--- a/drivers/infiniband/hw/qib/qib_tx.c
+++ b/drivers/infiniband/hw/qib/qib_tx.c
@@ -340,9 +340,13 @@ u32 __iomem *qib_getsendbuf_range(struct qib_devdata *dd, u32 *pbufnum,
         if (i < dd->piobcnt2k)
             buf = (u32 __iomem *)(dd->pio2kbase +
                 i * dd->palign);
-        else
+        else if (i < dd->piobcnt2k + dd->piobcnt4k || !dd->piovl15base)
             buf = (u32 __iomem *)(dd->pio4kbase +
                 (i - dd->piobcnt2k) * dd->align4k);
+        else
+            buf = (u32 __iomem *)(dd->piovl15base +
+                (i - (dd->piobcnt2k + dd->piobcnt4k)) *
+                dd->align4k);
         if (pbufnum)
             *pbufnum = i;
         dd->upd_pio_shadow = 0;
...
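With the VL15 buffers carved out of the write-combining mapping, qib_getsendbuf_range() above has to translate a buffer index into one of three regions. The index-to-region arithmetic in isolation, using plain offsets instead of __iomem pointers (struct and names hypothetical):

    #include <stdint.h>

    struct pio_layout {                  /* summary of the qib_devdata fields used above */
        uint32_t piobcnt2k, piobcnt4k;   /* number of 2 KB and 4 KB buffers */
        uint32_t palign, align4k;        /* per-buffer strides */
        int have_vl15;                   /* piovl15base != NULL */
    };

    /* returns the region (0 = 2k, 1 = 4k, 2 = vl15) and the byte offset within it */
    static int pio_index_to_region(const struct pio_layout *l, uint32_t i,
                                   uint32_t *off)
    {
        if (i < l->piobcnt2k) {
            *off = i * l->palign;
            return 0;
        }
        if (i < l->piobcnt2k + l->piobcnt4k || !l->have_vl15) {
            *off = (i - l->piobcnt2k) * l->align4k;
            return 1;
        }
        *off = (i - (l->piobcnt2k + l->piobcnt4k)) * l->align4k;
        return 2;
    }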
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1163,7 +1163,7 @@ static ssize_t create_child(struct device *dev,
     return ret ? ret : count;
 }

-static DEVICE_ATTR(create_child, S_IWUGO, NULL, create_child);
+static DEVICE_ATTR(create_child, S_IWUSR, NULL, create_child);

 static ssize_t delete_child(struct device *dev,
                 struct device_attribute *attr,
@@ -1183,7 +1183,7 @@ static ssize_t delete_child(struct device *dev,
     return ret ? ret : count;
 }

-static DEVICE_ATTR(delete_child, S_IWUGO, NULL, delete_child);
+static DEVICE_ATTR(delete_child, S_IWUSR, NULL, delete_child);

 int ipoib_add_pkey_attr(struct net_device *dev)
 {
...
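The IPoIB change above is purely a permissions fix: S_IWUGO (0222) lets any user write the create_child and delete_child attributes, while S_IWUSR (0200) restricts writes to the owner, which for sysfs is root. The octal values, spelled out:

    #include <stdio.h>

    #define S_IWUSR 0200                            /* owner write */
    #define S_IWGRP 0020                            /* group write */
    #define S_IWOTH 0002                            /* other write */
    #define S_IWUGO (S_IWUSR | S_IWGRP | S_IWOTH)   /* 0222: world-writable */

    int main(void)
    {
        printf("S_IWUGO = %04o (the bug), S_IWUSR = %04o (the fix)\n",
               S_IWUGO, S_IWUSR);
        return 0;
    }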