Commit 700456bd authored by Joe Perches, committed by Doug Ledford

cxgb4: Use more common logging style

Convert printks to pr_<level>

Miscellanea:

o Coalesce formats
o Realign arguments
Signed-off-by: Joe Perches <joe@perches.com>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent b7b37ee0
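
The conversion applied throughout the diff below follows one pattern; a minimal before/after sketch (the message text here is illustrative, not taken from the driver):

	/* Before: explicit level, hand-rolled "iw_cxgb4:" prefix via MOD, and a
	 * user-visible string split across source lines, which defeats grep. */
	printk(KERN_ERR MOD "error %d "
	       "initializing widget\n", err);

	/* After: pr_err() implies KERN_ERR, pr_fmt() (defined in iw_cxgb4.h
	 * below) supplies the module prefix, the format is coalesced into one
	 * grep-able string, and the arguments are realigned to the open paren. */
	pr_err("error %d initializing widget\n", err);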
@@ -159,7 +159,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 			    &cq->bar2_qid,
 			    user ? &cq->bar2_pa : NULL);
 	if (user && !cq->bar2_pa) {
-		pr_warn(MOD "%s: cqid %u not in BAR2 range.\n",
+		pr_warn("%s: cqid %u not in BAR2 range\n",
 			pci_name(rdev->lldi.pdev), cq->cqid);
 		ret = -EINVAL;
 		goto err4;
@@ -766,8 +766,7 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
 			wc->opcode = IB_WC_SEND;
 			break;
 		default:
-			printk(KERN_ERR MOD "Unexpected opcode %d "
-			       "in the CQE received for QPID=0x%0x\n",
+			pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n",
 			       CQE_OPCODE(&cqe), CQE_QPID(&cqe));
 			ret = -EINVAL;
 			goto out;
@@ -822,8 +821,7 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
 		wc->status = IB_WC_WR_FLUSH_ERR;
 		break;
 	default:
-		printk(KERN_ERR MOD
-		       "Unexpected cqe_status 0x%x for QPID=0x%0x\n",
+		pr_err("Unexpected cqe_status 0x%x for QPID=0x%0x\n",
 		       CQE_STATUS(&cqe), CQE_QPID(&cqe));
 		wc->status = IB_WC_FATAL_ERR;
 	}
...
@@ -334,7 +334,7 @@ static int qp_release(struct inode *inode, struct file *file)
 {
 	struct c4iw_debugfs_data *qpd = file->private_data;
 	if (!qpd) {
-		printk(KERN_INFO "%s null qpd?\n", __func__);
+		pr_info("%s null qpd?\n", __func__);
 		return 0;
 	}
 	vfree(qpd->buf);
@@ -422,7 +422,7 @@ static int stag_release(struct inode *inode, struct file *file)
 {
 	struct c4iw_debugfs_data *stagd = file->private_data;
 	if (!stagd) {
-		printk(KERN_INFO "%s null stagd?\n", __func__);
+		pr_info("%s null stagd?\n", __func__);
 		return 0;
 	}
 	vfree(stagd->buf);
@@ -796,15 +796,14 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
 	 * cqid and qpid range must match for now.
 	 */
	if (rdev->lldi.udb_density != rdev->lldi.ucq_density) {
-		pr_err(MOD "%s: unsupported udb/ucq densities %u/%u\n",
+		pr_err("%s: unsupported udb/ucq densities %u/%u\n",
 		       pci_name(rdev->lldi.pdev), rdev->lldi.udb_density,
 		       rdev->lldi.ucq_density);
 		return -EINVAL;
 	}
 	if (rdev->lldi.vr->qp.start != rdev->lldi.vr->cq.start ||
 	    rdev->lldi.vr->qp.size != rdev->lldi.vr->cq.size) {
-		pr_err(MOD "%s: unsupported qp and cq id ranges "
-		       "qp start %u size %u cq start %u size %u\n",
+		pr_err("%s: unsupported qp and cq id ranges qp start %u size %u cq start %u size %u\n",
 		       pci_name(rdev->lldi.pdev), rdev->lldi.vr->qp.start,
 		       rdev->lldi.vr->qp.size, rdev->lldi.vr->cq.size,
 		       rdev->lldi.vr->cq.size);
@@ -843,22 +842,22 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
 	err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
 	if (err) {
-		printk(KERN_ERR MOD "error %d initializing resources\n", err);
+		pr_err("error %d initializing resources\n", err);
 		return err;
 	}
 	err = c4iw_pblpool_create(rdev);
 	if (err) {
-		printk(KERN_ERR MOD "error %d initializing pbl pool\n", err);
+		pr_err("error %d initializing pbl pool\n", err);
 		goto destroy_resource;
 	}
 	err = c4iw_rqtpool_create(rdev);
 	if (err) {
-		printk(KERN_ERR MOD "error %d initializing rqt pool\n", err);
+		pr_err("error %d initializing rqt pool\n", err);
 		goto destroy_pblpool;
 	}
 	err = c4iw_ocqp_pool_create(rdev);
 	if (err) {
-		printk(KERN_ERR MOD "error %d initializing ocqp pool\n", err);
+		pr_err("error %d initializing ocqp pool\n", err);
 		goto destroy_rqtpool;
 	}
 	rdev->status_page = (struct t4_dev_status_page *)
@@ -954,17 +953,17 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 	int ret;
 	if (!rdma_supported(infop)) {
-		printk(KERN_INFO MOD "%s: RDMA not supported on this device.\n",
-		       pci_name(infop->pdev));
+		pr_info("%s: RDMA not supported on this device\n",
+			pci_name(infop->pdev));
 		return ERR_PTR(-ENOSYS);
 	}
 	if (!ocqp_supported(infop))
-		pr_info("%s: On-Chip Queues not supported on this device.\n",
+		pr_info("%s: On-Chip Queues not supported on this device\n",
 			pci_name(infop->pdev));
 	devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
 	if (!devp) {
-		printk(KERN_ERR MOD "Cannot allocate ib device\n");
+		pr_err("Cannot allocate ib device\n");
 		return ERR_PTR(-ENOMEM);
 	}
 	devp->rdev.lldi = *infop;
@@ -1000,7 +999,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 	devp->rdev.bar2_kva = ioremap_wc(devp->rdev.bar2_pa,
 					 pci_resource_len(devp->rdev.lldi.pdev, 2));
 	if (!devp->rdev.bar2_kva) {
-		pr_err(MOD "Unable to ioremap BAR2\n");
+		pr_err("Unable to ioremap BAR2\n");
 		ib_dealloc_device(&devp->ibdev);
 		return ERR_PTR(-EINVAL);
 	}
@@ -1012,7 +1011,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 	devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
 					  devp->rdev.lldi.vr->ocq.size);
 	if (!devp->rdev.oc_mw_kva) {
-		pr_err(MOD "Unable to ioremap onchip mem\n");
+		pr_err("Unable to ioremap onchip mem\n");
 		ib_dealloc_device(&devp->ibdev);
 		return ERR_PTR(-EINVAL);
 	}
@@ -1025,7 +1024,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 	ret = c4iw_rdev_open(&devp->rdev);
 	if (ret) {
-		printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
+		pr_err("Unable to open CXIO rdev err %d\n", ret);
 		ib_dealloc_device(&devp->ibdev);
 		return ERR_PTR(ret);
 	}
@@ -1138,8 +1137,7 @@ static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl,
 		goto out;
 	if (c4iw_handlers[opcode] == NULL) {
-		pr_info("%s no handler opcode 0x%x...\n", __func__,
-			opcode);
+		pr_info("%s no handler opcode 0x%x...\n", __func__, opcode);
 		kfree_skb(skb);
 		goto out;
 	}
@@ -1176,13 +1174,11 @@ static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
 		if (recv_rx_pkt(dev, gl, rsp))
 			return 0;
-		pr_info("%s: unexpected FL contents at %p, " \
-			"RSS %#llx, FL %#llx, len %u\n",
-			pci_name(ctx->lldi.pdev), gl->va,
-			(unsigned long long)be64_to_cpu(*rsp),
-			(unsigned long long)be64_to_cpu(
-			*(__force __be64 *)gl->va),
-			gl->tot_len);
+		pr_info("%s: unexpected FL contents at %p, RSS %#llx, FL %#llx, len %u\n",
+			pci_name(ctx->lldi.pdev), gl->va,
+			be64_to_cpu(*rsp),
+			be64_to_cpu(*(__force __be64 *)gl->va),
+			gl->tot_len);
 		return 0;
 	} else {
@@ -1195,8 +1191,7 @@ static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
 	if (c4iw_handlers[opcode]) {
 		c4iw_handlers[opcode](dev, skb);
 	} else {
-		pr_info("%s no handler opcode 0x%x...\n", __func__,
-			opcode);
+		pr_info("%s no handler opcode 0x%x...\n", __func__, opcode);
 		kfree_skb(skb);
 	}
@@ -1212,14 +1207,13 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
 	PDBG("%s new_state %u\n", __func__, new_state);
 	switch (new_state) {
 	case CXGB4_STATE_UP:
-		printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev));
+		pr_info("%s: Up\n", pci_name(ctx->lldi.pdev));
 		if (!ctx->dev) {
 			int ret;
 			ctx->dev = c4iw_alloc(&ctx->lldi);
 			if (IS_ERR(ctx->dev)) {
-				printk(KERN_ERR MOD
-				       "%s: initialization failed: %ld\n",
+				pr_err("%s: initialization failed: %ld\n",
 				       pci_name(ctx->lldi.pdev),
 				       PTR_ERR(ctx->dev));
 				ctx->dev = NULL;
@@ -1227,22 +1221,19 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
 			}
 			ret = c4iw_register_device(ctx->dev);
 			if (ret) {
-				printk(KERN_ERR MOD
-				       "%s: RDMA registration failed: %d\n",
+				pr_err("%s: RDMA registration failed: %d\n",
 				       pci_name(ctx->lldi.pdev), ret);
 				c4iw_dealloc(ctx);
 			}
 		}
 		break;
 	case CXGB4_STATE_DOWN:
-		printk(KERN_INFO MOD "%s: Down\n",
-		       pci_name(ctx->lldi.pdev));
+		pr_info("%s: Down\n", pci_name(ctx->lldi.pdev));
 		if (ctx->dev)
 			c4iw_remove(ctx);
 		break;
 	case CXGB4_STATE_START_RECOVERY:
-		printk(KERN_INFO MOD "%s: Fatal Error\n",
-		       pci_name(ctx->lldi.pdev));
+		pr_info("%s: Fatal Error\n", pci_name(ctx->lldi.pdev));
 		if (ctx->dev) {
 			struct ib_event event;
@@ -1255,8 +1246,7 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
 		}
 		break;
 	case CXGB4_STATE_DETACH:
-		printk(KERN_INFO MOD "%s: Detach\n",
-		       pci_name(ctx->lldi.pdev));
+		pr_info("%s: Detach\n", pci_name(ctx->lldi.pdev));
 		if (ctx->dev)
 			c4iw_remove(ctx);
 		break;
@@ -1406,9 +1396,7 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
 					  t4_sq_host_wq_pidx(&qp->wq),
 					  t4_sq_wq_size(&qp->wq));
 		if (ret) {
-			pr_err(MOD "%s: Fatal error - "
-			       "DB overflow recovery failed - "
-			       "error syncing SQ qid %u\n",
+			pr_err("%s: Fatal error - DB overflow recovery failed - error syncing SQ qid %u\n",
 			       pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
 			spin_unlock(&qp->lock);
 			spin_unlock_irq(&qp->rhp->lock);
@@ -1422,9 +1410,7 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
 					  t4_rq_wq_size(&qp->wq));
 		if (ret) {
-			pr_err(MOD "%s: Fatal error - "
-			       "DB overflow recovery failed - "
-			       "error syncing RQ qid %u\n",
+			pr_err("%s: Fatal error - DB overflow recovery failed - error syncing RQ qid %u\n",
 			       pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
 			spin_unlock(&qp->lock);
 			spin_unlock_irq(&qp->rhp->lock);
@@ -1455,7 +1441,7 @@ static void recover_queues(struct uld_ctx *ctx)
 	/* flush the SGE contexts */
 	ret = cxgb4_flush_eq_cache(ctx->dev->rdev.lldi.ports[0]);
 	if (ret) {
-		printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
+		pr_err("%s: Fatal error - DB overflow recovery failed\n",
 		       pci_name(ctx->lldi.pdev));
 		return;
 	}
@@ -1513,8 +1499,8 @@ static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
 		mutex_unlock(&ctx->dev->rdev.stats.lock);
 		break;
 	default:
-		printk(KERN_WARNING MOD "%s: unknown control cmd %u\n",
-		       pci_name(ctx->lldi.pdev), control);
+		pr_warn("%s: unknown control cmd %u\n",
			pci_name(ctx->lldi.pdev), control);
 		break;
 	}
 	return 0;
@@ -1543,8 +1529,7 @@ static int __init c4iw_init_module(void)
 	c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
 	if (!c4iw_debugfs_root)
-		printk(KERN_WARNING MOD
-		       "could not create debugfs entry, continuing\n");
+		pr_warn("could not create debugfs entry, continuing\n");
 	cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);
...
@@ -124,8 +124,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
 	spin_lock_irq(&dev->lock);
 	qhp = get_qhp(dev, CQE_QPID(err_cqe));
 	if (!qhp) {
-		printk(KERN_ERR MOD "BAD AE qpid 0x%x opcode %d "
-		       "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
+		pr_err("BAD AE qpid 0x%x opcode %d status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
 		       CQE_QPID(err_cqe),
 		       CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
 		       CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
@@ -140,8 +139,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
 	cqid = qhp->attr.rcq;
 	chp = get_chp(dev, cqid);
 	if (!chp) {
-		printk(KERN_ERR MOD "BAD AE cqid 0x%x qpid 0x%x opcode %d "
-		       "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
+		pr_err("BAD AE cqid 0x%x qpid 0x%x opcode %d status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
 		       cqid, CQE_QPID(err_cqe),
 		       CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
 		       CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
@@ -165,7 +163,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
 	/* Completion Events */
 	case T4_ERR_SUCCESS:
-		printk(KERN_ERR MOD "AE with status 0!\n");
+		pr_err("AE with status 0!\n");
 		break;
 	case T4_ERR_STAG:
@@ -207,7 +205,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
 		break;
 	default:
-		printk(KERN_ERR MOD "Unknown T4 status 0x%x QPID 0x%x\n",
+		pr_err("Unknown T4 status 0x%x QPID 0x%x\n",
 		       CQE_STATUS(err_cqe), qhp->wq.sq.qid);
 		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
 		break;
...
@@ -64,6 +64,12 @@
 #define DRV_NAME "iw_cxgb4"
 #define MOD DRV_NAME ":"
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 extern int c4iw_debug;
 #define PDBG(fmt, args...) \
 	do { \
...
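
The new pr_fmt() definition is what lets every call site drop the explicit MOD prefix: the pr_<level> helpers pass their format string through pr_fmt() before it reaches printk(). A simplified sketch of the mechanism from include/linux/printk.h (the #undef above guards against the default no-op pr_fmt that printk.h supplies when a file defines none):

	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* here: "iw_cxgb4: " fmt */
	#define pr_err(fmt, ...) \
		printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)

	/* So pr_err("cq overflow cqid %u\n", cqid) logs
	 * "iw_cxgb4: cq overflow cqid 6\n" at KERN_ERR, matching what
	 * printk(KERN_ERR MOD "...") produced before. */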
@@ -234,10 +234,8 @@ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
 	if (is_t5(rdev->lldi.adapter_type) && use_dsgl) {
 		if (len > inline_threshold) {
 			if (_c4iw_write_mem_dma(rdev, addr, len, data, skb)) {
-				printk_ratelimited(KERN_WARNING
-						   "%s: dma map"
-						   " failure (non fatal)\n",
-						   pci_name(rdev->lldi.pdev));
+				pr_warn_ratelimited("%s: dma map failure (non fatal)\n",
+						    pci_name(rdev->lldi.pdev));
 				return _c4iw_write_mem_inline(rdev, addr, len,
 							      data, skb);
 			} else {
...
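
pr_warn_ratelimited() keeps the rate limiting of the printk_ratelimited() it replaces while folding in the level and the pr_fmt() prefix; each call site carries its own static ratelimit state. Roughly, as sketched from include/linux/printk.h:

	#define printk_ratelimited(fmt, ...)				\
	({								\
		static DEFINE_RATELIMIT_STATE(_rs,			\
					      DEFAULT_RATELIMIT_INTERVAL, \
					      DEFAULT_RATELIMIT_BURST);	\
									\
		if (__ratelimit(&_rs))					\
			printk(fmt, ##__VA_ARGS__);			\
	})
	#define pr_warn_ratelimited(fmt, ...) \
		printk_ratelimited(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)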
@@ -123,7 +123,6 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
 {
 	struct c4iw_ucontext *context;
 	struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
-	static int warned;
 	struct c4iw_alloc_ucontext_resp uresp;
 	int ret = 0;
 	struct c4iw_mm_entry *mm = NULL;
@@ -141,8 +140,7 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
 	kref_init(&context->kref);
 	if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
-		if (!warned++)
-			pr_err(MOD "Warning - downlevel libcxgb4 (non-fatal), device status page disabled.");
+		pr_err_once("Warning - downlevel libcxgb4 (non-fatal), device status page disabled\n");
 		rhp->rdev.flags |= T4_STATUS_PAGE_DISABLED;
 	} else {
 		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
...
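
pr_err_once() subsumes the driver's hand-rolled `static int warned` guard: an equivalent static flag is hidden inside the macro expansion. A simplified sketch of the underlying printk_once() for kernels of this era (newer kernels route it through DO_ONCE_LITE):

	#define printk_once(fmt, ...)			\
	({						\
		static bool __print_once;		\
							\
		if (!__print_once) {			\
			__print_once = true;		\
			printk(fmt, ##__VA_ARGS__);	\
		}					\
	})
	#define pr_err_once(fmt, ...) \
		printk_once(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)

As a side effect of the conversion, the message also gains its missing trailing newline and the pr_fmt() module prefix.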
@@ -275,7 +275,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 	 * User mode must have bar2 access.
 	 */
 	if (user && (!wq->sq.bar2_pa || !wq->rq.bar2_pa)) {
-		pr_warn(MOD "%s: sqid %u or rqid %u not in BAR2 range.\n",
+		pr_warn("%s: sqid %u or rqid %u not in BAR2 range\n",
 			pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
 		goto free_dma;
 	}
@@ -1671,8 +1671,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 			goto err;
 		break;
 	default:
-		printk(KERN_ERR "%s in a bad state %d\n",
-		       __func__, qhp->attr.state);
+		pr_err("%s in a bad state %d\n", __func__, qhp->attr.state);
 		ret = -EINVAL;
 		goto err;
 		break;
...
@@ -293,10 +293,8 @@ int c4iw_pblpool_create(struct c4iw_rdev *rdev)
 		PDBG("%s failed to add PBL chunk (%x/%x)\n",
 		     __func__, pbl_start, pbl_chunk);
 		if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
-			printk(KERN_WARNING MOD
-			       "Failed to add all PBL chunks (%x/%x)\n",
-			       pbl_start,
-			       pbl_top - pbl_start);
+			pr_warn("Failed to add all PBL chunks (%x/%x)\n",
+				pbl_start, pbl_top - pbl_start);
 			return 0;
 		}
 		pbl_chunk >>= 1;
@@ -326,7 +324,7 @@ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
 	unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);
 	PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
 	if (!addr)
-		pr_warn_ratelimited(MOD "%s: Out of RQT memory\n",
+		pr_warn_ratelimited("%s: Out of RQT memory\n",
 				    pci_name(rdev->lldi.pdev));
 	mutex_lock(&rdev->stats.lock);
 	if (addr) {
@@ -366,9 +364,8 @@ int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
 		PDBG("%s failed to add RQT chunk (%x/%x)\n",
 		     __func__, rqt_start, rqt_chunk);
 		if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) {
-			printk(KERN_WARNING MOD
-			       "Failed to add all RQT chunks (%x/%x)\n",
-			       rqt_start, rqt_top - rqt_start);
+			pr_warn("Failed to add all RQT chunks (%x/%x)\n",
+				rqt_start, rqt_top - rqt_start);
 			return 0;
 		}
 		rqt_chunk >>= 1;
@@ -432,9 +429,8 @@ int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev)
 		PDBG("%s failed to add OCQP chunk (%x/%x)\n",
 		     __func__, start, chunk);
 		if (chunk <= 1024 << MIN_OCQP_SHIFT) {
-			printk(KERN_WARNING MOD
-			       "Failed to add all OCQP chunks (%x/%x)\n",
-			       start, top - start);
+			pr_warn("Failed to add all OCQP chunks (%x/%x)\n",
+				start, top - start);
 			return 0;
 		}
 		chunk >>= 1;
...
@@ -656,7 +656,7 @@ static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
 	if (cq->queue[prev_cidx].bits_type_ts != cq->bits_type_ts) {
 		ret = -EOVERFLOW;
 		cq->error = 1;
-		printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid);
+		pr_err("cq overflow cqid %u\n", cq->cqid);
 		BUG_ON(1);
 	} else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {