Commit 1a91de28 authored by Joe Perches, committed by David S. Miller

mellanox: Logging message cleanups

Use a more current logging style.

o Coalesce formats
o Add missing spaces for coalesced formats
o Align arguments for modified formats
o Add missing newlines for some logging messages
o Use DRV_NAME as part of format instead of %s, DRV_NAME to
  reduce overall text.
o Use ..., ##__VA_ARGS__ instead of args... in macros
o Correct a few format typos
o Use a single line message where appropriate
Signed-off-by: Joe Perches <joe@perches.com>
Acked-by: Amir Vadai <amirv@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 2db2a15a
...@@ -212,8 +212,7 @@ static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param, ...@@ -212,8 +212,7 @@ static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
/* First, verify that the master reports correct status */ /* First, verify that the master reports correct status */
if (comm_pending(dev)) { if (comm_pending(dev)) {
mlx4_warn(dev, "Communication channel is not idle." mlx4_warn(dev, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n",
"my toggle is %d (cmd:0x%x)\n",
priv->cmd.comm_toggle, cmd); priv->cmd.comm_toggle, cmd);
return -EAGAIN; return -EAGAIN;
} }
...@@ -422,9 +421,8 @@ static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param, ...@@ -422,9 +421,8 @@ static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
*out_param = *out_param =
be64_to_cpu(vhcr->out_param); be64_to_cpu(vhcr->out_param);
else { else {
mlx4_err(dev, "response expected while" mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
"output mailbox is NULL for " op);
"command 0x%x\n", op);
vhcr->status = CMD_STAT_BAD_PARAM; vhcr->status = CMD_STAT_BAD_PARAM;
} }
} }
...@@ -439,16 +437,15 @@ static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param, ...@@ -439,16 +437,15 @@ static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
*out_param = *out_param =
be64_to_cpu(vhcr->out_param); be64_to_cpu(vhcr->out_param);
else { else {
mlx4_err(dev, "response expected while" mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
"output mailbox is NULL for " op);
"command 0x%x\n", op);
vhcr->status = CMD_STAT_BAD_PARAM; vhcr->status = CMD_STAT_BAD_PARAM;
} }
} }
ret = mlx4_status_to_errno(vhcr->status); ret = mlx4_status_to_errno(vhcr->status);
} else } else
mlx4_err(dev, "failed execution of VHCR_POST command" mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n",
"opcode 0x%x\n", op); op);
} }
mutex_unlock(&priv->cmd.slave_cmd_mutex); mutex_unlock(&priv->cmd.slave_cmd_mutex);
...@@ -625,9 +622,8 @@ static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr, ...@@ -625,9 +622,8 @@ static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
if ((slave_addr & 0xfff) | (master_addr & 0xfff) | if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
(slave & ~0x7f) | (size & 0xff)) { (slave & ~0x7f) | (size & 0xff)) {
mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx " mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx master_addr:0x%llx slave_id:%d size:%d\n",
"master_addr:0x%llx slave_id:%d size:%d\n", slave_addr, master_addr, slave, size);
slave_addr, master_addr, slave, size);
return -EINVAL; return -EINVAL;
} }
...@@ -788,8 +784,7 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave, ...@@ -788,8 +784,7 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
((smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) || ((smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) ||
(smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED && (smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
smp->method == IB_MGMT_METHOD_SET))) { smp->method == IB_MGMT_METHOD_SET))) {
mlx4_err(dev, "slave %d is trying to execute a Subnet MGMT MAD, " mlx4_err(dev, "slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x for attr 0x%x - Rejecting\n",
"class 0x%x, method 0x%x for attr 0x%x. Rejecting\n",
slave, smp->method, smp->mgmt_class, slave, smp->method, smp->mgmt_class,
be16_to_cpu(smp->attr_id)); be16_to_cpu(smp->attr_id));
return -EPERM; return -EPERM;
...@@ -1409,8 +1404,8 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave, ...@@ -1409,8 +1404,8 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
ALIGN(sizeof(struct mlx4_vhcr_cmd), ALIGN(sizeof(struct mlx4_vhcr_cmd),
MLX4_ACCESS_MEM_ALIGN), 1); MLX4_ACCESS_MEM_ALIGN), 1);
if (ret) { if (ret) {
mlx4_err(dev, "%s:Failed reading vhcr" mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n",
"ret: 0x%x\n", __func__, ret); __func__, ret);
kfree(vhcr); kfree(vhcr);
return ret; return ret;
} }
...@@ -1461,9 +1456,8 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave, ...@@ -1461,9 +1456,8 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
/* Apply permission and bound checks if applicable */ /* Apply permission and bound checks if applicable */
if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) { if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
mlx4_warn(dev, "Command:0x%x from slave: %d failed protection " mlx4_warn(dev, "Command:0x%x from slave: %d failed protection checks for resource_id:%d\n",
"checks for resource_id:%d\n", vhcr->op, slave, vhcr->op, slave, vhcr->in_modifier);
vhcr->in_modifier);
vhcr_cmd->status = CMD_STAT_BAD_OP; vhcr_cmd->status = CMD_STAT_BAD_OP;
goto out_status; goto out_status;
} }
...@@ -1502,8 +1496,7 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave, ...@@ -1502,8 +1496,7 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
} }
if (err) { if (err) {
mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with" mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
" error:%d, status %d\n",
vhcr->op, slave, vhcr->errno, err); vhcr->op, slave, vhcr->errno, err);
vhcr_cmd->status = mlx4_errno_to_status(err); vhcr_cmd->status = mlx4_errno_to_status(err);
goto out_status; goto out_status;
...@@ -1537,8 +1530,8 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave, ...@@ -1537,8 +1530,8 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
__func__); __func__);
else if (vhcr->e_bit && else if (vhcr->e_bit &&
mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe)) mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
mlx4_warn(dev, "Failed to generate command completion " mlx4_warn(dev, "Failed to generate command completion eqe for slave %d\n",
"eqe for slave %d\n", slave); slave);
} }
out: out:
...@@ -1577,8 +1570,9 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv, ...@@ -1577,8 +1570,9 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n", mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n",
slave, port); slave, port);
mlx4_dbg(dev, "vlan %d QoS %d link down %d\n", vp_admin->default_vlan, mlx4_dbg(dev, "vlan %d QoS %d link down %d\n",
vp_admin->default_qos, vp_admin->link_state); vp_admin->default_vlan, vp_admin->default_qos,
vp_admin->link_state);
work = kzalloc(sizeof(*work), GFP_KERNEL); work = kzalloc(sizeof(*work), GFP_KERNEL);
if (!work) if (!work)
...@@ -1591,7 +1585,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv, ...@@ -1591,7 +1585,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
&admin_vlan_ix); &admin_vlan_ix);
if (err) { if (err) {
kfree(work); kfree(work);
mlx4_warn((&priv->dev), mlx4_warn(&priv->dev,
"No vlan resources slave %d, port %d\n", "No vlan resources slave %d, port %d\n",
slave, port); slave, port);
return err; return err;
...@@ -1600,7 +1594,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv, ...@@ -1600,7 +1594,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
admin_vlan_ix = NO_INDX; admin_vlan_ix = NO_INDX;
} }
work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN; work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
mlx4_dbg((&(priv->dev)), mlx4_dbg(&priv->dev,
"alloc vlan %d idx %d slave %d port %d\n", "alloc vlan %d idx %d slave %d port %d\n",
(int)(vp_admin->default_vlan), (int)(vp_admin->default_vlan),
admin_vlan_ix, slave, port); admin_vlan_ix, slave, port);
...@@ -1661,12 +1655,12 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave) ...@@ -1661,12 +1655,12 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
vp_admin->default_vlan, &(vp_oper->vlan_idx)); vp_admin->default_vlan, &(vp_oper->vlan_idx));
if (err) { if (err) {
vp_oper->vlan_idx = NO_INDX; vp_oper->vlan_idx = NO_INDX;
mlx4_warn((&priv->dev), mlx4_warn(&priv->dev,
"No vlan resorces slave %d, port %d\n", "No vlan resorces slave %d, port %d\n",
slave, port); slave, port);
return err; return err;
} }
mlx4_dbg((&(priv->dev)), "alloc vlan %d idx %d slave %d port %d\n", mlx4_dbg(&priv->dev, "alloc vlan %d idx %d slave %d port %d\n",
(int)(vp_oper->state.default_vlan), (int)(vp_oper->state.default_vlan),
vp_oper->vlan_idx, slave, port); vp_oper->vlan_idx, slave, port);
} }
...@@ -1677,12 +1671,12 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave) ...@@ -1677,12 +1671,12 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
if (0 > vp_oper->mac_idx) { if (0 > vp_oper->mac_idx) {
err = vp_oper->mac_idx; err = vp_oper->mac_idx;
vp_oper->mac_idx = NO_INDX; vp_oper->mac_idx = NO_INDX;
mlx4_warn((&priv->dev), mlx4_warn(&priv->dev,
"No mac resorces slave %d, port %d\n", "No mac resorces slave %d, port %d\n",
slave, port); slave, port);
return err; return err;
} }
mlx4_dbg((&(priv->dev)), "alloc mac %llx idx %d slave %d port %d\n", mlx4_dbg(&priv->dev, "alloc mac %llx idx %d slave %d port %d\n",
vp_oper->state.mac, vp_oper->mac_idx, slave, port); vp_oper->state.mac, vp_oper->mac_idx, slave, port);
} }
} }
...@@ -1731,8 +1725,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd, ...@@ -1731,8 +1725,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
slave_state[slave].comm_toggle ^= 1; slave_state[slave].comm_toggle ^= 1;
reply = (u32) slave_state[slave].comm_toggle << 31; reply = (u32) slave_state[slave].comm_toggle << 31;
if (toggle != slave_state[slave].comm_toggle) { if (toggle != slave_state[slave].comm_toggle) {
mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER" mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER STATE COMPROMISED ***\n",
"STATE COMPROMISIED ***\n", toggle, slave); toggle, slave);
goto reset_slave; goto reset_slave;
} }
if (cmd == MLX4_COMM_CMD_RESET) { if (cmd == MLX4_COMM_CMD_RESET) {
...@@ -1759,8 +1753,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd, ...@@ -1759,8 +1753,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
/*command from slave in the middle of FLR*/ /*command from slave in the middle of FLR*/
if (cmd != MLX4_COMM_CMD_RESET && if (cmd != MLX4_COMM_CMD_RESET &&
MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) { MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) " mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) in the middle of FLR\n",
"in the middle of FLR\n", slave, cmd); slave, cmd);
return; return;
} }
...@@ -1798,8 +1792,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd, ...@@ -1798,8 +1792,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
mutex_lock(&priv->cmd.slave_cmd_mutex); mutex_lock(&priv->cmd.slave_cmd_mutex);
if (mlx4_master_process_vhcr(dev, slave, NULL)) { if (mlx4_master_process_vhcr(dev, slave, NULL)) {
mlx4_err(dev, "Failed processing vhcr for slave:%d," mlx4_err(dev, "Failed processing vhcr for slave:%d, resetting slave\n",
" resetting slave.\n", slave); slave);
mutex_unlock(&priv->cmd.slave_cmd_mutex); mutex_unlock(&priv->cmd.slave_cmd_mutex);
goto reset_slave; goto reset_slave;
} }
...@@ -1816,8 +1810,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd, ...@@ -1816,8 +1810,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
is_going_down = 1; is_going_down = 1;
spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags); spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
if (is_going_down) { if (is_going_down) {
mlx4_warn(dev, "Slave is going down aborting command(%d)" mlx4_warn(dev, "Slave is going down aborting command(%d) executing from slave:%d\n",
" executing from slave:%d\n",
cmd, slave); cmd, slave);
return; return;
} }
...@@ -1880,9 +1873,8 @@ void mlx4_master_comm_channel(struct work_struct *work) ...@@ -1880,9 +1873,8 @@ void mlx4_master_comm_channel(struct work_struct *work)
if (toggle != slt) { if (toggle != slt) {
if (master->slave_state[slave].comm_toggle if (master->slave_state[slave].comm_toggle
!= slt) { != slt) {
printk(KERN_INFO "slave %d out of sync." printk(KERN_INFO "slave %d out of sync. read toggle %d, state toggle %d. Resynching.\n",
" read toggle %d, state toggle %d. " slave, slt,
"Resynching.\n", slave, slt,
master->slave_state[slave].comm_toggle); master->slave_state[slave].comm_toggle);
master->slave_state[slave].comm_toggle = master->slave_state[slave].comm_toggle =
slt; slt;
...@@ -1896,8 +1888,7 @@ void mlx4_master_comm_channel(struct work_struct *work) ...@@ -1896,8 +1888,7 @@ void mlx4_master_comm_channel(struct work_struct *work)
} }
if (reported && reported != served) if (reported && reported != served)
mlx4_warn(dev, "Got command event with bitmask from %d slaves" mlx4_warn(dev, "Got command event with bitmask from %d slaves but %d were served\n",
" but %d were served\n",
reported, served); reported, served);
if (mlx4_ARM_COMM_CHANNEL(dev)) if (mlx4_ARM_COMM_CHANNEL(dev))
...@@ -1953,7 +1944,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev) ...@@ -1953,7 +1944,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
ioremap(pci_resource_start(dev->pdev, 2) + ioremap(pci_resource_start(dev->pdev, 2) +
MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE); MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
if (!priv->mfunc.comm) { if (!priv->mfunc.comm) {
mlx4_err(dev, "Couldn't map communication vector.\n"); mlx4_err(dev, "Couldn't map communication vector\n");
goto err_vhcr; goto err_vhcr;
} }
...@@ -2080,7 +2071,7 @@ int mlx4_cmd_init(struct mlx4_dev *dev) ...@@ -2080,7 +2071,7 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) +
MLX4_HCR_BASE, MLX4_HCR_SIZE); MLX4_HCR_BASE, MLX4_HCR_SIZE);
if (!priv->cmd.hcr) { if (!priv->cmd.hcr) {
mlx4_err(dev, "Couldn't map command register.\n"); mlx4_err(dev, "Couldn't map command register\n");
return -ENOMEM; return -ENOMEM;
} }
} }
......
...@@ -125,8 +125,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, ...@@ -125,8 +125,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
&cq->vector)) { &cq->vector)) {
cq->vector = (cq->ring + 1 + priv->port) cq->vector = (cq->ring + 1 + priv->port)
% mdev->dev->caps.num_comp_vectors; % mdev->dev->caps.num_comp_vectors;
mlx4_warn(mdev, "Failed Assigning an EQ to " mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n",
"%s ,Falling back to legacy EQ's\n",
name); name);
} }
} }
......
...@@ -925,13 +925,13 @@ static int mlx4_en_flow_replace(struct net_device *dev, ...@@ -925,13 +925,13 @@ static int mlx4_en_flow_replace(struct net_device *dev,
qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1); qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
} else { } else {
if (cmd->fs.ring_cookie >= priv->rx_ring_num) { if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist.\n", en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n",
cmd->fs.ring_cookie); cmd->fs.ring_cookie);
return -EINVAL; return -EINVAL;
} }
qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn; qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn;
if (!qpn) { if (!qpn) {
en_warn(priv, "rxnfc: RX ring (%llu) is inactive.\n", en_warn(priv, "rxnfc: RX ring (%llu) is inactive\n",
cmd->fs.ring_cookie); cmd->fs.ring_cookie);
return -EINVAL; return -EINVAL;
} }
...@@ -956,7 +956,7 @@ static int mlx4_en_flow_replace(struct net_device *dev, ...@@ -956,7 +956,7 @@ static int mlx4_en_flow_replace(struct net_device *dev,
} }
err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id); err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id);
if (err) { if (err) {
en_err(priv, "Fail to attach network rule at location %d.\n", en_err(priv, "Fail to attach network rule at location %d\n",
cmd->fs.location); cmd->fs.location);
goto out_free_list; goto out_free_list;
} }
......
...@@ -133,7 +133,7 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev) ...@@ -133,7 +133,7 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
MLX4_EN_MAX_TX_RING_P_UP); MLX4_EN_MAX_TX_RING_P_UP);
if (params->udp_rss && !(mdev->dev->caps.flags if (params->udp_rss && !(mdev->dev->caps.flags
& MLX4_DEV_CAP_FLAG_UDP_RSS)) { & MLX4_DEV_CAP_FLAG_UDP_RSS)) {
mlx4_warn(mdev, "UDP RSS is not supported on this device.\n"); mlx4_warn(mdev, "UDP RSS is not supported on this device\n");
params->udp_rss = 0; params->udp_rss = 0;
} }
for (i = 1; i <= MLX4_MAX_PORTS; i++) { for (i = 1; i <= MLX4_MAX_PORTS; i++) {
...@@ -251,8 +251,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev) ...@@ -251,8 +251,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
mdev->LSO_support = !!(dev->caps.flags & (1 << 15)); mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
if (!mdev->LSO_support) if (!mdev->LSO_support)
mlx4_warn(mdev, "LSO not supported, please upgrade to later " mlx4_warn(mdev, "LSO not supported, please upgrade to later FW version to enable LSO\n");
"FW version to enable LSO\n");
if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull, if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull,
MLX4_PERM_LOCAL_WRITE | MLX4_PERM_LOCAL_READ, MLX4_PERM_LOCAL_WRITE | MLX4_PERM_LOCAL_READ,
...@@ -268,7 +267,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev) ...@@ -268,7 +267,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
/* Build device profile according to supplied module parameters */ /* Build device profile according to supplied module parameters */
err = mlx4_en_get_profile(mdev); err = mlx4_en_get_profile(mdev);
if (err) { if (err) {
mlx4_err(mdev, "Bad module parameters, aborting.\n"); mlx4_err(mdev, "Bad module parameters, aborting\n");
goto err_mr; goto err_mr;
} }
......
...@@ -1576,7 +1576,7 @@ int mlx4_en_start_port(struct net_device *dev) ...@@ -1576,7 +1576,7 @@ int mlx4_en_start_port(struct net_device *dev)
cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK; cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
err = mlx4_en_set_cq_moder(priv, cq); err = mlx4_en_set_cq_moder(priv, cq);
if (err) { if (err) {
en_err(priv, "Failed setting cq moderation parameters"); en_err(priv, "Failed setting cq moderation parameters\n");
mlx4_en_deactivate_cq(priv, cq); mlx4_en_deactivate_cq(priv, cq);
goto cq_err; goto cq_err;
} }
...@@ -1615,7 +1615,7 @@ int mlx4_en_start_port(struct net_device *dev) ...@@ -1615,7 +1615,7 @@ int mlx4_en_start_port(struct net_device *dev)
} }
err = mlx4_en_set_cq_moder(priv, cq); err = mlx4_en_set_cq_moder(priv, cq);
if (err) { if (err) {
en_err(priv, "Failed setting cq moderation parameters"); en_err(priv, "Failed setting cq moderation parameters\n");
mlx4_en_deactivate_cq(priv, cq); mlx4_en_deactivate_cq(priv, cq);
goto tx_err; goto tx_err;
} }
...@@ -2594,8 +2594,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, ...@@ -2594,8 +2594,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
prof->tx_pause, prof->tx_ppp, prof->tx_pause, prof->tx_ppp,
prof->rx_pause, prof->rx_ppp); prof->rx_pause, prof->rx_ppp);
if (err) { if (err) {
en_err(priv, "Failed setting port general configurations " en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
"for port %d, with error %d\n", priv->port, err); priv->port, err);
goto out; goto out;
} }
......
...@@ -270,13 +270,11 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv) ...@@ -270,13 +270,11 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
ring->actual_size, ring->actual_size,
GFP_KERNEL)) { GFP_KERNEL)) {
if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) { if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
en_err(priv, "Failed to allocate " en_err(priv, "Failed to allocate enough rx buffers\n");
"enough rx buffers\n");
return -ENOMEM; return -ENOMEM;
} else { } else {
new_size = rounddown_pow_of_two(ring->actual_size); new_size = rounddown_pow_of_two(ring->actual_size);
en_warn(priv, "Only %d buffers allocated " en_warn(priv, "Only %d buffers allocated reducing ring size to %d\n",
"reducing ring size to %d",
ring->actual_size, new_size); ring->actual_size, new_size);
goto reduce_rings; goto reduce_rings;
} }
...@@ -685,10 +683,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud ...@@ -685,10 +683,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* Drop packet on bad receive or bad checksum */ /* Drop packet on bad receive or bad checksum */
if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
MLX4_CQE_OPCODE_ERROR)) { MLX4_CQE_OPCODE_ERROR)) {
en_err(priv, "CQE completed in error - vendor " en_err(priv, "CQE completed in error - vendor syndrom:%d syndrom:%d\n",
"syndrom:%d syndrom:%d\n", ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome, ((struct mlx4_err_cqe *)cqe)->syndrome);
((struct mlx4_err_cqe *) cqe)->syndrome);
goto next; goto next;
} }
if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) { if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
...@@ -944,8 +941,8 @@ void mlx4_en_calc_rx_buf(struct net_device *dev) ...@@ -944,8 +941,8 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
priv->rx_skb_size = eff_mtu; priv->rx_skb_size = eff_mtu;
priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc)); priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc));
en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d " en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d num_frags:%d):\n",
"num_frags:%d):\n", eff_mtu, priv->num_frags); eff_mtu, priv->num_frags);
for (i = 0; i < priv->num_frags; i++) { for (i = 0; i < priv->num_frags; i++) {
en_err(priv, en_err(priv,
" frag:%d - size:%d prefix:%d align:%d stride:%d\n", " frag:%d - size:%d prefix:%d align:%d stride:%d\n",
......
...@@ -108,9 +108,9 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, ...@@ -108,9 +108,9 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
ring->buf = ring->wqres.buf.direct.buf; ring->buf = ring->wqres.buf.direct.buf;
en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d " en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d buf_size:%d dma:%llx\n",
"buf_size:%d dma:%llx\n", ring, ring->buf, ring->size, ring, ring->buf, ring->size, ring->buf_size,
ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map); (unsigned long long) ring->wqres.buf.direct.map);
ring->qpn = qpn; ring->qpn = qpn;
err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp); err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
...@@ -122,7 +122,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, ...@@ -122,7 +122,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
err = mlx4_bf_alloc(mdev->dev, &ring->bf, node); err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
if (err) { if (err) {
en_dbg(DRV, priv, "working without blueflame (%d)", err); en_dbg(DRV, priv, "working without blueflame (%d)\n", err);
ring->bf.uar = &mdev->priv_uar; ring->bf.uar = &mdev->priv_uar;
ring->bf.uar->map = mdev->uar_map; ring->bf.uar->map = mdev->uar_map;
ring->bf_enabled = false; ring->bf_enabled = false;
......
...@@ -152,14 +152,13 @@ void mlx4_gen_slave_eqe(struct work_struct *work) ...@@ -152,14 +152,13 @@ void mlx4_gen_slave_eqe(struct work_struct *work)
if (i != dev->caps.function && if (i != dev->caps.function &&
master->slave_state[i].active) master->slave_state[i].active)
if (mlx4_GEN_EQE(dev, i, eqe)) if (mlx4_GEN_EQE(dev, i, eqe))
mlx4_warn(dev, "Failed to " mlx4_warn(dev, "Failed to generate event for slave %d\n",
" generate event " i);
"for slave %d\n", i);
} }
} else { } else {
if (mlx4_GEN_EQE(dev, slave, eqe)) if (mlx4_GEN_EQE(dev, slave, eqe))
mlx4_warn(dev, "Failed to generate event " mlx4_warn(dev, "Failed to generate event for slave %d\n",
"for slave %d\n", slave); slave);
} }
++slave_eq->cons; ++slave_eq->cons;
} }
...@@ -177,8 +176,8 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe) ...@@ -177,8 +176,8 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
s_eqe = &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)]; s_eqe = &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
if ((!!(s_eqe->owner & 0x80)) ^ if ((!!(s_eqe->owner & 0x80)) ^
(!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) { (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. " mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. No free EQE on slave events queue\n",
"No free EQE on slave events queue\n", slave); slave);
spin_unlock_irqrestore(&slave_eq->event_lock, flags); spin_unlock_irqrestore(&slave_eq->event_lock, flags);
return; return;
} }
...@@ -375,9 +374,9 @@ int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave, ...@@ -375,9 +374,9 @@ int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
} }
break; break;
default: default:
pr_err("%s: BUG!!! UNKNOWN state: " pr_err("%s: BUG!!! UNKNOWN state: slave:%d, port:%d\n",
"slave:%d, port:%d\n", __func__, slave, port); __func__, slave, port);
goto out; goto out;
} }
ret = mlx4_get_slave_port_state(dev, slave, port); ret = mlx4_get_slave_port_state(dev, slave, port);
...@@ -425,8 +424,8 @@ void mlx4_master_handle_slave_flr(struct work_struct *work) ...@@ -425,8 +424,8 @@ void mlx4_master_handle_slave_flr(struct work_struct *work)
for (i = 0 ; i < dev->num_slaves; i++) { for (i = 0 ; i < dev->num_slaves; i++) {
if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) { if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
mlx4_dbg(dev, "mlx4_handle_slave_flr: " mlx4_dbg(dev, "mlx4_handle_slave_flr: clean slave: %d\n",
"clean slave: %d\n", i); i);
mlx4_delete_all_resources_for_slave(dev, i); mlx4_delete_all_resources_for_slave(dev, i);
/*return the slave to running mode*/ /*return the slave to running mode*/
...@@ -438,8 +437,8 @@ void mlx4_master_handle_slave_flr(struct work_struct *work) ...@@ -438,8 +437,8 @@ void mlx4_master_handle_slave_flr(struct work_struct *work)
err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE, err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
if (err) if (err)
mlx4_warn(dev, "Failed to notify FW on " mlx4_warn(dev, "Failed to notify FW on FLR done (slave:%d)\n",
"FLR done (slave:%d)\n", i); i);
} }
} }
} }
...@@ -490,9 +489,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) ...@@ -490,9 +489,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
be32_to_cpu(eqe->event.qp.qpn) be32_to_cpu(eqe->event.qp.qpn)
& 0xffffff, &slave); & 0xffffff, &slave);
if (ret && ret != -ENOENT) { if (ret && ret != -ENOENT) {
mlx4_dbg(dev, "QP event %02x(%02x) on " mlx4_dbg(dev, "QP event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
"EQ %d at index %u: could "
"not get slave id (%d)\n",
eqe->type, eqe->subtype, eqe->type, eqe->subtype,
eq->eqn, eq->cons_index, ret); eq->eqn, eq->cons_index, ret);
break; break;
...@@ -520,23 +517,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) ...@@ -520,23 +517,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
& 0xffffff, & 0xffffff,
&slave); &slave);
if (ret && ret != -ENOENT) { if (ret && ret != -ENOENT) {
mlx4_warn(dev, "SRQ event %02x(%02x) " mlx4_warn(dev, "SRQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
"on EQ %d at index %u: could"
" not get slave id (%d)\n",
eqe->type, eqe->subtype, eqe->type, eqe->subtype,
eq->eqn, eq->cons_index, ret); eq->eqn, eq->cons_index, ret);
break; break;
} }
mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x," mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
" event: %02x(%02x)\n", __func__, __func__, slave,
slave,
be32_to_cpu(eqe->event.srq.srqn), be32_to_cpu(eqe->event.srq.srqn),
eqe->type, eqe->subtype); eqe->type, eqe->subtype);
if (!ret && slave != dev->caps.function) { if (!ret && slave != dev->caps.function) {
mlx4_warn(dev, "%s: sending event " mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
"%02x(%02x) to slave:%d\n", __func__, eqe->type,
__func__, eqe->type,
eqe->subtype, slave); eqe->subtype, slave);
mlx4_slave_event(dev, slave, eqe); mlx4_slave_event(dev, slave, eqe);
break; break;
...@@ -569,8 +562,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) ...@@ -569,8 +562,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) { if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
if (i == mlx4_master_func_num(dev)) if (i == mlx4_master_func_num(dev))
continue; continue;
mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN" mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n",
" to slave: %d, port:%d\n",
__func__, i, port); __func__, i, port);
s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) { if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
...@@ -634,11 +626,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) ...@@ -634,11 +626,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
be32_to_cpu(eqe->event.cq_err.cqn) be32_to_cpu(eqe->event.cq_err.cqn)
& 0xffffff, &slave); & 0xffffff, &slave);
if (ret && ret != -ENOENT) { if (ret && ret != -ENOENT) {
mlx4_dbg(dev, "CQ event %02x(%02x) on " mlx4_dbg(dev, "CQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
"EQ %d at index %u: could " eqe->type, eqe->subtype,
"not get slave id (%d)\n", eq->eqn, eq->cons_index, ret);
eqe->type, eqe->subtype,
eq->eqn, eq->cons_index, ret);
break; break;
} }
...@@ -667,8 +657,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) ...@@ -667,8 +657,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
case MLX4_EVENT_TYPE_COMM_CHANNEL: case MLX4_EVENT_TYPE_COMM_CHANNEL:
if (!mlx4_is_master(dev)) { if (!mlx4_is_master(dev)) {
mlx4_warn(dev, "Received comm channel event " mlx4_warn(dev, "Received comm channel event for non master device\n");
"for non master device\n");
break; break;
} }
memcpy(&priv->mfunc.master.comm_arm_bit_vector, memcpy(&priv->mfunc.master.comm_arm_bit_vector,
...@@ -681,8 +670,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) ...@@ -681,8 +670,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
case MLX4_EVENT_TYPE_FLR_EVENT: case MLX4_EVENT_TYPE_FLR_EVENT:
flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id); flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id);
if (!mlx4_is_master(dev)) { if (!mlx4_is_master(dev)) {
mlx4_warn(dev, "Non-master function received" mlx4_warn(dev, "Non-master function received FLR event\n");
"FLR event\n");
break; break;
} }
...@@ -711,22 +699,17 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) ...@@ -711,22 +699,17 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
if (eqe->subtype == MLX4_FATAL_WARNING_SUBTYPE_WARMING) { if (eqe->subtype == MLX4_FATAL_WARNING_SUBTYPE_WARMING) {
if (mlx4_is_master(dev)) if (mlx4_is_master(dev))
for (i = 0; i < dev->num_slaves; i++) { for (i = 0; i < dev->num_slaves; i++) {
mlx4_dbg(dev, "%s: Sending " mlx4_dbg(dev, "%s: Sending MLX4_FATAL_WARNING_SUBTYPE_WARMING to slave: %d\n",
"MLX4_FATAL_WARNING_SUBTYPE_WARMING" __func__, i);
" to slave: %d\n", __func__, i);
if (i == dev->caps.function) if (i == dev->caps.function)
continue; continue;
mlx4_slave_event(dev, i, eqe); mlx4_slave_event(dev, i, eqe);
} }
mlx4_err(dev, "Temperature Threshold was reached! " mlx4_err(dev, "Temperature Threshold was reached! Threshold: %d celsius degrees; Current Temperature: %d\n",
"Threshold: %d celsius degrees; " be16_to_cpu(eqe->event.warming.warning_threshold),
"Current Temperature: %d\n", be16_to_cpu(eqe->event.warming.current_temperature));
be16_to_cpu(eqe->event.warming.warning_threshold),
be16_to_cpu(eqe->event.warming.current_temperature));
} else } else
mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), " mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), subtype %02x on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
"subtype %02x on EQ %d at index %u. owner=%x, "
"nent=0x%x, slave=%x, ownership=%s\n",
eqe->type, eqe->subtype, eq->eqn, eqe->type, eqe->subtype, eq->eqn,
eq->cons_index, eqe->owner, eq->nent, eq->cons_index, eqe->owner, eq->nent,
eqe->slave_id, eqe->slave_id,
...@@ -743,9 +726,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) ...@@ -743,9 +726,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
case MLX4_EVENT_TYPE_EEC_CATAS_ERROR: case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
case MLX4_EVENT_TYPE_ECC_DETECT: case MLX4_EVENT_TYPE_ECC_DETECT:
default: default:
mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at " mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
"index %u. owner=%x, nent=0x%x, slave=%x, "
"ownership=%s\n",
eqe->type, eqe->subtype, eq->eqn, eqe->type, eqe->subtype, eq->eqn,
eq->cons_index, eqe->owner, eq->nent, eq->cons_index, eqe->owner, eq->nent,
eqe->slave_id, eqe->slave_id,
...@@ -1088,7 +1069,7 @@ static int mlx4_map_clr_int(struct mlx4_dev *dev) ...@@ -1088,7 +1069,7 @@ static int mlx4_map_clr_int(struct mlx4_dev *dev)
priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) + priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
priv->fw.clr_int_base, MLX4_CLR_INT_SIZE); priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
if (!priv->clr_base) { if (!priv->clr_base) {
mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n"); mlx4_err(dev, "Couldn't map interrupt clear register, aborting\n");
return -ENOMEM; return -ENOMEM;
} }
......
...@@ -428,8 +428,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port, ...@@ -428,8 +428,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
} else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) { } else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET); MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) { if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) {
mlx4_err(dev, "phy_wqe_gid is " mlx4_err(dev, "phy_wqe_gid is enforced on this ib port\n");
"enforced on this ib port\n");
err = -EPROTONOSUPPORT; err = -EPROTONOSUPPORT;
goto out; goto out;
} }
...@@ -1054,10 +1053,10 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt) ...@@ -1054,10 +1053,10 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
*/ */
lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1; lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
if (lg < MLX4_ICM_PAGE_SHIFT) { if (lg < MLX4_ICM_PAGE_SHIFT) {
mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n", mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx)\n",
MLX4_ICM_PAGE_SIZE, MLX4_ICM_PAGE_SIZE,
(unsigned long long) mlx4_icm_addr(&iter), (unsigned long long) mlx4_icm_addr(&iter),
mlx4_icm_size(&iter)); mlx4_icm_size(&iter));
err = -EINVAL; err = -EINVAL;
goto out; goto out;
} }
...@@ -1093,14 +1092,14 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt) ...@@ -1093,14 +1092,14 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
switch (op) { switch (op) {
case MLX4_CMD_MAP_FA: case MLX4_CMD_MAP_FA:
mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts); mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW\n", tc, ts);
break; break;
case MLX4_CMD_MAP_ICM_AUX: case MLX4_CMD_MAP_ICM_AUX:
mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts); mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux\n", tc, ts);
break; break;
case MLX4_CMD_MAP_ICM: case MLX4_CMD_MAP_ICM:
mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM.\n", mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM\n",
tc, ts, (unsigned long long) virt - (ts << 10)); tc, ts, (unsigned long long) virt - (ts << 10));
break; break;
} }
...@@ -1186,14 +1185,13 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev) ...@@ -1186,14 +1185,13 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET); MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV || if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) { cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
mlx4_err(dev, "Installed FW has unsupported " mlx4_err(dev, "Installed FW has unsupported command interface revision %d\n",
"command interface revision %d.\n",
cmd_if_rev); cmd_if_rev);
mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n", mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n",
(int) (dev->caps.fw_ver >> 32), (int) (dev->caps.fw_ver >> 32),
(int) (dev->caps.fw_ver >> 16) & 0xffff, (int) (dev->caps.fw_ver >> 16) & 0xffff,
(int) dev->caps.fw_ver & 0xffff); (int) dev->caps.fw_ver & 0xffff);
mlx4_err(dev, "This driver version supports only revisions %d to %d.\n", mlx4_err(dev, "This driver version supports only revisions %d to %d\n",
MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV); MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV);
err = -ENODEV; err = -ENODEV;
goto out; goto out;
......
This diff is collapsed.
...@@ -638,7 +638,7 @@ static int find_entry(struct mlx4_dev *dev, u8 port, ...@@ -638,7 +638,7 @@ static int find_entry(struct mlx4_dev *dev, u8 port,
if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) { if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
if (*index != hash) { if (*index != hash) {
mlx4_err(dev, "Found zero MGID in AMGM.\n"); mlx4_err(dev, "Found zero MGID in AMGM\n");
err = -EINVAL; err = -EINVAL;
} }
return err; return err;
...@@ -874,7 +874,7 @@ static void mlx4_err_rule(struct mlx4_dev *dev, char *str, ...@@ -874,7 +874,7 @@ static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
mlx4_err(dev, "%s", buf); mlx4_err(dev, "%s", buf);
if (len >= BUF_SIZE) if (len >= BUF_SIZE)
mlx4_err(dev, "Network rule error message was truncated, print buffer is too small.\n"); mlx4_err(dev, "Network rule error message was truncated, print buffer is too small\n");
} }
int mlx4_flow_attach(struct mlx4_dev *dev, int mlx4_flow_attach(struct mlx4_dev *dev,
...@@ -905,10 +905,10 @@ int mlx4_flow_attach(struct mlx4_dev *dev, ...@@ -905,10 +905,10 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id); ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id);
if (ret == -ENOMEM) if (ret == -ENOMEM)
mlx4_err_rule(dev, mlx4_err_rule(dev,
"mcg table is full. Fail to register network rule.\n", "mcg table is full. Fail to register network rule\n",
rule); rule);
else if (ret) else if (ret)
mlx4_err_rule(dev, "Fail to register network rule.\n", rule); mlx4_err_rule(dev, "Fail to register network rule\n", rule);
mlx4_free_cmd_mailbox(dev, mailbox); mlx4_free_cmd_mailbox(dev, mailbox);
...@@ -994,7 +994,7 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], ...@@ -994,7 +994,7 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
members_count = be32_to_cpu(mgm->members_count) & 0xffffff; members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
if (members_count == dev->caps.num_qp_per_mgm) { if (members_count == dev->caps.num_qp_per_mgm) {
mlx4_err(dev, "MGM at index %x is full.\n", index); mlx4_err(dev, "MGM at index %x is full\n", index);
err = -ENOMEM; err = -ENOMEM;
goto out; goto out;
} }
...@@ -1042,7 +1042,7 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], ...@@ -1042,7 +1042,7 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
} }
if (err && link && index != -1) { if (err && link && index != -1) {
if (index < dev->caps.num_mgms) if (index < dev->caps.num_mgms)
mlx4_warn(dev, "Got AMGM index %d < %d", mlx4_warn(dev, "Got AMGM index %d < %d\n",
index, dev->caps.num_mgms); index, dev->caps.num_mgms);
else else
mlx4_bitmap_free(&priv->mcg_table.bitmap, mlx4_bitmap_free(&priv->mcg_table.bitmap,
...@@ -1133,7 +1133,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], ...@@ -1133,7 +1133,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
if (amgm_index) { if (amgm_index) {
if (amgm_index < dev->caps.num_mgms) if (amgm_index < dev->caps.num_mgms)
mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d", mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d\n",
index, amgm_index, dev->caps.num_mgms); index, amgm_index, dev->caps.num_mgms);
else else
mlx4_bitmap_free(&priv->mcg_table.bitmap, mlx4_bitmap_free(&priv->mcg_table.bitmap,
...@@ -1153,7 +1153,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], ...@@ -1153,7 +1153,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
goto out; goto out;
if (index < dev->caps.num_mgms) if (index < dev->caps.num_mgms)
mlx4_warn(dev, "entry %d had next AMGM index %d < %d", mlx4_warn(dev, "entry %d had next AMGM index %d < %d\n",
prev, index, dev->caps.num_mgms); prev, index, dev->caps.num_mgms);
else else
mlx4_bitmap_free(&priv->mcg_table.bitmap, mlx4_bitmap_free(&priv->mcg_table.bitmap,
......
...@@ -216,18 +216,19 @@ extern int mlx4_debug_level; ...@@ -216,18 +216,19 @@ extern int mlx4_debug_level;
#define mlx4_debug_level (0) #define mlx4_debug_level (0)
#endif /* CONFIG_MLX4_DEBUG */ #endif /* CONFIG_MLX4_DEBUG */
#define mlx4_dbg(mdev, format, arg...) \ #define mlx4_dbg(mdev, format, ...) \
do { \ do { \
if (mlx4_debug_level) \ if (mlx4_debug_level) \
dev_printk(KERN_DEBUG, &mdev->pdev->dev, format, ##arg); \ dev_printk(KERN_DEBUG, &(mdev)->pdev->dev, format, \
##__VA_ARGS__); \
} while (0) } while (0)
#define mlx4_err(mdev, format, arg...) \ #define mlx4_err(mdev, format, ...) \
dev_err(&mdev->pdev->dev, format, ##arg) dev_err(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
#define mlx4_info(mdev, format, arg...) \ #define mlx4_info(mdev, format, ...) \
dev_info(&mdev->pdev->dev, format, ##arg) dev_info(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
#define mlx4_warn(mdev, format, arg...) \ #define mlx4_warn(mdev, format, ...) \
dev_warn(&mdev->pdev->dev, format, ##arg) dev_warn(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
extern int mlx4_log_num_mgm_entry_size; extern int mlx4_log_num_mgm_entry_size;
extern int log_mtts_per_seg; extern int log_mtts_per_seg;
......
...@@ -830,26 +830,26 @@ __printf(3, 4) ...@@ -830,26 +830,26 @@ __printf(3, 4)
int en_print(const char *level, const struct mlx4_en_priv *priv, int en_print(const char *level, const struct mlx4_en_priv *priv,
const char *format, ...); const char *format, ...);
#define en_dbg(mlevel, priv, format, arg...) \ #define en_dbg(mlevel, priv, format, ...) \
do { \ do { \
if (NETIF_MSG_##mlevel & priv->msg_enable) \ if (NETIF_MSG_##mlevel & (priv)->msg_enable) \
en_print(KERN_DEBUG, priv, format, ##arg); \ en_print(KERN_DEBUG, priv, format, ##__VA_ARGS__); \
} while (0) } while (0)
#define en_warn(priv, format, arg...) \ #define en_warn(priv, format, ...) \
en_print(KERN_WARNING, priv, format, ##arg) en_print(KERN_WARNING, priv, format, ##__VA_ARGS__)
#define en_err(priv, format, arg...) \ #define en_err(priv, format, ...) \
en_print(KERN_ERR, priv, format, ##arg) en_print(KERN_ERR, priv, format, ##__VA_ARGS__)
#define en_info(priv, format, arg...) \ #define en_info(priv, format, ...) \
en_print(KERN_INFO, priv, format, ## arg) en_print(KERN_INFO, priv, format, ##__VA_ARGS__)
#define mlx4_err(mdev, format, arg...) \ #define mlx4_err(mdev, format, ...) \
pr_err("%s %s: " format, DRV_NAME, \ pr_err(DRV_NAME " %s: " format, \
dev_name(&mdev->pdev->dev), ##arg) dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
#define mlx4_info(mdev, format, arg...) \ #define mlx4_info(mdev, format, ...) \
pr_info("%s %s: " format, DRV_NAME, \ pr_info(DRV_NAME " %s: " format, \
dev_name(&mdev->pdev->dev), ##arg) dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
#define mlx4_warn(mdev, format, arg...) \ #define mlx4_warn(mdev, format, ...) \
pr_warning("%s %s: " format, DRV_NAME, \ pr_warn(DRV_NAME " %s: " format, \
dev_name(&mdev->pdev->dev), ##arg) dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
#endif #endif
...@@ -250,8 +250,8 @@ static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order) ...@@ -250,8 +250,8 @@ static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_WRAPPED); MLX4_CMD_WRAPPED);
if (err) if (err)
mlx4_warn(dev, "Failed to free mtt range at:" mlx4_warn(dev, "Failed to free mtt range at:%d order:%d\n",
"%d order:%d\n", offset, order); offset, order);
return; return;
} }
__mlx4_free_mtt_range(dev, offset, order); __mlx4_free_mtt_range(dev, offset, order);
...@@ -436,8 +436,8 @@ static int mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr) ...@@ -436,8 +436,8 @@ static int mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
key_to_hw_index(mr->key) & key_to_hw_index(mr->key) &
(dev->caps.num_mpts - 1)); (dev->caps.num_mpts - 1));
if (err) { if (err) {
mlx4_warn(dev, "HW2SW_MPT failed (%d),", err); mlx4_warn(dev, "HW2SW_MPT failed (%d), MR has MWs bound to it\n",
mlx4_warn(dev, "MR has MWs bound to it.\n"); err);
return err; return err;
} }
...@@ -773,7 +773,7 @@ int mlx4_init_mr_table(struct mlx4_dev *dev) ...@@ -773,7 +773,7 @@ int mlx4_init_mr_table(struct mlx4_dev *dev)
mlx4_alloc_mtt_range(dev, mlx4_alloc_mtt_range(dev,
fls(dev->caps.reserved_mtts - 1)); fls(dev->caps.reserved_mtts - 1));
if (priv->reserved_mtts < 0) { if (priv->reserved_mtts < 0) {
mlx4_warn(dev, "MTT table of order %u is too small.\n", mlx4_warn(dev, "MTT table of order %u is too small\n",
mr_table->mtt_buddy.max_order); mr_table->mtt_buddy.max_order);
err = -ENOMEM; err = -ENOMEM;
goto err_reserve_mtts; goto err_reserve_mtts;
...@@ -954,8 +954,8 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr, ...@@ -954,8 +954,8 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
mailbox = mlx4_alloc_cmd_mailbox(dev); mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox)) { if (IS_ERR(mailbox)) {
err = PTR_ERR(mailbox); err = PTR_ERR(mailbox);
printk(KERN_WARNING "mlx4_ib: mlx4_alloc_cmd_mailbox" printk(KERN_WARNING "mlx4_ib: mlx4_alloc_cmd_mailbox failed (%d)\n",
" failed (%d)\n", err); err);
return; return;
} }
......
...@@ -244,8 +244,8 @@ void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac) ...@@ -244,8 +244,8 @@ void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
if (validate_index(dev, table, index)) if (validate_index(dev, table, index))
goto out; goto out;
if (--table->refs[index]) { if (--table->refs[index]) {
mlx4_dbg(dev, "Have more references for index %d," mlx4_dbg(dev, "Have more references for index %d, no need to modify mac table\n",
"no need to modify mac table\n", index); index);
goto out; goto out;
} }
...@@ -443,9 +443,8 @@ void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan) ...@@ -443,9 +443,8 @@ void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
} }
if (--table->refs[index]) { if (--table->refs[index]) {
mlx4_dbg(dev, "Have %d more references for index %d," mlx4_dbg(dev, "Have %d more references for index %d, no need to modify vlan table\n",
"no need to modify vlan table\n", table->refs[index], table->refs[index], index);
index);
goto out; goto out;
} }
table->entries[index] = 0; table->entries[index] = 0;
...@@ -706,8 +705,7 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, ...@@ -706,8 +705,7 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
if (!memcmp(gid_entry_mbox->raw, gid_entry_tbl->raw, if (!memcmp(gid_entry_mbox->raw, gid_entry_tbl->raw,
sizeof(gid_entry_tbl->raw))) { sizeof(gid_entry_tbl->raw))) {
/* found duplicate */ /* found duplicate */
mlx4_warn(dev, "requested gid entry for slave:%d " mlx4_warn(dev, "requested gid entry for slave:%d is a duplicate of gid at index %d\n",
"is a duplicate of gid at index %d\n",
slave, i); slave, i);
return -EINVAL; return -EINVAL;
} }
......
...@@ -164,18 +164,17 @@ u64 mlx4_make_profile(struct mlx4_dev *dev, ...@@ -164,18 +164,17 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
} }
if (total_size > dev_cap->max_icm_sz) { if (total_size > dev_cap->max_icm_sz) {
mlx4_err(dev, "Profile requires 0x%llx bytes; " mlx4_err(dev, "Profile requires 0x%llx bytes; won't fit in 0x%llx bytes of context memory\n",
"won't fit in 0x%llx bytes of context memory.\n", (unsigned long long) total_size,
(unsigned long long) total_size, (unsigned long long) dev_cap->max_icm_sz);
(unsigned long long) dev_cap->max_icm_sz);
kfree(profile); kfree(profile);
return -ENOMEM; return -ENOMEM;
} }
if (profile[i].size) if (profile[i].size)
mlx4_dbg(dev, " profile[%2d] (%6s): 2^%02d entries @ 0x%10llx, " mlx4_dbg(dev, " profile[%2d] (%6s): 2^%02d entries @ 0x%10llx, size 0x%10llx\n",
"size 0x%10llx\n", i, res_name[profile[i].type],
i, res_name[profile[i].type], profile[i].log_num, profile[i].log_num,
(unsigned long long) profile[i].start, (unsigned long long) profile[i].start,
(unsigned long long) profile[i].size); (unsigned long long) profile[i].size);
} }
......
...@@ -264,8 +264,8 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) ...@@ -264,8 +264,8 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
MLX4_CMD_FREE_RES, MLX4_CMD_FREE_RES,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
if (err) { if (err) {
mlx4_warn(dev, "Failed to release qp range" mlx4_warn(dev, "Failed to release qp range base:%d cnt:%d\n",
" base:%d cnt:%d\n", base_qpn, cnt); base_qpn, cnt);
} }
} else } else
__mlx4_qp_release_range(dev, base_qpn, cnt); __mlx4_qp_release_range(dev, base_qpn, cnt);
...@@ -577,8 +577,7 @@ int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt, ...@@ -577,8 +577,7 @@ int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1], err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
context, 0, 0, qp); context, 0, 0, qp);
if (err) { if (err) {
mlx4_err(dev, "Failed to bring QP to state: " mlx4_err(dev, "Failed to bring QP to state: %d with error: %d\n",
"%d with error: %d\n",
states[i + 1], err); states[i + 1], err);
return err; return err;
} }
......
...@@ -72,8 +72,7 @@ int mlx4_reset(struct mlx4_dev *dev) ...@@ -72,8 +72,7 @@ int mlx4_reset(struct mlx4_dev *dev)
hca_header = kmalloc(256, GFP_KERNEL); hca_header = kmalloc(256, GFP_KERNEL);
if (!hca_header) { if (!hca_header) {
err = -ENOMEM; err = -ENOMEM;
mlx4_err(dev, "Couldn't allocate memory to save HCA " mlx4_err(dev, "Couldn't allocate memory to save HCA PCI header, aborting\n");
"PCI header, aborting.\n");
goto out; goto out;
} }
...@@ -84,8 +83,7 @@ int mlx4_reset(struct mlx4_dev *dev) ...@@ -84,8 +83,7 @@ int mlx4_reset(struct mlx4_dev *dev)
continue; continue;
if (pci_read_config_dword(dev->pdev, i * 4, hca_header + i)) { if (pci_read_config_dword(dev->pdev, i * 4, hca_header + i)) {
err = -ENODEV; err = -ENODEV;
mlx4_err(dev, "Couldn't save HCA " mlx4_err(dev, "Couldn't save HCA PCI header, aborting\n");
"PCI header, aborting.\n");
goto out; goto out;
} }
} }
...@@ -94,7 +92,7 @@ int mlx4_reset(struct mlx4_dev *dev) ...@@ -94,7 +92,7 @@ int mlx4_reset(struct mlx4_dev *dev)
MLX4_RESET_SIZE); MLX4_RESET_SIZE);
if (!reset) { if (!reset) {
err = -ENOMEM; err = -ENOMEM;
mlx4_err(dev, "Couldn't map HCA reset register, aborting.\n"); mlx4_err(dev, "Couldn't map HCA reset register, aborting\n");
goto out; goto out;
} }
...@@ -133,8 +131,7 @@ int mlx4_reset(struct mlx4_dev *dev) ...@@ -133,8 +131,7 @@ int mlx4_reset(struct mlx4_dev *dev)
if (vendor == 0xffff) { if (vendor == 0xffff) {
err = -ENODEV; err = -ENODEV;
mlx4_err(dev, "PCI device did not come back after reset, " mlx4_err(dev, "PCI device did not come back after reset, aborting\n");
"aborting.\n");
goto out; goto out;
} }
...@@ -144,16 +141,14 @@ int mlx4_reset(struct mlx4_dev *dev) ...@@ -144,16 +141,14 @@ int mlx4_reset(struct mlx4_dev *dev)
if (pcie_capability_write_word(dev->pdev, PCI_EXP_DEVCTL, if (pcie_capability_write_word(dev->pdev, PCI_EXP_DEVCTL,
devctl)) { devctl)) {
err = -ENODEV; err = -ENODEV;
mlx4_err(dev, "Couldn't restore HCA PCI Express " mlx4_err(dev, "Couldn't restore HCA PCI Express Device Control register, aborting\n");
"Device Control register, aborting.\n");
goto out; goto out;
} }
linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4]; linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4];
if (pcie_capability_write_word(dev->pdev, PCI_EXP_LNKCTL, if (pcie_capability_write_word(dev->pdev, PCI_EXP_LNKCTL,
linkctl)) { linkctl)) {
err = -ENODEV; err = -ENODEV;
mlx4_err(dev, "Couldn't restore HCA PCI Express " mlx4_err(dev, "Couldn't restore HCA PCI Express Link control register, aborting\n");
"Link control register, aborting.\n");
goto out; goto out;
} }
} }
...@@ -164,8 +159,8 @@ int mlx4_reset(struct mlx4_dev *dev) ...@@ -164,8 +159,8 @@ int mlx4_reset(struct mlx4_dev *dev)
if (pci_write_config_dword(dev->pdev, i * 4, hca_header[i])) { if (pci_write_config_dword(dev->pdev, i * 4, hca_header[i])) {
err = -ENODEV; err = -ENODEV;
mlx4_err(dev, "Couldn't restore HCA reg %x, " mlx4_err(dev, "Couldn't restore HCA reg %x, aborting\n",
"aborting.\n", i); i);
goto out; goto out;
} }
} }
...@@ -173,8 +168,7 @@ int mlx4_reset(struct mlx4_dev *dev) ...@@ -173,8 +168,7 @@ int mlx4_reset(struct mlx4_dev *dev)
if (pci_write_config_dword(dev->pdev, PCI_COMMAND, if (pci_write_config_dword(dev->pdev, PCI_COMMAND,
hca_header[PCI_COMMAND / 4])) { hca_header[PCI_COMMAND / 4])) {
err = -ENODEV; err = -ENODEV;
mlx4_err(dev, "Couldn't restore HCA COMMAND, " mlx4_err(dev, "Couldn't restore HCA COMMAND, aborting\n");
"aborting.\n");
goto out; goto out;
} }
......
...@@ -3857,7 +3857,7 @@ static int add_eth_header(struct mlx4_dev *dev, int slave, ...@@ -3857,7 +3857,7 @@ static int add_eth_header(struct mlx4_dev *dev, int slave,
} }
} }
if (!be_mac) { if (!be_mac) {
pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d .\n", pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
port); port);
return -EINVAL; return -EINVAL;
} }
...@@ -3900,7 +3900,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, ...@@ -3900,7 +3900,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
qpn = be32_to_cpu(ctrl->qpn) & 0xffffff; qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
err = get_res(dev, slave, qpn, RES_QP, &rqp); err = get_res(dev, slave, qpn, RES_QP, &rqp);
if (err) { if (err) {
pr_err("Steering rule with qpn 0x%x rejected.\n", qpn); pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
return err; return err;
} }
rule_header = (struct _rule_hw *)(ctrl + 1); rule_header = (struct _rule_hw *)(ctrl + 1);
...@@ -3918,7 +3918,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, ...@@ -3918,7 +3918,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
case MLX4_NET_TRANS_RULE_ID_IPV4: case MLX4_NET_TRANS_RULE_ID_IPV4:
case MLX4_NET_TRANS_RULE_ID_TCP: case MLX4_NET_TRANS_RULE_ID_TCP:
case MLX4_NET_TRANS_RULE_ID_UDP: case MLX4_NET_TRANS_RULE_ID_UDP:
pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n"); pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
if (add_eth_header(dev, slave, inbox, rlist, header_id)) { if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
err = -EINVAL; err = -EINVAL;
goto err_put; goto err_put;
...@@ -3927,7 +3927,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, ...@@ -3927,7 +3927,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2; sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
break; break;
default: default:
pr_err("Corrupted mailbox.\n"); pr_err("Corrupted mailbox\n");
err = -EINVAL; err = -EINVAL;
goto err_put; goto err_put;
} }
...@@ -3941,7 +3941,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, ...@@ -3941,7 +3941,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn); err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
if (err) { if (err) {
mlx4_err(dev, "Fail to add flow steering resources.\n "); mlx4_err(dev, "Fail to add flow steering resources\n");
/* detach rule*/ /* detach rule*/
mlx4_cmd(dev, vhcr->out_param, 0, 0, mlx4_cmd(dev, vhcr->out_param, 0, 0,
MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
...@@ -3979,7 +3979,7 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave, ...@@ -3979,7 +3979,7 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0); err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
if (err) { if (err) {
mlx4_err(dev, "Fail to remove flow steering resources.\n "); mlx4_err(dev, "Fail to remove flow steering resources\n");
goto out; goto out;
} }
...@@ -4108,8 +4108,8 @@ static void rem_slave_qps(struct mlx4_dev *dev, int slave) ...@@ -4108,8 +4108,8 @@ static void rem_slave_qps(struct mlx4_dev *dev, int slave)
err = move_all_busy(dev, slave, RES_QP); err = move_all_busy(dev, slave, RES_QP);
if (err) if (err)
mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy" mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
"for slave %d\n", slave); slave);
spin_lock_irq(mlx4_tlock(dev)); spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(qp, tmp, qp_list, com.list) { list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
...@@ -4147,10 +4147,8 @@ static void rem_slave_qps(struct mlx4_dev *dev, int slave) ...@@ -4147,10 +4147,8 @@ static void rem_slave_qps(struct mlx4_dev *dev, int slave)
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE); MLX4_CMD_NATIVE);
if (err) if (err)
mlx4_dbg(dev, "rem_slave_qps: failed" mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
" to move slave %d qpn %d to" slave, qp->local_qpn);
" reset\n", slave,
qp->local_qpn);
atomic_dec(&qp->rcq->ref_count); atomic_dec(&qp->rcq->ref_count);
atomic_dec(&qp->scq->ref_count); atomic_dec(&qp->scq->ref_count);
atomic_dec(&qp->mtt->ref_count); atomic_dec(&qp->mtt->ref_count);
...@@ -4184,8 +4182,8 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave) ...@@ -4184,8 +4182,8 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
err = move_all_busy(dev, slave, RES_SRQ); err = move_all_busy(dev, slave, RES_SRQ);
if (err) if (err)
mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to " mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
"busy for slave %d\n", slave); slave);
spin_lock_irq(mlx4_tlock(dev)); spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(srq, tmp, srq_list, com.list) { list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
...@@ -4215,9 +4213,7 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave) ...@@ -4215,9 +4213,7 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE); MLX4_CMD_NATIVE);
if (err) if (err)
mlx4_dbg(dev, "rem_slave_srqs: failed" mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
" to move slave %d srq %d to"
" SW ownership\n",
slave, srqn); slave, srqn);
atomic_dec(&srq->mtt->ref_count); atomic_dec(&srq->mtt->ref_count);
...@@ -4252,8 +4248,8 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave) ...@@ -4252,8 +4248,8 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
err = move_all_busy(dev, slave, RES_CQ); err = move_all_busy(dev, slave, RES_CQ);
if (err) if (err)
mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to " mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
"busy for slave %d\n", slave); slave);
spin_lock_irq(mlx4_tlock(dev)); spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(cq, tmp, cq_list, com.list) { list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
...@@ -4283,9 +4279,7 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave) ...@@ -4283,9 +4279,7 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE); MLX4_CMD_NATIVE);
if (err) if (err)
mlx4_dbg(dev, "rem_slave_cqs: failed" mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
" to move slave %d cq %d to"
" SW ownership\n",
slave, cqn); slave, cqn);
atomic_dec(&cq->mtt->ref_count); atomic_dec(&cq->mtt->ref_count);
state = RES_CQ_ALLOCATED; state = RES_CQ_ALLOCATED;
...@@ -4317,8 +4311,8 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave) ...@@ -4317,8 +4311,8 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
err = move_all_busy(dev, slave, RES_MPT); err = move_all_busy(dev, slave, RES_MPT);
if (err) if (err)
mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to " mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
"busy for slave %d\n", slave); slave);
spin_lock_irq(mlx4_tlock(dev)); spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) { list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
...@@ -4353,9 +4347,7 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave) ...@@ -4353,9 +4347,7 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE); MLX4_CMD_NATIVE);
if (err) if (err)
mlx4_dbg(dev, "rem_slave_mrs: failed" mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
" to move slave %d mpt %d to"
" SW ownership\n",
slave, mptn); slave, mptn);
if (mpt->mtt) if (mpt->mtt)
atomic_dec(&mpt->mtt->ref_count); atomic_dec(&mpt->mtt->ref_count);
...@@ -4387,8 +4379,8 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave) ...@@ -4387,8 +4379,8 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
err = move_all_busy(dev, slave, RES_MTT); err = move_all_busy(dev, slave, RES_MTT);
if (err) if (err)
mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to " mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
"busy for slave %d\n", slave); slave);
spin_lock_irq(mlx4_tlock(dev)); spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) { list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
...@@ -4490,8 +4482,8 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave) ...@@ -4490,8 +4482,8 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
err = move_all_busy(dev, slave, RES_EQ); err = move_all_busy(dev, slave, RES_EQ);
if (err) if (err)
mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to " mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
"busy for slave %d\n", slave); slave);
spin_lock_irq(mlx4_tlock(dev)); spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(eq, tmp, eq_list, com.list) { list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
...@@ -4523,9 +4515,8 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave) ...@@ -4523,9 +4515,8 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE); MLX4_CMD_NATIVE);
if (err) if (err)
mlx4_dbg(dev, "rem_slave_eqs: failed" mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
" to move slave %d eqs %d to" slave, eqn);
" SW ownership\n", slave, eqn);
mlx4_free_cmd_mailbox(dev, mailbox); mlx4_free_cmd_mailbox(dev, mailbox);
atomic_dec(&eq->mtt->ref_count); atomic_dec(&eq->mtt->ref_count);
state = RES_EQ_RESERVED; state = RES_EQ_RESERVED;
...@@ -4554,8 +4545,8 @@ static void rem_slave_counters(struct mlx4_dev *dev, int slave) ...@@ -4554,8 +4545,8 @@ static void rem_slave_counters(struct mlx4_dev *dev, int slave)
err = move_all_busy(dev, slave, RES_COUNTER); err = move_all_busy(dev, slave, RES_COUNTER);
if (err) if (err)
mlx4_warn(dev, "rem_slave_counters: Could not move all counters to " mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
"busy for slave %d\n", slave); slave);
spin_lock_irq(mlx4_tlock(dev)); spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(counter, tmp, counter_list, com.list) { list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
...@@ -4585,8 +4576,8 @@ static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave) ...@@ -4585,8 +4576,8 @@ static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
err = move_all_busy(dev, slave, RES_XRCD); err = move_all_busy(dev, slave, RES_XRCD);
if (err) if (err)
mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to " mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
"busy for slave %d\n", slave); slave);
spin_lock_irq(mlx4_tlock(dev)); spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) { list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
...@@ -4731,10 +4722,8 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work) ...@@ -4731,10 +4722,8 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
0, MLX4_CMD_UPDATE_QP, 0, MLX4_CMD_UPDATE_QP,
MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
if (err) { if (err) {
mlx4_info(dev, "UPDATE_QP failed for slave %d, " mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
"port %d, qpn %d (%d)\n", work->slave, port, qp->local_qpn, err);
work->slave, port, qp->local_qpn,
err);
errors++; errors++;
} }
} }
......
...@@ -620,8 +620,8 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) ...@@ -620,8 +620,8 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
mlx5_command_str(msg_to_opcode(ent->in)), mlx5_command_str(msg_to_opcode(ent->in)),
msg_to_opcode(ent->in)); msg_to_opcode(ent->in));
} }
mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", err, mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
deliv_status_to_str(ent->status), ent->status); err, deliv_status_to_str(ent->status), ent->status);
return err; return err;
} }
......
...@@ -208,7 +208,8 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) ...@@ -208,7 +208,8 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
*/ */
rmb(); rmb();
mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n", eq->eqn, eqe_type_str(eqe->type)); mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
eq->eqn, eqe_type_str(eqe->type));
switch (eqe->type) { switch (eqe->type) {
case MLX5_EVENT_TYPE_COMP: case MLX5_EVENT_TYPE_COMP:
cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff; cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
...@@ -270,14 +271,16 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) ...@@ -270,14 +271,16 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id); u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages); s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);
mlx5_core_dbg(dev, "page request for func 0x%x, napges %d\n", func_id, npages); mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
func_id, npages);
mlx5_core_req_pages_handler(dev, func_id, npages); mlx5_core_req_pages_handler(dev, func_id, npages);
} }
break; break;
default: default:
mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n", eqe->type, eq->eqn); mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
eqe->type, eq->eqn);
break; break;
} }
......
...@@ -66,10 +66,10 @@ static int set_dma_caps(struct pci_dev *pdev) ...@@ -66,10 +66,10 @@ static int set_dma_caps(struct pci_dev *pdev)
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) { if (err) {
dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n"); dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) { if (err) {
dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n"); dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
return err; return err;
} }
} }
...@@ -77,11 +77,11 @@ static int set_dma_caps(struct pci_dev *pdev) ...@@ -77,11 +77,11 @@ static int set_dma_caps(struct pci_dev *pdev)
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) { if (err) {
dev_warn(&pdev->dev, dev_warn(&pdev->dev,
"Warning: couldn't set 64-bit consistent PCI DMA mask.\n"); "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) { if (err) {
dev_err(&pdev->dev, dev_err(&pdev->dev,
"Can't set consistent PCI DMA mask, aborting.\n"); "Can't set consistent PCI DMA mask, aborting\n");
return err; return err;
} }
} }
...@@ -95,7 +95,7 @@ static int request_bar(struct pci_dev *pdev) ...@@ -95,7 +95,7 @@ static int request_bar(struct pci_dev *pdev)
int err = 0; int err = 0;
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
dev_err(&pdev->dev, "Missing registers BAR, aborting.\n"); dev_err(&pdev->dev, "Missing registers BAR, aborting\n");
return -ENODEV; return -ENODEV;
} }
...@@ -319,13 +319,13 @@ int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) ...@@ -319,13 +319,13 @@ int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
err = pci_enable_device(pdev); err = pci_enable_device(pdev);
if (err) { if (err) {
dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n"); dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
goto err_dbg; goto err_dbg;
} }
err = request_bar(pdev); err = request_bar(pdev);
if (err) { if (err) {
dev_err(&pdev->dev, "error requesting BARs, aborting.\n"); dev_err(&pdev->dev, "error requesting BARs, aborting\n");
goto err_disable; goto err_disable;
} }
......
...@@ -39,24 +39,26 @@ ...@@ -39,24 +39,26 @@
extern int mlx5_core_debug_mask; extern int mlx5_core_debug_mask;
#define mlx5_core_dbg(dev, format, arg...) \ #define mlx5_core_dbg(dev, format, ...) \
pr_debug("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__, \ pr_debug("%s:%s:%d:(pid %d): " format, \
current->pid, ##arg) (dev)->priv.name, __func__, __LINE__, current->pid, \
##__VA_ARGS__)
#define mlx5_core_dbg_mask(dev, mask, format, arg...) \ #define mlx5_core_dbg_mask(dev, mask, format, ...) \
do { \ do { \
if ((mask) & mlx5_core_debug_mask) \ if ((mask) & mlx5_core_debug_mask) \
pr_debug("%s:%s:%d:(pid %d): " format, (dev)->priv.name, \ mlx5_core_dbg(dev, format, ##__VA_ARGS__); \
__func__, __LINE__, current->pid, ##arg); \
} while (0) } while (0)
#define mlx5_core_err(dev, format, arg...) \ #define mlx5_core_err(dev, format, ...) \
pr_err("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__, \ pr_err("%s:%s:%d:(pid %d): " format, \
current->pid, ##arg) (dev)->priv.name, __func__, __LINE__, current->pid, \
##__VA_ARGS__)
#define mlx5_core_warn(dev, format, arg...) \ #define mlx5_core_warn(dev, format, ...) \
pr_warn("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__, \ pr_warn("%s:%s:%d:(pid %d): " format, \
current->pid, ##arg) (dev)->priv.name, __func__, __LINE__, current->pid, \
##__VA_ARGS__)
enum { enum {
MLX5_CMD_DATA, /* print command payload only */ MLX5_CMD_DATA, /* print command payload only */
......
...@@ -73,7 +73,7 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, ...@@ -73,7 +73,7 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
} }
if (err) { if (err) {
mlx5_core_dbg(dev, "cmd exec faile %d\n", err); mlx5_core_dbg(dev, "cmd exec failed %d\n", err);
return err; return err;
} }
...@@ -191,7 +191,8 @@ int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn, ...@@ -191,7 +191,8 @@ int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
} }
if (out.hdr.status) { if (out.hdr.status) {
mlx5_core_err(dev, "create_psv bad status %d\n", out.hdr.status); mlx5_core_err(dev, "create_psv bad status %d\n",
out.hdr.status);
return mlx5_cmd_status_to_err(&out.hdr); return mlx5_cmd_status_to_err(&out.hdr);
} }
...@@ -220,7 +221,8 @@ int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num) ...@@ -220,7 +221,8 @@ int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num)
} }
if (out.hdr.status) { if (out.hdr.status) {
mlx5_core_err(dev, "destroy_psv bad status %d\n", out.hdr.status); mlx5_core_err(dev, "destroy_psv bad status %d\n",
out.hdr.status);
err = mlx5_cmd_status_to_err(&out.hdr); err = mlx5_cmd_status_to_err(&out.hdr);
goto out; goto out;
} }
......
...@@ -311,7 +311,8 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, ...@@ -311,7 +311,8 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
in->num_entries = cpu_to_be32(npages); in->num_entries = cpu_to_be32(npages);
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
if (err) { if (err) {
mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n", func_id, npages, err); mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
func_id, npages, err);
goto out_alloc; goto out_alloc;
} }
dev->priv.fw_pages += npages; dev->priv.fw_pages += npages;
...@@ -319,7 +320,8 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, ...@@ -319,7 +320,8 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
if (out.hdr.status) { if (out.hdr.status) {
err = mlx5_cmd_status_to_err(&out.hdr); err = mlx5_cmd_status_to_err(&out.hdr);
if (err) { if (err) {
mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n", func_id, npages, out.hdr.status); mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n",
func_id, npages, out.hdr.status);
goto out_alloc; goto out_alloc;
} }
} }
...@@ -378,7 +380,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, ...@@ -378,7 +380,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen); mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
if (err) { if (err) {
mlx5_core_err(dev, "failed recliaming pages\n"); mlx5_core_err(dev, "failed reclaiming pages\n");
goto out_free; goto out_free;
} }
dev->priv.fw_pages -= npages; dev->priv.fw_pages -= npages;
...@@ -414,8 +416,8 @@ static void pages_work_handler(struct work_struct *work) ...@@ -414,8 +416,8 @@ static void pages_work_handler(struct work_struct *work)
err = give_pages(dev, req->func_id, req->npages, 1); err = give_pages(dev, req->func_id, req->npages, 1);
if (err) if (err)
mlx5_core_warn(dev, "%s fail %d\n", req->npages < 0 ? mlx5_core_warn(dev, "%s fail %d\n",
"reclaim" : "give", err); req->npages < 0 ? "reclaim" : "give", err);
kfree(req); kfree(req);
} }
...@@ -487,7 +489,8 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev) ...@@ -487,7 +489,8 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
optimal_reclaimed_pages(), optimal_reclaimed_pages(),
&nclaimed); &nclaimed);
if (err) { if (err) {
mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", err); mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
err);
return err; return err;
} }
if (nclaimed) if (nclaimed)
......
...@@ -79,7 +79,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev, ...@@ -79,7 +79,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
if (err) { if (err) {
mlx5_core_warn(dev, "ret %d", err); mlx5_core_warn(dev, "ret %d\n", err);
return err; return err;
} }
...@@ -96,7 +96,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev, ...@@ -96,7 +96,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
err = radix_tree_insert(&table->tree, qp->qpn, qp); err = radix_tree_insert(&table->tree, qp->qpn, qp);
spin_unlock_irq(&table->lock); spin_unlock_irq(&table->lock);
if (err) { if (err) {
mlx5_core_warn(dev, "err %d", err); mlx5_core_warn(dev, "err %d\n", err);
goto err_cmd; goto err_cmd;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment