Commit 5331fa0d authored by Jason Gunthorpe

Merge branch 'mlx5-next' into rdma.git for-next

From
git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux

Required for dependencies on the next series

* branch 'mlx5-next':
  net/mlx5: E-Switch, add a new prio to be used by the RDMA side
  net/mlx5: E-Switch, don't use hardcoded values for FDB prios
  net/mlx5: Fix false compilation warning
  net/mlx5: Expose MPEIN (Management PCIE INfo) register layout
  net/mlx5: Add rate limit print macros
  net/mlx5: Add explicit bar address field
  net/mlx5: Replace dev_err/warn/info by mlx5_core_err/warn/info
  net/mlx5: Use dev->priv.name instead of dev_name
  net/mlx5: Make mlx5_core messages independent from mdev->pdev
  net/mlx5: Break load_one into three stages
  net/mlx5: Function setup/teardown procedures
  net/mlx5: Move health and page alloc init to mdev_init
  net/mlx5: Split mdev init and pci init
  net/mlx5: Remove redundant init functions parameter
  net/mlx5: Remove spinlock support from mlx5_write64
  net/mlx5: Remove unused MLX5_*_DOORBELL_LOCK macros
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parents ab7efbe2 d9cb0675
@@ -148,7 +148,7 @@ int mlx5_cmd_alloc_memic(struct mlx5_memic *memic, phys_addr_t *addr,
 		return ret;
 	}

-	*addr = pci_resource_start(dev->pdev, 0) +
+	*addr = dev->bar_addr +
 		MLX5_GET64(alloc_memic_out, out, memic_start_addr);

 	return 0;
@@ -167,7 +167,7 @@ int mlx5_cmd_dealloc_memic(struct mlx5_memic *memic, u64 addr, u64 length)
 	u64 start_page_idx;
 	int err;

-	addr -= pci_resource_start(dev->pdev, 0);
+	addr -= dev->bar_addr;
 	start_page_idx = (addr - hw_start_addr) >> PAGE_SHIFT;

 	MLX5_SET(dealloc_memic_in, in, opcode, MLX5_CMD_OP_DEALLOC_MEMIC);
......
@@ -2009,7 +2009,7 @@ static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
 	fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1;

-	return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + uar_idx / fw_uars_per_page;
+	return (dev->mdev->bar_addr >> PAGE_SHIFT) + uar_idx / fw_uars_per_page;
 }

 static int get_command(unsigned long offset)
@@ -2199,7 +2199,7 @@ static int dm_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 			page_idx + npages)
 		return -EINVAL;

-	pfn = ((pci_resource_start(dev->mdev->pdev, 0) +
+	pfn = ((dev->mdev->bar_addr +
 		MLX5_CAP64_DEV_MEM(dev->mdev, memic_bar_start_addr)) >>
 		PAGE_SHIFT) +
 	      page_idx;
@@ -2283,7 +2283,7 @@ struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
 		goto err_free;

 	start_offset = memic_addr & ~PAGE_MASK;
-	page_idx = (memic_addr - pci_resource_start(memic->dev->pdev, 0) -
+	page_idx = (memic_addr - memic->dev->bar_addr -
 		    MLX5_CAP64_DEV_MEM(memic->dev, memic_bar_start_addr)) >>
 		    PAGE_SHIFT;
@@ -2326,7 +2326,7 @@ int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs)
 	if (ret)
 		return ret;

-	page_idx = (dm->dev_addr - pci_resource_start(memic->dev->pdev, 0) -
+	page_idx = (dm->dev_addr - memic->dev->bar_addr -
 		    MLX5_CAP64_DEV_MEM(memic->dev, memic_bar_start_addr)) >>
 		    PAGE_SHIFT;

 	bitmap_clear(rdma_udata_to_drv_context(
......
@@ -1194,8 +1194,7 @@ static struct ib_mr *mlx5_ib_get_memic_mr(struct ib_pd *pd, u64 memic_addr,
 	MLX5_SET64(mkc, mkc, len, length);
 	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
 	MLX5_SET(mkc, mkc, qpn, 0xffffff);
-	MLX5_SET64(mkc, mkc, start_addr,
-		   memic_addr - pci_resource_start(dev->mdev->pdev, 0));
+	MLX5_SET64(mkc, mkc, start_addr, memic_addr - dev->mdev->bar_addr);

 	err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
 	if (err)
......
@@ -5126,7 +5126,7 @@ static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 		wmb();

 		/* currently we support only regular doorbells */
-		mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset, NULL);
+		mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset);
 		/* Make sure doorbells don't leak out of SQ spinlock
 		 * and reach the HCA out of order.
 		 */
......
@@ -1347,7 +1347,7 @@ static void set_wqname(struct mlx5_core_dev *dev)
 	struct mlx5_cmd *cmd = &dev->cmd;

 	snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
-		 dev_name(&dev->pdev->dev));
+		 dev->priv.name);
 }

 static void clean_debug_files(struct mlx5_core_dev *dev)
@@ -1902,7 +1902,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
 	memset(cmd, 0, sizeof(*cmd));
 	cmd_if_rev = cmdif_rev(dev);
 	if (cmd_if_rev != CMD_IF_REV) {
-		dev_err(&dev->pdev->dev,
+		mlx5_core_err(dev,
 			"Driver cmdif rev(%d) differs from firmware's(%d)\n",
 			CMD_IF_REV, cmd_if_rev);
 		return -EINVAL;
@@ -1921,14 +1921,14 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
 	cmd->log_sz = cmd_l >> 4 & 0xf;
 	cmd->log_stride = cmd_l & 0xf;
 	if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
-		dev_err(&dev->pdev->dev, "firmware reports too many outstanding commands %d\n",
+		mlx5_core_err(dev, "firmware reports too many outstanding commands %d\n",
 			1 << cmd->log_sz);
 		err = -EINVAL;
 		goto err_free_page;
 	}

 	if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
-		dev_err(&dev->pdev->dev, "command queue size overflow\n");
+		mlx5_core_err(dev, "command queue size overflow\n");
 		err = -EINVAL;
 		goto err_free_page;
 	}
@@ -1939,7 +1939,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
 	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
 	if (cmd->cmdif_rev > CMD_IF_REV) {
-		dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
+		mlx5_core_err(dev, "driver does not support command interface version. driver %d, firmware %d\n",
 			CMD_IF_REV, cmd->cmdif_rev);
 		err = -EOPNOTSUPP;
 		goto err_free_page;
@@ -1956,7 +1956,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
 	cmd_h = (u32)((u64)(cmd->dma) >> 32);
 	cmd_l = (u32)(cmd->dma);
 	if (cmd_l & 0xfff) {
-		dev_err(&dev->pdev->dev, "invalid command queue address\n");
+		mlx5_core_err(dev, "invalid command queue address\n");
 		err = -ENOMEM;
 		goto err_free_page;
 	}
@@ -1976,7 +1976,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
 	set_wqname(dev);
 	cmd->wq = create_singlethread_workqueue(cmd->wq_name);
 	if (!cmd->wq) {
-		dev_err(&dev->pdev->dev, "failed to create command workqueue\n");
+		mlx5_core_err(dev, "failed to create command workqueue\n");
 		err = -ENOMEM;
 		goto err_cache;
 	}
......
@@ -47,7 +47,7 @@ TRACE_EVENT(mlx5_fw,
 	TP_ARGS(tracer, trace_timestamp, lost, event_id, msg),

 	TP_STRUCT__entry(
-		__string(dev_name, dev_name(&tracer->dev->pdev->dev))
+		__string(dev_name, tracer->dev->priv.name)
 		__field(u64, trace_timestamp)
 		__field(bool, lost)
 		__field(u8, event_id)
@@ -55,7 +55,7 @@ TRACE_EVENT(mlx5_fw,
 	),

 	TP_fast_assign(
-		__assign_str(dev_name, dev_name(&tracer->dev->pdev->dev));
+		__assign_str(dev_name, tracer->dev->priv.name);
 		__entry->trace_timestamp = trace_timestamp;
 		__entry->lost = lost;
 		__entry->event_id = event_id;
......
@@ -929,7 +929,7 @@ void mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc,
 	 */
 	wmb();

-	mlx5_write64((__be32 *)ctrl, uar_map, NULL);
+	mlx5_write64((__be32 *)ctrl, uar_map);
 }

 static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
......
@@ -43,11 +43,6 @@
 #include "ecpf.h"
 #include "lib/eq.h"

-enum {
-	FDB_FAST_PATH = 0,
-	FDB_SLOW_PATH
-};
-
 /* There are two match-all miss flows, one for unicast dst mac and
  * one for multicast.
  */
......
@@ -135,7 +135,7 @@ static void mlx5_fpga_conn_notify_hw(struct mlx5_fpga_conn *conn, void *wqe)
 	*conn->qp.wq.sq.db = cpu_to_be32(conn->qp.sq.pc);
 	/* Make sure that doorbell record is visible before ringing */
 	wmb();
-	mlx5_write64(wqe, conn->fdev->conn_res.uar->map + MLX5_BF_OFFSET, NULL);
+	mlx5_write64(wqe, conn->fdev->conn_res.uar->map + MLX5_BF_OFFSET);
 }

 static void mlx5_fpga_conn_post_send(struct mlx5_fpga_conn *conn,
......
@@ -37,6 +37,7 @@
 #include <linux/mlx5/eq.h>

+#include "mlx5_core.h"
 #include "lib/eq.h"
 #include "fpga/cmd.h"
@@ -62,26 +63,26 @@ struct mlx5_fpga_device {
 };

 #define mlx5_fpga_dbg(__adev, format, ...) \
-	dev_dbg(&(__adev)->mdev->pdev->dev, "FPGA: %s:%d:(pid %d): " format, \
+	mlx5_core_dbg((__adev)->mdev, "FPGA: %s:%d:(pid %d): " format, \
 		__func__, __LINE__, current->pid, ##__VA_ARGS__)

 #define mlx5_fpga_err(__adev, format, ...) \
-	dev_err(&(__adev)->mdev->pdev->dev, "FPGA: %s:%d:(pid %d): " format, \
+	mlx5_core_err((__adev)->mdev, "FPGA: %s:%d:(pid %d): " format, \
 		__func__, __LINE__, current->pid, ##__VA_ARGS__)

 #define mlx5_fpga_warn(__adev, format, ...) \
-	dev_warn(&(__adev)->mdev->pdev->dev, "FPGA: %s:%d:(pid %d): " format, \
+	mlx5_core_warn((__adev)->mdev, "FPGA: %s:%d:(pid %d): " format, \
 		__func__, __LINE__, current->pid, ##__VA_ARGS__)

 #define mlx5_fpga_warn_ratelimited(__adev, format, ...) \
-	dev_warn_ratelimited(&(__adev)->mdev->pdev->dev, "FPGA: %s:%d: " \
+	mlx5_core_err_rl((__adev)->mdev, "FPGA: %s:%d: " \
 			     format, __func__, __LINE__, ##__VA_ARGS__)

 #define mlx5_fpga_notice(__adev, format, ...) \
-	dev_notice(&(__adev)->mdev->pdev->dev, "FPGA: " format, ##__VA_ARGS__)
+	mlx5_core_info((__adev)->mdev, "FPGA: " format, ##__VA_ARGS__)

 #define mlx5_fpga_info(__adev, format, ...) \
-	dev_info(&(__adev)->mdev->pdev->dev, "FPGA: " format, ##__VA_ARGS__)
+	mlx5_core_info((__adev)->mdev, "FPGA: " format, ##__VA_ARGS__)

 int mlx5_fpga_init(struct mlx5_core_dev *mdev);
 void mlx5_fpga_cleanup(struct mlx5_core_dev *mdev);
......
@@ -819,7 +819,7 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
 	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
 	struct mlx5_ft_underlay_qp *uqp;
 	int min_level = INT_MAX;
-	int err;
+	int err = 0;
 	u32 qpn;

 	if (root->root_ft)
@@ -2516,8 +2516,16 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
 	if (!steering->fdb_sub_ns)
 		return -ENOMEM;

+	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH,
+				  1);
+	if (IS_ERR(maj_prio)) {
+		err = PTR_ERR(maj_prio);
+		goto out_err;
+	}
+
 	levels = 2 * FDB_MAX_PRIO * (FDB_MAX_CHAIN + 1);
-	maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns, 0,
+	maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns,
+					  FDB_FAST_PATH,
 					  levels);
 	if (IS_ERR(maj_prio)) {
 		err = PTR_ERR(maj_prio);
@@ -2542,7 +2550,7 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
 		steering->fdb_sub_ns[chain] = ns;
 	}

-	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, 1, 1);
+	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_SLOW_PATH, 1);
 	if (IS_ERR(maj_prio)) {
 		err = PTR_ERR(maj_prio);
 		goto out_err;
......
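Taken together, the fs_core.c hunks above make init_fdb_root_ns() create three prios in a fixed order, using the enum now shared through include/linux/mlx5/fs.h instead of the old hardcoded 0 and 1. A condensed sketch of the resulting creation order (error unwinding elided; fs_create_prio()/fs_create_prio_chained() are the steering core's internal helpers):

	/* Sketch: FDB prio creation order after this series. FDB_BYPASS_PATH is
	 * created first so RDMA-side rules can land ahead of the fast path.
	 */
	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH, 1);
	maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns, FDB_FAST_PATH,
					  2 * FDB_MAX_PRIO * (FDB_MAX_CHAIN + 1));
	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_SLOW_PATH, 1);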
@@ -152,11 +152,11 @@ static void health_recover(struct work_struct *work)
 	nic_state = mlx5_get_nic_state(dev);
 	if (nic_state == MLX5_NIC_IFC_INVALID) {
-		dev_err(&dev->pdev->dev, "health recovery flow aborted since the nic state is invalid\n");
+		mlx5_core_err(dev, "health recovery flow aborted since the nic state is invalid\n");
 		return;
 	}

-	dev_err(&dev->pdev->dev, "starting health recovery flow\n");
+	mlx5_core_err(dev, "starting health recovery flow\n");
 	mlx5_recover_device(dev);
 }
@@ -180,7 +180,7 @@ static void health_care(struct work_struct *work)
 	if (!test_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags))
 		schedule_delayed_work(&health->recover_work, recover_delay);
 	else
-		dev_err(&dev->pdev->dev,
+		mlx5_core_err(dev,
 			"new health works are not permitted at this stage\n");
 	spin_unlock_irqrestore(&health->wq_lock, flags);
 }
@@ -228,18 +228,22 @@ static void print_health_info(struct mlx5_core_dev *dev)
 		return;

 	for (i = 0; i < ARRAY_SIZE(h->assert_var); i++)
-		dev_err(&dev->pdev->dev, "assert_var[%d] 0x%08x\n", i, ioread32be(h->assert_var + i));
+		mlx5_core_err(dev, "assert_var[%d] 0x%08x\n", i,
+			      ioread32be(h->assert_var + i));

-	dev_err(&dev->pdev->dev, "assert_exit_ptr 0x%08x\n", ioread32be(&h->assert_exit_ptr));
-	dev_err(&dev->pdev->dev, "assert_callra 0x%08x\n", ioread32be(&h->assert_callra));
+	mlx5_core_err(dev, "assert_exit_ptr 0x%08x\n",
+		      ioread32be(&h->assert_exit_ptr));
+	mlx5_core_err(dev, "assert_callra 0x%08x\n",
+		      ioread32be(&h->assert_callra));
 	sprintf(fw_str, "%d.%d.%d", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
-	dev_err(&dev->pdev->dev, "fw_ver %s\n", fw_str);
-	dev_err(&dev->pdev->dev, "hw_id 0x%08x\n", ioread32be(&h->hw_id));
-	dev_err(&dev->pdev->dev, "irisc_index %d\n", ioread8(&h->irisc_index));
-	dev_err(&dev->pdev->dev, "synd 0x%x: %s\n", ioread8(&h->synd), hsynd_str(ioread8(&h->synd)));
-	dev_err(&dev->pdev->dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
+	mlx5_core_err(dev, "fw_ver %s\n", fw_str);
+	mlx5_core_err(dev, "hw_id 0x%08x\n", ioread32be(&h->hw_id));
+	mlx5_core_err(dev, "irisc_index %d\n", ioread8(&h->irisc_index));
+	mlx5_core_err(dev, "synd 0x%x: %s\n", ioread8(&h->synd),
+		      hsynd_str(ioread8(&h->synd)));
+	mlx5_core_err(dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
 	fw = ioread32be(&h->fw_ver);
-	dev_err(&dev->pdev->dev, "raw fw_ver 0x%08x\n", fw);
+	mlx5_core_err(dev, "raw fw_ver 0x%08x\n", fw);
 }

 static unsigned long get_next_poll_jiffies(void)
@@ -262,8 +266,7 @@ void mlx5_trigger_health_work(struct mlx5_core_dev *dev)
 	if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
 		queue_work(health->wq, &health->work);
 	else
-		dev_err(&dev->pdev->dev,
-			"new health works are not permitted at this stage\n");
+		mlx5_core_err(dev, "new health works are not permitted at this stage\n");
 	spin_unlock_irqrestore(&health->wq_lock, flags);
 }
@@ -284,7 +287,7 @@ static void poll_health(struct timer_list *t)
 	health->prev = count;
 	if (health->miss_counter == MAX_MISSES) {
-		dev_err(&dev->pdev->dev, "device's health compromised - reached miss count\n");
+		mlx5_core_err(dev, "device's health compromised - reached miss count\n");
 		print_health_info(dev);
 	}
@@ -352,6 +355,13 @@ void mlx5_drain_health_recovery(struct mlx5_core_dev *dev)
 	cancel_delayed_work_sync(&dev->priv.health.recover_work);
 }

+void mlx5_health_flush(struct mlx5_core_dev *dev)
+{
+	struct mlx5_core_health *health = &dev->priv.health;
+
+	flush_workqueue(health->wq);
+}
+
 void mlx5_health_cleanup(struct mlx5_core_dev *dev)
 {
 	struct mlx5_core_health *health = &dev->priv.health;
@@ -370,7 +380,7 @@ int mlx5_health_init(struct mlx5_core_dev *dev)
 		return -ENOMEM;

 	strcpy(name, "mlx5_health");
-	strcat(name, dev_name(&dev->pdev->dev));
+	strcat(name, dev->priv.name);
 	health->wq = create_singlethread_workqueue(name);
 	kfree(name);
 	if (!health->wq)
......
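The new mlx5_health_flush() only flushes the health workqueue, while mlx5_health_cleanup() still destroys it; remove_one() (in the main.c diff below) switches to the flush variant on its failed-unload path, since workqueue destruction now belongs to mlx5_mdev_uninit(). A sketch of the intended call pattern:

	/* Sketch: failed-unload path in remove_one() after this series. */
	if (mlx5_unload_one(dev, true)) {
		mlx5_core_err(dev, "mlx5_unload_one failed\n");
		mlx5_health_flush(dev);	/* drain pending work, keep the wq alive */
		return;
	}
	mlx5_pci_close(dev);
	mlx5_mdev_uninit(dev);		/* this is what calls mlx5_health_cleanup() */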
@@ -587,24 +587,23 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)

 static int set_hca_cap(struct mlx5_core_dev *dev)
 {
-	struct pci_dev *pdev = dev->pdev;
 	int err;

 	err = handle_hca_cap(dev);
 	if (err) {
-		dev_err(&pdev->dev, "handle_hca_cap failed\n");
+		mlx5_core_err(dev, "handle_hca_cap failed\n");
 		goto out;
 	}

 	err = handle_hca_cap_atomic(dev);
 	if (err) {
-		dev_err(&pdev->dev, "handle_hca_cap_atomic failed\n");
+		mlx5_core_err(dev, "handle_hca_cap_atomic failed\n");
 		goto out;
 	}

 	err = handle_hca_cap_odp(dev);
 	if (err) {
-		dev_err(&pdev->dev, "handle_hca_cap_odp failed\n");
+		mlx5_core_err(dev, "handle_hca_cap_odp failed\n");
 		goto out;
 	}
@@ -736,36 +735,29 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
 	return -EOPNOTSUPP;
 }

-static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
+			 const struct pci_device_id *id)
 {
-	struct pci_dev *pdev = dev->pdev;
+	struct mlx5_priv *priv = &dev->priv;
 	int err = 0;

-	pci_set_drvdata(dev->pdev, dev);
-	strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN);
-	priv->name[MLX5_MAX_NAME_LEN - 1] = 0;
-
-	mutex_init(&priv->pgdir_mutex);
-	INIT_LIST_HEAD(&priv->pgdir_list);
-	spin_lock_init(&priv->mkey_lock);
-
-	mutex_init(&priv->alloc_mutex);
+	dev->pdev = pdev;
+	priv->pci_dev_data = id->driver_data;

+	pci_set_drvdata(dev->pdev, dev);
+
+	dev->bar_addr = pci_resource_start(pdev, 0);
 	priv->numa_node = dev_to_node(&dev->pdev->dev);

-	if (mlx5_debugfs_root)
-		priv->dbg_root =
-			debugfs_create_dir(pci_name(pdev), mlx5_debugfs_root);
-
 	err = mlx5_pci_enable_device(dev);
 	if (err) {
-		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
-		goto err_dbg;
+		mlx5_core_err(dev, "Cannot enable PCI device, aborting\n");
+		return err;
 	}

 	err = request_bar(pdev);
 	if (err) {
-		dev_err(&pdev->dev, "error requesting BARs, aborting\n");
+		mlx5_core_err(dev, "error requesting BARs, aborting\n");
 		goto err_disable;
 	}
@@ -773,7 +765,7 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
 	err = set_dma_caps(pdev);
 	if (err) {
-		dev_err(&pdev->dev, "Failed setting DMA capabilities mask, aborting\n");
+		mlx5_core_err(dev, "Failed setting DMA capabilities mask, aborting\n");
 		goto err_clr_master;
 	}
@@ -782,11 +774,11 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
 	    pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP128))
 		mlx5_core_dbg(dev, "Enabling pci atomics failed\n");

-	dev->iseg_base = pci_resource_start(dev->pdev, 0);
+	dev->iseg_base = dev->bar_addr;
 	dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
 	if (!dev->iseg) {
 		err = -ENOMEM;
-		dev_err(&pdev->dev, "Failed mapping initialization segment, aborting\n");
+		mlx5_core_err(dev, "Failed mapping initialization segment, aborting\n");
 		goto err_clr_master;
 	}
@@ -797,52 +789,47 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
 	release_bar(dev->pdev);
 err_disable:
 	mlx5_pci_disable_device(dev);
-err_dbg:
-	debugfs_remove(priv->dbg_root);
 	return err;
 }

-static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+static void mlx5_pci_close(struct mlx5_core_dev *dev)
 {
 	iounmap(dev->iseg);
 	pci_clear_master(dev->pdev);
 	release_bar(dev->pdev);
 	mlx5_pci_disable_device(dev);
-	debugfs_remove_recursive(priv->dbg_root);
 }

-static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+static int mlx5_init_once(struct mlx5_core_dev *dev)
 {
-	struct pci_dev *pdev = dev->pdev;
 	int err;

-	priv->devcom = mlx5_devcom_register_device(dev);
-	if (IS_ERR(priv->devcom))
-		dev_err(&pdev->dev, "failed to register with devcom (0x%p)\n",
-			priv->devcom);
+	dev->priv.devcom = mlx5_devcom_register_device(dev);
+	if (IS_ERR(dev->priv.devcom))
+		mlx5_core_err(dev, "failed to register with devcom (0x%p)\n",
+			      dev->priv.devcom);

 	err = mlx5_query_board_id(dev);
 	if (err) {
-		dev_err(&pdev->dev, "query board id failed\n");
+		mlx5_core_err(dev, "query board id failed\n");
 		goto err_devcom;
 	}

 	err = mlx5_eq_table_init(dev);
 	if (err) {
-		dev_err(&pdev->dev, "failed to initialize eq\n");
+		mlx5_core_err(dev, "failed to initialize eq\n");
 		goto err_devcom;
 	}

 	err = mlx5_events_init(dev);
 	if (err) {
-		dev_err(&pdev->dev, "failed to initialize events\n");
+		mlx5_core_err(dev, "failed to initialize events\n");
 		goto err_eq_cleanup;
 	}

 	err = mlx5_cq_debugfs_init(dev);
 	if (err) {
-		dev_err(&pdev->dev, "failed to initialize cq debugfs\n");
+		mlx5_core_err(dev, "failed to initialize cq debugfs\n");
 		goto err_events_cleanup;
 	}
@@ -858,31 +845,31 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)

 	err = mlx5_init_rl_table(dev);
 	if (err) {
-		dev_err(&pdev->dev, "Failed to init rate limiting\n");
+		mlx5_core_err(dev, "Failed to init rate limiting\n");
 		goto err_tables_cleanup;
 	}

 	err = mlx5_mpfs_init(dev);
 	if (err) {
-		dev_err(&pdev->dev, "Failed to init l2 table %d\n", err);
+		mlx5_core_err(dev, "Failed to init l2 table %d\n", err);
 		goto err_rl_cleanup;
 	}

 	err = mlx5_eswitch_init(dev);
 	if (err) {
-		dev_err(&pdev->dev, "Failed to init eswitch %d\n", err);
+		mlx5_core_err(dev, "Failed to init eswitch %d\n", err);
 		goto err_mpfs_cleanup;
 	}

 	err = mlx5_sriov_init(dev);
 	if (err) {
-		dev_err(&pdev->dev, "Failed to init sriov %d\n", err);
+		mlx5_core_err(dev, "Failed to init sriov %d\n", err);
 		goto err_eswitch_cleanup;
 	}

 	err = mlx5_fpga_init(dev);
 	if (err) {
-		dev_err(&pdev->dev, "Failed to init fpga device %d\n", err);
+		mlx5_core_err(dev, "Failed to init fpga device %d\n", err);
 		goto err_sriov_cleanup;
 	}
@@ -932,93 +919,78 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
 	mlx5_devcom_unregister_device(dev->priv.devcom);
 }

-static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
-			 bool boot)
+static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
 {
-	struct pci_dev *pdev = dev->pdev;
 	int err;

-	dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
-	mutex_lock(&dev->intf_state_mutex);
-	if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
-		dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n",
-			 __func__);
-		goto out;
-	}
-
-	dev_info(&pdev->dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev),
+	mlx5_core_info(dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev),
 		 fw_rev_min(dev), fw_rev_sub(dev));

 	/* Only PFs hold the relevant PCIe information for this query */
 	if (mlx5_core_is_pf(dev))
 		pcie_print_link_status(dev->pdev);

-	/* on load removing any previous indication of internal error, device is
-	 * up
-	 */
-	dev->state = MLX5_DEVICE_STATE_UP;
-
 	/* wait for firmware to accept initialization segments configurations
 	 */
 	err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI);
 	if (err) {
-		dev_err(&dev->pdev->dev, "Firmware over %d MS in pre-initializing state, aborting\n",
+		mlx5_core_err(dev, "Firmware over %d MS in pre-initializing state, aborting\n",
 			FW_PRE_INIT_TIMEOUT_MILI);
-		goto out_err;
+		return err;
 	}

 	err = mlx5_cmd_init(dev);
 	if (err) {
-		dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");
-		goto out_err;
+		mlx5_core_err(dev, "Failed initializing command interface, aborting\n");
+		return err;
 	}

 	err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI);
 	if (err) {
-		dev_err(&dev->pdev->dev, "Firmware over %d MS in initializing state, aborting\n",
+		mlx5_core_err(dev, "Firmware over %d MS in initializing state, aborting\n",
 			FW_INIT_TIMEOUT_MILI);
 		goto err_cmd_cleanup;
 	}

 	err = mlx5_core_enable_hca(dev, 0);
 	if (err) {
-		dev_err(&pdev->dev, "enable hca failed\n");
+		mlx5_core_err(dev, "enable hca failed\n");
 		goto err_cmd_cleanup;
 	}

 	err = mlx5_core_set_issi(dev);
 	if (err) {
-		dev_err(&pdev->dev, "failed to set issi\n");
+		mlx5_core_err(dev, "failed to set issi\n");
 		goto err_disable_hca;
 	}

 	err = mlx5_satisfy_startup_pages(dev, 1);
 	if (err) {
-		dev_err(&pdev->dev, "failed to allocate boot pages\n");
+		mlx5_core_err(dev, "failed to allocate boot pages\n");
 		goto err_disable_hca;
 	}

 	err = set_hca_ctrl(dev);
 	if (err) {
-		dev_err(&pdev->dev, "set_hca_ctrl failed\n");
+		mlx5_core_err(dev, "set_hca_ctrl failed\n");
 		goto reclaim_boot_pages;
 	}

 	err = set_hca_cap(dev);
 	if (err) {
-		dev_err(&pdev->dev, "set_hca_cap failed\n");
+		mlx5_core_err(dev, "set_hca_cap failed\n");
 		goto reclaim_boot_pages;
 	}

 	err = mlx5_satisfy_startup_pages(dev, 0);
 	if (err) {
-		dev_err(&pdev->dev, "failed to allocate init pages\n");
+		mlx5_core_err(dev, "failed to allocate init pages\n");
 		goto reclaim_boot_pages;
 	}

 	err = mlx5_cmd_init_hca(dev, sw_owner_id);
 	if (err) {
-		dev_err(&pdev->dev, "init hca failed\n");
+		mlx5_core_err(dev, "init hca failed\n");
 		goto reclaim_boot_pages;
 	}
@@ -1028,23 +1000,50 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,

 	err = mlx5_query_hca_caps(dev);
 	if (err) {
-		dev_err(&pdev->dev, "query hca failed\n");
-		goto err_stop_poll;
+		mlx5_core_err(dev, "query hca failed\n");
+		goto stop_health;
 	}

-	if (boot) {
-		err = mlx5_init_once(dev, priv);
+	return 0;
+
+stop_health:
+	mlx5_stop_health_poll(dev, boot);
+reclaim_boot_pages:
+	mlx5_reclaim_startup_pages(dev);
+err_disable_hca:
+	mlx5_core_disable_hca(dev, 0);
+err_cmd_cleanup:
+	mlx5_cmd_cleanup(dev);
+
+	return err;
+}
+
+static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
+{
+	int err;
+
+	mlx5_stop_health_poll(dev, boot);
+	err = mlx5_cmd_teardown_hca(dev);
 	if (err) {
-		dev_err(&pdev->dev, "sw objs init failed\n");
-		goto err_stop_poll;
-	}
+		mlx5_core_err(dev, "tear_down_hca failed, skip cleanup\n");
+		return err;
 	}
+	mlx5_reclaim_startup_pages(dev);
+	mlx5_core_disable_hca(dev, 0);
+	mlx5_cmd_cleanup(dev);
+
+	return 0;
+}
+
+static int mlx5_load(struct mlx5_core_dev *dev)
+{
+	int err;

 	dev->priv.uar = mlx5_get_uars_page(dev);
 	if (IS_ERR(dev->priv.uar)) {
-		dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
+		mlx5_core_err(dev, "Failed allocating uar, aborting\n");
 		err = PTR_ERR(dev->priv.uar);
-		goto err_get_uars;
+		return err;
 	}

 	mlx5_events_start(dev);
@@ -1052,132 +1051,155 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,

 	err = mlx5_eq_table_create(dev);
 	if (err) {
-		dev_err(&pdev->dev, "Failed to create EQs\n");
+		mlx5_core_err(dev, "Failed to create EQs\n");
 		goto err_eq_table;
 	}

 	err = mlx5_fw_tracer_init(dev->tracer);
 	if (err) {
-		dev_err(&pdev->dev, "Failed to init FW tracer\n");
+		mlx5_core_err(dev, "Failed to init FW tracer\n");
 		goto err_fw_tracer;
 	}

 	err = mlx5_fpga_device_start(dev);
 	if (err) {
-		dev_err(&pdev->dev, "fpga device start failed %d\n", err);
+		mlx5_core_err(dev, "fpga device start failed %d\n", err);
 		goto err_fpga_start;
 	}

 	err = mlx5_accel_ipsec_init(dev);
 	if (err) {
-		dev_err(&pdev->dev, "IPSec device start failed %d\n", err);
+		mlx5_core_err(dev, "IPSec device start failed %d\n", err);
 		goto err_ipsec_start;
 	}

 	err = mlx5_accel_tls_init(dev);
 	if (err) {
-		dev_err(&pdev->dev, "TLS device start failed %d\n", err);
+		mlx5_core_err(dev, "TLS device start failed %d\n", err);
 		goto err_tls_start;
 	}

 	err = mlx5_init_fs(dev);
 	if (err) {
-		dev_err(&pdev->dev, "Failed to init flow steering\n");
+		mlx5_core_err(dev, "Failed to init flow steering\n");
 		goto err_fs;
 	}

 	err = mlx5_core_set_hca_defaults(dev);
 	if (err) {
-		dev_err(&pdev->dev, "Failed to set hca defaults\n");
+		mlx5_core_err(dev, "Failed to set hca defaults\n");
 		goto err_fs;
 	}

 	err = mlx5_sriov_attach(dev);
 	if (err) {
-		dev_err(&pdev->dev, "sriov init failed %d\n", err);
+		mlx5_core_err(dev, "sriov init failed %d\n", err);
 		goto err_sriov;
 	}

 	err = mlx5_ec_init(dev);
 	if (err) {
-		dev_err(&pdev->dev, "Failed to init embedded CPU\n");
+		mlx5_core_err(dev, "Failed to init embedded CPU\n");
 		goto err_ec;
 	}

-	if (mlx5_device_registered(dev)) {
-		mlx5_attach_device(dev);
-	} else {
-		err = mlx5_register_device(dev);
-		if (err) {
-			dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
-			goto err_reg_dev;
-		}
-	}
-
-	set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
-out:
-	mutex_unlock(&dev->intf_state_mutex);
-
 	return 0;

-err_reg_dev:
-	mlx5_ec_cleanup(dev);
-
 err_ec:
 	mlx5_sriov_detach(dev);
 err_sriov:
 	mlx5_cleanup_fs(dev);
 err_fs:
 	mlx5_accel_tls_cleanup(dev);
 err_tls_start:
 	mlx5_accel_ipsec_cleanup(dev);
 err_ipsec_start:
 	mlx5_fpga_device_stop(dev);
 err_fpga_start:
 	mlx5_fw_tracer_cleanup(dev->tracer);
 err_fw_tracer:
 	mlx5_eq_table_destroy(dev);
 err_eq_table:
 	mlx5_pagealloc_stop(dev);
 	mlx5_events_stop(dev);
-	mlx5_put_uars_page(dev, priv->uar);
+	mlx5_put_uars_page(dev, dev->priv.uar);
+	return err;
+}
+
+static void mlx5_unload(struct mlx5_core_dev *dev)
+{
+	mlx5_ec_cleanup(dev);
+	mlx5_sriov_detach(dev);
+	mlx5_cleanup_fs(dev);
+	mlx5_accel_ipsec_cleanup(dev);
+	mlx5_accel_tls_cleanup(dev);
+	mlx5_fpga_device_stop(dev);
+	mlx5_fw_tracer_cleanup(dev->tracer);
+	mlx5_eq_table_destroy(dev);
+	mlx5_pagealloc_stop(dev);
+	mlx5_events_stop(dev);
+	mlx5_put_uars_page(dev, dev->priv.uar);
+}

-err_get_uars:
-	if (boot)
-		mlx5_cleanup_once(dev);
-
-err_stop_poll:
-	mlx5_stop_health_poll(dev, boot);
-	if (mlx5_cmd_teardown_hca(dev)) {
-		dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
-		goto out_err;
-	}
-
-reclaim_boot_pages:
-	mlx5_reclaim_startup_pages(dev);
-
-err_disable_hca:
-	mlx5_core_disable_hca(dev, 0);
-
-err_cmd_cleanup:
-	mlx5_cmd_cleanup(dev);
-
-out_err:
+static int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
+{
+	int err = 0;
+
+	dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
+	mutex_lock(&dev->intf_state_mutex);
+	if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
+		mlx5_core_warn(dev, "interface is up, NOP\n");
+		goto out;
+	}
+	/* remove any previous indication of internal error */
+	dev->state = MLX5_DEVICE_STATE_UP;
+
+	err = mlx5_function_setup(dev, boot);
+	if (err)
+		goto out;
+
+	if (boot) {
+		err = mlx5_init_once(dev);
+		if (err) {
+			mlx5_core_err(dev, "sw objs init failed\n");
+			goto function_teardown;
+		}
+	}
+
+	err = mlx5_load(dev);
+	if (err)
+		goto err_load;
+
+	if (mlx5_device_registered(dev)) {
+		mlx5_attach_device(dev);
+	} else {
+		err = mlx5_register_device(dev);
+		if (err) {
+			mlx5_core_err(dev, "register device failed %d\n", err);
+			goto err_reg_dev;
+		}
+	}
+
+	set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
+out:
+	mutex_unlock(&dev->intf_state_mutex);
+
+	return err;
+
+err_reg_dev:
+	mlx5_unload(dev);
+err_load:
+	if (boot)
+		mlx5_cleanup_once(dev);
+function_teardown:
+	mlx5_function_teardown(dev, boot);
 	dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
 	mutex_unlock(&dev->intf_state_mutex);
 	return err;
 }

-static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
-			   bool cleanup)
+static int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
 {
 	int err = 0;
@@ -1186,7 +1208,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
 	mutex_lock(&dev->intf_state_mutex);
 	if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
-		dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
+		mlx5_core_warn(dev, "%s: interface is down, NOP\n",
 			 __func__);
 		if (cleanup)
 			mlx5_cleanup_once(dev);
@@ -1198,30 +1220,12 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
 	if (mlx5_device_registered(dev))
 		mlx5_detach_device(dev);

-	mlx5_ec_cleanup(dev);
-	mlx5_sriov_detach(dev);
-	mlx5_cleanup_fs(dev);
-	mlx5_accel_ipsec_cleanup(dev);
-	mlx5_accel_tls_cleanup(dev);
-	mlx5_fpga_device_stop(dev);
-	mlx5_fw_tracer_cleanup(dev->tracer);
-	mlx5_eq_table_destroy(dev);
-	mlx5_pagealloc_stop(dev);
-	mlx5_events_stop(dev);
-	mlx5_put_uars_page(dev, priv->uar);
+	mlx5_unload(dev);
+
 	if (cleanup)
 		mlx5_cleanup_once(dev);
-	mlx5_stop_health_poll(dev, cleanup);
-
-	err = mlx5_cmd_teardown_hca(dev);
-	if (err) {
-		dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
-		goto out;
-	}
-	mlx5_reclaim_startup_pages(dev);
-	mlx5_core_disable_hca(dev, 0);
-	mlx5_cmd_cleanup(dev);

+	mlx5_function_teardown(dev, cleanup);
 out:
 	mutex_unlock(&dev->intf_state_mutex);
 	return err;
@@ -1238,29 +1242,15 @@ static const struct devlink_ops mlx5_devlink_ops = {
 #endif
 };

-#define MLX5_IB_MOD "mlx5_ib"
-static int init_one(struct pci_dev *pdev,
-		    const struct pci_device_id *id)
+static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx, const char *name)
 {
-	struct mlx5_core_dev *dev;
-	struct devlink *devlink;
-	struct mlx5_priv *priv;
+	struct mlx5_priv *priv = &dev->priv;
 	int err;

-	devlink = devlink_alloc(&mlx5_devlink_ops, sizeof(*dev));
-	if (!devlink) {
-		dev_err(&pdev->dev, "kzalloc failed\n");
-		return -ENOMEM;
-	}
+	strncpy(priv->name, name, MLX5_MAX_NAME_LEN);
+	priv->name[MLX5_MAX_NAME_LEN - 1] = 0;

-	dev = devlink_priv(devlink);
-	priv = &dev->priv;
-	priv->pci_dev_data = id->driver_data;
-
-	pci_set_drvdata(pdev, dev);
-
-	dev->pdev = pdev;
-	dev->profile = &profile[prof_sel];
+	dev->profile = &profile[profile_idx];

 	INIT_LIST_HEAD(&priv->ctx_list);
 	spin_lock_init(&priv->ctx_lock);
@@ -1272,25 +1262,72 @@ static int init_one(struct pci_dev *pdev,
 	INIT_LIST_HEAD(&priv->bfregs.reg_head.list);
 	INIT_LIST_HEAD(&priv->bfregs.wc_head.list);

-	err = mlx5_pci_init(dev, priv);
-	if (err) {
-		dev_err(&pdev->dev, "mlx5_pci_init failed with error code %d\n", err);
-		goto clean_dev;
+	mutex_init(&priv->alloc_mutex);
+	mutex_init(&priv->pgdir_mutex);
+	INIT_LIST_HEAD(&priv->pgdir_list);
+	spin_lock_init(&priv->mkey_lock);
+
+	priv->dbg_root = debugfs_create_dir(name, mlx5_debugfs_root);
+	if (!priv->dbg_root) {
+		pr_err("mlx5_core: %s error, Cannot create debugfs dir, aborting\n", name);
+		return -ENOMEM;
 	}

 	err = mlx5_health_init(dev);
-	if (err) {
-		dev_err(&pdev->dev, "mlx5_health_init failed with error code %d\n", err);
-		goto close_pci;
-	}
+	if (err)
+		goto err_health_init;

 	err = mlx5_pagealloc_init(dev);
 	if (err)
 		goto err_pagealloc_init;

-	err = mlx5_load_one(dev, priv, true);
+	return 0;
+
+err_pagealloc_init:
+	mlx5_health_cleanup(dev);
+err_health_init:
+	debugfs_remove(dev->priv.dbg_root);
+
+	return err;
+}
+
+static void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
+{
+	mlx5_pagealloc_cleanup(dev);
+	mlx5_health_cleanup(dev);
+	debugfs_remove_recursive(dev->priv.dbg_root);
+}
+
+#define MLX5_IB_MOD "mlx5_ib"
+static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct mlx5_core_dev *dev;
+	struct devlink *devlink;
+	int err;
+
+	devlink = devlink_alloc(&mlx5_devlink_ops, sizeof(*dev));
+	if (!devlink) {
+		dev_err(&pdev->dev, "kzalloc failed\n");
+		return -ENOMEM;
+	}
+
+	dev = devlink_priv(devlink);
+
+	err = mlx5_mdev_init(dev, prof_sel, dev_name(&pdev->dev));
+	if (err)
+		goto mdev_init_err;
+
+	err = mlx5_pci_init(dev, pdev, id);
+	if (err) {
+		mlx5_core_err(dev, "mlx5_pci_init failed with error code %d\n",
+			      err);
+		goto pci_init_err;
+	}
+
+	err = mlx5_load_one(dev, true);
 	if (err) {
-		dev_err(&pdev->dev, "mlx5_load_one failed with error code %d\n", err);
+		mlx5_core_err(dev, "mlx5_load_one failed with error code %d\n",
+			      err);
 		goto err_load_one;
 	}
@@ -1304,14 +1341,13 @@ static int init_one(struct pci_dev *pdev,
 	return 0;

 clean_load:
-	mlx5_unload_one(dev, priv, true);
+	mlx5_unload_one(dev, true);
 err_load_one:
-	mlx5_pagealloc_cleanup(dev);
-err_pagealloc_init:
-	mlx5_health_cleanup(dev);
-close_pci:
-	mlx5_pci_close(dev, priv);
-clean_dev:
+	mlx5_pci_close(dev);
+pci_init_err:
+	mlx5_mdev_uninit(dev);
+mdev_init_err:
 	devlink_free(devlink);

 	return err;
@@ -1321,20 +1357,18 @@ static void remove_one(struct pci_dev *pdev)
 {
 	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
 	struct devlink *devlink = priv_to_devlink(dev);
-	struct mlx5_priv *priv = &dev->priv;

 	devlink_unregister(devlink);
 	mlx5_unregister_device(dev);

-	if (mlx5_unload_one(dev, priv, true)) {
-		dev_err(&dev->pdev->dev, "mlx5_unload_one failed\n");
-		mlx5_health_cleanup(dev);
+	if (mlx5_unload_one(dev, true)) {
+		mlx5_core_err(dev, "mlx5_unload_one failed\n");
+		mlx5_health_flush(dev);
 		return;
 	}

-	mlx5_pagealloc_cleanup(dev);
-	mlx5_health_cleanup(dev);
-	mlx5_pci_close(dev, priv);
+	mlx5_pci_close(dev);
+	mlx5_mdev_uninit(dev);
 	devlink_free(devlink);
 }
@@ -1342,12 +1376,11 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
 					      pci_channel_state_t state)
 {
 	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
-	struct mlx5_priv *priv = &dev->priv;

-	dev_info(&pdev->dev, "%s was called\n", __func__);
+	mlx5_core_info(dev, "%s was called\n", __func__);

 	mlx5_enter_error_state(dev, false);
-	mlx5_unload_one(dev, priv, false);
+	mlx5_unload_one(dev, false);
 	/* In case of kernel call drain the health wq */
 	if (state) {
 		mlx5_drain_health_wq(dev);
@@ -1374,7 +1407,9 @@ static int wait_vital(struct pci_dev *pdev)
 		count = ioread32be(health->health_counter);
 		if (count && count != 0xffffffff) {
 			if (last_count && last_count != count) {
-				dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
+				mlx5_core_info(dev,
+					       "wait vital counter value 0x%x after %d iterations\n",
+					       count, i);
 				return 0;
 			}
 			last_count = count;
@@ -1390,12 +1425,12 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
 	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
 	int err;

-	dev_info(&pdev->dev, "%s was called\n", __func__);
+	mlx5_core_info(dev, "%s was called\n", __func__);

 	err = mlx5_pci_enable_device(dev);
 	if (err) {
-		dev_err(&pdev->dev, "%s: mlx5_pci_enable_device failed with error code: %d\n"
-			, __func__, err);
+		mlx5_core_err(dev, "%s: mlx5_pci_enable_device failed with error code: %d\n",
+			      __func__, err);
 		return PCI_ERS_RESULT_DISCONNECT;
 	}
@@ -1404,7 +1439,7 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
 	pci_save_state(pdev);

 	if (wait_vital(pdev)) {
-		dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
+		mlx5_core_err(dev, "%s: wait_vital timed out\n", __func__);
 		return PCI_ERS_RESULT_DISCONNECT;
 	}
@@ -1414,17 +1449,16 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
 static void mlx5_pci_resume(struct pci_dev *pdev)
 {
 	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
-	struct mlx5_priv *priv = &dev->priv;
 	int err;

-	dev_info(&pdev->dev, "%s was called\n", __func__);
+	mlx5_core_info(dev, "%s was called\n", __func__);

-	err = mlx5_load_one(dev, priv, false);
+	err = mlx5_load_one(dev, false);
 	if (err)
-		dev_err(&pdev->dev, "%s: mlx5_load_one failed with error code: %d\n"
-			, __func__, err);
+		mlx5_core_err(dev, "%s: mlx5_load_one failed with error code: %d\n",
+			      __func__, err);
 	else
-		dev_info(&pdev->dev, "%s: device recovered\n", __func__);
+		mlx5_core_info(dev, "%s: device recovered\n", __func__);
 }

 static const struct pci_error_handlers mlx5_err_handler = {
@@ -1486,13 +1520,12 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
 static void shutdown(struct pci_dev *pdev)
 {
 	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
-	struct mlx5_priv *priv = &dev->priv;
 	int err;

-	dev_info(&pdev->dev, "Shutdown was called\n");
+	mlx5_core_info(dev, "Shutdown was called\n");
 	err = mlx5_try_fast_unload(dev);
 	if (err)
-		mlx5_unload_one(dev, priv, false);
+		mlx5_unload_one(dev, false);
 	mlx5_pci_disable_device(dev);
 }
......
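The net effect of the main.c rework is a probe path with three explicit stages: mlx5_mdev_init() builds software-only state (locks, debugfs, health, page allocator), mlx5_pci_init() binds the PCI device and maps the BAR, and mlx5_load_one() runs the firmware handshake (mlx5_function_setup()), object creation (mlx5_load()), and interface registration. A condensed sketch of the flow, with all error unwinding elided and the function renamed to flag that this is illustration, not the actual driver code:

	/* Condensed sketch of the reworked probe path (unwinding elided). */
	static int init_one_sketch(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		struct devlink *devlink = devlink_alloc(&mlx5_devlink_ops,
							sizeof(struct mlx5_core_dev));
		struct mlx5_core_dev *dev = devlink_priv(devlink);

		mlx5_mdev_init(dev, prof_sel, dev_name(&pdev->dev)); /* SW state only */
		mlx5_pci_init(dev, pdev, id);      /* enable PCI, map BAR/iseg */
		return mlx5_load_one(dev, true);   /* function_setup + load + register */
	}

Teardown mirrors this: mlx5_unload_one() calls mlx5_unload() and mlx5_function_teardown(), then mlx5_pci_close() and mlx5_mdev_uninit() undo the other two stages.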
@@ -48,12 +48,12 @@
 extern uint mlx5_core_debug_mask;

 #define mlx5_core_dbg(__dev, format, ...) \
-	dev_dbg(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \
+	pr_debug("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \
 		 __func__, __LINE__, current->pid, \
 		 ##__VA_ARGS__)

 #define mlx5_core_dbg_once(__dev, format, ...) \
-	dev_dbg_once(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \
+	pr_debug_once("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \
 		     __func__, __LINE__, current->pid, \
 		     ##__VA_ARGS__)
@@ -64,28 +64,37 @@ do { \
 } while (0)

 #define mlx5_core_err(__dev, format, ...) \
-	dev_err(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \
+	pr_err("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \
 	       __func__, __LINE__, current->pid, \
 	       ##__VA_ARGS__)

 #define mlx5_core_err_rl(__dev, format, ...) \
-	dev_err_ratelimited(&(__dev)->pdev->dev, \
-			    "%s:%d:(pid %d): " format, \
+	pr_err_ratelimited("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \
 			   __func__, __LINE__, current->pid, \
 			   ##__VA_ARGS__)

 #define mlx5_core_warn(__dev, format, ...) \
-	dev_warn(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \
+	pr_warn("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \
 		__func__, __LINE__, current->pid, \
 		##__VA_ARGS__)

 #define mlx5_core_warn_once(__dev, format, ...) \
-	dev_warn_once(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \
+	pr_warn_once("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \
+		     __func__, __LINE__, current->pid, \
+		     ##__VA_ARGS__)
+
+#define mlx5_core_warn_rl(__dev, format, ...) \
+	pr_warn_ratelimited("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \
 		      __func__, __LINE__, current->pid, \
 		      ##__VA_ARGS__)

 #define mlx5_core_info(__dev, format, ...) \
-	dev_info(&(__dev)->pdev->dev, format, ##__VA_ARGS__)
+	pr_info("%s " format, (__dev)->priv.name, ##__VA_ARGS__)
+
+#define mlx5_core_info_rl(__dev, format, ...) \
+	pr_info_ratelimited("%s:%s:%d:(pid %d): " format, (__dev)->priv.name, \
+			    __func__, __LINE__, current->pid, \
+			    ##__VA_ARGS__)

 enum {
 	MLX5_CMD_DATA, /* print command payload only */
......
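Rebasing the mlx5_core_* macros from dev_*() onto pr_*() with (__dev)->priv.name makes logging independent of mdev->pdev, so messages work before the PCI half of the device is initialized and after it is torn down, which the split probe path above relies on. For example (the "timeout" variable here is purely illustrative):

	/* Expands to pr_err("%s:%s:%d:(pid %d): ...", dev->priv.name, __func__, ...);
	 * no dereference of dev->pdev is needed at any point.
	 */
	mlx5_core_err(dev, "firmware init timed out after %d ms\n", timeout);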
@@ -79,7 +79,7 @@ static u64 uar2pfn(struct mlx5_core_dev *mdev, u32 index)
 	else
 		system_page_index = index;

-	return (pci_resource_start(mdev->pdev, 0) >> PAGE_SHIFT) + system_page_index;
+	return (mdev->bar_addr >> PAGE_SHIFT) + system_page_index;
 }

 static void up_rel_func(struct kref *kref)
......
@@ -170,7 +170,7 @@ static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
 	doorbell[0] = cpu_to_be32(sn << 28 | cmd | ci);
 	doorbell[1] = cpu_to_be32(cq->cqn);

-	mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, NULL);
+	mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL);
 }

 static inline void mlx5_cq_hold(struct mlx5_core_cq *cq)
......
@@ -36,46 +36,25 @@
 #define MLX5_BF_OFFSET	      0x800
 #define MLX5_CQ_DOORBELL      0x20

-#if BITS_PER_LONG == 64
 /* Assume that we can just write a 64-bit doorbell atomically. s390
  * actually doesn't have writeq() but S/390 systems don't even have
  * PCI so we won't worry about it.
+ *
+ * Note that the write is not atomic on 32-bit systems! In contrast to 64-bit
+ * ones, it requires proper locking. mlx5_write64 doesn't do any locking, so use
+ * it at your own discretion, protected by some kind of lock on 32 bits.
+ *
+ * TODO: use write{q,l}_relaxed()
  */
-
-#define MLX5_DECLARE_DOORBELL_LOCK(name)
-#define MLX5_INIT_DOORBELL_LOCK(ptr)    do { } while (0)
-#define MLX5_GET_DOORBELL_LOCK(ptr)      (NULL)
-
-static inline void mlx5_write64(__be32 val[2], void __iomem *dest,
-				spinlock_t *doorbell_lock)
+static inline void mlx5_write64(__be32 val[2], void __iomem *dest)
 {
+#if BITS_PER_LONG == 64
 	__raw_writeq(*(u64 *)val, dest);
-}
-
 #else
-
-/* Just fall back to a spinlock to protect the doorbell if
- * BITS_PER_LONG is 32 -- there's no portable way to do atomic 64-bit
- * MMIO writes.
- */
-
-#define MLX5_DECLARE_DOORBELL_LOCK(name) spinlock_t name;
-#define MLX5_INIT_DOORBELL_LOCK(ptr)     spin_lock_init(ptr)
-#define MLX5_GET_DOORBELL_LOCK(ptr)      (ptr)
-
-static inline void mlx5_write64(__be32 val[2], void __iomem *dest,
-				spinlock_t *doorbell_lock)
-{
-	unsigned long flags;
-
-	if (doorbell_lock)
-		spin_lock_irqsave(doorbell_lock, flags);
 	__raw_writel((__force u32) val[0], dest);
 	__raw_writel((__force u32) val[1], dest + 4);
-	if (doorbell_lock)
-		spin_unlock_irqrestore(doorbell_lock, flags);
-}
-
 #endif
+}

 #endif /* MLX5_DOORBELL_H */
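With the spinlock parameter gone, mlx5_write64() is a single function whose 32-bit/64-bit split lives inside the body, and every caller in this diff simply drops the trailing NULL. A typical CQ doorbell ring now reads:

	/* Example call site (see mlx5_cq_arm() above); on 32-bit builds the two
	 * 32-bit stores are only safe if the caller provides its own serialization.
	 */
	mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL);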
@@ -133,6 +133,7 @@ enum {
 	MLX5_REG_MTRC_CONF	 = 0x9041,
 	MLX5_REG_MTRC_STDB	 = 0x9042,
 	MLX5_REG_MTRC_CTRL	 = 0x9043,
+	MLX5_REG_MPEIN		 = 0x9050,
 	MLX5_REG_MPCNT		 = 0x9051,
 	MLX5_REG_MTPPS		 = 0x9053,
 	MLX5_REG_MTPPSE		 = 0x9054,
@@ -660,6 +661,7 @@ struct mlx5_core_dev {
 	u64			sys_image_guid;
 	phys_addr_t		iseg_base;
 	struct mlx5_init_seg __iomem *iseg;
+	phys_addr_t		bar_addr;
 	enum mlx5_device_state	state;
 	/* sync interface state */
 	struct mutex		intf_state_mutex;
@@ -885,6 +887,7 @@ void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
 int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
 int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
 int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
+void mlx5_health_flush(struct mlx5_core_dev *dev);
 void mlx5_health_cleanup(struct mlx5_core_dev *dev);
 int mlx5_health_init(struct mlx5_core_dev *dev);
 void mlx5_start_health_poll(struct mlx5_core_dev *dev);
......
@@ -75,6 +75,12 @@ enum mlx5_flow_namespace_type {
 	MLX5_FLOW_NAMESPACE_EGRESS,
 };

+enum {
+	FDB_BYPASS_PATH,
+	FDB_FAST_PATH,
+	FDB_SLOW_PATH,
+};
+
 struct mlx5_flow_table;
 struct mlx5_flow_group;
 struct mlx5_flow_namespace;
......
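Publishing the FDB prios here (the private copy in eswitch_offloads.c is deleted earlier in this diff) lets consumers outside the eswitch code ask the FDB namespace for a table by prio; the enumerators are implicitly 0, 1, 2, so FDB_BYPASS_PATH sorts ahead of both paths. A hypothetical consumer sketch using the flow steering API of this era (table size and group count are illustrative only):

	/* Hypothetical sketch: an RDMA-side user creating a table in the new
	 * bypass prio (error handling elided; sizes are illustrative).
	 */
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *ft;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	ft = mlx5_create_auto_grouped_flow_table(ns, FDB_BYPASS_PATH,
						 1 << 10 /* entries */,
						 4 /* groups */, 0, 0);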
@@ -8026,6 +8026,52 @@ struct mlx5_ifc_ppcnt_reg_bits {
 	union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set;
 };

+struct mlx5_ifc_mpein_reg_bits {
+	u8         reserved_at_0[0x2];
+	u8         depth[0x6];
+	u8         pcie_index[0x8];
+	u8         node[0x8];
+	u8         reserved_at_18[0x8];
+
+	u8         capability_mask[0x20];
+
+	u8         reserved_at_40[0x8];
+	u8         link_width_enabled[0x8];
+	u8         link_speed_enabled[0x10];
+
+	u8         lane0_physical_position[0x8];
+	u8         link_width_active[0x8];
+	u8         link_speed_active[0x10];
+
+	u8         num_of_pfs[0x10];
+	u8         num_of_vfs[0x10];
+
+	u8         bdf0[0x10];
+	u8         reserved_at_b0[0x10];
+
+	u8         max_read_request_size[0x4];
+	u8         max_payload_size[0x4];
+	u8         reserved_at_c8[0x5];
+	u8         pwr_status[0x3];
+	u8         port_type[0x4];
+	u8         reserved_at_d4[0xb];
+	u8         lane_reversal[0x1];
+
+	u8         reserved_at_e0[0x14];
+	u8         pci_power[0xc];
+
+	u8         reserved_at_100[0x20];
+
+	u8         device_status[0x10];
+	u8         port_state[0x8];
+	u8         reserved_at_138[0x8];
+
+	u8         reserved_at_140[0x10];
+	u8         receiver_detect_result[0x10];
+
+	u8         reserved_at_160[0x20];
+};
+
 struct mlx5_ifc_mpcnt_reg_bits {
 	u8         reserved_at_0[0x8];
 	u8         pcie_index[0x8];
@@ -8345,7 +8391,9 @@ struct mlx5_ifc_pcam_reg_bits {
 };

 struct mlx5_ifc_mcam_enhanced_features_bits {
-	u8         reserved_at_0[0x74];
+	u8         reserved_at_0[0x6e];
+	u8         pci_status_and_power[0x1];
+	u8         reserved_at_6f[0x5];
 	u8         mark_tx_action_cnp[0x1];
 	u8         mark_tx_action_cqe[0x1];
 	u8         dynamic_tx_overflow[0x1];
@@ -8953,6 +9001,7 @@ union mlx5_ifc_ports_control_registers_document_bits {
 	struct mlx5_ifc_pmtu_reg_bits pmtu_reg;
 	struct mlx5_ifc_ppad_reg_bits ppad_reg;
 	struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg;
+	struct mlx5_ifc_mpein_reg_bits mpein_reg;
 	struct mlx5_ifc_mpcnt_reg_bits mpcnt_reg;
 	struct mlx5_ifc_pplm_reg_bits pplm_reg;
 	struct mlx5_ifc_pplr_reg_bits pplr_reg;
......
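MPEIN (Management PCIE INfo) is read through the same access-register machinery as its MPCNT sibling. A sketch of a query, assuming the driver's existing mlx5_core_access_reg() helper (the RDMA series this merge feeds is expected to add a wrapper along these lines):

	/* Sketch: query MPEIN and decode the active link fields (error path elided). */
	u32 in[MLX5_ST_SZ_DW(mpein_reg)] = {};
	u32 out[MLX5_ST_SZ_DW(mpein_reg)] = {};
	int err;

	MLX5_SET(mpein_reg, in, pcie_index, 0);
	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
				   MLX5_REG_MPEIN, 0, 0);
	if (!err)
		mlx5_core_info(dev, "PCIe link: width x%u, speed field 0x%x\n",
			       MLX5_GET(mpein_reg, out, link_width_active),
			       MLX5_GET(mpein_reg, out, link_speed_active));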