Commit 404e5a12 authored by Shay Drory, committed by Jason Gunthorpe

RDMA/mlx4: Do not map the core_clock page to user space unless enabled

Currently, when mlx4 maps the hca_core_clock page to user space, the page
contains not only the clock counter but also registers that are modified by
reads, one of which is a semaphore. If user space reads the wrong offset, it
can modify the semaphore and hang the device.

Do not map the hca_core_clock page to the user space unless the device has
been put in a backwards compatibility mode to support this feature.

After this patch, the mlx4 core_clock will not be mapped to user space on the
majority of existing devices, and the uverbs device-time feature exposed
through ibv_query_rt_values_ex() will be disabled.

Fixes: 52033cfb ("IB/mlx4: Add mmap call to map the hardware clock")
Link: https://lore.kernel.org/r/9632304e0d6790af84b3b706d8c18732bc0d5e27.1622726305.git.leonro@nvidia.com
Signed-off-by: Shay Drory <shayd@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent a0ffb4c1
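
[Editor's note] As the commit message says, the effect visible to applications is that the uverbs device-time query stops reporting the raw HCA clock. Below is a minimal libibverbs sketch of how an application would observe that. It is illustrative only (first device, simplified error handling); nothing in it beyond ibv_query_rt_values_ex() and IBV_VALUES_MASK_RAW_CLOCK is taken from the patch.

#include <stdio.h>
#include <infiniband/verbs.h>

/* Probe whether the device still exposes the raw HW clock to user space.
 * After this patch, devices without the firmware "map clock to user" bit
 * are expected to fail this query or leave the RAW_CLOCK bit clear. */
int main(void)
{
	int num;
	struct ibv_device **list = ibv_get_device_list(&num);

	if (!list || num == 0) {
		fprintf(stderr, "no RDMA devices found\n");
		return 1;
	}

	struct ibv_context *ctx = ibv_open_device(list[0]);

	if (!ctx) {
		fprintf(stderr, "failed to open %s\n",
			ibv_get_device_name(list[0]));
		ibv_free_device_list(list);
		return 1;
	}

	struct ibv_values_ex values = {
		.comp_mask = IBV_VALUES_MASK_RAW_CLOCK,
	};

	if (ibv_query_rt_values_ex(ctx, &values) ||
	    !(values.comp_mask & IBV_VALUES_MASK_RAW_CLOCK))
		printf("raw HW clock not available to user space\n");
	else
		printf("raw HW clock: %lld.%09ld\n",
		       (long long)values.raw_clock.tv_sec,
		       values.raw_clock.tv_nsec);

	ibv_close_device(ctx);
	ibv_free_device_list(list);
	return 0;
}

On devices without the backwards-compatibility bit, the query is expected to fail or to omit IBV_VALUES_MASK_RAW_CLOCK from the returned comp_mask; the program treats both cases as "not available".
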
@@ -581,12 +581,9 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 	props->cq_caps.max_cq_moderation_count = MLX4_MAX_CQ_COUNT;
 	props->cq_caps.max_cq_moderation_period = MLX4_MAX_CQ_PERIOD;
 
-	if (!mlx4_is_slave(dev->dev))
-		err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
-
 	if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
 		resp.response_length += sizeof(resp.hca_core_clock_offset);
-		if (!err && !mlx4_is_slave(dev->dev)) {
+		if (!mlx4_get_internal_clock_params(dev->dev, &clock_params)) {
 			resp.comp_mask |= MLX4_IB_QUERY_DEV_RESP_MASK_CORE_CLOCK_OFFSET;
 			resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
 		}
@@ -823,6 +823,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 #define QUERY_DEV_CAP_MAD_DEMUX_OFFSET	0xb0
 #define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET	0xa8
 #define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET	0xac
+#define QUERY_DEV_CAP_MAP_CLOCK_TO_USER 0xc1
 #define QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET	0xcc
 #define QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET	0xd0
 #define QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET	0xd2
@@ -841,6 +842,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 
 	if (mlx4_is_mfunc(dev))
 		disable_unsupported_roce_caps(outbox);
+	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAP_CLOCK_TO_USER);
+	dev_cap->map_clock_to_user = field & 0x80;
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
 	dev_cap->reserved_qps = 1 << (field & 0xf);
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
@@ -131,6 +131,7 @@ struct mlx4_dev_cap {
 	u32 health_buffer_addrs;
 	struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1];
 	bool wol_port[MLX4_MAX_PORTS + 1];
+	bool map_clock_to_user;
 };
 
 struct mlx4_func_cap {
@@ -498,6 +498,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 		}
 	}
 
+	dev->caps.map_clock_to_user  = dev_cap->map_clock_to_user;
 	dev->caps.uar_page_size	     = PAGE_SIZE;
 	dev->caps.num_uars	     = dev_cap->uar_size / PAGE_SIZE;
 	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
@@ -1948,6 +1949,11 @@ int mlx4_get_internal_clock_params(struct mlx4_dev *dev,
 	if (mlx4_is_slave(dev))
 		return -EOPNOTSUPP;
 
+	if (!dev->caps.map_clock_to_user) {
+		mlx4_dbg(dev, "Map clock to user is not supported.\n");
+		return -EOPNOTSUPP;
+	}
+
 	if (!params)
 		return -EINVAL;
 
@@ -630,6 +630,7 @@ struct mlx4_caps {
 	bool wol_port[MLX4_MAX_PORTS + 1];
 	struct mlx4_rate_limit_caps rl_caps;
 	u32 health_buffer_addrs;
+	bool map_clock_to_user;
 };
 
 struct mlx4_buf_list {
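
[Editor's note] The hunks above only add the capability plumbing (QUERY_DEV_CAP byte 0xc1, bit 0x80 -> dev_cap->map_clock_to_user -> dev->caps.map_clock_to_user) and the early -EOPNOTSUPP return in mlx4_get_internal_clock_params(); the callers that actually report or map the clock page are untouched and simply inherit the failure. The toy program below is a hedged, user-space re-creation of that flow, not kernel code: only map_clock_to_user, the 0x80 bit at mailbox byte 0xc1, and the -EOPNOTSUPP contract come from the patch, and every fake_* name is invented for illustration.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_caps {
	bool map_clock_to_user;		/* stands in for dev->caps.map_clock_to_user */
};

struct fake_clock_params {
	unsigned long offset;		/* clock offset within its page */
};

/* Mirror of the new QUERY_DEV_CAP decode: byte 0xc1, bit 0x80. */
static void fake_query_dev_cap(const unsigned char *outbox,
			       struct fake_caps *caps)
{
	caps->map_clock_to_user = outbox[0xc1] & 0x80;
}

/* Mirror of the new early exit in mlx4_get_internal_clock_params(). */
static int fake_get_internal_clock_params(const struct fake_caps *caps,
					  struct fake_clock_params *params)
{
	if (!caps->map_clock_to_user)
		return -EOPNOTSUPP;	/* clock page stays kernel-only */
	params->offset = 0x120;		/* made-up offset, demo only */
	return 0;
}

/* Any consumer (query_device, the clock-page mmap handler, ...) relies on
 * the helper above, so it refuses to expose the page on legacy devices. */
static int fake_map_clock_page(const struct fake_caps *caps)
{
	struct fake_clock_params params;
	int ret = fake_get_internal_clock_params(caps, &params);

	if (ret)
		return ret;		/* user space gets an error, no mapping */
	printf("  would map the clock page, clock at offset %#lx\n",
	       params.offset);
	return 0;
}

int main(void)
{
	unsigned char outbox[0x100] = { 0 };	/* fabricated firmware mailbox */
	struct fake_caps caps;

	fake_query_dev_cap(outbox, &caps);
	printf("legacy firmware -> %d\n", fake_map_clock_page(&caps));

	outbox[0xc1] |= 0x80;			/* firmware opts in */
	fake_query_dev_cap(outbox, &caps);
	printf("compat firmware -> %d\n", fake_map_clock_page(&caps));
	return 0;
}
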