Commit 1ffeb2eb authored by Jack Morgenstein, committed by Roland Dreier

IB/mlx4: SR-IOV IB context objects and proxy/tunnel SQP support

1. Introduce the basic SR-IOV paravirtualization context objects for
   multiplexing and demultiplexing MADs.
2. Introduce support for the new proxy and tunnel QP types.

This patch introduces the objects required by the master for managing
QP paravirtualization for guests.

struct mlx4_ib_sriov is created by the master only.
It is a container for the following:

1. All the info required by the PPF to multiplex and de-multiplex MADs
   (including those from the PF). (struct mlx4_ib_demux_ctx demux)
2. All the info required to manage alias GUIDs (i.e., the GUID at
   index 0 that each guest perceives; in fact, this is not the GUID
   actually stored at index 0, but the GUID at index [<VF number>] in
   the physical table).
3. Structures used to manage CM paravirtualization.
4. Structures for managing the real special QPs when running in SR-IOV
   mode.  The real SQPs are controlled by the PPF in this case; all
   SQPs created and controlled by the IB core layer are proxy SQPs
   (a usage sketch follows this list).
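
As an illustration only (none of this code is part of the patch), the
container matters only on the master of a multi-function device;
mlx4_is_mfunc()/mlx4_is_master() are existing mlx4 helpers, while
mlx4_ib_init_sriov() is a hypothetical name used just for this sketch:

	static int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)
	{
		if (!mlx4_is_mfunc(dev->dev) || !mlx4_is_master(dev->dev))
			return 0; /* slaves and native HCAs: nothing to do */

		spin_lock_init(&dev->sriov.going_down_lock);
		dev->sriov.is_going_down = 0;
		/* the per-port demux contexts (sriov.demux[]) and the
		 * real-SQP contexts (sriov.sqps[]) would be set up here */
		return 0;
	}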

struct mlx4_ib_demux_ctx contains the information per port needed
to manage paravirtualization:

1. All multicast paravirt info.
2. All tunnel-QP paravirt info for the port.
3. The GUID table and GUID prefix for the port.
4. Work queues.

struct mlx4_ib_demux_pv_ctx contains all the info for managing the
paravirtualized QPs for one slave/port.

struct mlx4_ib_demux_pv_qp contains the info needed to run an individual
QP (either a tunnel QP or a real SQP).

Note: we use the two most significant bits of enum mlx4_ib_qp_flags
(which is based on enum ib_qp_create_flags in ib_verbs.h); these bits
are needed by the low-level driver for internal purposes, as the sketch
below illustrates.
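
For illustration only (not taken from the patch; the helper name is
hypothetical), the internal bits are tested in the low-level driver
roughly as follows:

	static bool mlx4_ib_qp_is_sriov_special(struct mlx4_ib_qp *qp)
	{
		/* these bits are set only inside mlx4_ib, never via ib_core */
		return qp->flags & (MLX4_IB_SRIOV_TUNNEL_QP | MLX4_IB_SRIOV_SQP);
	}
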
Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: Roland Dreier <roland@purestorage.com>
parent 73aaa741
@@ -547,6 +547,26 @@ static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
		checksum == cpu_to_be16(0xffff);
}
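
/* Decode the tunnel header found at the head of the proxy QP's receive
 * buffer and copy the original completion fields (pkey index, slid, sl,
 * source QP, GRH flag) into the work completion returned to the caller. */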
static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
			   unsigned tail, struct mlx4_cqe *cqe)
{
	struct mlx4_ib_proxy_sqp_hdr *hdr;

	ib_dma_sync_single_for_cpu(qp->ibqp.device,
				   qp->sqp_proxy_rcv[tail].map,
				   sizeof (struct mlx4_ib_proxy_sqp_hdr),
				   DMA_FROM_DEVICE);
	hdr = (struct mlx4_ib_proxy_sqp_hdr *) (qp->sqp_proxy_rcv[tail].addr);
	wc->pkey_index = be16_to_cpu(hdr->tun.pkey_index);
	wc->slid = be16_to_cpu(hdr->tun.slid_mac_47_32);
	wc->sl = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12);
	wc->src_qp = be32_to_cpu(hdr->tun.flags_src_qp) & 0xFFFFFF;
	wc->wc_flags |= (hdr->tun.g_ml_path & 0x80) ? (IB_WC_GRH) : 0;
	wc->dlid_path_bits = 0;

	return 0;
}
static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
			    struct mlx4_ib_qp **cur_qp,
			    struct ib_wc *wc)
@@ -559,6 +579,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
	int is_error;
	u32 g_mlpath_rqpn;
	u16 wqe_ctr;
	unsigned tail = 0;

repoll:
	cqe = next_cqe_sw(cq);
@@ -634,7 +655,8 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
	} else {
		wq = &(*cur_qp)->rq;
-		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
+		tail = wq->tail & (wq->wqe_cnt - 1);
+		wc->wr_id = wq->wrid[tail];
		++wq->tail;
	}
@@ -717,6 +739,13 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
			break;
		}
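
		/* In multi-function (SR-IOV) mode, completions on proxy special
		 * QPs carry their real completion information in a tunnel header
		 * in the receive buffer rather than in the CQE, so recover it
		 * from there. */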
		if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) {
			if ((*cur_qp)->mlx4_ib_qp_type &
			    (MLX4_IB_QPT_PROXY_SMI_OWNER |
			     MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
				return use_tunnel_data(*cur_qp, cq, wc, tail, cqe);
		}

		wc->slid = be16_to_cpu(cqe->rlid);
		g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
		wc->src_qp = g_mlpath_rqpn & 0xffffff;
@@ -133,8 +133,10 @@ struct mlx4_ib_wq {
};

enum mlx4_ib_qp_flags {
-	MLX4_IB_QP_LSO = 1 << 0,
-	MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
+	MLX4_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO,
+	MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
+	MLX4_IB_SRIOV_TUNNEL_QP = 1 << 30,
+	MLX4_IB_SRIOV_SQP = 1 << 31,
};
struct mlx4_ib_gid_entry {
@@ -144,6 +146,68 @@ struct mlx4_ib_gid_entry {
	u8 port;
};
enum mlx4_ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	MLX4_IB_QPT_SMI = IB_QPT_SMI,
	MLX4_IB_QPT_GSI = IB_QPT_GSI,
	MLX4_IB_QPT_RC = IB_QPT_RC,
	MLX4_IB_QPT_UC = IB_QPT_UC,
	MLX4_IB_QPT_UD = IB_QPT_UD,
	MLX4_IB_QPT_RAW_IPV6 = IB_QPT_RAW_IPV6,
	MLX4_IB_QPT_RAW_ETHERTYPE = IB_QPT_RAW_ETHERTYPE,
	MLX4_IB_QPT_RAW_PACKET = IB_QPT_RAW_PACKET,
	MLX4_IB_QPT_XRC_INI = IB_QPT_XRC_INI,
	MLX4_IB_QPT_XRC_TGT = IB_QPT_XRC_TGT,
	MLX4_IB_QPT_PROXY_SMI_OWNER = 1 << 16,
	MLX4_IB_QPT_PROXY_SMI = 1 << 17,
	MLX4_IB_QPT_PROXY_GSI = 1 << 18,
	MLX4_IB_QPT_TUN_SMI_OWNER = 1 << 19,
	MLX4_IB_QPT_TUN_SMI = 1 << 20,
	MLX4_IB_QPT_TUN_GSI = 1 << 21,
};

#define MLX4_IB_QPT_ANY_SRIOV (MLX4_IB_QPT_PROXY_SMI_OWNER | \
	MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER | \
	MLX4_IB_QPT_TUN_SMI | MLX4_IB_QPT_TUN_GSI)
struct mlx4_ib_tunnel_header {
	struct mlx4_av av;
	__be32 remote_qpn;
	__be32 qkey;
	__be16 vlan;
	u8 mac[6];
	__be16 pkey_index;
	u8 reserved[6];
};

struct mlx4_ib_buf {
	void *addr;
	dma_addr_t map;
};

struct mlx4_rcv_tunnel_hdr {
	__be32 flags_src_qp;	/* flags[6:5] is defined for VLANs:
				 * 0x0 - no vlan was in the packet
				 * 0x01 - C-VLAN was in the packet */
	u8 g_ml_path;		/* gid bit stands for ipv6/4 header in RoCE */
	u8 reserved;
	__be16 pkey_index;
	__be16 sl_vid;
	__be16 slid_mac_47_32;
	__be32 mac_31_0;
};

struct mlx4_ib_proxy_sqp_hdr {
	struct ib_grh grh;
	struct mlx4_rcv_tunnel_hdr tun;
} __packed;
struct mlx4_ib_qp {
	struct ib_qp ibqp;
	struct mlx4_qp mqp;
@@ -159,6 +223,7 @@ struct mlx4_ib_qp {
	int sq_spare_wqes;
	struct mlx4_ib_wq sq;
	enum mlx4_ib_qp_type mlx4_ib_qp_type;
	struct ib_umem *umem;
	struct mlx4_mtt mtt;
	int buf_size;
@@ -174,6 +239,8 @@ struct mlx4_ib_qp {
	int mlx_type;
	struct list_head gid_list;
	struct list_head steering_rules;
	struct mlx4_ib_buf *sqp_proxy_rcv;
};
struct mlx4_ib_srq {
@@ -196,6 +263,55 @@ struct mlx4_ib_ah {
	union mlx4_ext_av av;
};
struct mlx4_ib_tun_tx_buf {
	struct mlx4_ib_buf buf;
	struct ib_ah *ah;
};
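
/* Per-QP paravirtualization state for one tunnel QP or real SQP:
 * the QP itself plus its receive ring and transmit ring. */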
struct mlx4_ib_demux_pv_qp {
	struct ib_qp *qp;
	enum ib_qp_type proxy_qpt;
	struct mlx4_ib_buf *ring;
	struct mlx4_ib_tun_tx_buf *tx_ring;
	spinlock_t tx_lock;
	unsigned tx_ix_head;
	unsigned tx_ix_tail;
};

struct mlx4_ib_demux_pv_ctx {
	int port;
	int slave;
	int has_smi;
	struct ib_device *ib_dev;
	struct ib_cq *cq;
	struct ib_pd *pd;
	struct ib_mr *mr;
	struct work_struct work;
	struct workqueue_struct *wq;
	struct mlx4_ib_demux_pv_qp qp[2];
};

struct mlx4_ib_demux_ctx {
	struct ib_device *ib_dev;
	int port;
	struct workqueue_struct *wq;
	struct workqueue_struct *ud_wq;
	spinlock_t ud_lock;
	__be64 subnet_prefix;
	__be64 guid_cache[128];
	struct mlx4_ib_dev *dev;
	struct mlx4_ib_demux_pv_ctx **tun;
};
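
/* Created by the master only: the per-port MAD demux contexts, the
 * per-port contexts for the real special QPs, and the going-down state. */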
struct mlx4_ib_sriov {
	struct mlx4_ib_demux_ctx demux[MLX4_MAX_PORTS];
	struct mlx4_ib_demux_pv_ctx *sqps[MLX4_MAX_PORTS];
	/* when using this spinlock you should use "irq" because
	 * it may be called from interrupt context. */
	spinlock_t going_down_lock;
	int is_going_down;
};
struct mlx4_ib_iboe {
	spinlock_t lock;
	struct net_device *netdevs[MLX4_MAX_PORTS];
@@ -216,6 +332,7 @@ struct mlx4_ib_dev {
	struct ib_mad_agent *send_agent[MLX4_MAX_PORTS][2];
	struct ib_ah *sm_ah[MLX4_MAX_PORTS];
	spinlock_t sm_lock;
	struct mlx4_ib_sriov sriov;
	struct mutex cap_mask_mutex;
	bool ib_active;
@@ -231,6 +348,13 @@ struct ib_event_work {
	struct mlx4_eqe ib_eqe;
};
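
/* QP init attributes extended with the slave, the proxied QP type and the
 * port, used when creating the paravirtualized (proxy/tunnel) QPs. */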
struct mlx4_ib_qp_tunnel_init_attr {
	struct ib_qp_init_attr init_attr;
	int slave;
	enum ib_qp_type proxy_qp_type;
	u8 port;
};

static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx4_ib_dev, ib_dev);
This diff is collapsed.
@@ -389,6 +389,7 @@ struct mlx4_caps {
	enum mlx4_port_type possible_type[MLX4_MAX_PORTS + 1];
	u32 max_counters;
	u8 port_ib_mtu[MLX4_MAX_PORTS + 1];
	u16 sqp_demux;
};
struct mlx4_buf_list {
@@ -126,7 +126,8 @@ struct mlx4_rss_context {
struct mlx4_qp_path {
	u8 fl;
-	u8 reserved1[2];
+	u8 reserved1[1];
+	u8 disable_pkey_check;
	u8 pkey_index;
	u8 counter_index;
	u8 grh_mylmc;