Commit 715ab1a8 authored by Mike Marciniszyn, committed by Jason Gunthorpe

IB/rdmavt: Fix ab/ba include issues

The current include file ordering for rdmavt headers has an
ab/ba include issue that precludes using inlines from rdma_vt.h
in rdmavt_qp.h.

At the heart of the issue is that rdma_vt.h includes rdmavt_qp.h.

Fix the ordering issue by adjusting rdma_vt.h to not require rdmavt_qp.h
and by moving the QP-related inlines to rdmavt_qp.h.

Additionally, promote rvt_mmap_info to rdma_vt.h since it is shared
by rdmavt_cq.h and rdmavt_qp.h.
Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 62644c1d
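
For context, the ab/ba pattern being fixed looks roughly like the minimal sketch below. The header names a.h/b.h and the types in them are placeholders standing in for rdma_vt.h/rdmavt_qp.h, not the actual kernel headers; the point is that a header which only stores pointers to a type can get by with a forward declaration instead of a full include, which is what breaks the cycle:

/* a.h -- plays the role of rdma_vt.h in this sketch */
#ifndef A_H
#define A_H

/*
 * Before the fix this header did the equivalent of #include "b.h",
 * creating the a->b->a cycle.  A forward declaration is enough here
 * because only pointers to struct b_obj are used.
 */
struct b_obj;

struct a_port {
        struct b_obj *cur;
};

#endif

/* b.h -- plays the role of rdmavt_qp.h in this sketch */
#ifndef B_H
#define B_H

#include "a.h"  /* safe now: a.h no longer includes b.h back */

struct b_obj {
        int state;
};

/* b.h can now define inlines that rely on definitions from a.h. */
static inline struct b_obj *a_port_cur(struct a_port *port)
{
        return port->cur;
}

#endif
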
@@ -46,7 +46,7 @@
 #include <rdma/ib_pack.h>
 #include <rdma/ib_user_verbs.h>
 #include <rdma/ib_hdrs.h>
-#include <rdma/rdma_vt.h>
+#include <rdma/rdmavt_qp.h>
 #include <rdma/rdmavt_cq.h>
 
 struct qib_ctxtdata;
...

@@ -48,7 +48,7 @@
  *
  */
 
-#include <rdma/rdma_vt.h>
+#include <rdma/rdmavt_qp.h>
 
 int rvt_driver_qp_init(struct rvt_dev_info *rdi);
 void rvt_qp_exit(struct rvt_dev_info *rdi);
...

@@ -45,7 +45,7 @@
  *
  */
 
-#include <rdma/rdma_vt.h>
+#include <rdma/rdmavt_qp.h>
 #include <rdma/ib_hdrs.h>
 
 /*
...

@@ -51,7 +51,7 @@
 #include <linux/trace_seq.h>
 
 #include <rdma/ib_verbs.h>
-#include <rdma/rdma_vt.h>
+#include <rdma/rdmavt_qp.h>
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM rvt_qp
...

@@ -51,7 +51,7 @@
 #include <linux/trace_seq.h>
 
 #include <rdma/ib_verbs.h>
-#include <rdma/rdma_vt.h>
+#include <rdma/rdmavt_qp.h>
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM rvt_rc
...

@@ -51,7 +51,7 @@
 #include <linux/trace_seq.h>
 
 #include <rdma/ib_verbs.h>
-#include <rdma/rdma_vt.h>
+#include <rdma/rdmavt_qp.h>
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM rvt_tx
...

@@ -59,7 +59,6 @@
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_mad.h>
 #include <rdma/rdmavt_mr.h>
-#include <rdma/rdmavt_qp.h>
 
 #define RVT_MAX_PKEY_VALUES 16
 
@@ -72,6 +71,8 @@ struct trap_list {
         struct list_head list;
 };
 
+struct rvt_qp;
+struct rvt_qpn_table;
 struct rvt_ibport {
         struct rvt_qp __rcu *qp[2];
         struct ib_mad_agent *send_agent;        /* agent for SMI (traps) */
@@ -206,6 +207,20 @@ struct rvt_ah {
         u8 log_pmtu;
 };
 
+/*
+ * This structure is used by rvt_mmap() to validate an offset
+ * when an mmap() request is made. The vm_area_struct then uses
+ * this as its vm_private_data.
+ */
+struct rvt_mmap_info {
+        struct list_head pending_mmaps;
+        struct ib_ucontext *context;
+        void *obj;
+        __u64 offset;
+        struct kref ref;
+        u32 size;
+};
+
 /* memory working set size */
 struct rvt_wss {
         unsigned long *entries;
@@ -501,16 +516,6 @@ static inline struct rvt_dev_info *ib_to_rvt(struct ib_device *ibdev)
         return container_of(ibdev, struct rvt_dev_info, ibdev);
 }
 
-static inline struct rvt_srq *ibsrq_to_rvtsrq(struct ib_srq *ibsrq)
-{
-        return container_of(ibsrq, struct rvt_srq, ibsrq);
-}
-
-static inline struct rvt_qp *ibqp_to_rvtqp(struct ib_qp *ibqp)
-{
-        return container_of(ibqp, struct rvt_qp, ibqp);
-}
-
 static inline unsigned rvt_get_npkeys(struct rvt_dev_info *rdi)
 {
         /*
@@ -548,57 +553,6 @@ static inline u16 rvt_get_pkey(struct rvt_dev_info *rdi,
         return rdi->ports[port_index]->pkey_table[index];
 }
 
-/**
- * rvt_lookup_qpn - return the QP with the given QPN
- * @ibp: the ibport
- * @qpn: the QP number to look up
- *
- * The caller must hold the rcu_read_lock(), and keep the lock until
- * the returned qp is no longer in use.
- */
-/* TODO: Remove this and put in rdmavt/qp.h when no longer needed by drivers */
-static inline struct rvt_qp *rvt_lookup_qpn(struct rvt_dev_info *rdi,
-                                            struct rvt_ibport *rvp,
-                                            u32 qpn) __must_hold(RCU)
-{
-        struct rvt_qp *qp = NULL;
-
-        if (unlikely(qpn <= 1)) {
-                qp = rcu_dereference(rvp->qp[qpn]);
-        } else {
-                u32 n = hash_32(qpn, rdi->qp_dev->qp_table_bits);
-
-                for (qp = rcu_dereference(rdi->qp_dev->qp_table[n]); qp;
-                        qp = rcu_dereference(qp->next))
-                        if (qp->ibqp.qp_num == qpn)
-                                break;
-        }
-        return qp;
-}
-
-/**
- * rvt_mod_retry_timer - mod a retry timer
- * @qp - the QP
- * @shift - timeout shift to wait for multiple packets
- * Modify a potentially already running retry timer
- */
-static inline void rvt_mod_retry_timer_ext(struct rvt_qp *qp, u8 shift)
-{
-        struct ib_qp *ibqp = &qp->ibqp;
-        struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
-
-        lockdep_assert_held(&qp->s_lock);
-        qp->s_flags |= RVT_S_TIMER;
-        /* 4.096 usec. * (1 << qp->timeout) */
-        mod_timer(&qp->s_timer, jiffies + rdi->busy_jiffies +
-                  (qp->timeout_jiffies << shift));
-}
-
-static inline void rvt_mod_retry_timer(struct rvt_qp *qp)
-{
-        return rvt_mod_retry_timer_ext(qp, 0);
-}
-
 struct rvt_dev_info *rvt_alloc_device(size_t size, int nports);
 void rvt_dealloc_device(struct rvt_dev_info *rdi);
 int rvt_register_device(struct rvt_dev_info *rvd, u32 driver_id);
...

@@ -210,20 +210,6 @@ struct rvt_rq {
         spinlock_t lock ____cacheline_aligned_in_smp;
 };
 
-/*
- * This structure is used by rvt_mmap() to validate an offset
- * when an mmap() request is made. The vm_area_struct then uses
- * this as its vm_private_data.
- */
-struct rvt_mmap_info {
-        struct list_head pending_mmaps;
-        struct ib_ucontext *context;
-        void *obj;
-        __u64 offset;
-        struct kref ref;
-        unsigned size;
-};
-
 /*
  * This structure holds the information that the send tasklet needs
  * to send a RDMA read response or atomic operation.
@@ -398,6 +384,16 @@ struct rvt_srq {
         u32 limit;
 };
 
+static inline struct rvt_srq *ibsrq_to_rvtsrq(struct ib_srq *ibsrq)
+{
+        return container_of(ibsrq, struct rvt_srq, ibsrq);
+}
+
+static inline struct rvt_qp *ibqp_to_rvtqp(struct ib_qp *ibqp)
+{
+        return container_of(ibqp, struct rvt_qp, ibqp);
+}
+
 #define RVT_QPN_MAX BIT(24)
 #define RVT_QPNMAP_ENTRIES (RVT_QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
 #define RVT_BITS_PER_PAGE (PAGE_SIZE * BITS_PER_BYTE)
@@ -677,6 +673,56 @@ static inline unsigned long rvt_timeout_to_jiffies(u8 timeout)
         return usecs_to_jiffies(1U << timeout) * 4096UL / 1000UL;
 }
 
+/**
+ * rvt_lookup_qpn - return the QP with the given QPN
+ * @ibp: the ibport
+ * @qpn: the QP number to look up
+ *
+ * The caller must hold the rcu_read_lock(), and keep the lock until
+ * the returned qp is no longer in use.
+ */
+static inline struct rvt_qp *rvt_lookup_qpn(struct rvt_dev_info *rdi,
+                                            struct rvt_ibport *rvp,
+                                            u32 qpn) __must_hold(RCU)
+{
+        struct rvt_qp *qp = NULL;
+
+        if (unlikely(qpn <= 1)) {
+                qp = rcu_dereference(rvp->qp[qpn]);
+        } else {
+                u32 n = hash_32(qpn, rdi->qp_dev->qp_table_bits);
+
+                for (qp = rcu_dereference(rdi->qp_dev->qp_table[n]); qp;
+                        qp = rcu_dereference(qp->next))
+                        if (qp->ibqp.qp_num == qpn)
+                                break;
+        }
+        return qp;
+}
+
+/**
+ * rvt_mod_retry_timer - mod a retry timer
+ * @qp - the QP
+ * @shift - timeout shift to wait for multiple packets
+ * Modify a potentially already running retry timer
+ */
+static inline void rvt_mod_retry_timer_ext(struct rvt_qp *qp, u8 shift)
+{
+        struct ib_qp *ibqp = &qp->ibqp;
+        struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
+
+        lockdep_assert_held(&qp->s_lock);
+        qp->s_flags |= RVT_S_TIMER;
+        /* 4.096 usec. * (1 << qp->timeout) */
+        mod_timer(&qp->s_timer, jiffies + rdi->busy_jiffies +
+                  (qp->timeout_jiffies << shift));
+}
+
+static inline void rvt_mod_retry_timer(struct rvt_qp *qp)
+{
+        return rvt_mod_retry_timer_ext(qp, 0);
+}
+
 extern const int ib_rvt_state_ops[];
 
 struct rvt_dev_info;
...
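
With the QP inlines now living in rdmavt_qp.h, which can safely pull in rdma_vt.h itself, a driver only needs that one include to use them. A minimal usage sketch, with a hypothetical helper name, showing the RCU contract that the rvt_lookup_qpn() kernel-doc above spells out:

#include <rdma/rdmavt_qp.h>

/* Hypothetical driver helper, for illustration only. */
static bool example_qpn_in_use(struct rvt_dev_info *rdi,
                               struct rvt_ibport *rvp, u32 qpn)
{
        struct rvt_qp *qp;
        bool in_use;

        rcu_read_lock();                /* required by rvt_lookup_qpn() */
        qp = rvt_lookup_qpn(rdi, rvp, qpn);
        in_use = qp != NULL;
        rcu_read_unlock();              /* qp must not be used past this point */

        return in_use;
}

For the retry timer moved in the same hunk, the comment "4.096 usec. * (1 << qp->timeout)" matches rvt_timeout_to_jiffies() just above it: with timeout = 14, for example, the base interval is 4.096 us * 2^14, roughly 67 ms, and the shift argument of rvt_mod_retry_timer_ext() scales that by a further power of two when waiting on multiple packets.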