Kirill Smelkov / linux / commits

Commit 6dd7abae, authored Feb 19, 2017 by Doug Ledford

    Merge branch 'k.o/for-4.10-rc' into HEAD

    Parents: 6df6b4a9, 646ebd41

Showing 30 changed files with 283 additions and 196 deletions (+283, -196)
drivers/infiniband/core/cma.c                        +2    -1
drivers/infiniband/core/umem.c                       +2    -0
drivers/infiniband/hw/cxgb3/iwch_provider.c          +1   -10
drivers/infiniband/hw/cxgb4/cm.c                     +4    -3
drivers/infiniband/hw/cxgb4/cq.c                    +13    -8
drivers/infiniband/hw/cxgb4/device.c                 +9    -0
drivers/infiniband/hw/cxgb4/iw_cxgb4.h              +20    -4
drivers/infiniband/hw/cxgb4/provider.c              +17   -16
drivers/infiniband/hw/cxgb4/qp.c                    +94   -53
drivers/infiniband/hw/cxgb4/t4.h                     +2    -0
drivers/infiniband/hw/i40iw/i40iw_verbs.c            +1   -10
drivers/infiniband/hw/nes/nes_verbs.c                +1   -11
drivers/infiniband/hw/qedr/main.c                   +15    -8
drivers/infiniband/hw/qedr/qedr.h                    +5    -3
drivers/infiniband/hw/qedr/qedr_cm.c                 +4   -10
drivers/infiniband/hw/qedr/verbs.c                  +41   -21
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c       +1    -3
drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c      +1    -1
drivers/infiniband/sw/rxe/rxe_mr.c                   +5    -3
drivers/infiniband/sw/rxe/rxe_net.c                  +1    -1
drivers/infiniband/sw/rxe/rxe_qp.c                   +1    -2
drivers/infiniband/sw/rxe/rxe_resp.c                 +1    -1
drivers/infiniband/ulp/iser/iscsi_iser.c             +4    -7
drivers/infiniband/ulp/iser/iscsi_iser.h             +0    -2
drivers/infiniband/ulp/iser/iser_verbs.c             +1   -12
drivers/infiniband/ulp/srp/ib_srp.c                 +13    -2
include/rdma/ib_verbs.h                             +14    -0
include/uapi/rdma/Kbuild                             +1    -0
include/uapi/rdma/cxgb3-abi.h                        +1    -1
include/uapi/rdma/ib_user_verbs.h                    +8    -3
drivers/infiniband/core/cma.c

@@ -2851,7 +2851,8 @@ static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
 	if (!src_addr || !src_addr->sa_family) {
 		src_addr = (struct sockaddr *) &id->route.addr.src_addr;
 		src_addr->sa_family = dst_addr->sa_family;
-		if (dst_addr->sa_family == AF_INET6) {
+		if (IS_ENABLED(CONFIG_IPV6) &&
+		    dst_addr->sa_family == AF_INET6) {
 			struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr;
 			struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr;
 			src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
drivers/infiniband/core/umem.c

@@ -134,6 +134,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 			IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));

 	if (access & IB_ACCESS_ON_DEMAND) {
+		put_pid(umem->pid);
 		ret = ib_umem_odp_get(context, umem);
 		if (ret) {
 			kfree(umem);

@@ -149,6 +150,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,

 	page_list = (struct page **) __get_free_page(GFP_KERNEL);
 	if (!page_list) {
+		put_pid(umem->pid);
 		kfree(umem);
 		return ERR_PTR(-ENOMEM);
 	}
drivers/infiniband/hw/cxgb3/iwch_provider.c

@@ -1135,16 +1135,7 @@ static int iwch_query_port(struct ib_device *ibdev,
 	memset(props, 0, sizeof(struct ib_port_attr));
 	props->max_mtu = IB_MTU_4096;
-	if (netdev->mtu >= 4096)
-		props->active_mtu = IB_MTU_4096;
-	else if (netdev->mtu >= 2048)
-		props->active_mtu = IB_MTU_2048;
-	else if (netdev->mtu >= 1024)
-		props->active_mtu = IB_MTU_1024;
-	else if (netdev->mtu >= 512)
-		props->active_mtu = IB_MTU_512;
-	else
-		props->active_mtu = IB_MTU_256;
+	props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);

 	if (!netif_carrier_ok(netdev))
 		props->state = IB_PORT_DOWN;
drivers/infiniband/hw/cxgb4/cm.c

@@ -1804,20 +1804,21 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
 	skb_trim(skb, dlen);
 	mutex_lock(&ep->com.mutex);

-	/* update RX credits */
-	update_rx_credits(ep, dlen);
-
 	switch (ep->com.state) {
 	case MPA_REQ_SENT:
+		update_rx_credits(ep, dlen);
 		ep->rcv_seq += dlen;
 		disconnect = process_mpa_reply(ep, skb);
 		break;
 	case MPA_REQ_WAIT:
+		update_rx_credits(ep, dlen);
 		ep->rcv_seq += dlen;
 		disconnect = process_mpa_request(ep, skb);
 		break;
 	case FPDU_MODE: {
 		struct c4iw_qp_attributes attrs;
+
+		update_rx_credits(ep, dlen);
 		BUG_ON(!ep->com.qp);
 		if (status)
 			pr_err("%s Unexpected streaming data." \
drivers/infiniband/hw/cxgb4/cq.c

@@ -504,6 +504,15 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
 		goto skip_cqe;
 	}

+	/*
+	 * Special cqe for drain WR completions...
+	 */
+	if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) {
+		*cookie = CQE_DRAIN_COOKIE(hw_cqe);
+		*cqe = *hw_cqe;
+		goto skip_cqe;
+	}
+
 	/*
 	 * Gotta tweak READ completions:
 	 *	1) the cqe doesn't contain the sq_wptr from the wr.

@@ -753,6 +762,9 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
 			c4iw_invalidate_mr(qhp->rhp, CQE_WRID_FR_STAG(&cqe));
 			break;
+		case C4IW_DRAIN_OPCODE:
+			wc->opcode = IB_WC_SEND;
+			break;
 		default:
 			printk(KERN_ERR MOD "Unexpected opcode %d "
 			       "in the CQE received for QPID=0x%0x\n",

@@ -817,15 +829,8 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
 		}
 	}
 out:
-	if (wq) {
-		if (unlikely(qhp->attr.state != C4IW_QP_STATE_RTS)) {
-			if (t4_sq_empty(wq))
-				complete(&qhp->sq_drained);
-			if (t4_rq_empty(wq))
-				complete(&qhp->rq_drained);
-		}
+	if (wq)
 		spin_unlock(&qhp->lock);
-	}
 	return ret;
 }
drivers/infiniband/hw/cxgb4/device.c

@@ -881,9 +881,17 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
 		}
 	}

+	rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free");
+	if (!rdev->free_workq) {
+		err = -ENOMEM;
+		goto err_free_status_page;
+	}
+
 	rdev->status_page->db_off = 0;

 	return 0;
+err_free_status_page:
+	free_page((unsigned long)rdev->status_page);
 destroy_ocqp_pool:
 	c4iw_ocqp_pool_destroy(rdev);
 destroy_rqtpool:

@@ -897,6 +905,7 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)

 static void c4iw_rdev_close(struct c4iw_rdev *rdev)
 {
+	destroy_workqueue(rdev->free_workq);
 	kfree(rdev->wr_log);
 	free_page((unsigned long)rdev->status_page);
 	c4iw_pblpool_destroy(rdev);
drivers/infiniband/hw/cxgb4/iw_cxgb4.h

@@ -45,6 +45,7 @@
 #include <linux/kref.h>
 #include <linux/timer.h>
 #include <linux/io.h>
+#include <linux/workqueue.h>

 #include <asm/byteorder.h>

@@ -107,6 +108,7 @@ struct c4iw_dev_ucontext {
 	struct list_head qpids;
 	struct list_head cqids;
 	struct mutex lock;
+	struct kref kref;
 };

 enum c4iw_rdev_flags {

@@ -183,6 +185,7 @@ struct c4iw_rdev {
 	atomic_t wr_log_idx;
 	struct wr_log_entry *wr_log;
 	int wr_log_size;
+	struct workqueue_struct *free_workq;
 };

 static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)

@@ -480,8 +483,8 @@ struct c4iw_qp {
 	wait_queue_head_t wait;
 	struct timer_list timer;
 	int sq_sig_all;
-	struct completion rq_drained;
-	struct completion sq_drained;
+	struct work_struct free_work;
+	struct c4iw_ucontext *ucontext;
 };

 static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)

@@ -495,6 +498,7 @@ struct c4iw_ucontext {
 	u32 key;
 	spinlock_t mmap_lock;
 	struct list_head mmaps;
+	struct kref kref;
 };

 static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)

@@ -502,6 +506,18 @@ static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
 	return container_of(c, struct c4iw_ucontext, ibucontext);
 }

+void _c4iw_free_ucontext(struct kref *kref);
+
+static inline void c4iw_put_ucontext(struct c4iw_ucontext *ucontext)
+{
+	kref_put(&ucontext->kref, _c4iw_free_ucontext);
+}
+
+static inline void c4iw_get_ucontext(struct c4iw_ucontext *ucontext)
+{
+	kref_get(&ucontext->kref);
+}
+
 struct c4iw_mm_entry {
 	struct list_head entry;
 	u64 addr;

@@ -615,6 +631,8 @@ static inline int to_ib_qp_state(int c4iw_qp_state)
 	return IB_QPS_ERR;
 }

+#define C4IW_DRAIN_OPCODE	FW_RI_SGE_EC_CR_RETURN
+
 static inline u32 c4iw_ib_to_tpt_access(int a)
 {
 	return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |

@@ -997,8 +1015,6 @@ extern int c4iw_wr_log;
 extern int db_fc_threshold;
 extern int db_coalescing_threshold;
 extern int use_dsgl;
-void c4iw_drain_rq(struct ib_qp *qp);
-void c4iw_drain_sq(struct ib_qp *qp);
 void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey);

 #endif
drivers/infiniband/hw/cxgb4/provider.c

@@ -93,17 +93,28 @@ static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
 	return -ENOSYS;
 }

-static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
+void _c4iw_free_ucontext(struct kref *kref)
 {
-	struct c4iw_dev *rhp = to_c4iw_dev(context->device);
-	struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
+	struct c4iw_ucontext *ucontext;
+	struct c4iw_dev *rhp;
 	struct c4iw_mm_entry *mm, *tmp;

-	PDBG("%s context %p\n", __func__, context);
+	ucontext = container_of(kref, struct c4iw_ucontext, kref);
+	rhp = to_c4iw_dev(ucontext->ibucontext.device);
+
+	PDBG("%s ucontext %p\n", __func__, ucontext);
 	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
 		kfree(mm);
 	c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
 	kfree(ucontext);
+}
+
+static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
+{
+	struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
+
+	PDBG("%s context %p\n", __func__, context);
+	c4iw_put_ucontext(ucontext);
 	return 0;
 }

@@ -127,6 +138,7 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
 	c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
 	INIT_LIST_HEAD(&context->mmaps);
 	spin_lock_init(&context->mmap_lock);
+	kref_init(&context->kref);

 	if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
 		if (!warned++)

@@ -361,16 +373,7 @@ static int c4iw_query_port(struct ib_device *ibdev, u8 port,
 	memset(props, 0, sizeof(struct ib_port_attr));
 	props->max_mtu = IB_MTU_4096;
-	if (netdev->mtu >= 4096)
-		props->active_mtu = IB_MTU_4096;
-	else if (netdev->mtu >= 2048)
-		props->active_mtu = IB_MTU_2048;
-	else if (netdev->mtu >= 1024)
-		props->active_mtu = IB_MTU_1024;
-	else if (netdev->mtu >= 512)
-		props->active_mtu = IB_MTU_512;
-	else
-		props->active_mtu = IB_MTU_256;
+	props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);

 	if (!netif_carrier_ok(netdev))
 		props->state = IB_PORT_DOWN;

@@ -607,8 +610,6 @@ int c4iw_register_device(struct c4iw_dev *dev)
 	dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
 	dev->ibdev.get_port_immutable = c4iw_port_immutable;
 	dev->ibdev.get_dev_fw_str = get_dev_fw_str;
-	dev->ibdev.drain_sq = c4iw_drain_sq;
-	dev->ibdev.drain_rq = c4iw_drain_rq;

 	dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
 	if (!dev->ibdev.iwcm)
drivers/infiniband/hw/cxgb4/qp.c

@@ -715,13 +715,32 @@ static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
 	return 0;
 }

-static void _free_qp(struct kref *kref)
+static void free_qp_work(struct work_struct *work)
+{
+	struct c4iw_ucontext *ucontext;
+	struct c4iw_qp *qhp;
+	struct c4iw_dev *rhp;
+
+	qhp = container_of(work, struct c4iw_qp, free_work);
+	ucontext = qhp->ucontext;
+	rhp = qhp->rhp;
+
+	PDBG("%s qhp %p ucontext %p\n", __func__, qhp, ucontext);
+	destroy_qp(&rhp->rdev, &qhp->wq,
+		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+
+	if (ucontext)
+		c4iw_put_ucontext(ucontext);
+	kfree(qhp);
+}
+
+static void queue_qp_free(struct kref *kref)
 {
 	struct c4iw_qp *qhp;

 	qhp = container_of(kref, struct c4iw_qp, kref);
 	PDBG("%s qhp %p\n", __func__, qhp);
-	kfree(qhp);
+	queue_work(qhp->rhp->rdev.free_workq, &qhp->free_work);
 }

 void c4iw_qp_add_ref(struct ib_qp *qp)

@@ -733,7 +752,7 @@ void c4iw_qp_add_ref(struct ib_qp *qp)
 void c4iw_qp_rem_ref(struct ib_qp *qp)
 {
 	PDBG("%s ib_qp %p\n", __func__, qp);
-	kref_put(&to_c4iw_qp(qp)->kref, _free_qp);
+	kref_put(&to_c4iw_qp(qp)->kref, queue_qp_free);
 }

 static void add_to_fc_list(struct list_head *head, struct list_head *entry)

@@ -776,6 +795,64 @@ static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
 	return 0;
 }

+static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
+{
+	struct t4_cqe cqe = {};
+	struct c4iw_cq *schp;
+	unsigned long flag;
+	struct t4_cq *cq;
+
+	schp = to_c4iw_cq(qhp->ibqp.send_cq);
+	cq = &schp->cq;
+
+	cqe.u.drain_cookie = wr->wr_id;
+	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
+				 CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
+				 CQE_TYPE_V(1) |
+				 CQE_SWCQE_V(1) |
+				 CQE_QPID_V(qhp->wq.sq.qid));
+
+	spin_lock_irqsave(&schp->lock, flag);
+	cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
+	cq->sw_queue[cq->sw_pidx] = cqe;
+	t4_swcq_produce(cq);
+	spin_unlock_irqrestore(&schp->lock, flag);
+
+	spin_lock_irqsave(&schp->comp_handler_lock, flag);
+	(*schp->ibcq.comp_handler)(&schp->ibcq,
+				   schp->ibcq.cq_context);
+	spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+}
+
+static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
+{
+	struct t4_cqe cqe = {};
+	struct c4iw_cq *rchp;
+	unsigned long flag;
+	struct t4_cq *cq;
+
+	rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
+	cq = &rchp->cq;
+
+	cqe.u.drain_cookie = wr->wr_id;
+	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
+				 CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
+				 CQE_TYPE_V(0) |
+				 CQE_SWCQE_V(1) |
+				 CQE_QPID_V(qhp->wq.sq.qid));
+
+	spin_lock_irqsave(&rchp->lock, flag);
+	cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
+	cq->sw_queue[cq->sw_pidx] = cqe;
+	t4_swcq_produce(cq);
+	spin_unlock_irqrestore(&rchp->lock, flag);
+
+	spin_lock_irqsave(&rchp->comp_handler_lock, flag);
+	(*rchp->ibcq.comp_handler)(&rchp->ibcq,
+				   rchp->ibcq.cq_context);
+	spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+}
+
 int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		   struct ib_send_wr **bad_wr)
 {

@@ -794,8 +871,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	spin_lock_irqsave(&qhp->lock, flag);
 	if (t4_wq_in_error(&qhp->wq)) {
 		spin_unlock_irqrestore(&qhp->lock, flag);
-		*bad_wr = wr;
-		return -EINVAL;
+		complete_sq_drain_wr(qhp, wr);
+		return err;
 	}
 	num_wrs = t4_sq_avail(&qhp->wq);
 	if (num_wrs == 0) {

@@ -937,8 +1014,8 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 	spin_lock_irqsave(&qhp->lock, flag);
 	if (t4_wq_in_error(&qhp->wq)) {
 		spin_unlock_irqrestore(&qhp->lock, flag);
-		*bad_wr = wr;
-		return -EINVAL;
+		complete_rq_drain_wr(qhp, wr);
+		return err;
 	}
 	num_wrs = t4_rq_avail(&qhp->wq);
 	if (num_wrs == 0) {

@@ -1550,7 +1627,12 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 		}
 		break;
 	case C4IW_QP_STATE_CLOSING:
-		if (!internal) {
+
+		/*
+		 * Allow kernel users to move to ERROR for qp draining.
+		 */
+		if (!internal && (qhp->ibqp.uobject || attrs->next_state !=
+				  C4IW_QP_STATE_ERROR)) {
 			ret = -EINVAL;
 			goto out;
 		}

@@ -1643,7 +1725,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
 	struct c4iw_dev *rhp;
 	struct c4iw_qp *qhp;
 	struct c4iw_qp_attributes attrs;
-	struct c4iw_ucontext *ucontext;

 	qhp = to_c4iw_qp(ib_qp);
 	rhp = qhp->rhp;

@@ -1663,11 +1744,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
 	spin_unlock_irq(&rhp->lock);
 	free_ird(rhp, qhp->attr.max_ird);

-	ucontext = ib_qp->uobject ?
-		   to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
-	destroy_qp(&rhp->rdev, &qhp->wq,
-		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
-
 	c4iw_qp_rem_ref(ib_qp);

 	PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);

@@ -1763,11 +1839,10 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 	qhp->attr.max_ird = 0;
 	qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
 	spin_lock_init(&qhp->lock);
-	init_completion(&qhp->sq_drained);
-	init_completion(&qhp->rq_drained);
 	mutex_init(&qhp->mutex);
 	init_waitqueue_head(&qhp->wait);
 	kref_init(&qhp->kref);
+	INIT_WORK(&qhp->free_work, free_qp_work);

 	ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
 	if (ret)

@@ -1854,6 +1929,9 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 			ma_sync_key_mm->len = PAGE_SIZE;
 			insert_mmap(ucontext, ma_sync_key_mm);
 		}
+
+		c4iw_get_ucontext(ucontext);
+		qhp->ucontext = ucontext;
 	}
 	qhp->ibqp.qp_num = qhp->wq.sq.qid;
 	init_timer(&(qhp->timer));

@@ -1958,40 +2036,3 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
 	return 0;
 }
-
-static void move_qp_to_err(struct c4iw_qp *qp)
-{
-	struct c4iw_qp_attributes attrs = { .next_state = C4IW_QP_STATE_ERROR };
-
-	(void)c4iw_modify_qp(qp->rhp, qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
-}
-
-void c4iw_drain_sq(struct ib_qp *ibqp)
-{
-	struct c4iw_qp *qp = to_c4iw_qp(ibqp);
-	unsigned long flag;
-	bool need_to_wait;
-
-	move_qp_to_err(qp);
-	spin_lock_irqsave(&qp->lock, flag);
-	need_to_wait = !t4_sq_empty(&qp->wq);
-	spin_unlock_irqrestore(&qp->lock, flag);
-
-	if (need_to_wait)
-		wait_for_completion(&qp->sq_drained);
-}
-
-void c4iw_drain_rq(struct ib_qp *ibqp)
-{
-	struct c4iw_qp *qp = to_c4iw_qp(ibqp);
-	unsigned long flag;
-	bool need_to_wait;
-
-	move_qp_to_err(qp);
-	spin_lock_irqsave(&qp->lock, flag);
-	need_to_wait = !t4_rq_empty(&qp->wq);
-	spin_unlock_irqrestore(&qp->lock, flag);
-
-	if (need_to_wait)
-		wait_for_completion(&qp->rq_drained);
-}
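Taken together, the qp.c changes drop the driver-private drain_sq/drain_rq hooks (deleted above) in favor of software drain completions: once a QP is in error, posted work requests are completed immediately as C4IW_DRAIN_OPCODE CQEs, so generic marker-and-wait drain logic can rely on them. A user-space analogue of that marker-and-wait idea, written with pthreads purely for illustration (nothing here is kernel API; build with -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <stdbool.h>

/* The consumer completes queued entries in order, so waiting on a sentinel
 * entry guarantees everything posted before it has completed. */
#define QDEPTH 8

struct entry { int id; bool is_drain; };

static struct entry queue[QDEPTH];
static int head, tail;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t drained = PTHREAD_COND_INITIALIZER;
static bool drain_done;

static void post(struct entry e)
{
	pthread_mutex_lock(&lock);
	queue[tail++ % QDEPTH] = e;
	pthread_mutex_unlock(&lock);
}

static void *consumer(void *arg)
{
	(void)arg;
	for (;;) {
		pthread_mutex_lock(&lock);
		if (head == tail) {		/* busy-wait keeps the sketch short */
			pthread_mutex_unlock(&lock);
			continue;
		}
		struct entry e = queue[head++ % QDEPTH];
		if (e.is_drain) {
			drain_done = true;
			pthread_cond_signal(&drained);	/* complete() analogue */
			pthread_mutex_unlock(&lock);
			return NULL;
		}
		pthread_mutex_unlock(&lock);
		printf("completed wr %d\n", e.id);
	}
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, consumer, NULL);
	for (int i = 0; i < 3; i++)
		post((struct entry){ .id = i });
	post((struct entry){ .is_drain = true });	/* the marker WR */

	pthread_mutex_lock(&lock);
	while (!drain_done)
		pthread_cond_wait(&drained, &lock);	/* wait_for_completion() analogue */
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	puts("queue drained");
	return 0;
}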
drivers/infiniband/hw/cxgb4/t4.h

@@ -179,6 +179,7 @@ struct t4_cqe {
 			__be32 wrid_hi;
 			__be32 wrid_low;
 		} gen;
+		u64 drain_cookie;
 	} u;
 	__be64 reserved;
 	__be64 bits_type_ts;

@@ -238,6 +239,7 @@ struct t4_cqe {
 /* generic accessor macros */
 #define CQE_WRID_HI(x)		(be32_to_cpu((x)->u.gen.wrid_hi))
 #define CQE_WRID_LOW(x)		(be32_to_cpu((x)->u.gen.wrid_low))
+#define CQE_DRAIN_COOKIE(x)	((x)->u.drain_cookie)

 /* macros for flit 3 of the cqe */
 #define CQE_GENBIT_S	63
drivers/infiniband/hw/i40iw/i40iw_verbs.c

@@ -100,16 +100,7 @@ static int i40iw_query_port(struct ib_device *ibdev,
 	memset(props, 0, sizeof(*props));
 	props->max_mtu = IB_MTU_4096;
-	if (netdev->mtu >= 4096)
-		props->active_mtu = IB_MTU_4096;
-	else if (netdev->mtu >= 2048)
-		props->active_mtu = IB_MTU_2048;
-	else if (netdev->mtu >= 1024)
-		props->active_mtu = IB_MTU_1024;
-	else if (netdev->mtu >= 512)
-		props->active_mtu = IB_MTU_512;
-	else
-		props->active_mtu = IB_MTU_256;
+	props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);

 	props->lid = 1;
 	if (netif_carrier_ok(iwdev->netdev))
drivers/infiniband/hw/nes/nes_verbs.c

@@ -478,17 +478,7 @@ static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr
 	memset(props, 0, sizeof(*props));
 	props->max_mtu = IB_MTU_4096;
-
-	if (netdev->mtu >= 4096)
-		props->active_mtu = IB_MTU_4096;
-	else if (netdev->mtu >= 2048)
-		props->active_mtu = IB_MTU_2048;
-	else if (netdev->mtu >= 1024)
-		props->active_mtu = IB_MTU_1024;
-	else if (netdev->mtu >= 512)
-		props->active_mtu = IB_MTU_512;
-	else
-		props->active_mtu = IB_MTU_256;
+	props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);

 	props->lid = 1;
 	props->lmc = 0;
drivers/infiniband/hw/qedr/main.c

@@ -576,8 +576,7 @@ static int qedr_set_device_attr(struct qedr_dev *dev)
 	return 0;
 }

-void qedr_unaffiliated_event(void *context,
-			     u8 event_code)
+void qedr_unaffiliated_event(void *context, u8 event_code)
 {
 	pr_err("unaffiliated event not implemented yet\n");
 }

@@ -792,6 +791,9 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
 		if (device_create_file(&dev->ibdev.dev, qedr_attributes[i]))
 			goto sysfs_err;

+	if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
+
 	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
 	return dev;

@@ -824,11 +826,10 @@ static void qedr_remove(struct qedr_dev *dev)
 	ib_dealloc_device(&dev->ibdev);
 }

-static int qedr_close(struct qedr_dev *dev)
+static void qedr_close(struct qedr_dev *dev)
 {
-	qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
-
-	return 0;
+	if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR);
 }

 static void qedr_shutdown(struct qedr_dev *dev)

@@ -837,6 +838,12 @@ static void qedr_shutdown(struct qedr_dev *dev)
 	qedr_remove(dev);
 }

+static void qedr_open(struct qedr_dev *dev)
+{
+	if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+		qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
+}
+
 static void qedr_mac_address_change(struct qedr_dev *dev)
 {
 	union ib_gid *sgid = &dev->sgid_tbl[0];

@@ -863,7 +870,7 @@ static void qedr_mac_address_change(struct qedr_dev *dev)
 	ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);

-	qedr_ib_dispatch_event(dev, 1, IB_EVENT_GID_CHANGE);
+	qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE);

 	if (rc)
 		DP_ERR(dev, "Error updating mac filter\n");

@@ -877,7 +884,7 @@ static void qedr_notify(struct qedr_dev *dev, enum qede_roce_event event)
 {
 	switch (event) {
 	case QEDE_UP:
-		qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
+		qedr_open(dev);
 		break;
 	case QEDE_DOWN:
 		qedr_close(dev);
drivers/infiniband/hw/qedr/qedr.h

@@ -113,6 +113,8 @@ struct qedr_device_attr {
 	struct qed_rdma_events events;
 };

+#define QEDR_ENET_STATE_BIT	(0)
+
 struct qedr_dev {
 	struct ib_device	ibdev;
 	struct qed_dev		*cdev;

@@ -153,6 +155,8 @@ struct qedr_dev {
 	struct qedr_cq		*gsi_sqcq;
 	struct qedr_cq		*gsi_rqcq;
 	struct qedr_qp		*gsi_qp;
+
+	unsigned long enet_state;
 };

 #define QEDR_MAX_SQ_PBL			(0x8000)

@@ -188,6 +192,7 @@ struct qedr_dev {
 #define QEDR_ROCE_MAX_CNQ_SIZE		(0x4000)

 #define QEDR_MAX_PORT			(1)
+#define QEDR_PORT			(1)

 #define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)

@@ -251,9 +256,6 @@ struct qedr_cq {
 	u16 icid;

-	/* Lock to protect completion handler */
-	spinlock_t comp_handler_lock;
-
 	/* Lock to protect multiplem CQ's */
 	spinlock_t cq_lock;
 	u8 arm_flags;
drivers/infiniband/hw/qedr/qedr_cm.c

@@ -87,11 +87,8 @@ void qedr_ll2_tx_cb(void *_qdev, struct qed_roce_ll2_packet *pkt)
 	qedr_inc_sw_gsi_cons(&qp->sq);
 	spin_unlock_irqrestore(&qp->q_lock, flags);

-	if (cq->ibcq.comp_handler) {
-		spin_lock_irqsave(&cq->comp_handler_lock, flags);
+	if (cq->ibcq.comp_handler)
 		(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
-		spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
-	}
 }

 void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,

@@ -113,11 +110,8 @@ void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
 	spin_unlock_irqrestore(&qp->q_lock, flags);

-	if (cq->ibcq.comp_handler) {
-		spin_lock_irqsave(&cq->comp_handler_lock, flags);
+	if (cq->ibcq.comp_handler)
 		(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
-		spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
-	}
 }

 static void qedr_destroy_gsi_cq(struct qedr_dev *dev,

@@ -404,9 +398,9 @@ static inline int qedr_gsi_build_packet(struct qedr_dev *dev,
 	}

 	if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h))
-		packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
-	else
 		packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB;
+	else
+		packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;

 	packet->roce_mode = roce_mode;
 	memcpy(packet->header.vaddr, ud_header_buffer, header_size);
drivers/infiniband/hw/qedr/verbs.c

@@ -471,8 +471,6 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
 			    struct ib_ucontext *context, struct ib_udata *udata)
 {
 	struct qedr_dev *dev = get_qedr_dev(ibdev);
-	struct qedr_ucontext *uctx = NULL;
-	struct qedr_alloc_pd_uresp uresp;
 	struct qedr_pd *pd;
 	u16 pd_id;
 	int rc;

@@ -489,21 +487,33 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
 	if (!pd)
 		return ERR_PTR(-ENOMEM);

-	dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
+	rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
+	if (rc)
+		goto err;

-	uresp.pd_id = pd_id;
 	pd->pd_id = pd_id;

 	if (udata && context) {
+		struct qedr_alloc_pd_uresp uresp;
+
+		uresp.pd_id = pd_id;
+
 		rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
-		if (rc)
+		if (rc) {
 			DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
-		uctx = get_qedr_ucontext(context);
-		uctx->pd = pd;
-		pd->uctx = uctx;
+			dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
+			goto err;
+		}
+
+		pd->uctx = get_qedr_ucontext(context);
+		pd->uctx->pd = pd;
 	}

 	return &pd->ibpd;
+
+err:
+	kfree(pd);
+	return ERR_PTR(rc);
 }

 int qedr_dealloc_pd(struct ib_pd *ibpd)

@@ -1516,7 +1526,7 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
 	return ERR_PTR(-EFAULT);
 }

-enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
+static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
 {
 	switch (qp_state) {
 	case QED_ROCE_QP_STATE_RESET:

@@ -1537,7 +1547,8 @@ enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
 	return IB_QPS_ERR;
 }

-enum qed_roce_qp_state qedr_get_state_from_ibqp(enum ib_qp_state qp_state)
+static enum qed_roce_qp_state qedr_get_state_from_ibqp(
+					enum ib_qp_state qp_state)
 {
 	switch (qp_state) {
 	case IB_QPS_RESET:

@@ -1573,7 +1584,7 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
 	int status = 0;

 	if (new_state == qp->state)
-		return 1;
+		return 0;

 	switch (qp->state) {
 	case QED_ROCE_QP_STATE_RESET:

@@ -1649,6 +1660,14 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
 		/* ERR->XXX */
 		switch (new_state) {
 		case QED_ROCE_QP_STATE_RESET:
+			if ((qp->rq.prod != qp->rq.cons) ||
+			    (qp->sq.prod != qp->sq.cons)) {
+				DP_NOTICE(dev,
+					  "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
+					  qp->rq.prod, qp->rq.cons, qp->sq.prod,
+					  qp->sq.cons);
+				status = -EINVAL;
+			}
 			break;
 		default:
 			status = -EINVAL;

@@ -1781,7 +1800,6 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 			 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
 		DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
 			 qp_params.remote_mac_addr);
-;

 		qp_params.mtu = qp->mtu;
 		qp_params.lb_indication = false;

@@ -1932,7 +1950,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
 	qp_attr->qp_state = qedr_get_ibqp_state(params.state);
 	qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
-	qp_attr->path_mtu = iboe_get_mtu(params.mtu);
+	qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
 	qp_attr->path_mig_state = IB_MIG_MIGRATED;
 	qp_attr->rq_psn = params.rq_psn;
 	qp_attr->sq_psn = params.sq_psn;

@@ -1944,7 +1962,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
 	qp_attr->cap.max_recv_wr = qp->rq.max_wr;
 	qp_attr->cap.max_send_sge = qp->sq.max_sges;
 	qp_attr->cap.max_recv_sge = qp->rq.max_sges;
-	qp_attr->cap.max_inline_data = qp->max_inline_data;
+	qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
 	qp_init_attr->cap = qp_attr->cap;

 	memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0],

@@ -2225,7 +2243,8 @@ int qedr_dereg_mr(struct ib_mr *ib_mr)
 	return rc;
 }

-struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, int max_page_list_len)
+static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
+				       int max_page_list_len)
 {
 	struct qedr_pd *pd = get_qedr_pd(ibpd);
 	struct qedr_dev *dev = get_qedr_dev(ibpd->device);

@@ -2627,7 +2646,7 @@ static int qedr_prepare_reg(struct qedr_qp *qp,
 	return 0;
 }

-enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
+static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
 {
 	switch (opcode) {
 	case IB_WR_RDMA_WRITE:

@@ -2652,7 +2671,7 @@ enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
 	}
 }

-inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
+static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
 {
 	int wq_is_full, err_wr, pbl_is_full;
 	struct qedr_dev *dev = qp->dev;

@@ -2689,7 +2708,7 @@ inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
 	return true;
 }

-int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		     struct ib_send_wr **bad_wr)
 {
 	struct qedr_dev *dev = get_qedr_dev(ibqp->device);

@@ -3157,9 +3176,10 @@ static int qedr_poll_cq_req(struct qedr_dev *dev,
 				  IB_WC_SUCCESS, 0);
 		break;
 	case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
-		DP_ERR(dev,
-		       "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
-		       cq->icid, qp->icid);
+		if (qp->state != QED_ROCE_QP_STATE_ERR)
+			DP_ERR(dev,
+			       "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+			       cq->icid, qp->icid);
 		cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
 				  IB_WC_WR_FLUSH_ERR, 1);
 		break;
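The qedr_alloc_pd() rework above is a textbook goto-unwind conversion: the backend allocation's return code is now checked, and a failed copy to user space releases the backend PD before freeing the struct. A stand-alone user-space sketch of the same pattern, with all names invented for illustration:

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

/* On a later failure, release exactly what was acquired, then bail out. */
struct pd { int id; };

static bool backend_alloc_id(int *id) { *id = 42; return true; }
static void backend_free_id(int id)   { (void)id; }

static struct pd *alloc_pd(bool copy_to_user_fails)
{
	struct pd *pd = malloc(sizeof(*pd));

	if (!pd)
		return NULL;
	if (!backend_alloc_id(&pd->id))
		goto err;			/* only the struct to undo */
	if (copy_to_user_fails) {		/* ib_copy_to_udata() stand-in */
		backend_free_id(pd->id);	/* undo the backend allocation too */
		goto err;
	}
	return pd;
err:
	free(pd);
	return NULL;
}

int main(void)
{
	struct pd *pd = alloc_pd(false);

	printf("pd %s\n", pd ? "allocated" : "failed");
	free(pd);
	return 0;
}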
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c

@@ -1029,7 +1029,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
 	if (ret) {
 		dev_err(&pdev->dev, "failed to allocate interrupts\n");
 		ret = -ENOMEM;
-		goto err_netdevice;
+		goto err_free_cq_ring;
 	}

 	/* Allocate UAR table. */

@@ -1092,8 +1092,6 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
 err_free_intrs:
 	pvrdma_free_irq(dev);
 	pvrdma_disable_msi_all(dev);
-err_netdevice:
-	unregister_netdevice_notifier(&dev->nb_netdev);
 err_free_cq_ring:
 	pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
 err_free_async_ring:
drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c

@@ -306,7 +306,7 @@ struct ib_ucontext *pvrdma_alloc_ucontext(struct ib_device *ibdev,
 	union pvrdma_cmd_resp rsp;
 	struct pvrdma_cmd_create_uc *cmd = &req.create_uc;
 	struct pvrdma_cmd_create_uc_resp *resp = &rsp.create_uc_resp;
-	struct pvrdma_alloc_ucontext_resp uresp;
+	struct pvrdma_alloc_ucontext_resp uresp = {0};
 	int ret;
 	void *ptr;
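Zero-initializing the response struct matters because it is later copied out to user space: any member never explicitly assigned would otherwise carry kernel-stack garbage. A small user-space sketch of the difference (struct and field names made up for the illustration):

#include <stdio.h>
#include <string.h>

struct uresp {
	unsigned int qp_tab_size;
	unsigned int reserved;	/* would hold stack garbage if never assigned */
};

int main(void)
{
	struct uresp a = {0};		/* all members zero-initialized */
	struct uresp b;

	memset(&b, 0, sizeof(b));	/* same effect, and clears padding too */
	printf("%u %u\n", a.reserved, b.reserved);
	return 0;
}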
drivers/infiniband/sw/rxe/rxe_mr.c

@@ -59,9 +59,11 @@ int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)
 	case RXE_MEM_TYPE_MR:
 	case RXE_MEM_TYPE_FMR:
-		return ((iova < mem->iova) ||
-			((iova + length) > (mem->iova + mem->length))) ?
-			-EFAULT : 0;
+		if (iova < mem->iova ||
+		    length > mem->length ||
+		    iova > mem->iova + mem->length - length)
+			return -EFAULT;
+		return 0;

 	default:
 		return -EFAULT;
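The rewritten check never computes iova + length, which in the old expression could wrap around for a hostile iova and make an out-of-range request look in-range. A stand-alone sketch of the same predicate, assuming the registered region itself does not wrap:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Reject when [iova, iova + length) is not inside [base, base + size). */
static bool range_ok(uint64_t base, uint64_t size, uint64_t iova, uint64_t length)
{
	if (iova < base || length > size || iova > base + size - length)
		return false;
	return true;
}

int main(void)
{
	/* exact fit is accepted */
	printf("%d\n", range_ok(0x1000, 0x100, 0x1000, 0x100));
	/* runs one byte past the end: rejected */
	printf("%d\n", range_ok(0x1000, 0x100, 0x10ff, 2));
	/* iova + length would wrap to a tiny value; the old form
	 * (iova + length > base + size) accepted this, the new one rejects it */
	printf("%d\n", range_ok(0x1000, 0x100, UINT64_MAX, 2));
	return 0;
}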
drivers/infiniband/sw/rxe/rxe_net.c

@@ -538,7 +538,7 @@ struct rxe_dev *rxe_net_add(struct net_device *ndev)
 	}

 	spin_lock_bh(&dev_list_lock);
-	list_add_tail(&rxe_dev_list, &rxe->list);
+	list_add_tail(&rxe->list, &rxe_dev_list);
 	spin_unlock_bh(&dev_list_lock);

 	return rxe;
 }
drivers/infiniband/sw/rxe/rxe_qp.c

@@ -813,8 +813,7 @@ void rxe_qp_destroy(struct rxe_qp *qp)
 	del_timer_sync(&qp->rnr_nak_timer);

 	rxe_cleanup_task(&qp->req.task);
-	if (qp_type(qp) == IB_QPT_RC)
-		rxe_cleanup_task(&qp->comp.task);
+	rxe_cleanup_task(&qp->comp.task);

 	/* flush out any receive wr's or pending requests */
 	__rxe_do_task(&qp->req.task);
drivers/infiniband/sw/rxe/rxe_resp.c

@@ -479,7 +479,7 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
 			goto err;
 		}

-		resid = mtu;
+		qp->resp.resid = mtu;
 	} else {
 		if (pktlen != resid) {
 			state = RESPST_ERR_LENGTH;
drivers/infiniband/ulp/iser/iscsi_iser.c

@@ -651,13 +651,6 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
 						   SHOST_DIX_GUARD_CRC);
 	}

-	/*
-	 * Limit the sg_tablesize and max_sectors based on the device
-	 * max fastreg page list length.
-	 */
-	shost->sg_tablesize = min_t(unsigned short, shost->sg_tablesize,
-		ib_conn->device->ib_device->attrs.max_fast_reg_page_list_len);
-
 	if (iscsi_host_add(shost, ib_conn->device->ib_device->dma_device)) {
 		mutex_unlock(&iser_conn->state_mutex);

@@ -679,6 +672,10 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
 	max_fr_sectors = ((shost->sg_tablesize - 1) * PAGE_SIZE) >> 9;
 	shost->max_sectors = min(iser_max_sectors, max_fr_sectors);

+	iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n",
+		 iser_conn, shost->sg_tablesize,
+		 shost->max_sectors);
+
 	if (cmds_max > max_cmds) {
 		iser_info("cmds_max changed from %u to %u\n",
 			  cmds_max, max_cmds);
drivers/infiniband/ulp/iser/iscsi_iser.h

@@ -496,7 +496,6 @@ struct ib_conn {
 * @rx_descs:            rx buffers array (cyclic buffer)
 * @num_rx_descs:        number of rx descriptors
 * @scsi_sg_tablesize:   scsi host sg_tablesize
- * @scsi_max_sectors:    scsi host max sectors
 */
 struct iser_conn {
 	struct ib_conn		ib_conn;

@@ -519,7 +518,6 @@ struct iser_conn {
 	struct iser_rx_desc	*rx_descs;
 	u32			num_rx_descs;
 	unsigned short		scsi_sg_tablesize;
-	unsigned int		scsi_max_sectors;
 	bool			snd_w_inv;
 };
drivers/infiniband/ulp/iser/iser_verbs.c

@@ -707,18 +707,7 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
 	sup_sg_tablesize = min_t(unsigned, ISCSI_ISER_MAX_SG_TABLESIZE,
 				 device->ib_device->attrs.max_fast_reg_page_list_len);

-	if (sg_tablesize > sup_sg_tablesize) {
-		sg_tablesize = sup_sg_tablesize;
-		iser_conn->scsi_max_sectors = sg_tablesize * SIZE_4K / 512;
-	} else {
-		iser_conn->scsi_max_sectors = max_sectors;
-	}
-
-	iser_conn->scsi_sg_tablesize = sg_tablesize;
-
-	iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n",
-		 iser_conn, iser_conn->scsi_sg_tablesize,
-		 iser_conn->scsi_max_sectors);
+	iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize);
 }

 /**
drivers/infiniband/ulp/srp/ib_srp.c

@@ -371,6 +371,7 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
 	struct srp_fr_desc *d;
 	struct ib_mr *mr;
 	int i, ret = -EINVAL;
+	enum ib_mr_type mr_type;

 	if (pool_size <= 0)
 		goto err;

@@ -384,9 +385,13 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
 	spin_lock_init(&pool->lock);
 	INIT_LIST_HEAD(&pool->free_list);

+	if (device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
+		mr_type = IB_MR_TYPE_SG_GAPS;
+	else
+		mr_type = IB_MR_TYPE_MEM_REG;
+
 	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
-		mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
-				 max_page_list_len);
+		mr = ib_alloc_mr(pd, mr_type, max_page_list_len);
 		if (IS_ERR(mr)) {
 			ret = PTR_ERR(mr);
 			if (ret == -ENOMEM)

@@ -3694,6 +3699,12 @@ static int __init srp_init_module(void)
 		indirect_sg_entries = cmd_sg_entries;
 	}

+	if (indirect_sg_entries > SG_MAX_SEGMENTS) {
+		pr_warn("Clamping indirect_sg_entries to %u\n",
+			SG_MAX_SEGMENTS);
+		indirect_sg_entries = SG_MAX_SEGMENTS;
+	}
+
 	srp_remove_wq = create_workqueue("srp_remove");
 	if (!srp_remove_wq) {
 		ret = -ENOMEM;
include/rdma/ib_verbs.h

@@ -352,6 +352,20 @@ static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
 	}
 }

+static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
+{
+	if (mtu >= 4096)
+		return IB_MTU_4096;
+	else if (mtu >= 2048)
+		return IB_MTU_2048;
+	else if (mtu >= 1024)
+		return IB_MTU_1024;
+	else if (mtu >= 512)
+		return IB_MTU_512;
+	else
+		return IB_MTU_256;
+}
+
 enum ib_port_state {
 	IB_PORT_NOP		= 0,
 	IB_PORT_DOWN		= 1,
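This new helper centralizes the MTU ladder that the cxgb3, cxgb4, i40iw, and nes hunks above each used to open-code. A stand-alone user-space mirror of the mapping, with the enum values assumed to follow the IB encoding used elsewhere in ib_verbs.h (IB_MTU_256 == 1 through IB_MTU_4096 == 5):

#include <stdio.h>

enum ib_mtu {
	IB_MTU_256 = 1, IB_MTU_512 = 2, IB_MTU_1024 = 3,
	IB_MTU_2048 = 4, IB_MTU_4096 = 5
};

/* Same ladder as the kernel helper: round a byte MTU down to the
 * largest IB MTU enum that fits. */
static enum ib_mtu ib_mtu_int_to_enum(int mtu)
{
	if (mtu >= 4096)
		return IB_MTU_4096;
	else if (mtu >= 2048)
		return IB_MTU_2048;
	else if (mtu >= 1024)
		return IB_MTU_1024;
	else if (mtu >= 512)
		return IB_MTU_512;
	else
		return IB_MTU_256;
}

int main(void)
{
	const int samples[] = { 9000, 4096, 1500, 600, 256 };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("netdev mtu %4d -> IB_MTU enum %d\n",
		       samples[i], ib_mtu_int_to_enum(samples[i]));
	return 0;
}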
include/uapi/rdma/Kbuild

@@ -17,3 +17,4 @@ header-y += nes-abi.h
 header-y += ocrdma-abi.h
 header-y += hns-abi.h
 header-y += vmw_pvrdma-abi.h
+header-y += qedr-abi.h
include/uapi/rdma/cxgb3-abi.h

@@ -30,7 +30,7 @@
 * SOFTWARE.
 */
 #ifndef CXGB3_ABI_USER_H
-#define CXBG3_ABI_USER_H
+#define CXGB3_ABI_USER_H

 #include <linux/types.h>
include/uapi/rdma/ib_user_verbs.h

@@ -37,7 +37,6 @@
 #define IB_USER_VERBS_H

 #include <linux/types.h>
-#include <rdma/ib_verbs.h>

 /*
 * Increment this value if any changes that break userspace ABI

@@ -548,11 +547,17 @@ enum {
 };

 enum {
-	IB_USER_LEGACY_LAST_QP_ATTR_MASK = IB_QP_DEST_QPN
+	/*
+	 * This value is equal to IB_QP_DEST_QPN.
+	 */
+	IB_USER_LEGACY_LAST_QP_ATTR_MASK = 1ULL << 20,
 };

 enum {
-	IB_USER_LAST_QP_ATTR_MASK = IB_QP_RATE_LIMIT
+	/*
+	 * This value is equal to IB_QP_RATE_LIMIT.
	 */
+	IB_USER_LAST_QP_ATTR_MASK = 1ULL << 25,
 };

 struct ib_uverbs_ex_create_qp {