Commit 6fa1f2f0 authored Nov 16, 2016 by Doug Ledford

Merge branches 'hfi1' and 'mlx' into k.o/for-4.9-rc
parents 2b16056f 6d931308

Showing 17 changed files with 203 additions and 50 deletions

drivers/infiniband/core/addr.c           +9   -2
drivers/infiniband/core/cm.c           +110  -16
drivers/infiniband/core/cma.c           +20   -1
drivers/infiniband/core/umem.c           +1   -1
drivers/infiniband/core/uverbs_main.c    +2   -5
drivers/infiniband/hw/mlx4/ah.c          +4   -1
drivers/infiniband/hw/mlx4/cq.c          +4   -1
drivers/infiniband/hw/mlx5/cq.c          +1   -2
drivers/infiniband/hw/mlx5/main.c        +7   -4
drivers/infiniband/hw/mlx5/mlx5_ib.h     +2   -0
drivers/infiniband/hw/mlx5/mr.c          +5   -1
drivers/infiniband/hw/mlx5/qp.c         +10   -2
drivers/infiniband/sw/rxe/rxe_net.c      +2   -6
drivers/infiniband/sw/rxe/rxe_qp.c       +2   -0
drivers/infiniband/sw/rxe/rxe_queue.c    +9   -0
drivers/infiniband/sw/rxe/rxe_queue.h    +2   -0
drivers/infiniband/sw/rxe/rxe_req.c     +13   -8

drivers/infiniband/core/addr.c
@@ -699,13 +699,16 @@ EXPORT_SYMBOL(rdma_addr_cancel);
 struct resolve_cb_context {
 	struct rdma_dev_addr *addr;
 	struct completion comp;
+	int status;
 };
 
 static void resolve_cb(int status, struct sockaddr *src_addr,
 		       struct rdma_dev_addr *addr, void *context)
 {
-	memcpy(((struct resolve_cb_context *)context)->addr, addr, sizeof(struct
-	       rdma_dev_addr));
+	if (!status)
+		memcpy(((struct resolve_cb_context *)context)->addr, addr,
+		       sizeof(struct rdma_dev_addr));
+	((struct resolve_cb_context *)context)->status = status;
 	complete(&((struct resolve_cb_context *)context)->comp);
 }
@@ -743,6 +746,10 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
 	wait_for_completion(&ctx.comp);
 
+	ret = ctx.status;
+	if (ret)
+		return ret;
+
 	memcpy(dmac, dev_addr.dst_dev_addr, ETH_ALEN);
 	dev = dev_get_by_index(&init_net, dev_addr.bound_dev_if);
 	if (!dev)
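
The fix lets rdma_addr_find_l2_eth_by_grh observe the callback's outcome instead of blindly consuming a possibly-unwritten address. The shape — a callback records a result plus a status, and the waiter checks the status before touching the result — carries over directly to userspace. Below is a minimal standalone sketch using pthreads in place of the kernel's completion API; all names here are hypothetical, not kernel API:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

/* The callback fills in a result and a status; the waiter must check
 * the status before using the result buffer. */
struct resolve_ctx {
        char addr[32];          /* valid only when status == 0 */
        int status;
        int done;
        pthread_mutex_t lock;
        pthread_cond_t cond;
};

static void resolve_cb(struct resolve_ctx *ctx, int status, const char *addr)
{
        pthread_mutex_lock(&ctx->lock);
        if (!status)            /* only copy the result on success */
                strncpy(ctx->addr, addr, sizeof(ctx->addr) - 1);
        ctx->status = status;   /* always record the outcome */
        ctx->done = 1;
        pthread_cond_signal(&ctx->cond);
        pthread_mutex_unlock(&ctx->lock);
}

static int resolve_wait(struct resolve_ctx *ctx, char *out, size_t len)
{
        pthread_mutex_lock(&ctx->lock);
        while (!ctx->done)
                pthread_cond_wait(&ctx->cond, &ctx->lock);
        pthread_mutex_unlock(&ctx->lock);
        if (ctx->status)        /* bail out before touching the result */
                return ctx->status;
        strncpy(out, ctx->addr, len - 1);
        return 0;
}

int main(void)
{
        struct resolve_ctx ctx = { .lock = PTHREAD_MUTEX_INITIALIZER,
                                   .cond = PTHREAD_COND_INITIALIZER };
        char buf[32] = "";

        resolve_cb(&ctx, 0, "02:00:00:00:00:01");
        if (!resolve_wait(&ctx, buf, sizeof(buf)))
                printf("resolved: %s\n", buf);
        return 0;
}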

drivers/infiniband/core/cm.c
@@ -80,6 +80,8 @@ static struct ib_cm {
 	__be32 random_id_operand;
 	struct list_head timewait_list;
 	struct workqueue_struct *wq;
+	/* Sync on cm change port state */
+	spinlock_t state_lock;
 } cm;
 
 /* Counter indexes ordered by attribute ID */
@@ -161,6 +163,8 @@ struct cm_port {
 	struct ib_mad_agent *mad_agent;
 	struct kobject port_obj;
 	u8 port_num;
+	struct list_head cm_priv_prim_list;
+	struct list_head cm_priv_altr_list;
 	struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
 };
@@ -241,6 +245,12 @@ struct cm_id_private {
 	u8 service_timeout;
 	u8 target_ack_delay;
 
+	struct list_head prim_list;
+	struct list_head altr_list;
+	/* Indicates that the send port mad is registered and av is set */
+	int prim_send_port_not_ready;
+	int altr_send_port_not_ready;
+
 	struct list_head work_list;
 	atomic_t work_count;
 };
@@ -259,20 +269,47 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
 	struct ib_mad_agent *mad_agent;
 	struct ib_mad_send_buf *m;
 	struct ib_ah *ah;
+	struct cm_av *av;
+	unsigned long flags, flags2;
+	int ret = 0;
 
+	/* don't let the port to be released till the agent is down */
+	spin_lock_irqsave(&cm.state_lock, flags2);
+	spin_lock_irqsave(&cm.lock, flags);
+	if (!cm_id_priv->prim_send_port_not_ready)
+		av = &cm_id_priv->av;
+	else if (!cm_id_priv->altr_send_port_not_ready &&
+		 (cm_id_priv->alt_av.port))
+		av = &cm_id_priv->alt_av;
+	else {
+		pr_info("%s: not valid CM id\n", __func__);
+		ret = -ENODEV;
+		spin_unlock_irqrestore(&cm.lock, flags);
+		goto out;
+	}
+	spin_unlock_irqrestore(&cm.lock, flags);
+	/* Make sure the port haven't released the mad yet */
 	mad_agent = cm_id_priv->av.port->mad_agent;
-	ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
-	if (IS_ERR(ah))
-		return PTR_ERR(ah);
+	if (!mad_agent) {
+		pr_info("%s: not a valid MAD agent\n", __func__);
+		ret = -ENODEV;
+		goto out;
+	}
+	ah = ib_create_ah(mad_agent->qp->pd, &av->ah_attr);
+	if (IS_ERR(ah)) {
+		ret = PTR_ERR(ah);
+		goto out;
+	}
 
 	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
-			       cm_id_priv->av.pkey_index,
+			       av->pkey_index,
 			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
 			       GFP_ATOMIC,
 			       IB_MGMT_BASE_VERSION);
 	if (IS_ERR(m)) {
 		ib_destroy_ah(ah);
-		return PTR_ERR(m);
+		ret = PTR_ERR(m);
+		goto out;
 	}
 
 	/* Timeout set by caller if response is expected. */
@@ -282,7 +319,10 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
 	atomic_inc(&cm_id_priv->refcount);
 	m->context[0] = cm_id_priv;
 	*msg = m;
-	return 0;
+
+out:
+	spin_unlock_irqrestore(&cm.state_lock, flags2);
+	return ret;
 }
 
 static int cm_alloc_response_msg(struct cm_port *port,
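
The reworked cm_alloc_msg only trusts port->mad_agent while cm.state_lock is held, treating NULL as a port that is going away (the matching teardown is in cm_remove_one, further down). A compact userspace sketch of that reader side, with a pthread mutex standing in for the spinlock and all names hypothetical:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct agent;                    /* opaque; never dereferenced here */

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static struct agent *mad_agent;  /* cleared by the teardown path */

/* Sample the shared agent pointer under the same lock the teardown
 * path takes; NULL means "port going away", so fail fast. */
static int send_via_agent(void)
{
        struct agent *agent;
        int ret = 0;

        pthread_mutex_lock(&state_lock);
        agent = mad_agent;
        if (!agent) {
                ret = -ENODEV;   /* port already marked stale */
                goto out;
        }
        /* ... build and post the message while the lock is held ... */
out:
        pthread_mutex_unlock(&state_lock);
        return ret;
}

int main(void)
{
        printf("send: %d\n", send_via_agent()); /* -ENODEV: no agent set */
        return 0;
}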
@@ -352,7 +392,8 @@ static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
 			   grh, &av->ah_attr);
 }
 
-static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
+static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av,
+			      struct cm_id_private *cm_id_priv)
 {
 	struct cm_device *cm_dev;
 	struct cm_port *port = NULL;
@@ -387,7 +428,17 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
 			     &av->ah_attr);
 	av->timeout = path->packet_life_time + 1;
-	return 0;
+
+	spin_lock_irqsave(&cm.lock, flags);
+	if (&cm_id_priv->av == av)
+		list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list);
+	else if (&cm_id_priv->alt_av == av)
+		list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list);
+	else
+		ret = -EINVAL;
+
+	spin_unlock_irqrestore(&cm.lock, flags);
+	return ret;
 }
 
 static int cm_alloc_id(struct cm_id_private *cm_id_priv)
@@ -677,6 +728,8 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
 	spin_lock_init(&cm_id_priv->lock);
 	init_completion(&cm_id_priv->comp);
 	INIT_LIST_HEAD(&cm_id_priv->work_list);
+	INIT_LIST_HEAD(&cm_id_priv->prim_list);
+	INIT_LIST_HEAD(&cm_id_priv->altr_list);
 	atomic_set(&cm_id_priv->work_count, -1);
 	atomic_set(&cm_id_priv->refcount, 1);
 	return &cm_id_priv->id;
@@ -892,6 +945,15 @@ static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
 		break;
 	}
 
+	spin_lock_irq(&cm.lock);
+	if (!list_empty(&cm_id_priv->altr_list) &&
+	    (!cm_id_priv->altr_send_port_not_ready))
+		list_del(&cm_id_priv->altr_list);
+	if (!list_empty(&cm_id_priv->prim_list) &&
+	    (!cm_id_priv->prim_send_port_not_ready))
+		list_del(&cm_id_priv->prim_list);
+	spin_unlock_irq(&cm.lock);
+
 	cm_free_id(cm_id->local_id);
 	cm_deref_id(cm_id_priv);
 	wait_for_completion(&cm_id_priv->comp);
@@ -1192,12 +1254,13 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
 		goto out;
 	}
 
-	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
+	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av,
+				 cm_id_priv);
 	if (ret)
 		goto error1;
 	if (param->alternate_path) {
 		ret = cm_init_av_by_path(param->alternate_path,
-					 &cm_id_priv->alt_av);
+					 &cm_id_priv->alt_av, cm_id_priv);
 		if (ret)
 			goto error1;
 	}
@@ -1653,7 +1716,8 @@ static int cm_req_handler(struct cm_work *work)
 			dev_put(gid_attr.ndev);
 		}
 		work->path[0].gid_type = gid_attr.gid_type;
-		ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
+		ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av,
+					 cm_id_priv);
 	}
 	if (ret) {
 		int err = ib_get_cached_gid(work->port->cm_dev->ib_device,
@@ -1672,7 +1736,8 @@ static int cm_req_handler(struct cm_work *work)
 		goto rejected;
 	}
 	if (req_msg->alt_local_lid) {
-		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
+		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av,
+					 cm_id_priv);
 		if (ret) {
 			ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
 				       &work->path[0].sgid,
@@ -2727,7 +2792,8 @@ int ib_send_cm_lap(struct ib_cm_id *cm_id,
 		goto out;
 	}
 
-	ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
+	ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av,
+				 cm_id_priv);
 	if (ret)
 		goto out;
 	cm_id_priv->alt_av.timeout =
@@ -2839,7 +2905,8 @@ static int cm_lap_handler(struct cm_work *work)
 	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
 				work->mad_recv_wc->recv_buf.grh,
 				&cm_id_priv->av);
-	cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
+	cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av,
+			   cm_id_priv);
 	ret = atomic_inc_and_test(&cm_id_priv->work_count);
 	if (!ret)
 		list_add_tail(&work->list, &cm_id_priv->work_list);
@@ -3031,7 +3098,7 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
 		return -EINVAL;
 
 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
-	ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
+	ret = cm_init_av_by_path(param->path, &cm_id_priv->av, cm_id_priv);
 	if (ret)
 		goto out;
@@ -3468,7 +3535,9 @@ static int cm_establish(struct ib_cm_id *cm_id)
 static int cm_migrate(struct ib_cm_id *cm_id)
 {
 	struct cm_id_private *cm_id_priv;
+	struct cm_av tmp_av;
 	unsigned long flags;
+	int tmp_send_port_not_ready;
 	int ret = 0;
 
 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
@@ -3477,7 +3546,14 @@ static int cm_migrate(struct ib_cm_id *cm_id)
 	    (cm_id->lap_state == IB_CM_LAP_UNINIT ||
 	     cm_id->lap_state == IB_CM_LAP_IDLE)) {
 		cm_id->lap_state = IB_CM_LAP_IDLE;
+		/* Swap address vector */
+		tmp_av = cm_id_priv->av;
 		cm_id_priv->av = cm_id_priv->alt_av;
+		cm_id_priv->alt_av = tmp_av;
+		/* Swap port send ready state */
+		tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready;
+		cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready;
+		cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready;
 	} else
 		ret = -EINVAL;
 
 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
@@ -3888,6 +3964,9 @@ static void cm_add_one(struct ib_device *ib_device)
 		port->cm_dev = cm_dev;
 		port->port_num = i;
 
+		INIT_LIST_HEAD(&port->cm_priv_prim_list);
+		INIT_LIST_HEAD(&port->cm_priv_altr_list);
+
 		ret = cm_create_port_fs(port);
 		if (ret)
 			goto error1;
@@ -3945,6 +4024,8 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
 {
 	struct cm_device *cm_dev = client_data;
 	struct cm_port *port;
+	struct cm_id_private *cm_id_priv;
+	struct ib_mad_agent *cur_mad_agent;
 	struct ib_port_modify port_modify = {
 		.clr_port_cap_mask = IB_PORT_CM_SUP
 	};
@@ -3968,15 +4049,27 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
 		port = cm_dev->port[i-1];
 		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
+		/* Mark all the cm_id's as not valid */
+		spin_lock_irq(&cm.lock);
+		list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list)
+			cm_id_priv->altr_send_port_not_ready = 1;
+		list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list)
+			cm_id_priv->prim_send_port_not_ready = 1;
+		spin_unlock_irq(&cm.lock);
 		/*
 		 * We flush the queue here after the going_down set, this
 		 * verify that no new works will be queued in the recv handler,
 		 * after that we can call the unregister_mad_agent
 		 */
 		flush_workqueue(cm.wq);
-		ib_unregister_mad_agent(port->mad_agent);
+		spin_lock_irq(&cm.state_lock);
+		cur_mad_agent = port->mad_agent;
+		port->mad_agent = NULL;
+		spin_unlock_irq(&cm.state_lock);
+		ib_unregister_mad_agent(cur_mad_agent);
 		cm_remove_port_fs(port);
 	}
+
 	device_unregister(cm_dev->device);
 	kfree(cm_dev);
 }
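
cm_remove_one is the other half of the protocol sketched under cm_alloc_msg: detach the agent pointer under the lock, then do the slow unregister outside it, so senders either see the old agent or NULL, never a half-destroyed object. A minimal standalone rendering of that teardown side, again with a pthread mutex and hypothetical names:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct agent { int id; };

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static struct agent *mad_agent;

static void unregister_agent(struct agent *a)  /* stand-in for the slow call */
{
        printf("unregistering agent %d\n", a->id);
        free(a);
}

static void remove_port(void)
{
        struct agent *cur;

        pthread_mutex_lock(&state_lock);
        cur = mad_agent;        /* take ownership of the pointer... */
        mad_agent = NULL;       /* ...and make new senders fail fast */
        pthread_mutex_unlock(&state_lock);

        if (cur)
                unregister_agent(cur);  /* slow work happens unlocked */
}

int main(void)
{
        mad_agent = malloc(sizeof(*mad_agent));
        mad_agent->id = 1;
        remove_port();
        return 0;
}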
@@ -3989,6 +4082,7 @@ static int __init ib_cm_init(void)
 	INIT_LIST_HEAD(&cm.device_list);
 	rwlock_init(&cm.device_lock);
 	spin_lock_init(&cm.lock);
+	spin_lock_init(&cm.state_lock);
 	cm.listen_service_table = RB_ROOT;
 	cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
 	cm.remote_id_table = RB_ROOT;

drivers/infiniband/core/cma.c
@@ -2436,6 +2436,18 @@ static int iboe_tos_to_sl(struct net_device *ndev, int tos)
 	return 0;
 }
 
+static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type,
+					   unsigned long supported_gids,
+					   enum ib_gid_type default_gid)
+{
+	if ((network_type == RDMA_NETWORK_IPV4 ||
+	     network_type == RDMA_NETWORK_IPV6) &&
+	    test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids))
+		return IB_GID_TYPE_ROCE_UDP_ENCAP;
+
+	return default_gid;
+}
+
 static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
 {
 	struct rdma_route *route = &id_priv->id.route;
@@ -2461,6 +2473,8 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
 	route->num_paths = 1;
 
 	if (addr->dev_addr.bound_dev_if) {
+		unsigned long supported_gids;
+
 		ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
 		if (!ndev) {
 			ret = -ENODEV;
@@ -2484,7 +2498,12 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
 		route->path_rec->net = &init_net;
 		route->path_rec->ifindex = ndev->ifindex;
-		route->path_rec->gid_type = id_priv->gid_type;
+		supported_gids = roce_gid_type_mask_support(id_priv->id.device,
+							    id_priv->id.port_num);
+		route->path_rec->gid_type =
+			cma_route_gid_type(addr->dev_addr.network,
+					   supported_gids,
+					   id_priv->gid_type);
 	}
 	if (!ndev) {
 		ret = -ENODEV;
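
cma_route_gid_type boils down to one bitmask test: for IP-routed traffic, prefer RoCE v2 (UDP-encapsulated) GIDs when the port supports them, otherwise keep the default. A userspace rendering of the same decision — the enum values and mask layout below are illustrative, not the kernel's:

#include <stdio.h>

/* Illustrative enums; the kernel's values and helpers differ. */
enum network_type { NET_IB, NET_ROCE_V1, NET_IPV4, NET_IPV6 };
enum gid_type { GID_ROCE_V1, GID_ROCE_UDP_ENCAP };

static enum gid_type route_gid_type(enum network_type net,
                                    unsigned long supported_gids,
                                    enum gid_type default_gid)
{
        /* Prefer RoCE v2 GIDs when the route is IP-based and supported. */
        if ((net == NET_IPV4 || net == NET_IPV6) &&
            (supported_gids & (1UL << GID_ROCE_UDP_ENCAP)))
                return GID_ROCE_UDP_ENCAP;
        return default_gid;
}

int main(void)
{
        unsigned long supported = (1UL << GID_ROCE_V1) |
                                  (1UL << GID_ROCE_UDP_ENCAP);

        printf("%d\n", route_gid_type(NET_IPV4, supported, GID_ROCE_V1));    /* 1 */
        printf("%d\n", route_gid_type(NET_ROCE_V1, supported, GID_ROCE_V1)); /* 0 */
        return 0;
}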

drivers/infiniband/core/umem.c
@@ -175,7 +175,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 	cur_base = addr & PAGE_MASK;
 
-	if (npages == 0) {
+	if (npages == 0 || npages > UINT_MAX) {
 		ret = -EINVAL;
 		goto out;
 	}
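
The one-line umem.c change rejects page counts that a 32-bit consumer downstream could not represent; npages is derived from a user-supplied address and length, so it can be made arbitrarily large. A simplified standalone sketch of the validation — the page-count arithmetic here is an assumption, condensed from the usual addr/size rounding:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define PG_SHIFT 12
#define PG_SIZE  (1ULL << PG_SHIFT)
#define PG_MASK  (~(PG_SIZE - 1))

/* Reject both empty ranges and counts that would overflow a 32-bit
 * consumer later in the pipeline. */
static int check_npages(uint64_t addr, uint64_t size)
{
        uint64_t npages = ((addr + size + PG_SIZE - 1) & PG_MASK) / PG_SIZE -
                          (addr & PG_MASK) / PG_SIZE;

        if (npages == 0 || npages > UINT_MAX)
                return -1;      /* -EINVAL in the kernel */
        return 0;
}

int main(void)
{
        printf("%d\n", check_npages(0x1000, 0x2000)); /* 0: two pages, ok */
        printf("%d\n", check_npages(0x1000, 0));      /* -1: empty range */
        printf("%d\n", check_npages(0, 1ULL << 45));  /* -1: 2^33 pages */
        return 0;
}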

drivers/infiniband/core/uverbs_main.c
@@ -262,12 +262,9 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
 			container_of(uobj, struct ib_uqp_object, uevent.uobject);
 
 		idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
-		if (qp != qp->real_qp) {
-			ib_close_qp(qp);
-		} else {
+		if (qp == qp->real_qp)
 			ib_uverbs_detach_umcast(qp, uqp);
-			ib_destroy_qp(qp);
-		}
+		ib_destroy_qp(qp);
 		ib_uverbs_release_uevent(file, &uqp->uevent);
 		kfree(uqp);
 	}

drivers/infiniband/hw/mlx4/ah.c
@@ -102,7 +102,10 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
 	if (vlan_tag < 0x1000)
 		vlan_tag |= (ah_attr->sl & 7) << 13;
 	ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
-	ah->av.eth.gid_index = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index);
+	ret = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index);
+	if (ret < 0)
+		return ERR_PTR(ret);
+	ah->av.eth.gid_index = ret;
 	ah->av.eth.vlan = cpu_to_be16(vlan_tag);
 	ah->av.eth.hop_limit = ah_attr->grh.hop_limit;
 	if (ah_attr->static_rate) {

drivers/infiniband/hw/mlx4/cq.c
@@ -253,11 +253,14 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
 	if (context)
 		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
 			err = -EFAULT;
-			goto err_dbmap;
+			goto err_cq_free;
 		}
 
 	return &cq->ibcq;
 
+err_cq_free:
+	mlx4_cq_free(dev->dev, &cq->mcq);
+
 err_dbmap:
 	if (context)
 		mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);

drivers/infiniband/hw/mlx5/cq.c
@@ -932,8 +932,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
 		if (err)
 			goto err_create;
 	} else {
-		/* for now choose 64 bytes till we have a proper interface */
-		cqe_size = 64;
+		cqe_size = cache_line_size() == 128 ? 128 : 64;
 		err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
 				       &index, &inlen);
 		if (err)

drivers/infiniband/hw/mlx5/main.c
@@ -2311,14 +2311,14 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
 {
 	struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
 	struct ib_event ibev;
-
+	bool fatal = false;
 	u8 port = 0;
 
 	switch (event) {
 	case MLX5_DEV_EVENT_SYS_ERROR:
-		ibdev->ib_active = false;
 		ibev.event = IB_EVENT_DEVICE_FATAL;
 		mlx5_ib_handle_internal_error(ibdev);
+		fatal = true;
 		break;
 
 	case MLX5_DEV_EVENT_PORT_UP:
@@ -2370,6 +2370,9 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
 	if (ibdev->ib_active)
 		ib_dispatch_event(&ibev);
+
+	if (fatal)
+		ibdev->ib_active = false;
 }
 
 static void get_ext_port_caps(struct mlx5_ib_dev *dev)
@@ -3115,7 +3118,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	}
 
 	err = init_node_data(dev);
 	if (err)
-		goto err_dealloc;
+		goto err_free_port;
 
 	mutex_init(&dev->flow_db.lock);
 	mutex_init(&dev->cap_mask_mutex);
@@ -3125,7 +3128,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	if (ll == IB_LINK_LAYER_ETHERNET) {
 		err = mlx5_enable_roce(dev);
 		if (err)
-			goto err_dealloc;
+			goto err_free_port;
 	}
 
 	err = create_dev_resources(&dev->devr);

drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -626,6 +626,8 @@ struct mlx5_ib_dev {
 	struct mlx5_ib_resources	devr;
 	struct mlx5_mr_cache		cache;
 	struct timer_list		delay_timer;
+	/* Prevents soft lock on massive reg MRs */
+	struct mutex			slow_path_mutex;
 	int				fill_delay;
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 	struct ib_odp_caps	odp_caps;

drivers/infiniband/hw/mlx5/mr.c
@@ -610,6 +610,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 	int err;
 	int i;
 
+	mutex_init(&dev->slow_path_mutex);
 	cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
 	if (!cache->wq) {
 		mlx5_ib_warn(dev, "failed to create work queue\n");
@@ -1182,9 +1183,12 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 		goto error;
 	}
 
-	if (!mr)
+	if (!mr) {
+		mutex_lock(&dev->slow_path_mutex);
 		mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
 				page_shift, access_flags);
+		mutex_unlock(&dev->slow_path_mutex);
+	}
 
 	if (IS_ERR(mr)) {
 		err = PTR_ERR(mr);
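
slow_path_mutex serializes only the fallback: a cache miss drops into the expensive reg_create, and letting many registrations run it concurrently is what caused the soft lockups the mlx5_ib.h comment mentions. The shape, in a standalone sketch with a pthread mutex in place of the kernel mutex and hypothetical helpers:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t slow_path_mutex = PTHREAD_MUTEX_INITIALIZER;

static void *cache_lookup(void) { return NULL; }       /* simulate a miss */
static void *slow_create(void)  { return malloc(64); } /* expensive path */

/* The fast path (cache hit) takes no lock; only the expensive fallback
 * is serialized, so hits never contend with slow registrations. */
static void *get_mr(void)
{
        void *mr = cache_lookup();

        if (!mr) {
                pthread_mutex_lock(&slow_path_mutex);
                mr = slow_create();   /* one slow registration at a time */
                pthread_mutex_unlock(&slow_path_mutex);
        }
        return mr;
}

int main(void)
{
        void *mr = get_mr();

        printf("mr %s\n", mr ? "created" : "failed");
        free(mr);
        return 0;
}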

drivers/infiniband/hw/mlx5/qp.c
@@ -2052,8 +2052,8 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
 	mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
 		    qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn,
-		    to_mcq(init_attr->recv_cq)->mcq.cqn,
-		    to_mcq(init_attr->send_cq)->mcq.cqn);
+		    init_attr->recv_cq ? to_mcq(init_attr->recv_cq)->mcq.cqn : -1,
+		    init_attr->send_cq ? to_mcq(init_attr->send_cq)->mcq.cqn : -1);
 
 	qp->trans_qp.xrcdn = xrcdn;
@@ -4815,6 +4815,14 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
 			 udata->inlen))
 		return ERR_PTR(-EOPNOTSUPP);
 
+	if (init_attr->log_ind_tbl_size >
+	    MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)) {
+		mlx5_ib_dbg(dev, "log_ind_tbl_size = %d is bigger than supported = %d\n",
+			    init_attr->log_ind_tbl_size,
+			    MLX5_CAP_GEN(dev->mdev, log_max_rqt_size));
+		return ERR_PTR(-EINVAL);
+	}
+
 	min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
 	if (udata->outlen && udata->outlen < min_resp_len)
 		return ERR_PTR(-EINVAL);

drivers/infiniband/sw/rxe/rxe_net.c
@@ -243,10 +243,8 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
 {
 	int err;
 	struct socket *sock;
-	struct udp_port_cfg udp_cfg;
-	struct udp_tunnel_sock_cfg tnl_cfg;
-
-	memset(&udp_cfg, 0, sizeof(udp_cfg));
+	struct udp_port_cfg udp_cfg = {0};
+	struct udp_tunnel_sock_cfg tnl_cfg = {0};
 
 	if (ipv6) {
 		udp_cfg.family = AF_INET6;
@@ -264,10 +262,8 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
 		return ERR_PTR(err);
 	}
 
-	tnl_cfg.sk_user_data = NULL;
 	tnl_cfg.encap_type = 1;
 	tnl_cfg.encap_rcv = rxe_udp_encap_recv;
-	tnl_cfg.encap_destroy = NULL;
 
 	/* Setup UDP tunnel */
 	setup_udp_tunnel_sock(net, sock, &tnl_cfg);

drivers/infiniband/sw/rxe/rxe_qp.c
@@ -522,6 +522,7 @@ static void rxe_qp_reset(struct rxe_qp *qp)
 	if (qp->sq.queue) {
 		__rxe_do_task(&qp->comp.task);
 		__rxe_do_task(&qp->req.task);
+		rxe_queue_reset(qp->sq.queue);
 	}
 
 	/* cleanup attributes */
@@ -573,6 +574,7 @@ void rxe_qp_error(struct rxe_qp *qp)
 {
 	qp->req.state = QP_STATE_ERROR;
 	qp->resp.state = QP_STATE_ERROR;
+	qp->attr.qp_state = IB_QPS_ERR;
 
 	/* drain work and packet queues */
 	rxe_run_task(&qp->resp.task, 1);

drivers/infiniband/sw/rxe/rxe_queue.c
@@ -84,6 +84,15 @@ int do_mmap_info(struct rxe_dev *rxe,
 	return -EINVAL;
 }
 
+inline void rxe_queue_reset(struct rxe_queue *q)
+{
+	/* queue is comprised from header and the memory
+	 * of the actual queue. See "struct rxe_queue_buf" in rxe_queue.h
+	 * reset only the queue itself and not the management header
+	 */
+	memset(q->buf->data, 0, q->buf_size -
+	       sizeof(struct rxe_queue_buf));
+}
+
 struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
 				 int *num_elem,
 				 unsigned int elem_size)
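
rxe_queue_reset wipes only the element storage and leaves the management header alone, since user space may have the header mmapped. The general header-plus-flexible-array pattern looks like this in isolation — a standalone sketch, not the rxe layout:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* A buffer with a header followed by payload, where a reset must wipe
 * the payload but leave the header (indices, sizes) intact. */
struct queue_buf {
        unsigned int log2_elem_size;  /* management header: survives reset */
        unsigned int index_mask;
        char data[];                  /* element storage */
};

static void queue_reset(struct queue_buf *buf, size_t buf_size)
{
        /* zero everything past the header, nothing before it */
        memset(buf->data, 0, buf_size - sizeof(struct queue_buf));
}

int main(void)
{
        size_t buf_size = sizeof(struct queue_buf) + 64;
        struct queue_buf *q = calloc(1, buf_size);

        q->index_mask = 63;
        memset(q->data, 0xab, 64);
        queue_reset(q, buf_size);
        printf("mask=%u first_byte=%d\n", q->index_mask, q->data[0]); /* 63 0 */
        free(q);
        return 0;
}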

drivers/infiniband/sw/rxe/rxe_queue.h
@@ -84,6 +84,8 @@ int do_mmap_info(struct rxe_dev *rxe,
 		 size_t buf_size,
 		 struct rxe_mmap_info **ip_p);
 
+void rxe_queue_reset(struct rxe_queue *q);
+
 struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
 				 int *num_elem,
 				 unsigned int elem_size);

drivers/infiniband/sw/rxe/rxe_req.c
@@ -696,7 +696,8 @@ int rxe_requester(void *arg)
 						       qp->req.wqe_index);
 			wqe->state = wqe_state_done;
 			wqe->status = IB_WC_SUCCESS;
-			goto complete;
+			__rxe_do_task(&qp->comp.task);
+			return 0;
 		}
 		payload = mtu;
 	}
@@ -745,13 +746,17 @@ int rxe_requester(void *arg)
 	wqe->status = IB_WC_LOC_PROT_ERR;
 	wqe->state = wqe_state_error;
 
-complete:
-	if (qp_type(qp) != IB_QPT_RC) {
-		while (rxe_completer(qp) == 0)
-			;
-	}
-
-	return 0;
+	/*
+	 * IBA Spec. Section 10.7.3.1 SIGNALED COMPLETIONS
+	 * ---------8<---------8<-------------
+	 * ...Note that if a completion error occurs, a Work Completion
+	 * will always be generated, even if the signaling
+	 * indicator requests an Unsignaled Completion.
+	 * ---------8<---------8<-------------
+	 */
+	wqe->wr.send_flags |= IB_SEND_SIGNALED;
+	__rxe_do_task(&qp->comp.task);
+	return -EAGAIN;
 
 exit:
 	return -EAGAIN;
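
The new error path forces the signaled-completion flag before kicking the completer, implementing the IBA rule quoted in the comment: completion errors always generate a work completion, even for nominally unsignaled work requests. A tiny decision sketch — the flag value is illustrative, not the kernel's:

#include <stdio.h>

#define SEND_SIGNALED 0x1   /* illustrative flag, not the kernel's value */

struct wqe { unsigned int send_flags; int error; };

/* Per IBA 10.7.3.1: completion errors are always signaled, so the error
 * path forces the flag before handing the WQE to the completer. */
static int must_generate_completion(struct wqe *wqe)
{
        if (wqe->error)
                wqe->send_flags |= SEND_SIGNALED;
        return wqe->send_flags & SEND_SIGNALED;
}

int main(void)
{
        struct wqe ok = { 0, 0 }, bad = { 0, 1 };

        printf("ok: %d, bad: %d\n",
               must_generate_completion(&ok),   /* 0: unsignaled success */
               must_generate_completion(&bad)); /* 1: errors always signal */
        return 0;
}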