Commit 2116fe4e
authored Sep 14, 2012 by Roland Dreier

    Merge branches 'cxgb4', 'ipoib', 'mlx4', 'ocrdma' and 'qib' into for-next

Parents: 92dd6c3d b5120a6e dd03e734 ae3bca90 4c355005

Showing 7 changed files with 77 additions and 74 deletions (+77 -74):

    drivers/infiniband/hw/ocrdma/ocrdma_verbs.c       +4  -4
    drivers/infiniband/hw/qib/qib_mad.c               +2  -1
    drivers/infiniband/ulp/ipoib/ipoib.h              +4  -1
    drivers/infiniband/ulp/ipoib/ipoib_main.c         +44 -49
    drivers/infiniband/ulp/ipoib/ipoib_multicast.c    +0  -2
    drivers/net/ethernet/mellanox/mlx4/icm.c          +18 -12
    drivers/net/ethernet/mellanox/mlx4/icm.h          +5  -5

drivers/infiniband/hw/ocrdma/ocrdma_verbs.c  (+4 -4)

@@ -2219,7 +2219,6 @@ static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
 	u32 wqe_idx;
 
 	if (!qp->wqe_wr_id_tbl[tail].signaled) {
-		expand = true;	/* CQE cannot be consumed yet */
 		*polled = false;	/* WC cannot be consumed yet */
 	} else {
 		ibwc->status = IB_WC_SUCCESS;
@@ -2227,10 +2226,11 @@ static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
 		ibwc->qp = &qp->ibqp;
 		ocrdma_update_wc(qp, ibwc, tail);
 		*polled = true;
-		wqe_idx = le32_to_cpu(cqe->wq.wqeidx) & OCRDMA_CQE_WQEIDX_MASK;
-		if (tail != wqe_idx)
-			expand = true; /* Coalesced CQE can't be consumed yet */
 	}
+	wqe_idx = le32_to_cpu(cqe->wq.wqeidx) & OCRDMA_CQE_WQEIDX_MASK;
+	if (tail != wqe_idx)
+		expand = true; /* Coalesced CQE can't be consumed yet */
+
 	ocrdma_hwq_inc_tail(&qp->sq);
 	return expand;
 }
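
Read on its own, the ocrdma change moves the coalesced-CQE test out of the signaled branch: whether the CQE must be expanded is now decided purely by comparing the send-queue tail with the CQE's wqe index, for signaled and unsignaled WQEs alike, and an unsignaled WQE no longer forces expansion by itself. A minimal stand-alone model of the reworked decision (all types and the ring size below are invented for the demo; only the expand/polled logic mirrors the hunks above):

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the driver's send-queue bookkeeping. */
    struct wqe { bool signaled; };
    struct sq  { struct wqe wqe_tbl[16]; unsigned int tail; };

    /* Mirrors the post-patch flow: report a WC only for signaled WQEs,
     * and keep expanding while the tail lags the CQE's wqe index
     * (i.e. the CQE coalesced several WQEs). */
    static bool poll_success_scqe(struct sq *sq, unsigned int cqe_wqe_idx, bool *polled)
    {
        bool expand = false;
        unsigned int tail = sq->tail;

        *polled = sq->wqe_tbl[tail].signaled;   /* WC only for signaled WQEs */
        if (tail != cqe_wqe_idx)
            expand = true;                      /* coalesced CQE: more WQEs to retire */

        sq->tail = (sq->tail + 1) % 16;         /* stands in for ocrdma_hwq_inc_tail() */
        return expand;
    }

    int main(void)
    {
        struct sq sq = { .tail = 0 };
        bool expand, polled;

        sq.wqe_tbl[0].signaled = false;         /* unsignaled send */
        sq.wqe_tbl[1].signaled = true;          /* signaled send */

        /* One CQE covering WQEs 0 and 1: the first pass only retires the
         * unsignaled WQE and asks for expansion, the second reports the WC. */
        expand = poll_success_scqe(&sq, 1, &polled);
        printf("expand=%d polled=%d\n", expand, polled);
        expand = poll_success_scqe(&sq, 1, &polled);
        printf("expand=%d polled=%d\n", expand, polled);
        return 0;
    }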

drivers/infiniband/hw/qib/qib_mad.c  (+2 -1)

@@ -471,11 +471,12 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 		if (port_num != port) {
 			ibp = to_iport(ibdev, port_num);
 			ret = check_mkey(ibp, smp, 0);
-			if (ret)
+			if (ret) {
 				ret = IB_MAD_RESULT_FAILURE;
 				goto bail;
+			}
 		}
 	}
 
 	dd = dd_from_ibdev(ibdev);
 	/* IB numbers ports from 1, hdw from 0 */
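
The qib hunk is a missing-braces fix: in the old code only the assignment was governed by if (ret), while goto bail ran unconditionally whenever port_num != port, so the result of check_mkey() never actually mattered. A tiny stand-alone illustration of the pitfall (the names below are invented; only the brace pattern mirrors the diff):

    #include <stdio.h>

    static int check(int fail) { return fail ? -1 : 0; }

    int main(void)
    {
        int ret;

        /* Unbraced, as in the old code: only the first statement is
         * conditional, the second one always executes. */
        ret = check(0);
        if (ret)
            ret = -22;
            printf("unbraced path taken even though ret == %d\n", ret);

        /* Braced, as in the fix: both statements are conditional. */
        ret = check(0);
        if (ret) {
            ret = -22;
            printf("braced path taken, ret == %d\n", ret);
        }
        return 0;
    }

With a recent GCC or Clang, the unbraced form also trips the misleading-indentation warning, which is one way this class of bug gets caught today.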

drivers/infiniband/ulp/ipoib/ipoib.h  (+4 -1)

@@ -262,7 +262,10 @@ struct ipoib_ethtool_st {
 	u16 max_coalesced_frames;
 };
 
+struct ipoib_neigh_table;
+
 struct ipoib_neigh_hash {
+	struct ipoib_neigh_table       *ntbl;
 	struct ipoib_neigh __rcu      **buckets;
 	struct rcu_head			rcu;
 	u32				mask;
@@ -271,9 +274,9 @@ struct ipoib_neigh_hash {
 
 struct ipoib_neigh_table {
 	struct ipoib_neigh_hash __rcu  *htbl;
-	rwlock_t			rwlock;
 	atomic_t			entries;
 	struct completion		flushed;
+	struct completion		deleted;
 };
 
 /*

drivers/infiniband/ulp/ipoib/ipoib_main.c  (+44 -49)

@@ -546,15 +546,15 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
 	struct ipoib_neigh *neigh;
 	unsigned long flags;
 
+	spin_lock_irqsave(&priv->lock, flags);
 	neigh = ipoib_neigh_alloc(daddr, dev);
 	if (!neigh) {
+		spin_unlock_irqrestore(&priv->lock, flags);
 		++dev->stats.tx_dropped;
 		dev_kfree_skb_any(skb);
 		return;
 	}
 
-	spin_lock_irqsave(&priv->lock, flags);
-
 	path = __path_find(dev, daddr + 4);
 	if (!path) {
 		path = path_rec_create(dev, daddr + 4);
@@ -863,10 +863,10 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
 	if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
 		return;
 
-	write_lock_bh(&ntbl->rwlock);
+	spin_lock_irqsave(&priv->lock, flags);
 
 	htbl = rcu_dereference_protected(ntbl->htbl,
-					 lockdep_is_held(&ntbl->rwlock));
+					 lockdep_is_held(&priv->lock));
 
 	if (!htbl)
 		goto out_unlock;
@@ -883,16 +883,14 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
 		struct ipoib_neigh __rcu **np = &htbl->buckets[i];
 
 		while ((neigh = rcu_dereference_protected(*np,
-							  lockdep_is_held(&ntbl->rwlock))) != NULL) {
+							  lockdep_is_held(&priv->lock))) != NULL) {
 			/* was the neigh idle for two GC periods */
 			if (time_after(neigh_obsolete, neigh->alive)) {
 				rcu_assign_pointer(*np,
 						   rcu_dereference_protected(neigh->hnext,
-									     lockdep_is_held(&ntbl->rwlock)));
+									     lockdep_is_held(&priv->lock)));
 				/* remove from path/mc list */
-				spin_lock_irqsave(&priv->lock, flags);
 				list_del(&neigh->list);
-				spin_unlock_irqrestore(&priv->lock, flags);
 				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
 			} else {
 				np = &neigh->hnext;
@@ -902,7 +900,7 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
 	}
 
 out_unlock:
-	write_unlock_bh(&ntbl->rwlock);
+	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
 static void ipoib_reap_neigh(struct work_struct *work)
@@ -947,10 +945,8 @@ struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
 	struct ipoib_neigh *neigh;
 	u32 hash_val;
 
-	write_lock_bh(&ntbl->rwlock);
-
 	htbl = rcu_dereference_protected(ntbl->htbl,
-					 lockdep_is_held(&ntbl->rwlock));
+					 lockdep_is_held(&priv->lock));
 	if (!htbl) {
 		neigh = NULL;
 		goto out_unlock;
@@ -961,10 +957,10 @@ struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
 	 */
 	hash_val = ipoib_addr_hash(htbl, daddr);
 	for (neigh = rcu_dereference_protected(htbl->buckets[hash_val],
-					       lockdep_is_held(&ntbl->rwlock));
+					       lockdep_is_held(&priv->lock));
 	     neigh != NULL;
 	     neigh = rcu_dereference_protected(neigh->hnext,
-					       lockdep_is_held(&ntbl->rwlock))) {
+					       lockdep_is_held(&priv->lock))) {
 		if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
 			/* found, take one ref on behalf of the caller */
 			if (!atomic_inc_not_zero(&neigh->refcnt)) {
@@ -987,12 +983,11 @@ struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
 	/* put in hash */
 	rcu_assign_pointer(neigh->hnext,
 			   rcu_dereference_protected(htbl->buckets[hash_val],
-						     lockdep_is_held(&ntbl->rwlock)));
+						     lockdep_is_held(&priv->lock)));
 	rcu_assign_pointer(htbl->buckets[hash_val], neigh);
 	atomic_inc(&ntbl->entries);
 
 out_unlock:
-	write_unlock_bh(&ntbl->rwlock);
 
 	return neigh;
 }
@@ -1040,35 +1035,29 @@ void ipoib_neigh_free(struct ipoib_neigh *neigh)
 	struct ipoib_neigh *n;
 	u32 hash_val;
 
-	write_lock_bh(&ntbl->rwlock);
-
 	htbl = rcu_dereference_protected(ntbl->htbl,
-					lockdep_is_held(&ntbl->rwlock));
+					lockdep_is_held(&priv->lock));
 	if (!htbl)
-		goto out_unlock;
+		return;
 
 	hash_val = ipoib_addr_hash(htbl, neigh->daddr);
 	np = &htbl->buckets[hash_val];
-
 	for (n = rcu_dereference_protected(*np,
-					   lockdep_is_held(&ntbl->rwlock));
+					   lockdep_is_held(&priv->lock));
 	     n != NULL;
 	     n = rcu_dereference_protected(*np,
-					   lockdep_is_held(&ntbl->rwlock))) {
+					   lockdep_is_held(&priv->lock))) {
 		if (n == neigh) {
 			/* found */
 			rcu_assign_pointer(*np,
 					   rcu_dereference_protected(neigh->hnext,
-								     lockdep_is_held(&ntbl->rwlock)));
+								     lockdep_is_held(&priv->lock)));
 			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
-			goto out_unlock;
+			return;
 		} else {
 			np = &n->hnext;
 		}
 	}
-
-out_unlock:
-	write_unlock_bh(&ntbl->rwlock);
 }
 
 static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
@@ -1080,7 +1069,6 @@ static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
 
 	clear_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
 	ntbl->htbl = NULL;
-	rwlock_init(&ntbl->rwlock);
 	htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
 	if (!htbl)
 		return -ENOMEM;
@@ -1095,6 +1083,7 @@ static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
 	htbl->mask = (size - 1);
 	htbl->buckets = buckets;
 	ntbl->htbl = htbl;
+	htbl->ntbl = ntbl;
 	atomic_set(&ntbl->entries, 0);
 
 	/* start garbage collection */
@@ -1111,9 +1100,11 @@ static void neigh_hash_free_rcu(struct rcu_head *head)
 						    struct ipoib_neigh_hash,
 						    rcu);
 	struct ipoib_neigh __rcu **buckets = htbl->buckets;
+	struct ipoib_neigh_table *ntbl = htbl->ntbl;
 
 	kfree(buckets);
 	kfree(htbl);
+	complete(&ntbl->deleted);
 }
 
 void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid)
@@ -1125,10 +1116,10 @@ void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid)
 	int i;
 
 	/* remove all neigh connected to a given path or mcast */
-	write_lock_bh(&ntbl->rwlock);
+	spin_lock_irqsave(&priv->lock, flags);
 
 	htbl = rcu_dereference_protected(ntbl->htbl,
-					 lockdep_is_held(&ntbl->rwlock));
+					 lockdep_is_held(&priv->lock));
 
 	if (!htbl)
 		goto out_unlock;
@@ -1138,16 +1129,14 @@ void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid)
 		struct ipoib_neigh __rcu **np = &htbl->buckets[i];
 
 		while ((neigh = rcu_dereference_protected(*np,
-							  lockdep_is_held(&ntbl->rwlock))) != NULL) {
+							  lockdep_is_held(&priv->lock))) != NULL) {
 			/* delete neighs belong to this parent */
 			if (!memcmp(gid, neigh->daddr + 4, sizeof(union ib_gid))) {
 				rcu_assign_pointer(*np,
 						   rcu_dereference_protected(neigh->hnext,
-									     lockdep_is_held(&ntbl->rwlock)));
+									     lockdep_is_held(&priv->lock)));
 				/* remove from parent list */
-				spin_lock_irqsave(&priv->lock, flags);
 				list_del(&neigh->list);
-				spin_unlock_irqrestore(&priv->lock, flags);
 				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
 			} else {
 				np = &neigh->hnext;
@@ -1156,7 +1145,7 @@ void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid)
 		}
 	}
 out_unlock:
-	write_unlock_bh(&ntbl->rwlock);
+	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
 static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
@@ -1164,37 +1153,44 @@ static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
 	struct ipoib_neigh_table *ntbl = &priv->ntbl;
 	struct ipoib_neigh_hash *htbl;
 	unsigned long flags;
-	int i;
+	int i, wait_flushed = 0;
 
-	write_lock_bh(&ntbl->rwlock);
+	init_completion(&priv->ntbl.flushed);
+
+	spin_lock_irqsave(&priv->lock, flags);
 
 	htbl = rcu_dereference_protected(ntbl->htbl,
-					lockdep_is_held(&ntbl->rwlock));
+					lockdep_is_held(&priv->lock));
 	if (!htbl)
 		goto out_unlock;
 
+	wait_flushed = atomic_read(&priv->ntbl.entries);
+	if (!wait_flushed)
+		goto free_htbl;
+
 	for (i = 0; i < htbl->size; i++) {
 		struct ipoib_neigh *neigh;
 		struct ipoib_neigh __rcu **np = &htbl->buckets[i];
 
 		while ((neigh = rcu_dereference_protected(*np,
-							  lockdep_is_held(&ntbl->rwlock))) != NULL) {
+							  lockdep_is_held(&priv->lock))) != NULL) {
 			rcu_assign_pointer(*np,
 					   rcu_dereference_protected(neigh->hnext,
-								     lockdep_is_held(&ntbl->rwlock)));
+								     lockdep_is_held(&priv->lock)));
 			/* remove from path/mc list */
-			spin_lock_irqsave(&priv->lock, flags);
 			list_del(&neigh->list);
-			spin_unlock_irqrestore(&priv->lock, flags);
 			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
 		}
 	}
 
+free_htbl:
 	rcu_assign_pointer(ntbl->htbl, NULL);
 	call_rcu(&htbl->rcu, neigh_hash_free_rcu);
 
 out_unlock:
-	write_unlock_bh(&ntbl->rwlock);
+	spin_unlock_irqrestore(&priv->lock, flags);
+	if (wait_flushed)
+		wait_for_completion(&priv->ntbl.flushed);
 }
 
 static void ipoib_neigh_hash_uninit(struct net_device *dev)
@@ -1203,7 +1199,7 @@ static void ipoib_neigh_hash_uninit(struct net_device *dev)
 	int stopped;
 
 	ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n");
-	init_completion(&priv->ntbl.flushed);
+	init_completion(&priv->ntbl.deleted);
 	set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
 
 	/* Stop GC if called at init fail need to cancel work */
@@ -1211,10 +1207,9 @@ static void ipoib_neigh_hash_uninit(struct net_device *dev)
 
 	if (!stopped)
 		cancel_delayed_work(&priv->neigh_reap_task);
 
-	if (atomic_read(&priv->ntbl.entries)) {
-		ipoib_flush_neighs(priv);
-		wait_for_completion(&priv->ntbl.flushed);
-	}
+	ipoib_flush_neighs(priv);
+
+	wait_for_completion(&priv->ntbl.deleted);
 }
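
Taken together, the ipoib.h, ipoib_main.c and ipoib_multicast.c hunks drop the neighbour table's private rwlock and protect the hash table with the existing priv->lock spinlock instead: writers take spin_lock_irqsave(&priv->lock, ...), every rcu_dereference_protected() annotation switches to lockdep_is_held(&priv->lock), ipoib_neigh_alloc()/ipoib_neigh_free() now expect the caller to already hold priv->lock, and the new ntbl back-pointer plus the deleted completion let teardown wait for the RCU-deferred frees. The fragment below is a kernel-style sketch of that idiom with simplified, made-up structures; it shows the locking/RCU pattern the patch converges on, not the driver's actual code:

    #include <linux/kernel.h>
    #include <linux/spinlock.h>
    #include <linux/rcupdate.h>
    #include <linux/lockdep.h>
    #include <linux/slab.h>

    /* Hypothetical stand-ins; the driver uses struct ipoib_neigh etc. */
    struct demo_entry {
            struct demo_entry __rcu *next;
            struct rcu_head rcu;
    };

    struct demo_priv {
            spinlock_t lock;                 /* plays the role of priv->lock */
            struct demo_entry __rcu *head;
    };

    static void demo_entry_reclaim(struct rcu_head *head)
    {
            kfree(container_of(head, struct demo_entry, rcu));
    }

    /* Writer side after the change: updates happen under priv->lock, and
     * rcu_dereference_protected() names that lock so lockdep can verify it,
     * instead of a separate rwlock dedicated to the table. */
    static void demo_remove_first(struct demo_priv *priv)
    {
            struct demo_entry *e;
            unsigned long flags;

            spin_lock_irqsave(&priv->lock, flags);
            e = rcu_dereference_protected(priv->head,
                                          lockdep_is_held(&priv->lock));
            if (e) {
                    rcu_assign_pointer(priv->head,
                                       rcu_dereference_protected(e->next,
                                               lockdep_is_held(&priv->lock)));
                    call_rcu(&e->rcu, demo_entry_reclaim);
            }
            spin_unlock_irqrestore(&priv->lock, flags);
    }

One practical consequence visible in the hunks: because the caller already holds priv->lock, the inner spin_lock_irqsave()/spin_unlock_irqrestore() pairs around list_del() disappear, and ipoib_mcast_send() no longer has to drop and retake the lock around ipoib_neigh_alloc().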

drivers/infiniband/ulp/ipoib/ipoib_multicast.c  (+0 -2)

@@ -707,9 +707,7 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
 	neigh = ipoib_neigh_get(dev, daddr);
 	spin_lock_irqsave(&priv->lock, flags);
 	if (!neigh) {
-		spin_unlock_irqrestore(&priv->lock, flags);
 		neigh = ipoib_neigh_alloc(daddr, dev);
-		spin_lock_irqsave(&priv->lock, flags);
 		if (neigh) {
 			kref_get(&mcast->ah->ref);
 			neigh->ah = mcast->ah;

drivers/net/ethernet/mellanox/mlx4/icm.c  (+18 -12)

@@ -227,9 +227,10 @@ int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
 			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
 }
 
-int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
+int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj)
 {
-	int i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
+	u32 i = (obj & (table->num_obj - 1)) /
+			(MLX4_TABLE_CHUNK_SIZE / table->obj_size);
 	int ret = 0;
 
 	mutex_lock(&table->mutex);
@@ -262,16 +263,18 @@ int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
 	return ret;
 }
 
-void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
+void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj)
 {
-	int i;
+	u32 i;
+	u64 offset;
 
 	i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
 
 	mutex_lock(&table->mutex);
 
 	if (--table->icm[i]->refcount == 0) {
-		mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
+		offset = (u64) i * MLX4_TABLE_CHUNK_SIZE;
+		mlx4_UNMAP_ICM(dev, table->virt + offset,
 			       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
 		mlx4_free_icm(dev, table->icm[i], table->coherent);
 		table->icm[i] = NULL;
@@ -280,9 +283,11 @@ void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
 	mutex_unlock(&table->mutex);
 }
 
-void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle)
+void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj,
+			dma_addr_t *dma_handle)
 {
-	int idx, offset, dma_offset, i;
+	int offset, dma_offset, i;
+	u64 idx;
 	struct mlx4_icm_chunk *chunk;
 	struct mlx4_icm *icm;
 	struct page *page = NULL;
@@ -292,7 +297,7 @@ void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle)
 
 	mutex_lock(&table->mutex);
 
-	idx = (obj & (table->num_obj - 1)) * table->obj_size;
+	idx = (u64) (obj & (table->num_obj - 1)) * table->obj_size;
 	icm = table->icm[idx / MLX4_TABLE_CHUNK_SIZE];
 	dma_offset = offset = idx % MLX4_TABLE_CHUNK_SIZE;
 
@@ -326,10 +331,11 @@ void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle)
 }
 
 int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
-			 int start, int end)
+			 u32 start, u32 end)
 {
 	int inc = MLX4_TABLE_CHUNK_SIZE / table->obj_size;
-	int i, err;
+	int err;
+	u32 i;
 
 	for (i = start; i <= end; i += inc) {
 		err = mlx4_table_get(dev, table, i);
@@ -349,9 +355,9 @@ int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
 }
 
 void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
-			  int start, int end)
+			  u32 start, u32 end)
 {
-	int i;
+	u32 i;
 
 	for (i = start; i <= end; i += MLX4_TABLE_CHUNK_SIZE / table->obj_size)
 		mlx4_table_put(dev, table, i);
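
The icm.c hunks (with the matching prototype changes in icm.h below) widen the ICM object indices from int to u32 and, more importantly, do the offset arithmetic in 64 bits: the unmap offset becomes (u64) i * MLX4_TABLE_CHUNK_SIZE and the byte index in mlx4_table_find() becomes a u64 product, so tables big enough that these byte offsets no longer fit in a 32-bit int stop overflowing. A stand-alone illustration of the wraparound being avoided (the chunk size and index below are demo values, not the driver's):

    #include <stdint.h>
    #include <stdio.h>

    #define CHUNK_SIZE (1U << 18)           /* 256 KiB chunks, demo value only */

    int main(void)
    {
        uint32_t i = 20000;                 /* chunk index in a very large table */

        /* 32-bit product: 20000 * 2^18 = 5,242,880,000, which wraps mod 2^32. */
        uint32_t wrapped = i * CHUNK_SIZE;

        /* Promoting one operand to 64 bits, as "(u64) i * MLX4_TABLE_CHUNK_SIZE"
         * does in the patch, preserves the full offset. */
        uint64_t full = (uint64_t) i * CHUNK_SIZE;

        printf("32-bit offset: %u\n", wrapped);
        printf("64-bit offset: %llu\n", (unsigned long long) full);
        return 0;
    }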

drivers/net/ethernet/mellanox/mlx4/icm.h  (+5 -5)

@@ -71,17 +71,17 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
 				gfp_t gfp_mask, int coherent);
 void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent);
 
-int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj);
-void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj);
+int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj);
+void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj);
 int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
-			 int start, int end);
+			 u32 start, u32 end);
 void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
-			  int start, int end);
+			  u32 start, u32 end);
 int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
 			u64 virt, int obj_size, u32 nobj, int reserved,
 			int use_lowmem, int use_coherent);
 void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table);
 
-void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle);
+void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj, dma_addr_t *dma_handle);
 
 static inline void mlx4_icm_first(struct mlx4_icm *icm, struct mlx4_icm_iter *iter)