Commit d67bc5d4 authored by Ilya Lesokhin, committed by Doug Ledford

IB/mlx5: Simplify mlx5_ib_cont_pages

The patch simplifies mlx5_ib_cont_pages and fixes the following
issues in the original implementation:

The first issue is related to the alignment of the PFNs. After the
check "base + p != pfn", the alignment of the new PFN wasn't checked,
so the PFN sequence 0, 1, 1, 2 would result in a page_shift of 13
even though the 3rd PFN is not 8KB aligned.
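
As an illustration only (not part of the patch), the userspace sketch
below replays the old accounting on that PFN sequence. It assumes a 4KB
base page (page_shift 12) and a well-aligned start address, and uses the
GCC/Clang builtin __builtin_ctzll() as a stand-in for find_first_bit();
it prints m = 1, i.e. page_shift 13.

#include <stdio.h>

int main(void)
{
        unsigned long long pfns[] = { 0, 1, 1, 2 };
        unsigned long long base = 0, p = 0;
        int i = 0, m = 6;                     /* bound from the (aligned) start address */
        int mask = (1 << m) - 1;

        for (int k = 0; k < 4; k++) {
                unsigned long long pfn = pfns[k];

                if (!(i & mask)) {                    /* candidate block boundary */
                        if (pfn && __builtin_ctzll(pfn) < m)
                                m = __builtin_ctzll(pfn);
                        mask = (1 << m) - 1;
                        base = pfn;
                        p = 0;
                } else if (base + p != pfn) {         /* discontinuity: shrink m to the offset */
                        m = __builtin_ctzll(p);       /* alignment of pfn itself is never rechecked */
                        mask = (1 << m) - 1;
                        base = pfn;
                        p = 0;
                }
                p++;
                i++;
        }
        /* the real function also clamps m by ilog2(roundup_pow_of_two(i)) = 2; m is already 1 */
        printf("m = %d -> page_shift = %d\n", m, 12 + m);     /* m = 1 -> page_shift = 13 */
        return 0;
}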

This wasn't actually a bug because such mappings were supported by
all existing mlx5-compatible devices, but we don't want to require
this support in all future devices.

The other issue is that the inner loop didn't advance the PFN, so
the test "if (base + p != pfn)" spuriously detected a discontinuity
for any SGE with len > (1 << page_shift).
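
Again for illustration only, a minimal sketch of how that misfire plays
out on a single SGE covering two physically contiguous 4KB pages (made-up
pfn and len values, __builtin_ctzll() standing in for find_first_bit()):
the discontinuity branch fires on the second page and page_shift collapses
back to 12 even though an 8KB page would have been possible.

#include <stdio.h>

int main(void)
{
        unsigned long long pfn = 0x100;   /* sg_dma_address(sg) >> page_shift, fixed per SGE */
        unsigned long long len = 2;       /* sg_dma_len(sg) >> page_shift */
        unsigned long long base = 0, p = 0;
        int m = 6;                        /* bound from the start address */

        for (unsigned long long k = 0; k < len; k++) {
                if (k == 0) {             /* first page of the run: i & mask == 0 */
                        if (__builtin_ctzll(pfn) < m)
                                m = __builtin_ctzll(pfn);
                        base = pfn;
                        p = 0;
                } else if (base + p != pfn) {
                        /* pfn was never advanced, so pfn + 1 != pfn always holds
                         * and the contiguous run is needlessly restarted
                         */
                        m = __builtin_ctzll(p);       /* p == 1 here, m collapses to 0 */
                        base = pfn;
                        p = 0;
                }
                p++;
        }
        printf("old loop: m = %d -> page_shift = %d\n", m, 12 + m);   /* 12, not 13 */
        return 0;
}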

Fixes: e126ba97 ("mlx5: Add driver for Mellanox Connect-IB adapters")
Signed-off-by: Ilya Lesokhin <ilyal@mellanox.com>
Reviewed-by: Eli Cohen <eli@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 7c9d9662
@@ -50,13 +50,9 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
 {
        unsigned long tmp;
        unsigned long m;
-       int i, k;
-       u64 base = 0;
-       int p = 0;
-       int skip;
-       int mask;
-       u64 len;
-       u64 pfn;
+       u64 base = ~0, p = 0;
+       u64 len, pfn;
+       int i = 0;
        struct scatterlist *sg;
        int entry;
        unsigned long page_shift = umem->page_shift;
@@ -76,33 +72,24 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
        m = find_first_bit(&tmp, BITS_PER_LONG);
        if (max_page_shift)
                m = min_t(unsigned long, max_page_shift - page_shift, m);
-       skip = 1 << m;
-       mask = skip - 1;
-       i = 0;
+
        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
                len = sg_dma_len(sg) >> page_shift;
                pfn = sg_dma_address(sg) >> page_shift;
-               for (k = 0; k < len; k++) {
-                       if (!(i & mask)) {
-                               tmp = (unsigned long)pfn;
-                               m = min_t(unsigned long, m, find_first_bit(&tmp, BITS_PER_LONG));
-                               skip = 1 << m;
-                               mask = skip - 1;
-                               base = pfn;
-                               p = 0;
-                       } else {
-                               if (base + p != pfn) {
-                                       tmp = (unsigned long)p;
-                                       m = find_first_bit(&tmp, BITS_PER_LONG);
-                                       skip = 1 << m;
-                                       mask = skip - 1;
-                                       base = pfn;
-                                       p = 0;
-                               }
-                       }
-                       p++;
-                       i++;
+               if (base + p != pfn) {
+                       /* If either the offset or the new
+                        * base are unaligned update m
+                        */
+                       tmp = (unsigned long)(pfn | p);
+                       if (!IS_ALIGNED(tmp, 1 << m))
+                               m = find_first_bit(&tmp, BITS_PER_LONG);
+
+                       base = pfn;
+                       p = 0;
                }
+
+               p += len;
+               i += len;
        }
+
        if (i) {
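
For reference (not part of the patch either), the sketch below replays the
simplified per-SGE logic from the hunk above on a made-up scatterlist of two
entries that form one physically contiguous 4-page run; IS_ALIGNED() and
find_first_bit() are stood in for by a local macro and __builtin_ctzll().
With the "pfn | p" trick a single alignment check covers both an unaligned
new base and an unaligned offset, and the run is mapped with page_shift 14
(16KB).

#include <stdio.h>

#define IS_ALIGNED(x, a)  (((x) & ((a) - 1)) == 0)   /* local stand-in for the kernel macro */

int main(void)
{
        /* { pfn, len in pages }: two SGEs, together one contiguous 4-page run */
        unsigned long long sge[][2] = { { 0x100, 2 }, { 0x102, 2 } };
        unsigned long long base = ~0ULL, p = 0, i = 0;
        unsigned long long m = 6;                    /* bound from the start address */

        for (int e = 0; e < 2; e++) {
                unsigned long long pfn = sge[e][0], len = sge[e][1];

                if (base + p != pfn) {
                        /* if either the offset or the new base is unaligned, update m */
                        unsigned long long tmp = pfn | p;

                        if (!IS_ALIGNED(tmp, 1ULL << m))
                                m = __builtin_ctzll(tmp);
                        base = pfn;
                        p = 0;
                }
                p += len;
                i += len;
        }

        /* final clamp as in mlx5_ib_cont_pages: m = min(m, ilog2(roundup_pow_of_two(i))) */
        {
                unsigned long long bits = 0;

                while ((1ULL << bits) < i)
                        bits++;
                if (m > bits)
                        m = bits;
        }
        printf("page_shift = %llu\n", 12 + m);       /* 14: one 16KB mapping */
        return 0;
}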