Commit c9636244 authored by Tvrtko Ursulin

tools/testing/scatterlist: Test new __sg_alloc_table_from_pages

Exercise the new __sg_alloc_table_from_pages API (and through
it also the old sg_alloc_table_from_pages), checking that the
created table has the expected number of segments depending on
the sequence of input pages and other conditions.

v2: Move to data-driven test cases for readability.
v3: Add some more test cases and -fsanitize=undefined. (Chris Wilson)
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: linux-kernel@vger.kernel.org
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20170906145506.14952-1-tvrtko.ursulin@linux.intel.com
[tursulin: whitespace fixup]
parent 5602452e
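For orientation (not part of the patch): __sg_alloc_table_from_pages() takes a page array plus an offset, a total size and a per-segment size cap, and coalesces physically consecutive pages into single scatterlist segments; the old sg_alloc_table_from_pages() goes through it without a caller-supplied cap. Below is a minimal sketch of the behaviour the test table encodes, assuming it is dropped into a function inside the same mocked userspace harness as main.c, where a struct page pointer simply encodes a page-aligned address:

	struct sg_table st;
	struct page *pages[2] = {
		(struct page *)(1UL * PAGE_SIZE),	/* fake page */
		(struct page *)(2UL * PAGE_SIZE),	/* physically adjacent fake page */
	};

	/* Old entry point: no caller-supplied cap, adjacent pages coalesce. */
	if (!sg_alloc_table_from_pages(&st, pages, 2, 0, 2 * PAGE_SIZE, GFP_KERNEL)) {
		assert(st.nents == 1);
		sg_free_table(&st);
	}

	/* New entry point: capping each segment at PAGE_SIZE forces two segments. */
	if (!__sg_alloc_table_from_pages(&st, pages, 2, 0, 2 * PAGE_SIZE,
					 PAGE_SIZE, GFP_KERNEL)) {
		assert(st.nents == 2);
		sg_free_table(&st);
	}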
tools/testing/scatterlist/Makefile

CFLAGS += -I. -I../../include -g -O2 -Wall -fsanitize=address
LDFLAGS += -fsanitize=address -fsanitize=undefined
TARGETS = main
OFILES = main.o scatterlist.o

ifeq ($(BUILD), 32)
        CFLAGS += -m32
        LDFLAGS += -m32
endif

targets: include $(TARGETS)

main: $(OFILES)

clean:
	$(RM) $(TARGETS) $(OFILES) scatterlist.c linux/scatterlist.h linux/highmem.h linux/kmemleak.h asm/io.h
	@rmdir asm

# Import the kernel's lib/scatterlist.c, dropping "static"/"inline" so the
# functions under test are visible to the userspace harness.
scatterlist.c: ../../../lib/scatterlist.c
	@sed -e 's/^static //' -e 's/__always_inline //' -e 's/inline //' < $< > $@

.PHONY: include

# Stage the real scatterlist.h plus empty stand-ins for the headers it pulls in.
include: ../../../include/linux/scatterlist.h
	@mkdir -p linux
	@mkdir -p asm
	@touch asm/io.h
	@touch linux/highmem.h
	@touch linux/kmemleak.h
	@cp $< linux/scatterlist.h
tools/testing/scatterlist/linux/mm.h

#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <assert.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <limits.h>
#include <stdio.h>
typedef unsigned long dma_addr_t;
#define unlikely	/* expands to nothing, so unlikely(x) reduces to (x) */
#define BUG_ON(x) assert(!(x))
#define WARN_ON(condition) ({					\
	int __ret_warn_on = !!(condition);			\
	unlikely(__ret_warn_on);				\
})
#define WARN_ON_ONCE(condition) ({				\
	int __ret_warn_on = !!(condition);			\
	if (unlikely(__ret_warn_on))				\
		assert(0);					\
	unlikely(__ret_warn_on);				\
})
#define PAGE_SIZE (4096)
#define PAGE_SHIFT (12)
#define PAGE_MASK (~(PAGE_SIZE-1))
#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
#define virt_to_page(x) ((void *)x)
#define page_address(x) ((void *)x)
static inline unsigned long page_to_phys(struct page *page)
{
	assert(0);
	return 0;
}
#define page_to_pfn(page) ((unsigned long)(page) / PAGE_SIZE)
#define pfn_to_page(pfn) (void *)((pfn) * PAGE_SIZE)
#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
#define __min(t1, t2, min1, min2, x, y) ({		\
	t1 min1 = (x);					\
	t2 min2 = (y);					\
	(void) (&min1 == &min2);			\
	min1 < min2 ? min1 : min2; })

#define ___PASTE(a,b) a##b
#define __PASTE(a,b) ___PASTE(a,b)
#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)

#define min(x, y)					\
	__min(typeof(x), typeof(y),			\
	      __UNIQUE_ID(min1_), __UNIQUE_ID(min2_),	\
	      x, y)

#define min_t(type, x, y)				\
	__min(type, type,				\
	      __UNIQUE_ID(min1_), __UNIQUE_ID(min2_),	\
	      x, y)
#define preemptible() (1)
/* kmap()/kunmap() are never reached by the exercised paths; abort if they are. */
static inline void *kmap(struct page *page)
{
	assert(0);
	return NULL;
}

static inline void *kmap_atomic(struct page *page)
{
	assert(0);
	return NULL;
}

static inline void kunmap(void *addr)
{
	assert(0);
}

static inline void kunmap_atomic(void *addr)
{
	assert(0);
}

/* Back the kernel allocators with plain malloc()/free(). */
static inline unsigned long __get_free_page(unsigned int flags)
{
	return (unsigned long)malloc(PAGE_SIZE);
}

static inline void free_page(unsigned long page)
{
	free((void *)page);
}

static inline void *kmalloc(unsigned int size, unsigned int flags)
{
	return malloc(size);
}
#define kfree(x) free(x)
#define kmemleak_alloc(a, b, c, d)
#define kmemleak_free(a)
#define PageSlab(p) (0)
#define flush_kernel_dcache_page(p)
#endif
tools/testing/scatterlist/main.c

#include <stdio.h>
#include <assert.h>

#include <linux/scatterlist.h>

#define MAX_PAGES (64)

/*
 * Turn an array of small integers into fake struct page pointers: value n is
 * represented by the page-aligned address (n + 1) * PAGE_SIZE, so consecutive
 * values model physically contiguous pages.
 */
static void set_pages(struct page **pages, const unsigned *array, unsigned num)
{
	unsigned int i;

	assert(num < MAX_PAGES);
	for (i = 0; i < num; i++)
		pages[i] = (struct page *)(unsigned long)
			   ((1 + array[i]) * PAGE_SIZE);
}

#define pfn(...) (unsigned []){ __VA_ARGS__ }	/* anonymous array of page numbers */
int main(void)
{
	const unsigned int sgmax = SCATTERLIST_MAX_SEGMENT;
	struct test {
		int alloc_ret;
		unsigned num_pages;
		unsigned *pfn;
		unsigned size;
		unsigned int max_seg;
		unsigned int expected_segments;
	} *test, tests[] = {
		/* max_seg that is zero, unaligned or past SCATTERLIST_MAX_SEGMENT is rejected. */
		{ -EINVAL, 1, pfn(0), PAGE_SIZE, PAGE_SIZE + 1, 1 },
		{ -EINVAL, 1, pfn(0), PAGE_SIZE, 0, 1 },
		{ -EINVAL, 1, pfn(0), PAGE_SIZE, sgmax + 1, 1 },
		{ 0, 1, pfn(0), PAGE_SIZE, sgmax, 1 },
		{ 0, 1, pfn(0), 1, sgmax, 1 },
		/* Physically contiguous pages coalesce into a single segment. */
		{ 0, 2, pfn(0, 1), 2 * PAGE_SIZE, sgmax, 1 },
		{ 0, 2, pfn(1, 0), 2 * PAGE_SIZE, sgmax, 2 },
		{ 0, 3, pfn(0, 1, 2), 3 * PAGE_SIZE, sgmax, 1 },
		{ 0, 3, pfn(0, 2, 1), 3 * PAGE_SIZE, sgmax, 3 },
		{ 0, 3, pfn(0, 1, 3), 3 * PAGE_SIZE, sgmax, 2 },
		{ 0, 3, pfn(1, 2, 4), 3 * PAGE_SIZE, sgmax, 2 },
		{ 0, 3, pfn(1, 3, 4), 3 * PAGE_SIZE, sgmax, 2 },
		{ 0, 4, pfn(0, 1, 3, 4), 4 * PAGE_SIZE, sgmax, 2 },
		{ 0, 5, pfn(0, 1, 3, 4, 5), 5 * PAGE_SIZE, sgmax, 2 },
		{ 0, 5, pfn(0, 1, 3, 4, 6), 5 * PAGE_SIZE, sgmax, 3 },
		{ 0, 5, pfn(0, 1, 2, 3, 4), 5 * PAGE_SIZE, sgmax, 1 },
		/* A smaller max_seg splits otherwise contiguous runs of pages. */
		{ 0, 5, pfn(0, 1, 2, 3, 4), 5 * PAGE_SIZE, 2 * PAGE_SIZE, 3 },
		{ 0, 6, pfn(0, 1, 2, 3, 4, 5), 6 * PAGE_SIZE, 2 * PAGE_SIZE, 3 },
		{ 0, 6, pfn(0, 2, 3, 4, 5, 6), 6 * PAGE_SIZE, 2 * PAGE_SIZE, 4 },
		{ 0, 6, pfn(0, 1, 3, 4, 5, 6), 6 * PAGE_SIZE, 2 * PAGE_SIZE, 3 },
		/* Sentinel: expected_segments == 0 terminates the table. */
		{ 0, 0, NULL, 0, 0, 0 },
	};
	unsigned int i;

	for (i = 0, test = tests; test->expected_segments; test++, i++) {
		struct page *pages[MAX_PAGES];
		struct sg_table st;
		int ret;

		set_pages(pages, test->pfn, test->num_pages);

		ret = __sg_alloc_table_from_pages(&st, pages, test->num_pages,
						  0, test->size, test->max_seg,
						  GFP_KERNEL);
		assert(ret == test->alloc_ret);

		if (test->alloc_ret)
			continue;

		assert(st.nents == test->expected_segments);
		assert(st.orig_nents == test->expected_segments);

		sg_free_table(&st);
	}

	/* Every entry apart from the sentinel must have been exercised. */
	assert(i == (sizeof(tests) / sizeof(tests[0])) - 1);

	return 0;
}