Commit 90b19993 authored by Jaroslav Kysela

ALSA CVS update - Takashi Iwai <tiwai@suse.de>

Memalloc module, ALSA Core
- fixed the lock-up in the SG-buffer handler.
- removed a non-existing export symbol.
- cleaned up ifdefs.
parent ee22cdd3
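
The heart of the SG-buffer fix is visible in the hunks below: struct snd_sg_buf now embeds a copy of the snd_dma_device instead of borrowing the caller's pointer, and the per-page allocate/free loops use a local scratch snd_dma_buffer (tmpb) rather than reusing the caller's descriptor. A condensed sketch of the new allocation pattern, using only identifiers that appear in this diff (the helper name is made up; table setup and the short-buffer fallback are trimmed):

/*
 * Sketch only, not the literal patch: copy the device description into the
 * sgbuf, then allocate the backing pages one by one through a scratch
 * snd_dma_buffer.
 */
static int sgbuf_fill_pages_sketch(struct snd_sg_buf *sgbuf,
				   const struct snd_dma_device *dev,
				   unsigned int pages)
{
	unsigned int i;

	sgbuf->dev = *dev;			/* embedded copy, no borrowed pointer */
	if (dev->type == SNDRV_DMA_TYPE_PCI_SG)
		sgbuf->dev.type = SNDRV_DMA_TYPE_PCI;	/* back SG pages with plain PCI pages */
	else
		sgbuf->dev.type = SNDRV_DMA_TYPE_DEV;

	for (i = 0; i < pages; i++) {
		struct snd_dma_buffer tmpb;	/* scratch descriptor for one page */

		if (snd_dma_alloc_pages(&sgbuf->dev, PAGE_SIZE, &tmpb) < 0)
			return -ENOMEM;		/* the real code can fall back to a shorter buffer */
		sgbuf->table[i].buf = tmpb.area;
		sgbuf->table[i].addr = tmpb.addr;
		sgbuf->page_table[i] = virt_to_page(tmpb.area);
		sgbuf->pages++;
	}
	return 0;
}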
@@ -84,7 +84,7 @@ struct snd_sg_buf {
 	int tblsize;	/* allocated table size */
 	struct snd_sg_page *table;	/* address table */
 	struct page **page_table;	/* page table (for vmap/vunmap) */
-	const struct snd_dma_device *dev;
+	struct snd_dma_device dev;
 };
 
 /*
@@ -104,9 +104,6 @@ static inline dma_addr_t snd_sgbuf_get_addr(struct snd_sg_buf *sgbuf, size_t off
 }
 
-/* snd_dma_device management */
-void snd_dma_device_init(const struct snd_dma_device *dev, int type, void *data);
-
 /* allocate/release a buffer */
 int snd_dma_alloc_pages(const struct snd_dma_device *dev, size_t size,
 			struct snd_dma_buffer *dmab);
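
Since the snd_dma_device_init() declaration disappears here (and its stale export is dropped below), callers presumably fill in a snd_dma_device themselves before calling snd_dma_alloc_pages(). A hypothetical caller fragment, assuming only the type field visible elsewhere in this diff and leaving the device binding as a placeholder:

	struct snd_dma_device dev_desc;
	struct snd_dma_buffer dmab;

	memset(&dev_desc, 0, sizeof(dev_desc));
	dev_desc.type = SNDRV_DMA_TYPE_PCI_SG;	/* "type" as used in the SG-buffer hunks */
	/* ... driver-specific device/data fields of dev_desc set here ... */

	if (snd_dma_alloc_pages(&dev_desc, size, &dmab) < 0)
		return -ENOMEM;			/* on success, dmab.area / dmab.addr describe the buffer */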
......
@@ -84,10 +84,7 @@ struct snd_mem_list {
  * Hacks
  */
 
-#ifdef CONFIG_PCI
 #if defined(__i386__) || defined(__ppc__) || defined(__x86_64__)
-#define HACK_PCI_ALLOC_CONSISTENT
-
 /*
  * A hack to allocate large buffers via dma_alloc_coherent()
@@ -111,7 +108,7 @@ static void *snd_dma_hack_alloc_coherent(struct device *dev, size_t size,
 	void *ret;
 	u64 dma_mask;
 
-	if (dev == NULL)
+	if (dev == NULL || !dev->dma_mask)
 		return dma_alloc_coherent(dev, size, dma_handle, flags);
 	dma_mask = *dev->dma_mask;
 	*dev->dma_mask = 0xffffffff; /* do without masking */
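
For reference, the hack touched here saves the device's DMA mask, widens it to 0xffffffff around dma_alloc_coherent(), and presumably restores it afterwards; the one-line change above merely avoids dereferencing dev->dma_mask when no mask is set. A simplified sketch of the helper, assuming the second half of the signature and omitting whatever the real function does after the dma_alloc_coherent() call:

static void *snd_dma_hack_alloc_coherent(struct device *dev, size_t size,
					 dma_addr_t *dma_handle, int flags)
{
	void *ret;
	u64 dma_mask;

	if (dev == NULL || !dev->dma_mask)	/* new guard: no device or no mask to widen */
		return dma_alloc_coherent(dev, size, dma_handle, flags);
	dma_mask = *dev->dma_mask;		/* remember the real mask */
	*dev->dma_mask = 0xffffffff;		/* do without masking */
	ret = dma_alloc_coherent(dev, size, dma_handle, flags);
	*dev->dma_mask = dma_mask;		/* restore the original mask */
	return ret;
}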
@@ -137,7 +134,6 @@ static void *snd_dma_hack_alloc_coherent(struct device *dev, size_t size,
 #define dma_alloc_coherent snd_dma_hack_alloc_coherent
 #endif /* arch */
-#endif /* CONFIG_PCI */
 
 /*
  *
@@ -937,8 +933,6 @@ __setup("snd-page-alloc=", snd_mem_setup);
 /*
  * exports
  */
-EXPORT_SYMBOL(snd_dma_device_init);
 EXPORT_SYMBOL(snd_dma_alloc_pages);
 EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
 EXPORT_SYMBOL(snd_dma_free_pages);
......
@@ -33,17 +33,17 @@
 int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab)
 {
 	struct snd_sg_buf *sgbuf = dmab->private_data;
-	struct snd_dma_buffer dmab;
+	struct snd_dma_buffer tmpb;
 	int i;
 
 	if (! sgbuf)
 		return -EINVAL;
 
 	for (i = 0; i < sgbuf->pages; i++) {
-		dmab.area = sgbuf->table[i].buf;
-		dmab.addr = sgbuf->table[i].addr;
-		dmab.bytes = PAGE_SIZE;
-		snd_dma_free_pages(sgbuf->dev, &dmab);
+		tmpb.area = sgbuf->table[i].buf;
+		tmpb.addr = sgbuf->table[i].addr;
+		tmpb.bytes = PAGE_SIZE;
+		snd_dma_free_pages(&sgbuf->dev, &tmpb);
 	}
 	if (dmab->area)
 		vunmap(dmab->area);
@@ -65,9 +65,7 @@ void *snd_malloc_sgbuf_pages(const struct snd_dma_device *dev,
 {
 	struct snd_sg_buf *sgbuf;
 	unsigned int i, pages;
-	void *ptr;
-	dma_addr_t addr;
-	struct snd_dma_buffer dmab;
+	struct snd_dma_buffer tmpb;
 
 	dmab->area = NULL;
 	dmab->addr = 0;
@@ -75,7 +73,11 @@ void *snd_malloc_sgbuf_pages(const struct snd_dma_device *dev,
 	if (! sgbuf)
 		return NULL;
 	memset(sgbuf, 0, sizeof(*sgbuf));
-	sgbuf->dev = dev;
+	sgbuf->dev = *dev;
+	if (dev->type == SNDRV_DMA_TYPE_PCI_SG)
+		sgbuf->dev.type = SNDRV_DMA_TYPE_PCI;
+	else
+		sgbuf->dev.type = SNDRV_DMA_TYPE_DEV;
 	pages = snd_sgbuf_aligned_pages(size);
 	sgbuf->tblsize = sgbuf_align_table(pages);
 	sgbuf->table = kmalloc(sizeof(*sgbuf->table) * sgbuf->tblsize, GFP_KERNEL);
@@ -89,15 +91,15 @@ void *snd_malloc_sgbuf_pages(const struct snd_dma_device *dev,
 	/* allocate each page */
 	for (i = 0; i < pages; i++) {
-		if (snd_dma_alloc_pages(dev, PAGE_SIZE, &dmab) < 0) {
+		if (snd_dma_alloc_pages(&sgbuf->dev, PAGE_SIZE, &tmpb) < 0) {
 			if (res_size == NULL)
 				goto _failed;
 			*res_size = size = sgbuf->pages * PAGE_SIZE;
 			break;
 		}
-		sgbuf->table[i].buf = dmab.area;
-		sgbuf->table[i].addr = dmab.addr;
-		sgbuf->page_table[i] = virt_to_page(ptr);
+		sgbuf->table[i].buf = tmpb.area;
+		sgbuf->table[i].addr = tmpb.addr;
+		sgbuf->page_table[i] = virt_to_page(tmpb.area);
 		sgbuf->pages++;
 	}
......