Commit ada65c74 authored by Maarten Lankhorst, committed by Sumit Semwal

dma-buf: remove fallback for !CONFIG_DMA_SHARED_BUFFER

Documentation says that code requiring dma-buf should select it in
Kconfig, so the inline fallbacks will never be used. A link error
will make it obvious what went wrong, instead of silently doing
nothing at runtime.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Rob Clark <rob.clark@linaro.org>
Signed-off-by: Sumit Semwal <sumit.semwal@linaro.org>
parent b6fa0cd6
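
The "select it in Kconfig" guidance in the commit message maps to a driver's Kconfig entry. A minimal sketch, assuming a hypothetical DRM_FOO driver symbol (the symbol name, prompt, and help text are illustrative and not part of this commit):

config DRM_FOO
	tristate "Hypothetical Foo DRM driver"
	depends on DRM
	select DMA_SHARED_BUFFER
	help
	  Hypothetical example entry: selecting DMA_SHARED_BUFFER builds
	  the dma-buf core whenever this driver is enabled, so the inline
	  fallbacks removed below are never needed. Forgetting the select
	  now surfaces as a link error instead of -ENODEV at runtime.

With the select in place, the real dma_buf_* symbols are always available to such a driver, which is what makes the fallback stubs removed below dead code.
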
@@ -156,7 +156,6 @@ static inline void get_dma_buf(struct dma_buf *dmabuf)
 	get_file(dmabuf->file);
 }
 
-#ifdef CONFIG_DMA_SHARED_BUFFER
 struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
 							struct device *dev);
 void dma_buf_detach(struct dma_buf *dmabuf,
@@ -184,103 +183,5 @@ int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
 		 unsigned long);
 void *dma_buf_vmap(struct dma_buf *);
 void dma_buf_vunmap(struct dma_buf *, void *vaddr);
-#else
-
-static inline struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
-							struct device *dev)
-{
-	return ERR_PTR(-ENODEV);
-}
-
-static inline void dma_buf_detach(struct dma_buf *dmabuf,
-				  struct dma_buf_attachment *dmabuf_attach)
-{
-	return;
-}
-
-static inline struct dma_buf *dma_buf_export(void *priv,
-					     const struct dma_buf_ops *ops,
-					     size_t size, int flags)
-{
-	return ERR_PTR(-ENODEV);
-}
-
-static inline int dma_buf_fd(struct dma_buf *dmabuf, int flags)
-{
-	return -ENODEV;
-}
-
-static inline struct dma_buf *dma_buf_get(int fd)
-{
-	return ERR_PTR(-ENODEV);
-}
-
-static inline void dma_buf_put(struct dma_buf *dmabuf)
-{
-	return;
-}
-
-static inline struct sg_table *dma_buf_map_attachment(
-	struct dma_buf_attachment *attach, enum dma_data_direction write)
-{
-	return ERR_PTR(-ENODEV);
-}
-
-static inline void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
-			struct sg_table *sg, enum dma_data_direction dir)
-{
-	return;
-}
-
-static inline int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
-					   size_t start, size_t len,
-					   enum dma_data_direction dir)
-{
-	return -ENODEV;
-}
-
-static inline void dma_buf_end_cpu_access(struct dma_buf *dmabuf,
-					  size_t start, size_t len,
-					  enum dma_data_direction dir)
-{
-}
-
-static inline void *dma_buf_kmap_atomic(struct dma_buf *dmabuf,
-					unsigned long pnum)
-{
-	return NULL;
-}
-
-static inline void dma_buf_kunmap_atomic(struct dma_buf *dmabuf,
-					 unsigned long pnum, void *vaddr)
-{
-}
-
-static inline void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long pnum)
-{
-	return NULL;
-}
-
-static inline void dma_buf_kunmap(struct dma_buf *dmabuf,
-				  unsigned long pnum, void *vaddr)
-{
-}
-
-static inline int dma_buf_mmap(struct dma_buf *dmabuf,
-			       struct vm_area_struct *vma,
-			       unsigned long pgoff)
-{
-	return -ENODEV;
-}
-
-static inline void *dma_buf_vmap(struct dma_buf *dmabuf)
-{
-	return NULL;
-}
-
-static inline void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
-{
-}
-
-#endif /* CONFIG_DMA_SHARED_BUFFER */
 #endif /* __DMA_BUF_H__ */