Commit aa1e6f1a authored by Dan Williams

dmaengine: kill struct dma_client and supporting infrastructure

All users have been converted to either the general-purpose allocator,
dma_find_channel, or dma_request_channel.
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>


parent 209b84a8
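
For context, a former dma_client that needed exclusive use of a channel now requests one directly. A minimal sketch, assuming the API visible in this diff (dma_filter_fn still returns enum dma_state_client); the my_* names are illustrative and not part of this commit:

```c
#include <linux/dmaengine.h>

/* Hypothetical filter: take the first channel offered (DMA_ACK) or pass
 * on it (DMA_DUP).  Real filters usually match a specific DMA device.
 */
static enum dma_state_client my_filter(struct dma_chan *chan, void *param)
{
	return DMA_ACK;
}

static struct dma_chan *my_grab_memcpy_channel(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* Exclusive allocation; this replaces registering a struct dma_client. */
	return dma_request_channel(mask, my_filter, NULL);
}

/* ...and dma_release_channel(chan) once the client is done with it. */
```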
@@ -31,15 +31,12 @@
  *
  * LOCKING:
  *
- * The subsystem keeps two global lists, dma_device_list and dma_client_list.
- * Both of these are protected by a mutex, dma_list_mutex.
+ * The subsystem keeps a global list of dma_device structs it is protected by a
+ * mutex, dma_list_mutex.
  *
  * Each device has a channels list, which runs unlocked but is never modified
  * once the device is registered, it's just setup by the driver.
  *
- * Each client is responsible for keeping track of the channels it uses. See
- * the definition of dma_event_callback in dmaengine.h.
- *
  * Each device has a kref, which is initialized to 1 when the device is
  * registered. A kref_get is done for each device registered. When the
  * device is released, the corresponding kref_put is done in the release
@@ -74,7 +71,6 @@
 static DEFINE_MUTEX(dma_list_mutex);
 static LIST_HEAD(dma_device_list);
-static LIST_HEAD(dma_client_list);
 static long dmaengine_ref_count;
 
 /* --- sysfs implementation --- */
@@ -189,7 +185,7 @@ static int dma_chan_get(struct dma_chan *chan)
         /* allocate upon first client reference */
         if (chan->client_count == 1 && err == 0) {
-                int desc_cnt = chan->device->device_alloc_chan_resources(chan, NULL);
+                int desc_cnt = chan->device->device_alloc_chan_resources(chan);
 
                 if (desc_cnt < 0) {
                         err = desc_cnt;
@@ -218,40 +214,6 @@ static void dma_chan_put(struct dma_chan *chan)
                 chan->device->device_free_chan_resources(chan);
 }
 
-/**
- * dma_client_chan_alloc - try to allocate channels to a client
- * @client: &dma_client
- *
- * Called with dma_list_mutex held.
- */
-static void dma_client_chan_alloc(struct dma_client *client)
-{
-        struct dma_device *device;
-        struct dma_chan *chan;
-        enum dma_state_client ack;
-
-        /* Find a channel */
-        list_for_each_entry(device, &dma_device_list, global_node) {
-                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
-                        continue;
-                if (!dma_device_satisfies_mask(device, client->cap_mask))
-                        continue;
-                list_for_each_entry(chan, &device->channels, device_node) {
-                        if (!chan->client_count)
-                                continue;
-                        ack = client->event_callback(client, chan,
-                                                     DMA_RESOURCE_AVAILABLE);
-
-                        /* we are done once this client rejects
-                         * an available resource
-                         */
-                        if (ack == DMA_NAK)
-                                return;
-                }
-        }
-}
-
 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
 {
         enum dma_status status;
@@ -584,21 +546,6 @@ void dma_release_channel(struct dma_chan *chan)
 }
 EXPORT_SYMBOL_GPL(dma_release_channel);
 
-/**
- * dma_chans_notify_available - broadcast available channels to the clients
- */
-static void dma_clients_notify_available(void)
-{
-        struct dma_client *client;
-
-        mutex_lock(&dma_list_mutex);
-
-        list_for_each_entry(client, &dma_client_list, global_node)
-                dma_client_chan_alloc(client);
-
-        mutex_unlock(&dma_list_mutex);
-}
-
 /**
  * dmaengine_get - register interest in dma_channels
  */
@@ -659,19 +606,6 @@ void dmaengine_put(void)
 }
 EXPORT_SYMBOL(dmaengine_put);
 
-/**
- * dma_async_client_chan_request - send all available channels to the
- * client that satisfy the capability mask
- * @client - requester
- */
-void dma_async_client_chan_request(struct dma_client *client)
-{
-        mutex_lock(&dma_list_mutex);
-        dma_client_chan_alloc(client);
-        mutex_unlock(&dma_list_mutex);
-}
-EXPORT_SYMBOL(dma_async_client_chan_request);
-
 /**
  * dma_async_device_register - registers DMA devices found
  * @device: &dma_device
@@ -765,8 +699,6 @@ int dma_async_device_register(struct dma_device *device)
         dma_channel_rebalance();
         mutex_unlock(&dma_list_mutex);
 
-        dma_clients_notify_available();
-
         return 0;
 
 err_out:
...
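
The other conversion target named in the commit message, the general-purpose allocator, works opportunistically: a subsystem pins the public channels with dmaengine_get() (normally once at init) and then borrows whatever dma_find_channel() returns. A rough sketch under those assumptions; my_copy_with_dma and its buffers are illustrative only, not part of this commit:

```c
#include <linux/dmaengine.h>
#include <linux/string.h>

/* Opportunistic use: no dma_client registration, no exclusive ownership. */
static void my_copy_with_dma(void *dst, void *src, size_t len)
{
	struct dma_chan *chan;
	dma_cookie_t cookie;

	dmaengine_get();	/* normally taken once, at subsystem init */

	chan = dma_find_channel(DMA_MEMCPY);	/* NULL if no channel is registered */
	if (chan) {
		cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
		dma_sync_wait(chan, cookie);	/* poll for completion */
	} else {
		memcpy(dst, src, len);		/* fall back to the CPU */
	}

	dmaengine_put();
}
```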
@@ -758,8 +758,7 @@ static void dwc_issue_pending(struct dma_chan *chan)
         spin_unlock_bh(&dwc->lock);
 }
 
-static int dwc_alloc_chan_resources(struct dma_chan *chan,
-                struct dma_client *client)
+static int dwc_alloc_chan_resources(struct dma_chan *chan)
 {
         struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
         struct dw_dma *dw = to_dw_dma(chan->device);
...
@@ -366,8 +366,7 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
  *
  * Return - The number of descriptors allocated.
  */
-static int fsl_dma_alloc_chan_resources(struct dma_chan *chan,
-                                        struct dma_client *client)
+static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
 {
         struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
...
@@ -734,8 +734,7 @@ static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
  * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
  * @chan: the channel to be filled out
  */
-static int ioat_dma_alloc_chan_resources(struct dma_chan *chan,
-                                         struct dma_client *client)
+static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
 {
         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
         struct ioat_desc_sw *desc;
@@ -1381,7 +1380,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
         dma_chan = container_of(device->common.channels.next,
                                 struct dma_chan,
                                 device_node);
-        if (device->common.device_alloc_chan_resources(dma_chan, NULL) < 1) {
+        if (device->common.device_alloc_chan_resources(dma_chan) < 1) {
                 dev_err(&device->pdev->dev,
                         "selftest cannot allocate chan resource\n");
                 err = -ENODEV;
...
@@ -470,8 +470,7 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);
  * greater than 2x the number slots needed to satisfy a device->max_xor
  * request.
  * */
-static int iop_adma_alloc_chan_resources(struct dma_chan *chan,
-                                         struct dma_client *client)
+static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
 {
         char *hw_desc;
         int idx;
@@ -865,7 +864,7 @@ static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
         dma_chan = container_of(device->common.channels.next,
                                 struct dma_chan,
                                 device_node);
-        if (iop_adma_alloc_chan_resources(dma_chan, NULL) < 1) {
+        if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
                 err = -ENODEV;
                 goto out;
         }
@@ -963,7 +962,7 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
         dma_chan = container_of(device->common.channels.next,
                                 struct dma_chan,
                                 device_node);
-        if (iop_adma_alloc_chan_resources(dma_chan, NULL) < 1) {
+        if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
                 err = -ENODEV;
                 goto out;
         }
...
@@ -606,8 +606,7 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
 }
 
 /* returns the number of allocated descriptors */
-static int mv_xor_alloc_chan_resources(struct dma_chan *chan,
-                                       struct dma_client *client)
+static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
 {
         char *hw_desc;
         int idx;
@@ -957,7 +956,7 @@ static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
         dma_chan = container_of(device->common.channels.next,
                                 struct dma_chan,
                                 device_node);
-        if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) {
+        if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
                 err = -ENODEV;
                 goto out;
         }
@@ -1052,7 +1051,7 @@ mv_xor_xor_self_test(struct mv_xor_device *device)
         dma_chan = container_of(device->common.channels.next,
                                 struct dma_chan,
                                 device_node);
-        if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) {
+        if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
                 err = -ENODEV;
                 goto out;
         }
...
@@ -55,7 +55,6 @@ enum atmel_mci_state {
 struct atmel_mci_dma {
 #ifdef CONFIG_MMC_ATMELMCI_DMA
-        struct dma_client client;
         struct dma_chan *chan;
         struct dma_async_tx_descriptor *data_desc;
 #endif
...
@@ -28,20 +28,6 @@
 #include <linux/rcupdate.h>
 #include <linux/dma-mapping.h>
 
-/**
- * enum dma_state - resource PNP/power management state
- * @DMA_RESOURCE_SUSPEND: DMA device going into low power state
- * @DMA_RESOURCE_RESUME: DMA device returning to full power
- * @DMA_RESOURCE_AVAILABLE: DMA device available to the system
- * @DMA_RESOURCE_REMOVED: DMA device removed from the system
- */
-enum dma_state {
-        DMA_RESOURCE_SUSPEND,
-        DMA_RESOURCE_RESUME,
-        DMA_RESOURCE_AVAILABLE,
-        DMA_RESOURCE_REMOVED,
-};
-
 /**
  * enum dma_state_client - state of the channel in the client
  * @DMA_ACK: client would like to use, or was using this channel
@@ -170,23 +156,6 @@ struct dma_chan {
 
 void dma_chan_cleanup(struct kref *kref);
 
-/*
- * typedef dma_event_callback - function pointer to a DMA event callback
- * For each channel added to the system this routine is called for each client.
- * If the client would like to use the channel it returns '1' to signal (ack)
- * the dmaengine core to take out a reference on the channel and its
- * corresponding device. A client must not 'ack' an available channel more
- * than once. When a channel is removed all clients are notified. If a client
- * is using the channel it must 'ack' the removal. A client must not 'ack' a
- * removed channel more than once.
- * @client - 'this' pointer for the client context
- * @chan - channel to be acted upon
- * @state - available or removed
- */
-struct dma_client;
-typedef enum dma_state_client (*dma_event_callback) (struct dma_client *client,
-                struct dma_chan *chan, enum dma_state state);
-
 /**
  * typedef dma_filter_fn - callback filter for dma_request_channel
  * @chan: channel to be reviewed
@@ -199,21 +168,6 @@ typedef enum dma_state_client (*dma_event_callback) (struct dma_client *client,
  */
 typedef enum dma_state_client (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
 
-/**
- * struct dma_client - info on the entity making use of DMA services
- * @event_callback: func ptr to call when something happens
- * @cap_mask: only return channels that satisfy the requested capabilities
- *  a value of zero corresponds to any capability
- * @slave: data for preparing slave transfer. Must be non-NULL iff the
- *  DMA_SLAVE capability is requested.
- * @global_node: list_head for global dma_client_list
- */
-struct dma_client {
-        dma_event_callback event_callback;
-        dma_cap_mask_t cap_mask;
-        struct list_head global_node;
-};
-
 typedef void (*dma_async_tx_callback)(void *dma_async_param);
 
 /**
  * struct dma_async_tx_descriptor - async transaction descriptor
@@ -285,8 +239,7 @@ struct dma_device {
         int dev_id;
         struct device *dev;
 
-        int (*device_alloc_chan_resources)(struct dma_chan *chan,
-                        struct dma_client *client);
+        int (*device_alloc_chan_resources)(struct dma_chan *chan);
         void (*device_free_chan_resources)(struct dma_chan *chan);
         struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
@@ -320,7 +273,6 @@ struct dma_device {
 void dmaengine_get(void);
 void dmaengine_put(void);
-void dma_async_client_chan_request(struct dma_client *client);
 dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
         void *dest, void *src, size_t len);
 dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
...
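
Driver-side, the prototype change above is mechanical: every device_alloc_chan_resources implementation just drops the unused dma_client argument, as the dw_dmac, fsldma, ioat, iop-adma and mv_xor hunks show. A hypothetical out-of-tree driver would follow the same pattern; the foo_* names and descriptor count below are made up for illustration:

```c
#include <linux/dmaengine.h>

#define FOO_DESC_COUNT	64	/* illustrative descriptor ring size */

/* Was: foo_alloc_chan_resources(struct dma_chan *chan, struct dma_client *client) */
static int foo_alloc_chan_resources(struct dma_chan *chan)
{
	/* set up this channel's descriptors; return how many, or a -errno */
	return FOO_DESC_COUNT;
}

static void foo_init_dma_device(struct dma_device *dma)
{
	dma->device_alloc_chan_resources = foo_alloc_chan_resources;
}
```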
@@ -167,25 +167,6 @@ static DEFINE_SPINLOCK(ptype_lock);
 static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
 static struct list_head ptype_all __read_mostly; /* Taps */
 
-#ifdef CONFIG_NET_DMA
-struct net_dma {
-        struct dma_client client;
-        spinlock_t lock;
-        cpumask_t channel_mask;
-        struct dma_chan **channels;
-};
-
-static enum dma_state_client
-netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
-        enum dma_state state);
-
-static struct net_dma net_dma = {
-        .client = {
-                .event_callback = netdev_dma_event,
-        },
-};
-#endif
-
 /*
  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
  * semaphore.
@@ -4826,81 +4807,6 @@ static int dev_cpu_callback(struct notifier_block *nfb,
         return NOTIFY_OK;
 }
 
-#ifdef CONFIG_NET_DMA
-/**
- * netdev_dma_event - event callback for the net_dma_client
- * @client: should always be net_dma_client
- * @chan: DMA channel for the event
- * @state: DMA state to be handled
- */
-static enum dma_state_client
-netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
-        enum dma_state state)
-{
-        int i, found = 0, pos = -1;
-        struct net_dma *net_dma =
-                container_of(client, struct net_dma, client);
-        enum dma_state_client ack = DMA_DUP; /* default: take no action */
-
-        spin_lock(&net_dma->lock);
-        switch (state) {
-        case DMA_RESOURCE_AVAILABLE:
-                for (i = 0; i < nr_cpu_ids; i++)
-                        if (net_dma->channels[i] == chan) {
-                                found = 1;
-                                break;
-                        } else if (net_dma->channels[i] == NULL && pos < 0)
-                                pos = i;
-
-                if (!found && pos >= 0) {
-                        ack = DMA_ACK;
-                        net_dma->channels[pos] = chan;
-                        cpu_set(pos, net_dma->channel_mask);
-                }
-                break;
-        case DMA_RESOURCE_REMOVED:
-                for (i = 0; i < nr_cpu_ids; i++)
-                        if (net_dma->channels[i] == chan) {
-                                found = 1;
-                                pos = i;
-                                break;
-                        }
-
-                if (found) {
-                        ack = DMA_ACK;
-                        cpu_clear(pos, net_dma->channel_mask);
-                        net_dma->channels[i] = NULL;
-                }
-                break;
-        default:
-                break;
-        }
-        spin_unlock(&net_dma->lock);
-
-        return ack;
-}
-
-/**
- * netdev_dma_register - register the networking subsystem as a DMA client
- */
-static int __init netdev_dma_register(void)
-{
-        net_dma.channels = kzalloc(nr_cpu_ids * sizeof(struct net_dma),
-                                   GFP_KERNEL);
-        if (unlikely(!net_dma.channels)) {
-                printk(KERN_NOTICE
-                       "netdev_dma: no memory for net_dma.channels\n");
-                return -ENOMEM;
-        }
-        spin_lock_init(&net_dma.lock);
-        dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
-        dmaengine_get();
-        return 0;
-}
-
-#else
-static int __init netdev_dma_register(void) { return -ENODEV; }
-#endif /* CONFIG_NET_DMA */
 
 /**
  * netdev_increment_features - increment feature set by one
@@ -5120,14 +5026,15 @@ static int __init net_dev_init(void)
         if (register_pernet_device(&default_device_ops))
                 goto out;
 
-        netdev_dma_register();
-
         open_softirq(NET_TX_SOFTIRQ, net_tx_action);
         open_softirq(NET_RX_SOFTIRQ, net_rx_action);
 
         hotcpu_notifier(dev_cpu_callback, 0);
         dst_init();
         dev_mcast_init();
+        #ifdef CONFIG_NET_DMA
+        dmaengine_get();
+        #endif
         rc = 0;
 out:
         return rc;
...