Commit 714b5b4c authored by Nuno Sa, committed by Jonathan Cameron

iio: buffer: move to the cleanup.h magic

Use the new cleanup magic for handling mutexes in IIO. This allows us to
greatly simplify some code paths.
Signed-off-by: Nuno Sa <nuno.sa@analog.com>
Link: https://lore.kernel.org/r/20240229-iio-use-cleanup-magic-v3-3-c3d34889ae3c@analog.com
Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
parent 095be2d5
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
* - Alternative access techniques? * - Alternative access techniques?
*/ */
#include <linux/anon_inodes.h> #include <linux/anon_inodes.h>
#include <linux/cleanup.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/export.h> #include <linux/export.h>
#include <linux/device.h> #include <linux/device.h>
...@@ -533,28 +534,26 @@ static ssize_t iio_scan_el_store(struct device *dev, ...@@ -533,28 +534,26 @@ static ssize_t iio_scan_el_store(struct device *dev,
ret = kstrtobool(buf, &state); ret = kstrtobool(buf, &state);
if (ret < 0) if (ret < 0)
return ret; return ret;
mutex_lock(&iio_dev_opaque->mlock);
if (iio_buffer_is_active(buffer)) { guard(mutex)(&iio_dev_opaque->mlock);
ret = -EBUSY; if (iio_buffer_is_active(buffer))
goto error_ret; return -EBUSY;
}
ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address); ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
if (ret < 0) if (ret < 0)
goto error_ret; return ret;
if (!state && ret) {
ret = iio_scan_mask_clear(buffer, this_attr->address); if (state && ret)
if (ret) return len;
goto error_ret;
} else if (state && !ret) { if (state)
ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address); ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
else
ret = iio_scan_mask_clear(buffer, this_attr->address);
if (ret) if (ret)
goto error_ret; return ret;
}
error_ret:
mutex_unlock(&iio_dev_opaque->mlock);
return ret < 0 ? ret : len; return len;
} }
static ssize_t iio_scan_el_ts_show(struct device *dev, static ssize_t iio_scan_el_ts_show(struct device *dev,
...@@ -581,16 +580,13 @@ static ssize_t iio_scan_el_ts_store(struct device *dev, ...@@ -581,16 +580,13 @@ static ssize_t iio_scan_el_ts_store(struct device *dev,
if (ret < 0) if (ret < 0)
return ret; return ret;
mutex_lock(&iio_dev_opaque->mlock); guard(mutex)(&iio_dev_opaque->mlock);
if (iio_buffer_is_active(buffer)) { if (iio_buffer_is_active(buffer))
ret = -EBUSY; return -EBUSY;
goto error_ret;
}
buffer->scan_timestamp = state; buffer->scan_timestamp = state;
error_ret:
mutex_unlock(&iio_dev_opaque->mlock);
return ret ? ret : len; return len;
} }
static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev, static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
...@@ -674,21 +670,16 @@ static ssize_t length_store(struct device *dev, struct device_attribute *attr, ...@@ -674,21 +670,16 @@ static ssize_t length_store(struct device *dev, struct device_attribute *attr,
if (val == buffer->length) if (val == buffer->length)
return len; return len;
mutex_lock(&iio_dev_opaque->mlock); guard(mutex)(&iio_dev_opaque->mlock);
if (iio_buffer_is_active(buffer)) { if (iio_buffer_is_active(buffer))
ret = -EBUSY; return -EBUSY;
} else {
buffer->access->set_length(buffer, val); buffer->access->set_length(buffer, val);
ret = 0;
}
if (ret)
goto out;
if (buffer->length && buffer->length < buffer->watermark) if (buffer->length && buffer->length < buffer->watermark)
buffer->watermark = buffer->length; buffer->watermark = buffer->length;
out:
mutex_unlock(&iio_dev_opaque->mlock);
return ret ? ret : len; return len;
} }
static ssize_t enable_show(struct device *dev, struct device_attribute *attr, static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
...@@ -1268,7 +1259,6 @@ int iio_update_buffers(struct iio_dev *indio_dev, ...@@ -1268,7 +1259,6 @@ int iio_update_buffers(struct iio_dev *indio_dev,
struct iio_buffer *remove_buffer) struct iio_buffer *remove_buffer)
{ {
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
int ret;
if (insert_buffer == remove_buffer) if (insert_buffer == remove_buffer)
return 0; return 0;
...@@ -1277,8 +1267,8 @@ int iio_update_buffers(struct iio_dev *indio_dev, ...@@ -1277,8 +1267,8 @@ int iio_update_buffers(struct iio_dev *indio_dev,
insert_buffer->direction == IIO_BUFFER_DIRECTION_OUT) insert_buffer->direction == IIO_BUFFER_DIRECTION_OUT)
return -EINVAL; return -EINVAL;
mutex_lock(&iio_dev_opaque->info_exist_lock); guard(mutex)(&iio_dev_opaque->info_exist_lock);
mutex_lock(&iio_dev_opaque->mlock); guard(mutex)(&iio_dev_opaque->mlock);
if (insert_buffer && iio_buffer_is_active(insert_buffer)) if (insert_buffer && iio_buffer_is_active(insert_buffer))
insert_buffer = NULL; insert_buffer = NULL;
...@@ -1286,23 +1276,13 @@ int iio_update_buffers(struct iio_dev *indio_dev, ...@@ -1286,23 +1276,13 @@ int iio_update_buffers(struct iio_dev *indio_dev,
if (remove_buffer && !iio_buffer_is_active(remove_buffer)) if (remove_buffer && !iio_buffer_is_active(remove_buffer))
remove_buffer = NULL; remove_buffer = NULL;
if (!insert_buffer && !remove_buffer) { if (!insert_buffer && !remove_buffer)
ret = 0; return 0;
goto out_unlock;
}
if (!indio_dev->info) {
ret = -ENODEV;
goto out_unlock;
}
ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);
out_unlock: if (!indio_dev->info)
mutex_unlock(&iio_dev_opaque->mlock); return -ENODEV;
mutex_unlock(&iio_dev_opaque->info_exist_lock);
return ret; return __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);
} }
EXPORT_SYMBOL_GPL(iio_update_buffers); EXPORT_SYMBOL_GPL(iio_update_buffers);
...@@ -1326,22 +1306,22 @@ static ssize_t enable_store(struct device *dev, struct device_attribute *attr, ...@@ -1326,22 +1306,22 @@ static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
if (ret < 0) if (ret < 0)
return ret; return ret;
mutex_lock(&iio_dev_opaque->mlock); guard(mutex)(&iio_dev_opaque->mlock);
/* Find out if it is in the list */ /* Find out if it is in the list */
inlist = iio_buffer_is_active(buffer); inlist = iio_buffer_is_active(buffer);
/* Already in desired state */ /* Already in desired state */
if (inlist == requested_state) if (inlist == requested_state)
goto done; return len;
if (requested_state) if (requested_state)
ret = __iio_update_buffers(indio_dev, buffer, NULL); ret = __iio_update_buffers(indio_dev, buffer, NULL);
else else
ret = __iio_update_buffers(indio_dev, NULL, buffer); ret = __iio_update_buffers(indio_dev, NULL, buffer);
if (ret)
return ret;
done: return len;
mutex_unlock(&iio_dev_opaque->mlock);
return (ret < 0) ? ret : len;
} }
static ssize_t watermark_show(struct device *dev, struct device_attribute *attr, static ssize_t watermark_show(struct device *dev, struct device_attribute *attr,
...@@ -1368,23 +1348,17 @@ static ssize_t watermark_store(struct device *dev, ...@@ -1368,23 +1348,17 @@ static ssize_t watermark_store(struct device *dev,
if (!val) if (!val)
return -EINVAL; return -EINVAL;
mutex_lock(&iio_dev_opaque->mlock); guard(mutex)(&iio_dev_opaque->mlock);
if (val > buffer->length) { if (val > buffer->length)
ret = -EINVAL; return -EINVAL;
goto out;
}
if (iio_buffer_is_active(buffer)) { if (iio_buffer_is_active(buffer))
ret = -EBUSY; return -EBUSY;
goto out;
}
buffer->watermark = val; buffer->watermark = val;
out:
mutex_unlock(&iio_dev_opaque->mlock);
return ret ? ret : len; return len;
} }
static ssize_t data_available_show(struct device *dev, static ssize_t data_available_show(struct device *dev,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment