Commit a9da0fb6 authored by Mike Snitzer

dm vdo: remove all sysfs interfaces

Also update target major version number.

All info is (or will be) accessible through alternative interfaces
(e.g. "dmsetup message", module parameters, etc.).
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
Signed-off-by: Ken Raeburn <raeburn@redhat.com>
Signed-off-by: Matthew Sakai <msakai@redhat.com>
parent 4e415248
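
For context on the interfaces the commit message refers to, the replacements take roughly the following form. The message name and parameter path shown here are illustrative only (they depend on what this and follow-up commits actually expose), not a definitive interface listing:

    # query a running vdo target via the device-mapper message interface
    dmsetup message <vdo-device-name> 0 stats

    # read a module parameter (if/when one is exposed), e.g. the log level
    cat /sys/module/dm_vdo/parameters/log_level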
......@@ -28,19 +28,15 @@ dm-vdo-objs := \
packer.o \
permassert.o \
physical-zone.o \
pool-sysfs.o \
pool-sysfs-stats.o \
priority-table.o \
recovery-journal.o \
repair.o \
slab-depot.o \
status-codes.o \
string-utils.o \
sysfs.o \
thread-device.o \
thread-registry.o \
thread-utils.o \
uds-sysfs.o \
vdo.o \
vio.o \
wait-queue.o \
......
......@@ -120,7 +120,6 @@
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/ratelimit.h>
#include <linux/spinlock.h>
......@@ -279,7 +278,6 @@ struct hash_lock {
struct hash_zones {
struct action_manager *manager;
struct kobject dedupe_directory;
struct uds_parameters parameters;
struct uds_index_session *index_session;
struct ratelimit_state ratelimiter;
......@@ -2022,56 +2020,6 @@ void vdo_share_compressed_write_lock(struct data_vio *data_vio,
VDO_ASSERT_LOG_ONLY(claimed, "impossible to fail to claim an initial increment");
}
static void dedupe_kobj_release(struct kobject *directory)
{
vdo_free(container_of(directory, struct hash_zones, dedupe_directory));
}
static ssize_t dedupe_status_show(struct kobject *directory, struct attribute *attr,
char *buf)
{
struct uds_attribute *ua = container_of(attr, struct uds_attribute, attr);
struct hash_zones *zones = container_of(directory, struct hash_zones,
dedupe_directory);
if (ua->show_string != NULL)
return sprintf(buf, "%s\n", ua->show_string(zones));
else
return -EINVAL;
}
static ssize_t dedupe_status_store(struct kobject *kobj __always_unused,
struct attribute *attr __always_unused,
const char *buf __always_unused,
size_t length __always_unused)
{
return -EINVAL;
}
/*----------------------------------------------------------------------*/
static const struct sysfs_ops dedupe_sysfs_ops = {
.show = dedupe_status_show,
.store = dedupe_status_store,
};
static struct uds_attribute dedupe_status_attribute = {
.attr = {.name = "status", .mode = 0444, },
.show_string = vdo_get_dedupe_index_state_name,
};
static struct attribute *dedupe_attrs[] = {
&dedupe_status_attribute.attr,
NULL,
};
ATTRIBUTE_GROUPS(dedupe);
static const struct kobj_type dedupe_directory_type = {
.release = dedupe_kobj_release,
.sysfs_ops = &dedupe_sysfs_ops,
.default_groups = dedupe_groups,
};
static void start_uds_queue(void *ptr)
{
/*
......@@ -2266,7 +2214,6 @@ static int initialize_index(struct vdo *vdo, struct hash_zones *zones)
vdo_initialize_completion(&zones->completion, vdo, VDO_HASH_ZONES_COMPLETION);
vdo_set_completion_callback(&zones->completion, change_dedupe_state,
vdo->thread_config.dedupe_thread);
kobject_init(&zones->dedupe_directory, &dedupe_directory_type);
return VDO_SUCCESS;
}
......@@ -2537,10 +2484,7 @@ void vdo_free_hash_zones(struct hash_zones *zones)
vdo_finish_dedupe_index(zones);
ratelimit_state_exit(&zones->ratelimiter);
if (vdo_get_admin_state_code(&zones->state) == VDO_ADMIN_STATE_NEW)
vdo_free(zones);
else
kobject_put(&zones->dedupe_directory);
vdo_free(zones);
}
static void initiate_suspend_index(struct admin_state *state)
......@@ -3047,17 +2991,9 @@ int vdo_message_dedupe_index(struct hash_zones *zones, const char *name)
return -EINVAL;
}
int vdo_add_dedupe_index_sysfs(struct hash_zones *zones)
void vdo_set_dedupe_state_normal(struct hash_zones *zones)
{
int result = kobject_add(&zones->dedupe_directory,
&zones->completion.vdo->vdo_directory, "dedupe");
if (result == 0) {
vdo_set_admin_state_code(&zones->state,
VDO_ADMIN_STATE_NORMAL_OPERATION);
}
return result;
vdo_set_admin_state_code(&zones->state, VDO_ADMIN_STATE_NORMAL_OPERATION);
}
/* If create_flag, create a new index without first attempting to load an existing index. */
......
......@@ -97,7 +97,7 @@ u64 vdo_get_dedupe_index_timeout_count(struct hash_zones *zones);
int vdo_message_dedupe_index(struct hash_zones *zones, const char *name);
int vdo_add_dedupe_index_sysfs(struct hash_zones *zones);
void vdo_set_dedupe_state_normal(struct hash_zones *zones);
void vdo_start_dedupe_index(struct hash_zones *zones, bool create_flag);
......
......@@ -27,7 +27,6 @@
#include "logger.h"
#include "memory-alloc.h"
#include "message-stats.h"
#include "pool-sysfs.h"
#include "recovery-journal.h"
#include "repair.h"
#include "slab-depot.h"
......@@ -36,7 +35,6 @@
#include "thread-device.h"
#include "thread-registry.h"
#include "types.h"
#include "uds-sysfs.h"
#include "vdo.h"
#include "vio.h"
......@@ -54,7 +52,6 @@ enum admin_phases {
GROW_PHYSICAL_PHASE_END,
GROW_PHYSICAL_PHASE_ERROR,
LOAD_PHASE_START,
LOAD_PHASE_STATS,
LOAD_PHASE_LOAD_DEPOT,
LOAD_PHASE_MAKE_DIRTY,
LOAD_PHASE_PREPARE_TO_ALLOCATE,
......@@ -104,7 +101,6 @@ static const char * const ADMIN_PHASE_NAMES[] = {
"GROW_PHYSICAL_PHASE_END",
"GROW_PHYSICAL_PHASE_ERROR",
"LOAD_PHASE_START",
"LOAD_PHASE_STATS",
"LOAD_PHASE_LOAD_DEPOT",
"LOAD_PHASE_MAKE_DIRTY",
"LOAD_PHASE_PREPARE_TO_ALLOCATE",
......@@ -947,8 +943,8 @@ static void vdo_io_hints(struct dm_target *ti, struct queue_limits *limits)
* blocked task warnings in kernel logs. In order to avoid these warnings, we choose to
* use the smallest reasonable value.
*
* The value is displayed in sysfs, and also used by dm-thin to determine whether to pass
* down discards. The block layer splits large discards on this boundary when this is set.
* The value is used by dm-thin to determine whether to pass down discards. The block layer
* splits large discards on this boundary when this is set.
*/
limits->max_discard_sectors =
(vdo->device_config->max_discard_blocks * VDO_SECTORS_PER_BLOCK);
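
As a worked illustration of the calculation above (using a hypothetical setting, not the driver's default): VDO uses 4 KiB blocks, so VDO_SECTORS_PER_BLOCK is 8; a max_discard_blocks value of 1024 would give max_discard_sectors = 1024 * 8 = 8192 sectors, meaning large discards are split on a 4 MiB boundary.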
......@@ -2174,32 +2170,6 @@ static enum slab_depot_load_type get_load_type(struct vdo *vdo)
return VDO_SLAB_DEPOT_NORMAL_LOAD;
}
/**
* vdo_initialize_kobjects() - Initialize the vdo sysfs directory.
* @vdo: The vdo being initialized.
*
* Return: VDO_SUCCESS or an error code.
*/
static int vdo_initialize_kobjects(struct vdo *vdo)
{
int result;
struct dm_target *target = vdo->device_config->owning_target;
struct mapped_device *md = dm_table_get_md(target->table);
kobject_init(&vdo->vdo_directory, &vdo_directory_type);
vdo->sysfs_added = true;
result = kobject_add(&vdo->vdo_directory, &disk_to_dev(dm_disk(md))->kobj,
"vdo");
if (result != 0)
return VDO_CANT_ADD_SYSFS_NODE;
result = vdo_add_dedupe_index_sysfs(vdo->hash_zones);
if (result != 0)
return VDO_CANT_ADD_SYSFS_NODE;
return vdo_add_sysfs_stats_dir(vdo);
}
/**
* load_callback() - Callback to do the destructive parts of loading a VDO.
* @completion: The sub-task completion.
......@@ -2225,11 +2195,8 @@ static void load_callback(struct vdo_completion *completion)
vdo_allow_read_only_mode_entry(completion);
return;
case LOAD_PHASE_STATS:
vdo_continue_completion(completion, vdo_initialize_kobjects(vdo));
return;
case LOAD_PHASE_LOAD_DEPOT:
vdo_set_dedupe_state_normal(vdo->hash_zones);
if (vdo_is_read_only(vdo)) {
/*
* In read-only mode we don't use the allocator and it may not even be
......@@ -2866,7 +2833,7 @@ static void vdo_resume(struct dm_target *ti)
static struct target_type vdo_target_bio = {
.features = DM_TARGET_SINGLETON,
.name = "vdo",
.version = { 8, 2, 0 },
.version = { 9, 0, 0 },
.module = THIS_MODULE,
.ctr = vdo_ctr,
.dtr = vdo_dtr,
......@@ -2905,8 +2872,6 @@ static int __init vdo_init(void)
/* Memory tracking must be initialized first for accurate accounting. */
vdo_memory_init();
uds_init_sysfs();
vdo_initialize_thread_device_registry();
vdo_initialize_device_registry_once();
uds_log_info("loaded version %s", CURRENT_VERSION);
......@@ -2933,7 +2898,6 @@ static int __init vdo_init(void)
static void __exit vdo_exit(void)
{
vdo_module_destroy();
uds_put_sysfs();
/* Memory tracking cleanup must be done last. */
vdo_memory_exit();
}
......
......@@ -56,11 +56,6 @@ int uds_get_log_level(void)
return log_level;
}
void uds_set_log_level(int new_log_level)
{
log_level = new_log_level;
}
int uds_log_string_to_priority(const char *string)
{
int i;
......
......@@ -37,8 +37,6 @@
int uds_get_log_level(void);
void uds_set_log_level(int new_log_level);
int uds_log_string_to_priority(const char *string);
const char *uds_log_priority_to_string(int priority);
......
(The diff for one file is collapsed and not shown.)
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2023 Red Hat
*/
#include "pool-sysfs.h"
#include <linux/kstrtox.h>
#include "memory-alloc.h"
#include "string-utils.h"
#include "data-vio.h"
#include "dedupe.h"
#include "vdo.h"
struct pool_attribute {
struct attribute attr;
ssize_t (*show)(struct vdo *vdo, char *buf);
ssize_t (*store)(struct vdo *vdo, const char *value, size_t count);
};
static ssize_t vdo_pool_attr_show(struct kobject *directory, struct attribute *attr,
char *buf)
{
struct pool_attribute *pool_attr = container_of(attr, struct pool_attribute,
attr);
struct vdo *vdo = container_of(directory, struct vdo, vdo_directory);
if (pool_attr->show == NULL)
return -EINVAL;
return pool_attr->show(vdo, buf);
}
static ssize_t vdo_pool_attr_store(struct kobject *directory, struct attribute *attr,
const char *buf, size_t length)
{
struct pool_attribute *pool_attr = container_of(attr, struct pool_attribute,
attr);
struct vdo *vdo = container_of(directory, struct vdo, vdo_directory);
if (pool_attr->store == NULL)
return -EINVAL;
return pool_attr->store(vdo, buf, length);
}
static const struct sysfs_ops vdo_pool_sysfs_ops = {
.show = vdo_pool_attr_show,
.store = vdo_pool_attr_store,
};
static ssize_t pool_compressing_show(struct vdo *vdo, char *buf)
{
return sprintf(buf, "%s\n", (vdo_get_compressing(vdo) ? "1" : "0"));
}
static ssize_t pool_discards_active_show(struct vdo *vdo, char *buf)
{
return sprintf(buf, "%u\n",
get_data_vio_pool_active_discards(vdo->data_vio_pool));
}
static ssize_t pool_discards_limit_show(struct vdo *vdo, char *buf)
{
return sprintf(buf, "%u\n", get_data_vio_pool_discard_limit(vdo->data_vio_pool));
}
static ssize_t pool_discards_limit_store(struct vdo *vdo, const char *buf, size_t length)
{
unsigned int value;
int result;
if ((length > 12) || (kstrtouint(buf, 10, &value) < 0) || (value < 1))
return -EINVAL;
result = set_data_vio_pool_discard_limit(vdo->data_vio_pool, value);
if (result != VDO_SUCCESS)
return -EINVAL;
return length;
}
static ssize_t pool_discards_maximum_show(struct vdo *vdo, char *buf)
{
return sprintf(buf, "%u\n",
get_data_vio_pool_maximum_discards(vdo->data_vio_pool));
}
static ssize_t pool_instance_show(struct vdo *vdo, char *buf)
{
return sprintf(buf, "%u\n", vdo->instance);
}
static ssize_t pool_requests_active_show(struct vdo *vdo, char *buf)
{
return sprintf(buf, "%u\n",
get_data_vio_pool_active_requests(vdo->data_vio_pool));
}
static ssize_t pool_requests_limit_show(struct vdo *vdo, char *buf)
{
return sprintf(buf, "%u\n", get_data_vio_pool_request_limit(vdo->data_vio_pool));
}
static ssize_t pool_requests_maximum_show(struct vdo *vdo, char *buf)
{
return sprintf(buf, "%u\n",
get_data_vio_pool_maximum_requests(vdo->data_vio_pool));
}
static void vdo_pool_release(struct kobject *directory)
{
vdo_free(container_of(directory, struct vdo, vdo_directory));
}
static struct pool_attribute vdo_pool_compressing_attr = {
.attr = {
.name = "compressing",
.mode = 0444,
},
.show = pool_compressing_show,
};
static struct pool_attribute vdo_pool_discards_active_attr = {
.attr = {
.name = "discards_active",
.mode = 0444,
},
.show = pool_discards_active_show,
};
static struct pool_attribute vdo_pool_discards_limit_attr = {
.attr = {
.name = "discards_limit",
.mode = 0644,
},
.show = pool_discards_limit_show,
.store = pool_discards_limit_store,
};
static struct pool_attribute vdo_pool_discards_maximum_attr = {
.attr = {
.name = "discards_maximum",
.mode = 0444,
},
.show = pool_discards_maximum_show,
};
static struct pool_attribute vdo_pool_instance_attr = {
.attr = {
.name = "instance",
.mode = 0444,
},
.show = pool_instance_show,
};
static struct pool_attribute vdo_pool_requests_active_attr = {
.attr = {
.name = "requests_active",
.mode = 0444,
},
.show = pool_requests_active_show,
};
static struct pool_attribute vdo_pool_requests_limit_attr = {
.attr = {
.name = "requests_limit",
.mode = 0444,
},
.show = pool_requests_limit_show,
};
static struct pool_attribute vdo_pool_requests_maximum_attr = {
.attr = {
.name = "requests_maximum",
.mode = 0444,
},
.show = pool_requests_maximum_show,
};
static struct attribute *pool_attrs[] = {
&vdo_pool_compressing_attr.attr,
&vdo_pool_discards_active_attr.attr,
&vdo_pool_discards_limit_attr.attr,
&vdo_pool_discards_maximum_attr.attr,
&vdo_pool_instance_attr.attr,
&vdo_pool_requests_active_attr.attr,
&vdo_pool_requests_limit_attr.attr,
&vdo_pool_requests_maximum_attr.attr,
NULL,
};
ATTRIBUTE_GROUPS(pool);
const struct kobj_type vdo_directory_type = {
.release = vdo_pool_release,
.sysfs_ops = &vdo_pool_sysfs_ops,
.default_groups = pool_groups,
};
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2023 Red Hat
*/
#ifndef VDO_POOL_SYSFS_H
#define VDO_POOL_SYSFS_H
#include <linux/kobject.h>
/* The kobj_type used for setting up the kernel layer kobject. */
extern const struct kobj_type vdo_directory_type;
/* The sysfs_ops used for the "statistics" subdirectory. */
extern const struct sysfs_ops vdo_pool_stats_sysfs_ops;
/* The attribute used for the "statistics" subdirectory. */
extern struct attribute *vdo_pool_stats_attrs[];
#endif /* VDO_POOL_SYSFS_H */
......@@ -38,7 +38,6 @@ const struct error_info vdo_status_list[] = {
{ "VDO_BAD_NONCE", "Bad nonce" },
{ "VDO_JOURNAL_OVERFLOW", "Journal sequence number overflow" },
{ "VDO_INVALID_ADMIN_STATE", "Invalid operation for current state" },
{ "VDO_CANT_ADD_SYSFS_NODE", "Failed to add sysfs node" },
};
static atomic_t vdo_status_codes_registered = ATOMIC_INIT(0);
......
......@@ -72,8 +72,6 @@ enum vdo_status_codes {
VDO_JOURNAL_OVERFLOW,
/* the VDO is not in a state to perform an admin operation */
VDO_INVALID_ADMIN_STATE,
/* failure adding a sysfs node */
VDO_CANT_ADD_SYSFS_NODE,
/* one more than last error code */
VDO_STATUS_CODE_LAST,
VDO_STATUS_CODE_BLOCK_END = VDO_ERRORS_BLOCK_END
......
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2023 Red Hat
*/
#include <linux/module.h>
#include "logger.h"
#include "constants.h"
#include "dedupe.h"
#include "vdo.h"
static int vdo_log_level_show(char *buf, const struct kernel_param *kp)
{
return sprintf(buf, "%s\n", uds_log_priority_to_string(uds_get_log_level()));
}
static int vdo_log_level_store(const char *buf, const struct kernel_param *kp)
{
static char internal_buf[11];
int n = strlen(buf);
if (n > 10)
return -EINVAL;
memset(internal_buf, '\000', sizeof(internal_buf));
memcpy(internal_buf, buf, n);
if (internal_buf[n - 1] == '\n')
internal_buf[n - 1] = '\000';
uds_set_log_level(uds_log_string_to_priority(internal_buf));
return 0;
}
static int vdo_dedupe_timeout_interval_store(const char *buf,
const struct kernel_param *kp)
{
int result = param_set_uint(buf, kp);
if (result != 0)
return result;
vdo_set_dedupe_index_timeout_interval(*(uint *)kp->arg);
return 0;
}
static int vdo_min_dedupe_timer_interval_store(const char *buf,
const struct kernel_param *kp)
{
int result = param_set_uint(buf, kp);
if (result != 0)
return result;
vdo_set_dedupe_index_min_timer_interval(*(uint *)kp->arg);
return 0;
}
static const struct kernel_param_ops log_level_ops = {
.set = vdo_log_level_store,
.get = vdo_log_level_show,
};
static const struct kernel_param_ops dedupe_timeout_ops = {
.set = vdo_dedupe_timeout_interval_store,
.get = param_get_uint,
};
static const struct kernel_param_ops dedupe_timer_ops = {
.set = vdo_min_dedupe_timer_interval_store,
.get = param_get_uint,
};
module_param_cb(log_level, &log_level_ops, NULL, 0644);
module_param_cb(deduplication_timeout_interval, &dedupe_timeout_ops,
&vdo_dedupe_index_timeout_interval, 0644);
module_param_cb(min_deduplication_timer_interval, &dedupe_timer_ops,
&vdo_dedupe_index_min_timer_interval, 0644);
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2023 Red Hat
*/
#include "uds-sysfs.h"
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "logger.h"
#include "memory-alloc.h"
#include "string-utils.h"
#include "indexer.h"
#define UDS_SYSFS_NAME "uds"
static struct {
/* /sys/uds */
struct kobject kobj;
/* /sys/uds/parameter */
struct kobject parameter_kobj;
/* These flags are used to ensure a clean shutdown */
/* /sys/uds flag */
bool flag;
/* /sys/uds/parameter flag */
bool parameter_flag;
} object_root;
static char *buffer_to_string(const char *buf, size_t length)
{
char *string;
if (vdo_allocate(length + 1, char, __func__, &string) != VDO_SUCCESS)
return NULL;
memcpy(string, buf, length);
string[length] = '\0';
if (string[length - 1] == '\n')
string[length - 1] = '\0';
return string;
}
/*
* This is the code for any directory in the /sys/<module_name> tree that contains no regular files
* (only subdirectories).
*/
static void empty_release(struct kobject *kobj)
{
}
static ssize_t empty_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
return 0;
}
static ssize_t empty_store(struct kobject *kobj, struct attribute *attr, const char *buf,
size_t length)
{
return length;
}
static const struct sysfs_ops empty_ops = {
.show = empty_show,
.store = empty_store,
};
static struct attribute *empty_attrs[] = {
NULL,
};
ATTRIBUTE_GROUPS(empty);
static const struct kobj_type empty_object_type = {
.release = empty_release,
.sysfs_ops = &empty_ops,
.default_groups = empty_groups,
};
/*
* This is the code for the /sys/<module_name>/parameter directory.
* <dir>/log_level UDS_LOG_LEVEL
*/
struct parameter_attribute {
struct attribute attr;
const char *(*show_string)(void);
void (*store_string)(const char *string);
};
static ssize_t parameter_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
struct parameter_attribute *pa;
pa = container_of(attr, struct parameter_attribute, attr);
if (pa->show_string != NULL)
return sprintf(buf, "%s\n", pa->show_string());
else
return -EINVAL;
}
static ssize_t parameter_store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t length)
{
char *string;
struct parameter_attribute *pa;
pa = container_of(attr, struct parameter_attribute, attr);
if (pa->store_string == NULL)
return -EINVAL;
string = buffer_to_string(buf, length);
if (string == NULL)
return -ENOMEM;
pa->store_string(string);
vdo_free(string);
return length;
}
static const char *parameter_show_log_level(void)
{
return uds_log_priority_to_string(uds_get_log_level());
}
static void parameter_store_log_level(const char *string)
{
uds_set_log_level(uds_log_string_to_priority(string));
}
static struct parameter_attribute log_level_attr = {
.attr = { .name = "log_level", .mode = 0600 },
.show_string = parameter_show_log_level,
.store_string = parameter_store_log_level,
};
static struct attribute *parameter_attrs[] = {
&log_level_attr.attr,
NULL,
};
ATTRIBUTE_GROUPS(parameter);
static const struct sysfs_ops parameter_ops = {
.show = parameter_show,
.store = parameter_store,
};
static const struct kobj_type parameter_object_type = {
.release = empty_release,
.sysfs_ops = &parameter_ops,
.default_groups = parameter_groups,
};
int uds_init_sysfs(void)
{
int result;
memset(&object_root, 0, sizeof(object_root));
kobject_init(&object_root.kobj, &empty_object_type);
result = kobject_add(&object_root.kobj, NULL, UDS_SYSFS_NAME);
if (result == 0) {
object_root.flag = true;
kobject_init(&object_root.parameter_kobj, &parameter_object_type);
result = kobject_add(&object_root.parameter_kobj, &object_root.kobj,
"parameter");
if (result == 0)
object_root.parameter_flag = true;
}
if (result != 0)
uds_put_sysfs();
return result;
}
void uds_put_sysfs(void)
{
if (object_root.parameter_flag)
kobject_put(&object_root.parameter_kobj);
if (object_root.flag)
kobject_put(&object_root.kobj);
}
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2023 Red Hat
*/
#ifndef UDS_SYSFS_H
#define UDS_SYSFS_H
int uds_init_sysfs(void);
void uds_put_sysfs(void);
#endif /* UDS_SYSFS_H */
......@@ -53,7 +53,6 @@
#include "logical-zone.h"
#include "packer.h"
#include "physical-zone.h"
#include "pool-sysfs.h"
#include "recovery-journal.h"
#include "slab-depot.h"
#include "statistics.h"
......@@ -691,13 +690,6 @@ void vdo_destroy(struct vdo *vdo)
vdo->allocations_allowed = true;
/* Stop services that need to gather VDO statistics from the worker threads. */
if (vdo->sysfs_added) {
init_completion(&vdo->stats_shutdown);
kobject_put(&vdo->stats_directory);
wait_for_completion(&vdo->stats_shutdown);
}
finish_vdo(vdo);
unregister_vdo(vdo);
free_data_vio_pool(vdo->data_vio_pool);
......@@ -732,15 +724,7 @@ void vdo_destroy(struct vdo *vdo)
vdo_free(vdo_forget(vdo->compression_context));
}
/*
* The call to kobject_put on the kobj sysfs node will decrement its reference count; when
* the count goes to zero the VDO object will be freed as a side effect.
*/
if (!vdo->sysfs_added)
vdo_free(vdo);
else
kobject_put(&vdo->vdo_directory);
vdo_free(vdo);
}
static int initialize_super_block(struct vdo *vdo, struct vdo_super_block *super_block)
......@@ -817,42 +801,6 @@ void vdo_load_super_block(struct vdo *vdo, struct vdo_completion *parent)
REQ_OP_READ);
}
/**
* pool_stats_release() - Signal that sysfs stats have been shut down.
* @directory: The vdo stats directory.
*/
static void pool_stats_release(struct kobject *directory)
{
struct vdo *vdo = container_of(directory, struct vdo, stats_directory);
complete(&vdo->stats_shutdown);
}
ATTRIBUTE_GROUPS(vdo_pool_stats);
static const struct kobj_type stats_directory_type = {
.release = pool_stats_release,
.sysfs_ops = &vdo_pool_stats_sysfs_ops,
.default_groups = vdo_pool_stats_groups,
};
/**
* vdo_add_sysfs_stats_dir() - Add the stats directory to the vdo sysfs directory.
* @vdo: The vdo.
*
* Return: VDO_SUCCESS or an error.
*/
int vdo_add_sysfs_stats_dir(struct vdo *vdo)
{
int result;
kobject_init(&vdo->stats_directory, &stats_directory_type);
result = kobject_add(&vdo->stats_directory, &vdo->vdo_directory, "statistics");
if (result != 0)
return VDO_CANT_ADD_SYSFS_NODE;
return VDO_SUCCESS;
}
/**
* vdo_get_backing_device() - Get the block device object underlying a vdo.
* @vdo: The vdo.
......
......@@ -10,7 +10,6 @@
#include <linux/blk_types.h>
#include <linux/completion.h>
#include <linux/dm-kcopyd.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/spinlock.h>
......@@ -248,11 +247,6 @@ struct vdo {
struct vdo_statistics stats_buffer;
/* Protects the stats_buffer */
struct mutex stats_mutex;
/* true if sysfs directory is set up */
bool sysfs_added;
/* Used when shutting down the sysfs statistics */
struct completion stats_shutdown;
/* A list of all device_configs referencing this vdo */
struct list_head device_config_list;
......@@ -264,15 +258,10 @@ struct vdo {
u64 starting_sector_offset;
struct volume_geometry geometry;
/* For sysfs */
struct kobject vdo_directory;
struct kobject stats_directory;
/* N blobs of context data for LZ4 code, one per CPU thread. */
char **compression_context;
};
/**
* vdo_uses_bio_ack_queue() - Indicate whether the vdo is configured to use a separate work queue
* for acknowledging received and processed bios.
......@@ -315,8 +304,6 @@ void vdo_destroy(struct vdo *vdo);
void vdo_load_super_block(struct vdo *vdo, struct vdo_completion *parent);
int __must_check vdo_add_sysfs_stats_dir(struct vdo *vdo);
struct block_device * __must_check vdo_get_backing_device(const struct vdo *vdo);
const char * __must_check vdo_get_device_name(const struct dm_target *target);
......