Commit 33bd6f06 authored by Mike Snitzer

dm table: make 'struct dm_table' definition accessible to all of DM core

Move 'struct dm_table' definition from dm-table.c to dm-core.h and
update DM core to access its members directly.

Helps optimize max_io_len() and other methods slightly.
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 7465d7ac
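
As background for the change below: with only a forward declaration of struct dm_table visible, DM core had to reach its members through exported getter functions; with the definition in dm-core.h, those accesses compile down to direct loads. A minimal standalone sketch of that pattern (plain C, not DM code; all names here are illustrative only):

	#include <stdio.h>

	struct table {
		int num_targets;	/* stands in for a dm_table member */
	};

	/* Definition hidden behind a forward declaration: every access
	 * pays for an out-of-line (possibly exported) function call. */
	int table_num_targets_call(struct table *t)
	{
		return t->num_targets;
	}

	/* Definition visible to the caller: the accessor can be a static
	 * inline that the compiler reduces to a single load. */
	static inline int table_num_targets_inline(struct table *t)
	{
		return t->num_targets;
	}

	int main(void)
	{
		struct table t = { .num_targets = 4 };

		printf("%d %d\n", table_num_targets_call(&t),
		       table_num_targets_inline(&t));
		return 0;
	}

This is exactly the trade made below: dm_get_size() and dm_get_stats() move from dm.c into dm-core.h as static inlines, and call sites such as max_io_len() dereference ti->table->md directly.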
drivers/md/dm-core.h
@@ -11,6 +11,7 @@
 #include <linux/kthread.h>
 #include <linux/ktime.h>
+#include <linux/genhd.h>
 #include <linux/blk-mq.h>
 
 #include <trace/events/block.h>
@@ -25,9 +26,11 @@ struct dm_kobject_holder {
 };
 
 /*
- * DM core internal structure that used directly by dm.c and dm-rq.c
- * DM targets must _not_ deference a mapped_device to directly access its members!
+ * DM core internal structures used directly by dm.c, dm-rq.c and dm-table.c.
+ * DM targets must _not_ deference a mapped_device or dm_table to directly
+ * access their members!
 */
 struct mapped_device {
 	struct mutex suspend_lock;
@@ -119,6 +122,55 @@ void disable_discard(struct mapped_device *md);
 void disable_write_same(struct mapped_device *md);
 void disable_write_zeroes(struct mapped_device *md);
 
+static inline sector_t dm_get_size(struct mapped_device *md)
+{
+	return get_capacity(md->disk);
+}
+
+static inline struct dm_stats *dm_get_stats(struct mapped_device *md)
+{
+	return &md->stats;
+}
+
+#define DM_TABLE_MAX_DEPTH 16
+
+struct dm_table {
+	struct mapped_device *md;
+	enum dm_queue_mode type;
+
+	/* btree table */
+	unsigned int depth;
+	unsigned int counts[DM_TABLE_MAX_DEPTH]; /* in nodes */
+	sector_t *index[DM_TABLE_MAX_DEPTH];
+
+	unsigned int num_targets;
+	unsigned int num_allocated;
+	sector_t *highs;
+	struct dm_target *targets;
+
+	struct target_type *immutable_target_type;
+
+	bool integrity_supported:1;
+	bool singleton:1;
+	unsigned integrity_added:1;
+
+	/*
+	 * Indicates the rw permissions for the new logical
+	 * device. This should be a combination of FMODE_READ
+	 * and FMODE_WRITE.
+	 */
+	fmode_t mode;
+
+	/* a list of devices used by this table */
+	struct list_head devices;
+
+	/* events get handed up using this callback */
+	void (*event_fn)(void *);
+	void *event_context;
+
+	struct dm_md_mempools *mempools;
+};
+
 static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
 {
 	return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
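
The "btree table" fields above (depth, counts[], index[], highs) describe an n-ary search tree over target boundary sectors. A simplified sketch of how a lookup consumes them, modeled on dm_table_find_target() in dm-table.c (the get_child()/get_node() helpers are expanded inline, the bounds check is omitted, and KEYS_PER_NODE/CHILDREN_PER_NODE are the dm-table.c defines shown further down):

	static struct dm_target *find_target_sketch(struct dm_table *t,
						    sector_t sector)
	{
		unsigned int l, n = 0, k = 0;
		sector_t *node;

		/* walk one level per iteration, t->depth levels in total */
		for (l = 0; l < t->depth; l++) {
			/* descend to node n's k-th child on the next level */
			n = (n * CHILDREN_PER_NODE) + k;
			node = t->index[l] + (n * KEYS_PER_NODE);

			/* each key is the highest sector its subtree covers */
			for (k = 0; k < KEYS_PER_NODE; k++)
				if (node[k] >= sector)
					break;
		}

		/* the leaf level indexes straight into t->highs/t->targets */
		return &t->targets[(KEYS_PER_NODE * n) + k];
	}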
drivers/md/dm-rq.c
@@ -175,7 +175,7 @@ static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long mse
 
 void dm_mq_kick_requeue_list(struct mapped_device *md)
 {
-	__dm_mq_kick_requeue_list(dm_get_md_queue(md), 0);
+	__dm_mq_kick_requeue_list(md->queue, 0);
 }
 EXPORT_SYMBOL(dm_mq_kick_requeue_list);
drivers/md/dm-table.c
@@ -25,48 +25,10 @@
 
 #define DM_MSG_PREFIX "table"
 
-#define MAX_DEPTH 16
 #define NODE_SIZE L1_CACHE_BYTES
 #define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
 #define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
 
-struct dm_table {
-	struct mapped_device *md;
-	enum dm_queue_mode type;
-
-	/* btree table */
-	unsigned int depth;
-	unsigned int counts[MAX_DEPTH]; /* in nodes */
-	sector_t *index[MAX_DEPTH];
-
-	unsigned int num_targets;
-	unsigned int num_allocated;
-	sector_t *highs;
-	struct dm_target *targets;
-
-	struct target_type *immutable_target_type;
-
-	bool integrity_supported:1;
-	bool singleton:1;
-	unsigned integrity_added:1;
-
-	/*
-	 * Indicates the rw permissions for the new logical
-	 * device. This should be a combination of FMODE_READ
-	 * and FMODE_WRITE.
-	 */
-	fmode_t mode;
-
-	/* a list of devices used by this table */
-	struct list_head devices;
-
-	/* events get handed up using this callback */
-	void (*event_fn)(void *);
-	void *event_context;
-
-	struct dm_md_mempools *mempools;
-};
-
 /*
  * Similar to ceiling(log_size(n))
  */
@@ -2085,16 +2047,11 @@ EXPORT_SYMBOL_GPL(dm_table_device_name);
 
 void dm_table_run_md_queue_async(struct dm_table *t)
 {
-	struct mapped_device *md;
-	struct request_queue *queue;
-
 	if (!dm_table_request_based(t))
 		return;
 
-	md = dm_table_get_md(t);
-	queue = dm_get_md_queue(md);
-	if (queue)
-		blk_mq_run_hw_queues(queue, true);
+	if (t->md->queue)
+		blk_mq_run_hw_queues(t->md->queue, true);
 }
 EXPORT_SYMBOL(dm_table_run_md_queue_async);
drivers/md/dm.c
@@ -422,21 +422,6 @@ static void do_deferred_remove(struct work_struct *w)
 	dm_deferred_remove();
 }
 
-sector_t dm_get_size(struct mapped_device *md)
-{
-	return get_capacity(md->disk);
-}
-
-struct request_queue *dm_get_md_queue(struct mapped_device *md)
-{
-	return md->queue;
-}
-
-struct dm_stats *dm_get_stats(struct mapped_device *md)
-{
-	return &md->stats;
-}
-
 static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 {
 	struct mapped_device *md = bdev->bd_disk->private_data;
@@ -1057,7 +1042,7 @@ static sector_t max_io_len(struct dm_target *ti, sector_t sector)
 	 *   blk_max_size_offset() provides required splitting.
 	 * - blk_max_size_offset() also respects q->limits.max_sectors
 	 */
-	max_len = blk_max_size_offset(dm_table_get_md(ti->table)->queue,
+	max_len = blk_max_size_offset(ti->table->md->queue,
				      target_offset);
 	if (len > max_len)
 		len = max_len;
@@ -2931,19 +2916,19 @@ int dm_test_deferred_remove_flag(struct mapped_device *md)
 
 int dm_suspended(struct dm_target *ti)
 {
-	return dm_suspended_md(dm_table_get_md(ti->table));
+	return dm_suspended_md(ti->table->md);
 }
 EXPORT_SYMBOL_GPL(dm_suspended);
 
 int dm_post_suspending(struct dm_target *ti)
 {
-	return dm_post_suspending_md(dm_table_get_md(ti->table));
+	return dm_post_suspending_md(ti->table->md);
 }
 EXPORT_SYMBOL_GPL(dm_post_suspending);
 
 int dm_noflush_suspending(struct dm_target *ti)
 {
-	return __noflush_suspending(dm_table_get_md(ti->table));
+	return __noflush_suspending(ti->table->md);
 }
 EXPORT_SYMBOL_GPL(dm_noflush_suspending);
drivers/md/dm.h
@@ -179,12 +179,9 @@ int dm_open_count(struct mapped_device *md);
 int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred);
 int dm_cancel_deferred_remove(struct mapped_device *md);
 int dm_request_based(struct mapped_device *md);
-sector_t dm_get_size(struct mapped_device *md);
-struct request_queue *dm_get_md_queue(struct mapped_device *md);
 int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result);
 void dm_put_table_device(struct mapped_device *md, struct dm_dev *d);
-struct dm_stats *dm_get_stats(struct mapped_device *md);
 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		      unsigned cookie);