Commit d386732b authored by André Almeida's avatar André Almeida Committed by Jens Axboe

blk-mq: fill header with kernel-doc

Insert documentation for structs, enums and functions in the header file.
Format existing and new comments in struct blk_mq_ops as
kernel-doc comments.
Reviewed-by: default avatarBart Van Assche <bvanassche@acm.org>
Signed-off-by: default avatarAndré Almeida <andrealmeid@collabora.com>
Signed-off-by: default avatarJens Axboe <axboe@kernel.dk>
parent 1fead718
...@@ -10,74 +10,171 @@ struct blk_mq_tags; ...@@ -10,74 +10,171 @@ struct blk_mq_tags;
struct blk_flush_queue; struct blk_flush_queue;
/** /**
* struct blk_mq_hw_ctx - State for a hardware queue facing the hardware block device * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
* block device
*/ */
struct blk_mq_hw_ctx { struct blk_mq_hw_ctx {
struct { struct {
/** @lock: Protects the dispatch list. */
spinlock_t lock; spinlock_t lock;
/**
* @dispatch: Used for requests that are ready to be
* dispatched to the hardware but for some reason (e.g. lack of
* resources) could not be sent to the hardware. As soon as the
* driver can send new requests, requests at this list will
* be sent first for a fairer dispatch.
*/
struct list_head dispatch; struct list_head dispatch;
unsigned long state; /* BLK_MQ_S_* flags */ /**
* @state: BLK_MQ_S_* flags. Defines the state of the hw
* queue (active, scheduled to restart, stopped).
*/
unsigned long state;
} ____cacheline_aligned_in_smp; } ____cacheline_aligned_in_smp;
/**
* @run_work: Used for scheduling a hardware queue run at a later time.
*/
struct delayed_work run_work; struct delayed_work run_work;
/** @cpumask: Map of available CPUs where this hctx can run. */
cpumask_var_t cpumask; cpumask_var_t cpumask;
/**
* @next_cpu: Used by blk_mq_hctx_next_cpu() for round-robin CPU
* selection from @cpumask.
*/
int next_cpu; int next_cpu;
/**
* @next_cpu_batch: Counter of how many works left in the batch before
* changing to the next CPU.
*/
int next_cpu_batch; int next_cpu_batch;
unsigned long flags; /* BLK_MQ_F_* flags */ /** @flags: BLK_MQ_F_* flags. Defines the behaviour of the queue. */
unsigned long flags;
/**
* @sched_data: Pointer owned by the IO scheduler attached to a request
* queue. It's up to the IO scheduler how to use this pointer.
*/
void *sched_data; void *sched_data;
/**
* @queue: Pointer to the request queue that owns this hardware context.
*/
struct request_queue *queue; struct request_queue *queue;
/** @fq: Queue of requests that need to perform a flush operation. */
struct blk_flush_queue *fq; struct blk_flush_queue *fq;
/**
* @driver_data: Pointer to data owned by the block driver that created
	 * this hctx.
*/
void *driver_data; void *driver_data;
/**
* @ctx_map: Bitmap for each software queue. If bit is on, there is a
* pending request in that software queue.
*/
struct sbitmap ctx_map; struct sbitmap ctx_map;
/**
* @dispatch_from: Software queue to be used when no scheduler was
* selected.
*/
struct blk_mq_ctx *dispatch_from; struct blk_mq_ctx *dispatch_from;
/**
* @dispatch_busy: Number used by blk_mq_update_dispatch_busy() to
* decide if the hw_queue is busy using Exponential Weighted Moving
* Average algorithm.
*/
unsigned int dispatch_busy; unsigned int dispatch_busy;
/** @type: HCTX_TYPE_* flags. Type of hardware queue. */
unsigned short type; unsigned short type;
/** @nr_ctx: Number of software queues. */
unsigned short nr_ctx; unsigned short nr_ctx;
/** @ctxs: Array of software queues. */
struct blk_mq_ctx **ctxs; struct blk_mq_ctx **ctxs;
/** @dispatch_wait_lock: Lock for dispatch_wait queue. */
spinlock_t dispatch_wait_lock; spinlock_t dispatch_wait_lock;
/**
* @dispatch_wait: Waitqueue to put requests when there is no tag
* available at the moment, to wait for another try in the future.
*/
wait_queue_entry_t dispatch_wait; wait_queue_entry_t dispatch_wait;
/**
* @wait_index: Index of next available dispatch_wait queue to insert
* requests.
*/
atomic_t wait_index; atomic_t wait_index;
/**
* @tags: Tags owned by the block driver. A tag at this set is only
* assigned when a request is dispatched from a hardware queue.
*/
struct blk_mq_tags *tags; struct blk_mq_tags *tags;
/**
* @sched_tags: Tags owned by I/O scheduler. If there is an I/O
* scheduler associated with a request queue, a tag is assigned when
* that request is allocated. Else, this member is not used.
*/
struct blk_mq_tags *sched_tags; struct blk_mq_tags *sched_tags;
/** @queued: Number of queued requests. */
unsigned long queued; unsigned long queued;
/** @run: Number of dispatched requests. */
unsigned long run; unsigned long run;
#define BLK_MQ_MAX_DISPATCH_ORDER 7 #define BLK_MQ_MAX_DISPATCH_ORDER 7
/** @dispatched: Number of dispatch requests by queue. */
unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER]; unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
/** @numa_node: NUMA node the storage adapter has been connected to. */
unsigned int numa_node; unsigned int numa_node;
/** @queue_num: Index of this hardware queue. */
unsigned int queue_num; unsigned int queue_num;
/**
* @nr_active: Number of active requests. Only used when a tag set is
* shared across request queues.
*/
atomic_t nr_active; atomic_t nr_active;
	/** @cpuhp_dead: List to store requests if a CPU dies. */
struct hlist_node cpuhp_dead; struct hlist_node cpuhp_dead;
/** @kobj: Kernel object for sysfs. */
struct kobject kobj; struct kobject kobj;
/** @poll_considered: Count times blk_poll() was called. */
unsigned long poll_considered; unsigned long poll_considered;
/** @poll_invoked: Count how many requests blk_poll() polled. */
unsigned long poll_invoked; unsigned long poll_invoked;
/** @poll_success: Count how many polled requests were completed. */
unsigned long poll_success; unsigned long poll_success;
#ifdef CONFIG_BLK_DEBUG_FS #ifdef CONFIG_BLK_DEBUG_FS
/**
* @debugfs_dir: debugfs directory for this hardware queue. Named
* as cpu<cpu_number>.
*/
struct dentry *debugfs_dir; struct dentry *debugfs_dir;
/** @sched_debugfs_dir: debugfs directory for the scheduler. */
struct dentry *sched_debugfs_dir; struct dentry *sched_debugfs_dir;
#endif #endif
/** @hctx_list: List of all hardware queues. */
struct list_head hctx_list; struct list_head hctx_list;
/* Must be the last member - see also blk_mq_hw_ctx_size(). */ /**
* @srcu: Sleepable RCU. Use as lock when type of the hardware queue is
* blocking (BLK_MQ_F_BLOCKING). Must be the last member - see also
* blk_mq_hw_ctx_size().
*/
struct srcu_struct srcu[0]; struct srcu_struct srcu[0];
}; };
/** /**
* struct blk_mq_queue_map - ctx -> hctx mapping * struct blk_mq_queue_map - Map software queues to hardware queues
* @mq_map: CPU ID to hardware queue index map. This is an array * @mq_map: CPU ID to hardware queue index map. This is an array
* with nr_cpu_ids elements. Each element has a value in the range * with nr_cpu_ids elements. Each element has a value in the range
* [@queue_offset, @queue_offset + @nr_queues). * [@queue_offset, @queue_offset + @nr_queues).
...@@ -92,10 +189,17 @@ struct blk_mq_queue_map { ...@@ -92,10 +189,17 @@ struct blk_mq_queue_map {
unsigned int queue_offset; unsigned int queue_offset;
}; };
/**
* enum hctx_type - Type of hardware queue
* @HCTX_TYPE_DEFAULT: All I/O not otherwise accounted for.
* @HCTX_TYPE_READ: Just for READ I/O.
* @HCTX_TYPE_POLL: Polled I/O of any kind.
* @HCTX_MAX_TYPES: Number of types of hctx.
*/
enum hctx_type { enum hctx_type {
HCTX_TYPE_DEFAULT, /* all I/O not otherwise accounted for */ HCTX_TYPE_DEFAULT,
HCTX_TYPE_READ, /* just for READ I/O */ HCTX_TYPE_READ,
HCTX_TYPE_POLL, /* polled I/O of any kind */ HCTX_TYPE_POLL,
HCTX_MAX_TYPES, HCTX_MAX_TYPES,
}; };
...@@ -147,6 +251,12 @@ struct blk_mq_tag_set { ...@@ -147,6 +251,12 @@ struct blk_mq_tag_set {
struct list_head tag_list; struct list_head tag_list;
}; };
/**
* struct blk_mq_queue_data - Data about a request inserted in a queue
*
* @rq: Request pointer.
* @last: If it is the last request in the queue.
*/
struct blk_mq_queue_data { struct blk_mq_queue_data {
struct request *rq; struct request *rq;
bool last; bool last;
...@@ -174,81 +284,101 @@ typedef bool (busy_fn)(struct request_queue *); ...@@ -174,81 +284,101 @@ typedef bool (busy_fn)(struct request_queue *);
typedef void (complete_fn)(struct request *); typedef void (complete_fn)(struct request *);
typedef void (cleanup_rq_fn)(struct request *); typedef void (cleanup_rq_fn)(struct request *);
/**
 * struct blk_mq_ops - Callback functions that implement block driver
* behaviour.
*/
struct blk_mq_ops { struct blk_mq_ops {
/* /**
* Queue request * @queue_rq: Queue a new request from block IO.
*/ */
queue_rq_fn *queue_rq; queue_rq_fn *queue_rq;
/* /**
* If a driver uses bd->last to judge when to submit requests to * @commit_rqs: If a driver uses bd->last to judge when to submit
* hardware, it must define this function. In case of errors that * requests to hardware, it must define this function. In case of errors
* make us stop issuing further requests, this hook serves the * that make us stop issuing further requests, this hook serves the
* purpose of kicking the hardware (which the last request otherwise * purpose of kicking the hardware (which the last request otherwise
* would have done). * would have done).
*/ */
commit_rqs_fn *commit_rqs; commit_rqs_fn *commit_rqs;
/* /**
* Reserve budget before queue request, once .queue_rq is * @get_budget: Reserve budget before queue request, once .queue_rq is
* run, it is driver's responsibility to release the * run, it is driver's responsibility to release the
* reserved budget. Also we have to handle failure case * reserved budget. Also we have to handle failure case
* of .get_budget for avoiding I/O deadlock. * of .get_budget for avoiding I/O deadlock.
*/ */
get_budget_fn *get_budget; get_budget_fn *get_budget;
/**
* @put_budget: Release the reserved budget.
*/
put_budget_fn *put_budget; put_budget_fn *put_budget;
/* /**
* Called on request timeout * @timeout: Called on request timeout.
*/ */
timeout_fn *timeout; timeout_fn *timeout;
/* /**
* Called to poll for completion of a specific tag. * @poll: Called to poll for completion of a specific tag.
*/ */
poll_fn *poll; poll_fn *poll;
/**
* @complete: Mark the request as complete.
*/
complete_fn *complete; complete_fn *complete;
/* /**
* Called when the block layer side of a hardware queue has been * @init_hctx: Called when the block layer side of a hardware queue has
* set up, allowing the driver to allocate/init matching structures. * been set up, allowing the driver to allocate/init matching
* Ditto for exit/teardown. * structures.
*/ */
init_hctx_fn *init_hctx; init_hctx_fn *init_hctx;
/**
* @exit_hctx: Ditto for exit/teardown.
*/
exit_hctx_fn *exit_hctx; exit_hctx_fn *exit_hctx;
/* /**
* Called for every command allocated by the block layer to allow * @init_request: Called for every command allocated by the block layer
* the driver to set up driver specific data. * to allow the driver to set up driver specific data.
* *
* Tag greater than or equal to queue_depth is for setting up * Tag greater than or equal to queue_depth is for setting up
* flush request. * flush request.
*
* Ditto for exit/teardown.
*/ */
init_request_fn *init_request; init_request_fn *init_request;
/**
* @exit_request: Ditto for exit/teardown.
*/
exit_request_fn *exit_request; exit_request_fn *exit_request;
/* Called from inside blk_get_request() */
/**
* @initialize_rq_fn: Called from inside blk_get_request().
*/
void (*initialize_rq_fn)(struct request *rq); void (*initialize_rq_fn)(struct request *rq);
/* /**
* Called before freeing one request which isn't completed yet, * @cleanup_rq: Called before freeing one request which isn't completed
* and usually for freeing the driver private data * yet, and usually for freeing the driver private data.
*/ */
cleanup_rq_fn *cleanup_rq; cleanup_rq_fn *cleanup_rq;
/* /**
* If set, returns whether or not this queue currently is busy * @busy: If set, returns whether or not this queue currently is busy.
*/ */
busy_fn *busy; busy_fn *busy;
/**
* @map_queues: This allows drivers specify their own queue mapping by
* overriding the setup-time function that builds the mq_map.
*/
map_queues_fn *map_queues; map_queues_fn *map_queues;
#ifdef CONFIG_BLK_DEBUG_FS #ifdef CONFIG_BLK_DEBUG_FS
/* /**
* Used by the debugfs implementation to show driver-specific * @show_rq: Used by the debugfs implementation to show driver-specific
* information about a request. * information about a request.
*/ */
void (*show_rq)(struct seq_file *m, struct request *rq); void (*show_rq)(struct seq_file *m, struct request *rq);
...@@ -391,14 +521,29 @@ void blk_mq_quiesce_queue_nowait(struct request_queue *q); ...@@ -391,14 +521,29 @@ void blk_mq_quiesce_queue_nowait(struct request_queue *q);
unsigned int blk_mq_rq_cpu(struct request *rq); unsigned int blk_mq_rq_cpu(struct request *rq);
/* /**
* blk_mq_rq_from_pdu - cast a PDU to a request
 * @pdu: the PDU (Protocol Data Unit) to be cast
*
* Return: request
*
* Driver command data is immediately after the request. So subtract request * Driver command data is immediately after the request. So subtract request
* size to get back to the original request, add request size to get the PDU. * size to get back to the original request.
*/ */
static inline struct request *blk_mq_rq_from_pdu(void *pdu) static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{ {
return pdu - sizeof(struct request); return pdu - sizeof(struct request);
} }
/**
* blk_mq_rq_to_pdu - cast a request to a PDU
 * @rq: the request to be cast
*
* Return: pointer to the PDU
*
* Driver command data is immediately after the request. So add request to get
* the PDU.
*/
static inline void *blk_mq_rq_to_pdu(struct request *rq) static inline void *blk_mq_rq_to_pdu(struct request *rq)
{ {
return rq + 1; return rq + 1;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment