block: document blk-plug
Thus spake Andrew Morton:

"And I have the usual maintainability whine. If someone comes up to
vmscan.c and sees it calling blk_start_plug(), how are they supposed
to work out why that call is there? They go look at the blk_start_plug()
definition and it is undocumented. I think we can do better than this?"

Adapted from the LWN article - http://lwn.net/Articles/438256/ by Jens
Axboe and from an earlier attempt by Shaohua Li to document blk-plug.

[akpm@linux-foundation.org: grammatical and spelling tweaks]
Signed-off-by: Suresh Jayaraman <sjayaraman@suse.de>
Cc: Shaohua Li <shaohua.li@intel.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Signed-off-by: Andrew Morton <akpm@google.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 27a84d54c0
commit 75df713627
2 changed files with 29 additions and 9 deletions
include/linux/blkdev.h
@@ -860,17 +860,23 @@ struct request_queue *blk_alloc_queue_node(gfp_t, int);
 extern void blk_put_queue(struct request_queue *);
 
 /*
- * Note: Code in between changing the blk_plug list/cb_list or element of such
- * lists is preemptable, but such code can't do sleep (or be very careful),
- * otherwise data is corrupted. For details, please check schedule() where
- * blk_schedule_flush_plug() is called.
+ * blk_plug permits building a queue of related requests by holding the I/O
+ * fragments for a short period. This allows merging of sequential requests
+ * into single larger request. As the requests are moved from a per-task list to
+ * the device's request_queue in a batch, this results in improved scalability
+ * as the lock contention for request_queue lock is reduced.
+ *
+ * It is ok not to disable preemption when adding the request to the plug list
+ * or when attempting a merge, because blk_schedule_flush_list() will only flush
+ * the plug list when the task sleeps by itself. For details, please see
+ * schedule() where blk_schedule_flush_plug() is called.
  */
 struct blk_plug {
-	unsigned long magic;
-	struct list_head list;
-	struct list_head cb_list;
-	unsigned int should_sort;
-	unsigned int count;
+	unsigned long magic; /* detect uninitialized use-cases */
+	struct list_head list; /* requests */
+	struct list_head cb_list; /* md requires an unplug callback */
+	unsigned int should_sort; /* list to be sorted before flushing? */
+	unsigned int count; /* number of queued requests */
 };
 
 #define BLK_MAX_REQUEST_COUNT 16
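For context, the pattern Andrew Morton was asking about in vmscan.c looks
roughly like the sketch below: a caller puts a struct blk_plug on its stack,
brackets a batch of I/O submissions with blk_start_plug()/blk_finish_plug(),
and lets the block layer merge the queued requests before they hit the
device's request_queue. submit_batch() and the submission loop inside it are
hypothetical stand-ins, not code from this commit.

/*
 * Illustrative sketch only; helper names here are made up.
 * While the plug is active, requests collect on the per-task
 * plug list instead of going straight to the request_queue.
 */
#include <linux/blkdev.h>

static void submit_batch(void)
{
	struct blk_plug plug;

	blk_start_plug(&plug);	/* start collecting requests on this task */
	/* ... submit a run of (ideally sequential) bios/requests ... */
	blk_finish_plug(&plug);	/* flush the per-task list to the queue(s) */
}

Because blk_finish_plug() (or a sleep inside schedule()) is what moves the
batch to the device, the caller pays the request_queue lock once per batch
rather than once per request, which is the scalability win the new comment
describes.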