block: cleanup the flush plug helpers
Consolidate the various helpers into a single blk_flush_plug helper that takes a blk_plug and the from_schedule bool, and switch all callsites to call it directly. Checks that the plug is non-NULL must now be performed by the caller, something that most already do anyway.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20211020144119.142582-5-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent b600455d84
commit 008f75a20e

4 changed files with 16 additions and 36 deletions
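As the message says, the NULL check moves out of the helpers and into each caller. A minimal before/after sketch of the calling convention (illustrative caller code, not copied verbatim from the diff):

	/* Before: inline wrappers took the task_struct and hid the NULL check. */
	blk_flush_plug(current);		/* flushed tsk->plug, from_schedule == false */
	blk_schedule_flush_plug(current);	/* flushed tsk->plug, from_schedule == true */

	/* After: the caller checks for a NULL plug itself and passes the
	 * blk_plug plus the from_schedule flag directly. */
	if (current->plug)
		blk_flush_plug(current->plug, false);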
block/blk-core.c
@@ -1089,7 +1089,7 @@ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
 		return 0;
 
 	if (current->plug)
-		blk_flush_plug_list(current->plug, false);
+		blk_flush_plug(current->plug, false);
 
 	if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT))
 		return 0;
@@ -1637,7 +1637,7 @@ struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
 }
 EXPORT_SYMBOL(blk_check_plugged);
 
-void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
+void blk_flush_plug(struct blk_plug *plug, bool from_schedule)
 {
 	if (!list_empty(&plug->cb_list))
 		flush_plug_callbacks(plug, from_schedule);
@@ -1659,11 +1659,10 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
  */
 void blk_finish_plug(struct blk_plug *plug)
 {
-	if (plug != current->plug)
-		return;
-	blk_flush_plug_list(plug, false);
-
-	current->plug = NULL;
+	if (plug == current->plug) {
+		blk_flush_plug(plug, false);
+		current->plug = NULL;
+	}
 }
 EXPORT_SYMBOL(blk_finish_plug);
 
fs/fs-writeback.c
@@ -1893,7 +1893,8 @@ static long writeback_sb_inodes(struct super_block *sb,
 			 * unplug, so get our IOs out the door before we
 			 * give up the CPU.
 			 */
-			blk_flush_plug(current);
+			if (current->plug)
+				blk_flush_plug(current->plug, false);
 			cond_resched();
 		}
 
@@ -2291,7 +2292,7 @@ void wakeup_flusher_threads(enum wb_reason reason)
 	 * If we are expecting writeback progress we must submit plugged IO.
 	 */
 	if (blk_needs_flush_plug(current))
-		blk_schedule_flush_plug(current);
+		blk_flush_plug(current->plug, true);
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
include/linux/blkdev.h
@@ -725,9 +725,8 @@ extern void blk_set_queue_dying(struct request_queue *);
  * as the lock contention for request_queue lock is reduced.
  *
  * It is ok not to disable preemption when adding the request to the plug list
- * or when attempting a merge, because blk_schedule_flush_list() will only flush
- * the plug list when the task sleeps by itself. For details, please see
- * schedule() where blk_schedule_flush_plug() is called.
+ * or when attempting a merge. For details, please see schedule() where
+ * blk_flush_plug() is called.
  */
 struct blk_plug {
 	struct request *mq_list; /* blk-mq requests */
@@ -757,23 +756,8 @@ extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
 extern void blk_start_plug(struct blk_plug *);
 extern void blk_start_plug_nr_ios(struct blk_plug *, unsigned short);
 extern void blk_finish_plug(struct blk_plug *);
-extern void blk_flush_plug_list(struct blk_plug *, bool);
-
-static inline void blk_flush_plug(struct task_struct *tsk)
-{
-	struct blk_plug *plug = tsk->plug;
-
-	if (plug)
-		blk_flush_plug_list(plug, false);
-}
-
-static inline void blk_schedule_flush_plug(struct task_struct *tsk)
-{
-	struct blk_plug *plug = tsk->plug;
-
-	if (plug)
-		blk_flush_plug_list(plug, true);
-}
+void blk_flush_plug(struct blk_plug *plug, bool from_schedule);
 
 static inline bool blk_needs_flush_plug(struct task_struct *tsk)
 {
@@ -802,15 +786,10 @@ static inline void blk_finish_plug(struct blk_plug *plug)
 {
 }
 
-static inline void blk_flush_plug(struct task_struct *task)
+static inline void blk_flush_plug(struct blk_plug *plug, bool async)
 {
 }
 
-static inline void blk_schedule_flush_plug(struct task_struct *task)
-{
-}
-
-
 static inline bool blk_needs_flush_plug(struct task_struct *tsk)
 {
 	return false;
kernel/sched/core.c
@@ -6343,7 +6343,7 @@ static inline void sched_submit_work(struct task_struct *tsk)
 	 * make sure to submit it to avoid deadlocks.
 	 */
 	if (blk_needs_flush_plug(tsk))
-		blk_schedule_flush_plug(tsk);
+		blk_flush_plug(tsk->plug, true);
 }
 
 static void sched_update_worker(struct task_struct *tsk)
@@ -8354,7 +8354,8 @@ int io_schedule_prepare(void)
 	int old_iowait = current->in_iowait;
 
 	current->in_iowait = 1;
-	blk_schedule_flush_plug(current);
+	if (current->plug)
+		blk_flush_plug(current->plug, true);
 
 	return old_iowait;
 }