sbitmap: optimize wakeup check

Even if we have no waiters on any of the sbitmap_queue wait states, we
still have to loop over every entry to check for them. We do this for
every IO, so the cost adds up.

Shift a bit of the cost to the slow path, when we actually have waiters.
Wrap prepare_to_wait_exclusive() and finish_wait(), so we can maintain
an internal count of how many are currently active. Then we can simply
check this count in sbq_wake_ptr() and not have to loop if we don't
have any sleepers.

Convert the two users of sbitmap with waiting, blk-mq-tag and iSCSI.

Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Author: Jens Axboe <axboe@kernel.dk>
Date: 2018-11-29 17:36:41 -07:00
commit 5d2ee7122c
parent ea86ea2cdc
4 changed files with 74 additions and 11 deletions
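
The fast path this buys is a single atomic read before the per-waitqueue
scan. Based on the description above, the check at the top of
sbq_wake_ptr() in lib/sbitmap.c amounts to the sketch below; the
round-robin walk itself is the pre-existing logic, so treat this as
illustrative of the early-out rather than as the exact hunk (which is
not shown on this page):

static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	/*
	 * No sleepers accounted via sbitmap_prepare_to_wait(), so there
	 * is nothing to wake: skip the per-waitqueue scan entirely.
	 */
	if (!atomic_read(&sbq->ws_active))
		return NULL;

	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait)) {
			if (wake_index != atomic_read(&sbq->wake_index))
				atomic_set(&sbq->wake_index, wake_index);
			return ws;
		}

		wake_index = sbq_index_inc(wake_index);
	}

	return NULL;
}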

include/linux/sbitmap.h

@@ -135,6 +135,11 @@ struct sbitmap_queue {
 	 */
 	struct sbq_wait_state *ws;
 
+	/*
+	 * @ws_active: count of currently active ws waitqueues
+	 */
+	atomic_t ws_active;
+
 	/**
 	 * @round_robin: Allocate bits in strict round-robin order.
 	 */
@@ -552,4 +557,33 @@ void sbitmap_queue_wake_up(struct sbitmap_queue *sbq);
  */
 void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m);
 
+struct sbq_wait {
+	int accounted;
+	struct wait_queue_entry wait;
+};
+
+#define DEFINE_SBQ_WAIT(name)						\
+	struct sbq_wait name = {					\
+		.accounted = 0,						\
+		.wait = {						\
+			.private	= current,			\
+			.func		= autoremove_wake_function,	\
+			.entry		= LIST_HEAD_INIT((name).wait.entry),\
+		}							\
+	}
+
+/*
+ * Wrapper around prepare_to_wait_exclusive(), which maintains some extra
+ * internal state.
+ */
+void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
+			     struct sbq_wait_state *ws,
+			     struct sbq_wait *sbq_wait, int state);
+
+/*
+ * Must be paired with sbitmap_prepare_to_wait().
+ */
+void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
+			 struct sbq_wait *sbq_wait);
+
 #endif /* __LINUX_SCALE_BITMAP_H */
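
For context, the lib/sbitmap.c side of these two helpers (part of this
commit, but not in the hunk above) is thin bookkeeping around the
scheduler primitives; roughly:

void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
			     struct sbq_wait_state *ws,
			     struct sbq_wait *sbq_wait, int state)
{
	/* Account each waiter once, even if it re-prepares repeatedly */
	if (!sbq_wait->accounted) {
		atomic_inc(&sbq->ws_active);
		sbq_wait->accounted = 1;
	}
	prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
}

void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
			 struct sbq_wait *sbq_wait)
{
	finish_wait(&ws->wait, &sbq_wait->wait);
	if (sbq_wait->accounted) {
		atomic_dec(&sbq->ws_active);
		sbq_wait->accounted = 0;
	}
}

A converted caller then follows the usual wait-loop shape. A minimal,
hypothetical user modeled on blk_mq_get_tag() might look like this
(picking ws[0] directly is purely illustrative; real callers rotate
through the wait states):

	struct sbq_wait_state *ws = &sbq->ws[0];	/* illustrative pick */
	DEFINE_SBQ_WAIT(wait);
	int nr;

	do {
		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
		nr = __sbitmap_queue_get(sbq);
		if (nr >= 0)
			break;
		io_schedule();
	} while (1);
	sbitmap_finish_wait(sbq, ws, &wait);

Because the accounting lives in the wrappers, callers that never sleep
pay only the atomic_read() in sbq_wake_ptr() and nothing else.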