struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
struct sbitmap_queue *bt;
struct sbq_wait_state *ws;
- DEFINE_WAIT(wait);
+ DEFINE_SBQ_WAIT(wait);
unsigned int tag_offset;
bool drop_ctx;
int tag;
if (tag != -1)
break;
- prepare_to_wait_exclusive(&ws->wait, &wait,
- TASK_UNINTERRUPTIBLE);
+ sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);
tag = __blk_mq_get_tag(data, bt);
if (tag != -1)
	break;

bt_prev = bt;
io_schedule();
+ sbitmap_finish_wait(bt, ws, &wait);
+
data->ctx = blk_mq_get_ctx(data->q);
data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
data->ctx->cpu);
else
bt = &tags->bitmap_tags;
- finish_wait(&ws->wait, &wait);
-
/*
* If destination hw queue is changed, fake wake up on
* previous queue for compensating the wake up miss, so
* other allocations on previous queue won't be starved.
*/
if (drop_ctx && data->ctx)
blk_mq_put_ctx(data->ctx);
- finish_wait(&ws->wait, &wait);
+ sbitmap_finish_wait(bt, ws, &wait);
found_tag:
return tag + tag_offset;
static int iscsit_wait_for_tag(struct se_session *se_sess, int state, int *cpup)
{
int tag = -1;
- DEFINE_WAIT(wait);
+ DEFINE_SBQ_WAIT(wait);
struct sbq_wait_state *ws;
+ struct sbitmap_queue *sbq;
if (state == TASK_RUNNING)
return tag;
- ws = &se_sess->sess_tag_pool.ws[0];
+ sbq = &se_sess->sess_tag_pool;
+ ws = &sbq->ws[0];
for (;;) {
- prepare_to_wait_exclusive(&ws->wait, &wait, state);
+ sbitmap_prepare_to_wait(sbq, ws, &wait, state);
if (signal_pending_state(state, current))
break;
- tag = sbitmap_queue_get(&se_sess->sess_tag_pool, cpup);
+ tag = sbitmap_queue_get(sbq, cpup);
if (tag >= 0)
break;
schedule();
}
- finish_wait(&ws->wait, &wait);
+ sbitmap_finish_wait(sbq, ws, &wait);
return tag;
}
*/
struct sbq_wait_state *ws;
+ /**
+ * @ws_active: count of currently active ws waitqueues
+ */
+ atomic_t ws_active;
+
/**
* @round_robin: Allocate bits in strict round-robin order.
*/
*/
void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m);
+struct sbq_wait {
+ int accounted;
+ struct wait_queue_entry wait;
+};
+
+#define DEFINE_SBQ_WAIT(name) \
+ struct sbq_wait name = { \
+ .accounted = 0, \
+ .wait = { \
+ .private = current, \
+ .func = autoremove_wake_function, \
+ .entry = LIST_HEAD_INIT((name).wait.entry), \
+ } \
+ }
+
+/*
+ * Wrapper around prepare_to_wait_exclusive(), which maintains some extra
+ * internal state.
+ */
+void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
+ struct sbq_wait_state *ws,
+ struct sbq_wait *sbq_wait, int state);
+
+/*
+ * Must be paired with sbitmap_prepare_to_wait().
+ */
+void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
+ struct sbq_wait *sbq_wait);
+
#endif /* __LINUX_SCALE_BITMAP_H */
sbq->min_shallow_depth = UINT_MAX;
sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
atomic_set(&sbq->wake_index, 0);
+ atomic_set(&sbq->ws_active, 0);
sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
if (!sbq->ws) {
{
int i, wake_index;
+ if (!atomic_read(&sbq->ws_active))
+ return NULL;
+
wake_index = atomic_read(&sbq->wake_index);
for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
struct sbq_wait_state *ws = &sbq->ws[wake_index];
seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
+ seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));
seq_puts(m, "ws={\n");
for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_show);
+
+void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
+ struct sbq_wait_state *ws,
+ struct sbq_wait *sbq_wait, int state)
+{
+ if (!sbq_wait->accounted) {
+ atomic_inc(&sbq->ws_active);
+ sbq_wait->accounted = 1;
+ }
+ prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
+}
+EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait);
+
+void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
+ struct sbq_wait *sbq_wait)
+{
+ finish_wait(&ws->wait, &sbq_wait->wait);
+ if (sbq_wait->accounted) {
+ atomic_dec(&sbq->ws_active);
+ sbq_wait->accounted = 0;
+ }
+}
+EXPORT_SYMBOL_GPL(sbitmap_finish_wait);