 	kmem_cache_free(icq->__rcu_icq_cache, icq);
 }
 
-/* Exit an icq. Called with both ioc and q locked. */
+/*
+ * Exit an icq. Called with both ioc and q locked for sq, only ioc locked for
+ * mq.
+ */
 static void ioc_exit_icq(struct io_cq *icq)
 {
 	struct elevator_type *et = icq->q->elevator->type;
 */
 void put_io_context_active(struct io_context *ioc)
 {
+	struct elevator_type *et;
 	unsigned long flags;
 	struct io_cq *icq;
 	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
 		if (icq->flags & ICQ_EXITED)
 			continue;
-		if (spin_trylock(icq->q->queue_lock)) {
+
+		et = icq->q->elevator->type;
+		if (et->uses_mq) {
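+			/*
+			 * mq: ioc_exit_icq() only needs ioc->lock, which
+			 * is already held here (see the comment above
+			 * ioc_exit_icq()).
+			 */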
 			ioc_exit_icq(icq);
-			spin_unlock(icq->q->queue_lock);
 		} else {
-			spin_unlock_irqrestore(&ioc->lock, flags);
-			cpu_relax();
-			goto retry;
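+			/*
+			 * sq: ioc_exit_icq() also needs the queue lock,
+			 * which nests outside ioc->lock elsewhere, so only
+			 * trylock it here; on failure, drop ioc->lock and
+			 * retry the scan.
+			 */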
+			if (spin_trylock(icq->q->queue_lock)) {
+				ioc_exit_icq(icq);
+				spin_unlock(icq->q->queue_lock);
+			} else {
+				spin_unlock_irqrestore(&ioc->lock, flags);
+				cpu_relax();
+				goto retry;
+			}
 		}
 	}
 	spin_unlock_irqrestore(&ioc->lock, flags);