blk_mq_realloc_hw_ctxs() can be invoked while the hw queues are being
updated. At that moment, IO is blocked, so a GFP_KERNEL allocation
could recurse into IO through direct reclaim and hang forever. Change
the gfp flags from GFP_KERNEL to GFP_NOIO to avoid this hang during
memory allocation in blk_mq_realloc_hw_ctxs(). The allocations also
add __GFP_NOWARN | __GFP_NORETRY so they fail fast and quietly instead
of retrying indefinitely.
Signed-off-by: Jianchao Wang <jianchao.w.wang@oracle.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
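
The deadlock being avoided is an allocation recursing into IO while the
queue is frozen. As a minimal sketch, the same effect could be obtained
with the scoped-NOIO API; the helper name below is hypothetical and not
part of this patch, which instead passes GFP_NOIO explicitly at each
call site:

#include <linux/sched/mm.h>
#include <linux/slab.h>

/* Hypothetical helper, not in this patch: any allocation inside a
 * memalloc_noio_save()/memalloc_noio_restore() pair implicitly drops
 * __GFP_IO, equivalent to passing GFP_NOIO by hand as done below. */
static void *alloc_while_queue_frozen(size_t size, int node)
{
	unsigned int noio_flag;
	void *p;

	noio_flag = memalloc_noio_save();	  /* strip __GFP_IO for this task */
	p = kzalloc_node(size, GFP_KERNEL, node); /* behaves as GFP_NOIO here */
	memalloc_noio_restore(noio_flag);
	return p;
}
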
{
WARN_ON_ONCE(q->mq_ops);
- q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size);
+ q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size, GFP_KERNEL);
if (!q->fq)
return -ENOMEM;
EXPORT_SYMBOL(blkdev_issue_flush);
struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
- int node, int cmd_size)
+ int node, int cmd_size, gfp_t flags)
{
struct blk_flush_queue *fq;
int rq_sz = sizeof(struct request);
- fq = kzalloc_node(sizeof(*fq), GFP_KERNEL, node);
+ fq = kzalloc_node(sizeof(*fq), flags, node);
if (!fq)
goto fail;
spin_lock_init(&fq->mq_flush_lock);
rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
- fq->flush_rq = kzalloc_node(rq_sz, GFP_KERNEL, node);
+ fq->flush_rq = kzalloc_node(rq_sz, flags, node);
if (!fq->flush_rq)
goto fail_rq;
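
With the new gfp_t parameter, each caller of blk_alloc_flush_queue()
picks flags appropriate to its context. A usage sketch, taken directly
from the two call sites in this patch:

/* Legacy path: runs at queue-init time, before any IO can be in
 * flight, so GFP_KERNEL remains safe. */
q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size, GFP_KERNEL);

/* blk-mq path: may run while the queue is frozen, so do not let
 * reclaim issue IO, and fail fast rather than retrying. */
hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size,
				 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
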
/*
 * Allocate space for all possible cpus to avoid allocation at
 * runtime
 */
hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
- GFP_KERNEL, node);
+ GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node);
if (!hctx->ctxs)
goto unregister_cpu_notifier;
- if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL,
- node))
+ if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
+ GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node))
goto free_ctxs;
hctx->nr_ctx = 0;
if (set->ops->init_hctx &&
    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
goto free_bitmap;
- hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
+ hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size,
+ GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
if (!hctx->fq)
goto exit_hctx;
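
The error paths above use the usual kernel goto-unwind idiom: each
failing allocation jumps to a label that frees everything allocated
before it. A minimal self-contained sketch of the pattern (function
and label names are illustrative, not the actual blk-mq ones):

#include <linux/slab.h>

static int init_two_buffers_example(void **ap, void **bp)
{
	void *a, *b;

	a = kzalloc(64, GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
	if (!a)
		goto fail;
	b = kzalloc(64, GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
	if (!b)
		goto free_a;
	*ap = a;
	*bp = b;
	return 0;

free_a:
	kfree(a);
fail:
	return -ENOMEM;
}
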
node = blk_mq_hw_queue_to_node(q->mq_map, i);
hctxs[i] = kzalloc_node(blk_mq_hw_ctx_size(set),
- GFP_KERNEL, node);
+ GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
+ node);
if (!hctxs[i])
break;
- if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
- node)) {
+ if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask,
+ GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
+ node)) {
kfree(hctxs[i]);
hctxs[i] = NULL;
break;
}
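
A note on the flag combination used throughout this path: GFP_NOIO
keeps reclaim from issuing IO against the frozen queue, __GFP_NORETRY
makes the allocator give up quickly instead of looping, and
__GFP_NOWARN suppresses the allocation-failure warning since the
caller handles failure (the break above leaves previously allocated
hctxs usable). A hypothetical helper, purely illustrative and not in
this patch, that names the combination:

#include <linux/gfp.h>

/* Hypothetical helper: the gfp mask used for allocations made while
 * IO is frozen. */
static inline gfp_t blk_mq_frozen_gfp(void)
{
	return GFP_NOIO		/* reclaim must not issue IO      */
	     | __GFP_NOWARN	/* failure is handled, no splat   */
	     | __GFP_NORETRY;	/* fail fast, don't retry forever */
}
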
struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
- int node, int cmd_size);
+ int node, int cmd_size, gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);
int blk_init_rl(struct request_list *rl, struct request_queue *q,