@@ -33,7 +33,6 @@ static DEFINE_MUTEX(all_q_mutex);
 static LIST_HEAD(all_q_list);
 
 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
-static void blk_mq_run_queues(struct request_queue *q);
 
 /*
  * Check if any of the ctx's have pending work in this hardware queue
@@ -78,7 +77,7 @@ static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
 	clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
 }
 
-static int blk_mq_queue_enter(struct request_queue *q)
+static int blk_mq_queue_enter(struct request_queue *q, gfp_t gfp)
 {
 	while (true) {
 		int ret;
@@ -86,6 +85,9 @@ static int blk_mq_queue_enter(struct request_queue *q)
 		if (percpu_ref_tryget_live(&q->mq_usage_counter))
 			return 0;
 
+		if (!(gfp & __GFP_WAIT))
+			return -EBUSY;
+
 		ret = wait_event_interruptible(q->mq_freeze_wq,
 				!q->mq_freeze_depth || blk_queue_dying(q));
 		if (blk_queue_dying(q))
@@ -118,7 +120,7 @@ void blk_mq_freeze_queue_start(struct request_queue *q)
 
 	if (freeze) {
 		percpu_ref_kill(&q->mq_usage_counter);
-		blk_mq_run_queues(q);
+		blk_mq_run_hw_queues(q, false);
 	}
 }
 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
@@ -257,7 +259,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
 	struct blk_mq_alloc_data alloc_data;
 	int ret;
 
-	ret = blk_mq_queue_enter(q);
+	ret = blk_mq_queue_enter(q, gfp);
 	if (ret)
 		return ERR_PTR(ret);
 
@@ -904,7 +906,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 			&hctx->run_work, 0);
 }
 
-static void blk_mq_run_queues(struct request_queue *q)
+void blk_mq_run_hw_queues(struct request_queue *q, bool async)
 {
 	struct blk_mq_hw_ctx *hctx;
 	int i;
@@ -915,9 +917,10 @@ static void blk_mq_run_queues(struct request_queue *q)
 		    test_bit(BLK_MQ_S_STOPPED, &hctx->state))
 			continue;
 
-		blk_mq_run_hw_queue(hctx, false);
+		blk_mq_run_hw_queue(hctx, async);
 	}
 }
+EXPORT_SYMBOL(blk_mq_run_hw_queues);
 
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
 {
@@ -1186,7 +1189,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 	int rw = bio_data_dir(bio);
 	struct blk_mq_alloc_data alloc_data;
 
-	if (unlikely(blk_mq_queue_enter(q))) {
+	if (unlikely(blk_mq_queue_enter(q, GFP_KERNEL))) {
 		bio_endio(bio, -EIO);
 		return NULL;
 	}
@@ -1890,10 +1893,26 @@ void blk_mq_release(struct request_queue *q)
 }
 
 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
+{
+	struct request_queue *uninit_q, *q;
+
+	uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
+	if (!uninit_q)
+		return ERR_PTR(-ENOMEM);
+
+	q = blk_mq_init_allocated_queue(set, uninit_q);
+	if (IS_ERR(q))
+		blk_cleanup_queue(uninit_q);
+
+	return q;
+}
+EXPORT_SYMBOL(blk_mq_init_queue);
+
+struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+						  struct request_queue *q)
 {
 	struct blk_mq_hw_ctx **hctxs;
 	struct blk_mq_ctx __percpu *ctx;
-	struct request_queue *q;
 	unsigned int *map;
 	int i;
 
@@ -1928,20 +1947,16 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 		hctxs[i]->queue_num = i;
 	}
 
-	q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
-	if (!q)
-		goto err_hctxs;
-
 	/*
 	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
 	 * See blk_register_queue() for details.
 	 */
 	if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
 			    PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
-		goto err_mq_usage;
+		goto err_hctxs;
 
 	setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
-	blk_queue_rq_timeout(q, 30000);
+	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30000);
 
 	q->nr_queues = nr_cpu_ids;
 	q->nr_hw_queues = set->nr_hw_queues;
@@ -1967,9 +1982,6 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 	else
 		blk_queue_make_request(q, blk_sq_make_request);
 
-	if (set->timeout)
-		blk_queue_rq_timeout(q, set->timeout);
-
 	/*
 	 * Do this after blk_queue_make_request() overrides it...
 	 */
@@ -1981,7 +1993,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
 
 	if (blk_mq_init_hw_queues(q, set))
-		goto err_mq_usage;
+		goto err_hctxs;
 
 	mutex_lock(&all_q_mutex);
 	list_add_tail(&q->all_q_node, &all_q_list);
@@ -1993,8 +2005,6 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 
 	return q;
 
-err_mq_usage:
-	blk_cleanup_queue(q);
 err_hctxs:
 	kfree(map);
 	for (i = 0; i < set->nr_hw_queues; i++) {
@@ -2009,7 +2019,7 @@ err_percpu:
 	free_percpu(ctx);
 	return ERR_PTR(-ENOMEM);
 }
-EXPORT_SYMBOL(blk_mq_init_queue);
+EXPORT_SYMBOL(blk_mq_init_allocated_queue);
 
 void blk_mq_free_queue(struct request_queue *q)
 {
@@ -2161,7 +2171,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
 		return -EINVAL;
 
-	if (!set->nr_hw_queues || !set->ops->queue_rq || !set->ops->map_queue)
+	if (!set->ops->queue_rq || !set->ops->map_queue)
 		return -EINVAL;
 
 	if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
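
Usage note (not part of the patch): the split above lets a driver allocate its own request_queue and only afterwards bind it to blk-mq via the newly exported blk_mq_init_allocated_queue(), and lets it kick all hardware queues with blk_mq_run_hw_queues(). The sketch below is a minimal, hypothetical illustration of that flow under the interfaces shown in this diff; the my_driver_init_queue()/my_tag_set names and the tag-set values are made up for the example.

/* Hypothetical driver-side sketch of the interfaces exported by this patch. */
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>

static struct blk_mq_tag_set my_tag_set;	/* hypothetical driver state */

static struct request_queue *my_driver_init_queue(struct blk_mq_ops *ops)
{
	struct request_queue *q;
	int ret;

	my_tag_set.ops = ops;
	my_tag_set.nr_hw_queues = 1;
	my_tag_set.queue_depth = 64;
	my_tag_set.numa_node = NUMA_NO_NODE;

	ret = blk_mq_alloc_tag_set(&my_tag_set);
	if (ret)
		return ERR_PTR(ret);

	/* The driver allocates the queue itself ... */
	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
	if (!q) {
		blk_mq_free_tag_set(&my_tag_set);
		return ERR_PTR(-ENOMEM);
	}

	/* ... and only then hands it to blk-mq, mirroring blk_mq_init_queue(). */
	if (IS_ERR(blk_mq_init_allocated_queue(&my_tag_set, q))) {
		blk_cleanup_queue(q);
		blk_mq_free_tag_set(&my_tag_set);
		return ERR_PTR(-ENOMEM);
	}

	/* Kick all hardware queues asynchronously, e.g. after requeueing work. */
	blk_mq_run_hw_queues(q, true);

	return q;
}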