blk-mq: remove the request_queue argument to blk_insert_cloned_request
The request must be submitted to the queue it was allocated for, so remove the extra request_queue argument. Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Mike Snitzer <snitzer@redhat.com> Link: https://lore.kernel.org/r/20220215100540.3892965-4-hch@lst.de Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
committed by
Jens Axboe
parent
a5efda3c46
commit
28db4711bf
@@ -2843,11 +2843,11 @@ void blk_mq_submit_bio(struct bio *bio)
 #ifdef CONFIG_BLK_MQ_STACKING
 /**
  * blk_insert_cloned_request - Helper for stacking drivers to submit a request
- * @q: the queue to submit the request
  * @rq: the request being queued
  */
-blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
+blk_status_t blk_insert_cloned_request(struct request *rq)
 {
+	struct request_queue *q = rq->q;
 	unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
 	blk_status_t ret;
 
@@ -2881,8 +2881,7 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
 		return BLK_STS_IOERR;
 	}
 
-	if (rq->q->disk &&
-	    should_fail_request(rq->q->disk->part0, blk_rq_bytes(rq)))
+	if (q->disk && should_fail_request(q->disk->part0, blk_rq_bytes(rq)))
 		return BLK_STS_IOERR;
 
 	if (blk_crypto_insert_cloned_request(rq))
@@ -2895,7 +2894,7 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
 	 * bypass a potential scheduler on the bottom device for
 	 * insert.
 	 */
-	blk_mq_run_dispatch_ops(rq->q,
+	blk_mq_run_dispatch_ops(q,
 			ret = blk_mq_request_issue_directly(rq, true));
 	if (ret)
 		blk_account_io_done(rq, ktime_get_ns());
@@ -311,7 +311,7 @@ static blk_status_t dm_dispatch_clone_request(struct request *clone, struct requ
 		clone->rq_flags |= RQF_IO_STAT;
 
 	clone->start_time_ns = ktime_get_ns();
-	r = blk_insert_cloned_request(clone->q, clone);
+	r = blk_insert_cloned_request(clone);
 	if (r != BLK_STS_OK && r != BLK_STS_RESOURCE && r != BLK_STS_DEV_RESOURCE)
 		/* must complete clone in terms of original request */
 		dm_complete_request(rq, r);
@@ -952,8 +952,7 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
 		struct bio_set *bs, gfp_t gfp_mask,
 		int (*bio_ctr)(struct bio *, struct bio *, void *), void *data);
 void blk_rq_unprep_clone(struct request *rq);
-blk_status_t blk_insert_cloned_request(struct request_queue *q,
-		struct request *rq);
+blk_status_t blk_insert_cloned_request(struct request *rq);
 
 struct rq_map_data {
 	struct page **pages;
Reference in New Issue
Block a user