diff --git a/block/blk-mq.c b/block/blk-mq.c
index 3da2215b2912..f2d198b7f54a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -3422,6 +3422,25 @@ EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
  */
 void blk_steal_bios(struct bio_list *list, struct request *rq)
 {
+	struct bio *bio;
+
+	for (bio = rq->bio; bio; bio = bio->bi_next) {
+		if (bio->bi_opf & REQ_POLLED) {
+			bio->bi_opf &= ~REQ_POLLED;
+			bio->bi_cookie = BLK_QC_T_NONE;
+		}
+		/*
+		 * The alternate request queue that we may end up submitting
+		 * the bio to may be temporarily frozen; in that case REQ_NOWAIT
+		 * would fail the I/O immediately with EAGAIN to the issuer.
+		 * We are not in the issuer context, which cannot block, so
+		 * clear the flag to avoid spurious EAGAIN I/O failures.
+		 */
+		bio->bi_opf &= ~REQ_NOWAIT;
+		bio_clear_flag(bio, BIO_QOS_THROTTLED);
+		bio_clear_flag(bio, BIO_QOS_MERGED);
+	}
+
 	if (rq->bio) {
 		if (list->tail)
 			list->tail->bi_next = rq->bio;
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index fc6800a9f7f9..ba00f0b72b85 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -154,21 +154,8 @@ void nvme_failover_req(struct request *req)
 	}
 
 	spin_lock_irqsave(&ns->head->requeue_lock, flags);
-	for (bio = req->bio; bio; bio = bio->bi_next) {
+	for (bio = req->bio; bio; bio = bio->bi_next)
 		bio_set_dev(bio, ns->head->disk->part0);
-		if (bio->bi_opf & REQ_POLLED) {
-			bio->bi_opf &= ~REQ_POLLED;
-			bio->bi_cookie = BLK_QC_T_NONE;
-		}
-		/*
-		 * The alternate request queue that we may end up submitting
-		 * the bio to may be frozen temporarily, in this case REQ_NOWAIT
-		 * will fail the I/O immediately with EAGAIN to the issuer.
-		 * We are not in the issuer context which cannot block. Clear
-		 * the flag to avoid spurious EAGAIN I/O failures.
-		 */
-		bio->bi_opf &= ~REQ_NOWAIT;
-	}
 	blk_steal_bios(&ns->head->requeue_list, req);
 	spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
 