// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to executing prepared requests from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/sched/sysctl.h>

#include "blk.h"
#include "blk-mq-sched.h"

/**
 * blk_end_sync_rq - executes a completion event on a request
 * @rq: request to complete
 * @error: end I/O status of the request
 */
static void blk_end_sync_rq(struct request *rq, blk_status_t error)
{
	struct completion *waiting = rq->end_io_data;

	/* stash the status where the submitter can retrieve it afterwards */
	rq->end_io_data = (void *)(uintptr_t)error;

	/*
	 * Complete last: if this is a stack request, the process (and thus
	 * the rq pointer) could be invalid right after this complete().
	 */
	complete(waiting);
}

/**
 * blk_execute_rq_nowait - insert a request into the I/O scheduler for execution
 * @bd_disk: matching gendisk
 * @rq: request to insert
 * @at_head: insert request at head or tail of queue
 * @done: I/O completion handler
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution.  Don't wait for completion.
 *
 * Note:
 *    This function will invoke @done directly if the queue is dead.
 */
void blk_execute_rq_nowait(struct gendisk *bd_disk, struct request *rq,
			   int at_head, rq_end_io_fn *done)
{
	WARN_ON(irqs_disabled());
	WARN_ON(!blk_rq_is_passthrough(rq));

	rq->rq_disk = bd_disk;
	rq->end_io = done;

	blk_account_io_start(rq);

	/*
	 * Don't check the dying flag for MQ: the request won't
	 * be reused after the dying flag is set.
	 */
	blk_mq_sched_insert_request(rq, at_head, true, false);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
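
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * @q, @disk and my_end_io() are illustrative names.  A common pattern is
 * to reclaim the request from the end_io handler once it fires.
 *
 *	static void my_end_io(struct request *rq, blk_status_t error)
 *	{
 *		blk_mq_free_request(rq);
 *	}
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	blk_execute_rq_nowait(disk, rq, 0, my_end_io);
 */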

static bool blk_rq_is_poll(struct request *rq)
{
	return rq->mq_hctx && rq->mq_hctx->type == HCTX_TYPE_POLL;
}

static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
{
	do {
		blk_poll(rq->q, request_to_qc_t(rq->mq_hctx, rq), true);
		cond_resched();
	} while (!completion_done(wait));
}

/**
 * blk_execute_rq - insert a request into the queue for execution
 * @bd_disk: matching gendisk
 * @rq: request to insert
 * @at_head: insert request at head or tail of queue
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution and wait for completion.
 * Return: The blk_status_t result provided to blk_mq_end_request().
 */
blk_status_t blk_execute_rq(struct gendisk *bd_disk, struct request *rq, int at_head)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long hang_check;

	rq->end_io_data = &wait;
	blk_execute_rq_nowait(bd_disk, rq, at_head, blk_end_sync_rq);

	/* Prevent hang_check timer from firing at us during very long I/O */
	hang_check = sysctl_hung_task_timeout_secs;

	if (blk_rq_is_poll(rq))
		blk_rq_poll_completion(rq, &wait);
	else if (hang_check)
		while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2)));
	else
		wait_for_completion_io(&wait);

	return (blk_status_t)(uintptr_t)rq->end_io_data;
}
EXPORT_SYMBOL(blk_execute_rq);
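
/*
 * A minimal synchronous usage sketch (hypothetical caller, not part of this
 * file): @q and @disk are illustrative names.  Unlike the _nowait variant,
 * the caller still owns the request after completion and must free it.
 *
 *	struct request *rq;
 *	blk_status_t status;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	status = blk_execute_rq(disk, rq, 0);
 *	blk_mq_free_request(rq);
 *	return blk_status_to_errno(status);
 */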