// SPDX-License-Identifier: GPL-2.0

#include <linux/blk-pm.h>
#include <linux/blkdev.h>
#include <linux/pm_runtime.h>
#include "blk-mq.h"
8/**
9 * blk_pm_runtime_init - Block layer runtime PM initialization routine
10 * @q: the queue of the device
11 * @dev: the device the queue belongs to
12 *
13 * Description:
14 * Initialize runtime-PM-related fields for @q and start auto suspend for
15 * @dev. Drivers that want to take advantage of request-based runtime PM
16 * should call this function after @dev has been initialized, and its
17 * request queue @q has been allocated, and runtime PM for it can not happen
18 * yet(either due to disabled/forbidden or its usage_count > 0). In most
19 * cases, driver should call this function before any I/O has taken place.
20 *
21 * This function takes care of setting up using auto suspend for the device,
22 * the autosuspend delay is set to -1 to make runtime suspend impossible
23 * until an updated value is either set by user or by driver. Drivers do
24 * not need to touch other autosuspend settings.
25 *
26 * The block layer runtime PM is request based, so only works for drivers
27 * that use request as their IO unit instead of those directly use bio's.
28 */
29void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
30{
Bart Van Asschebca6b062018-09-26 14:01:03 -070031 q->dev = dev;
32 q->rpm_status = RPM_ACTIVE;
33 pm_runtime_set_autosuspend_delay(q->dev, -1);
34 pm_runtime_use_autosuspend(q->dev);
35}
36EXPORT_SYMBOL(blk_pm_runtime_init);
37
/**
 * blk_pre_runtime_suspend - Pre runtime suspend check
 * @q: the queue of the device
 *
 * Description:
 *    This function will check if runtime suspend is allowed for the device
 *    by examining if there are any requests pending in the queue. If there
 *    are requests pending, the device can not be runtime suspended; otherwise,
 *    the queue's status will be updated to SUSPENDING and the driver can
 *    proceed to suspend the device.
 *
 *    For the not allowed case, we mark last busy for the device so that
 *    runtime PM core will try to autosuspend it some time later.
 *
 *    This function should be called near the start of the device's
 *    runtime_suspend callback.
 *
 * Return:
 *    0         - OK to runtime suspend the device
 *    -EBUSY    - Device should not be runtime suspended
 */
int blk_pre_runtime_suspend(struct request_queue *q)
{
	int ret = 0;

	/* Queues without an associated device never participate in runtime PM. */
	if (!q->dev)
		return ret;

	/* Suspend may only be initiated from the fully active state. */
	WARN_ON_ONCE(q->rpm_status != RPM_ACTIVE);

	/* rpm_status is protected by the queue lock. */
	spin_lock_irq(&q->queue_lock);
	q->rpm_status = RPM_SUSPENDING;
	spin_unlock_irq(&q->queue_lock);

	/*
	 * Increase the pm_only counter before checking whether any
	 * non-PM blk_queue_enter() calls are in progress to avoid that any
	 * new non-PM blk_queue_enter() calls succeed before the pm_only
	 * counter is decreased again.
	 */
	blk_set_pm_only(q);
	ret = -EBUSY;
	/* Switch q_usage_counter from per-cpu to atomic mode. */
	blk_freeze_queue_start(q);
	/*
	 * Wait until atomic mode has been reached. Since that
	 * involves calling call_rcu(), it is guaranteed that later
	 * blk_queue_enter() calls see the pm-only state. See also
	 * http://lwn.net/Articles/573497/.
	 */
	percpu_ref_switch_to_atomic_sync(&q->q_usage_counter);
	/* A zero usage counter means no requests are in flight: suspend is safe. */
	if (percpu_ref_is_zero(&q->q_usage_counter))
		ret = 0;
	/* Switch q_usage_counter back to per-cpu mode. */
	blk_mq_unfreeze_queue(q);

	if (ret < 0) {
		/* Roll back: the queue stays active and PM-only mode is left. */
		spin_lock_irq(&q->queue_lock);
		q->rpm_status = RPM_ACTIVE;
		pm_runtime_mark_last_busy(q->dev);
		spin_unlock_irq(&q->queue_lock);

		blk_clear_pm_only(q);
	}

	return ret;
}
EXPORT_SYMBOL(blk_pre_runtime_suspend);
106
107/**
108 * blk_post_runtime_suspend - Post runtime suspend processing
109 * @q: the queue of the device
110 * @err: return value of the device's runtime_suspend function
111 *
112 * Description:
113 * Update the queue's runtime status according to the return value of the
114 * device's runtime suspend function and mark last busy for the device so
115 * that PM core will try to auto suspend the device at a later time.
116 *
117 * This function should be called near the end of the device's
118 * runtime_suspend callback.
119 */
120void blk_post_runtime_suspend(struct request_queue *q, int err)
121{
122 if (!q->dev)
123 return;
124
Christoph Hellwig0d945c12018-11-15 12:17:28 -0700125 spin_lock_irq(&q->queue_lock);
Bart Van Asschebca6b062018-09-26 14:01:03 -0700126 if (!err) {
127 q->rpm_status = RPM_SUSPENDED;
128 } else {
129 q->rpm_status = RPM_ACTIVE;
130 pm_runtime_mark_last_busy(q->dev);
131 }
Christoph Hellwig0d945c12018-11-15 12:17:28 -0700132 spin_unlock_irq(&q->queue_lock);
Bart Van Assche7cedffe2018-09-26 14:01:09 -0700133
134 if (err)
135 blk_clear_pm_only(q);
Bart Van Asschebca6b062018-09-26 14:01:03 -0700136}
137EXPORT_SYMBOL(blk_post_runtime_suspend);
138
/**
 * blk_pre_runtime_resume - Pre runtime resume processing
 * @q: the queue of the device
 *
 * Description:
 *    Update the queue's runtime status to RESUMING in preparation for the
 *    runtime resume of the device.
 *
 *    This function should be called near the start of the device's
 *    runtime_resume callback.
 */
void blk_pre_runtime_resume(struct request_queue *q)
{
	/* Queues without an associated device never participate in runtime PM. */
	if (!q->dev)
		return;

	/* rpm_status is protected by the queue lock. */
	spin_lock_irq(&q->queue_lock);
	q->rpm_status = RPM_RESUMING;
	spin_unlock_irq(&q->queue_lock);
}
EXPORT_SYMBOL(blk_pre_runtime_resume);
160
161/**
162 * blk_post_runtime_resume - Post runtime resume processing
163 * @q: the queue of the device
Bart Van Asschebca6b062018-09-26 14:01:03 -0700164 *
165 * Description:
Damien Le Moalc96b8172023-11-20 16:06:11 +0900166 * Restart the queue of a runtime suspended device. It does this regardless
167 * of whether the device's runtime-resume succeeded; even if it failed the
Alan Stern6e1fcab2021-12-20 19:21:26 +0800168 * driver or error handler will need to communicate with the device.
Bart Van Asschebca6b062018-09-26 14:01:03 -0700169 *
170 * This function should be called near the end of the device's
Damien Le Moalc96b8172023-11-20 16:06:11 +0900171 * runtime_resume callback to correct queue runtime PM status and re-enable
172 * peeking requests from the queue.
Bart Van Asschebca6b062018-09-26 14:01:03 -0700173 */
Alan Stern6e1fcab2021-12-20 19:21:26 +0800174void blk_post_runtime_resume(struct request_queue *q)
Bart Van Asschebca6b062018-09-26 14:01:03 -0700175{
Alan Stern8f38f8e2020-07-06 11:14:36 -0400176 int old_status;
177
178 if (!q->dev)
179 return;
180
181 spin_lock_irq(&q->queue_lock);
182 old_status = q->rpm_status;
183 q->rpm_status = RPM_ACTIVE;
184 pm_runtime_mark_last_busy(q->dev);
185 pm_request_autosuspend(q->dev);
186 spin_unlock_irq(&q->queue_lock);
187
188 if (old_status != RPM_ACTIVE)
189 blk_clear_pm_only(q);
Bart Van Asschebca6b062018-09-26 14:01:03 -0700190}
Damien Le Moalc96b8172023-11-20 16:06:11 +0900191EXPORT_SYMBOL(blk_post_runtime_resume);