// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 */
#define pr_fmt(fmt) "PM: " fmt

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_clock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>

#include "power.h"

#define GENPD_RETRY_MAX_MS	250		/* Approximate */

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d);			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback;			\
	if (__routine) {					\
		__ret = __routine(dev);				\
	}							\
	__ret;							\
})
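
/*
 * Editorial example (not part of the original source): with type "int" and
 * callback "stop",
 *
 *	ret = GENPD_DEV_CALLBACK(genpd, int, stop, dev);
 *
 * calls genpd->dev_ops.stop(dev) when the domain provides a ->stop()
 * callback and otherwise evaluates to (int)0, i.e. success. genpd_stop_dev()
 * and genpd_start_dev() below are exactly such wrappers.
 */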

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

struct genpd_lock_ops {
	void (*lock)(struct generic_pm_domain *genpd);
	void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
	int (*lock_interruptible)(struct generic_pm_domain *genpd);
	void (*unlock)(struct generic_pm_domain *genpd);
};

static void genpd_lock_mtx(struct generic_pm_domain *genpd)
{
	mutex_lock(&genpd->mlock);
}

static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
					int depth)
{
	mutex_lock_nested(&genpd->mlock, depth);
}

static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
{
	return mutex_lock_interruptible(&genpd->mlock);
}

static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
{
	return mutex_unlock(&genpd->mlock);
}

static const struct genpd_lock_ops genpd_mtx_ops = {
	.lock = genpd_lock_mtx,
	.lock_nested = genpd_lock_nested_mtx,
	.lock_interruptible = genpd_lock_interruptible_mtx,
	.unlock = genpd_unlock_mtx,
};

static void genpd_lock_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
}

static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
					int depth)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave_nested(&genpd->slock, flags, depth);
	genpd->lock_flags = flags;
}

static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
	__acquires(&genpd->slock)
{
	unsigned long flags;

	spin_lock_irqsave(&genpd->slock, flags);
	genpd->lock_flags = flags;
	return 0;
}

static void genpd_unlock_spin(struct generic_pm_domain *genpd)
	__releases(&genpd->slock)
{
	spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
}

static const struct genpd_lock_ops genpd_spin_ops = {
	.lock = genpd_lock_spin,
	.lock_nested = genpd_lock_nested_spin,
	.lock_interruptible = genpd_lock_interruptible_spin,
	.unlock = genpd_unlock_spin,
};

#define genpd_lock(p)			p->lock_ops->lock(p)
#define genpd_lock_nested(p, d)		p->lock_ops->lock_nested(p, d)
#define genpd_lock_interruptible(p)	p->lock_ops->lock_interruptible(p)
#define genpd_unlock(p)			p->lock_ops->unlock(p)
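
/*
 * Editorial note (not from the original source): domains initialized with
 * GENPD_FLAG_IRQ_SAFE use genpd_spin_ops, so the genpd_lock() wrappers above
 * take a spinlock and are usable in atomic context; all other domains use
 * genpd_mtx_ops and may sleep while holding the lock. For example:
 *
 *	genpd_lock(genpd);	// mutex_lock() or spin_lock_irqsave()
 *	genpd->synced_poweroff = true;
 *	genpd_unlock(genpd);
 */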

#define genpd_status_on(genpd)		(genpd->status == GENPD_STATE_ON)
#define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
#define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
#define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
#define genpd_is_cpu_domain(genpd)	(genpd->flags & GENPD_FLAG_CPU_DOMAIN)
#define genpd_is_rpm_always_on(genpd)	(genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
#define genpd_is_opp_table_fw(genpd)	(genpd->flags & GENPD_FLAG_OPP_TABLE_FW)

static inline bool irq_safe_dev_in_sleep_domain(struct device *dev,
		const struct generic_pm_domain *genpd)
{
	bool ret;

	ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);

	/*
	 * Warn once if an IRQ safe device is attached to a domain whose
	 * callbacks are allowed to sleep. This indicates a suboptimal
	 * configuration for PM, but it doesn't matter for an always on domain.
	 */
	if (genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd))
		return ret;

	if (ret)
		dev_warn_once(dev, "PM domain %s will not be powered off\n",
				genpd->name);

	return ret;
}

static int genpd_runtime_suspend(struct device *dev);

/*
 * Get the generic PM domain for a particular struct device.
 * This validates the struct device pointer, the PM domain pointer,
 * and checks that the PM domain pointer is a real generic PM domain.
 * Any failure results in NULL being returned.
 */
static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
		return NULL;

	/* A genpd always has its ->runtime_suspend() callback assigned. */
	if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
		return pd_to_genpd(dev->pm_domain);

	return NULL;
}

/*
 * This should only be used where we are certain that the pm_domain
 * attached to the device is a genpd domain.
 */
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(const struct generic_pm_domain *genpd,
			  struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}

static int genpd_start_dev(const struct generic_pm_domain *genpd,
			   struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
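	/*
	 * Editorial comment: the barrier below orders the increment before
	 * any later accesses, so that paths checking sd_count with
	 * atomic_read(), e.g. genpd_power_off(), observe the new value.
	 */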
	smp_mb__after_atomic();
}

#ifdef CONFIG_DEBUG_FS
static struct dentry *genpd_debugfs_dir;

static void genpd_debug_add(struct generic_pm_domain *genpd);

static void genpd_debug_remove(struct generic_pm_domain *genpd)
{
	if (!genpd_debugfs_dir)
		return;

	debugfs_lookup_and_remove(genpd->name, genpd_debugfs_dir);
}

static void genpd_update_accounting(struct generic_pm_domain *genpd)
{
	u64 delta, now;

	now = ktime_get_mono_fast_ns();
	if (now <= genpd->accounting_time)
		return;

	delta = now - genpd->accounting_time;

	/*
	 * If genpd->status is active, it means we are just
	 * out of off and so update the idle time and vice
	 * versa.
	 */
	if (genpd->status == GENPD_STATE_ON)
		genpd->states[genpd->state_idx].idle_time += delta;
	else
		genpd->on_time += delta;

	genpd->accounting_time = now;
}
#else
static inline void genpd_debug_add(struct generic_pm_domain *genpd) {}
static inline void genpd_debug_remove(struct generic_pm_domain *genpd) {}
static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
#endif

static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
					   unsigned int state)
{
	struct generic_pm_domain_data *pd_data;
	struct pm_domain_data *pdd;
	struct gpd_link *link;

	/* New requested state is same as Max requested state */
	if (state == genpd->performance_state)
		return state;

	/* New requested state is higher than Max requested state */
	if (state > genpd->performance_state)
		return state;

	/* Traverse all devices within the domain */
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		pd_data = to_gpd_data(pdd);

		if (pd_data->performance_state > state)
			state = pd_data->performance_state;
	}

	/*
	 * Traverse all sub-domains within the domain. This can be
	 * done without any additional locking as the link->performance_state
	 * field is protected by the parent genpd->lock, which is already taken.
	 *
	 * Also note that link->performance_state (subdomain's performance state
	 * requirement to parent domain) is different from
	 * link->child->performance_state (current performance state requirement
	 * of the devices/sub-domains of the subdomain) and so can have a
	 * different value.
	 *
	 * Note that we also take vote from powered-off sub-domains into account
	 * as the same is done for devices right now.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		if (link->performance_state > state)
			state = link->performance_state;
	}

	return state;
}
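
/*
 * Editorial sketch (not from the original source): when the new request is
 * at or above the current aggregate, it wins immediately; otherwise the
 * code above rescans every device vote and child-link vote and returns the
 * maximum. E.g. with device votes {2, 0} and a child link voting 3, a
 * lowered request of 1 re-evaluates to max(1, 2, 0, 3) = 3.
 */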

static int genpd_xlate_performance_state(struct generic_pm_domain *genpd,
					 struct generic_pm_domain *parent,
					 unsigned int pstate)
{
	if (!parent->set_performance_state)
		return pstate;

	return dev_pm_opp_xlate_performance_state(genpd->opp_table,
						  parent->opp_table,
						  pstate);
}

static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
					unsigned int state, int depth)
{
	struct generic_pm_domain *parent;
	struct gpd_link *link;
	int parent_state, ret;

	if (state == genpd->performance_state)
		return 0;

	/* Propagate to parents of genpd */
	list_for_each_entry(link, &genpd->child_links, child_node) {
		parent = link->parent;

		/* Find parent's performance state */
		ret = genpd_xlate_performance_state(genpd, parent, state);
		if (unlikely(ret < 0))
			goto err;

		parent_state = ret;

		genpd_lock_nested(parent, depth + 1);

		link->prev_performance_state = link->performance_state;
		link->performance_state = parent_state;
		parent_state = _genpd_reeval_performance_state(parent,
						parent_state);
		ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
		if (ret)
			link->performance_state = link->prev_performance_state;

		genpd_unlock(parent);

		if (ret)
			goto err;
	}

	if (genpd->set_performance_state) {
		ret = genpd->set_performance_state(genpd, state);
		if (ret)
			goto err;
	}

	genpd->performance_state = state;
	return 0;

err:
	/* Encountered an error, let's roll back */
	list_for_each_entry_continue_reverse(link, &genpd->child_links,
					     child_node) {
		parent = link->parent;

		genpd_lock_nested(parent, depth + 1);

		parent_state = link->prev_performance_state;
		link->performance_state = parent_state;

		parent_state = _genpd_reeval_performance_state(parent,
						parent_state);
		if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
			pr_err("%s: Failed to roll back to %d performance state\n",
			       parent->name, parent_state);
		}

		genpd_unlock(parent);
	}

	return ret;
}
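
/*
 * Editorial sketch (not from the original source) of the propagation above,
 * for a child domain C with parent P:
 *
 *	_genpd_set_performance_state(C, state, 0)
 *	  1. translate C's state into P's scale (via the OPP tables)
 *	  2. record the vote on the C->P link and re-aggregate P's votes
 *	  3. recurse: _genpd_set_performance_state(P, parent_state, 1)
 *	  4. finally call C->set_performance_state(C, state)
 *
 * Parents are thus updated before the domain itself, and a failure at any
 * point rolls the already-visited links back to their previous votes.
 */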

static int genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);
	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
	unsigned int prev_state;
	int ret;

	prev_state = gpd_data->performance_state;
	if (prev_state == state)
		return 0;

	gpd_data->performance_state = state;
	state = _genpd_reeval_performance_state(genpd, state);

	ret = _genpd_set_performance_state(genpd, state, 0);
	if (ret)
		gpd_data->performance_state = prev_state;

	return ret;
}

static int genpd_drop_performance_state(struct device *dev)
{
	unsigned int prev_state = dev_gpd_data(dev)->performance_state;

	if (!genpd_set_performance_state(dev, 0))
		return prev_state;

	return 0;
}

static void genpd_restore_performance_state(struct device *dev,
					    unsigned int state)
{
	if (state)
		genpd_set_performance_state(dev, state);
}

static int genpd_dev_pm_set_performance_state(struct device *dev,
					      unsigned int state)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);
	int ret = 0;

	genpd_lock(genpd);
	if (pm_runtime_suspended(dev)) {
		dev_gpd_data(dev)->rpm_pstate = state;
	} else {
		ret = genpd_set_performance_state(dev, state);
		if (!ret)
			dev_gpd_data(dev)->rpm_pstate = 0;
	}
	genpd_unlock(genpd);

	return ret;
}

/**
 * dev_pm_genpd_set_performance_state - Set performance state of device's power
 * domain.
 *
 * @dev: Device for which the performance-state needs to be set.
 * @state: Target performance state of the device. This can be set as 0 when the
 *	   device doesn't have any performance state constraints left (and the
 *	   device will then no longer participate in determining the genpd's
 *	   target performance state).
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 *
 * Returns 0 on success and negative error values on failures.
 */
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return -ENODEV;

	if (WARN_ON(!dev->power.subsys_data ||
		     !dev->power.subsys_data->domain_data))
		return -EINVAL;

	return genpd_dev_pm_set_performance_state(dev, state);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
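
/*
 * Editorial usage sketch (hypothetical consumer, not from the original
 * source): a driver attached to a genpd with performance states might do
 *
 *	ret = dev_pm_genpd_set_performance_state(dev, 2);	// vote for level 2
 *	...
 *	ret = dev_pm_genpd_set_performance_state(dev, 0);	// drop the vote
 *
 * where the meaning of each level is provider-specific, typically described
 * by OPP tables.
 */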

/**
 * dev_pm_genpd_set_next_wakeup - Notify PM framework of an impending wakeup.
 *
 * @dev: Device to handle
 * @next: impending interrupt/wakeup for the device
 *
 * Allow devices to inform of the next wakeup. It's assumed that the users
 * guarantee that the genpd wouldn't be detached while this routine is getting
 * called. Additionally, it's also assumed that @dev isn't runtime suspended
 * (RPM_SUSPENDED).
 * Although devices are expected to update the next_wakeup after the end of
 * their usecase as well, it is possible the devices themselves may not know
 * about that, so stale @next will be ignored when powering off the domain.
 */
void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
{
	struct generic_pm_domain *genpd;
	struct gpd_timing_data *td;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	td = to_gpd_data(dev->power.subsys_data->domain_data)->td;
	if (td)
		td->next_wakeup = next;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup);
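
/*
 * Editorial usage sketch (hypothetical, not from the original source): a
 * driver that knows its next interrupt is due in 2 ms can let the governor
 * take that into account when selecting an idle state:
 *
 *	dev_pm_genpd_set_next_wakeup(dev, ktime_add_ms(ktime_get(), 2));
 */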

/**
 * dev_pm_genpd_get_next_hrtimer - Return the next_hrtimer for the genpd
 * @dev: A device that is attached to the genpd.
 *
 * This routine should typically be called for a device, at the point when a
 * GENPD_NOTIFY_PRE_OFF notification has been sent for it.
 *
 * Returns the aggregated value of the genpd's next hrtimer or KTIME_MAX if no
 * valid value has been set.
 */
ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return KTIME_MAX;

	if (genpd->gd)
		return genpd->gd->next_hrtimer;

	return KTIME_MAX;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_get_next_hrtimer);

/*
 * dev_pm_genpd_synced_poweroff - Next power off should be synchronous
 *
 * @dev: A device that is attached to the genpd.
 *
 * Allows a consumer of the genpd to notify the provider that the next power off
 * should be synchronous.
 *
 * It is assumed that the users guarantee that the genpd wouldn't be detached
 * while this routine is getting called.
 */
void dev_pm_genpd_synced_poweroff(struct device *dev)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
		return;

	genpd_lock(genpd);
	genpd->synced_poweroff = true;
	genpd_unlock(genpd);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_synced_poweroff);
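
/*
 * Editorial note (not from the original source): _genpd_power_on() and
 * _genpd_power_off() below bracket the provider's ->power_on()/->power_off()
 * callbacks with notifications on genpd->power_notifiers, e.g. for power on:
 *
 *	GENPD_NOTIFY_PRE_ON -> genpd->power_on() -> GENPD_NOTIFY_ON
 *
 * with a rollback notification (GENPD_NOTIFY_OFF here) if the callback
 * fails. When the transition is timed, a measured latency that exceeds the
 * stored worst case updates it and flags the governor through
 * gd->max_off_time_changed.
 */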

static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	/* Notify consumers that we are about to power on. */
	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
					     GENPD_NOTIFY_PRE_ON,
					     GENPD_NOTIFY_OFF, NULL);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	if (!genpd->power_on)
		goto out;

	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
	if (!timed) {
		ret = genpd->power_on(genpd);
		if (ret)
			goto err;

		goto out;
	}

	time_start = ktime_get();
	ret = genpd->power_on(genpd);
	if (ret)
		goto err;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
		goto out;

	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
	genpd->gd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "on", elapsed_ns);

out:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
	genpd->synced_poweroff = false;
	return 0;
err:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
				NULL);
	return ret;
}

static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	/* Notify consumers that we are about to power off. */
	ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
					     GENPD_NOTIFY_PRE_OFF,
					     GENPD_NOTIFY_ON, NULL);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	if (!genpd->power_off)
		goto out;

	timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
	if (!timed) {
		ret = genpd->power_off(genpd);
		if (ret)
			goto busy;

		goto out;
	}

	time_start = ktime_get();
	ret = genpd->power_off(genpd);
	if (ret)
		goto busy;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
		goto out;

	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
	genpd->gd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "off", elapsed_ns);

out:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
				NULL);
	return 0;
busy:
	raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
	return ret;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of genpd_power_off() unless it's already been done
 * before.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * genpd_power_off - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
 * RPM status of the related device is in an intermediate state, not yet turned
 * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
 * be RPM_SUSPENDED, while it tries to power off the PM domain.
 * @depth: nesting count for lockdep.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, remove power from @genpd.
 */
static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
			   unsigned int depth)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended = 0;
	int ret;

	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) System suspend is in progress.
	 */
	if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
		return 0;

	/*
	 * Abort power off for the PM domain in the following situations:
	 * (1) The domain is configured as always on.
	 * (2) When the domain has a subdomain being powered on.
	 */
	if (genpd_is_always_on(genpd) ||
	    genpd_is_rpm_always_on(genpd) ||
	    atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	/*
	 * The children must be in their deepest (powered-off) states to allow
	 * the parent to be powered off. Note that there's no need for
	 * additional locking, as powering on a child requires the parent's
	 * lock to be acquired first.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		struct generic_pm_domain *child = link->child;
		if (child->state_idx < child->state_count - 1)
			return -EBUSY;
	}

	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		/*
		 * Do not allow PM domain to be powered off, when an IRQ safe
		 * device is part of a non-IRQ safe domain.
		 */
		if (!pm_runtime_suspended(pdd->dev) ||
		    irq_safe_dev_in_sleep_domain(pdd->dev, genpd))
			not_suspended++;
	}

	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
		return -EBUSY;

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	/* Default to shallowest state. */
	if (!genpd->gov)
		genpd->state_idx = 0;

	/* Don't power off, if a child domain is waiting to power on. */
	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	ret = _genpd_power_off(genpd, true);
	if (ret) {
		genpd->states[genpd->state_idx].rejected++;
		return ret;
	}

	genpd->status = GENPD_STATE_OFF;
	genpd_update_accounting(genpd);
	genpd->states[genpd->state_idx].usage++;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_dec(link->parent);
		genpd_lock_nested(link->parent, depth + 1);
		genpd_power_off(link->parent, false, depth + 1);
		genpd_unlock(link->parent);
	}

	return 0;
}

/**
 * genpd_power_on - Restore power to a given PM domain and its parents.
 * @genpd: PM domain to power up.
 * @depth: nesting count for lockdep.
 *
 * Restore power to @genpd and all of its parents so that it is possible to
 * resume a device belonging to it.
 */
static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
{
	struct gpd_link *link;
	int ret = 0;

	if (genpd_status_on(genpd))
		return 0;

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the parents' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->child_links, child_node) {
		struct generic_pm_domain *parent = link->parent;

		genpd_sd_counter_inc(parent);

		genpd_lock_nested(parent, depth + 1);
		ret = genpd_power_on(parent, depth + 1);
		genpd_unlock(parent);

		if (ret) {
			genpd_sd_counter_dec(parent);
			goto err;
		}
	}

	ret = _genpd_power_on(genpd, true);
	if (ret)
		goto err;

	genpd->status = GENPD_STATE_ON;
	genpd_update_accounting(genpd);

	return 0;

 err:
	list_for_each_entry_continue_reverse(link,
					     &genpd->child_links,
					     child_node) {
		genpd_sd_counter_dec(link->parent);
		genpd_lock_nested(link->parent, depth + 1);
		genpd_power_off(link->parent, false, depth + 1);
		genpd_unlock(link->parent);
	}

	return ret;
}

static int genpd_dev_pm_start(struct device *dev)
{
	struct generic_pm_domain *genpd = dev_to_genpd(dev);

	return genpd_start_dev(genpd, dev);
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
	dev = gpd_data->base.dev;

	for (;;) {
		struct generic_pm_domain *genpd = ERR_PTR(-ENODATA);
		struct pm_domain_data *pdd;
		struct gpd_timing_data *td;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd) {
			td = to_gpd_data(pdd)->td;
			if (td) {
				td->constraint_changed = true;
				genpd = dev_to_genpd(dev);
			}
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			genpd_lock(genpd);
			genpd->gd->max_off_time_changed = true;
			genpd_unlock(genpd);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	genpd_lock(genpd);
	genpd_power_off(genpd, false, 0);
	genpd_unlock(genpd);
}
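
/*
 * Editorial note (not from the original source): the two helpers below pick
 * the runtime PM callback with the precedence device type, then class, then
 * bus, falling back to the driver's own callback, mirroring how the PM core
 * selects callbacks below the PM domain level.
 */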

/**
 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_suspend(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}

/**
 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_resume(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}

/**
 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*suspend_ok)(struct device *__dev);
	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
	struct gpd_timing_data *td = gpd_data->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start = 0;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * A runtime PM centric subsystem/driver may re-use the runtime PM
	 * callbacks for other purposes than runtime PM. In those scenarios
	 * runtime PM is disabled. Under these circumstances, we shall skip
	 * validating/measuring the PM QoS latency.
	 */
	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
	if (runtime_pm && suspend_ok && !suspend_ok(dev))
		return -EBUSY;

	/* Measure suspend latency. */
	if (td && runtime_pm)
		time_start = ktime_get();

	ret = __genpd_runtime_suspend(dev);
	if (ret)
		return ret;

	ret = genpd_stop_dev(genpd, dev);
	if (ret) {
		__genpd_runtime_resume(dev);
		return ret;
	}

	/* Update suspend latency value if the measured time exceeds it. */
	if (td && runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->suspend_latency_ns) {
			td->suspend_latency_ns = elapsed_ns;
			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->gd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	/*
	 * If power.irq_safe is set, this routine may be run with
	 * IRQs disabled, so suspend only if the PM domain also is irq_safe.
	 */
	if (irq_safe_dev_in_sleep_domain(dev, genpd))
		return 0;

	genpd_lock(genpd);
	genpd_power_off(genpd, true, 0);
	gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
	genpd_unlock(genpd);

	return 0;
}

/**
 * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
	struct gpd_timing_data *td = gpd_data->td;
	bool timed = td && pm_runtime_enabled(dev);
	ktime_t time_start = 0;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * As we don't power off a non IRQ safe domain, which holds
	 * an IRQ safe device, we don't need to restore power to it.
	 */
	if (irq_safe_dev_in_sleep_domain(dev, genpd))
		goto out;

	genpd_lock(genpd);
	genpd_restore_performance_state(dev, gpd_data->rpm_pstate);
	ret = genpd_power_on(genpd, 0);
	genpd_unlock(genpd);

	if (ret)
		return ret;

 out:
	/* Measure resume latency. */
	if (timed)
		time_start = ktime_get();

	ret = genpd_start_dev(genpd, dev);
	if (ret)
		goto err_poweroff;

	ret = __genpd_runtime_resume(dev);
	if (ret)
		goto err_stop;

	/* Update resume latency value if the measured time exceeds it. */
	if (timed) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->resume_latency_ns) {
			td->resume_latency_ns = elapsed_ns;
			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->gd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	return 0;

err_stop:
	genpd_stop_dev(genpd, dev);
err_poweroff:
	if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) {
		genpd_lock(genpd);
		genpd_power_off(genpd, true, 0);
		gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
		genpd_unlock(genpd);
	}

	return ret;
}

static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);
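
/*
 * Editorial note: "pd_ignore_unused" is a kernel command-line parameter.
 * When it is set, power domains that end up with no users are left on
 * instead of being powered off by genpd_power_off_unused() below, which can
 * help when debugging a suspected power-domain issue.
 */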

/**
 * genpd_power_off_unused - Power off all PM domains with no devices in use.
 */
static int __init genpd_power_off_unused(void)
{
	struct generic_pm_domain *genpd;

	if (pd_ignore_unused) {
		pr_warn("genpd: Not disabling unused power domains\n");
		return 0;
	}

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);

	return 0;
}
late_initcall(genpd_power_off_unused);

#ifdef CONFIG_PM_SLEEP

/**
 * genpd_sync_power_off - Synchronously power off a PM domain and its parents.
 * @genpd: PM domain to power off, if possible.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so. Also, in that case propagate to its parents.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
				 unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	/* Check that the children are in their deepest (powered-off) state. */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		struct generic_pm_domain *child = link->child;
		if (child->state_idx < child->state_count - 1)
			return;
	}

	/* Choose the deepest state when suspending */
	genpd->state_idx = genpd->state_count - 1;
	if (_genpd_power_off(genpd, false))
		return;

	genpd->status = GENPD_STATE_OFF;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_dec(link->parent);

		if (use_lock)
			genpd_lock_nested(link->parent, depth + 1);

		genpd_sync_power_off(link->parent, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->parent);
	}
}

/**
 * genpd_sync_power_on - Synchronously power on a PM domain and its parents.
 * @genpd: PM domain to power on.
 * @use_lock: use the lock.
 * @depth: nesting count for lockdep.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions. The "noirq" callbacks may be executed asynchronously, thus in
 * these cases the lock must be held.
 */
static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
				unsigned int depth)
{
	struct gpd_link *link;

	if (genpd_status_on(genpd))
		return;

	list_for_each_entry(link, &genpd->child_links, child_node) {
		genpd_sd_counter_inc(link->parent);

		if (use_lock)
			genpd_lock_nested(link->parent, depth + 1);

		genpd_sync_power_on(link->parent, use_lock, depth + 1);

		if (use_lock)
			genpd_unlock(link->parent);
	}

	_genpd_power_on(genpd, false);
	genpd->status = GENPD_STATE_ON;
}
1203
1204/**
Ulf Hansson9e9704e2017-10-06 09:02:06 +02001205 * genpd_prepare - Start power transition of a device in a PM domain.
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001206 * @dev: Device to start the transition of.
1207 *
1208 * Start a power transition of a device (during a system-wide power transition)
1209 * under the assumption that its pm_domain field points to the domain member of
1210 * an object of type struct generic_pm_domain representing a PM domain
1211 * consisting of I/O devices.
1212 */
Ulf Hansson9e9704e2017-10-06 09:02:06 +02001213static int genpd_prepare(struct device *dev)
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001214{
1215 struct generic_pm_domain *genpd;
Rafael J. Wysockib6c10c82011-07-12 00:39:21 +02001216 int ret;
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001217
1218 dev_dbg(dev, "%s()\n", __func__);
1219
1220 genpd = dev_to_genpd(dev);
1221 if (IS_ERR(genpd))
1222 return -EINVAL;
1223
Lina Iyer35241d12016-10-14 10:47:54 -07001224 genpd_lock(genpd);
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001225
Ulf Hansson39dd0f22016-05-30 11:43:07 +02001226 if (genpd->prepared_count++ == 0)
Rafael J. Wysocki65533bb2012-03-13 22:39:37 +01001227 genpd->suspended_count = 0;
Rafael J. Wysocki17b75ec2011-07-12 00:39:29 +02001228
Lina Iyer35241d12016-10-14 10:47:54 -07001229 genpd_unlock(genpd);
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001230
Rafael J. Wysockib6c10c82011-07-12 00:39:21 +02001231 ret = pm_generic_prepare(dev);
Ulf Hansson5241ab402017-11-08 10:11:02 +01001232 if (ret < 0) {
Lina Iyer35241d12016-10-14 10:47:54 -07001233 genpd_lock(genpd);
Rafael J. Wysockib6c10c82011-07-12 00:39:21 +02001234
Ulf Hansson39dd0f22016-05-30 11:43:07 +02001235 genpd->prepared_count--;
Rafael J. Wysockib6c10c82011-07-12 00:39:21 +02001236
Lina Iyer35241d12016-10-14 10:47:54 -07001237 genpd_unlock(genpd);
Rafael J. Wysockib6c10c82011-07-12 00:39:21 +02001238 }
Rafael J. Wysocki17b75ec2011-07-12 00:39:29 +02001239
Ulf Hansson5241ab402017-11-08 10:11:02 +01001240	/* Never return 1, as genpd doesn't cope with the direct_complete path. */
1241 return ret >= 0 ? 0 : ret;
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001242}
1243
1244/**
Mikko Perttunen10da6542017-06-22 10:18:33 +03001245 * genpd_finish_suspend - Completion of suspend or hibernation of device in an
1246 * I/O PM domain.
Rafael J. Wysocki0496c8a2012-01-29 20:39:02 +01001247 * @dev: Device to suspend.
Shawn Guo615db6d2022-11-02 22:21:02 +08001248 * @suspend_noirq: Generic suspend_noirq callback.
1249 * @resume_noirq: Generic resume_noirq callback.
Rafael J. Wysocki0496c8a2012-01-29 20:39:02 +01001250 *
1251 * Stop the device and remove power from the domain if all devices in it have
1252 * been stopped.
1253 */
Shawn Guo615db6d2022-11-02 22:21:02 +08001254static int genpd_finish_suspend(struct device *dev,
1255 int (*suspend_noirq)(struct device *dev),
1256 int (*resume_noirq)(struct device *dev))
Rafael J. Wysocki0496c8a2012-01-29 20:39:02 +01001257{
1258 struct generic_pm_domain *genpd;
Ulf Hanssona9354242018-01-10 21:31:56 +01001259 int ret = 0;
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001260
Rafael J. Wysocki0496c8a2012-01-29 20:39:02 +01001261 genpd = dev_to_genpd(dev);
1262 if (IS_ERR(genpd))
1263 return -EINVAL;
1264
Shawn Guo615db6d2022-11-02 22:21:02 +08001265 ret = suspend_noirq(dev);
Mikko Perttunen10da6542017-06-22 10:18:33 +03001266 if (ret)
1267 return ret;
1268
Patrice Chotard4e1d9a72020-11-19 08:25:39 +01001269 if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
Ulf Hanssona9354242018-01-10 21:31:56 +01001270 return 0;
1271
Rafael J. Wysocki17218e02018-01-12 14:10:38 +01001272 if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1273 !pm_runtime_status_suspended(dev)) {
1274 ret = genpd_stop_dev(genpd, dev);
Ulf Hanssona9354242018-01-10 21:31:56 +01001275 if (ret) {
Shawn Guo615db6d2022-11-02 22:21:02 +08001276 resume_noirq(dev);
Ulf Hansson122a2232016-05-30 11:33:14 +02001277 return ret;
Ulf Hanssona9354242018-01-10 21:31:56 +01001278 }
Ulf Hansson122a2232016-05-30 11:33:14 +02001279 }
1280
Ulf Hansson0883ac02017-02-08 13:39:00 +01001281 genpd_lock(genpd);
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001282 genpd->suspended_count++;
Ulf Hansson0883ac02017-02-08 13:39:00 +01001283 genpd_sync_power_off(genpd, true, 0);
1284 genpd_unlock(genpd);
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001285
1286 return 0;
1287}
1288
1289/**
Ulf Hansson9e9704e2017-10-06 09:02:06 +02001290 * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
Mikko Perttunen10da6542017-06-22 10:18:33 +03001291 * @dev: Device to suspend.
1292 *
1293 * Stop the device and remove power from the domain if all devices in it have
1294 * been stopped.
1295 */
Ulf Hansson9e9704e2017-10-06 09:02:06 +02001296static int genpd_suspend_noirq(struct device *dev)
Mikko Perttunen10da6542017-06-22 10:18:33 +03001297{
1298 dev_dbg(dev, "%s()\n", __func__);
1299
Shawn Guo615db6d2022-11-02 22:21:02 +08001300 return genpd_finish_suspend(dev,
1301 pm_generic_suspend_noirq,
1302 pm_generic_resume_noirq);
Mikko Perttunen10da6542017-06-22 10:18:33 +03001303}
1304
1305/**
Shawn Guod9cc34f2022-11-02 22:21:03 +08001306 * genpd_finish_resume - Completion of resume of device in an I/O PM domain.
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001307 * @dev: Device to resume.
Shawn Guod9cc34f2022-11-02 22:21:03 +08001308 * @resume_noirq: Generic resume_noirq callback.
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001309 *
Rafael J. Wysocki0496c8a2012-01-29 20:39:02 +01001310 * Restore power to the device's PM domain, if necessary, and start the device.
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001311 */
Shawn Guod9cc34f2022-11-02 22:21:03 +08001312static int genpd_finish_resume(struct device *dev,
1313 int (*resume_noirq)(struct device *dev))
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001314{
1315 struct generic_pm_domain *genpd;
Ulf Hanssona9354242018-01-10 21:31:56 +01001316 int ret;
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001317
1318 dev_dbg(dev, "%s()\n", __func__);
1319
1320 genpd = dev_to_genpd(dev);
1321 if (IS_ERR(genpd))
1322 return -EINVAL;
1323
Patrice Chotard4e1d9a72020-11-19 08:25:39 +01001324 if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
Shawn Guod9cc34f2022-11-02 22:21:03 +08001325 return resume_noirq(dev);
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001326
Ulf Hansson0883ac02017-02-08 13:39:00 +01001327 genpd_lock(genpd);
1328 genpd_sync_power_on(genpd, true, 0);
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001329 genpd->suspended_count--;
Ulf Hansson0883ac02017-02-08 13:39:00 +01001330 genpd_unlock(genpd);
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001331
Rafael J. Wysocki17218e02018-01-12 14:10:38 +01001332 if (genpd->dev_ops.stop && genpd->dev_ops.start &&
1333 !pm_runtime_status_suspended(dev)) {
1334 ret = genpd_start_dev(genpd, dev);
Ulf Hanssona9354242018-01-10 21:31:56 +01001335 if (ret)
1336 return ret;
1337 }
Ulf Hansson122a2232016-05-30 11:33:14 +02001338
Ulf Hanssona9354242018-01-10 21:31:56 +01001339 return pm_generic_resume_noirq(dev);
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001340}
1341
1342/**
Shawn Guod9cc34f2022-11-02 22:21:03 +08001343 * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
1344 * @dev: Device to resume.
1345 *
1346 * Restore power to the device's PM domain, if necessary, and start the device.
1347 */
1348static int genpd_resume_noirq(struct device *dev)
1349{
1350 dev_dbg(dev, "%s()\n", __func__);
1351
1352 return genpd_finish_resume(dev, pm_generic_resume_noirq);
1353}
1354
1355/**
Ulf Hansson9e9704e2017-10-06 09:02:06 +02001356 * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001357 * @dev: Device to freeze.
1358 *
1359 * Carry out a late freeze of a device under the assumption that its
1360 * pm_domain field points to the domain member of an object of type
1361 * struct generic_pm_domain representing a power domain consisting of I/O
1362 * devices.
1363 */
Ulf Hansson9e9704e2017-10-06 09:02:06 +02001364static int genpd_freeze_noirq(struct device *dev)
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001365{
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001366 dev_dbg(dev, "%s()\n", __func__);
1367
Shawn Guoebb486b2022-11-02 22:21:04 +08001368 return genpd_finish_suspend(dev,
1369 pm_generic_freeze_noirq,
1370 pm_generic_thaw_noirq);
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001371}
1372
1373/**
Ulf Hansson9e9704e2017-10-06 09:02:06 +02001374 * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001375 * @dev: Device to thaw.
1376 *
Rafael J. Wysocki0496c8a2012-01-29 20:39:02 +01001377 * Start the device, unless power has been removed from the domain already
1378 * before the system transition.
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001379 */
Ulf Hansson9e9704e2017-10-06 09:02:06 +02001380static int genpd_thaw_noirq(struct device *dev)
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001381{
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001382 dev_dbg(dev, "%s()\n", __func__);
1383
Shawn Guoebb486b2022-11-02 22:21:04 +08001384 return genpd_finish_resume(dev, pm_generic_thaw_noirq);
Mikko Perttunen10da6542017-06-22 10:18:33 +03001385}
1386
1387/**
Ulf Hansson9e9704e2017-10-06 09:02:06 +02001388 * genpd_poweroff_noirq - Completion of hibernation of device in an
Mikko Perttunen10da6542017-06-22 10:18:33 +03001389 * I/O PM domain.
1390 * @dev: Device to poweroff.
1391 *
1392 * Stop the device and remove power from the domain if all devices in it have
1393 * been stopped.
1394 */
Ulf Hansson9e9704e2017-10-06 09:02:06 +02001395static int genpd_poweroff_noirq(struct device *dev)
Mikko Perttunen10da6542017-06-22 10:18:33 +03001396{
1397 dev_dbg(dev, "%s()\n", __func__);
1398
Shawn Guo615db6d2022-11-02 22:21:02 +08001399 return genpd_finish_suspend(dev,
1400 pm_generic_poweroff_noirq,
1401 pm_generic_restore_noirq);
Rafael J. Wysocki0496c8a2012-01-29 20:39:02 +01001402}
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001403
Rafael J. Wysocki0496c8a2012-01-29 20:39:02 +01001404/**
Ulf Hansson9e9704e2017-10-06 09:02:06 +02001405 * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001406 * @dev: Device to resume.
1407 *
Rafael J. Wysocki0496c8a2012-01-29 20:39:02 +01001408 * Make sure the domain will be in the same power state as it was before the
1409 * hibernation the system is resuming from, and start the device if necessary.
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001410 */
Ulf Hansson9e9704e2017-10-06 09:02:06 +02001411static int genpd_restore_noirq(struct device *dev)
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001412{
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001413 dev_dbg(dev, "%s()\n", __func__);
1414
Shawn Guod9cc34f2022-11-02 22:21:03 +08001415 return genpd_finish_resume(dev, pm_generic_restore_noirq);
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001416}
1417
1418/**
Ulf Hansson9e9704e2017-10-06 09:02:06 +02001419 * genpd_complete - Complete power transition of a device in a power domain.
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001420 * @dev: Device to complete the transition of.
1421 *
1422 * Complete a power transition of a device (during a system-wide power
1423 * transition) under the assumption that its pm_domain field points to the
1424 * domain member of an object of type struct generic_pm_domain representing
1425 * a power domain consisting of I/O devices.
1426 */
Ulf Hansson9e9704e2017-10-06 09:02:06 +02001427static void genpd_complete(struct device *dev)
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001428{
1429 struct generic_pm_domain *genpd;
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001430
1431 dev_dbg(dev, "%s()\n", __func__);
1432
1433 genpd = dev_to_genpd(dev);
1434 if (IS_ERR(genpd))
1435 return;
1436
Ulf Hansson4d23a5e2016-05-30 11:33:13 +02001437 pm_generic_complete(dev);
1438
Lina Iyer35241d12016-10-14 10:47:54 -07001439 genpd_lock(genpd);
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001440
Ulf Hansson39dd0f22016-05-30 11:43:07 +02001441 genpd->prepared_count--;
Ulf Hansson4d23a5e2016-05-30 11:33:13 +02001442 if (!genpd->prepared_count)
1443 genpd_queue_power_off_work(genpd);
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001444
Lina Iyer35241d12016-10-14 10:47:54 -07001445 genpd_unlock(genpd);
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001446}
1447
Ulf Hanssonfc519892020-11-03 16:06:25 +01001448static void genpd_switch_state(struct device *dev, bool suspend)
Rafael J. Wysocki77f827d2012-08-06 01:39:57 +02001449{
1450 struct generic_pm_domain *genpd;
Ulf Hanssonb9795a32020-11-03 16:06:26 +01001451 bool use_lock;
Rafael J. Wysocki77f827d2012-08-06 01:39:57 +02001452
Ulf Hanssonfe0c2baa2019-10-16 16:16:49 +02001453 genpd = dev_to_genpd_safe(dev);
1454 if (!genpd)
Rafael J. Wysocki77f827d2012-08-06 01:39:57 +02001455 return;
1456
Ulf Hanssonb9795a32020-11-03 16:06:26 +01001457 use_lock = genpd_is_irq_safe(genpd);
1458
1459 if (use_lock)
1460 genpd_lock(genpd);
1461
Rafael J. Wysocki77f827d2012-08-06 01:39:57 +02001462 if (suspend) {
1463 genpd->suspended_count++;
Ulf Hanssonb9795a32020-11-03 16:06:26 +01001464 genpd_sync_power_off(genpd, use_lock, 0);
Rafael J. Wysocki77f827d2012-08-06 01:39:57 +02001465 } else {
Ulf Hanssonb9795a32020-11-03 16:06:26 +01001466 genpd_sync_power_on(genpd, use_lock, 0);
Rafael J. Wysocki77f827d2012-08-06 01:39:57 +02001467 genpd->suspended_count--;
1468 }
Ulf Hanssonb9795a32020-11-03 16:06:26 +01001469
1470 if (use_lock)
1471 genpd_unlock(genpd);
Rafael J. Wysocki77f827d2012-08-06 01:39:57 +02001472}
Ulf Hanssond47e6462014-09-03 12:52:24 +02001473
Ulf Hanssonfc519892020-11-03 16:06:25 +01001474/**
1475 * dev_pm_genpd_suspend - Synchronously try to suspend the genpd for @dev
1476 * @dev: The device that is attached to the genpd, that can be suspended.
1477 *
1478 * This routine should typically be called for a device that needs to be
Ulf Hanssonb9795a32020-11-03 16:06:26 +01001479 * suspended during the syscore suspend phase. It may also be called during
1480 * suspend-to-idle to suspend a corresponding CPU device that is attached to a
1481 * genpd.
Ulf Hanssonfc519892020-11-03 16:06:25 +01001482 */
1483void dev_pm_genpd_suspend(struct device *dev)
Ulf Hanssond47e6462014-09-03 12:52:24 +02001484{
Ulf Hanssonfc519892020-11-03 16:06:25 +01001485 genpd_switch_state(dev, true);
Ulf Hanssond47e6462014-09-03 12:52:24 +02001486}
Ulf Hanssonfc519892020-11-03 16:06:25 +01001487EXPORT_SYMBOL_GPL(dev_pm_genpd_suspend);
Ulf Hanssond47e6462014-09-03 12:52:24 +02001488
Ulf Hanssonfc519892020-11-03 16:06:25 +01001489/**
1490 * dev_pm_genpd_resume - Synchronously try to resume the genpd for @dev
1491 * @dev: The device that is attached to the genpd, which needs to be resumed.
1492 *
1493 * This routine should typically be called for a device that needs to be resumed
Ulf Hanssonb9795a32020-11-03 16:06:26 +01001494 * during the syscore resume phase. It may also be called during suspend-to-idle
1495 * to resume a corresponding CPU device that is attached to a genpd.
Ulf Hanssonfc519892020-11-03 16:06:25 +01001496 */
1497void dev_pm_genpd_resume(struct device *dev)
Ulf Hanssond47e6462014-09-03 12:52:24 +02001498{
Ulf Hanssonfc519892020-11-03 16:06:25 +01001499 genpd_switch_state(dev, false);
Ulf Hanssond47e6462014-09-03 12:52:24 +02001500}
Ulf Hanssonfc519892020-11-03 16:06:25 +01001501EXPORT_SYMBOL_GPL(dev_pm_genpd_resume);
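/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * a genpd-attached device that must be quiesced from the syscore stage
 * can wire dev_pm_genpd_suspend()/dev_pm_genpd_resume() into syscore_ops
 * (needs <linux/syscore_ops.h>). "my_timer_dev" is an assumed pointer
 * saved at probe time.
 */
static struct device *my_timer_dev;

static int my_timer_syscore_suspend(void)
{
	/* May remove power from the domain if nothing else keeps it on. */
	dev_pm_genpd_suspend(my_timer_dev);
	return 0;
}

static void my_timer_syscore_resume(void)
{
	/* Restores power to the domain before the device is used again. */
	dev_pm_genpd_resume(my_timer_dev);
}

static struct syscore_ops my_timer_syscore_ops = {
	.suspend = my_timer_syscore_suspend,
	.resume = my_timer_syscore_resume,
};
/* register_syscore_ops(&my_timer_syscore_ops) would be called at init. */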
Rafael J. Wysocki77f827d2012-08-06 01:39:57 +02001502
Rafael J. Wysockid30d8192014-11-27 22:38:05 +01001503#else /* !CONFIG_PM_SLEEP */
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001504
Ulf Hansson9e9704e2017-10-06 09:02:06 +02001505#define genpd_prepare NULL
1506#define genpd_suspend_noirq NULL
1507#define genpd_resume_noirq NULL
1508#define genpd_freeze_noirq NULL
1509#define genpd_thaw_noirq NULL
1510#define genpd_poweroff_noirq NULL
1511#define genpd_restore_noirq NULL
1512#define genpd_complete NULL
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001513
1514#endif /* CONFIG_PM_SLEEP */
1515
Ulf Hansson66d29d82022-05-11 16:56:56 +02001516static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
1517 bool has_governor)
Rafael J. Wysocki1d5fcfe2012-07-05 22:12:32 +02001518{
1519 struct generic_pm_domain_data *gpd_data;
Ulf Hansson66d29d82022-05-11 16:56:56 +02001520 struct gpd_timing_data *td;
Ulf Hansson3e235682015-01-27 21:13:43 +01001521 int ret;
1522
1523 ret = dev_pm_get_subsys_data(dev);
1524 if (ret)
1525 return ERR_PTR(ret);
Rafael J. Wysocki1d5fcfe2012-07-05 22:12:32 +02001526
1527 gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
Ulf Hansson3e235682015-01-27 21:13:43 +01001528 if (!gpd_data) {
1529 ret = -ENOMEM;
1530 goto err_put;
1531 }
Rafael J. Wysocki1d5fcfe2012-07-05 22:12:32 +02001532
Ulf Hanssonf104e1e52015-01-27 21:13:44 +01001533 gpd_data->base.dev = dev;
Ulf Hanssonf104e1e52015-01-27 21:13:44 +01001534 gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
1535
Ulf Hansson66d29d82022-05-11 16:56:56 +02001536 /* Allocate data used by a governor. */
1537 if (has_governor) {
1538 td = kzalloc(sizeof(*td), GFP_KERNEL);
1539 if (!td) {
1540 ret = -ENOMEM;
1541 goto err_free;
1542 }
Ulf Hanssonf104e1e52015-01-27 21:13:44 +01001543
Ulf Hansson66d29d82022-05-11 16:56:56 +02001544 td->constraint_changed = true;
1545 td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
Ulf Hansson9c74f2a2022-05-11 16:56:57 +02001546 td->next_wakeup = KTIME_MAX;
Ulf Hansson66d29d82022-05-11 16:56:56 +02001547 gpd_data->td = td;
Ulf Hanssonf104e1e52015-01-27 21:13:44 +01001548 }
1549
Ulf Hansson66d29d82022-05-11 16:56:56 +02001550 spin_lock_irq(&dev->power.lock);
1551
1552 if (dev->power.subsys_data->domain_data)
1553 ret = -EINVAL;
1554 else
1555 dev->power.subsys_data->domain_data = &gpd_data->base;
Ulf Hanssonf104e1e52015-01-27 21:13:44 +01001556
1557 spin_unlock_irq(&dev->power.lock);
1558
Ulf Hansson66d29d82022-05-11 16:56:56 +02001559 if (ret)
1560 goto err_free;
1561
Rafael J. Wysocki1d5fcfe2012-07-05 22:12:32 +02001562 return gpd_data;
Ulf Hansson3e235682015-01-27 21:13:43 +01001563
Ulf Hanssonf104e1e52015-01-27 21:13:44 +01001564 err_free:
Ulf Hansson66d29d82022-05-11 16:56:56 +02001565 kfree(gpd_data->td);
Ulf Hanssonf104e1e52015-01-27 21:13:44 +01001566 kfree(gpd_data);
Ulf Hansson3e235682015-01-27 21:13:43 +01001567 err_put:
1568 dev_pm_put_subsys_data(dev);
1569 return ERR_PTR(ret);
Rafael J. Wysocki1d5fcfe2012-07-05 22:12:32 +02001570}
1571
Ulf Hansson49d400c2015-01-27 21:13:38 +01001572static void genpd_free_dev_data(struct device *dev,
1573 struct generic_pm_domain_data *gpd_data)
Rafael J. Wysocki1d5fcfe2012-07-05 22:12:32 +02001574{
Ulf Hanssonf104e1e52015-01-27 21:13:44 +01001575 spin_lock_irq(&dev->power.lock);
1576
Ulf Hanssonf104e1e52015-01-27 21:13:44 +01001577 dev->power.subsys_data->domain_data = NULL;
1578
1579 spin_unlock_irq(&dev->power.lock);
1580
Ulf Hansson66d29d82022-05-11 16:56:56 +02001581 kfree(gpd_data->td);
Rafael J. Wysocki1d5fcfe2012-07-05 22:12:32 +02001582 kfree(gpd_data);
Ulf Hansson3e235682015-01-27 21:13:43 +01001583 dev_pm_put_subsys_data(dev);
Rafael J. Wysocki1d5fcfe2012-07-05 22:12:32 +02001584}
1585
Ulf Hanssonb24e1962019-04-25 11:04:12 +02001586static void genpd_update_cpumask(struct generic_pm_domain *genpd,
1587 int cpu, bool set, unsigned int depth)
Ulf Hanssoneb594b72019-03-27 15:35:46 +01001588{
1589 struct gpd_link *link;
1590
1591 if (!genpd_is_cpu_domain(genpd))
1592 return;
1593
Kees Cook8d87ae42020-07-08 16:32:13 -07001594 list_for_each_entry(link, &genpd->child_links, child_node) {
1595 struct generic_pm_domain *parent = link->parent;
Ulf Hanssoneb594b72019-03-27 15:35:46 +01001596
Kees Cook8d87ae42020-07-08 16:32:13 -07001597 genpd_lock_nested(parent, depth + 1);
1598 genpd_update_cpumask(parent, cpu, set, depth + 1);
1599 genpd_unlock(parent);
Ulf Hanssoneb594b72019-03-27 15:35:46 +01001600 }
1601
1602 if (set)
1603 cpumask_set_cpu(cpu, genpd->cpus);
1604 else
1605 cpumask_clear_cpu(cpu, genpd->cpus);
1606}
1607
Ulf Hanssonb24e1962019-04-25 11:04:12 +02001608static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
1609{
1610 if (cpu >= 0)
1611 genpd_update_cpumask(genpd, cpu, true, 0);
1612}
1613
1614static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
1615{
1616 if (cpu >= 0)
1617 genpd_update_cpumask(genpd, cpu, false, 0);
1618}
1619
1620static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
Ulf Hanssoneb594b72019-03-27 15:35:46 +01001621{
1622 int cpu;
1623
1624 if (!genpd_is_cpu_domain(genpd))
Ulf Hanssonb24e1962019-04-25 11:04:12 +02001625 return -1;
Ulf Hanssoneb594b72019-03-27 15:35:46 +01001626
1627 for_each_possible_cpu(cpu) {
Ulf Hanssonb24e1962019-04-25 11:04:12 +02001628 if (get_cpu_device(cpu) == dev)
1629 return cpu;
Ulf Hanssoneb594b72019-03-27 15:35:46 +01001630 }
Ulf Hanssoneb594b72019-03-27 15:35:46 +01001631
Ulf Hanssonb24e1962019-04-25 11:04:12 +02001632 return -1;
Ulf Hanssoneb594b72019-03-27 15:35:46 +01001633}
1634
Ulf Hanssonf9ccd7c2019-04-25 11:04:13 +02001635static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1636 struct device *base_dev)
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001637{
Ulf Hanssonf38d1a62022-05-11 16:57:02 +02001638 struct genpd_governor_data *gd = genpd->gd;
Ulf Hanssonc0356db2015-01-27 21:13:42 +01001639 struct generic_pm_domain_data *gpd_data;
Ulf Hanssonf9ccd7c2019-04-25 11:04:13 +02001640 int ret;
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001641
1642 dev_dbg(dev, "%s()\n", __func__);
1643
Ulf Hanssonf38d1a62022-05-11 16:57:02 +02001644 gpd_data = genpd_alloc_dev_data(dev, gd);
Ulf Hansson3e235682015-01-27 21:13:43 +01001645 if (IS_ERR(gpd_data))
1646 return PTR_ERR(gpd_data);
Rafael J. Wysocki6ff7bb0d02012-05-01 21:34:07 +02001647
Ulf Hanssonf9ccd7c2019-04-25 11:04:13 +02001648 gpd_data->cpu = genpd_get_cpu(genpd, base_dev);
Ulf Hanssonb24e1962019-04-25 11:04:12 +02001649
Ulf Hanssonb472c2f2015-01-27 21:13:45 +01001650 ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
1651 if (ret)
1652 goto out;
Geert Uytterhoevend79b6fe2014-09-25 18:28:28 +02001653
Jiada Wang2071ac92019-03-12 15:51:28 +09001654 genpd_lock(genpd);
1655
Ulf Hanssonf9ccd7c2019-04-25 11:04:13 +02001656 genpd_set_cpumask(genpd, gpd_data->cpu);
Sudeep Holla975e83c2017-07-14 11:51:48 +01001657 dev_pm_domain_set(dev, &genpd->domain);
1658
Ulf Hansson14b53062015-01-27 21:13:40 +01001659 genpd->device_count++;
Ulf Hanssonf38d1a62022-05-11 16:57:02 +02001660 if (gd)
1661 gd->max_off_time_changed = true;
Ulf Hansson14b53062015-01-27 21:13:40 +01001662
Rafael J. Wysocki1d5fcfe2012-07-05 22:12:32 +02001663 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
Rafael J. Wysocki6ff7bb0d02012-05-01 21:34:07 +02001664
Lina Iyer35241d12016-10-14 10:47:54 -07001665 genpd_unlock(genpd);
Jiada Wang2071ac92019-03-12 15:51:28 +09001666 out:
Ulf Hanssonc0356db2015-01-27 21:13:42 +01001667 if (ret)
1668 genpd_free_dev_data(dev, gpd_data);
1669 else
Viresh Kumar0b07ee92019-07-04 13:06:17 +05301670 dev_pm_qos_add_notifier(dev, &gpd_data->nb,
1671 DEV_PM_QOS_RESUME_LATENCY);
Rafael J. Wysocki1d5fcfe2012-07-05 22:12:32 +02001672
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001673 return ret;
1674}
Jon Hunter19efa5f2016-09-12 12:01:11 +01001675
1676/**
Ulf Hansson1a7a6702018-05-29 12:04:14 +02001677 * pm_genpd_add_device - Add a device to an I/O PM domain.
Jon Hunter19efa5f2016-09-12 12:01:11 +01001678 * @genpd: PM domain to add the device to.
1679 * @dev: Device to be added.
Jon Hunter19efa5f2016-09-12 12:01:11 +01001680 */
Ulf Hansson1a7a6702018-05-29 12:04:14 +02001681int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
Jon Hunter19efa5f2016-09-12 12:01:11 +01001682{
1683 int ret;
1684
Ulf Hansson4384a702023-05-30 11:55:36 +02001685 if (!genpd || !dev)
1686 return -EINVAL;
1687
Jon Hunter19efa5f2016-09-12 12:01:11 +01001688 mutex_lock(&gpd_list_lock);
Ulf Hanssonf9ccd7c2019-04-25 11:04:13 +02001689 ret = genpd_add_device(genpd, dev, dev);
Jon Hunter19efa5f2016-09-12 12:01:11 +01001690 mutex_unlock(&gpd_list_lock);
1691
1692 return ret;
1693}
Ulf Hansson1a7a6702018-05-29 12:04:14 +02001694EXPORT_SYMBOL_GPL(pm_genpd_add_device);
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001695
Ulf Hansson85168d52016-09-21 15:38:50 +02001696static int genpd_remove_device(struct generic_pm_domain *genpd,
1697 struct device *dev)
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001698{
Rafael J. Wysocki6ff7bb0d02012-05-01 21:34:07 +02001699 struct generic_pm_domain_data *gpd_data;
Rafael J. Wysocki4605ab62011-08-25 15:34:12 +02001700 struct pm_domain_data *pdd;
Ulf Hanssonf9ccd7c2019-04-25 11:04:13 +02001701 int ret = 0;
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001702
1703 dev_dbg(dev, "%s()\n", __func__);
1704
Ulf Hanssonc0356db2015-01-27 21:13:42 +01001705 pdd = dev->power.subsys_data->domain_data;
1706 gpd_data = to_gpd_data(pdd);
Viresh Kumar0b07ee92019-07-04 13:06:17 +05301707 dev_pm_qos_remove_notifier(dev, &gpd_data->nb,
1708 DEV_PM_QOS_RESUME_LATENCY);
Ulf Hanssonc0356db2015-01-27 21:13:42 +01001709
Lina Iyer35241d12016-10-14 10:47:54 -07001710 genpd_lock(genpd);
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001711
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001712 if (genpd->prepared_count > 0) {
1713 ret = -EAGAIN;
1714 goto out;
1715 }
1716
Rafael J. Wysocki6ff7bb0d02012-05-01 21:34:07 +02001717 genpd->device_count--;
Ulf Hanssonf38d1a62022-05-11 16:57:02 +02001718 if (genpd->gd)
1719 genpd->gd->max_off_time_changed = true;
Rafael J. Wysocki6ff7bb0d02012-05-01 21:34:07 +02001720
Ulf Hanssonf9ccd7c2019-04-25 11:04:13 +02001721 genpd_clear_cpumask(genpd, gpd_data->cpu);
Sudeep Holla975e83c2017-07-14 11:51:48 +01001722 dev_pm_domain_set(dev, NULL);
1723
Rafael J. Wysockiefa69022012-05-01 21:33:53 +02001724 list_del_init(&pdd->list_node);
Rafael J. Wysocki6ff7bb0d02012-05-01 21:34:07 +02001725
Lina Iyer35241d12016-10-14 10:47:54 -07001726 genpd_unlock(genpd);
Rafael J. Wysocki6ff7bb0d02012-05-01 21:34:07 +02001727
Jiada Wang2071ac92019-03-12 15:51:28 +09001728 if (genpd->detach_dev)
1729 genpd->detach_dev(genpd, dev);
1730
Ulf Hanssonc1dbe2f2015-01-27 21:13:39 +01001731 genpd_free_dev_data(dev, gpd_data);
Rafael J. Wysocki1d5fcfe2012-07-05 22:12:32 +02001732
Rafael J. Wysocki6ff7bb0d02012-05-01 21:34:07 +02001733 return 0;
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001734
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02001735 out:
Lina Iyer35241d12016-10-14 10:47:54 -07001736 genpd_unlock(genpd);
Viresh Kumar0b07ee92019-07-04 13:06:17 +05301737 dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY);
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001738
1739 return ret;
1740}
Ulf Hansson85168d52016-09-21 15:38:50 +02001741
1742/**
1743 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
Ulf Hansson85168d52016-09-21 15:38:50 +02001744 * @dev: Device to be removed.
1745 */
Ulf Hansson924f4482018-05-29 12:04:15 +02001746int pm_genpd_remove_device(struct device *dev)
Ulf Hansson85168d52016-09-21 15:38:50 +02001747{
Ulf Hanssonb3ad17c2019-08-29 16:48:05 +02001748 struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);
Ulf Hansson924f4482018-05-29 12:04:15 +02001749
1750 if (!genpd)
Ulf Hansson85168d52016-09-21 15:38:50 +02001751 return -EINVAL;
1752
1753 return genpd_remove_device(genpd, dev);
1754}
Maruthi Bayyavarapu24c96dc2015-11-18 01:12:00 +05301755EXPORT_SYMBOL_GPL(pm_genpd_remove_device);
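/*
 * Illustrative sketch (hypothetical consumer glue, not part of this file):
 * attaching a platform device to the assumed domain "my_pd" at probe and
 * detaching it on remove. Once attached, runtime PM of the device drives
 * the domain on/off through the genpd callbacks set up by pm_genpd_init().
 */
static struct generic_pm_domain my_pd;	/* tentative; initialized in a later sketch */

static int my_consumer_probe(struct platform_device *pdev)
{
	int ret;

	ret = pm_genpd_add_device(&my_pd, &pdev->dev);
	if (ret)
		return ret;

	pm_runtime_enable(&pdev->dev);
	return 0;
}

static int my_consumer_remove(struct platform_device *pdev)
{
	pm_runtime_disable(&pdev->dev);
	/* Fails with -EAGAIN while a system-wide transition is prepared. */
	return pm_genpd_remove_device(&pdev->dev);
}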
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001756
Ulf Hanssond4f81382020-10-13 14:23:39 +02001757/**
1758 * dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev
1759 *
1760 * @dev: Device that should be associated with the notifier
1761 * @nb: The notifier block to register
1762 *
1763 * Users may call this function to add a genpd power on/off notifier for an
1764 * attached @dev. Only one notifier per device is allowed. The notifier is
1765 * invoked when genpd powers the PM domain on or off.
1766 *
1767 * It is assumed that the caller guarantees that the genpd won't be detached
1768 * while this routine is running.
1769 *
1770 * Returns 0 on success and negative error values on failures.
1771 */
1772int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb)
1773{
1774 struct generic_pm_domain *genpd;
1775 struct generic_pm_domain_data *gpd_data;
1776 int ret;
1777
1778 genpd = dev_to_genpd_safe(dev);
1779 if (!genpd)
1780 return -ENODEV;
1781
1782 if (WARN_ON(!dev->power.subsys_data ||
1783 !dev->power.subsys_data->domain_data))
1784 return -EINVAL;
1785
1786 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
1787 if (gpd_data->power_nb)
1788 return -EEXIST;
1789
1790 genpd_lock(genpd);
1791 ret = raw_notifier_chain_register(&genpd->power_notifiers, nb);
1792 genpd_unlock(genpd);
1793
1794 if (ret) {
1795 dev_warn(dev, "failed to add notifier for PM domain %s\n",
1796 genpd->name);
1797 return ret;
1798 }
1799
1800 gpd_data->power_nb = nb;
1801 return 0;
1802}
1803EXPORT_SYMBOL_GPL(dev_pm_genpd_add_notifier);
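/*
 * Illustrative sketch (hypothetical consumer code, not part of this file):
 * saving/restoring device context around domain power cuts with a genpd
 * power notifier. GENPD_NOTIFY_PRE_OFF and GENPD_NOTIFY_ON are the events
 * genpd sends before powering off and after powering on.
 */
static int my_genpd_power_cb(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	switch (action) {
	case GENPD_NOTIFY_PRE_OFF:
		/* Assumed hook: save context before power is removed. */
		break;
	case GENPD_NOTIFY_ON:
		/* Assumed hook: restore context now that power is back. */
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block my_genpd_nb = {
	.notifier_call = my_genpd_power_cb,
};

/*
 * After the device has been attached to its genpd:
 *	dev_pm_genpd_add_notifier(dev, &my_genpd_nb);
 * and before it is detached:
 *	dev_pm_genpd_remove_notifier(dev);
 */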
1804
1805/**
1806 * dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev
1807 *
1808 * @dev: Device that is associated with the notifier
1809 *
1810 * Users may call this function to remove a genpd power on/off notifier for an
1811 * attached @dev.
1812 *
1813 * It is assumed that the caller guarantees that the genpd won't be detached
1814 * while this routine is running.
1815 *
1816 * Returns 0 on success and negative error values on failures.
1817 */
1818int dev_pm_genpd_remove_notifier(struct device *dev)
1819{
1820 struct generic_pm_domain *genpd;
1821 struct generic_pm_domain_data *gpd_data;
1822 int ret;
1823
1824 genpd = dev_to_genpd_safe(dev);
1825 if (!genpd)
1826 return -ENODEV;
1827
1828 if (WARN_ON(!dev->power.subsys_data ||
1829 !dev->power.subsys_data->domain_data))
1830 return -EINVAL;
1831
1832 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
1833 if (!gpd_data->power_nb)
1834 return -ENODEV;
1835
1836 genpd_lock(genpd);
1837 ret = raw_notifier_chain_unregister(&genpd->power_notifiers,
1838 gpd_data->power_nb);
1839 genpd_unlock(genpd);
1840
1841 if (ret) {
1842 dev_warn(dev, "failed to remove notifier for PM domain %s\n",
1843 genpd->name);
1844 return ret;
1845 }
1846
1847 gpd_data->power_nb = NULL;
1848 return 0;
1849}
1850EXPORT_SYMBOL_GPL(dev_pm_genpd_remove_notifier);
1851
Jon Hunter19efa5f2016-09-12 12:01:11 +01001852static int genpd_add_subdomain(struct generic_pm_domain *genpd,
1853 struct generic_pm_domain *subdomain)
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001854{
Lina Iyer25479232015-10-28 15:19:50 -06001855 struct gpd_link *link, *itr;
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001856 int ret = 0;
1857
Rafael J. Wysockifb7268b2012-08-07 01:08:37 +02001858 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
1859 || genpd == subdomain)
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001860 return -EINVAL;
1861
Lina Iyerd716f472016-10-14 10:47:55 -07001862 /*
1863 * If the domain can be powered on/off in an IRQ safe
1864 * context, ensure that the subdomain can also be
1865 * powered on/off in that context.
1866 */
1867 if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
Dan Carpenter44cae7d2016-11-10 15:52:15 +03001868 WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
Lina Iyerd716f472016-10-14 10:47:55 -07001869 genpd->name, subdomain->name);
1870 return -EINVAL;
1871 }
1872
Lina Iyer25479232015-10-28 15:19:50 -06001873 link = kzalloc(sizeof(*link), GFP_KERNEL);
1874 if (!link)
1875 return -ENOMEM;
1876
Lina Iyer35241d12016-10-14 10:47:54 -07001877 genpd_lock(subdomain);
1878 genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001879
Ulf Hansson41e2c8e2017-03-20 11:19:20 +01001880 if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001881 ret = -EINVAL;
1882 goto out;
1883 }
1884
Kees Cook8d87ae42020-07-08 16:32:13 -07001885 list_for_each_entry(itr, &genpd->parent_links, parent_node) {
1886 if (itr->child == subdomain && itr->parent == genpd) {
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001887 ret = -EINVAL;
1888 goto out;
1889 }
1890 }
1891
Kees Cook8d87ae42020-07-08 16:32:13 -07001892 link->parent = genpd;
1893 list_add_tail(&link->parent_node, &genpd->parent_links);
1894 link->child = subdomain;
1895 list_add_tail(&link->child_node, &subdomain->child_links);
Ulf Hansson41e2c8e2017-03-20 11:19:20 +01001896 if (genpd_status_on(subdomain))
Rafael J. Wysockic4bb3162011-08-08 23:43:04 +02001897 genpd_sd_counter_inc(genpd);
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001898
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001899 out:
Lina Iyer35241d12016-10-14 10:47:54 -07001900 genpd_unlock(genpd);
1901 genpd_unlock(subdomain);
Lina Iyer25479232015-10-28 15:19:50 -06001902 if (ret)
1903 kfree(link);
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001904 return ret;
1905}
Jon Hunter19efa5f2016-09-12 12:01:11 +01001906
1907/**
1908 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
Kees Cook8d87ae42020-07-08 16:32:13 -07001909 * @genpd: Leader PM domain to add the subdomain to.
Jon Hunter19efa5f2016-09-12 12:01:11 +01001910 * @subdomain: Subdomain to be added.
1911 */
1912int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1913 struct generic_pm_domain *subdomain)
1914{
1915 int ret;
1916
1917 mutex_lock(&gpd_list_lock);
1918 ret = genpd_add_subdomain(genpd, subdomain);
1919 mutex_unlock(&gpd_list_lock);
1920
1921 return ret;
1922}
Stephen Boydd60ee962015-10-01 12:22:53 -07001923EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
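/*
 * Illustrative sketch (hypothetical topology code, not part of this file):
 * linking an assumed child domain under an assumed parent, so the parent
 * is kept powered (via the sd_count taken above) whenever the child is on.
 * Both domains must already be registered with pm_genpd_init().
 */
static struct generic_pm_domain my_parent_pd;	/* assumed */
static struct generic_pm_domain my_child_pd;	/* assumed */

static int my_link_domains(void)
{
	int ret;

	ret = pm_genpd_add_subdomain(&my_parent_pd, &my_child_pd);
	if (ret)
		pr_err("linking %s under %s failed: %d\n",
		       my_child_pd.name, my_parent_pd.name, ret);
	return ret;
}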
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001924
1925/**
1926 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
Kees Cook8d87ae42020-07-08 16:32:13 -07001927 * @genpd: Leader PM domain to remove the subdomain from.
Rafael J. Wysocki5063ce12011-08-08 23:43:40 +02001928 * @subdomain: Subdomain to be removed.
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001929 */
1930int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
Rafael J. Wysocki5063ce12011-08-08 23:43:40 +02001931 struct generic_pm_domain *subdomain)
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001932{
Krzysztof Kozlowskic6e83ca2017-06-28 16:56:18 +02001933 struct gpd_link *l, *link;
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001934 int ret = -EINVAL;
1935
Rafael J. Wysocki5063ce12011-08-08 23:43:40 +02001936 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001937 return -EINVAL;
1938
Lina Iyer35241d12016-10-14 10:47:54 -07001939 genpd_lock(subdomain);
1940 genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001941
Kees Cook8d87ae42020-07-08 16:32:13 -07001942 if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
Joe Perches7a5bd122019-03-04 09:14:38 -08001943 pr_warn("%s: unable to remove subdomain %s\n",
1944 genpd->name, subdomain->name);
Jon Hunter30e7a652015-09-03 09:10:37 +01001945 ret = -EBUSY;
1946 goto out;
1947 }
1948
Kees Cook8d87ae42020-07-08 16:32:13 -07001949 list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) {
1950 if (link->child != subdomain)
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001951 continue;
1952
Kees Cook8d87ae42020-07-08 16:32:13 -07001953 list_del(&link->parent_node);
1954 list_del(&link->child_node);
Rafael J. Wysocki5063ce12011-08-08 23:43:40 +02001955 kfree(link);
Ulf Hansson41e2c8e2017-03-20 11:19:20 +01001956 if (genpd_status_on(subdomain))
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001957 genpd_sd_counter_dec(genpd);
1958
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001959 ret = 0;
1960 break;
1961 }
1962
Jon Hunter30e7a652015-09-03 09:10:37 +01001963out:
Lina Iyer35241d12016-10-14 10:47:54 -07001964 genpd_unlock(genpd);
1965 genpd_unlock(subdomain);
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001966
1967 return ret;
1968}
Stephen Boydd60ee962015-10-01 12:22:53 -07001969EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
Rafael J. Wysockif7218892011-07-01 22:12:45 +02001970
Ulf Hansson49a27e22019-03-27 15:35:45 +01001971static void genpd_free_default_power_state(struct genpd_power_state *states,
1972 unsigned int state_count)
1973{
1974 kfree(states);
1975}
1976
Lina Iyer59d65b72016-10-14 10:47:49 -07001977static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
1978{
1979 struct genpd_power_state *state;
1980
1981 state = kzalloc(sizeof(*state), GFP_KERNEL);
1982 if (!state)
1983 return -ENOMEM;
1984
1985 genpd->states = state;
1986 genpd->state_count = 1;
Ulf Hansson49a27e22019-03-27 15:35:45 +01001987 genpd->free_states = genpd_free_default_power_state;
Lina Iyer59d65b72016-10-14 10:47:49 -07001988
1989 return 0;
1990}
1991
Ulf Hanssonba43d6d2022-05-11 16:57:01 +02001992static int genpd_alloc_data(struct generic_pm_domain *genpd)
1993{
Ulf Hanssonf38d1a62022-05-11 16:57:02 +02001994 struct genpd_governor_data *gd = NULL;
Ulf Hanssonba43d6d2022-05-11 16:57:01 +02001995 int ret;
1996
1997 if (genpd_is_cpu_domain(genpd) &&
1998 !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
1999 return -ENOMEM;
2000
Ulf Hanssonf38d1a62022-05-11 16:57:02 +02002001 if (genpd->gov) {
2002 gd = kzalloc(sizeof(*gd), GFP_KERNEL);
2003 if (!gd) {
2004 ret = -ENOMEM;
2005 goto free;
2006 }
2007
2008 gd->max_off_time_ns = -1;
2009 gd->max_off_time_changed = true;
2010 gd->next_wakeup = KTIME_MAX;
Maulik Shah1498c502022-10-18 17:28:35 +02002011 gd->next_hrtimer = KTIME_MAX;
Ulf Hanssonf38d1a62022-05-11 16:57:02 +02002012 }
2013
Ulf Hanssonba43d6d2022-05-11 16:57:01 +02002014 /* Use only one "off" state if there were no states declared */
2015 if (genpd->state_count == 0) {
2016 ret = genpd_set_default_power_state(genpd);
2017 if (ret)
2018 goto free;
2019 }
2020
Ulf Hanssonf38d1a62022-05-11 16:57:02 +02002021 genpd->gd = gd;
Ulf Hanssonba43d6d2022-05-11 16:57:01 +02002022 return 0;
2023
2024free:
2025 if (genpd_is_cpu_domain(genpd))
2026 free_cpumask_var(genpd->cpus);
Ulf Hanssonf38d1a62022-05-11 16:57:02 +02002027 kfree(gd);
Ulf Hanssonba43d6d2022-05-11 16:57:01 +02002028 return ret;
2029}
2030
2031static void genpd_free_data(struct generic_pm_domain *genpd)
2032{
2033 if (genpd_is_cpu_domain(genpd))
2034 free_cpumask_var(genpd->cpus);
2035 if (genpd->free_states)
2036 genpd->free_states(genpd->states, genpd->state_count);
Ulf Hanssonf38d1a62022-05-11 16:57:02 +02002037 kfree(genpd->gd);
Ulf Hanssonba43d6d2022-05-11 16:57:01 +02002038}
2039
Lina Iyerd716f472016-10-14 10:47:55 -07002040static void genpd_lock_init(struct generic_pm_domain *genpd)
2041{
2042 if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
2043 spin_lock_init(&genpd->slock);
2044 genpd->lock_ops = &genpd_spin_ops;
2045 } else {
2046 mutex_init(&genpd->mlock);
2047 genpd->lock_ops = &genpd_mtx_ops;
2048 }
2049}
2050
Rafael J. Wysockid23b9b02011-11-27 13:11:51 +01002051/**
Rafael J. Wysockif7218892011-07-01 22:12:45 +02002052 * pm_genpd_init - Initialize a generic I/O PM domain object.
2053 * @genpd: PM domain object to initialize.
2054 * @gov: PM domain governor to associate with the domain (may be NULL).
2055 * @is_off: Initial power state of the domain (true means powered off).
Ulf Hansson7eb231c2016-06-17 12:27:52 +02002056 *
2057 * Returns 0 on successful initialization, else a negative error code.
Rafael J. Wysockif7218892011-07-01 22:12:45 +02002058 */
Ulf Hansson7eb231c2016-06-17 12:27:52 +02002059int pm_genpd_init(struct generic_pm_domain *genpd,
2060 struct dev_power_governor *gov, bool is_off)
Rafael J. Wysockif7218892011-07-01 22:12:45 +02002061{
Lina Iyer59d65b72016-10-14 10:47:49 -07002062 int ret;
2063
Rafael J. Wysockif7218892011-07-01 22:12:45 +02002064 if (IS_ERR_OR_NULL(genpd))
Ulf Hansson7eb231c2016-06-17 12:27:52 +02002065 return -EINVAL;
Rafael J. Wysockif7218892011-07-01 22:12:45 +02002066
Kees Cook8d87ae42020-07-08 16:32:13 -07002067 INIT_LIST_HEAD(&genpd->parent_links);
2068 INIT_LIST_HEAD(&genpd->child_links);
Rafael J. Wysockif7218892011-07-01 22:12:45 +02002069 INIT_LIST_HEAD(&genpd->dev_list);
Ulf Hanssond4f81382020-10-13 14:23:39 +02002070 RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers);
Lina Iyerd716f472016-10-14 10:47:55 -07002071 genpd_lock_init(genpd);
Rafael J. Wysockif7218892011-07-01 22:12:45 +02002072 genpd->gov = gov;
2073 INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
Rafael J. Wysockic4bb3162011-08-08 23:43:04 +02002074 atomic_set(&genpd->sd_count, 0);
Ulf Hansson49f618e2020-09-24 13:04:47 +02002075 genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
Rafael J. Wysocki596ba342011-07-01 22:13:19 +02002076 genpd->device_count = 0;
Jon Hunterde0aa06d2016-09-12 12:01:12 +01002077 genpd->provider = NULL;
2078 genpd->has_provider = false;
Ulf Hanssonbd40cbb2022-04-19 19:29:16 +02002079 genpd->accounting_time = ktime_get_mono_fast_ns();
Ulf Hansson795bd2e2016-03-31 11:21:26 +02002080 genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
2081 genpd->domain.ops.runtime_resume = genpd_runtime_resume;
Ulf Hansson9e9704e2017-10-06 09:02:06 +02002082 genpd->domain.ops.prepare = genpd_prepare;
2083 genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
2084 genpd->domain.ops.resume_noirq = genpd_resume_noirq;
2085 genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
2086 genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
2087 genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
2088 genpd->domain.ops.restore_noirq = genpd_restore_noirq;
2089 genpd->domain.ops.complete = genpd_complete;
Ulf Hanssonea71c592019-10-16 15:16:24 +02002090 genpd->domain.start = genpd_dev_pm_start;
Ulf Hansson401e0922023-09-25 15:17:08 +02002091 genpd->domain.set_performance_state = genpd_dev_pm_set_performance_state;
Ulf Hanssonc11f6f52014-12-01 12:50:21 +01002092
2093 if (genpd->flags & GENPD_FLAG_PM_CLK) {
2094 genpd->dev_ops.stop = pm_clk_suspend;
2095 genpd->dev_ops.start = pm_clk_resume;
2096 }
2097
Ulf Hansson27656dc2022-05-11 16:56:51 +02002098 /* The always-on governor works better with the corresponding flag. */
2099 if (gov == &pm_domain_always_on_gov)
2100 genpd->flags |= GENPD_FLAG_RPM_ALWAYS_ON;
2101
Ulf Hanssonffaa42e2017-03-20 11:19:21 +01002102 /* Always-on domains must be powered on at initialization. */
Leonard Crestezed61e182019-04-30 15:06:11 +00002103 if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
Johan Hovold129b60c2022-09-29 17:42:14 +02002104 !genpd_status_on(genpd)) {
2105 pr_err("always-on PM domain %s is not on\n", genpd->name);
Ulf Hanssonffaa42e2017-03-20 11:19:21 +01002106 return -EINVAL;
Johan Hovold129b60c2022-09-29 17:42:14 +02002107 }
Ulf Hanssonffaa42e2017-03-20 11:19:21 +01002108
Ulf Hanssonba43d6d2022-05-11 16:57:01 +02002109 /* Multiple states but no governor doesn't make sense. */
2110 if (!gov && genpd->state_count > 1)
Joe Perches7a5bd122019-03-04 09:14:38 -08002111 pr_warn("%s: no governor for states\n", genpd->name);
Ulf Hanssonba43d6d2022-05-11 16:57:01 +02002112
2113 ret = genpd_alloc_data(genpd);
2114 if (ret)
2115 return ret;
Axel Haslamfc5cbf02016-02-15 11:10:51 +01002116
Viresh Kumar401ea152017-03-17 11:26:19 +05302117 device_initialize(&genpd->dev);
2118 dev_set_name(&genpd->dev, "%s", genpd->name);
2119
Rafael J. Wysocki5125bbf382011-07-13 12:31:52 +02002120 mutex_lock(&gpd_list_lock);
2121 list_add(&genpd->gpd_list_node, &gpd_list);
2122 mutex_unlock(&gpd_list_lock);
Stephen Boyd40ba55e2021-06-24 13:18:02 -07002123 genpd_debug_add(genpd);
Ulf Hansson7eb231c2016-06-17 12:27:52 +02002124
2125 return 0;
Rafael J. Wysocki5125bbf382011-07-13 12:31:52 +02002126}
Rajendra Nayakbe5ed552015-08-13 11:51:57 +05302127EXPORT_SYMBOL_GPL(pm_genpd_init);
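/*
 * Illustrative sketch (hypothetical provider code, not part of this file):
 * a minimal domain with power_on/power_off callbacks. The pr_debug()
 * calls stand in for whatever register accesses the hardware needs;
 * "my_pd" and both callbacks are assumptions.
 */
static int my_pd_power_on(struct generic_pm_domain *pd)
{
	pr_debug("%s: powering on\n", pd->name);	/* assumed hw access */
	return 0;
}

static int my_pd_power_off(struct generic_pm_domain *pd)
{
	pr_debug("%s: powering off\n", pd->name);	/* assumed hw access */
	return 0;
}

static struct generic_pm_domain my_pd = {
	.name = "my_pd",
	.power_on = my_pd_power_on,
	.power_off = my_pd_power_off,
};

static int __init my_pd_setup(void)
{
	/* No governor, start powered off; pm_genpd_remove() undoes this. */
	return pm_genpd_init(&my_pd, NULL, true);
}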
Tomasz Figaaa422402014-09-19 20:27:36 +02002128
Jon Hunter3fe57712016-09-12 12:01:13 +01002129static int genpd_remove(struct generic_pm_domain *genpd)
2130{
2131 struct gpd_link *l, *link;
2132
2133 if (IS_ERR_OR_NULL(genpd))
2134 return -EINVAL;
2135
Lina Iyer35241d12016-10-14 10:47:54 -07002136 genpd_lock(genpd);
Jon Hunter3fe57712016-09-12 12:01:13 +01002137
2138 if (genpd->has_provider) {
Lina Iyer35241d12016-10-14 10:47:54 -07002139 genpd_unlock(genpd);
Jon Hunter3fe57712016-09-12 12:01:13 +01002140 pr_err("Provider present, unable to remove %s\n", genpd->name);
2141 return -EBUSY;
2142 }
2143
Kees Cook8d87ae42020-07-08 16:32:13 -07002144 if (!list_empty(&genpd->parent_links) || genpd->device_count) {
Lina Iyer35241d12016-10-14 10:47:54 -07002145 genpd_unlock(genpd);
Jon Hunter3fe57712016-09-12 12:01:13 +01002146 pr_err("%s: unable to remove %s\n", __func__, genpd->name);
2147 return -EBUSY;
2148 }
2149
Kees Cook8d87ae42020-07-08 16:32:13 -07002150 list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
2151 list_del(&link->parent_node);
2152 list_del(&link->child_node);
Jon Hunter3fe57712016-09-12 12:01:13 +01002153 kfree(link);
2154 }
2155
2156 list_del(&genpd->gpd_list_node);
Lina Iyer35241d12016-10-14 10:47:54 -07002157 genpd_unlock(genpd);
Shawn Guof6bfe8b2022-02-25 14:48:15 +08002158 genpd_debug_remove(genpd);
Jon Hunter3fe57712016-09-12 12:01:13 +01002159 cancel_work_sync(&genpd->power_off_work);
Ulf Hanssonba43d6d2022-05-11 16:57:01 +02002160 genpd_free_data(genpd);
Ulf Hansson49a27e22019-03-27 15:35:45 +01002161
Jon Hunter3fe57712016-09-12 12:01:13 +01002162 pr_debug("%s: removed %s\n", __func__, genpd->name);
2163
2164 return 0;
2165}
2166
2167/**
2168 * pm_genpd_remove - Remove a generic I/O PM domain
2169 * @genpd: Pointer to PM domain that is to be removed.
2170 *
2171 * To remove the PM domain, this function:
2172 * - Removes the PM domain as a subdomain to any parent domains,
2173 * if it was added.
2174 * - Removes the PM domain from the list of registered PM domains.
2175 *
2176 * The PM domain will only be removed if the associated provider has
2177 * been removed, it is not a parent to any other PM domain, and it has no
2178 * devices associated with it.
2179 */
2180int pm_genpd_remove(struct generic_pm_domain *genpd)
2181{
2182 int ret;
2183
2184 mutex_lock(&gpd_list_lock);
2185 ret = genpd_remove(genpd);
2186 mutex_unlock(&gpd_list_lock);
2187
2188 return ret;
2189}
2190EXPORT_SYMBOL_GPL(pm_genpd_remove);
2191
Tomasz Figaaa422402014-09-19 20:27:36 +02002192#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
Jon Hunter892ebdcc2016-09-12 12:01:09 +01002193
Tomasz Figaaa422402014-09-19 20:27:36 +02002194/*
2195 * Device Tree based PM domain providers.
2196 *
2197 * The code below implements generic device tree based PM domain providers that
2198 * bind device tree nodes with generic PM domains registered in the system.
2199 *
2200 * Any driver that registers generic PM domains and needs to support binding of
2201 * devices to these domains is supposed to register a PM domain provider, which
2202 * maps a PM domain specifier retrieved from the device tree to a PM domain.
2203 *
2204 * Two simple mapping functions have been provided for convenience:
Jon Hunter892ebdcc2016-09-12 12:01:09 +01002205 * - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
2206 * - genpd_xlate_onecell() for mapping of multiple PM domains per node by
Tomasz Figaaa422402014-09-19 20:27:36 +02002207 * index.
2208 */
2209
2210/**
2211 * struct of_genpd_provider - PM domain provider registration structure
2212 * @link: Entry in global list of PM domain providers
2213 * @node: Pointer to device tree node of PM domain provider
2214 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
2215 * into a PM domain.
2216 * @data: context pointer to be passed into @xlate callback
2217 */
2218struct of_genpd_provider {
2219 struct list_head link;
2220 struct device_node *node;
2221 genpd_xlate_t xlate;
2222 void *data;
2223};
2224
2225/* List of registered PM domain providers. */
2226static LIST_HEAD(of_genpd_providers);
2227/* Mutex to protect the list above. */
2228static DEFINE_MUTEX(of_genpd_mutex);
2229
2230/**
Jon Hunter892ebdcc2016-09-12 12:01:09 +01002231 * genpd_xlate_simple() - Xlate function for direct node-domain mapping
Tomasz Figaaa422402014-09-19 20:27:36 +02002232 * @genpdspec: OF phandle args to map into a PM domain
2233 * @data: xlate function private data - pointer to struct generic_pm_domain
2234 *
2235 * This is a generic xlate function that can be used to model PM domains that
2236 * have their own device tree nodes. The private data of the xlate function
2237 * needs to be a valid pointer to struct generic_pm_domain.
2238 */
Jon Hunter892ebdcc2016-09-12 12:01:09 +01002239static struct generic_pm_domain *genpd_xlate_simple(
Tomasz Figaaa422402014-09-19 20:27:36 +02002240 struct of_phandle_args *genpdspec,
2241 void *data)
2242{
Tomasz Figaaa422402014-09-19 20:27:36 +02002243 return data;
2244}
Tomasz Figaaa422402014-09-19 20:27:36 +02002245
2246/**
Jon Hunter892ebdcc2016-09-12 12:01:09 +01002247 * genpd_xlate_onecell() - Xlate function using a single index.
Tomasz Figaaa422402014-09-19 20:27:36 +02002248 * @genpdspec: OF phandle args to map into a PM domain
2249 * @data: xlate function private data - pointer to struct genpd_onecell_data
2250 *
2251 * This is a generic xlate function that can be used to model simple PM domain
2252 * controllers that have one device tree node and provide multiple PM domains.
2253 * A single cell is used as an index into an array of PM domains specified in
2254 * the genpd_onecell_data struct when registering the provider.
2255 */
Jon Hunter892ebdcc2016-09-12 12:01:09 +01002256static struct generic_pm_domain *genpd_xlate_onecell(
Tomasz Figaaa422402014-09-19 20:27:36 +02002257 struct of_phandle_args *genpdspec,
2258 void *data)
2259{
2260 struct genpd_onecell_data *genpd_data = data;
2261 unsigned int idx = genpdspec->args[0];
2262
2263 if (genpdspec->args_count != 1)
2264 return ERR_PTR(-EINVAL);
2265
2266 if (idx >= genpd_data->num_domains) {
2267 pr_err("%s: invalid domain index %u\n", __func__, idx);
2268 return ERR_PTR(-EINVAL);
2269 }
2270
2271 if (!genpd_data->domains[idx])
2272 return ERR_PTR(-ENOENT);
2273
2274 return genpd_data->domains[idx];
2275}
Tomasz Figaaa422402014-09-19 20:27:36 +02002276
2277/**
Jon Hunter892ebdcc2016-09-12 12:01:09 +01002278 * genpd_add_provider() - Register a PM domain provider for a node
Tomasz Figaaa422402014-09-19 20:27:36 +02002279 * @np: Device node pointer associated with the PM domain provider.
2280 * @xlate: Callback for decoding PM domain from phandle arguments.
2281 * @data: Context pointer for @xlate callback.
2282 */
Jon Hunter892ebdcc2016-09-12 12:01:09 +01002283static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
2284 void *data)
Tomasz Figaaa422402014-09-19 20:27:36 +02002285{
2286 struct of_genpd_provider *cp;
2287
2288 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
2289 if (!cp)
2290 return -ENOMEM;
2291
2292 cp->node = of_node_get(np);
2293 cp->data = data;
2294 cp->xlate = xlate;
Saravana Kannanbab2d712021-02-05 14:26:43 -08002295 fwnode_dev_initialized(&np->fwnode, true);
Tomasz Figaaa422402014-09-19 20:27:36 +02002296
2297 mutex_lock(&of_genpd_mutex);
2298 list_add(&cp->link, &of_genpd_providers);
2299 mutex_unlock(&of_genpd_mutex);
Rob Herringea11e942017-07-18 16:42:50 -05002300 pr_debug("Added domain provider from %pOF\n", np);
Tomasz Figaaa422402014-09-19 20:27:36 +02002301
2302 return 0;
2303}
Jon Hunter892ebdcc2016-09-12 12:01:09 +01002304
Ulf Hanssonfe0c2baa2019-10-16 16:16:49 +02002305static bool genpd_present(const struct generic_pm_domain *genpd)
2306{
Stephen Boyd40ba55e2021-06-24 13:18:02 -07002307 bool ret = false;
Ulf Hanssonfe0c2baa2019-10-16 16:16:49 +02002308 const struct generic_pm_domain *gpd;
2309
Stephen Boyd40ba55e2021-06-24 13:18:02 -07002310 mutex_lock(&gpd_list_lock);
2311 list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2312 if (gpd == genpd) {
2313 ret = true;
2314 break;
2315 }
2316 }
2317 mutex_unlock(&gpd_list_lock);
2318
2319 return ret;
Ulf Hanssonfe0c2baa2019-10-16 16:16:49 +02002320}
2321
Jon Hunter892ebdcc2016-09-12 12:01:09 +01002322/**
2323 * of_genpd_add_provider_simple() - Register a simple PM domain provider
2324 * @np: Device node pointer associated with the PM domain provider.
2325 * @genpd: Pointer to PM domain associated with the PM domain provider.
2326 */
2327int of_genpd_add_provider_simple(struct device_node *np,
2328 struct generic_pm_domain *genpd)
2329{
Stephen Boyd40ba55e2021-06-24 13:18:02 -07002330 int ret;
Jon Hunter0159ec62016-09-12 12:01:10 +01002331
2332 if (!np || !genpd)
2333 return -EINVAL;
2334
Viresh Kumar6a0ae732018-04-05 15:53:34 +05302335 if (!genpd_present(genpd))
Stephen Boyd40ba55e2021-06-24 13:18:02 -07002336 return -EINVAL;
Viresh Kumar6a0ae732018-04-05 15:53:34 +05302337
2338 genpd->dev.of_node = np;
2339
2340 /* Parse genpd OPP table */
Ulf Hansson3dd91512023-08-25 13:26:32 +02002341 if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
Viresh Kumar6a0ae732018-04-05 15:53:34 +05302342 ret = dev_pm_opp_of_add_table(&genpd->dev);
Ahmad Fatoum9a6582b2022-02-23 09:03:23 +01002343 if (ret)
2344 return dev_err_probe(&genpd->dev, ret, "Failed to add OPP table\n");
Viresh Kumar1067ae32018-11-02 11:18:08 +05302345
2346 /*
2347 * Save table for faster processing while setting performance
2348 * state.
2349 */
2350 genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
Stephan Gerholddd461cd2020-07-27 11:30:46 +02002351 WARN_ON(IS_ERR(genpd->opp_table));
Jon Hunterde0aa06d2016-09-12 12:01:12 +01002352 }
2353
Viresh Kumar6a0ae732018-04-05 15:53:34 +05302354 ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
2355 if (ret) {
Ulf Hansson3dd91512023-08-25 13:26:32 +02002356 if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
Viresh Kumar1067ae32018-11-02 11:18:08 +05302357 dev_pm_opp_put_opp_table(genpd->opp_table);
Viresh Kumar6a0ae732018-04-05 15:53:34 +05302358 dev_pm_opp_of_remove_table(&genpd->dev);
Viresh Kumar1067ae32018-11-02 11:18:08 +05302359 }
Viresh Kumar6a0ae732018-04-05 15:53:34 +05302360
Stephen Boyd40ba55e2021-06-24 13:18:02 -07002361 return ret;
Viresh Kumar6a0ae732018-04-05 15:53:34 +05302362 }
2363
2364 genpd->provider = &np->fwnode;
2365 genpd->has_provider = true;
2366
Stephen Boyd40ba55e2021-06-24 13:18:02 -07002367 return 0;
Jon Hunter892ebdcc2016-09-12 12:01:09 +01002368}
2369EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
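/*
 * Illustrative sketch (hypothetical DT glue, not part of this file):
 * exposing the assumed domain "my_pd" (see the pm_genpd_init() sketch
 * above) through a node with "#power-domain-cells = <0>"; the matching
 * teardown would call of_genpd_del_provider(pdev->dev.of_node).
 */
static int my_pd_provider_probe(struct platform_device *pdev)
{
	/* my_pd must already have been registered via pm_genpd_init(). */
	return of_genpd_add_provider_simple(pdev->dev.of_node, &my_pd);
}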
2370
2371/**
2372 * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
2373 * @np: Device node pointer associated with the PM domain provider.
2374 * @data: Pointer to the data associated with the PM domain provider.
2375 */
2376int of_genpd_add_provider_onecell(struct device_node *np,
2377 struct genpd_onecell_data *data)
2378{
Viresh Kumar6a0ae732018-04-05 15:53:34 +05302379 struct generic_pm_domain *genpd;
Jon Hunter0159ec62016-09-12 12:01:10 +01002380 unsigned int i;
Jon Hunterde0aa06d2016-09-12 12:01:12 +01002381 int ret = -EINVAL;
Jon Hunter0159ec62016-09-12 12:01:10 +01002382
2383 if (!np || !data)
2384 return -EINVAL;
2385
Thierry Reding40845522017-03-29 18:34:50 +02002386 if (!data->xlate)
2387 data->xlate = genpd_xlate_onecell;
2388
Jon Hunter0159ec62016-09-12 12:01:10 +01002389 for (i = 0; i < data->num_domains; i++) {
Viresh Kumar6a0ae732018-04-05 15:53:34 +05302390 genpd = data->domains[i];
2391
2392 if (!genpd)
Tomeu Vizoso609bed62016-09-15 14:05:23 +02002393 continue;
Viresh Kumar6a0ae732018-04-05 15:53:34 +05302394 if (!genpd_present(genpd))
Jon Hunterde0aa06d2016-09-12 12:01:12 +01002395 goto error;
2396
Viresh Kumar6a0ae732018-04-05 15:53:34 +05302397 genpd->dev.of_node = np;
2398
2399 /* Parse genpd OPP table */
Ulf Hansson3dd91512023-08-25 13:26:32 +02002400 if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
Viresh Kumar6a0ae732018-04-05 15:53:34 +05302401 ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
2402 if (ret) {
Ahmad Fatoum9a6582b2022-02-23 09:03:23 +01002403 dev_err_probe(&genpd->dev, ret,
2404 "Failed to add OPP table for index %d\n", i);
Viresh Kumar6a0ae732018-04-05 15:53:34 +05302405 goto error;
2406 }
Viresh Kumar1067ae32018-11-02 11:18:08 +05302407
2408 /*
2409 * Save table for faster processing while setting
2410 * performance state.
2411 */
Viresh Kumare77dcb02020-11-06 10:37:16 +05302412 genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
Stephan Gerholddd461cd2020-07-27 11:30:46 +02002413 WARN_ON(IS_ERR(genpd->opp_table));
Viresh Kumar6a0ae732018-04-05 15:53:34 +05302414 }
2415
2416 genpd->provider = &np->fwnode;
2417 genpd->has_provider = true;
Jon Hunter0159ec62016-09-12 12:01:10 +01002418 }
2419
Thierry Reding40845522017-03-29 18:34:50 +02002420 ret = genpd_add_provider(np, data->xlate, data);
Jon Hunterde0aa06d2016-09-12 12:01:12 +01002421 if (ret < 0)
2422 goto error;
2423
Jon Hunterde0aa06d2016-09-12 12:01:12 +01002424 return 0;
2425
2426error:
2427 while (i--) {
Viresh Kumar6a0ae732018-04-05 15:53:34 +05302428 genpd = data->domains[i];
2429
2430 if (!genpd)
Tomeu Vizoso609bed62016-09-15 14:05:23 +02002431 continue;
Viresh Kumar6a0ae732018-04-05 15:53:34 +05302432
2433 genpd->provider = NULL;
2434 genpd->has_provider = false;
2435
Ulf Hansson3dd91512023-08-25 13:26:32 +02002436 if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
Viresh Kumar1067ae32018-11-02 11:18:08 +05302437 dev_pm_opp_put_opp_table(genpd->opp_table);
Viresh Kumar6a0ae732018-04-05 15:53:34 +05302438 dev_pm_opp_of_remove_table(&genpd->dev);
Viresh Kumar1067ae32018-11-02 11:18:08 +05302439 }
Jon Hunterde0aa06d2016-09-12 12:01:12 +01002440 }
Jon Hunter0159ec62016-09-12 12:01:10 +01002441
Jon Hunter0159ec62016-09-12 12:01:10 +01002442 return ret;
Jon Hunter892ebdcc2016-09-12 12:01:09 +01002443}
2444EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
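
/*
 * An illustrative sketch for a onecell provider, assuming the hypothetical
 * domains below have already been set up with pm_genpd_init(). NULL entries
 * in the array are skipped by the core, so sparse index spaces are allowed.
 */
static int __maybe_unused example_register_onecell_provider(struct device_node *np)
{
	static struct generic_pm_domain *example_domains[2];
	static struct genpd_onecell_data example_data = {
		.domains = example_domains,
		.num_domains = ARRAY_SIZE(example_domains),
	};

	/* With .xlate left NULL, genpd_xlate_onecell() is used by default. */
	return of_genpd_add_provider_onecell(np, &example_data);
}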
Tomasz Figaaa422402014-09-19 20:27:36 +02002445
2446/**
2447 * of_genpd_del_provider() - Remove a previously registered PM domain provider
2448 * @np: Device node pointer associated with the PM domain provider
2449 */
2450void of_genpd_del_provider(struct device_node *np)
2451{
Krzysztof Kozlowskib556b152017-06-28 16:56:19 +02002452 struct of_genpd_provider *cp, *tmp;
Jon Hunterde0aa06d2016-09-12 12:01:12 +01002453 struct generic_pm_domain *gpd;
Tomasz Figaaa422402014-09-19 20:27:36 +02002454
Jon Hunterde0aa06d2016-09-12 12:01:12 +01002455 mutex_lock(&gpd_list_lock);
Tomasz Figaaa422402014-09-19 20:27:36 +02002456 mutex_lock(&of_genpd_mutex);
Krzysztof Kozlowskib556b152017-06-28 16:56:19 +02002457 list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
Tomasz Figaaa422402014-09-19 20:27:36 +02002458 if (cp->node == np) {
Jon Hunterde0aa06d2016-09-12 12:01:12 +01002459 /*
2460 * For each PM domain associated with the
2461 * provider, set the 'has_provider' to false
2462 * so that the PM domain can be safely removed.
2463 */
Viresh Kumar6a0ae732018-04-05 15:53:34 +05302464 list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2465 if (gpd->provider == &np->fwnode) {
Jon Hunterde0aa06d2016-09-12 12:01:12 +01002466 gpd->has_provider = false;
2467
Ulf Hansson3dd91512023-08-25 13:26:32 +02002468 if (genpd_is_opp_table_fw(gpd) || !gpd->set_performance_state)
Viresh Kumar6a0ae732018-04-05 15:53:34 +05302469 continue;
2470
Viresh Kumar1067ae32018-11-02 11:18:08 +05302471 dev_pm_opp_put_opp_table(gpd->opp_table);
Viresh Kumar6a0ae732018-04-05 15:53:34 +05302472 dev_pm_opp_of_remove_table(&gpd->dev);
2473 }
2474 }
2475
Saravana Kannanbab2d712021-02-05 14:26:43 -08002476 fwnode_dev_initialized(&cp->node->fwnode, false);
Tomasz Figaaa422402014-09-19 20:27:36 +02002477 list_del(&cp->link);
2478 of_node_put(cp->node);
2479 kfree(cp);
2480 break;
2481 }
2482 }
2483 mutex_unlock(&of_genpd_mutex);
Jon Hunterde0aa06d2016-09-12 12:01:12 +01002484 mutex_unlock(&gpd_list_lock);
Tomasz Figaaa422402014-09-19 20:27:36 +02002485}
2486EXPORT_SYMBOL_GPL(of_genpd_del_provider);
2487
2488/**
Jon Hunterf58d4e52016-09-12 12:01:08 +01002489 * genpd_get_from_provider() - Look-up PM domain
Tomasz Figaaa422402014-09-19 20:27:36 +02002490 * @genpdspec: OF phandle args to use for look-up
2491 *
2492 * Looks for a PM domain provider under the node specified by @genpdspec and if
2493 * found, uses the xlate function of the provider to map phandle args to a PM
2494 * domain.
2495 *
2496 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
2497 * on failure.
2498 */
Jon Hunterf58d4e52016-09-12 12:01:08 +01002499static struct generic_pm_domain *genpd_get_from_provider(
Tomasz Figaaa422402014-09-19 20:27:36 +02002500 struct of_phandle_args *genpdspec)
2501{
2502 struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
2503 struct of_genpd_provider *provider;
2504
Jon Hunter41795a82016-03-04 10:55:15 +00002505 if (!genpdspec)
2506 return ERR_PTR(-EINVAL);
2507
Tomasz Figaaa422402014-09-19 20:27:36 +02002508 mutex_lock(&of_genpd_mutex);
2509
2510 /* Check if we have such a provider in our array */
2511 list_for_each_entry(provider, &of_genpd_providers, link) {
2512 if (provider->node == genpdspec->np)
2513 genpd = provider->xlate(genpdspec, provider->data);
2514 if (!IS_ERR(genpd))
2515 break;
2516 }
2517
2518 mutex_unlock(&of_genpd_mutex);
2519
2520 return genpd;
2521}
2522
2523/**
Jon Hunterec695722016-09-12 12:01:05 +01002524 * of_genpd_add_device() - Add a device to an I/O PM domain
2525 * @genpdspec: OF phandle args to use for look-up PM domain
2526 * @dev: Device to be added.
2527 *
2528 * Looks up an I/O PM domain based upon the phandle args provided and adds
2529 * the device to the PM domain. Returns a negative error code on failure.
2530 */
2531int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
2532{
2533 struct generic_pm_domain *genpd;
Jon Hunter19efa5f2016-09-12 12:01:11 +01002534 int ret;
2535
Ulf Hansson4384a702023-05-30 11:55:36 +02002536 if (!dev)
2537 return -EINVAL;
2538
Jon Hunter19efa5f2016-09-12 12:01:11 +01002539 mutex_lock(&gpd_list_lock);
Jon Hunterec695722016-09-12 12:01:05 +01002540
Jon Hunterf58d4e52016-09-12 12:01:08 +01002541 genpd = genpd_get_from_provider(genpdspec);
Jon Hunter19efa5f2016-09-12 12:01:11 +01002542 if (IS_ERR(genpd)) {
2543 ret = PTR_ERR(genpd);
2544 goto out;
2545 }
Jon Hunterec695722016-09-12 12:01:05 +01002546
Ulf Hanssonf9ccd7c2019-04-25 11:04:13 +02002547 ret = genpd_add_device(genpd, dev, dev);
Jon Hunter19efa5f2016-09-12 12:01:11 +01002548
2549out:
2550 mutex_unlock(&gpd_list_lock);
2551
2552 return ret;
Jon Hunterec695722016-09-12 12:01:05 +01002553}
2554EXPORT_SYMBOL_GPL(of_genpd_add_device);
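
/*
 * An illustrative sketch of of_genpd_add_device() usage, resolving the
 * device's first "power-domains" specifier by hand; bus code normally goes
 * through genpd_dev_pm_attach() below instead.
 */
static int __maybe_unused example_of_genpd_add_device(struct device *dev)
{
	struct of_phandle_args pd_args;
	int ret;

	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
					 "#power-domain-cells", 0, &pd_args);
	if (ret < 0)
		return ret;

	ret = of_genpd_add_device(&pd_args, dev);
	of_node_put(pd_args.np);

	return ret;
}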
2555
2556/**
2557 * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
2558 * @parent_spec: OF phandle args to use for parent PM domain look-up
2559 * @subdomain_spec: OF phandle args to use for subdomain look-up
2560 *
2561 * Looks up a parent PM domain and a subdomain based upon the phandle args
2562 * provided and adds the subdomain to the parent PM domain. Returns a
2563 * negative error code on failure.
2564 */
2565int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
2566 struct of_phandle_args *subdomain_spec)
2567{
2568 struct generic_pm_domain *parent, *subdomain;
Jon Hunter19efa5f2016-09-12 12:01:11 +01002569 int ret;
2570
2571 mutex_lock(&gpd_list_lock);
Jon Hunterec695722016-09-12 12:01:05 +01002572
Jon Hunterf58d4e52016-09-12 12:01:08 +01002573 parent = genpd_get_from_provider(parent_spec);
Jon Hunter19efa5f2016-09-12 12:01:11 +01002574 if (IS_ERR(parent)) {
2575 ret = PTR_ERR(parent);
2576 goto out;
2577 }
Jon Hunterec695722016-09-12 12:01:05 +01002578
Jon Hunterf58d4e52016-09-12 12:01:08 +01002579 subdomain = genpd_get_from_provider(subdomain_spec);
Jon Hunter19efa5f2016-09-12 12:01:11 +01002580 if (IS_ERR(subdomain)) {
2581 ret = PTR_ERR(subdomain);
2582 goto out;
2583 }
Jon Hunterec695722016-09-12 12:01:05 +01002584
Jon Hunter19efa5f2016-09-12 12:01:11 +01002585 ret = genpd_add_subdomain(parent, subdomain);
2586
2587out:
2588 mutex_unlock(&gpd_list_lock);
2589
Dmitry Osipenko18027d62021-01-21 00:12:31 +03002590 return ret == -ENOENT ? -EPROBE_DEFER : ret;
Jon Hunterec695722016-09-12 12:01:05 +01002591}
2592EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
2593
2594/**
Ulf Hanssondedd1492019-12-30 13:59:30 +01002595 * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
2596 * @parent_spec: OF phandle args to use for parent PM domain look-up
2597 * @subdomain_spec: OF phandle args to use for subdomain look-up
2598 *
2599 * Looks up a parent PM domain and a subdomain based upon the phandle args
2600 * provided and removes the subdomain from the parent PM domain. Returns a
2601 * negative error code on failure.
2602 */
2603int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
2604 struct of_phandle_args *subdomain_spec)
2605{
2606 struct generic_pm_domain *parent, *subdomain;
2607 int ret;
2608
2609 mutex_lock(&gpd_list_lock);
2610
2611 parent = genpd_get_from_provider(parent_spec);
2612 if (IS_ERR(parent)) {
2613 ret = PTR_ERR(parent);
2614 goto out;
2615 }
2616
2617 subdomain = genpd_get_from_provider(subdomain_spec);
2618 if (IS_ERR(subdomain)) {
2619 ret = PTR_ERR(subdomain);
2620 goto out;
2621 }
2622
2623 ret = pm_genpd_remove_subdomain(parent, subdomain);
2624
2625out:
2626 mutex_unlock(&gpd_list_lock);
2627
2628 return ret;
2629}
2630EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain);
2631
2632/**
Jon Hunter17926552016-09-12 12:01:14 +01002633 * of_genpd_remove_last - Remove the last PM domain registered for a provider
Yang Yingliang763663c2021-05-12 15:25:15 +08002634 * @np: Pointer to device node associated with provider
Jon Hunter17926552016-09-12 12:01:14 +01002635 *
2636 * Find the last PM domain that was added by a particular provider and
2637 * remove this PM domain from the list of PM domains. The provider is
2638 * identified by the device node @np that is passed. The PM domain will
2639 * only be removed if the provider associated with the domain has been
2640 * removed.
2641 *
2642 * Returns a valid pointer to struct generic_pm_domain on success or
2643 * ERR_PTR() on failure.
2644 */
2645struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
2646{
Krzysztof Kozlowskia7e2d1b2017-06-28 16:56:20 +02002647 struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
Jon Hunter17926552016-09-12 12:01:14 +01002648 int ret;
2649
2650 if (IS_ERR_OR_NULL(np))
2651 return ERR_PTR(-EINVAL);
2652
2653 mutex_lock(&gpd_list_lock);
Krzysztof Kozlowskia7e2d1b2017-06-28 16:56:20 +02002654 list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
Jon Hunter17926552016-09-12 12:01:14 +01002655 if (gpd->provider == &np->fwnode) {
2656 ret = genpd_remove(gpd);
2657 genpd = ret ? ERR_PTR(ret) : gpd;
2658 break;
2659 }
2660 }
2661 mutex_unlock(&gpd_list_lock);
2662
2663 return genpd;
2664}
2665EXPORT_SYMBOL_GPL(of_genpd_remove_last);
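
/*
 * A teardown sketch pairing of_genpd_del_provider() with
 * of_genpd_remove_last(), as a hypothetical provider driver's .remove()
 * path might do: unregister the provider first, then drop the domains that
 * were registered for its node, newest first, until none are left.
 */
static void __maybe_unused example_teardown_provider(struct device_node *np)
{
	struct generic_pm_domain *pd;

	of_genpd_del_provider(np);

	do {
		pd = of_genpd_remove_last(np);
	} while (!IS_ERR(pd));
}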
2666
Ulf Hansson3c095f32018-05-31 12:59:58 +02002667static void genpd_release_dev(struct device *dev)
2668{
Ulf Hanssone8b04de2019-04-18 12:27:56 +02002669 of_node_put(dev->of_node);
Ulf Hansson3c095f32018-05-31 12:59:58 +02002670 kfree(dev);
2671}
2672
2673static struct bus_type genpd_bus_type = {
2674 .name = "genpd",
2675};
2676
Jon Hunter17926552016-09-12 12:01:14 +01002677/**
Tomasz Figaaa422402014-09-19 20:27:36 +02002678 * genpd_dev_pm_detach - Detach a device from its PM domain.
Jon Hunter8bb69442015-08-27 10:17:00 +01002679 * @dev: Device to detach.
Tomasz Figaaa422402014-09-19 20:27:36 +02002680 * @power_off: Currently not used
2681 *
2682 * Try to locate the generic PM domain that the device was previously
2683 * attached to. If such is found, the device is detached from it.
2684 */
2685static void genpd_dev_pm_detach(struct device *dev, bool power_off)
2686{
Russell King446d999c2015-03-20 17:20:33 +00002687 struct generic_pm_domain *pd;
Geert Uytterhoeven93af5e92015-06-26 11:14:14 +02002688 unsigned int i;
Tomasz Figaaa422402014-09-19 20:27:36 +02002689 int ret = 0;
2690
Ulf Hansson85168d52016-09-21 15:38:50 +02002691 pd = dev_to_genpd(dev);
2692 if (IS_ERR(pd))
Tomasz Figaaa422402014-09-19 20:27:36 +02002693 return;
2694
2695 dev_dbg(dev, "removing from PM domain %s\n", pd->name);
2696
Rajendra Nayakc016baf2021-08-12 16:57:21 +05302697 /* Drop the default performance state */
2698 if (dev_gpd_data(dev)->default_pstate) {
2699 dev_pm_genpd_set_performance_state(dev, 0);
2700 dev_gpd_data(dev)->default_pstate = 0;
2701 }
2702
Geert Uytterhoeven93af5e92015-06-26 11:14:14 +02002703 for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
Ulf Hansson85168d52016-09-21 15:38:50 +02002704 ret = genpd_remove_device(pd, dev);
Tomasz Figaaa422402014-09-19 20:27:36 +02002705 if (ret != -EAGAIN)
2706 break;
Geert Uytterhoeven93af5e92015-06-26 11:14:14 +02002707
2708 mdelay(i);
Tomasz Figaaa422402014-09-19 20:27:36 +02002709 cond_resched();
2710 }
2711
2712 if (ret < 0) {
2713 dev_err(dev, "failed to remove from PM domain %s: %d\n",
2714 pd->name, ret);
2715 return;
2716 }
2717
2718 /* Check if PM domain can be powered off after removing this device. */
2719 genpd_queue_power_off_work(pd);
Ulf Hansson3c095f32018-05-31 12:59:58 +02002720
2721 /* Unregister the device if it was created by genpd. */
2722 if (dev->bus == &genpd_bus_type)
2723 device_unregister(dev);
Tomasz Figaaa422402014-09-19 20:27:36 +02002724}
2725
Russell King632f7ce2015-03-20 15:55:12 +01002726static void genpd_dev_pm_sync(struct device *dev)
2727{
2728 struct generic_pm_domain *pd;
2729
2730 pd = dev_to_genpd(dev);
2731 if (IS_ERR(pd))
2732 return;
2733
2734 genpd_queue_power_off_work(pd);
2735}
2736
Ulf Hansson51dcf742019-04-25 11:04:10 +02002737static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
2738 unsigned int index, bool power_on)
Tomasz Figaaa422402014-09-19 20:27:36 +02002739{
2740 struct of_phandle_args pd_args;
2741 struct generic_pm_domain *pd;
Rajendra Nayakc016baf2021-08-12 16:57:21 +05302742 int pstate;
Tomasz Figaaa422402014-09-19 20:27:36 +02002743 int ret;
2744
Ulf Hanssone8b04de2019-04-18 12:27:56 +02002745 ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
Ulf Hansson8cb1cbd62018-05-31 12:59:57 +02002746 "#power-domain-cells", index, &pd_args);
Geert Uytterhoeven001d50c2017-11-30 12:54:28 +01002747 if (ret < 0)
Ulf Hanssonbcd931f2018-05-31 12:59:56 +02002748 return ret;
Tomasz Figaaa422402014-09-19 20:27:36 +02002749
Jon Hunter19efa5f2016-09-12 12:01:11 +01002750 mutex_lock(&gpd_list_lock);
Jon Hunterf58d4e52016-09-12 12:01:08 +01002751 pd = genpd_get_from_provider(&pd_args);
Eric Anholt265e2cf2015-12-01 09:39:31 -08002752 of_node_put(pd_args.np);
Tomasz Figaaa422402014-09-19 20:27:36 +02002753 if (IS_ERR(pd)) {
Jon Hunter19efa5f2016-09-12 12:01:11 +01002754 mutex_unlock(&gpd_list_lock);
Tomasz Figaaa422402014-09-19 20:27:36 +02002755 dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
2756 __func__, PTR_ERR(pd));
Saravana Kannane20813d2022-08-19 15:16:13 -07002757 return driver_deferred_probe_check_state(base_dev);
Tomasz Figaaa422402014-09-19 20:27:36 +02002758 }
2759
2760 dev_dbg(dev, "adding to PM domain %s\n", pd->name);
2761
Ulf Hanssonf9ccd7c2019-04-25 11:04:13 +02002762 ret = genpd_add_device(pd, dev, base_dev);
Jon Hunter19efa5f2016-09-12 12:01:11 +01002763 mutex_unlock(&gpd_list_lock);
Tomasz Figaaa422402014-09-19 20:27:36 +02002764
Ahmad Fatoum9a6582b2022-02-23 09:03:23 +01002765 if (ret < 0)
2766 return dev_err_probe(dev, ret, "failed to add to PM domain %s\n", pd->name);
Tomasz Figaaa422402014-09-19 20:27:36 +02002767
2768 dev->pm_domain->detach = genpd_dev_pm_detach;
Russell King632f7ce2015-03-20 15:55:12 +01002769 dev->pm_domain->sync = genpd_dev_pm_sync;
Tomasz Figaaa422402014-09-19 20:27:36 +02002770
Rajendra Nayakc016baf2021-08-12 16:57:21 +05302771 /* Set the default performance state */
2772 pstate = of_get_required_opp_performance_state(dev->of_node, index);
Geert Uytterhoeven65616412021-08-24 17:23:38 +02002773 if (pstate < 0 && pstate != -ENODEV && pstate != -EOPNOTSUPP) {
Rajendra Nayakc016baf2021-08-12 16:57:21 +05302774 ret = pstate;
2775 goto err;
2776 } else if (pstate > 0) {
2777 ret = dev_pm_genpd_set_performance_state(dev, pstate);
2778 if (ret)
2779 goto err;
2780 dev_gpd_data(dev)->default_pstate = pstate;
2781 }
Abel Vesaae8ac192022-11-15 23:25:43 +02002782
2783 if (power_on) {
2784 genpd_lock(pd);
2785 ret = genpd_power_on(pd, 0);
2786 genpd_unlock(pd);
2787 }
2788
2789 if (ret) {
2790 /* Drop the default performance state */
2791 if (dev_gpd_data(dev)->default_pstate) {
2792 dev_pm_genpd_set_performance_state(dev, 0);
2793 dev_gpd_data(dev)->default_pstate = 0;
2794 }
2795
2796 genpd_remove_device(pd, dev);
2797 return -EPROBE_DEFER;
2798 }
2799
Rajendra Nayakc016baf2021-08-12 16:57:21 +05302800 return 1;
2801
2802err:
2803 dev_err(dev, "failed to set required performance state for power-domain %s: %d\n",
2804 pd->name, ret);
2805 genpd_remove_device(pd, dev);
2806 return ret;
Tomasz Figaaa422402014-09-19 20:27:36 +02002807}
Ulf Hansson8cb1cbd62018-05-31 12:59:57 +02002808
2809/**
2810 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
2811 * @dev: Device to attach.
2812 *
2813 * Parse device's OF node to find a PM domain specifier. If such is found,
2814 * attaches the device to the retrieved pm_domain ops.
2815 *
2816 * Returns 1 when a PM domain has successfully been attached, 0 when the
2817 * device doesn't need a PM domain or when multiple power-domains exist for
2818 * it, else a negative error code. Note that if a power-domain exists for the
2819 * device, but it cannot be found or turned on, then -EPROBE_DEFER is returned
2820 * to ensure that the device is not probed and to retry later.
2821 */
2822int genpd_dev_pm_attach(struct device *dev)
2823{
2824 if (!dev->of_node)
2825 return 0;
2826
2827 /*
2828 * Devices with multiple PM domains must be attached separately, as we
2829 * can only attach one PM domain per device.
2830 */
2831 if (of_count_phandle_with_args(dev->of_node, "power-domains",
2832 "#power-domain-cells") != 1)
2833 return 0;
2834
Ulf Hansson51dcf742019-04-25 11:04:10 +02002835 return __genpd_dev_pm_attach(dev, dev, 0, true);
Ulf Hansson8cb1cbd62018-05-31 12:59:57 +02002836}
Tomasz Figaaa422402014-09-19 20:27:36 +02002837EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
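
/*
 * Drivers do not call genpd_dev_pm_attach() directly; bus code reaches it
 * through dev_pm_domain_attach(). A sketch of a hypothetical bus probe path:
 */
static int __maybe_unused example_bus_probe(struct device *dev)
{
	int ret;

	/* Attaches the single PM domain, if any, and powers it on. */
	ret = dev_pm_domain_attach(dev, true);
	if (ret)
		return ret;

	/* ... bind the driver; probe deferral was handled above ... */
	return 0;
}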
Lina Iyer30f604282016-10-14 10:47:51 -07002838
Ulf Hansson3c095f32018-05-31 12:59:58 +02002839/**
2840 * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
2841 * @dev: The device used to lookup the PM domain.
2842 * @index: The index of the PM domain.
2843 *
2844 * Parse device's OF node to find a PM domain specifier at the provided @index.
2845 * If such is found, creates a virtual device and attaches it to the retrieved
2846 * pm_domain ops. To deal with detaching of the virtual device, the ->detach()
2847 * callback in the struct dev_pm_domain is assigned to genpd_dev_pm_detach().
2848 *
2849 * Returns the created virtual device when a PM domain has successfully been
2850 * attached, NULL when the device doesn't need a PM domain, else an ERR_PTR()
2851 * in case of failures. If a power-domain exists for the device, but cannot be
2852 * found or turned on, then ERR_PTR(-EPROBE_DEFER) is returned to ensure that
2853 * the device is not probed and to retry later.
2854 */
2855struct device *genpd_dev_pm_attach_by_id(struct device *dev,
2856 unsigned int index)
2857{
Viresh Kumar560928b2018-10-25 09:07:38 +05302858 struct device *virt_dev;
Ulf Hansson3c095f32018-05-31 12:59:58 +02002859 int num_domains;
2860 int ret;
2861
2862 if (!dev->of_node)
2863 return NULL;
2864
Ulf Hansson3ccf3f02019-04-18 12:27:57 +02002865 /* Verify that the index is within a valid range. */
Ulf Hansson3c095f32018-05-31 12:59:58 +02002866 num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
2867 "#power-domain-cells");
Ulf Hansson3ccf3f02019-04-18 12:27:57 +02002868 if (index >= num_domains)
Ulf Hansson3c095f32018-05-31 12:59:58 +02002869 return NULL;
2870
2871 /* Allocate and register device on the genpd bus. */
Viresh Kumar560928b2018-10-25 09:07:38 +05302872 virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
2873 if (!virt_dev)
Ulf Hansson3c095f32018-05-31 12:59:58 +02002874 return ERR_PTR(-ENOMEM);
2875
Viresh Kumar560928b2018-10-25 09:07:38 +05302876 dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
2877 virt_dev->bus = &genpd_bus_type;
2878 virt_dev->release = genpd_release_dev;
Ulf Hanssone8b04de2019-04-18 12:27:56 +02002879 virt_dev->of_node = of_node_get(dev->of_node);
Ulf Hansson3c095f32018-05-31 12:59:58 +02002880
Viresh Kumar560928b2018-10-25 09:07:38 +05302881 ret = device_register(virt_dev);
Ulf Hansson3c095f32018-05-31 12:59:58 +02002882 if (ret) {
Ulf Hansson71b77692019-04-18 12:27:55 +02002883 put_device(virt_dev);
Ulf Hansson3c095f32018-05-31 12:59:58 +02002884 return ERR_PTR(ret);
2885 }
2886
2887 /* Try to attach the device to the PM domain at the specified index. */
Ulf Hansson51dcf742019-04-25 11:04:10 +02002888 ret = __genpd_dev_pm_attach(virt_dev, dev, index, false);
Ulf Hansson3c095f32018-05-31 12:59:58 +02002889 if (ret < 1) {
Viresh Kumar560928b2018-10-25 09:07:38 +05302890 device_unregister(virt_dev);
Ulf Hansson3c095f32018-05-31 12:59:58 +02002891 return ret ? ERR_PTR(ret) : NULL;
2892 }
2893
Viresh Kumar560928b2018-10-25 09:07:38 +05302894 pm_runtime_enable(virt_dev);
2895 genpd_queue_power_off_work(dev_to_genpd(virt_dev));
Ulf Hansson3c095f32018-05-31 12:59:58 +02002896
Viresh Kumar560928b2018-10-25 09:07:38 +05302897 return virt_dev;
Ulf Hansson3c095f32018-05-31 12:59:58 +02002898}
2899EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
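
/*
 * A consumer-side sketch for a device with multiple PM domains, assuming
 * index 1 exists in its "power-domains" list: attach through the public
 * dev_pm_domain_attach_by_id() wrapper and tie the virtual device to the
 * consumer with a stateless, runtime-PM-managed device link.
 */
static int __maybe_unused example_attach_second_domain(struct device *dev)
{
	struct device *virt_dev;
	struct device_link *link;

	virt_dev = dev_pm_domain_attach_by_id(dev, 1);
	if (IS_ERR(virt_dev))
		return PTR_ERR(virt_dev);
	if (!virt_dev)
		return 0;	/* No PM domain is needed for this device. */

	link = device_link_add(dev, virt_dev,
			       DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
	if (!link) {
		dev_pm_domain_detach(virt_dev, false);
		return -EINVAL;
	}

	return 0;
}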
2900
Ulf Hansson5d6be702018-06-29 13:04:31 +02002901/**
2902 * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
2903 * @dev: The device used to lookup the PM domain.
2904 * @name: The name of the PM domain.
2905 *
2906 * Parse device's OF node to find a PM domain specifier using the
2907 * power-domain-names DT property. For further description see
2908 * genpd_dev_pm_attach_by_id().
2909 */
Douglas Anderson7416f1f2019-02-14 10:12:48 -08002910struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
Ulf Hansson5d6be702018-06-29 13:04:31 +02002911{
2912 int index;
2913
2914 if (!dev->of_node)
2915 return NULL;
2916
2917 index = of_property_match_string(dev->of_node, "power-domain-names",
2918 name);
2919 if (index < 0)
2920 return NULL;
2921
2922 return genpd_dev_pm_attach_by_id(dev, index);
2923}
2924
Lina Iyer30f604282016-10-14 10:47:51 -07002925static const struct of_device_id idle_state_match[] = {
Lina Iyer598da542016-11-03 14:54:35 -07002926 { .compatible = "domain-idle-state", },
Lina Iyer30f604282016-10-14 10:47:51 -07002927 { }
2928};
2929
2930static int genpd_parse_state(struct genpd_power_state *genpd_state,
2931 struct device_node *state_node)
2932{
2933 int err;
2934 u32 residency;
2935 u32 entry_latency, exit_latency;
Lina Iyer30f604282016-10-14 10:47:51 -07002936
2937 err = of_property_read_u32(state_node, "entry-latency-us",
2938 &entry_latency);
2939 if (err) {
Rob Herringea11e942017-07-18 16:42:50 -05002940 pr_debug(" * %pOF missing entry-latency-us property\n",
Joe Perches7a5bd122019-03-04 09:14:38 -08002941 state_node);
Lina Iyer30f604282016-10-14 10:47:51 -07002942 return -EINVAL;
2943 }
2944
2945 err = of_property_read_u32(state_node, "exit-latency-us",
2946 &exit_latency);
2947 if (err) {
Rob Herringea11e942017-07-18 16:42:50 -05002948 pr_debug(" * %pOF missing exit-latency-us property\n",
Joe Perches7a5bd122019-03-04 09:14:38 -08002949 state_node);
Lina Iyer30f604282016-10-14 10:47:51 -07002950 return -EINVAL;
2951 }
2952
2953 err = of_property_read_u32(state_node, "min-residency-us", &residency);
2954 if (!err)
Nikita Zhandaroviche5d1c872023-04-18 06:07:43 -07002955 genpd_state->residency_ns = 1000LL * residency;
Lina Iyer30f604282016-10-14 10:47:51 -07002956
Nikita Zhandaroviche5d1c872023-04-18 06:07:43 -07002957 genpd_state->power_on_latency_ns = 1000LL * exit_latency;
2958 genpd_state->power_off_latency_ns = 1000LL * entry_latency;
Lina Iyer0c9b6942016-10-14 10:47:52 -07002959 genpd_state->fwnode = &state_node->fwnode;
Lina Iyer30f604282016-10-14 10:47:51 -07002960
2961 return 0;
2962}
2963
Ulf Hanssona3381e32018-01-23 21:43:08 +01002964static int genpd_iterate_idle_states(struct device_node *dn,
2965 struct genpd_power_state *states)
2966{
2967 int ret;
2968 struct of_phandle_iterator it;
2969 struct device_node *np;
2970 int i = 0;
2971
2972 ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
2973 if (ret <= 0)
Ulf Hansson56cb2682020-03-10 11:40:23 +01002974 return ret == -ENOENT ? 0 : ret;
Ulf Hanssona3381e32018-01-23 21:43:08 +01002975
2976 /* Loop over the phandles until all the requested entries are found */
2977 of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
2978 np = it.node;
2979 if (!of_match_node(idle_state_match, np))
2980 continue;
Sudeep Hollae0c57a52022-10-25 13:34:32 +01002981
2982 if (!of_device_is_available(np))
2983 continue;
2984
Ulf Hanssona3381e32018-01-23 21:43:08 +01002985 if (states) {
2986 ret = genpd_parse_state(&states[i], np);
2987 if (ret) {
2988 pr_err("Parsing idle state node %pOF failed with err %d\n",
2989 np, ret);
2990 of_node_put(np);
2991 return ret;
2992 }
2993 }
2994 i++;
2995 }
2996
2997 return i;
2998}
2999
Lina Iyer30f604282016-10-14 10:47:51 -07003000/**
3001 * of_genpd_parse_idle_states() - Return array of idle states for the genpd.
3002 *
3003 * @dn: The genpd device node
3004 * @states: The pointer to which the state array will be saved.
3005 * @n: The count of elements in the array returned from this function.
3006 *
3007 * Returns the device states parsed from the OF node. The memory for the states
3008 * is allocated by this function and it is the responsibility of the caller to
Ulf Hansson2c361682018-10-03 16:38:14 +02003009 * free it after use. If zero or more compatible domain idle states are found,
3010 * it returns 0; in case of errors, a negative error code is returned.
Lina Iyer30f604282016-10-14 10:47:51 -07003011 */
3012int of_genpd_parse_idle_states(struct device_node *dn,
3013 struct genpd_power_state **states, int *n)
3014{
3015 struct genpd_power_state *st;
Ulf Hanssona3381e32018-01-23 21:43:08 +01003016 int ret;
Lina Iyer30f604282016-10-14 10:47:51 -07003017
Ulf Hanssona3381e32018-01-23 21:43:08 +01003018 ret = genpd_iterate_idle_states(dn, NULL);
Ulf Hansson2c361682018-10-03 16:38:14 +02003019 if (ret < 0)
3020 return ret;
3021
3022 if (!ret) {
3023 *states = NULL;
3024 *n = 0;
3025 return 0;
3026 }
Lina Iyer30f604282016-10-14 10:47:51 -07003027
Ulf Hanssona3381e32018-01-23 21:43:08 +01003028 st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
Lina Iyer30f604282016-10-14 10:47:51 -07003029 if (!st)
3030 return -ENOMEM;
3031
Ulf Hanssona3381e32018-01-23 21:43:08 +01003032 ret = genpd_iterate_idle_states(dn, st);
3033 if (ret <= 0) {
3034 kfree(st);
3035 return ret < 0 ? ret : -EINVAL;
Lina Iyer30f604282016-10-14 10:47:51 -07003036 }
3037
Ulf Hanssona3381e32018-01-23 21:43:08 +01003038 *states = st;
3039 *n = ret;
Lina Iyer30f604282016-10-14 10:47:51 -07003040
3041 return 0;
3042}
3043EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
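
/*
 * A provider-side sketch for of_genpd_parse_idle_states(), assuming a
 * hypothetical domain whose "domain-idle-states" are described in DT. The
 * parsed array is handed to the genpd before initialization and must stay
 * valid for the lifetime of the domain.
 */
static int __maybe_unused example_init_domain_idle_states(
		struct generic_pm_domain *pd, struct device_node *np)
{
	struct genpd_power_state *states;
	int nr_states, ret;

	ret = of_genpd_parse_idle_states(np, &states, &nr_states);
	if (ret)
		return ret;

	if (nr_states) {
		pd->states = states;
		pd->state_count = nr_states;
	}

	return pm_genpd_init(pd, NULL, true);
}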
3044
Viresh Kumar6e417662017-11-29 15:21:51 +05303045/**
Viresh Kumare38f89d2018-06-13 20:22:04 +05303046 * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node.
3047 *
3048 * @genpd_dev: Genpd's device for which the performance-state needs to be found.
3049 * @opp: struct dev_pm_opp of the OPP for which we need to find the
3050 * performance state.
3051 *
3052 * Returns the performance state encoded in the OPP of the genpd. This calls
3053 * the platform-specific genpd->opp_to_performance_state() callback to translate
3054 * a power domain OPP to a performance state.
3055 *
3056 * Returns the performance state on success and 0 on failure.
3057 */
3058unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
3059 struct dev_pm_opp *opp)
3060{
3061 struct generic_pm_domain *genpd = NULL;
3062 int state;
3063
3064 genpd = container_of(genpd_dev, struct generic_pm_domain, dev);
3065
3066 if (unlikely(!genpd->opp_to_performance_state))
3067 return 0;
3068
3069 genpd_lock(genpd);
3070 state = genpd->opp_to_performance_state(genpd, opp);
3071 genpd_unlock(genpd);
3072
3073 return state;
3074}
3075EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state);
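
/*
 * A sketch of the provider callback that pm_genpd_opp_to_performance_state()
 * relies on. Many providers can simply return the OPP's "opp-level" value;
 * this minimal form is an assumption, and a real implementation may need a
 * platform-specific translation instead.
 */
static unsigned int __maybe_unused example_opp_to_performance_state(
		struct generic_pm_domain *genpd, struct dev_pm_opp *opp)
{
	return dev_pm_opp_get_level(opp);
}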
3076
Ulf Hansson3c095f32018-05-31 12:59:58 +02003077static int __init genpd_bus_init(void)
3078{
3079 return bus_register(&genpd_bus_type);
3080}
3081core_initcall(genpd_bus_init);
3082
Rafael J. Wysockid30d8192014-11-27 22:38:05 +01003083#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
Maciej Matraszek2bd53062014-09-15 13:09:10 +02003084
3085
3086/*** debugfs support ***/
3087
Jon Hunter8b0510b2016-08-11 11:40:05 +01003088#ifdef CONFIG_DEBUG_FS
Maciej Matraszek2bd53062014-09-15 13:09:10 +02003089/*
3090 * TODO: This function is a slightly modified version of rtpm_status_show
Rafael J. Wysockid30d8192014-11-27 22:38:05 +01003091 * from sysfs.c, so generalize it.
Maciej Matraszek2bd53062014-09-15 13:09:10 +02003092 */
Maciej Matraszek2bd53062014-09-15 13:09:10 +02003093static void rtpm_status_str(struct seq_file *s, struct device *dev)
3094{
3095 static const char * const status_lookup[] = {
3096 [RPM_ACTIVE] = "active",
3097 [RPM_RESUMING] = "resuming",
3098 [RPM_SUSPENDED] = "suspended",
3099 [RPM_SUSPENDING] = "suspending"
3100 };
3101 const char *p = "";
3102
3103 if (dev->power.runtime_error)
3104 p = "error";
3105 else if (dev->power.disable_depth)
3106 p = "unsupported";
3107 else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
3108 p = status_lookup[dev->power.runtime_status];
3109 else
3110 WARN_ON(1);
3111
Dmitry Osipenko45fbc462021-01-21 00:12:32 +03003112 seq_printf(s, "%-25s ", p);
3113}
3114
3115static void perf_status_str(struct seq_file *s, struct device *dev)
3116{
3117 struct generic_pm_domain_data *gpd_data;
3118
3119 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
3120 seq_put_decimal_ull(s, "", gpd_data->performance_state);
Maciej Matraszek2bd53062014-09-15 13:09:10 +02003121}
Maciej Matraszek2bd53062014-09-15 13:09:10 +02003122
Ulf Hansson9e9704e2017-10-06 09:02:06 +02003123static int genpd_summary_one(struct seq_file *s,
3124 struct generic_pm_domain *genpd)
Maciej Matraszek2bd53062014-09-15 13:09:10 +02003125{
3126 static const char * const status_lookup[] = {
Ulf Hansson49f618e2020-09-24 13:04:47 +02003127 [GENPD_STATE_ON] = "on",
3128 [GENPD_STATE_OFF] = "off"
Maciej Matraszek2bd53062014-09-15 13:09:10 +02003129 };
3130 struct pm_domain_data *pm_data;
3131 const char *kobj_path;
3132 struct gpd_link *link;
Geert Uytterhoeven6954d432016-02-23 17:49:17 +01003133 char state[16];
Maciej Matraszek2bd53062014-09-15 13:09:10 +02003134 int ret;
3135
Lina Iyer35241d12016-10-14 10:47:54 -07003136 ret = genpd_lock_interruptible(genpd);
Maciej Matraszek2bd53062014-09-15 13:09:10 +02003137 if (ret)
3138 return -ERESTARTSYS;
3139
Kevin Hilman66a5ca42015-03-02 11:24:28 -08003140 if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
Maciej Matraszek2bd53062014-09-15 13:09:10 +02003141 goto exit;
Ulf Hansson41e2c8e2017-03-20 11:19:20 +01003142 if (!genpd_status_on(genpd))
Geert Uytterhoeven0ba554e2016-02-23 17:49:18 +01003143 snprintf(state, sizeof(state), "%s-%u",
Geert Uytterhoeven6954d432016-02-23 17:49:17 +01003144 status_lookup[genpd->status], genpd->state_idx);
Axel Haslamfc5cbf02016-02-15 11:10:51 +01003145 else
Geert Uytterhoeven6954d432016-02-23 17:49:17 +01003146 snprintf(state, sizeof(state), "%s",
3147 status_lookup[genpd->status]);
Dmitry Osipenko45fbc462021-01-21 00:12:32 +03003148 seq_printf(s, "%-30s %-50s %u", genpd->name, state, genpd->performance_state);
Maciej Matraszek2bd53062014-09-15 13:09:10 +02003149
3150 /*
3151 * Modifications on the list require holding locks on both
Kees Cook8d87ae42020-07-08 16:32:13 -07003152 * parent and child, so we are safe.
Kevin Hilman66a5ca42015-03-02 11:24:28 -08003153 * Also genpd->name is immutable.
Maciej Matraszek2bd53062014-09-15 13:09:10 +02003154 */
Kees Cook8d87ae42020-07-08 16:32:13 -07003155 list_for_each_entry(link, &genpd->parent_links, parent_node) {
Dmitry Osipenko45fbc462021-01-21 00:12:32 +03003156 if (list_is_first(&link->parent_node, &genpd->parent_links))
3157 seq_printf(s, "\n%48s", " ");
Kees Cook8d87ae42020-07-08 16:32:13 -07003158 seq_printf(s, "%s", link->child->name);
3159 if (!list_is_last(&link->parent_node, &genpd->parent_links))
Maciej Matraszek2bd53062014-09-15 13:09:10 +02003160 seq_puts(s, ", ");
3161 }
3162
Kevin Hilman66a5ca42015-03-02 11:24:28 -08003163 list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
Lina Iyerd716f472016-10-14 10:47:55 -07003164 kobj_path = kobject_get_path(&pm_data->dev->kobj,
3165 genpd_is_irq_safe(genpd) ?
3166 GFP_ATOMIC : GFP_KERNEL);
Maciej Matraszek2bd53062014-09-15 13:09:10 +02003167 if (kobj_path == NULL)
3168 continue;
3169
3170 seq_printf(s, "\n    %-50s ", kobj_path);
3171 rtpm_status_str(s, pm_data->dev);
Dmitry Osipenko45fbc462021-01-21 00:12:32 +03003172 perf_status_str(s, pm_data->dev);
Maciej Matraszek2bd53062014-09-15 13:09:10 +02003173 kfree(kobj_path);
3174 }
3175
3176 seq_puts(s, "\n");
3177exit:
Lina Iyer35241d12016-10-14 10:47:54 -07003178 genpd_unlock(genpd);
Maciej Matraszek2bd53062014-09-15 13:09:10 +02003179
3180 return 0;
3181}
3182
Yangtao Lid32dcc62018-12-15 03:45:26 -05003183static int summary_show(struct seq_file *s, void *data)
Maciej Matraszek2bd53062014-09-15 13:09:10 +02003184{
Kevin Hilman66a5ca42015-03-02 11:24:28 -08003185 struct generic_pm_domain *genpd;
Maciej Matraszek2bd53062014-09-15 13:09:10 +02003186 int ret = 0;
3187
Dmitry Osipenko45fbc462021-01-21 00:12:32 +03003188 seq_puts(s, "domain status children performance\n");
Geert Uytterhoeven15dec672015-08-11 14:50:49 +02003189 seq_puts(s, " /device runtime status\n");
Dmitry Osipenko45fbc462021-01-21 00:12:32 +03003190 seq_puts(s, "----------------------------------------------------------------------------------------------\n");
Maciej Matraszek2bd53062014-09-15 13:09:10 +02003191
3192 ret = mutex_lock_interruptible(&gpd_list_lock);
3193 if (ret)
3194 return -ERESTARTSYS;
3195
Kevin Hilman66a5ca42015-03-02 11:24:28 -08003196 list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
Ulf Hansson9e9704e2017-10-06 09:02:06 +02003197 ret = genpd_summary_one(s, genpd);
Maciej Matraszek2bd53062014-09-15 13:09:10 +02003198 if (ret)
3199 break;
3200 }
3201 mutex_unlock(&gpd_list_lock);
3202
3203 return ret;
3204}
3205
Yangtao Lid32dcc62018-12-15 03:45:26 -05003206static int status_show(struct seq_file *s, void *data)
Maciej Matraszek2bd53062014-09-15 13:09:10 +02003207{
Thara Gopinathb6a1d092017-07-14 13:10:16 -04003208 static const char * const status_lookup[] = {
Ulf Hansson49f618e2020-09-24 13:04:47 +02003209 [GENPD_STATE_ON] = "on",
3210 [GENPD_STATE_OFF] = "off"
Thara Gopinathb6a1d092017-07-14 13:10:16 -04003211 };
3212
3213 struct generic_pm_domain *genpd = s->private;
3214 int ret = 0;
3215
3216 ret = genpd_lock_interruptible(genpd);
3217 if (ret)
3218 return -ERESTARTSYS;
3219
3220 if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
3221 goto exit;
3222
Ulf Hansson49f618e2020-09-24 13:04:47 +02003223 if (genpd->status == GENPD_STATE_OFF)
Thara Gopinathb6a1d092017-07-14 13:10:16 -04003224 seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
3225 genpd->state_idx);
3226 else
3227 seq_printf(s, "%s\n", status_lookup[genpd->status]);
3228exit:
3229 genpd_unlock(genpd);
3230 return ret;
Maciej Matraszek2bd53062014-09-15 13:09:10 +02003231}
3232
Yangtao Lid32dcc62018-12-15 03:45:26 -05003233static int sub_domains_show(struct seq_file *s, void *data)
Thara Gopinathb6a1d092017-07-14 13:10:16 -04003234{
3235 struct generic_pm_domain *genpd = s->private;
3236 struct gpd_link *link;
3237 int ret = 0;
3238
3239 ret = genpd_lock_interruptible(genpd);
3240 if (ret)
3241 return -ERESTARTSYS;
3242
Kees Cook8d87ae42020-07-08 16:32:13 -07003243 list_for_each_entry(link, &genpd->parent_links, parent_node)
3244 seq_printf(s, "%s\n", link->child->name);
Thara Gopinathb6a1d092017-07-14 13:10:16 -04003245
3246 genpd_unlock(genpd);
3247 return ret;
3248}
3249
Yangtao Lid32dcc62018-12-15 03:45:26 -05003250static int idle_states_show(struct seq_file *s, void *data)
Thara Gopinathb6a1d092017-07-14 13:10:16 -04003251{
3252 struct generic_pm_domain *genpd = s->private;
Ulf Hanssonbd40cbb2022-04-19 19:29:16 +02003253 u64 now, delta, idle_time = 0;
Thara Gopinathb6a1d092017-07-14 13:10:16 -04003254 unsigned int i;
3255 int ret = 0;
3256
3257 ret = genpd_lock_interruptible(genpd);
3258 if (ret)
3259 return -ERESTARTSYS;
3260
Lina Iyerc6a113b2020-10-15 14:47:22 -06003261 seq_puts(s, "State Time Spent(ms) Usage Rejected\n");
Thara Gopinathb6a1d092017-07-14 13:10:16 -04003262
3263 for (i = 0; i < genpd->state_count; i++) {
Ulf Hanssonbd40cbb2022-04-19 19:29:16 +02003264 idle_time += genpd->states[i].idle_time;
Thara Gopinathb6a1d092017-07-14 13:10:16 -04003265
Ulf Hanssonbd40cbb2022-04-19 19:29:16 +02003266 if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
3267 now = ktime_get_mono_fast_ns();
3268 if (now > genpd->accounting_time) {
3269 delta = now - genpd->accounting_time;
3270 idle_time += delta;
3271 }
3272 }
Thara Gopinathb6a1d092017-07-14 13:10:16 -04003273
Ulf Hanssonbd40cbb2022-04-19 19:29:16 +02003274 do_div(idle_time, NSEC_PER_MSEC);
3275 seq_printf(s, "S%-13i %-14llu %-14llu %llu\n", i, idle_time,
3276 genpd->states[i].usage, genpd->states[i].rejected);
Thara Gopinathb6a1d092017-07-14 13:10:16 -04003277 }
3278
3279 genpd_unlock(genpd);
3280 return ret;
3281}
3282
Yangtao Lid32dcc62018-12-15 03:45:26 -05003283static int active_time_show(struct seq_file *s, void *data)
Thara Gopinathb6a1d092017-07-14 13:10:16 -04003284{
3285 struct generic_pm_domain *genpd = s->private;
Ulf Hanssonbd40cbb2022-04-19 19:29:16 +02003286 u64 now, on_time, delta = 0;
Thara Gopinathb6a1d092017-07-14 13:10:16 -04003287 int ret = 0;
3288
3289 ret = genpd_lock_interruptible(genpd);
3290 if (ret)
3291 return -ERESTARTSYS;
3292
Ulf Hanssonbd40cbb2022-04-19 19:29:16 +02003293 if (genpd->status == GENPD_STATE_ON) {
3294 now = ktime_get_mono_fast_ns();
3295 if (now > genpd->accounting_time)
3296 delta = now - genpd->accounting_time;
3297 }
Thara Gopinathb6a1d092017-07-14 13:10:16 -04003298
Ulf Hanssonbd40cbb2022-04-19 19:29:16 +02003299 on_time = genpd->on_time + delta;
3300 do_div(on_time, NSEC_PER_MSEC);
3301 seq_printf(s, "%llu ms\n", on_time);
Thara Gopinathb6a1d092017-07-14 13:10:16 -04003302
3303 genpd_unlock(genpd);
3304 return ret;
3305}
3306
Yangtao Lid32dcc62018-12-15 03:45:26 -05003307static int total_idle_time_show(struct seq_file *s, void *data)
Thara Gopinathb6a1d092017-07-14 13:10:16 -04003308{
3309 struct generic_pm_domain *genpd = s->private;
Ulf Hanssonbd40cbb2022-04-19 19:29:16 +02003310 u64 now, delta, total = 0;
Thara Gopinathb6a1d092017-07-14 13:10:16 -04003311 unsigned int i;
3312 int ret = 0;
3313
3314 ret = genpd_lock_interruptible(genpd);
3315 if (ret)
3316 return -ERESTARTSYS;
3317
3318 for (i = 0; i < genpd->state_count; i++) {
Ulf Hanssonbd40cbb2022-04-19 19:29:16 +02003319 total += genpd->states[i].idle_time;
Thara Gopinathb6a1d092017-07-14 13:10:16 -04003320
Ulf Hanssonbd40cbb2022-04-19 19:29:16 +02003321 if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
3322 now = ktime_get_mono_fast_ns();
3323 if (now > genpd->accounting_time) {
3324 delta = now - genpd->accounting_time;
3325 total += delta;
3326 }
3327 }
Thara Gopinathb6a1d092017-07-14 13:10:16 -04003328 }
Thara Gopinathb6a1d092017-07-14 13:10:16 -04003329
Ulf Hanssonbd40cbb2022-04-19 19:29:16 +02003330 do_div(total, NSEC_PER_MSEC);
3331 seq_printf(s, "%llu ms\n", total);
Thara Gopinathb6a1d092017-07-14 13:10:16 -04003332
3333 genpd_unlock(genpd);
3334 return ret;
3335}
3336
3337
Yangtao Lid32dcc62018-12-15 03:45:26 -05003338static int devices_show(struct seq_file *s, void *data)
Thara Gopinathb6a1d092017-07-14 13:10:16 -04003339{
3340 struct generic_pm_domain *genpd = s->private;
3341 struct pm_domain_data *pm_data;
3342 const char *kobj_path;
3343 int ret = 0;
3344
3345 ret = genpd_lock_interruptible(genpd);
3346 if (ret)
3347 return -ERESTARTSYS;
3348
3349 list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
3350 kobj_path = kobject_get_path(&pm_data->dev->kobj,
3351 genpd_is_irq_safe(genpd) ?
3352 GFP_ATOMIC : GFP_KERNEL);
3353 if (kobj_path == NULL)
3354 continue;
3355
3356 seq_printf(s, "%s\n", kobj_path);
3357 kfree(kobj_path);
3358 }
3359
3360 genpd_unlock(genpd);
3361 return ret;
3362}
3363
Yangtao Lid32dcc62018-12-15 03:45:26 -05003364static int perf_state_show(struct seq_file *s, void *data)
Rajendra Nayake8912812018-05-30 15:15:17 +05303365{
3366 struct generic_pm_domain *genpd = s->private;
3367
3368 if (genpd_lock_interruptible(genpd))
3369 return -ERESTARTSYS;
3370
3371 seq_printf(s, "%u\n", genpd->performance_state);
3372
3373 genpd_unlock(genpd);
3374 return 0;
3375}
3376
Yangtao Lid32dcc62018-12-15 03:45:26 -05003377DEFINE_SHOW_ATTRIBUTE(summary);
3378DEFINE_SHOW_ATTRIBUTE(status);
3379DEFINE_SHOW_ATTRIBUTE(sub_domains);
3380DEFINE_SHOW_ATTRIBUTE(idle_states);
3381DEFINE_SHOW_ATTRIBUTE(active_time);
3382DEFINE_SHOW_ATTRIBUTE(total_idle_time);
3383DEFINE_SHOW_ATTRIBUTE(devices);
3384DEFINE_SHOW_ATTRIBUTE(perf_state);
Maciej Matraszek2bd53062014-09-15 13:09:10 +02003385
Thierry Strudel718072c2020-12-08 11:19:55 -08003386static void genpd_debug_add(struct generic_pm_domain *genpd)
Maciej Matraszek2bd53062014-09-15 13:09:10 +02003387{
3388 struct dentry *d;
Thierry Strudel718072c2020-12-08 11:19:55 -08003389
3390 if (!genpd_debugfs_dir)
3391 return;
3392
3393 d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
3394
3395 debugfs_create_file("current_state", 0444,
3396 d, genpd, &status_fops);
3397 debugfs_create_file("sub_domains", 0444,
3398 d, genpd, &sub_domains_fops);
3399 debugfs_create_file("idle_states", 0444,
3400 d, genpd, &idle_states_fops);
3401 debugfs_create_file("active_time", 0444,
3402 d, genpd, &active_time_fops);
3403 debugfs_create_file("total_idle_time", 0444,
3404 d, genpd, &total_idle_time_fops);
3405 debugfs_create_file("devices", 0444,
3406 d, genpd, &devices_fops);
3407 if (genpd->set_performance_state)
3408 debugfs_create_file("perf_state", 0444,
3409 d, genpd, &perf_state_fops);
3410}
3411
3412static int __init genpd_debug_init(void)
3413{
Thara Gopinathb6a1d092017-07-14 13:10:16 -04003414 struct generic_pm_domain *genpd;
Maciej Matraszek2bd53062014-09-15 13:09:10 +02003415
Ulf Hansson9e9704e2017-10-06 09:02:06 +02003416 genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
Maciej Matraszek2bd53062014-09-15 13:09:10 +02003417
Greg Kroah-Hartmane16a42c2019-01-22 16:21:05 +01003418 debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir,
3419 NULL, &summary_fops);
Maciej Matraszek2bd53062014-09-15 13:09:10 +02003420
Thierry Strudel718072c2020-12-08 11:19:55 -08003421 list_for_each_entry(genpd, &gpd_list, gpd_list_node)
3422 genpd_debug_add(genpd);
Thara Gopinathb6a1d092017-07-14 13:10:16 -04003423
Maciej Matraszek2bd53062014-09-15 13:09:10 +02003424 return 0;
3425}
Ulf Hansson9e9704e2017-10-06 09:02:06 +02003426late_initcall(genpd_debug_init);
Maciej Matraszek2bd53062014-09-15 13:09:10 +02003427
Ulf Hansson9e9704e2017-10-06 09:02:06 +02003428static void __exit genpd_debug_exit(void)
Maciej Matraszek2bd53062014-09-15 13:09:10 +02003429{
Ulf Hansson9e9704e2017-10-06 09:02:06 +02003430 debugfs_remove_recursive(genpd_debugfs_dir);
Maciej Matraszek2bd53062014-09-15 13:09:10 +02003431}
Ulf Hansson9e9704e2017-10-06 09:02:06 +02003432__exitcall(genpd_debug_exit);
Jon Hunter8b0510b2016-08-11 11:40:05 +01003433#endif /* CONFIG_DEBUG_FS */