blob: 82ae1f26e634572b943d18b8d86267f0a69911a6 [file] [log] [blame]
Stephen Boydebafb632018-12-11 09:43:03 -08001// SPDX-License-Identifier: GPL-2.0
Lars-Peter Clausen8ef997b2012-09-11 19:56:23 +01002#include <linux/clk.h>
3#include <linux/device.h>
4#include <linux/export.h>
5#include <linux/gfp.h>
6
/*
 * Per-clock devres payload for the devm_clk_* getters: the clock reference
 * itself plus an optional teardown hook that devm_clk_release() invokes
 * before clk_put().
 */
struct devm_clk_state {
	struct clk *clk;
	/* e.g. clk_unprepare or clk_disable_unprepare; NULL for plain get */
	void (*exit)(struct clk *clk);
};
11
Lars-Peter Clausen8ef997b2012-09-11 19:56:23 +010012static void devm_clk_release(struct device *dev, void *res)
13{
Uwe Kleine-König8b3d7432022-06-20 19:18:15 +020014 struct devm_clk_state *state = res;
Uwe Kleine-Königabae8e52022-05-20 09:57:35 +020015
16 if (state->exit)
17 state->exit(state->clk);
18
19 clk_put(state->clk);
20}
21
22static struct clk *__devm_clk_get(struct device *dev, const char *id,
23 struct clk *(*get)(struct device *dev, const char *id),
24 int (*init)(struct clk *clk),
25 void (*exit)(struct clk *clk))
26{
27 struct devm_clk_state *state;
28 struct clk *clk;
29 int ret;
30
31 state = devres_alloc(devm_clk_release, sizeof(*state), GFP_KERNEL);
32 if (!state)
33 return ERR_PTR(-ENOMEM);
34
35 clk = get(dev, id);
36 if (IS_ERR(clk)) {
37 ret = PTR_ERR(clk);
38 goto err_clk_get;
39 }
40
41 if (init) {
42 ret = init(clk);
43 if (ret)
44 goto err_clk_init;
45 }
46
47 state->clk = clk;
48 state->exit = exit;
49
50 devres_add(dev, state);
51
52 return clk;
53
54err_clk_init:
55
56 clk_put(clk);
57err_clk_get:
58
59 devres_free(state);
60 return ERR_PTR(ret);
Lars-Peter Clausen8ef997b2012-09-11 19:56:23 +010061}
62
/*
 * devm_clk_get - resource-managed clk_get()
 *
 * Looks the clock up with clk_get(); the reference is dropped automatically
 * (via devm_clk_release()) when the device is unbound.  No prepare/enable is
 * performed (both hooks are NULL).
 */
struct clk *devm_clk_get(struct device *dev, const char *id)
{
	return __devm_clk_get(dev, id, clk_get, NULL, NULL);
}
EXPORT_SYMBOL(devm_clk_get);
68
/*
 * devm_clk_get_prepared - devm_clk_get() + clk_prepare()
 *
 * The clock is prepared right after lookup; on release it is unprepared
 * before the reference is dropped.
 */
struct clk *devm_clk_get_prepared(struct device *dev, const char *id)
{
	return __devm_clk_get(dev, id, clk_get, clk_prepare, clk_unprepare);
}
EXPORT_SYMBOL_GPL(devm_clk_get_prepared);
74
/*
 * devm_clk_get_enabled - devm_clk_get() + clk_prepare_enable()
 *
 * The clock is prepared and enabled after lookup; on release it is disabled
 * and unprepared before the reference is dropped.
 */
struct clk *devm_clk_get_enabled(struct device *dev, const char *id)
{
	return __devm_clk_get(dev, id, clk_get,
			      clk_prepare_enable, clk_disable_unprepare);
}
EXPORT_SYMBOL_GPL(devm_clk_get_enabled);
81
/*
 * devm_clk_get_optional - resource-managed clk_get_optional()
 *
 * Same lifetime management as devm_clk_get(), but the lookup is done with
 * clk_get_optional() (presumably yielding NULL rather than an error for a
 * missing clock — see clk_get_optional()'s documentation).
 */
struct clk *devm_clk_get_optional(struct device *dev, const char *id)
{
	return __devm_clk_get(dev, id, clk_get_optional, NULL, NULL);
}
EXPORT_SYMBOL(devm_clk_get_optional);
87
/*
 * devm_clk_get_optional_prepared - devm_clk_get_optional() + clk_prepare()
 *
 * Optional lookup via clk_get_optional(); the clock is prepared after
 * lookup and unprepared before release.
 */
struct clk *devm_clk_get_optional_prepared(struct device *dev, const char *id)
{
	return __devm_clk_get(dev, id, clk_get_optional,
			      clk_prepare, clk_unprepare);
}
EXPORT_SYMBOL_GPL(devm_clk_get_optional_prepared);
94
/*
 * devm_clk_get_optional_enabled - devm_clk_get_optional() +
 *                                 clk_prepare_enable()
 *
 * Optional lookup via clk_get_optional(); the clock is prepared and enabled
 * after lookup, and disabled/unprepared before release.
 */
struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id)
{
	return __devm_clk_get(dev, id, clk_get_optional,
			      clk_prepare_enable, clk_disable_unprepare);
}
EXPORT_SYMBOL_GPL(devm_clk_get_optional_enabled);
101
Bartosz Golaszewski9934a1bd2024-08-05 10:57:31 +0200102struct clk *devm_clk_get_optional_enabled_with_rate(struct device *dev,
103 const char *id,
104 unsigned long rate)
105{
106 struct clk *clk;
107 int ret;
108
109 clk = __devm_clk_get(dev, id, clk_get_optional, NULL,
110 clk_disable_unprepare);
111 if (IS_ERR(clk))
112 return ERR_CAST(clk);
113
114 ret = clk_set_rate(clk, rate);
115 if (ret)
116 goto out_put_clk;
117
118 ret = clk_prepare_enable(clk);
119 if (ret)
120 goto out_put_clk;
121
122 return clk;
123
124out_put_clk:
125 devm_clk_put(dev, clk);
126 return ERR_PTR(ret);
127}
128EXPORT_SYMBOL_GPL(devm_clk_get_optional_enabled_with_rate);
129
/* devres payload for the clk_bulk_* helpers: clock array plus its length. */
struct clk_bulk_devres {
	struct clk_bulk_data *clks;
	int num_clks;
};
134
/* devres destructor for devm_clk_bulk_get{,_optional}(): put every clock. */
static void devm_clk_bulk_release(struct device *dev, void *res)
{
	struct clk_bulk_devres *devres = res;

	clk_bulk_put(devres->num_clks, devres->clks);
}
141
Sylwester Nawrocki9bd5ef02019-06-19 11:39:26 +0200142static int __devm_clk_bulk_get(struct device *dev, int num_clks,
143 struct clk_bulk_data *clks, bool optional)
Dong Aisheng618aee02017-05-19 21:49:05 +0800144{
145 struct clk_bulk_devres *devres;
146 int ret;
147
148 devres = devres_alloc(devm_clk_bulk_release,
149 sizeof(*devres), GFP_KERNEL);
150 if (!devres)
151 return -ENOMEM;
152
Sylwester Nawrocki9bd5ef02019-06-19 11:39:26 +0200153 if (optional)
154 ret = clk_bulk_get_optional(dev, num_clks, clks);
155 else
156 ret = clk_bulk_get(dev, num_clks, clks);
Dong Aisheng618aee02017-05-19 21:49:05 +0800157 if (!ret) {
158 devres->clks = clks;
159 devres->num_clks = num_clks;
160 devres_add(dev, devres);
161 } else {
162 devres_free(devres);
163 }
164
165 return ret;
166}
Sylwester Nawrocki9bd5ef02019-06-19 11:39:26 +0200167
/*
 * devm_clk_bulk_get - resource-managed clk_bulk_get()
 *
 * All clocks acquired into @clks are put automatically on driver detach.
 */
int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
		      struct clk_bulk_data *clks)
{
	return __devm_clk_bulk_get(dev, num_clks, clks, false);
}
EXPORT_SYMBOL_GPL(devm_clk_bulk_get);
174
/*
 * devm_clk_bulk_get_optional - resource-managed clk_bulk_get_optional()
 *
 * Same management as devm_clk_bulk_get(), but uses the optional bulk lookup.
 */
int __must_check devm_clk_bulk_get_optional(struct device *dev, int num_clks,
		      struct clk_bulk_data *clks)
{
	return __devm_clk_bulk_get(dev, num_clks, clks, true);
}
EXPORT_SYMBOL_GPL(devm_clk_bulk_get_optional);
181
/* devres destructor for devm_clk_bulk_get_all(): put all clocks and free
 * the array that clk_bulk_get_all() allocated. */
static void devm_clk_bulk_release_all(struct device *dev, void *res)
{
	struct clk_bulk_devres *devres = res;

	clk_bulk_put_all(devres->num_clks, devres->clks);
}
188
Dong Aishengf08c2e22018-08-31 12:45:55 +0800189int __must_check devm_clk_bulk_get_all(struct device *dev,
190 struct clk_bulk_data **clks)
191{
192 struct clk_bulk_devres *devres;
193 int ret;
194
Brian Norrisf828b0b2021-07-30 19:59:50 -0700195 devres = devres_alloc(devm_clk_bulk_release_all,
Dong Aishengf08c2e22018-08-31 12:45:55 +0800196 sizeof(*devres), GFP_KERNEL);
197 if (!devres)
198 return -ENOMEM;
199
200 ret = clk_bulk_get_all(dev, &devres->clks);
201 if (ret > 0) {
202 *clks = devres->clks;
203 devres->num_clks = ret;
204 devres_add(dev, devres);
205 } else {
206 devres_free(devres);
207 }
208
209 return ret;
210}
211EXPORT_SYMBOL_GPL(devm_clk_bulk_get_all);
212
/* devres destructor for devm_clk_bulk_get_all_enable(): disable and
 * unprepare every clock, then put them all. */
static void devm_clk_bulk_release_all_enable(struct device *dev, void *res)
{
	struct clk_bulk_devres *devres = res;

	clk_bulk_disable_unprepare(devres->num_clks, devres->clks);
	clk_bulk_put_all(devres->num_clks, devres->clks);
}
220
221int __must_check devm_clk_bulk_get_all_enable(struct device *dev,
222 struct clk_bulk_data **clks)
223{
224 struct clk_bulk_devres *devres;
225 int ret;
226
227 devres = devres_alloc(devm_clk_bulk_release_all_enable,
228 sizeof(*devres), GFP_KERNEL);
229 if (!devres)
230 return -ENOMEM;
231
232 ret = clk_bulk_get_all(dev, &devres->clks);
233 if (ret > 0) {
234 *clks = devres->clks;
235 devres->num_clks = ret;
236 } else {
237 devres_free(devres);
238 return ret;
239 }
240
241 ret = clk_bulk_prepare_enable(devres->num_clks, *clks);
242 if (!ret) {
243 devres_add(dev, devres);
244 } else {
245 clk_bulk_put_all(devres->num_clks, devres->clks);
246 devres_free(devres);
247 }
248
249 return ret;
250}
251EXPORT_SYMBOL_GPL(devm_clk_bulk_get_all_enable);
252
/*
 * devres match callback: selects the entry whose clock equals @data.
 * Reading the payload through a struct clk ** works because struct
 * devm_clk_state's first member is the clk pointer.  A NULL entry is
 * unexpected — WARN and report no match.
 */
static int devm_clk_match(struct device *dev, void *res, void *data)
{
	struct clk **c = res;
	if (!c || !*c) {
		WARN_ON(!c || !*c);
		return 0;
	}
	return *c == data;
}
262
/*
 * devm_clk_put - release a devres-managed clock before driver detach
 *
 * Finds the devres entry matching @clk (via devm_clk_match()) and releases
 * it immediately, which runs devm_clk_release() — the exit hook, if any,
 * then clk_put().  WARNs if devres_release() reports failure.
 */
void devm_clk_put(struct device *dev, struct clk *clk)
{
	int ret;

	ret = devres_release(dev, devm_clk_release, devm_clk_match, clk);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_clk_put);
Kuninori Morimoto71a2f112016-12-05 05:23:20 +0000272
273struct clk *devm_get_clk_from_child(struct device *dev,
274 struct device_node *np, const char *con_id)
275{
Andrey Skvortsov66fbfb32023-08-05 11:48:47 +0300276 struct devm_clk_state *state;
277 struct clk *clk;
Kuninori Morimoto71a2f112016-12-05 05:23:20 +0000278
Andrey Skvortsov66fbfb32023-08-05 11:48:47 +0300279 state = devres_alloc(devm_clk_release, sizeof(*state), GFP_KERNEL);
280 if (!state)
Kuninori Morimoto71a2f112016-12-05 05:23:20 +0000281 return ERR_PTR(-ENOMEM);
282
283 clk = of_clk_get_by_name(np, con_id);
284 if (!IS_ERR(clk)) {
Andrey Skvortsov66fbfb32023-08-05 11:48:47 +0300285 state->clk = clk;
286 devres_add(dev, state);
Kuninori Morimoto71a2f112016-12-05 05:23:20 +0000287 } else {
Andrey Skvortsov66fbfb32023-08-05 11:48:47 +0300288 devres_free(state);
Kuninori Morimoto71a2f112016-12-05 05:23:20 +0000289 }
290
291 return clk;
292}
293EXPORT_SYMBOL(devm_get_clk_from_child);