blob: d86a02563f6c8c329a820748dd0966ba2bc0992e [file] [log] [blame]
Sudeep Holla6d6a1d82017-06-13 17:19:36 +01001// SPDX-License-Identifier: GPL-2.0
2/*
3 * System Control and Power Interface (SCMI) Protocol based clock driver
4 *
Cristian Marussi2641ee12024-04-15 17:36:45 +01005 * Copyright (C) 2018-2024 ARM Ltd.
Sudeep Holla6d6a1d82017-06-13 17:19:36 +01006 */
7
Cristian Marussi2641ee12024-04-15 17:36:45 +01008#include <linux/bits.h>
Sudeep Holla6d6a1d82017-06-13 17:19:36 +01009#include <linux/clk-provider.h>
10#include <linux/device.h>
11#include <linux/err.h>
12#include <linux/of.h>
13#include <linux/module.h>
14#include <linux/scmi_protocol.h>
15#include <asm/div64.h>
16
/* Readable aliases for the 'atomic' flag taken by the SCMI enable/disable ops */
#define NOT_ATOMIC false
#define ATOMIC true

/*
 * Per-clock capabilities that determine which callbacks a clk_ops
 * instance must expose. Each feature is one bit of the 'feats_key'
 * used to index the clk_ops database in the probe path.
 */
enum scmi_clk_feats {
	SCMI_CLK_ATOMIC_SUPPORTED,	/* transport can enable/disable atomically */
	SCMI_CLK_STATE_CTRL_SUPPORTED,	/* gate control not forbidden by firmware */
	SCMI_CLK_RATE_CTRL_SUPPORTED,	/* rate control not forbidden by firmware */
	SCMI_CLK_PARENT_CTRL_SUPPORTED,	/* parent control not forbidden by firmware */
	SCMI_CLK_DUTY_CYCLE_SUPPORTED,	/* OEM extended config (duty cycle) present */
	SCMI_CLK_FEATS_COUNT
};

/* One clk_ops slot per possible combination of the feature bits above */
#define SCMI_MAX_CLK_OPS BIT(SCMI_CLK_FEATS_COUNT)
30
/* SCMI clock protocol operations, obtained once at probe time */
static const struct scmi_clk_proto_ops *scmi_proto_clk_ops;

/*
 * struct scmi_clk - Driver-private per-clock descriptor
 * @id:		SCMI clock identifier (also the index in the provider array)
 * @dev:	Underlying SCMI device, used for diagnostics
 * @hw:		Handle registered with the common clock framework
 * @info:	Firmware-provided clock description (name, rates, parents, ...)
 * @ph:		SCMI clock protocol handle used for all protocol calls
 * @parent_data: Parent table mapping clk framework indices to SCMI parent IDs
 */
struct scmi_clk {
	u32 id;
	struct device *dev;
	struct clk_hw hw;
	const struct scmi_clock_info *info;
	const struct scmi_protocol_handle *ph;
	struct clk_parent_data *parent_data;
};

/* Retrieve the enclosing scmi_clk from the embedded clk_hw */
#define to_scmi_clk(clk) container_of(clk, struct scmi_clk, hw)
43
44static unsigned long scmi_clk_recalc_rate(struct clk_hw *hw,
45 unsigned long parent_rate)
46{
47 int ret;
48 u64 rate;
49 struct scmi_clk *clk = to_scmi_clk(hw);
50
Cristian Marussibeb076b2021-03-16 12:48:43 +000051 ret = scmi_proto_clk_ops->rate_get(clk->ph, clk->id, &rate);
Sudeep Holla6d6a1d82017-06-13 17:19:36 +010052 if (ret)
53 return 0;
54 return rate;
55}
56
57static long scmi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
58 unsigned long *parent_rate)
59{
Sudeep Holla6d6a1d82017-06-13 17:19:36 +010060 u64 fmin, fmax, ftmp;
61 struct scmi_clk *clk = to_scmi_clk(hw);
62
63 /*
64 * We can't figure out what rate it will be, so just return the
65 * rate back to the caller. scmi_clk_recalc_rate() will be called
66 * after the rate is set and we'll know what rate the clock is
67 * running at then.
68 */
69 if (clk->info->rate_discrete)
70 return rate;
71
72 fmin = clk->info->range.min_rate;
73 fmax = clk->info->range.max_rate;
74 if (rate <= fmin)
75 return fmin;
76 else if (rate >= fmax)
77 return fmax;
78
79 ftmp = rate - fmin;
80 ftmp += clk->info->range.step_size - 1; /* to round up */
Amit Daniel Kachhap7a8655e2018-07-31 11:25:55 +053081 do_div(ftmp, clk->info->range.step_size);
Sudeep Holla6d6a1d82017-06-13 17:19:36 +010082
Amit Daniel Kachhap7a8655e2018-07-31 11:25:55 +053083 return ftmp * clk->info->range.step_size + fmin;
Sudeep Holla6d6a1d82017-06-13 17:19:36 +010084}
85
86static int scmi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
87 unsigned long parent_rate)
88{
89 struct scmi_clk *clk = to_scmi_clk(hw);
90
Cristian Marussibeb076b2021-03-16 12:48:43 +000091 return scmi_proto_clk_ops->rate_set(clk->ph, clk->id, rate);
Sudeep Holla6d6a1d82017-06-13 17:19:36 +010092}
93
Peng Fan65a8a3d2023-10-04 07:42:24 +080094static int scmi_clk_set_parent(struct clk_hw *hw, u8 parent_index)
95{
96 struct scmi_clk *clk = to_scmi_clk(hw);
97
98 return scmi_proto_clk_ops->parent_set(clk->ph, clk->id, parent_index);
99}
100
101static u8 scmi_clk_get_parent(struct clk_hw *hw)
102{
103 struct scmi_clk *clk = to_scmi_clk(hw);
104 u32 parent_id, p_idx;
105 int ret;
106
107 ret = scmi_proto_clk_ops->parent_get(clk->ph, clk->id, &parent_id);
108 if (ret)
109 return 0;
110
111 for (p_idx = 0; p_idx < clk->info->num_parents; p_idx++) {
112 if (clk->parent_data[p_idx].index == parent_id)
113 break;
114 }
115
116 if (p_idx == clk->info->num_parents)
117 return 0;
118
119 return p_idx;
120}
121
static int scmi_clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	/*
	 * Accept any requested rate as-is: the SCMI platform firmware is
	 * the final authority and will adjust the rate when it is set.
	 */
	return 0;
}
130
Sudeep Holla6d6a1d82017-06-13 17:19:36 +0100131static int scmi_clk_enable(struct clk_hw *hw)
132{
133 struct scmi_clk *clk = to_scmi_clk(hw);
134
Cristian Marussi03a95cf2023-08-26 13:53:03 +0100135 return scmi_proto_clk_ops->enable(clk->ph, clk->id, NOT_ATOMIC);
Sudeep Holla6d6a1d82017-06-13 17:19:36 +0100136}
137
138static void scmi_clk_disable(struct clk_hw *hw)
139{
140 struct scmi_clk *clk = to_scmi_clk(hw);
141
Cristian Marussi03a95cf2023-08-26 13:53:03 +0100142 scmi_proto_clk_ops->disable(clk->ph, clk->id, NOT_ATOMIC);
Sudeep Holla6d6a1d82017-06-13 17:19:36 +0100143}
144
Cristian Marussi38a0e5b2022-02-17 13:12:34 +0000145static int scmi_clk_atomic_enable(struct clk_hw *hw)
146{
147 struct scmi_clk *clk = to_scmi_clk(hw);
148
Cristian Marussi03a95cf2023-08-26 13:53:03 +0100149 return scmi_proto_clk_ops->enable(clk->ph, clk->id, ATOMIC);
Cristian Marussi38a0e5b2022-02-17 13:12:34 +0000150}
151
152static void scmi_clk_atomic_disable(struct clk_hw *hw)
153{
154 struct scmi_clk *clk = to_scmi_clk(hw);
155
Cristian Marussi03a95cf2023-08-26 13:53:03 +0100156 scmi_proto_clk_ops->disable(clk->ph, clk->id, ATOMIC);
Cristian Marussi38a0e5b2022-02-17 13:12:34 +0000157}
158
Cristian Marussi1b39ff52023-08-26 13:53:07 +0100159static int scmi_clk_atomic_is_enabled(struct clk_hw *hw)
160{
161 int ret;
162 bool enabled = false;
163 struct scmi_clk *clk = to_scmi_clk(hw);
164
165 ret = scmi_proto_clk_ops->state_get(clk->ph, clk->id, &enabled, ATOMIC);
166 if (ret)
167 dev_warn(clk->dev,
168 "Failed to get state for clock ID %d\n", clk->id);
169
170 return !!enabled;
171}
172
Cristian Marussi87af9482024-04-15 17:36:49 +0100173static int scmi_clk_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
174{
175 int ret;
176 u32 val;
177 struct scmi_clk *clk = to_scmi_clk(hw);
178
179 ret = scmi_proto_clk_ops->config_oem_get(clk->ph, clk->id,
180 SCMI_CLOCK_CFG_DUTY_CYCLE,
181 &val, NULL, false);
182 if (!ret) {
183 duty->num = val;
184 duty->den = 100;
185 } else {
186 dev_warn(clk->dev,
187 "Failed to get duty cycle for clock ID %d\n", clk->id);
188 }
189
190 return ret;
191}
192
193static int scmi_clk_set_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
194{
195 int ret;
196 u32 val;
197 struct scmi_clk *clk = to_scmi_clk(hw);
198
199 /* SCMI OEM Duty Cycle is expressed as a percentage */
200 val = (duty->num * 100) / duty->den;
201 ret = scmi_proto_clk_ops->config_oem_set(clk->ph, clk->id,
202 SCMI_CLOCK_CFG_DUTY_CYCLE,
203 val, false);
204 if (ret)
205 dev_warn(clk->dev,
206 "Failed to set duty cycle(%u/%u) for clock ID %d\n",
207 duty->num, duty->den, clk->id);
208
209 return ret;
210}
211
Cristian Marussi38a0e5b2022-02-17 13:12:34 +0000212static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk,
213 const struct clk_ops *scmi_ops)
Sudeep Holla6d6a1d82017-06-13 17:19:36 +0100214{
215 int ret;
Sudeep Hollafcd2e0d2020-07-09 09:17:05 +0100216 unsigned long min_rate, max_rate;
217
Sudeep Holla6d6a1d82017-06-13 17:19:36 +0100218 struct clk_init_data init = {
219 .flags = CLK_GET_RATE_NOCACHE,
Peng Fan65a8a3d2023-10-04 07:42:24 +0800220 .num_parents = sclk->info->num_parents,
Cristian Marussi38a0e5b2022-02-17 13:12:34 +0000221 .ops = scmi_ops,
Sudeep Holla6d6a1d82017-06-13 17:19:36 +0100222 .name = sclk->info->name,
Peng Fan65a8a3d2023-10-04 07:42:24 +0800223 .parent_data = sclk->parent_data,
Sudeep Holla6d6a1d82017-06-13 17:19:36 +0100224 };
225
226 sclk->hw.init = &init;
227 ret = devm_clk_hw_register(dev, &sclk->hw);
Sudeep Hollafcd2e0d2020-07-09 09:17:05 +0100228 if (ret)
229 return ret;
230
231 if (sclk->info->rate_discrete) {
232 int num_rates = sclk->info->list.num_rates;
233
234 if (num_rates <= 0)
235 return -EINVAL;
236
237 min_rate = sclk->info->list.rates[0];
238 max_rate = sclk->info->list.rates[num_rates - 1];
239 } else {
240 min_rate = sclk->info->range.min_rate;
241 max_rate = sclk->info->range.max_rate;
242 }
243
244 clk_hw_set_rate_range(&sclk->hw, min_rate, max_rate);
Sudeep Holla6d6a1d82017-06-13 17:19:36 +0100245 return ret;
246}
247
Cristian Marussi2641ee12024-04-15 17:36:45 +0100248/**
249 * scmi_clk_ops_alloc() - Alloc and configure clock operations
250 * @dev: A device reference for devres
251 * @feats_key: A bitmap representing the desired clk_ops capabilities
252 *
253 * Allocate and configure a proper set of clock operations depending on the
254 * specifically required SCMI clock features.
255 *
256 * Return: A pointer to the allocated and configured clk_ops on success,
257 * or NULL on allocation failure.
258 */
259static const struct clk_ops *
260scmi_clk_ops_alloc(struct device *dev, unsigned long feats_key)
261{
262 struct clk_ops *ops;
263
264 ops = devm_kzalloc(dev, sizeof(*ops), GFP_KERNEL);
265 if (!ops)
266 return NULL;
267 /*
268 * We can provide enable/disable/is_enabled atomic callbacks only if the
269 * underlying SCMI transport for an SCMI instance is configured to
270 * handle SCMI commands in an atomic manner.
271 *
272 * When no SCMI atomic transport support is available we instead provide
273 * only the prepare/unprepare API, as allowed by the clock framework
274 * when atomic calls are not available.
275 */
Cristian Marussia1b8faf2024-04-15 17:36:46 +0100276 if (feats_key & BIT(SCMI_CLK_STATE_CTRL_SUPPORTED)) {
277 if (feats_key & BIT(SCMI_CLK_ATOMIC_SUPPORTED)) {
278 ops->enable = scmi_clk_atomic_enable;
279 ops->disable = scmi_clk_atomic_disable;
280 } else {
281 ops->prepare = scmi_clk_enable;
282 ops->unprepare = scmi_clk_disable;
283 }
Cristian Marussi2641ee12024-04-15 17:36:45 +0100284 }
285
Cristian Marussia1b8faf2024-04-15 17:36:46 +0100286 if (feats_key & BIT(SCMI_CLK_ATOMIC_SUPPORTED))
287 ops->is_enabled = scmi_clk_atomic_is_enabled;
288
Cristian Marussi2641ee12024-04-15 17:36:45 +0100289 /* Rate ops */
290 ops->recalc_rate = scmi_clk_recalc_rate;
291 ops->round_rate = scmi_clk_round_rate;
292 ops->determine_rate = scmi_clk_determine_rate;
Cristian Marussic3ad1d02024-04-15 17:36:47 +0100293 if (feats_key & BIT(SCMI_CLK_RATE_CTRL_SUPPORTED))
294 ops->set_rate = scmi_clk_set_rate;
Cristian Marussi2641ee12024-04-15 17:36:45 +0100295
296 /* Parent ops */
297 ops->get_parent = scmi_clk_get_parent;
Cristian Marussifa23e092024-04-15 17:36:48 +0100298 if (feats_key & BIT(SCMI_CLK_PARENT_CTRL_SUPPORTED))
299 ops->set_parent = scmi_clk_set_parent;
Cristian Marussi2641ee12024-04-15 17:36:45 +0100300
Cristian Marussi87af9482024-04-15 17:36:49 +0100301 /* Duty cycle */
302 if (feats_key & BIT(SCMI_CLK_DUTY_CYCLE_SUPPORTED)) {
303 ops->get_duty_cycle = scmi_clk_get_duty_cycle;
304 ops->set_duty_cycle = scmi_clk_set_duty_cycle;
305 }
306
Cristian Marussi2641ee12024-04-15 17:36:45 +0100307 return ops;
308}
309
310/**
311 * scmi_clk_ops_select() - Select a proper set of clock operations
312 * @sclk: A reference to an SCMI clock descriptor
313 * @atomic_capable: A flag to indicate if atomic mode is supported by the
314 * transport
315 * @atomic_threshold_us: Platform atomic threshold value in microseconds:
316 * clk_ops are atomic when clock enable latency is less
317 * than this threshold
318 * @clk_ops_db: A reference to the array used as a database to store all the
319 * created clock operations combinations.
320 * @db_size: Maximum number of entries held by @clk_ops_db
321 *
322 * After having built a bitmap descriptor to represent the set of features
323 * needed by this SCMI clock, at first use it to lookup into the set of
324 * previously allocated clk_ops to check if a suitable combination of clock
325 * operations was already created; when no match is found allocate a brand new
326 * set of clk_ops satisfying the required combination of features and save it
327 * for future references.
328 *
329 * In this way only one set of clk_ops is ever created for each different
330 * combination that is effectively needed by a driver instance.
331 *
332 * Return: A pointer to the allocated and configured clk_ops on success, or
333 * NULL otherwise.
334 */
335static const struct clk_ops *
336scmi_clk_ops_select(struct scmi_clk *sclk, bool atomic_capable,
337 unsigned int atomic_threshold_us,
338 const struct clk_ops **clk_ops_db, size_t db_size)
339{
340 const struct scmi_clock_info *ci = sclk->info;
341 unsigned int feats_key = 0;
342 const struct clk_ops *ops;
343
344 /*
345 * Note that when transport is atomic but SCMI protocol did not
346 * specify (or support) an enable_latency associated with a
347 * clock, we default to use atomic operations mode.
348 */
349 if (atomic_capable && ci->enable_latency <= atomic_threshold_us)
350 feats_key |= BIT(SCMI_CLK_ATOMIC_SUPPORTED);
351
Cristian Marussia1b8faf2024-04-15 17:36:46 +0100352 if (!ci->state_ctrl_forbidden)
353 feats_key |= BIT(SCMI_CLK_STATE_CTRL_SUPPORTED);
354
Cristian Marussic3ad1d02024-04-15 17:36:47 +0100355 if (!ci->rate_ctrl_forbidden)
356 feats_key |= BIT(SCMI_CLK_RATE_CTRL_SUPPORTED);
357
Cristian Marussifa23e092024-04-15 17:36:48 +0100358 if (!ci->parent_ctrl_forbidden)
359 feats_key |= BIT(SCMI_CLK_PARENT_CTRL_SUPPORTED);
360
Cristian Marussi87af9482024-04-15 17:36:49 +0100361 if (ci->extended_config)
362 feats_key |= BIT(SCMI_CLK_DUTY_CYCLE_SUPPORTED);
363
Cristian Marussi2641ee12024-04-15 17:36:45 +0100364 if (WARN_ON(feats_key >= db_size))
365 return NULL;
366
367 /* Lookup previously allocated ops */
368 ops = clk_ops_db[feats_key];
369 if (ops)
370 return ops;
371
372 /* Did not find a pre-allocated clock_ops */
373 ops = scmi_clk_ops_alloc(sclk->dev, feats_key);
374 if (!ops)
375 return NULL;
376
377 /* Store new ops combinations */
378 clk_ops_db[feats_key] = ops;
379
380 return ops;
381}
382
/*
 * Probe: enumerate all clocks exposed by the SCMI clock protocol,
 * register each one with the common clock framework, and publish them
 * as a onecell clock provider for this device node.
 *
 * All allocations are devres-managed; individual clock failures are
 * tolerated (the slot is left NULL) except for memory exhaustion,
 * which aborts the whole probe.
 */
static int scmi_clocks_probe(struct scmi_device *sdev)
{
	int idx, count, err;
	unsigned int atomic_threshold_us;
	bool transport_is_atomic;
	struct clk_hw **hws;
	struct clk_hw_onecell_data *clk_data;
	struct device *dev = &sdev->dev;
	struct device_node *np = dev->of_node;
	const struct scmi_handle *handle = sdev->handle;
	struct scmi_protocol_handle *ph;
	const struct clk_ops *scmi_clk_ops_db[SCMI_MAX_CLK_OPS] = {};

	if (!handle)
		return -ENODEV;

	/* Acquire the clock protocol operations; lifetime tied to sdev */
	scmi_proto_clk_ops =
		handle->devm_protocol_get(sdev, SCMI_PROTOCOL_CLOCK, &ph);
	if (IS_ERR(scmi_proto_clk_ops))
		return PTR_ERR(scmi_proto_clk_ops);

	count = scmi_proto_clk_ops->count_get(ph);
	if (count < 0) {
		dev_err(dev, "%pOFn: invalid clock output count\n", np);
		return -EINVAL;
	}

	clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, count),
				GFP_KERNEL);
	if (!clk_data)
		return -ENOMEM;

	clk_data->num = count;
	hws = clk_data->hws;

	/* Query once; the result applies to every clock of this instance */
	transport_is_atomic = handle->is_transport_atomic(handle,
							  &atomic_threshold_us);

	for (idx = 0; idx < count; idx++) {
		struct scmi_clk *sclk;
		const struct clk_ops *scmi_ops;

		sclk = devm_kzalloc(dev, sizeof(*sclk), GFP_KERNEL);
		if (!sclk)
			return -ENOMEM;

		sclk->info = scmi_proto_clk_ops->info_get(ph, idx);
		if (!sclk->info) {
			dev_dbg(dev, "invalid clock info for idx %d\n", idx);
			/* Release the unused descriptor eagerly */
			devm_kfree(dev, sclk);
			continue;
		}

		sclk->id = idx;
		sclk->ph = ph;
		sclk->dev = dev;

		/*
		 * Note that the scmi_clk_ops_db is on the stack, not global,
		 * because it cannot be shared between multiple probe-sequences
		 * to avoid sharing the devm_ allocated clk_ops between multiple
		 * SCMI clk driver instances.
		 */
		scmi_ops = scmi_clk_ops_select(sclk, transport_is_atomic,
					       atomic_threshold_us,
					       scmi_clk_ops_db,
					       ARRAY_SIZE(scmi_clk_ops_db));
		if (!scmi_ops)
			return -ENOMEM;

		/* Initialize clock parent data. */
		if (sclk->info->num_parents > 0) {
			sclk->parent_data = devm_kcalloc(dev, sclk->info->num_parents,
							 sizeof(*sclk->parent_data), GFP_KERNEL);
			if (!sclk->parent_data)
				return -ENOMEM;

			/*
			 * NOTE(review): hws[] entries for parents with a
			 * higher index than this clock are still NULL at
			 * this point — relies on .index for those; confirm
			 * ordering assumptions with the clk core.
			 */
			for (int i = 0; i < sclk->info->num_parents; i++) {
				sclk->parent_data[i].index = sclk->info->parents[i];
				sclk->parent_data[i].hw = hws[sclk->info->parents[i]];
			}
		}

		err = scmi_clk_ops_init(dev, sclk, scmi_ops);
		if (err) {
			dev_err(dev, "failed to register clock %d\n", idx);
			/* Registration failed: drop per-clock allocations */
			devm_kfree(dev, sclk->parent_data);
			devm_kfree(dev, sclk);
			hws[idx] = NULL;
		} else {
			dev_dbg(dev, "Registered clock:%s%s\n",
				sclk->info->name,
				scmi_ops->enable ? " (atomic ops)" : "");
			hws[idx] = &sclk->hw;
		}
	}

	return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
					   clk_data);
}
483
/* Bind this driver to SCMI devices exposing the clock protocol */
static const struct scmi_device_id scmi_id_table[] = {
	{ SCMI_PROTOCOL_CLOCK, "clocks" },
	{ },
};
MODULE_DEVICE_TABLE(scmi, scmi_id_table);
489
/* SCMI driver registration; module init/exit handled by module_scmi_driver() */
static struct scmi_driver scmi_clocks_driver = {
	.name = "scmi-clocks",
	.probe = scmi_clocks_probe,
	.id_table = scmi_id_table,
};
module_scmi_driver(scmi_clocks_driver);

MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCMI clock driver");
MODULE_LICENSE("GPL v2");