// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus bundles
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 */

#include <linux/greybus.h>
#include "greybus_trace.h"

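/*
 * Per-bundle sysfs attributes: the bundle class, the bundle id and a
 * free-form "state" string managed by userspace.
 */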
static ssize_t bundle_class_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);

	return sprintf(buf, "0x%02x\n", bundle->class);
}
static DEVICE_ATTR_RO(bundle_class);

static ssize_t bundle_id_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);

	return sprintf(buf, "%u\n", bundle->id);
}
static DEVICE_ATTR_RO(bundle_id);

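/*
 * The "state" attribute is a string owned entirely by userspace: reads
 * return whatever was last written (or an empty line if nothing was),
 * and writes replace the stored value.
 */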
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);

	if (!bundle->state)
		return sprintf(buf, "\n");

	return sprintf(buf, "%s\n", bundle->state);
}

static ssize_t state_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t size)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);

	kfree(bundle->state);
	bundle->state = kstrdup(buf, GFP_KERNEL);
	if (!bundle->state)
		return -ENOMEM;

	/* Tell userspace that the file contents changed */
	sysfs_notify(&bundle->dev.kobj, NULL, "state");

	return size;
}
static DEVICE_ATTR_RW(state);

static struct attribute *bundle_attrs[] = {
	&dev_attr_bundle_class.attr,
	&dev_attr_bundle_id.attr,
	&dev_attr_state.attr,
	NULL,
};

ATTRIBUTE_GROUPS(bundle);

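/* Look up a bundle on an interface by bundle id; returns NULL if not found. */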
static struct gb_bundle *gb_bundle_find(struct gb_interface *intf,
					u8 bundle_id)
{
	struct gb_bundle *bundle;

	list_for_each_entry(bundle, &intf->bundles, links) {
		if (bundle->id == bundle_id)
			return bundle;
	}

	return NULL;
}

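/*
 * Device-core release callback, invoked once the last reference to the
 * bundle device has been dropped.
 */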
static void gb_bundle_release(struct device *dev)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);

	trace_gb_bundle_release(bundle);

	kfree(bundle->state);
	kfree(bundle->cport_desc);
	kfree(bundle);
}

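/*
 * Runtime PM: suspending a bundle first quiesces it on the host side,
 * either through the bound driver's runtime_suspend callback or, if the
 * driver has none, by disabling all of the bundle's connections, and then
 * asks the interface (over its control connection) to suspend the bundle.
 * Resuming reverses that order.
 */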
#ifdef CONFIG_PM
static void gb_bundle_disable_all_connections(struct gb_bundle *bundle)
{
	struct gb_connection *connection;

	list_for_each_entry(connection, &bundle->connections, bundle_links)
		gb_connection_disable(connection);
}

static void gb_bundle_enable_all_connections(struct gb_bundle *bundle)
{
	struct gb_connection *connection;

	list_for_each_entry(connection, &bundle->connections, bundle_links)
		gb_connection_enable(connection);
}

static int gb_bundle_suspend(struct device *dev)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);
	const struct dev_pm_ops *pm = dev->driver->pm;
	int ret;

	if (pm && pm->runtime_suspend) {
		ret = pm->runtime_suspend(&bundle->dev);
		if (ret)
			return ret;
	} else {
		gb_bundle_disable_all_connections(bundle);
	}

	ret = gb_control_bundle_suspend(bundle->intf->control, bundle->id);
	if (ret) {
		if (pm && pm->runtime_resume)
			ret = pm->runtime_resume(dev);
		else
			gb_bundle_enable_all_connections(bundle);

		return ret;
	}

	return 0;
}

static int gb_bundle_resume(struct device *dev)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);
	const struct dev_pm_ops *pm = dev->driver->pm;
	int ret;

	ret = gb_control_bundle_resume(bundle->intf->control, bundle->id);
	if (ret)
		return ret;

	if (pm && pm->runtime_resume) {
		ret = pm->runtime_resume(dev);
		if (ret)
			return ret;
	} else {
		gb_bundle_enable_all_connections(bundle);
	}

	return 0;
}

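/* Idle callback: defer the actual suspend via the autosuspend timeout. */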
static int gb_bundle_idle(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);
	pm_request_autosuspend(dev);

	return 0;
}
#endif

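/*
 * SET_RUNTIME_PM_OPS() only references the callbacks when CONFIG_PM is
 * enabled, matching the #ifdef around their definitions above.
 */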
static const struct dev_pm_ops gb_bundle_pm_ops = {
	SET_RUNTIME_PM_OPS(gb_bundle_suspend, gb_bundle_resume, gb_bundle_idle)
};

struct device_type greybus_bundle_type = {
	.name =		"greybus_bundle",
	.release =	gb_bundle_release,
	.pm =		&gb_bundle_pm_ops,
};

/*
 * Create a gb_bundle structure to represent a discovered bundle.
 * Returns a pointer to the new bundle, or a null pointer if the bundle
 * id is invalid or already in use, or if memory allocation fails.
 */
struct gb_bundle *gb_bundle_create(struct gb_interface *intf, u8 bundle_id,
				   u8 class)
{
	struct gb_bundle *bundle;

	if (bundle_id == BUNDLE_ID_NONE) {
		dev_err(&intf->dev, "can't use bundle id %u\n", bundle_id);
		return NULL;
	}

	/*
	 * Reject any attempt to reuse a bundle id.  We initialize
	 * these serially, so there's no need to worry about keeping
	 * the interface bundle list locked here.
	 */
	if (gb_bundle_find(intf, bundle_id)) {
		dev_err(&intf->dev, "duplicate bundle id %u\n", bundle_id);
		return NULL;
	}

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
	if (!bundle)
		return NULL;

	bundle->intf = intf;
	bundle->id = bundle_id;
	bundle->class = class;
	INIT_LIST_HEAD(&bundle->connections);

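	/* The bundle device is a child of its interface, named "<interface>.<bundle-id>". */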
	bundle->dev.parent = &intf->dev;
	bundle->dev.bus = &greybus_bus_type;
	bundle->dev.type = &greybus_bundle_type;
	bundle->dev.groups = bundle_groups;
	bundle->dev.dma_mask = intf->dev.dma_mask;
	device_initialize(&bundle->dev);
	dev_set_name(&bundle->dev, "%s.%d", dev_name(&intf->dev), bundle_id);

	list_add(&bundle->links, &intf->bundles);

	trace_gb_bundle_create(bundle);

	return bundle;
}

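/*
 * Register an initialized bundle with the driver core, making it visible
 * on the greybus bus so a bundle driver can bind to it.
 */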
int gb_bundle_add(struct gb_bundle *bundle)
{
	int ret;

	ret = device_add(&bundle->dev);
	if (ret) {
		dev_err(&bundle->dev, "failed to register bundle: %d\n", ret);
		return ret;
	}

	trace_gb_bundle_add(bundle);

	return 0;
}

/*
 * Tear down a previously set up bundle.
 */
void gb_bundle_destroy(struct gb_bundle *bundle)
{
	trace_gb_bundle_destroy(bundle);

	if (device_is_registered(&bundle->dev))
		device_del(&bundle->dev);

	list_del(&bundle->links);

	put_device(&bundle->dev);
}