// SPDX-License-Identifier: GPL-2.0

#include <linux/anon_inodes.h>
#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/build_bug.h>
#include <linux/cdev.h>
#include <linux/compat.h>
#include <linux/compiler.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/gpio.h>
#include <linux/gpio/driver.h>
#include <linux/hte.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pinctrl/consumer.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/timekeeping.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>

#include <uapi/linux/gpio.h>

#include "gpiolib.h"
#include "gpiolib-cdev.h"

/*
 * Array sizes must ensure 64-bit alignment and not create holes in the
 * struct packing.
 */
static_assert(IS_ALIGNED(GPIO_V2_LINES_MAX, 2));
static_assert(IS_ALIGNED(GPIO_MAX_NAME_SIZE, 8));

/*
 * Check that uAPI structs are 64-bit aligned for 32/64-bit compatibility
 */
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_attribute), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_config_attribute), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_config), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_request), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_info), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_info_changed), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_event), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_values), 8));

/* Character device interface to GPIO.
 *
 * The GPIO character device, /dev/gpiochipN, provides userspace an
 * interface to gpiolib GPIOs via ioctl()s.
 */
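/*
 * Illustrative userspace sketch of the v2 line request flow implemented
 * below (error handling omitted; the chip path, line offset and consumer
 * label are arbitrary examples):
 *
 *	struct gpio_v2_line_request req = { };
 *	struct gpio_v2_line_values vals = { .mask = 1 };
 *	int chip_fd = open("/dev/gpiochip0", O_RDONLY);
 *
 *	req.num_lines = 1;
 *	req.offsets[0] = 4;
 *	req.config.flags = GPIO_V2_LINE_FLAG_INPUT;
 *	strcpy(req.consumer, "example");
 *	ioctl(chip_fd, GPIO_V2_GET_LINE_IOCTL, &req);
 *	ioctl(req.fd, GPIO_V2_LINE_GET_VALUES_IOCTL, &vals);
 */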

typedef __poll_t (*poll_fn)(struct file *, struct poll_table_struct *);
typedef long (*ioctl_fn)(struct file *, unsigned int, unsigned long);
typedef ssize_t (*read_fn)(struct file *, char __user *,
			   size_t count, loff_t *);

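/*
 * Wrappers that take the gpio_device read lock around a request's fops
 * callback, so the callback sees a consistent view of gdev->chip while
 * the device may be being unregistered concurrently.
 */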
static __poll_t call_poll_locked(struct file *file,
				 struct poll_table_struct *wait,
				 struct gpio_device *gdev, poll_fn func)
{
	__poll_t ret;

	down_read(&gdev->sem);
	ret = func(file, wait);
	up_read(&gdev->sem);

	return ret;
}

static long call_ioctl_locked(struct file *file, unsigned int cmd,
			      unsigned long arg, struct gpio_device *gdev,
			      ioctl_fn func)
{
	long ret;

	down_read(&gdev->sem);
	ret = func(file, cmd, arg);
	up_read(&gdev->sem);

	return ret;
}

static ssize_t call_read_locked(struct file *file, char __user *buf,
				size_t count, loff_t *f_ps,
				struct gpio_device *gdev, read_fn func)
{
	ssize_t ret;

	down_read(&gdev->sem);
	ret = func(file, buf, count, f_ps);
	up_read(&gdev->sem);

	return ret;
}

/*
 * GPIO line handle management
 */

#ifdef CONFIG_GPIO_CDEV_V1
/**
 * struct linehandle_state - contains the state of a userspace handle
 * @gdev: the GPIO device the handle pertains to
 * @label: consumer label used to tag descriptors
 * @descs: the GPIO descriptors held by this handle
 * @num_descs: the number of descriptors held in the descs array
 */
struct linehandle_state {
	struct gpio_device *gdev;
	const char *label;
	struct gpio_desc *descs[GPIOHANDLES_MAX];
	u32 num_descs;
};

#define GPIOHANDLE_REQUEST_VALID_FLAGS \
	(GPIOHANDLE_REQUEST_INPUT | \
	 GPIOHANDLE_REQUEST_OUTPUT | \
	 GPIOHANDLE_REQUEST_ACTIVE_LOW | \
	 GPIOHANDLE_REQUEST_BIAS_PULL_UP | \
	 GPIOHANDLE_REQUEST_BIAS_PULL_DOWN | \
	 GPIOHANDLE_REQUEST_BIAS_DISABLE | \
	 GPIOHANDLE_REQUEST_OPEN_DRAIN | \
	 GPIOHANDLE_REQUEST_OPEN_SOURCE)

static int linehandle_validate_flags(u32 flags)
{
	/* Return an error if an unknown flag is set */
	if (flags & ~GPIOHANDLE_REQUEST_VALID_FLAGS)
		return -EINVAL;

	/*
	 * Do not allow both INPUT & OUTPUT flags to be set as they are
	 * contradictory.
	 */
	if ((flags & GPIOHANDLE_REQUEST_INPUT) &&
	    (flags & GPIOHANDLE_REQUEST_OUTPUT))
		return -EINVAL;

	/*
	 * Do not allow OPEN_SOURCE & OPEN_DRAIN flags in a single request. If
	 * the hardware actually supports enabling both at the same time the
	 * electrical result would be disastrous.
	 */
	if ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) &&
	    (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
		return -EINVAL;

	/* OPEN_DRAIN and OPEN_SOURCE flags only make sense for output mode. */
	if (!(flags & GPIOHANDLE_REQUEST_OUTPUT) &&
	    ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
	     (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE)))
		return -EINVAL;

	/* Bias flags only allowed for input or output mode. */
	if (!((flags & GPIOHANDLE_REQUEST_INPUT) ||
	      (flags & GPIOHANDLE_REQUEST_OUTPUT)) &&
	    ((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) ||
	     (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP) ||
	     (flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN)))
		return -EINVAL;

	/* Only one bias flag can be set. */
	if (((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
	     (flags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
		       GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
	    ((flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
	     (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
		return -EINVAL;

	return 0;
}
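/*
 * For example, OUTPUT | OPEN_DRAIN | BIAS_PULL_UP passes the checks above,
 * while INPUT | OUTPUT or BIAS_PULL_UP | BIAS_PULL_DOWN is rejected.
 */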

static void linehandle_flags_to_desc_flags(u32 lflags, unsigned long *flagsp)
{
	assign_bit(FLAG_ACTIVE_LOW, flagsp,
		   lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW);
	assign_bit(FLAG_OPEN_DRAIN, flagsp,
		   lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN);
	assign_bit(FLAG_OPEN_SOURCE, flagsp,
		   lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE);
	assign_bit(FLAG_PULL_UP, flagsp,
		   lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP);
	assign_bit(FLAG_PULL_DOWN, flagsp,
		   lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN);
	assign_bit(FLAG_BIAS_DISABLE, flagsp,
		   lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE);
}

static long linehandle_set_config(struct linehandle_state *lh,
				  void __user *ip)
{
	struct gpiohandle_config gcnf;
	struct gpio_desc *desc;
	int i, ret;
	u32 lflags;

	if (copy_from_user(&gcnf, ip, sizeof(gcnf)))
		return -EFAULT;

	lflags = gcnf.flags;
	ret = linehandle_validate_flags(lflags);
	if (ret)
		return ret;

	for (i = 0; i < lh->num_descs; i++) {
		desc = lh->descs[i];
		linehandle_flags_to_desc_flags(gcnf.flags, &desc->flags);

		/*
		 * Lines have to be requested explicitly for input
		 * or output, else the line will be treated "as is".
		 */
		if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
			int val = !!gcnf.default_values[i];

			ret = gpiod_direction_output(desc, val);
			if (ret)
				return ret;
		} else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
			ret = gpiod_direction_input(desc);
			if (ret)
				return ret;
		}

		gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
	}
	return 0;
}

static long linehandle_ioctl_unlocked(struct file *file, unsigned int cmd,
				      unsigned long arg)
{
	struct linehandle_state *lh = file->private_data;
	void __user *ip = (void __user *)arg;
	struct gpiohandle_data ghd;
	DECLARE_BITMAP(vals, GPIOHANDLES_MAX);
	unsigned int i;
	int ret;

	if (!lh->gdev->chip)
		return -ENODEV;

	switch (cmd) {
	case GPIOHANDLE_GET_LINE_VALUES_IOCTL:
		/* NOTE: It's okay to read values of output lines */
		ret = gpiod_get_array_value_complex(false, true,
						    lh->num_descs, lh->descs,
						    NULL, vals);
		if (ret)
			return ret;

		memset(&ghd, 0, sizeof(ghd));
		for (i = 0; i < lh->num_descs; i++)
			ghd.values[i] = test_bit(i, vals);

		if (copy_to_user(ip, &ghd, sizeof(ghd)))
			return -EFAULT;

		return 0;
	case GPIOHANDLE_SET_LINE_VALUES_IOCTL:
		/*
		 * All line descriptors were created at once with the same
		 * flags so just check if the first one is really output.
		 */
		if (!test_bit(FLAG_IS_OUT, &lh->descs[0]->flags))
			return -EPERM;

		if (copy_from_user(&ghd, ip, sizeof(ghd)))
			return -EFAULT;

		/* Clamp all values to [0,1] */
		for (i = 0; i < lh->num_descs; i++)
			__assign_bit(i, vals, ghd.values[i]);

		/* Reuse the array setting function */
		return gpiod_set_array_value_complex(false,
						     true,
						     lh->num_descs,
						     lh->descs,
						     NULL,
						     vals);
	case GPIOHANDLE_SET_CONFIG_IOCTL:
		return linehandle_set_config(lh, ip);
	default:
		return -EINVAL;
	}
}

static long linehandle_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct linehandle_state *lh = file->private_data;

	return call_ioctl_locked(file, cmd, arg, lh->gdev,
				 linehandle_ioctl_unlocked);
}

#ifdef CONFIG_COMPAT
static long linehandle_ioctl_compat(struct file *file, unsigned int cmd,
				    unsigned long arg)
{
	return linehandle_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static void linehandle_free(struct linehandle_state *lh)
{
	int i;

	for (i = 0; i < lh->num_descs; i++)
		if (lh->descs[i])
			gpiod_free(lh->descs[i]);
	kfree(lh->label);
	gpio_device_put(lh->gdev);
	kfree(lh);
}

static int linehandle_release(struct inode *inode, struct file *file)
{
	linehandle_free(file->private_data);
	return 0;
}

static const struct file_operations linehandle_fileops = {
	.release = linehandle_release,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.unlocked_ioctl = linehandle_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = linehandle_ioctl_compat,
#endif
};

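/*
 * linehandle_create() services the v1 GPIO_GET_LINEHANDLE_IOCTL: it requests
 * the lines named in the gpiohandle_request, applies the requested flags, and
 * returns a new anonymous file descriptor through which the handle is
 * subsequently operated.
 */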
static int linehandle_create(struct gpio_device *gdev, void __user *ip)
{
	struct gpiohandle_request handlereq;
	struct linehandle_state *lh;
	struct file *file;
	int fd, i, ret;
	u32 lflags;

	if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
		return -EFAULT;
	if ((handlereq.lines == 0) || (handlereq.lines > GPIOHANDLES_MAX))
		return -EINVAL;

	lflags = handlereq.flags;

	ret = linehandle_validate_flags(lflags);
	if (ret)
		return ret;

	lh = kzalloc(sizeof(*lh), GFP_KERNEL);
	if (!lh)
		return -ENOMEM;
	lh->gdev = gpio_device_get(gdev);

	if (handlereq.consumer_label[0] != '\0') {
		/* label is only initialized if consumer_label is set */
		lh->label = kstrndup(handlereq.consumer_label,
				     sizeof(handlereq.consumer_label) - 1,
				     GFP_KERNEL);
		if (!lh->label) {
			ret = -ENOMEM;
			goto out_free_lh;
		}
	}

	lh->num_descs = handlereq.lines;

	/* Request each GPIO */
	for (i = 0; i < handlereq.lines; i++) {
		u32 offset = handlereq.lineoffsets[i];
		struct gpio_desc *desc = gpiochip_get_desc(gdev->chip, offset);

		if (IS_ERR(desc)) {
			ret = PTR_ERR(desc);
			goto out_free_lh;
		}

		ret = gpiod_request_user(desc, lh->label);
		if (ret)
			goto out_free_lh;
		lh->descs[i] = desc;
		linehandle_flags_to_desc_flags(handlereq.flags, &desc->flags);

		ret = gpiod_set_transitory(desc, false);
		if (ret < 0)
			goto out_free_lh;

		/*
		 * Lines have to be requested explicitly for input
		 * or output, else the line will be treated "as is".
		 */
		if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
			int val = !!handlereq.default_values[i];

			ret = gpiod_direction_output(desc, val);
			if (ret)
				goto out_free_lh;
		} else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
			ret = gpiod_direction_input(desc);
			if (ret)
				goto out_free_lh;
		}

		gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);

		dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
			offset);
	}

	fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		ret = fd;
		goto out_free_lh;
	}

	file = anon_inode_getfile("gpio-linehandle",
				  &linehandle_fileops,
				  lh,
				  O_RDONLY | O_CLOEXEC);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto out_put_unused_fd;
	}

	handlereq.fd = fd;
	if (copy_to_user(ip, &handlereq, sizeof(handlereq))) {
		/*
		 * fput() will trigger the release() callback, so do not go onto
		 * the regular error cleanup path here.
		 */
		fput(file);
		put_unused_fd(fd);
		return -EFAULT;
	}

	fd_install(fd, file);

	dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
		lh->num_descs);

	return 0;

out_put_unused_fd:
	put_unused_fd(fd);
out_free_lh:
	linehandle_free(lh);
	return ret;
}
#endif /* CONFIG_GPIO_CDEV_V1 */

/**
 * struct line - contains the state of a requested line
 * @desc: the GPIO descriptor for this line.
 * @req: the corresponding line request
 * @irq: the interrupt triggered in response to events on this GPIO
 * @edflags: the edge flags, GPIO_V2_LINE_FLAG_EDGE_RISING and/or
 * GPIO_V2_LINE_FLAG_EDGE_FALLING, indicating the edge detection applied
 * @timestamp_ns: cache for the timestamp storing it between hardirq and
 * IRQ thread, used to bring the timestamp close to the actual event
 * @req_seqno: the seqno for the current edge event in the sequence of
 * events for the corresponding line request. This is drawn from the @req.
 * @line_seqno: the seqno for the current edge event in the sequence of
 * events for this line.
 * @work: the worker that implements software debouncing
 * @sw_debounced: flag indicating if the software debouncer is active
 * @level: the current debounced physical level of the line
 * @hdesc: the Hardware Timestamp Engine (HTE) descriptor
 * @raw_level: the line level at the time of event
 * @total_discard_seq: the running counter of the discarded events
 * @last_seqno: the last sequence number before debounce period expires
 */
struct line {
	struct gpio_desc *desc;
	/*
	 * -- edge detector specific fields --
	 */
	struct linereq *req;
	unsigned int irq;
	/*
	 * The flags for the active edge detector configuration.
	 *
	 * edflags is set by linereq_create(), linereq_free(), and
	 * linereq_set_config_unlocked(), which are themselves mutually
	 * exclusive, and is accessed by edge_irq_thread(),
	 * process_hw_ts_thread() and debounce_work_func(),
	 * which can all live with a slightly stale value.
	 */
	u64 edflags;
	/*
	 * timestamp_ns and req_seqno are accessed only by
	 * edge_irq_handler() and edge_irq_thread(), which are themselves
	 * mutually exclusive, so no additional protection is necessary.
	 */
	u64 timestamp_ns;
	u32 req_seqno;
	/*
	 * line_seqno is accessed by either edge_irq_thread() or
	 * debounce_work_func(), which are themselves mutually exclusive,
	 * so no additional protection is necessary.
	 */
	u32 line_seqno;
	/*
	 * -- debouncer specific fields --
	 */
	struct delayed_work work;
	/*
	 * sw_debounce is accessed by linereq_set_config(), which is the
	 * only setter, and linereq_get_values(), which can live with a
	 * slightly stale value.
	 */
	unsigned int sw_debounced;
	/*
	 * level is accessed by debounce_work_func(), which is the only
	 * setter, and linereq_get_values() which can live with a slightly
	 * stale value.
	 */
	unsigned int level;
#ifdef CONFIG_HTE
	struct hte_ts_desc hdesc;
	/*
	 * HTE provider sets line level at the time of event. The valid
	 * value is 0 or 1 and negative value for an error.
	 */
	int raw_level;
	/*
	 * when sw_debounce is set on HTE enabled line, this is running
	 * counter of the discarded events.
	 */
	u32 total_discard_seq;
	/*
	 * when sw_debounce is set on HTE enabled line, this variable records
	 * last sequence number before debounce period expires.
	 */
	u32 last_seqno;
#endif /* CONFIG_HTE */
};

/**
 * struct linereq - contains the state of a userspace line request
 * @gdev: the GPIO device the line request pertains to
 * @label: consumer label used to tag GPIO descriptors
 * @num_lines: the number of lines in the lines array
 * @wait: wait queue that handles blocking reads of events
 * @device_unregistered_nb: notifier block for receiving gdev unregister events
 * @event_buffer_size: the number of elements allocated in @events
 * @events: KFIFO for the GPIO events
 * @seqno: the sequence number for edge events generated on all lines in
 * this line request. Note that this is not used when @num_lines is 1, as
 * the line_seqno is then the same and is cheaper to calculate.
 * @config_mutex: mutex for serializing ioctl() calls to ensure consistency
 * of configuration, particularly multi-step accesses to desc flags.
 * @lines: the lines held by this line request, with @num_lines elements.
 */
struct linereq {
	struct gpio_device *gdev;
	const char *label;
	u32 num_lines;
	wait_queue_head_t wait;
	struct notifier_block device_unregistered_nb;
	u32 event_buffer_size;
	DECLARE_KFIFO_PTR(events, struct gpio_v2_line_event);
	atomic_t seqno;
	struct mutex config_mutex;
	struct line lines[];
};

#define GPIO_V2_LINE_BIAS_FLAGS \
	(GPIO_V2_LINE_FLAG_BIAS_PULL_UP | \
	 GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN | \
	 GPIO_V2_LINE_FLAG_BIAS_DISABLED)

#define GPIO_V2_LINE_DIRECTION_FLAGS \
	(GPIO_V2_LINE_FLAG_INPUT | \
	 GPIO_V2_LINE_FLAG_OUTPUT)

#define GPIO_V2_LINE_DRIVE_FLAGS \
	(GPIO_V2_LINE_FLAG_OPEN_DRAIN | \
	 GPIO_V2_LINE_FLAG_OPEN_SOURCE)

#define GPIO_V2_LINE_EDGE_FLAGS \
	(GPIO_V2_LINE_FLAG_EDGE_RISING | \
	 GPIO_V2_LINE_FLAG_EDGE_FALLING)

#define GPIO_V2_LINE_FLAG_EDGE_BOTH GPIO_V2_LINE_EDGE_FLAGS

#define GPIO_V2_LINE_VALID_FLAGS \
	(GPIO_V2_LINE_FLAG_ACTIVE_LOW | \
	 GPIO_V2_LINE_DIRECTION_FLAGS | \
	 GPIO_V2_LINE_DRIVE_FLAGS | \
	 GPIO_V2_LINE_EDGE_FLAGS | \
	 GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME | \
	 GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
	 GPIO_V2_LINE_BIAS_FLAGS)

/* subset of flags relevant for edge detector configuration */
#define GPIO_V2_LINE_EDGE_DETECTOR_FLAGS \
	(GPIO_V2_LINE_FLAG_ACTIVE_LOW | \
	 GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE | \
	 GPIO_V2_LINE_EDGE_FLAGS)

static int linereq_unregistered_notify(struct notifier_block *nb,
				       unsigned long action, void *data)
{
	struct linereq *lr = container_of(nb, struct linereq,
					  device_unregistered_nb);

	wake_up_poll(&lr->wait, EPOLLIN | EPOLLERR);

	return NOTIFY_OK;
}

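/*
 * Queue an edge event for the line request. If the KFIFO is already full
 * the oldest queued event is dropped to make room; a wakeup is only issued
 * when the event was added without overflow.
 */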
static void linereq_put_event(struct linereq *lr,
			      struct gpio_v2_line_event *le)
{
	bool overflow = false;

	spin_lock(&lr->wait.lock);
	if (kfifo_is_full(&lr->events)) {
		overflow = true;
		kfifo_skip(&lr->events);
	}
	kfifo_in(&lr->events, le, 1);
	spin_unlock(&lr->wait.lock);
	if (!overflow)
		wake_up_poll(&lr->wait, EPOLLIN);
	else
		pr_debug_ratelimited("event FIFO is full - event dropped\n");
}

static u64 line_event_timestamp(struct line *line)
{
	if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &line->desc->flags))
		return ktime_get_real_ns();
	else if (IS_ENABLED(CONFIG_HTE) &&
		 test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))
		return line->timestamp_ns;

	return ktime_get_ns();
}

static u32 line_event_id(int level)
{
	return level ? GPIO_V2_LINE_EVENT_RISING_EDGE :
		       GPIO_V2_LINE_EVENT_FALLING_EDGE;
}

#ifdef CONFIG_HTE

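/*
 * HTE event handling is split in two stages: process_hw_ts() runs from the
 * provider callback and only records the timestamp, raw level and sequence
 * numbers, while process_hw_ts_thread() runs in thread context to build the
 * gpio_v2_line_event and queue it for userspace.
 */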
static enum hte_return process_hw_ts_thread(void *p)
{
	struct line *line;
	struct linereq *lr;
	struct gpio_v2_line_event le;
	u64 edflags;
	int level;

	if (!p)
		return HTE_CB_HANDLED;

	line = p;
	lr = line->req;

	memset(&le, 0, sizeof(le));

	le.timestamp_ns = line->timestamp_ns;
	edflags = READ_ONCE(line->edflags);

	switch (edflags & GPIO_V2_LINE_EDGE_FLAGS) {
	case GPIO_V2_LINE_FLAG_EDGE_BOTH:
		level = (line->raw_level >= 0) ?
				line->raw_level :
				gpiod_get_raw_value_cansleep(line->desc);

		if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
			level = !level;

		le.id = line_event_id(level);
		break;
	case GPIO_V2_LINE_FLAG_EDGE_RISING:
		le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
		break;
	case GPIO_V2_LINE_FLAG_EDGE_FALLING:
		le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
		break;
	default:
		return HTE_CB_HANDLED;
	}
	le.line_seqno = line->line_seqno;
	le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
	le.offset = gpio_chip_hwgpio(line->desc);

	linereq_put_event(lr, &le);

	return HTE_CB_HANDLED;
}

static enum hte_return process_hw_ts(struct hte_ts_data *ts, void *p)
{
	struct line *line;
	struct linereq *lr;
	int diff_seqno = 0;

	if (!ts || !p)
		return HTE_CB_HANDLED;

	line = p;
	line->timestamp_ns = ts->tsc;
	line->raw_level = ts->raw_level;
	lr = line->req;

	if (READ_ONCE(line->sw_debounced)) {
		line->total_discard_seq++;
		line->last_seqno = ts->seq;
		mod_delayed_work(system_wq, &line->work,
		  usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us)));
	} else {
		if (unlikely(ts->seq < line->line_seqno))
			return HTE_CB_HANDLED;

		diff_seqno = ts->seq - line->line_seqno;
		line->line_seqno = ts->seq;
		if (lr->num_lines != 1)
			line->req_seqno = atomic_add_return(diff_seqno,
							    &lr->seqno);

		return HTE_RUN_SECOND_CB;
	}

	return HTE_CB_HANDLED;
}

static int hte_edge_setup(struct line *line, u64 eflags)
{
	int ret;
	unsigned long flags = 0;
	struct hte_ts_desc *hdesc = &line->hdesc;

	if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
		flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
				 HTE_FALLING_EDGE_TS :
				 HTE_RISING_EDGE_TS;
	if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
		flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
				 HTE_RISING_EDGE_TS :
				 HTE_FALLING_EDGE_TS;

	line->total_discard_seq = 0;

	hte_init_line_attr(hdesc, desc_to_gpio(line->desc), flags, NULL,
			   line->desc);

	ret = hte_ts_get(NULL, hdesc, 0);
	if (ret)
		return ret;

	return hte_request_ts_ns(hdesc, process_hw_ts, process_hw_ts_thread,
				 line);
}

#else

static int hte_edge_setup(struct line *line, u64 eflags)
{
	return 0;
}
#endif /* CONFIG_HTE */

static irqreturn_t edge_irq_thread(int irq, void *p)
{
	struct line *line = p;
	struct linereq *lr = line->req;
	struct gpio_v2_line_event le;

	/* Do not leak kernel stack to userspace */
	memset(&le, 0, sizeof(le));

	if (line->timestamp_ns) {
		le.timestamp_ns = line->timestamp_ns;
	} else {
		/*
		 * We may be running from a nested threaded interrupt in
		 * which case we didn't get the timestamp from
		 * edge_irq_handler().
		 */
		le.timestamp_ns = line_event_timestamp(line);
		if (lr->num_lines != 1)
			line->req_seqno = atomic_inc_return(&lr->seqno);
	}
	line->timestamp_ns = 0;

	switch (READ_ONCE(line->edflags) & GPIO_V2_LINE_EDGE_FLAGS) {
	case GPIO_V2_LINE_FLAG_EDGE_BOTH:
		le.id = line_event_id(gpiod_get_value_cansleep(line->desc));
		break;
	case GPIO_V2_LINE_FLAG_EDGE_RISING:
		le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
		break;
	case GPIO_V2_LINE_FLAG_EDGE_FALLING:
		le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
		break;
	default:
		return IRQ_NONE;
	}
	line->line_seqno++;
	le.line_seqno = line->line_seqno;
	le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
	le.offset = gpio_chip_hwgpio(line->desc);

	linereq_put_event(lr, &le);

	return IRQ_HANDLED;
}

static irqreturn_t edge_irq_handler(int irq, void *p)
{
	struct line *line = p;
	struct linereq *lr = line->req;

	/*
	 * Just store the timestamp in hardirq context so we get it as
	 * close in time as possible to the actual event.
	 */
	line->timestamp_ns = line_event_timestamp(line);

	if (lr->num_lines != 1)
		line->req_seqno = atomic_inc_return(&lr->seqno);

	return IRQ_WAKE_THREAD;
}

/*
 * returns the current debounced logical value.
 */
static bool debounced_value(struct line *line)
{
	bool value;

	/*
	 * minor race - debouncer may be stopped here, so edge_detector_stop()
	 * must leave the value unchanged so the following will read the level
	 * from when the debouncer was last running.
	 */
	value = READ_ONCE(line->level);

	if (test_bit(FLAG_ACTIVE_LOW, &line->desc->flags))
		value = !value;

	return value;
}

static irqreturn_t debounce_irq_handler(int irq, void *p)
{
	struct line *line = p;

	mod_delayed_work(system_wq, &line->work,
		usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us)));

	return IRQ_HANDLED;
}

static void debounce_work_func(struct work_struct *work)
{
	struct gpio_v2_line_event le;
	struct line *line = container_of(work, struct line, work.work);
	struct linereq *lr;
	u64 eflags, edflags = READ_ONCE(line->edflags);
	int level = -1;
#ifdef CONFIG_HTE
	int diff_seqno;

	if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)
		level = line->raw_level;
#endif
	if (level < 0)
		level = gpiod_get_raw_value_cansleep(line->desc);
	if (level < 0) {
		pr_debug_ratelimited("debouncer failed to read line value\n");
		return;
	}

	if (READ_ONCE(line->level) == level)
		return;

	WRITE_ONCE(line->level, level);

	/* -- edge detection -- */
	eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
	if (!eflags)
		return;

	/* switch from physical level to logical - if they differ */
	if (edflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
		level = !level;

	/* ignore edges that are not being monitored */
	if (((eflags == GPIO_V2_LINE_FLAG_EDGE_RISING) && !level) ||
	    ((eflags == GPIO_V2_LINE_FLAG_EDGE_FALLING) && level))
		return;

	/* Do not leak kernel stack to userspace */
	memset(&le, 0, sizeof(le));

	lr = line->req;
	le.timestamp_ns = line_event_timestamp(line);
	le.offset = gpio_chip_hwgpio(line->desc);
#ifdef CONFIG_HTE
	if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) {
		/* discard events except the last one */
		line->total_discard_seq -= 1;
		diff_seqno = line->last_seqno - line->total_discard_seq -
				line->line_seqno;
		line->line_seqno = line->last_seqno - line->total_discard_seq;
		le.line_seqno = line->line_seqno;
		le.seqno = (lr->num_lines == 1) ?
			le.line_seqno : atomic_add_return(diff_seqno, &lr->seqno);
	} else
#endif /* CONFIG_HTE */
	{
		line->line_seqno++;
		le.line_seqno = line->line_seqno;
		le.seqno = (lr->num_lines == 1) ?
			le.line_seqno : atomic_inc_return(&lr->seqno);
	}

	le.id = line_event_id(level);

	linereq_put_event(lr, &le);
}

static int debounce_setup(struct line *line, unsigned int debounce_period_us)
{
	unsigned long irqflags;
	int ret, level, irq;

	/* try hardware */
	ret = gpiod_set_debounce(line->desc, debounce_period_us);
	if (!ret) {
		WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
		return ret;
	}
	if (ret != -ENOTSUPP)
		return ret;

	if (debounce_period_us) {
		/* setup software debounce */
		level = gpiod_get_raw_value_cansleep(line->desc);
		if (level < 0)
			return level;

		if (!(IS_ENABLED(CONFIG_HTE) &&
		      test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))) {
			irq = gpiod_to_irq(line->desc);
			if (irq < 0)
				return -ENXIO;

			irqflags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING;
			ret = request_irq(irq, debounce_irq_handler, irqflags,
					  line->req->label, line);
			if (ret)
				return ret;
			line->irq = irq;
		} else {
			ret = hte_edge_setup(line, GPIO_V2_LINE_FLAG_EDGE_BOTH);
			if (ret)
				return ret;
		}

		WRITE_ONCE(line->level, level);
		WRITE_ONCE(line->sw_debounced, 1);
	}
	return 0;
}

static bool gpio_v2_line_config_debounced(struct gpio_v2_line_config *lc,
					  unsigned int line_idx)
{
	unsigned int i;
	u64 mask = BIT_ULL(line_idx);

	for (i = 0; i < lc->num_attrs; i++) {
		if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE) &&
		    (lc->attrs[i].mask & mask))
			return true;
	}
	return false;
}

static u32 gpio_v2_line_config_debounce_period(struct gpio_v2_line_config *lc,
					       unsigned int line_idx)
{
	unsigned int i;
	u64 mask = BIT_ULL(line_idx);

	for (i = 0; i < lc->num_attrs; i++) {
		if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE) &&
		    (lc->attrs[i].mask & mask))
			return lc->attrs[i].attr.debounce_period_us;
	}
	return 0;
}
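/*
 * The two helpers above, like gpio_v2_line_config_flags() further below,
 * resolve per-line overrides from the config's attribute list: an attribute
 * applies to line index i when bit i is set in its mask. For example, a
 * GPIO_V2_LINE_ATTR_ID_DEBOUNCE attribute with mask BIT(0) sets the debounce
 * period for the first requested line only.
 */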

static void edge_detector_stop(struct line *line)
{
	if (line->irq) {
		free_irq(line->irq, line);
		line->irq = 0;
	}

#ifdef CONFIG_HTE
	if (READ_ONCE(line->edflags) & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE)
		hte_ts_put(&line->hdesc);
#endif

	cancel_delayed_work_sync(&line->work);
	WRITE_ONCE(line->sw_debounced, 0);
	WRITE_ONCE(line->edflags, 0);
	if (line->desc)
		WRITE_ONCE(line->desc->debounce_period_us, 0);
	/* do not change line->level - see comment in debounced_value() */
}

static int edge_detector_setup(struct line *line,
			       struct gpio_v2_line_config *lc,
			       unsigned int line_idx, u64 edflags)
{
	u32 debounce_period_us;
	unsigned long irqflags = 0;
	u64 eflags;
	int irq, ret;

	eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;
	if (eflags && !kfifo_initialized(&line->req->events)) {
		ret = kfifo_alloc(&line->req->events,
				  line->req->event_buffer_size, GFP_KERNEL);
		if (ret)
			return ret;
	}
	if (gpio_v2_line_config_debounced(lc, line_idx)) {
		debounce_period_us = gpio_v2_line_config_debounce_period(lc, line_idx);
		ret = debounce_setup(line, debounce_period_us);
		if (ret)
			return ret;
		WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
	}

	/* detection disabled or sw debouncer will provide edge detection */
	if (!eflags || READ_ONCE(line->sw_debounced))
		return 0;

	if (IS_ENABLED(CONFIG_HTE) &&
	    (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
		return hte_edge_setup(line, edflags);

	irq = gpiod_to_irq(line->desc);
	if (irq < 0)
		return -ENXIO;

	if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
		irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
			IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
	if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
		irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
			IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
	irqflags |= IRQF_ONESHOT;

	/* Request a thread to read the events */
	ret = request_threaded_irq(irq, edge_irq_handler, edge_irq_thread,
				   irqflags, line->req->label, line);
	if (ret)
		return ret;

	line->irq = irq;
	return 0;
}

static int edge_detector_update(struct line *line,
				struct gpio_v2_line_config *lc,
				unsigned int line_idx, u64 edflags)
{
	u64 active_edflags = READ_ONCE(line->edflags);
	unsigned int debounce_period_us =
			gpio_v2_line_config_debounce_period(lc, line_idx);

	if ((active_edflags == edflags) &&
	    (READ_ONCE(line->desc->debounce_period_us) == debounce_period_us))
		return 0;

	/* sw debounced and still will be...*/
	if (debounce_period_us && READ_ONCE(line->sw_debounced)) {
		WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
		return 0;
	}

	/* reconfiguring edge detection or sw debounce being disabled */
	if ((line->irq && !READ_ONCE(line->sw_debounced)) ||
	    (active_edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) ||
	    (!debounce_period_us && READ_ONCE(line->sw_debounced)))
		edge_detector_stop(line);

	return edge_detector_setup(line, lc, line_idx, edflags);
}

static u64 gpio_v2_line_config_flags(struct gpio_v2_line_config *lc,
				     unsigned int line_idx)
{
	unsigned int i;
	u64 mask = BIT_ULL(line_idx);

	for (i = 0; i < lc->num_attrs; i++) {
		if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_FLAGS) &&
		    (lc->attrs[i].mask & mask))
			return lc->attrs[i].attr.flags;
	}
	return lc->flags;
}

static int gpio_v2_line_config_output_value(struct gpio_v2_line_config *lc,
					    unsigned int line_idx)
{
	unsigned int i;
	u64 mask = BIT_ULL(line_idx);

	for (i = 0; i < lc->num_attrs; i++) {
		if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_OUTPUT_VALUES) &&
		    (lc->attrs[i].mask & mask))
			return !!(lc->attrs[i].attr.values & mask);
	}
	return 0;
}

static int gpio_v2_line_flags_validate(u64 flags)
{
	/* Return an error if an unknown flag is set */
	if (flags & ~GPIO_V2_LINE_VALID_FLAGS)
		return -EINVAL;

	if (!IS_ENABLED(CONFIG_HTE) &&
	    (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
		return -EOPNOTSUPP;

	/*
	 * Do not allow both INPUT and OUTPUT flags to be set as they are
	 * contradictory.
	 */
	if ((flags & GPIO_V2_LINE_FLAG_INPUT) &&
	    (flags & GPIO_V2_LINE_FLAG_OUTPUT))
		return -EINVAL;

	/* Only allow one event clock source */
	if (IS_ENABLED(CONFIG_HTE) &&
	    (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME) &&
	    (flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE))
		return -EINVAL;

	/* Edge detection requires explicit input. */
	if ((flags & GPIO_V2_LINE_EDGE_FLAGS) &&
	    !(flags & GPIO_V2_LINE_FLAG_INPUT))
		return -EINVAL;

	/*
	 * Do not allow OPEN_SOURCE and OPEN_DRAIN flags in a single
	 * request. If the hardware actually supports enabling both at the
	 * same time the electrical result would be disastrous.
	 */
	if ((flags & GPIO_V2_LINE_FLAG_OPEN_DRAIN) &&
	    (flags & GPIO_V2_LINE_FLAG_OPEN_SOURCE))
		return -EINVAL;

	/* Drive requires explicit output direction. */
	if ((flags & GPIO_V2_LINE_DRIVE_FLAGS) &&
	    !(flags & GPIO_V2_LINE_FLAG_OUTPUT))
		return -EINVAL;

	/* Bias requires explicit direction. */
	if ((flags & GPIO_V2_LINE_BIAS_FLAGS) &&
	    !(flags & GPIO_V2_LINE_DIRECTION_FLAGS))
		return -EINVAL;

	/* Only one bias flag can be set. */
	if (((flags & GPIO_V2_LINE_FLAG_BIAS_DISABLED) &&
	     (flags & (GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN |
		       GPIO_V2_LINE_FLAG_BIAS_PULL_UP))) ||
	    ((flags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN) &&
	     (flags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP)))
		return -EINVAL;

	return 0;
}
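/*
 * A typical edge-monitoring configuration accepted above is
 * GPIO_V2_LINE_FLAG_INPUT | GPIO_V2_LINE_FLAG_EDGE_RISING |
 * GPIO_V2_LINE_FLAG_EDGE_FALLING | GPIO_V2_LINE_FLAG_BIAS_PULL_UP.
 */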
1200
1201static int gpio_v2_line_config_validate(struct gpio_v2_line_config *lc,
1202 unsigned int num_lines)
1203{
1204 unsigned int i;
1205 u64 flags;
1206 int ret;
1207
1208 if (lc->num_attrs > GPIO_V2_LINE_NUM_ATTRS_MAX)
1209 return -EINVAL;
1210
1211 if (memchr_inv(lc->padding, 0, sizeof(lc->padding)))
1212 return -EINVAL;
1213
1214 for (i = 0; i < num_lines; i++) {
1215 flags = gpio_v2_line_config_flags(lc, i);
1216 ret = gpio_v2_line_flags_validate(flags);
1217 if (ret)
1218 return ret;
Kent Gibson65cff702020-09-28 08:27:59 +08001219
1220 /* debounce requires explicit input */
1221 if (gpio_v2_line_config_debounced(lc, i) &&
1222 !(flags & GPIO_V2_LINE_FLAG_INPUT))
1223 return -EINVAL;
Kent Gibson3c0d9c62020-09-28 08:27:54 +08001224 }
1225 return 0;
1226}
1227
1228static void gpio_v2_line_config_flags_to_desc_flags(u64 flags,
1229 unsigned long *flagsp)
1230{
1231 assign_bit(FLAG_ACTIVE_LOW, flagsp,
1232 flags & GPIO_V2_LINE_FLAG_ACTIVE_LOW);
1233
1234 if (flags & GPIO_V2_LINE_FLAG_OUTPUT)
1235 set_bit(FLAG_IS_OUT, flagsp);
1236 else if (flags & GPIO_V2_LINE_FLAG_INPUT)
1237 clear_bit(FLAG_IS_OUT, flagsp);
1238
Kent Gibson73e03412020-09-28 08:27:56 +08001239 assign_bit(FLAG_EDGE_RISING, flagsp,
1240 flags & GPIO_V2_LINE_FLAG_EDGE_RISING);
1241 assign_bit(FLAG_EDGE_FALLING, flagsp,
1242 flags & GPIO_V2_LINE_FLAG_EDGE_FALLING);
1243
Kent Gibson3c0d9c62020-09-28 08:27:54 +08001244 assign_bit(FLAG_OPEN_DRAIN, flagsp,
1245 flags & GPIO_V2_LINE_FLAG_OPEN_DRAIN);
1246 assign_bit(FLAG_OPEN_SOURCE, flagsp,
1247 flags & GPIO_V2_LINE_FLAG_OPEN_SOURCE);
1248
1249 assign_bit(FLAG_PULL_UP, flagsp,
1250 flags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP);
1251 assign_bit(FLAG_PULL_DOWN, flagsp,
1252 flags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN);
1253 assign_bit(FLAG_BIAS_DISABLE, flagsp,
1254 flags & GPIO_V2_LINE_FLAG_BIAS_DISABLED);
Kent Gibson26d060e2020-10-15 07:11:56 +08001255
1256 assign_bit(FLAG_EVENT_CLOCK_REALTIME, flagsp,
1257 flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME);
Dipen Patel20683392022-04-22 13:52:18 -07001258 assign_bit(FLAG_EVENT_CLOCK_HTE, flagsp,
1259 flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE);
Kent Gibson3c0d9c62020-09-28 08:27:54 +08001260}
1261
1262static long linereq_get_values(struct linereq *lr, void __user *ip)
1263{
1264 struct gpio_v2_line_values lv;
1265 DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX);
1266 struct gpio_desc **descs;
1267 unsigned int i, didx, num_get;
Kent Gibson65cff702020-09-28 08:27:59 +08001268 bool val;
Kent Gibson3c0d9c62020-09-28 08:27:54 +08001269 int ret;
1270
1271 /* NOTE: It's ok to read values of output lines. */
1272 if (copy_from_user(&lv, ip, sizeof(lv)))
1273 return -EFAULT;
1274
1275 for (num_get = 0, i = 0; i < lr->num_lines; i++) {
1276 if (lv.mask & BIT_ULL(i)) {
1277 num_get++;
1278 descs = &lr->lines[i].desc;
1279 }
1280 }
1281
1282 if (num_get == 0)
1283 return -EINVAL;
1284
1285 if (num_get != 1) {
1286 descs = kmalloc_array(num_get, sizeof(*descs), GFP_KERNEL);
1287 if (!descs)
1288 return -ENOMEM;
1289 for (didx = 0, i = 0; i < lr->num_lines; i++) {
1290 if (lv.mask & BIT_ULL(i)) {
1291 descs[didx] = lr->lines[i].desc;
1292 didx++;
1293 }
1294 }
1295 }
1296 ret = gpiod_get_array_value_complex(false, true, num_get,
1297 descs, NULL, vals);
1298
1299 if (num_get != 1)
1300 kfree(descs);
1301 if (ret)
1302 return ret;
1303
1304 lv.bits = 0;
1305 for (didx = 0, i = 0; i < lr->num_lines; i++) {
1306 if (lv.mask & BIT_ULL(i)) {
Kent Gibson65cff702020-09-28 08:27:59 +08001307 if (lr->lines[i].sw_debounced)
1308 val = debounced_value(&lr->lines[i]);
1309 else
1310 val = test_bit(didx, vals);
1311 if (val)
Kent Gibson3c0d9c62020-09-28 08:27:54 +08001312 lv.bits |= BIT_ULL(i);
1313 didx++;
1314 }
1315 }
1316
1317 if (copy_to_user(ip, &lv, sizeof(lv)))
1318 return -EFAULT;
1319
1320 return 0;
1321}
1322
Kent Gibson7b8e00d92020-09-28 08:27:58 +08001323static long linereq_set_values_unlocked(struct linereq *lr,
1324 struct gpio_v2_line_values *lv)
1325{
1326 DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX);
1327 struct gpio_desc **descs;
1328 unsigned int i, didx, num_set;
1329 int ret;
1330
1331 bitmap_zero(vals, GPIO_V2_LINES_MAX);
1332 for (num_set = 0, i = 0; i < lr->num_lines; i++) {
1333 if (lv->mask & BIT_ULL(i)) {
1334 if (!test_bit(FLAG_IS_OUT, &lr->lines[i].desc->flags))
1335 return -EPERM;
1336 if (lv->bits & BIT_ULL(i))
1337 __set_bit(num_set, vals);
1338 num_set++;
1339 descs = &lr->lines[i].desc;
1340 }
1341 }
1342 if (num_set == 0)
1343 return -EINVAL;
1344
1345 if (num_set != 1) {
1346 /* build compacted desc array and values */
1347 descs = kmalloc_array(num_set, sizeof(*descs), GFP_KERNEL);
1348 if (!descs)
1349 return -ENOMEM;
1350 for (didx = 0, i = 0; i < lr->num_lines; i++) {
1351 if (lv->mask & BIT_ULL(i)) {
1352 descs[didx] = lr->lines[i].desc;
1353 didx++;
1354 }
1355 }
1356 }
1357 ret = gpiod_set_array_value_complex(false, true, num_set,
1358 descs, NULL, vals);
1359
1360 if (num_set != 1)
1361 kfree(descs);
1362 return ret;
1363}
1364
1365static long linereq_set_values(struct linereq *lr, void __user *ip)
1366{
1367 struct gpio_v2_line_values lv;
1368 int ret;
1369
1370 if (copy_from_user(&lv, ip, sizeof(lv)))
1371 return -EFAULT;
1372
1373 mutex_lock(&lr->config_mutex);
1374
1375 ret = linereq_set_values_unlocked(lr, &lv);
1376
1377 mutex_unlock(&lr->config_mutex);
1378
1379 return ret;
1380}
1381
Kent Gibsona54756c2020-09-28 08:27:57 +08001382static long linereq_set_config_unlocked(struct linereq *lr,
1383 struct gpio_v2_line_config *lc)
1384{
1385 struct gpio_desc *desc;
Kent Gibsonb1a92e92022-07-14 10:03:18 +08001386 struct line *line;
Kent Gibsona54756c2020-09-28 08:27:57 +08001387 unsigned int i;
Kent Gibsonb1a92e92022-07-14 10:03:18 +08001388 u64 flags, edflags;
Kent Gibsona54756c2020-09-28 08:27:57 +08001389 int ret;
1390
1391 for (i = 0; i < lr->num_lines; i++) {
Kent Gibsonb1a92e92022-07-14 10:03:18 +08001392 line = &lr->lines[i];
Kent Gibsona54756c2020-09-28 08:27:57 +08001393 desc = lr->lines[i].desc;
1394 flags = gpio_v2_line_config_flags(lc, i);
Kent Gibsona54756c2020-09-28 08:27:57 +08001395 gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
Kent Gibsonb1a92e92022-07-14 10:03:18 +08001396 edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
Kent Gibsona54756c2020-09-28 08:27:57 +08001397 /*
1398 * Lines have to be requested explicitly for input
1399 * or output, else the line will be treated "as is".
1400 */
1401 if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
1402 int val = gpio_v2_line_config_output_value(lc, i);
1403
Kent Gibsonb1a92e92022-07-14 10:03:18 +08001404 edge_detector_stop(line);
Kent Gibsona54756c2020-09-28 08:27:57 +08001405 ret = gpiod_direction_output(desc, val);
1406 if (ret)
1407 return ret;
1408 } else if (flags & GPIO_V2_LINE_FLAG_INPUT) {
1409 ret = gpiod_direction_input(desc);
1410 if (ret)
1411 return ret;
1412
Kent Gibsonb1a92e92022-07-14 10:03:18 +08001413 ret = edge_detector_update(line, lc, i, edflags);
Kent Gibsona54756c2020-09-28 08:27:57 +08001414 if (ret)
1415 return ret;
1416 }
1417
Kent Gibsonb1a92e92022-07-14 10:03:18 +08001418 WRITE_ONCE(line->edflags, edflags);
1419
Bartosz Golaszewski9ce4ed52023-08-21 16:18:27 +02001420 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
Kent Gibsona54756c2020-09-28 08:27:57 +08001421 }
1422 return 0;
1423}
1424
1425static long linereq_set_config(struct linereq *lr, void __user *ip)
1426{
1427 struct gpio_v2_line_config lc;
1428 int ret;
1429
1430 if (copy_from_user(&lc, ip, sizeof(lc)))
1431 return -EFAULT;
1432
1433 ret = gpio_v2_line_config_validate(&lc, lr->num_lines);
1434 if (ret)
1435 return ret;
1436
1437 mutex_lock(&lr->config_mutex);
1438
1439 ret = linereq_set_config_unlocked(lr, &lc);
1440
1441 mutex_unlock(&lr->config_mutex);
1442
1443 return ret;
1444}
1445
Bartosz Golaszewskibdbbae22022-12-05 13:39:03 +01001446static long linereq_ioctl_unlocked(struct file *file, unsigned int cmd,
1447 unsigned long arg)
Kent Gibson3c0d9c62020-09-28 08:27:54 +08001448{
1449 struct linereq *lr = file->private_data;
1450 void __user *ip = (void __user *)arg;
1451
Bartosz Golaszewski533aae72022-12-05 13:39:02 +01001452 if (!lr->gdev->chip)
1453 return -ENODEV;
1454
Andy Shevchenko1cef8b52022-03-30 18:06:20 +03001455 switch (cmd) {
1456 case GPIO_V2_LINE_GET_VALUES_IOCTL:
Kent Gibson3c0d9c62020-09-28 08:27:54 +08001457 return linereq_get_values(lr, ip);
Andy Shevchenko1cef8b52022-03-30 18:06:20 +03001458 case GPIO_V2_LINE_SET_VALUES_IOCTL:
Kent Gibson7b8e00d92020-09-28 08:27:58 +08001459 return linereq_set_values(lr, ip);
Andy Shevchenko1cef8b52022-03-30 18:06:20 +03001460 case GPIO_V2_LINE_SET_CONFIG_IOCTL:
Kent Gibsona54756c2020-09-28 08:27:57 +08001461 return linereq_set_config(lr, ip);
Andy Shevchenko1cef8b52022-03-30 18:06:20 +03001462 default:
1463 return -EINVAL;
1464 }
Kent Gibson3c0d9c62020-09-28 08:27:54 +08001465}
1466
Bartosz Golaszewskibdbbae22022-12-05 13:39:03 +01001467static long linereq_ioctl(struct file *file, unsigned int cmd,
1468 unsigned long arg)
1469{
1470 struct linereq *lr = file->private_data;
1471
1472 return call_ioctl_locked(file, cmd, arg, lr->gdev,
1473 linereq_ioctl_unlocked);
1474}
1475
Kent Gibson3c0d9c62020-09-28 08:27:54 +08001476#ifdef CONFIG_COMPAT
1477static long linereq_ioctl_compat(struct file *file, unsigned int cmd,
1478 unsigned long arg)
1479{
1480 return linereq_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
1481}
1482#endif
1483
Bartosz Golaszewskibdbbae22022-12-05 13:39:03 +01001484static __poll_t linereq_poll_unlocked(struct file *file,
1485 struct poll_table_struct *wait)
Kent Gibson73e03412020-09-28 08:27:56 +08001486{
1487 struct linereq *lr = file->private_data;
1488 __poll_t events = 0;
1489
Bartosz Golaszewski533aae72022-12-05 13:39:02 +01001490 if (!lr->gdev->chip)
1491 return EPOLLHUP | EPOLLERR;
1492
Kent Gibson73e03412020-09-28 08:27:56 +08001493 poll_wait(file, &lr->wait, wait);
1494
1495 if (!kfifo_is_empty_spinlocked_noirqsave(&lr->events,
1496 &lr->wait.lock))
1497 events = EPOLLIN | EPOLLRDNORM;
1498
1499 return events;
1500}
1501
Bartosz Golaszewskibdbbae22022-12-05 13:39:03 +01001502static __poll_t linereq_poll(struct file *file,
1503 struct poll_table_struct *wait)
1504{
1505 struct linereq *lr = file->private_data;
1506
1507 return call_poll_locked(file, wait, lr->gdev, linereq_poll_unlocked);
1508}
1509
1510static ssize_t linereq_read_unlocked(struct file *file, char __user *buf,
1511 size_t count, loff_t *f_ps)
Kent Gibson73e03412020-09-28 08:27:56 +08001512{
1513 struct linereq *lr = file->private_data;
1514 struct gpio_v2_line_event le;
1515 ssize_t bytes_read = 0;
1516 int ret;
1517
Bartosz Golaszewski533aae72022-12-05 13:39:02 +01001518 if (!lr->gdev->chip)
1519 return -ENODEV;
1520
Kent Gibson73e03412020-09-28 08:27:56 +08001521 if (count < sizeof(le))
1522 return -EINVAL;
1523
1524 do {
1525 spin_lock(&lr->wait.lock);
1526 if (kfifo_is_empty(&lr->events)) {
1527 if (bytes_read) {
1528 spin_unlock(&lr->wait.lock);
1529 return bytes_read;
1530 }
1531
1532 if (file->f_flags & O_NONBLOCK) {
1533 spin_unlock(&lr->wait.lock);
1534 return -EAGAIN;
1535 }
1536
1537 ret = wait_event_interruptible_locked(lr->wait,
1538 !kfifo_is_empty(&lr->events));
1539 if (ret) {
1540 spin_unlock(&lr->wait.lock);
1541 return ret;
1542 }
1543 }
1544
1545 ret = kfifo_out(&lr->events, &le, 1);
1546 spin_unlock(&lr->wait.lock);
1547 if (ret != 1) {
1548 /*
1549 * This should never happen - we were holding the
1550 * lock from the moment we learned the fifo is no
1551 * longer empty until now.
1552 */
1553 ret = -EIO;
1554 break;
1555 }
1556
1557 if (copy_to_user(buf + bytes_read, &le, sizeof(le)))
1558 return -EFAULT;
1559 bytes_read += sizeof(le);
1560 } while (count >= bytes_read + sizeof(le));
1561
1562 return bytes_read;
1563}
1564
Bartosz Golaszewskibdbbae22022-12-05 13:39:03 +01001565static ssize_t linereq_read(struct file *file, char __user *buf,
1566 size_t count, loff_t *f_ps)
1567{
1568 struct linereq *lr = file->private_data;
1569
1570 return call_read_locked(file, buf, count, f_ps, lr->gdev,
1571 linereq_read_unlocked);
1572}
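
/*
 * Illustrative userspace sketch (not part of this driver): blocking read of
 * edge events from a line request fd that has edge detection enabled.
 * Assumes "req_fd" came from GPIO_V2_GET_LINE_IOCTL; error handling omitted.
 *
 *	struct gpio_v2_line_event ev;
 *
 *	if (read(req_fd, &ev, sizeof(ev)) == sizeof(ev))
 *		printf("offset %u: %s edge at %llu ns (seqno %u)\n",
 *		       ev.offset,
 *		       ev.id == GPIO_V2_LINE_EVENT_RISING_EDGE ?
 *				"rising" : "falling",
 *		       (unsigned long long)ev.timestamp_ns, ev.seqno);
 */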
1573
Kent Gibson3c0d9c62020-09-28 08:27:54 +08001574static void linereq_free(struct linereq *lr)
1575{
1576 unsigned int i;
1577
Bartosz Golaszewskia0dda502023-08-17 15:18:58 +02001578 if (lr->device_unregistered_nb.notifier_call)
1579 blocking_notifier_chain_unregister(&lr->gdev->device_notifier,
1580 &lr->device_unregistered_nb);
1581
Kent Gibson3c0d9c62020-09-28 08:27:54 +08001582 for (i = 0; i < lr->num_lines; i++) {
Kent Gibson160d6e42022-07-14 10:03:14 +08001583 if (lr->lines[i].desc) {
Kent Gibsonb1a92e92022-07-14 10:03:18 +08001584 edge_detector_stop(&lr->lines[i]);
Kent Gibson3c0d9c62020-09-28 08:27:54 +08001585 gpiod_free(lr->lines[i].desc);
Kent Gibson160d6e42022-07-14 10:03:14 +08001586 }
Kent Gibson3c0d9c62020-09-28 08:27:54 +08001587 }
Kent Gibson73e03412020-09-28 08:27:56 +08001588 kfifo_free(&lr->events);
Kent Gibson3c0d9c62020-09-28 08:27:54 +08001589 kfree(lr->label);
Andy Shevchenkodc0989e2022-12-28 11:20:43 +02001590 gpio_device_put(lr->gdev);
Kent Gibson3c0d9c62020-09-28 08:27:54 +08001591 kfree(lr);
1592}
1593
1594static int linereq_release(struct inode *inode, struct file *file)
1595{
1596 struct linereq *lr = file->private_data;
1597
1598 linereq_free(lr);
1599 return 0;
1600}
1601
Bartosz Golaszewski0ae31092022-09-22 11:51:24 +02001602#ifdef CONFIG_PROC_FS
1603static void linereq_show_fdinfo(struct seq_file *out, struct file *file)
1604{
1605 struct linereq *lr = file->private_data;
1606 struct device *dev = &lr->gdev->dev;
1607 u16 i;
1608
1609 seq_printf(out, "gpio-chip:\t%s\n", dev_name(dev));
1610
1611 for (i = 0; i < lr->num_lines; i++)
1612 seq_printf(out, "gpio-line:\t%d\n",
1613 gpio_chip_hwgpio(lr->lines[i].desc));
1614}
1615#endif
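
/*
 * For reference, the fdinfo output produced above looks like this for a
 * request holding, e.g., lines 4 and 7 of gpiochip0:
 *
 *	gpio-chip:	gpiochip0
 *	gpio-line:	4
 *	gpio-line:	7
 */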
1616
Kent Gibson3c0d9c62020-09-28 08:27:54 +08001617static const struct file_operations line_fileops = {
1618 .release = linereq_release,
Kent Gibson73e03412020-09-28 08:27:56 +08001619 .read = linereq_read,
1620 .poll = linereq_poll,
Kent Gibson3c0d9c62020-09-28 08:27:54 +08001621 .owner = THIS_MODULE,
1622 .llseek = noop_llseek,
1623 .unlocked_ioctl = linereq_ioctl,
1624#ifdef CONFIG_COMPAT
1625 .compat_ioctl = linereq_ioctl_compat,
1626#endif
Bartosz Golaszewski0ae31092022-09-22 11:51:24 +02001627#ifdef CONFIG_PROC_FS
1628 .show_fdinfo = linereq_show_fdinfo,
1629#endif
Kent Gibson3c0d9c62020-09-28 08:27:54 +08001630};
1631
1632static int linereq_create(struct gpio_device *gdev, void __user *ip)
1633{
1634 struct gpio_v2_line_request ulr;
1635 struct gpio_v2_line_config *lc;
1636 struct linereq *lr;
1637 struct file *file;
Kent Gibsonb1a92e92022-07-14 10:03:18 +08001638 u64 flags, edflags;
Kent Gibson3c0d9c62020-09-28 08:27:54 +08001639 unsigned int i;
1640 int fd, ret;
1641
1642 if (copy_from_user(&ulr, ip, sizeof(ulr)))
1643 return -EFAULT;
1644
1645 if ((ulr.num_lines == 0) || (ulr.num_lines > GPIO_V2_LINES_MAX))
1646 return -EINVAL;
1647
1648 if (memchr_inv(ulr.padding, 0, sizeof(ulr.padding)))
1649 return -EINVAL;
1650
1651 lc = &ulr.config;
1652 ret = gpio_v2_line_config_validate(lc, ulr.num_lines);
1653 if (ret)
1654 return ret;
1655
1656 lr = kzalloc(struct_size(lr, lines, ulr.num_lines), GFP_KERNEL);
1657 if (!lr)
1658 return -ENOMEM;
1659
Andy Shevchenkodc0989e2022-12-28 11:20:43 +02001660 lr->gdev = gpio_device_get(gdev);
Kent Gibson3c0d9c62020-09-28 08:27:54 +08001661
Kent Gibson65cff702020-09-28 08:27:59 +08001662 for (i = 0; i < ulr.num_lines; i++) {
Kent Gibson73e03412020-09-28 08:27:56 +08001663 lr->lines[i].req = lr;
Kent Gibson65cff702020-09-28 08:27:59 +08001664 WRITE_ONCE(lr->lines[i].sw_debounced, 0);
1665 INIT_DELAYED_WORK(&lr->lines[i].work, debounce_work_func);
1666 }
Kent Gibson73e03412020-09-28 08:27:56 +08001667
Kent Gibsonf188ac122020-10-05 15:02:46 +08001668 if (ulr.consumer[0] != '\0') {
Kent Gibson3c0d9c62020-09-28 08:27:54 +08001669 /* label is only initialized if consumer is set */
Kent Gibsonf188ac122020-10-05 15:02:46 +08001670 lr->label = kstrndup(ulr.consumer, sizeof(ulr.consumer) - 1,
1671 GFP_KERNEL);
Kent Gibson3c0d9c62020-09-28 08:27:54 +08001672 if (!lr->label) {
1673 ret = -ENOMEM;
1674 goto out_free_linereq;
1675 }
1676 }
1677
Kent Gibsona54756c2020-09-28 08:27:57 +08001678 mutex_init(&lr->config_mutex);
Kent Gibson73e03412020-09-28 08:27:56 +08001679 init_waitqueue_head(&lr->wait);
1680 lr->event_buffer_size = ulr.event_buffer_size;
1681 if (lr->event_buffer_size == 0)
1682 lr->event_buffer_size = ulr.num_lines * 16;
1683 else if (lr->event_buffer_size > GPIO_V2_LINES_MAX * 16)
1684 lr->event_buffer_size = GPIO_V2_LINES_MAX * 16;
1685
1686 atomic_set(&lr->seqno, 0);
Kent Gibson3c0d9c62020-09-28 08:27:54 +08001687 lr->num_lines = ulr.num_lines;
1688
1689 /* Request each GPIO */
1690 for (i = 0; i < ulr.num_lines; i++) {
1691 u32 offset = ulr.offsets[i];
1692 struct gpio_desc *desc = gpiochip_get_desc(gdev->chip, offset);
1693
1694 if (IS_ERR(desc)) {
1695 ret = PTR_ERR(desc);
1696 goto out_free_linereq;
1697 }
1698
Andy Shevchenko95a4eed2022-02-01 17:27:55 +02001699 ret = gpiod_request_user(desc, lr->label);
Kent Gibson3c0d9c62020-09-28 08:27:54 +08001700 if (ret)
1701 goto out_free_linereq;
1702
1703 lr->lines[i].desc = desc;
1704 flags = gpio_v2_line_config_flags(lc, i);
1705 gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
1706
1707 ret = gpiod_set_transitory(desc, false);
1708 if (ret < 0)
1709 goto out_free_linereq;
1710
Kent Gibsonb1a92e92022-07-14 10:03:18 +08001711 edflags = flags & GPIO_V2_LINE_EDGE_DETECTOR_FLAGS;
Kent Gibson3c0d9c62020-09-28 08:27:54 +08001712 /*
1713 * Lines have to be requested explicitly for input
1714 * or output, else the line will be treated "as is".
1715 */
1716 if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
1717 int val = gpio_v2_line_config_output_value(lc, i);
1718
1719 ret = gpiod_direction_output(desc, val);
1720 if (ret)
1721 goto out_free_linereq;
1722 } else if (flags & GPIO_V2_LINE_FLAG_INPUT) {
1723 ret = gpiod_direction_input(desc);
1724 if (ret)
1725 goto out_free_linereq;
Kent Gibson73e03412020-09-28 08:27:56 +08001726
Kent Gibson65cff702020-09-28 08:27:59 +08001727 ret = edge_detector_setup(&lr->lines[i], lc, i,
Kent Gibsonb1a92e92022-07-14 10:03:18 +08001728 edflags);
Kent Gibson73e03412020-09-28 08:27:56 +08001729 if (ret)
1730 goto out_free_linereq;
Kent Gibson3c0d9c62020-09-28 08:27:54 +08001731 }
1732
Kent Gibsonb1a92e92022-07-14 10:03:18 +08001733 lr->lines[i].edflags = edflags;
1734
Bartosz Golaszewski9ce4ed52023-08-21 16:18:27 +02001735 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
Kent Gibson3c0d9c62020-09-28 08:27:54 +08001736
1737 dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
1738 offset);
1739 }
1740
Bartosz Golaszewskia0dda502023-08-17 15:18:58 +02001741 lr->device_unregistered_nb.notifier_call = linereq_unregistered_notify;
1742 ret = blocking_notifier_chain_register(&gdev->device_notifier,
1743 &lr->device_unregistered_nb);
1744 if (ret)
1745 goto out_free_linereq;
1746
Kent Gibson3c0d9c62020-09-28 08:27:54 +08001747 fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
1748 if (fd < 0) {
1749 ret = fd;
1750 goto out_free_linereq;
1751 }
1752
1753 file = anon_inode_getfile("gpio-line", &line_fileops, lr,
1754 O_RDONLY | O_CLOEXEC);
1755 if (IS_ERR(file)) {
1756 ret = PTR_ERR(file);
1757 goto out_put_unused_fd;
1758 }
1759
1760 ulr.fd = fd;
1761 if (copy_to_user(ip, &ulr, sizeof(ulr))) {
1762 /*
1763 * fput() will trigger the release() callback, so do not go onto
1764 * the regular error cleanup path here.
1765 */
1766 fput(file);
1767 put_unused_fd(fd);
1768 return -EFAULT;
1769 }
1770
1771 fd_install(fd, file);
1772
1773 dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
1774 lr->num_lines);
1775
1776 return 0;
1777
1778out_put_unused_fd:
1779 put_unused_fd(fd);
1780out_free_linereq:
1781 linereq_free(lr);
1782 return ret;
1783}
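
/*
 * Illustrative userspace sketch (not part of this driver): requesting two
 * lines as inputs with GPIO_V2_GET_LINE_IOCTL, which is serviced by
 * linereq_create() above. Assumes "chip_fd" is an open /dev/gpiochipN
 * descriptor; error handling omitted.
 *
 *	struct gpio_v2_line_request req;
 *
 *	memset(&req, 0, sizeof(req));
 *	req.offsets[0] = 4;
 *	req.offsets[1] = 7;
 *	req.num_lines = 2;
 *	strcpy(req.consumer, "my-app");
 *	req.config.flags = GPIO_V2_LINE_FLAG_INPUT;
 *	if (ioctl(chip_fd, GPIO_V2_GET_LINE_IOCTL, &req) < 0)
 *		perror("GPIO_V2_GET_LINE_IOCTL");
 *
 * On success, req.fd refers to the per-request anon inode created above.
 */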
1784
1785#ifdef CONFIG_GPIO_CDEV_V1
Kent Gibson925ca362020-06-16 17:36:15 +08001786
1787/*
1788 * GPIO line event management
1789 */
1790
1791/**
1792 * struct lineevent_state - contains the state of a userspace event
1793 * @gdev: the GPIO device the event pertains to
1794 * @label: consumer label used to tag descriptors
1795 * @desc: the GPIO descriptor held by this event
1796 * @eflags: the event flags this line was requested with
 1797 * @irq: the interrupt that triggers in response to events on this GPIO
1798 * @wait: wait queue that handles blocking reads of events
Bartosz Golaszewski91043f52023-08-17 15:28:31 +02001799 * @device_unregistered_nb: notifier block for receiving gdev unregister events
Kent Gibson925ca362020-06-16 17:36:15 +08001800 * @events: KFIFO for the GPIO events
1801 * @timestamp: cache for the timestamp storing it between hardirq
1802 * and IRQ thread, used to bring the timestamp close to the actual
1803 * event
1804 */
1805struct lineevent_state {
1806 struct gpio_device *gdev;
1807 const char *label;
1808 struct gpio_desc *desc;
1809 u32 eflags;
1810 int irq;
1811 wait_queue_head_t wait;
Bartosz Golaszewski91043f52023-08-17 15:28:31 +02001812 struct notifier_block device_unregistered_nb;
Kent Gibson925ca362020-06-16 17:36:15 +08001813 DECLARE_KFIFO(events, struct gpioevent_data, 16);
1814 u64 timestamp;
1815};
1816
1817#define GPIOEVENT_REQUEST_VALID_FLAGS \
1818 (GPIOEVENT_REQUEST_RISING_EDGE | \
1819 GPIOEVENT_REQUEST_FALLING_EDGE)
1820
Bartosz Golaszewskibdbbae22022-12-05 13:39:03 +01001821static __poll_t lineevent_poll_unlocked(struct file *file,
1822 struct poll_table_struct *wait)
Kent Gibson925ca362020-06-16 17:36:15 +08001823{
Kent Gibson49bc5272020-07-08 12:15:48 +08001824 struct lineevent_state *le = file->private_data;
Kent Gibson925ca362020-06-16 17:36:15 +08001825 __poll_t events = 0;
1826
Bartosz Golaszewski533aae72022-12-05 13:39:02 +01001827 if (!le->gdev->chip)
1828 return EPOLLHUP | EPOLLERR;
1829
Kent Gibson49bc5272020-07-08 12:15:48 +08001830 poll_wait(file, &le->wait, wait);
Kent Gibson925ca362020-06-16 17:36:15 +08001831
1832 if (!kfifo_is_empty_spinlocked_noirqsave(&le->events, &le->wait.lock))
1833 events = EPOLLIN | EPOLLRDNORM;
1834
1835 return events;
1836}
1837
Bartosz Golaszewskibdbbae22022-12-05 13:39:03 +01001838static __poll_t lineevent_poll(struct file *file,
1839 struct poll_table_struct *wait)
1840{
1841 struct lineevent_state *le = file->private_data;
1842
1843 return call_poll_locked(file, wait, le->gdev, lineevent_poll_unlocked);
1844}
1845
Bartosz Golaszewski91043f52023-08-17 15:28:31 +02001846static int lineevent_unregistered_notify(struct notifier_block *nb,
1847 unsigned long action, void *data)
1848{
1849 struct lineevent_state *le = container_of(nb, struct lineevent_state,
1850 device_unregistered_nb);
1851
1852 wake_up_poll(&le->wait, EPOLLIN | EPOLLERR);
1853
1854 return NOTIFY_OK;
1855}
1856
Andy Shevchenko163d1712020-10-14 13:33:15 +03001857struct compat_gpioevent_data {
1858 compat_u64 timestamp;
1859 u32 id;
1860};
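
/*
 * For example, on x86 struct gpioevent_data is 16 bytes for native 64-bit
 * userspace (the u64 forces 8-byte alignment and trailing padding), but only
 * 12 bytes for ia32 userspace, where u64 (and therefore compat_u64 here) is
 * 4-byte aligned and no trailing padding is added.
 */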
Kent Gibson925ca362020-06-16 17:36:15 +08001861
Bartosz Golaszewskibdbbae22022-12-05 13:39:03 +01001862static ssize_t lineevent_read_unlocked(struct file *file, char __user *buf,
1863 size_t count, loff_t *f_ps)
Kent Gibson925ca362020-06-16 17:36:15 +08001864{
Kent Gibson49bc5272020-07-08 12:15:48 +08001865 struct lineevent_state *le = file->private_data;
Kent Gibson925ca362020-06-16 17:36:15 +08001866 struct gpioevent_data ge;
1867 ssize_t bytes_read = 0;
Andy Shevchenko5ad284a2020-09-15 15:58:16 +03001868 ssize_t ge_size;
Kent Gibson925ca362020-06-16 17:36:15 +08001869 int ret;
1870
Bartosz Golaszewski533aae72022-12-05 13:39:02 +01001871 if (!le->gdev->chip)
1872 return -ENODEV;
1873
Andy Shevchenko5ad284a2020-09-15 15:58:16 +03001874 /*
 1875	 * When the compat system call is used, struct gpioevent_data has a
 1876	 * different size, at least on ia32, due to alignment differences.
 1877	 * Because the first member is 64 bits wide and is followed by a
 1878	 * 32-bit member, there is no gap between them; the only difference
 1879	 * is the padding at the end of the structure. Hence, we calculate
 1880	 * the actual sizeof() and pass that to copy_to_user() to drop the
 1881	 * unneeded bytes from the output.
1882 */
Andy Shevchenko163d1712020-10-14 13:33:15 +03001883 if (compat_need_64bit_alignment_fixup())
 1884		ge_size = sizeof(struct compat_gpioevent_data);
1885 else
1886 ge_size = sizeof(struct gpioevent_data);
Andy Shevchenko5ad284a2020-09-15 15:58:16 +03001887 if (count < ge_size)
Kent Gibson925ca362020-06-16 17:36:15 +08001888 return -EINVAL;
1889
1890 do {
1891 spin_lock(&le->wait.lock);
1892 if (kfifo_is_empty(&le->events)) {
1893 if (bytes_read) {
1894 spin_unlock(&le->wait.lock);
1895 return bytes_read;
1896 }
1897
Kent Gibson49bc5272020-07-08 12:15:48 +08001898 if (file->f_flags & O_NONBLOCK) {
Kent Gibson925ca362020-06-16 17:36:15 +08001899 spin_unlock(&le->wait.lock);
1900 return -EAGAIN;
1901 }
1902
1903 ret = wait_event_interruptible_locked(le->wait,
1904 !kfifo_is_empty(&le->events));
1905 if (ret) {
1906 spin_unlock(&le->wait.lock);
1907 return ret;
1908 }
1909 }
1910
1911 ret = kfifo_out(&le->events, &ge, 1);
1912 spin_unlock(&le->wait.lock);
1913 if (ret != 1) {
1914 /*
1915 * This should never happen - we were holding the lock
1916 * from the moment we learned the fifo is no longer
1917 * empty until now.
1918 */
1919 ret = -EIO;
1920 break;
1921 }
1922
Andy Shevchenko5ad284a2020-09-15 15:58:16 +03001923 if (copy_to_user(buf + bytes_read, &ge, ge_size))
Kent Gibson925ca362020-06-16 17:36:15 +08001924 return -EFAULT;
Andy Shevchenko5ad284a2020-09-15 15:58:16 +03001925 bytes_read += ge_size;
1926 } while (count >= bytes_read + ge_size);
Kent Gibson925ca362020-06-16 17:36:15 +08001927
1928 return bytes_read;
1929}
1930
Bartosz Golaszewskibdbbae22022-12-05 13:39:03 +01001931static ssize_t lineevent_read(struct file *file, char __user *buf,
1932 size_t count, loff_t *f_ps)
1933{
1934 struct lineevent_state *le = file->private_data;
1935
1936 return call_read_locked(file, buf, count, f_ps, le->gdev,
1937 lineevent_read_unlocked);
1938}
1939
Kent Gibson46824272020-07-08 12:15:56 +08001940static void lineevent_free(struct lineevent_state *le)
1941{
Bartosz Golaszewski91043f52023-08-17 15:28:31 +02001942 if (le->device_unregistered_nb.notifier_call)
1943 blocking_notifier_chain_unregister(&le->gdev->device_notifier,
1944 &le->device_unregistered_nb);
Kent Gibson46824272020-07-08 12:15:56 +08001945 if (le->irq)
1946 free_irq(le->irq, le);
1947 if (le->desc)
1948 gpiod_free(le->desc);
1949 kfree(le->label);
Andy Shevchenkodc0989e2022-12-28 11:20:43 +02001950 gpio_device_put(le->gdev);
Kent Gibson46824272020-07-08 12:15:56 +08001951 kfree(le);
1952}
1953
Kent Gibson49bc5272020-07-08 12:15:48 +08001954static int lineevent_release(struct inode *inode, struct file *file)
Kent Gibson925ca362020-06-16 17:36:15 +08001955{
Kent Gibson46824272020-07-08 12:15:56 +08001956 lineevent_free(file->private_data);
Kent Gibson925ca362020-06-16 17:36:15 +08001957 return 0;
1958}
1959
Bartosz Golaszewskibdbbae22022-12-05 13:39:03 +01001960static long lineevent_ioctl_unlocked(struct file *file, unsigned int cmd,
1961 unsigned long arg)
Kent Gibson925ca362020-06-16 17:36:15 +08001962{
Kent Gibson49bc5272020-07-08 12:15:48 +08001963 struct lineevent_state *le = file->private_data;
Kent Gibson925ca362020-06-16 17:36:15 +08001964 void __user *ip = (void __user *)arg;
1965 struct gpiohandle_data ghd;
1966
Bartosz Golaszewski533aae72022-12-05 13:39:02 +01001967 if (!le->gdev->chip)
1968 return -ENODEV;
1969
Kent Gibson925ca362020-06-16 17:36:15 +08001970 /*
1971 * We can get the value for an event line but not set it,
1972 * because it is input by definition.
1973 */
1974 if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
1975 int val;
1976
1977 memset(&ghd, 0, sizeof(ghd));
1978
1979 val = gpiod_get_value_cansleep(le->desc);
1980 if (val < 0)
1981 return val;
1982 ghd.values[0] = val;
1983
1984 if (copy_to_user(ip, &ghd, sizeof(ghd)))
1985 return -EFAULT;
1986
1987 return 0;
1988 }
1989 return -EINVAL;
1990}
1991
Bartosz Golaszewskibdbbae22022-12-05 13:39:03 +01001992static long lineevent_ioctl(struct file *file, unsigned int cmd,
1993 unsigned long arg)
1994{
1995 struct lineevent_state *le = file->private_data;
1996
1997 return call_ioctl_locked(file, cmd, arg, le->gdev,
1998 lineevent_ioctl_unlocked);
1999}
2000
Kent Gibson925ca362020-06-16 17:36:15 +08002001#ifdef CONFIG_COMPAT
Kent Gibson49bc5272020-07-08 12:15:48 +08002002static long lineevent_ioctl_compat(struct file *file, unsigned int cmd,
Kent Gibson925ca362020-06-16 17:36:15 +08002003 unsigned long arg)
2004{
Kent Gibson49bc5272020-07-08 12:15:48 +08002005 return lineevent_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
Kent Gibson925ca362020-06-16 17:36:15 +08002006}
2007#endif
2008
2009static const struct file_operations lineevent_fileops = {
2010 .release = lineevent_release,
2011 .read = lineevent_read,
2012 .poll = lineevent_poll,
2013 .owner = THIS_MODULE,
2014 .llseek = noop_llseek,
2015 .unlocked_ioctl = lineevent_ioctl,
2016#ifdef CONFIG_COMPAT
2017 .compat_ioctl = lineevent_ioctl_compat,
2018#endif
2019};
2020
2021static irqreturn_t lineevent_irq_thread(int irq, void *p)
2022{
2023 struct lineevent_state *le = p;
2024 struct gpioevent_data ge;
2025 int ret;
2026
2027 /* Do not leak kernel stack to userspace */
2028 memset(&ge, 0, sizeof(ge));
2029
2030 /*
2031 * We may be running from a nested threaded interrupt in which case
2032 * we didn't get the timestamp from lineevent_irq_handler().
2033 */
2034 if (!le->timestamp)
2035 ge.timestamp = ktime_get_ns();
2036 else
2037 ge.timestamp = le->timestamp;
2038
2039 if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
2040 && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
2041 int level = gpiod_get_value_cansleep(le->desc);
2042
2043 if (level)
2044 /* Emit low-to-high event */
2045 ge.id = GPIOEVENT_EVENT_RISING_EDGE;
2046 else
2047 /* Emit high-to-low event */
2048 ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
2049 } else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE) {
2050 /* Emit low-to-high event */
2051 ge.id = GPIOEVENT_EVENT_RISING_EDGE;
2052 } else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
2053 /* Emit high-to-low event */
2054 ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
2055 } else {
2056 return IRQ_NONE;
2057 }
2058
2059 ret = kfifo_in_spinlocked_noirqsave(&le->events, &ge,
2060 1, &le->wait.lock);
2061 if (ret)
2062 wake_up_poll(&le->wait, EPOLLIN);
2063 else
2064 pr_debug_ratelimited("event FIFO is full - event dropped\n");
2065
2066 return IRQ_HANDLED;
2067}
2068
2069static irqreturn_t lineevent_irq_handler(int irq, void *p)
2070{
2071 struct lineevent_state *le = p;
2072
2073 /*
2074 * Just store the timestamp in hardirq context so we get it as
2075 * close in time as possible to the actual event.
2076 */
2077 le->timestamp = ktime_get_ns();
2078
2079 return IRQ_WAKE_THREAD;
2080}
2081
2082static int lineevent_create(struct gpio_device *gdev, void __user *ip)
2083{
2084 struct gpioevent_request eventreq;
2085 struct lineevent_state *le;
2086 struct gpio_desc *desc;
2087 struct file *file;
2088 u32 offset;
2089 u32 lflags;
2090 u32 eflags;
2091 int fd;
2092 int ret;
Kent Gibson46824272020-07-08 12:15:56 +08002093 int irq, irqflags = 0;
Kent Gibson925ca362020-06-16 17:36:15 +08002094
2095 if (copy_from_user(&eventreq, ip, sizeof(eventreq)))
2096 return -EFAULT;
2097
2098 offset = eventreq.lineoffset;
2099 lflags = eventreq.handleflags;
2100 eflags = eventreq.eventflags;
2101
2102 desc = gpiochip_get_desc(gdev->chip, offset);
2103 if (IS_ERR(desc))
2104 return PTR_ERR(desc);
2105
 2106	/* Return an error if an unknown flag is set */
2107 if ((lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) ||
2108 (eflags & ~GPIOEVENT_REQUEST_VALID_FLAGS))
2109 return -EINVAL;
2110
2111 /* This is just wrong: we don't look for events on output lines */
2112 if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) ||
2113 (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
2114 (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
2115 return -EINVAL;
2116
2117 /* Only one bias flag can be set. */
2118 if (((lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
2119 (lflags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
2120 GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
2121 ((lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
2122 (lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
2123 return -EINVAL;
2124
2125 le = kzalloc(sizeof(*le), GFP_KERNEL);
2126 if (!le)
2127 return -ENOMEM;
Andy Shevchenkodc0989e2022-12-28 11:20:43 +02002128 le->gdev = gpio_device_get(gdev);
Kent Gibson925ca362020-06-16 17:36:15 +08002129
Kent Gibsonf188ac122020-10-05 15:02:46 +08002130 if (eventreq.consumer_label[0] != '\0') {
2131 /* label is only initialized if consumer_label is set */
2132 le->label = kstrndup(eventreq.consumer_label,
2133 sizeof(eventreq.consumer_label) - 1,
2134 GFP_KERNEL);
Kent Gibson925ca362020-06-16 17:36:15 +08002135 if (!le->label) {
2136 ret = -ENOMEM;
2137 goto out_free_le;
2138 }
2139 }
2140
Andy Shevchenko95a4eed2022-02-01 17:27:55 +02002141 ret = gpiod_request_user(desc, le->label);
Kent Gibson925ca362020-06-16 17:36:15 +08002142 if (ret)
Kent Gibson46824272020-07-08 12:15:56 +08002143 goto out_free_le;
Kent Gibson925ca362020-06-16 17:36:15 +08002144 le->desc = desc;
2145 le->eflags = eflags;
2146
Kent Gibsonc274b582020-07-08 12:15:47 +08002147 linehandle_flags_to_desc_flags(lflags, &desc->flags);
Kent Gibson925ca362020-06-16 17:36:15 +08002148
2149 ret = gpiod_direction_input(desc);
2150 if (ret)
Kent Gibson46824272020-07-08 12:15:56 +08002151 goto out_free_le;
Kent Gibson925ca362020-06-16 17:36:15 +08002152
Bartosz Golaszewski9ce4ed52023-08-21 16:18:27 +02002153 gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
Kent Gibson925ca362020-06-16 17:36:15 +08002154
Kent Gibson46824272020-07-08 12:15:56 +08002155 irq = gpiod_to_irq(desc);
2156 if (irq <= 0) {
Kent Gibson925ca362020-06-16 17:36:15 +08002157 ret = -ENODEV;
Kent Gibson46824272020-07-08 12:15:56 +08002158 goto out_free_le;
Kent Gibson925ca362020-06-16 17:36:15 +08002159 }
2160
2161 if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
2162 irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
2163 IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
2164 if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE)
2165 irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
2166 IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
2167 irqflags |= IRQF_ONESHOT;
2168
2169 INIT_KFIFO(le->events);
2170 init_waitqueue_head(&le->wait);
2171
Bartosz Golaszewski91043f52023-08-17 15:28:31 +02002172 le->device_unregistered_nb.notifier_call = lineevent_unregistered_notify;
2173 ret = blocking_notifier_chain_register(&gdev->device_notifier,
2174 &le->device_unregistered_nb);
2175 if (ret)
2176 goto out_free_le;
2177
Kent Gibson925ca362020-06-16 17:36:15 +08002178 /* Request a thread to read the events */
Meng Li69bef192022-09-21 11:20:20 +08002179 ret = request_threaded_irq(irq,
Kent Gibsona18512e2020-07-08 12:15:46 +08002180 lineevent_irq_handler,
2181 lineevent_irq_thread,
2182 irqflags,
2183 le->label,
2184 le);
Kent Gibson925ca362020-06-16 17:36:15 +08002185 if (ret)
Kent Gibson46824272020-07-08 12:15:56 +08002186 goto out_free_le;
Kent Gibson925ca362020-06-16 17:36:15 +08002187
Meng Li69bef192022-09-21 11:20:20 +08002188 le->irq = irq;
2189
Kent Gibson925ca362020-06-16 17:36:15 +08002190 fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
2191 if (fd < 0) {
2192 ret = fd;
Kent Gibson46824272020-07-08 12:15:56 +08002193 goto out_free_le;
Kent Gibson925ca362020-06-16 17:36:15 +08002194 }
2195
2196 file = anon_inode_getfile("gpio-event",
2197 &lineevent_fileops,
2198 le,
2199 O_RDONLY | O_CLOEXEC);
2200 if (IS_ERR(file)) {
2201 ret = PTR_ERR(file);
2202 goto out_put_unused_fd;
2203 }
2204
2205 eventreq.fd = fd;
2206 if (copy_to_user(ip, &eventreq, sizeof(eventreq))) {
2207 /*
2208 * fput() will trigger the release() callback, so do not go onto
2209 * the regular error cleanup path here.
2210 */
2211 fput(file);
2212 put_unused_fd(fd);
2213 return -EFAULT;
2214 }
2215
2216 fd_install(fd, file);
2217
2218 return 0;
2219
2220out_put_unused_fd:
2221 put_unused_fd(fd);
Kent Gibson925ca362020-06-16 17:36:15 +08002222out_free_le:
Kent Gibson46824272020-07-08 12:15:56 +08002223 lineevent_free(le);
Kent Gibson925ca362020-06-16 17:36:15 +08002224 return ret;
2225}
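
/*
 * Illustrative userspace sketch (not part of this driver): requesting a
 * deprecated v1 event on line 4 with GPIO_GET_LINEEVENT_IOCTL, serviced by
 * lineevent_create() above. Assumes "chip_fd" is an open /dev/gpiochipN
 * descriptor; error handling omitted.
 *
 *	struct gpioevent_request evreq;
 *
 *	memset(&evreq, 0, sizeof(evreq));
 *	evreq.lineoffset = 4;
 *	evreq.handleflags = GPIOHANDLE_REQUEST_INPUT;
 *	evreq.eventflags = GPIOEVENT_REQUEST_BOTH_EDGES;
 *	strcpy(evreq.consumer_label, "my-app");
 *	if (ioctl(chip_fd, GPIO_GET_LINEEVENT_IOCTL, &evreq) < 0)
 *		perror("GPIO_GET_LINEEVENT_IOCTL");
 *
 * Events are then read from evreq.fd as struct gpioevent_data records.
 */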
2226
Kent Gibsonaad95582020-09-28 08:27:55 +08002227static void gpio_v2_line_info_to_v1(struct gpio_v2_line_info *info_v2,
2228 struct gpioline_info *info_v1)
2229{
2230 u64 flagsv2 = info_v2->flags;
2231
2232 memcpy(info_v1->name, info_v2->name, sizeof(info_v1->name));
2233 memcpy(info_v1->consumer, info_v2->consumer, sizeof(info_v1->consumer));
2234 info_v1->line_offset = info_v2->offset;
2235 info_v1->flags = 0;
2236
2237 if (flagsv2 & GPIO_V2_LINE_FLAG_USED)
2238 info_v1->flags |= GPIOLINE_FLAG_KERNEL;
2239
2240 if (flagsv2 & GPIO_V2_LINE_FLAG_OUTPUT)
2241 info_v1->flags |= GPIOLINE_FLAG_IS_OUT;
2242
2243 if (flagsv2 & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
2244 info_v1->flags |= GPIOLINE_FLAG_ACTIVE_LOW;
2245
2246 if (flagsv2 & GPIO_V2_LINE_FLAG_OPEN_DRAIN)
2247 info_v1->flags |= GPIOLINE_FLAG_OPEN_DRAIN;
2248 if (flagsv2 & GPIO_V2_LINE_FLAG_OPEN_SOURCE)
2249 info_v1->flags |= GPIOLINE_FLAG_OPEN_SOURCE;
2250
2251 if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_PULL_UP)
2252 info_v1->flags |= GPIOLINE_FLAG_BIAS_PULL_UP;
2253 if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN)
2254 info_v1->flags |= GPIOLINE_FLAG_BIAS_PULL_DOWN;
2255 if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_DISABLED)
2256 info_v1->flags |= GPIOLINE_FLAG_BIAS_DISABLE;
2257}
2258
2259static void gpio_v2_line_info_changed_to_v1(
2260 struct gpio_v2_line_info_changed *lic_v2,
2261 struct gpioline_info_changed *lic_v1)
2262{
Gabriel Knezekcb8f63b2021-06-21 15:28:59 -07002263 memset(lic_v1, 0, sizeof(*lic_v1));
Kent Gibsonaad95582020-09-28 08:27:55 +08002264 gpio_v2_line_info_to_v1(&lic_v2->info, &lic_v1->info);
2265 lic_v1->timestamp = lic_v2->timestamp_ns;
2266 lic_v1->event_type = lic_v2->event_type;
2267}
2268
Kent Gibson3c0d9c62020-09-28 08:27:54 +08002269#endif /* CONFIG_GPIO_CDEV_V1 */
2270
Kent Gibson925ca362020-06-16 17:36:15 +08002271static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
Kent Gibsonaad95582020-09-28 08:27:55 +08002272 struct gpio_v2_line_info *info)
Kent Gibson925ca362020-06-16 17:36:15 +08002273{
2274 struct gpio_chip *gc = desc->gdev->chip;
2275 bool ok_for_pinctrl;
2276 unsigned long flags;
Kent Gibson65cff702020-09-28 08:27:59 +08002277 u32 debounce_period_us;
2278 unsigned int num_attrs = 0;
Kent Gibson925ca362020-06-16 17:36:15 +08002279
Kent Gibson69e4e132020-09-28 08:27:49 +08002280 memset(info, 0, sizeof(*info));
Kent Gibsonaad95582020-09-28 08:27:55 +08002281 info->offset = gpio_chip_hwgpio(desc);
Kent Gibson925ca362020-06-16 17:36:15 +08002282
2283 /*
 2284	 * pinctrl_gpio_can_use_line() takes a mutex, so we must call it
 2285	 * before taking the spinlock.
2286 *
2287 * FIXME: find a non-racy way to retrieve this information. Maybe a
2288 * lock common to both frameworks?
2289 */
2290 ok_for_pinctrl =
Kent Gibsonaad95582020-09-28 08:27:55 +08002291 pinctrl_gpio_can_use_line(gc->base + info->offset);
Kent Gibson925ca362020-06-16 17:36:15 +08002292
2293 spin_lock_irqsave(&gpio_lock, flags);
2294
Kent Gibson69e4e132020-09-28 08:27:49 +08002295 if (desc->name)
2296 strscpy(info->name, desc->name, sizeof(info->name));
Kent Gibson925ca362020-06-16 17:36:15 +08002297
Kent Gibson69e4e132020-09-28 08:27:49 +08002298 if (desc->label)
2299 strscpy(info->consumer, desc->label, sizeof(info->consumer));
Kent Gibson925ca362020-06-16 17:36:15 +08002300
2301 /*
 2302	 * Userspace only needs to know that the kernel is using this GPIO so
2303 * it can't use it.
2304 */
2305 info->flags = 0;
2306 if (test_bit(FLAG_REQUESTED, &desc->flags) ||
2307 test_bit(FLAG_IS_HOGGED, &desc->flags) ||
2308 test_bit(FLAG_USED_AS_IRQ, &desc->flags) ||
2309 test_bit(FLAG_EXPORT, &desc->flags) ||
2310 test_bit(FLAG_SYSFS, &desc->flags) ||
Marc Zyngiera0db1972020-12-04 16:47:36 +00002311 !gpiochip_line_is_valid(gc, info->offset) ||
Kent Gibson925ca362020-06-16 17:36:15 +08002312 !ok_for_pinctrl)
Kent Gibsonaad95582020-09-28 08:27:55 +08002313 info->flags |= GPIO_V2_LINE_FLAG_USED;
2314
Kent Gibson925ca362020-06-16 17:36:15 +08002315 if (test_bit(FLAG_IS_OUT, &desc->flags))
Kent Gibsonaad95582020-09-28 08:27:55 +08002316 info->flags |= GPIO_V2_LINE_FLAG_OUTPUT;
2317 else
2318 info->flags |= GPIO_V2_LINE_FLAG_INPUT;
2319
Kent Gibson925ca362020-06-16 17:36:15 +08002320 if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
Kent Gibsonaad95582020-09-28 08:27:55 +08002321 info->flags |= GPIO_V2_LINE_FLAG_ACTIVE_LOW;
2322
Kent Gibson925ca362020-06-16 17:36:15 +08002323 if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
Kent Gibsonaad95582020-09-28 08:27:55 +08002324 info->flags |= GPIO_V2_LINE_FLAG_OPEN_DRAIN;
Kent Gibson925ca362020-06-16 17:36:15 +08002325 if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
Kent Gibsonaad95582020-09-28 08:27:55 +08002326 info->flags |= GPIO_V2_LINE_FLAG_OPEN_SOURCE;
2327
Kent Gibson925ca362020-06-16 17:36:15 +08002328 if (test_bit(FLAG_BIAS_DISABLE, &desc->flags))
Kent Gibsonaad95582020-09-28 08:27:55 +08002329 info->flags |= GPIO_V2_LINE_FLAG_BIAS_DISABLED;
Kent Gibson925ca362020-06-16 17:36:15 +08002330 if (test_bit(FLAG_PULL_DOWN, &desc->flags))
Kent Gibsonaad95582020-09-28 08:27:55 +08002331 info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN;
Kent Gibson925ca362020-06-16 17:36:15 +08002332 if (test_bit(FLAG_PULL_UP, &desc->flags))
Kent Gibsonaad95582020-09-28 08:27:55 +08002333 info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_UP;
Kent Gibson925ca362020-06-16 17:36:15 +08002334
Kent Gibson73e03412020-09-28 08:27:56 +08002335 if (test_bit(FLAG_EDGE_RISING, &desc->flags))
2336 info->flags |= GPIO_V2_LINE_FLAG_EDGE_RISING;
2337 if (test_bit(FLAG_EDGE_FALLING, &desc->flags))
2338 info->flags |= GPIO_V2_LINE_FLAG_EDGE_FALLING;
2339
Kent Gibson26d060e2020-10-15 07:11:56 +08002340 if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &desc->flags))
2341 info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME;
Dipen Patel20683392022-04-22 13:52:18 -07002342 else if (test_bit(FLAG_EVENT_CLOCK_HTE, &desc->flags))
2343 info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE;
Kent Gibson26d060e2020-10-15 07:11:56 +08002344
Kent Gibson65cff702020-09-28 08:27:59 +08002345 debounce_period_us = READ_ONCE(desc->debounce_period_us);
2346 if (debounce_period_us) {
2347 info->attrs[num_attrs].id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE;
2348 info->attrs[num_attrs].debounce_period_us = debounce_period_us;
2349 num_attrs++;
2350 }
2351 info->num_attrs = num_attrs;
Kent Gibson925ca362020-06-16 17:36:15 +08002352
2353 spin_unlock_irqrestore(&gpio_lock, flags);
2354}
2355
2356struct gpio_chardev_data {
2357 struct gpio_device *gdev;
2358 wait_queue_head_t wait;
Kent Gibsonaad95582020-09-28 08:27:55 +08002359 DECLARE_KFIFO(events, struct gpio_v2_line_info_changed, 32);
Kent Gibson925ca362020-06-16 17:36:15 +08002360 struct notifier_block lineinfo_changed_nb;
Bartosz Golaszewskid2e25862023-08-17 14:42:24 +02002361 struct notifier_block device_unregistered_nb;
Kent Gibson925ca362020-06-16 17:36:15 +08002362 unsigned long *watched_lines;
Kent Gibsonaad95582020-09-28 08:27:55 +08002363#ifdef CONFIG_GPIO_CDEV_V1
2364 atomic_t watch_abi_version;
2365#endif
Kent Gibson925ca362020-06-16 17:36:15 +08002366};
2367
Kent Gibson2e202ad2020-12-28 00:10:40 +08002368static int chipinfo_get(struct gpio_chardev_data *cdev, void __user *ip)
2369{
2370 struct gpio_device *gdev = cdev->gdev;
2371 struct gpiochip_info chipinfo;
2372
2373 memset(&chipinfo, 0, sizeof(chipinfo));
2374
2375 strscpy(chipinfo.name, dev_name(&gdev->dev), sizeof(chipinfo.name));
2376 strscpy(chipinfo.label, gdev->label, sizeof(chipinfo.label));
2377 chipinfo.lines = gdev->ngpio;
2378 if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
2379 return -EFAULT;
2380 return 0;
2381}
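
/*
 * Illustrative userspace sketch (not part of this driver): querying the chip
 * with GPIO_GET_CHIPINFO_IOCTL as handled above. Assumes "chip_fd" is an open
 * /dev/gpiochipN descriptor; error handling omitted.
 *
 *	struct gpiochip_info ci;
 *
 *	if (ioctl(chip_fd, GPIO_GET_CHIPINFO_IOCTL, &ci) == 0)
 *		printf("%s [%s] (%u lines)\n", ci.name, ci.label, ci.lines);
 */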
2382
Kent Gibsonaad95582020-09-28 08:27:55 +08002383#ifdef CONFIG_GPIO_CDEV_V1
2384/*
2385 * returns 0 if the versions match, else the previously selected ABI version
2386 */
2387static int lineinfo_ensure_abi_version(struct gpio_chardev_data *cdata,
2388 unsigned int version)
2389{
2390 int abiv = atomic_cmpxchg(&cdata->watch_abi_version, 0, version);
2391
2392 if (abiv == version)
2393 return 0;
2394
2395 return abiv;
2396}
Kent Gibson2e202ad2020-12-28 00:10:40 +08002397
2398static int lineinfo_get_v1(struct gpio_chardev_data *cdev, void __user *ip,
2399 bool watch)
2400{
2401 struct gpio_desc *desc;
2402 struct gpioline_info lineinfo;
2403 struct gpio_v2_line_info lineinfo_v2;
2404
2405 if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
2406 return -EFAULT;
2407
2408 /* this doubles as a range check on line_offset */
2409 desc = gpiochip_get_desc(cdev->gdev->chip, lineinfo.line_offset);
2410 if (IS_ERR(desc))
2411 return PTR_ERR(desc);
2412
2413 if (watch) {
2414 if (lineinfo_ensure_abi_version(cdev, 1))
2415 return -EPERM;
2416
2417 if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines))
2418 return -EBUSY;
2419 }
2420
2421 gpio_desc_to_lineinfo(desc, &lineinfo_v2);
2422 gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
2423
2424 if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
2425 if (watch)
2426 clear_bit(lineinfo.line_offset, cdev->watched_lines);
2427 return -EFAULT;
2428 }
2429
2430 return 0;
2431}
Kent Gibsonaad95582020-09-28 08:27:55 +08002432#endif
2433
2434static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
2435 bool watch)
2436{
2437 struct gpio_desc *desc;
2438 struct gpio_v2_line_info lineinfo;
2439
2440 if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
2441 return -EFAULT;
2442
2443 if (memchr_inv(lineinfo.padding, 0, sizeof(lineinfo.padding)))
2444 return -EINVAL;
2445
2446 desc = gpiochip_get_desc(cdev->gdev->chip, lineinfo.offset);
2447 if (IS_ERR(desc))
2448 return PTR_ERR(desc);
2449
2450 if (watch) {
2451#ifdef CONFIG_GPIO_CDEV_V1
2452 if (lineinfo_ensure_abi_version(cdev, 2))
2453 return -EPERM;
2454#endif
2455 if (test_and_set_bit(lineinfo.offset, cdev->watched_lines))
2456 return -EBUSY;
2457 }
2458 gpio_desc_to_lineinfo(desc, &lineinfo);
2459
2460 if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
2461 if (watch)
2462 clear_bit(lineinfo.offset, cdev->watched_lines);
2463 return -EFAULT;
2464 }
2465
2466 return 0;
2467}
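
/*
 * Illustrative userspace sketch (not part of this driver): reading v2 line
 * info for line 4 with GPIO_V2_GET_LINEINFO_IOCTL, serviced by lineinfo_get()
 * above. Assumes "chip_fd" is an open /dev/gpiochipN descriptor; error
 * handling omitted.
 *
 *	struct gpio_v2_line_info li;
 *
 *	memset(&li, 0, sizeof(li));
 *	li.offset = 4;
 *	if (ioctl(chip_fd, GPIO_V2_GET_LINEINFO_IOCTL, &li) == 0)
 *		printf("line %u: name=%s used=%d\n", li.offset, li.name,
 *		       !!(li.flags & GPIO_V2_LINE_FLAG_USED));
 */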
2468
Kent Gibson2e202ad2020-12-28 00:10:40 +08002469static int lineinfo_unwatch(struct gpio_chardev_data *cdev, void __user *ip)
2470{
2471 __u32 offset;
2472
2473 if (copy_from_user(&offset, ip, sizeof(offset)))
2474 return -EFAULT;
2475
2476 if (offset >= cdev->gdev->ngpio)
2477 return -EINVAL;
2478
2479 if (!test_and_clear_bit(offset, cdev->watched_lines))
2480 return -EBUSY;
2481
2482 return 0;
2483}
2484
Kent Gibson925ca362020-06-16 17:36:15 +08002485/*
2486 * gpio_ioctl() - ioctl handler for the GPIO chardev
2487 */
Kent Gibson49bc5272020-07-08 12:15:48 +08002488static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
Kent Gibson925ca362020-06-16 17:36:15 +08002489{
Kent Gibsone2b781c52020-07-08 12:15:52 +08002490 struct gpio_chardev_data *cdev = file->private_data;
2491 struct gpio_device *gdev = cdev->gdev;
Kent Gibson925ca362020-06-16 17:36:15 +08002492 void __user *ip = (void __user *)arg;
Kent Gibson925ca362020-06-16 17:36:15 +08002493
 2494	/* We fail any subsequent ioctl()s when the chip is gone */
Kent Gibson2e202ad2020-12-28 00:10:40 +08002495 if (!gdev->chip)
Kent Gibson925ca362020-06-16 17:36:15 +08002496 return -ENODEV;
2497
 2498	/* Dispatch to the handler for the requested command */
Andy Shevchenko1cef8b52022-03-30 18:06:20 +03002499 switch (cmd) {
2500 case GPIO_GET_CHIPINFO_IOCTL:
Kent Gibson2e202ad2020-12-28 00:10:40 +08002501 return chipinfo_get(cdev, ip);
Kent Gibson3c0d9c62020-09-28 08:27:54 +08002502#ifdef CONFIG_GPIO_CDEV_V1
Andy Shevchenko1cef8b52022-03-30 18:06:20 +03002503 case GPIO_GET_LINEHANDLE_IOCTL:
Kent Gibson925ca362020-06-16 17:36:15 +08002504 return linehandle_create(gdev, ip);
Andy Shevchenko1cef8b52022-03-30 18:06:20 +03002505 case GPIO_GET_LINEEVENT_IOCTL:
Kent Gibson925ca362020-06-16 17:36:15 +08002506 return lineevent_create(gdev, ip);
Andy Shevchenko1cef8b52022-03-30 18:06:20 +03002507 case GPIO_GET_LINEINFO_IOCTL:
2508 return lineinfo_get_v1(cdev, ip, false);
2509 case GPIO_GET_LINEINFO_WATCH_IOCTL:
2510 return lineinfo_get_v1(cdev, ip, true);
Kent Gibson3c0d9c62020-09-28 08:27:54 +08002511#endif /* CONFIG_GPIO_CDEV_V1 */
Andy Shevchenko1cef8b52022-03-30 18:06:20 +03002512 case GPIO_V2_GET_LINEINFO_IOCTL:
2513 return lineinfo_get(cdev, ip, false);
2514 case GPIO_V2_GET_LINEINFO_WATCH_IOCTL:
2515 return lineinfo_get(cdev, ip, true);
2516 case GPIO_V2_GET_LINE_IOCTL:
Kent Gibson3c0d9c62020-09-28 08:27:54 +08002517 return linereq_create(gdev, ip);
Andy Shevchenko1cef8b52022-03-30 18:06:20 +03002518 case GPIO_GET_LINEINFO_UNWATCH_IOCTL:
Kent Gibson2e202ad2020-12-28 00:10:40 +08002519 return lineinfo_unwatch(cdev, ip);
Andy Shevchenko1cef8b52022-03-30 18:06:20 +03002520 default:
2521 return -EINVAL;
Kent Gibson925ca362020-06-16 17:36:15 +08002522 }
Kent Gibson925ca362020-06-16 17:36:15 +08002523}
2524
2525#ifdef CONFIG_COMPAT
Kent Gibson49bc5272020-07-08 12:15:48 +08002526static long gpio_ioctl_compat(struct file *file, unsigned int cmd,
Kent Gibson925ca362020-06-16 17:36:15 +08002527 unsigned long arg)
2528{
Kent Gibson49bc5272020-07-08 12:15:48 +08002529 return gpio_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
Kent Gibson925ca362020-06-16 17:36:15 +08002530}
2531#endif
2532
Kent Gibson925ca362020-06-16 17:36:15 +08002533static int lineinfo_changed_notify(struct notifier_block *nb,
2534 unsigned long action, void *data)
2535{
Bartosz Golaszewskie82bbd62023-08-17 13:42:34 +02002536 struct gpio_chardev_data *cdev =
2537 container_of(nb, struct gpio_chardev_data, lineinfo_changed_nb);
Kent Gibsonaad95582020-09-28 08:27:55 +08002538 struct gpio_v2_line_info_changed chg;
Kent Gibson925ca362020-06-16 17:36:15 +08002539 struct gpio_desc *desc = data;
2540 int ret;
2541
Kent Gibsone2b781c52020-07-08 12:15:52 +08002542 if (!test_bit(gpio_chip_hwgpio(desc), cdev->watched_lines))
Kent Gibson925ca362020-06-16 17:36:15 +08002543 return NOTIFY_DONE;
2544
2545 memset(&chg, 0, sizeof(chg));
Kent Gibson925ca362020-06-16 17:36:15 +08002546 chg.event_type = action;
Kent Gibsonaad95582020-09-28 08:27:55 +08002547 chg.timestamp_ns = ktime_get_ns();
Kent Gibson925ca362020-06-16 17:36:15 +08002548 gpio_desc_to_lineinfo(desc, &chg.info);
2549
Kent Gibsone2b781c52020-07-08 12:15:52 +08002550 ret = kfifo_in_spinlocked(&cdev->events, &chg, 1, &cdev->wait.lock);
Kent Gibson925ca362020-06-16 17:36:15 +08002551 if (ret)
Kent Gibsone2b781c52020-07-08 12:15:52 +08002552 wake_up_poll(&cdev->wait, EPOLLIN);
Kent Gibson925ca362020-06-16 17:36:15 +08002553 else
2554 pr_debug_ratelimited("lineinfo event FIFO is full - event dropped\n");
2555
2556 return NOTIFY_OK;
2557}
2558
Bartosz Golaszewskid2e25862023-08-17 14:42:24 +02002559static int gpio_device_unregistered_notify(struct notifier_block *nb,
2560 unsigned long action, void *data)
2561{
2562 struct gpio_chardev_data *cdev = container_of(nb,
2563 struct gpio_chardev_data,
2564 device_unregistered_nb);
2565
2566 wake_up_poll(&cdev->wait, EPOLLIN | EPOLLERR);
2567
2568 return NOTIFY_OK;
2569}
2570
Bartosz Golaszewskibdbbae22022-12-05 13:39:03 +01002571static __poll_t lineinfo_watch_poll_unlocked(struct file *file,
2572 struct poll_table_struct *pollt)
Kent Gibson925ca362020-06-16 17:36:15 +08002573{
Kent Gibsone2b781c52020-07-08 12:15:52 +08002574 struct gpio_chardev_data *cdev = file->private_data;
Kent Gibson925ca362020-06-16 17:36:15 +08002575 __poll_t events = 0;
2576
Bartosz Golaszewski533aae72022-12-05 13:39:02 +01002577 if (!cdev->gdev->chip)
2578 return EPOLLHUP | EPOLLERR;
2579
Kent Gibsone2b781c52020-07-08 12:15:52 +08002580 poll_wait(file, &cdev->wait, pollt);
Kent Gibson925ca362020-06-16 17:36:15 +08002581
Kent Gibsone2b781c52020-07-08 12:15:52 +08002582 if (!kfifo_is_empty_spinlocked_noirqsave(&cdev->events,
2583 &cdev->wait.lock))
Kent Gibson925ca362020-06-16 17:36:15 +08002584 events = EPOLLIN | EPOLLRDNORM;
2585
2586 return events;
2587}
2588
Bartosz Golaszewskibdbbae22022-12-05 13:39:03 +01002589static __poll_t lineinfo_watch_poll(struct file *file,
2590 struct poll_table_struct *pollt)
2591{
2592 struct gpio_chardev_data *cdev = file->private_data;
2593
2594 return call_poll_locked(file, pollt, cdev->gdev,
2595 lineinfo_watch_poll_unlocked);
2596}
2597
2598static ssize_t lineinfo_watch_read_unlocked(struct file *file, char __user *buf,
2599 size_t count, loff_t *off)
Kent Gibson925ca362020-06-16 17:36:15 +08002600{
Kent Gibsone2b781c52020-07-08 12:15:52 +08002601 struct gpio_chardev_data *cdev = file->private_data;
Kent Gibsonaad95582020-09-28 08:27:55 +08002602 struct gpio_v2_line_info_changed event;
Kent Gibson925ca362020-06-16 17:36:15 +08002603 ssize_t bytes_read = 0;
2604 int ret;
Kent Gibsonaad95582020-09-28 08:27:55 +08002605 size_t event_size;
Kent Gibson925ca362020-06-16 17:36:15 +08002606
Bartosz Golaszewski533aae72022-12-05 13:39:02 +01002607 if (!cdev->gdev->chip)
2608 return -ENODEV;
2609
Kent Gibsonaad95582020-09-28 08:27:55 +08002610#ifndef CONFIG_GPIO_CDEV_V1
2611 event_size = sizeof(struct gpio_v2_line_info_changed);
2612 if (count < event_size)
Kent Gibson925ca362020-06-16 17:36:15 +08002613 return -EINVAL;
Kent Gibsonaad95582020-09-28 08:27:55 +08002614#endif
Kent Gibson925ca362020-06-16 17:36:15 +08002615
2616 do {
Kent Gibsone2b781c52020-07-08 12:15:52 +08002617 spin_lock(&cdev->wait.lock);
2618 if (kfifo_is_empty(&cdev->events)) {
Kent Gibson925ca362020-06-16 17:36:15 +08002619 if (bytes_read) {
Kent Gibsone2b781c52020-07-08 12:15:52 +08002620 spin_unlock(&cdev->wait.lock);
Kent Gibson925ca362020-06-16 17:36:15 +08002621 return bytes_read;
2622 }
2623
Kent Gibson49bc5272020-07-08 12:15:48 +08002624 if (file->f_flags & O_NONBLOCK) {
Kent Gibsone2b781c52020-07-08 12:15:52 +08002625 spin_unlock(&cdev->wait.lock);
Kent Gibson925ca362020-06-16 17:36:15 +08002626 return -EAGAIN;
2627 }
2628
Kent Gibsone2b781c52020-07-08 12:15:52 +08002629 ret = wait_event_interruptible_locked(cdev->wait,
2630 !kfifo_is_empty(&cdev->events));
Kent Gibson925ca362020-06-16 17:36:15 +08002631 if (ret) {
Kent Gibsone2b781c52020-07-08 12:15:52 +08002632 spin_unlock(&cdev->wait.lock);
Kent Gibson925ca362020-06-16 17:36:15 +08002633 return ret;
2634 }
2635 }
Kent Gibsonaad95582020-09-28 08:27:55 +08002636#ifdef CONFIG_GPIO_CDEV_V1
2637 /* must be after kfifo check so watch_abi_version is set */
2638 if (atomic_read(&cdev->watch_abi_version) == 2)
2639 event_size = sizeof(struct gpio_v2_line_info_changed);
2640 else
2641 event_size = sizeof(struct gpioline_info_changed);
2642 if (count < event_size) {
2643 spin_unlock(&cdev->wait.lock);
2644 return -EINVAL;
2645 }
2646#endif
Kent Gibsone2b781c52020-07-08 12:15:52 +08002647 ret = kfifo_out(&cdev->events, &event, 1);
2648 spin_unlock(&cdev->wait.lock);
Kent Gibson925ca362020-06-16 17:36:15 +08002649 if (ret != 1) {
 2650			/* We should never get here. See lineevent_read(). */
 2651			ret = -EIO;
 2652			break;
2653 }
2654
Kent Gibsonaad95582020-09-28 08:27:55 +08002655#ifdef CONFIG_GPIO_CDEV_V1
2656 if (event_size == sizeof(struct gpio_v2_line_info_changed)) {
2657 if (copy_to_user(buf + bytes_read, &event, event_size))
2658 return -EFAULT;
2659 } else {
2660 struct gpioline_info_changed event_v1;
2661
2662 gpio_v2_line_info_changed_to_v1(&event, &event_v1);
2663 if (copy_to_user(buf + bytes_read, &event_v1,
2664 event_size))
2665 return -EFAULT;
2666 }
2667#else
2668 if (copy_to_user(buf + bytes_read, &event, event_size))
Kent Gibson925ca362020-06-16 17:36:15 +08002669 return -EFAULT;
Kent Gibsonaad95582020-09-28 08:27:55 +08002670#endif
2671 bytes_read += event_size;
Kent Gibson925ca362020-06-16 17:36:15 +08002672 } while (count >= bytes_read + sizeof(event));
2673
2674 return bytes_read;
2675}
2676
Bartosz Golaszewskibdbbae22022-12-05 13:39:03 +01002677static ssize_t lineinfo_watch_read(struct file *file, char __user *buf,
2678 size_t count, loff_t *off)
2679{
2680 struct gpio_chardev_data *cdev = file->private_data;
2681
2682 return call_read_locked(file, buf, count, off, cdev->gdev,
2683 lineinfo_watch_read_unlocked);
2684}
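
/*
 * Illustrative userspace sketch (not part of this driver): watching line 4
 * and reading line-state change events from the chip fd, as serviced by
 * lineinfo_watch_read() above. Assumes "chip_fd" is an open /dev/gpiochipN
 * descriptor; error handling omitted.
 *
 *	struct gpio_v2_line_info li;
 *	struct gpio_v2_line_info_changed chg;
 *
 *	memset(&li, 0, sizeof(li));
 *	li.offset = 4;
 *	ioctl(chip_fd, GPIO_V2_GET_LINEINFO_WATCH_IOCTL, &li);
 *	while (read(chip_fd, &chg, sizeof(chg)) == sizeof(chg))
 *		printf("line %u: event %u at %llu ns\n", chg.info.offset,
 *		       chg.event_type, (unsigned long long)chg.timestamp_ns);
 */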
2685
Kent Gibson925ca362020-06-16 17:36:15 +08002686/**
2687 * gpio_chrdev_open() - open the chardev for ioctl operations
2688 * @inode: inode for this chardev
Kent Gibson49bc5272020-07-08 12:15:48 +08002689 * @file: file struct for storing private data
Kent Gibson925ca362020-06-16 17:36:15 +08002690 * Returns 0 on success
2691 */
Kent Gibson49bc5272020-07-08 12:15:48 +08002692static int gpio_chrdev_open(struct inode *inode, struct file *file)
Kent Gibson925ca362020-06-16 17:36:15 +08002693{
2694 struct gpio_device *gdev = container_of(inode->i_cdev,
Kent Gibsona18512e2020-07-08 12:15:46 +08002695 struct gpio_device, chrdev);
Kent Gibsone2b781c52020-07-08 12:15:52 +08002696 struct gpio_chardev_data *cdev;
Kent Gibson925ca362020-06-16 17:36:15 +08002697 int ret = -ENOMEM;
2698
Bartosz Golaszewskibdbbae22022-12-05 13:39:03 +01002699 down_read(&gdev->sem);
2700
Kent Gibson925ca362020-06-16 17:36:15 +08002701 /* Fail on open if the backing gpiochip is gone */
Bartosz Golaszewskibdbbae22022-12-05 13:39:03 +01002702 if (!gdev->chip) {
2703 ret = -ENODEV;
2704 goto out_unlock;
2705 }
Kent Gibson925ca362020-06-16 17:36:15 +08002706
Kent Gibsone2b781c52020-07-08 12:15:52 +08002707 cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
2708 if (!cdev)
Bartosz Golaszewskibdbbae22022-12-05 13:39:03 +01002709 goto out_unlock;
Kent Gibson925ca362020-06-16 17:36:15 +08002710
Kent Gibsone2b781c52020-07-08 12:15:52 +08002711 cdev->watched_lines = bitmap_zalloc(gdev->chip->ngpio, GFP_KERNEL);
2712 if (!cdev->watched_lines)
2713 goto out_free_cdev;
Kent Gibson925ca362020-06-16 17:36:15 +08002714
Kent Gibsone2b781c52020-07-08 12:15:52 +08002715 init_waitqueue_head(&cdev->wait);
2716 INIT_KFIFO(cdev->events);
Andy Shevchenkodc0989e2022-12-28 11:20:43 +02002717 cdev->gdev = gpio_device_get(gdev);
Kent Gibson925ca362020-06-16 17:36:15 +08002718
Kent Gibsone2b781c52020-07-08 12:15:52 +08002719 cdev->lineinfo_changed_nb.notifier_call = lineinfo_changed_notify;
Bartosz Golaszewski17a7ca32023-08-17 10:52:28 +02002720 ret = blocking_notifier_chain_register(&gdev->line_state_notifier,
Kent Gibsone2b781c52020-07-08 12:15:52 +08002721 &cdev->lineinfo_changed_nb);
Kent Gibson925ca362020-06-16 17:36:15 +08002722 if (ret)
2723 goto out_free_bitmap;
2724
Bartosz Golaszewskid2e25862023-08-17 14:42:24 +02002725 cdev->device_unregistered_nb.notifier_call =
2726 gpio_device_unregistered_notify;
2727 ret = blocking_notifier_chain_register(&gdev->device_notifier,
2728 &cdev->device_unregistered_nb);
2729 if (ret)
2730 goto out_unregister_line_notifier;
2731
Kent Gibsone2b781c52020-07-08 12:15:52 +08002732 file->private_data = cdev;
Kent Gibson925ca362020-06-16 17:36:15 +08002733
Kent Gibson49bc5272020-07-08 12:15:48 +08002734 ret = nonseekable_open(inode, file);
Kent Gibson925ca362020-06-16 17:36:15 +08002735 if (ret)
Bartosz Golaszewskid2e25862023-08-17 14:42:24 +02002736 goto out_unregister_device_notifier;
Kent Gibson925ca362020-06-16 17:36:15 +08002737
Bartosz Golaszewskibdbbae22022-12-05 13:39:03 +01002738 up_read(&gdev->sem);
2739
Kent Gibson925ca362020-06-16 17:36:15 +08002740 return ret;
2741
Bartosz Golaszewskid2e25862023-08-17 14:42:24 +02002742out_unregister_device_notifier:
2743 blocking_notifier_chain_unregister(&gdev->device_notifier,
2744 &cdev->device_unregistered_nb);
2745out_unregister_line_notifier:
Bartosz Golaszewski17a7ca32023-08-17 10:52:28 +02002746 blocking_notifier_chain_unregister(&gdev->line_state_notifier,
Kent Gibsone2b781c52020-07-08 12:15:52 +08002747 &cdev->lineinfo_changed_nb);
Kent Gibson925ca362020-06-16 17:36:15 +08002748out_free_bitmap:
Andy Shevchenkodc0989e2022-12-28 11:20:43 +02002749 gpio_device_put(gdev);
Kent Gibsone2b781c52020-07-08 12:15:52 +08002750 bitmap_free(cdev->watched_lines);
2751out_free_cdev:
2752 kfree(cdev);
Bartosz Golaszewskibdbbae22022-12-05 13:39:03 +01002753out_unlock:
2754 up_read(&gdev->sem);
Kent Gibson925ca362020-06-16 17:36:15 +08002755 return ret;
2756}
2757
2758/**
2759 * gpio_chrdev_release() - close chardev after ioctl operations
2760 * @inode: inode for this chardev
Kent Gibson49bc5272020-07-08 12:15:48 +08002761 * @file: file struct for storing private data
Kent Gibson925ca362020-06-16 17:36:15 +08002762 * Returns 0 on success
2763 */
Kent Gibson49bc5272020-07-08 12:15:48 +08002764static int gpio_chrdev_release(struct inode *inode, struct file *file)
Kent Gibson925ca362020-06-16 17:36:15 +08002765{
Kent Gibsone2b781c52020-07-08 12:15:52 +08002766 struct gpio_chardev_data *cdev = file->private_data;
2767 struct gpio_device *gdev = cdev->gdev;
Kent Gibson925ca362020-06-16 17:36:15 +08002768
Kent Gibsone2b781c52020-07-08 12:15:52 +08002769 bitmap_free(cdev->watched_lines);
Bartosz Golaszewskid2e25862023-08-17 14:42:24 +02002770 blocking_notifier_chain_unregister(&gdev->device_notifier,
2771 &cdev->device_unregistered_nb);
Bartosz Golaszewski17a7ca32023-08-17 10:52:28 +02002772 blocking_notifier_chain_unregister(&gdev->line_state_notifier,
Kent Gibsone2b781c52020-07-08 12:15:52 +08002773 &cdev->lineinfo_changed_nb);
Andy Shevchenkodc0989e2022-12-28 11:20:43 +02002774 gpio_device_put(gdev);
Kent Gibsone2b781c52020-07-08 12:15:52 +08002775 kfree(cdev);
Kent Gibson925ca362020-06-16 17:36:15 +08002776
2777 return 0;
2778}
2779
2780static const struct file_operations gpio_fileops = {
2781 .release = gpio_chrdev_release,
2782 .open = gpio_chrdev_open,
2783 .poll = lineinfo_watch_poll,
2784 .read = lineinfo_watch_read,
2785 .owner = THIS_MODULE,
2786 .llseek = no_llseek,
2787 .unlocked_ioctl = gpio_ioctl,
2788#ifdef CONFIG_COMPAT
2789 .compat_ioctl = gpio_ioctl_compat,
2790#endif
2791};
2792
2793int gpiolib_cdev_register(struct gpio_device *gdev, dev_t devt)
2794{
2795 int ret;
2796
2797 cdev_init(&gdev->chrdev, &gpio_fileops);
2798 gdev->chrdev.owner = THIS_MODULE;
2799 gdev->dev.devt = MKDEV(MAJOR(devt), gdev->id);
2800
2801 ret = cdev_device_add(&gdev->chrdev, &gdev->dev);
2802 if (ret)
2803 return ret;
2804
2805 chip_dbg(gdev->chip, "added GPIO chardev (%d:%d)\n",
2806 MAJOR(devt), gdev->id);
2807
2808 return 0;
2809}
2810
2811void gpiolib_cdev_unregister(struct gpio_device *gdev)
2812{
2813 cdev_device_del(&gdev->chrdev, &gdev->dev);
Bartosz Golaszewskia0674192023-08-17 13:59:05 +02002814 blocking_notifier_call_chain(&gdev->device_notifier, 0, NULL);
Kent Gibson925ca362020-06-16 17:36:15 +08002815}