// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core, trigger handling functions
 *
 * Copyright (c) 2008 Jonathan Cameron
 */

#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
#include <linux/iio/trigger.h>
#include "iio_core.h"
#include "iio_core_trigger.h"
#include <linux/iio/trigger_consumer.h>

/* RFC - Question of approach
 * Make the common case (single sensor, single trigger) simple by
 * starting trigger capture when the first sensor is added.
 *
 * Complex simultaneous start requires use of 'hold' functionality
 * of the trigger. (not implemented)
 *
 * Any other suggestions?
 */

static DEFINE_IDA(iio_trigger_ida);

/* Single list of all available triggers */
static LIST_HEAD(iio_trigger_list);
static DEFINE_MUTEX(iio_trigger_list_lock);

/**
 * name_show() - retrieve useful identifying name
 * @dev: device associated with the iio_trigger
 * @attr: pointer to the device_attribute structure that is
 *	  being processed
 * @buf: buffer to print the name into
 *
 * Return: a negative number on failure or the number of written
 *	   characters on success.
 */
static ssize_t name_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct iio_trigger *trig = to_iio_trigger(dev);

	return sysfs_emit(buf, "%s\n", trig->name);
}

static DEVICE_ATTR_RO(name);

static struct attribute *iio_trig_dev_attrs[] = {
	&dev_attr_name.attr,
	NULL,
};
ATTRIBUTE_GROUPS(iio_trig_dev);

static struct iio_trigger *__iio_trigger_find_by_name(const char *name);

int iio_trigger_register(struct iio_trigger *trig_info)
{
	int ret;

	trig_info->id = ida_alloc(&iio_trigger_ida, GFP_KERNEL);
	if (trig_info->id < 0)
		return trig_info->id;

	/* Set the name used for the sysfs directory etc */
	dev_set_name(&trig_info->dev, "trigger%d", trig_info->id);

	ret = device_add(&trig_info->dev);
	if (ret)
		goto error_unregister_id;

	/* Add to list of available triggers held by the IIO core */
	mutex_lock(&iio_trigger_list_lock);
	if (__iio_trigger_find_by_name(trig_info->name)) {
		pr_err("Duplicate trigger name '%s'\n", trig_info->name);
		ret = -EEXIST;
		goto error_device_del;
	}
	list_add_tail(&trig_info->list, &iio_trigger_list);
	mutex_unlock(&iio_trigger_list_lock);

	return 0;

error_device_del:
	mutex_unlock(&iio_trigger_list_lock);
	device_del(&trig_info->dev);
error_unregister_id:
	ida_free(&iio_trigger_ida, trig_info->id);
	return ret;
}
EXPORT_SYMBOL(iio_trigger_register);

void iio_trigger_unregister(struct iio_trigger *trig_info)
{
	mutex_lock(&iio_trigger_list_lock);
	list_del(&trig_info->list);
	mutex_unlock(&iio_trigger_list_lock);

	ida_free(&iio_trigger_ida, trig_info->id);
	/* Possible issue in here */
	device_del(&trig_info->dev);
}
EXPORT_SYMBOL(iio_trigger_unregister);

int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *trig)
{
	struct iio_dev_opaque *iio_dev_opaque;

	if (!indio_dev || !trig)
		return -EINVAL;

	iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	mutex_lock(&iio_dev_opaque->mlock);
	WARN_ON(iio_dev_opaque->trig_readonly);

	indio_dev->trig = iio_trigger_get(trig);
	iio_dev_opaque->trig_readonly = true;
	mutex_unlock(&iio_dev_opaque->mlock);

	return 0;
}
EXPORT_SYMBOL(iio_trigger_set_immutable);

/* Search for trigger by name, assuming iio_trigger_list_lock held */
static struct iio_trigger *__iio_trigger_find_by_name(const char *name)
{
	struct iio_trigger *iter;

	list_for_each_entry(iter, &iio_trigger_list, list)
		if (!strcmp(iter->name, name))
			return iter;

	return NULL;
}

static struct iio_trigger *iio_trigger_acquire_by_name(const char *name)
{
	struct iio_trigger *trig = NULL, *iter;

	mutex_lock(&iio_trigger_list_lock);
	list_for_each_entry(iter, &iio_trigger_list, list)
		if (sysfs_streq(iter->name, name)) {
			trig = iter;
			iio_trigger_get(trig);
			break;
		}
	mutex_unlock(&iio_trigger_list_lock);

	return trig;
}

static void iio_reenable_work_fn(struct work_struct *work)
{
	struct iio_trigger *trig = container_of(work, struct iio_trigger,
						reenable_work);

	/*
	 * This 'might' occur after the trigger state is set to disabled -
	 * in that case the driver should skip reenabling.
	 */
	trig->ops->reenable(trig);
}

/*
 * In general, reenable callbacks may need to sleep and this path is
 * not performance sensitive, so just queue up a work item
 * to reenable the trigger for us.
 *
 * Races that can cause this:
 * 1) A handler occurs entirely in interrupt context, so the final
 *    decrement of the use counter still happens in this interrupt.
 * 2) The trigger has been removed, but one last interrupt gets through.
 *
 * For (1) we must call reenable, but not in atomic context.
 * For (2) it should be safe to call reenable, if drivers never blindly
 * reenable after state is off.
 */
static void iio_trigger_notify_done_atomic(struct iio_trigger *trig)
{
	if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
	    trig->ops->reenable)
		schedule_work(&trig->reenable_work);
}

/**
 * iio_trigger_poll() - Call the IRQ trigger handler of the consumers
 * @trig: trigger which occurred
 *
 * This function should only be called from a hard IRQ context.
 */
void iio_trigger_poll(struct iio_trigger *trig)
{
	int i;

	if (!atomic_read(&trig->use_count)) {
		atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			if (trig->subirqs[i].enabled)
				generic_handle_irq(trig->subirq_base + i);
			else
				iio_trigger_notify_done_atomic(trig);
		}
	}
}
EXPORT_SYMBOL(iio_trigger_poll);

irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private)
{
	iio_trigger_poll(private);
	return IRQ_HANDLED;
}
EXPORT_SYMBOL(iio_trigger_generic_data_rdy_poll);
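
/*
 * Usage sketch (illustrative, not part of this file): a driver with a
 * hardware data-ready line typically wires that interrupt straight to
 * iio_trigger_generic_data_rdy_poll(), passing the trigger as dev_id.
 * The "mydrv-drdy" name and the irq variable below are assumptions made
 * for the example only.
 *
 *	trig = devm_iio_trigger_alloc(dev, "%s-dev%d", indio_dev->name,
 *				      iio_device_id(indio_dev));
 *	if (!trig)
 *		return -ENOMEM;
 *	ret = devm_request_irq(dev, irq, iio_trigger_generic_data_rdy_poll,
 *			       IRQF_TRIGGER_RISING, "mydrv-drdy", trig);
 */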

/**
 * iio_trigger_poll_nested() - Call the threaded trigger handler of the
 * consumers
 * @trig: trigger which occurred
 *
 * This function should only be called from a kernel thread context.
 */
void iio_trigger_poll_nested(struct iio_trigger *trig)
{
	int i;

	if (!atomic_read(&trig->use_count)) {
		atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			if (trig->subirqs[i].enabled)
				handle_nested_irq(trig->subirq_base + i);
			else
				iio_trigger_notify_done(trig);
		}
	}
}
EXPORT_SYMBOL(iio_trigger_poll_nested);

void iio_trigger_notify_done(struct iio_trigger *trig)
{
	if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
	    trig->ops->reenable)
		trig->ops->reenable(trig);
}
EXPORT_SYMBOL(iio_trigger_notify_done);

/* Trigger Consumer related functions */
static int iio_trigger_get_irq(struct iio_trigger *trig)
{
	int ret;

	mutex_lock(&trig->pool_lock);
	ret = bitmap_find_free_region(trig->pool,
				      CONFIG_IIO_CONSUMERS_PER_TRIGGER,
				      ilog2(1));
	mutex_unlock(&trig->pool_lock);
	if (ret >= 0)
		ret += trig->subirq_base;

	return ret;
}

static void iio_trigger_put_irq(struct iio_trigger *trig, int irq)
{
	mutex_lock(&trig->pool_lock);
	clear_bit(irq - trig->subirq_base, trig->pool);
	mutex_unlock(&trig->pool_lock);
}

/* Complexity in here. With certain triggers (datardy) an acknowledgement
 * may be needed if the pollfuncs do not include the data read for the
 * triggering device.
 * This is not currently handled. An alternative of not enabling the trigger
 * unless the relevant function is in there may be the best option.
 */
/* Worth protecting against double additions? */
int iio_trigger_attach_poll_func(struct iio_trigger *trig,
				 struct iio_poll_func *pf)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(pf->indio_dev);
	bool notinuse =
		bitmap_empty(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
	int ret = 0;

	/* Prevent the module from being removed whilst attached to a trigger */
	__module_get(iio_dev_opaque->driver_module);

	/* Get irq number */
	pf->irq = iio_trigger_get_irq(trig);
	if (pf->irq < 0) {
		pr_err("Could not find an available irq for trigger %s, CONFIG_IIO_CONSUMERS_PER_TRIGGER=%d limit might be exceeded\n",
		       trig->name, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
		goto out_put_module;
	}

	/* Request irq */
	ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
				   pf->type, pf->name,
				   pf);
	if (ret < 0)
		goto out_put_irq;

	/* Enable trigger in driver */
	if (trig->ops && trig->ops->set_trigger_state && notinuse) {
		ret = trig->ops->set_trigger_state(trig, true);
		if (ret < 0)
			goto out_free_irq;
	}

	/*
	 * Check if we just registered to our own trigger: we determine that
	 * this is the case if the IIO device and the trigger device share the
	 * same parent device.
	 */
	if (!iio_validate_own_trigger(pf->indio_dev, trig))
		trig->attached_own_device = true;

	return ret;

out_free_irq:
	free_irq(pf->irq, pf);
out_put_irq:
	iio_trigger_put_irq(trig, pf->irq);
out_put_module:
	module_put(iio_dev_opaque->driver_module);
	return ret;
}

int iio_trigger_detach_poll_func(struct iio_trigger *trig,
				 struct iio_poll_func *pf)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(pf->indio_dev);
	bool no_other_users =
		bitmap_weight(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER) == 1;
	int ret = 0;

	if (trig->ops && trig->ops->set_trigger_state && no_other_users) {
		ret = trig->ops->set_trigger_state(trig, false);
		if (ret)
			return ret;
	}
	if (pf->indio_dev->dev.parent == trig->dev.parent)
		trig->attached_own_device = false;
	iio_trigger_put_irq(trig, pf->irq);
	free_irq(pf->irq, pf);
	module_put(iio_dev_opaque->driver_module);

	return ret;
}

irqreturn_t iio_pollfunc_store_time(int irq, void *p)
{
	struct iio_poll_func *pf = p;

	pf->timestamp = iio_get_time_ns(pf->indio_dev);
	return IRQ_WAKE_THREAD;
}
EXPORT_SYMBOL(iio_pollfunc_store_time);

struct iio_poll_func
*iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p),
		    irqreturn_t (*thread)(int irq, void *p),
		    int type,
		    struct iio_dev *indio_dev,
		    const char *fmt,
		    ...)
{
	va_list vargs;
	struct iio_poll_func *pf;

	pf = kmalloc(sizeof(*pf), GFP_KERNEL);
	if (!pf)
		return NULL;
	va_start(vargs, fmt);
	pf->name = kvasprintf(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
	if (pf->name == NULL) {
		kfree(pf);
		return NULL;
	}
	pf->h = h;
	pf->thread = thread;
	pf->type = type;
	pf->indio_dev = indio_dev;

	return pf;
}
EXPORT_SYMBOL_GPL(iio_alloc_pollfunc);

void iio_dealloc_pollfunc(struct iio_poll_func *pf)
{
	kfree(pf->name);
	kfree(pf);
}
EXPORT_SYMBOL_GPL(iio_dealloc_pollfunc);
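
/*
 * Usage sketch (illustrative only): a triggered-buffer consumer typically
 * pairs iio_pollfunc_store_time() as the top half with its own threaded
 * handler, e.g.
 *
 *	indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
 *						 &mydrv_trigger_handler,
 *						 IRQF_ONESHOT, indio_dev,
 *						 "%s_consumer%d",
 *						 indio_dev->name,
 *						 iio_device_id(indio_dev));
 *
 * mydrv_trigger_handler is a hypothetical driver function; the name format
 * mirrors what the core triggered-buffer helpers use.
 */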

/**
 * current_trigger_show() - trigger consumer sysfs query current trigger
 * @dev: device associated with an industrial I/O device
 * @attr: pointer to the device_attribute structure that
 *	  is being processed
 * @buf: buffer where the current trigger name will be printed into
 *
 * For trigger consumers the current_trigger interface allows the trigger
 * used by the device to be queried.
 *
 * Return: a negative number on failure, the number of characters written
 * on success or 0 if no trigger is available
 */
static ssize_t current_trigger_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	if (indio_dev->trig)
		return sysfs_emit(buf, "%s\n", indio_dev->trig->name);
	return 0;
}

/**
 * current_trigger_store() - trigger consumer sysfs set current trigger
 * @dev: device associated with an industrial I/O device
 * @attr: device attribute that is being processed
 * @buf: string buffer that holds the name of the trigger
 * @len: length of the trigger name held by buf
 *
 * For trigger consumers the current_trigger interface allows the trigger
 * used for this device to be specified at run time based on the trigger's
 * name.
 *
 * Return: negative error code on failure or length of the buffer
 * on success
 */
static ssize_t current_trigger_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_trigger *oldtrig = indio_dev->trig;
	struct iio_trigger *trig;
	int ret;

	mutex_lock(&iio_dev_opaque->mlock);
	if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
		mutex_unlock(&iio_dev_opaque->mlock);
		return -EBUSY;
	}
	if (iio_dev_opaque->trig_readonly) {
		mutex_unlock(&iio_dev_opaque->mlock);
		return -EPERM;
	}
	mutex_unlock(&iio_dev_opaque->mlock);

	trig = iio_trigger_acquire_by_name(buf);
	if (oldtrig == trig) {
		ret = len;
		goto out_trigger_put;
	}

	if (trig && indio_dev->info->validate_trigger) {
		ret = indio_dev->info->validate_trigger(indio_dev, trig);
		if (ret)
			goto out_trigger_put;
	}

	if (trig && trig->ops && trig->ops->validate_device) {
		ret = trig->ops->validate_device(trig, indio_dev);
		if (ret)
			goto out_trigger_put;
	}

	indio_dev->trig = trig;

	if (oldtrig) {
		if (indio_dev->modes & INDIO_EVENT_TRIGGERED)
			iio_trigger_detach_poll_func(oldtrig,
						     indio_dev->pollfunc_event);
		iio_trigger_put(oldtrig);
	}
	if (indio_dev->trig) {
		if (indio_dev->modes & INDIO_EVENT_TRIGGERED)
			iio_trigger_attach_poll_func(indio_dev->trig,
						     indio_dev->pollfunc_event);
	}

	return len;

out_trigger_put:
	if (trig)
		iio_trigger_put(trig);
	return ret;
}

static DEVICE_ATTR_RW(current_trigger);

static struct attribute *iio_trigger_consumer_attrs[] = {
	&dev_attr_current_trigger.attr,
	NULL,
};

static const struct attribute_group iio_trigger_consumer_attr_group = {
	.name = "trigger",
	.attrs = iio_trigger_consumer_attrs,
};

static void iio_trig_release(struct device *device)
{
	struct iio_trigger *trig = to_iio_trigger(device);
	int i;

	if (trig->subirq_base) {
		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			irq_modify_status(trig->subirq_base + i,
					  IRQ_NOAUTOEN,
					  IRQ_NOREQUEST | IRQ_NOPROBE);
			irq_set_chip(trig->subirq_base + i,
				     NULL);
			irq_set_handler(trig->subirq_base + i,
					NULL);
		}

		irq_free_descs(trig->subirq_base,
			       CONFIG_IIO_CONSUMERS_PER_TRIGGER);
	}
	kfree(trig->name);
	kfree(trig);
}

static const struct device_type iio_trig_type = {
	.release = iio_trig_release,
	.groups = iio_trig_dev_groups,
};

static void iio_trig_subirqmask(struct irq_data *d)
{
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	struct iio_trigger *trig = container_of(chip, struct iio_trigger, subirq_chip);

	trig->subirqs[d->irq - trig->subirq_base].enabled = false;
}

static void iio_trig_subirqunmask(struct irq_data *d)
{
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	struct iio_trigger *trig = container_of(chip, struct iio_trigger, subirq_chip);

	trig->subirqs[d->irq - trig->subirq_base].enabled = true;
}

static __printf(3, 0)
struct iio_trigger *viio_trigger_alloc(struct device *parent,
				       struct module *this_mod,
				       const char *fmt,
				       va_list vargs)
{
	struct iio_trigger *trig;
	int i;

	trig = kzalloc(sizeof(*trig), GFP_KERNEL);
	if (!trig)
		return NULL;

	trig->dev.parent = parent;
	trig->dev.type = &iio_trig_type;
	trig->dev.bus = &iio_bus_type;
	device_initialize(&trig->dev);
	INIT_WORK(&trig->reenable_work, iio_reenable_work_fn);

	mutex_init(&trig->pool_lock);
	trig->subirq_base = irq_alloc_descs(-1, 0,
					    CONFIG_IIO_CONSUMERS_PER_TRIGGER,
					    0);
	if (trig->subirq_base < 0)
		goto free_trig;

	trig->name = kvasprintf(GFP_KERNEL, fmt, vargs);
	if (trig->name == NULL)
		goto free_descs;

	INIT_LIST_HEAD(&trig->list);

	trig->owner = this_mod;

	trig->subirq_chip.name = trig->name;
	trig->subirq_chip.irq_mask = &iio_trig_subirqmask;
	trig->subirq_chip.irq_unmask = &iio_trig_subirqunmask;
	for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
		irq_set_chip(trig->subirq_base + i, &trig->subirq_chip);
		irq_set_handler(trig->subirq_base + i, &handle_simple_irq);
		irq_modify_status(trig->subirq_base + i,
				  IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);
	}

	return trig;

free_descs:
	irq_free_descs(trig->subirq_base, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
free_trig:
	kfree(trig);
	return NULL;
}

/**
 * __iio_trigger_alloc - Allocate a trigger
 * @parent:		Device to allocate iio_trigger for
 * @this_mod:		module allocating the trigger
 * @fmt:		trigger name format. If it includes format
 *			specifiers, the additional arguments following
 *			format are formatted and inserted in the resulting
 *			string replacing their respective specifiers.
 *
 * RETURNS:
 * Pointer to allocated iio_trigger on success, NULL on failure.
 */
struct iio_trigger *__iio_trigger_alloc(struct device *parent,
					struct module *this_mod,
					const char *fmt, ...)
{
	struct iio_trigger *trig;
	va_list vargs;

	va_start(vargs, fmt);
	trig = viio_trigger_alloc(parent, this_mod, fmt, vargs);
	va_end(vargs);

	return trig;
}
EXPORT_SYMBOL(__iio_trigger_alloc);
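
/*
 * Most drivers do not call __iio_trigger_alloc() directly; they go through
 * the iio_trigger_alloc()/devm_iio_trigger_alloc() wrappers declared in
 * <linux/iio/trigger.h>, which pass THIS_MODULE as @this_mod. A minimal
 * sketch with hypothetical driver names:
 *
 *	trig = iio_trigger_alloc(dev, "%s-trig%d", indio_dev->name,
 *				 iio_device_id(indio_dev));
 *	if (!trig)
 *		return -ENOMEM;
 *	trig->ops = &mydrv_trigger_ops;
 *	iio_trigger_set_drvdata(trig, mydrv_state);
 *	ret = iio_trigger_register(trig);
 */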

void iio_trigger_free(struct iio_trigger *trig)
{
	if (trig)
		put_device(&trig->dev);
}
EXPORT_SYMBOL(iio_trigger_free);

static void devm_iio_trigger_release(struct device *dev, void *res)
{
	iio_trigger_free(*(struct iio_trigger **)res);
}

/**
 * __devm_iio_trigger_alloc - Resource-managed iio_trigger_alloc()
 * @parent:		Device to allocate iio_trigger for
 * @this_mod:		module allocating the trigger
 * @fmt:		trigger name format. If it includes format
 *			specifiers, the additional arguments following
 *			format are formatted and inserted in the resulting
 *			string replacing their respective specifiers.
 *
 * Managed iio_trigger_alloc. iio_trigger allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Pointer to allocated iio_trigger on success, NULL on failure.
 */
struct iio_trigger *__devm_iio_trigger_alloc(struct device *parent,
					     struct module *this_mod,
					     const char *fmt, ...)
{
	struct iio_trigger **ptr, *trig;
	va_list vargs;

	ptr = devres_alloc(devm_iio_trigger_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return NULL;

	/* use raw alloc_dr for kmalloc caller tracing */
	va_start(vargs, fmt);
	trig = viio_trigger_alloc(parent, this_mod, fmt, vargs);
	va_end(vargs);
	if (trig) {
		*ptr = trig;
		devres_add(parent, ptr);
	} else {
		devres_free(ptr);
	}

	return trig;
}
EXPORT_SYMBOL_GPL(__devm_iio_trigger_alloc);

static void devm_iio_trigger_unreg(void *trigger_info)
{
	iio_trigger_unregister(trigger_info);
}

/**
 * devm_iio_trigger_register - Resource-managed iio_trigger_register()
 * @dev:	device this trigger was allocated for
 * @trig_info:	trigger to register
 *
 * Managed iio_trigger_register(). The IIO trigger registered with this
 * function is automatically unregistered on driver detach. This function
 * calls iio_trigger_register() internally. Refer to that function for more
 * information.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int devm_iio_trigger_register(struct device *dev,
			      struct iio_trigger *trig_info)
{
	int ret;

	ret = iio_trigger_register(trig_info);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dev, devm_iio_trigger_unreg, trig_info);
}
EXPORT_SYMBOL_GPL(devm_iio_trigger_register);
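
/*
 * Usage sketch (illustrative, with hypothetical driver names): the devm
 * variants let a probe function allocate and register a trigger without
 * explicit cleanup paths, optionally making it the device's default:
 *
 *	trig = devm_iio_trigger_alloc(dev, "%s-trig%d", indio_dev->name,
 *				      iio_device_id(indio_dev));
 *	if (!trig)
 *		return -ENOMEM;
 *	trig->ops = &mydrv_trigger_ops;
 *	ret = devm_iio_trigger_register(dev, trig);
 *	if (ret)
 *		return ret;
 *	indio_dev->trig = iio_trigger_get(trig);
 */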

bool iio_trigger_using_own(struct iio_dev *indio_dev)
{
	return indio_dev->trig->attached_own_device;
}
EXPORT_SYMBOL(iio_trigger_using_own);

/**
 * iio_validate_own_trigger - Check if a trigger and IIO device belong to
 *  the same device
 * @idev: the IIO device to check
 * @trig: the IIO trigger to check
 *
 * This function can be used as the validate_trigger callback for triggers that
 * can only be attached to their own device.
 *
 * Return: 0 if both the trigger and the IIO device belong to the same
 * device, -EINVAL otherwise.
 */
int iio_validate_own_trigger(struct iio_dev *idev, struct iio_trigger *trig)
{
	if (idev->dev.parent != trig->dev.parent)
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL_GPL(iio_validate_own_trigger);

/**
 * iio_trigger_validate_own_device - Check if a trigger and IIO device belong to
 *  the same device
 * @trig: The IIO trigger to check
 * @indio_dev: the IIO device to check
 *
 * This function can be used as the validate_device callback for triggers that
 * can only be attached to their own device.
 *
 * Return: 0 if both the trigger and the IIO device belong to the same
 * device, -EINVAL otherwise.
 */
int iio_trigger_validate_own_device(struct iio_trigger *trig,
				    struct iio_dev *indio_dev)
{
	if (indio_dev->dev.parent != trig->dev.parent)
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL(iio_trigger_validate_own_device);
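
/*
 * Usage sketch (illustrative): a driver whose trigger must only drive its
 * own device plugs these helpers into the respective ops structures, e.g.
 *
 *	static const struct iio_trigger_ops mydrv_trigger_ops = {
 *		.set_trigger_state = mydrv_set_trigger_state,
 *		.validate_device = iio_trigger_validate_own_device,
 *	};
 *
 *	static const struct iio_info mydrv_info = {
 *		.validate_trigger = iio_validate_own_trigger,
 *	};
 *
 * The mydrv_* names are hypothetical.
 */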

int iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
{
	return iio_device_register_sysfs_group(indio_dev,
					       &iio_trigger_consumer_attr_group);
}

void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
{
	/* Clean up an associated but not attached trigger reference */
	if (indio_dev->trig)
		iio_trigger_put(indio_dev->trig);
}