// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core, trigger handling functions
 *
 * Copyright (c) 2008 Jonathan Cameron
 */

#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
#include <linux/iio/trigger.h>
#include "iio_core.h"
#include "iio_core_trigger.h"
#include <linux/iio/trigger_consumer.h>

/* RFC - Question of approach
 * Make the common case (single sensor, single trigger)
 * simple by starting trigger capture when the first sensor
 * is added.
 *
 * Complex simultaneous start requires use of the 'hold' functionality
 * of the trigger. (not implemented)
 *
 * Any other suggestions?
 */

static DEFINE_IDA(iio_trigger_ida);

/* Single list of all available triggers */
static LIST_HEAD(iio_trigger_list);
static DEFINE_MUTEX(iio_trigger_list_lock);

/**
 * name_show() - retrieve useful identifying name
 * @dev:	device associated with the iio_trigger
 * @attr:	pointer to the device_attribute structure that is
 *		being processed
 * @buf:	buffer to print the name into
 *
 * Return: a negative number on failure or the number of written
 *	   characters on success.
 */
static ssize_t name_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct iio_trigger *trig = to_iio_trigger(dev);

	return sysfs_emit(buf, "%s\n", trig->name);
}

static DEVICE_ATTR_RO(name);

static struct attribute *iio_trig_dev_attrs[] = {
	&dev_attr_name.attr,
	NULL,
};
ATTRIBUTE_GROUPS(iio_trig_dev);

static struct iio_trigger *__iio_trigger_find_by_name(const char *name);

int iio_trigger_register(struct iio_trigger *trig_info)
{
	int ret;

	trig_info->id = ida_alloc(&iio_trigger_ida, GFP_KERNEL);
	if (trig_info->id < 0)
		return trig_info->id;

	/* Set the name used for the sysfs directory etc */
	dev_set_name(&trig_info->dev, "trigger%d", trig_info->id);

	ret = device_add(&trig_info->dev);
	if (ret)
		goto error_unregister_id;

	/* Add to list of available triggers held by the IIO core */
	mutex_lock(&iio_trigger_list_lock);
	if (__iio_trigger_find_by_name(trig_info->name)) {
		pr_err("Duplicate trigger name '%s'\n", trig_info->name);
		ret = -EEXIST;
		goto error_device_del;
	}
	list_add_tail(&trig_info->list, &iio_trigger_list);
	mutex_unlock(&iio_trigger_list_lock);

	return 0;

error_device_del:
	mutex_unlock(&iio_trigger_list_lock);
	device_del(&trig_info->dev);
error_unregister_id:
	ida_free(&iio_trigger_ida, trig_info->id);
	return ret;
}
EXPORT_SYMBOL(iio_trigger_register);

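/*
 * Example (illustrative sketch, hypothetical "foo" names): a trigger
 * driver typically allocates, configures and registers a trigger like so:
 *
 *	struct iio_trigger *trig;
 *	int ret;
 *
 *	trig = iio_trigger_alloc(parent, "foo-dev%d", idx);
 *	if (!trig)
 *		return -ENOMEM;
 *
 *	trig->ops = &foo_trigger_ops;
 *	iio_trigger_set_drvdata(trig, st);
 *
 *	ret = iio_trigger_register(trig);
 *	if (ret) {
 *		iio_trigger_free(trig);
 *		return ret;
 *	}
 */
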
void iio_trigger_unregister(struct iio_trigger *trig_info)
{
	mutex_lock(&iio_trigger_list_lock);
	list_del(&trig_info->list);
	mutex_unlock(&iio_trigger_list_lock);

	ida_free(&iio_trigger_ida, trig_info->id);
	/* Possible issue in here */
	device_del(&trig_info->dev);
}
EXPORT_SYMBOL(iio_trigger_unregister);

int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *trig)
{
	struct iio_dev_opaque *iio_dev_opaque;

	if (!indio_dev || !trig)
		return -EINVAL;

	iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	mutex_lock(&iio_dev_opaque->mlock);
	WARN_ON(iio_dev_opaque->trig_readonly);

	indio_dev->trig = iio_trigger_get(trig);
	iio_dev_opaque->trig_readonly = true;
	mutex_unlock(&iio_dev_opaque->mlock);

	return 0;
}
EXPORT_SYMBOL(iio_trigger_set_immutable);

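/*
 * Example (hypothetical): a driver whose device can only sensibly consume
 * its own hardware trigger may pin the association after registration:
 *
 *	ret = devm_iio_trigger_register(dev, trig);
 *	if (ret)
 *		return ret;
 *
 *	ret = iio_trigger_set_immutable(indio_dev, trig);
 *
 * From then on, writes to the current_trigger sysfs file return -EPERM.
 */
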
/* Search for trigger by name, assuming iio_trigger_list_lock held */
static struct iio_trigger *__iio_trigger_find_by_name(const char *name)
{
	struct iio_trigger *iter;

	list_for_each_entry(iter, &iio_trigger_list, list)
		if (!strcmp(iter->name, name))
			return iter;

	return NULL;
}

static struct iio_trigger *iio_trigger_acquire_by_name(const char *name)
{
	struct iio_trigger *trig = NULL, *iter;

	mutex_lock(&iio_trigger_list_lock);
	list_for_each_entry(iter, &iio_trigger_list, list)
		if (sysfs_streq(iter->name, name)) {
			trig = iter;
			iio_trigger_get(trig);
			break;
		}
	mutex_unlock(&iio_trigger_list_lock);

	return trig;
}

static void iio_reenable_work_fn(struct work_struct *work)
{
	struct iio_trigger *trig = container_of(work, struct iio_trigger,
						reenable_work);

	/*
	 * This 'might' occur after the trigger state is set to disabled -
	 * in that case the driver should skip reenabling.
	 */
	trig->ops->reenable(trig);
}

/*
 * In general, reenable callbacks may need to sleep and this path is
 * not performance sensitive, so just queue up a work item
 * to reenable the trigger for us.
 *
 * Races that can cause this:
 * 1) A handler occurs entirely in interrupt context, so the final
 *    decrement of the use count happens in that interrupt.
 * 2) The trigger has been removed, but one last interrupt gets through.
 *
 * For (1) we must call reenable, but not in atomic context.
 * For (2) it should be safe to call reenable, provided drivers never
 * blindly reenable after the trigger state is off.
 */
static void iio_trigger_notify_done_atomic(struct iio_trigger *trig)
{
	if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
	    trig->ops->reenable)
		schedule_work(&trig->reenable_work);
}

/**
 * iio_trigger_poll() - Call the IRQ trigger handler of the consumers
 * @trig: trigger which occurred
 *
 * This function should only be called from a hard IRQ context.
 */
void iio_trigger_poll(struct iio_trigger *trig)
{
	int i;

	if (!atomic_read(&trig->use_count)) {
		atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			if (trig->subirqs[i].enabled)
				generic_handle_irq(trig->subirq_base + i);
			else
				iio_trigger_notify_done_atomic(trig);
		}
	}
}
EXPORT_SYMBOL(iio_trigger_poll);

irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private)
{
	iio_trigger_poll(private);
	return IRQ_HANDLED;
}
EXPORT_SYMBOL(iio_trigger_generic_data_rdy_poll);

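/*
 * Example (sketch, hypothetical names): a device with a data-ready line
 * can wire the interrupt directly to its trigger with this helper, passing
 * the trigger itself as the dev_id cookie since the handler expects the
 * struct iio_trigger pointer in its private argument:
 *
 *	ret = devm_request_irq(dev, irq, iio_trigger_generic_data_rdy_poll,
 *			       IRQF_TRIGGER_RISING, "foo-data-rdy", trig);
 */
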
/**
 * iio_trigger_poll_nested() - Call the threaded trigger handler of the
 * consumers
 * @trig: trigger which occurred
 *
 * This function should only be called from a kernel thread context.
 */
void iio_trigger_poll_nested(struct iio_trigger *trig)
{
	int i;

	if (!atomic_read(&trig->use_count)) {
		atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			if (trig->subirqs[i].enabled)
				handle_nested_irq(trig->subirq_base + i);
			else
				iio_trigger_notify_done(trig);
		}
	}
}
EXPORT_SYMBOL(iio_trigger_poll_nested);

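/*
 * Example (sketch, hypothetical names): drivers that must talk to the
 * device over a slow bus (e.g. I2C) to acknowledge the interrupt
 * typically request a threaded IRQ and call iio_trigger_poll_nested()
 * from the thread, where sleeping is allowed:
 *
 *	static irqreturn_t foo_irq_thread(int irq, void *private)
 *	{
 *		struct iio_trigger *trig = private;
 *
 *		foo_ack_interrupt(trig);
 *		iio_trigger_poll_nested(trig);
 *		return IRQ_HANDLED;
 *	}
 */
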
void iio_trigger_notify_done(struct iio_trigger *trig)
{
	if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
	    trig->ops->reenable)
		trig->ops->reenable(trig);
}
EXPORT_SYMBOL(iio_trigger_notify_done);

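/*
 * Example (illustrative, hypothetical callbacks): a trigger driver whose
 * interrupt must be re-armed once every consumer has finished supplies a
 * reenable callback in its ops:
 *
 *	static const struct iio_trigger_ops foo_trigger_ops = {
 *		.set_trigger_state = foo_set_trigger_state,
 *		.reenable = foo_reenable,
 *	};
 *
 * The core invokes reenable here once use_count drops to zero, or via the
 * work item in the atomic variant above when called from hard IRQ context.
 */
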
/* Trigger Consumer related functions */
static int iio_trigger_get_irq(struct iio_trigger *trig)
{
	int ret;

	mutex_lock(&trig->pool_lock);
	ret = bitmap_find_free_region(trig->pool,
				      CONFIG_IIO_CONSUMERS_PER_TRIGGER,
				      ilog2(1));
	mutex_unlock(&trig->pool_lock);
	if (ret >= 0)
		ret += trig->subirq_base;

	return ret;
}

static void iio_trigger_put_irq(struct iio_trigger *trig, int irq)
{
	mutex_lock(&trig->pool_lock);
	clear_bit(irq - trig->subirq_base, trig->pool);
	mutex_unlock(&trig->pool_lock);
}

/* Complexity in here.  With certain triggers (datardy) an acknowledgement
 * may be needed if the pollfuncs do not include the data read for the
 * triggering device.
 * This is not currently handled.  An alternative - only enabling the
 * trigger when the relevant function is present - may be the best option.
 */
/* Worth protecting against double additions? */
int iio_trigger_attach_poll_func(struct iio_trigger *trig,
				 struct iio_poll_func *pf)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(pf->indio_dev);
	bool notinuse =
		bitmap_empty(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
	int ret = 0;

	/* Prevent the module from being removed whilst attached to a trigger */
	__module_get(iio_dev_opaque->driver_module);

	/* Get irq number */
	pf->irq = iio_trigger_get_irq(trig);
	if (pf->irq < 0) {
		pr_err("Could not find an available irq for trigger %s, CONFIG_IIO_CONSUMERS_PER_TRIGGER=%d limit might be exceeded\n",
			trig->name, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
		/* Propagate the error rather than returning success */
		ret = pf->irq;
		goto out_put_module;
	}

	/* Request irq */
	ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
				   pf->type, pf->name,
				   pf);
	if (ret < 0)
		goto out_put_irq;

	/* Enable trigger in driver */
	if (trig->ops && trig->ops->set_trigger_state && notinuse) {
		ret = trig->ops->set_trigger_state(trig, true);
		if (ret)
			goto out_free_irq;
	}

	/*
	 * Check if we just registered to our own trigger: we determine that
	 * this is the case if the IIO device and the trigger device share the
	 * same parent device.
	 */
	if (!iio_validate_own_trigger(pf->indio_dev, trig))
		trig->attached_own_device = true;

	return ret;

out_free_irq:
	free_irq(pf->irq, pf);
out_put_irq:
	iio_trigger_put_irq(trig, pf->irq);
out_put_module:
	module_put(iio_dev_opaque->driver_module);
	return ret;
}

int iio_trigger_detach_poll_func(struct iio_trigger *trig,
				 struct iio_poll_func *pf)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(pf->indio_dev);
	bool no_other_users =
		bitmap_weight(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER) == 1;
	int ret = 0;

	if (trig->ops && trig->ops->set_trigger_state && no_other_users) {
		ret = trig->ops->set_trigger_state(trig, false);
		if (ret)
			return ret;
	}
	if (pf->indio_dev->dev.parent == trig->dev.parent)
		trig->attached_own_device = false;
	iio_trigger_put_irq(trig, pf->irq);
	free_irq(pf->irq, pf);
	module_put(iio_dev_opaque->driver_module);

	return ret;
}

irqreturn_t iio_pollfunc_store_time(int irq, void *p)
{
	struct iio_poll_func *pf = p;

	pf->timestamp = iio_get_time_ns(pf->indio_dev);
	return IRQ_WAKE_THREAD;
}
EXPORT_SYMBOL(iio_pollfunc_store_time);

struct iio_poll_func
*iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p),
		    irqreturn_t (*thread)(int irq, void *p),
		    int type,
		    struct iio_dev *indio_dev,
		    const char *fmt,
		    ...)
{
	va_list vargs;
	struct iio_poll_func *pf;

	pf = kmalloc(sizeof(*pf), GFP_KERNEL);
	if (!pf)
		return NULL;
	va_start(vargs, fmt);
	pf->name = kvasprintf(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
	if (pf->name == NULL) {
		kfree(pf);
		return NULL;
	}
	pf->h = h;
	pf->thread = thread;
	pf->type = type;
	pf->indio_dev = indio_dev;

	return pf;
}
EXPORT_SYMBOL_GPL(iio_alloc_pollfunc);

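/*
 * Example (sketch, hypothetical handler name): a consumer commonly pairs
 * iio_pollfunc_store_time() as the top half with its own threaded bottom
 * half, which captures data and then signals completion:
 *
 *	indio_dev->pollfunc = iio_alloc_pollfunc(iio_pollfunc_store_time,
 *						 foo_trigger_handler,
 *						 IRQF_ONESHOT, indio_dev,
 *						 "%s_consumer%d",
 *						 indio_dev->name,
 *						 iio_device_id(indio_dev));
 *
 * foo_trigger_handler() would push a sample to the buffer using the stored
 * pf->timestamp and finish with:
 *
 *	iio_trigger_notify_done(indio_dev->trig);
 *	return IRQ_HANDLED;
 */
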
void iio_dealloc_pollfunc(struct iio_poll_func *pf)
{
	kfree(pf->name);
	kfree(pf);
}
EXPORT_SYMBOL_GPL(iio_dealloc_pollfunc);

/**
 * current_trigger_show() - trigger consumer sysfs query current trigger
 * @dev:	device associated with an industrial I/O device
 * @attr:	pointer to the device_attribute structure that
 *		is being processed
 * @buf:	buffer where the current trigger name will be printed into
 *
 * For trigger consumers the current_trigger interface allows the trigger
 * used by the device to be queried.
 *
 * Return: a negative number on failure, the number of characters written
 *	   on success or 0 if no trigger is available
 */
static ssize_t current_trigger_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	if (indio_dev->trig)
		return sysfs_emit(buf, "%s\n", indio_dev->trig->name);
	return 0;
}

/**
 * current_trigger_store() - trigger consumer sysfs set current trigger
 * @dev:	device associated with an industrial I/O device
 * @attr:	device attribute that is being processed
 * @buf:	string buffer that holds the name of the trigger
 * @len:	length of the trigger name held by buf
 *
 * For trigger consumers the current_trigger interface allows the trigger
 * used for this device to be specified at run time based on the trigger's
 * name.
 *
 * Return: negative error code on failure or length of the buffer
 *	   on success
 */
static ssize_t current_trigger_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_trigger *oldtrig = indio_dev->trig;
	struct iio_trigger *trig;
	int ret;

	mutex_lock(&iio_dev_opaque->mlock);
	if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
		mutex_unlock(&iio_dev_opaque->mlock);
		return -EBUSY;
	}
	if (iio_dev_opaque->trig_readonly) {
		mutex_unlock(&iio_dev_opaque->mlock);
		return -EPERM;
	}
	mutex_unlock(&iio_dev_opaque->mlock);

	trig = iio_trigger_acquire_by_name(buf);
	if (oldtrig == trig) {
		ret = len;
		goto out_trigger_put;
	}

	if (trig && indio_dev->info->validate_trigger) {
		ret = indio_dev->info->validate_trigger(indio_dev, trig);
		if (ret)
			goto out_trigger_put;
	}

	if (trig && trig->ops && trig->ops->validate_device) {
		ret = trig->ops->validate_device(trig, indio_dev);
		if (ret)
			goto out_trigger_put;
	}

	indio_dev->trig = trig;

	if (oldtrig) {
		if (indio_dev->modes & INDIO_EVENT_TRIGGERED)
			iio_trigger_detach_poll_func(oldtrig,
						     indio_dev->pollfunc_event);
		iio_trigger_put(oldtrig);
	}
	if (indio_dev->trig) {
		if (indio_dev->modes & INDIO_EVENT_TRIGGERED)
			iio_trigger_attach_poll_func(indio_dev->trig,
						     indio_dev->pollfunc_event);
	}

	return len;

out_trigger_put:
	if (trig)
		iio_trigger_put(trig);
	return ret;
}

static DEVICE_ATTR_RW(current_trigger);

static struct attribute *iio_trigger_consumer_attrs[] = {
	&dev_attr_current_trigger.attr,
	NULL,
};

static const struct attribute_group iio_trigger_consumer_attr_group = {
	.name = "trigger",
	.attrs = iio_trigger_consumer_attrs,
};

static void iio_trig_release(struct device *device)
{
	struct iio_trigger *trig = to_iio_trigger(device);
	int i;

	if (trig->subirq_base) {
		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			irq_modify_status(trig->subirq_base + i,
					  IRQ_NOAUTOEN,
					  IRQ_NOREQUEST | IRQ_NOPROBE);
			irq_set_chip(trig->subirq_base + i,
				     NULL);
			irq_set_handler(trig->subirq_base + i,
					NULL);
		}

		irq_free_descs(trig->subirq_base,
			       CONFIG_IIO_CONSUMERS_PER_TRIGGER);
	}
	kfree(trig->name);
	kfree(trig);
}

static const struct device_type iio_trig_type = {
	.release = iio_trig_release,
	.groups = iio_trig_dev_groups,
};

static void iio_trig_subirqmask(struct irq_data *d)
{
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	struct iio_trigger *trig = container_of(chip, struct iio_trigger, subirq_chip);

	trig->subirqs[d->irq - trig->subirq_base].enabled = false;
}

static void iio_trig_subirqunmask(struct irq_data *d)
{
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	struct iio_trigger *trig = container_of(chip, struct iio_trigger, subirq_chip);

	trig->subirqs[d->irq - trig->subirq_base].enabled = true;
}

static __printf(3, 0)
struct iio_trigger *viio_trigger_alloc(struct device *parent,
				       struct module *this_mod,
				       const char *fmt,
				       va_list vargs)
{
	struct iio_trigger *trig;
	int i;

	trig = kzalloc(sizeof(*trig), GFP_KERNEL);
	if (!trig)
		return NULL;

	trig->dev.parent = parent;
	trig->dev.type = &iio_trig_type;
	trig->dev.bus = &iio_bus_type;
	device_initialize(&trig->dev);
	INIT_WORK(&trig->reenable_work, iio_reenable_work_fn);

	mutex_init(&trig->pool_lock);
	trig->subirq_base = irq_alloc_descs(-1, 0,
					    CONFIG_IIO_CONSUMERS_PER_TRIGGER,
					    0);
	if (trig->subirq_base < 0)
		goto free_trig;

	trig->name = kvasprintf(GFP_KERNEL, fmt, vargs);
	if (trig->name == NULL)
		goto free_descs;

	INIT_LIST_HEAD(&trig->list);

	trig->owner = this_mod;

	trig->subirq_chip.name = trig->name;
	trig->subirq_chip.irq_mask = &iio_trig_subirqmask;
	trig->subirq_chip.irq_unmask = &iio_trig_subirqunmask;
	for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
		irq_set_chip(trig->subirq_base + i, &trig->subirq_chip);
		irq_set_handler(trig->subirq_base + i, &handle_simple_irq);
		irq_modify_status(trig->subirq_base + i,
				  IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);
	}

	return trig;

free_descs:
	irq_free_descs(trig->subirq_base, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
free_trig:
	kfree(trig);
	return NULL;
}

/**
 * __iio_trigger_alloc - Allocate a trigger
 * @parent:		Device to allocate iio_trigger for
 * @this_mod:		module allocating the trigger
 * @fmt:		trigger name format. If it includes format
 *			specifiers, the additional arguments following
 *			format are formatted and inserted in the resulting
 *			string replacing their respective specifiers.
 * RETURNS:
 * Pointer to allocated iio_trigger on success, NULL on failure.
 */
struct iio_trigger *__iio_trigger_alloc(struct device *parent,
					struct module *this_mod,
					const char *fmt, ...)
{
	struct iio_trigger *trig;
	va_list vargs;

	va_start(vargs, fmt);
	trig = viio_trigger_alloc(parent, this_mod, fmt, vargs);
	va_end(vargs);

	return trig;
}
EXPORT_SYMBOL(__iio_trigger_alloc);

void iio_trigger_free(struct iio_trigger *trig)
{
	if (trig)
		put_device(&trig->dev);
}
EXPORT_SYMBOL(iio_trigger_free);

static void devm_iio_trigger_release(struct device *dev, void *res)
{
	iio_trigger_free(*(struct iio_trigger **)res);
}

/**
 * __devm_iio_trigger_alloc - Resource-managed iio_trigger_alloc()
 * @parent:		Device to allocate iio_trigger for
 * @this_mod:		module allocating the trigger
 * @fmt:		trigger name format. If it includes format
 *			specifiers, the additional arguments following
 *			format are formatted and inserted in the resulting
 *			string replacing their respective specifiers.
 *
 * Managed iio_trigger_alloc.  An iio_trigger allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Pointer to allocated iio_trigger on success, NULL on failure.
 */
struct iio_trigger *__devm_iio_trigger_alloc(struct device *parent,
					     struct module *this_mod,
					     const char *fmt, ...)
{
	struct iio_trigger **ptr, *trig;
	va_list vargs;

	ptr = devres_alloc(devm_iio_trigger_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return NULL;

	/* use raw alloc_dr for kmalloc caller tracing */
	va_start(vargs, fmt);
	trig = viio_trigger_alloc(parent, this_mod, fmt, vargs);
	va_end(vargs);
	if (trig) {
		*ptr = trig;
		devres_add(parent, ptr);
	} else {
		devres_free(ptr);
	}

	return trig;
}
EXPORT_SYMBOL_GPL(__devm_iio_trigger_alloc);

static void devm_iio_trigger_unreg(void *trigger_info)
{
	iio_trigger_unregister(trigger_info);
}

/**
 * devm_iio_trigger_register - Resource-managed iio_trigger_register()
 * @dev:	device this trigger was allocated for
 * @trig_info:	trigger to register
 *
 * Managed iio_trigger_register().  The IIO trigger registered with this
 * function is automatically unregistered on driver detach. This function
 * calls iio_trigger_register() internally. Refer to that function for more
 * information.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int devm_iio_trigger_register(struct device *dev,
			      struct iio_trigger *trig_info)
{
	int ret;

	ret = iio_trigger_register(trig_info);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dev, devm_iio_trigger_unreg, trig_info);
}
EXPORT_SYMBOL_GPL(devm_iio_trigger_register);

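/*
 * Example (illustrative, "foo" names hypothetical): with the managed
 * variants a probe path needs no explicit cleanup:
 *
 *	trig = devm_iio_trigger_alloc(dev, "foo-dev%d", idx);
 *	if (!trig)
 *		return -ENOMEM;
 *	trig->ops = &foo_trigger_ops;
 *
 *	ret = devm_iio_trigger_register(dev, trig);
 *	if (ret)
 *		return ret;
 *
 * devm_iio_trigger_alloc() is the macro wrapper that feeds THIS_MODULE to
 * __devm_iio_trigger_alloc().
 */
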
bool iio_trigger_using_own(struct iio_dev *indio_dev)
{
	return indio_dev->trig->attached_own_device;
}
EXPORT_SYMBOL(iio_trigger_using_own);

/**
 * iio_validate_own_trigger - Check if a trigger and IIO device belong to
 *  the same device
 * @idev: the IIO device to check
 * @trig: the IIO trigger to check
 *
 * This function can be used as the validate_trigger callback for triggers that
 * can only be attached to their own device.
 *
 * Return: 0 if both the trigger and the IIO device belong to the same
 * device, -EINVAL otherwise.
 */
int iio_validate_own_trigger(struct iio_dev *idev, struct iio_trigger *trig)
{
	if (idev->dev.parent != trig->dev.parent)
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL_GPL(iio_validate_own_trigger);

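/*
 * Example (hypothetical driver): plug this helper straight into the
 * device's struct iio_info:
 *
 *	static const struct iio_info foo_info = {
 *		.read_raw = foo_read_raw,
 *		.validate_trigger = iio_validate_own_trigger,
 *	};
 */
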
/**
 * iio_trigger_validate_own_device - Check if a trigger and IIO device belong to
 *  the same device
 * @trig: The IIO trigger to check
 * @indio_dev: the IIO device to check
 *
 * This function can be used as the validate_device callback for triggers that
 * can only be attached to their own device.
 *
 * Return: 0 if both the trigger and the IIO device belong to the same
 * device, -EINVAL otherwise.
 */
int iio_trigger_validate_own_device(struct iio_trigger *trig,
				    struct iio_dev *indio_dev)
{
	if (indio_dev->dev.parent != trig->dev.parent)
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL(iio_trigger_validate_own_device);

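/*
 * Example (hypothetical driver): the mirror-image check goes in the
 * trigger's ops when the trigger is only usable by its parent device:
 *
 *	static const struct iio_trigger_ops foo_own_trigger_ops = {
 *		.set_trigger_state = foo_set_trigger_state,
 *		.validate_device = iio_trigger_validate_own_device,
 *	};
 */
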
int iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
{
	return iio_device_register_sysfs_group(indio_dev,
					       &iio_trigger_consumer_attr_group);
}

void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
{
	/* Clean up an associated but not attached trigger reference */
	if (indio_dev->trig)
		iio_trigger_put(indio_dev->trig);
}