/* The industrial I/O core, trigger handling functions
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
#include "iio_core.h"
#include "iio_core_trigger.h"
#include <linux/iio/trigger_consumer.h>
/* RFC - Question of approach
 * Make the common case (single sensor, single trigger) simple by
 * starting trigger capture when the first sensor is added.
 *
 * Complex simultaneous start requires use of the 'hold' functionality
 * of the trigger. (not implemented)
 *
 * Any other suggestions?
 */

static DEFINE_IDA(iio_trigger_ida);

/* Single list of all available triggers */
static LIST_HEAD(iio_trigger_list);
static DEFINE_MUTEX(iio_trigger_list_lock);

/**
 * iio_trigger_read_name() - retrieve useful identifying name
 **/
static ssize_t iio_trigger_read_name(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct iio_trigger *trig = to_iio_trigger(dev);

	return sprintf(buf, "%s\n", trig->name);
}

static DEVICE_ATTR(name, S_IRUGO, iio_trigger_read_name, NULL);

static struct attribute *iio_trig_dev_attrs[] = {
	&dev_attr_name.attr,
	NULL,
};

static struct attribute_group iio_trig_attr_group = {
	.attrs	= iio_trig_dev_attrs,
};

static const struct attribute_group *iio_trig_attr_groups[] = {
	&iio_trig_attr_group,
	NULL
};

int iio_trigger_register(struct iio_trigger *trig_info)
{
	int ret;

	trig_info->id = ida_simple_get(&iio_trigger_ida, 0, 0, GFP_KERNEL);
	if (trig_info->id < 0) {
		ret = trig_info->id;
		goto error_ret;
	}
	/* Set the name used for the sysfs directory etc. */
	dev_set_name(&trig_info->dev, "trigger%d", trig_info->id);

	ret = device_add(&trig_info->dev);
	if (ret)
		goto error_unregister_id;

	/* Add to list of available triggers held by the IIO core */
	mutex_lock(&iio_trigger_list_lock);
	list_add_tail(&trig_info->list, &iio_trigger_list);
	mutex_unlock(&iio_trigger_list_lock);

	return 0;

error_unregister_id:
	ida_simple_remove(&iio_trigger_ida, trig_info->id);
error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_trigger_register);
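
/*
 * Example (illustrative sketch): a typical trigger driver allocates a
 * trigger, fills in its ops and then registers it.  The names
 * my_trigger_ops and my_dev below are hypothetical.
 *
 *	struct iio_trigger *trig;
 *	int ret;
 *
 *	trig = iio_trigger_alloc("%s-dev%d", "mydrv", my_dev->id);
 *	if (!trig)
 *		return -ENOMEM;
 *	trig->ops = &my_trigger_ops;
 *	ret = iio_trigger_register(trig);
 *	if (ret) {
 *		iio_trigger_free(trig);
 *		return ret;
 *	}
 */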

void iio_trigger_unregister(struct iio_trigger *trig_info)
{
	mutex_lock(&iio_trigger_list_lock);
	list_del(&trig_info->list);
	mutex_unlock(&iio_trigger_list_lock);

	ida_simple_remove(&iio_trigger_ida, trig_info->id);
	/* Possible issue in here */
	device_del(&trig_info->dev);
}
EXPORT_SYMBOL(iio_trigger_unregister);

static struct iio_trigger *iio_trigger_find_by_name(const char *name,
						    size_t len)
{
	struct iio_trigger *trig = NULL, *iter;

	mutex_lock(&iio_trigger_list_lock);
	list_for_each_entry(iter, &iio_trigger_list, list)
		if (sysfs_streq(iter->name, name)) {
			trig = iter;
			break;
		}
	mutex_unlock(&iio_trigger_list_lock);

	return trig;
}

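/*
 * Fan an incoming trigger event out to all enabled consumer irqs.  use_count
 * is primed to the full number of consumers; each consumer (or the else
 * branch for a disabled slot) decrements it again via
 * iio_trigger_notify_done(), so a new event is only accepted once every
 * consumer of the previous one has finished.
 */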
void iio_trigger_poll(struct iio_trigger *trig, s64 time)
{
	int i;

	if (!atomic_read(&trig->use_count)) {
		atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			if (trig->subirqs[i].enabled)
				generic_handle_irq(trig->subirq_base + i);
			else
				iio_trigger_notify_done(trig);
		}
	}
}
EXPORT_SYMBOL(iio_trigger_poll);

irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private)
{
	iio_trigger_poll(private, iio_get_time_ns());
	return IRQ_HANDLED;
}
EXPORT_SYMBOL(iio_trigger_generic_data_rdy_poll);
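
/*
 * Example (illustrative sketch): a driver whose hardware raises a data-ready
 * interrupt can hand that interrupt straight to the core.  my_dev->irq is a
 * hypothetical device interrupt line.
 *
 *	ret = request_irq(my_dev->irq, iio_trigger_generic_data_rdy_poll,
 *			  IRQF_TRIGGER_RISING, trig->name, trig);
 *
 * For interrupts that must run in thread context (e.g. behind an I2C gpio
 * expander), use a threaded handler that calls iio_trigger_poll_chained()
 * instead.
 */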

void iio_trigger_poll_chained(struct iio_trigger *trig, s64 time)
{
	int i;

	if (!atomic_read(&trig->use_count)) {
		atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			if (trig->subirqs[i].enabled)
				handle_nested_irq(trig->subirq_base + i);
			else
				iio_trigger_notify_done(trig);
		}
	}
}
EXPORT_SYMBOL(iio_trigger_poll_chained);

void iio_trigger_notify_done(struct iio_trigger *trig)
{
	if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
		trig->ops->try_reenable)
		if (trig->ops->try_reenable(trig))
			/* Missed an interrupt so launch new poll now */
			iio_trigger_poll(trig, 0);
}
EXPORT_SYMBOL(iio_trigger_notify_done);
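
/*
 * Example (illustrative sketch): a trigger driver can supply try_reenable to
 * re-arm one-shot hardware once all consumers are done.  Returning non-zero
 * signals that an event was missed while re-arming, prompting an immediate
 * re-poll.  my_hw_rearm() is a hypothetical helper.
 *
 *	static int my_trig_try_reenable(struct iio_trigger *trig)
 *	{
 *		return my_hw_rearm(iio_trigger_get_drvdata(trig));
 *	}
 *
 *	static const struct iio_trigger_ops my_trigger_ops = {
 *		.owner = THIS_MODULE,
 *		.try_reenable = my_trig_try_reenable,
 *	};
 */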

/* Trigger Consumer related functions */
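/*
 * Each trigger owns a small pool of virtual irqs, one per possible consumer.
 * Attaching a consumer claims one bit from the pool and maps it onto the
 * trigger's subirq base; ilog2(1) requests an order-0 region, i.e. a single
 * bit.
 */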
static int iio_trigger_get_irq(struct iio_trigger *trig)
{
	int ret;

	mutex_lock(&trig->pool_lock);
	ret = bitmap_find_free_region(trig->pool,
				      CONFIG_IIO_CONSUMERS_PER_TRIGGER,
				      ilog2(1));
	mutex_unlock(&trig->pool_lock);
	if (ret >= 0)
		ret += trig->subirq_base;

	return ret;
}

static void iio_trigger_put_irq(struct iio_trigger *trig, int irq)
{
	mutex_lock(&trig->pool_lock);
	clear_bit(irq - trig->subirq_base, trig->pool);
	mutex_unlock(&trig->pool_lock);
}

/* Complexity in here.  With certain triggers (datardy) an acknowledgement
 * may be needed if the pollfuncs do not include the data read for the
 * triggering device.
 * This is not currently handled.  An alternative of not enabling the
 * trigger unless the relevant function is present may be the best option.
 */
/* Worth protecting against double additions? */
static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
					struct iio_poll_func *pf)
{
	int ret = 0;
	bool notinuse
		= bitmap_empty(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

	/* Prevent the module from being removed whilst attached to a trigger */
	__module_get(pf->indio_dev->info->driver_module);

	pf->irq = iio_trigger_get_irq(trig);
	if (pf->irq < 0) {
		/* No free consumer slot in the trigger's irq pool */
		ret = pf->irq;
		goto out_put_module;
	}

	ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
				   pf->type, pf->name,
				   pf);
	if (ret < 0)
		goto out_put_irq;

	if (trig->ops && trig->ops->set_trigger_state && notinuse) {
		ret = trig->ops->set_trigger_state(trig, true);
		if (ret < 0)
			goto out_free_irq;
	}

	return 0;

out_free_irq:
	free_irq(pf->irq, pf);
out_put_irq:
	iio_trigger_put_irq(trig, pf->irq);
out_put_module:
	module_put(pf->indio_dev->info->driver_module);
	return ret;
}

static int iio_trigger_detach_poll_func(struct iio_trigger *trig,
					 struct iio_poll_func *pf)
{
	int ret = 0;
	bool no_other_users
		= (bitmap_weight(trig->pool,
				 CONFIG_IIO_CONSUMERS_PER_TRIGGER)
		   == 1);

	if (trig->ops && trig->ops->set_trigger_state && no_other_users) {
		ret = trig->ops->set_trigger_state(trig, false);
		if (ret)
			goto error_ret;
	}
	iio_trigger_put_irq(trig, pf->irq);
	free_irq(pf->irq, pf);
	module_put(pf->indio_dev->info->driver_module);

error_ret:
	return ret;
}

irqreturn_t iio_pollfunc_store_time(int irq, void *p)
{
	struct iio_poll_func *pf = p;

	pf->timestamp = iio_get_time_ns();
	return IRQ_WAKE_THREAD;
}
EXPORT_SYMBOL(iio_pollfunc_store_time);
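
/*
 * Example (illustrative sketch): a triggered-buffer driver typically pairs
 * iio_pollfunc_store_time as the top half with its own threaded bottom half.
 * my_trigger_handler is a hypothetical thread function.
 *
 *	indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
 *						 &my_trigger_handler,
 *						 IRQF_ONESHOT,
 *						 indio_dev,
 *						 "%s_consumer%d",
 *						 indio_dev->name,
 *						 indio_dev->id);
 *	if (indio_dev->pollfunc == NULL)
 *		return -ENOMEM;
 */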

struct iio_poll_func
*iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p),
		    irqreturn_t (*thread)(int irq, void *p),
		    int type,
		    struct iio_dev *indio_dev,
		    const char *fmt,
		    ...)
{
	va_list vargs;
	struct iio_poll_func *pf;

	pf = kmalloc(sizeof *pf, GFP_KERNEL);
	if (pf == NULL)
		return NULL;
	va_start(vargs, fmt);
	pf->name = kvasprintf(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
	if (pf->name == NULL) {
		kfree(pf);
		return NULL;
	}
	pf->h = h;
	pf->thread = thread;
	pf->type = type;
	pf->indio_dev = indio_dev;

	return pf;
}
EXPORT_SYMBOL_GPL(iio_alloc_pollfunc);

void iio_dealloc_pollfunc(struct iio_poll_func *pf)
{
	kfree(pf->name);
	kfree(pf);
}
EXPORT_SYMBOL_GPL(iio_dealloc_pollfunc);

/**
 * iio_trigger_read_current() - trigger consumer sysfs query current trigger
 *
 * For trigger consumers the current_trigger interface allows the trigger
 * used by the device to be queried.
 **/
static ssize_t iio_trigger_read_current(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	if (indio_dev->trig)
		return sprintf(buf, "%s\n", indio_dev->trig->name);
	return 0;
}

/**
 * iio_trigger_write_current() - trigger consumer sysfs set current trigger
 *
 * For trigger consumers the current_trigger interface allows the trigger
 * used for this device to be specified at run time based on the trigger's
 * name.
 **/
static ssize_t iio_trigger_write_current(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf,
					 size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_trigger *oldtrig = indio_dev->trig;
	struct iio_trigger *trig;
	int ret;

	mutex_lock(&indio_dev->mlock);
	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
		mutex_unlock(&indio_dev->mlock);
		return -EBUSY;
	}
	mutex_unlock(&indio_dev->mlock);

	trig = iio_trigger_find_by_name(buf, len);
	if (oldtrig == trig)
		return len;

	if (trig && indio_dev->info->validate_trigger) {
		ret = indio_dev->info->validate_trigger(indio_dev, trig);
		if (ret)
			return ret;
	}

	if (trig && trig->ops && trig->ops->validate_device) {
		ret = trig->ops->validate_device(trig, indio_dev);
		if (ret)
			return ret;
	}

	indio_dev->trig = trig;

	if (oldtrig && indio_dev->trig != oldtrig)
		iio_trigger_put(oldtrig);
	if (indio_dev->trig)
		iio_trigger_get(indio_dev->trig);

	return len;
}

static DEVICE_ATTR(current_trigger, S_IRUGO | S_IWUSR,
		   iio_trigger_read_current,
		   iio_trigger_write_current);
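
/*
 * From userspace a consumer device's trigger is selected by name, e.g.
 * (device and trigger numbers are examples only):
 *
 *	cat /sys/bus/iio/devices/trigger0/name > \
 *		/sys/bus/iio/devices/iio:device0/trigger/current_trigger
 *
 * Writing a name that matches no registered trigger detaches the current
 * one.
 */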

static struct attribute *iio_trigger_consumer_attrs[] = {
	&dev_attr_current_trigger.attr,
	NULL,
};

static const struct attribute_group iio_trigger_consumer_attr_group = {
	.name = "trigger",
	.attrs = iio_trigger_consumer_attrs,
};

static void iio_trig_release(struct device *device)
{
	struct iio_trigger *trig = to_iio_trigger(device);
	int i;

	if (trig->subirq_base) {
		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			irq_modify_status(trig->subirq_base + i,
					  IRQ_NOAUTOEN,
					  IRQ_NOREQUEST | IRQ_NOPROBE);
			irq_set_chip(trig->subirq_base + i,
				     NULL);
			irq_set_handler(trig->subirq_base + i,
					NULL);
		}

		irq_free_descs(trig->subirq_base,
			       CONFIG_IIO_CONSUMERS_PER_TRIGGER);
	}
	kfree(trig->name);
	kfree(trig);
}

static struct device_type iio_trig_type = {
	.release = iio_trig_release,
	.groups = iio_trig_attr_groups,
};

static void iio_trig_subirqmask(struct irq_data *d)
{
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	struct iio_trigger *trig
		= container_of(chip,
			       struct iio_trigger, subirq_chip);

	trig->subirqs[d->irq - trig->subirq_base].enabled = false;
}

static void iio_trig_subirqunmask(struct irq_data *d)
{
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	struct iio_trigger *trig
		= container_of(chip,
			       struct iio_trigger, subirq_chip);

	trig->subirqs[d->irq - trig->subirq_base].enabled = true;
}

static struct iio_trigger *viio_trigger_alloc(const char *fmt, va_list vargs)
{
	struct iio_trigger *trig;

	trig = kzalloc(sizeof *trig, GFP_KERNEL);
	if (trig) {
		int i;

		trig->dev.type = &iio_trig_type;
		trig->dev.bus = &iio_bus_type;
		device_initialize(&trig->dev);

		mutex_init(&trig->pool_lock);
		trig->subirq_base
			= irq_alloc_descs(-1, 0,
					  CONFIG_IIO_CONSUMERS_PER_TRIGGER,
					  0);
		if (trig->subirq_base < 0) {
			kfree(trig);
			return NULL;
		}

		trig->name = kvasprintf(GFP_KERNEL, fmt, vargs);
		if (trig->name == NULL) {
			irq_free_descs(trig->subirq_base,
				       CONFIG_IIO_CONSUMERS_PER_TRIGGER);
			kfree(trig);
			return NULL;
		}
		trig->subirq_chip.name = trig->name;
		trig->subirq_chip.irq_mask = &iio_trig_subirqmask;
		trig->subirq_chip.irq_unmask = &iio_trig_subirqunmask;
		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			irq_set_chip(trig->subirq_base + i,
				     &trig->subirq_chip);
			irq_set_handler(trig->subirq_base + i,
					&handle_simple_irq);
			irq_modify_status(trig->subirq_base + i,
					  IRQ_NOREQUEST | IRQ_NOAUTOEN,
					  IRQ_NOPROBE);
		}
		get_device(&trig->dev);
	}

	return trig;
}

struct iio_trigger *iio_trigger_alloc(const char *fmt, ...)
{
	struct iio_trigger *trig;
	va_list vargs;

	va_start(vargs, fmt);
	trig = viio_trigger_alloc(fmt, vargs);
	va_end(vargs);

	return trig;
}
EXPORT_SYMBOL(iio_trigger_alloc);

void iio_trigger_free(struct iio_trigger *trig)
{
	if (trig)
		put_device(&trig->dev);
}
EXPORT_SYMBOL(iio_trigger_free);
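
/*
 * Example (illustrative sketch): matching teardown for a trigger set up as
 * in the registration example above; unregister before freeing so the sysfs
 * entry and the id are released first.
 *
 *	iio_trigger_unregister(trig);
 *	iio_trigger_free(trig);
 */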

static void devm_iio_trigger_release(struct device *dev, void *res)
{
	iio_trigger_free(*(struct iio_trigger **)res);
}

static int devm_iio_trigger_match(struct device *dev, void *res, void *data)
{
	struct iio_trigger **r = res;

	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}

	return *r == data;
}

struct iio_trigger *devm_iio_trigger_alloc(struct device *dev,
						const char *fmt, ...)
{
	struct iio_trigger **ptr, *trig;
	va_list vargs;

	ptr = devres_alloc(devm_iio_trigger_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return NULL;

	va_start(vargs, fmt);
	trig = viio_trigger_alloc(fmt, vargs);
	va_end(vargs);
	if (trig) {
		*ptr = trig;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return trig;
}
EXPORT_SYMBOL_GPL(devm_iio_trigger_alloc);
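
/*
 * Example (illustrative sketch): with the managed variant the trigger is
 * freed automatically on driver detach, so probe error paths only need to
 * undo registration.  my_probe() and my_trigger_ops are hypothetical.
 *
 *	static int my_probe(struct device *dev)
 *	{
 *		struct iio_trigger *trig;
 *
 *		trig = devm_iio_trigger_alloc(dev, "%s-trig", dev_name(dev));
 *		if (!trig)
 *			return -ENOMEM;
 *		trig->ops = &my_trigger_ops;
 *		return iio_trigger_register(trig);
 *	}
 */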

void devm_iio_trigger_free(struct device *dev, struct iio_trigger *iio_trig)
{
	int rc;

	rc = devres_release(dev, devm_iio_trigger_release,
			    devm_iio_trigger_match, iio_trig);
	WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_iio_trigger_free);

void iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
{
	indio_dev->groups[indio_dev->groupcounter++] =
		&iio_trigger_consumer_attr_group;
}

void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
{
	/* Clean up an associated but not attached trigger reference */
	if (indio_dev->trig)
		iio_trigger_put(indio_dev->trig);
}

int iio_triggered_buffer_postenable(struct iio_dev *indio_dev)
{
	return iio_trigger_attach_poll_func(indio_dev->trig,
					    indio_dev->pollfunc);
}
EXPORT_SYMBOL(iio_triggered_buffer_postenable);

int iio_triggered_buffer_predisable(struct iio_dev *indio_dev)
{
	return iio_trigger_detach_poll_func(indio_dev->trig,
					     indio_dev->pollfunc);
}
EXPORT_SYMBOL(iio_triggered_buffer_predisable);
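
/*
 * Example (illustrative sketch): drivers usually plug the two helpers above
 * into their buffer setup ops so the poll function is attached when the
 * buffer is enabled and detached when it is disabled.
 * my_buffer_setup_ops is a hypothetical name.
 *
 *	static const struct iio_buffer_setup_ops my_buffer_setup_ops = {
 *		.postenable = &iio_triggered_buffer_postenable,
 *		.predisable = &iio_triggered_buffer_predisable,
 *	};
 *
 *	indio_dev->setup_ops = &my_buffer_setup_ops;
 */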