/* The industrial I/O core, trigger handling functions
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
#include "iio_core.h"
#include "iio_core_trigger.h"
#include <linux/iio/trigger_consumer.h>

/* RFC - Question of approach
 * Make the common case (single sensor, single trigger)
 * simple by starting trigger capture when the first sensor
 * is added.
 *
 * Complex simultaneous start requires use of 'hold' functionality
 * of the trigger. (not implemented)
 *
 * Any other suggestions?
 */

static DEFINE_IDA(iio_trigger_ida);

/* Single list of all available triggers */
static LIST_HEAD(iio_trigger_list);
static DEFINE_MUTEX(iio_trigger_list_lock);

/**
 * iio_trigger_read_name() - retrieve useful identifying name
 * @dev:	device associated with the trigger
 * @attr:	device attribute being read
 * @buf:	output buffer for the trigger name
 **/
static ssize_t iio_trigger_read_name(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct iio_trigger *trig = to_iio_trigger(dev);

	return sprintf(buf, "%s\n", trig->name);
}

static DEVICE_ATTR(name, S_IRUGO, iio_trigger_read_name, NULL);

static struct attribute *iio_trig_dev_attrs[] = {
	&dev_attr_name.attr,
	NULL,
};
ATTRIBUTE_GROUPS(iio_trig_dev);

int iio_trigger_register(struct iio_trigger *trig_info)
{
	int ret;

	trig_info->id = ida_simple_get(&iio_trigger_ida, 0, 0, GFP_KERNEL);
	if (trig_info->id < 0)
		return trig_info->id;

	/* Set the name used for the sysfs directory etc */
	dev_set_name(&trig_info->dev, "trigger%d", trig_info->id);

	ret = device_add(&trig_info->dev);
	if (ret)
		goto error_unregister_id;

	/* Add to list of available triggers held by the IIO core */
	mutex_lock(&iio_trigger_list_lock);
	list_add_tail(&trig_info->list, &iio_trigger_list);
	mutex_unlock(&iio_trigger_list_lock);

	return 0;

error_unregister_id:
	ida_simple_remove(&iio_trigger_ida, trig_info->id);
	return ret;
}
EXPORT_SYMBOL(iio_trigger_register);

void iio_trigger_unregister(struct iio_trigger *trig_info)
{
	mutex_lock(&iio_trigger_list_lock);
	list_del(&trig_info->list);
	mutex_unlock(&iio_trigger_list_lock);

	ida_simple_remove(&iio_trigger_ida, trig_info->id);
	/* Possible issue in here */
	device_del(&trig_info->dev);
}
EXPORT_SYMBOL(iio_trigger_unregister);
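/*
 * Example (an illustrative sketch only; my_trig_ops and idx are hypothetical
 * names supplied by the calling driver): a trigger provider typically pairs
 * the register/unregister calls above with allocation and free:
 *
 *	trig = iio_trigger_alloc("mydrv-trig%d", idx);
 *	if (!trig)
 *		return -ENOMEM;
 *	trig->ops = &my_trig_ops;
 *	ret = iio_trigger_register(trig);
 *	if (ret) {
 *		iio_trigger_free(trig);
 *		return ret;
 *	}
 *	...
 *	iio_trigger_unregister(trig);
 *	iio_trigger_free(trig);
 */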
static struct iio_trigger *iio_trigger_find_by_name(const char *name,
						    size_t len)
{
	struct iio_trigger *trig = NULL, *iter;

	mutex_lock(&iio_trigger_list_lock);
	list_for_each_entry(iter, &iio_trigger_list, list)
		if (sysfs_streq(iter->name, name)) {
			trig = iter;
			break;
		}
	mutex_unlock(&iio_trigger_list_lock);

	return trig;
}

/*
 * Run this trigger's consumers. use_count is primed to the maximum number
 * of consumers so that a new poll cannot start until every consumer from
 * the previous one has called iio_trigger_notify_done().
 */
void iio_trigger_poll(struct iio_trigger *trig, s64 time)
{
	int i;

	if (!atomic_read(&trig->use_count)) {
		atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			if (trig->subirqs[i].enabled)
				generic_handle_irq(trig->subirq_base + i);
			else
				iio_trigger_notify_done(trig);
		}
	}
}
EXPORT_SYMBOL(iio_trigger_poll);

irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private)
{
	iio_trigger_poll(private, iio_get_time_ns());
	return IRQ_HANDLED;
}
EXPORT_SYMBOL(iio_trigger_generic_data_rdy_poll);
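/*
 * Example (an illustrative sketch; irq and trig come from the calling
 * driver): a device with a dedicated data-ready line can use the generic
 * handler above directly as its interrupt handler, passing the trigger as
 * the dev_id cookie:
 *
 *	ret = request_irq(irq, iio_trigger_generic_data_rdy_poll,
 *			  IRQF_TRIGGER_RISING, trig->name, trig);
 */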
void iio_trigger_poll_chained(struct iio_trigger *trig, s64 time)
{
	int i;

	if (!atomic_read(&trig->use_count)) {
		atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			if (trig->subirqs[i].enabled)
				handle_nested_irq(trig->subirq_base + i);
			else
				iio_trigger_notify_done(trig);
		}
	}
}
EXPORT_SYMBOL(iio_trigger_poll_chained);

void iio_trigger_notify_done(struct iio_trigger *trig)
{
	if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
	    trig->ops->try_reenable)
		if (trig->ops->try_reenable(trig))
			/* Missed an interrupt so launch new poll now */
			iio_trigger_poll(trig, 0);
}
EXPORT_SYMBOL(iio_trigger_notify_done);

/* Trigger Consumer related functions */
static int iio_trigger_get_irq(struct iio_trigger *trig)
{
	int ret;

	mutex_lock(&trig->pool_lock);
	ret = bitmap_find_free_region(trig->pool,
				      CONFIG_IIO_CONSUMERS_PER_TRIGGER,
				      ilog2(1));
	mutex_unlock(&trig->pool_lock);
	if (ret >= 0)
		ret += trig->subirq_base;

	return ret;
}

static void iio_trigger_put_irq(struct iio_trigger *trig, int irq)
{
	mutex_lock(&trig->pool_lock);
	clear_bit(irq - trig->subirq_base, trig->pool);
	mutex_unlock(&trig->pool_lock);
}

/* Complexity in here. With certain triggers (datardy) an acknowledgement
 * may be needed if the pollfuncs do not include the data read for the
 * triggering device.
 * This is not currently handled. The alternative of not enabling the
 * trigger unless the relevant pollfunc is attached may be the best option.
 */
/* Worth protecting against double additions? */
static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
					struct iio_poll_func *pf)
{
	int ret = 0;
	bool notinuse
		= bitmap_empty(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

	/* Prevent the module from being removed whilst attached to a trigger */
	__module_get(pf->indio_dev->info->driver_module);
	pf->irq = iio_trigger_get_irq(trig);
	ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
				   pf->type, pf->name,
				   pf);
	if (ret < 0) {
		module_put(pf->indio_dev->info->driver_module);
		return ret;
	}

	if (trig->ops && trig->ops->set_trigger_state && notinuse) {
		ret = trig->ops->set_trigger_state(trig, true);
		if (ret < 0)
			module_put(pf->indio_dev->info->driver_module);
	}

	return ret;
}

static int iio_trigger_detach_poll_func(struct iio_trigger *trig,
					struct iio_poll_func *pf)
{
	int ret = 0;
	bool no_other_users
		= (bitmap_weight(trig->pool,
				 CONFIG_IIO_CONSUMERS_PER_TRIGGER)
		   == 1);

	if (trig->ops && trig->ops->set_trigger_state && no_other_users) {
		ret = trig->ops->set_trigger_state(trig, false);
		if (ret)
			return ret;
	}
	iio_trigger_put_irq(trig, pf->irq);
	free_irq(pf->irq, pf);
	module_put(pf->indio_dev->info->driver_module);

	return ret;
}

irqreturn_t iio_pollfunc_store_time(int irq, void *p)
{
	struct iio_poll_func *pf = p;

	pf->timestamp = iio_get_time_ns();
	return IRQ_WAKE_THREAD;
}
EXPORT_SYMBOL(iio_pollfunc_store_time);

struct iio_poll_func
*iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p),
		    irqreturn_t (*thread)(int irq, void *p),
		    int type,
		    struct iio_dev *indio_dev,
		    const char *fmt,
		    ...)
{
	va_list vargs;
	struct iio_poll_func *pf;

	pf = kmalloc(sizeof(*pf), GFP_KERNEL);
	if (pf == NULL)
		return NULL;
	va_start(vargs, fmt);
	pf->name = kvasprintf(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
	if (pf->name == NULL) {
		kfree(pf);
		return NULL;
	}
	pf->h = h;
	pf->thread = thread;
	pf->type = type;
	pf->indio_dev = indio_dev;

	return pf;
}
EXPORT_SYMBOL_GPL(iio_alloc_pollfunc);

void iio_dealloc_pollfunc(struct iio_poll_func *pf)
{
	kfree(pf->name);
	kfree(pf);
}
EXPORT_SYMBOL_GPL(iio_dealloc_pollfunc);
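/*
 * Example (an illustrative sketch; mydev_trigger_handler is a hypothetical
 * driver function): a consumer normally allocates its poll function once at
 * setup, capturing the timestamp in the top half and reading data in the
 * threaded bottom half:
 *
 *	indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
 *						 &mydev_trigger_handler,
 *						 IRQF_ONESHOT,
 *						 indio_dev,
 *						 "%s_consumer%d",
 *						 indio_dev->name,
 *						 indio_dev->id);
 */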
/**
 * iio_trigger_read_current() - trigger consumer sysfs query current trigger
 * @dev:	device associated with an industrial I/O device
 * @attr:	device attribute being read
 * @buf:	output buffer for the current trigger name
 *
 * For trigger consumers the current_trigger interface allows the trigger
 * used by the device to be queried.
 **/
static ssize_t iio_trigger_read_current(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	if (indio_dev->trig)
		return sprintf(buf, "%s\n", indio_dev->trig->name);
	return 0;
}

/**
 * iio_trigger_write_current() - trigger consumer sysfs set current trigger
 * @dev:	device associated with an industrial I/O device
 * @attr:	device attribute being written
 * @buf:	string buffer holding the requested trigger name
 * @len:	length of the string in @buf
 *
 * For trigger consumers the current_trigger interface allows the trigger
 * used for this device to be specified at run time based on the trigger's
 * name.
 **/
static ssize_t iio_trigger_write_current(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf,
					 size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_trigger *oldtrig = indio_dev->trig;
	struct iio_trigger *trig;
	int ret;

	mutex_lock(&indio_dev->mlock);
	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
		mutex_unlock(&indio_dev->mlock);
		return -EBUSY;
	}
	mutex_unlock(&indio_dev->mlock);

	trig = iio_trigger_find_by_name(buf, len);
	if (oldtrig == trig)
		return len;

	if (trig && indio_dev->info->validate_trigger) {
		ret = indio_dev->info->validate_trigger(indio_dev, trig);
		if (ret)
			return ret;
	}

	if (trig && trig->ops && trig->ops->validate_device) {
		ret = trig->ops->validate_device(trig, indio_dev);
		if (ret)
			return ret;
	}

	indio_dev->trig = trig;

	if (oldtrig)
		iio_trigger_put(oldtrig);
	if (indio_dev->trig)
		iio_trigger_get(indio_dev->trig);

	return len;
}

static DEVICE_ATTR(current_trigger, S_IRUGO | S_IWUSR,
		   iio_trigger_read_current,
		   iio_trigger_write_current);

static struct attribute *iio_trigger_consumer_attrs[] = {
	&dev_attr_current_trigger.attr,
	NULL,
};

static const struct attribute_group iio_trigger_consumer_attr_group = {
	.name = "trigger",
	.attrs = iio_trigger_consumer_attrs,
};
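/*
 * Example (illustrative; device and trigger numbering will vary): the group
 * above appears in sysfs as a "trigger" directory, so selecting a trigger
 * from userspace looks like
 *
 *	echo trigger0 > /sys/bus/iio/devices/iio:device0/trigger/current_trigger
 *
 * With the code above, writing a name that matches no registered trigger
 * clears the current trigger.
 */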
static void iio_trig_release(struct device *device)
{
	struct iio_trigger *trig = to_iio_trigger(device);
	int i;

	if (trig->subirq_base) {
		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			irq_modify_status(trig->subirq_base + i,
					  IRQ_NOAUTOEN,
					  IRQ_NOREQUEST | IRQ_NOPROBE);
			irq_set_chip(trig->subirq_base + i,
				     NULL);
			irq_set_handler(trig->subirq_base + i,
					NULL);
		}

		irq_free_descs(trig->subirq_base,
			       CONFIG_IIO_CONSUMERS_PER_TRIGGER);
	}
	kfree(trig->name);
	kfree(trig);
}

static struct device_type iio_trig_type = {
	.release = iio_trig_release,
	.groups = iio_trig_dev_groups,
};

static void iio_trig_subirqmask(struct irq_data *d)
{
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	struct iio_trigger *trig
		= container_of(chip,
			       struct iio_trigger, subirq_chip);

	trig->subirqs[d->irq - trig->subirq_base].enabled = false;
}

static void iio_trig_subirqunmask(struct irq_data *d)
{
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	struct iio_trigger *trig
		= container_of(chip,
			       struct iio_trigger, subirq_chip);

	trig->subirqs[d->irq - trig->subirq_base].enabled = true;
}

static struct iio_trigger *viio_trigger_alloc(const char *fmt, va_list vargs)
{
	struct iio_trigger *trig;

	trig = kzalloc(sizeof(*trig), GFP_KERNEL);
	if (trig) {
		int i;

		trig->dev.type = &iio_trig_type;
		trig->dev.bus = &iio_bus_type;
		device_initialize(&trig->dev);

		mutex_init(&trig->pool_lock);
		trig->subirq_base
			= irq_alloc_descs(-1, 0,
					  CONFIG_IIO_CONSUMERS_PER_TRIGGER,
					  0);
		if (trig->subirq_base < 0) {
			kfree(trig);
			return NULL;
		}

		trig->name = kvasprintf(GFP_KERNEL, fmt, vargs);
		if (trig->name == NULL) {
			irq_free_descs(trig->subirq_base,
				       CONFIG_IIO_CONSUMERS_PER_TRIGGER);
			kfree(trig);
			return NULL;
		}
		trig->subirq_chip.name = trig->name;
		trig->subirq_chip.irq_mask = &iio_trig_subirqmask;
		trig->subirq_chip.irq_unmask = &iio_trig_subirqunmask;
		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			irq_set_chip(trig->subirq_base + i,
				     &trig->subirq_chip);
			irq_set_handler(trig->subirq_base + i,
					&handle_simple_irq);
			irq_modify_status(trig->subirq_base + i,
					  IRQ_NOREQUEST | IRQ_NOAUTOEN,
					  IRQ_NOPROBE);
		}
		get_device(&trig->dev);
	}

	return trig;
}

struct iio_trigger *iio_trigger_alloc(const char *fmt, ...)
{
	struct iio_trigger *trig;
	va_list vargs;

	va_start(vargs, fmt);
	trig = viio_trigger_alloc(fmt, vargs);
	va_end(vargs);

	return trig;
}
EXPORT_SYMBOL(iio_trigger_alloc);

void iio_trigger_free(struct iio_trigger *trig)
{
	if (trig)
		put_device(&trig->dev);
}
EXPORT_SYMBOL(iio_trigger_free);

static void devm_iio_trigger_release(struct device *dev, void *res)
{
	iio_trigger_free(*(struct iio_trigger **)res);
}

static int devm_iio_trigger_match(struct device *dev, void *res, void *data)
{
	struct iio_trigger **r = res;

	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}

	return *r == data;
}

/**
 * devm_iio_trigger_alloc - Resource-managed iio_trigger_alloc()
 * @dev:		Device to allocate iio_trigger for
 * @fmt:		trigger name format. If it includes format
 *			specifiers, the additional arguments following
 *			format are formatted and inserted in the resulting
 *			string replacing their respective specifiers.
 *
 * Managed iio_trigger_alloc. iio_trigger allocated with this function is
 * automatically freed on driver detach.
 *
 * If an iio_trigger allocated with this function needs to be freed separately,
 * devm_iio_trigger_free() must be used.
 *
 * RETURNS:
 * Pointer to allocated iio_trigger on success, NULL on failure.
 */
struct iio_trigger *devm_iio_trigger_alloc(struct device *dev,
					   const char *fmt, ...)
{
	struct iio_trigger **ptr, *trig;
	va_list vargs;

	ptr = devres_alloc(devm_iio_trigger_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return NULL;

	/* use raw alloc_dr for kmalloc caller tracing */
	va_start(vargs, fmt);
	trig = viio_trigger_alloc(fmt, vargs);
	va_end(vargs);
	if (trig) {
		*ptr = trig;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return trig;
}
EXPORT_SYMBOL_GPL(devm_iio_trigger_alloc);
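/*
 * Example (an illustrative sketch; pdev comes from the calling driver's
 * probe): the managed variant removes the need for an explicit
 * iio_trigger_free() on the error and remove paths:
 *
 *	trig = devm_iio_trigger_alloc(&pdev->dev, "%s-dev%d",
 *				      indio_dev->name, indio_dev->id);
 *	if (!trig)
 *		return -ENOMEM;
 */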
/**
 * devm_iio_trigger_free - Resource-managed iio_trigger_free()
 * @dev:		Device this iio_trigger belongs to
 * @iio_trig:		the iio_trigger associated with the device
 *
 * Free iio_trigger allocated with devm_iio_trigger_alloc().
 */
void devm_iio_trigger_free(struct device *dev, struct iio_trigger *iio_trig)
{
	int rc;

	rc = devres_release(dev, devm_iio_trigger_release,
			    devm_iio_trigger_match, iio_trig);
	WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_iio_trigger_free);

void iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
{
	indio_dev->groups[indio_dev->groupcounter++] =
		&iio_trigger_consumer_attr_group;
}

void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
{
	/* Clean up an associated but not attached trigger reference */
	if (indio_dev->trig)
		iio_trigger_put(indio_dev->trig);
}

int iio_triggered_buffer_postenable(struct iio_dev *indio_dev)
{
	return iio_trigger_attach_poll_func(indio_dev->trig,
					    indio_dev->pollfunc);
}
EXPORT_SYMBOL(iio_triggered_buffer_postenable);

int iio_triggered_buffer_predisable(struct iio_dev *indio_dev)
{
	return iio_trigger_detach_poll_func(indio_dev->trig,
					    indio_dev->pollfunc);
}
EXPORT_SYMBOL(iio_triggered_buffer_predisable);
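/*
 * Example (an illustrative sketch; mydev_buffer_setup_ops is a hypothetical
 * name): the two helpers above are typically wired into a driver's buffer
 * setup ops so the poll function is attached when the buffer is enabled and
 * detached before it is disabled:
 *
 *	static const struct iio_buffer_setup_ops mydev_buffer_setup_ops = {
 *		.postenable = &iio_triggered_buffer_postenable,
 *		.predisable = &iio_triggered_buffer_predisable,
 *	};
 */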