/* The industrial I/O core, trigger handling functions
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
#include "iio_core.h"
#include "iio_core_trigger.h"
#include <linux/iio/trigger_consumer.h>

/* RFC - Question of approach
 * Make the common case (single sensor, single trigger)
 * simple by starting trigger capture when the first sensor
 * is added.
 *
 * Complex simultaneous start requires use of 'hold' functionality
 * of the trigger. (not implemented)
 *
 * Any other suggestions?
 */

static DEFINE_IDA(iio_trigger_ida);

/* Single list of all available triggers */
static LIST_HEAD(iio_trigger_list);
static DEFINE_MUTEX(iio_trigger_list_lock);

/**
 * iio_trigger_read_name() - retrieve useful identifying name
 **/
static ssize_t iio_trigger_read_name(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct iio_trigger *trig = to_iio_trigger(dev);

	return sprintf(buf, "%s\n", trig->name);
}

static DEVICE_ATTR(name, S_IRUGO, iio_trigger_read_name, NULL);

static struct attribute *iio_trig_dev_attrs[] = {
	&dev_attr_name.attr,
	NULL,
};
ATTRIBUTE_GROUPS(iio_trig_dev);

int iio_trigger_register(struct iio_trigger *trig_info)
{
	int ret;

	trig_info->id = ida_simple_get(&iio_trigger_ida, 0, 0, GFP_KERNEL);
	if (trig_info->id < 0) {
		ret = trig_info->id;
		goto error_ret;
	}
	/* Set the name used for the sysfs directory etc */
	dev_set_name(&trig_info->dev, "trigger%d", trig_info->id);

	ret = device_add(&trig_info->dev);
	if (ret)
		goto error_unregister_id;

	/* Add to list of available triggers held by the IIO core */
	mutex_lock(&iio_trigger_list_lock);
	list_add_tail(&trig_info->list, &iio_trigger_list);
	mutex_unlock(&iio_trigger_list_lock);

	return 0;

error_unregister_id:
	ida_simple_remove(&iio_trigger_ida, trig_info->id);
error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_trigger_register);

void iio_trigger_unregister(struct iio_trigger *trig_info)
{
	mutex_lock(&iio_trigger_list_lock);
	list_del(&trig_info->list);
	mutex_unlock(&iio_trigger_list_lock);

	ida_simple_remove(&iio_trigger_ida, trig_info->id);
	/* Possible issue in here */
	device_del(&trig_info->dev);
}
EXPORT_SYMBOL(iio_trigger_unregister);

static struct iio_trigger *iio_trigger_find_by_name(const char *name,
						    size_t len)
{
	struct iio_trigger *trig = NULL, *iter;

	mutex_lock(&iio_trigger_list_lock);
	list_for_each_entry(iter, &iio_trigger_list, list)
		if (sysfs_streq(iter->name, name)) {
			trig = iter;
			break;
		}
	mutex_unlock(&iio_trigger_list_lock);

	return trig;
}

void iio_trigger_poll(struct iio_trigger *trig, s64 time)
{
	int i;

	if (!atomic_read(&trig->use_count)) {
		atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			if (trig->subirqs[i].enabled)
				generic_handle_irq(trig->subirq_base + i);
			else
				iio_trigger_notify_done(trig);
		}
	}
}
EXPORT_SYMBOL(iio_trigger_poll);
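
/*
 * A minimal sketch of how a trigger driver would typically call
 * iio_trigger_poll() from its hard interrupt handler. The handler name
 * here is hypothetical, for illustration only:
 *
 *	static irqreturn_t my_trig_irq_handler(int irq, void *private)
 *	{
 *		struct iio_trigger *trig = private;
 *
 *		iio_trigger_poll(trig, iio_get_time_ns());
 *		return IRQ_HANDLED;
 *	}
 *
 * iio_trigger_generic_data_rdy_poll() below provides exactly this
 * behaviour for the common data-ready case.
 */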
irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private)
{
	iio_trigger_poll(private, iio_get_time_ns());
	return IRQ_HANDLED;
}
EXPORT_SYMBOL(iio_trigger_generic_data_rdy_poll);

void iio_trigger_poll_chained(struct iio_trigger *trig, s64 time)
{
	int i;

	if (!atomic_read(&trig->use_count)) {
		atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			if (trig->subirqs[i].enabled)
				handle_nested_irq(trig->subirq_base + i);
			else
				iio_trigger_notify_done(trig);
		}
	}
}
EXPORT_SYMBOL(iio_trigger_poll_chained);

void iio_trigger_notify_done(struct iio_trigger *trig)
{
	if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
	    trig->ops->try_reenable)
		if (trig->ops->try_reenable(trig))
			/* Missed an interrupt so launch new poll now */
			iio_trigger_poll(trig, 0);
}
EXPORT_SYMBOL(iio_trigger_notify_done);

/* Trigger Consumer related functions */
static int iio_trigger_get_irq(struct iio_trigger *trig)
{
	int ret;

	mutex_lock(&trig->pool_lock);
	ret = bitmap_find_free_region(trig->pool,
				      CONFIG_IIO_CONSUMERS_PER_TRIGGER,
				      ilog2(1));
	mutex_unlock(&trig->pool_lock);
	if (ret >= 0)
		ret += trig->subirq_base;

	return ret;
}

static void iio_trigger_put_irq(struct iio_trigger *trig, int irq)
{
	mutex_lock(&trig->pool_lock);
	clear_bit(irq - trig->subirq_base, trig->pool);
	mutex_unlock(&trig->pool_lock);
}
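
/*
 * A sketch of the pool bookkeeping above, assuming a value of 2 for
 * CONFIG_IIO_CONSUMERS_PER_TRIGGER: each consumer attached to a trigger
 * claims one bit in trig->pool and is handed the matching virtual irq
 * (trig->subirq_base + bit). ilog2(1) == 0, so bitmap_find_free_region()
 * is asked for an order-0 region, i.e. a single bit:
 *
 *	pool bits:  [0][1]	initially both clear
 *	consumer A: iio_trigger_get_irq() -> subirq_base + 0
 *	consumer B: iio_trigger_get_irq() -> subirq_base + 1
 *	consumer A: iio_trigger_put_irq() clears bit 0 again
 */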
/* Complexity in here. With certain triggers (datardy) an acknowledgement
 * may be needed if the pollfuncs do not include the data read for the
 * triggering device.
 * This is not currently handled. Alternative of not enabling trigger unless
 * the relevant function is in there may be the best option.
 */
/* Worth protecting against double additions? */
static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
					struct iio_poll_func *pf)
{
	int ret = 0;
	bool notinuse
		= bitmap_empty(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER);

	/* Prevent the module from being removed whilst attached to a trigger */
	__module_get(pf->indio_dev->info->driver_module);
	pf->irq = iio_trigger_get_irq(trig);
	if (pf->irq < 0) {
		/* No free consumer slot left in the trigger's irq pool */
		ret = pf->irq;
		goto error_put_module;
	}
	ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
				   pf->type, pf->name,
				   pf);
	if (ret < 0)
		goto error_put_irq;

	if (trig->ops && trig->ops->set_trigger_state && notinuse) {
		ret = trig->ops->set_trigger_state(trig, true);
		if (ret < 0)
			goto error_free_irq;
	}

	return 0;

error_free_irq:
	free_irq(pf->irq, pf);
error_put_irq:
	iio_trigger_put_irq(trig, pf->irq);
error_put_module:
	module_put(pf->indio_dev->info->driver_module);
	return ret;
}

static int iio_trigger_detach_poll_func(struct iio_trigger *trig,
					struct iio_poll_func *pf)
{
	int ret = 0;
	bool no_other_users
		= (bitmap_weight(trig->pool,
				 CONFIG_IIO_CONSUMERS_PER_TRIGGER)
		   == 1);

	if (trig->ops && trig->ops->set_trigger_state && no_other_users) {
		ret = trig->ops->set_trigger_state(trig, false);
		if (ret)
			goto error_ret;
	}
	iio_trigger_put_irq(trig, pf->irq);
	free_irq(pf->irq, pf);
	module_put(pf->indio_dev->info->driver_module);

error_ret:
	return ret;
}

irqreturn_t iio_pollfunc_store_time(int irq, void *p)
{
	struct iio_poll_func *pf = p;

	pf->timestamp = iio_get_time_ns();
	return IRQ_WAKE_THREAD;
}
EXPORT_SYMBOL(iio_pollfunc_store_time);

struct iio_poll_func
*iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p),
		    irqreturn_t (*thread)(int irq, void *p),
		    int type,
		    struct iio_dev *indio_dev,
		    const char *fmt,
		    ...)
{
	va_list vargs;
	struct iio_poll_func *pf;

	pf = kmalloc(sizeof *pf, GFP_KERNEL);
	if (pf == NULL)
		return NULL;
	va_start(vargs, fmt);
	pf->name = kvasprintf(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
	if (pf->name == NULL) {
		kfree(pf);
		return NULL;
	}
	pf->h = h;
	pf->thread = thread;
	pf->type = type;
	pf->indio_dev = indio_dev;

	return pf;
}
EXPORT_SYMBOL_GPL(iio_alloc_pollfunc);

void iio_dealloc_pollfunc(struct iio_poll_func *pf)
{
	kfree(pf->name);
	kfree(pf);
}
EXPORT_SYMBOL_GPL(iio_dealloc_pollfunc);
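
/*
 * A minimal sketch of how a consumer driver typically allocates its poll
 * function during buffer setup. The threaded handler my_trigger_handler()
 * and the name format are hypothetical:
 *
 *	indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
 *						 &my_trigger_handler,
 *						 IRQF_ONESHOT,
 *						 indio_dev,
 *						 "%s_consumer%d",
 *						 indio_dev->name,
 *						 indio_dev->id);
 *	if (indio_dev->pollfunc == NULL)
 *		return -ENOMEM;
 *
 * iio_pollfunc_store_time() then runs in hard irq context to grab the
 * timestamp, and my_trigger_handler() runs in the threaded half.
 */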
/**
 * iio_trigger_read_current() - trigger consumer sysfs query current trigger
 *
 * For trigger consumers the current_trigger interface allows the trigger
 * used by the device to be queried.
 **/
static ssize_t iio_trigger_read_current(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	if (indio_dev->trig)
		return sprintf(buf, "%s\n", indio_dev->trig->name);
	return 0;
}

/**
 * iio_trigger_write_current() - trigger consumer sysfs set current trigger
 *
 * For trigger consumers the current_trigger interface allows the trigger
 * used for this device to be specified at run time based on the trigger's
 * name.
 **/
static ssize_t iio_trigger_write_current(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf,
					 size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_trigger *oldtrig = indio_dev->trig;
	struct iio_trigger *trig;
	int ret;

	mutex_lock(&indio_dev->mlock);
	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
		mutex_unlock(&indio_dev->mlock);
		return -EBUSY;
	}
	mutex_unlock(&indio_dev->mlock);

	trig = iio_trigger_find_by_name(buf, len);
	if (oldtrig == trig)
		return len;

	if (trig && indio_dev->info->validate_trigger) {
		ret = indio_dev->info->validate_trigger(indio_dev, trig);
		if (ret)
			return ret;
	}

	if (trig && trig->ops && trig->ops->validate_device) {
		ret = trig->ops->validate_device(trig, indio_dev);
		if (ret)
			return ret;
	}

	indio_dev->trig = trig;

	if (oldtrig)
		iio_trigger_put(oldtrig);
	if (indio_dev->trig)
		iio_trigger_get(indio_dev->trig);

	return len;
}

static DEVICE_ATTR(current_trigger, S_IRUGO | S_IWUSR,
		   iio_trigger_read_current,
		   iio_trigger_write_current);

static struct attribute *iio_trigger_consumer_attrs[] = {
	&dev_attr_current_trigger.attr,
	NULL,
};

static const struct attribute_group iio_trigger_consumer_attr_group = {
	.name = "trigger",
	.attrs = iio_trigger_consumer_attrs,
};
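
/*
 * From userspace the group above appears as a "trigger" directory on the
 * consumer device. A hypothetical session, assuming a device at
 * iio:device0 and a registered trigger named "trigger0":
 *
 *	$ echo trigger0 > /sys/bus/iio/devices/iio:device0/trigger/current_trigger
 *	$ cat /sys/bus/iio/devices/iio:device0/trigger/current_trigger
 *	trigger0
 *
 * Writing a name that matches no registered trigger sets indio_dev->trig
 * to NULL, i.e. it detaches the device from its current trigger.
 */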
static void iio_trig_release(struct device *device)
{
	struct iio_trigger *trig = to_iio_trigger(device);
	int i;

	if (trig->subirq_base) {
		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			irq_modify_status(trig->subirq_base + i,
					  IRQ_NOAUTOEN,
					  IRQ_NOREQUEST | IRQ_NOPROBE);
			irq_set_chip(trig->subirq_base + i,
				     NULL);
			irq_set_handler(trig->subirq_base + i,
					NULL);
		}

		irq_free_descs(trig->subirq_base,
			       CONFIG_IIO_CONSUMERS_PER_TRIGGER);
	}
	kfree(trig->name);
	kfree(trig);
}

static struct device_type iio_trig_type = {
	.release = iio_trig_release,
	.groups = iio_trig_dev_groups,
};

static void iio_trig_subirqmask(struct irq_data *d)
{
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	struct iio_trigger *trig
		= container_of(chip,
			       struct iio_trigger, subirq_chip);

	trig->subirqs[d->irq - trig->subirq_base].enabled = false;
}

static void iio_trig_subirqunmask(struct irq_data *d)
{
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	struct iio_trigger *trig
		= container_of(chip,
			       struct iio_trigger, subirq_chip);

	trig->subirqs[d->irq - trig->subirq_base].enabled = true;
}

static struct iio_trigger *viio_trigger_alloc(const char *fmt, va_list vargs)
{
	struct iio_trigger *trig;

	trig = kzalloc(sizeof *trig, GFP_KERNEL);
	if (trig) {
		int i;

		trig->dev.type = &iio_trig_type;
		trig->dev.bus = &iio_bus_type;
		device_initialize(&trig->dev);

		mutex_init(&trig->pool_lock);
		trig->subirq_base
			= irq_alloc_descs(-1, 0,
					  CONFIG_IIO_CONSUMERS_PER_TRIGGER,
					  0);
		if (trig->subirq_base < 0) {
			kfree(trig);
			return NULL;
		}

		trig->name = kvasprintf(GFP_KERNEL, fmt, vargs);
		if (trig->name == NULL) {
			irq_free_descs(trig->subirq_base,
				       CONFIG_IIO_CONSUMERS_PER_TRIGGER);
			kfree(trig);
			return NULL;
		}
		trig->subirq_chip.name = trig->name;
		trig->subirq_chip.irq_mask = &iio_trig_subirqmask;
		trig->subirq_chip.irq_unmask = &iio_trig_subirqunmask;
		for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
			irq_set_chip(trig->subirq_base + i,
				     &trig->subirq_chip);
			irq_set_handler(trig->subirq_base + i,
					&handle_simple_irq);
			irq_modify_status(trig->subirq_base + i,
					  IRQ_NOREQUEST | IRQ_NOAUTOEN,
					  IRQ_NOPROBE);
		}
		get_device(&trig->dev);
	}

	return trig;
}

struct iio_trigger *iio_trigger_alloc(const char *fmt, ...)
{
	struct iio_trigger *trig;
	va_list vargs;

	va_start(vargs, fmt);
	trig = viio_trigger_alloc(fmt, vargs);
	va_end(vargs);

	return trig;
}
EXPORT_SYMBOL(iio_trigger_alloc);

void iio_trigger_free(struct iio_trigger *trig)
{
	if (trig)
		put_device(&trig->dev);
}
EXPORT_SYMBOL(iio_trigger_free);

static void devm_iio_trigger_release(struct device *dev, void *res)
{
	iio_trigger_free(*(struct iio_trigger **)res);
}

static int devm_iio_trigger_match(struct device *dev, void *res, void *data)
{
	struct iio_trigger **r = res;

	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}

	return *r == data;
}

/**
 * devm_iio_trigger_alloc - Resource-managed iio_trigger_alloc()
 * @dev:	Device to allocate iio_trigger for
 * @fmt:	trigger name format. If it includes format
 *		specifiers, the additional arguments following
 *		format are formatted and inserted in the resulting
 *		string replacing their respective specifiers.
 *
 * Managed iio_trigger_alloc. iio_trigger allocated with this function is
 * automatically freed on driver detach.
 *
 * If an iio_trigger allocated with this function needs to be freed separately,
 * devm_iio_trigger_free() must be used.
 *
 * RETURNS:
 * Pointer to allocated iio_trigger on success, NULL on failure.
 */
struct iio_trigger *devm_iio_trigger_alloc(struct device *dev,
					   const char *fmt, ...)
{
	struct iio_trigger **ptr, *trig;
	va_list vargs;

	ptr = devres_alloc(devm_iio_trigger_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return NULL;

	/* use raw alloc_dr for kmalloc caller tracing */
	va_start(vargs, fmt);
	trig = viio_trigger_alloc(fmt, vargs);
	va_end(vargs);
	if (trig) {
		*ptr = trig;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return trig;
}
EXPORT_SYMBOL_GPL(devm_iio_trigger_alloc);
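
/*
 * A minimal sketch of resource-managed allocation in a driver probe
 * routine; the device pointer and format arguments are hypothetical:
 *
 *	trig = devm_iio_trigger_alloc(&pdev->dev, "%s-dev%d",
 *				      indio_dev->name, indio_dev->id);
 *	if (!trig)
 *		return -ENOMEM;
 *
 * The trigger is freed automatically on driver detach, so no explicit
 * iio_trigger_free() is needed on the error paths that follow.
 */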
/**
 * devm_iio_trigger_free - Resource-managed iio_trigger_free()
 * @dev:	Device this iio_trigger belongs to
 * @iio_trig:	the iio_trigger associated with the device
 *
 * Free iio_trigger allocated with devm_iio_trigger_alloc().
 */
void devm_iio_trigger_free(struct device *dev, struct iio_trigger *iio_trig)
{
	int rc;

	rc = devres_release(dev, devm_iio_trigger_release,
			    devm_iio_trigger_match, iio_trig);
	WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_iio_trigger_free);

void iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
{
	indio_dev->groups[indio_dev->groupcounter++] =
		&iio_trigger_consumer_attr_group;
}

void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
{
	/* Clean up an associated but not attached trigger reference */
	if (indio_dev->trig)
		iio_trigger_put(indio_dev->trig);
}

int iio_triggered_buffer_postenable(struct iio_dev *indio_dev)
{
	return iio_trigger_attach_poll_func(indio_dev->trig,
					    indio_dev->pollfunc);
}
EXPORT_SYMBOL(iio_triggered_buffer_postenable);

int iio_triggered_buffer_predisable(struct iio_dev *indio_dev)
{
	return iio_trigger_detach_poll_func(indio_dev->trig,
					    indio_dev->pollfunc);
}
EXPORT_SYMBOL(iio_triggered_buffer_predisable);
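
/*
 * A minimal sketch of how a driver wires the two helpers above into its
 * buffer setup ops (the structure name is hypothetical):
 *
 *	static const struct iio_buffer_setup_ops my_buffer_setup_ops = {
 *		.postenable = &iio_triggered_buffer_postenable,
 *		.predisable = &iio_triggered_buffer_predisable,
 *	};
 *
 * The poll function is attached to the current trigger once the buffer
 * has been enabled, and detached again before it is disabled.
 */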