1 // SPDX-License-Identifier: GPL-2.0 2 // rc-ir-raw.c - handle IR pulse/space events 3 // 4 // Copyright (C) 2010 by Mauro Carvalho Chehab 5 6 #include <linux/export.h> 7 #include <linux/kthread.h> 8 #include <linux/mutex.h> 9 #include <linux/kmod.h> 10 #include <linux/sched.h> 11 #include "rc-core-priv.h" 12 13 /* Used to keep track of IR raw clients, protected by ir_raw_handler_lock */ 14 static LIST_HEAD(ir_raw_client_list); 15 16 /* Used to handle IR raw handler extensions */ 17 static DEFINE_MUTEX(ir_raw_handler_lock); 18 static LIST_HEAD(ir_raw_handler_list); 19 static atomic64_t available_protocols = ATOMIC64_INIT(0); 20 21 static int ir_raw_event_thread(void *data) 22 { 23 struct ir_raw_event ev; 24 struct ir_raw_handler *handler; 25 struct ir_raw_event_ctrl *raw = (struct ir_raw_event_ctrl *)data; 26 27 while (1) { 28 mutex_lock(&ir_raw_handler_lock); 29 while (kfifo_out(&raw->kfifo, &ev, 1)) { 30 list_for_each_entry(handler, &ir_raw_handler_list, list) 31 if (raw->dev->enabled_protocols & 32 handler->protocols || !handler->protocols) 33 handler->decode(raw->dev, ev); 34 ir_lirc_raw_event(raw->dev, ev); 35 raw->prev_ev = ev; 36 } 37 mutex_unlock(&ir_raw_handler_lock); 38 39 set_current_state(TASK_INTERRUPTIBLE); 40 41 if (kthread_should_stop()) { 42 __set_current_state(TASK_RUNNING); 43 break; 44 } else if (!kfifo_is_empty(&raw->kfifo)) 45 set_current_state(TASK_RUNNING); 46 47 schedule(); 48 } 49 50 return 0; 51 } 52 53 /** 54 * ir_raw_event_store() - pass a pulse/space duration to the raw ir decoders 55 * @dev: the struct rc_dev device descriptor 56 * @ev: the struct ir_raw_event descriptor of the pulse/space 57 * 58 * This routine (which may be called from an interrupt context) stores a 59 * pulse/space duration for the raw ir decoding state machines. Pulses are 60 * signalled as positive values and spaces as negative values. A zero value 61 * will reset the decoding state machines. 62 */ 63 int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev) 64 { 65 if (!dev->raw) 66 return -EINVAL; 67 68 dev_dbg(&dev->dev, "sample: (%05dus %s)\n", 69 TO_US(ev->duration), TO_STR(ev->pulse)); 70 71 if (!kfifo_put(&dev->raw->kfifo, *ev)) { 72 dev_err(&dev->dev, "IR event FIFO is full!\n"); 73 return -ENOSPC; 74 } 75 76 return 0; 77 } 78 EXPORT_SYMBOL_GPL(ir_raw_event_store); 79 80 /** 81 * ir_raw_event_store_edge() - notify raw ir decoders of the start of a pulse/space 82 * @dev: the struct rc_dev device descriptor 83 * @pulse: true for pulse, false for space 84 * 85 * This routine (which may be called from an interrupt context) is used to 86 * store the beginning of an ir pulse or space (or the start/end of ir 87 * reception) for the raw ir decoding state machines. This is used by 88 * hardware which does not provide durations directly but only interrupts 89 * (or similar events) on state change. 90 */ 91 int ir_raw_event_store_edge(struct rc_dev *dev, bool pulse) 92 { 93 ktime_t now; 94 DEFINE_IR_RAW_EVENT(ev); 95 96 if (!dev->raw) 97 return -EINVAL; 98 99 now = ktime_get(); 100 ev.duration = ktime_to_ns(ktime_sub(now, dev->raw->last_event)); 101 ev.pulse = !pulse; 102 103 return ir_raw_event_store_with_timeout(dev, &ev); 104 } 105 EXPORT_SYMBOL_GPL(ir_raw_event_store_edge); 106 107 /* 108 * ir_raw_event_store_with_timeout() - pass a pulse/space duration to the raw 109 * ir decoders, schedule decoding and 110 * timeout 111 * @dev: the struct rc_dev device descriptor 112 * @ev: the struct ir_raw_event descriptor of the pulse/space 113 * 114 * This routine (which may be called from an interrupt context) stores a 115 * pulse/space duration for the raw ir decoding state machines, schedules 116 * decoding and generates a timeout. 117 */ 118 int ir_raw_event_store_with_timeout(struct rc_dev *dev, struct ir_raw_event *ev) 119 { 120 ktime_t now; 121 int rc = 0; 122 123 if (!dev->raw) 124 return -EINVAL; 125 126 now = ktime_get(); 127 128 spin_lock(&dev->raw->edge_spinlock); 129 rc = ir_raw_event_store(dev, ev); 130 131 dev->raw->last_event = now; 132 133 /* timer could be set to timeout (125ms by default) */ 134 if (!timer_pending(&dev->raw->edge_handle) || 135 time_after(dev->raw->edge_handle.expires, 136 jiffies + msecs_to_jiffies(15))) { 137 mod_timer(&dev->raw->edge_handle, 138 jiffies + msecs_to_jiffies(15)); 139 } 140 spin_unlock(&dev->raw->edge_spinlock); 141 142 return rc; 143 } 144 EXPORT_SYMBOL_GPL(ir_raw_event_store_with_timeout); 145 146 /** 147 * ir_raw_event_store_with_filter() - pass next pulse/space to decoders with some processing 148 * @dev: the struct rc_dev device descriptor 149 * @ev: the event that has occurred 150 * 151 * This routine (which may be called from an interrupt context) works 152 * in similar manner to ir_raw_event_store_edge. 153 * This routine is intended for devices with limited internal buffer 154 * It automerges samples of same type, and handles timeouts. Returns non-zero 155 * if the event was added, and zero if the event was ignored due to idle 156 * processing. 157 */ 158 int ir_raw_event_store_with_filter(struct rc_dev *dev, struct ir_raw_event *ev) 159 { 160 if (!dev->raw) 161 return -EINVAL; 162 163 /* Ignore spaces in idle mode */ 164 if (dev->idle && !ev->pulse) 165 return 0; 166 else if (dev->idle) 167 ir_raw_event_set_idle(dev, false); 168 169 if (!dev->raw->this_ev.duration) 170 dev->raw->this_ev = *ev; 171 else if (ev->pulse == dev->raw->this_ev.pulse) 172 dev->raw->this_ev.duration += ev->duration; 173 else { 174 ir_raw_event_store(dev, &dev->raw->this_ev); 175 dev->raw->this_ev = *ev; 176 } 177 178 /* Enter idle mode if nessesary */ 179 if (!ev->pulse && dev->timeout && 180 dev->raw->this_ev.duration >= dev->timeout) 181 ir_raw_event_set_idle(dev, true); 182 183 return 1; 184 } 185 EXPORT_SYMBOL_GPL(ir_raw_event_store_with_filter); 186 187 /** 188 * ir_raw_event_set_idle() - provide hint to rc-core when the device is idle or not 189 * @dev: the struct rc_dev device descriptor 190 * @idle: whether the device is idle or not 191 */ 192 void ir_raw_event_set_idle(struct rc_dev *dev, bool idle) 193 { 194 if (!dev->raw) 195 return; 196 197 dev_dbg(&dev->dev, "%s idle mode\n", idle ? "enter" : "leave"); 198 199 if (idle) { 200 dev->raw->this_ev.timeout = true; 201 ir_raw_event_store(dev, &dev->raw->this_ev); 202 init_ir_raw_event(&dev->raw->this_ev); 203 } 204 205 if (dev->s_idle) 206 dev->s_idle(dev, idle); 207 208 dev->idle = idle; 209 } 210 EXPORT_SYMBOL_GPL(ir_raw_event_set_idle); 211 212 /** 213 * ir_raw_event_handle() - schedules the decoding of stored ir data 214 * @dev: the struct rc_dev device descriptor 215 * 216 * This routine will tell rc-core to start decoding stored ir data. 217 */ 218 void ir_raw_event_handle(struct rc_dev *dev) 219 { 220 if (!dev->raw || !dev->raw->thread) 221 return; 222 223 wake_up_process(dev->raw->thread); 224 } 225 EXPORT_SYMBOL_GPL(ir_raw_event_handle); 226 227 /* used internally by the sysfs interface */ 228 u64 229 ir_raw_get_allowed_protocols(void) 230 { 231 return atomic64_read(&available_protocols); 232 } 233 234 static int change_protocol(struct rc_dev *dev, u64 *rc_proto) 235 { 236 /* the caller will update dev->enabled_protocols */ 237 return 0; 238 } 239 240 static void ir_raw_disable_protocols(struct rc_dev *dev, u64 protocols) 241 { 242 mutex_lock(&dev->lock); 243 dev->enabled_protocols &= ~protocols; 244 mutex_unlock(&dev->lock); 245 } 246 247 /** 248 * ir_raw_gen_manchester() - Encode data with Manchester (bi-phase) modulation. 249 * @ev: Pointer to pointer to next free event. *@ev is incremented for 250 * each raw event filled. 251 * @max: Maximum number of raw events to fill. 252 * @timings: Manchester modulation timings. 253 * @n: Number of bits of data. 254 * @data: Data bits to encode. 255 * 256 * Encodes the @n least significant bits of @data using Manchester (bi-phase) 257 * modulation with the timing characteristics described by @timings, writing up 258 * to @max raw IR events using the *@ev pointer. 259 * 260 * Returns: 0 on success. 261 * -ENOBUFS if there isn't enough space in the array to fit the 262 * full encoded data. In this case all @max events will have been 263 * written. 264 */ 265 int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max, 266 const struct ir_raw_timings_manchester *timings, 267 unsigned int n, u64 data) 268 { 269 bool need_pulse; 270 u64 i; 271 int ret = -ENOBUFS; 272 273 i = BIT_ULL(n - 1); 274 275 if (timings->leader_pulse) { 276 if (!max--) 277 return ret; 278 init_ir_raw_event_duration((*ev), 1, timings->leader_pulse); 279 if (timings->leader_space) { 280 if (!max--) 281 return ret; 282 init_ir_raw_event_duration(++(*ev), 0, 283 timings->leader_space); 284 } 285 } else { 286 /* continue existing signal */ 287 --(*ev); 288 } 289 /* from here on *ev will point to the last event rather than the next */ 290 291 while (n && i > 0) { 292 need_pulse = !(data & i); 293 if (timings->invert) 294 need_pulse = !need_pulse; 295 if (need_pulse == !!(*ev)->pulse) { 296 (*ev)->duration += timings->clock; 297 } else { 298 if (!max--) 299 goto nobufs; 300 init_ir_raw_event_duration(++(*ev), need_pulse, 301 timings->clock); 302 } 303 304 if (!max--) 305 goto nobufs; 306 init_ir_raw_event_duration(++(*ev), !need_pulse, 307 timings->clock); 308 i >>= 1; 309 } 310 311 if (timings->trailer_space) { 312 if (!(*ev)->pulse) 313 (*ev)->duration += timings->trailer_space; 314 else if (!max--) 315 goto nobufs; 316 else 317 init_ir_raw_event_duration(++(*ev), 0, 318 timings->trailer_space); 319 } 320 321 ret = 0; 322 nobufs: 323 /* point to the next event rather than last event before returning */ 324 ++(*ev); 325 return ret; 326 } 327 EXPORT_SYMBOL(ir_raw_gen_manchester); 328 329 /** 330 * ir_raw_gen_pd() - Encode data to raw events with pulse-distance modulation. 331 * @ev: Pointer to pointer to next free event. *@ev is incremented for 332 * each raw event filled. 333 * @max: Maximum number of raw events to fill. 334 * @timings: Pulse distance modulation timings. 335 * @n: Number of bits of data. 336 * @data: Data bits to encode. 337 * 338 * Encodes the @n least significant bits of @data using pulse-distance 339 * modulation with the timing characteristics described by @timings, writing up 340 * to @max raw IR events using the *@ev pointer. 341 * 342 * Returns: 0 on success. 343 * -ENOBUFS if there isn't enough space in the array to fit the 344 * full encoded data. In this case all @max events will have been 345 * written. 346 */ 347 int ir_raw_gen_pd(struct ir_raw_event **ev, unsigned int max, 348 const struct ir_raw_timings_pd *timings, 349 unsigned int n, u64 data) 350 { 351 int i; 352 int ret; 353 unsigned int space; 354 355 if (timings->header_pulse) { 356 ret = ir_raw_gen_pulse_space(ev, &max, timings->header_pulse, 357 timings->header_space); 358 if (ret) 359 return ret; 360 } 361 362 if (timings->msb_first) { 363 for (i = n - 1; i >= 0; --i) { 364 space = timings->bit_space[(data >> i) & 1]; 365 ret = ir_raw_gen_pulse_space(ev, &max, 366 timings->bit_pulse, 367 space); 368 if (ret) 369 return ret; 370 } 371 } else { 372 for (i = 0; i < n; ++i, data >>= 1) { 373 space = timings->bit_space[data & 1]; 374 ret = ir_raw_gen_pulse_space(ev, &max, 375 timings->bit_pulse, 376 space); 377 if (ret) 378 return ret; 379 } 380 } 381 382 ret = ir_raw_gen_pulse_space(ev, &max, timings->trailer_pulse, 383 timings->trailer_space); 384 return ret; 385 } 386 EXPORT_SYMBOL(ir_raw_gen_pd); 387 388 /** 389 * ir_raw_gen_pl() - Encode data to raw events with pulse-length modulation. 390 * @ev: Pointer to pointer to next free event. *@ev is incremented for 391 * each raw event filled. 392 * @max: Maximum number of raw events to fill. 393 * @timings: Pulse distance modulation timings. 394 * @n: Number of bits of data. 395 * @data: Data bits to encode. 396 * 397 * Encodes the @n least significant bits of @data using space-distance 398 * modulation with the timing characteristics described by @timings, writing up 399 * to @max raw IR events using the *@ev pointer. 400 * 401 * Returns: 0 on success. 402 * -ENOBUFS if there isn't enough space in the array to fit the 403 * full encoded data. In this case all @max events will have been 404 * written. 405 */ 406 int ir_raw_gen_pl(struct ir_raw_event **ev, unsigned int max, 407 const struct ir_raw_timings_pl *timings, 408 unsigned int n, u64 data) 409 { 410 int i; 411 int ret = -ENOBUFS; 412 unsigned int pulse; 413 414 if (!max--) 415 return ret; 416 417 init_ir_raw_event_duration((*ev)++, 1, timings->header_pulse); 418 419 if (timings->msb_first) { 420 for (i = n - 1; i >= 0; --i) { 421 if (!max--) 422 return ret; 423 init_ir_raw_event_duration((*ev)++, 0, 424 timings->bit_space); 425 if (!max--) 426 return ret; 427 pulse = timings->bit_pulse[(data >> i) & 1]; 428 init_ir_raw_event_duration((*ev)++, 1, pulse); 429 } 430 } else { 431 for (i = 0; i < n; ++i, data >>= 1) { 432 if (!max--) 433 return ret; 434 init_ir_raw_event_duration((*ev)++, 0, 435 timings->bit_space); 436 if (!max--) 437 return ret; 438 pulse = timings->bit_pulse[data & 1]; 439 init_ir_raw_event_duration((*ev)++, 1, pulse); 440 } 441 } 442 443 if (!max--) 444 return ret; 445 446 init_ir_raw_event_duration((*ev)++, 0, timings->trailer_space); 447 448 return 0; 449 } 450 EXPORT_SYMBOL(ir_raw_gen_pl); 451 452 /** 453 * ir_raw_encode_scancode() - Encode a scancode as raw events 454 * 455 * @protocol: protocol 456 * @scancode: scancode filter describing a single scancode 457 * @events: array of raw events to write into 458 * @max: max number of raw events 459 * 460 * Attempts to encode the scancode as raw events. 461 * 462 * Returns: The number of events written. 463 * -ENOBUFS if there isn't enough space in the array to fit the 464 * encoding. In this case all @max events will have been written. 465 * -EINVAL if the scancode is ambiguous or invalid, or if no 466 * compatible encoder was found. 467 */ 468 int ir_raw_encode_scancode(enum rc_proto protocol, u32 scancode, 469 struct ir_raw_event *events, unsigned int max) 470 { 471 struct ir_raw_handler *handler; 472 int ret = -EINVAL; 473 u64 mask = 1ULL << protocol; 474 475 ir_raw_load_modules(&mask); 476 477 mutex_lock(&ir_raw_handler_lock); 478 list_for_each_entry(handler, &ir_raw_handler_list, list) { 479 if (handler->protocols & mask && handler->encode) { 480 ret = handler->encode(protocol, scancode, events, max); 481 if (ret >= 0 || ret == -ENOBUFS) 482 break; 483 } 484 } 485 mutex_unlock(&ir_raw_handler_lock); 486 487 return ret; 488 } 489 EXPORT_SYMBOL(ir_raw_encode_scancode); 490 491 /** 492 * ir_raw_edge_handle() - Handle ir_raw_event_store_edge() processing 493 * 494 * @t: timer_list 495 * 496 * This callback is armed by ir_raw_event_store_edge(). It does two things: 497 * first of all, rather than calling ir_raw_event_handle() for each 498 * edge and waking up the rc thread, 15 ms after the first edge 499 * ir_raw_event_handle() is called. Secondly, generate a timeout event 500 * no more IR is received after the rc_dev timeout. 501 */ 502 static void ir_raw_edge_handle(struct timer_list *t) 503 { 504 struct ir_raw_event_ctrl *raw = from_timer(raw, t, edge_handle); 505 struct rc_dev *dev = raw->dev; 506 unsigned long flags; 507 ktime_t interval; 508 509 spin_lock_irqsave(&dev->raw->edge_spinlock, flags); 510 interval = ktime_sub(ktime_get(), dev->raw->last_event); 511 if (ktime_to_ns(interval) >= dev->timeout) { 512 DEFINE_IR_RAW_EVENT(ev); 513 514 ev.timeout = true; 515 ev.duration = ktime_to_ns(interval); 516 517 ir_raw_event_store(dev, &ev); 518 } else { 519 mod_timer(&dev->raw->edge_handle, 520 jiffies + nsecs_to_jiffies(dev->timeout - 521 ktime_to_ns(interval))); 522 } 523 spin_unlock_irqrestore(&dev->raw->edge_spinlock, flags); 524 525 ir_raw_event_handle(dev); 526 } 527 528 /** 529 * ir_raw_encode_carrier() - Get carrier used for protocol 530 * 531 * @protocol: protocol 532 * 533 * Attempts to find the carrier for the specified protocol 534 * 535 * Returns: The carrier in Hz 536 * -EINVAL if the protocol is invalid, or if no 537 * compatible encoder was found. 538 */ 539 int ir_raw_encode_carrier(enum rc_proto protocol) 540 { 541 struct ir_raw_handler *handler; 542 int ret = -EINVAL; 543 u64 mask = BIT_ULL(protocol); 544 545 mutex_lock(&ir_raw_handler_lock); 546 list_for_each_entry(handler, &ir_raw_handler_list, list) { 547 if (handler->protocols & mask && handler->encode) { 548 ret = handler->carrier; 549 break; 550 } 551 } 552 mutex_unlock(&ir_raw_handler_lock); 553 554 return ret; 555 } 556 EXPORT_SYMBOL(ir_raw_encode_carrier); 557 558 /* 559 * Used to (un)register raw event clients 560 */ 561 int ir_raw_event_prepare(struct rc_dev *dev) 562 { 563 if (!dev) 564 return -EINVAL; 565 566 dev->raw = kzalloc(sizeof(*dev->raw), GFP_KERNEL); 567 if (!dev->raw) 568 return -ENOMEM; 569 570 dev->raw->dev = dev; 571 dev->change_protocol = change_protocol; 572 spin_lock_init(&dev->raw->edge_spinlock); 573 timer_setup(&dev->raw->edge_handle, ir_raw_edge_handle, 0); 574 INIT_KFIFO(dev->raw->kfifo); 575 576 return 0; 577 } 578 579 int ir_raw_event_register(struct rc_dev *dev) 580 { 581 struct ir_raw_handler *handler; 582 struct task_struct *thread; 583 584 thread = kthread_run(ir_raw_event_thread, dev->raw, "rc%u", dev->minor); 585 if (IS_ERR(thread)) 586 return PTR_ERR(thread); 587 588 dev->raw->thread = thread; 589 590 mutex_lock(&ir_raw_handler_lock); 591 list_add_tail(&dev->raw->list, &ir_raw_client_list); 592 list_for_each_entry(handler, &ir_raw_handler_list, list) 593 if (handler->raw_register) 594 handler->raw_register(dev); 595 mutex_unlock(&ir_raw_handler_lock); 596 597 return 0; 598 } 599 600 void ir_raw_event_free(struct rc_dev *dev) 601 { 602 if (!dev) 603 return; 604 605 kfree(dev->raw); 606 dev->raw = NULL; 607 } 608 609 void ir_raw_event_unregister(struct rc_dev *dev) 610 { 611 struct ir_raw_handler *handler; 612 613 if (!dev || !dev->raw) 614 return; 615 616 kthread_stop(dev->raw->thread); 617 del_timer_sync(&dev->raw->edge_handle); 618 619 mutex_lock(&ir_raw_handler_lock); 620 list_del(&dev->raw->list); 621 list_for_each_entry(handler, &ir_raw_handler_list, list) 622 if (handler->raw_unregister) 623 handler->raw_unregister(dev); 624 mutex_unlock(&ir_raw_handler_lock); 625 626 ir_raw_event_free(dev); 627 } 628 629 /* 630 * Extension interface - used to register the IR decoders 631 */ 632 633 int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler) 634 { 635 struct ir_raw_event_ctrl *raw; 636 637 mutex_lock(&ir_raw_handler_lock); 638 list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list); 639 if (ir_raw_handler->raw_register) 640 list_for_each_entry(raw, &ir_raw_client_list, list) 641 ir_raw_handler->raw_register(raw->dev); 642 atomic64_or(ir_raw_handler->protocols, &available_protocols); 643 mutex_unlock(&ir_raw_handler_lock); 644 645 return 0; 646 } 647 EXPORT_SYMBOL(ir_raw_handler_register); 648 649 void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler) 650 { 651 struct ir_raw_event_ctrl *raw; 652 u64 protocols = ir_raw_handler->protocols; 653 654 mutex_lock(&ir_raw_handler_lock); 655 list_del(&ir_raw_handler->list); 656 list_for_each_entry(raw, &ir_raw_client_list, list) { 657 ir_raw_disable_protocols(raw->dev, protocols); 658 if (ir_raw_handler->raw_unregister) 659 ir_raw_handler->raw_unregister(raw->dev); 660 } 661 atomic64_andnot(protocols, &available_protocols); 662 mutex_unlock(&ir_raw_handler_lock); 663 } 664 EXPORT_SYMBOL(ir_raw_handler_unregister); 665