// SPDX-License-Identifier: GPL-2.0
// rc-ir-raw.c - handle IR pulse/space events
//
// Copyright (C) 2010 by Mauro Carvalho Chehab

#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/kmod.h>
#include <linux/sched.h>
#include "rc-core-priv.h"

/* Used to keep track of IR raw clients, protected by ir_raw_handler_lock */
static LIST_HEAD(ir_raw_client_list);

/* Used to handle IR raw handler extensions */
DEFINE_MUTEX(ir_raw_handler_lock);
static LIST_HEAD(ir_raw_handler_list);
static atomic64_t available_protocols = ATOMIC64_INIT(0);

static int ir_raw_event_thread(void *data)
{
	struct ir_raw_event ev;
	struct ir_raw_handler *handler;
	struct ir_raw_event_ctrl *raw = data;
	struct rc_dev *dev = raw->dev;

	while (1) {
		mutex_lock(&ir_raw_handler_lock);
		while (kfifo_out(&raw->kfifo, &ev, 1)) {
			if (is_timing_event(ev)) {
				if (ev.duration == 0)
					dev_warn_once(&dev->dev, "nonsensical timing event of duration 0");
				if (is_timing_event(raw->prev_ev) &&
				    !is_transition(&ev, &raw->prev_ev))
					dev_warn_once(&dev->dev, "two consecutive events of type %s",
						      TO_STR(ev.pulse));
				if (raw->prev_ev.reset && ev.pulse == 0)
					dev_warn_once(&dev->dev, "timing event after reset should be pulse");
			}
			list_for_each_entry(handler, &ir_raw_handler_list, list)
				if (dev->enabled_protocols &
				    handler->protocols || !handler->protocols)
					handler->decode(dev, ev);
			ir_lirc_raw_event(dev, ev);
			raw->prev_ev = ev;
		}
		mutex_unlock(&ir_raw_handler_lock);

		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		} else if (!kfifo_is_empty(&raw->kfifo))
			set_current_state(TASK_RUNNING);

		schedule();
	}

	return 0;
}

/**
 * ir_raw_event_store() - pass a pulse/space duration to the raw ir decoders
 * @dev:	the struct rc_dev device descriptor
 * @ev:		the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) stores a
 * pulse/space duration for the raw ir decoding state machines. Whether the
 * sample is a pulse or a space is carried in @ev->pulse; an event with
 * @ev->reset set resets the decoding state machines.
 */
int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev)
{
	if (!dev->raw)
		return -EINVAL;

	dev_dbg(&dev->dev, "sample: (%05dus %s)\n",
		TO_US(ev->duration), TO_STR(ev->pulse));

	if (!kfifo_put(&dev->raw->kfifo, *ev)) {
		dev_err(&dev->dev, "IR event FIFO is full!\n");
		return -ENOSPC;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store);
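
/*
 * Example (hypothetical driver sketch, not part of this file): a driver
 * whose hardware reports ready-made durations could feed each sample to
 * rc-core like this, then kick the decode thread. example_push_sample()
 * is an illustrative name only; at this revision durations are in
 * nanoseconds.
 *
 *	static void example_push_sample(struct rc_dev *rcdev, bool pulse,
 *					u32 duration_ns)
 *	{
 *		struct ir_raw_event ev = {
 *			.pulse		= pulse,
 *			.duration	= duration_ns,
 *		};
 *
 *		ir_raw_event_store(rcdev, &ev);
 *		ir_raw_event_handle(rcdev);
 *	}
 */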

/**
 * ir_raw_event_store_edge() - notify raw ir decoders of the start of a pulse/space
 * @dev:	the struct rc_dev device descriptor
 * @pulse:	true for pulse, false for space
 *
 * This routine (which may be called from an interrupt context) is used to
 * store the beginning of an ir pulse or space (or the start/end of ir
 * reception) for the raw ir decoding state machines. This is used by
 * hardware which does not provide durations directly but only interrupts
 * (or similar events) on state change.
 */
int ir_raw_event_store_edge(struct rc_dev *dev, bool pulse)
{
	ktime_t			now;
	struct ir_raw_event	ev = {};

	if (!dev->raw)
		return -EINVAL;

	now = ktime_get();
	ev.duration = ktime_to_ns(ktime_sub(now, dev->raw->last_event));
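	/*
	 * The stored sample describes the period that just ended, which is
	 * the opposite type of the edge now starting.
	 */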
	ev.pulse = !pulse;

	return ir_raw_event_store_with_timeout(dev, &ev);
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);
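
/*
 * Example (hypothetical driver sketch): hardware that only signals level
 * changes can report each edge from its interrupt handler and let rc-core
 * measure the elapsed time itself. example_read_level() stands in for
 * however the driver samples its receiver input.
 *
 *	static irqreturn_t example_ir_irq(int irq, void *data)
 *	{
 *		struct rc_dev *rcdev = data;
 *
 *		ir_raw_event_store_edge(rcdev, example_read_level(rcdev));
 *		return IRQ_HANDLED;
 *	}
 */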

/**
 * ir_raw_event_store_with_timeout() - pass a pulse/space duration to the raw
 *				       ir decoders, schedule decoding and
 *				       timeout
 * @dev:	the struct rc_dev device descriptor
 * @ev:		the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) stores a
 * pulse/space duration for the raw ir decoding state machines, schedules
 * decoding and generates a timeout.
 */
int ir_raw_event_store_with_timeout(struct rc_dev *dev, struct ir_raw_event *ev)
{
	ktime_t		now;
	int		rc = 0;

	if (!dev->raw)
		return -EINVAL;

	now = ktime_get();

	spin_lock(&dev->raw->edge_spinlock);
	rc = ir_raw_event_store(dev, ev);

	dev->raw->last_event = now;

	/*
	 * The timer may be armed for the full timeout (125 ms by default);
	 * make sure decoding is scheduled no later than 15 ms from now.
	 */
	if (!timer_pending(&dev->raw->edge_handle) ||
	    time_after(dev->raw->edge_handle.expires,
		       jiffies + msecs_to_jiffies(15))) {
		mod_timer(&dev->raw->edge_handle,
			  jiffies + msecs_to_jiffies(15));
	}
	spin_unlock(&dev->raw->edge_spinlock);

	return rc;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_with_timeout);

/**
 * ir_raw_event_store_with_filter() - pass next pulse/space to decoders with some processing
 * @dev:	the struct rc_dev device descriptor
 * @ev:		the event that has occurred
 *
 * This routine (which may be called from an interrupt context) works in a
 * similar manner to ir_raw_event_store_edge(). It is intended for devices
 * with a limited internal buffer: it automerges samples of the same type
 * and handles timeouts. Returns non-zero if the event was added, and zero
 * if the event was ignored due to idle processing.
 */
int ir_raw_event_store_with_filter(struct rc_dev *dev, struct ir_raw_event *ev)
{
	if (!dev->raw)
		return -EINVAL;

	/* Ignore spaces in idle mode */
	if (dev->idle && !ev->pulse)
		return 0;
	else if (dev->idle)
		ir_raw_event_set_idle(dev, false);

	if (!dev->raw->this_ev.duration)
		dev->raw->this_ev = *ev;
	else if (ev->pulse == dev->raw->this_ev.pulse)
		dev->raw->this_ev.duration += ev->duration;
	else {
		ir_raw_event_store(dev, &dev->raw->this_ev);
		dev->raw->this_ev = *ev;
	}

	/* Enter idle mode if necessary */
	if (!ev->pulse && dev->timeout &&
	    dev->raw->this_ev.duration >= dev->timeout)
		ir_raw_event_set_idle(dev, true);

	return 1;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_with_filter);
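
/*
 * Example (hypothetical driver sketch): a driver draining a small hardware
 * FIFO can push every sample through the filter and kick decoding once the
 * FIFO is empty. The example_hw_*() helpers are illustrative only.
 *
 *	while (example_hw_fifo_count(priv)) {
 *		struct ir_raw_event ev = {};
 *
 *		ev.pulse = example_hw_sample_is_pulse(priv);
 *		ev.duration = example_hw_sample_ns(priv);
 *		ir_raw_event_store_with_filter(rcdev, &ev);
 *	}
 *	ir_raw_event_handle(rcdev);
 */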

/**
 * ir_raw_event_set_idle() - provide a hint to rc-core when the device is idle or not
 * @dev:	the struct rc_dev device descriptor
 * @idle:	whether the device is idle or not
 */
void ir_raw_event_set_idle(struct rc_dev *dev, bool idle)
{
	if (!dev->raw)
		return;

	dev_dbg(&dev->dev, "%s idle mode\n", idle ? "enter" : "leave");

	if (idle) {
		dev->raw->this_ev.timeout = true;
		ir_raw_event_store(dev, &dev->raw->this_ev);
		dev->raw->this_ev = (struct ir_raw_event) {};
	}

	if (dev->s_idle)
		dev->s_idle(dev, idle);

	dev->idle = idle;
}
EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);

/**
 * ir_raw_event_handle() - schedules the decoding of stored ir data
 * @dev:	the struct rc_dev device descriptor
 *
 * This routine will tell rc-core to start decoding stored ir data.
 */
void ir_raw_event_handle(struct rc_dev *dev)
{
	if (!dev->raw || !dev->raw->thread)
		return;

	wake_up_process(dev->raw->thread);
}
EXPORT_SYMBOL_GPL(ir_raw_event_handle);

/* used internally by the sysfs interface */
u64 ir_raw_get_allowed_protocols(void)
{
	return atomic64_read(&available_protocols);
}

static int change_protocol(struct rc_dev *dev, u64 *rc_proto)
{
	struct ir_raw_handler *handler;
	u32 timeout = 0;

	mutex_lock(&ir_raw_handler_lock);
	list_for_each_entry(handler, &ir_raw_handler_list, list) {
		if (!(dev->enabled_protocols & handler->protocols) &&
		    (*rc_proto & handler->protocols) && handler->raw_register)
			handler->raw_register(dev);

		if ((dev->enabled_protocols & handler->protocols) &&
		    !(*rc_proto & handler->protocols) &&
		    handler->raw_unregister)
			handler->raw_unregister(dev);
	}
	mutex_unlock(&ir_raw_handler_lock);

	if (!dev->max_timeout)
		return 0;

	mutex_lock(&ir_raw_handler_lock);
	list_for_each_entry(handler, &ir_raw_handler_list, list) {
		if (handler->protocols & *rc_proto) {
			if (timeout < handler->min_timeout)
				timeout = handler->min_timeout;
		}
	}
	mutex_unlock(&ir_raw_handler_lock);

	if (timeout == 0)
		timeout = IR_DEFAULT_TIMEOUT;
	else
		timeout += MS_TO_NS(10);

	if (timeout < dev->min_timeout)
		timeout = dev->min_timeout;
	else if (timeout > dev->max_timeout)
		timeout = dev->max_timeout;

	if (dev->s_timeout)
		dev->s_timeout(dev, timeout);
	else
		dev->timeout = timeout;

	return 0;
}

static void ir_raw_disable_protocols(struct rc_dev *dev, u64 protocols)
{
	mutex_lock(&dev->lock);
	dev->enabled_protocols &= ~protocols;
	mutex_unlock(&dev->lock);
}

/**
 * ir_raw_gen_manchester() - Encode data with Manchester (bi-phase) modulation.
 * @ev:		Pointer to pointer to next free event. *@ev is incremented for
 *		each raw event filled.
 * @max:	Maximum number of raw events to fill.
 * @timings:	Manchester modulation timings.
 * @n:		Number of bits of data.
 * @data:	Data bits to encode.
 *
 * Encodes the @n least significant bits of @data using Manchester (bi-phase)
 * modulation with the timing characteristics described by @timings, writing up
 * to @max raw IR events using the *@ev pointer.
 *
 * Returns:	0 on success.
 *		-ENOBUFS if there isn't enough space in the array to fit the
 *		full encoded data. In this case all @max events will have been
 *		written.
 */
int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
			  const struct ir_raw_timings_manchester *timings,
			  unsigned int n, u64 data)
{
	bool need_pulse;
	u64 i;
	int ret = -ENOBUFS;

	i = BIT_ULL(n - 1);

	if (timings->leader_pulse) {
		if (!max--)
			return ret;
		init_ir_raw_event_duration((*ev), 1, timings->leader_pulse);
		if (timings->leader_space) {
			if (!max--)
				return ret;
			init_ir_raw_event_duration(++(*ev), 0,
						   timings->leader_space);
		}
	} else {
		/* continue existing signal */
		--(*ev);
	}
	/* from here on *ev will point to the last event rather than the next */

	while (n && i > 0) {
		need_pulse = !(data & i);
		if (timings->invert)
			need_pulse = !need_pulse;
		if (need_pulse == !!(*ev)->pulse) {
			(*ev)->duration += timings->clock;
		} else {
			if (!max--)
				goto nobufs;
			init_ir_raw_event_duration(++(*ev), need_pulse,
						   timings->clock);
		}

		if (!max--)
			goto nobufs;
		init_ir_raw_event_duration(++(*ev), !need_pulse,
					   timings->clock);
		i >>= 1;
	}

	if (timings->trailer_space) {
		if (!(*ev)->pulse)
			(*ev)->duration += timings->trailer_space;
		else if (!max--)
			goto nobufs;
		else
			init_ir_raw_event_duration(++(*ev), 0,
						   timings->trailer_space);
	}

	ret = 0;
nobufs:
	/* point to the next event rather than last event before returning */
	++(*ev);
	return ret;
}
EXPORT_SYMBOL(ir_raw_gen_manchester);
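
/*
 * Example (hypothetical sketch): an encoder callback built on
 * ir_raw_gen_manchester(), with RC-5-like timings (889 us bit clock,
 * inverted bi-phase; values in nanoseconds at this revision). The cursor
 * advances through the caller's array, so the number of events written is
 * the distance the cursor moved.
 *
 *	static const struct ir_raw_timings_manchester example_timings = {
 *		.leader_pulse	= US_TO_NS(889),
 *		.clock		= US_TO_NS(889),
 *		.invert		= 1,
 *	};
 *
 *	static int example_encode(enum rc_proto protocol, u32 scancode,
 *				  struct ir_raw_event *events, unsigned int max)
 *	{
 *		struct ir_raw_event *e = events;
 *		int ret;
 *
 *		ret = ir_raw_gen_manchester(&e, max, &example_timings,
 *					    14, scancode);
 *		if (ret < 0)
 *			return ret;
 *
 *		return e - events;
 *	}
 */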

/**
 * ir_raw_gen_pd() - Encode data to raw events with pulse-distance modulation.
 * @ev:		Pointer to pointer to next free event. *@ev is incremented for
 *		each raw event filled.
 * @max:	Maximum number of raw events to fill.
 * @timings:	Pulse distance modulation timings.
 * @n:		Number of bits of data.
 * @data:	Data bits to encode.
 *
 * Encodes the @n least significant bits of @data using pulse-distance
 * modulation with the timing characteristics described by @timings, writing up
 * to @max raw IR events using the *@ev pointer.
 *
 * Returns:	0 on success.
 *		-ENOBUFS if there isn't enough space in the array to fit the
 *		full encoded data. In this case all @max events will have been
 *		written.
 */
int ir_raw_gen_pd(struct ir_raw_event **ev, unsigned int max,
		  const struct ir_raw_timings_pd *timings,
		  unsigned int n, u64 data)
{
	int i;
	int ret;
	unsigned int space;

	if (timings->header_pulse) {
		ret = ir_raw_gen_pulse_space(ev, &max, timings->header_pulse,
					     timings->header_space);
		if (ret)
			return ret;
	}

	if (timings->msb_first) {
		for (i = n - 1; i >= 0; --i) {
			space = timings->bit_space[(data >> i) & 1];
			ret = ir_raw_gen_pulse_space(ev, &max,
						     timings->bit_pulse,
						     space);
			if (ret)
				return ret;
		}
	} else {
		for (i = 0; i < n; ++i, data >>= 1) {
			space = timings->bit_space[data & 1];
			ret = ir_raw_gen_pulse_space(ev, &max,
						     timings->bit_pulse,
						     space);
			if (ret)
				return ret;
		}
	}

	ret = ir_raw_gen_pulse_space(ev, &max, timings->trailer_pulse,
				     timings->trailer_space);
	return ret;
}
EXPORT_SYMBOL(ir_raw_gen_pd);
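
/*
 * Example (hypothetical sketch): NEC-like pulse-distance timings, 32 bits
 * sent least significant bit first, durations in nanoseconds.
 *
 *	static const struct ir_raw_timings_pd example_timings = {
 *		.header_pulse	= US_TO_NS(9000),
 *		.header_space	= US_TO_NS(4500),
 *		.bit_pulse	= US_TO_NS(562),
 *		.bit_space[0]	= US_TO_NS(562),
 *		.bit_space[1]	= US_TO_NS(1687),
 *		.trailer_pulse	= US_TO_NS(562),
 *	};
 *
 *	struct ir_raw_event *e = events;
 *	int ret = ir_raw_gen_pd(&e, max, &example_timings, 32, raw_bits);
 */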

/**
 * ir_raw_gen_pl() - Encode data to raw events with pulse-length modulation.
 * @ev:		Pointer to pointer to next free event. *@ev is incremented for
 *		each raw event filled.
 * @max:	Maximum number of raw events to fill.
 * @timings:	Pulse length modulation timings.
 * @n:		Number of bits of data.
 * @data:	Data bits to encode.
 *
 * Encodes the @n least significant bits of @data using pulse-length
 * modulation with the timing characteristics described by @timings, writing up
 * to @max raw IR events using the *@ev pointer.
 *
 * Returns:	0 on success.
 *		-ENOBUFS if there isn't enough space in the array to fit the
 *		full encoded data. In this case all @max events will have been
 *		written.
 */
int ir_raw_gen_pl(struct ir_raw_event **ev, unsigned int max,
		  const struct ir_raw_timings_pl *timings,
		  unsigned int n, u64 data)
{
	int i;
	int ret = -ENOBUFS;
	unsigned int pulse;

	if (!max--)
		return ret;

	init_ir_raw_event_duration((*ev)++, 1, timings->header_pulse);

	if (timings->msb_first) {
		for (i = n - 1; i >= 0; --i) {
			if (!max--)
				return ret;
			init_ir_raw_event_duration((*ev)++, 0,
						   timings->bit_space);
			if (!max--)
				return ret;
			pulse = timings->bit_pulse[(data >> i) & 1];
			init_ir_raw_event_duration((*ev)++, 1, pulse);
		}
	} else {
		for (i = 0; i < n; ++i, data >>= 1) {
			if (!max--)
				return ret;
			init_ir_raw_event_duration((*ev)++, 0,
						   timings->bit_space);
			if (!max--)
				return ret;
			pulse = timings->bit_pulse[data & 1];
			init_ir_raw_event_duration((*ev)++, 1, pulse);
		}
	}

	if (!max--)
		return ret;

	init_ir_raw_event_duration((*ev)++, 0, timings->trailer_space);

	return 0;
}
EXPORT_SYMBOL(ir_raw_gen_pl);
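
/*
 * Example (hypothetical sketch): Sony SIRC-like pulse-length timings,
 * 12 bits sent least significant bit first, durations in nanoseconds;
 * the trailer value here is only a placeholder.
 *
 *	static const struct ir_raw_timings_pl example_timings = {
 *		.header_pulse	= US_TO_NS(2400),
 *		.bit_space	= US_TO_NS(600),
 *		.bit_pulse[0]	= US_TO_NS(600),
 *		.bit_pulse[1]	= US_TO_NS(1200),
 *		.trailer_space	= US_TO_NS(10000),
 *	};
 *
 *	struct ir_raw_event *e = events;
 *	int ret = ir_raw_gen_pl(&e, max, &example_timings, 12, raw_bits);
 */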

/**
 * ir_raw_encode_scancode() - Encode a scancode as raw events
 *
 * @protocol:		protocol
 * @scancode:		scancode filter describing a single scancode
 * @events:		array of raw events to write into
 * @max:		max number of raw events
 *
 * Attempts to encode the scancode as raw events.
 *
 * Returns:	The number of events written.
 *		-ENOBUFS if there isn't enough space in the array to fit the
 *		encoding. In this case all @max events will have been written.
 *		-EINVAL if the scancode is ambiguous or invalid, or if no
 *		compatible encoder was found.
 */
int ir_raw_encode_scancode(enum rc_proto protocol, u32 scancode,
			   struct ir_raw_event *events, unsigned int max)
{
	struct ir_raw_handler *handler;
	int ret = -EINVAL;
	u64 mask = 1ULL << protocol;

	ir_raw_load_modules(&mask);

	mutex_lock(&ir_raw_handler_lock);
	list_for_each_entry(handler, &ir_raw_handler_list, list) {
		if (handler->protocols & mask && handler->encode) {
			ret = handler->encode(protocol, scancode, events, max);
			if (ret >= 0 || ret == -ENOBUFS)
				break;
		}
	}
	mutex_unlock(&ir_raw_handler_lock);

	return ret;
}
EXPORT_SYMBOL(ir_raw_encode_scancode);
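
/*
 * Example (hypothetical sketch): encoding a single NEC scancode into a
 * stack buffer; example_transmit() is an illustrative stand-in for a
 * driver's tx path.
 *
 *	struct ir_raw_event events[64];
 *	int count;
 *
 *	count = ir_raw_encode_scancode(RC_PROTO_NEC, 0x0406, events,
 *				       ARRAY_SIZE(events));
 *	if (count >= 0)
 *		example_transmit(rcdev, events, count);
 */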

/**
 * ir_raw_edge_handle() - Handle ir_raw_event_store_edge() processing
 *
 * @t:		timer_list
 *
 * This callback is armed by ir_raw_event_store_edge(). It does two things:
 * first, rather than calling ir_raw_event_handle() for each edge and waking
 * up the rc thread, it calls ir_raw_event_handle() 15 ms after the first
 * edge. Second, it generates a timeout event if no more IR is received
 * after the rc_dev timeout.
 */
static void ir_raw_edge_handle(struct timer_list *t)
{
	struct ir_raw_event_ctrl *raw = from_timer(raw, t, edge_handle);
	struct rc_dev *dev = raw->dev;
	unsigned long flags;
	ktime_t interval;

	spin_lock_irqsave(&dev->raw->edge_spinlock, flags);
	interval = ktime_sub(ktime_get(), dev->raw->last_event);
	if (ktime_to_ns(interval) >= dev->timeout) {
		struct ir_raw_event ev = {
			.timeout = true,
			.duration = ktime_to_ns(interval)
		};

		ir_raw_event_store(dev, &ev);
	} else {
		mod_timer(&dev->raw->edge_handle,
			  jiffies + nsecs_to_jiffies(dev->timeout -
						     ktime_to_ns(interval)));
	}
	spin_unlock_irqrestore(&dev->raw->edge_spinlock, flags);

	ir_raw_event_handle(dev);
}

/**
 * ir_raw_encode_carrier() - Get carrier used for protocol
 *
 * @protocol:		protocol
 *
 * Attempts to find the carrier for the specified protocol
 *
 * Returns:	The carrier in Hz
 *		-EINVAL if the protocol is invalid, or if no
 *		compatible encoder was found.
 */
int ir_raw_encode_carrier(enum rc_proto protocol)
{
	struct ir_raw_handler *handler;
	int ret = -EINVAL;
	u64 mask = BIT_ULL(protocol);

	mutex_lock(&ir_raw_handler_lock);
	list_for_each_entry(handler, &ir_raw_handler_list, list) {
		if (handler->protocols & mask && handler->encode) {
			ret = handler->carrier;
			break;
		}
	}
	mutex_unlock(&ir_raw_handler_lock);

	return ret;
}
EXPORT_SYMBOL(ir_raw_encode_carrier);
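
/*
 * Example (hypothetical sketch): looking up the protocol carrier before
 * transmitting, then programming the hardware through the rc_dev op.
 *
 *	int carrier = ir_raw_encode_carrier(RC_PROTO_NEC);
 *
 *	if (carrier > 0 && rcdev->s_tx_carrier)
 *		rcdev->s_tx_carrier(rcdev, carrier);
 */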

/*
 * Used to (un)register raw event clients
 */
int ir_raw_event_prepare(struct rc_dev *dev)
{
	if (!dev)
		return -EINVAL;

	dev->raw = kzalloc(sizeof(*dev->raw), GFP_KERNEL);
	if (!dev->raw)
		return -ENOMEM;

	dev->raw->dev = dev;
	dev->change_protocol = change_protocol;
	dev->idle = true;
	spin_lock_init(&dev->raw->edge_spinlock);
	timer_setup(&dev->raw->edge_handle, ir_raw_edge_handle, 0);
	INIT_KFIFO(dev->raw->kfifo);

	return 0;
}

int ir_raw_event_register(struct rc_dev *dev)
{
	struct task_struct *thread;

	thread = kthread_run(ir_raw_event_thread, dev->raw, "rc%u", dev->minor);
	if (IS_ERR(thread))
		return PTR_ERR(thread);

	dev->raw->thread = thread;

	mutex_lock(&ir_raw_handler_lock);
	list_add_tail(&dev->raw->list, &ir_raw_client_list);
	mutex_unlock(&ir_raw_handler_lock);

	return 0;
}

void ir_raw_event_free(struct rc_dev *dev)
{
	if (!dev)
		return;

	kfree(dev->raw);
	dev->raw = NULL;
}

void ir_raw_event_unregister(struct rc_dev *dev)
{
	struct ir_raw_handler *handler;

	if (!dev || !dev->raw)
		return;

	kthread_stop(dev->raw->thread);
	del_timer_sync(&dev->raw->edge_handle);

	mutex_lock(&ir_raw_handler_lock);
	list_del(&dev->raw->list);
	list_for_each_entry(handler, &ir_raw_handler_list, list)
		if (handler->raw_unregister &&
		    (handler->protocols & dev->enabled_protocols))
			handler->raw_unregister(dev);

	lirc_bpf_free(dev);

	ir_raw_event_free(dev);

	/*
	 * A user can be calling bpf(BPF_PROG_{QUERY|ATTACH|DETACH}), so
	 * ensure that the raw member is null on unlock; this is how
	 * "device gone" is checked.
	 */
	mutex_unlock(&ir_raw_handler_lock);
}

/*
 * Extension interface - used to register the IR decoders
 */

int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
{
	mutex_lock(&ir_raw_handler_lock);
	list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list);
	atomic64_or(ir_raw_handler->protocols, &available_protocols);
	mutex_unlock(&ir_raw_handler_lock);

	return 0;
}
EXPORT_SYMBOL(ir_raw_handler_register);
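
/*
 * Example (hypothetical sketch): a decoder module registers its handler at
 * module init and removes it on exit, mirroring the built-in decoders
 * (e.g. ir-nec-decoder.c). example_decode() is an illustrative callback.
 *
 *	static struct ir_raw_handler example_handler = {
 *		.protocols	= RC_PROTO_BIT_OTHER,
 *		.decode		= example_decode,
 *	};
 *
 *	static int __init example_decoder_init(void)
 *	{
 *		return ir_raw_handler_register(&example_handler);
 *	}
 *
 *	static void __exit example_decoder_exit(void)
 *	{
 *		ir_raw_handler_unregister(&example_handler);
 *	}
 */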

void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
{
	struct ir_raw_event_ctrl *raw;
	u64 protocols = ir_raw_handler->protocols;

	mutex_lock(&ir_raw_handler_lock);
	list_del(&ir_raw_handler->list);
	list_for_each_entry(raw, &ir_raw_client_list, list) {
		if (ir_raw_handler->raw_unregister &&
		    (raw->dev->enabled_protocols & protocols))
			ir_raw_handler->raw_unregister(raw->dev);
		ir_raw_disable_protocols(raw->dev, protocols);
	}
	atomic64_andnot(protocols, &available_protocols);
	mutex_unlock(&ir_raw_handler_lock);
}
EXPORT_SYMBOL(ir_raw_handler_unregister);