/* rc-ir-raw.c - handle IR pulse/space events
 *
 * Copyright (C) 2010 by Mauro Carvalho Chehab
 *
 * This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 */

#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/kmod.h>
#include <linux/sched.h>
#include "rc-core-priv.h"

/* Used to keep track of IR raw clients, protected by ir_raw_handler_lock */
static LIST_HEAD(ir_raw_client_list);

/* Used to handle IR raw handler extensions */
static DEFINE_MUTEX(ir_raw_handler_lock);
static LIST_HEAD(ir_raw_handler_list);
static atomic64_t available_protocols = ATOMIC64_INIT(0);

static int ir_raw_event_thread(void *data)
{
	struct ir_raw_event ev;
	struct ir_raw_handler *handler;
	struct ir_raw_event_ctrl *raw = (struct ir_raw_event_ctrl *)data;

	while (1) {
		mutex_lock(&ir_raw_handler_lock);
		while (kfifo_out(&raw->kfifo, &ev, 1)) {
			list_for_each_entry(handler, &ir_raw_handler_list, list)
				if (raw->dev->enabled_protocols &
				    handler->protocols || !handler->protocols)
					handler->decode(raw->dev, ev);
			raw->prev_ev = ev;
		}
		mutex_unlock(&ir_raw_handler_lock);

		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		} else if (!kfifo_is_empty(&raw->kfifo))
			set_current_state(TASK_RUNNING);

		schedule();
	}

	return 0;
}

/**
 * ir_raw_event_store() - pass a pulse/space duration to the raw ir decoders
 * @dev:	the struct rc_dev device descriptor
 * @ev:		the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) stores a
 * pulse/space duration for the raw ir decoding state machines. The
 * @ev->pulse flag distinguishes pulses from spaces, and @ev->duration
 * holds the duration in nanoseconds.
 */
int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev)
{
	if (!dev->raw)
		return -EINVAL;

	IR_dprintk(2, "sample: (%05dus %s)\n",
		   TO_US(ev->duration), TO_STR(ev->pulse));

	if (!kfifo_put(&dev->raw->kfifo, *ev)) {
		dev_err(&dev->dev, "IR event FIFO is full!\n");
		return -ENOSPC;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store);
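
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * a driver that reads complete samples from its hardware could feed each
 * one to the decoders as follows. "rcdev", "level" and "duration_ns" are
 * hypothetical placeholders for driver-specific state.
 *
 *	DEFINE_IR_RAW_EVENT(ev);
 *
 *	ev.pulse = level;		// true for a mark, false for a space
 *	ev.duration = duration_ns;	// duration in nanoseconds
 *	if (ir_raw_event_store(rcdev, &ev))
 *		dev_warn(&rcdev->dev, "sample dropped, FIFO full\n");
 */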

/**
 * ir_raw_event_store_edge() - notify raw ir decoders of the start of a pulse/space
 * @dev:	the struct rc_dev device descriptor
 * @type:	the type of the event that has occurred
 *
 * This routine (which may be called from an interrupt context) is used to
 * store the beginning of an ir pulse or space (or the start/end of ir
 * reception) for the raw ir decoding state machines. This is used by
 * hardware which does not provide durations directly but only interrupts
 * (or similar events) on state change.
 */
int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type)
{
	ktime_t			now;
	s64			delta; /* ns */
	DEFINE_IR_RAW_EVENT(ev);
	int			rc = 0;
	int			delay;

	if (!dev->raw)
		return -EINVAL;

	now = ktime_get();
	delta = ktime_to_ns(ktime_sub(now, dev->raw->last_event));
	delay = MS_TO_NS(dev->input_dev->rep[REP_DELAY]);

	/* Check for a long duration since the last event, or whether
	 * we're being called for the first time; note that delta can't
	 * possibly be negative.
	 */
	if (delta > delay || !dev->raw->last_type)
		type |= IR_START_EVENT;
	else
		ev.duration = delta;

	if (type & IR_START_EVENT)
		ir_raw_event_reset(dev);
	else if (dev->raw->last_type & IR_SPACE) {
		ev.pulse = false;
		rc = ir_raw_event_store(dev, &ev);
	} else if (dev->raw->last_type & IR_PULSE) {
		ev.pulse = true;
		rc = ir_raw_event_store(dev, &ev);
	} else
		return 0;

	dev->raw->last_event = now;
	dev->raw->last_type = type;
	return rc;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);
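
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * hardware that only reports level changes can call
 * ir_raw_event_store_edge() from its interrupt handler and let rc-core
 * measure the durations itself. my_ir_isr() and my_line_is_high() are
 * hypothetical driver helpers.
 *
 *	static irqreturn_t my_ir_isr(int irq, void *data)
 *	{
 *		struct rc_dev *rcdev = data;
 *		enum raw_event_type type;
 *
 *		// the period starting now: a high line means a pulse
 *		type = my_line_is_high(rcdev) ? IR_PULSE : IR_SPACE;
 *		ir_raw_event_store_edge(rcdev, type);
 *		ir_raw_event_handle(rcdev);
 *		return IRQ_HANDLED;
 *	}
 */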

/**
 * ir_raw_event_store_with_filter() - pass next pulse/space to decoders with some processing
 * @dev:	the struct rc_dev device descriptor
 * @ev:		the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) works in a
 * similar manner to ir_raw_event_store_edge().
 * It is intended for devices with a limited internal buffer: it automerges
 * consecutive samples of the same type and handles timeouts. Returns non-zero
 * if the event was added, and zero if the event was ignored due to idle
 * processing.
 */
int ir_raw_event_store_with_filter(struct rc_dev *dev, struct ir_raw_event *ev)
{
	if (!dev->raw)
		return -EINVAL;

	/* Ignore spaces in idle mode */
	if (dev->idle && !ev->pulse)
		return 0;
	else if (dev->idle)
		ir_raw_event_set_idle(dev, false);

	if (!dev->raw->this_ev.duration)
		dev->raw->this_ev = *ev;
	else if (ev->pulse == dev->raw->this_ev.pulse)
		dev->raw->this_ev.duration += ev->duration;
	else {
		ir_raw_event_store(dev, &dev->raw->this_ev);
		dev->raw->this_ev = *ev;
	}

	/* Enter idle mode if necessary */
	if (!ev->pulse && dev->timeout &&
	    dev->raw->this_ev.duration >= dev->timeout)
		ir_raw_event_set_idle(dev, true);

	return 1;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_with_filter);
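
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * a driver with a small hardware buffer can push every sample through the
 * filtered store; consecutive samples of the same polarity are merged and
 * a space longer than dev->timeout switches the device to idle. "rcdev",
 * "level" and "duration_ns" are hypothetical driver values.
 *
 *	DEFINE_IR_RAW_EVENT(ev);
 *
 *	ev.pulse = level;
 *	ev.duration = duration_ns;
 *	if (ir_raw_event_store_with_filter(rcdev, &ev))
 *		ir_raw_event_handle(rcdev);	// 0 means ignored while idle
 */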

/**
 * ir_raw_event_set_idle() - provide hint to rc-core when the device is idle or not
 * @dev:	the struct rc_dev device descriptor
 * @idle:	whether the device is idle or not
 */
void ir_raw_event_set_idle(struct rc_dev *dev, bool idle)
{
	if (!dev->raw)
		return;

	IR_dprintk(2, "%s idle mode\n", idle ? "enter" : "leave");

	if (idle) {
		dev->raw->this_ev.timeout = true;
		ir_raw_event_store(dev, &dev->raw->this_ev);
		init_ir_raw_event(&dev->raw->this_ev);
	}

	if (dev->s_idle)
		dev->s_idle(dev, idle);

	dev->idle = idle;
}
EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);

/**
 * ir_raw_event_handle() - schedules the decoding of stored ir data
 * @dev:	the struct rc_dev device descriptor
 *
 * This routine will tell rc-core to start decoding stored ir data.
 */
void ir_raw_event_handle(struct rc_dev *dev)
{
	if (!dev->raw)
		return;

	wake_up_process(dev->raw->thread);
}
EXPORT_SYMBOL_GPL(ir_raw_event_handle);
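
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * since ir_raw_event_handle() only wakes the per-device decoder thread, it
 * is cheap to call once after draining a whole hardware FIFO rather than
 * after every sample. my_hw_pop_sample() and "priv" are hypothetical.
 *
 *	DEFINE_IR_RAW_EVENT(ev);
 *	bool level;
 *	u32 length;
 *
 *	while (my_hw_pop_sample(priv, &level, &length)) {
 *		ev.pulse = level;
 *		ev.duration = length;
 *		ir_raw_event_store(rcdev, &ev);
 *	}
 *	ir_raw_event_handle(rcdev);	// one wakeup for the whole batch
 */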

/* used internally by the sysfs interface */
u64
ir_raw_get_allowed_protocols(void)
{
	return atomic64_read(&available_protocols);
}

static int change_protocol(struct rc_dev *dev, u64 *rc_type)
{
	/* the caller will update dev->enabled_protocols */
	return 0;
}

static void ir_raw_disable_protocols(struct rc_dev *dev, u64 protocols)
{
	mutex_lock(&dev->lock);
	dev->enabled_protocols &= ~protocols;
	mutex_unlock(&dev->lock);
}

/**
 * ir_raw_gen_manchester() - Encode data with Manchester (bi-phase) modulation.
 * @ev:		Pointer to pointer to next free event. *@ev is incremented for
 *		each raw event filled.
 * @max:	Maximum number of raw events to fill.
 * @timings:	Manchester modulation timings.
 * @n:		Number of bits of data.
 * @data:	Data bits to encode.
 *
 * Encodes the @n least significant bits of @data using Manchester (bi-phase)
 * modulation with the timing characteristics described by @timings, writing up
 * to @max raw IR events using the *@ev pointer.
 *
 * Returns:	0 on success.
 *		-ENOBUFS if there isn't enough space in the array to fit the
 *		full encoded data. In this case all @max events will have been
 *		written.
 */
int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
			  const struct ir_raw_timings_manchester *timings,
			  unsigned int n, unsigned int data)
{
	bool need_pulse;
	unsigned int i;
	int ret = -ENOBUFS;

	i = 1 << (n - 1);

	if (timings->leader) {
		if (!max--)
			return ret;
		if (timings->pulse_space_start) {
			init_ir_raw_event_duration((*ev)++, 1, timings->leader);

			if (!max--)
				return ret;
			init_ir_raw_event_duration((*ev), 0, timings->leader);
		} else {
			init_ir_raw_event_duration((*ev), 1, timings->leader);
		}
		i >>= 1;
	} else {
		/* continue existing signal */
		--(*ev);
	}
	/* from here on *ev will point to the last event rather than the next */

	while (n && i > 0) {
		need_pulse = !(data & i);
		if (timings->invert)
			need_pulse = !need_pulse;
		if (need_pulse == !!(*ev)->pulse) {
			(*ev)->duration += timings->clock;
		} else {
			if (!max--)
				goto nobufs;
			init_ir_raw_event_duration(++(*ev), need_pulse,
						   timings->clock);
		}

		if (!max--)
			goto nobufs;
		init_ir_raw_event_duration(++(*ev), !need_pulse,
					   timings->clock);
		i >>= 1;
	}

	if (timings->trailer_space) {
		if (!(*ev)->pulse)
			(*ev)->duration += timings->trailer_space;
		else if (!max--)
			goto nobufs;
		else
			init_ir_raw_event_duration(++(*ev), 0,
						   timings->trailer_space);
	}

	ret = 0;
nobufs:
	/* point to the next event rather than last event before returning */
	++(*ev);
	return ret;
}
EXPORT_SYMBOL(ir_raw_gen_manchester);
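
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * an encoder callback could emit an RC-5-style bi-phase stream roughly
 * like this. The timing values are illustrative only (durations are in
 * nanoseconds) and "events", "max" and "command" are hypothetical.
 *
 *	static const struct ir_raw_timings_manchester my_timings = {
 *		.leader		= 889000,
 *		.clock		= 889000,
 *		.invert		= 1,
 *	};
 *
 *	struct ir_raw_event *e = events;
 *	int ret = ir_raw_gen_manchester(&e, max, &my_timings, 14, command);
 *	if (!ret)
 *		ret = e - events;	// number of events written
 */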

/**
 * ir_raw_gen_pd() - Encode data to raw events with pulse-distance modulation.
 * @ev:		Pointer to pointer to next free event. *@ev is incremented for
 *		each raw event filled.
 * @max:	Maximum number of raw events to fill.
 * @timings:	Pulse distance modulation timings.
 * @n:		Number of bits of data.
 * @data:	Data bits to encode.
 *
 * Encodes the @n least significant bits of @data using pulse-distance
 * modulation with the timing characteristics described by @timings, writing up
 * to @max raw IR events using the *@ev pointer.
 *
 * Returns:	0 on success.
 *		-ENOBUFS if there isn't enough space in the array to fit the
 *		full encoded data. In this case all @max events will have been
 *		written.
 */
int ir_raw_gen_pd(struct ir_raw_event **ev, unsigned int max,
		  const struct ir_raw_timings_pd *timings,
		  unsigned int n, u64 data)
{
	int i;
	int ret;
	unsigned int space;

	if (timings->header_pulse) {
		ret = ir_raw_gen_pulse_space(ev, &max, timings->header_pulse,
					     timings->header_space);
		if (ret)
			return ret;
	}

	if (timings->msb_first) {
		for (i = n - 1; i >= 0; --i) {
			space = timings->bit_space[(data >> i) & 1];
			ret = ir_raw_gen_pulse_space(ev, &max,
						     timings->bit_pulse,
						     space);
			if (ret)
				return ret;
		}
	} else {
		for (i = 0; i < n; ++i, data >>= 1) {
			space = timings->bit_space[data & 1];
			ret = ir_raw_gen_pulse_space(ev, &max,
						     timings->bit_pulse,
						     space);
			if (ret)
				return ret;
		}
	}

	ret = ir_raw_gen_pulse_space(ev, &max, timings->trailer_pulse,
				     timings->trailer_space);
	return ret;
}
EXPORT_SYMBOL(ir_raw_gen_pd);
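
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * NEC-like pulse-distance timings could be expressed as below; the numbers
 * are illustrative only (durations in nanoseconds), and "events", "max"
 * and "raw_bits" are hypothetical.
 *
 *	static const struct ir_raw_timings_pd my_timings = {
 *		.header_pulse	= 9000000,
 *		.header_space	= 4500000,
 *		.bit_pulse	= 562500,
 *		.bit_space	= { 562500, 1687500 },
 *		.trailer_pulse	= 562500,
 *		.trailer_space	= 10000000,
 *		.msb_first	= 0,
 *	};
 *
 *	struct ir_raw_event *e = events;
 *	int ret = ir_raw_gen_pd(&e, max, &my_timings, 32, raw_bits);
 */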

/**
 * ir_raw_gen_pl() - Encode data to raw events with pulse-length modulation.
 * @ev:		Pointer to pointer to next free event. *@ev is incremented for
 *		each raw event filled.
 * @max:	Maximum number of raw events to fill.
 * @timings:	Pulse length modulation timings.
 * @n:		Number of bits of data.
 * @data:	Data bits to encode.
 *
 * Encodes the @n least significant bits of @data using pulse-length
 * modulation with the timing characteristics described by @timings, writing up
 * to @max raw IR events using the *@ev pointer.
 *
 * Returns:	0 on success.
 *		-ENOBUFS if there isn't enough space in the array to fit the
 *		full encoded data. In this case all @max events will have been
 *		written.
 */
int ir_raw_gen_pl(struct ir_raw_event **ev, unsigned int max,
		  const struct ir_raw_timings_pl *timings,
		  unsigned int n, u64 data)
{
	int i;
	int ret = -ENOBUFS;
	unsigned int pulse;

	if (!max--)
		return ret;

	init_ir_raw_event_duration((*ev)++, 1, timings->header_pulse);

	if (timings->msb_first) {
		for (i = n - 1; i >= 0; --i) {
			if (!max--)
				return ret;
			init_ir_raw_event_duration((*ev)++, 0,
						   timings->bit_space);
			if (!max--)
				return ret;
			pulse = timings->bit_pulse[(data >> i) & 1];
			init_ir_raw_event_duration((*ev)++, 1, pulse);
		}
	} else {
		for (i = 0; i < n; ++i, data >>= 1) {
			if (!max--)
				return ret;
			init_ir_raw_event_duration((*ev)++, 0,
						   timings->bit_space);
			if (!max--)
				return ret;
			pulse = timings->bit_pulse[data & 1];
			init_ir_raw_event_duration((*ev)++, 1, pulse);
		}
	}

	if (!max--)
		return ret;

	init_ir_raw_event_duration((*ev)++, 0, timings->trailer_space);

	return 0;
}
EXPORT_SYMBOL(ir_raw_gen_pl);
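
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * Sony SIRC-like pulse-length timings could be expressed as below; the
 * numbers are illustrative only (durations in nanoseconds), and "events",
 * "max" and "raw_bits" are hypothetical.
 *
 *	static const struct ir_raw_timings_pl my_timings = {
 *		.header_pulse	= 2400000,
 *		.bit_space	= 600000,
 *		.bit_pulse	= { 600000, 1200000 },
 *		.trailer_space	= 10000000,
 *		.msb_first	= 0,
 *	};
 *
 *	struct ir_raw_event *e = events;
 *	int ret = ir_raw_gen_pl(&e, max, &my_timings, 20, raw_bits);
 */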

/**
 * ir_raw_encode_scancode() - Encode a scancode as raw events
 *
 * @protocol:		protocol
 * @scancode:		scancode filter describing a single scancode
 * @events:		array of raw events to write into
 * @max:		max number of raw events
 *
 * Attempts to encode the scancode as raw events.
 *
 * Returns:	The number of events written.
 *		-ENOBUFS if there isn't enough space in the array to fit the
 *		encoding. In this case all @max events will have been written.
 *		-EINVAL if the scancode is ambiguous or invalid, or if no
 *		compatible encoder was found.
 */
int ir_raw_encode_scancode(enum rc_type protocol, u32 scancode,
			   struct ir_raw_event *events, unsigned int max)
{
	struct ir_raw_handler *handler;
	int ret = -EINVAL;
	u64 mask = 1ULL << protocol;

	mutex_lock(&ir_raw_handler_lock);
	list_for_each_entry(handler, &ir_raw_handler_list, list) {
		if (handler->protocols & mask && handler->encode) {
			ret = handler->encode(protocol, scancode, events, max);
			if (ret >= 0 || ret == -ENOBUFS)
				break;
		}
	}
	mutex_unlock(&ir_raw_handler_lock);

	return ret;
}
EXPORT_SYMBOL(ir_raw_encode_scancode);
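
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * a driver that needs the raw form of a scancode, e.g. to program a
 * hardware wakeup filter, could call the encoder like this. The scancode
 * and the 64-event buffer size are arbitrary.
 *
 *	struct ir_raw_event raw[64];
 *	int len;
 *
 *	len = ir_raw_encode_scancode(RC_TYPE_NEC, 0x0408, raw,
 *				     ARRAY_SIZE(raw));
 *	if (len < 0)
 *		return len;	// no encoder found, or buffer too small
 */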

/*
 * Used to (un)register raw event clients
 */
int ir_raw_event_register(struct rc_dev *dev)
{
	int rc;
	struct ir_raw_handler *handler;

	if (!dev)
		return -EINVAL;

	dev->raw = kzalloc(sizeof(*dev->raw), GFP_KERNEL);
	if (!dev->raw)
		return -ENOMEM;

	dev->raw->dev = dev;
	dev->change_protocol = change_protocol;
	INIT_KFIFO(dev->raw->kfifo);

	/*
	 * raw transmitters do not need any event registration
	 * because the event is coming from userspace
	 */
	if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
		dev->raw->thread = kthread_run(ir_raw_event_thread, dev->raw,
					       "rc%u", dev->minor);

		if (IS_ERR(dev->raw->thread)) {
			rc = PTR_ERR(dev->raw->thread);
			goto out;
		}
	}

	mutex_lock(&ir_raw_handler_lock);
	list_add_tail(&dev->raw->list, &ir_raw_client_list);
	list_for_each_entry(handler, &ir_raw_handler_list, list)
		if (handler->raw_register)
			handler->raw_register(dev);
	mutex_unlock(&ir_raw_handler_lock);

	return 0;

out:
	kfree(dev->raw);
	dev->raw = NULL;
	return rc;
}

void ir_raw_event_unregister(struct rc_dev *dev)
{
	struct ir_raw_handler *handler;

	if (!dev || !dev->raw)
		return;

	kthread_stop(dev->raw->thread);

	mutex_lock(&ir_raw_handler_lock);
	list_del(&dev->raw->list);
	list_for_each_entry(handler, &ir_raw_handler_list, list)
		if (handler->raw_unregister)
			handler->raw_unregister(dev);
	mutex_unlock(&ir_raw_handler_lock);

	kfree(dev->raw);
	dev->raw = NULL;
}

/*
 * Extension interface - used to register the IR decoders
 */

int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
{
	struct ir_raw_event_ctrl *raw;

	mutex_lock(&ir_raw_handler_lock);
	list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list);
	if (ir_raw_handler->raw_register)
		list_for_each_entry(raw, &ir_raw_client_list, list)
			ir_raw_handler->raw_register(raw->dev);
	atomic64_or(ir_raw_handler->protocols, &available_protocols);
	mutex_unlock(&ir_raw_handler_lock);

	return 0;
}
EXPORT_SYMBOL(ir_raw_handler_register);
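
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * a protocol decoder module typically registers a static handler from its
 * init hook and removes it on exit. my_decode(), my_encode() and the
 * RC_BIT_OTHER mask are stand-ins for a real decoder's callbacks and
 * protocol bits.
 *
 *	static struct ir_raw_handler my_handler = {
 *		.protocols	= RC_BIT_OTHER,
 *		.decode		= my_decode,
 *		.encode		= my_encode,
 *	};
 *
 *	static int __init my_decoder_init(void)
 *	{
 *		return ir_raw_handler_register(&my_handler);
 *	}
 *
 *	static void __exit my_decoder_exit(void)
 *	{
 *		ir_raw_handler_unregister(&my_handler);
 *	}
 */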

void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
{
	struct ir_raw_event_ctrl *raw;
	u64 protocols = ir_raw_handler->protocols;

	mutex_lock(&ir_raw_handler_lock);
	list_del(&ir_raw_handler->list);
	list_for_each_entry(raw, &ir_raw_client_list, list) {
		ir_raw_disable_protocols(raw->dev, protocols);
		if (ir_raw_handler->raw_unregister)
			ir_raw_handler->raw_unregister(raw->dev);
	}
	atomic64_andnot(protocols, &available_protocols);
	mutex_unlock(&ir_raw_handler_lock);
}
EXPORT_SYMBOL(ir_raw_handler_unregister);
590