xref: /openbmc/linux/drivers/media/rc/rc-ir-raw.c (revision d3597236)
1 /* rc-ir-raw.c - handle IR pulse/space events
2  *
3  * Copyright (C) 2010 by Mauro Carvalho Chehab
4  *
5  * This program is free software; you can redistribute it and/or modify
6  *  it under the terms of the GNU General Public License as published by
7  *  the Free Software Foundation version 2 of the License.
8  *
9  *  This program is distributed in the hope that it will be useful,
10  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
11  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  *  GNU General Public License for more details.
13  */
14 
15 #include <linux/export.h>
16 #include <linux/kthread.h>
17 #include <linux/mutex.h>
18 #include <linux/kmod.h>
19 #include <linux/sched.h>
20 #include <linux/freezer.h>
21 #include "rc-core-priv.h"
22 
23 /* Define the max number of pulse/space transitions to buffer */
24 #define MAX_IR_EVENT_SIZE      512
25 
26 /* Used to keep track of IR raw clients, protected by ir_raw_handler_lock */
27 static LIST_HEAD(ir_raw_client_list);
28 
29 /* Used to handle IR raw handler extensions */
30 static DEFINE_MUTEX(ir_raw_handler_lock);
31 static LIST_HEAD(ir_raw_handler_list);
32 static u64 available_protocols;
33 static u64 encode_protocols;
34 
35 static int ir_raw_event_thread(void *data)
36 {
37 	struct ir_raw_event ev;
38 	struct ir_raw_handler *handler;
39 	struct ir_raw_event_ctrl *raw = (struct ir_raw_event_ctrl *)data;
40 	int retval;
41 
42 	while (!kthread_should_stop()) {
43 
44 		spin_lock_irq(&raw->lock);
45 		retval = kfifo_len(&raw->kfifo);
46 
47 		if (retval < sizeof(ev)) {
48 			set_current_state(TASK_INTERRUPTIBLE);
49 
50 			if (kthread_should_stop())
51 				set_current_state(TASK_RUNNING);
52 
53 			spin_unlock_irq(&raw->lock);
54 			schedule();
55 			continue;
56 		}
57 
58 		retval = kfifo_out(&raw->kfifo, &ev, sizeof(ev));
59 		spin_unlock_irq(&raw->lock);
60 
61 		mutex_lock(&ir_raw_handler_lock);
62 		list_for_each_entry(handler, &ir_raw_handler_list, list)
63 			handler->decode(raw->dev, ev);
64 		raw->prev_ev = ev;
65 		mutex_unlock(&ir_raw_handler_lock);
66 	}
67 
68 	return 0;
69 }
70 
71 /**
72  * ir_raw_event_store() - pass a pulse/space duration to the raw ir decoders
73  * @dev:	the struct rc_dev device descriptor
74  * @ev:		the struct ir_raw_event descriptor of the pulse/space
75  *
76  * This routine (which may be called from an interrupt context) stores a
77  * pulse/space sample for the raw ir decoding state machines. Whether the
78  * sample is a pulse or a space is given by @ev->pulse, and its length by
79  * @ev->duration; use ir_raw_event_reset() to reset the decoding state machines.
80  */
81 int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev)
82 {
83 	if (!dev->raw)
84 		return -EINVAL;
85 
86 	IR_dprintk(2, "sample: (%05dus %s)\n",
87 		   TO_US(ev->duration), TO_STR(ev->pulse));
88 
89 	if (kfifo_in(&dev->raw->kfifo, ev, sizeof(*ev)) != sizeof(*ev))
90 		return -ENOMEM;
91 
92 	return 0;
93 }
94 EXPORT_SYMBOL_GPL(ir_raw_event_store);
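/*
 * Illustrative sketch, not part of this file: how a receiver driver's
 * interrupt handler might feed one measured sample to ir_raw_event_store().
 * "example_priv", the register layout and the sample format are assumptions
 * invented for the example; <linux/interrupt.h>, <linux/io.h> and
 * <media/rc-core.h> are assumed to be included by such a driver.
 */
#if 0	/* example only, not compiled */
static irqreturn_t example_sample_isr(int irq, void *data)
{
	struct example_priv *priv = data;
	DEFINE_IR_RAW_EVENT(ev);
	/* hypothetical hardware: low 16 bits = duration in us, bit 31 = pulse */
	u32 sample = readl(priv->base + EXAMPLE_REG_SAMPLE);

	ev.pulse = !!(sample & BIT(31));
	ev.duration = US_TO_NS(sample & 0xffff);

	ir_raw_event_store(priv->rcdev, &ev);
	ir_raw_event_handle(priv->rcdev);	/* kick the decoder thread */
	return IRQ_HANDLED;
}
#endif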
95 
96 /**
97  * ir_raw_event_store_edge() - notify raw ir decoders of the start of a pulse/space
98  * @dev:	the struct rc_dev device descriptor
99  * @type:	the type of the event that has occurred
100  *
101  * This routine (which may be called from an interrupt context) is used to
102  * store the beginning of an ir pulse or space (or the start/end of ir
103  * reception) for the raw ir decoding state machines. This is used by
104  * hardware which does not provide durations directly but only interrupts
105  * (or similar events) on state change.
106  */
107 int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type)
108 {
109 	ktime_t			now;
110 	s64			delta; /* ns */
111 	DEFINE_IR_RAW_EVENT(ev);
112 	int			rc = 0;
113 	int			delay;
114 
115 	if (!dev->raw)
116 		return -EINVAL;
117 
118 	now = ktime_get();
119 	delta = ktime_to_ns(ktime_sub(now, dev->raw->last_event));
120 	delay = MS_TO_NS(dev->input_dev->rep[REP_DELAY]);
121 
122 	/* Check for a long duration since the last event, or if we're
123 	 * being called for the first time; note that delta can't
124 	 * possibly be negative.
125 	 */
126 	if (delta > delay || !dev->raw->last_type)
127 		type |= IR_START_EVENT;
128 	else
129 		ev.duration = delta;
130 
131 	if (type & IR_START_EVENT)
132 		ir_raw_event_reset(dev);
133 	else if (dev->raw->last_type & IR_SPACE) {
134 		ev.pulse = false;
135 		rc = ir_raw_event_store(dev, &ev);
136 	} else if (dev->raw->last_type & IR_PULSE) {
137 		ev.pulse = true;
138 		rc = ir_raw_event_store(dev, &ev);
139 	} else
140 		return 0;
141 
142 	dev->raw->last_event = now;
143 	dev->raw->last_type = type;
144 	return rc;
145 }
146 EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);
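/*
 * Illustrative sketch, not part of this file: an edge-triggered interrupt
 * handler for hardware that only reports level changes (e.g. a GPIO wired to
 * a demodulating IR receiver). ir_raw_event_store_edge() converts the time
 * between two edges into a pulse or space sample. "example_priv" and the
 * active-low line polarity are assumptions invented for the example.
 */
#if 0	/* example only, not compiled */
static irqreturn_t example_edge_isr(int irq, void *data)
{
	struct example_priv *priv = data;
	int level = gpio_get_value(priv->gpio_nr);

	/*
	 * Typical receivers drive the line low while IR light is seen, so a
	 * high level here means a space is starting, a low level a pulse.
	 */
	ir_raw_event_store_edge(priv->rcdev, level ? IR_SPACE : IR_PULSE);
	ir_raw_event_handle(priv->rcdev);
	return IRQ_HANDLED;
}
#endif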
147 
148 /**
149  * ir_raw_event_store_with_filter() - pass next pulse/space to decoders with some processing
150  * @dev:	the struct rc_dev device descriptor
151  * @ev:		the struct ir_raw_event descriptor of the pulse/space
152  *
153  * This routine (which may be called from an interrupt context) works
154  * in a similar manner to ir_raw_event_store_edge().
155  * This routine is intended for devices with a limited internal buffer.
156  * It automerges samples of the same type and handles timeouts. Returns non-zero
157  * if the event was added, and zero if the event was ignored due to idle
158  * processing.
159  */
160 int ir_raw_event_store_with_filter(struct rc_dev *dev, struct ir_raw_event *ev)
161 {
162 	if (!dev->raw)
163 		return -EINVAL;
164 
165 	/* Ignore spaces in idle mode */
166 	if (dev->idle && !ev->pulse)
167 		return 0;
168 	else if (dev->idle)
169 		ir_raw_event_set_idle(dev, false);
170 
171 	if (!dev->raw->this_ev.duration)
172 		dev->raw->this_ev = *ev;
173 	else if (ev->pulse == dev->raw->this_ev.pulse)
174 		dev->raw->this_ev.duration += ev->duration;
175 	else {
176 		ir_raw_event_store(dev, &dev->raw->this_ev);
177 		dev->raw->this_ev = *ev;
178 	}
179 
180 	/* Enter idle mode if necessary */
181 	if (!ev->pulse && dev->timeout &&
182 	    dev->raw->this_ev.duration >= dev->timeout)
183 		ir_raw_event_set_idle(dev, true);
184 
185 	return 1;
186 }
187 EXPORT_SYMBOL_GPL(ir_raw_event_store_with_filter);
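/*
 * Illustrative sketch, not part of this file: draining a small hardware FIFO
 * and letting rc-core merge consecutive samples of the same polarity and
 * detect the trailing timeout. The register layout, "example_priv" and the
 * EXAMPLE_* constants are assumptions invented for the example; dev->timeout
 * must have been set by the driver for the idle handling to kick in.
 */
#if 0	/* example only, not compiled */
static void example_drain_fifo(struct example_priv *priv)
{
	unsigned int i;

	for (i = 0; i < EXAMPLE_FIFO_DEPTH; i++) {
		u32 sample = readl(priv->base + EXAMPLE_REG_FIFO);
		DEFINE_IR_RAW_EVENT(ev);

		if (!(sample & EXAMPLE_FIFO_VALID))
			break;

		ev.pulse = !!(sample & EXAMPLE_FIFO_PULSE);
		ev.duration = US_TO_NS(sample & 0xffff);
		ir_raw_event_store_with_filter(priv->rcdev, &ev);
	}

	ir_raw_event_handle(priv->rcdev);
}
#endif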
188 
189 /**
190  * ir_raw_event_set_idle() - provide hint to rc-core when the device is idle or not
191  * @dev:	the struct rc_dev device descriptor
192  * @idle:	whether the device is idle or not
193  */
194 void ir_raw_event_set_idle(struct rc_dev *dev, bool idle)
195 {
196 	if (!dev->raw)
197 		return;
198 
199 	IR_dprintk(2, "%s idle mode\n", idle ? "enter" : "leave");
200 
201 	if (idle) {
202 		dev->raw->this_ev.timeout = true;
203 		ir_raw_event_store(dev, &dev->raw->this_ev);
204 		init_ir_raw_event(&dev->raw->this_ev);
205 	}
206 
207 	if (dev->s_idle)
208 		dev->s_idle(dev, idle);
209 
210 	dev->idle = idle;
211 }
212 EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);
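/*
 * Illustrative sketch, not part of this file: a driver supplying an s_idle
 * callback so that the idle hint above can gate the receiver hardware. The
 * register write and "example_priv" are assumptions invented for the
 * example; the callback is hooked up as dev->s_idle before the device is
 * registered with rc-core.
 */
#if 0	/* example only, not compiled */
static void example_s_idle(struct rc_dev *dev, bool enable)
{
	struct example_priv *priv = dev->priv;

	/* stop the sample engine while idle, restart it when leaving idle */
	writel(enable ? 0 : EXAMPLE_CTRL_RUN, priv->base + EXAMPLE_REG_CTRL);
}
#endif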
213 
214 /**
215  * ir_raw_event_handle() - schedules the decoding of stored ir data
216  * @dev:	the struct rc_dev device descriptor
217  *
218  * This routine will tell rc-core to start decoding stored ir data.
219  */
220 void ir_raw_event_handle(struct rc_dev *dev)
221 {
222 	unsigned long flags;
223 
224 	if (!dev->raw)
225 		return;
226 
227 	spin_lock_irqsave(&dev->raw->lock, flags);
228 	wake_up_process(dev->raw->thread);
229 	spin_unlock_irqrestore(&dev->raw->lock, flags);
230 }
231 EXPORT_SYMBOL_GPL(ir_raw_event_handle);
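/*
 * Illustrative sketch, not part of this file: a driver that queues samples
 * from several interrupts and wakes the decoder thread only once the
 * hardware reports the end of a burst. The "burst done" interrupt and
 * "example_priv" are assumptions invented for the example.
 */
#if 0	/* example only, not compiled */
static irqreturn_t example_burst_done_isr(int irq, void *data)
{
	struct example_priv *priv = data;

	/* all samples of this IR frame were stored earlier; decode them now */
	ir_raw_event_handle(priv->rcdev);
	return IRQ_HANDLED;
}
#endif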
232 
233 /* used internally by the sysfs interface */
234 u64
235 ir_raw_get_allowed_protocols(void)
236 {
237 	u64 protocols;
238 	mutex_lock(&ir_raw_handler_lock);
239 	protocols = available_protocols;
240 	mutex_unlock(&ir_raw_handler_lock);
241 	return protocols;
242 }
243 
244 /* used internally by the sysfs interface */
245 u64
246 ir_raw_get_encode_protocols(void)
247 {
248 	u64 protocols;
249 
250 	mutex_lock(&ir_raw_handler_lock);
251 	protocols = encode_protocols;
252 	mutex_unlock(&ir_raw_handler_lock);
253 	return protocols;
254 }
255 
256 static int change_protocol(struct rc_dev *dev, u64 *rc_type)
257 {
258 	/* the caller will update dev->enabled_protocols */
259 	return 0;
260 }
261 
262 /**
263  * ir_raw_gen_manchester() - Encode data with Manchester (bi-phase) modulation.
264  * @ev:		Pointer to pointer to next free event. *@ev is incremented for
265  *		each raw event filled.
266  * @max:	Maximum number of raw events to fill.
267  * @timings:	Manchester modulation timings.
268  * @n:		Number of bits of data.
269  * @data:	Data bits to encode.
270  *
271  * Encodes the @n least significant bits of @data using Manchester (bi-phase)
272  * modulation with the timing characteristics described by @timings, writing up
273  * to @max raw IR events using the *@ev pointer.
274  *
275  * Returns:	0 on success.
276  *		-ENOBUFS if there isn't enough space in the array to fit the
277  *		full encoded data. In this case all @max events will have been
278  *		written.
279  */
280 int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
281 			  const struct ir_raw_timings_manchester *timings,
282 			  unsigned int n, unsigned int data)
283 {
284 	bool need_pulse;
285 	unsigned int i;
286 	int ret = -ENOBUFS;
287 
288 	i = 1 << (n - 1);
289 
290 	if (timings->leader) {
291 		if (!max--)
292 			return ret;
293 		if (timings->pulse_space_start) {
294 			init_ir_raw_event_duration((*ev)++, 1, timings->leader);
295 
296 			if (!max--)
297 				return ret;
298 			init_ir_raw_event_duration((*ev), 0, timings->leader);
299 		} else {
300 			init_ir_raw_event_duration((*ev), 1, timings->leader);
301 		}
302 		i >>= 1;
303 	} else {
304 		/* continue existing signal */
305 		--(*ev);
306 	}
307 	/* from here on *ev will point to the last event rather than the next */
308 
309 	while (n && i > 0) {
310 		need_pulse = !(data & i);
311 		if (timings->invert)
312 			need_pulse = !need_pulse;
313 		if (need_pulse == !!(*ev)->pulse) {
314 			(*ev)->duration += timings->clock;
315 		} else {
316 			if (!max--)
317 				goto nobufs;
318 			init_ir_raw_event_duration(++(*ev), need_pulse,
319 						   timings->clock);
320 		}
321 
322 		if (!max--)
323 			goto nobufs;
324 		init_ir_raw_event_duration(++(*ev), !need_pulse,
325 					   timings->clock);
326 		i >>= 1;
327 	}
328 
329 	if (timings->trailer_space) {
330 		if (!(*ev)->pulse)
331 			(*ev)->duration += timings->trailer_space;
332 		else if (!max--)
333 			goto nobufs;
334 		else
335 			init_ir_raw_event_duration(++(*ev), 0,
336 						   timings->trailer_space);
337 	}
338 
339 	ret = 0;
340 nobufs:
341 	/* point to the next event rather than last event before returning */
342 	++(*ev);
343 	return ret;
344 }
345 EXPORT_SYMBOL(ir_raw_gen_manchester);
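/*
 * Illustrative sketch, not part of this file: how a protocol encoder might
 * use ir_raw_gen_manchester() to turn a bi-phase coded command into raw
 * events. The timings (1 ms leader, 500 us half-bit clock, 2.5 ms trailer)
 * and the 14-bit width are assumptions invented for the example, not the
 * timings of any real protocol. A leader is set because, without one, the
 * helper expects *ev to point past an already written event.
 */
#if 0	/* example only, not compiled */
static int example_encode_manchester(struct ir_raw_event *events,
				     unsigned int max, unsigned int command)
{
	static const struct ir_raw_timings_manchester example_timings = {
		.leader		= 1000 * 1000,	/* 1 ms, in ns */
		.clock		= 500 * 1000,	/* 500 us half-bit, in ns */
		.trailer_space	= 2500 * 1000,	/* 2.5 ms, in ns */
	};
	struct ir_raw_event *e = events;
	int ret;

	ret = ir_raw_gen_manchester(&e, max, &example_timings, 14, command);
	if (ret < 0)
		return ret;

	return e - events;		/* number of raw events written */
}
#endif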
346 
347 /**
348  * ir_raw_encode_scancode() - Encode a scancode as raw events
349  *
350  * @protocols:		permitted protocols
351  * @scancode:		scancode filter describing a single scancode
352  * @events:		array of raw events to write into
353  * @max:		max number of raw events
354  *
355  * Attempts to encode the scancode as raw events.
356  *
357  * Returns:	The number of events written.
358  *		-ENOBUFS if there isn't enough space in the array to fit the
359  *		encoding. In this case all @max events will have been written.
360  *		-EINVAL if the scancode is ambiguous or invalid, or if no
361  *		compatible encoder was found.
362  */
363 int ir_raw_encode_scancode(u64 protocols,
364 			   const struct rc_scancode_filter *scancode,
365 			   struct ir_raw_event *events, unsigned int max)
366 {
367 	struct ir_raw_handler *handler;
368 	int ret = -EINVAL;
369 
370 	mutex_lock(&ir_raw_handler_lock);
371 	list_for_each_entry(handler, &ir_raw_handler_list, list) {
372 		if (handler->protocols & protocols && handler->encode) {
373 			ret = handler->encode(protocols, scancode, events, max);
374 			if (ret >= 0 || ret == -ENOBUFS)
375 				break;
376 		}
377 	}
378 	mutex_unlock(&ir_raw_handler_lock);
379 
380 	return ret;
381 }
382 EXPORT_SYMBOL(ir_raw_encode_scancode);
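/*
 * Illustrative sketch, not part of this file: encoding a scancode into raw
 * events, e.g. to program a hardware wakeup filter or to feed a transmitter.
 * The RC-5 protocol mask, the scancode value and the buffer handling are
 * assumptions invented for the example.
 */
#if 0	/* example only, not compiled */
static int example_encode_wakeup(struct ir_raw_event *raw, unsigned int max)
{
	struct rc_scancode_filter scancode = {
		.data = 0x1e3d,		/* hypothetical RC-5 scancode */
		.mask = 0xffff,
	};

	/* ask any registered encoder that supports RC-5 for a representation */
	return ir_raw_encode_scancode(RC_BIT_RC5, &scancode, raw, max);
}
#endif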
383 
384 /*
385  * Used to (un)register raw event clients
386  */
387 int ir_raw_event_register(struct rc_dev *dev)
388 {
389 	int rc;
390 	struct ir_raw_handler *handler;
391 
392 	if (!dev)
393 		return -EINVAL;
394 
395 	dev->raw = kzalloc(sizeof(*dev->raw), GFP_KERNEL);
396 	if (!dev->raw)
397 		return -ENOMEM;
398 
399 	dev->raw->dev = dev;
400 	dev->change_protocol = change_protocol;
401 	rc = kfifo_alloc(&dev->raw->kfifo,
402 			 sizeof(struct ir_raw_event) * MAX_IR_EVENT_SIZE,
403 			 GFP_KERNEL);
404 	if (rc < 0)
405 		goto out;
406 
407 	spin_lock_init(&dev->raw->lock);
408 	dev->raw->thread = kthread_run(ir_raw_event_thread, dev->raw,
409 				       "rc%ld", dev->devno);
410 
411 	if (IS_ERR(dev->raw->thread)) {
412 		rc = PTR_ERR(dev->raw->thread);
413 		goto out;
414 	}
415 
416 	mutex_lock(&ir_raw_handler_lock);
417 	list_add_tail(&dev->raw->list, &ir_raw_client_list);
418 	list_for_each_entry(handler, &ir_raw_handler_list, list)
419 		if (handler->raw_register)
420 			handler->raw_register(dev);
421 	mutex_unlock(&ir_raw_handler_lock);
422 
423 	return 0;
424 
425 out:
426 	kfree(dev->raw);
427 	dev->raw = NULL;
428 	return rc;
429 }
430 
431 void ir_raw_event_unregister(struct rc_dev *dev)
432 {
433 	struct ir_raw_handler *handler;
434 
435 	if (!dev || !dev->raw)
436 		return;
437 
438 	kthread_stop(dev->raw->thread);
439 
440 	mutex_lock(&ir_raw_handler_lock);
441 	list_del(&dev->raw->list);
442 	list_for_each_entry(handler, &ir_raw_handler_list, list)
443 		if (handler->raw_unregister)
444 			handler->raw_unregister(dev);
445 	mutex_unlock(&ir_raw_handler_lock);
446 
447 	kfifo_free(&dev->raw->kfifo);
448 	kfree(dev->raw);
449 	dev->raw = NULL;
450 }
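/*
 * Illustrative sketch, not part of this file: the driver-side flow that ends
 * up in ir_raw_event_register()/ir_raw_event_unregister() above. A raw IR
 * driver does not call them directly; it allocates an rc_dev, marks it as a
 * raw receiver and registers it through rc-core. The rc_dev field names are
 * as assumed for this kernel vintage and the "example" names are invented.
 */
#if 0	/* example only, not compiled */
static int example_probe(struct example_priv *priv)
{
	struct rc_dev *rcdev;
	int ret;

	rcdev = rc_allocate_device();
	if (!rcdev)
		return -ENOMEM;

	rcdev->priv = priv;
	rcdev->driver_type = RC_DRIVER_IR_RAW;	/* use the raw decoders */
	rcdev->input_name = "Example IR receiver";
	rcdev->map_name = RC_MAP_EMPTY;
	rcdev->s_idle = example_s_idle;		/* optional, see sketch above */

	ret = rc_register_device(rcdev);	/* ends up in ir_raw_event_register() */
	if (ret) {
		rc_free_device(rcdev);
		return ret;
	}

	priv->rcdev = rcdev;
	return 0;
}
#endif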
451 
452 /*
453  * Extension interface - used to register the IR decoders
454  */
455 
456 int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
457 {
458 	struct ir_raw_event_ctrl *raw;
459 
460 	mutex_lock(&ir_raw_handler_lock);
461 	list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list);
462 	if (ir_raw_handler->raw_register)
463 		list_for_each_entry(raw, &ir_raw_client_list, list)
464 			ir_raw_handler->raw_register(raw->dev);
465 	available_protocols |= ir_raw_handler->protocols;
466 	if (ir_raw_handler->encode)
467 		encode_protocols |= ir_raw_handler->protocols;
468 	mutex_unlock(&ir_raw_handler_lock);
469 
470 	return 0;
471 }
472 EXPORT_SYMBOL(ir_raw_handler_register);
473 
474 void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
475 {
476 	struct ir_raw_event_ctrl *raw;
477 
478 	mutex_lock(&ir_raw_handler_lock);
479 	list_del(&ir_raw_handler->list);
480 	if (ir_raw_handler->raw_unregister)
481 		list_for_each_entry(raw, &ir_raw_client_list, list)
482 			ir_raw_handler->raw_unregister(raw->dev);
483 	available_protocols &= ~ir_raw_handler->protocols;
484 	if (ir_raw_handler->encode)
485 		encode_protocols &= ~ir_raw_handler->protocols;
486 	mutex_unlock(&ir_raw_handler_lock);
487 }
488 EXPORT_SYMBOL(ir_raw_handler_unregister);
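/*
 * Illustrative sketch, not part of this file: the skeleton of a protocol
 * decoder module built on the extension interface above, modelled on the
 * in-tree ir-*-decoder.c modules. The "example" names and the use of
 * RC_BIT_OTHER as the claimed protocol are assumptions invented for the
 * example.
 */
#if 0	/* example only, not compiled */
static int example_decode(struct rc_dev *dev, struct ir_raw_event ev)
{
	/* run the per-device state machine on one pulse/space sample */
	return 0;
}

static struct ir_raw_handler example_handler = {
	.protocols	= RC_BIT_OTHER,
	.decode		= example_decode,
};

static int __init example_decode_init(void)
{
	ir_raw_handler_register(&example_handler);
	return 0;
}

static void __exit example_decode_exit(void)
{
	ir_raw_handler_unregister(&example_handler);
}

module_init(example_decode_init);
module_exit(example_decode_exit);
#endif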
489 
490 void ir_raw_init(void)
491 {
492 	/* Load the decoder modules */
493 
494 	load_nec_decode();
495 	load_rc5_decode();
496 	load_rc6_decode();
497 	load_jvc_decode();
498 	load_sony_decode();
499 	load_sanyo_decode();
500 	load_sharp_decode();
501 	load_mce_kbd_decode();
502 	load_lirc_codec();
503 	load_xmp_decode();
504 
505 	/* If needed, we may later add some init code. In that case,
506 	   the CONFIG_MODULE test in rc-core.h will need to be changed.
507 	 */
508 }
509