xref: /openbmc/linux/drivers/media/rc/ir-rx51.c (revision a10c3d5f)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (C) 2008 Nokia Corporation
 *
 *  Based on lirc_serial.c
 */
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/wait.h>
#include <linux/pwm.h>
#include <linux/of.h>
#include <linux/hrtimer.h>

#include <media/rc-core.h>

#define WBUF_LEN 256

struct ir_rx51 {
	struct rc_dev *rcdev;
	struct pwm_device *pwm;
	struct pwm_state state;
	struct hrtimer timer;
	struct device	     *dev;
	wait_queue_head_t     wqueue;

	unsigned int	freq;		/* carrier frequency */
	unsigned int	duty_cycle;	/* carrier duty cycle */
	int		wbuf[WBUF_LEN];
	int		wbuf_index;
	unsigned long	device_is_open;
};

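/*
 * Start/stop the IR carrier: ir_rx51_on() enables the PWM output and
 * ir_rx51_off() disables it again.
 */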
static inline void ir_rx51_on(struct ir_rx51 *ir_rx51)
{
	ir_rx51->state.enabled = true;
	pwm_apply_might_sleep(ir_rx51->pwm, &ir_rx51->state);
}

static inline void ir_rx51_off(struct ir_rx51 *ir_rx51)
{
	ir_rx51->state.enabled = false;
	pwm_apply_might_sleep(ir_rx51->pwm, &ir_rx51->state);
}

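/*
 * Translate the requested carrier frequency and duty cycle into the cached
 * PWM state; the state is applied whenever the carrier is switched on or off.
 */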
static int init_timing_params(struct ir_rx51 *ir_rx51)
{
	ir_rx51->state.period = DIV_ROUND_CLOSEST(NSEC_PER_SEC, ir_rx51->freq);
	pwm_set_relative_duty_cycle(&ir_rx51->state, ir_rx51->duty_cycle, 100);

	return 0;
}

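/*
 * hrtimer callback clocking out the transmit buffer: even wbuf entries are
 * pulses (carrier on), odd entries are spaces (carrier off). It keeps going
 * until the buffer ends or a -1 termination mark is reached.
 */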
static enum hrtimer_restart ir_rx51_timer_cb(struct hrtimer *timer)
{
	struct ir_rx51 *ir_rx51 = container_of(timer, struct ir_rx51, timer);
	ktime_t now;

	if (ir_rx51->wbuf_index < 0) {
		dev_err_ratelimited(ir_rx51->dev,
				    "BUG wbuf_index has value of %i\n",
				    ir_rx51->wbuf_index);
		goto end;
	}

	/*
	 * If we happen to hit an odd latency spike, loop through the
	 * pulses until we catch up.
	 */
	do {
		u64 ns;

		if (ir_rx51->wbuf_index >= WBUF_LEN)
			goto end;
		if (ir_rx51->wbuf[ir_rx51->wbuf_index] == -1)
			goto end;

		if (ir_rx51->wbuf_index % 2)
			ir_rx51_off(ir_rx51);
		else
			ir_rx51_on(ir_rx51);

		ns = US_TO_NS(ir_rx51->wbuf[ir_rx51->wbuf_index]);
		hrtimer_add_expires_ns(timer, ns);

		ir_rx51->wbuf_index++;

		now = timer->base->get_time();

	} while (hrtimer_get_expires_tv64(timer) < now);

	return HRTIMER_RESTART;
end:
	/* Stop TX here */
	ir_rx51_off(ir_rx51);
	ir_rx51->wbuf_index = -1;

	wake_up_interruptible(&ir_rx51->wqueue);

	return HRTIMER_NORESTART;
}

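/*
 * rc-core tx_ir callback: queue up to WBUF_LEN pulse/space durations
 * (in microseconds) and block until the hrtimer has clocked them all out.
 */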
static int ir_rx51_tx(struct rc_dev *dev, unsigned int *buffer,
		      unsigned int count)
{
	struct ir_rx51 *ir_rx51 = dev->priv;

	if (count > WBUF_LEN)
		return -EINVAL;

	memcpy(ir_rx51->wbuf, buffer, count * sizeof(unsigned int));

	/* Wait for any pending transfers to finish */
	wait_event_interruptible(ir_rx51->wqueue, ir_rx51->wbuf_index < 0);

	init_timing_params(ir_rx51);
	if (count < WBUF_LEN)
		ir_rx51->wbuf[count] = -1; /* Insert termination mark */

	/*
	 * REVISIT: Adjust latency requirements with pm_qos_add_request() so
	 * that the device doesn't enter overly deep sleep states during a
	 * transfer.
	 */
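	/*
	 * A minimal sketch of such a constraint, for illustration only (the
	 * qos_request field and the 50 us bound below are hypothetical, not
	 * part of this driver): with a struct pm_qos_request embedded in
	 * struct ir_rx51, the CPU latency QoS API could be used as
	 *
	 *	cpu_latency_qos_add_request(&ir_rx51->qos_request, 50);
	 *
	 * before starting the transfer, and
	 *
	 *	cpu_latency_qos_remove_request(&ir_rx51->qos_request);
	 *
	 * once the transfer below has completed.
	 */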

	ir_rx51_on(ir_rx51);
	ir_rx51->wbuf_index = 1;
	hrtimer_start(&ir_rx51->timer,
		      ns_to_ktime(US_TO_NS(ir_rx51->wbuf[0])),
		      HRTIMER_MODE_REL);
	/*
	 * Don't return to userspace until the transfer has finished
	 */
	wait_event_interruptible(ir_rx51->wqueue, ir_rx51->wbuf_index < 0);

	/* REVISIT: Remove pm_qos constraint, we can sleep again */

	return count;
}

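/*
 * rc-core open callback: allow only one user at a time and claim the PWM
 * channel that drives the IR diode.
 */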
static int ir_rx51_open(struct rc_dev *dev)
{
	struct ir_rx51 *ir_rx51 = dev->priv;

	if (test_and_set_bit(1, &ir_rx51->device_is_open))
		return -EBUSY;

	ir_rx51->pwm = pwm_get(ir_rx51->dev, NULL);
	if (IS_ERR(ir_rx51->pwm)) {
		int res = PTR_ERR(ir_rx51->pwm);

		dev_err(ir_rx51->dev, "pwm_get failed: %d\n", res);
		/* Allow another open attempt after a failed pwm_get() */
		clear_bit(1, &ir_rx51->device_is_open);
		return res;
	}

	return 0;
}

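/*
 * rc-core close callback: cancel any transfer in flight, stop the carrier
 * and release the PWM channel.
 */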
static void ir_rx51_release(struct rc_dev *dev)
{
	struct ir_rx51 *ir_rx51 = dev->priv;

	hrtimer_cancel(&ir_rx51->timer);
	ir_rx51_off(ir_rx51);
	pwm_put(ir_rx51->pwm);

	clear_bit(1, &ir_rx51->device_is_open);
}

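/* Single, statically allocated driver instance with default TX settings. */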
static struct ir_rx51 ir_rx51 = {
	.duty_cycle	= 50,
	.wbuf_index	= -1,
};

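/* rc-core callback: set the TX carrier duty cycle (in percent). */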
static int ir_rx51_set_duty_cycle(struct rc_dev *dev, u32 duty)
{
	struct ir_rx51 *ir_rx51 = dev->priv;

	ir_rx51->duty_cycle = duty;

	return 0;
}

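/* rc-core callback: set the TX carrier frequency, limited to 20 kHz - 500 kHz. */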
static int ir_rx51_set_tx_carrier(struct rc_dev *dev, u32 carrier)
{
	struct ir_rx51 *ir_rx51 = dev->priv;

	if (carrier > 500000 || carrier < 20000)
		return -EINVAL;

	ir_rx51->freq = carrier;

	return 0;
}

#ifdef CONFIG_PM

static int ir_rx51_suspend(struct platform_device *dev, pm_message_t state)
{
	/*
	 * In case the device is still open, do not suspend. Normally
	 * this should not be a problem as lircd keeps the device open
	 * only for short periods of time. We also don't want to get
	 * involved with race conditions that might happen if we were
	 * in the middle of a transmit. Thus, we defer any suspend
	 * actions until the transmit has completed.
	 */
	if (test_and_set_bit(1, &ir_rx51.device_is_open))
		return -EAGAIN;

	clear_bit(1, &ir_rx51.device_is_open);

	return 0;
}

static int ir_rx51_resume(struct platform_device *dev)
{
	return 0;
}

#else

#define ir_rx51_suspend	NULL
#define ir_rx51_resume	NULL

#endif /* CONFIG_PM */

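/*
 * Probe: derive a default carrier frequency from the PWM device's period,
 * set up the hrtimer and register the raw IR TX device with rc-core.
 */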
static int ir_rx51_probe(struct platform_device *dev)
{
	struct pwm_device *pwm;
	struct rc_dev *rcdev;

	pwm = pwm_get(&dev->dev, NULL);
	if (IS_ERR(pwm))
		return dev_err_probe(&dev->dev, PTR_ERR(pwm), "pwm_get failed\n");

	/* Use a default carrier frequency in case userspace does not set one */
	ir_rx51.freq = DIV_ROUND_CLOSEST_ULL(NSEC_PER_SEC, pwm_get_period(pwm));
	pwm_init_state(pwm, &ir_rx51.state);
	pwm_put(pwm);

	hrtimer_init(&ir_rx51.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	ir_rx51.timer.function = ir_rx51_timer_cb;

	ir_rx51.dev = &dev->dev;

	rcdev = devm_rc_allocate_device(&dev->dev, RC_DRIVER_IR_RAW_TX);
	if (!rcdev)
		return -ENOMEM;

	rcdev->priv = &ir_rx51;
	rcdev->open = ir_rx51_open;
	rcdev->close = ir_rx51_release;
	rcdev->tx_ir = ir_rx51_tx;
	rcdev->s_tx_duty_cycle = ir_rx51_set_duty_cycle;
	rcdev->s_tx_carrier = ir_rx51_set_tx_carrier;
	rcdev->driver_name = KBUILD_MODNAME;

	ir_rx51.rcdev = rcdev;

	return devm_rc_register_device(&dev->dev, ir_rx51.rcdev);
}

static const struct of_device_id ir_rx51_match[] = {
	{
		.compatible = "nokia,n900-ir",
	},
	{},
};
MODULE_DEVICE_TABLE(of, ir_rx51_match);

static struct platform_driver ir_rx51_platform_driver = {
	.probe		= ir_rx51_probe,
	.suspend	= ir_rx51_suspend,
	.resume		= ir_rx51_resume,
	.driver		= {
		.name	= KBUILD_MODNAME,
		.of_match_table = ir_rx51_match,
	},
};
module_platform_driver(ir_rx51_platform_driver);

MODULE_DESCRIPTION("IR TX driver for Nokia RX51");
MODULE_AUTHOR("Nokia Corporation");
MODULE_LICENSE("GPL");