/*
 * Copyright (c) 2011-2016 Synaptics Incorporated
 * Copyright (c) 2011 Unixphere
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rmi.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/of.h>
#include "rmi_driver.h"

#define RMI_SPI_DEFAULT_XFER_BUF_SIZE	64

#define RMI_PAGE_SELECT_REGISTER	0x00FF
#define RMI_SPI_PAGE(addr)		(((addr) >> 8) & 0xff)
#define RMI_SPI_XFER_SIZE_LIMIT		255
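/*
 * Example: address 0x0123 lives on page 0x01 at in-page offset 0x23,
 * so RMI_SPI_PAGE(0x0123) evaluates to 0x01.
 */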

#define BUFFER_SIZE_INCREMENT 32

enum rmi_spi_op {
	RMI_SPI_WRITE = 0,
	RMI_SPI_READ,
	RMI_SPI_V2_READ_UNIFIED,
	RMI_SPI_V2_READ_SPLIT,
	RMI_SPI_V2_WRITE,
};

struct rmi_spi_cmd {
	enum rmi_spi_op op;
	u16 addr;
};

struct rmi_spi_xport {
	struct rmi_transport_dev xport;
	struct spi_device *spi;

	struct mutex page_mutex;
	int page;

	u8 *rx_buf;
	u8 *tx_buf;
	int xfer_buf_size;

	struct spi_transfer *rx_xfers;
	struct spi_transfer *tx_xfers;
	int rx_xfer_count;
	int tx_xfer_count;
};

static int rmi_spi_manage_pools(struct rmi_spi_xport *rmi_spi, int len)
{
	struct spi_device *spi = rmi_spi->spi;
	int buf_size = rmi_spi->xfer_buf_size
		? rmi_spi->xfer_buf_size : RMI_SPI_DEFAULT_XFER_BUF_SIZE;
	struct spi_transfer *xfer_buf;
	void *buf;
	void *tmp;

	while (buf_size < len)
		buf_size *= 2;

	if (buf_size > RMI_SPI_XFER_SIZE_LIMIT)
		buf_size = RMI_SPI_XFER_SIZE_LIMIT;

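	/*
	 * A single DMA-capable allocation backs both directions: rx takes
	 * the first buf_size bytes and tx the second. The old buffer is
	 * freed only after the replacement is in place, so a failed
	 * allocation leaves the existing pools intact.
	 */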
	tmp = rmi_spi->rx_buf;
	buf = devm_kcalloc(&spi->dev, buf_size, 2,
				GFP_KERNEL | GFP_DMA);
	if (!buf)
		return -ENOMEM;

	rmi_spi->rx_buf = buf;
	rmi_spi->tx_buf = &rmi_spi->rx_buf[buf_size];
	rmi_spi->xfer_buf_size = buf_size;

	if (tmp)
		devm_kfree(&spi->dev, tmp);

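	/*
	 * When a read/write delay is configured, each byte goes in its own
	 * spi_transfer so the delay can be inserted between bytes;
	 * otherwise a single transfer covers the whole buffer.
	 */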
	if (rmi_spi->xport.pdata.spi_data.read_delay_us)
		rmi_spi->rx_xfer_count = buf_size;
	else
		rmi_spi->rx_xfer_count = 1;

	if (rmi_spi->xport.pdata.spi_data.write_delay_us)
		rmi_spi->tx_xfer_count = buf_size;
	else
		rmi_spi->tx_xfer_count = 1;

	/*
	 * Allocate a pool of spi_transfer buffers for devices which need
	 * per-byte delays.
	 */
	tmp = rmi_spi->rx_xfers;
	xfer_buf = devm_kcalloc(&spi->dev,
		rmi_spi->rx_xfer_count + rmi_spi->tx_xfer_count,
		sizeof(struct spi_transfer),
		GFP_KERNEL);
	if (!xfer_buf)
		return -ENOMEM;

	rmi_spi->rx_xfers = xfer_buf;
	rmi_spi->tx_xfers = &xfer_buf[rmi_spi->rx_xfer_count];

	if (tmp)
		devm_kfree(&spi->dev, tmp);

	return 0;
}

static int rmi_spi_xfer(struct rmi_spi_xport *rmi_spi,
			const struct rmi_spi_cmd *cmd, const u8 *tx_buf,
			int tx_len, u8 *rx_buf, int rx_len)
{
	struct spi_device *spi = rmi_spi->spi;
	struct rmi_device_platform_data_spi *spi_data =
					&rmi_spi->xport.pdata.spi_data;
	struct spi_message msg;
	struct spi_transfer *xfer;
	int ret = 0;
	int len;
	int cmd_len = 0;
	int total_tx_len;
	int i;
	u16 addr = cmd->addr;

	spi_message_init(&msg);

	switch (cmd->op) {
	case RMI_SPI_WRITE:
	case RMI_SPI_READ:
		cmd_len += 2;
		break;
	case RMI_SPI_V2_READ_UNIFIED:
	case RMI_SPI_V2_READ_SPLIT:
	case RMI_SPI_V2_WRITE:
		cmd_len += 4;
		break;
	}
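	/*
	 * RMI v1 transfers carry a 2-byte header: the 16-bit register
	 * address, with the read flag in the top bit. The SPI v2 variants
	 * reserve a 4-byte header (only the v2 write header is actually
	 * populated in the switch further down).
	 */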

	total_tx_len = cmd_len + tx_len;
	len = max(total_tx_len, rx_len);

	if (len > RMI_SPI_XFER_SIZE_LIMIT)
		return -EINVAL;

	if (rmi_spi->xfer_buf_size < len) {
		ret = rmi_spi_manage_pools(rmi_spi, len);
		if (ret < 0)
			return ret;
	}

	if (addr == 0)
		/*
		 * SPI needs an address. Use 0x7FF if we want to keep
		 * reading from the last position of the register pointer.
		 */
		addr = 0x7FF;

	switch (cmd->op) {
	case RMI_SPI_WRITE:
		rmi_spi->tx_buf[0] = (addr >> 8);
		rmi_spi->tx_buf[1] = addr & 0xFF;
		break;
	case RMI_SPI_READ:
		rmi_spi->tx_buf[0] = (addr >> 8) | 0x80;
		rmi_spi->tx_buf[1] = addr & 0xFF;
		break;
	case RMI_SPI_V2_READ_UNIFIED:
		break;
	case RMI_SPI_V2_READ_SPLIT:
		break;
	case RMI_SPI_V2_WRITE:
		rmi_spi->tx_buf[0] = 0x40;
		rmi_spi->tx_buf[1] = (addr >> 8) & 0xFF;
		rmi_spi->tx_buf[2] = addr & 0xFF;
		rmi_spi->tx_buf[3] = tx_len;
		break;
	}

	if (tx_buf)
		memcpy(&rmi_spi->tx_buf[cmd_len], tx_buf, tx_len);

	if (rmi_spi->tx_xfer_count > 1) {
		for (i = 0; i < total_tx_len; i++) {
			xfer = &rmi_spi->tx_xfers[i];
			memset(xfer, 0, sizeof(struct spi_transfer));
			xfer->tx_buf = &rmi_spi->tx_buf[i];
			xfer->len = 1;
			xfer->delay_usecs = spi_data->write_delay_us;
			spi_message_add_tail(xfer, &msg);
		}
	} else {
		xfer = rmi_spi->tx_xfers;
		memset(xfer, 0, sizeof(struct spi_transfer));
		xfer->tx_buf = rmi_spi->tx_buf;
		xfer->len = total_tx_len;
		spi_message_add_tail(xfer, &msg);
	}

	rmi_dbg(RMI_DEBUG_XPORT, &spi->dev, "%s: cmd: %s tx_buf len: %d tx_buf: %*ph\n",
		__func__, cmd->op == RMI_SPI_WRITE ? "WRITE" : "READ",
		total_tx_len, total_tx_len, rmi_spi->tx_buf);

	if (rx_buf) {
		if (rmi_spi->rx_xfer_count > 1) {
			for (i = 0; i < rx_len; i++) {
				xfer = &rmi_spi->rx_xfers[i];
				memset(xfer, 0, sizeof(struct spi_transfer));
				xfer->rx_buf = &rmi_spi->rx_buf[i];
				xfer->len = 1;
				xfer->delay_usecs = spi_data->read_delay_us;
				spi_message_add_tail(xfer, &msg);
			}
		} else {
			xfer = rmi_spi->rx_xfers;
			memset(xfer, 0, sizeof(struct spi_transfer));
			xfer->rx_buf = rmi_spi->rx_buf;
			xfer->len = rx_len;
			spi_message_add_tail(xfer, &msg);
		}
	}

	ret = spi_sync(spi, &msg);
	if (ret < 0) {
		dev_err(&spi->dev, "spi xfer failed: %d\n", ret);
		return ret;
	}

	if (rx_buf) {
		memcpy(rx_buf, rmi_spi->rx_buf, rx_len);
		rmi_dbg(RMI_DEBUG_XPORT, &spi->dev, "%s: (%d) %*ph\n",
			__func__, rx_len, rx_len, rx_buf);
	}

	return 0;
}
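
/*
 * Usage sketch (illustrative only): a caller holding page_mutex could
 * read two bytes from register 0x0045 on the current page with:
 *
 *	struct rmi_spi_cmd cmd = { .op = RMI_SPI_READ, .addr = 0x0045 };
 *	u8 data[2];
 *	int err = rmi_spi_xfer(rmi_spi, &cmd, NULL, 0, data, sizeof(data));
 *
 * which is exactly how rmi_spi_read_block() below drives it.
 */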

/*
 * rmi_set_page - Set RMI page
 * @rmi_spi: The pointer to the rmi_spi_xport struct
 * @page: The new page address.
 *
 * RMI devices have 16-bit addressing, but some of the transport
 * implementations (like SMBus) only have 8-bit addressing. So RMI implements
 * a page-select register at 0xff of every page, letting us reliably address
 * the full space in 256-register pages.
 *
 * The page_mutex lock must be held when this function is entered.
 *
 * Returns zero on success, non-zero on failure.
 */
static int rmi_set_page(struct rmi_spi_xport *rmi_spi, u8 page)
{
	struct rmi_spi_cmd cmd;
	int ret;

	cmd.op = RMI_SPI_WRITE;
	cmd.addr = RMI_PAGE_SELECT_REGISTER;

	ret = rmi_spi_xfer(rmi_spi, &cmd, &page, 1, NULL, 0);

	if (!ret)
		rmi_spi->page = page;

	return ret;
}

static int rmi_spi_write_block(struct rmi_transport_dev *xport, u16 addr,
			       const void *buf, size_t len)
{
	struct rmi_spi_xport *rmi_spi =
		container_of(xport, struct rmi_spi_xport, xport);
	struct rmi_spi_cmd cmd;
	int ret;

	mutex_lock(&rmi_spi->page_mutex);

	if (RMI_SPI_PAGE(addr) != rmi_spi->page) {
		ret = rmi_set_page(rmi_spi, RMI_SPI_PAGE(addr));
		if (ret)
			goto exit;
	}

	cmd.op = RMI_SPI_WRITE;
	cmd.addr = addr;

	ret = rmi_spi_xfer(rmi_spi, &cmd, buf, len, NULL, 0);

exit:
	mutex_unlock(&rmi_spi->page_mutex);
	return ret;
}

static int rmi_spi_read_block(struct rmi_transport_dev *xport, u16 addr,
			      void *buf, size_t len)
{
	struct rmi_spi_xport *rmi_spi =
		container_of(xport, struct rmi_spi_xport, xport);
	struct rmi_spi_cmd cmd;
	int ret;

	mutex_lock(&rmi_spi->page_mutex);

	if (RMI_SPI_PAGE(addr) != rmi_spi->page) {
		ret = rmi_set_page(rmi_spi, RMI_SPI_PAGE(addr));
		if (ret)
			goto exit;
	}

	cmd.op = RMI_SPI_READ;
	cmd.addr = addr;

	ret = rmi_spi_xfer(rmi_spi, &cmd, NULL, 0, buf, len);

exit:
	mutex_unlock(&rmi_spi->page_mutex);
	return ret;
}

static const struct rmi_transport_ops rmi_spi_ops = {
	.write_block	= rmi_spi_write_block,
	.read_block	= rmi_spi_read_block,
};

#ifdef CONFIG_OF
static int rmi_spi_of_probe(struct spi_device *spi,
			struct rmi_device_platform_data *pdata)
{
	struct device *dev = &spi->dev;
	int retval;

	retval = rmi_of_property_read_u32(dev,
			&pdata->spi_data.read_delay_us,
			"spi-rx-delay-us", 1);
	if (retval)
		return retval;

	retval = rmi_of_property_read_u32(dev,
			&pdata->spi_data.write_delay_us,
			"spi-tx-delay-us", 1);
	if (retval)
		return retval;

	return 0;
}

static const struct of_device_id rmi_spi_of_match[] = {
	{ .compatible = "syna,rmi4-spi" },
	{},
};
MODULE_DEVICE_TABLE(of, rmi_spi_of_match);
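
/*
 * Hypothetical device-tree fragment matched by this driver (property
 * names per Documentation/devicetree/bindings/input/rmi4/):
 *
 *	touchscreen@0 {
 *		compatible = "syna,rmi4-spi";
 *		reg = <0>;
 *		spi-max-frequency = <4000000>;
 *		interrupt-parent = <&gpio2>;
 *		interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
 *		spi-rx-delay-us = <30>;
 *		spi-tx-delay-us = <30>;
 *	};
 */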
#else
static inline int rmi_spi_of_probe(struct spi_device *spi,
				struct rmi_device_platform_data *pdata)
{
	return -ENODEV;
}
#endif

static void rmi_spi_unregister_transport(void *data)
{
	struct rmi_spi_xport *rmi_spi = data;

	rmi_unregister_transport_device(&rmi_spi->xport);
}

static int rmi_spi_probe(struct spi_device *spi)
{
	struct rmi_spi_xport *rmi_spi;
	struct rmi_device_platform_data *pdata;
	struct rmi_device_platform_data *spi_pdata = spi->dev.platform_data;
	int error;

	if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
		return -EINVAL;

	rmi_spi = devm_kzalloc(&spi->dev, sizeof(struct rmi_spi_xport),
			GFP_KERNEL);
	if (!rmi_spi)
		return -ENOMEM;

	pdata = &rmi_spi->xport.pdata;

	if (spi->dev.of_node) {
		error = rmi_spi_of_probe(spi, pdata);
		if (error)
			return error;
	} else if (spi_pdata) {
		*pdata = *spi_pdata;
	}

	if (pdata->spi_data.bits_per_word)
		spi->bits_per_word = pdata->spi_data.bits_per_word;

	if (pdata->spi_data.mode)
		spi->mode = pdata->spi_data.mode;

	error = spi_setup(spi);
	if (error < 0) {
		dev_err(&spi->dev, "spi_setup failed!\n");
		return error;
	}

	pdata->irq = spi->irq;

	rmi_spi->spi = spi;
	mutex_init(&rmi_spi->page_mutex);

	rmi_spi->xport.dev = &spi->dev;
	rmi_spi->xport.proto_name = "spi";
	rmi_spi->xport.ops = &rmi_spi_ops;

	spi_set_drvdata(spi, rmi_spi);

	error = rmi_spi_manage_pools(rmi_spi, RMI_SPI_DEFAULT_XFER_BUF_SIZE);
	if (error)
		return error;

	/*
	 * Setting the page to zero will (a) make sure the PSR is in a
	 * known state, and (b) make sure we can talk to the device.
	 */
	error = rmi_set_page(rmi_spi, 0);
	if (error) {
		dev_err(&spi->dev, "Failed to set page select to 0.\n");
		return error;
	}

	dev_info(&spi->dev, "registering SPI-connected sensor\n");

	error = rmi_register_transport_device(&rmi_spi->xport);
	if (error) {
		dev_err(&spi->dev, "failed to register sensor: %d\n", error);
		return error;
	}

	error = devm_add_action_or_reset(&spi->dev,
					 rmi_spi_unregister_transport,
					 rmi_spi);
	if (error)
		return error;

	return 0;
}

#ifdef CONFIG_PM_SLEEP
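/*
 * The second argument to rmi_driver_suspend()/rmi_driver_resume()
 * selects wakeup handling: the system-sleep handlers pass true so the
 * sensor can act as a wakeup source; the runtime PM handlers below
 * pass false.
 */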
static int rmi_spi_suspend(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev, true);
	if (ret)
		dev_warn(dev, "Failed to suspend device: %d\n", ret);

	return ret;
}

static int rmi_spi_resume(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	ret = rmi_driver_resume(rmi_spi->xport.rmi_dev, true);
	if (ret)
		dev_warn(dev, "Failed to resume device: %d\n", ret);

	return ret;
}
#endif

#ifdef CONFIG_PM
static int rmi_spi_runtime_suspend(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev, false);
	if (ret)
		dev_warn(dev, "Failed to suspend device: %d\n", ret);

	return 0;
}

static int rmi_spi_runtime_resume(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	ret = rmi_driver_resume(rmi_spi->xport.rmi_dev, false);
	if (ret)
		dev_warn(dev, "Failed to resume device: %d\n", ret);

	return 0;
}
#endif

static const struct dev_pm_ops rmi_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(rmi_spi_suspend, rmi_spi_resume)
	SET_RUNTIME_PM_OPS(rmi_spi_runtime_suspend, rmi_spi_runtime_resume,
			   NULL)
};

static const struct spi_device_id rmi_id[] = {
	{ "rmi4_spi", 0 },
	{ }
};
MODULE_DEVICE_TABLE(spi, rmi_id);

static struct spi_driver rmi_spi_driver = {
	.driver = {
		.name	= "rmi4_spi",
		.pm	= &rmi_spi_pm,
		.of_match_table = of_match_ptr(rmi_spi_of_match),
	},
	.id_table	= rmi_id,
	.probe		= rmi_spi_probe,
};

module_spi_driver(rmi_spi_driver);

MODULE_AUTHOR("Christopher Heiny <cheiny@synaptics.com>");
MODULE_AUTHOR("Andrew Duggan <aduggan@synaptics.com>");
MODULE_DESCRIPTION("RMI SPI driver");
MODULE_LICENSE("GPL");