// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2011-2016 Synaptics Incorporated
 * Copyright (c) 2011 Unixphere
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rmi.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/of.h>
#include "rmi_driver.h"

#define RMI_SPI_DEFAULT_XFER_BUF_SIZE	64

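/*
 * RMI4 registers have 16-bit addresses. The high byte selects one of 256
 * register "pages"; a page select register sits at offset 0xff of every
 * page, so the current page can always be changed no matter which page is
 * active.
 */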
#define RMI_PAGE_SELECT_REGISTER	0x00FF
#define RMI_SPI_PAGE(addr)		(((addr) >> 8) & 0xff)
#define RMI_SPI_XFER_SIZE_LIMIT		255

#define BUFFER_SIZE_INCREMENT 32

enum rmi_spi_op {
	RMI_SPI_WRITE = 0,
	RMI_SPI_READ,
	RMI_SPI_V2_READ_UNIFIED,
	RMI_SPI_V2_READ_SPLIT,
	RMI_SPI_V2_WRITE,
};

struct rmi_spi_cmd {
	enum rmi_spi_op op;
	u16 addr;
};

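/*
 * Per-device transport state. rx_buf and tx_buf share a single DMA-safe
 * allocation, and the spi_transfer arrays are preallocated so that devices
 * needing per-byte delays can use one single-byte transfer per byte.
 */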
struct rmi_spi_xport {
	struct rmi_transport_dev xport;
	struct spi_device *spi;

	struct mutex page_mutex;
	int page;

	u8 *rx_buf;
	u8 *tx_buf;
	int xfer_buf_size;

	struct spi_transfer *rx_xfers;
	struct spi_transfer *tx_xfers;
	int rx_xfer_count;
	int tx_xfer_count;
};

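/*
 * (Re)size the shared rx/tx scratch buffer and the spi_transfer pools.
 * The buffer grows in powers of two up to RMI_SPI_XFER_SIZE_LIMIT; when a
 * read or write delay is configured, one spi_transfer per buffered byte is
 * needed, otherwise a single transfer per direction suffices.
 */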
static int rmi_spi_manage_pools(struct rmi_spi_xport *rmi_spi, int len)
{
	struct spi_device *spi = rmi_spi->spi;
	int buf_size = rmi_spi->xfer_buf_size
		? rmi_spi->xfer_buf_size : RMI_SPI_DEFAULT_XFER_BUF_SIZE;
	struct spi_transfer *xfer_buf;
	void *buf;
	void *tmp;

	while (buf_size < len)
		buf_size *= 2;

	if (buf_size > RMI_SPI_XFER_SIZE_LIMIT)
		buf_size = RMI_SPI_XFER_SIZE_LIMIT;

	tmp = rmi_spi->rx_buf;
	buf = devm_kcalloc(&spi->dev, buf_size, 2,
				GFP_KERNEL | GFP_DMA);
	if (!buf)
		return -ENOMEM;

	rmi_spi->rx_buf = buf;
	rmi_spi->tx_buf = &rmi_spi->rx_buf[buf_size];
	rmi_spi->xfer_buf_size = buf_size;

	if (tmp)
		devm_kfree(&spi->dev, tmp);

	if (rmi_spi->xport.pdata.spi_data.read_delay_us)
		rmi_spi->rx_xfer_count = buf_size;
	else
		rmi_spi->rx_xfer_count = 1;

	if (rmi_spi->xport.pdata.spi_data.write_delay_us)
		rmi_spi->tx_xfer_count = buf_size;
	else
		rmi_spi->tx_xfer_count = 1;

	/*
	 * Allocate a pool of spi_transfer buffers for devices which need
	 * per byte delays.
	 */
	tmp = rmi_spi->rx_xfers;
	xfer_buf = devm_kcalloc(&spi->dev,
		rmi_spi->rx_xfer_count + rmi_spi->tx_xfer_count,
		sizeof(struct spi_transfer),
		GFP_KERNEL);
	if (!xfer_buf)
		return -ENOMEM;

	rmi_spi->rx_xfers = xfer_buf;
	rmi_spi->tx_xfers = &xfer_buf[rmi_spi->rx_xfer_count];

	if (tmp)
		devm_kfree(&spi->dev, tmp);

	return 0;
}

static int rmi_spi_xfer(struct rmi_spi_xport *rmi_spi,
			const struct rmi_spi_cmd *cmd, const u8 *tx_buf,
			int tx_len, u8 *rx_buf, int rx_len)
{
	struct spi_device *spi = rmi_spi->spi;
	struct rmi_device_platform_data_spi *spi_data =
					&rmi_spi->xport.pdata.spi_data;
	struct spi_message msg;
	struct spi_transfer *xfer;
	int ret = 0;
	int len;
	int cmd_len = 0;
	int total_tx_len;
	int i;
	u16 addr = cmd->addr;

	spi_message_init(&msg);

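	/*
	 * Compute the command header length: the v1 protocol prefixes each
	 * transfer with a 2-byte address header, while the v2 opcodes use a
	 * 4-byte header instead.
	 */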
	switch (cmd->op) {
	case RMI_SPI_WRITE:
	case RMI_SPI_READ:
		cmd_len += 2;
		break;
	case RMI_SPI_V2_READ_UNIFIED:
	case RMI_SPI_V2_READ_SPLIT:
	case RMI_SPI_V2_WRITE:
		cmd_len += 4;
		break;
	}

	total_tx_len = cmd_len + tx_len;
	len = max(total_tx_len, rx_len);

	if (len > RMI_SPI_XFER_SIZE_LIMIT)
		return -EINVAL;

	if (rmi_spi->xfer_buf_size < len) {
		ret = rmi_spi_manage_pools(rmi_spi, len);
		if (ret < 0)
			return ret;
	}

	if (addr == 0)
		/*
		 * SPI needs an address. Use 0x7FF if we want to keep
		 * reading from the last position of the register pointer.
		 */
		addr = 0x7FF;

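	/*
	 * Encode the header: v1 reads set the top address bit as the read
	 * flag, and a v2 write is opcode 0x40 followed by the 16-bit
	 * address and the payload length.
	 */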
	switch (cmd->op) {
	case RMI_SPI_WRITE:
		rmi_spi->tx_buf[0] = (addr >> 8);
		rmi_spi->tx_buf[1] = addr & 0xFF;
		break;
	case RMI_SPI_READ:
		rmi_spi->tx_buf[0] = (addr >> 8) | 0x80;
		rmi_spi->tx_buf[1] = addr & 0xFF;
		break;
	case RMI_SPI_V2_READ_UNIFIED:
		break;
	case RMI_SPI_V2_READ_SPLIT:
		break;
	case RMI_SPI_V2_WRITE:
		rmi_spi->tx_buf[0] = 0x40;
		rmi_spi->tx_buf[1] = (addr >> 8) & 0xFF;
		rmi_spi->tx_buf[2] = addr & 0xFF;
		rmi_spi->tx_buf[3] = tx_len;
		break;
	}

	if (tx_buf)
		memcpy(&rmi_spi->tx_buf[cmd_len], tx_buf, tx_len);

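	/*
	 * With a configured write delay, every byte goes out as its own
	 * spi_transfer with the delay attached; otherwise the whole buffer
	 * is sent as a single transfer.
	 */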
	if (rmi_spi->tx_xfer_count > 1) {
		for (i = 0; i < total_tx_len; i++) {
			xfer = &rmi_spi->tx_xfers[i];
			memset(xfer, 0, sizeof(struct spi_transfer));
			xfer->tx_buf = &rmi_spi->tx_buf[i];
			xfer->len = 1;
			xfer->delay.value = spi_data->write_delay_us;
			xfer->delay.unit = SPI_DELAY_UNIT_USECS;
			spi_message_add_tail(xfer, &msg);
		}
	} else {
		xfer = rmi_spi->tx_xfers;
		memset(xfer, 0, sizeof(struct spi_transfer));
		xfer->tx_buf = rmi_spi->tx_buf;
		xfer->len = total_tx_len;
		spi_message_add_tail(xfer, &msg);
	}

	rmi_dbg(RMI_DEBUG_XPORT, &spi->dev, "%s: cmd: %s tx_buf len: %d tx_buf: %*ph\n",
		__func__, cmd->op == RMI_SPI_WRITE ? "WRITE" : "READ",
		total_tx_len, total_tx_len, rmi_spi->tx_buf);

	if (rx_buf) {
		if (rmi_spi->rx_xfer_count > 1) {
			for (i = 0; i < rx_len; i++) {
				xfer = &rmi_spi->rx_xfers[i];
				memset(xfer, 0, sizeof(struct spi_transfer));
				xfer->rx_buf = &rmi_spi->rx_buf[i];
				xfer->len = 1;
				xfer->delay.value = spi_data->read_delay_us;
				xfer->delay.unit = SPI_DELAY_UNIT_USECS;
				spi_message_add_tail(xfer, &msg);
			}
		} else {
			xfer = rmi_spi->rx_xfers;
			memset(xfer, 0, sizeof(struct spi_transfer));
			xfer->rx_buf = rmi_spi->rx_buf;
			xfer->len = rx_len;
			spi_message_add_tail(xfer, &msg);
		}
	}

	ret = spi_sync(spi, &msg);
	if (ret < 0) {
		dev_err(&spi->dev, "spi xfer failed: %d\n", ret);
		return ret;
	}

	if (rx_buf) {
		memcpy(rx_buf, rmi_spi->rx_buf, rx_len);
		rmi_dbg(RMI_DEBUG_XPORT, &spi->dev, "%s: (%d) %*ph\n",
			__func__, rx_len, rx_len, rx_buf);
	}

	return 0;
}

/*
 * rmi_set_page - Set RMI page
 * @rmi_spi: The pointer to the rmi_spi_xport struct
 * @page: The new page address.
 *
 * RMI devices have 16-bit addressing, but some of the transport
 * implementations (like SMBus) only have 8-bit addressing. So RMI implements
 * a page-select register at 0xff of every page so that we can reliably
 * address the full register space 256 registers at a time.
 *
 * The page_mutex lock must be held when this function is entered.
 *
 * Returns zero on success, non-zero on failure.
 */
static int rmi_set_page(struct rmi_spi_xport *rmi_spi, u8 page)
{
	struct rmi_spi_cmd cmd;
	int ret;

	cmd.op = RMI_SPI_WRITE;
	cmd.addr = RMI_PAGE_SELECT_REGISTER;

	ret = rmi_spi_xfer(rmi_spi, &cmd, &page, 1, NULL, 0);

	if (!ret)
		rmi_spi->page = page;

	return ret;
}

static int rmi_spi_write_block(struct rmi_transport_dev *xport, u16 addr,
			       const void *buf, size_t len)
{
	struct rmi_spi_xport *rmi_spi =
		container_of(xport, struct rmi_spi_xport, xport);
	struct rmi_spi_cmd cmd;
	int ret;

	mutex_lock(&rmi_spi->page_mutex);

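	/*
	 * Switch pages lazily: only touch the page select register when
	 * the target address is on a different page than the cached one.
	 */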
	if (RMI_SPI_PAGE(addr) != rmi_spi->page) {
		ret = rmi_set_page(rmi_spi, RMI_SPI_PAGE(addr));
		if (ret)
			goto exit;
	}

	cmd.op = RMI_SPI_WRITE;
	cmd.addr = addr;

	ret = rmi_spi_xfer(rmi_spi, &cmd, buf, len, NULL, 0);

exit:
	mutex_unlock(&rmi_spi->page_mutex);
	return ret;
}

static int rmi_spi_read_block(struct rmi_transport_dev *xport, u16 addr,
			      void *buf, size_t len)
{
	struct rmi_spi_xport *rmi_spi =
		container_of(xport, struct rmi_spi_xport, xport);
	struct rmi_spi_cmd cmd;
	int ret;

	mutex_lock(&rmi_spi->page_mutex);

	if (RMI_SPI_PAGE(addr) != rmi_spi->page) {
		ret = rmi_set_page(rmi_spi, RMI_SPI_PAGE(addr));
		if (ret)
			goto exit;
	}

	cmd.op = RMI_SPI_READ;
	cmd.addr = addr;

	ret = rmi_spi_xfer(rmi_spi, &cmd, NULL, 0, buf, len);

exit:
	mutex_unlock(&rmi_spi->page_mutex);
	return ret;
}

static const struct rmi_transport_ops rmi_spi_ops = {
	.write_block	= rmi_spi_write_block,
	.read_block	= rmi_spi_read_block,
};

#ifdef CONFIG_OF
static int rmi_spi_of_probe(struct spi_device *spi,
			struct rmi_device_platform_data *pdata)
{
	struct device *dev = &spi->dev;
	int retval;

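	/*
	 * Both delay properties are optional (the final argument to
	 * rmi_of_property_read_u32); an absent property simply leaves the
	 * corresponding delay at zero, disabling per-byte transfers.
	 */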
	retval = rmi_of_property_read_u32(dev,
			&pdata->spi_data.read_delay_us,
			"spi-rx-delay-us", 1);
	if (retval)
		return retval;

	retval = rmi_of_property_read_u32(dev,
			&pdata->spi_data.write_delay_us,
			"spi-tx-delay-us", 1);
	if (retval)
		return retval;

	return 0;
}

static const struct of_device_id rmi_spi_of_match[] = {
	{ .compatible = "syna,rmi4-spi" },
	{},
};
MODULE_DEVICE_TABLE(of, rmi_spi_of_match);
#else
static inline int rmi_spi_of_probe(struct spi_device *spi,
				struct rmi_device_platform_data *pdata)
{
	return -ENODEV;
}
#endif

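/*
 * Registered as a devm action from probe so the transport device is
 * unregistered automatically on later probe failure or driver unbind.
 */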
static void rmi_spi_unregister_transport(void *data)
{
	struct rmi_spi_xport *rmi_spi = data;

	rmi_unregister_transport_device(&rmi_spi->xport);
}

static int rmi_spi_probe(struct spi_device *spi)
{
	struct rmi_spi_xport *rmi_spi;
	struct rmi_device_platform_data *pdata;
	struct rmi_device_platform_data *spi_pdata = spi->dev.platform_data;
	int error;

	if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
		return -EINVAL;

	rmi_spi = devm_kzalloc(&spi->dev, sizeof(struct rmi_spi_xport),
			GFP_KERNEL);
	if (!rmi_spi)
		return -ENOMEM;

	pdata = &rmi_spi->xport.pdata;

	if (spi->dev.of_node) {
		error = rmi_spi_of_probe(spi, pdata);
		if (error)
			return error;
	} else if (spi_pdata) {
		*pdata = *spi_pdata;
	}

	if (pdata->spi_data.bits_per_word)
		spi->bits_per_word = pdata->spi_data.bits_per_word;

	if (pdata->spi_data.mode)
		spi->mode = pdata->spi_data.mode;

	error = spi_setup(spi);
	if (error < 0) {
		dev_err(&spi->dev, "spi_setup failed!\n");
		return error;
	}

	pdata->irq = spi->irq;

	rmi_spi->spi = spi;
	mutex_init(&rmi_spi->page_mutex);

	rmi_spi->xport.dev = &spi->dev;
	rmi_spi->xport.proto_name = "spi";
	rmi_spi->xport.ops = &rmi_spi_ops;

	spi_set_drvdata(spi, rmi_spi);

	error = rmi_spi_manage_pools(rmi_spi, RMI_SPI_DEFAULT_XFER_BUF_SIZE);
	if (error)
		return error;

	/*
	 * Setting the page to zero will (a) make sure the PSR is in a
	 * known state, and (b) make sure we can talk to the device.
	 */
	error = rmi_set_page(rmi_spi, 0);
	if (error) {
		dev_err(&spi->dev, "Failed to set page select to 0.\n");
		return error;
	}

	dev_info(&spi->dev, "registering SPI-connected sensor\n");

	error = rmi_register_transport_device(&rmi_spi->xport);
	if (error) {
		dev_err(&spi->dev, "failed to register sensor: %d\n", error);
		return error;
	}

	error = devm_add_action_or_reset(&spi->dev,
					 rmi_spi_unregister_transport,
					 rmi_spi);
	if (error)
		return error;

	return 0;
}

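/*
 * The bool passed to rmi_driver_suspend()/rmi_driver_resume() appears to
 * select whether the device may wake the system: true for system sleep,
 * false for runtime PM.
 */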
#ifdef CONFIG_PM_SLEEP
static int rmi_spi_suspend(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev, true);
	if (ret)
		dev_warn(dev, "Failed to suspend device: %d\n", ret);

	return ret;
}

static int rmi_spi_resume(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	ret = rmi_driver_resume(rmi_spi->xport.rmi_dev, true);
	if (ret)
		dev_warn(dev, "Failed to resume device: %d\n", ret);

	return ret;
}
#endif

#ifdef CONFIG_PM
static int rmi_spi_runtime_suspend(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev, false);
	if (ret)
		dev_warn(dev, "Failed to suspend device: %d\n", ret);

	return 0;
}

static int rmi_spi_runtime_resume(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	ret = rmi_driver_resume(rmi_spi->xport.rmi_dev, false);
	if (ret)
		dev_warn(dev, "Failed to resume device: %d\n", ret);

	return 0;
}
#endif

static const struct dev_pm_ops rmi_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(rmi_spi_suspend, rmi_spi_resume)
	SET_RUNTIME_PM_OPS(rmi_spi_runtime_suspend, rmi_spi_runtime_resume,
			   NULL)
};

static const struct spi_device_id rmi_id[] = {
	{ "rmi4_spi", 0 },
	{ }
};
MODULE_DEVICE_TABLE(spi, rmi_id);

static struct spi_driver rmi_spi_driver = {
	.driver = {
		.name	= "rmi4_spi",
		.pm	= &rmi_spi_pm,
		.of_match_table = of_match_ptr(rmi_spi_of_match),
	},
	.id_table	= rmi_id,
	.probe		= rmi_spi_probe,
};

module_spi_driver(rmi_spi_driver);

MODULE_AUTHOR("Christopher Heiny <cheiny@synaptics.com>");
MODULE_AUTHOR("Andrew Duggan <aduggan@synaptics.com>");
MODULE_DESCRIPTION("RMI SPI driver");
MODULE_LICENSE("GPL");