// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Exceet Electronics GmbH
 * Copyright (C) 2018 Bootlin
 *
 * Author: Boris Brezillon <boris.brezillon@bootlin.com>
 */
#include <linux/dmaengine.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

#include "internals.h"

#define SPI_MEM_MAX_BUSWIDTH		8

/**
 * spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a
 *					  memory operation
 * @ctlr: the SPI controller requesting this dma_map()
 * @op: the memory operation containing the buffer to map
 * @sgt: a pointer to a non-initialized sg_table that will be filled by this
 *	 function
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares everything for you and provides a ready-to-use
 * sg_table. This function is not intended to be called from SPI drivers.
 * Only SPI controller drivers should use it.
 * Note that the caller must ensure the memory region pointed to by
 * op->data.buf.{in,out} is DMA-able before calling this function.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
				       const struct spi_mem_op *op,
				       struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return -EINVAL;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	if (!dmadev)
		return -EINVAL;

	return spi_map_buf(ctlr, dmadev, sgt, op->data.buf.in, op->data.nbytes,
			   op->data.dir == SPI_MEM_DATA_IN ?
			   DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_map_mem_op_data);

/**
 * spi_controller_dma_unmap_mem_op_data() - DMA-unmap the buffer attached to a
 *					    memory operation
 * @ctlr: the SPI controller requesting this dma_unmap()
 * @op: the memory operation containing the buffer to unmap
 * @sgt: a pointer to an sg_table previously initialized by
 *	 spi_controller_dma_map_mem_op_data()
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares things so that the CPU can access the
 * op->data.buf.{in,out} buffer again.
 *
 * This function is not intended to be called from SPI drivers. Only SPI
 * controller drivers should use it.
 *
 * This function should be called after the DMA operation has finished and is
 * only valid if the previous spi_controller_dma_map_mem_op_data() call
 * returned 0.
 */
void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
					  const struct spi_mem_op *op,
					  struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	spi_unmap_buf(ctlr, dmadev, sgt,
		      op->data.dir == SPI_MEM_DATA_IN ?
		      DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_unmap_mem_op_data);

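/*
 * Example (sketch only; the foo_* names are hypothetical): a controller
 * driver's ->exec_op() might wrap its DMA transfer with the two helpers
 * above, assuming op->data.buf.{in,out} is already DMA-able:
 *
 *	static int foo_exec_op(struct spi_mem *mem,
 *			       const struct spi_mem_op *op)
 *	{
 *		struct spi_controller *ctlr = mem->spi->controller;
 *		struct sg_table sgt;
 *		int ret;
 *
 *		ret = spi_controller_dma_map_mem_op_data(ctlr, op, &sgt);
 *		if (ret)
 *			return ret;
 *
 *		ret = foo_issue_dma(ctlr, op, &sgt);
 *
 *		spi_controller_dma_unmap_mem_op_data(ctlr, op, &sgt);
 *
 *		return ret;
 *	}
 */
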
static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx)
{
	u32 mode = mem->spi->mode;

	switch (buswidth) {
	case 1:
		return 0;

	case 2:
		if ((tx &&
		     (mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL))) ||
		    (!tx &&
		     (mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL))))
			return 0;

		break;

	case 4:
		if ((tx && (mode & (SPI_TX_QUAD | SPI_TX_OCTAL))) ||
		    (!tx && (mode & (SPI_RX_QUAD | SPI_RX_OCTAL))))
			return 0;

		break;

	case 8:
		if ((tx && (mode & SPI_TX_OCTAL)) ||
		    (!tx && (mode & SPI_RX_OCTAL)))
			return 0;

		break;

	default:
		break;
	}

	return -ENOTSUPP;
}

bool spi_mem_default_supports_op(struct spi_mem *mem,
				 const struct spi_mem_op *op)
{
	if (spi_check_buswidth_req(mem, op->cmd.buswidth, true))
		return false;

	if (op->addr.nbytes &&
	    spi_check_buswidth_req(mem, op->addr.buswidth, true))
		return false;

	if (op->dummy.nbytes &&
	    spi_check_buswidth_req(mem, op->dummy.buswidth, true))
		return false;

	if (op->data.dir != SPI_MEM_NO_DATA &&
	    spi_check_buswidth_req(mem, op->data.buswidth,
				   op->data.dir == SPI_MEM_DATA_OUT))
		return false;

	if (op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr)
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(spi_mem_default_supports_op);

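/*
 * Example (sketch only): a controller driver with extra constraints can
 * layer its own checks on top of the default helper above. FOO_MAX_DUMMY
 * is a hypothetical controller limit:
 *
 *	static bool foo_supports_op(struct spi_mem *mem,
 *				    const struct spi_mem_op *op)
 *	{
 *		if (op->dummy.nbytes > FOO_MAX_DUMMY)
 *			return false;
 *
 *		return spi_mem_default_supports_op(mem, op);
 *	}
 */
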
static bool spi_mem_buswidth_is_valid(u8 buswidth)
{
	if (hweight8(buswidth) > 1 || buswidth > SPI_MEM_MAX_BUSWIDTH)
		return false;

	return true;
}

static int spi_mem_check_op(const struct spi_mem_op *op)
{
	if (!op->cmd.buswidth)
		return -EINVAL;

	if ((op->addr.nbytes && !op->addr.buswidth) ||
	    (op->dummy.nbytes && !op->dummy.buswidth) ||
	    (op->data.nbytes && !op->data.buswidth))
		return -EINVAL;

	if (!spi_mem_buswidth_is_valid(op->cmd.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->addr.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->dummy.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->data.buswidth))
		return -EINVAL;

	return 0;
}

static bool spi_mem_internal_supports_op(struct spi_mem *mem,
					 const struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;

	if (ctlr->mem_ops && ctlr->mem_ops->supports_op)
		return ctlr->mem_ops->supports_op(mem, op);

	return spi_mem_default_supports_op(mem, op);
}

/**
 * spi_mem_supports_op() - Check if a memory device and the controller it is
 *			   connected to support a specific memory operation
 * @mem: the SPI memory
 * @op: the memory operation to check
 *
 * Some controllers only support Single or Dual IOs, others might only
 * support specific opcodes, or it can even be that the controller and device
 * both support Quad IOs but the hardware prevents you from using it because
 * only 2 IO lines are connected.
 *
 * This function checks whether a specific operation is supported.
 *
 * Return: true if @op is supported, false otherwise.
 */
bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	if (spi_mem_check_op(op))
		return false;

	return spi_mem_internal_supports_op(mem, op);
}
EXPORT_SYMBOL_GPL(spi_mem_supports_op);

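/*
 * Example (sketch only): a SPI memory driver can probe for a faster
 * variant of an operation and fall back when it is not supported. The
 * opcode below (0x6b, quad output read) follows common SPI NOR
 * conventions and is purely illustrative:
 *
 *	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
 *					  SPI_MEM_OP_ADDR(3, 0, 1),
 *					  SPI_MEM_OP_DUMMY(1, 1),
 *					  SPI_MEM_OP_DATA_IN(len, buf, 4));
 *
 *	if (!spi_mem_supports_op(mem, &op))
 *		... fall back to a single IO read op instead ...
 */
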
static int spi_mem_access_start(struct spi_mem *mem)
{
	struct spi_controller *ctlr = mem->spi->controller;

	/*
	 * Flush the message queue before executing our SPI memory
	 * operation to prevent preemption of regular SPI transfers.
	 */
	spi_flush_queue(ctlr);

	if (ctlr->auto_runtime_pm) {
		int ret;

		ret = pm_runtime_get_sync(ctlr->dev.parent);
		if (ret < 0) {
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);
			return ret;
		}
	}

	mutex_lock(&ctlr->bus_lock_mutex);
	mutex_lock(&ctlr->io_mutex);

	return 0;
}

static void spi_mem_access_end(struct spi_mem *mem)
{
	struct spi_controller *ctlr = mem->spi->controller;

	mutex_unlock(&ctlr->io_mutex);
	mutex_unlock(&ctlr->bus_lock_mutex);

	if (ctlr->auto_runtime_pm)
		pm_runtime_put(ctlr->dev.parent);
}

/**
 * spi_mem_exec_op() - Execute a memory operation
 * @mem: the SPI memory
 * @op: the memory operation to execute
 *
 * Executes a memory operation.
 *
 * This function first checks that @op is supported and then tries to execute
 * it.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	unsigned int tmpbufsize, xferpos = 0, totalxferlen = 0;
	struct spi_controller *ctlr = mem->spi->controller;
	struct spi_transfer xfers[4] = { };
	struct spi_message msg;
	u8 *tmpbuf;
	int ret;

	ret = spi_mem_check_op(op);
	if (ret)
		return ret;

	if (!spi_mem_internal_supports_op(mem, op))
		return -ENOTSUPP;

	if (ctlr->mem_ops && !mem->spi->cs_gpiod) {
		ret = spi_mem_access_start(mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->exec_op(mem, op);

		spi_mem_access_end(mem);

		/*
		 * Some controllers only optimize specific paths (typically the
		 * read path) and expect the core to use the regular SPI
		 * interface in other cases.
		 */
		if (!ret || ret != -ENOTSUPP)
			return ret;
	}

	tmpbufsize = sizeof(op->cmd.opcode) + op->addr.nbytes +
		     op->dummy.nbytes;

	/*
	 * Allocate a buffer to transmit the CMD, ADDR and DUMMY cycles with
	 * kmalloc() so we're guaranteed that this buffer is DMA-able, as
	 * required by the SPI layer.
	 */
	tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA);
	if (!tmpbuf)
		return -ENOMEM;

	spi_message_init(&msg);

	tmpbuf[0] = op->cmd.opcode;
	xfers[xferpos].tx_buf = tmpbuf;
	xfers[xferpos].len = sizeof(op->cmd.opcode);
	xfers[xferpos].tx_nbits = op->cmd.buswidth;
	spi_message_add_tail(&xfers[xferpos], &msg);
	xferpos++;
	totalxferlen++;

	if (op->addr.nbytes) {
		int i;

		for (i = 0; i < op->addr.nbytes; i++)
			tmpbuf[i + 1] = op->addr.val >>
					(8 * (op->addr.nbytes - i - 1));

		xfers[xferpos].tx_buf = tmpbuf + 1;
		xfers[xferpos].len = op->addr.nbytes;
		xfers[xferpos].tx_nbits = op->addr.buswidth;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->addr.nbytes;
	}

	if (op->dummy.nbytes) {
		memset(tmpbuf + op->addr.nbytes + 1, 0xff, op->dummy.nbytes);
		xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
		xfers[xferpos].len = op->dummy.nbytes;
		xfers[xferpos].tx_nbits = op->dummy.buswidth;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->dummy.nbytes;
	}

	if (op->data.nbytes) {
		if (op->data.dir == SPI_MEM_DATA_IN) {
			xfers[xferpos].rx_buf = op->data.buf.in;
			xfers[xferpos].rx_nbits = op->data.buswidth;
		} else {
			xfers[xferpos].tx_buf = op->data.buf.out;
			xfers[xferpos].tx_nbits = op->data.buswidth;
		}

		xfers[xferpos].len = op->data.nbytes;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->data.nbytes;
	}

	ret = spi_sync(mem->spi, &msg);

	kfree(tmpbuf);

	if (ret)
		return ret;

	if (msg.actual_length != totalxferlen)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_exec_op);

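/*
 * Example (sketch only): reading a 3-byte JEDEC ID (opcode 0x9f, single
 * IO) through spi_mem_exec_op(). The buffer is kmalloc'ed so it is
 * DMA-safe:
 *
 *	u8 *id = kmalloc(3, GFP_KERNEL);
 *	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
 *					  SPI_MEM_OP_NO_ADDR,
 *					  SPI_MEM_OP_NO_DUMMY,
 *					  SPI_MEM_OP_DATA_IN(3, id, 1));
 *
 *	ret = spi_mem_exec_op(mem, &op);
 */
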
/**
 * spi_mem_get_name() - Return the SPI mem device name to be used by the
 *			upper layer if necessary
 * @mem: the SPI memory
 *
 * This function allows SPI mem users to retrieve the SPI mem device name.
 * It is useful if the upper layer needs to expose a custom name for
 * compatibility reasons.
 *
 * Return: a string containing the name of the memory device to be used
 *	   by the SPI mem user
 */
const char *spi_mem_get_name(struct spi_mem *mem)
{
	return mem->name;
}
EXPORT_SYMBOL_GPL(spi_mem_get_name);

/**
 * spi_mem_adjust_op_size() - Adjust the data size of a SPI mem operation to
 *			      match controller limitations
 * @mem: the SPI memory
 * @op: the operation to adjust
 *
 * Some controllers have FIFO limitations and must split a data transfer
 * operation into multiple ones, others require a specific alignment for
 * optimized accesses. This function allows SPI mem drivers to split a single
 * operation into multiple sub-operations when required.
 *
 * Return: a negative error code if the controller can't properly adjust @op,
 *	   0 otherwise. Note that @op->data.nbytes will be updated if @op
 *	   can't be handled in a single step.
 */
int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;
	size_t len;

	if (ctlr->mem_ops && ctlr->mem_ops->adjust_op_size)
		return ctlr->mem_ops->adjust_op_size(mem, op);

	if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
		len = sizeof(op->cmd.opcode) + op->addr.nbytes +
		      op->dummy.nbytes;

		if (len > spi_max_transfer_size(mem->spi))
			return -EINVAL;

		op->data.nbytes = min3((size_t)op->data.nbytes,
				       spi_max_transfer_size(mem->spi),
				       spi_max_message_size(mem->spi) -
				       len);
		if (!op->data.nbytes)
			return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);

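/*
 * Example (sketch only): splitting a large read into chunks the
 * controller can handle. Each pass may shrink op.data.nbytes, so the
 * loop advances by whatever was actually transferred:
 *
 *	while (len) {
 *		op.addr.val = addr;
 *		op.data.buf.in = buf;
 *		op.data.nbytes = len;
 *
 *		ret = spi_mem_adjust_op_size(mem, &op);
 *		if (ret)
 *			return ret;
 *
 *		ret = spi_mem_exec_op(mem, &op);
 *		if (ret)
 *			return ret;
 *
 *		addr += op.data.nbytes;
 *		buf += op.data.nbytes;
 *		len -= op.data.nbytes;
 *	}
 */
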
static ssize_t spi_mem_no_dirmap_read(struct spi_mem_dirmap_desc *desc,
				      u64 offs, size_t len, void *buf)
{
	struct spi_mem_op op = desc->info.op_tmpl;
	int ret;

	op.addr.val = desc->info.offset + offs;
	op.data.buf.in = buf;
	op.data.nbytes = len;
	ret = spi_mem_adjust_op_size(desc->mem, &op);
	if (ret)
		return ret;

	ret = spi_mem_exec_op(desc->mem, &op);
	if (ret)
		return ret;

	return op.data.nbytes;
}

static ssize_t spi_mem_no_dirmap_write(struct spi_mem_dirmap_desc *desc,
				       u64 offs, size_t len, const void *buf)
{
	struct spi_mem_op op = desc->info.op_tmpl;
	int ret;

	op.addr.val = desc->info.offset + offs;
	op.data.buf.out = buf;
	op.data.nbytes = len;
	ret = spi_mem_adjust_op_size(desc->mem, &op);
	if (ret)
		return ret;

	ret = spi_mem_exec_op(desc->mem, &op);
	if (ret)
		return ret;

	return op.data.nbytes;
}

/**
 * spi_mem_dirmap_create() - Create a direct mapping descriptor
 * @mem: SPI mem device this direct mapping should be created for
 * @info: direct mapping information
 *
 * This function creates a direct mapping descriptor which can then be used
 * to access the memory using spi_mem_dirmap_read() or spi_mem_dirmap_write().
 * If the SPI controller driver does not support direct mapping, this function
 * falls back to an implementation using spi_mem_exec_op(), so that the caller
 * doesn't have to bother implementing a fallback on their own.
 *
 * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
 */
struct spi_mem_dirmap_desc *
spi_mem_dirmap_create(struct spi_mem *mem,
		      const struct spi_mem_dirmap_info *info)
{
	struct spi_controller *ctlr = mem->spi->controller;
	struct spi_mem_dirmap_desc *desc;
	int ret = -ENOTSUPP;

	/* Make sure the number of address bytes is between 1 and 8. */
	if (!info->op_tmpl.addr.nbytes || info->op_tmpl.addr.nbytes > 8)
		return ERR_PTR(-EINVAL);

	/* data.dir should either be SPI_MEM_DATA_IN or SPI_MEM_DATA_OUT. */
	if (info->op_tmpl.data.dir == SPI_MEM_NO_DATA)
		return ERR_PTR(-EINVAL);

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	desc->mem = mem;
	desc->info = *info;
	if (ctlr->mem_ops && ctlr->mem_ops->dirmap_create)
		ret = ctlr->mem_ops->dirmap_create(desc);

	if (ret) {
		desc->nodirmap = true;
		if (!spi_mem_supports_op(desc->mem, &desc->info.op_tmpl))
			ret = -ENOTSUPP;
		else
			ret = 0;
	}

	if (ret) {
		kfree(desc);
		return ERR_PTR(ret);
	}

	return desc;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_create);

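/*
 * Example (sketch only; device_size is a hypothetical flash size):
 * creating a read direct mapping covering a whole device. addr.val and
 * data.nbytes are left at 0 in the template; they are filled in on each
 * access. A device-managed variant, devm_spi_mem_dirmap_create(), is
 * provided below.
 *
 *	struct spi_mem_dirmap_info info = {
 *		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1),
 *				      SPI_MEM_OP_ADDR(3, 0, 1),
 *				      SPI_MEM_OP_NO_DUMMY,
 *				      SPI_MEM_OP_DATA_IN(0, NULL, 1)),
 *		.offset = 0,
 *		.length = device_size,
 *	};
 *	struct spi_mem_dirmap_desc *desc;
 *
 *	desc = spi_mem_dirmap_create(mem, &info);
 *	if (IS_ERR(desc))
 *		return PTR_ERR(desc);
 */
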
/**
 * spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor
 * @desc: the direct mapping descriptor to destroy
 *
 * This function destroys a direct mapping descriptor previously created by
 * spi_mem_dirmap_create().
 */
void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;

	if (!desc->nodirmap && ctlr->mem_ops && ctlr->mem_ops->dirmap_destroy)
		ctlr->mem_ops->dirmap_destroy(desc);

	kfree(desc);
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_destroy);

static void devm_spi_mem_dirmap_release(struct device *dev, void *res)
{
	struct spi_mem_dirmap_desc *desc = *(struct spi_mem_dirmap_desc **)res;

	spi_mem_dirmap_destroy(desc);
}

/**
 * devm_spi_mem_dirmap_create() - Create a direct mapping descriptor and attach
 *				  it to a device
 * @dev: device the dirmap desc will be attached to
 * @mem: SPI mem device this direct mapping should be created for
 * @info: direct mapping information
 *
 * devm_ variant of the spi_mem_dirmap_create() function. See
 * spi_mem_dirmap_create() for more details.
 *
 * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
 */
struct spi_mem_dirmap_desc *
devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem,
			   const struct spi_mem_dirmap_info *info)
{
	struct spi_mem_dirmap_desc **ptr, *desc;

	ptr = devres_alloc(devm_spi_mem_dirmap_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	desc = spi_mem_dirmap_create(mem, info);
	if (IS_ERR(desc)) {
		devres_free(ptr);
	} else {
		*ptr = desc;
		devres_add(dev, ptr);
	}

	return desc;
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_create);

static int devm_spi_mem_dirmap_match(struct device *dev, void *res, void *data)
{
	struct spi_mem_dirmap_desc **ptr = res;

	if (WARN_ON(!ptr || !*ptr))
		return 0;

	return *ptr == data;
}

/**
 * devm_spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor attached
 *				   to a device
 * @dev: device the dirmap desc is attached to
 * @desc: the direct mapping descriptor to destroy
 *
 * devm_ variant of the spi_mem_dirmap_destroy() function. See
 * spi_mem_dirmap_destroy() for more details.
 */
void devm_spi_mem_dirmap_destroy(struct device *dev,
				 struct spi_mem_dirmap_desc *desc)
{
	devres_release(dev, devm_spi_mem_dirmap_release,
		       devm_spi_mem_dirmap_match, desc);
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_destroy);

/**
 * spi_mem_dirmap_read() - Read data through a direct mapping
 * @desc: direct mapping descriptor
 * @offs: offset to start reading from. Note that this is not an absolute
 *	  offset, but the offset within the direct mapping which already has
 *	  its own offset
 * @len: length in bytes
 * @buf: destination buffer. This buffer must be DMA-able
 *
 * This function reads data from a memory device using a direct mapping
 * previously instantiated with spi_mem_dirmap_create().
 *
 * Return: the amount of data read from the memory device or a negative error
 * code. Note that the returned size might be smaller than @len, and the caller
 * is responsible for calling spi_mem_dirmap_read() again when that happens.
 */
ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
			    u64 offs, size_t len, void *buf)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;
	ssize_t ret;

	if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN)
		return -EINVAL;

	if (!len)
		return 0;

	if (desc->nodirmap) {
		ret = spi_mem_no_dirmap_read(desc, offs, len, buf);
	} else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_read) {
		ret = spi_mem_access_start(desc->mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->dirmap_read(desc, offs, len, buf);

		spi_mem_access_end(desc->mem);
	} else {
		ret = -ENOTSUPP;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_read);

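/*
 * Example (sketch only): because spi_mem_dirmap_read() may return less
 * than @len, callers typically loop until the requested range has been
 * consumed; spi_mem_dirmap_write() is used the same way. The zero-byte
 * check guards against a stalled loop:
 *
 *	while (len) {
 *		ssize_t nbytes = spi_mem_dirmap_read(desc, offs, len, buf);
 *
 *		if (nbytes < 0)
 *			return nbytes;
 *		if (!nbytes)
 *			return -EIO;
 *
 *		offs += nbytes;
 *		buf += nbytes;
 *		len -= nbytes;
 *	}
 */
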
/**
 * spi_mem_dirmap_write() - Write data through a direct mapping
 * @desc: direct mapping descriptor
 * @offs: offset to start writing from. Note that this is not an absolute
 *	  offset, but the offset within the direct mapping which already has
 *	  its own offset
 * @len: length in bytes
 * @buf: source buffer. This buffer must be DMA-able
 *
 * This function writes data to a memory device using a direct mapping
 * previously instantiated with spi_mem_dirmap_create().
 *
 * Return: the amount of data written to the memory device or a negative error
 * code. Note that the returned size might be smaller than @len, and the caller
 * is responsible for calling spi_mem_dirmap_write() again when that happens.
 */
ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc,
			     u64 offs, size_t len, const void *buf)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;
	ssize_t ret;

	if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_OUT)
		return -EINVAL;

	if (!len)
		return 0;

	if (desc->nodirmap) {
		ret = spi_mem_no_dirmap_write(desc, offs, len, buf);
	} else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_write) {
		ret = spi_mem_access_start(desc->mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->dirmap_write(desc, offs, len, buf);

		spi_mem_access_end(desc->mem);
	} else {
		ret = -ENOTSUPP;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_write);

static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
{
	return container_of(drv, struct spi_mem_driver, spidrv.driver);
}

static int spi_mem_probe(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_controller *ctlr = spi->controller;
	struct spi_mem *mem;

	mem = devm_kzalloc(&spi->dev, sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->spi = spi;

	if (ctlr->mem_ops && ctlr->mem_ops->get_name)
		mem->name = ctlr->mem_ops->get_name(mem);
	else
		mem->name = dev_name(&spi->dev);

	/* A NULL name must fail too: PTR_ERR(NULL) would be 0. */
	if (IS_ERR_OR_NULL(mem->name))
		return mem->name ? PTR_ERR(mem->name) : -ENOMEM;

	spi_set_drvdata(spi, mem);

	return memdrv->probe(mem);
}

static int spi_mem_remove(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->remove)
		return memdrv->remove(mem);

	return 0;
}

static void spi_mem_shutdown(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->shutdown)
		memdrv->shutdown(mem);
}

/**
 * spi_mem_driver_register_with_owner() - Register a SPI memory driver
 * @memdrv: the SPI memory driver to register
 * @owner: the owner of this driver
 *
 * Registers a SPI memory driver.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_driver_register_with_owner(struct spi_mem_driver *memdrv,
				       struct module *owner)
{
	memdrv->spidrv.probe = spi_mem_probe;
	memdrv->spidrv.remove = spi_mem_remove;
	memdrv->spidrv.shutdown = spi_mem_shutdown;

	return __spi_register_driver(owner, &memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_register_with_owner);

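/*
 * Example (sketch only; the foo_* names are hypothetical): a SPI memory
 * driver is typically declared with the module_spi_mem_driver() helper
 * from <linux/spi/spi-mem.h>, which registers and unregisters the driver
 * at module init/exit time:
 *
 *	static struct spi_mem_driver foo_mem_driver = {
 *		.spidrv = {
 *			.driver = {
 *				.name = "foo-mem",
 *			},
 *		},
 *		.probe = foo_mem_probe,
 *		.remove = foo_mem_remove,
 *	};
 *	module_spi_mem_driver(foo_mem_driver);
 */
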
/**
 * spi_mem_driver_unregister() - Unregister a SPI memory driver
 * @memdrv: the SPI memory driver to unregister
 *
 * Unregisters a SPI memory driver.
 */
void spi_mem_driver_unregister(struct spi_mem_driver *memdrv)
{
	spi_unregister_driver(&memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_unregister);