// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Exceet Electronics GmbH
 * Copyright (C) 2018 Bootlin
 *
 * Author: Boris Brezillon <boris.brezillon@bootlin.com>
 */
#include <linux/dmaengine.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

#include "internals.h"

#define SPI_MEM_MAX_BUSWIDTH	4

/**
 * spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a
 *					  memory operation
 * @ctlr: the SPI controller requesting this dma_map()
 * @op: the memory operation containing the buffer to map
 * @sgt: a pointer to a non-initialized sg_table that will be filled by this
 *	 function
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares everything for you and provides a ready-to-use
 * sg_table. This function is not intended to be called from SPI drivers.
 * Only SPI controller drivers should use it.
 * Note that the caller must ensure the memory region pointed to by
 * op->data.buf.{in,out} is DMA-able before calling this function.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
				       const struct spi_mem_op *op,
				       struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return -EINVAL;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	if (!dmadev)
		return -EINVAL;

	return spi_map_buf(ctlr, dmadev, sgt, op->data.buf.in, op->data.nbytes,
			   op->data.dir == SPI_MEM_DATA_IN ?
			   DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_map_mem_op_data);

/**
 * spi_controller_dma_unmap_mem_op_data() - DMA-unmap the buffer attached to a
 *					    memory operation
 * @ctlr: the SPI controller requesting this dma_unmap()
 * @op: the memory operation containing the buffer to unmap
 * @sgt: a pointer to an sg_table previously initialized by
 *	 spi_controller_dma_map_mem_op_data()
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares things so that the CPU can access the
 * op->data.buf.{in,out} buffer again.
 *
 * This function is not intended to be called from SPI drivers. Only SPI
 * controller drivers should use it.
 *
 * This function should be called after the DMA operation has finished and is
 * only valid if the previous spi_controller_dma_map_mem_op_data() call
 * returned 0.
 */
void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
					  const struct spi_mem_op *op,
					  struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	spi_unmap_buf(ctlr, dmadev, sgt,
		      op->data.dir == SPI_MEM_DATA_IN ?
		      DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_unmap_mem_op_data);
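/*
 * Illustrative sketch (not part of this file): a controller driver's
 * spi_mem_ops->exec_op() hook might wrap its DMA transfer with the two
 * helpers above. The foo_* names and foo_do_dma() are hypothetical.
 *
 *	static int foo_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
 *	{
 *		struct spi_controller *ctlr = mem->spi->controller;
 *		struct sg_table sgt;
 *		int ret;
 *
 *		ret = spi_controller_dma_map_mem_op_data(ctlr, op, &sgt);
 *		if (ret)
 *			return ret;
 *
 *		ret = foo_do_dma(ctlr, op, &sgt);
 *
 *		spi_controller_dma_unmap_mem_op_data(ctlr, op, &sgt);
 *
 *		return ret;
 *	}
 */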
static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx)
{
	u32 mode = mem->spi->mode;

	switch (buswidth) {
	case 1:
		return 0;

	case 2:
		if ((tx && (mode & (SPI_TX_DUAL | SPI_TX_QUAD))) ||
		    (!tx && (mode & (SPI_RX_DUAL | SPI_RX_QUAD))))
			return 0;

		break;

	case 4:
		if ((tx && (mode & SPI_TX_QUAD)) ||
		    (!tx && (mode & SPI_RX_QUAD)))
			return 0;

		break;

	default:
		break;
	}

	return -ENOTSUPP;
}

bool spi_mem_default_supports_op(struct spi_mem *mem,
				 const struct spi_mem_op *op)
{
	if (spi_check_buswidth_req(mem, op->cmd.buswidth, true))
		return false;

	if (op->addr.nbytes &&
	    spi_check_buswidth_req(mem, op->addr.buswidth, true))
		return false;

	if (op->dummy.nbytes &&
	    spi_check_buswidth_req(mem, op->dummy.buswidth, true))
		return false;

	if (op->data.nbytes &&
	    spi_check_buswidth_req(mem, op->data.buswidth,
				   op->data.dir == SPI_MEM_DATA_OUT))
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(spi_mem_default_supports_op);

static bool spi_mem_buswidth_is_valid(u8 buswidth)
{
	if (hweight8(buswidth) > 1 || buswidth > SPI_MEM_MAX_BUSWIDTH)
		return false;

	return true;
}

static int spi_mem_check_op(const struct spi_mem_op *op)
{
	if (!op->cmd.buswidth)
		return -EINVAL;

	if ((op->addr.nbytes && !op->addr.buswidth) ||
	    (op->dummy.nbytes && !op->dummy.buswidth) ||
	    (op->data.nbytes && !op->data.buswidth))
		return -EINVAL;

	if (!spi_mem_buswidth_is_valid(op->cmd.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->addr.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->dummy.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->data.buswidth))
		return -EINVAL;

	return 0;
}

static bool spi_mem_internal_supports_op(struct spi_mem *mem,
					 const struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;

	if (ctlr->mem_ops && ctlr->mem_ops->supports_op)
		return ctlr->mem_ops->supports_op(mem, op);

	return spi_mem_default_supports_op(mem, op);
}

/**
 * spi_mem_supports_op() - Check if a memory device and the controller it is
 *			   connected to support a specific memory operation
 * @mem: the SPI memory
 * @op: the memory operation to check
 *
 * Some controllers only support single or dual I/O, others might only support
 * specific opcodes, and it can even be that the controller and device both
 * support quad I/O but the hardware prevents you from using it because only
 * two I/O lines are connected.
 *
 * This function checks whether a specific operation is supported.
 *
 * Return: true if @op is supported, false otherwise.
 */
bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	if (spi_mem_check_op(op))
		return false;

	return spi_mem_internal_supports_op(mem, op);
}
EXPORT_SYMBOL_GPL(spi_mem_supports_op);
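/*
 * Illustrative sketch (not part of this file): a SPI memory driver would
 * typically probe for the fastest supported read path once, then fall back
 * to a slower one. The opcode (0x6b, Fast Read Quad Output), the 3-byte
 * address, and the single dummy byte are device-specific example values,
 * and buf/len are assumed to be in scope.
 *
 *	struct spi_mem_op op =
 *		SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
 *			   SPI_MEM_OP_ADDR(3, 0, 1),
 *			   SPI_MEM_OP_DUMMY(1, 1),
 *			   SPI_MEM_OP_DATA_IN(len, buf, 4));
 *
 *	// Fall back to single I/O reads if quad isn't usable.
 *	if (!spi_mem_supports_op(mem, &op))
 *		op.data.buswidth = 1;
 */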
/**
 * spi_mem_exec_op() - Execute a memory operation
 * @mem: the SPI memory
 * @op: the memory operation to execute
 *
 * Executes a memory operation.
 *
 * This function first checks that @op is supported and then tries to execute
 * it.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	unsigned int tmpbufsize, xferpos = 0, totalxferlen = 0;
	struct spi_controller *ctlr = mem->spi->controller;
	struct spi_transfer xfers[4] = { };
	struct spi_message msg;
	u8 *tmpbuf;
	int ret;

	ret = spi_mem_check_op(op);
	if (ret)
		return ret;

	if (!spi_mem_internal_supports_op(mem, op))
		return -ENOTSUPP;

	if (ctlr->mem_ops) {
		/*
		 * Flush the message queue before executing our SPI memory
		 * operation to prevent preemption of regular SPI transfers.
		 */
		spi_flush_queue(ctlr);

		if (ctlr->auto_runtime_pm) {
			ret = pm_runtime_get_sync(ctlr->dev.parent);
			if (ret < 0) {
				dev_err(&ctlr->dev,
					"Failed to power device: %d\n",
					ret);
				return ret;
			}
		}

		mutex_lock(&ctlr->bus_lock_mutex);
		mutex_lock(&ctlr->io_mutex);
		ret = ctlr->mem_ops->exec_op(mem, op);
		mutex_unlock(&ctlr->io_mutex);
		mutex_unlock(&ctlr->bus_lock_mutex);

		if (ctlr->auto_runtime_pm)
			pm_runtime_put(ctlr->dev.parent);

		/*
		 * Some controllers only optimize specific paths (typically the
		 * read path) and expect the core to use the regular SPI
		 * interface in other cases.
		 */
		if (!ret || ret != -ENOTSUPP)
			return ret;
	}

	tmpbufsize = sizeof(op->cmd.opcode) + op->addr.nbytes +
		     op->dummy.nbytes;

	/*
	 * Allocate a buffer to transmit the CMD, ADDR and DUMMY cycles with
	 * kzalloc() so we're guaranteed that this buffer is DMA-able, as
	 * required by the SPI layer.
	 */
	tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA);
	if (!tmpbuf)
		return -ENOMEM;

	spi_message_init(&msg);

	tmpbuf[0] = op->cmd.opcode;
	xfers[xferpos].tx_buf = tmpbuf;
	xfers[xferpos].len = sizeof(op->cmd.opcode);
	xfers[xferpos].tx_nbits = op->cmd.buswidth;
	spi_message_add_tail(&xfers[xferpos], &msg);
	xferpos++;
	totalxferlen++;

	if (op->addr.nbytes) {
		int i;

		/* Send the address cycles most-significant byte first. */
		for (i = 0; i < op->addr.nbytes; i++)
			tmpbuf[i + 1] = op->addr.val >>
					(8 * (op->addr.nbytes - i - 1));

		xfers[xferpos].tx_buf = tmpbuf + 1;
		xfers[xferpos].len = op->addr.nbytes;
		xfers[xferpos].tx_nbits = op->addr.buswidth;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->addr.nbytes;
	}

	if (op->dummy.nbytes) {
		memset(tmpbuf + op->addr.nbytes + 1, 0xff, op->dummy.nbytes);
		xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
		xfers[xferpos].len = op->dummy.nbytes;
		xfers[xferpos].tx_nbits = op->dummy.buswidth;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->dummy.nbytes;
	}

	if (op->data.nbytes) {
		if (op->data.dir == SPI_MEM_DATA_IN) {
			xfers[xferpos].rx_buf = op->data.buf.in;
			xfers[xferpos].rx_nbits = op->data.buswidth;
		} else {
			xfers[xferpos].tx_buf = op->data.buf.out;
			xfers[xferpos].tx_nbits = op->data.buswidth;
		}

		xfers[xferpos].len = op->data.nbytes;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->data.nbytes;
	}

	ret = spi_sync(mem->spi, &msg);

	kfree(tmpbuf);

	if (ret)
		return ret;

	if (msg.actual_length != totalxferlen)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_exec_op);
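/*
 * Illustrative sketch (not part of this file): reading a JEDEC ID with
 * spi_mem_exec_op(). The 0x9f (Read Identification) opcode and the 3-byte
 * ID are common SPI NOR conventions used here purely as an example.
 *
 *	u8 id[3];
 *	struct spi_mem_op op =
 *		SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
 *			   SPI_MEM_OP_NO_ADDR,
 *			   SPI_MEM_OP_NO_DUMMY,
 *			   SPI_MEM_OP_DATA_IN(sizeof(id), id, 1));
 *	int ret = spi_mem_exec_op(mem, &op);
 */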
/**
 * spi_mem_get_name() - Return the SPI mem device name to be used by the
 *			upper layer if necessary
 * @mem: the SPI memory
 *
 * This function allows SPI mem users to retrieve the SPI mem device name.
 * It is useful if the upper layer needs to expose a custom name for
 * compatibility reasons.
 *
 * Return: a string containing the name of the memory device to be used
 *	   by the SPI mem user
 */
const char *spi_mem_get_name(struct spi_mem *mem)
{
	return mem->name;
}
EXPORT_SYMBOL_GPL(spi_mem_get_name);

/**
 * spi_mem_adjust_op_size() - Adjust the data size of a SPI mem operation to
 *			      match controller limitations
 * @mem: the SPI memory
 * @op: the operation to adjust
 *
 * Some controllers have FIFO limitations and must split a data transfer
 * operation into multiple ones, others require a specific alignment for
 * optimized accesses. This function allows SPI mem drivers to split a single
 * operation into multiple sub-operations when required.
 *
 * Return: a negative error code if the controller can't properly adjust @op,
 *	   0 otherwise. Note that @op->data.nbytes will be updated if @op
 *	   can't be handled in a single step.
 */
int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;
	size_t len;

	len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes;

	if (ctlr->mem_ops && ctlr->mem_ops->adjust_op_size)
		return ctlr->mem_ops->adjust_op_size(mem, op);

	if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
		if (len > spi_max_transfer_size(mem->spi))
			return -EINVAL;

		op->data.nbytes = min3((size_t)op->data.nbytes,
				       spi_max_transfer_size(mem->spi),
				       spi_max_message_size(mem->spi) -
				       len);
		if (!op->data.nbytes)
			return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
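/*
 * Illustrative sketch (not part of this file): callers are expected to
 * re-run an operation until all data has been transferred, shrinking it
 * with spi_mem_adjust_op_size() on every iteration. The op/len setup is
 * hypothetical, and the void-pointer arithmetic on op.data.buf.in relies
 * on the GNU extension the kernel builds with.
 *
 *	while (len) {
 *		op.data.nbytes = len;
 *
 *		ret = spi_mem_adjust_op_size(mem, &op);
 *		if (ret)
 *			return ret;
 *
 *		ret = spi_mem_exec_op(mem, &op);
 *		if (ret)
 *			return ret;
 *
 *		op.addr.val += op.data.nbytes;
 *		op.data.buf.in += op.data.nbytes;
 *		len -= op.data.nbytes;
 *	}
 */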
static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
{
	return container_of(drv, struct spi_mem_driver, spidrv.driver);
}

static int spi_mem_probe(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_controller *ctlr = spi->controller;
	struct spi_mem *mem;

	mem = devm_kzalloc(&spi->dev, sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->spi = spi;

	if (ctlr->mem_ops && ctlr->mem_ops->get_name)
		mem->name = ctlr->mem_ops->get_name(mem);
	else
		mem->name = dev_name(&spi->dev);

	if (IS_ERR_OR_NULL(mem->name))
		return mem->name ? PTR_ERR(mem->name) : -ENOMEM;

	spi_set_drvdata(spi, mem);

	return memdrv->probe(mem);
}

static int spi_mem_remove(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->remove)
		return memdrv->remove(mem);

	return 0;
}

static void spi_mem_shutdown(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->shutdown)
		memdrv->shutdown(mem);
}

/**
 * spi_mem_driver_register_with_owner() - Register a SPI memory driver
 * @memdrv: the SPI memory driver to register
 * @owner: the owner of this driver
 *
 * Registers a SPI memory driver.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_driver_register_with_owner(struct spi_mem_driver *memdrv,
				       struct module *owner)
{
	memdrv->spidrv.probe = spi_mem_probe;
	memdrv->spidrv.remove = spi_mem_remove;
	memdrv->spidrv.shutdown = spi_mem_shutdown;

	return __spi_register_driver(owner, &memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_register_with_owner);

/**
 * spi_mem_driver_unregister() - Unregister a SPI memory driver
 * @memdrv: the SPI memory driver to unregister
 *
 * Unregisters a SPI memory driver.
 */
void spi_mem_driver_unregister(struct spi_mem_driver *memdrv)
{
	spi_unregister_driver(&memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_unregister);
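/*
 * Illustrative sketch (not part of this file): a minimal SPI memory driver
 * built on the registration helpers above. The foo_* names are hypothetical;
 * module_spi_mem_driver() expands to the register/unregister pair.
 *
 *	static int foo_probe(struct spi_mem *mem)
 *	{
 *		return 0;
 *	}
 *
 *	static struct spi_mem_driver foo_driver = {
 *		.spidrv = {
 *			.driver = {
 *				.name = "foo",
 *			},
 *		},
 *		.probe = foo_probe,
 *	};
 *	module_spi_mem_driver(foo_driver);
 */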