// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Exceet Electronics GmbH
 * Copyright (C) 2018 Bootlin
 *
 * Author: Boris Brezillon <boris.brezillon@bootlin.com>
 */
#include <linux/dmaengine.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

#include "internals.h"

#define SPI_MEM_MAX_BUSWIDTH	8

/**
 * spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a
 *					  memory operation
 * @ctlr: the SPI controller requesting this dma_map()
 * @op: the memory operation containing the buffer to map
 * @sgt: a pointer to a non-initialized sg_table that will be filled by this
 *	 function
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares everything for you and provides a ready-to-use
 * sg_table. This function is not intended to be called from SPI drivers.
 * Only SPI controller drivers should use it.
 * Note that the caller must ensure the memory region pointed to by
 * op->data.buf.{in,out} is DMA-able before calling this function.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
				       const struct spi_mem_op *op,
				       struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return -EINVAL;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	if (!dmadev)
		return -EINVAL;

	return spi_map_buf(ctlr, dmadev, sgt, op->data.buf.in, op->data.nbytes,
			   op->data.dir == SPI_MEM_DATA_IN ?
			   DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_map_mem_op_data);

/**
 * spi_controller_dma_unmap_mem_op_data() - DMA-unmap the buffer attached to a
 *					    memory operation
 * @ctlr: the SPI controller requesting this dma_unmap()
 * @op: the memory operation containing the buffer to unmap
 * @sgt: a pointer to an sg_table previously initialized by
 *	 spi_controller_dma_map_mem_op_data()
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares things so that the CPU can access the
 * op->data.buf.{in,out} buffer again.
 *
 * This function is not intended to be called from SPI drivers. Only SPI
 * controller drivers should use it.
 *
 * This function should be called after the DMA operation has finished and is
 * only valid if the previous spi_controller_dma_map_mem_op_data() call
 * returned 0.
 */
void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
					  const struct spi_mem_op *op,
					  struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	spi_unmap_buf(ctlr, dmadev, sgt,
		      op->data.dir == SPI_MEM_DATA_IN ?
		      DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_unmap_mem_op_data);
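
/*
 * Illustrative sketch (not part of this file): how a controller driver's
 * ->exec_op() hook might use the two helpers above around a DMA transfer.
 * mydrv_exec_op() and mydrv_run_dma() are hypothetical names, and the sketch
 * assumes op->data.buf.{in,out} is already DMA-able:
 *
 *	static int mydrv_exec_op(struct spi_mem *mem,
 *				 const struct spi_mem_op *op)
 *	{
 *		struct spi_controller *ctlr = mem->spi->controller;
 *		struct sg_table sgt;
 *		int ret;
 *
 *		ret = spi_controller_dma_map_mem_op_data(ctlr, op, &sgt);
 *		if (ret)
 *			return ret;
 *
 *		ret = mydrv_run_dma(ctlr, op, &sgt);	// program the DMA engine
 *
 *		spi_controller_dma_unmap_mem_op_data(ctlr, op, &sgt);
 *		return ret;
 *	}
 */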

static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx)
{
	u32 mode = mem->spi->mode;

	switch (buswidth) {
	case 1:
		return 0;

	case 2:
		if ((tx &&
		     (mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL))) ||
		    (!tx &&
		     (mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL))))
			return 0;

		break;

	case 4:
		if ((tx && (mode & (SPI_TX_QUAD | SPI_TX_OCTAL))) ||
		    (!tx && (mode & (SPI_RX_QUAD | SPI_RX_OCTAL))))
			return 0;

		break;

	case 8:
		if ((tx && (mode & SPI_TX_OCTAL)) ||
		    (!tx && (mode & SPI_RX_OCTAL)))
			return 0;

		break;

	default:
		break;
	}

	return -ENOTSUPP;
}

bool spi_mem_default_supports_op(struct spi_mem *mem,
				 const struct spi_mem_op *op)
{
	if (spi_check_buswidth_req(mem, op->cmd.buswidth, true))
		return false;

	if (op->addr.nbytes &&
	    spi_check_buswidth_req(mem, op->addr.buswidth, true))
		return false;

	if (op->dummy.nbytes &&
	    spi_check_buswidth_req(mem, op->dummy.buswidth, true))
		return false;

	if (op->data.dir != SPI_MEM_NO_DATA &&
	    spi_check_buswidth_req(mem, op->data.buswidth,
				   op->data.dir == SPI_MEM_DATA_OUT))
		return false;

	if (op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr)
		return false;

	if (op->cmd.nbytes != 1)
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(spi_mem_default_supports_op);

static bool spi_mem_buswidth_is_valid(u8 buswidth)
{
	if (hweight8(buswidth) > 1 || buswidth > SPI_MEM_MAX_BUSWIDTH)
		return false;

	return true;
}

static int spi_mem_check_op(const struct spi_mem_op *op)
{
	if (!op->cmd.buswidth || !op->cmd.nbytes)
		return -EINVAL;

	if ((op->addr.nbytes && !op->addr.buswidth) ||
	    (op->dummy.nbytes && !op->dummy.buswidth) ||
	    (op->data.nbytes && !op->data.buswidth))
		return -EINVAL;

	if (!spi_mem_buswidth_is_valid(op->cmd.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->addr.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->dummy.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->data.buswidth))
		return -EINVAL;

	return 0;
}

static bool spi_mem_internal_supports_op(struct spi_mem *mem,
					 const struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;

	if (ctlr->mem_ops && ctlr->mem_ops->supports_op)
		return ctlr->mem_ops->supports_op(mem, op);

	return spi_mem_default_supports_op(mem, op);
}

/**
 * spi_mem_supports_op() - Check if a memory device and the controller it is
 *			   connected to support a specific memory operation
 * @mem: the SPI memory
 * @op: the memory operation to check
 *
 * Some controllers only support Single or Dual I/O, others might only support
 * specific opcodes, and it can even happen that the controller and device both
 * support Quad I/O but the board only wires two of the I/O lines.
 *
 * This function checks whether a specific operation is supported.
 *
 * Return: true if @op is supported, false otherwise.
 */
bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	if (spi_mem_check_op(op))
		return false;

	return spi_mem_internal_supports_op(mem, op);
}
EXPORT_SYMBOL_GPL(spi_mem_supports_op);
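
/*
 * Illustrative sketch (hypothetical helper, opcode and cycle counts are
 * assumptions): a flash driver might use spi_mem_supports_op() to check
 * whether the whole controller/board/device chain supports a faster read
 * variant, e.g. a 1-4-4 Quad I/O read, before falling back to 1-1-1:
 *
 *	static bool mydrv_can_quad_read(struct spi_mem *mem)
 *	{
 *		struct spi_mem_op op =
 *			SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1),
 *				   SPI_MEM_OP_ADDR(3, 0, 4),
 *				   SPI_MEM_OP_DUMMY(3, 4),
 *				   SPI_MEM_OP_DATA_IN(256, NULL, 4));
 *
 *		return spi_mem_supports_op(mem, &op);
 *	}
 */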

static int spi_mem_access_start(struct spi_mem *mem)
{
	struct spi_controller *ctlr = mem->spi->controller;

	/*
	 * Flush the message queue before executing our SPI memory
	 * operation to prevent preemption of regular SPI transfers.
	 */
	spi_flush_queue(ctlr);

	if (ctlr->auto_runtime_pm) {
		int ret;

		ret = pm_runtime_get_sync(ctlr->dev.parent);
		if (ret < 0) {
			pm_runtime_put_noidle(ctlr->dev.parent);
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);
			return ret;
		}
	}

	mutex_lock(&ctlr->bus_lock_mutex);
	mutex_lock(&ctlr->io_mutex);

	return 0;
}

static void spi_mem_access_end(struct spi_mem *mem)
{
	struct spi_controller *ctlr = mem->spi->controller;

	mutex_unlock(&ctlr->io_mutex);
	mutex_unlock(&ctlr->bus_lock_mutex);

	if (ctlr->auto_runtime_pm)
		pm_runtime_put(ctlr->dev.parent);
}

/**
 * spi_mem_exec_op() - Execute a memory operation
 * @mem: the SPI memory
 * @op: the memory operation to execute
 *
 * Executes a memory operation.
 *
 * This function first checks that @op is supported and then tries to execute
 * it.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	unsigned int tmpbufsize, xferpos = 0, totalxferlen = 0;
	struct spi_controller *ctlr = mem->spi->controller;
	struct spi_transfer xfers[4] = { };
	struct spi_message msg;
	u8 *tmpbuf;
	int ret;

	ret = spi_mem_check_op(op);
	if (ret)
		return ret;

	if (!spi_mem_internal_supports_op(mem, op))
		return -ENOTSUPP;

	if (ctlr->mem_ops && !mem->spi->cs_gpiod) {
		ret = spi_mem_access_start(mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->exec_op(mem, op);

		spi_mem_access_end(mem);

		/*
		 * Some controllers only optimize specific paths (typically the
		 * read path) and expect the core to use the regular SPI
		 * interface in other cases.
		 */
		if (!ret || ret != -ENOTSUPP)
			return ret;
	}

	tmpbufsize = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;

	/*
	 * Allocate a buffer to transmit the CMD, ADDR cycles with kmalloc() so
	 * we're guaranteed that this buffer is DMA-able, as required by the
	 * SPI layer.
	 */
	tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA);
	if (!tmpbuf)
		return -ENOMEM;

	spi_message_init(&msg);

	tmpbuf[0] = op->cmd.opcode;
	xfers[xferpos].tx_buf = tmpbuf;
	xfers[xferpos].len = op->cmd.nbytes;
	xfers[xferpos].tx_nbits = op->cmd.buswidth;
	spi_message_add_tail(&xfers[xferpos], &msg);
	xferpos++;
	totalxferlen++;

	if (op->addr.nbytes) {
		int i;

		for (i = 0; i < op->addr.nbytes; i++)
			tmpbuf[i + 1] = op->addr.val >>
					(8 * (op->addr.nbytes - i - 1));

		xfers[xferpos].tx_buf = tmpbuf + 1;
		xfers[xferpos].len = op->addr.nbytes;
		xfers[xferpos].tx_nbits = op->addr.buswidth;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->addr.nbytes;
	}

	if (op->dummy.nbytes) {
		memset(tmpbuf + op->addr.nbytes + 1, 0xff, op->dummy.nbytes);
		xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
		xfers[xferpos].len = op->dummy.nbytes;
		xfers[xferpos].tx_nbits = op->dummy.buswidth;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->dummy.nbytes;
	}

	if (op->data.nbytes) {
		if (op->data.dir == SPI_MEM_DATA_IN) {
			xfers[xferpos].rx_buf = op->data.buf.in;
			xfers[xferpos].rx_nbits = op->data.buswidth;
		} else {
			xfers[xferpos].tx_buf = op->data.buf.out;
			xfers[xferpos].tx_nbits = op->data.buswidth;
		}

		xfers[xferpos].len = op->data.nbytes;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->data.nbytes;
	}

	ret = spi_sync(mem->spi, &msg);

	kfree(tmpbuf);

	if (ret)
		return ret;

	if (msg.actual_length != totalxferlen)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_exec_op);
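
/*
 * Illustrative sketch (hypothetical caller; 0x9f is the JEDEC READ ID
 * opcode): a typical spi_mem_exec_op() call builds the op with the
 * SPI_MEM_OP() helpers and lets the core pick the controller's exec_op()
 * path or the spi_message fallback above:
 *
 *	static int mydrv_read_id(struct spi_mem *mem, u8 *id, size_t len)
 *	{
 *		struct spi_mem_op op =
 *			SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
 *				   SPI_MEM_OP_NO_ADDR,
 *				   SPI_MEM_OP_NO_DUMMY,
 *				   SPI_MEM_OP_DATA_IN(len, id, 1));
 *
 *		return spi_mem_exec_op(mem, &op);
 *	}
 */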

/**
 * spi_mem_get_name() - Return the SPI mem device name to be used by the
 *			upper layer if necessary
 * @mem: the SPI memory
 *
 * This function allows SPI mem users to retrieve the SPI mem device name.
 * It is useful if the upper layer needs to expose a custom name for
 * compatibility reasons.
 *
 * Return: a string containing the name of the memory device to be used
 *	   by the SPI mem user.
 */
const char *spi_mem_get_name(struct spi_mem *mem)
{
	return mem->name;
}
EXPORT_SYMBOL_GPL(spi_mem_get_name);

/**
 * spi_mem_adjust_op_size() - Adjust the data size of a SPI mem operation to
 *			      match controller limitations
 * @mem: the SPI memory
 * @op: the operation to adjust
 *
 * Some controllers have FIFO limitations and must split a data transfer
 * operation into multiple ones, others require a specific alignment for
 * optimized accesses. This function allows SPI mem drivers to split a single
 * operation into multiple sub-operations when required.
 *
 * Return: a negative error code if the controller can't properly adjust @op,
 *	   0 otherwise. Note that @op->data.nbytes will be updated if @op
 *	   can't be handled in a single step.
 */
int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;
	size_t len;

	if (ctlr->mem_ops && ctlr->mem_ops->adjust_op_size)
		return ctlr->mem_ops->adjust_op_size(mem, op);

	if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
		len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;

		if (len > spi_max_transfer_size(mem->spi))
			return -EINVAL;

		op->data.nbytes = min3((size_t)op->data.nbytes,
				       spi_max_transfer_size(mem->spi),
				       spi_max_message_size(mem->spi) - len);
		if (!op->data.nbytes)
			return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
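
/*
 * Illustrative sketch (hypothetical caller; 0x03 is the basic 1-1-1 read
 * opcode): since spi_mem_adjust_op_size() may shrink op.data.nbytes, large
 * accesses are usually issued in a loop that re-adjusts and advances the
 * address and buffer until everything has been transferred:
 *
 *	static int mydrv_read(struct spi_mem *mem, u32 from, void *buf,
 *			      size_t len)
 *	{
 *		struct spi_mem_op op =
 *			SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1),
 *				   SPI_MEM_OP_ADDR(3, from, 1),
 *				   SPI_MEM_OP_NO_DUMMY,
 *				   SPI_MEM_OP_DATA_IN(len, buf, 1));
 *		int ret;
 *
 *		while (len) {
 *			op.data.nbytes = len;
 *			ret = spi_mem_adjust_op_size(mem, &op);
 *			if (ret)
 *				return ret;
 *
 *			ret = spi_mem_exec_op(mem, &op);
 *			if (ret)
 *				return ret;
 *
 *			op.addr.val += op.data.nbytes;
 *			op.data.buf.in += op.data.nbytes;
 *			len -= op.data.nbytes;
 *		}
 *
 *		return 0;
 *	}
 */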

static ssize_t spi_mem_no_dirmap_read(struct spi_mem_dirmap_desc *desc,
				      u64 offs, size_t len, void *buf)
{
	struct spi_mem_op op = desc->info.op_tmpl;
	int ret;

	op.addr.val = desc->info.offset + offs;
	op.data.buf.in = buf;
	op.data.nbytes = len;
	ret = spi_mem_adjust_op_size(desc->mem, &op);
	if (ret)
		return ret;

	ret = spi_mem_exec_op(desc->mem, &op);
	if (ret)
		return ret;

	return op.data.nbytes;
}

static ssize_t spi_mem_no_dirmap_write(struct spi_mem_dirmap_desc *desc,
				       u64 offs, size_t len, const void *buf)
{
	struct spi_mem_op op = desc->info.op_tmpl;
	int ret;

	op.addr.val = desc->info.offset + offs;
	op.data.buf.out = buf;
	op.data.nbytes = len;
	ret = spi_mem_adjust_op_size(desc->mem, &op);
	if (ret)
		return ret;

	ret = spi_mem_exec_op(desc->mem, &op);
	if (ret)
		return ret;

	return op.data.nbytes;
}

/**
 * spi_mem_dirmap_create() - Create a direct mapping descriptor
 * @mem: SPI mem device this direct mapping should be created for
 * @info: direct mapping information
 *
 * This function creates a direct mapping descriptor which can then be used
 * to access the memory using spi_mem_dirmap_read() or spi_mem_dirmap_write().
 * If the SPI controller driver does not support direct mapping, this function
 * falls back to an implementation using spi_mem_exec_op(), so that the caller
 * doesn't have to bother implementing a fallback on its own.
 *
 * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
 */
struct spi_mem_dirmap_desc *
spi_mem_dirmap_create(struct spi_mem *mem,
		      const struct spi_mem_dirmap_info *info)
{
	struct spi_controller *ctlr = mem->spi->controller;
	struct spi_mem_dirmap_desc *desc;
	int ret = -ENOTSUPP;

	/* Make sure the number of address bytes is between 1 and 8. */
	if (!info->op_tmpl.addr.nbytes || info->op_tmpl.addr.nbytes > 8)
		return ERR_PTR(-EINVAL);

	/* data.dir should either be SPI_MEM_DATA_IN or SPI_MEM_DATA_OUT. */
	if (info->op_tmpl.data.dir == SPI_MEM_NO_DATA)
		return ERR_PTR(-EINVAL);

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	desc->mem = mem;
	desc->info = *info;
	if (ctlr->mem_ops && ctlr->mem_ops->dirmap_create)
		ret = ctlr->mem_ops->dirmap_create(desc);

	if (ret) {
		desc->nodirmap = true;
		if (!spi_mem_supports_op(desc->mem, &desc->info.op_tmpl))
			ret = -ENOTSUPP;
		else
			ret = 0;
	}

	if (ret) {
		kfree(desc);
		return ERR_PTR(ret);
	}

	return desc;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_create);
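
/*
 * Illustrative sketch (hypothetical caller): a flash driver typically creates
 * one read descriptor covering the whole device and funnels all reads through
 * it. The struct spi_mem_dirmap_info fields shown here (op_tmpl, offset,
 * length) are the ones consumed above:
 *
 *	static struct spi_mem_dirmap_desc *
 *	mydrv_create_read_dirmap(struct spi_mem *mem, u64 size)
 *	{
 *		struct spi_mem_dirmap_info info = {
 *			.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1),
 *					      SPI_MEM_OP_ADDR(3, 0, 1),
 *					      SPI_MEM_OP_NO_DUMMY,
 *					      SPI_MEM_OP_DATA_IN(0, NULL, 1)),
 *			.offset = 0,
 *			.length = size,
 *		};
 *
 *		return spi_mem_dirmap_create(mem, &info);
 *	}
 */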

/**
 * spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor
 * @desc: the direct mapping descriptor to destroy
 *
 * This function destroys a direct mapping descriptor previously created by
 * spi_mem_dirmap_create().
 */
void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;

	if (!desc->nodirmap && ctlr->mem_ops && ctlr->mem_ops->dirmap_destroy)
		ctlr->mem_ops->dirmap_destroy(desc);

	kfree(desc);
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_destroy);

static void devm_spi_mem_dirmap_release(struct device *dev, void *res)
{
	struct spi_mem_dirmap_desc *desc = *(struct spi_mem_dirmap_desc **)res;

	spi_mem_dirmap_destroy(desc);
}

/**
 * devm_spi_mem_dirmap_create() - Create a direct mapping descriptor and attach
 *				  it to a device
 * @dev: device the dirmap desc will be attached to
 * @mem: SPI mem device this direct mapping should be created for
 * @info: direct mapping information
 *
 * devm_ variant of the spi_mem_dirmap_create() function. See
 * spi_mem_dirmap_create() for more details.
 *
 * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
 */
struct spi_mem_dirmap_desc *
devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem,
			   const struct spi_mem_dirmap_info *info)
{
	struct spi_mem_dirmap_desc **ptr, *desc;

	ptr = devres_alloc(devm_spi_mem_dirmap_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	desc = spi_mem_dirmap_create(mem, info);
	if (IS_ERR(desc)) {
		devres_free(ptr);
	} else {
		*ptr = desc;
		devres_add(dev, ptr);
	}

	return desc;
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_create);

static int devm_spi_mem_dirmap_match(struct device *dev, void *res, void *data)
{
	struct spi_mem_dirmap_desc **ptr = res;

	if (WARN_ON(!ptr || !*ptr))
		return 0;

	return *ptr == data;
}

/**
 * devm_spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor attached
 *				   to a device
 * @dev: device the dirmap desc is attached to
 * @desc: the direct mapping descriptor to destroy
 *
 * devm_ variant of the spi_mem_dirmap_destroy() function. See
 * spi_mem_dirmap_destroy() for more details.
 */
void devm_spi_mem_dirmap_destroy(struct device *dev,
				 struct spi_mem_dirmap_desc *desc)
{
	devres_release(dev, devm_spi_mem_dirmap_release,
		       devm_spi_mem_dirmap_match, desc);
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_destroy);

/**
 * spi_mem_dirmap_read() - Read data through a direct mapping
 * @desc: direct mapping descriptor
 * @offs: offset to start reading from. Note that this is not an absolute
 *	  offset, but the offset within the direct mapping which already has
 *	  its own offset
 * @len: length in bytes
 * @buf: destination buffer. This buffer must be DMA-able
 *
 * This function reads data from a memory device using a direct mapping
 * previously instantiated with spi_mem_dirmap_create().
 *
 * Return: the amount of data read from the memory device or a negative error
 * code. Note that the returned size might be smaller than @len, and the caller
 * is responsible for calling spi_mem_dirmap_read() again when that happens.
 */
ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
			    u64 offs, size_t len, void *buf)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;
	ssize_t ret;

	if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN)
		return -EINVAL;

	if (!len)
		return 0;

	if (desc->nodirmap) {
		ret = spi_mem_no_dirmap_read(desc, offs, len, buf);
	} else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_read) {
		ret = spi_mem_access_start(desc->mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->dirmap_read(desc, offs, len, buf);

		spi_mem_access_end(desc->mem);
	} else {
		ret = -ENOTSUPP;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_read);
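
/*
 * Illustrative sketch (hypothetical caller): because spi_mem_dirmap_read()
 * may return fewer bytes than requested, callers normally wrap it in a loop
 * until the whole range has been transferred:
 *
 *	static int mydrv_dirmap_read_all(struct spi_mem_dirmap_desc *desc,
 *					 u64 offs, size_t len, void *buf)
 *	{
 *		while (len) {
 *			ssize_t ret = spi_mem_dirmap_read(desc, offs, len,
 *							  buf);
 *
 *			if (ret < 0)
 *				return ret;
 *			if (!ret)
 *				return -EIO;
 *
 *			offs += ret;
 *			buf += ret;
 *			len -= ret;
 *		}
 *
 *		return 0;
 *	}
 */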

/**
 * spi_mem_dirmap_write() - Write data through a direct mapping
 * @desc: direct mapping descriptor
 * @offs: offset to start writing from. Note that this is not an absolute
 *	  offset, but the offset within the direct mapping which already has
 *	  its own offset
 * @len: length in bytes
 * @buf: source buffer. This buffer must be DMA-able
 *
 * This function writes data to a memory device using a direct mapping
 * previously instantiated with spi_mem_dirmap_create().
 *
 * Return: the amount of data written to the memory device or a negative error
 * code. Note that the returned size might be smaller than @len, and the caller
 * is responsible for calling spi_mem_dirmap_write() again when that happens.
 */
ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc,
			     u64 offs, size_t len, const void *buf)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;
	ssize_t ret;

	if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_OUT)
		return -EINVAL;

	if (!len)
		return 0;

	if (desc->nodirmap) {
		ret = spi_mem_no_dirmap_write(desc, offs, len, buf);
	} else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_write) {
		ret = spi_mem_access_start(desc->mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->dirmap_write(desc, offs, len, buf);

		spi_mem_access_end(desc->mem);
	} else {
		ret = -ENOTSUPP;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_write);

static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
{
	return container_of(drv, struct spi_mem_driver, spidrv.driver);
}

static int spi_mem_probe(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_controller *ctlr = spi->controller;
	struct spi_mem *mem;

	mem = devm_kzalloc(&spi->dev, sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->spi = spi;

	if (ctlr->mem_ops && ctlr->mem_ops->get_name)
		mem->name = ctlr->mem_ops->get_name(mem);
	else
		mem->name = dev_name(&spi->dev);

	if (IS_ERR_OR_NULL(mem->name))
		return PTR_ERR_OR_ZERO(mem->name);

	spi_set_drvdata(spi, mem);

	return memdrv->probe(mem);
}

static int spi_mem_remove(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->remove)
		return memdrv->remove(mem);

	return 0;
}

static void spi_mem_shutdown(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->shutdown)
		memdrv->shutdown(mem);
}

/**
 * spi_mem_driver_register_with_owner() - Register a SPI memory driver
 * @memdrv: the SPI memory driver to register
 * @owner: the owner of this driver
 *
 * Registers a SPI memory driver.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_driver_register_with_owner(struct spi_mem_driver *memdrv,
				       struct module *owner)
{
	memdrv->spidrv.probe = spi_mem_probe;
	memdrv->spidrv.remove = spi_mem_remove;
	memdrv->spidrv.shutdown = spi_mem_shutdown;

	return __spi_register_driver(owner, &memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_register_with_owner);

/**
 * spi_mem_driver_unregister() - Unregister a SPI memory driver
 * @memdrv: the SPI memory driver to unregister
 *
 * Unregisters a SPI memory driver.
 */
void spi_mem_driver_unregister(struct spi_mem_driver *memdrv)
{
	spi_unregister_driver(&memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_unregister);
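
/*
 * Illustrative sketch (hypothetical driver, all names made up): a minimal
 * SPI memory driver registered through this interface.
 * module_spi_mem_driver() expands to spi_mem_driver_register() and
 * spi_mem_driver_unregister() calls at module init/exit time:
 *
 *	static int mydrv_probe(struct spi_mem *mem)
 *	{
 *		dev_info(&mem->spi->dev, "probed %s\n",
 *			 spi_mem_get_name(mem));
 *		return 0;
 *	}
 *
 *	static struct spi_mem_driver mydrv = {
 *		.spidrv = {
 *			.driver = {
 *				.name = "my-spi-mem",
 *			},
 *		},
 *		.probe = mydrv_probe,
 *	};
 *	module_spi_mem_driver(mydrv);
 */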