// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Exceet Electronics GmbH
 * Copyright (C) 2018 Bootlin
 *
 * Author: Boris Brezillon <boris.brezillon@bootlin.com>
 */
#include <linux/dmaengine.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

#include "internals.h"

#define SPI_MEM_MAX_BUSWIDTH    8

/**
 * spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a
 *                                        memory operation
 * @ctlr: the SPI controller requesting this dma_map()
 * @op: the memory operation containing the buffer to map
 * @sgt: a pointer to a non-initialized sg_table that will be filled by this
 *       function
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares everything for you and provides a ready-to-use
 * sg_table. This function is not intended to be called from SPI device
 * drivers. Only SPI controller drivers should use it.
 * Note that the caller must ensure the memory region pointed by
 * op->data.buf.{in,out} is DMA-able before calling this function.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
                                       const struct spi_mem_op *op,
                                       struct sg_table *sgt)
{
        struct device *dmadev;

        if (!op->data.nbytes)
                return -EINVAL;

        if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
                dmadev = ctlr->dma_tx->device->dev;
        else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
                dmadev = ctlr->dma_rx->device->dev;
        else
                dmadev = ctlr->dev.parent;

        if (!dmadev)
                return -EINVAL;

        return spi_map_buf(ctlr, dmadev, sgt, op->data.buf.in, op->data.nbytes,
                           op->data.dir == SPI_MEM_DATA_IN ?
                           DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_map_mem_op_data);

/**
 * spi_controller_dma_unmap_mem_op_data() - DMA-unmap the buffer attached to a
 *                                          memory operation
 * @ctlr: the SPI controller requesting this dma_unmap()
 * @op: the memory operation containing the buffer to unmap
 * @sgt: a pointer to an sg_table previously initialized by
 *       spi_controller_dma_map_mem_op_data()
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares things so that the CPU can access the
 * op->data.buf.{in,out} buffer again.
 *
 * This function is not intended to be called from SPI device drivers. Only
 * SPI controller drivers should use it.
 *
 * This function should be called after the DMA operation has finished and is
 * only valid if the previous spi_controller_dma_map_mem_op_data() call
 * returned 0.
 */
void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
                                          const struct spi_mem_op *op,
                                          struct sg_table *sgt)
{
        struct device *dmadev;

        if (!op->data.nbytes)
                return;

        if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
                dmadev = ctlr->dma_tx->device->dev;
        else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
                dmadev = ctlr->dma_rx->device->dev;
        else
                dmadev = ctlr->dev.parent;

        spi_unmap_buf(ctlr, dmadev, sgt,
                      op->data.dir == SPI_MEM_DATA_IN ?
                      DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_unmap_mem_op_data);
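
/*
 * Illustrative sketch, not part of the core: a controller driver's
 * ->exec_op() hook may use the two helpers above around its own DMA engine
 * setup. The foo_* names below are hypothetical and only show the intended
 * calling sequence; foo_do_dma() stands for whatever consumes the sg_table.
 *
 *	static int foo_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
 *	{
 *		struct spi_controller *ctlr = mem->spi->controller;
 *		struct sg_table sgt;
 *		int ret;
 *
 *		if (!op->data.nbytes)
 *			return foo_do_pio(ctlr, op);
 *
 *		ret = spi_controller_dma_map_mem_op_data(ctlr, op, &sgt);
 *		if (ret)
 *			return ret;
 *
 *		ret = foo_do_dma(ctlr, op, &sgt);
 *
 *		spi_controller_dma_unmap_mem_op_data(ctlr, op, &sgt);
 *		return ret;
 *	}
 */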

static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx)
{
        u32 mode = mem->spi->mode;

        switch (buswidth) {
        case 1:
                return 0;

        case 2:
                if ((tx && (mode & (SPI_TX_DUAL | SPI_TX_QUAD))) ||
                    (!tx && (mode & (SPI_RX_DUAL | SPI_RX_QUAD))))
                        return 0;

                break;

        case 4:
                if ((tx && (mode & SPI_TX_QUAD)) ||
                    (!tx && (mode & SPI_RX_QUAD)))
                        return 0;

                break;

        case 8:
                if ((tx && (mode & SPI_TX_OCTAL)) ||
                    (!tx && (mode & SPI_RX_OCTAL)))
                        return 0;

                break;

        default:
                break;
        }

        return -ENOTSUPP;
}

bool spi_mem_default_supports_op(struct spi_mem *mem,
                                 const struct spi_mem_op *op)
{
        if (spi_check_buswidth_req(mem, op->cmd.buswidth, true))
                return false;

        if (op->addr.nbytes &&
            spi_check_buswidth_req(mem, op->addr.buswidth, true))
                return false;

        if (op->dummy.nbytes &&
            spi_check_buswidth_req(mem, op->dummy.buswidth, true))
                return false;

        if (op->data.dir != SPI_MEM_NO_DATA &&
            spi_check_buswidth_req(mem, op->data.buswidth,
                                   op->data.dir == SPI_MEM_DATA_OUT))
                return false;

        return true;
}
EXPORT_SYMBOL_GPL(spi_mem_default_supports_op);

static bool spi_mem_buswidth_is_valid(u8 buswidth)
{
        if (hweight8(buswidth) > 1 || buswidth > SPI_MEM_MAX_BUSWIDTH)
                return false;

        return true;
}

static int spi_mem_check_op(const struct spi_mem_op *op)
{
        if (!op->cmd.buswidth)
                return -EINVAL;

        if ((op->addr.nbytes && !op->addr.buswidth) ||
            (op->dummy.nbytes && !op->dummy.buswidth) ||
            (op->data.nbytes && !op->data.buswidth))
                return -EINVAL;

        if (!spi_mem_buswidth_is_valid(op->cmd.buswidth) ||
            !spi_mem_buswidth_is_valid(op->addr.buswidth) ||
            !spi_mem_buswidth_is_valid(op->dummy.buswidth) ||
            !spi_mem_buswidth_is_valid(op->data.buswidth))
                return -EINVAL;

        return 0;
}

static bool spi_mem_internal_supports_op(struct spi_mem *mem,
                                         const struct spi_mem_op *op)
{
        struct spi_controller *ctlr = mem->spi->controller;

        if (ctlr->mem_ops && ctlr->mem_ops->supports_op)
                return ctlr->mem_ops->supports_op(mem, op);

        return spi_mem_default_supports_op(mem, op);
}

/**
 * spi_mem_supports_op() - Check if a memory device and the controller it is
 *                         connected to support a specific memory operation
 * @mem: the SPI memory
 * @op: the memory operation to check
 *
 * Some controllers only support Single or Dual I/O, others might only support
 * specific opcodes, or it can even be that the controller and device both
 * support Quad I/O but the hardware prevents you from using it because only
 * 2 I/O lines are connected.
 *
 * This function checks whether a specific operation is supported.
 *
 * Return: true if @op is supported, false otherwise.
 */
bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
        if (spi_mem_check_op(op))
                return false;

        return spi_mem_internal_supports_op(mem, op);
}
EXPORT_SYMBOL_GPL(spi_mem_supports_op);
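
/*
 * Illustrative sketch (assuming the SPI_MEM_OP_* helpers from
 * <linux/spi/spi-mem.h> and a DMA-able 256-byte buffer "buf"): a SPI memory
 * driver would typically build a template such as the 1-1-4 fast-read below
 * and probe it with spi_mem_supports_op() before committing to it.
 *
 *	struct spi_mem_op op =
 *		SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
 *			   SPI_MEM_OP_ADDR(3, 0, 1),
 *			   SPI_MEM_OP_DUMMY(1, 1),
 *			   SPI_MEM_OP_DATA_IN(256, buf, 4));
 *
 *	if (!spi_mem_supports_op(mem, &op))
 *		// fall back to a slower single-I/O read
 */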

static int spi_mem_access_start(struct spi_mem *mem)
{
        struct spi_controller *ctlr = mem->spi->controller;

        /*
         * Flush the message queue before executing our SPI memory
         * operation to prevent preemption of regular SPI transfers.
         */
        spi_flush_queue(ctlr);

        if (ctlr->auto_runtime_pm) {
                int ret;

                ret = pm_runtime_get_sync(ctlr->dev.parent);
                if (ret < 0) {
                        pm_runtime_put_noidle(ctlr->dev.parent);
                        dev_err(&ctlr->dev, "Failed to power device: %d\n",
                                ret);
                        return ret;
                }
        }

        mutex_lock(&ctlr->bus_lock_mutex);
        mutex_lock(&ctlr->io_mutex);

        return 0;
}

static void spi_mem_access_end(struct spi_mem *mem)
{
        struct spi_controller *ctlr = mem->spi->controller;

        mutex_unlock(&ctlr->io_mutex);
        mutex_unlock(&ctlr->bus_lock_mutex);

        if (ctlr->auto_runtime_pm)
                pm_runtime_put(ctlr->dev.parent);
}

/**
 * spi_mem_exec_op() - Execute a memory operation
 * @mem: the SPI memory
 * @op: the memory operation to execute
 *
 * Executes a memory operation.
 *
 * This function first checks that @op is supported and then tries to execute
 * it.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
        unsigned int tmpbufsize, xferpos = 0, totalxferlen = 0;
        struct spi_controller *ctlr = mem->spi->controller;
        struct spi_transfer xfers[4] = { };
        struct spi_message msg;
        u8 *tmpbuf;
        int ret;

        ret = spi_mem_check_op(op);
        if (ret)
                return ret;

        if (!spi_mem_internal_supports_op(mem, op))
                return -ENOTSUPP;

        if (ctlr->mem_ops && !mem->spi->cs_gpiod) {
                ret = spi_mem_access_start(mem);
                if (ret)
                        return ret;

                ret = ctlr->mem_ops->exec_op(mem, op);

                spi_mem_access_end(mem);

                /*
                 * Some controllers only optimize specific paths (typically the
                 * read path) and expect the core to use the regular SPI
                 * interface in other cases.
                 */
                if (!ret || ret != -ENOTSUPP)
                        return ret;
        }

        tmpbufsize = sizeof(op->cmd.opcode) + op->addr.nbytes +
                     op->dummy.nbytes;

        /*
         * Allocate a buffer to transmit the CMD and ADDR cycles with kmalloc()
         * so we're guaranteed that this buffer is DMA-able, as required by the
         * SPI layer.
         */
        tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA);
        if (!tmpbuf)
                return -ENOMEM;

        spi_message_init(&msg);

        tmpbuf[0] = op->cmd.opcode;
        xfers[xferpos].tx_buf = tmpbuf;
        xfers[xferpos].len = sizeof(op->cmd.opcode);
        xfers[xferpos].tx_nbits = op->cmd.buswidth;
        spi_message_add_tail(&xfers[xferpos], &msg);
        xferpos++;
        totalxferlen++;

        if (op->addr.nbytes) {
                int i;

                for (i = 0; i < op->addr.nbytes; i++)
                        tmpbuf[i + 1] = op->addr.val >>
                                        (8 * (op->addr.nbytes - i - 1));

                xfers[xferpos].tx_buf = tmpbuf + 1;
                xfers[xferpos].len = op->addr.nbytes;
                xfers[xferpos].tx_nbits = op->addr.buswidth;
                spi_message_add_tail(&xfers[xferpos], &msg);
                xferpos++;
                totalxferlen += op->addr.nbytes;
        }

        if (op->dummy.nbytes) {
                memset(tmpbuf + op->addr.nbytes + 1, 0xff, op->dummy.nbytes);
                xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
                xfers[xferpos].len = op->dummy.nbytes;
                xfers[xferpos].tx_nbits = op->dummy.buswidth;
                spi_message_add_tail(&xfers[xferpos], &msg);
                xferpos++;
                totalxferlen += op->dummy.nbytes;
        }

        if (op->data.nbytes) {
                if (op->data.dir == SPI_MEM_DATA_IN) {
                        xfers[xferpos].rx_buf = op->data.buf.in;
                        xfers[xferpos].rx_nbits = op->data.buswidth;
                } else {
                        xfers[xferpos].tx_buf = op->data.buf.out;
                        xfers[xferpos].tx_nbits = op->data.buswidth;
                }

                xfers[xferpos].len = op->data.nbytes;
                spi_message_add_tail(&xfers[xferpos], &msg);
                xferpos++;
                totalxferlen += op->data.nbytes;
        }

        ret = spi_sync(mem->spi, &msg);

        kfree(tmpbuf);

        if (ret)
                return ret;

        if (msg.actual_length != totalxferlen)
                return -EIO;

        return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_exec_op);
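
/*
 * Illustrative sketch (assuming 0x9f is the device's "read ID" opcode, as on
 * JEDEC SPI NOR flashes): a single-I/O register read through
 * spi_mem_exec_op() only needs an op descriptor and a buffer.
 *
 *	struct spi_mem_op op =
 *		SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
 *			   SPI_MEM_OP_NO_ADDR,
 *			   SPI_MEM_OP_NO_DUMMY,
 *			   SPI_MEM_OP_DATA_IN(3, id, 1));
 *	int ret = spi_mem_exec_op(mem, &op);
 *
 * where "id" points to a DMA-safe (e.g. kmalloc'ed) 3-byte buffer.
 */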

/**
 * spi_mem_get_name() - Return the SPI mem device name to be used by the
 *                      upper layer if necessary
 * @mem: the SPI memory
 *
 * This function allows SPI mem users to retrieve the SPI mem device name.
 * It is useful if the upper layer needs to expose a custom name for
 * compatibility reasons.
 *
 * Return: a string containing the name of the memory device to be used
 *         by the SPI mem user
 */
const char *spi_mem_get_name(struct spi_mem *mem)
{
        return mem->name;
}
EXPORT_SYMBOL_GPL(spi_mem_get_name);

/**
 * spi_mem_adjust_op_size() - Adjust the data size of a SPI mem operation to
 *                            match controller limitations
 * @mem: the SPI memory
 * @op: the operation to adjust
 *
 * Some controllers have FIFO limitations and must split a data transfer
 * operation into multiple ones, others require a specific alignment for
 * optimized accesses. This function allows SPI mem drivers to split a single
 * operation into multiple sub-operations when required.
 *
 * Return: a negative error code if the controller can't properly adjust @op,
 *         0 otherwise. Note that @op->data.nbytes will be updated if @op
 *         can't be handled in a single step.
 */
int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
        struct spi_controller *ctlr = mem->spi->controller;
        size_t len;

        if (ctlr->mem_ops && ctlr->mem_ops->adjust_op_size)
                return ctlr->mem_ops->adjust_op_size(mem, op);

        if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
                len = sizeof(op->cmd.opcode) + op->addr.nbytes +
                      op->dummy.nbytes;

                if (len > spi_max_transfer_size(mem->spi))
                        return -EINVAL;

                op->data.nbytes = min3((size_t)op->data.nbytes,
                                       spi_max_transfer_size(mem->spi),
                                       spi_max_message_size(mem->spi) -
                                       len);
                if (!op->data.nbytes)
                        return -EINVAL;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
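
/*
 * Illustrative sketch (hypothetical caller, not part of the core): an upper
 * layer that wants to transfer a large buffer would shrink the op with
 * spi_mem_adjust_op_size() and loop until everything has been transferred.
 *
 *	while (remaining) {
 *		op.addr.val = addr;
 *		op.data.buf.in = buf;
 *		op.data.nbytes = remaining;
 *
 *		ret = spi_mem_adjust_op_size(mem, &op);
 *		if (ret)
 *			return ret;
 *
 *		ret = spi_mem_exec_op(mem, &op);
 *		if (ret)
 *			return ret;
 *
 *		addr += op.data.nbytes;
 *		buf += op.data.nbytes;
 *		remaining -= op.data.nbytes;
 *	}
 */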

static ssize_t spi_mem_no_dirmap_read(struct spi_mem_dirmap_desc *desc,
                                      u64 offs, size_t len, void *buf)
{
        struct spi_mem_op op = desc->info.op_tmpl;
        int ret;

        op.addr.val = desc->info.offset + offs;
        op.data.buf.in = buf;
        op.data.nbytes = len;
        ret = spi_mem_adjust_op_size(desc->mem, &op);
        if (ret)
                return ret;

        ret = spi_mem_exec_op(desc->mem, &op);
        if (ret)
                return ret;

        return op.data.nbytes;
}

static ssize_t spi_mem_no_dirmap_write(struct spi_mem_dirmap_desc *desc,
                                       u64 offs, size_t len, const void *buf)
{
        struct spi_mem_op op = desc->info.op_tmpl;
        int ret;

        op.addr.val = desc->info.offset + offs;
        op.data.buf.out = buf;
        op.data.nbytes = len;
        ret = spi_mem_adjust_op_size(desc->mem, &op);
        if (ret)
                return ret;

        ret = spi_mem_exec_op(desc->mem, &op);
        if (ret)
                return ret;

        return op.data.nbytes;
}

/**
 * spi_mem_dirmap_create() - Create a direct mapping descriptor
 * @mem: SPI mem device this direct mapping should be created for
 * @info: direct mapping information
 *
 * This function creates a direct mapping descriptor which can then be used
 * to access the memory using spi_mem_dirmap_read() or spi_mem_dirmap_write().
 * If the SPI controller driver does not support direct mapping, this function
 * falls back to an implementation using spi_mem_exec_op(), so that the caller
 * doesn't have to bother implementing a fallback on their own.
 *
 * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
 */
struct spi_mem_dirmap_desc *
spi_mem_dirmap_create(struct spi_mem *mem,
                      const struct spi_mem_dirmap_info *info)
{
        struct spi_controller *ctlr = mem->spi->controller;
        struct spi_mem_dirmap_desc *desc;
        int ret = -ENOTSUPP;

        /* Make sure the number of address bytes is between 1 and 8. */
        if (!info->op_tmpl.addr.nbytes || info->op_tmpl.addr.nbytes > 8)
                return ERR_PTR(-EINVAL);

        /* data.dir should either be SPI_MEM_DATA_IN or SPI_MEM_DATA_OUT. */
        if (info->op_tmpl.data.dir == SPI_MEM_NO_DATA)
                return ERR_PTR(-EINVAL);

        desc = kzalloc(sizeof(*desc), GFP_KERNEL);
        if (!desc)
                return ERR_PTR(-ENOMEM);

        desc->mem = mem;
        desc->info = *info;
        if (ctlr->mem_ops && ctlr->mem_ops->dirmap_create)
                ret = ctlr->mem_ops->dirmap_create(desc);

        if (ret) {
                desc->nodirmap = true;
                if (!spi_mem_supports_op(desc->mem, &desc->info.op_tmpl))
                        ret = -ENOTSUPP;
                else
                        ret = 0;
        }

        if (ret) {
                kfree(desc);
                return ERR_PTR(ret);
        }

        return desc;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_create);
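
/*
 * Illustrative sketch (hypothetical values; the 0x6b quad-output read and the
 * 16 MiB device size are assumptions): a read direct mapping covering the
 * whole device can be requested with an op template whose address value and
 * data length are left for the core/controller to fill in per access.
 *
 *	struct spi_mem_dirmap_info info = {
 *		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
 *				      SPI_MEM_OP_ADDR(3, 0, 1),
 *				      SPI_MEM_OP_DUMMY(1, 1),
 *				      SPI_MEM_OP_DATA_IN(0, NULL, 4)),
 *		.offset = 0,
 *		.length = SZ_16M,
 *	};
 *	struct spi_mem_dirmap_desc *desc;
 *
 *	desc = spi_mem_dirmap_create(mem, &info);
 *	if (IS_ERR(desc))
 *		return PTR_ERR(desc);
 */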

/**
 * spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor
 * @desc: the direct mapping descriptor to destroy
 *
 * This function destroys a direct mapping descriptor previously created by
 * spi_mem_dirmap_create().
 */
void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc)
{
        struct spi_controller *ctlr = desc->mem->spi->controller;

        if (!desc->nodirmap && ctlr->mem_ops && ctlr->mem_ops->dirmap_destroy)
                ctlr->mem_ops->dirmap_destroy(desc);

        kfree(desc);
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_destroy);

static void devm_spi_mem_dirmap_release(struct device *dev, void *res)
{
        struct spi_mem_dirmap_desc *desc = *(struct spi_mem_dirmap_desc **)res;

        spi_mem_dirmap_destroy(desc);
}

/**
 * devm_spi_mem_dirmap_create() - Create a direct mapping descriptor and attach
 *                                it to a device
 * @dev: device the dirmap desc will be attached to
 * @mem: SPI mem device this direct mapping should be created for
 * @info: direct mapping information
 *
 * devm_ variant of the spi_mem_dirmap_create() function. See
 * spi_mem_dirmap_create() for more details.
 *
 * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
 */
struct spi_mem_dirmap_desc *
devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem,
                           const struct spi_mem_dirmap_info *info)
{
        struct spi_mem_dirmap_desc **ptr, *desc;

        ptr = devres_alloc(devm_spi_mem_dirmap_release, sizeof(*ptr),
                           GFP_KERNEL);
        if (!ptr)
                return ERR_PTR(-ENOMEM);

        desc = spi_mem_dirmap_create(mem, info);
        if (IS_ERR(desc)) {
                devres_free(ptr);
        } else {
                *ptr = desc;
                devres_add(dev, ptr);
        }

        return desc;
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_create);

static int devm_spi_mem_dirmap_match(struct device *dev, void *res, void *data)
{
        struct spi_mem_dirmap_desc **ptr = res;

        if (WARN_ON(!ptr || !*ptr))
                return 0;

        return *ptr == data;
}

/**
 * devm_spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor attached
 *                                 to a device
 * @dev: device the dirmap desc is attached to
 * @desc: the direct mapping descriptor to destroy
 *
 * devm_ variant of the spi_mem_dirmap_destroy() function. See
 * spi_mem_dirmap_destroy() for more details.
 */
void devm_spi_mem_dirmap_destroy(struct device *dev,
                                 struct spi_mem_dirmap_desc *desc)
{
        devres_release(dev, devm_spi_mem_dirmap_release,
                       devm_spi_mem_dirmap_match, desc);
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_destroy);

/**
 * spi_mem_dirmap_read() - Read data through a direct mapping
 * @desc: direct mapping descriptor
 * @offs: offset to start reading from. Note that this is not an absolute
 *        offset, but an offset within the direct mapping, which already has
 *        its own offset
 * @len: length in bytes
 * @buf: destination buffer. This buffer must be DMA-able
 *
 * This function reads data from a memory device using a direct mapping
 * previously instantiated with spi_mem_dirmap_create().
 *
 * Return: the amount of data read from the memory device or a negative error
 * code. Note that the returned size might be smaller than @len, and the caller
 * is responsible for calling spi_mem_dirmap_read() again when that happens.
 */
ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
                            u64 offs, size_t len, void *buf)
{
        struct spi_controller *ctlr = desc->mem->spi->controller;
        ssize_t ret;

        if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN)
                return -EINVAL;

        if (!len)
                return 0;

        if (desc->nodirmap) {
                ret = spi_mem_no_dirmap_read(desc, offs, len, buf);
        } else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_read) {
                ret = spi_mem_access_start(desc->mem);
                if (ret)
                        return ret;

                ret = ctlr->mem_ops->dirmap_read(desc, offs, len, buf);

                spi_mem_access_end(desc->mem);
        } else {
                ret = -ENOTSUPP;
        }

        return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_read);

/**
 * spi_mem_dirmap_write() - Write data through a direct mapping
 * @desc: direct mapping descriptor
 * @offs: offset to start writing from. Note that this is not an absolute
 *        offset, but an offset within the direct mapping, which already has
 *        its own offset
 * @len: length in bytes
 * @buf: source buffer. This buffer must be DMA-able
 *
 * This function writes data to a memory device using a direct mapping
 * previously instantiated with spi_mem_dirmap_create().
 *
 * Return: the amount of data written to the memory device or a negative error
 * code. Note that the returned size might be smaller than @len, and the caller
 * is responsible for calling spi_mem_dirmap_write() again when that happens.
 */
ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc,
                             u64 offs, size_t len, const void *buf)
{
        struct spi_controller *ctlr = desc->mem->spi->controller;
        ssize_t ret;

        if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_OUT)
                return -EINVAL;

        if (!len)
                return 0;

        if (desc->nodirmap) {
                ret = spi_mem_no_dirmap_write(desc, offs, len, buf);
        } else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_write) {
                ret = spi_mem_access_start(desc->mem);
                if (ret)
                        return ret;

                ret = ctlr->mem_ops->dirmap_write(desc, offs, len, buf);

                spi_mem_access_end(desc->mem);
        } else {
                ret = -ENOTSUPP;
        }

        return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_write);
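
/*
 * Illustrative sketch (hypothetical caller): because spi_mem_dirmap_read()
 * may return fewer than @len bytes, users are expected to loop until the
 * whole range has been transferred (a write loop looks the same with
 * spi_mem_dirmap_write()).
 *
 *	while (len) {
 *		ssize_t ret = spi_mem_dirmap_read(desc, offs, len, buf);
 *
 *		if (ret < 0)
 *			return ret;
 *		if (!ret)
 *			return -EIO;
 *
 *		offs += ret;
 *		buf += ret;
 *		len -= ret;
 *	}
 */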

static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
{
        return container_of(drv, struct spi_mem_driver, spidrv.driver);
}

static int spi_mem_probe(struct spi_device *spi)
{
        struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
        struct spi_controller *ctlr = spi->controller;
        struct spi_mem *mem;

        mem = devm_kzalloc(&spi->dev, sizeof(*mem), GFP_KERNEL);
        if (!mem)
                return -ENOMEM;

        mem->spi = spi;

        if (ctlr->mem_ops && ctlr->mem_ops->get_name)
                mem->name = ctlr->mem_ops->get_name(mem);
        else
                mem->name = dev_name(&spi->dev);

        if (IS_ERR_OR_NULL(mem->name))
                return PTR_ERR(mem->name);

        spi_set_drvdata(spi, mem);

        return memdrv->probe(mem);
}

static int spi_mem_remove(struct spi_device *spi)
{
        struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
        struct spi_mem *mem = spi_get_drvdata(spi);

        if (memdrv->remove)
                return memdrv->remove(mem);

        return 0;
}

static void spi_mem_shutdown(struct spi_device *spi)
{
        struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
        struct spi_mem *mem = spi_get_drvdata(spi);

        if (memdrv->shutdown)
                memdrv->shutdown(mem);
}

/**
 * spi_mem_driver_register_with_owner() - Register a SPI memory driver
 * @memdrv: the SPI memory driver to register
 * @owner: the owner of this driver
 *
 * Registers a SPI memory driver.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_driver_register_with_owner(struct spi_mem_driver *memdrv,
                                       struct module *owner)
{
        memdrv->spidrv.probe = spi_mem_probe;
        memdrv->spidrv.remove = spi_mem_remove;
        memdrv->spidrv.shutdown = spi_mem_shutdown;

        return __spi_register_driver(owner, &memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_register_with_owner);

/**
 * spi_mem_driver_unregister() - Unregister a SPI memory driver
 * @memdrv: the SPI memory driver to unregister
 *
 * Unregisters a SPI memory driver.
 */
void spi_mem_driver_unregister(struct spi_mem_driver *memdrv)
{
        spi_unregister_driver(&memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_unregister);
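
/*
 * Illustrative sketch (hypothetical driver; the "vendor,chip" compatible and
 * the foo_* callbacks are assumptions): a SPI memory device driver plugs into
 * this framework by filling a struct spi_mem_driver and registering it, for
 * instance with the module_spi_mem_driver() helper from
 * <linux/spi/spi-mem.h>.
 *
 *	static int foo_probe(struct spi_mem *mem)
 *	{
 *		// detect the chip and register the upper layers
 *		return 0;
 *	}
 *
 *	static int foo_remove(struct spi_mem *mem)
 *	{
 *		return 0;
 *	}
 *
 *	static const struct of_device_id foo_of_ids[] = {
 *		{ .compatible = "vendor,chip" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(of, foo_of_ids);
 *
 *	static struct spi_mem_driver foo_driver = {
 *		.spidrv = {
 *			.driver = {
 *				.name = "foo",
 *				.of_match_table = foo_of_ids,
 *			},
 *		},
 *		.probe = foo_probe,
 *		.remove = foo_remove,
 *	};
 *	module_spi_mem_driver(foo_driver);
 */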