// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Exceet Electronics GmbH
 * Copyright (C) 2018 Bootlin
 *
 * Author: Boris Brezillon <boris.brezillon@bootlin.com>
 */
#include <linux/dmaengine.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

#include "internals.h"

#define SPI_MEM_MAX_BUSWIDTH	8

/**
 * spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a
 *					  memory operation
 * @ctlr: the SPI controller requesting this dma_map()
 * @op: the memory operation containing the buffer to map
 * @sgt: a pointer to a non-initialized sg_table that will be filled by this
 *       function
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares everything for you and provides a ready-to-use
 * sg_table. This function is not intended to be called from SPI drivers.
 * Only SPI controller drivers should use it.
 * Note that the caller must ensure the memory region pointed to by
 * op->data.buf.{in,out} is DMA-able before calling this function.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
				       const struct spi_mem_op *op,
				       struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return -EINVAL;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	if (!dmadev)
		return -EINVAL;

	return spi_map_buf(ctlr, dmadev, sgt, op->data.buf.in, op->data.nbytes,
			   op->data.dir == SPI_MEM_DATA_IN ?
			   DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_map_mem_op_data);

/**
 * spi_controller_dma_unmap_mem_op_data() - DMA-unmap the buffer attached to a
 *					    memory operation
 * @ctlr: the SPI controller requesting this dma_unmap()
 * @op: the memory operation containing the buffer to unmap
 * @sgt: a pointer to an sg_table previously initialized by
 *       spi_controller_dma_map_mem_op_data()
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares things so that the CPU can access the
 * op->data.buf.{in,out} buffer again.
 *
 * This function is not intended to be called from SPI drivers. Only SPI
 * controller drivers should use it.
 *
 * This function should be called after the DMA operation has finished and is
 * only valid if the previous spi_controller_dma_map_mem_op_data() call
 * returned 0.
 */
void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
					  const struct spi_mem_op *op,
					  struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	spi_unmap_buf(ctlr, dmadev, sgt,
		      op->data.dir == SPI_MEM_DATA_IN ?
		      DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_unmap_mem_op_data);
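
/*
 * Minimal usage sketch for the two helpers above, kept as a comment so it
 * stays out of the build: a hypothetical controller driver maps the data
 * buffer, hands the sg_table to its DMA engine through an illustrative
 * foo_dma_issue() helper, then unmaps it. Only the map/unmap calls are part
 * of this file's API; everything prefixed with foo_ is an assumption.
 *
 *	static int foo_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
 *	{
 *		struct spi_controller *ctlr = mem->spi->controller;
 *		struct sg_table sgt;
 *		int ret;
 *
 *		ret = spi_controller_dma_map_mem_op_data(ctlr, op, &sgt);
 *		if (ret)
 *			return ret;
 *
 *		ret = foo_dma_issue(ctlr, op, &sgt);
 *
 *		spi_controller_dma_unmap_mem_op_data(ctlr, op, &sgt);
 *
 *		return ret;
 *	}
 */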

static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx)
{
	u32 mode = mem->spi->mode;

	switch (buswidth) {
	case 1:
		return 0;

	case 2:
		if ((tx &&
		     (mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL))) ||
		    (!tx &&
		     (mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL))))
			return 0;

		break;

	case 4:
		if ((tx && (mode & (SPI_TX_QUAD | SPI_TX_OCTAL))) ||
		    (!tx && (mode & (SPI_RX_QUAD | SPI_RX_OCTAL))))
			return 0;

		break;

	case 8:
		if ((tx && (mode & SPI_TX_OCTAL)) ||
		    (!tx && (mode & SPI_RX_OCTAL)))
			return 0;

		break;

	default:
		break;
	}

	return -ENOTSUPP;
}

bool spi_mem_default_supports_op(struct spi_mem *mem,
				 const struct spi_mem_op *op)
{
	if (spi_check_buswidth_req(mem, op->cmd.buswidth, true))
		return false;

	if (op->addr.nbytes &&
	    spi_check_buswidth_req(mem, op->addr.buswidth, true))
		return false;

	if (op->dummy.nbytes &&
	    spi_check_buswidth_req(mem, op->dummy.buswidth, true))
		return false;

	if (op->data.dir != SPI_MEM_NO_DATA &&
	    spi_check_buswidth_req(mem, op->data.buswidth,
				   op->data.dir == SPI_MEM_DATA_OUT))
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(spi_mem_default_supports_op);

static bool spi_mem_buswidth_is_valid(u8 buswidth)
{
	if (hweight8(buswidth) > 1 || buswidth > SPI_MEM_MAX_BUSWIDTH)
		return false;

	return true;
}

static int spi_mem_check_op(const struct spi_mem_op *op)
{
	if (!op->cmd.buswidth)
		return -EINVAL;

	if ((op->addr.nbytes && !op->addr.buswidth) ||
	    (op->dummy.nbytes && !op->dummy.buswidth) ||
	    (op->data.nbytes && !op->data.buswidth))
		return -EINVAL;

	if (!spi_mem_buswidth_is_valid(op->cmd.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->addr.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->dummy.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->data.buswidth))
		return -EINVAL;

	return 0;
}

static bool spi_mem_internal_supports_op(struct spi_mem *mem,
					 const struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;

	if (ctlr->mem_ops && ctlr->mem_ops->supports_op)
		return ctlr->mem_ops->supports_op(mem, op);

	return spi_mem_default_supports_op(mem, op);
}

/**
 * spi_mem_supports_op() - Check if a memory device and the controller it is
 *			   connected to support a specific memory operation
 * @mem: the SPI memory
 * @op: the memory operation to check
 *
 * Some controllers only support Single or Dual I/Os, others might only
 * support specific opcodes, or it can even be that the controller and device
 * both support Quad I/Os but the hardware prevents you from using them
 * because only 2 I/O lines are connected.
 *
 * This function checks whether a specific operation is supported.
 *
 * Return: true if @op is supported, false otherwise.
 */
bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	if (spi_mem_check_op(op))
		return false;

	return spi_mem_internal_supports_op(mem, op);
}
EXPORT_SYMBOL_GPL(spi_mem_supports_op);
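
/*
 * Illustrative sketch of how a SPI memory driver would typically build an
 * operation with the SPI_MEM_OP*() helpers from <linux/spi/spi-mem.h> and
 * probe for support before relying on it. The 0x6b fast-read opcode, the
 * single dummy byte and the "buf"/"len" variables are assumptions made for
 * the example, not requirements of this API.
 *
 *	struct spi_mem_op op =
 *		SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
 *			   SPI_MEM_OP_ADDR(3, 0, 1),
 *			   SPI_MEM_OP_DUMMY(1, 1),
 *			   SPI_MEM_OP_DATA_IN(len, buf, 4));
 *
 *	if (!spi_mem_supports_op(mem, &op))
 *		... fall back to a slower single I/O variant of the operation ...
 */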

static int spi_mem_access_start(struct spi_mem *mem)
{
	struct spi_controller *ctlr = mem->spi->controller;

	/*
	 * Flush the message queue before executing our SPI memory
	 * operation to prevent preemption of regular SPI transfers.
	 */
	spi_flush_queue(ctlr);

	if (ctlr->auto_runtime_pm) {
		int ret;

		ret = pm_runtime_get_sync(ctlr->dev.parent);
		if (ret < 0) {
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);
			return ret;
		}
	}

	mutex_lock(&ctlr->bus_lock_mutex);
	mutex_lock(&ctlr->io_mutex);

	return 0;
}

static void spi_mem_access_end(struct spi_mem *mem)
{
	struct spi_controller *ctlr = mem->spi->controller;

	mutex_unlock(&ctlr->io_mutex);
	mutex_unlock(&ctlr->bus_lock_mutex);

	if (ctlr->auto_runtime_pm)
		pm_runtime_put(ctlr->dev.parent);
}

/**
 * spi_mem_exec_op() - Execute a memory operation
 * @mem: the SPI memory
 * @op: the memory operation to execute
 *
 * Executes a memory operation.
 *
 * This function first checks that @op is supported and then tries to execute
 * it.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	unsigned int tmpbufsize, xferpos = 0, totalxferlen = 0;
	struct spi_controller *ctlr = mem->spi->controller;
	struct spi_transfer xfers[4] = { };
	struct spi_message msg;
	u8 *tmpbuf;
	int ret;

	ret = spi_mem_check_op(op);
	if (ret)
		return ret;

	if (!spi_mem_internal_supports_op(mem, op))
		return -ENOTSUPP;

	if (ctlr->mem_ops && !mem->spi->cs_gpiod) {
		ret = spi_mem_access_start(mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->exec_op(mem, op);

		spi_mem_access_end(mem);

		/*
		 * Some controllers only optimize specific paths (typically the
		 * read path) and expect the core to use the regular SPI
		 * interface in other cases.
		 */
		if (!ret || ret != -ENOTSUPP)
			return ret;
	}

	tmpbufsize = sizeof(op->cmd.opcode) + op->addr.nbytes +
		     op->dummy.nbytes;

	/*
	 * Allocate a buffer to transmit the CMD, ADDR cycles with kmalloc() so
	 * we're guaranteed that this buffer is DMA-able, as required by the
	 * SPI layer.
	 */
	tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA);
	if (!tmpbuf)
		return -ENOMEM;

	spi_message_init(&msg);

	tmpbuf[0] = op->cmd.opcode;
	xfers[xferpos].tx_buf = tmpbuf;
	xfers[xferpos].len = sizeof(op->cmd.opcode);
	xfers[xferpos].tx_nbits = op->cmd.buswidth;
	spi_message_add_tail(&xfers[xferpos], &msg);
	xferpos++;
	totalxferlen++;

	if (op->addr.nbytes) {
		int i;

		for (i = 0; i < op->addr.nbytes; i++)
			tmpbuf[i + 1] = op->addr.val >>
					(8 * (op->addr.nbytes - i - 1));

		xfers[xferpos].tx_buf = tmpbuf + 1;
		xfers[xferpos].len = op->addr.nbytes;
		xfers[xferpos].tx_nbits = op->addr.buswidth;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->addr.nbytes;
	}

	if (op->dummy.nbytes) {
		memset(tmpbuf + op->addr.nbytes + 1, 0xff, op->dummy.nbytes);
		xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
		xfers[xferpos].len = op->dummy.nbytes;
		xfers[xferpos].tx_nbits = op->dummy.buswidth;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->dummy.nbytes;
	}

	if (op->data.nbytes) {
		if (op->data.dir == SPI_MEM_DATA_IN) {
			xfers[xferpos].rx_buf = op->data.buf.in;
			xfers[xferpos].rx_nbits = op->data.buswidth;
		} else {
			xfers[xferpos].tx_buf = op->data.buf.out;
			xfers[xferpos].tx_nbits = op->data.buswidth;
		}

		xfers[xferpos].len = op->data.nbytes;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->data.nbytes;
	}

	ret = spi_sync(mem->spi, &msg);

	kfree(tmpbuf);

	if (ret)
		return ret;

	if (msg.actual_length != totalxferlen)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_exec_op);
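
/*
 * Sketch of a typical spi_mem_exec_op() call from a SPI memory driver: a
 * single-I/O read of a 3-byte JEDEC ID (opcode 0x9f). The "id" buffer, the
 * opcode choice and the surrounding error handling are the caller's; only
 * the spi-mem calls shown come from this API.
 *
 *	u8 id[3];
 *	struct spi_mem_op op =
 *		SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
 *			   SPI_MEM_OP_NO_ADDR,
 *			   SPI_MEM_OP_NO_DUMMY,
 *			   SPI_MEM_OP_DATA_IN(sizeof(id), id, 1));
 *	int ret = spi_mem_exec_op(mem, &op);
 */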

/**
 * spi_mem_get_name() - Return the SPI mem device name to be used by the
 *			upper layer if necessary
 * @mem: the SPI memory
 *
 * This function allows SPI mem users to retrieve the SPI mem device name.
 * It is useful if the upper layer needs to expose a custom name for
 * compatibility reasons.
 *
 * Return: a string containing the name of the memory device to be used
 *	   by the SPI mem user
 */
const char *spi_mem_get_name(struct spi_mem *mem)
{
	return mem->name;
}
EXPORT_SYMBOL_GPL(spi_mem_get_name);

/**
 * spi_mem_adjust_op_size() - Adjust the data size of a SPI mem operation to
 *			      match controller limitations
 * @mem: the SPI memory
 * @op: the operation to adjust
 *
 * Some controllers have FIFO limitations and must split a data transfer
 * operation into multiple ones, others require a specific alignment for
 * optimized accesses. This function allows SPI mem drivers to split a single
 * operation into multiple sub-operations when required.
 *
 * Return: a negative error code if the controller can't properly adjust @op,
 *	   0 otherwise. Note that @op->data.nbytes will be updated if @op
 *	   can't be handled in a single step.
 */
int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;
	size_t len;

	if (ctlr->mem_ops && ctlr->mem_ops->adjust_op_size)
		return ctlr->mem_ops->adjust_op_size(mem, op);

	if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
		len = sizeof(op->cmd.opcode) + op->addr.nbytes +
		      op->dummy.nbytes;

		if (len > spi_max_transfer_size(mem->spi))
			return -EINVAL;

		op->data.nbytes = min3((size_t)op->data.nbytes,
				       spi_max_transfer_size(mem->spi),
				       spi_max_message_size(mem->spi) -
				       len);
		if (!op->data.nbytes)
			return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
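
/*
 * Sketch of the usual adjust-then-execute pattern built on the two functions
 * above, assuming a driver-side read helper with "from", "len" and "buf"
 * variables (illustrative names, not part of this API):
 *
 *	while (len) {
 *		op.addr.val = from;
 *		op.data.buf.in = buf;
 *		op.data.nbytes = len;
 *
 *		ret = spi_mem_adjust_op_size(mem, &op);
 *		if (ret)
 *			return ret;
 *
 *		ret = spi_mem_exec_op(mem, &op);
 *		if (ret)
 *			return ret;
 *
 *		from += op.data.nbytes;
 *		buf += op.data.nbytes;
 *		len -= op.data.nbytes;
 *	}
 */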

static ssize_t spi_mem_no_dirmap_read(struct spi_mem_dirmap_desc *desc,
				      u64 offs, size_t len, void *buf)
{
	struct spi_mem_op op = desc->info.op_tmpl;
	int ret;

	op.addr.val = desc->info.offset + offs;
	op.data.buf.in = buf;
	op.data.nbytes = len;
	ret = spi_mem_adjust_op_size(desc->mem, &op);
	if (ret)
		return ret;

	ret = spi_mem_exec_op(desc->mem, &op);
	if (ret)
		return ret;

	return op.data.nbytes;
}

static ssize_t spi_mem_no_dirmap_write(struct spi_mem_dirmap_desc *desc,
				       u64 offs, size_t len, const void *buf)
{
	struct spi_mem_op op = desc->info.op_tmpl;
	int ret;

	op.addr.val = desc->info.offset + offs;
	op.data.buf.out = buf;
	op.data.nbytes = len;
	ret = spi_mem_adjust_op_size(desc->mem, &op);
	if (ret)
		return ret;

	ret = spi_mem_exec_op(desc->mem, &op);
	if (ret)
		return ret;

	return op.data.nbytes;
}

/**
 * spi_mem_dirmap_create() - Create a direct mapping descriptor
 * @mem: SPI mem device this direct mapping should be created for
 * @info: direct mapping information
 *
 * This function creates a direct mapping descriptor which can then be used
 * to access the memory using spi_mem_dirmap_read() or spi_mem_dirmap_write().
 * If the SPI controller driver does not support direct mapping, this function
 * falls back to an implementation using spi_mem_exec_op(), so that the caller
 * doesn't have to bother implementing a fallback on their own.
 *
 * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
 */
struct spi_mem_dirmap_desc *
spi_mem_dirmap_create(struct spi_mem *mem,
		      const struct spi_mem_dirmap_info *info)
{
	struct spi_controller *ctlr = mem->spi->controller;
	struct spi_mem_dirmap_desc *desc;
	int ret = -ENOTSUPP;

	/* Make sure the number of address bytes is between 1 and 8. */
	if (!info->op_tmpl.addr.nbytes || info->op_tmpl.addr.nbytes > 8)
		return ERR_PTR(-EINVAL);

	/* data.dir should either be SPI_MEM_DATA_IN or SPI_MEM_DATA_OUT. */
	if (info->op_tmpl.data.dir == SPI_MEM_NO_DATA)
		return ERR_PTR(-EINVAL);

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	desc->mem = mem;
	desc->info = *info;
	if (ctlr->mem_ops && ctlr->mem_ops->dirmap_create)
		ret = ctlr->mem_ops->dirmap_create(desc);

	if (ret) {
		desc->nodirmap = true;
		if (!spi_mem_supports_op(desc->mem, &desc->info.op_tmpl))
			ret = -ENOTSUPP;
		else
			ret = 0;
	}

	if (ret) {
		kfree(desc);
		return ERR_PTR(ret);
	}

	return desc;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_create);
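
/*
 * Sketch of how a SPI memory driver might build a read direct mapping; the
 * 0x6b opcode, the 16 MiB length and the "mem" pointer are placeholders for
 * whatever the device actually requires. The data length and buffer in the
 * template are left empty on purpose, they are filled in at access time.
 *
 *	struct spi_mem_dirmap_info info = {
 *		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
 *				      SPI_MEM_OP_ADDR(3, 0, 1),
 *				      SPI_MEM_OP_DUMMY(1, 1),
 *				      SPI_MEM_OP_DATA_IN(0, NULL, 4)),
 *		.offset = 0,
 *		.length = SZ_16M,
 *	};
 *	struct spi_mem_dirmap_desc *desc;
 *
 *	desc = spi_mem_dirmap_create(mem, &info);
 *	if (IS_ERR(desc))
 *		return PTR_ERR(desc);
 */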

/**
 * spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor
 * @desc: the direct mapping descriptor to destroy
 *
 * This function destroys a direct mapping descriptor previously created by
 * spi_mem_dirmap_create().
 */
void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;

	if (!desc->nodirmap && ctlr->mem_ops && ctlr->mem_ops->dirmap_destroy)
		ctlr->mem_ops->dirmap_destroy(desc);

	kfree(desc);
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_destroy);

static void devm_spi_mem_dirmap_release(struct device *dev, void *res)
{
	struct spi_mem_dirmap_desc *desc = *(struct spi_mem_dirmap_desc **)res;

	spi_mem_dirmap_destroy(desc);
}

/**
 * devm_spi_mem_dirmap_create() - Create a direct mapping descriptor and attach
 *				  it to a device
 * @dev: device the dirmap desc will be attached to
 * @mem: SPI mem device this direct mapping should be created for
 * @info: direct mapping information
 *
 * devm_ variant of the spi_mem_dirmap_create() function. See
 * spi_mem_dirmap_create() for more details.
 *
 * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
 */
struct spi_mem_dirmap_desc *
devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem,
			   const struct spi_mem_dirmap_info *info)
{
	struct spi_mem_dirmap_desc **ptr, *desc;

	ptr = devres_alloc(devm_spi_mem_dirmap_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	desc = spi_mem_dirmap_create(mem, info);
	if (IS_ERR(desc)) {
		devres_free(ptr);
	} else {
		*ptr = desc;
		devres_add(dev, ptr);
	}

	return desc;
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_create);

static int devm_spi_mem_dirmap_match(struct device *dev, void *res, void *data)
{
	struct spi_mem_dirmap_desc **ptr = res;

	if (WARN_ON(!ptr || !*ptr))
		return 0;

	return *ptr == data;
}

/**
 * devm_spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor attached
 *				   to a device
 * @dev: device the dirmap desc is attached to
 * @desc: the direct mapping descriptor to destroy
 *
 * devm_ variant of the spi_mem_dirmap_destroy() function. See
 * spi_mem_dirmap_destroy() for more details.
 */
void devm_spi_mem_dirmap_destroy(struct device *dev,
				 struct spi_mem_dirmap_desc *desc)
{
	devres_release(dev, devm_spi_mem_dirmap_release,
		       devm_spi_mem_dirmap_match, desc);
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_destroy);
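
/*
 * In practice the devm_ variant above is what drivers call from their probe
 * path so the descriptor is released automatically. A minimal, hedged sketch,
 * reusing the "info" template from the earlier example and the "mem" pointer
 * passed to a spi_mem_driver probe callback:
 *
 *	desc = devm_spi_mem_dirmap_create(&mem->spi->dev, mem, &info);
 *	if (IS_ERR(desc))
 *		return PTR_ERR(desc);
 */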

/**
 * spi_mem_dirmap_read() - Read data through a direct mapping
 * @desc: direct mapping descriptor
 * @offs: offset to start reading from. Note that this is not an absolute
 *	  offset, but the offset within the direct mapping which already has
 *	  its own offset
 * @len: length in bytes
 * @buf: destination buffer. This buffer must be DMA-able
 *
 * This function reads data from a memory device using a direct mapping
 * previously instantiated with spi_mem_dirmap_create().
 *
 * Return: the amount of data read from the memory device or a negative error
 * code. Note that the returned size might be smaller than @len, and the caller
 * is responsible for calling spi_mem_dirmap_read() again when that happens.
 */
ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
			    u64 offs, size_t len, void *buf)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;
	ssize_t ret;

	if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN)
		return -EINVAL;

	if (!len)
		return 0;

	if (desc->nodirmap) {
		ret = spi_mem_no_dirmap_read(desc, offs, len, buf);
	} else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_read) {
		ret = spi_mem_access_start(desc->mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->dirmap_read(desc, offs, len, buf);

		spi_mem_access_end(desc->mem);
	} else {
		ret = -ENOTSUPP;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_read);

/**
 * spi_mem_dirmap_write() - Write data through a direct mapping
 * @desc: direct mapping descriptor
 * @offs: offset to start writing from. Note that this is not an absolute
 *	  offset, but the offset within the direct mapping which already has
 *	  its own offset
 * @len: length in bytes
 * @buf: source buffer. This buffer must be DMA-able
 *
 * This function writes data to a memory device using a direct mapping
 * previously instantiated with spi_mem_dirmap_create().
 *
 * Return: the amount of data written to the memory device or a negative error
 * code. Note that the returned size might be smaller than @len, and the caller
 * is responsible for calling spi_mem_dirmap_write() again when that happens.
 */
ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc,
			     u64 offs, size_t len, const void *buf)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;
	ssize_t ret;

	if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_OUT)
		return -EINVAL;

	if (!len)
		return 0;

	if (desc->nodirmap) {
		ret = spi_mem_no_dirmap_write(desc, offs, len, buf);
	} else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_write) {
		ret = spi_mem_access_start(desc->mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->dirmap_write(desc, offs, len, buf);

		spi_mem_access_end(desc->mem);
	} else {
		ret = -ENOTSUPP;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_write);
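
/*
 * Because both dirmap accessors may return less than @len, callers typically
 * wrap them in a loop. A hedged sketch for the read side; "remaining", "offs"
 * and "buf" are illustrative caller-side variables:
 *
 *	while (remaining) {
 *		ssize_t nbytes = spi_mem_dirmap_read(desc, offs, remaining, buf);
 *
 *		if (nbytes < 0)
 *			return nbytes;
 *		if (!nbytes)
 *			return -EIO;
 *
 *		offs += nbytes;
 *		buf += nbytes;
 *		remaining -= nbytes;
 *	}
 */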

static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
{
	return container_of(drv, struct spi_mem_driver, spidrv.driver);
}

static int spi_mem_probe(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_controller *ctlr = spi->controller;
	struct spi_mem *mem;

	mem = devm_kzalloc(&spi->dev, sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->spi = spi;

	if (ctlr->mem_ops && ctlr->mem_ops->get_name)
		mem->name = ctlr->mem_ops->get_name(mem);
	else
		mem->name = dev_name(&spi->dev);

	if (IS_ERR_OR_NULL(mem->name))
		return PTR_ERR(mem->name);

	spi_set_drvdata(spi, mem);

	return memdrv->probe(mem);
}

static int spi_mem_remove(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->remove)
		return memdrv->remove(mem);

	return 0;
}

static void spi_mem_shutdown(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->shutdown)
		memdrv->shutdown(mem);
}

/**
 * spi_mem_driver_register_with_owner() - Register a SPI memory driver
 * @memdrv: the SPI memory driver to register
 * @owner: the owner of this driver
 *
 * Registers a SPI memory driver.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_driver_register_with_owner(struct spi_mem_driver *memdrv,
				       struct module *owner)
{
	memdrv->spidrv.probe = spi_mem_probe;
	memdrv->spidrv.remove = spi_mem_remove;
	memdrv->spidrv.shutdown = spi_mem_shutdown;

	return __spi_register_driver(owner, &memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_register_with_owner);

/**
 * spi_mem_driver_unregister() - Unregister a SPI memory driver
 * @memdrv: the SPI memory driver to unregister
 *
 * Unregisters a SPI memory driver.
 */
void spi_mem_driver_unregister(struct spi_mem_driver *memdrv)
{
	spi_unregister_driver(&memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_unregister);
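
/*
 * Registration sketch for a SPI memory driver built on this framework, using
 * the module_spi_mem_driver() helper from <linux/spi/spi-mem.h>. The "foo"
 * names, callbacks and compatible string are illustrative only:
 *
 *	static const struct of_device_id foo_of_ids[] = {
 *		{ .compatible = "vendor,foo" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(of, foo_of_ids);
 *
 *	static struct spi_mem_driver foo_spi_mem_driver = {
 *		.spidrv = {
 *			.driver = {
 *				.name = "foo",
 *				.of_match_table = foo_of_ids,
 *			},
 *		},
 *		.probe = foo_probe,
 *		.remove = foo_remove,
 *	};
 *	module_spi_mem_driver(foo_spi_mem_driver);
 */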