// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Exceet Electronics GmbH
 * Copyright (C) 2018 Bootlin
 *
 * Author: Boris Brezillon <boris.brezillon@bootlin.com>
 */
#include <linux/dmaengine.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

#include "internals.h"

#define SPI_MEM_MAX_BUSWIDTH		8

/**
 * spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a
 *					   memory operation
 * @ctlr: the SPI controller requesting this dma_map()
 * @op: the memory operation containing the buffer to map
 * @sgt: a pointer to a non-initialized sg_table that will be filled by this
 *	 function
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares everything for you and provides a ready-to-use
 * sg_table. This function is not intended to be called from SPI drivers.
 * Only SPI controller drivers should use it.
 * Note that the caller must ensure the memory region pointed to by
 * op->data.buf.{in,out} is DMA-able before calling this function.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
				       const struct spi_mem_op *op,
				       struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return -EINVAL;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	if (!dmadev)
		return -EINVAL;

	return spi_map_buf(ctlr, dmadev, sgt, op->data.buf.in, op->data.nbytes,
			   op->data.dir == SPI_MEM_DATA_IN ?
			   DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_map_mem_op_data);

/**
 * spi_controller_dma_unmap_mem_op_data() - DMA-unmap the buffer attached to a
 *					     memory operation
 * @ctlr: the SPI controller requesting this dma_unmap()
 * @op: the memory operation containing the buffer to unmap
 * @sgt: a pointer to an sg_table previously initialized by
 *	 spi_controller_dma_map_mem_op_data()
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares things so that the CPU can access the
 * op->data.buf.{in,out} buffer again.
 *
 * This function is not intended to be called from SPI drivers. Only SPI
 * controller drivers should use it.
 *
 * This function should be called after the DMA operation has finished and is
 * only valid if the previous spi_controller_dma_map_mem_op_data() call
 * returned 0.
 */
void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
					  const struct spi_mem_op *op,
					  struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	spi_unmap_buf(ctlr, dmadev, sgt,
		      op->data.dir == SPI_MEM_DATA_IN ?
		      DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_unmap_mem_op_data);
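/*
 * Illustrative sketch (not part of this driver): a controller driver's
 * ->exec_op() hook could wrap its DMA transfer with the two helpers above.
 * The foo_* names below are hypothetical placeholders for such a driver.
 *
 *	static int foo_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
 *	{
 *		struct spi_controller *ctlr = mem->spi->controller;
 *		struct sg_table sgt;
 *		int ret;
 *
 *		ret = spi_controller_dma_map_mem_op_data(ctlr, op, &sgt);
 *		if (ret)
 *			return ret;
 *
 *		ret = foo_issue_dma(ctlr, op, &sgt);	// hypothetical HW access
 *
 *		spi_controller_dma_unmap_mem_op_data(ctlr, op, &sgt);
 *
 *		return ret;
 *	}
 */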
static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx)
{
	u32 mode = mem->spi->mode;

	switch (buswidth) {
	case 1:
		return 0;

	case 2:
		if ((tx && (mode & (SPI_TX_DUAL | SPI_TX_QUAD))) ||
		    (!tx && (mode & (SPI_RX_DUAL | SPI_RX_QUAD))))
			return 0;

		break;

	case 4:
		if ((tx && (mode & SPI_TX_QUAD)) ||
		    (!tx && (mode & SPI_RX_QUAD)))
			return 0;

		break;

	case 8:
		if ((tx && (mode & SPI_TX_OCTAL)) ||
		    (!tx && (mode & SPI_RX_OCTAL)))
			return 0;

		break;

	default:
		break;
	}

	return -ENOTSUPP;
}

bool spi_mem_default_supports_op(struct spi_mem *mem,
				 const struct spi_mem_op *op)
{
	if (spi_check_buswidth_req(mem, op->cmd.buswidth, true))
		return false;

	if (op->addr.nbytes &&
	    spi_check_buswidth_req(mem, op->addr.buswidth, true))
		return false;

	if (op->dummy.nbytes &&
	    spi_check_buswidth_req(mem, op->dummy.buswidth, true))
		return false;

	if (op->data.dir != SPI_MEM_NO_DATA &&
	    spi_check_buswidth_req(mem, op->data.buswidth,
				   op->data.dir == SPI_MEM_DATA_OUT))
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(spi_mem_default_supports_op);

static bool spi_mem_buswidth_is_valid(u8 buswidth)
{
	if (hweight8(buswidth) > 1 || buswidth > SPI_MEM_MAX_BUSWIDTH)
		return false;

	return true;
}

static int spi_mem_check_op(const struct spi_mem_op *op)
{
	if (!op->cmd.buswidth)
		return -EINVAL;

	if ((op->addr.nbytes && !op->addr.buswidth) ||
	    (op->dummy.nbytes && !op->dummy.buswidth) ||
	    (op->data.nbytes && !op->data.buswidth))
		return -EINVAL;

	if (!spi_mem_buswidth_is_valid(op->cmd.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->addr.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->dummy.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->data.buswidth))
		return -EINVAL;

	return 0;
}

static bool spi_mem_internal_supports_op(struct spi_mem *mem,
					 const struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;

	if (ctlr->mem_ops && ctlr->mem_ops->supports_op)
		return ctlr->mem_ops->supports_op(mem, op);

	return spi_mem_default_supports_op(mem, op);
}

/**
 * spi_mem_supports_op() - Check if a memory device and the controller it is
 *			   connected to support a specific memory operation
 * @mem: the SPI memory
 * @op: the memory operation to check
 *
 * Some controllers only support Single or Dual I/O, others might only support
 * specific opcodes, or it can even be that the controller and device both
 * support Quad I/O but the hardware prevents you from using it because only 2
 * I/O lines are connected.
 *
 * This function checks whether a specific operation is supported.
 *
 * Return: true if @op is supported, false otherwise.
 */
bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	if (spi_mem_check_op(op))
		return false;

	return spi_mem_internal_supports_op(mem, op);
}
EXPORT_SYMBOL_GPL(spi_mem_supports_op);
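/*
 * Illustrative sketch (not part of this driver): a SPI memory driver can use
 * spi_mem_supports_op() at probe time to decide which operation to use, e.g.
 * only enabling a 1-1-4 quad read when the controller and wiring allow it.
 * The opcode and cycle counts below are typical values, given only as an
 * example.
 *
 *	struct spi_mem_op op =
 *		SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
 *			   SPI_MEM_OP_ADDR(3, 0, 1),
 *			   SPI_MEM_OP_DUMMY(1, 1),
 *			   SPI_MEM_OP_DATA_IN(len, buf, 4));
 *
 *	if (spi_mem_supports_op(mem, &op))
 *		use_quad_read = true;
 */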
static int spi_mem_access_start(struct spi_mem *mem)
{
	struct spi_controller *ctlr = mem->spi->controller;

	/*
	 * Flush the message queue before executing our SPI memory
	 * operation to prevent preemption of regular SPI transfers.
	 */
	spi_flush_queue(ctlr);

	if (ctlr->auto_runtime_pm) {
		int ret;

		ret = pm_runtime_get_sync(ctlr->dev.parent);
		if (ret < 0) {
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);
			return ret;
		}
	}

	mutex_lock(&ctlr->bus_lock_mutex);
	mutex_lock(&ctlr->io_mutex);

	return 0;
}

static void spi_mem_access_end(struct spi_mem *mem)
{
	struct spi_controller *ctlr = mem->spi->controller;

	mutex_unlock(&ctlr->io_mutex);
	mutex_unlock(&ctlr->bus_lock_mutex);

	if (ctlr->auto_runtime_pm)
		pm_runtime_put(ctlr->dev.parent);
}

/**
 * spi_mem_exec_op() - Execute a memory operation
 * @mem: the SPI memory
 * @op: the memory operation to execute
 *
 * Executes a memory operation.
 *
 * This function first checks that @op is supported and then tries to execute
 * it.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	unsigned int tmpbufsize, xferpos = 0, totalxferlen = 0;
	struct spi_controller *ctlr = mem->spi->controller;
	struct spi_transfer xfers[4] = { };
	struct spi_message msg;
	u8 *tmpbuf;
	int ret;

	ret = spi_mem_check_op(op);
	if (ret)
		return ret;

	if (!spi_mem_internal_supports_op(mem, op))
		return -ENOTSUPP;

	if (ctlr->mem_ops) {
		ret = spi_mem_access_start(mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->exec_op(mem, op);

		spi_mem_access_end(mem);

		/*
		 * Some controllers only optimize specific paths (typically the
		 * read path) and expect the core to use the regular SPI
		 * interface in other cases.
		 */
		if (!ret || ret != -ENOTSUPP)
			return ret;
	}

	tmpbufsize = sizeof(op->cmd.opcode) + op->addr.nbytes +
		     op->dummy.nbytes;
	/*
	 * Allocate a buffer to transmit the CMD, ADDR cycles with kmalloc() so
	 * we're guaranteed that this buffer is DMA-able, as required by the
	 * SPI layer.
	 */
	tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA);
	if (!tmpbuf)
		return -ENOMEM;

	spi_message_init(&msg);

	tmpbuf[0] = op->cmd.opcode;
	xfers[xferpos].tx_buf = tmpbuf;
	xfers[xferpos].len = sizeof(op->cmd.opcode);
	xfers[xferpos].tx_nbits = op->cmd.buswidth;
	spi_message_add_tail(&xfers[xferpos], &msg);
	xferpos++;
	totalxferlen++;

	if (op->addr.nbytes) {
		int i;

		for (i = 0; i < op->addr.nbytes; i++)
			tmpbuf[i + 1] = op->addr.val >>
					(8 * (op->addr.nbytes - i - 1));

		xfers[xferpos].tx_buf = tmpbuf + 1;
		xfers[xferpos].len = op->addr.nbytes;
		xfers[xferpos].tx_nbits = op->addr.buswidth;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->addr.nbytes;
	}

	if (op->dummy.nbytes) {
		memset(tmpbuf + op->addr.nbytes + 1, 0xff, op->dummy.nbytes);
		xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
		xfers[xferpos].len = op->dummy.nbytes;
		xfers[xferpos].tx_nbits = op->dummy.buswidth;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->dummy.nbytes;
	}

	if (op->data.nbytes) {
		if (op->data.dir == SPI_MEM_DATA_IN) {
			xfers[xferpos].rx_buf = op->data.buf.in;
			xfers[xferpos].rx_nbits = op->data.buswidth;
		} else {
			xfers[xferpos].tx_buf = op->data.buf.out;
			xfers[xferpos].tx_nbits = op->data.buswidth;
		}

		xfers[xferpos].len = op->data.nbytes;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->data.nbytes;
	}

	ret = spi_sync(mem->spi, &msg);

	kfree(tmpbuf);

	if (ret)
		return ret;

	if (msg.actual_length != totalxferlen)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_exec_op);

/**
 * spi_mem_get_name() - Return the SPI mem device name to be used by the
 *			upper layer if necessary
 * @mem: the SPI memory
 *
 * This function allows SPI mem users to retrieve the SPI mem device name.
 * It is useful if the upper layer needs to expose a custom name for
 * compatibility reasons.
 *
 * Return: a string containing the name of the memory device to be used
 *	   by the SPI mem user
 */
const char *spi_mem_get_name(struct spi_mem *mem)
{
	return mem->name;
}
EXPORT_SYMBOL_GPL(spi_mem_get_name);
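/*
 * Illustrative sketch (not part of this driver): issuing a single operation
 * through spi_mem_exec_op(), here a JEDEC READ ID (0x9f) into a small
 * DMA-able buffer. The foo_read_id() name is hypothetical.
 *
 *	static int foo_read_id(struct spi_mem *mem, u8 *id, size_t len)
 *	{
 *		struct spi_mem_op op =
 *			SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
 *				   SPI_MEM_OP_NO_ADDR,
 *				   SPI_MEM_OP_NO_DUMMY,
 *				   SPI_MEM_OP_DATA_IN(len, id, 1));
 *
 *		return spi_mem_exec_op(mem, &op);
 *	}
 */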
/**
 * spi_mem_adjust_op_size() - Adjust the data size of a SPI mem operation to
 *			      match controller limitations
 * @mem: the SPI memory
 * @op: the operation to adjust
 *
 * Some controllers have FIFO limitations and must split a data transfer
 * operation into multiple ones, others require a specific alignment for
 * optimized accesses. This function allows SPI mem drivers to split a single
 * operation into multiple sub-operations when required.
 *
 * Return: a negative error code if the controller can't properly adjust @op,
 *	   0 otherwise. Note that @op->data.nbytes will be updated if @op
 *	   can't be handled in a single step.
 */
int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;
	size_t len;

	len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes;

	if (ctlr->mem_ops && ctlr->mem_ops->adjust_op_size)
		return ctlr->mem_ops->adjust_op_size(mem, op);

	if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
		if (len > spi_max_transfer_size(mem->spi))
			return -EINVAL;

		op->data.nbytes = min3((size_t)op->data.nbytes,
				       spi_max_transfer_size(mem->spi),
				       spi_max_message_size(mem->spi) - len);
		if (!op->data.nbytes)
			return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);

static ssize_t spi_mem_no_dirmap_read(struct spi_mem_dirmap_desc *desc,
				      u64 offs, size_t len, void *buf)
{
	struct spi_mem_op op = desc->info.op_tmpl;
	int ret;

	op.addr.val = desc->info.offset + offs;
	op.data.buf.in = buf;
	op.data.nbytes = len;
	ret = spi_mem_adjust_op_size(desc->mem, &op);
	if (ret)
		return ret;

	ret = spi_mem_exec_op(desc->mem, &op);
	if (ret)
		return ret;

	return op.data.nbytes;
}

static ssize_t spi_mem_no_dirmap_write(struct spi_mem_dirmap_desc *desc,
				       u64 offs, size_t len, const void *buf)
{
	struct spi_mem_op op = desc->info.op_tmpl;
	int ret;

	op.addr.val = desc->info.offset + offs;
	op.data.buf.out = buf;
	op.data.nbytes = len;
	ret = spi_mem_adjust_op_size(desc->mem, &op);
	if (ret)
		return ret;

	ret = spi_mem_exec_op(desc->mem, &op);
	if (ret)
		return ret;

	return op.data.nbytes;
}

/**
 * spi_mem_dirmap_create() - Create a direct mapping descriptor
 * @mem: SPI mem device this direct mapping should be created for
 * @info: direct mapping information
 *
 * This function creates a direct mapping descriptor which can then be used
 * to access the memory using spi_mem_dirmap_read() or spi_mem_dirmap_write().
 * If the SPI controller driver does not support direct mapping, this function
 * falls back to an implementation using spi_mem_exec_op(), so that the caller
 * doesn't have to bother implementing a fallback on their own.
 *
 * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
 */
struct spi_mem_dirmap_desc *
spi_mem_dirmap_create(struct spi_mem *mem,
		      const struct spi_mem_dirmap_info *info)
{
	struct spi_controller *ctlr = mem->spi->controller;
	struct spi_mem_dirmap_desc *desc;
	int ret = -ENOTSUPP;

	/* Make sure the number of address bytes is between 1 and 8. */
	if (!info->op_tmpl.addr.nbytes || info->op_tmpl.addr.nbytes > 8)
		return ERR_PTR(-EINVAL);

	/* data.dir should either be SPI_MEM_DATA_IN or SPI_MEM_DATA_OUT. */
	if (info->op_tmpl.data.dir == SPI_MEM_NO_DATA)
		return ERR_PTR(-EINVAL);

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	desc->mem = mem;
	desc->info = *info;
	if (ctlr->mem_ops && ctlr->mem_ops->dirmap_create)
		ret = ctlr->mem_ops->dirmap_create(desc);

	if (ret) {
		desc->nodirmap = true;
		if (!spi_mem_supports_op(desc->mem, &desc->info.op_tmpl))
			ret = -ENOTSUPP;
		else
			ret = 0;
	}

	if (ret) {
		kfree(desc);
		return ERR_PTR(ret);
	}

	return desc;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_create);
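/*
 * Illustrative sketch (not part of this driver): a SPI memory driver creating
 * a read direct mapping covering a whole device. The opcode, dummy cycles and
 * the mem_size value are hypothetical, given only as an example.
 *
 *	struct spi_mem_dirmap_info info = {
 *		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(0x0b, 1),
 *				      SPI_MEM_OP_ADDR(3, 0, 1),
 *				      SPI_MEM_OP_DUMMY(1, 1),
 *				      SPI_MEM_OP_DATA_IN(0, NULL, 1)),
 *		.offset = 0,
 *		.length = mem_size,
 *	};
 *	struct spi_mem_dirmap_desc *desc;
 *
 *	desc = spi_mem_dirmap_create(mem, &info);
 *	if (IS_ERR(desc))
 *		return PTR_ERR(desc);
 */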
/**
 * spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor
 * @desc: the direct mapping descriptor to destroy
 *
 * This function destroys a direct mapping descriptor previously created by
 * spi_mem_dirmap_create().
 */
void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;

	if (!desc->nodirmap && ctlr->mem_ops && ctlr->mem_ops->dirmap_destroy)
		ctlr->mem_ops->dirmap_destroy(desc);

	kfree(desc);
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_destroy);

/**
 * spi_mem_dirmap_read() - Read data through a direct mapping
 * @desc: direct mapping descriptor
 * @offs: offset to start reading from. Note that this is not an absolute
 *	  offset, but the offset within the direct mapping which already has
 *	  its own offset
 * @len: length in bytes
 * @buf: destination buffer. This buffer must be DMA-able
 *
 * This function reads data from a memory device using a direct mapping
 * previously instantiated with spi_mem_dirmap_create().
 *
 * Return: the amount of data read from the memory device or a negative error
 * code. Note that the returned size might be smaller than @len, and the caller
 * is responsible for calling spi_mem_dirmap_read() again when that happens.
 */
ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
			    u64 offs, size_t len, void *buf)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;
	ssize_t ret;

	if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN)
		return -EINVAL;

	if (!len)
		return 0;

	if (desc->nodirmap) {
		ret = spi_mem_no_dirmap_read(desc, offs, len, buf);
	} else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_read) {
		ret = spi_mem_access_start(desc->mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->dirmap_read(desc, offs, len, buf);

		spi_mem_access_end(desc->mem);
	} else {
		ret = -ENOTSUPP;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_read);
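/*
 * Illustrative sketch (not part of this driver): because spi_mem_dirmap_read()
 * may return less than @len, callers typically loop until the whole range has
 * been read. The foo_read() name is hypothetical.
 *
 *	static int foo_read(struct spi_mem_dirmap_desc *desc, u64 offs,
 *			    size_t len, void *buf)
 *	{
 *		ssize_t ret;
 *
 *		while (len) {
 *			ret = spi_mem_dirmap_read(desc, offs, len, buf);
 *			if (ret < 0)
 *				return ret;
 *			if (!ret)
 *				return -EIO;
 *
 *			offs += ret;
 *			len -= ret;
 *			buf += ret;
 *		}
 *
 *		return 0;
 *	}
 */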
/**
 * spi_mem_dirmap_write() - Write data through a direct mapping
 * @desc: direct mapping descriptor
 * @offs: offset to start writing from. Note that this is not an absolute
 *	  offset, but the offset within the direct mapping which already has
 *	  its own offset
 * @len: length in bytes
 * @buf: source buffer. This buffer must be DMA-able
 *
 * This function writes data to a memory device using a direct mapping
 * previously instantiated with spi_mem_dirmap_create().
 *
 * Return: the amount of data written to the memory device or a negative error
 * code. Note that the returned size might be smaller than @len, and the caller
 * is responsible for calling spi_mem_dirmap_write() again when that happens.
 */
ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc,
			     u64 offs, size_t len, const void *buf)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;
	ssize_t ret;

	if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_OUT)
		return -EINVAL;

	if (!len)
		return 0;

	if (desc->nodirmap) {
		ret = spi_mem_no_dirmap_write(desc, offs, len, buf);
	} else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_write) {
		ret = spi_mem_access_start(desc->mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->dirmap_write(desc, offs, len, buf);

		spi_mem_access_end(desc->mem);
	} else {
		ret = -ENOTSUPP;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_write);

static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
{
	return container_of(drv, struct spi_mem_driver, spidrv.driver);
}

static int spi_mem_probe(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_controller *ctlr = spi->controller;
	struct spi_mem *mem;

	mem = devm_kzalloc(&spi->dev, sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->spi = spi;

	if (ctlr->mem_ops && ctlr->mem_ops->get_name)
		mem->name = ctlr->mem_ops->get_name(mem);
	else
		mem->name = dev_name(&spi->dev);

	if (IS_ERR_OR_NULL(mem->name))
		return PTR_ERR(mem->name);

	spi_set_drvdata(spi, mem);

	return memdrv->probe(mem);
}

static int spi_mem_remove(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->remove)
		return memdrv->remove(mem);

	return 0;
}

static void spi_mem_shutdown(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->shutdown)
		memdrv->shutdown(mem);
}

/**
 * spi_mem_driver_register_with_owner() - Register a SPI memory driver
 * @memdrv: the SPI memory driver to register
 * @owner: the owner of this driver
 *
 * Registers a SPI memory driver.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_driver_register_with_owner(struct spi_mem_driver *memdrv,
				       struct module *owner)
{
	memdrv->spidrv.probe = spi_mem_probe;
	memdrv->spidrv.remove = spi_mem_remove;
	memdrv->spidrv.shutdown = spi_mem_shutdown;

	return __spi_register_driver(owner, &memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_register_with_owner);

/**
 * spi_mem_driver_unregister() - Unregister a SPI memory driver
 * @memdrv: the SPI memory driver to unregister
 *
 * Unregisters a SPI memory driver.
 */
void spi_mem_driver_unregister(struct spi_mem_driver *memdrv)
{
	spi_unregister_driver(&memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_unregister);
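/*
 * Illustrative sketch (not part of this driver): a minimal SPI memory driver
 * built on the registration helpers above. Every foo_* name and the
 * "vendor,foo-nor" compatible string are hypothetical.
 *
 *	static int foo_probe(struct spi_mem *mem)
 *	{
 *		dev_info(&mem->spi->dev, "probed %s\n", spi_mem_get_name(mem));
 *		return 0;
 *	}
 *
 *	static const struct of_device_id foo_of_ids[] = {
 *		{ .compatible = "vendor,foo-nor" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(of, foo_of_ids);
 *
 *	static struct spi_mem_driver foo_driver = {
 *		.spidrv = {
 *			.driver = {
 *				.name = "foo-nor",
 *				.of_match_table = foo_of_ids,
 *			},
 *		},
 *		.probe = foo_probe,
 *	};
 *	module_spi_mem_driver(foo_driver);
 */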