1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with 4 * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c 5 * 6 * Copyright (C) 2005, Intec Automation Inc. 7 * Copyright (C) 2014, Freescale Semiconductor, Inc. 8 */ 9 10 #include <linux/err.h> 11 #include <linux/errno.h> 12 #include <linux/module.h> 13 #include <linux/device.h> 14 #include <linux/mutex.h> 15 #include <linux/math64.h> 16 #include <linux/sizes.h> 17 #include <linux/slab.h> 18 19 #include <linux/mtd/mtd.h> 20 #include <linux/of_platform.h> 21 #include <linux/sched/task_stack.h> 22 #include <linux/spi/flash.h> 23 #include <linux/mtd/spi-nor.h> 24 25 #include "core.h" 26 27 /* Define max times to check status register before we give up. */ 28 29 /* 30 * For everything but full-chip erase; probably could be much smaller, but kept 31 * around for safety for now 32 */ 33 #define DEFAULT_READY_WAIT_JIFFIES (40UL * HZ) 34 35 /* 36 * For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up 37 * for larger flash 38 */ 39 #define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ) 40 41 #define SPI_NOR_MAX_ADDR_WIDTH 4 42 43 #define SPI_NOR_SRST_SLEEP_MIN 200 44 #define SPI_NOR_SRST_SLEEP_MAX 400 45 46 /** 47 * spi_nor_get_cmd_ext() - Get the command opcode extension based on the 48 * extension type. 49 * @nor: pointer to a 'struct spi_nor' 50 * @op: pointer to the 'struct spi_mem_op' whose properties 51 * need to be initialized. 52 * 53 * Right now, only "repeat" and "invert" are supported. 54 * 55 * Return: The opcode extension. 
56 */ 57 static u8 spi_nor_get_cmd_ext(const struct spi_nor *nor, 58 const struct spi_mem_op *op) 59 { 60 switch (nor->cmd_ext_type) { 61 case SPI_NOR_EXT_INVERT: 62 return ~op->cmd.opcode; 63 64 case SPI_NOR_EXT_REPEAT: 65 return op->cmd.opcode; 66 67 default: 68 dev_err(nor->dev, "Unknown command extension type\n"); 69 return 0; 70 } 71 } 72 73 /** 74 * spi_nor_spimem_setup_op() - Set up common properties of a spi-mem op. 75 * @nor: pointer to a 'struct spi_nor' 76 * @op: pointer to the 'struct spi_mem_op' whose properties 77 * need to be initialized. 78 * @proto: the protocol from which the properties need to be set. 79 */ 80 void spi_nor_spimem_setup_op(const struct spi_nor *nor, 81 struct spi_mem_op *op, 82 const enum spi_nor_protocol proto) 83 { 84 u8 ext; 85 86 op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(proto); 87 88 if (op->addr.nbytes) 89 op->addr.buswidth = spi_nor_get_protocol_addr_nbits(proto); 90 91 if (op->dummy.nbytes) 92 op->dummy.buswidth = spi_nor_get_protocol_addr_nbits(proto); 93 94 if (op->data.nbytes) 95 op->data.buswidth = spi_nor_get_protocol_data_nbits(proto); 96 97 if (spi_nor_protocol_is_dtr(proto)) { 98 /* 99 * SPIMEM supports mixed DTR modes, but right now we can only 100 * have all phases either DTR or STR. IOW, SPIMEM can have 101 * something like 4S-4D-4D, but SPI NOR can't. So, set all 4 102 * phases to either DTR or STR. 103 */ 104 op->cmd.dtr = true; 105 op->addr.dtr = true; 106 op->dummy.dtr = true; 107 op->data.dtr = true; 108 109 /* 2 bytes per clock cycle in DTR mode. */ 110 op->dummy.nbytes *= 2; 111 112 ext = spi_nor_get_cmd_ext(nor, op); 113 op->cmd.opcode = (op->cmd.opcode << 8) | ext; 114 op->cmd.nbytes = 2; 115 } 116 } 117 118 /** 119 * spi_nor_spimem_bounce() - check if a bounce buffer is needed for the data 120 * transfer 121 * @nor: pointer to 'struct spi_nor' 122 * @op: pointer to 'struct spi_mem_op' template for transfer 123 * 124 * If we have to use the bounce buffer, the data field in @op will be updated. 
125 * 126 * Return: true if the bounce buffer is needed, false if not 127 */ 128 static bool spi_nor_spimem_bounce(struct spi_nor *nor, struct spi_mem_op *op) 129 { 130 /* op->data.buf.in occupies the same memory as op->data.buf.out */ 131 if (object_is_on_stack(op->data.buf.in) || 132 !virt_addr_valid(op->data.buf.in)) { 133 if (op->data.nbytes > nor->bouncebuf_size) 134 op->data.nbytes = nor->bouncebuf_size; 135 op->data.buf.in = nor->bouncebuf; 136 return true; 137 } 138 139 return false; 140 } 141 142 /** 143 * spi_nor_spimem_exec_op() - execute a memory operation 144 * @nor: pointer to 'struct spi_nor' 145 * @op: pointer to 'struct spi_mem_op' template for transfer 146 * 147 * Return: 0 on success, -error otherwise. 148 */ 149 static int spi_nor_spimem_exec_op(struct spi_nor *nor, struct spi_mem_op *op) 150 { 151 int error; 152 153 error = spi_mem_adjust_op_size(nor->spimem, op); 154 if (error) 155 return error; 156 157 return spi_mem_exec_op(nor->spimem, op); 158 } 159 160 static int spi_nor_controller_ops_read_reg(struct spi_nor *nor, u8 opcode, 161 u8 *buf, size_t len) 162 { 163 if (spi_nor_protocol_is_dtr(nor->reg_proto)) 164 return -EOPNOTSUPP; 165 166 return nor->controller_ops->read_reg(nor, opcode, buf, len); 167 } 168 169 static int spi_nor_controller_ops_write_reg(struct spi_nor *nor, u8 opcode, 170 const u8 *buf, size_t len) 171 { 172 if (spi_nor_protocol_is_dtr(nor->reg_proto)) 173 return -EOPNOTSUPP; 174 175 return nor->controller_ops->write_reg(nor, opcode, buf, len); 176 } 177 178 static int spi_nor_controller_ops_erase(struct spi_nor *nor, loff_t offs) 179 { 180 if (spi_nor_protocol_is_dtr(nor->write_proto)) 181 return -EOPNOTSUPP; 182 183 return nor->controller_ops->erase(nor, offs); 184 } 185 186 /** 187 * spi_nor_spimem_read_data() - read data from flash's memory region via 188 * spi-mem 189 * @nor: pointer to 'struct spi_nor' 190 * @from: offset to read from 191 * @len: number of bytes to read 192 * @buf: pointer to dst buffer 193 * 194 * 
Return: number of bytes read successfully, -errno otherwise 195 */ 196 static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from, 197 size_t len, u8 *buf) 198 { 199 struct spi_mem_op op = 200 SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0), 201 SPI_MEM_OP_ADDR(nor->addr_width, from, 0), 202 SPI_MEM_OP_DUMMY(nor->read_dummy, 0), 203 SPI_MEM_OP_DATA_IN(len, buf, 0)); 204 bool usebouncebuf; 205 ssize_t nbytes; 206 int error; 207 208 spi_nor_spimem_setup_op(nor, &op, nor->read_proto); 209 210 /* convert the dummy cycles to the number of bytes */ 211 op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8; 212 if (spi_nor_protocol_is_dtr(nor->read_proto)) 213 op.dummy.nbytes *= 2; 214 215 usebouncebuf = spi_nor_spimem_bounce(nor, &op); 216 217 if (nor->dirmap.rdesc) { 218 nbytes = spi_mem_dirmap_read(nor->dirmap.rdesc, op.addr.val, 219 op.data.nbytes, op.data.buf.in); 220 } else { 221 error = spi_nor_spimem_exec_op(nor, &op); 222 if (error) 223 return error; 224 nbytes = op.data.nbytes; 225 } 226 227 if (usebouncebuf && nbytes > 0) 228 memcpy(buf, op.data.buf.in, nbytes); 229 230 return nbytes; 231 } 232 233 /** 234 * spi_nor_read_data() - read data from flash memory 235 * @nor: pointer to 'struct spi_nor' 236 * @from: offset to read from 237 * @len: number of bytes to read 238 * @buf: pointer to dst buffer 239 * 240 * Return: number of bytes read successfully, -errno otherwise 241 */ 242 ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len, u8 *buf) 243 { 244 if (nor->spimem) 245 return spi_nor_spimem_read_data(nor, from, len, buf); 246 247 return nor->controller_ops->read(nor, from, len, buf); 248 } 249 250 /** 251 * spi_nor_spimem_write_data() - write data to flash memory via 252 * spi-mem 253 * @nor: pointer to 'struct spi_nor' 254 * @to: offset to write to 255 * @len: number of bytes to write 256 * @buf: pointer to src buffer 257 * 258 * Return: number of bytes written successfully, -errno otherwise 259 */ 260 static ssize_t 
spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to, 261 size_t len, const u8 *buf) 262 { 263 struct spi_mem_op op = 264 SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0), 265 SPI_MEM_OP_ADDR(nor->addr_width, to, 0), 266 SPI_MEM_OP_NO_DUMMY, 267 SPI_MEM_OP_DATA_OUT(len, buf, 0)); 268 ssize_t nbytes; 269 int error; 270 271 if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second) 272 op.addr.nbytes = 0; 273 274 spi_nor_spimem_setup_op(nor, &op, nor->write_proto); 275 276 if (spi_nor_spimem_bounce(nor, &op)) 277 memcpy(nor->bouncebuf, buf, op.data.nbytes); 278 279 if (nor->dirmap.wdesc) { 280 nbytes = spi_mem_dirmap_write(nor->dirmap.wdesc, op.addr.val, 281 op.data.nbytes, op.data.buf.out); 282 } else { 283 error = spi_nor_spimem_exec_op(nor, &op); 284 if (error) 285 return error; 286 nbytes = op.data.nbytes; 287 } 288 289 return nbytes; 290 } 291 292 /** 293 * spi_nor_write_data() - write data to flash memory 294 * @nor: pointer to 'struct spi_nor' 295 * @to: offset to write to 296 * @len: number of bytes to write 297 * @buf: pointer to src buffer 298 * 299 * Return: number of bytes written successfully, -errno otherwise 300 */ 301 ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len, 302 const u8 *buf) 303 { 304 if (nor->spimem) 305 return spi_nor_spimem_write_data(nor, to, len, buf); 306 307 return nor->controller_ops->write(nor, to, len, buf); 308 } 309 310 /** 311 * spi_nor_write_enable() - Set write enable latch with Write Enable command. 312 * @nor: pointer to 'struct spi_nor'. 313 * 314 * Return: 0 on success, -errno otherwise. 
315 */ 316 int spi_nor_write_enable(struct spi_nor *nor) 317 { 318 int ret; 319 320 if (nor->spimem) { 321 struct spi_mem_op op = 322 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 0), 323 SPI_MEM_OP_NO_ADDR, 324 SPI_MEM_OP_NO_DUMMY, 325 SPI_MEM_OP_NO_DATA); 326 327 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); 328 329 ret = spi_mem_exec_op(nor->spimem, &op); 330 } else { 331 ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WREN, 332 NULL, 0); 333 } 334 335 if (ret) 336 dev_dbg(nor->dev, "error %d on Write Enable\n", ret); 337 338 return ret; 339 } 340 341 /** 342 * spi_nor_write_disable() - Send Write Disable instruction to the chip. 343 * @nor: pointer to 'struct spi_nor'. 344 * 345 * Return: 0 on success, -errno otherwise. 346 */ 347 int spi_nor_write_disable(struct spi_nor *nor) 348 { 349 int ret; 350 351 if (nor->spimem) { 352 struct spi_mem_op op = 353 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 0), 354 SPI_MEM_OP_NO_ADDR, 355 SPI_MEM_OP_NO_DUMMY, 356 SPI_MEM_OP_NO_DATA); 357 358 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); 359 360 ret = spi_mem_exec_op(nor->spimem, &op); 361 } else { 362 ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRDI, 363 NULL, 0); 364 } 365 366 if (ret) 367 dev_dbg(nor->dev, "error %d on Write Disable\n", ret); 368 369 return ret; 370 } 371 372 /** 373 * spi_nor_read_sr() - Read the Status Register. 374 * @nor: pointer to 'struct spi_nor'. 375 * @sr: pointer to a DMA-able buffer where the value of the 376 * Status Register will be written. Should be at least 2 bytes. 377 * 378 * Return: 0 on success, -errno otherwise. 
379 */ 380 int spi_nor_read_sr(struct spi_nor *nor, u8 *sr) 381 { 382 int ret; 383 384 if (nor->spimem) { 385 struct spi_mem_op op = 386 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 0), 387 SPI_MEM_OP_NO_ADDR, 388 SPI_MEM_OP_NO_DUMMY, 389 SPI_MEM_OP_DATA_IN(1, sr, 0)); 390 391 if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) { 392 op.addr.nbytes = nor->params->rdsr_addr_nbytes; 393 op.dummy.nbytes = nor->params->rdsr_dummy; 394 /* 395 * We don't want to read only one byte in DTR mode. So, 396 * read 2 and then discard the second byte. 397 */ 398 op.data.nbytes = 2; 399 } 400 401 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); 402 403 ret = spi_mem_exec_op(nor->spimem, &op); 404 } else { 405 ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR, sr, 406 1); 407 } 408 409 if (ret) 410 dev_dbg(nor->dev, "error %d reading SR\n", ret); 411 412 return ret; 413 } 414 415 /** 416 * spi_nor_read_fsr() - Read the Flag Status Register. 417 * @nor: pointer to 'struct spi_nor' 418 * @fsr: pointer to a DMA-able buffer where the value of the 419 * Flag Status Register will be written. Should be at least 2 420 * bytes. 421 * 422 * Return: 0 on success, -errno otherwise. 423 */ 424 static int spi_nor_read_fsr(struct spi_nor *nor, u8 *fsr) 425 { 426 int ret; 427 428 if (nor->spimem) { 429 struct spi_mem_op op = 430 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 0), 431 SPI_MEM_OP_NO_ADDR, 432 SPI_MEM_OP_NO_DUMMY, 433 SPI_MEM_OP_DATA_IN(1, fsr, 0)); 434 435 if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) { 436 op.addr.nbytes = nor->params->rdsr_addr_nbytes; 437 op.dummy.nbytes = nor->params->rdsr_dummy; 438 /* 439 * We don't want to read only one byte in DTR mode. So, 440 * read 2 and then discard the second byte. 
441 */ 442 op.data.nbytes = 2; 443 } 444 445 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); 446 447 ret = spi_mem_exec_op(nor->spimem, &op); 448 } else { 449 ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDFSR, fsr, 450 1); 451 } 452 453 if (ret) 454 dev_dbg(nor->dev, "error %d reading FSR\n", ret); 455 456 return ret; 457 } 458 459 /** 460 * spi_nor_read_cr() - Read the Configuration Register using the 461 * SPINOR_OP_RDCR (35h) command. 462 * @nor: pointer to 'struct spi_nor' 463 * @cr: pointer to a DMA-able buffer where the value of the 464 * Configuration Register will be written. 465 * 466 * Return: 0 on success, -errno otherwise. 467 */ 468 int spi_nor_read_cr(struct spi_nor *nor, u8 *cr) 469 { 470 int ret; 471 472 if (nor->spimem) { 473 struct spi_mem_op op = 474 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDCR, 0), 475 SPI_MEM_OP_NO_ADDR, 476 SPI_MEM_OP_NO_DUMMY, 477 SPI_MEM_OP_DATA_IN(1, cr, 0)); 478 479 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); 480 481 ret = spi_mem_exec_op(nor->spimem, &op); 482 } else { 483 ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDCR, cr, 484 1); 485 } 486 487 if (ret) 488 dev_dbg(nor->dev, "error %d reading CR\n", ret); 489 490 return ret; 491 } 492 493 /** 494 * spi_nor_set_4byte_addr_mode() - Enter/Exit 4-byte address mode. 495 * @nor: pointer to 'struct spi_nor'. 496 * @enable: true to enter the 4-byte address mode, false to exit the 4-byte 497 * address mode. 498 * 499 * Return: 0 on success, -errno otherwise. 500 */ 501 int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable) 502 { 503 int ret; 504 505 if (nor->spimem) { 506 struct spi_mem_op op = 507 SPI_MEM_OP(SPI_MEM_OP_CMD(enable ? 508 SPINOR_OP_EN4B : 509 SPINOR_OP_EX4B, 510 0), 511 SPI_MEM_OP_NO_ADDR, 512 SPI_MEM_OP_NO_DUMMY, 513 SPI_MEM_OP_NO_DATA); 514 515 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); 516 517 ret = spi_mem_exec_op(nor->spimem, &op); 518 } else { 519 ret = spi_nor_controller_ops_write_reg(nor, 520 enable ? 
SPINOR_OP_EN4B : 521 SPINOR_OP_EX4B, 522 NULL, 0); 523 } 524 525 if (ret) 526 dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret); 527 528 return ret; 529 } 530 531 /** 532 * spansion_set_4byte_addr_mode() - Set 4-byte address mode for Spansion 533 * flashes. 534 * @nor: pointer to 'struct spi_nor'. 535 * @enable: true to enter the 4-byte address mode, false to exit the 4-byte 536 * address mode. 537 * 538 * Return: 0 on success, -errno otherwise. 539 */ 540 static int spansion_set_4byte_addr_mode(struct spi_nor *nor, bool enable) 541 { 542 int ret; 543 544 nor->bouncebuf[0] = enable << 7; 545 546 if (nor->spimem) { 547 struct spi_mem_op op = 548 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_BRWR, 0), 549 SPI_MEM_OP_NO_ADDR, 550 SPI_MEM_OP_NO_DUMMY, 551 SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 0)); 552 553 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); 554 555 ret = spi_mem_exec_op(nor->spimem, &op); 556 } else { 557 ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_BRWR, 558 nor->bouncebuf, 1); 559 } 560 561 if (ret) 562 dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret); 563 564 return ret; 565 } 566 567 /** 568 * spi_nor_write_ear() - Write Extended Address Register. 569 * @nor: pointer to 'struct spi_nor'. 570 * @ear: value to write to the Extended Address Register. 571 * 572 * Return: 0 on success, -errno otherwise. 
573 */ 574 int spi_nor_write_ear(struct spi_nor *nor, u8 ear) 575 { 576 int ret; 577 578 nor->bouncebuf[0] = ear; 579 580 if (nor->spimem) { 581 struct spi_mem_op op = 582 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREAR, 0), 583 SPI_MEM_OP_NO_ADDR, 584 SPI_MEM_OP_NO_DUMMY, 585 SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 0)); 586 587 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); 588 589 ret = spi_mem_exec_op(nor->spimem, &op); 590 } else { 591 ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WREAR, 592 nor->bouncebuf, 1); 593 } 594 595 if (ret) 596 dev_dbg(nor->dev, "error %d writing EAR\n", ret); 597 598 return ret; 599 } 600 601 /** 602 * spi_nor_xread_sr() - Read the Status Register on S3AN flashes. 603 * @nor: pointer to 'struct spi_nor'. 604 * @sr: pointer to a DMA-able buffer where the value of the 605 * Status Register will be written. 606 * 607 * Return: 0 on success, -errno otherwise. 608 */ 609 int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr) 610 { 611 int ret; 612 613 if (nor->spimem) { 614 struct spi_mem_op op = 615 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_XRDSR, 0), 616 SPI_MEM_OP_NO_ADDR, 617 SPI_MEM_OP_NO_DUMMY, 618 SPI_MEM_OP_DATA_IN(1, sr, 0)); 619 620 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); 621 622 ret = spi_mem_exec_op(nor->spimem, &op); 623 } else { 624 ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_XRDSR, sr, 625 1); 626 } 627 628 if (ret) 629 dev_dbg(nor->dev, "error %d reading XRDSR\n", ret); 630 631 return ret; 632 } 633 634 /** 635 * spi_nor_xsr_ready() - Query the Status Register of the S3AN flash to see if 636 * the flash is ready for new commands. 637 * @nor: pointer to 'struct spi_nor'. 638 * 639 * Return: 1 if ready, 0 if not ready, -errno on errors. 
640 */ 641 static int spi_nor_xsr_ready(struct spi_nor *nor) 642 { 643 int ret; 644 645 ret = spi_nor_xread_sr(nor, nor->bouncebuf); 646 if (ret) 647 return ret; 648 649 return !!(nor->bouncebuf[0] & XSR_RDY); 650 } 651 652 /** 653 * spi_nor_clear_sr() - Clear the Status Register. 654 * @nor: pointer to 'struct spi_nor'. 655 */ 656 static void spi_nor_clear_sr(struct spi_nor *nor) 657 { 658 int ret; 659 660 if (nor->spimem) { 661 struct spi_mem_op op = 662 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLSR, 0), 663 SPI_MEM_OP_NO_ADDR, 664 SPI_MEM_OP_NO_DUMMY, 665 SPI_MEM_OP_NO_DATA); 666 667 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); 668 669 ret = spi_mem_exec_op(nor->spimem, &op); 670 } else { 671 ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_CLSR, 672 NULL, 0); 673 } 674 675 if (ret) 676 dev_dbg(nor->dev, "error %d clearing SR\n", ret); 677 } 678 679 /** 680 * spi_nor_sr_ready() - Query the Status Register to see if the flash is ready 681 * for new commands. 682 * @nor: pointer to 'struct spi_nor'. 683 * 684 * Return: 1 if ready, 0 if not ready, -errno on errors. 685 */ 686 static int spi_nor_sr_ready(struct spi_nor *nor) 687 { 688 int ret = spi_nor_read_sr(nor, nor->bouncebuf); 689 690 if (ret) 691 return ret; 692 693 if (nor->flags & SNOR_F_USE_CLSR && 694 nor->bouncebuf[0] & (SR_E_ERR | SR_P_ERR)) { 695 if (nor->bouncebuf[0] & SR_E_ERR) 696 dev_err(nor->dev, "Erase Error occurred\n"); 697 else 698 dev_err(nor->dev, "Programming Error occurred\n"); 699 700 spi_nor_clear_sr(nor); 701 702 /* 703 * WEL bit remains set to one when an erase or page program 704 * error occurs. Issue a Write Disable command to protect 705 * against inadvertent writes that can possibly corrupt the 706 * contents of the memory. 707 */ 708 ret = spi_nor_write_disable(nor); 709 if (ret) 710 return ret; 711 712 return -EIO; 713 } 714 715 return !(nor->bouncebuf[0] & SR_WIP); 716 } 717 718 /** 719 * spi_nor_clear_fsr() - Clear the Flag Status Register. 
720 * @nor: pointer to 'struct spi_nor'. 721 */ 722 static void spi_nor_clear_fsr(struct spi_nor *nor) 723 { 724 int ret; 725 726 if (nor->spimem) { 727 struct spi_mem_op op = 728 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLFSR, 0), 729 SPI_MEM_OP_NO_ADDR, 730 SPI_MEM_OP_NO_DUMMY, 731 SPI_MEM_OP_NO_DATA); 732 733 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); 734 735 ret = spi_mem_exec_op(nor->spimem, &op); 736 } else { 737 ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_CLFSR, 738 NULL, 0); 739 } 740 741 if (ret) 742 dev_dbg(nor->dev, "error %d clearing FSR\n", ret); 743 } 744 745 /** 746 * spi_nor_fsr_ready() - Query the Flag Status Register to see if the flash is 747 * ready for new commands. 748 * @nor: pointer to 'struct spi_nor'. 749 * 750 * Return: 1 if ready, 0 if not ready, -errno on errors. 751 */ 752 static int spi_nor_fsr_ready(struct spi_nor *nor) 753 { 754 int ret = spi_nor_read_fsr(nor, nor->bouncebuf); 755 756 if (ret) 757 return ret; 758 759 if (nor->bouncebuf[0] & (FSR_E_ERR | FSR_P_ERR)) { 760 if (nor->bouncebuf[0] & FSR_E_ERR) 761 dev_err(nor->dev, "Erase operation failed.\n"); 762 else 763 dev_err(nor->dev, "Program operation failed.\n"); 764 765 if (nor->bouncebuf[0] & FSR_PT_ERR) 766 dev_err(nor->dev, 767 "Attempted to modify a protected sector.\n"); 768 769 spi_nor_clear_fsr(nor); 770 771 /* 772 * WEL bit remains set to one when an erase or page program 773 * error occurs. Issue a Write Disable command to protect 774 * against inadvertent writes that can possibly corrupt the 775 * contents of the memory. 776 */ 777 ret = spi_nor_write_disable(nor); 778 if (ret) 779 return ret; 780 781 return -EIO; 782 } 783 784 return !!(nor->bouncebuf[0] & FSR_READY); 785 } 786 787 /** 788 * spi_nor_ready() - Query the flash to see if it is ready for new commands. 789 * @nor: pointer to 'struct spi_nor'. 790 * 791 * Return: 1 if ready, 0 if not ready, -errno on errors. 
792 */ 793 static int spi_nor_ready(struct spi_nor *nor) 794 { 795 int sr, fsr; 796 797 if (nor->flags & SNOR_F_READY_XSR_RDY) 798 sr = spi_nor_xsr_ready(nor); 799 else 800 sr = spi_nor_sr_ready(nor); 801 if (sr < 0) 802 return sr; 803 fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1; 804 if (fsr < 0) 805 return fsr; 806 return sr && fsr; 807 } 808 809 /** 810 * spi_nor_wait_till_ready_with_timeout() - Service routine to read the 811 * Status Register until ready, or timeout occurs. 812 * @nor: pointer to "struct spi_nor". 813 * @timeout_jiffies: jiffies to wait until timeout. 814 * 815 * Return: 0 on success, -errno otherwise. 816 */ 817 static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor, 818 unsigned long timeout_jiffies) 819 { 820 unsigned long deadline; 821 int timeout = 0, ret; 822 823 deadline = jiffies + timeout_jiffies; 824 825 while (!timeout) { 826 if (time_after_eq(jiffies, deadline)) 827 timeout = 1; 828 829 ret = spi_nor_ready(nor); 830 if (ret < 0) 831 return ret; 832 if (ret) 833 return 0; 834 835 cond_resched(); 836 } 837 838 dev_dbg(nor->dev, "flash operation timed out\n"); 839 840 return -ETIMEDOUT; 841 } 842 843 /** 844 * spi_nor_wait_till_ready() - Wait for a predefined amount of time for the 845 * flash to be ready, or timeout occurs. 846 * @nor: pointer to "struct spi_nor". 847 * 848 * Return: 0 on success, -errno otherwise. 849 */ 850 int spi_nor_wait_till_ready(struct spi_nor *nor) 851 { 852 return spi_nor_wait_till_ready_with_timeout(nor, 853 DEFAULT_READY_WAIT_JIFFIES); 854 } 855 856 /** 857 * spi_nor_global_block_unlock() - Unlock Global Block Protection. 858 * @nor: pointer to 'struct spi_nor'. 859 * 860 * Return: 0 on success, -errno otherwise. 
861 */ 862 int spi_nor_global_block_unlock(struct spi_nor *nor) 863 { 864 int ret; 865 866 ret = spi_nor_write_enable(nor); 867 if (ret) 868 return ret; 869 870 if (nor->spimem) { 871 struct spi_mem_op op = 872 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_GBULK, 0), 873 SPI_MEM_OP_NO_ADDR, 874 SPI_MEM_OP_NO_DUMMY, 875 SPI_MEM_OP_NO_DATA); 876 877 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); 878 879 ret = spi_mem_exec_op(nor->spimem, &op); 880 } else { 881 ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_GBULK, 882 NULL, 0); 883 } 884 885 if (ret) { 886 dev_dbg(nor->dev, "error %d on Global Block Unlock\n", ret); 887 return ret; 888 } 889 890 return spi_nor_wait_till_ready(nor); 891 } 892 893 /** 894 * spi_nor_write_sr() - Write the Status Register. 895 * @nor: pointer to 'struct spi_nor'. 896 * @sr: pointer to DMA-able buffer to write to the Status Register. 897 * @len: number of bytes to write to the Status Register. 898 * 899 * Return: 0 on success, -errno otherwise. 900 */ 901 int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len) 902 { 903 int ret; 904 905 ret = spi_nor_write_enable(nor); 906 if (ret) 907 return ret; 908 909 if (nor->spimem) { 910 struct spi_mem_op op = 911 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 0), 912 SPI_MEM_OP_NO_ADDR, 913 SPI_MEM_OP_NO_DUMMY, 914 SPI_MEM_OP_DATA_OUT(len, sr, 0)); 915 916 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); 917 918 ret = spi_mem_exec_op(nor->spimem, &op); 919 } else { 920 ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR, sr, 921 len); 922 } 923 924 if (ret) { 925 dev_dbg(nor->dev, "error %d writing SR\n", ret); 926 return ret; 927 } 928 929 return spi_nor_wait_till_ready(nor); 930 } 931 932 /** 933 * spi_nor_write_sr1_and_check() - Write one byte to the Status Register 1 and 934 * ensure that the byte written match the received value. 935 * @nor: pointer to a 'struct spi_nor'. 936 * @sr1: byte value to be written to the Status Register. 
937 * 938 * Return: 0 on success, -errno otherwise. 939 */ 940 static int spi_nor_write_sr1_and_check(struct spi_nor *nor, u8 sr1) 941 { 942 int ret; 943 944 nor->bouncebuf[0] = sr1; 945 946 ret = spi_nor_write_sr(nor, nor->bouncebuf, 1); 947 if (ret) 948 return ret; 949 950 ret = spi_nor_read_sr(nor, nor->bouncebuf); 951 if (ret) 952 return ret; 953 954 if (nor->bouncebuf[0] != sr1) { 955 dev_dbg(nor->dev, "SR1: read back test failed\n"); 956 return -EIO; 957 } 958 959 return 0; 960 } 961 962 /** 963 * spi_nor_write_16bit_sr_and_check() - Write the Status Register 1 and the 964 * Status Register 2 in one shot. Ensure that the byte written in the Status 965 * Register 1 match the received value, and that the 16-bit Write did not 966 * affect what was already in the Status Register 2. 967 * @nor: pointer to a 'struct spi_nor'. 968 * @sr1: byte value to be written to the Status Register 1. 969 * 970 * Return: 0 on success, -errno otherwise. 971 */ 972 static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1) 973 { 974 int ret; 975 u8 *sr_cr = nor->bouncebuf; 976 u8 cr_written; 977 978 /* Make sure we don't overwrite the contents of Status Register 2. */ 979 if (!(nor->flags & SNOR_F_NO_READ_CR)) { 980 ret = spi_nor_read_cr(nor, &sr_cr[1]); 981 if (ret) 982 return ret; 983 } else if (nor->params->quad_enable) { 984 /* 985 * If the Status Register 2 Read command (35h) is not 986 * supported, we should at least be sure we don't 987 * change the value of the SR2 Quad Enable bit. 988 * 989 * We can safely assume that when the Quad Enable method is 990 * set, the value of the QE bit is one, as a consequence of the 991 * nor->params->quad_enable() call. 992 * 993 * We can safely assume that the Quad Enable bit is present in 994 * the Status Register 2 at BIT(1). 
According to the JESD216 995 * revB standard, BFPT DWORDS[15], bits 22:20, the 16-bit 996 * Write Status (01h) command is available just for the cases 997 * in which the QE bit is described in SR2 at BIT(1). 998 */ 999 sr_cr[1] = SR2_QUAD_EN_BIT1; 1000 } else { 1001 sr_cr[1] = 0; 1002 } 1003 1004 sr_cr[0] = sr1; 1005 1006 ret = spi_nor_write_sr(nor, sr_cr, 2); 1007 if (ret) 1008 return ret; 1009 1010 if (nor->flags & SNOR_F_NO_READ_CR) 1011 return 0; 1012 1013 cr_written = sr_cr[1]; 1014 1015 ret = spi_nor_read_cr(nor, &sr_cr[1]); 1016 if (ret) 1017 return ret; 1018 1019 if (cr_written != sr_cr[1]) { 1020 dev_dbg(nor->dev, "CR: read back test failed\n"); 1021 return -EIO; 1022 } 1023 1024 return 0; 1025 } 1026 1027 /** 1028 * spi_nor_write_16bit_cr_and_check() - Write the Status Register 1 and the 1029 * Configuration Register in one shot. Ensure that the byte written in the 1030 * Configuration Register match the received value, and that the 16-bit Write 1031 * did not affect what was already in the Status Register 1. 1032 * @nor: pointer to a 'struct spi_nor'. 1033 * @cr: byte value to be written to the Configuration Register. 1034 * 1035 * Return: 0 on success, -errno otherwise. 1036 */ 1037 int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr) 1038 { 1039 int ret; 1040 u8 *sr_cr = nor->bouncebuf; 1041 u8 sr_written; 1042 1043 /* Keep the current value of the Status Register 1. 
*/ 1044 ret = spi_nor_read_sr(nor, sr_cr); 1045 if (ret) 1046 return ret; 1047 1048 sr_cr[1] = cr; 1049 1050 ret = spi_nor_write_sr(nor, sr_cr, 2); 1051 if (ret) 1052 return ret; 1053 1054 sr_written = sr_cr[0]; 1055 1056 ret = spi_nor_read_sr(nor, sr_cr); 1057 if (ret) 1058 return ret; 1059 1060 if (sr_written != sr_cr[0]) { 1061 dev_dbg(nor->dev, "SR: Read back test failed\n"); 1062 return -EIO; 1063 } 1064 1065 if (nor->flags & SNOR_F_NO_READ_CR) 1066 return 0; 1067 1068 ret = spi_nor_read_cr(nor, &sr_cr[1]); 1069 if (ret) 1070 return ret; 1071 1072 if (cr != sr_cr[1]) { 1073 dev_dbg(nor->dev, "CR: read back test failed\n"); 1074 return -EIO; 1075 } 1076 1077 return 0; 1078 } 1079 1080 /** 1081 * spi_nor_write_sr_and_check() - Write the Status Register 1 and ensure that 1082 * the byte written match the received value without affecting other bits in the 1083 * Status Register 1 and 2. 1084 * @nor: pointer to a 'struct spi_nor'. 1085 * @sr1: byte value to be written to the Status Register. 1086 * 1087 * Return: 0 on success, -errno otherwise. 1088 */ 1089 int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1) 1090 { 1091 if (nor->flags & SNOR_F_HAS_16BIT_SR) 1092 return spi_nor_write_16bit_sr_and_check(nor, sr1); 1093 1094 return spi_nor_write_sr1_and_check(nor, sr1); 1095 } 1096 1097 /** 1098 * spi_nor_write_sr2() - Write the Status Register 2 using the 1099 * SPINOR_OP_WRSR2 (3eh) command. 1100 * @nor: pointer to 'struct spi_nor'. 1101 * @sr2: pointer to DMA-able buffer to write to the Status Register 2. 1102 * 1103 * Return: 0 on success, -errno otherwise. 
1104 */ 1105 static int spi_nor_write_sr2(struct spi_nor *nor, const u8 *sr2) 1106 { 1107 int ret; 1108 1109 ret = spi_nor_write_enable(nor); 1110 if (ret) 1111 return ret; 1112 1113 if (nor->spimem) { 1114 struct spi_mem_op op = 1115 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR2, 0), 1116 SPI_MEM_OP_NO_ADDR, 1117 SPI_MEM_OP_NO_DUMMY, 1118 SPI_MEM_OP_DATA_OUT(1, sr2, 0)); 1119 1120 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); 1121 1122 ret = spi_mem_exec_op(nor->spimem, &op); 1123 } else { 1124 ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR2, 1125 sr2, 1); 1126 } 1127 1128 if (ret) { 1129 dev_dbg(nor->dev, "error %d writing SR2\n", ret); 1130 return ret; 1131 } 1132 1133 return spi_nor_wait_till_ready(nor); 1134 } 1135 1136 /** 1137 * spi_nor_read_sr2() - Read the Status Register 2 using the 1138 * SPINOR_OP_RDSR2 (3fh) command. 1139 * @nor: pointer to 'struct spi_nor'. 1140 * @sr2: pointer to DMA-able buffer where the value of the 1141 * Status Register 2 will be written. 1142 * 1143 * Return: 0 on success, -errno otherwise. 1144 */ 1145 static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2) 1146 { 1147 int ret; 1148 1149 if (nor->spimem) { 1150 struct spi_mem_op op = 1151 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR2, 0), 1152 SPI_MEM_OP_NO_ADDR, 1153 SPI_MEM_OP_NO_DUMMY, 1154 SPI_MEM_OP_DATA_IN(1, sr2, 0)); 1155 1156 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); 1157 1158 ret = spi_mem_exec_op(nor->spimem, &op); 1159 } else { 1160 ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR2, sr2, 1161 1); 1162 } 1163 1164 if (ret) 1165 dev_dbg(nor->dev, "error %d reading SR2\n", ret); 1166 1167 return ret; 1168 } 1169 1170 /** 1171 * spi_nor_erase_chip() - Erase the entire flash memory. 1172 * @nor: pointer to 'struct spi_nor'. 1173 * 1174 * Return: 0 on success, -errno otherwise. 
1175 */ 1176 static int spi_nor_erase_chip(struct spi_nor *nor) 1177 { 1178 int ret; 1179 1180 dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10)); 1181 1182 if (nor->spimem) { 1183 struct spi_mem_op op = 1184 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CHIP_ERASE, 0), 1185 SPI_MEM_OP_NO_ADDR, 1186 SPI_MEM_OP_NO_DUMMY, 1187 SPI_MEM_OP_NO_DATA); 1188 1189 spi_nor_spimem_setup_op(nor, &op, nor->write_proto); 1190 1191 ret = spi_mem_exec_op(nor->spimem, &op); 1192 } else { 1193 ret = spi_nor_controller_ops_write_reg(nor, 1194 SPINOR_OP_CHIP_ERASE, 1195 NULL, 0); 1196 } 1197 1198 if (ret) 1199 dev_dbg(nor->dev, "error %d erasing chip\n", ret); 1200 1201 return ret; 1202 } 1203 1204 static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size) 1205 { 1206 size_t i; 1207 1208 for (i = 0; i < size; i++) 1209 if (table[i][0] == opcode) 1210 return table[i][1]; 1211 1212 /* No conversion found, keep input op code. */ 1213 return opcode; 1214 } 1215 1216 u8 spi_nor_convert_3to4_read(u8 opcode) 1217 { 1218 static const u8 spi_nor_3to4_read[][2] = { 1219 { SPINOR_OP_READ, SPINOR_OP_READ_4B }, 1220 { SPINOR_OP_READ_FAST, SPINOR_OP_READ_FAST_4B }, 1221 { SPINOR_OP_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B }, 1222 { SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B }, 1223 { SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B }, 1224 { SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B }, 1225 { SPINOR_OP_READ_1_1_8, SPINOR_OP_READ_1_1_8_4B }, 1226 { SPINOR_OP_READ_1_8_8, SPINOR_OP_READ_1_8_8_4B }, 1227 1228 { SPINOR_OP_READ_1_1_1_DTR, SPINOR_OP_READ_1_1_1_DTR_4B }, 1229 { SPINOR_OP_READ_1_2_2_DTR, SPINOR_OP_READ_1_2_2_DTR_4B }, 1230 { SPINOR_OP_READ_1_4_4_DTR, SPINOR_OP_READ_1_4_4_DTR_4B }, 1231 }; 1232 1233 return spi_nor_convert_opcode(opcode, spi_nor_3to4_read, 1234 ARRAY_SIZE(spi_nor_3to4_read)); 1235 } 1236 1237 static u8 spi_nor_convert_3to4_program(u8 opcode) 1238 { 1239 static const u8 spi_nor_3to4_program[][2] = { 1240 { SPINOR_OP_PP, SPINOR_OP_PP_4B }, 1241 { 
SPINOR_OP_PP_1_1_4, SPINOR_OP_PP_1_1_4_4B }, 1242 { SPINOR_OP_PP_1_4_4, SPINOR_OP_PP_1_4_4_4B }, 1243 { SPINOR_OP_PP_1_1_8, SPINOR_OP_PP_1_1_8_4B }, 1244 { SPINOR_OP_PP_1_8_8, SPINOR_OP_PP_1_8_8_4B }, 1245 }; 1246 1247 return spi_nor_convert_opcode(opcode, spi_nor_3to4_program, 1248 ARRAY_SIZE(spi_nor_3to4_program)); 1249 } 1250 1251 static u8 spi_nor_convert_3to4_erase(u8 opcode) 1252 { 1253 static const u8 spi_nor_3to4_erase[][2] = { 1254 { SPINOR_OP_BE_4K, SPINOR_OP_BE_4K_4B }, 1255 { SPINOR_OP_BE_32K, SPINOR_OP_BE_32K_4B }, 1256 { SPINOR_OP_SE, SPINOR_OP_SE_4B }, 1257 }; 1258 1259 return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase, 1260 ARRAY_SIZE(spi_nor_3to4_erase)); 1261 } 1262 1263 static bool spi_nor_has_uniform_erase(const struct spi_nor *nor) 1264 { 1265 return !!nor->params->erase_map.uniform_erase_type; 1266 } 1267 1268 static void spi_nor_set_4byte_opcodes(struct spi_nor *nor) 1269 { 1270 nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode); 1271 nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode); 1272 nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode); 1273 1274 if (!spi_nor_has_uniform_erase(nor)) { 1275 struct spi_nor_erase_map *map = &nor->params->erase_map; 1276 struct spi_nor_erase_type *erase; 1277 int i; 1278 1279 for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) { 1280 erase = &map->erase_type[i]; 1281 erase->opcode = 1282 spi_nor_convert_3to4_erase(erase->opcode); 1283 } 1284 } 1285 } 1286 1287 int spi_nor_lock_and_prep(struct spi_nor *nor) 1288 { 1289 int ret = 0; 1290 1291 mutex_lock(&nor->lock); 1292 1293 if (nor->controller_ops && nor->controller_ops->prepare) { 1294 ret = nor->controller_ops->prepare(nor); 1295 if (ret) { 1296 mutex_unlock(&nor->lock); 1297 return ret; 1298 } 1299 } 1300 return ret; 1301 } 1302 1303 void spi_nor_unlock_and_unprep(struct spi_nor *nor) 1304 { 1305 if (nor->controller_ops && nor->controller_ops->unprepare) 1306 nor->controller_ops->unprepare(nor); 1307 
mutex_unlock(&nor->lock); 1308 } 1309 1310 static u32 spi_nor_convert_addr(struct spi_nor *nor, loff_t addr) 1311 { 1312 if (!nor->params->convert_addr) 1313 return addr; 1314 1315 return nor->params->convert_addr(nor, addr); 1316 } 1317 1318 /* 1319 * Initiate the erasure of a single sector 1320 */ 1321 int spi_nor_erase_sector(struct spi_nor *nor, u32 addr) 1322 { 1323 int i; 1324 1325 addr = spi_nor_convert_addr(nor, addr); 1326 1327 if (nor->spimem) { 1328 struct spi_mem_op op = 1329 SPI_MEM_OP(SPI_MEM_OP_CMD(nor->erase_opcode, 0), 1330 SPI_MEM_OP_ADDR(nor->addr_width, addr, 0), 1331 SPI_MEM_OP_NO_DUMMY, 1332 SPI_MEM_OP_NO_DATA); 1333 1334 spi_nor_spimem_setup_op(nor, &op, nor->write_proto); 1335 1336 return spi_mem_exec_op(nor->spimem, &op); 1337 } else if (nor->controller_ops->erase) { 1338 return spi_nor_controller_ops_erase(nor, addr); 1339 } 1340 1341 /* 1342 * Default implementation, if driver doesn't have a specialized HW 1343 * control 1344 */ 1345 for (i = nor->addr_width - 1; i >= 0; i--) { 1346 nor->bouncebuf[i] = addr & 0xff; 1347 addr >>= 8; 1348 } 1349 1350 return spi_nor_controller_ops_write_reg(nor, nor->erase_opcode, 1351 nor->bouncebuf, nor->addr_width); 1352 } 1353 1354 /** 1355 * spi_nor_div_by_erase_size() - calculate remainder and update new dividend 1356 * @erase: pointer to a structure that describes a SPI NOR erase type 1357 * @dividend: dividend value 1358 * @remainder: pointer to u32 remainder (will be updated) 1359 * 1360 * Return: the result of the division 1361 */ 1362 static u64 spi_nor_div_by_erase_size(const struct spi_nor_erase_type *erase, 1363 u64 dividend, u32 *remainder) 1364 { 1365 /* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */ 1366 *remainder = (u32)dividend & erase->size_mask; 1367 return dividend >> erase->size_shift; 1368 } 1369 1370 /** 1371 * spi_nor_find_best_erase_type() - find the best erase type for the given 1372 * offset in the serial flash memory and the 1373 * number of bytes to erase. 
The region in 1374 * which the address fits is expected to be 1375 * provided. 1376 * @map: the erase map of the SPI NOR 1377 * @region: pointer to a structure that describes a SPI NOR erase region 1378 * @addr: offset in the serial flash memory 1379 * @len: number of bytes to erase 1380 * 1381 * Return: a pointer to the best fitted erase type, NULL otherwise. 1382 */ 1383 static const struct spi_nor_erase_type * 1384 spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map, 1385 const struct spi_nor_erase_region *region, 1386 u64 addr, u32 len) 1387 { 1388 const struct spi_nor_erase_type *erase; 1389 u32 rem; 1390 int i; 1391 u8 erase_mask = region->offset & SNOR_ERASE_TYPE_MASK; 1392 1393 /* 1394 * Erase types are ordered by size, with the smallest erase type at 1395 * index 0. 1396 */ 1397 for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) { 1398 /* Does the erase region support the tested erase type? */ 1399 if (!(erase_mask & BIT(i))) 1400 continue; 1401 1402 erase = &map->erase_type[i]; 1403 1404 /* Alignment is not mandatory for overlaid regions */ 1405 if (region->offset & SNOR_OVERLAID_REGION && 1406 region->size <= len) 1407 return erase; 1408 1409 /* Don't erase more than what the user has asked for. */ 1410 if (erase->size > len) 1411 continue; 1412 1413 spi_nor_div_by_erase_size(erase, addr, &rem); 1414 if (!rem) 1415 return erase; 1416 } 1417 1418 return NULL; 1419 } 1420 1421 static u64 spi_nor_region_is_last(const struct spi_nor_erase_region *region) 1422 { 1423 return region->offset & SNOR_LAST_REGION; 1424 } 1425 1426 static u64 spi_nor_region_end(const struct spi_nor_erase_region *region) 1427 { 1428 return (region->offset & ~SNOR_ERASE_FLAGS_MASK) + region->size; 1429 } 1430 1431 /** 1432 * spi_nor_region_next() - get the next spi nor region 1433 * @region: pointer to a structure that describes a SPI NOR erase region 1434 * 1435 * Return: the next spi nor region or NULL if last region. 
1436 */ 1437 struct spi_nor_erase_region * 1438 spi_nor_region_next(struct spi_nor_erase_region *region) 1439 { 1440 if (spi_nor_region_is_last(region)) 1441 return NULL; 1442 region++; 1443 return region; 1444 } 1445 1446 /** 1447 * spi_nor_find_erase_region() - find the region of the serial flash memory in 1448 * which the offset fits 1449 * @map: the erase map of the SPI NOR 1450 * @addr: offset in the serial flash memory 1451 * 1452 * Return: a pointer to the spi_nor_erase_region struct, ERR_PTR(-errno) 1453 * otherwise. 1454 */ 1455 static struct spi_nor_erase_region * 1456 spi_nor_find_erase_region(const struct spi_nor_erase_map *map, u64 addr) 1457 { 1458 struct spi_nor_erase_region *region = map->regions; 1459 u64 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK; 1460 u64 region_end = region_start + region->size; 1461 1462 while (addr < region_start || addr >= region_end) { 1463 region = spi_nor_region_next(region); 1464 if (!region) 1465 return ERR_PTR(-EINVAL); 1466 1467 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK; 1468 region_end = region_start + region->size; 1469 } 1470 1471 return region; 1472 } 1473 1474 /** 1475 * spi_nor_init_erase_cmd() - initialize an erase command 1476 * @region: pointer to a structure that describes a SPI NOR erase region 1477 * @erase: pointer to a structure that describes a SPI NOR erase type 1478 * 1479 * Return: the pointer to the allocated erase command, ERR_PTR(-errno) 1480 * otherwise. 
1481 */ 1482 static struct spi_nor_erase_command * 1483 spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region, 1484 const struct spi_nor_erase_type *erase) 1485 { 1486 struct spi_nor_erase_command *cmd; 1487 1488 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); 1489 if (!cmd) 1490 return ERR_PTR(-ENOMEM); 1491 1492 INIT_LIST_HEAD(&cmd->list); 1493 cmd->opcode = erase->opcode; 1494 cmd->count = 1; 1495 1496 if (region->offset & SNOR_OVERLAID_REGION) 1497 cmd->size = region->size; 1498 else 1499 cmd->size = erase->size; 1500 1501 return cmd; 1502 } 1503 1504 /** 1505 * spi_nor_destroy_erase_cmd_list() - destroy erase command list 1506 * @erase_list: list of erase commands 1507 */ 1508 static void spi_nor_destroy_erase_cmd_list(struct list_head *erase_list) 1509 { 1510 struct spi_nor_erase_command *cmd, *next; 1511 1512 list_for_each_entry_safe(cmd, next, erase_list, list) { 1513 list_del(&cmd->list); 1514 kfree(cmd); 1515 } 1516 } 1517 1518 /** 1519 * spi_nor_init_erase_cmd_list() - initialize erase command list 1520 * @nor: pointer to a 'struct spi_nor' 1521 * @erase_list: list of erase commands to be executed once we validate that the 1522 * erase can be performed 1523 * @addr: offset in the serial flash memory 1524 * @len: number of bytes to erase 1525 * 1526 * Builds the list of best fitted erase commands and verifies if the erase can 1527 * be performed. 1528 * 1529 * Return: 0 on success, -errno otherwise. 
1530 */ 1531 static int spi_nor_init_erase_cmd_list(struct spi_nor *nor, 1532 struct list_head *erase_list, 1533 u64 addr, u32 len) 1534 { 1535 const struct spi_nor_erase_map *map = &nor->params->erase_map; 1536 const struct spi_nor_erase_type *erase, *prev_erase = NULL; 1537 struct spi_nor_erase_region *region; 1538 struct spi_nor_erase_command *cmd = NULL; 1539 u64 region_end; 1540 int ret = -EINVAL; 1541 1542 region = spi_nor_find_erase_region(map, addr); 1543 if (IS_ERR(region)) 1544 return PTR_ERR(region); 1545 1546 region_end = spi_nor_region_end(region); 1547 1548 while (len) { 1549 erase = spi_nor_find_best_erase_type(map, region, addr, len); 1550 if (!erase) 1551 goto destroy_erase_cmd_list; 1552 1553 if (prev_erase != erase || 1554 erase->size != cmd->size || 1555 region->offset & SNOR_OVERLAID_REGION) { 1556 cmd = spi_nor_init_erase_cmd(region, erase); 1557 if (IS_ERR(cmd)) { 1558 ret = PTR_ERR(cmd); 1559 goto destroy_erase_cmd_list; 1560 } 1561 1562 list_add_tail(&cmd->list, erase_list); 1563 } else { 1564 cmd->count++; 1565 } 1566 1567 addr += cmd->size; 1568 len -= cmd->size; 1569 1570 if (len && addr >= region_end) { 1571 region = spi_nor_region_next(region); 1572 if (!region) 1573 goto destroy_erase_cmd_list; 1574 region_end = spi_nor_region_end(region); 1575 } 1576 1577 prev_erase = erase; 1578 } 1579 1580 return 0; 1581 1582 destroy_erase_cmd_list: 1583 spi_nor_destroy_erase_cmd_list(erase_list); 1584 return ret; 1585 } 1586 1587 /** 1588 * spi_nor_erase_multi_sectors() - perform a non-uniform erase 1589 * @nor: pointer to a 'struct spi_nor' 1590 * @addr: offset in the serial flash memory 1591 * @len: number of bytes to erase 1592 * 1593 * Build a list of best fitted erase commands and execute it once we validate 1594 * that the erase can be performed. 1595 * 1596 * Return: 0 on success, -errno otherwise. 
1597 */ 1598 static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len) 1599 { 1600 LIST_HEAD(erase_list); 1601 struct spi_nor_erase_command *cmd, *next; 1602 int ret; 1603 1604 ret = spi_nor_init_erase_cmd_list(nor, &erase_list, addr, len); 1605 if (ret) 1606 return ret; 1607 1608 list_for_each_entry_safe(cmd, next, &erase_list, list) { 1609 nor->erase_opcode = cmd->opcode; 1610 while (cmd->count) { 1611 dev_vdbg(nor->dev, "erase_cmd->size = 0x%08x, erase_cmd->opcode = 0x%02x, erase_cmd->count = %u\n", 1612 cmd->size, cmd->opcode, cmd->count); 1613 1614 ret = spi_nor_write_enable(nor); 1615 if (ret) 1616 goto destroy_erase_cmd_list; 1617 1618 ret = spi_nor_erase_sector(nor, addr); 1619 if (ret) 1620 goto destroy_erase_cmd_list; 1621 1622 ret = spi_nor_wait_till_ready(nor); 1623 if (ret) 1624 goto destroy_erase_cmd_list; 1625 1626 addr += cmd->size; 1627 cmd->count--; 1628 } 1629 list_del(&cmd->list); 1630 kfree(cmd); 1631 } 1632 1633 return 0; 1634 1635 destroy_erase_cmd_list: 1636 spi_nor_destroy_erase_cmd_list(&erase_list); 1637 return ret; 1638 } 1639 1640 /* 1641 * Erase an address range on the nor chip. The address range may extend 1642 * one or more erase sectors. Return an error if there is a problem erasing. 1643 */ 1644 static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr) 1645 { 1646 struct spi_nor *nor = mtd_to_spi_nor(mtd); 1647 u32 addr, len; 1648 uint32_t rem; 1649 int ret; 1650 1651 dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr, 1652 (long long)instr->len); 1653 1654 if (spi_nor_has_uniform_erase(nor)) { 1655 div_u64_rem(instr->len, mtd->erasesize, &rem); 1656 if (rem) 1657 return -EINVAL; 1658 } 1659 1660 addr = instr->addr; 1661 len = instr->len; 1662 1663 ret = spi_nor_lock_and_prep(nor); 1664 if (ret) 1665 return ret; 1666 1667 /* whole-chip erase? 
*/ 1668 if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) { 1669 unsigned long timeout; 1670 1671 ret = spi_nor_write_enable(nor); 1672 if (ret) 1673 goto erase_err; 1674 1675 ret = spi_nor_erase_chip(nor); 1676 if (ret) 1677 goto erase_err; 1678 1679 /* 1680 * Scale the timeout linearly with the size of the flash, with 1681 * a minimum calibrated to an old 2MB flash. We could try to 1682 * pull these from CFI/SFDP, but these values should be good 1683 * enough for now. 1684 */ 1685 timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES, 1686 CHIP_ERASE_2MB_READY_WAIT_JIFFIES * 1687 (unsigned long)(mtd->size / SZ_2M)); 1688 ret = spi_nor_wait_till_ready_with_timeout(nor, timeout); 1689 if (ret) 1690 goto erase_err; 1691 1692 /* REVISIT in some cases we could speed up erasing large regions 1693 * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K. We may have set up 1694 * to use "small sector erase", but that's not always optimal. 1695 */ 1696 1697 /* "sector"-at-a-time erase */ 1698 } else if (spi_nor_has_uniform_erase(nor)) { 1699 while (len) { 1700 ret = spi_nor_write_enable(nor); 1701 if (ret) 1702 goto erase_err; 1703 1704 ret = spi_nor_erase_sector(nor, addr); 1705 if (ret) 1706 goto erase_err; 1707 1708 ret = spi_nor_wait_till_ready(nor); 1709 if (ret) 1710 goto erase_err; 1711 1712 addr += mtd->erasesize; 1713 len -= mtd->erasesize; 1714 } 1715 1716 /* erase multiple sectors */ 1717 } else { 1718 ret = spi_nor_erase_multi_sectors(nor, addr, len); 1719 if (ret) 1720 goto erase_err; 1721 } 1722 1723 ret = spi_nor_write_disable(nor); 1724 1725 erase_err: 1726 spi_nor_unlock_and_unprep(nor); 1727 1728 return ret; 1729 } 1730 1731 /** 1732 * spi_nor_sr1_bit6_quad_enable() - Set the Quad Enable BIT(6) in the Status 1733 * Register 1. 1734 * @nor: pointer to a 'struct spi_nor' 1735 * 1736 * Bit 6 of the Status Register 1 is the QE bit for Macronix like QSPI memories. 1737 * 1738 * Return: 0 on success, -errno otherwise. 
1739 */ 1740 int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor) 1741 { 1742 int ret; 1743 1744 ret = spi_nor_read_sr(nor, nor->bouncebuf); 1745 if (ret) 1746 return ret; 1747 1748 if (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6) 1749 return 0; 1750 1751 nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6; 1752 1753 return spi_nor_write_sr1_and_check(nor, nor->bouncebuf[0]); 1754 } 1755 1756 /** 1757 * spi_nor_sr2_bit1_quad_enable() - set the Quad Enable BIT(1) in the Status 1758 * Register 2. 1759 * @nor: pointer to a 'struct spi_nor'. 1760 * 1761 * Bit 1 of the Status Register 2 is the QE bit for Spansion like QSPI memories. 1762 * 1763 * Return: 0 on success, -errno otherwise. 1764 */ 1765 int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor) 1766 { 1767 int ret; 1768 1769 if (nor->flags & SNOR_F_NO_READ_CR) 1770 return spi_nor_write_16bit_cr_and_check(nor, SR2_QUAD_EN_BIT1); 1771 1772 ret = spi_nor_read_cr(nor, nor->bouncebuf); 1773 if (ret) 1774 return ret; 1775 1776 if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1) 1777 return 0; 1778 1779 nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1; 1780 1781 return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]); 1782 } 1783 1784 /** 1785 * spi_nor_sr2_bit7_quad_enable() - set QE bit in Status Register 2. 1786 * @nor: pointer to a 'struct spi_nor' 1787 * 1788 * Set the Quad Enable (QE) bit in the Status Register 2. 1789 * 1790 * This is one of the procedures to set the QE bit described in the SFDP 1791 * (JESD216 rev B) specification but no manufacturer using this procedure has 1792 * been identified yet, hence the name of the function. 1793 * 1794 * Return: 0 on success, -errno otherwise. 1795 */ 1796 int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor) 1797 { 1798 u8 *sr2 = nor->bouncebuf; 1799 int ret; 1800 u8 sr2_written; 1801 1802 /* Check current Quad Enable bit value. */ 1803 ret = spi_nor_read_sr2(nor, sr2); 1804 if (ret) 1805 return ret; 1806 if (*sr2 & SR2_QUAD_EN_BIT7) 1807 return 0; 1808 1809 /* Update the Quad Enable bit. 
*/ 1810 *sr2 |= SR2_QUAD_EN_BIT7; 1811 1812 ret = spi_nor_write_sr2(nor, sr2); 1813 if (ret) 1814 return ret; 1815 1816 sr2_written = *sr2; 1817 1818 /* Read back and check it. */ 1819 ret = spi_nor_read_sr2(nor, sr2); 1820 if (ret) 1821 return ret; 1822 1823 if (*sr2 != sr2_written) { 1824 dev_dbg(nor->dev, "SR2: Read back test failed\n"); 1825 return -EIO; 1826 } 1827 1828 return 0; 1829 } 1830 1831 static const struct spi_nor_manufacturer *manufacturers[] = { 1832 &spi_nor_atmel, 1833 &spi_nor_catalyst, 1834 &spi_nor_eon, 1835 &spi_nor_esmt, 1836 &spi_nor_everspin, 1837 &spi_nor_fujitsu, 1838 &spi_nor_gigadevice, 1839 &spi_nor_intel, 1840 &spi_nor_issi, 1841 &spi_nor_macronix, 1842 &spi_nor_micron, 1843 &spi_nor_st, 1844 &spi_nor_spansion, 1845 &spi_nor_sst, 1846 &spi_nor_winbond, 1847 &spi_nor_xilinx, 1848 &spi_nor_xmc, 1849 }; 1850 1851 static const struct flash_info * 1852 spi_nor_search_part_by_id(const struct flash_info *parts, unsigned int nparts, 1853 const u8 *id) 1854 { 1855 unsigned int i; 1856 1857 for (i = 0; i < nparts; i++) { 1858 if (parts[i].id_len && 1859 !memcmp(parts[i].id, id, parts[i].id_len)) 1860 return &parts[i]; 1861 } 1862 1863 return NULL; 1864 } 1865 1866 static const struct flash_info *spi_nor_read_id(struct spi_nor *nor) 1867 { 1868 const struct flash_info *info; 1869 u8 *id = nor->bouncebuf; 1870 unsigned int i; 1871 int ret; 1872 1873 if (nor->spimem) { 1874 struct spi_mem_op op = 1875 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1), 1876 SPI_MEM_OP_NO_ADDR, 1877 SPI_MEM_OP_NO_DUMMY, 1878 SPI_MEM_OP_DATA_IN(SPI_NOR_MAX_ID_LEN, id, 1)); 1879 1880 ret = spi_mem_exec_op(nor->spimem, &op); 1881 } else { 1882 ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id, 1883 SPI_NOR_MAX_ID_LEN); 1884 } 1885 if (ret) { 1886 dev_dbg(nor->dev, "error %d reading JEDEC ID\n", ret); 1887 return ERR_PTR(ret); 1888 } 1889 1890 for (i = 0; i < ARRAY_SIZE(manufacturers); i++) { 1891 info = spi_nor_search_part_by_id(manufacturers[i]->parts, 1892 
manufacturers[i]->nparts, 1893 id); 1894 if (info) { 1895 nor->manufacturer = manufacturers[i]; 1896 return info; 1897 } 1898 } 1899 1900 dev_err(nor->dev, "unrecognized JEDEC id bytes: %*ph\n", 1901 SPI_NOR_MAX_ID_LEN, id); 1902 return ERR_PTR(-ENODEV); 1903 } 1904 1905 static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len, 1906 size_t *retlen, u_char *buf) 1907 { 1908 struct spi_nor *nor = mtd_to_spi_nor(mtd); 1909 ssize_t ret; 1910 1911 dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len); 1912 1913 ret = spi_nor_lock_and_prep(nor); 1914 if (ret) 1915 return ret; 1916 1917 while (len) { 1918 loff_t addr = from; 1919 1920 addr = spi_nor_convert_addr(nor, addr); 1921 1922 ret = spi_nor_read_data(nor, addr, len, buf); 1923 if (ret == 0) { 1924 /* We shouldn't see 0-length reads */ 1925 ret = -EIO; 1926 goto read_err; 1927 } 1928 if (ret < 0) 1929 goto read_err; 1930 1931 WARN_ON(ret > len); 1932 *retlen += ret; 1933 buf += ret; 1934 from += ret; 1935 len -= ret; 1936 } 1937 ret = 0; 1938 1939 read_err: 1940 spi_nor_unlock_and_unprep(nor); 1941 return ret; 1942 } 1943 1944 /* 1945 * Write an address range to the nor chip. Data must be written in 1946 * FLASH_PAGESIZE chunks. The address range may be any size provided 1947 * it is within the physical boundaries. 1948 */ 1949 static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len, 1950 size_t *retlen, const u_char *buf) 1951 { 1952 struct spi_nor *nor = mtd_to_spi_nor(mtd); 1953 size_t page_offset, page_remain, i; 1954 ssize_t ret; 1955 1956 dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len); 1957 1958 ret = spi_nor_lock_and_prep(nor); 1959 if (ret) 1960 return ret; 1961 1962 for (i = 0; i < len; ) { 1963 ssize_t written; 1964 loff_t addr = to + i; 1965 1966 /* 1967 * If page_size is a power of two, the offset can be quickly 1968 * calculated with an AND operation. On the other cases we 1969 * need to do a modulus operation (more expensive). 
1970 */ 1971 if (is_power_of_2(nor->page_size)) { 1972 page_offset = addr & (nor->page_size - 1); 1973 } else { 1974 uint64_t aux = addr; 1975 1976 page_offset = do_div(aux, nor->page_size); 1977 } 1978 /* the size of data remaining on the first page */ 1979 page_remain = min_t(size_t, 1980 nor->page_size - page_offset, len - i); 1981 1982 addr = spi_nor_convert_addr(nor, addr); 1983 1984 ret = spi_nor_write_enable(nor); 1985 if (ret) 1986 goto write_err; 1987 1988 ret = spi_nor_write_data(nor, addr, page_remain, buf + i); 1989 if (ret < 0) 1990 goto write_err; 1991 written = ret; 1992 1993 ret = spi_nor_wait_till_ready(nor); 1994 if (ret) 1995 goto write_err; 1996 *retlen += written; 1997 i += written; 1998 } 1999 2000 write_err: 2001 spi_nor_unlock_and_unprep(nor); 2002 return ret; 2003 } 2004 2005 static int spi_nor_check(struct spi_nor *nor) 2006 { 2007 if (!nor->dev || 2008 (!nor->spimem && !nor->controller_ops) || 2009 (!nor->spimem && nor->controller_ops && 2010 (!nor->controller_ops->read || 2011 !nor->controller_ops->write || 2012 !nor->controller_ops->read_reg || 2013 !nor->controller_ops->write_reg))) { 2014 pr_err("spi-nor: please fill all the necessary fields!\n"); 2015 return -EINVAL; 2016 } 2017 2018 if (nor->spimem && nor->controller_ops) { 2019 dev_err(nor->dev, "nor->spimem and nor->controller_ops are mutually exclusive, please set just one of them.\n"); 2020 return -EINVAL; 2021 } 2022 2023 return 0; 2024 } 2025 2026 void 2027 spi_nor_set_read_settings(struct spi_nor_read_command *read, 2028 u8 num_mode_clocks, 2029 u8 num_wait_states, 2030 u8 opcode, 2031 enum spi_nor_protocol proto) 2032 { 2033 read->num_mode_clocks = num_mode_clocks; 2034 read->num_wait_states = num_wait_states; 2035 read->opcode = opcode; 2036 read->proto = proto; 2037 } 2038 2039 void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode, 2040 enum spi_nor_protocol proto) 2041 { 2042 pp->opcode = opcode; 2043 pp->proto = proto; 2044 } 2045 2046 static int 
spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size) 2047 { 2048 size_t i; 2049 2050 for (i = 0; i < size; i++) 2051 if (table[i][0] == (int)hwcaps) 2052 return table[i][1]; 2053 2054 return -EINVAL; 2055 } 2056 2057 int spi_nor_hwcaps_read2cmd(u32 hwcaps) 2058 { 2059 static const int hwcaps_read2cmd[][2] = { 2060 { SNOR_HWCAPS_READ, SNOR_CMD_READ }, 2061 { SNOR_HWCAPS_READ_FAST, SNOR_CMD_READ_FAST }, 2062 { SNOR_HWCAPS_READ_1_1_1_DTR, SNOR_CMD_READ_1_1_1_DTR }, 2063 { SNOR_HWCAPS_READ_1_1_2, SNOR_CMD_READ_1_1_2 }, 2064 { SNOR_HWCAPS_READ_1_2_2, SNOR_CMD_READ_1_2_2 }, 2065 { SNOR_HWCAPS_READ_2_2_2, SNOR_CMD_READ_2_2_2 }, 2066 { SNOR_HWCAPS_READ_1_2_2_DTR, SNOR_CMD_READ_1_2_2_DTR }, 2067 { SNOR_HWCAPS_READ_1_1_4, SNOR_CMD_READ_1_1_4 }, 2068 { SNOR_HWCAPS_READ_1_4_4, SNOR_CMD_READ_1_4_4 }, 2069 { SNOR_HWCAPS_READ_4_4_4, SNOR_CMD_READ_4_4_4 }, 2070 { SNOR_HWCAPS_READ_1_4_4_DTR, SNOR_CMD_READ_1_4_4_DTR }, 2071 { SNOR_HWCAPS_READ_1_1_8, SNOR_CMD_READ_1_1_8 }, 2072 { SNOR_HWCAPS_READ_1_8_8, SNOR_CMD_READ_1_8_8 }, 2073 { SNOR_HWCAPS_READ_8_8_8, SNOR_CMD_READ_8_8_8 }, 2074 { SNOR_HWCAPS_READ_1_8_8_DTR, SNOR_CMD_READ_1_8_8_DTR }, 2075 { SNOR_HWCAPS_READ_8_8_8_DTR, SNOR_CMD_READ_8_8_8_DTR }, 2076 }; 2077 2078 return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd, 2079 ARRAY_SIZE(hwcaps_read2cmd)); 2080 } 2081 2082 static int spi_nor_hwcaps_pp2cmd(u32 hwcaps) 2083 { 2084 static const int hwcaps_pp2cmd[][2] = { 2085 { SNOR_HWCAPS_PP, SNOR_CMD_PP }, 2086 { SNOR_HWCAPS_PP_1_1_4, SNOR_CMD_PP_1_1_4 }, 2087 { SNOR_HWCAPS_PP_1_4_4, SNOR_CMD_PP_1_4_4 }, 2088 { SNOR_HWCAPS_PP_4_4_4, SNOR_CMD_PP_4_4_4 }, 2089 { SNOR_HWCAPS_PP_1_1_8, SNOR_CMD_PP_1_1_8 }, 2090 { SNOR_HWCAPS_PP_1_8_8, SNOR_CMD_PP_1_8_8 }, 2091 { SNOR_HWCAPS_PP_8_8_8, SNOR_CMD_PP_8_8_8 }, 2092 { SNOR_HWCAPS_PP_8_8_8_DTR, SNOR_CMD_PP_8_8_8_DTR }, 2093 }; 2094 2095 return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd, 2096 ARRAY_SIZE(hwcaps_pp2cmd)); 2097 } 2098 2099 /** 2100 * spi_nor_spimem_check_op - check if the 
operation is supported 2101 * by controller 2102 *@nor: pointer to a 'struct spi_nor' 2103 *@op: pointer to op template to be checked 2104 * 2105 * Returns 0 if operation is supported, -EOPNOTSUPP otherwise. 2106 */ 2107 static int spi_nor_spimem_check_op(struct spi_nor *nor, 2108 struct spi_mem_op *op) 2109 { 2110 /* 2111 * First test with 4 address bytes. The opcode itself might 2112 * be a 3B addressing opcode but we don't care, because 2113 * SPI controller implementation should not check the opcode, 2114 * but just the sequence. 2115 */ 2116 op->addr.nbytes = 4; 2117 if (!spi_mem_supports_op(nor->spimem, op)) { 2118 if (nor->mtd.size > SZ_16M) 2119 return -EOPNOTSUPP; 2120 2121 /* If flash size <= 16MB, 3 address bytes are sufficient */ 2122 op->addr.nbytes = 3; 2123 if (!spi_mem_supports_op(nor->spimem, op)) 2124 return -EOPNOTSUPP; 2125 } 2126 2127 return 0; 2128 } 2129 2130 /** 2131 * spi_nor_spimem_check_readop - check if the read op is supported 2132 * by controller 2133 *@nor: pointer to a 'struct spi_nor' 2134 *@read: pointer to op template to be checked 2135 * 2136 * Returns 0 if operation is supported, -EOPNOTSUPP otherwise. 
2137 */ 2138 static int spi_nor_spimem_check_readop(struct spi_nor *nor, 2139 const struct spi_nor_read_command *read) 2140 { 2141 struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(read->opcode, 0), 2142 SPI_MEM_OP_ADDR(3, 0, 0), 2143 SPI_MEM_OP_DUMMY(1, 0), 2144 SPI_MEM_OP_DATA_IN(1, NULL, 0)); 2145 2146 spi_nor_spimem_setup_op(nor, &op, read->proto); 2147 2148 /* convert the dummy cycles to the number of bytes */ 2149 op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8; 2150 if (spi_nor_protocol_is_dtr(nor->read_proto)) 2151 op.dummy.nbytes *= 2; 2152 2153 return spi_nor_spimem_check_op(nor, &op); 2154 } 2155 2156 /** 2157 * spi_nor_spimem_check_pp - check if the page program op is supported 2158 * by controller 2159 *@nor: pointer to a 'struct spi_nor' 2160 *@pp: pointer to op template to be checked 2161 * 2162 * Returns 0 if operation is supported, -EOPNOTSUPP otherwise. 2163 */ 2164 static int spi_nor_spimem_check_pp(struct spi_nor *nor, 2165 const struct spi_nor_pp_command *pp) 2166 { 2167 struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(pp->opcode, 0), 2168 SPI_MEM_OP_ADDR(3, 0, 0), 2169 SPI_MEM_OP_NO_DUMMY, 2170 SPI_MEM_OP_DATA_OUT(1, NULL, 0)); 2171 2172 spi_nor_spimem_setup_op(nor, &op, pp->proto); 2173 2174 return spi_nor_spimem_check_op(nor, &op); 2175 } 2176 2177 /** 2178 * spi_nor_spimem_adjust_hwcaps - Find optimal Read/Write protocol 2179 * based on SPI controller capabilities 2180 * @nor: pointer to a 'struct spi_nor' 2181 * @hwcaps: pointer to resulting capabilities after adjusting 2182 * according to controller and flash's capability 2183 */ 2184 static void 2185 spi_nor_spimem_adjust_hwcaps(struct spi_nor *nor, u32 *hwcaps) 2186 { 2187 struct spi_nor_flash_parameter *params = nor->params; 2188 unsigned int cap; 2189 2190 /* X-X-X modes are not supported yet, mask them all. */ 2191 *hwcaps &= ~SNOR_HWCAPS_X_X_X; 2192 2193 /* 2194 * If the reset line is broken, we do not want to enter a stateful 2195 * mode. 
2196 */ 2197 if (nor->flags & SNOR_F_BROKEN_RESET) 2198 *hwcaps &= ~(SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR); 2199 2200 for (cap = 0; cap < sizeof(*hwcaps) * BITS_PER_BYTE; cap++) { 2201 int rdidx, ppidx; 2202 2203 if (!(*hwcaps & BIT(cap))) 2204 continue; 2205 2206 rdidx = spi_nor_hwcaps_read2cmd(BIT(cap)); 2207 if (rdidx >= 0 && 2208 spi_nor_spimem_check_readop(nor, ¶ms->reads[rdidx])) 2209 *hwcaps &= ~BIT(cap); 2210 2211 ppidx = spi_nor_hwcaps_pp2cmd(BIT(cap)); 2212 if (ppidx < 0) 2213 continue; 2214 2215 if (spi_nor_spimem_check_pp(nor, 2216 ¶ms->page_programs[ppidx])) 2217 *hwcaps &= ~BIT(cap); 2218 } 2219 } 2220 2221 /** 2222 * spi_nor_set_erase_type() - set a SPI NOR erase type 2223 * @erase: pointer to a structure that describes a SPI NOR erase type 2224 * @size: the size of the sector/block erased by the erase type 2225 * @opcode: the SPI command op code to erase the sector/block 2226 */ 2227 void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size, 2228 u8 opcode) 2229 { 2230 erase->size = size; 2231 erase->opcode = opcode; 2232 /* JEDEC JESD216B Standard imposes erase sizes to be power of 2. 
*/ 2233 erase->size_shift = ffs(erase->size) - 1; 2234 erase->size_mask = (1 << erase->size_shift) - 1; 2235 } 2236 2237 /** 2238 * spi_nor_init_uniform_erase_map() - Initialize uniform erase map 2239 * @map: the erase map of the SPI NOR 2240 * @erase_mask: bitmask encoding erase types that can erase the entire 2241 * flash memory 2242 * @flash_size: the spi nor flash memory size 2243 */ 2244 void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map, 2245 u8 erase_mask, u64 flash_size) 2246 { 2247 /* Offset 0 with erase_mask and SNOR_LAST_REGION bit set */ 2248 map->uniform_region.offset = (erase_mask & SNOR_ERASE_TYPE_MASK) | 2249 SNOR_LAST_REGION; 2250 map->uniform_region.size = flash_size; 2251 map->regions = &map->uniform_region; 2252 map->uniform_erase_type = erase_mask; 2253 } 2254 2255 int spi_nor_post_bfpt_fixups(struct spi_nor *nor, 2256 const struct sfdp_parameter_header *bfpt_header, 2257 const struct sfdp_bfpt *bfpt) 2258 { 2259 int ret; 2260 2261 if (nor->manufacturer && nor->manufacturer->fixups && 2262 nor->manufacturer->fixups->post_bfpt) { 2263 ret = nor->manufacturer->fixups->post_bfpt(nor, bfpt_header, 2264 bfpt); 2265 if (ret) 2266 return ret; 2267 } 2268 2269 if (nor->info->fixups && nor->info->fixups->post_bfpt) 2270 return nor->info->fixups->post_bfpt(nor, bfpt_header, bfpt); 2271 2272 return 0; 2273 } 2274 2275 static int spi_nor_select_read(struct spi_nor *nor, 2276 u32 shared_hwcaps) 2277 { 2278 int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1; 2279 const struct spi_nor_read_command *read; 2280 2281 if (best_match < 0) 2282 return -EINVAL; 2283 2284 cmd = spi_nor_hwcaps_read2cmd(BIT(best_match)); 2285 if (cmd < 0) 2286 return -EINVAL; 2287 2288 read = &nor->params->reads[cmd]; 2289 nor->read_opcode = read->opcode; 2290 nor->read_proto = read->proto; 2291 2292 /* 2293 * In the SPI NOR framework, we don't need to make the difference 2294 * between mode clock cycles and wait state clock cycles. 
2295 * Indeed, the value of the mode clock cycles is used by a QSPI 2296 * flash memory to know whether it should enter or leave its 0-4-4 2297 * (Continuous Read / XIP) mode. 2298 * eXecution In Place is out of the scope of the mtd sub-system. 2299 * Hence we choose to merge both mode and wait state clock cycles 2300 * into the so called dummy clock cycles. 2301 */ 2302 nor->read_dummy = read->num_mode_clocks + read->num_wait_states; 2303 return 0; 2304 } 2305 2306 static int spi_nor_select_pp(struct spi_nor *nor, 2307 u32 shared_hwcaps) 2308 { 2309 int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1; 2310 const struct spi_nor_pp_command *pp; 2311 2312 if (best_match < 0) 2313 return -EINVAL; 2314 2315 cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match)); 2316 if (cmd < 0) 2317 return -EINVAL; 2318 2319 pp = &nor->params->page_programs[cmd]; 2320 nor->program_opcode = pp->opcode; 2321 nor->write_proto = pp->proto; 2322 return 0; 2323 } 2324 2325 /** 2326 * spi_nor_select_uniform_erase() - select optimum uniform erase type 2327 * @map: the erase map of the SPI NOR 2328 * @wanted_size: the erase type size to search for. Contains the value of 2329 * info->sector_size or of the "small sector" size in case 2330 * CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is defined. 2331 * 2332 * Once the optimum uniform sector erase command is found, disable all the 2333 * other. 2334 * 2335 * Return: pointer to erase type on success, NULL otherwise. 
2336 */ 2337 static const struct spi_nor_erase_type * 2338 spi_nor_select_uniform_erase(struct spi_nor_erase_map *map, 2339 const u32 wanted_size) 2340 { 2341 const struct spi_nor_erase_type *tested_erase, *erase = NULL; 2342 int i; 2343 u8 uniform_erase_type = map->uniform_erase_type; 2344 2345 for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) { 2346 if (!(uniform_erase_type & BIT(i))) 2347 continue; 2348 2349 tested_erase = &map->erase_type[i]; 2350 2351 /* 2352 * If the current erase size is the one, stop here: 2353 * we have found the right uniform Sector Erase command. 2354 */ 2355 if (tested_erase->size == wanted_size) { 2356 erase = tested_erase; 2357 break; 2358 } 2359 2360 /* 2361 * Otherwise, the current erase size is still a valid candidate. 2362 * Select the biggest valid candidate. 2363 */ 2364 if (!erase && tested_erase->size) 2365 erase = tested_erase; 2366 /* keep iterating to find the wanted_size */ 2367 } 2368 2369 if (!erase) 2370 return NULL; 2371 2372 /* Disable all other Sector Erase commands. */ 2373 map->uniform_erase_type &= ~SNOR_ERASE_TYPE_MASK; 2374 map->uniform_erase_type |= BIT(erase - map->erase_type); 2375 return erase; 2376 } 2377 2378 static int spi_nor_select_erase(struct spi_nor *nor) 2379 { 2380 struct spi_nor_erase_map *map = &nor->params->erase_map; 2381 const struct spi_nor_erase_type *erase = NULL; 2382 struct mtd_info *mtd = &nor->mtd; 2383 u32 wanted_size = nor->info->sector_size; 2384 int i; 2385 2386 /* 2387 * The previous implementation handling Sector Erase commands assumed 2388 * that the SPI flash memory has an uniform layout then used only one 2389 * of the supported erase sizes for all Sector Erase commands. 2390 * So to be backward compatible, the new implementation also tries to 2391 * manage the SPI flash memory as uniform with a single erase sector 2392 * size, when possible. 
2393 */ 2394 #ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS 2395 /* prefer "small sector" erase if possible */ 2396 wanted_size = 4096u; 2397 #endif 2398 2399 if (spi_nor_has_uniform_erase(nor)) { 2400 erase = spi_nor_select_uniform_erase(map, wanted_size); 2401 if (!erase) 2402 return -EINVAL; 2403 nor->erase_opcode = erase->opcode; 2404 mtd->erasesize = erase->size; 2405 return 0; 2406 } 2407 2408 /* 2409 * For non-uniform SPI flash memory, set mtd->erasesize to the 2410 * maximum erase sector size. No need to set nor->erase_opcode. 2411 */ 2412 for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) { 2413 if (map->erase_type[i].size) { 2414 erase = &map->erase_type[i]; 2415 break; 2416 } 2417 } 2418 2419 if (!erase) 2420 return -EINVAL; 2421 2422 mtd->erasesize = erase->size; 2423 return 0; 2424 } 2425 2426 static int spi_nor_default_setup(struct spi_nor *nor, 2427 const struct spi_nor_hwcaps *hwcaps) 2428 { 2429 struct spi_nor_flash_parameter *params = nor->params; 2430 u32 ignored_mask, shared_mask; 2431 int err; 2432 2433 /* 2434 * Keep only the hardware capabilities supported by both the SPI 2435 * controller and the SPI flash memory. 2436 */ 2437 shared_mask = hwcaps->mask & params->hwcaps.mask; 2438 2439 if (nor->spimem) { 2440 /* 2441 * When called from spi_nor_probe(), all caps are set and we 2442 * need to discard some of them based on what the SPI 2443 * controller actually supports (using spi_mem_supports_op()). 2444 */ 2445 spi_nor_spimem_adjust_hwcaps(nor, &shared_mask); 2446 } else { 2447 /* 2448 * SPI n-n-n protocols are not supported when the SPI 2449 * controller directly implements the spi_nor interface. 2450 * Yet another reason to switch to spi-mem. 2451 */ 2452 ignored_mask = SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR; 2453 if (shared_mask & ignored_mask) { 2454 dev_dbg(nor->dev, 2455 "SPI n-n-n protocols are not supported.\n"); 2456 shared_mask &= ~ignored_mask; 2457 } 2458 } 2459 2460 /* Select the (Fast) Read command. 
*/
	err = spi_nor_select_read(nor, shared_mask);
	if (err) {
		dev_dbg(nor->dev,
			"can't select read settings supported by both the SPI controller and memory.\n");
		return err;
	}

	/* Select the Page Program command. */
	err = spi_nor_select_pp(nor, shared_mask);
	if (err) {
		dev_dbg(nor->dev,
			"can't select write settings supported by both the SPI controller and memory.\n");
		return err;
	}

	/* Select the Sector Erase command. */
	err = spi_nor_select_erase(nor);
	if (err) {
		dev_dbg(nor->dev,
			"can't select erase settings supported by both the SPI controller and memory.\n");
		return err;
	}

	return 0;
}

/**
 * spi_nor_setup() - invoke the ->setup() hook, if any
 * @nor:	pointer to a 'struct spi_nor'
 * @hwcaps:	hardware capabilities advertised by the SPI controller
 *
 * Return: 0 when no hook is set, otherwise the hook's return value.
 */
static int spi_nor_setup(struct spi_nor *nor,
			 const struct spi_nor_hwcaps *hwcaps)
{
	if (!nor->params->setup)
		return 0;

	return nor->params->setup(nor, hwcaps);
}

/**
 * spi_nor_manufacturer_init_params() - Initialize the flash's parameters and
 * settings based on MFR register and ->default_init() hook.
 * @nor:	pointer to a 'struct spi_nor'.
 *
 * The manufacturer hook runs before the flash_info one, so the latter can
 * override it.
 */
static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
{
	if (nor->manufacturer && nor->manufacturer->fixups &&
	    nor->manufacturer->fixups->default_init)
		nor->manufacturer->fixups->default_init(nor);

	if (nor->info->fixups && nor->info->fixups->default_init)
		nor->info->fixups->default_init(nor);
}

/**
 * spi_nor_sfdp_init_params() - Initialize the flash's parameters and settings
 * based on JESD216 SFDP standard.
 * @nor:	pointer to a 'struct spi_nor'.
 *
 * The method has a roll-back mechanism: in case the SFDP parsing fails, the
 * legacy flash parameters and settings will be restored.
 */
static void spi_nor_sfdp_init_params(struct spi_nor *nor)
{
	struct spi_nor_flash_parameter sfdp_params;

	/* Snapshot the legacy parameters before SFDP may overwrite them. */
	memcpy(&sfdp_params, nor->params, sizeof(sfdp_params));

	if (spi_nor_parse_sfdp(nor)) {
		/* Parsing failed: roll back to the legacy parameters. */
		memcpy(nor->params, &sfdp_params, sizeof(*nor->params));
		nor->addr_width = 0;
		nor->flags &= ~SNOR_F_4B_OPCODES;
	}
}

/**
 * spi_nor_info_init_params() - Initialize the flash's parameters and settings
 * based on nor->info data.
 * @nor:	pointer to a 'struct spi_nor'.
 */
static void spi_nor_info_init_params(struct spi_nor *nor)
{
	struct spi_nor_flash_parameter *params = nor->params;
	struct spi_nor_erase_map *map = &params->erase_map;
	const struct flash_info *info = nor->info;
	struct device_node *np = spi_nor_get_flash_node(nor);
	u8 i, erase_mask;

	/* Initialize default flash parameters and settings. */
	params->quad_enable = spi_nor_sr2_bit1_quad_enable;
	params->set_4byte_addr_mode = spansion_set_4byte_addr_mode;
	params->setup = spi_nor_default_setup;
	params->otp.org = &info->otp_org;

	/* Default to 16-bit Write Status (01h) Command */
	nor->flags |= SNOR_F_HAS_16BIT_SR;

	/* Set SPI NOR sizes. */
	params->writesize = 1;
	params->size = (u64)info->sector_size * info->n_sectors;
	params->page_size = info->page_size;

	if (!(info->flags & SPI_NOR_NO_FR)) {
		/* Default to Fast Read for DT and non-DT platform devices. */
		params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;

		/* Mask out Fast Read if not requested at DT instantiation. */
		if (np && !of_property_read_bool(np, "m25p,fast-read"))
			params->hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
	}

	/* (Fast) Read settings.
*/
	params->hwcaps.mask |= SNOR_HWCAPS_READ;
	spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
				  0, 0, SPINOR_OP_READ,
				  SNOR_PROTO_1_1_1);

	if (params->hwcaps.mask & SNOR_HWCAPS_READ_FAST)
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
					  0, 8, SPINOR_OP_READ_FAST,
					  SNOR_PROTO_1_1_1);

	if (info->flags & SPI_NOR_DUAL_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
					  0, 8, SPINOR_OP_READ_1_1_2,
					  SNOR_PROTO_1_1_2);
	}

	if (info->flags & SPI_NOR_QUAD_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
					  0, 8, SPINOR_OP_READ_1_1_4,
					  SNOR_PROTO_1_1_4);
	}

	if (info->flags & SPI_NOR_OCTAL_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
					  0, 8, SPINOR_OP_READ_1_1_8,
					  SNOR_PROTO_1_1_8);
	}

	if (info->flags & SPI_NOR_OCTAL_DTR_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_8_8_8_DTR;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_8_8_8_DTR],
					  0, 20, SPINOR_OP_READ_FAST,
					  SNOR_PROTO_8_8_8_DTR);
	}

	/* Page Program settings. */
	params->hwcaps.mask |= SNOR_HWCAPS_PP;
	spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
				SPINOR_OP_PP, SNOR_PROTO_1_1_1);

	if (info->flags & SPI_NOR_OCTAL_DTR_PP) {
		params->hwcaps.mask |= SNOR_HWCAPS_PP_8_8_8_DTR;
		/*
		 * Since xSPI Page Program opcode is backward compatible with
		 * Legacy SPI, use Legacy SPI opcode there as well.
		 */
		spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_8_8_8_DTR],
					SPINOR_OP_PP, SNOR_PROTO_8_8_8_DTR);
	}

	/*
	 * Sector Erase settings. Sort Erase Types in ascending order, with the
	 * smallest erase size starting at BIT(0).
	 */
	erase_mask = 0;
	i = 0;
	if (info->flags & SECT_4K_PMC) {
		erase_mask |= BIT(i);
		spi_nor_set_erase_type(&map->erase_type[i], 4096u,
				       SPINOR_OP_BE_4K_PMC);
		i++;
	} else if (info->flags & SECT_4K) {
		erase_mask |= BIT(i);
		spi_nor_set_erase_type(&map->erase_type[i], 4096u,
				       SPINOR_OP_BE_4K);
		i++;
	}
	/* The sector_size erase type is always present. */
	erase_mask |= BIT(i);
	spi_nor_set_erase_type(&map->erase_type[i], info->sector_size,
			       SPINOR_OP_SE);
	spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
}

/**
 * spi_nor_post_sfdp_fixups() - Updates the flash's parameters and settings
 * after SFDP has been parsed (is also called for SPI NORs that do not
 * support RDSFDP).
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Typically used to tweak various parameters that could not be extracted by
 * other means (i.e. when information provided by the SFDP/flash_info tables
 * are incomplete or wrong).
 */
static void spi_nor_post_sfdp_fixups(struct spi_nor *nor)
{
	if (nor->manufacturer && nor->manufacturer->fixups &&
	    nor->manufacturer->fixups->post_sfdp)
		nor->manufacturer->fixups->post_sfdp(nor);

	if (nor->info->fixups && nor->info->fixups->post_sfdp)
		nor->info->fixups->post_sfdp(nor);
}

/**
 * spi_nor_late_init_params() - Late initialization of default flash parameters.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Used to set default flash parameters and settings when the ->default_init()
 * hook or the SFDP parser let voids.
 */
static void spi_nor_late_init_params(struct spi_nor *nor)
{
	/*
	 * NOR protection support. When locking_ops are not provided, we pick
	 * the default ones.
*/
	if (nor->flags & SNOR_F_HAS_LOCK && !nor->params->locking_ops)
		spi_nor_init_default_locking_ops(nor);
}

/**
 * spi_nor_init_params() - Initialize the flash's parameters and settings.
 * @nor:	pointer to a 'struct spi_nor'.
 *
 * The flash parameters and settings are initialized based on a sequence of
 * calls that are ordered by priority:
 *
 * 1/ Default flash parameters initialization. The initializations are done
 *    based on nor->info data:
 *		spi_nor_info_init_params()
 *
 * which can be overwritten by:
 * 2/ Manufacturer flash parameters initialization. The initializations are
 *    done based on MFR register, or when the decisions can not be done solely
 *    based on MFR, by using specific flash_info tweeks, ->default_init():
 *		spi_nor_manufacturer_init_params()
 *
 * which can be overwritten by:
 * 3/ SFDP flash parameters initialization. JESD216 SFDP is a standard and
 *    should be more accurate that the above.
 *		spi_nor_sfdp_init_params()
 *
 * Please note that there is a ->post_bfpt() fixup hook that can overwrite
 * the flash parameters and settings immediately after parsing the Basic
 * Flash Parameter Table.
 *
 * which can be overwritten by:
 * 4/ Post SFDP flash parameters initialization. Used to tweak various
 *    parameters that could not be extracted by other means (i.e. when
 *    information provided by the SFDP/flash_info tables are incomplete or
 *    wrong).
 *		spi_nor_post_sfdp_fixups()
 *
 * 5/ Late default flash parameters initialization, used when the
 *    ->default_init() hook or the SFDP parser do not set specific params.
 *		spi_nor_late_init_params()
 *
 * Return: 0 on success, -ENOMEM if nor->params can't be allocated.
 */
static int spi_nor_init_params(struct spi_nor *nor)
{
	nor->params = devm_kzalloc(nor->dev, sizeof(*nor->params), GFP_KERNEL);
	if (!nor->params)
		return -ENOMEM;

	spi_nor_info_init_params(nor);

	spi_nor_manufacturer_init_params(nor);

	/*
	 * SFDP is only parsed when the flash advertises a multi-I/O read
	 * mode and does not set SPI_NOR_SKIP_SFDP.
	 */
	if ((nor->info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
	    SPI_NOR_OCTAL_READ | SPI_NOR_OCTAL_DTR_READ)) &&
	    !(nor->info->flags & SPI_NOR_SKIP_SFDP))
		spi_nor_sfdp_init_params(nor);

	spi_nor_post_sfdp_fixups(nor);

	spi_nor_late_init_params(nor);

	return 0;
}

/**
 * spi_nor_octal_dtr_enable() - enable Octal DTR I/O if needed
 * @nor:	pointer to a 'struct spi_nor'
 * @enable:	whether to enable or disable Octal DTR
 *
 * Only acts when the flash provides an ->octal_dtr_enable() hook, both the
 * read and write protocols are 8D-8D-8D, and SNOR_F_IO_MODE_EN_VOLATILE is
 * set; nor->reg_proto is updated on success.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_octal_dtr_enable(struct spi_nor *nor, bool enable)
{
	int ret;

	if (!nor->params->octal_dtr_enable)
		return 0;

	if (!(nor->read_proto == SNOR_PROTO_8_8_8_DTR &&
	      nor->write_proto == SNOR_PROTO_8_8_8_DTR))
		return 0;

	if (!(nor->flags & SNOR_F_IO_MODE_EN_VOLATILE))
		return 0;

	ret = nor->params->octal_dtr_enable(nor, enable);
	if (ret)
		return ret;

	if (enable)
		nor->reg_proto = SNOR_PROTO_8_8_8_DTR;
	else
		nor->reg_proto = SNOR_PROTO_1_1_1;

	return 0;
}

/**
 * spi_nor_quad_enable() - enable Quad I/O if needed.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Return: 0 on success, -errno otherwise.
*/
static int spi_nor_quad_enable(struct spi_nor *nor)
{
	if (!nor->params->quad_enable)
		return 0;

	/* Only needed when reads or writes actually use 4 I/O lines. */
	if (!(spi_nor_get_protocol_width(nor->read_proto) == 4 ||
	      spi_nor_get_protocol_width(nor->write_proto) == 4))
		return 0;

	return nor->params->quad_enable(nor);
}

/**
 * spi_nor_init() - send the required SPI flash commands to initialize the
 *		    device.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Enables Octal DTR and Quad modes when needed, optionally unlocks the
 * entire flash array depending on the kernel configuration, and enters
 * 4-byte address mode on flashes that need it.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_init(struct spi_nor *nor)
{
	int err;

	err = spi_nor_octal_dtr_enable(nor, true);
	if (err) {
		dev_dbg(nor->dev, "octal mode not supported\n");
		return err;
	}

	err = spi_nor_quad_enable(nor);
	if (err) {
		dev_dbg(nor->dev, "quad mode not supported\n");
		return err;
	}

	/*
	 * Some SPI NOR flashes are write protected by default after a power-on
	 * reset cycle, in order to avoid inadvertent writes during power-up.
	 * Backward compatibility imposes to unlock the entire flash memory
	 * array at power-up by default. Depending on the kernel configuration
	 * (1) do nothing, (2) always unlock the entire flash array or (3)
	 * unlock the entire flash array only when the software write
	 * protection bits are volatile. The latter is indicated by
	 * SNOR_F_SWP_IS_VOLATILE.
	 */
	if (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE) ||
	    (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE) &&
	     nor->flags & SNOR_F_SWP_IS_VOLATILE))
		spi_nor_try_unlock_all(nor);

	if (nor->addr_width == 4 &&
	    nor->read_proto != SNOR_PROTO_8_8_8_DTR &&
	    !(nor->flags & SNOR_F_4B_OPCODES)) {
		/*
		 * If the RESET# pin isn't hooked up properly, or the system
		 * otherwise doesn't perform a reset command in the boot
		 * sequence, it's impossible to 100% protect against unexpected
		 * reboots (e.g., crashes). Warn the user (or hopefully, system
		 * designer) that this is bad.
		 */
		WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
			  "enabling reset hack; may not recover from unexpected reboots\n");
		nor->params->set_4byte_addr_mode(nor, true);
	}

	return 0;
}

/**
 * spi_nor_soft_reset() - Perform a software reset
 * @nor:	pointer to 'struct spi_nor'
 *
 * Performs a "Soft Reset and Enter Default Protocol Mode" sequence which resets
 * the device to its power-on-reset state. This is useful when the software has
 * made some changes to device (volatile) registers and needs to reset it before
 * shutting down, for example.
 *
 * Not every flash supports this sequence. The same set of opcodes might be used
 * for some other operation on a flash that does not support this. Support for
 * this sequence can be discovered via SFDP in the BFPT table.
 *
 * Returns nothing; failures are only reported via dev_warn() since nothing
 * can be done about them at this point.
 */
static void spi_nor_soft_reset(struct spi_nor *nor)
{
	struct spi_mem_op op;
	int ret;

	/* Send the Software Reset Enable opcode first... */
	op = (struct spi_mem_op)SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_SRSTEN, 0),
			SPI_MEM_OP_NO_DUMMY,
			SPI_MEM_OP_NO_ADDR,
			SPI_MEM_OP_NO_DATA);

	spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

	ret = spi_mem_exec_op(nor->spimem, &op);
	if (ret) {
		dev_warn(nor->dev, "Software reset failed: %d\n", ret);
		return;
	}

	/* ...then the Software Reset opcode itself. */
	op = (struct spi_mem_op)SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_SRST, 0),
			SPI_MEM_OP_NO_DUMMY,
			SPI_MEM_OP_NO_ADDR,
			SPI_MEM_OP_NO_DATA);

	spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

	ret = spi_mem_exec_op(nor->spimem, &op);
	if (ret) {
		dev_warn(nor->dev, "Software reset failed: %d\n", ret);
		return;
	}

	/*
	 * Software Reset is not instant, and the delay varies from flash to
	 * flash. Looking at a few flashes, most range somewhere below 100
	 * microseconds. So, sleep for a range of 200-400 us.
*/
	usleep_range(SPI_NOR_SRST_SLEEP_MIN, SPI_NOR_SRST_SLEEP_MAX);
}

/* mtd suspend handler */
static int spi_nor_suspend(struct mtd_info *mtd)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	int ret;

	/* Disable octal DTR mode if we enabled it. */
	ret = spi_nor_octal_dtr_enable(nor, false);
	if (ret)
		dev_err(nor->dev, "suspend() failed\n");

	return ret;
}

/* mtd resume handler */
static void spi_nor_resume(struct mtd_info *mtd)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	struct device *dev = nor->dev;
	int ret;

	/* re-initialize the nor chip */
	ret = spi_nor_init(nor);
	if (ret)
		dev_err(dev, "resume() failed\n");
}

/*
 * Pin the driver module that talks to the flash (the SPI controller's parent
 * driver for spi-mem, otherwise this device's driver) while the MTD device
 * is in use.
 */
static int spi_nor_get_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct spi_nor *nor = mtd_to_spi_nor(master);
	struct device *dev;

	if (nor->spimem)
		dev = nor->spimem->spi->controller->dev.parent;
	else
		dev = nor->dev;

	if (!try_module_get(dev->driver->owner))
		return -ENODEV;

	return 0;
}

/* Drop the module reference taken by spi_nor_get_device(). */
static void spi_nor_put_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct spi_nor *nor = mtd_to_spi_nor(master);
	struct device *dev;

	if (nor->spimem)
		dev = nor->spimem->spi->controller->dev.parent;
	else
		dev = nor->dev;

	module_put(dev->driver->owner);
}

/* Put the flash back into a state a subsequent user can cope with. */
void spi_nor_restore(struct spi_nor *nor)
{
	/* restore the addressing mode */
	if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
	    nor->flags & SNOR_F_BROKEN_RESET)
		nor->params->set_4byte_addr_mode(nor, false);

	if (nor->flags & SNOR_F_SOFT_RESET)
		spi_nor_soft_reset(nor);
}
EXPORT_SYMBOL_GPL(spi_nor_restore);

/* Look up a flash_info entry by name and record its manufacturer. */
static const struct flash_info *spi_nor_match_id(struct spi_nor *nor,
						 const char *name)
{
	unsigned int i, j;

	for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
		for (j = 0; j < manufacturers[i]->nparts; j++) {
			if (!strcmp(name, manufacturers[i]->parts[j].name)) {
				nor->manufacturer = manufacturers[i];
				return &manufacturers[i]->parts[j];
			}
		}
	}

	return NULL;
}

/*
 * Determine nor->addr_width, preferring (in order) a width already set by
 * SFDP, the 4-byte width mandated by 8D-8D-8D mode, the flash_info value,
 * and finally the 3-byte default (bumped to 4 for devices above 16MiB).
 */
static int spi_nor_set_addr_width(struct spi_nor *nor)
{
	if (nor->addr_width) {
		/* already configured from SFDP */
	} else if (nor->read_proto == SNOR_PROTO_8_8_8_DTR) {
		/*
		 * In 8D-8D-8D mode, one byte takes half a cycle to transfer. So
		 * in this protocol an odd address width cannot be used because
		 * then the address phase would only span a cycle and a half.
		 * Half a cycle would be left over. We would then have to start
		 * the dummy phase in the middle of a cycle and so too the data
		 * phase, and we will end the transaction with half a cycle left
		 * over.
		 *
		 * Force all 8D-8D-8D flashes to use an address width of 4 to
		 * avoid this situation.
		 */
		nor->addr_width = 4;
	} else if (nor->info->addr_width) {
		nor->addr_width = nor->info->addr_width;
	} else {
		nor->addr_width = 3;
	}

	if (nor->addr_width == 3 && nor->mtd.size > 0x1000000) {
		/* enable 4-byte addressing if the device exceeds 16MiB */
		nor->addr_width = 4;
	}

	if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
		dev_dbg(nor->dev, "address width is too large: %u\n",
			nor->addr_width);
		return -EINVAL;
	}

	/* Set 4byte opcodes when possible.
*/
	if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES &&
	    !(nor->flags & SNOR_F_HAS_4BAIT))
		spi_nor_set_4byte_opcodes(nor);

	return 0;
}

/* Expose the part name and JEDEC ID through the MTD debugfs fields. */
static void spi_nor_debugfs_init(struct spi_nor *nor,
				 const struct flash_info *info)
{
	struct mtd_info *mtd = &nor->mtd;

	mtd->dbg.partname = info->name;
	mtd->dbg.partid = devm_kasprintf(nor->dev, GFP_KERNEL, "spi-nor:%*phN",
					 info->id_len, info->id);
}

/*
 * Find the flash_info entry to use: match @name when given, otherwise (or
 * when the name is unknown) auto-detect via the JEDEC READ ID command. A
 * named entry that carries a JEDEC ID is cross-checked against the chip.
 */
static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
						       const char *name)
{
	const struct flash_info *info = NULL;

	if (name)
		info = spi_nor_match_id(nor, name);
	/* Try to auto-detect if chip name wasn't specified or not found */
	if (!info)
		info = spi_nor_read_id(nor);
	if (IS_ERR_OR_NULL(info))
		return ERR_PTR(-ENOENT);

	/*
	 * If caller has specified name of flash model that can normally be
	 * detected using JEDEC, let's verify it.
	 */
	if (name && info->id_len) {
		const struct flash_info *jinfo;

		jinfo = spi_nor_read_id(nor);
		if (IS_ERR(jinfo)) {
			return jinfo;
		} else if (jinfo != info) {
			/*
			 * JEDEC knows better, so overwrite platform ID. We
			 * can't trust partitions any longer, but we'll let
			 * mtd apply them anyway, since some partitions may be
			 * marked read-only, and we don't want to lose that
			 * information, even if it's not 100% accurate.
			 */
			dev_warn(nor->dev, "found %s, expected %s\n",
				 jinfo->name, info->name);
			info = jinfo;
		}
	}

	return info;
}

/**
 * spi_nor_scan() - identify the flash and set up the MTD interface
 * @nor:	pointer to a 'struct spi_nor'
 * @name:	flash part name, or NULL to rely on JEDEC auto-detection
 * @hwcaps:	hardware capabilities advertised by the SPI controller
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_scan(struct spi_nor *nor, const char *name,
		 const struct spi_nor_hwcaps *hwcaps)
{
	const struct flash_info *info;
	struct device *dev = nor->dev;
	struct mtd_info *mtd = &nor->mtd;
	struct device_node *np = spi_nor_get_flash_node(nor);
	int ret;
	int i;

	ret = spi_nor_check(nor);
	if (ret)
		return ret;

	/* Reset SPI protocol for all commands. */
	nor->reg_proto = SNOR_PROTO_1_1_1;
	nor->read_proto = SNOR_PROTO_1_1_1;
	nor->write_proto = SNOR_PROTO_1_1_1;

	/*
	 * We need the bounce buffer early to read/write registers when going
	 * through the spi-mem layer (buffers have to be DMA-able).
	 * For spi-mem drivers, we'll reallocate a new buffer if
	 * nor->page_size turns out to be greater than PAGE_SIZE (which
	 * shouldn't happen before long since NOR pages are usually less
	 * than 1KB) after spi_nor_scan() returns.
	 */
	nor->bouncebuf_size = PAGE_SIZE;
	nor->bouncebuf = devm_kmalloc(dev, nor->bouncebuf_size,
				      GFP_KERNEL);
	if (!nor->bouncebuf)
		return -ENOMEM;

	info = spi_nor_get_flash_info(nor, name);
	if (IS_ERR(info))
		return PTR_ERR(info);

	nor->info = info;

	spi_nor_debugfs_init(nor, info);

	mutex_init(&nor->lock);

	/*
	 * Make sure the XSR_RDY flag is set before calling
	 * spi_nor_wait_till_ready(). Xilinx S3AN share MFR
	 * with Atmel SPI NOR.
*/
	if (info->flags & SPI_NOR_XSR_RDY)
		nor->flags |= SNOR_F_READY_XSR_RDY;

	if (info->flags & SPI_NOR_HAS_LOCK)
		nor->flags |= SNOR_F_HAS_LOCK;

	mtd->_write = spi_nor_write;

	/* Init flash parameters based on flash_info struct and SFDP */
	ret = spi_nor_init_params(nor);
	if (ret)
		return ret;

	if (!mtd->name)
		mtd->name = dev_name(dev);
	mtd->priv = nor;
	mtd->type = MTD_NORFLASH;
	mtd->writesize = nor->params->writesize;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->size = nor->params->size;
	mtd->_erase = spi_nor_erase;
	mtd->_read = spi_nor_read;
	mtd->_suspend = spi_nor_suspend;
	mtd->_resume = spi_nor_resume;
	mtd->_get_device = spi_nor_get_device;
	mtd->_put_device = spi_nor_put_device;

	/* Translate flash_info flags into runtime SNOR_F_* flags. */
	if (info->flags & USE_FSR)
		nor->flags |= SNOR_F_USE_FSR;
	if (info->flags & SPI_NOR_HAS_TB) {
		nor->flags |= SNOR_F_HAS_SR_TB;
		if (info->flags & SPI_NOR_TB_SR_BIT6)
			nor->flags |= SNOR_F_HAS_SR_TB_BIT6;
	}

	if (info->flags & NO_CHIP_ERASE)
		nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
	if (info->flags & USE_CLSR)
		nor->flags |= SNOR_F_USE_CLSR;
	if (info->flags & SPI_NOR_SWP_IS_VOLATILE)
		nor->flags |= SNOR_F_SWP_IS_VOLATILE;

	if (info->flags & SPI_NOR_4BIT_BP) {
		nor->flags |= SNOR_F_HAS_4BIT_BP;
		if (info->flags & SPI_NOR_BP3_SR_BIT6)
			nor->flags |= SNOR_F_HAS_SR_BP3_BIT6;
	}

	if (info->flags & SPI_NOR_NO_ERASE)
		mtd->flags |= MTD_NO_ERASE;

	mtd->dev.parent = dev;
	nor->page_size = nor->params->page_size;
	mtd->writebufsize = nor->page_size;

	if (of_property_read_bool(np, "broken-flash-reset"))
		nor->flags |= SNOR_F_BROKEN_RESET;

	/*
	 * Configure the SPI memory:
	 * - select op codes for (Fast) Read, Page Program and Sector Erase.
	 * - set the number of dummy cycles (mode cycles + wait states).
	 * - set the SPI protocols for register and memory accesses.
	 */
	ret = spi_nor_setup(nor, hwcaps);
	if (ret)
		return ret;

	if (info->flags & SPI_NOR_4B_OPCODES)
		nor->flags |= SNOR_F_4B_OPCODES;

	if (info->flags & SPI_NOR_IO_MODE_EN_VOLATILE)
		nor->flags |= SNOR_F_IO_MODE_EN_VOLATILE;

	ret = spi_nor_set_addr_width(nor);
	if (ret)
		return ret;

	spi_nor_register_locking_ops(nor);

	/* Send all the required SPI flash commands to initialize device */
	ret = spi_nor_init(nor);
	if (ret)
		return ret;

	/* Configure OTP parameters and ops */
	spi_nor_otp_init(nor);

	dev_info(dev, "%s (%lld Kbytes)\n", info->name,
		 (long long)mtd->size >> 10);

	dev_dbg(dev,
		"mtd .name = %s, .size = 0x%llx (%lldMiB), "
		".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
		mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
		mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);

	if (mtd->numeraseregions)
		for (i = 0; i < mtd->numeraseregions; i++)
			dev_dbg(dev,
				"mtd.eraseregions[%d] = { .offset = 0x%llx, "
				".erasesize = 0x%.8x (%uKiB), "
				".numblocks = %d }\n",
				i, (long long)mtd->eraseregions[i].offset,
				mtd->eraseregions[i].erasesize,
				mtd->eraseregions[i].erasesize / 1024,
				mtd->eraseregions[i].numblocks);
	return 0;
}
EXPORT_SYMBOL_GPL(spi_nor_scan);

/*
 * Create the spi-mem direct-mapping descriptor used by the read path.
 * Return: 0 on success, -errno from devm_spi_mem_dirmap_create() otherwise.
 */
static int spi_nor_create_read_dirmap(struct spi_nor *nor)
{
	struct spi_mem_dirmap_info info = {
		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
				      SPI_MEM_OP_ADDR(nor->addr_width, 0, 0),
				      SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
				      SPI_MEM_OP_DATA_IN(0, NULL, 0)),
		.offset = 0,
		.length = nor->mtd.size,
	};
	struct spi_mem_op *op = &info.op_tmpl;

	spi_nor_spimem_setup_op(nor, op, nor->read_proto);

	/* convert the dummy cycles to the number of
bytes */
	op->dummy.nbytes = (nor->read_dummy * op->dummy.buswidth) / 8;
	if (spi_nor_protocol_is_dtr(nor->read_proto))
		op->dummy.nbytes *= 2;

	/*
	 * Since spi_nor_spimem_setup_op() only sets buswidth when the number
	 * of data bytes is non-zero, the data buswidth won't be set here. So,
	 * do it explicitly.
	 */
	op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);

	nor->dirmap.rdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
						       &info);
	return PTR_ERR_OR_ZERO(nor->dirmap.rdesc);
}

/*
 * Create the spi-mem direct-mapping descriptor used by the write path.
 * Return: 0 on success, -errno from devm_spi_mem_dirmap_create() otherwise.
 */
static int spi_nor_create_write_dirmap(struct spi_nor *nor)
{
	struct spi_mem_dirmap_info info = {
		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
				      SPI_MEM_OP_ADDR(nor->addr_width, 0, 0),
				      SPI_MEM_OP_NO_DUMMY,
				      SPI_MEM_OP_DATA_OUT(0, NULL, 0)),
		.offset = 0,
		.length = nor->mtd.size,
	};
	struct spi_mem_op *op = &info.op_tmpl;

	/* SST AAI word program drops the address after the first cycle. */
	if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
		op->addr.nbytes = 0;

	spi_nor_spimem_setup_op(nor, op, nor->write_proto);

	/*
	 * Since spi_nor_spimem_setup_op() only sets buswidth when the number
	 * of data bytes is non-zero, the data buswidth won't be set here. So,
	 * do it explicitly.
	 */
	op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);

	nor->dirmap.wdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
						       &info);
	return PTR_ERR_OR_ZERO(nor->dirmap.wdesc);
}

/* spi-mem probe handler: identify the flash and register it with MTD. */
static int spi_nor_probe(struct spi_mem *spimem)
{
	struct spi_device *spi = spimem->spi;
	struct flash_platform_data *data = dev_get_platdata(&spi->dev);
	struct spi_nor *nor;
	/*
	 * Enable all caps by default. The core will mask them after
	 * checking what's really supported using spi_mem_supports_op().
	 */
	const struct spi_nor_hwcaps hwcaps = { .mask = SNOR_HWCAPS_ALL };
	char *flash_name;
	int ret;

	nor = devm_kzalloc(&spi->dev, sizeof(*nor), GFP_KERNEL);
	if (!nor)
		return -ENOMEM;

	nor->spimem = spimem;
	nor->dev = &spi->dev;
	spi_nor_set_flash_node(nor, spi->dev.of_node);

	spi_mem_set_drvdata(spimem, nor);

	if (data && data->name)
		nor->mtd.name = data->name;

	if (!nor->mtd.name)
		nor->mtd.name = spi_mem_get_name(spimem);

	/*
	 * For some (historical?) reason many platforms provide two different
	 * names in flash_platform_data: "name" and "type". Quite often name is
	 * set to "m25p80" and then "type" provides a real chip name.
	 * If that's the case, respect "type" and ignore a "name".
	 */
	if (data && data->type)
		flash_name = data->type;
	else if (!strcmp(spi->modalias, "spi-nor"))
		flash_name = NULL; /* auto-detect */
	else
		flash_name = spi->modalias;

	ret = spi_nor_scan(nor, flash_name, &hwcaps);
	if (ret)
		return ret;

	/*
	 * None of the existing parts have > 512B pages, but let's play safe
	 * and add this logic so that if anyone ever adds support for such
	 * a NOR we don't end up with buffer overflows.
	 */
	if (nor->page_size > PAGE_SIZE) {
		nor->bouncebuf_size = nor->page_size;
		devm_kfree(nor->dev, nor->bouncebuf);
		nor->bouncebuf = devm_kmalloc(nor->dev,
					      nor->bouncebuf_size,
					      GFP_KERNEL);
		if (!nor->bouncebuf)
			return -ENOMEM;
	}

	ret = spi_nor_create_read_dirmap(nor);
	if (ret)
		return ret;

	ret = spi_nor_create_write_dirmap(nor);
	if (ret)
		return ret;

	return mtd_device_register(&nor->mtd, data ? data->parts : NULL,
				   data ?
data->nr_parts : 0); 3364 } 3365 3366 static int spi_nor_remove(struct spi_mem *spimem) 3367 { 3368 struct spi_nor *nor = spi_mem_get_drvdata(spimem); 3369 3370 spi_nor_restore(nor); 3371 3372 /* Clean up MTD stuff. */ 3373 return mtd_device_unregister(&nor->mtd); 3374 } 3375 3376 static void spi_nor_shutdown(struct spi_mem *spimem) 3377 { 3378 struct spi_nor *nor = spi_mem_get_drvdata(spimem); 3379 3380 spi_nor_restore(nor); 3381 } 3382 3383 /* 3384 * Do NOT add to this array without reading the following: 3385 * 3386 * Historically, many flash devices are bound to this driver by their name. But 3387 * since most of these flash are compatible to some extent, and their 3388 * differences can often be differentiated by the JEDEC read-ID command, we 3389 * encourage new users to add support to the spi-nor library, and simply bind 3390 * against a generic string here (e.g., "jedec,spi-nor"). 3391 * 3392 * Many flash names are kept here in this list (as well as in spi-nor.c) to 3393 * keep them available as module aliases for existing platforms. 3394 */ 3395 static const struct spi_device_id spi_nor_dev_ids[] = { 3396 /* 3397 * Allow non-DT platform devices to bind to the "spi-nor" modalias, and 3398 * hack around the fact that the SPI core does not provide uevent 3399 * matching for .of_match_table 3400 */ 3401 {"spi-nor"}, 3402 3403 /* 3404 * Entries not used in DTs that should be safe to drop after replacing 3405 * them with "spi-nor" in platform data. 3406 */ 3407 {"s25sl064a"}, {"w25x16"}, {"m25p10"}, {"m25px64"}, 3408 3409 /* 3410 * Entries that were used in DTs without "jedec,spi-nor" fallback and 3411 * should be kept for backward compatibility. 
3412 */ 3413 {"at25df321a"}, {"at25df641"}, {"at26df081a"}, 3414 {"mx25l4005a"}, {"mx25l1606e"}, {"mx25l6405d"}, {"mx25l12805d"}, 3415 {"mx25l25635e"},{"mx66l51235l"}, 3416 {"n25q064"}, {"n25q128a11"}, {"n25q128a13"}, {"n25q512a"}, 3417 {"s25fl256s1"}, {"s25fl512s"}, {"s25sl12801"}, {"s25fl008k"}, 3418 {"s25fl064k"}, 3419 {"sst25vf040b"},{"sst25vf016b"},{"sst25vf032b"},{"sst25wf040"}, 3420 {"m25p40"}, {"m25p80"}, {"m25p16"}, {"m25p32"}, 3421 {"m25p64"}, {"m25p128"}, 3422 {"w25x80"}, {"w25x32"}, {"w25q32"}, {"w25q32dw"}, 3423 {"w25q80bl"}, {"w25q128"}, {"w25q256"}, 3424 3425 /* Flashes that can't be detected using JEDEC */ 3426 {"m25p05-nonjedec"}, {"m25p10-nonjedec"}, {"m25p20-nonjedec"}, 3427 {"m25p40-nonjedec"}, {"m25p80-nonjedec"}, {"m25p16-nonjedec"}, 3428 {"m25p32-nonjedec"}, {"m25p64-nonjedec"}, {"m25p128-nonjedec"}, 3429 3430 /* Everspin MRAMs (non-JEDEC) */ 3431 { "mr25h128" }, /* 128 Kib, 40 MHz */ 3432 { "mr25h256" }, /* 256 Kib, 40 MHz */ 3433 { "mr25h10" }, /* 1 Mib, 40 MHz */ 3434 { "mr25h40" }, /* 4 Mib, 40 MHz */ 3435 3436 { }, 3437 }; 3438 MODULE_DEVICE_TABLE(spi, spi_nor_dev_ids); 3439 3440 static const struct of_device_id spi_nor_of_table[] = { 3441 /* 3442 * Generic compatibility for SPI NOR that can be identified by the 3443 * JEDEC READ ID opcode (0x9F). Use this, if possible. 3444 */ 3445 { .compatible = "jedec,spi-nor" }, 3446 { /* sentinel */ }, 3447 }; 3448 MODULE_DEVICE_TABLE(of, spi_nor_of_table); 3449 3450 /* 3451 * REVISIT: many of these chips have deep power-down modes, which 3452 * should clearly be entered on suspend() to minimize power use. 3453 * And also when they're otherwise idle... 
3454 */ 3455 static struct spi_mem_driver spi_nor_driver = { 3456 .spidrv = { 3457 .driver = { 3458 .name = "spi-nor", 3459 .of_match_table = spi_nor_of_table, 3460 .dev_groups = spi_nor_sysfs_groups, 3461 }, 3462 .id_table = spi_nor_dev_ids, 3463 }, 3464 .probe = spi_nor_probe, 3465 .remove = spi_nor_remove, 3466 .shutdown = spi_nor_shutdown, 3467 }; 3468 module_spi_mem_driver(spi_nor_driver); 3469 3470 MODULE_LICENSE("GPL v2"); 3471 MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>"); 3472 MODULE_AUTHOR("Mike Lavender"); 3473 MODULE_DESCRIPTION("framework for SPI NOR"); 3474