1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with 4 * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c 5 * 6 * Copyright (C) 2005, Intec Automation Inc. 7 * Copyright (C) 2014, Freescale Semiconductor, Inc. 8 */ 9 10 #include <linux/err.h> 11 #include <linux/errno.h> 12 #include <linux/module.h> 13 #include <linux/device.h> 14 #include <linux/mutex.h> 15 #include <linux/math64.h> 16 #include <linux/sizes.h> 17 #include <linux/slab.h> 18 19 #include <linux/mtd/mtd.h> 20 #include <linux/of_platform.h> 21 #include <linux/sched/task_stack.h> 22 #include <linux/spi/flash.h> 23 #include <linux/mtd/spi-nor.h> 24 25 #include "core.h" 26 27 /* Define max times to check status register before we give up. */ 28 29 /* 30 * For everything but full-chip erase; probably could be much smaller, but kept 31 * around for safety for now 32 */ 33 #define DEFAULT_READY_WAIT_JIFFIES (40UL * HZ) 34 35 /* 36 * For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up 37 * for larger flash 38 */ 39 #define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ) 40 41 #define SPI_NOR_MAX_ADDR_WIDTH 4 42 43 #define SPI_NOR_SRST_SLEEP_MIN 200 44 #define SPI_NOR_SRST_SLEEP_MAX 400 45 46 /** 47 * spi_nor_get_cmd_ext() - Get the command opcode extension based on the 48 * extension type. 49 * @nor: pointer to a 'struct spi_nor' 50 * @op: pointer to the 'struct spi_mem_op' whose properties 51 * need to be initialized. 52 * 53 * Right now, only "repeat" and "invert" are supported. 54 * 55 * Return: The opcode extension. 
 */
static u8 spi_nor_get_cmd_ext(const struct spi_nor *nor,
			      const struct spi_mem_op *op)
{
	switch (nor->cmd_ext_type) {
	case SPI_NOR_EXT_INVERT:
		/* The extension is the bitwise complement of the opcode. */
		return ~op->cmd.opcode;

	case SPI_NOR_EXT_REPEAT:
		/* The extension repeats the opcode itself. */
		return op->cmd.opcode;

	default:
		dev_err(nor->dev, "Unknown command extension type\n");
		return 0;
	}
}

/**
 * spi_nor_spimem_setup_op() - Set up common properties of a spi-mem op.
 * @nor:	pointer to a 'struct spi_nor'
 * @op:		pointer to the 'struct spi_mem_op' whose properties
 *		need to be initialized.
 * @proto:	the protocol from which the properties need to be set.
 */
void spi_nor_spimem_setup_op(const struct spi_nor *nor,
			     struct spi_mem_op *op,
			     const enum spi_nor_protocol proto)
{
	u8 ext;

	op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(proto);

	if (op->addr.nbytes)
		op->addr.buswidth = spi_nor_get_protocol_addr_nbits(proto);

	/* The dummy phase is clocked at the address-phase bus width. */
	if (op->dummy.nbytes)
		op->dummy.buswidth = spi_nor_get_protocol_addr_nbits(proto);

	if (op->data.nbytes)
		op->data.buswidth = spi_nor_get_protocol_data_nbits(proto);

	if (spi_nor_protocol_is_dtr(proto)) {
		/*
		 * SPIMEM supports mixed DTR modes, but right now we can only
		 * have all phases either DTR or STR. IOW, SPIMEM can have
		 * something like 4S-4D-4D, but SPI NOR can't. So, set all 4
		 * phases to either DTR or STR.
		 */
		op->cmd.dtr = true;
		op->addr.dtr = true;
		op->dummy.dtr = true;
		op->data.dtr = true;

		/* 2 bytes per clock cycle in DTR mode. */
		op->dummy.nbytes *= 2;

		/*
		 * In DTR mode the command is sent as a 2-byte opcode:
		 * the opcode followed by its extension byte.
		 */
		ext = spi_nor_get_cmd_ext(nor, op);
		op->cmd.opcode = (op->cmd.opcode << 8) | ext;
		op->cmd.nbytes = 2;
	}
}

/**
 * spi_nor_spimem_bounce() - check if a bounce buffer is needed for the data
 *                           transfer
 * @nor:	pointer to 'struct spi_nor'
 * @op:		pointer to 'struct spi_mem_op' template for transfer
 *
 * If we have to use the bounce buffer, the data field in @op will be updated.
 *
 * Return: true if the bounce buffer is needed, false if not
 */
static bool spi_nor_spimem_bounce(struct spi_nor *nor, struct spi_mem_op *op)
{
	/* op->data.buf.in occupies the same memory as op->data.buf.out */
	if (object_is_on_stack(op->data.buf.in) ||
	    !virt_addr_valid(op->data.buf.in)) {
		/*
		 * The transfer may be shortened here; callers must honor the
		 * (possibly reduced) op->data.nbytes after this call.
		 */
		if (op->data.nbytes > nor->bouncebuf_size)
			op->data.nbytes = nor->bouncebuf_size;
		op->data.buf.in = nor->bouncebuf;
		return true;
	}

	return false;
}

/**
 * spi_nor_spimem_exec_op() - execute a memory operation
 * @nor:	pointer to 'struct spi_nor'
 * @op:		pointer to 'struct spi_mem_op' template for transfer
 *
 * The op size is first clamped to what the controller can handle.
 *
 * Return: 0 on success, -error otherwise.
 */
static int spi_nor_spimem_exec_op(struct spi_nor *nor, struct spi_mem_op *op)
{
	int error;

	error = spi_mem_adjust_op_size(nor->spimem, op);
	if (error)
		return error;

	return spi_mem_exec_op(nor->spimem, op);
}

/*
 * Legacy controller_ops wrappers: these interfaces cannot express DTR
 * transfers, so reject DTR protocols up front.
 */
static int spi_nor_controller_ops_read_reg(struct spi_nor *nor, u8 opcode,
					   u8 *buf, size_t len)
{
	if (spi_nor_protocol_is_dtr(nor->reg_proto))
		return -EOPNOTSUPP;

	return nor->controller_ops->read_reg(nor, opcode, buf, len);
}

static int spi_nor_controller_ops_write_reg(struct spi_nor *nor, u8 opcode,
					    const u8 *buf, size_t len)
{
	if (spi_nor_protocol_is_dtr(nor->reg_proto))
		return -EOPNOTSUPP;

	return nor->controller_ops->write_reg(nor, opcode, buf, len);
}

static int spi_nor_controller_ops_erase(struct spi_nor *nor, loff_t offs)
{
	if (spi_nor_protocol_is_dtr(nor->write_proto))
		return -EOPNOTSUPP;

	return nor->controller_ops->erase(nor, offs);
}

/**
 * spi_nor_spimem_read_data() - read data from flash's memory region via
 *                              spi-mem
 * @nor:	pointer to 'struct spi_nor'
 * @from:	offset to read from
 * @len:	number of bytes to read
 * @buf:	pointer to dst buffer
 *
 * Return: number of bytes read successfully, -errno otherwise
 */
static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from,
					size_t len, u8 *buf)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
			   SPI_MEM_OP_ADDR(nor->addr_width, from, 0),
			   SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
			   SPI_MEM_OP_DATA_IN(len, buf, 0));
	bool usebouncebuf;
	ssize_t nbytes;
	int error;

	spi_nor_spimem_setup_op(nor, &op, nor->read_proto);

	/*
	 * convert the dummy cycles to the number of bytes. This overrides
	 * the dummy byte count computed by spi_nor_spimem_setup_op().
	 */
	op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
	if (spi_nor_protocol_is_dtr(nor->read_proto))
		op.dummy.nbytes *= 2;

	/* May shorten op.data.nbytes to the bounce buffer size. */
	usebouncebuf = spi_nor_spimem_bounce(nor, &op);

	if (nor->dirmap.rdesc) {
		/* Direct-mapped read path, bypasses spi_mem_exec_op(). */
		nbytes = spi_mem_dirmap_read(nor->dirmap.rdesc, op.addr.val,
					     op.data.nbytes, op.data.buf.in);
	} else {
		error = spi_nor_spimem_exec_op(nor, &op);
		if (error)
			return error;
		nbytes = op.data.nbytes;
	}

	/* Copy back from the bounce buffer into the caller's buffer. */
	if (usebouncebuf && nbytes > 0)
		memcpy(buf, op.data.buf.in, nbytes);

	return nbytes;
}

/**
 * spi_nor_read_data() - read data from flash memory
 * @nor:	pointer to 'struct spi_nor'
 * @from:	offset to read from
 * @len:	number of bytes to read
 * @buf:	pointer to dst buffer
 *
 * Return: number of bytes read successfully, -errno otherwise
 */
ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len, u8 *buf)
{
	if (nor->spimem)
		return spi_nor_spimem_read_data(nor, from, len, buf);

	return nor->controller_ops->read(nor, from, len, buf);
}

/**
 * spi_nor_spimem_write_data() - write data to flash memory via
 *                               spi-mem
 * @nor:	pointer to 'struct spi_nor'
 * @to:		offset to write to
 * @len:	number of bytes to write
 * @buf:	pointer to src buffer
 *
 * Return: number of bytes written successfully, -errno otherwise
 */
static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to,
					 size_t len, const u8 *buf)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
			   SPI_MEM_OP_ADDR(nor->addr_width, to, 0),
			   SPI_MEM_OP_NO_DUMMY,
			   SPI_MEM_OP_DATA_OUT(len, buf, 0));
	ssize_t nbytes;
	int error;

	/*
	 * SST AAI word program: the second and subsequent writes carry no
	 * address phase, the device auto-increments internally.
	 */
	if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
		op.addr.nbytes = 0;

	spi_nor_spimem_setup_op(nor, &op, nor->write_proto);

	/* op.data.nbytes may have been shortened to the bounce buffer size. */
	if (spi_nor_spimem_bounce(nor, &op))
		memcpy(nor->bouncebuf, buf, op.data.nbytes);

	if (nor->dirmap.wdesc) {
		/* Direct-mapped write path, bypasses spi_mem_exec_op(). */
		nbytes = spi_mem_dirmap_write(nor->dirmap.wdesc, op.addr.val,
					      op.data.nbytes, op.data.buf.out);
	} else {
		error = spi_nor_spimem_exec_op(nor, &op);
		if (error)
			return error;
		nbytes = op.data.nbytes;
	}

	return nbytes;
}

/**
 * spi_nor_write_data() - write data to flash memory
 * @nor:	pointer to 'struct spi_nor'
 * @to:		offset to write to
 * @len:	number of bytes to write
 * @buf:	pointer to src buffer
 *
 * Return: number of bytes written successfully, -errno otherwise
 */
ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
			   const u8 *buf)
{
	if (nor->spimem)
		return spi_nor_spimem_write_data(nor, to, len, buf);

	return nor->controller_ops->write(nor, to, len, buf);
}

/**
 * spi_nor_write_enable() - Set write enable latch with Write Enable command.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_enable(struct spi_nor *nor)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WREN,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d on Write Enable\n", ret);

	return ret;
}

/**
 * spi_nor_write_disable() - Send Write Disable instruction to the chip.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_disable(struct spi_nor *nor)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRDI,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d on Write Disable\n", ret);

	return ret;
}

/**
 * spi_nor_read_sr() - Read the Status Register.
 * @nor:	pointer to 'struct spi_nor'.
 * @sr:		pointer to a DMA-able buffer where the value of the
 *		Status Register will be written. Should be at least 2 bytes.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_read_sr(struct spi_nor *nor, u8 *sr)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_IN(1, sr, 0));

		/* Octal DTR register reads need address and dummy cycles. */
		if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) {
			op.addr.nbytes = nor->params->rdsr_addr_nbytes;
			op.dummy.nbytes = nor->params->rdsr_dummy;
			/*
			 * We don't want to read only one byte in DTR mode. So,
			 * read 2 and then discard the second byte.
			 */
			op.data.nbytes = 2;
		}

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR, sr,
						      1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d reading SR\n", ret);

	return ret;
}

/**
 * spi_nor_read_fsr() - Read the Flag Status Register.
 * @nor:	pointer to 'struct spi_nor'
 * @fsr:	pointer to a DMA-able buffer where the value of the
 *		Flag Status Register will be written. Should be at least 2
 *		bytes.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_read_fsr(struct spi_nor *nor, u8 *fsr)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_IN(1, fsr, 0));

		/* Octal DTR register reads need address and dummy cycles. */
		if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) {
			op.addr.nbytes = nor->params->rdsr_addr_nbytes;
			op.dummy.nbytes = nor->params->rdsr_dummy;
			/*
			 * We don't want to read only one byte in DTR mode. So,
			 * read 2 and then discard the second byte.
			 */
			op.data.nbytes = 2;
		}

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDFSR, fsr,
						      1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d reading FSR\n", ret);

	return ret;
}

/**
 * spi_nor_read_cr() - Read the Configuration Register using the
 * SPINOR_OP_RDCR (35h) command.
 * @nor:	pointer to 'struct spi_nor'
 * @cr:		pointer to a DMA-able buffer where the value of the
 *		Configuration Register will be written.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_read_cr(struct spi_nor *nor, u8 *cr)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDCR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_IN(1, cr, 0));

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDCR, cr,
						      1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d reading CR\n", ret);

	return ret;
}

/**
 * spi_nor_set_4byte_addr_mode() - Enter/Exit 4-byte address mode.
 * @nor:	pointer to 'struct spi_nor'.
 * @enable:	true to enter the 4-byte address mode, false to exit the 4-byte
 *		address mode.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(enable ?
						  SPINOR_OP_EN4B :
						  SPINOR_OP_EX4B,
						  0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor,
						       enable ?
						       SPINOR_OP_EN4B :
						       SPINOR_OP_EX4B,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);

	return ret;
}

/**
 * spansion_set_4byte_addr_mode() - Set 4-byte address mode for Spansion
 * flashes.
 * @nor:	pointer to 'struct spi_nor'.
 * @enable:	true to enter the 4-byte address mode, false to exit the 4-byte
 *		address mode.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spansion_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
{
	int ret;

	/* Bit 7 of the Bank Register selects 3- vs 4-byte addressing. */
	nor->bouncebuf[0] = enable << 7;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_BRWR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 0));

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_BRWR,
						       nor->bouncebuf, 1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);

	return ret;
}

/**
 * spi_nor_write_ear() - Write Extended Address Register.
 * @nor:	pointer to 'struct spi_nor'.
 * @ear:	value to write to the Extended Address Register.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_ear(struct spi_nor *nor, u8 ear)
{
	int ret;

	nor->bouncebuf[0] = ear;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREAR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 0));

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WREAR,
						       nor->bouncebuf, 1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d writing EAR\n", ret);

	return ret;
}

/**
 * spi_nor_xread_sr() - Read the Status Register on S3AN flashes.
 * @nor:	pointer to 'struct spi_nor'.
 * @sr:		pointer to a DMA-able buffer where the value of the
 *		Status Register will be written.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_XRDSR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_IN(1, sr, 0));

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_XRDSR, sr,
						      1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d reading XRDSR\n", ret);

	return ret;
}

/**
 * spi_nor_xsr_ready() - Query the Status Register of the S3AN flash to see if
 * the flash is ready for new commands.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 1 if ready, 0 if not ready, -errno on errors.
 */
static int spi_nor_xsr_ready(struct spi_nor *nor)
{
	int ret;

	ret = spi_nor_xread_sr(nor, nor->bouncebuf);
	if (ret)
		return ret;

	return !!(nor->bouncebuf[0] & XSR_RDY);
}

/**
 * spi_nor_clear_sr() - Clear the Status Register.
 * @nor:	pointer to 'struct spi_nor'.
 */
static void spi_nor_clear_sr(struct spi_nor *nor)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLSR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_CLSR,
						       NULL, 0);
	}

	/* Best effort: the failure is only logged, not propagated. */
	if (ret)
		dev_dbg(nor->dev, "error %d clearing SR\n", ret);
}

/**
 * spi_nor_sr_ready() - Query the Status Register to see if the flash is ready
 * for new commands.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 1 if ready, 0 if not ready, -errno on errors.
 */
static int spi_nor_sr_ready(struct spi_nor *nor)
{
	int ret = spi_nor_read_sr(nor, nor->bouncebuf);

	if (ret)
		return ret;

	if (nor->flags & SNOR_F_USE_CLSR &&
	    nor->bouncebuf[0] & (SR_E_ERR | SR_P_ERR)) {
		if (nor->bouncebuf[0] & SR_E_ERR)
			dev_err(nor->dev, "Erase Error occurred\n");
		else
			dev_err(nor->dev, "Programming Error occurred\n");

		spi_nor_clear_sr(nor);

		/*
		 * WEL bit remains set to one when an erase or page program
		 * error occurs. Issue a Write Disable command to protect
		 * against inadvertent writes that can possibly corrupt the
		 * contents of the memory.
		 */
		ret = spi_nor_write_disable(nor);
		if (ret)
			return ret;

		return -EIO;
	}

	/* Ready when the Write-In-Progress bit is clear. */
	return !(nor->bouncebuf[0] & SR_WIP);
}

/**
 * spi_nor_clear_fsr() - Clear the Flag Status Register.
 * @nor:	pointer to 'struct spi_nor'.
 */
static void spi_nor_clear_fsr(struct spi_nor *nor)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLFSR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_CLFSR,
						       NULL, 0);
	}

	/* Best effort: the failure is only logged, not propagated. */
	if (ret)
		dev_dbg(nor->dev, "error %d clearing FSR\n", ret);
}

/**
 * spi_nor_fsr_ready() - Query the Flag Status Register to see if the flash is
 * ready for new commands.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 1 if ready, 0 if not ready, -errno on errors.
 */
static int spi_nor_fsr_ready(struct spi_nor *nor)
{
	int ret = spi_nor_read_fsr(nor, nor->bouncebuf);

	if (ret)
		return ret;

	if (nor->bouncebuf[0] & (FSR_E_ERR | FSR_P_ERR)) {
		if (nor->bouncebuf[0] & FSR_E_ERR)
			dev_err(nor->dev, "Erase operation failed.\n");
		else
			dev_err(nor->dev, "Program operation failed.\n");

		if (nor->bouncebuf[0] & FSR_PT_ERR)
			dev_err(nor->dev,
				"Attempted to modify a protected sector.\n");

		spi_nor_clear_fsr(nor);

		/*
		 * WEL bit remains set to one when an erase or page program
		 * error occurs. Issue a Write Disable command to protect
		 * against inadvertent writes that can possibly corrupt the
		 * contents of the memory.
		 */
		ret = spi_nor_write_disable(nor);
		if (ret)
			return ret;

		return -EIO;
	}

	return !!(nor->bouncebuf[0] & FSR_READY);
}

/**
 * spi_nor_ready() - Query the flash to see if it is ready for new commands.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 1 if ready, 0 if not ready, -errno on errors.
 */
static int spi_nor_ready(struct spi_nor *nor)
{
	int sr, fsr;

	if (nor->flags & SNOR_F_READY_XSR_RDY)
		sr = spi_nor_xsr_ready(nor);
	else
		sr = spi_nor_sr_ready(nor);
	if (sr < 0)
		return sr;
	/* Flashes without an FSR are considered FSR-ready by default. */
	fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
	if (fsr < 0)
		return fsr;
	return sr && fsr;
}

/**
 * spi_nor_wait_till_ready_with_timeout() - Service routine to read the
 * Status Register until ready, or timeout occurs.
 * @nor:		pointer to "struct spi_nor".
 * @timeout_jiffies:	jiffies to wait until timeout.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
						unsigned long timeout_jiffies)
{
	unsigned long deadline;
	int timeout = 0, ret;

	deadline = jiffies + timeout_jiffies;

	while (!timeout) {
		/*
		 * The deadline is checked before polling, so the device is
		 * queried at least once more after the deadline has passed.
		 */
		if (time_after_eq(jiffies, deadline))
			timeout = 1;

		ret = spi_nor_ready(nor);
		if (ret < 0)
			return ret;
		if (ret)
			return 0;

		cond_resched();
	}

	dev_dbg(nor->dev, "flash operation timed out\n");

	return -ETIMEDOUT;
}

/**
 * spi_nor_wait_till_ready() - Wait for a predefined amount of time for the
 * flash to be ready, or timeout occurs.
 * @nor:	pointer to "struct spi_nor".
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_wait_till_ready(struct spi_nor *nor)
{
	return spi_nor_wait_till_ready_with_timeout(nor,
						    DEFAULT_READY_WAIT_JIFFIES);
}

/**
 * spi_nor_global_block_unlock() - Unlock Global Block Protection.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_global_block_unlock(struct spi_nor *nor)
{
	int ret;

	ret = spi_nor_write_enable(nor);
	if (ret)
		return ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_GBULK, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_GBULK,
						       NULL, 0);
	}

	if (ret) {
		dev_dbg(nor->dev, "error %d on Global Block Unlock\n", ret);
		return ret;
	}

	return spi_nor_wait_till_ready(nor);
}

/**
 * spi_nor_write_sr() - Write the Status Register.
 * @nor:	pointer to 'struct spi_nor'.
 * @sr:		pointer to DMA-able buffer to write to the Status Register.
 * @len:	number of bytes to write to the Status Register.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len)
{
	int ret;

	/* The Status Register is write-protected until Write Enable. */
	ret = spi_nor_write_enable(nor);
	if (ret)
		return ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_OUT(len, sr, 0));

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR, sr,
						       len);
	}

	if (ret) {
		dev_dbg(nor->dev, "error %d writing SR\n", ret);
		return ret;
	}

	return spi_nor_wait_till_ready(nor);
}

/**
 * spi_nor_write_sr1_and_check() - Write one byte to the Status Register 1 and
 * ensure that the byte written matches the received value.
 * @nor:	pointer to a 'struct spi_nor'.
 * @sr1:	byte value to be written to the Status Register.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_write_sr1_and_check(struct spi_nor *nor, u8 sr1)
{
	int ret;

	nor->bouncebuf[0] = sr1;

	ret = spi_nor_write_sr(nor, nor->bouncebuf, 1);
	if (ret)
		return ret;

	/* Read back and verify that the write actually took effect. */
	ret = spi_nor_read_sr(nor, nor->bouncebuf);
	if (ret)
		return ret;

	if (nor->bouncebuf[0] != sr1) {
		dev_dbg(nor->dev, "SR1: read back test failed\n");
		return -EIO;
	}

	return 0;
}

/**
 * spi_nor_write_16bit_sr_and_check() - Write the Status Register 1 and the
 * Status Register 2 in one shot. Ensure that the byte written in the Status
 * Register 1 matches the received value, and that the 16-bit Write did not
 * affect what was already in the Status Register 2.
 * @nor:	pointer to a 'struct spi_nor'.
 * @sr1:	byte value to be written to the Status Register 1.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1)
{
	int ret;
	u8 *sr_cr = nor->bouncebuf;
	u8 cr_written;

	/* Make sure we don't overwrite the contents of Status Register 2. */
	if (!(nor->flags & SNOR_F_NO_READ_CR)) {
		ret = spi_nor_read_cr(nor, &sr_cr[1]);
		if (ret)
			return ret;
	} else if (nor->params->quad_enable) {
		/*
		 * If the Status Register 2 Read command (35h) is not
		 * supported, we should at least be sure we don't
		 * change the value of the SR2 Quad Enable bit.
		 *
		 * We can safely assume that when the Quad Enable method is
		 * set, the value of the QE bit is one, as a consequence of the
		 * nor->params->quad_enable() call.
		 *
		 * We can safely assume that the Quad Enable bit is present in
		 * the Status Register 2 at BIT(1). According to the JESD216
		 * revB standard, BFPT DWORDS[15], bits 22:20, the 16-bit
		 * Write Status (01h) command is available just for the cases
		 * in which the QE bit is described in SR2 at BIT(1).
		 */
		sr_cr[1] = SR2_QUAD_EN_BIT1;
	} else {
		sr_cr[1] = 0;
	}

	sr_cr[0] = sr1;

	ret = spi_nor_write_sr(nor, sr_cr, 2);
	if (ret)
		return ret;

	/* Without a CR read command there is nothing left to verify. */
	if (nor->flags & SNOR_F_NO_READ_CR)
		return 0;

	cr_written = sr_cr[1];

	ret = spi_nor_read_cr(nor, &sr_cr[1]);
	if (ret)
		return ret;

	if (cr_written != sr_cr[1]) {
		dev_dbg(nor->dev, "CR: read back test failed\n");
		return -EIO;
	}

	return 0;
}

/**
 * spi_nor_write_16bit_cr_and_check() - Write the Status Register 1 and the
 * Configuration Register in one shot. Ensure that the byte written in the
 * Configuration Register matches the received value, and that the 16-bit Write
 * did not affect what was already in the Status Register 1.
 * @nor:	pointer to a 'struct spi_nor'.
 * @cr:		byte value to be written to the Configuration Register.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr)
{
	int ret;
	u8 *sr_cr = nor->bouncebuf;
	u8 sr_written;

	/* Keep the current value of the Status Register 1. */
	ret = spi_nor_read_sr(nor, sr_cr);
	if (ret)
		return ret;

	sr_cr[1] = cr;

	ret = spi_nor_write_sr(nor, sr_cr, 2);
	if (ret)
		return ret;

	sr_written = sr_cr[0];

	/* Verify SR1 kept the value we just preserved. */
	ret = spi_nor_read_sr(nor, sr_cr);
	if (ret)
		return ret;

	if (sr_written != sr_cr[0]) {
		dev_dbg(nor->dev, "SR: Read back test failed\n");
		return -EIO;
	}

	/* Without a CR read command there is nothing left to verify. */
	if (nor->flags & SNOR_F_NO_READ_CR)
		return 0;

	ret = spi_nor_read_cr(nor, &sr_cr[1]);
	if (ret)
		return ret;

	if (cr != sr_cr[1]) {
		dev_dbg(nor->dev, "CR: read back test failed\n");
		return -EIO;
	}

	return 0;
}

/**
 * spi_nor_write_sr_and_check() - Write the Status Register 1 and ensure that
 * the byte written matches the received value without affecting other bits in
 * the Status Register 1 and 2.
 * @nor:	pointer to a 'struct spi_nor'.
 * @sr1:	byte value to be written to the Status Register.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1)
{
	if (nor->flags & SNOR_F_HAS_16BIT_SR)
		return spi_nor_write_16bit_sr_and_check(nor, sr1);

	return spi_nor_write_sr1_and_check(nor, sr1);
}

/**
 * spi_nor_write_sr2() - Write the Status Register 2 using the
 * SPINOR_OP_WRSR2 (3eh) command.
 * @nor:	pointer to 'struct spi_nor'.
 * @sr2:	pointer to DMA-able buffer to write to the Status Register 2.
 *
 * Return: 0 on success, -errno otherwise.
1104 */ 1105 static int spi_nor_write_sr2(struct spi_nor *nor, const u8 *sr2) 1106 { 1107 int ret; 1108 1109 ret = spi_nor_write_enable(nor); 1110 if (ret) 1111 return ret; 1112 1113 if (nor->spimem) { 1114 struct spi_mem_op op = 1115 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR2, 0), 1116 SPI_MEM_OP_NO_ADDR, 1117 SPI_MEM_OP_NO_DUMMY, 1118 SPI_MEM_OP_DATA_OUT(1, sr2, 0)); 1119 1120 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); 1121 1122 ret = spi_mem_exec_op(nor->spimem, &op); 1123 } else { 1124 ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR2, 1125 sr2, 1); 1126 } 1127 1128 if (ret) { 1129 dev_dbg(nor->dev, "error %d writing SR2\n", ret); 1130 return ret; 1131 } 1132 1133 return spi_nor_wait_till_ready(nor); 1134 } 1135 1136 /** 1137 * spi_nor_read_sr2() - Read the Status Register 2 using the 1138 * SPINOR_OP_RDSR2 (3fh) command. 1139 * @nor: pointer to 'struct spi_nor'. 1140 * @sr2: pointer to DMA-able buffer where the value of the 1141 * Status Register 2 will be written. 1142 * 1143 * Return: 0 on success, -errno otherwise. 1144 */ 1145 static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2) 1146 { 1147 int ret; 1148 1149 if (nor->spimem) { 1150 struct spi_mem_op op = 1151 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR2, 0), 1152 SPI_MEM_OP_NO_ADDR, 1153 SPI_MEM_OP_NO_DUMMY, 1154 SPI_MEM_OP_DATA_IN(1, sr2, 0)); 1155 1156 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); 1157 1158 ret = spi_mem_exec_op(nor->spimem, &op); 1159 } else { 1160 ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR2, sr2, 1161 1); 1162 } 1163 1164 if (ret) 1165 dev_dbg(nor->dev, "error %d reading SR2\n", ret); 1166 1167 return ret; 1168 } 1169 1170 /** 1171 * spi_nor_erase_chip() - Erase the entire flash memory. 1172 * @nor: pointer to 'struct spi_nor'. 1173 * 1174 * Return: 0 on success, -errno otherwise. 
 */
static int spi_nor_erase_chip(struct spi_nor *nor)
{
	int ret;

	dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CHIP_ERASE, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		spi_nor_spimem_setup_op(nor, &op, nor->write_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor,
						       SPINOR_OP_CHIP_ERASE,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d erasing chip\n", ret);

	return ret;
}

/*
 * Look up @opcode in a [from, to] conversion table of @size entries.
 * Returns the mapped opcode, or @opcode unchanged when no entry matches.
 */
static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
{
	size_t i;

	for (i = 0; i < size; i++)
		if (table[i][0] == opcode)
			return table[i][1];

	/* No conversion found, keep input op code. */
	return opcode;
}

/* Map a 3-byte-address read opcode to its 4-byte-address equivalent. */
u8 spi_nor_convert_3to4_read(u8 opcode)
{
	static const u8 spi_nor_3to4_read[][2] = {
		{ SPINOR_OP_READ,	SPINOR_OP_READ_4B },
		{ SPINOR_OP_READ_FAST,	SPINOR_OP_READ_FAST_4B },
		{ SPINOR_OP_READ_1_1_2,	SPINOR_OP_READ_1_1_2_4B },
		{ SPINOR_OP_READ_1_2_2,	SPINOR_OP_READ_1_2_2_4B },
		{ SPINOR_OP_READ_1_1_4,	SPINOR_OP_READ_1_1_4_4B },
		{ SPINOR_OP_READ_1_4_4,	SPINOR_OP_READ_1_4_4_4B },
		{ SPINOR_OP_READ_1_1_8,	SPINOR_OP_READ_1_1_8_4B },
		{ SPINOR_OP_READ_1_8_8,	SPINOR_OP_READ_1_8_8_4B },

		{ SPINOR_OP_READ_1_1_1_DTR,	SPINOR_OP_READ_1_1_1_DTR_4B },
		{ SPINOR_OP_READ_1_2_2_DTR,	SPINOR_OP_READ_1_2_2_DTR_4B },
		{ SPINOR_OP_READ_1_4_4_DTR,	SPINOR_OP_READ_1_4_4_DTR_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
				      ARRAY_SIZE(spi_nor_3to4_read));
}

/* Map a 3-byte-address page program opcode to its 4-byte equivalent. */
static u8 spi_nor_convert_3to4_program(u8 opcode)
{
	static const u8 spi_nor_3to4_program[][2] = {
		{ SPINOR_OP_PP,		SPINOR_OP_PP_4B },
		{ SPINOR_OP_PP_1_1_4,	SPINOR_OP_PP_1_1_4_4B },
		{ SPINOR_OP_PP_1_4_4,	SPINOR_OP_PP_1_4_4_4B },
		{ SPINOR_OP_PP_1_1_8,	SPINOR_OP_PP_1_1_8_4B },
		{ SPINOR_OP_PP_1_8_8,	SPINOR_OP_PP_1_8_8_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
				      ARRAY_SIZE(spi_nor_3to4_program));
}

/* Map a 3-byte-address erase opcode to its 4-byte equivalent. */
static u8 spi_nor_convert_3to4_erase(u8 opcode)
{
	static const u8 spi_nor_3to4_erase[][2] = {
		{ SPINOR_OP_BE_4K,	SPINOR_OP_BE_4K_4B },
		{ SPINOR_OP_BE_32K,	SPINOR_OP_BE_32K_4B },
		{ SPINOR_OP_SE,		SPINOR_OP_SE_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
				      ARRAY_SIZE(spi_nor_3to4_erase));
}

/* True when the flash uses a single uniform erase type for the whole chip. */
static bool spi_nor_has_uniform_erase(const struct spi_nor *nor)
{
	return !!nor->params->erase_map.uniform_erase_type;
}

/*
 * Switch all currently selected read/program/erase opcodes to their
 * 4-byte-address variants, including every erase type of a non-uniform map.
 */
static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
{
	nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
	nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
	nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);

	if (!spi_nor_has_uniform_erase(nor)) {
		struct spi_nor_erase_map *map = &nor->params->erase_map;
		struct spi_nor_erase_type *erase;
		int i;

		for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
			erase = &map->erase_type[i];
			erase->opcode =
				spi_nor_convert_3to4_erase(erase->opcode);
		}
	}
}

/*
 * Take the nor lock and run the controller's optional prepare hook.
 * On hook failure the lock is released before returning, so callers
 * never see a locked nor on error.
 */
int spi_nor_lock_and_prep(struct spi_nor *nor)
{
	int ret = 0;

	mutex_lock(&nor->lock);

	if (nor->controller_ops && nor->controller_ops->prepare) {
		ret = nor->controller_ops->prepare(nor);
		if (ret) {
			mutex_unlock(&nor->lock);
			return ret;
		}
	}
	return ret;
}

/* Counterpart of spi_nor_lock_and_prep(): unprepare hook, then unlock. */
void spi_nor_unlock_and_unprep(struct spi_nor *nor)
{
	if (nor->controller_ops && nor->controller_ops->unprepare)
		nor->controller_ops->unprepare(nor);
	mutex_unlock(&nor->lock);
}

/*
 * Apply the flash-specific address conversion hook, if any (e.g. for
 * parts that remap addresses); otherwise pass @addr through unchanged.
 */
static u32 spi_nor_convert_addr(struct spi_nor *nor, loff_t addr)
{
	if (!nor->params->convert_addr)
		return addr;

	return nor->params->convert_addr(nor, addr);
}

/*
 * Initiate the erasure of a single sector
 */
int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
{
	int i;

	addr = spi_nor_convert_addr(nor, addr);

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(nor->erase_opcode, 0),
				   SPI_MEM_OP_ADDR(nor->addr_width, addr, 0),
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		spi_nor_spimem_setup_op(nor, &op, nor->write_proto);

		return spi_mem_exec_op(nor->spimem, &op);
	} else if (nor->controller_ops->erase) {
		return spi_nor_controller_ops_erase(nor, addr);
	}

	/*
	 * Default implementation, if driver doesn't have a specialized HW
	 * control
	 */
	/* Serialize the address big-endian into the bounce buffer. */
	for (i = nor->addr_width - 1; i >= 0; i--) {
		nor->bouncebuf[i] = addr & 0xff;
		addr >>= 8;
	}

	return spi_nor_controller_ops_write_reg(nor, nor->erase_opcode,
						nor->bouncebuf, nor->addr_width);
}

/**
 * spi_nor_div_by_erase_size() - calculate remainder and update new dividend
 * @erase:	pointer to a structure that describes a SPI NOR erase type
 * @dividend:	dividend value
 * @remainder:	pointer to u32 remainder (will be updated)
 *
 * Return: the result of the division
 */
static u64 spi_nor_div_by_erase_size(const struct spi_nor_erase_type *erase,
				     u64 dividend, u32 *remainder)
{
	/* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
	*remainder = (u32)dividend & erase->size_mask;
	return dividend >> erase->size_shift;
}

/**
 * spi_nor_find_best_erase_type() - find the best erase type for the given
 *				    offset in the serial flash memory and the
 *				    number of bytes to erase.
 * The region in
 * which the address fits is expected to be
 * provided.
 * @map:	the erase map of the SPI NOR
 * @region:	pointer to a structure that describes a SPI NOR erase region
 * @addr:	offset in the serial flash memory
 * @len:	number of bytes to erase
 *
 * Return: a pointer to the best fitted erase type, NULL otherwise.
 */
static const struct spi_nor_erase_type *
spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
			     const struct spi_nor_erase_region *region,
			     u64 addr, u32 len)
{
	const struct spi_nor_erase_type *erase;
	u32 rem;
	int i;
	/* Low offset bits encode which erase types this region supports. */
	u8 erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;

	/*
	 * Erase types are ordered by size, with the smallest erase type at
	 * index 0.
	 */
	/* Walk from largest to smallest so the biggest usable type wins. */
	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
		/* Does the erase region support the tested erase type? */
		if (!(erase_mask & BIT(i)))
			continue;

		erase = &map->erase_type[i];

		/* Alignment is not mandatory for overlaid regions */
		if (region->offset & SNOR_OVERLAID_REGION &&
		    region->size <= len)
			return erase;

		/* Don't erase more than what the user has asked for. */
		if (erase->size > len)
			continue;

		/* The erase start address must be type-size aligned. */
		spi_nor_div_by_erase_size(erase, addr, &rem);
		if (!rem)
			return erase;
	}

	return NULL;
}

/* Flag test: SNOR_LAST_REGION marks the final region of the erase map. */
static u64 spi_nor_region_is_last(const struct spi_nor_erase_region *region)
{
	return region->offset & SNOR_LAST_REGION;
}

/* End offset of @region; flag bits are masked out of the stored offset. */
static u64 spi_nor_region_end(const struct spi_nor_erase_region *region)
{
	return (region->offset & ~SNOR_ERASE_FLAGS_MASK) + region->size;
}

/**
 * spi_nor_region_next() - get the next spi nor region
 * @region:	pointer to a structure that describes a SPI NOR erase region
 *
 * Return: the next spi nor region or NULL if last region.
 */
struct spi_nor_erase_region *
spi_nor_region_next(struct spi_nor_erase_region *region)
{
	if (spi_nor_region_is_last(region))
		return NULL;
	/* Regions are laid out contiguously in the map's array. */
	region++;
	return region;
}

/**
 * spi_nor_find_erase_region() - find the region of the serial flash memory in
 *				 which the offset fits
 * @map:	the erase map of the SPI NOR
 * @addr:	offset in the serial flash memory
 *
 * Return: a pointer to the spi_nor_erase_region struct, ERR_PTR(-errno)
 *	   otherwise.
 */
static struct spi_nor_erase_region *
spi_nor_find_erase_region(const struct spi_nor_erase_map *map, u64 addr)
{
	struct spi_nor_erase_region *region = map->regions;
	u64 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
	u64 region_end = region_start + region->size;

	/* Linear scan of the region array until @addr falls inside one. */
	while (addr < region_start || addr >= region_end) {
		region = spi_nor_region_next(region);
		if (!region)
			return ERR_PTR(-EINVAL);

		region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
		region_end = region_start + region->size;
	}

	return region;
}

/**
 * spi_nor_init_erase_cmd() - initialize an erase command
 * @region:	pointer to a structure that describes a SPI NOR erase region
 * @erase:	pointer to a structure that describes a SPI NOR erase type
 *
 * Return: the pointer to the allocated erase command, ERR_PTR(-errno)
 *	   otherwise.
 */
static struct spi_nor_erase_command *
spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region,
		       const struct spi_nor_erase_type *erase)
{
	struct spi_nor_erase_command *cmd;

	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&cmd->list);
	cmd->opcode = erase->opcode;
	cmd->count = 1;

	/* An overlaid region is erased with one command covering it whole. */
	if (region->offset & SNOR_OVERLAID_REGION)
		cmd->size = region->size;
	else
		cmd->size = erase->size;

	return cmd;
}

/**
 * spi_nor_destroy_erase_cmd_list() - destroy erase command list
 * @erase_list:	list of erase commands
 */
static void spi_nor_destroy_erase_cmd_list(struct list_head *erase_list)
{
	struct spi_nor_erase_command *cmd, *next;

	list_for_each_entry_safe(cmd, next, erase_list, list) {
		list_del(&cmd->list);
		kfree(cmd);
	}
}

/**
 * spi_nor_init_erase_cmd_list() - initialize erase command list
 * @nor:	pointer to a 'struct spi_nor'
 * @erase_list:	list of erase commands to be executed once we validate that the
 *		erase can be performed
 * @addr:	offset in the serial flash memory
 * @len:	number of bytes to erase
 *
 * Builds the list of best fitted erase commands and verifies if the erase can
 * be performed.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
				       struct list_head *erase_list,
				       u64 addr, u32 len)
{
	const struct spi_nor_erase_map *map = &nor->params->erase_map;
	const struct spi_nor_erase_type *erase, *prev_erase = NULL;
	struct spi_nor_erase_region *region;
	struct spi_nor_erase_command *cmd = NULL;
	u64 region_end;
	int ret = -EINVAL;

	region = spi_nor_find_erase_region(map, addr);
	if (IS_ERR(region))
		return PTR_ERR(region);

	region_end = spi_nor_region_end(region);

	while (len) {
		erase = spi_nor_find_best_erase_type(map, region, addr, len);
		if (!erase)
			goto destroy_erase_cmd_list;

		/*
		 * Start a new command when the erase type changes, otherwise
		 * bump the repeat count of the current one. On the first
		 * iteration prev_erase is NULL, so the first condition is
		 * always true and cmd is never dereferenced while NULL.
		 */
		if (prev_erase != erase ||
		    erase->size != cmd->size ||
		    region->offset & SNOR_OVERLAID_REGION) {
			cmd = spi_nor_init_erase_cmd(region, erase);
			if (IS_ERR(cmd)) {
				ret = PTR_ERR(cmd);
				goto destroy_erase_cmd_list;
			}

			list_add_tail(&cmd->list, erase_list);
		} else {
			cmd->count++;
		}

		addr += cmd->size;
		len -= cmd->size;

		/* Move to the next region when we run past this one's end. */
		if (len && addr >= region_end) {
			region = spi_nor_region_next(region);
			if (!region)
				goto destroy_erase_cmd_list;
			region_end = spi_nor_region_end(region);
		}

		prev_erase = erase;
	}

	return 0;

destroy_erase_cmd_list:
	spi_nor_destroy_erase_cmd_list(erase_list);
	return ret;
}

/**
 * spi_nor_erase_multi_sectors() - perform a non-uniform erase
 * @nor:	pointer to a 'struct spi_nor'
 * @addr:	offset in the serial flash memory
 * @len:	number of bytes to erase
 *
 * Build a list of best fitted erase commands and execute it once we validate
 * that the erase can be performed.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
{
	LIST_HEAD(erase_list);
	struct spi_nor_erase_command *cmd, *next;
	int ret;

	ret = spi_nor_init_erase_cmd_list(nor, &erase_list, addr, len);
	if (ret)
		return ret;

	list_for_each_entry_safe(cmd, next, &erase_list, list) {
		nor->erase_opcode = cmd->opcode;
		while (cmd->count) {
			dev_vdbg(nor->dev, "erase_cmd->size = 0x%08x, erase_cmd->opcode = 0x%02x, erase_cmd->count = %u\n",
				 cmd->size, cmd->opcode, cmd->count);

			/* Each sector erase needs its own Write Enable. */
			ret = spi_nor_write_enable(nor);
			if (ret)
				goto destroy_erase_cmd_list;

			ret = spi_nor_erase_sector(nor, addr);
			if (ret)
				goto destroy_erase_cmd_list;

			ret = spi_nor_wait_till_ready(nor);
			if (ret)
				goto destroy_erase_cmd_list;

			addr += cmd->size;
			cmd->count--;
		}
		list_del(&cmd->list);
		kfree(cmd);
	}

	return 0;

destroy_erase_cmd_list:
	/* Free any commands not yet consumed by the loop above. */
	spi_nor_destroy_erase_cmd_list(&erase_list);
	return ret;
}

/*
 * Erase an address range on the nor chip. The address range may extend
 * one or more erase sectors. Return an error if there is a problem erasing.
 */
static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	/*
	 * NOTE(review): addr/len are u32, which assumes erase requests below
	 * 4 GiB — confirm before supporting larger SPI NOR devices.
	 */
	u32 addr, len;
	uint32_t rem;
	int ret;

	dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
		(long long)instr->len);

	/* Uniform layouts require erase-size aligned lengths. */
	if (spi_nor_has_uniform_erase(nor)) {
		div_u64_rem(instr->len, mtd->erasesize, &rem);
		if (rem)
			return -EINVAL;
	}

	addr = instr->addr;
	len = instr->len;

	ret = spi_nor_lock_and_prep(nor);
	if (ret)
		return ret;

	/* whole-chip erase? */
	if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
		unsigned long timeout;

		ret = spi_nor_write_enable(nor);
		if (ret)
			goto erase_err;

		ret = spi_nor_erase_chip(nor);
		if (ret)
			goto erase_err;

		/*
		 * Scale the timeout linearly with the size of the flash, with
		 * a minimum calibrated to an old 2MB flash. We could try to
		 * pull these from CFI/SFDP, but these values should be good
		 * enough for now.
		 */
		timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
			      CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
			      (unsigned long)(mtd->size / SZ_2M));
		ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
		if (ret)
			goto erase_err;

	/* REVISIT in some cases we could speed up erasing large regions
	 * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K. We may have set up
	 * to use "small sector erase", but that's not always optimal.
	 */

	/* "sector"-at-a-time erase */
	} else if (spi_nor_has_uniform_erase(nor)) {
		while (len) {
			ret = spi_nor_write_enable(nor);
			if (ret)
				goto erase_err;

			ret = spi_nor_erase_sector(nor, addr);
			if (ret)
				goto erase_err;

			ret = spi_nor_wait_till_ready(nor);
			if (ret)
				goto erase_err;

			addr += mtd->erasesize;
			len -= mtd->erasesize;
		}

	/* erase multiple sectors */
	} else {
		ret = spi_nor_erase_multi_sectors(nor, addr, len);
		if (ret)
			goto erase_err;
	}

	ret = spi_nor_write_disable(nor);

erase_err:
	spi_nor_unlock_and_unprep(nor);

	return ret;
}

/**
 * spi_nor_sr1_bit6_quad_enable() - Set the Quad Enable BIT(6) in the Status
 * Register 1.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Bit 6 of the Status Register 1 is the QE bit for Macronix like QSPI memories.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor)
{
	int ret;

	ret = spi_nor_read_sr(nor, nor->bouncebuf);
	if (ret)
		return ret;

	/* Nothing to do if the QE bit is already set. */
	if (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)
		return 0;

	nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6;

	return spi_nor_write_sr1_and_check(nor, nor->bouncebuf[0]);
}

/**
 * spi_nor_sr2_bit1_quad_enable() - set the Quad Enable BIT(1) in the Status
 * Register 2.
 * @nor:	pointer to a 'struct spi_nor'.
 *
 * Bit 1 of the Status Register 2 is the QE bit for Spansion like QSPI memories.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor)
{
	int ret;

	/*
	 * When CR cannot be read back, write the 16-bit SR+CR pair with only
	 * the QE bit set and rely on the write-and-check helper to verify.
	 */
	if (nor->flags & SNOR_F_NO_READ_CR)
		return spi_nor_write_16bit_cr_and_check(nor, SR2_QUAD_EN_BIT1);

	ret = spi_nor_read_cr(nor, nor->bouncebuf);
	if (ret)
		return ret;

	/* Nothing to do if the QE bit is already set. */
	if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)
		return 0;

	nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;

	return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]);
}

/**
 * spi_nor_sr2_bit7_quad_enable() - set QE bit in Status Register 2.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Set the Quad Enable (QE) bit in the Status Register 2.
 *
 * This is one of the procedures to set the QE bit described in the SFDP
 * (JESD216 rev B) specification but no manufacturer using this procedure has
 * been identified yet, hence the name of the function.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor)
{
	u8 *sr2 = nor->bouncebuf;
	int ret;
	u8 sr2_written;

	/* Check current Quad Enable bit value. */
	ret = spi_nor_read_sr2(nor, sr2);
	if (ret)
		return ret;
	if (*sr2 & SR2_QUAD_EN_BIT7)
		return 0;

	/* Update the Quad Enable bit. */
	*sr2 |= SR2_QUAD_EN_BIT7;

	ret = spi_nor_write_sr2(nor, sr2);
	if (ret)
		return ret;

	sr2_written = *sr2;

	/* Read back and check it. */
	ret = spi_nor_read_sr2(nor, sr2);
	if (ret)
		return ret;

	if (*sr2 != sr2_written) {
		dev_dbg(nor->dev, "SR2: Read back test failed\n");
		return -EIO;
	}

	return 0;
}

/* All supported manufacturers, searched in order during JEDEC ID probe. */
static const struct spi_nor_manufacturer *manufacturers[] = {
	&spi_nor_atmel,
	&spi_nor_catalyst,
	&spi_nor_eon,
	&spi_nor_esmt,
	&spi_nor_everspin,
	&spi_nor_fujitsu,
	&spi_nor_gigadevice,
	&spi_nor_intel,
	&spi_nor_issi,
	&spi_nor_macronix,
	&spi_nor_micron,
	&spi_nor_st,
	&spi_nor_spansion,
	&spi_nor_sst,
	&spi_nor_winbond,
	&spi_nor_xilinx,
	&spi_nor_xmc,
};

/*
 * Find the flash_info entry whose JEDEC ID prefix matches @id among
 * @nparts entries of @parts, or NULL if none matches.
 */
static const struct flash_info *
spi_nor_search_part_by_id(const struct flash_info *parts, unsigned int nparts,
			  const u8 *id)
{
	unsigned int i;

	for (i = 0; i < nparts; i++) {
		/* Entries with id_len == 0 cannot be matched by ID. */
		if (parts[i].id_len &&
		    !memcmp(parts[i].id, id, parts[i].id_len))
			return &parts[i];
	}

	return NULL;
}

/*
 * Read the JEDEC ID (RDID) and look it up in the manufacturer tables.
 * On success nor->manufacturer is set and the matching flash_info returned;
 * otherwise an ERR_PTR is returned.
 */
static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
{
	const struct flash_info *info;
	u8 *id = nor->bouncebuf;
	unsigned int i;
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_IN(SPI_NOR_MAX_ID_LEN, id, 1));

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id,
						    SPI_NOR_MAX_ID_LEN);
	}
	if (ret) {
		dev_dbg(nor->dev, "error %d reading JEDEC ID\n", ret);
		return ERR_PTR(ret);
	}

	for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
		info = spi_nor_search_part_by_id(manufacturers[i]->parts,
						 manufacturers[i]->nparts,
						 id);
		if (info) {
			nor->manufacturer = manufacturers[i];
			return info;
		}
	}

	dev_err(nor->dev, "unrecognized JEDEC id bytes: %*ph\n",
		SPI_NOR_MAX_ID_LEN, id);
	return ERR_PTR(-ENODEV);
}

/* mtd read callback: loop until @len bytes are read or an error occurs. */
static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	ssize_t ret;

	dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);

	ret = spi_nor_lock_and_prep(nor);
	if (ret)
		return ret;

	while (len) {
		loff_t addr = from;

		addr = spi_nor_convert_addr(nor, addr);

		/* May return a short read; the loop advances and retries. */
		ret = spi_nor_read_data(nor, addr, len, buf);
		if (ret == 0) {
			/* We shouldn't see 0-length reads */
			ret = -EIO;
			goto read_err;
		}
		if (ret < 0)
			goto read_err;

		WARN_ON(ret > len);
		*retlen += ret;
		buf += ret;
		from += ret;
		len -= ret;
	}
	ret = 0;

read_err:
	spi_nor_unlock_and_unprep(nor);
	return ret;
}

/*
 * Write an address range to the nor chip. Data must be written in
 * FLASH_PAGESIZE chunks. The address range may be any size provided
 * it is within the physical boundaries.
 */
static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
			 size_t *retlen, const u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	size_t page_offset, page_remain, i;
	ssize_t ret;
	u32 page_size = nor->params->page_size;

	dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);

	ret = spi_nor_lock_and_prep(nor);
	if (ret)
		return ret;

	for (i = 0; i < len; ) {
		ssize_t written;
		loff_t addr = to + i;

		/*
		 * If page_size is a power of two, the offset can be quickly
		 * calculated with an AND operation. On the other cases we
		 * need to do a modulus operation (more expensive).
		 */
		if (is_power_of_2(page_size)) {
			page_offset = addr & (page_size - 1);
		} else {
			uint64_t aux = addr;

			page_offset = do_div(aux, page_size);
		}
		/* the size of data remaining on the first page */
		page_remain = min_t(size_t, page_size - page_offset, len - i);

		addr = spi_nor_convert_addr(nor, addr);

		ret = spi_nor_write_enable(nor);
		if (ret)
			goto write_err;

		ret = spi_nor_write_data(nor, addr, page_remain, buf + i);
		if (ret < 0)
			goto write_err;
		written = ret;

		ret = spi_nor_wait_till_ready(nor);
		if (ret)
			goto write_err;
		*retlen += written;
		i += written;
	}

write_err:
	spi_nor_unlock_and_unprep(nor);
	return ret;
}

/*
 * Sanity-check the spi_nor object before setup: exactly one of spimem or
 * controller_ops must be provided, and the latter must be complete.
 */
static int spi_nor_check(struct spi_nor *nor)
{
	if (!nor->dev ||
	    (!nor->spimem && !nor->controller_ops) ||
	    (!nor->spimem && nor->controller_ops &&
	     (!nor->controller_ops->read ||
	      !nor->controller_ops->write ||
	      !nor->controller_ops->read_reg ||
	      !nor->controller_ops->write_reg))) {
		pr_err("spi-nor: please fill all the necessary fields!\n");
		return -EINVAL;
	}

	if (nor->spimem && nor->controller_ops) {
		dev_err(nor->dev, "nor->spimem and nor->controller_ops are mutually exclusive, please set just one of them.\n");
		return -EINVAL;
	}

	return 0;
}

/* Fill a read command descriptor with the given timing and protocol. */
void
spi_nor_set_read_settings(struct spi_nor_read_command *read,
			  u8 num_mode_clocks,
			  u8 num_wait_states,
			  u8 opcode,
			  enum spi_nor_protocol proto)
{
	read->num_mode_clocks = num_mode_clocks;
	read->num_wait_states = num_wait_states;
	read->opcode = opcode;
	read->proto = proto;
}

/* Fill a page program command descriptor with opcode and protocol. */
void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode,
			     enum spi_nor_protocol proto)
{
	pp->opcode = opcode;
	pp->proto = proto;
}

/*
 * Translate a single hwcaps bit into the matching command index using a
 * lookup table. Returns -EINVAL when the bit is not in the table.
 */
static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
{
	size_t i;

	for (i = 0; i < size; i++)
		if (table[i][0] == (int)hwcaps)
			return table[i][1];

	return -EINVAL;
}

/* Map a SNOR_HWCAPS_READ* bit to its SNOR_CMD_READ* command index. */
int spi_nor_hwcaps_read2cmd(u32 hwcaps)
{
	static const int hwcaps_read2cmd[][2] = {
		{ SNOR_HWCAPS_READ,		SNOR_CMD_READ },
		{ SNOR_HWCAPS_READ_FAST,	SNOR_CMD_READ_FAST },
		{ SNOR_HWCAPS_READ_1_1_1_DTR,	SNOR_CMD_READ_1_1_1_DTR },
		{ SNOR_HWCAPS_READ_1_1_2,	SNOR_CMD_READ_1_1_2 },
		{ SNOR_HWCAPS_READ_1_2_2,	SNOR_CMD_READ_1_2_2 },
		{ SNOR_HWCAPS_READ_2_2_2,	SNOR_CMD_READ_2_2_2 },
		{ SNOR_HWCAPS_READ_1_2_2_DTR,	SNOR_CMD_READ_1_2_2_DTR },
		{ SNOR_HWCAPS_READ_1_1_4,	SNOR_CMD_READ_1_1_4 },
		{ SNOR_HWCAPS_READ_1_4_4,	SNOR_CMD_READ_1_4_4 },
		{ SNOR_HWCAPS_READ_4_4_4,	SNOR_CMD_READ_4_4_4 },
		{ SNOR_HWCAPS_READ_1_4_4_DTR,	SNOR_CMD_READ_1_4_4_DTR },
		{ SNOR_HWCAPS_READ_1_1_8,	SNOR_CMD_READ_1_1_8 },
		{ SNOR_HWCAPS_READ_1_8_8,	SNOR_CMD_READ_1_8_8 },
		{ SNOR_HWCAPS_READ_8_8_8,	SNOR_CMD_READ_8_8_8 },
		{ SNOR_HWCAPS_READ_1_8_8_DTR,	SNOR_CMD_READ_1_8_8_DTR },
		{ SNOR_HWCAPS_READ_8_8_8_DTR,	SNOR_CMD_READ_8_8_8_DTR },
	};

	return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
				  ARRAY_SIZE(hwcaps_read2cmd));
}

/* Map a SNOR_HWCAPS_PP* bit to its SNOR_CMD_PP* command index. */
static int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
{
	static const int hwcaps_pp2cmd[][2] = {
		{ SNOR_HWCAPS_PP,		SNOR_CMD_PP },
		{ SNOR_HWCAPS_PP_1_1_4,		SNOR_CMD_PP_1_1_4 },
		{ SNOR_HWCAPS_PP_1_4_4,		SNOR_CMD_PP_1_4_4 },
		{ SNOR_HWCAPS_PP_4_4_4,		SNOR_CMD_PP_4_4_4 },
		{ SNOR_HWCAPS_PP_1_1_8,		SNOR_CMD_PP_1_1_8 },
		{ SNOR_HWCAPS_PP_1_8_8,		SNOR_CMD_PP_1_8_8 },
		{ SNOR_HWCAPS_PP_8_8_8,		SNOR_CMD_PP_8_8_8 },
		{ SNOR_HWCAPS_PP_8_8_8_DTR,	SNOR_CMD_PP_8_8_8_DTR },
	};

	return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
				  ARRAY_SIZE(hwcaps_pp2cmd));
}

/**
 * spi_nor_spimem_check_op - check if the operation is supported
 *			     by controller
 *@nor:        pointer to a 'struct spi_nor'
 *@op:         pointer to op template to be checked
 *
 * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
 */
static int spi_nor_spimem_check_op(struct spi_nor *nor,
				   struct spi_mem_op *op)
{
	/*
	 * First test with 4 address bytes. The opcode itself might
	 * be a 3B addressing opcode but we don't care, because
	 * SPI controller implementation should not check the opcode,
	 * but just the sequence.
	 */
	op->addr.nbytes = 4;
	if (!spi_mem_supports_op(nor->spimem, op)) {
		/* Large flashes genuinely need 4-byte addressing. */
		if (nor->params->size > SZ_16M)
			return -EOPNOTSUPP;

		/* If flash size <= 16MB, 3 address bytes are sufficient */
		op->addr.nbytes = 3;
		if (!spi_mem_supports_op(nor->spimem, op))
			return -EOPNOTSUPP;
	}

	return 0;
}

/**
 * spi_nor_spimem_check_readop - check if the read op is supported
 *				 by controller
 *@nor:         pointer to a 'struct spi_nor'
 *@read:        pointer to op template to be checked
 *
 * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
 */
static int spi_nor_spimem_check_readop(struct spi_nor *nor,
				       const struct spi_nor_read_command *read)
{
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(read->opcode, 0),
					  SPI_MEM_OP_ADDR(3, 0, 0),
					  SPI_MEM_OP_DUMMY(1, 0),
					  SPI_MEM_OP_DATA_IN(2, NULL, 0));

	spi_nor_spimem_setup_op(nor, &op, read->proto);

	/* convert the dummy cycles to the number of bytes */
	op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
	if (spi_nor_protocol_is_dtr(nor->read_proto))
		op.dummy.nbytes *= 2;

	return spi_nor_spimem_check_op(nor, &op);
}

/**
 * spi_nor_spimem_check_pp - check if the page program op is supported
 *			     by controller
 *@nor:         pointer to a 'struct spi_nor'
 *@pp:          pointer to op template to be checked
 *
 * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
 */
static int spi_nor_spimem_check_pp(struct spi_nor *nor,
				   const struct spi_nor_pp_command *pp)
{
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(pp->opcode, 0),
					  SPI_MEM_OP_ADDR(3, 0, 0),
					  SPI_MEM_OP_NO_DUMMY,
					  SPI_MEM_OP_DATA_OUT(2, NULL, 0));

	spi_nor_spimem_setup_op(nor, &op, pp->proto);

	return spi_nor_spimem_check_op(nor, &op);
}

/**
 * spi_nor_spimem_adjust_hwcaps - Find optimal Read/Write protocol
 *                                based on SPI controller capabilities
 * @nor:        pointer to a 'struct spi_nor'
 * @hwcaps:     pointer to resulting capabilities after adjusting
 *              according to controller and flash's capability
 */
static void
spi_nor_spimem_adjust_hwcaps(struct spi_nor *nor, u32 *hwcaps)
{
	struct spi_nor_flash_parameter *params = nor->params;
	unsigned int cap;

	/* X-X-X modes are not supported yet, mask them all. */
	*hwcaps &= ~SNOR_HWCAPS_X_X_X;

	/*
	 * If the reset line is broken, we do not want to enter a stateful
	 * mode.
	 */
	if (nor->flags & SNOR_F_BROKEN_RESET)
		*hwcaps &= ~(SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR);

	/* Probe every remaining capability bit against the controller. */
	for (cap = 0; cap < sizeof(*hwcaps) * BITS_PER_BYTE; cap++) {
		int rdidx, ppidx;

		if (!(*hwcaps & BIT(cap)))
			continue;

		rdidx = spi_nor_hwcaps_read2cmd(BIT(cap));
		if (rdidx >= 0 &&
		    spi_nor_spimem_check_readop(nor, &params->reads[rdidx]))
			*hwcaps &= ~BIT(cap);

		ppidx = spi_nor_hwcaps_pp2cmd(BIT(cap));
		if (ppidx < 0)
			continue;

		if (spi_nor_spimem_check_pp(nor,
					    &params->page_programs[ppidx]))
			*hwcaps &= ~BIT(cap);
	}
}

/**
 * spi_nor_set_erase_type() - set a SPI NOR erase type
 * @erase:	pointer to a structure that describes a SPI NOR erase type
 * @size:	the size of the sector/block erased by the erase type
 * @opcode:	the SPI command op code to erase the sector/block
 */
void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size,
			    u8 opcode)
{
	erase->size = size;
	erase->opcode = opcode;
	/* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
	erase->size_shift = ffs(erase->size) - 1;
	erase->size_mask = (1 << erase->size_shift) - 1;
}

/**
 * spi_nor_init_uniform_erase_map() - Initialize uniform erase map
 * @map:		the erase map of the SPI NOR
 * @erase_mask:		bitmask encoding erase types that can erase the entire
 *			flash memory
 * @flash_size:		the spi nor flash memory size
 */
void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
				    u8 erase_mask, u64 flash_size)
{
	/* Offset 0 with erase_mask and SNOR_LAST_REGION bit set */
	map->uniform_region.offset = (erase_mask & SNOR_ERASE_TYPE_MASK) |
				     SNOR_LAST_REGION;
	map->uniform_region.size = flash_size;
	map->regions = &map->uniform_region;
	map->uniform_erase_type = erase_mask;
}

/*
 * Run post-BFPT fixup hooks: manufacturer-level first, then the
 * flash_info-specific hook, stopping at the first error.
 */
int spi_nor_post_bfpt_fixups(struct spi_nor *nor,
			     const struct sfdp_parameter_header *bfpt_header,
			     const struct sfdp_bfpt *bfpt)
{
	int ret;

	if (nor->manufacturer && nor->manufacturer->fixups &&
	    nor->manufacturer->fixups->post_bfpt) {
		ret = nor->manufacturer->fixups->post_bfpt(nor, bfpt_header,
							   bfpt);
		if (ret)
			return ret;
	}

	if (nor->info->fixups && nor->info->fixups->post_bfpt)
		return nor->info->fixups->post_bfpt(nor, bfpt_header, bfpt);

	return 0;
}

/*
 * Pick the fastest read command both sides support: the highest set bit
 * in the shared hwcaps mask wins.
 */
static int spi_nor_select_read(struct spi_nor *nor,
			       u32 shared_hwcaps)
{
	int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1;
	const struct spi_nor_read_command *read;

	if (best_match < 0)
		return -EINVAL;

	cmd = spi_nor_hwcaps_read2cmd(BIT(best_match));
	if (cmd < 0)
		return -EINVAL;

	read = &nor->params->reads[cmd];
	nor->read_opcode = read->opcode;
	nor->read_proto = read->proto;

	/*
	 * In the SPI NOR framework, we don't need to make the difference
	 * between mode clock cycles and wait state clock cycles.
	 * Indeed, the value of the mode clock cycles is used by a QSPI
	 * flash memory to know whether it should enter or leave its 0-4-4
	 * (Continuous Read / XIP) mode.
	 * eXecution In Place is out of the scope of the mtd sub-system.
	 * Hence we choose to merge both mode and wait state clock cycles
	 * into the so called dummy clock cycles.
	 */
	nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
	return 0;
}

/*
 * Pick the fastest page program command both sides support, mirroring
 * the selection logic of spi_nor_select_read().
 */
static int spi_nor_select_pp(struct spi_nor *nor,
			     u32 shared_hwcaps)
{
	int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1;
	const struct spi_nor_pp_command *pp;

	if (best_match < 0)
		return -EINVAL;

	cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match));
	if (cmd < 0)
		return -EINVAL;

	pp = &nor->params->page_programs[cmd];
	nor->program_opcode = pp->opcode;
	nor->write_proto = pp->proto;
	return 0;
}

/**
 * spi_nor_select_uniform_erase() - select optimum uniform erase type
 * @map:		the erase map of the SPI NOR
 * @wanted_size:	the erase type size to search for. Contains the value of
 *			info->sector_size or of the "small sector" size in case
 *			CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is defined.
 *
 * Once the optimum uniform sector erase command is found, disable all the
 * other.
 *
 * Return: pointer to erase type on success, NULL otherwise.
2336 */ 2337 static const struct spi_nor_erase_type * 2338 spi_nor_select_uniform_erase(struct spi_nor_erase_map *map, 2339 const u32 wanted_size) 2340 { 2341 const struct spi_nor_erase_type *tested_erase, *erase = NULL; 2342 int i; 2343 u8 uniform_erase_type = map->uniform_erase_type; 2344 2345 for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) { 2346 if (!(uniform_erase_type & BIT(i))) 2347 continue; 2348 2349 tested_erase = &map->erase_type[i]; 2350 2351 /* 2352 * If the current erase size is the one, stop here: 2353 * we have found the right uniform Sector Erase command. 2354 */ 2355 if (tested_erase->size == wanted_size) { 2356 erase = tested_erase; 2357 break; 2358 } 2359 2360 /* 2361 * Otherwise, the current erase size is still a valid candidate. 2362 * Select the biggest valid candidate. 2363 */ 2364 if (!erase && tested_erase->size) 2365 erase = tested_erase; 2366 /* keep iterating to find the wanted_size */ 2367 } 2368 2369 if (!erase) 2370 return NULL; 2371 2372 /* Disable all other Sector Erase commands. */ 2373 map->uniform_erase_type &= ~SNOR_ERASE_TYPE_MASK; 2374 map->uniform_erase_type |= BIT(erase - map->erase_type); 2375 return erase; 2376 } 2377 2378 static int spi_nor_select_erase(struct spi_nor *nor) 2379 { 2380 struct spi_nor_erase_map *map = &nor->params->erase_map; 2381 const struct spi_nor_erase_type *erase = NULL; 2382 struct mtd_info *mtd = &nor->mtd; 2383 u32 wanted_size = nor->info->sector_size; 2384 int i; 2385 2386 /* 2387 * The previous implementation handling Sector Erase commands assumed 2388 * that the SPI flash memory has an uniform layout then used only one 2389 * of the supported erase sizes for all Sector Erase commands. 2390 * So to be backward compatible, the new implementation also tries to 2391 * manage the SPI flash memory as uniform with a single erase sector 2392 * size, when possible. 
2393 */ 2394 #ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS 2395 /* prefer "small sector" erase if possible */ 2396 wanted_size = 4096u; 2397 #endif 2398 2399 if (spi_nor_has_uniform_erase(nor)) { 2400 erase = spi_nor_select_uniform_erase(map, wanted_size); 2401 if (!erase) 2402 return -EINVAL; 2403 nor->erase_opcode = erase->opcode; 2404 mtd->erasesize = erase->size; 2405 return 0; 2406 } 2407 2408 /* 2409 * For non-uniform SPI flash memory, set mtd->erasesize to the 2410 * maximum erase sector size. No need to set nor->erase_opcode. 2411 */ 2412 for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) { 2413 if (map->erase_type[i].size) { 2414 erase = &map->erase_type[i]; 2415 break; 2416 } 2417 } 2418 2419 if (!erase) 2420 return -EINVAL; 2421 2422 mtd->erasesize = erase->size; 2423 return 0; 2424 } 2425 2426 static int spi_nor_default_setup(struct spi_nor *nor, 2427 const struct spi_nor_hwcaps *hwcaps) 2428 { 2429 struct spi_nor_flash_parameter *params = nor->params; 2430 u32 ignored_mask, shared_mask; 2431 int err; 2432 2433 /* 2434 * Keep only the hardware capabilities supported by both the SPI 2435 * controller and the SPI flash memory. 2436 */ 2437 shared_mask = hwcaps->mask & params->hwcaps.mask; 2438 2439 if (nor->spimem) { 2440 /* 2441 * When called from spi_nor_probe(), all caps are set and we 2442 * need to discard some of them based on what the SPI 2443 * controller actually supports (using spi_mem_supports_op()). 2444 */ 2445 spi_nor_spimem_adjust_hwcaps(nor, &shared_mask); 2446 } else { 2447 /* 2448 * SPI n-n-n protocols are not supported when the SPI 2449 * controller directly implements the spi_nor interface. 2450 * Yet another reason to switch to spi-mem. 2451 */ 2452 ignored_mask = SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR; 2453 if (shared_mask & ignored_mask) { 2454 dev_dbg(nor->dev, 2455 "SPI n-n-n protocols are not supported.\n"); 2456 shared_mask &= ~ignored_mask; 2457 } 2458 } 2459 2460 /* Select the (Fast) Read command. 
*/ 2461 err = spi_nor_select_read(nor, shared_mask); 2462 if (err) { 2463 dev_dbg(nor->dev, 2464 "can't select read settings supported by both the SPI controller and memory.\n"); 2465 return err; 2466 } 2467 2468 /* Select the Page Program command. */ 2469 err = spi_nor_select_pp(nor, shared_mask); 2470 if (err) { 2471 dev_dbg(nor->dev, 2472 "can't select write settings supported by both the SPI controller and memory.\n"); 2473 return err; 2474 } 2475 2476 /* Select the Sector Erase command. */ 2477 err = spi_nor_select_erase(nor); 2478 if (err) { 2479 dev_dbg(nor->dev, 2480 "can't select erase settings supported by both the SPI controller and memory.\n"); 2481 return err; 2482 } 2483 2484 return 0; 2485 } 2486 2487 static int spi_nor_set_addr_width(struct spi_nor *nor) 2488 { 2489 if (nor->addr_width) { 2490 /* already configured from SFDP */ 2491 } else if (nor->read_proto == SNOR_PROTO_8_8_8_DTR) { 2492 /* 2493 * In 8D-8D-8D mode, one byte takes half a cycle to transfer. So 2494 * in this protocol an odd address width cannot be used because 2495 * then the address phase would only span a cycle and a half. 2496 * Half a cycle would be left over. We would then have to start 2497 * the dummy phase in the middle of a cycle and so too the data 2498 * phase, and we will end the transaction with half a cycle left 2499 * over. 2500 * 2501 * Force all 8D-8D-8D flashes to use an address width of 4 to 2502 * avoid this situation. 2503 */ 2504 nor->addr_width = 4; 2505 } else if (nor->info->addr_width) { 2506 nor->addr_width = nor->info->addr_width; 2507 } else { 2508 nor->addr_width = 3; 2509 } 2510 2511 if (nor->addr_width == 3 && nor->params->size > 0x1000000) { 2512 /* enable 4-byte addressing if the device exceeds 16MiB */ 2513 nor->addr_width = 4; 2514 } 2515 2516 if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) { 2517 dev_dbg(nor->dev, "address width is too large: %u\n", 2518 nor->addr_width); 2519 return -EINVAL; 2520 } 2521 2522 /* Set 4byte opcodes when possible. 
*/ 2523 if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES && 2524 !(nor->flags & SNOR_F_HAS_4BAIT)) 2525 spi_nor_set_4byte_opcodes(nor); 2526 2527 return 0; 2528 } 2529 2530 static int spi_nor_setup(struct spi_nor *nor, 2531 const struct spi_nor_hwcaps *hwcaps) 2532 { 2533 int ret; 2534 2535 if (nor->params->setup) { 2536 ret = nor->params->setup(nor, hwcaps); 2537 if (ret) 2538 return ret; 2539 } 2540 2541 return spi_nor_set_addr_width(nor); 2542 } 2543 2544 /** 2545 * spi_nor_manufacturer_init_params() - Initialize the flash's parameters and 2546 * settings based on MFR register and ->default_init() hook. 2547 * @nor: pointer to a 'struct spi_nor'. 2548 */ 2549 static void spi_nor_manufacturer_init_params(struct spi_nor *nor) 2550 { 2551 if (nor->manufacturer && nor->manufacturer->fixups && 2552 nor->manufacturer->fixups->default_init) 2553 nor->manufacturer->fixups->default_init(nor); 2554 2555 if (nor->info->fixups && nor->info->fixups->default_init) 2556 nor->info->fixups->default_init(nor); 2557 } 2558 2559 /** 2560 * spi_nor_no_sfdp_init_params() - Initialize the flash's parameters and 2561 * settings based on nor->info->sfdp_flags. This method should be called only by 2562 * flashes that do not define SFDP tables. If the flash supports SFDP but the 2563 * information is wrong and the settings from this function can not be retrieved 2564 * by parsing SFDP, one should instead use the fixup hooks and update the wrong 2565 * bits. 2566 * @nor: pointer to a 'struct spi_nor'. 
2567 */ 2568 static void spi_nor_no_sfdp_init_params(struct spi_nor *nor) 2569 { 2570 struct spi_nor_flash_parameter *params = nor->params; 2571 struct spi_nor_erase_map *map = ¶ms->erase_map; 2572 const u8 no_sfdp_flags = nor->info->no_sfdp_flags; 2573 u8 i, erase_mask; 2574 2575 if (no_sfdp_flags & SPI_NOR_DUAL_READ) { 2576 params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2; 2577 spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ_1_1_2], 2578 0, 8, SPINOR_OP_READ_1_1_2, 2579 SNOR_PROTO_1_1_2); 2580 } 2581 2582 if (no_sfdp_flags & SPI_NOR_QUAD_READ) { 2583 params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4; 2584 spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ_1_1_4], 2585 0, 8, SPINOR_OP_READ_1_1_4, 2586 SNOR_PROTO_1_1_4); 2587 } 2588 2589 if (no_sfdp_flags & SPI_NOR_OCTAL_READ) { 2590 params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8; 2591 spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ_1_1_8], 2592 0, 8, SPINOR_OP_READ_1_1_8, 2593 SNOR_PROTO_1_1_8); 2594 } 2595 2596 if (no_sfdp_flags & SPI_NOR_OCTAL_DTR_READ) { 2597 params->hwcaps.mask |= SNOR_HWCAPS_READ_8_8_8_DTR; 2598 spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ_8_8_8_DTR], 2599 0, 20, SPINOR_OP_READ_FAST, 2600 SNOR_PROTO_8_8_8_DTR); 2601 } 2602 2603 if (no_sfdp_flags & SPI_NOR_OCTAL_DTR_PP) { 2604 params->hwcaps.mask |= SNOR_HWCAPS_PP_8_8_8_DTR; 2605 /* 2606 * Since xSPI Page Program opcode is backward compatible with 2607 * Legacy SPI, use Legacy SPI opcode there as well. 2608 */ 2609 spi_nor_set_pp_settings(¶ms->page_programs[SNOR_CMD_PP_8_8_8_DTR], 2610 SPINOR_OP_PP, SNOR_PROTO_8_8_8_DTR); 2611 } 2612 2613 /* 2614 * Sector Erase settings. Sort Erase Types in ascending order, with the 2615 * smallest erase size starting at BIT(0). 
2616 */ 2617 erase_mask = 0; 2618 i = 0; 2619 if (no_sfdp_flags & SECT_4K_PMC) { 2620 erase_mask |= BIT(i); 2621 spi_nor_set_erase_type(&map->erase_type[i], 4096u, 2622 SPINOR_OP_BE_4K_PMC); 2623 i++; 2624 } else if (no_sfdp_flags & SECT_4K) { 2625 erase_mask |= BIT(i); 2626 spi_nor_set_erase_type(&map->erase_type[i], 4096u, 2627 SPINOR_OP_BE_4K); 2628 i++; 2629 } 2630 erase_mask |= BIT(i); 2631 spi_nor_set_erase_type(&map->erase_type[i], nor->info->sector_size, 2632 SPINOR_OP_SE); 2633 spi_nor_init_uniform_erase_map(map, erase_mask, params->size); 2634 } 2635 2636 /** 2637 * spi_nor_init_flags() - Initialize NOR flags for settings that are not defined 2638 * in the JESD216 SFDP standard, thus can not be retrieved when parsing SFDP. 2639 * @nor: pointer to a 'struct spi_nor' 2640 */ 2641 static void spi_nor_init_flags(struct spi_nor *nor) 2642 { 2643 struct device_node *np = spi_nor_get_flash_node(nor); 2644 const u16 flags = nor->info->flags; 2645 2646 if (of_property_read_bool(np, "broken-flash-reset")) 2647 nor->flags |= SNOR_F_BROKEN_RESET; 2648 2649 if (flags & SPI_NOR_SWP_IS_VOLATILE) 2650 nor->flags |= SNOR_F_SWP_IS_VOLATILE; 2651 2652 if (flags & SPI_NOR_HAS_LOCK) 2653 nor->flags |= SNOR_F_HAS_LOCK; 2654 2655 if (flags & SPI_NOR_HAS_TB) { 2656 nor->flags |= SNOR_F_HAS_SR_TB; 2657 if (flags & SPI_NOR_TB_SR_BIT6) 2658 nor->flags |= SNOR_F_HAS_SR_TB_BIT6; 2659 } 2660 2661 if (flags & SPI_NOR_4BIT_BP) { 2662 nor->flags |= SNOR_F_HAS_4BIT_BP; 2663 if (flags & SPI_NOR_BP3_SR_BIT6) 2664 nor->flags |= SNOR_F_HAS_SR_BP3_BIT6; 2665 } 2666 2667 if (flags & NO_CHIP_ERASE) 2668 nor->flags |= SNOR_F_NO_OP_CHIP_ERASE; 2669 2670 if (flags & USE_CLSR) 2671 nor->flags |= SNOR_F_USE_CLSR; 2672 2673 if (flags & USE_FSR) 2674 nor->flags |= SNOR_F_USE_FSR; 2675 2676 /* 2677 * Make sure the XSR_RDY flag is set before calling 2678 * spi_nor_wait_till_ready(). Xilinx S3AN share MFR 2679 * with Atmel SPI NOR. 
2680 */ 2681 if (flags & SPI_NOR_XSR_RDY) 2682 nor->flags |= SNOR_F_READY_XSR_RDY; 2683 } 2684 2685 /** 2686 * spi_nor_init_fixup_flags() - Initialize NOR flags for settings that can not 2687 * be discovered by SFDP for this particular flash because the SFDP table that 2688 * indicates this support is not defined in the flash. In case the table for 2689 * this support is defined but has wrong values, one should instead use a 2690 * post_sfdp() hook to set the SNOR_F equivalent flag. 2691 * @nor: pointer to a 'struct spi_nor' 2692 */ 2693 static void spi_nor_init_fixup_flags(struct spi_nor *nor) 2694 { 2695 const u8 fixup_flags = nor->info->fixup_flags; 2696 2697 if (fixup_flags & SPI_NOR_4B_OPCODES) 2698 nor->flags |= SNOR_F_4B_OPCODES; 2699 2700 if (fixup_flags & SPI_NOR_IO_MODE_EN_VOLATILE) 2701 nor->flags |= SNOR_F_IO_MODE_EN_VOLATILE; 2702 } 2703 2704 /** 2705 * spi_nor_late_init_params() - Late initialization of default flash parameters. 2706 * @nor: pointer to a 'struct spi_nor' 2707 * 2708 * Used to initialize flash parameters that are not declared in the JESD216 2709 * SFDP standard, or where SFDP tables are not defined at all. 2710 * Will replace the spi_nor_manufacturer_init_params() method. 2711 */ 2712 static void spi_nor_late_init_params(struct spi_nor *nor) 2713 { 2714 if (nor->manufacturer && nor->manufacturer->fixups && 2715 nor->manufacturer->fixups->late_init) 2716 nor->manufacturer->fixups->late_init(nor); 2717 2718 if (nor->info->fixups && nor->info->fixups->late_init) 2719 nor->info->fixups->late_init(nor); 2720 2721 spi_nor_init_flags(nor); 2722 spi_nor_init_fixup_flags(nor); 2723 2724 /* 2725 * NOR protection support. When locking_ops are not provided, we pick 2726 * the default ones. 
2727 */ 2728 if (nor->flags & SNOR_F_HAS_LOCK && !nor->params->locking_ops) 2729 spi_nor_init_default_locking_ops(nor); 2730 } 2731 2732 /** 2733 * spi_nor_sfdp_init_params_deprecated() - Deprecated way of initializing flash 2734 * parameters and settings based on JESD216 SFDP standard. 2735 * @nor: pointer to a 'struct spi_nor'. 2736 * 2737 * The method has a roll-back mechanism: in case the SFDP parsing fails, the 2738 * legacy flash parameters and settings will be restored. 2739 */ 2740 static void spi_nor_sfdp_init_params_deprecated(struct spi_nor *nor) 2741 { 2742 struct spi_nor_flash_parameter sfdp_params; 2743 2744 memcpy(&sfdp_params, nor->params, sizeof(sfdp_params)); 2745 2746 if (spi_nor_parse_sfdp(nor)) { 2747 memcpy(nor->params, &sfdp_params, sizeof(*nor->params)); 2748 nor->addr_width = 0; 2749 nor->flags &= ~SNOR_F_4B_OPCODES; 2750 } 2751 } 2752 2753 /** 2754 * spi_nor_init_params_deprecated() - Deprecated way of initializing flash 2755 * parameters and settings. 2756 * @nor: pointer to a 'struct spi_nor'. 2757 * 2758 * The method assumes that flash doesn't support SFDP so it initializes flash 2759 * parameters in spi_nor_no_sfdp_init_params() which later on can be overwritten 2760 * when parsing SFDP, if supported. 2761 */ 2762 static void spi_nor_init_params_deprecated(struct spi_nor *nor) 2763 { 2764 spi_nor_no_sfdp_init_params(nor); 2765 2766 spi_nor_manufacturer_init_params(nor); 2767 2768 if (nor->info->no_sfdp_flags & (SPI_NOR_DUAL_READ | 2769 SPI_NOR_QUAD_READ | 2770 SPI_NOR_OCTAL_READ | 2771 SPI_NOR_OCTAL_DTR_READ)) 2772 spi_nor_sfdp_init_params_deprecated(nor); 2773 } 2774 2775 /** 2776 * spi_nor_init_default_params() - Default initialization of flash parameters 2777 * and settings. Done for all flashes, regardless is they define SFDP tables 2778 * or not. 2779 * @nor: pointer to a 'struct spi_nor'. 
2780 */ 2781 static void spi_nor_init_default_params(struct spi_nor *nor) 2782 { 2783 struct spi_nor_flash_parameter *params = nor->params; 2784 const struct flash_info *info = nor->info; 2785 struct device_node *np = spi_nor_get_flash_node(nor); 2786 2787 params->quad_enable = spi_nor_sr2_bit1_quad_enable; 2788 params->set_4byte_addr_mode = spansion_set_4byte_addr_mode; 2789 params->setup = spi_nor_default_setup; 2790 params->otp.org = &info->otp_org; 2791 2792 /* Default to 16-bit Write Status (01h) Command */ 2793 nor->flags |= SNOR_F_HAS_16BIT_SR; 2794 2795 /* Set SPI NOR sizes. */ 2796 params->writesize = 1; 2797 params->size = (u64)info->sector_size * info->n_sectors; 2798 params->page_size = info->page_size; 2799 2800 if (!(info->flags & SPI_NOR_NO_FR)) { 2801 /* Default to Fast Read for DT and non-DT platform devices. */ 2802 params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST; 2803 2804 /* Mask out Fast Read if not requested at DT instantiation. */ 2805 if (np && !of_property_read_bool(np, "m25p,fast-read")) 2806 params->hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST; 2807 } 2808 2809 /* (Fast) Read settings. */ 2810 params->hwcaps.mask |= SNOR_HWCAPS_READ; 2811 spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ], 2812 0, 0, SPINOR_OP_READ, 2813 SNOR_PROTO_1_1_1); 2814 2815 if (params->hwcaps.mask & SNOR_HWCAPS_READ_FAST) 2816 spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ_FAST], 2817 0, 8, SPINOR_OP_READ_FAST, 2818 SNOR_PROTO_1_1_1); 2819 /* Page Program settings. */ 2820 params->hwcaps.mask |= SNOR_HWCAPS_PP; 2821 spi_nor_set_pp_settings(¶ms->page_programs[SNOR_CMD_PP], 2822 SPINOR_OP_PP, SNOR_PROTO_1_1_1); 2823 } 2824 2825 /** 2826 * spi_nor_init_params() - Initialize the flash's parameters and settings. 2827 * @nor: pointer to a 'struct spi_nor'. 2828 * 2829 * The flash parameters and settings are initialized based on a sequence of 2830 * calls that are ordered by priority: 2831 * 2832 * 1/ Default flash parameters initialization. 
The initializations are done 2833 * based on nor->info data: 2834 * spi_nor_info_init_params() 2835 * 2836 * which can be overwritten by: 2837 * 2/ Manufacturer flash parameters initialization. The initializations are 2838 * done based on MFR register, or when the decisions can not be done solely 2839 * based on MFR, by using specific flash_info tweeks, ->default_init(): 2840 * spi_nor_manufacturer_init_params() 2841 * 2842 * which can be overwritten by: 2843 * 3/ SFDP flash parameters initialization. JESD216 SFDP is a standard and 2844 * should be more accurate that the above. 2845 * spi_nor_parse_sfdp() or spi_nor_no_sfdp_init_params() 2846 * 2847 * Please note that there is a ->post_bfpt() fixup hook that can overwrite 2848 * the flash parameters and settings immediately after parsing the Basic 2849 * Flash Parameter Table. 2850 * spi_nor_post_sfdp_fixups() is called after the SFDP tables are parsed. 2851 * It is used to tweak various flash parameters when information provided 2852 * by the SFDP tables are wrong. 2853 * 2854 * which can be overwritten by: 2855 * 4/ Late flash parameters initialization, used to initialize flash 2856 * parameters that are not declared in the JESD216 SFDP standard, or where SFDP 2857 * tables are not defined at all. 2858 * spi_nor_late_init_params() 2859 * 2860 * Return: 0 on success, -errno otherwise. 2861 */ 2862 static int spi_nor_init_params(struct spi_nor *nor) 2863 { 2864 int ret; 2865 2866 nor->params = devm_kzalloc(nor->dev, sizeof(*nor->params), GFP_KERNEL); 2867 if (!nor->params) 2868 return -ENOMEM; 2869 2870 spi_nor_init_default_params(nor); 2871 2872 if (nor->info->parse_sfdp) { 2873 ret = spi_nor_parse_sfdp(nor); 2874 if (ret) { 2875 dev_err(nor->dev, "BFPT parsing failed. 
Please consider using SPI_NOR_SKIP_SFDP when declaring the flash\n"); 2876 return ret; 2877 } 2878 } else if (nor->info->no_sfdp_flags & SPI_NOR_SKIP_SFDP) { 2879 spi_nor_no_sfdp_init_params(nor); 2880 } else { 2881 spi_nor_init_params_deprecated(nor); 2882 } 2883 2884 spi_nor_late_init_params(nor); 2885 2886 return 0; 2887 } 2888 2889 /** spi_nor_octal_dtr_enable() - enable Octal DTR I/O if needed 2890 * @nor: pointer to a 'struct spi_nor' 2891 * @enable: whether to enable or disable Octal DTR 2892 * 2893 * Return: 0 on success, -errno otherwise. 2894 */ 2895 static int spi_nor_octal_dtr_enable(struct spi_nor *nor, bool enable) 2896 { 2897 int ret; 2898 2899 if (!nor->params->octal_dtr_enable) 2900 return 0; 2901 2902 if (!(nor->read_proto == SNOR_PROTO_8_8_8_DTR && 2903 nor->write_proto == SNOR_PROTO_8_8_8_DTR)) 2904 return 0; 2905 2906 if (!(nor->flags & SNOR_F_IO_MODE_EN_VOLATILE)) 2907 return 0; 2908 2909 ret = nor->params->octal_dtr_enable(nor, enable); 2910 if (ret) 2911 return ret; 2912 2913 if (enable) 2914 nor->reg_proto = SNOR_PROTO_8_8_8_DTR; 2915 else 2916 nor->reg_proto = SNOR_PROTO_1_1_1; 2917 2918 return 0; 2919 } 2920 2921 /** 2922 * spi_nor_quad_enable() - enable Quad I/O if needed. 2923 * @nor: pointer to a 'struct spi_nor' 2924 * 2925 * Return: 0 on success, -errno otherwise. 
2926 */ 2927 static int spi_nor_quad_enable(struct spi_nor *nor) 2928 { 2929 if (!nor->params->quad_enable) 2930 return 0; 2931 2932 if (!(spi_nor_get_protocol_width(nor->read_proto) == 4 || 2933 spi_nor_get_protocol_width(nor->write_proto) == 4)) 2934 return 0; 2935 2936 return nor->params->quad_enable(nor); 2937 } 2938 2939 static int spi_nor_init(struct spi_nor *nor) 2940 { 2941 int err; 2942 2943 err = spi_nor_octal_dtr_enable(nor, true); 2944 if (err) { 2945 dev_dbg(nor->dev, "octal mode not supported\n"); 2946 return err; 2947 } 2948 2949 err = spi_nor_quad_enable(nor); 2950 if (err) { 2951 dev_dbg(nor->dev, "quad mode not supported\n"); 2952 return err; 2953 } 2954 2955 /* 2956 * Some SPI NOR flashes are write protected by default after a power-on 2957 * reset cycle, in order to avoid inadvertent writes during power-up. 2958 * Backward compatibility imposes to unlock the entire flash memory 2959 * array at power-up by default. Depending on the kernel configuration 2960 * (1) do nothing, (2) always unlock the entire flash array or (3) 2961 * unlock the entire flash array only when the software write 2962 * protection bits are volatile. The latter is indicated by 2963 * SNOR_F_SWP_IS_VOLATILE. 2964 */ 2965 if (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE) || 2966 (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE) && 2967 nor->flags & SNOR_F_SWP_IS_VOLATILE)) 2968 spi_nor_try_unlock_all(nor); 2969 2970 if (nor->addr_width == 4 && 2971 nor->read_proto != SNOR_PROTO_8_8_8_DTR && 2972 !(nor->flags & SNOR_F_4B_OPCODES)) { 2973 /* 2974 * If the RESET# pin isn't hooked up properly, or the system 2975 * otherwise doesn't perform a reset command in the boot 2976 * sequence, it's impossible to 100% protect against unexpected 2977 * reboots (e.g., crashes). Warn the user (or hopefully, system 2978 * designer) that this is bad. 
2979 */ 2980 WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET, 2981 "enabling reset hack; may not recover from unexpected reboots\n"); 2982 nor->params->set_4byte_addr_mode(nor, true); 2983 } 2984 2985 return 0; 2986 } 2987 2988 /** 2989 * spi_nor_soft_reset() - Perform a software reset 2990 * @nor: pointer to 'struct spi_nor' 2991 * 2992 * Performs a "Soft Reset and Enter Default Protocol Mode" sequence which resets 2993 * the device to its power-on-reset state. This is useful when the software has 2994 * made some changes to device (volatile) registers and needs to reset it before 2995 * shutting down, for example. 2996 * 2997 * Not every flash supports this sequence. The same set of opcodes might be used 2998 * for some other operation on a flash that does not support this. Support for 2999 * this sequence can be discovered via SFDP in the BFPT table. 3000 * 3001 * Return: 0 on success, -errno otherwise. 3002 */ 3003 static void spi_nor_soft_reset(struct spi_nor *nor) 3004 { 3005 struct spi_mem_op op; 3006 int ret; 3007 3008 op = (struct spi_mem_op)SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_SRSTEN, 0), 3009 SPI_MEM_OP_NO_DUMMY, 3010 SPI_MEM_OP_NO_ADDR, 3011 SPI_MEM_OP_NO_DATA); 3012 3013 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); 3014 3015 ret = spi_mem_exec_op(nor->spimem, &op); 3016 if (ret) { 3017 dev_warn(nor->dev, "Software reset failed: %d\n", ret); 3018 return; 3019 } 3020 3021 op = (struct spi_mem_op)SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_SRST, 0), 3022 SPI_MEM_OP_NO_DUMMY, 3023 SPI_MEM_OP_NO_ADDR, 3024 SPI_MEM_OP_NO_DATA); 3025 3026 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); 3027 3028 ret = spi_mem_exec_op(nor->spimem, &op); 3029 if (ret) { 3030 dev_warn(nor->dev, "Software reset failed: %d\n", ret); 3031 return; 3032 } 3033 3034 /* 3035 * Software Reset is not instant, and the delay varies from flash to 3036 * flash. Looking at a few flashes, most range somewhere below 100 3037 * microseconds. So, sleep for a range of 200-400 us. 
3038 */ 3039 usleep_range(SPI_NOR_SRST_SLEEP_MIN, SPI_NOR_SRST_SLEEP_MAX); 3040 } 3041 3042 /* mtd suspend handler */ 3043 static int spi_nor_suspend(struct mtd_info *mtd) 3044 { 3045 struct spi_nor *nor = mtd_to_spi_nor(mtd); 3046 int ret; 3047 3048 /* Disable octal DTR mode if we enabled it. */ 3049 ret = spi_nor_octal_dtr_enable(nor, false); 3050 if (ret) 3051 dev_err(nor->dev, "suspend() failed\n"); 3052 3053 return ret; 3054 } 3055 3056 /* mtd resume handler */ 3057 static void spi_nor_resume(struct mtd_info *mtd) 3058 { 3059 struct spi_nor *nor = mtd_to_spi_nor(mtd); 3060 struct device *dev = nor->dev; 3061 int ret; 3062 3063 /* re-initialize the nor chip */ 3064 ret = spi_nor_init(nor); 3065 if (ret) 3066 dev_err(dev, "resume() failed\n"); 3067 } 3068 3069 static int spi_nor_get_device(struct mtd_info *mtd) 3070 { 3071 struct mtd_info *master = mtd_get_master(mtd); 3072 struct spi_nor *nor = mtd_to_spi_nor(master); 3073 struct device *dev; 3074 3075 if (nor->spimem) 3076 dev = nor->spimem->spi->controller->dev.parent; 3077 else 3078 dev = nor->dev; 3079 3080 if (!try_module_get(dev->driver->owner)) 3081 return -ENODEV; 3082 3083 return 0; 3084 } 3085 3086 static void spi_nor_put_device(struct mtd_info *mtd) 3087 { 3088 struct mtd_info *master = mtd_get_master(mtd); 3089 struct spi_nor *nor = mtd_to_spi_nor(master); 3090 struct device *dev; 3091 3092 if (nor->spimem) 3093 dev = nor->spimem->spi->controller->dev.parent; 3094 else 3095 dev = nor->dev; 3096 3097 module_put(dev->driver->owner); 3098 } 3099 3100 void spi_nor_restore(struct spi_nor *nor) 3101 { 3102 /* restore the addressing mode */ 3103 if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) && 3104 nor->flags & SNOR_F_BROKEN_RESET) 3105 nor->params->set_4byte_addr_mode(nor, false); 3106 3107 if (nor->flags & SNOR_F_SOFT_RESET) 3108 spi_nor_soft_reset(nor); 3109 } 3110 EXPORT_SYMBOL_GPL(spi_nor_restore); 3111 3112 static const struct flash_info *spi_nor_match_id(struct spi_nor *nor, 3113 
const char *name) 3114 { 3115 unsigned int i, j; 3116 3117 for (i = 0; i < ARRAY_SIZE(manufacturers); i++) { 3118 for (j = 0; j < manufacturers[i]->nparts; j++) { 3119 if (!strcmp(name, manufacturers[i]->parts[j].name)) { 3120 nor->manufacturer = manufacturers[i]; 3121 return &manufacturers[i]->parts[j]; 3122 } 3123 } 3124 } 3125 3126 return NULL; 3127 } 3128 3129 static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor, 3130 const char *name) 3131 { 3132 const struct flash_info *info = NULL; 3133 3134 if (name) 3135 info = spi_nor_match_id(nor, name); 3136 /* Try to auto-detect if chip name wasn't specified or not found */ 3137 if (!info) 3138 info = spi_nor_read_id(nor); 3139 if (IS_ERR_OR_NULL(info)) 3140 return ERR_PTR(-ENOENT); 3141 3142 /* 3143 * If caller has specified name of flash model that can normally be 3144 * detected using JEDEC, let's verify it. 3145 */ 3146 if (name && info->id_len) { 3147 const struct flash_info *jinfo; 3148 3149 jinfo = spi_nor_read_id(nor); 3150 if (IS_ERR(jinfo)) { 3151 return jinfo; 3152 } else if (jinfo != info) { 3153 /* 3154 * JEDEC knows better, so overwrite platform ID. We 3155 * can't trust partitions any longer, but we'll let 3156 * mtd apply them anyway, since some partitions may be 3157 * marked read-only, and we don't want to lose that 3158 * information, even if it's not 100% accurate. 
3159 */ 3160 dev_warn(nor->dev, "found %s, expected %s\n", 3161 jinfo->name, info->name); 3162 info = jinfo; 3163 } 3164 } 3165 3166 return info; 3167 } 3168 3169 static void spi_nor_set_mtd_info(struct spi_nor *nor) 3170 { 3171 struct mtd_info *mtd = &nor->mtd; 3172 struct device *dev = nor->dev; 3173 3174 spi_nor_set_mtd_locking_ops(nor); 3175 spi_nor_set_mtd_otp_ops(nor); 3176 3177 mtd->dev.parent = dev; 3178 if (!mtd->name) 3179 mtd->name = dev_name(dev); 3180 mtd->type = MTD_NORFLASH; 3181 mtd->flags = MTD_CAP_NORFLASH; 3182 if (nor->info->flags & SPI_NOR_NO_ERASE) 3183 mtd->flags |= MTD_NO_ERASE; 3184 mtd->writesize = nor->params->writesize; 3185 mtd->writebufsize = nor->params->page_size; 3186 mtd->size = nor->params->size; 3187 mtd->_erase = spi_nor_erase; 3188 mtd->_read = spi_nor_read; 3189 /* Might be already set by some SST flashes. */ 3190 if (!mtd->_write) 3191 mtd->_write = spi_nor_write; 3192 mtd->_suspend = spi_nor_suspend; 3193 mtd->_resume = spi_nor_resume; 3194 mtd->_get_device = spi_nor_get_device; 3195 mtd->_put_device = spi_nor_put_device; 3196 } 3197 3198 int spi_nor_scan(struct spi_nor *nor, const char *name, 3199 const struct spi_nor_hwcaps *hwcaps) 3200 { 3201 const struct flash_info *info; 3202 struct device *dev = nor->dev; 3203 struct mtd_info *mtd = &nor->mtd; 3204 int ret; 3205 int i; 3206 3207 ret = spi_nor_check(nor); 3208 if (ret) 3209 return ret; 3210 3211 /* Reset SPI protocol for all commands. */ 3212 nor->reg_proto = SNOR_PROTO_1_1_1; 3213 nor->read_proto = SNOR_PROTO_1_1_1; 3214 nor->write_proto = SNOR_PROTO_1_1_1; 3215 3216 /* 3217 * We need the bounce buffer early to read/write registers when going 3218 * through the spi-mem layer (buffers have to be DMA-able). 3219 * For spi-mem drivers, we'll reallocate a new buffer if 3220 * nor->params->page_size turns out to be greater than PAGE_SIZE (which 3221 * shouldn't happen before long since NOR pages are usually less 3222 * than 1KB) after spi_nor_scan() returns. 
3223 */ 3224 nor->bouncebuf_size = PAGE_SIZE; 3225 nor->bouncebuf = devm_kmalloc(dev, nor->bouncebuf_size, 3226 GFP_KERNEL); 3227 if (!nor->bouncebuf) 3228 return -ENOMEM; 3229 3230 info = spi_nor_get_flash_info(nor, name); 3231 if (IS_ERR(info)) 3232 return PTR_ERR(info); 3233 3234 nor->info = info; 3235 3236 mutex_init(&nor->lock); 3237 3238 /* Init flash parameters based on flash_info struct and SFDP */ 3239 ret = spi_nor_init_params(nor); 3240 if (ret) 3241 return ret; 3242 3243 /* 3244 * Configure the SPI memory: 3245 * - select op codes for (Fast) Read, Page Program and Sector Erase. 3246 * - set the number of dummy cycles (mode cycles + wait states). 3247 * - set the SPI protocols for register and memory accesses. 3248 * - set the address width. 3249 */ 3250 ret = spi_nor_setup(nor, hwcaps); 3251 if (ret) 3252 return ret; 3253 3254 /* Send all the required SPI flash commands to initialize device */ 3255 ret = spi_nor_init(nor); 3256 if (ret) 3257 return ret; 3258 3259 /* No mtd_info fields should be used up to this point. 
*/ 3260 spi_nor_set_mtd_info(nor); 3261 3262 dev_info(dev, "%s (%lld Kbytes)\n", info->name, 3263 (long long)mtd->size >> 10); 3264 3265 dev_dbg(dev, 3266 "mtd .name = %s, .size = 0x%llx (%lldMiB), " 3267 ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n", 3268 mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20), 3269 mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions); 3270 3271 if (mtd->numeraseregions) 3272 for (i = 0; i < mtd->numeraseregions; i++) 3273 dev_dbg(dev, 3274 "mtd.eraseregions[%d] = { .offset = 0x%llx, " 3275 ".erasesize = 0x%.8x (%uKiB), " 3276 ".numblocks = %d }\n", 3277 i, (long long)mtd->eraseregions[i].offset, 3278 mtd->eraseregions[i].erasesize, 3279 mtd->eraseregions[i].erasesize / 1024, 3280 mtd->eraseregions[i].numblocks); 3281 return 0; 3282 } 3283 EXPORT_SYMBOL_GPL(spi_nor_scan); 3284 3285 static int spi_nor_create_read_dirmap(struct spi_nor *nor) 3286 { 3287 struct spi_mem_dirmap_info info = { 3288 .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0), 3289 SPI_MEM_OP_ADDR(nor->addr_width, 0, 0), 3290 SPI_MEM_OP_DUMMY(nor->read_dummy, 0), 3291 SPI_MEM_OP_DATA_IN(0, NULL, 0)), 3292 .offset = 0, 3293 .length = nor->params->size, 3294 }; 3295 struct spi_mem_op *op = &info.op_tmpl; 3296 3297 spi_nor_spimem_setup_op(nor, op, nor->read_proto); 3298 3299 /* convert the dummy cycles to the number of bytes */ 3300 op->dummy.nbytes = (nor->read_dummy * op->dummy.buswidth) / 8; 3301 if (spi_nor_protocol_is_dtr(nor->read_proto)) 3302 op->dummy.nbytes *= 2; 3303 3304 /* 3305 * Since spi_nor_spimem_setup_op() only sets buswidth when the number 3306 * of data bytes is non-zero, the data buswidth won't be set here. So, 3307 * do it explicitly. 
3308 */ 3309 op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto); 3310 3311 nor->dirmap.rdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem, 3312 &info); 3313 return PTR_ERR_OR_ZERO(nor->dirmap.rdesc); 3314 } 3315 3316 static int spi_nor_create_write_dirmap(struct spi_nor *nor) 3317 { 3318 struct spi_mem_dirmap_info info = { 3319 .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0), 3320 SPI_MEM_OP_ADDR(nor->addr_width, 0, 0), 3321 SPI_MEM_OP_NO_DUMMY, 3322 SPI_MEM_OP_DATA_OUT(0, NULL, 0)), 3323 .offset = 0, 3324 .length = nor->params->size, 3325 }; 3326 struct spi_mem_op *op = &info.op_tmpl; 3327 3328 if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second) 3329 op->addr.nbytes = 0; 3330 3331 spi_nor_spimem_setup_op(nor, op, nor->write_proto); 3332 3333 /* 3334 * Since spi_nor_spimem_setup_op() only sets buswidth when the number 3335 * of data bytes is non-zero, the data buswidth won't be set here. So, 3336 * do it explicitly. 3337 */ 3338 op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto); 3339 3340 nor->dirmap.wdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem, 3341 &info); 3342 return PTR_ERR_OR_ZERO(nor->dirmap.wdesc); 3343 } 3344 3345 static int spi_nor_probe(struct spi_mem *spimem) 3346 { 3347 struct spi_device *spi = spimem->spi; 3348 struct flash_platform_data *data = dev_get_platdata(&spi->dev); 3349 struct spi_nor *nor; 3350 /* 3351 * Enable all caps by default. The core will mask them after 3352 * checking what's really supported using spi_mem_supports_op(). 
3353 */ 3354 const struct spi_nor_hwcaps hwcaps = { .mask = SNOR_HWCAPS_ALL }; 3355 char *flash_name; 3356 int ret; 3357 3358 nor = devm_kzalloc(&spi->dev, sizeof(*nor), GFP_KERNEL); 3359 if (!nor) 3360 return -ENOMEM; 3361 3362 nor->spimem = spimem; 3363 nor->dev = &spi->dev; 3364 spi_nor_set_flash_node(nor, spi->dev.of_node); 3365 3366 spi_mem_set_drvdata(spimem, nor); 3367 3368 if (data && data->name) 3369 nor->mtd.name = data->name; 3370 3371 if (!nor->mtd.name) 3372 nor->mtd.name = spi_mem_get_name(spimem); 3373 3374 /* 3375 * For some (historical?) reason many platforms provide two different 3376 * names in flash_platform_data: "name" and "type". Quite often name is 3377 * set to "m25p80" and then "type" provides a real chip name. 3378 * If that's the case, respect "type" and ignore a "name". 3379 */ 3380 if (data && data->type) 3381 flash_name = data->type; 3382 else if (!strcmp(spi->modalias, "spi-nor")) 3383 flash_name = NULL; /* auto-detect */ 3384 else 3385 flash_name = spi->modalias; 3386 3387 ret = spi_nor_scan(nor, flash_name, &hwcaps); 3388 if (ret) 3389 return ret; 3390 3391 /* 3392 * None of the existing parts have > 512B pages, but let's play safe 3393 * and add this logic so that if anyone ever adds support for such 3394 * a NOR we don't end up with buffer overflows. 3395 */ 3396 if (nor->params->page_size > PAGE_SIZE) { 3397 nor->bouncebuf_size = nor->params->page_size; 3398 devm_kfree(nor->dev, nor->bouncebuf); 3399 nor->bouncebuf = devm_kmalloc(nor->dev, 3400 nor->bouncebuf_size, 3401 GFP_KERNEL); 3402 if (!nor->bouncebuf) 3403 return -ENOMEM; 3404 } 3405 3406 ret = spi_nor_create_read_dirmap(nor); 3407 if (ret) 3408 return ret; 3409 3410 ret = spi_nor_create_write_dirmap(nor); 3411 if (ret) 3412 return ret; 3413 3414 return mtd_device_register(&nor->mtd, data ? data->parts : NULL, 3415 data ? 
data->nr_parts : 0); 3416 } 3417 3418 static int spi_nor_remove(struct spi_mem *spimem) 3419 { 3420 struct spi_nor *nor = spi_mem_get_drvdata(spimem); 3421 3422 spi_nor_restore(nor); 3423 3424 /* Clean up MTD stuff. */ 3425 return mtd_device_unregister(&nor->mtd); 3426 } 3427 3428 static void spi_nor_shutdown(struct spi_mem *spimem) 3429 { 3430 struct spi_nor *nor = spi_mem_get_drvdata(spimem); 3431 3432 spi_nor_restore(nor); 3433 } 3434 3435 /* 3436 * Do NOT add to this array without reading the following: 3437 * 3438 * Historically, many flash devices are bound to this driver by their name. But 3439 * since most of these flash are compatible to some extent, and their 3440 * differences can often be differentiated by the JEDEC read-ID command, we 3441 * encourage new users to add support to the spi-nor library, and simply bind 3442 * against a generic string here (e.g., "jedec,spi-nor"). 3443 * 3444 * Many flash names are kept here in this list to keep them available 3445 * as module aliases for existing platforms. 3446 */ 3447 static const struct spi_device_id spi_nor_dev_ids[] = { 3448 /* 3449 * Allow non-DT platform devices to bind to the "spi-nor" modalias, and 3450 * hack around the fact that the SPI core does not provide uevent 3451 * matching for .of_match_table 3452 */ 3453 {"spi-nor"}, 3454 3455 /* 3456 * Entries not used in DTs that should be safe to drop after replacing 3457 * them with "spi-nor" in platform data. 3458 */ 3459 {"s25sl064a"}, {"w25x16"}, {"m25p10"}, {"m25px64"}, 3460 3461 /* 3462 * Entries that were used in DTs without "jedec,spi-nor" fallback and 3463 * should be kept for backward compatibility. 
3464 */ 3465 {"at25df321a"}, {"at25df641"}, {"at26df081a"}, 3466 {"mx25l4005a"}, {"mx25l1606e"}, {"mx25l6405d"}, {"mx25l12805d"}, 3467 {"mx25l25635e"},{"mx66l51235l"}, 3468 {"n25q064"}, {"n25q128a11"}, {"n25q128a13"}, {"n25q512a"}, 3469 {"s25fl256s1"}, {"s25fl512s"}, {"s25sl12801"}, {"s25fl008k"}, 3470 {"s25fl064k"}, 3471 {"sst25vf040b"},{"sst25vf016b"},{"sst25vf032b"},{"sst25wf040"}, 3472 {"m25p40"}, {"m25p80"}, {"m25p16"}, {"m25p32"}, 3473 {"m25p64"}, {"m25p128"}, 3474 {"w25x80"}, {"w25x32"}, {"w25q32"}, {"w25q32dw"}, 3475 {"w25q80bl"}, {"w25q128"}, {"w25q256"}, 3476 3477 /* Flashes that can't be detected using JEDEC */ 3478 {"m25p05-nonjedec"}, {"m25p10-nonjedec"}, {"m25p20-nonjedec"}, 3479 {"m25p40-nonjedec"}, {"m25p80-nonjedec"}, {"m25p16-nonjedec"}, 3480 {"m25p32-nonjedec"}, {"m25p64-nonjedec"}, {"m25p128-nonjedec"}, 3481 3482 /* Everspin MRAMs (non-JEDEC) */ 3483 { "mr25h128" }, /* 128 Kib, 40 MHz */ 3484 { "mr25h256" }, /* 256 Kib, 40 MHz */ 3485 { "mr25h10" }, /* 1 Mib, 40 MHz */ 3486 { "mr25h40" }, /* 4 Mib, 40 MHz */ 3487 3488 { }, 3489 }; 3490 MODULE_DEVICE_TABLE(spi, spi_nor_dev_ids); 3491 3492 static const struct of_device_id spi_nor_of_table[] = { 3493 /* 3494 * Generic compatibility for SPI NOR that can be identified by the 3495 * JEDEC READ ID opcode (0x9F). Use this, if possible. 3496 */ 3497 { .compatible = "jedec,spi-nor" }, 3498 { /* sentinel */ }, 3499 }; 3500 MODULE_DEVICE_TABLE(of, spi_nor_of_table); 3501 3502 /* 3503 * REVISIT: many of these chips have deep power-down modes, which 3504 * should clearly be entered on suspend() to minimize power use. 3505 * And also when they're otherwise idle... 
3506 */ 3507 static struct spi_mem_driver spi_nor_driver = { 3508 .spidrv = { 3509 .driver = { 3510 .name = "spi-nor", 3511 .of_match_table = spi_nor_of_table, 3512 .dev_groups = spi_nor_sysfs_groups, 3513 }, 3514 .id_table = spi_nor_dev_ids, 3515 }, 3516 .probe = spi_nor_probe, 3517 .remove = spi_nor_remove, 3518 .shutdown = spi_nor_shutdown, 3519 }; 3520 module_spi_mem_driver(spi_nor_driver); 3521 3522 MODULE_LICENSE("GPL v2"); 3523 MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>"); 3524 MODULE_AUTHOR("Mike Lavender"); 3525 MODULE_DESCRIPTION("framework for SPI NOR"); 3526