// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2005, Intec Automation Inc.
 * Copyright (C) 2014, Freescale Semiconductor, Inc.
 */

#include <linux/bitfield.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/mtd/spi-nor.h>

#include "core.h"

#define SFDP_PARAM_HEADER_ID(p)	(((p)->id_msb << 8) | (p)->id_lsb)
#define SFDP_PARAM_HEADER_PTP(p) \
	(((p)->parameter_table_pointer[2] << 16) | \
	 ((p)->parameter_table_pointer[1] <<  8) | \
	 ((p)->parameter_table_pointer[0] <<  0))

#define SFDP_BFPT_ID		0xff00	/* Basic Flash Parameter Table */
#define SFDP_SECTOR_MAP_ID	0xff81	/* Sector Map Table */
#define SFDP_4BAIT_ID		0xff84	/* 4-byte Address Instruction Table */
#define SFDP_PROFILE1_ID	0xff05	/* xSPI Profile 1.0 table. */
#define SFDP_SCCR_MAP_ID	0xff87	/*
					 * Status, Control and Configuration
					 * Register Map.
					 */

#define SFDP_SIGNATURE		0x50444653U

struct sfdp_header {
	u32		signature; /* 0x50444653U <=> "SFDP" */
	u8		minor;
	u8		major;
	u8		nph; /* 0-based number of parameter headers */
	u8		unused;

	/* Basic Flash Parameter Table. */
	struct sfdp_parameter_header	bfpt_header;
};

/* Fast Read settings. */
struct sfdp_bfpt_read {
	/* The Fast Read x-y-z hardware capability in params->hwcaps.mask. */
	u32			hwcaps;

	/*
	 * The <supported_bit> bit in <supported_dword> BFPT DWORD tells us
	 * whether the Fast Read x-y-z command is supported.
	 */
	u32			supported_dword;
	u32			supported_bit;

	/*
	 * The half-word at offset <settings_shift> in <settings_dword> BFPT
	 * DWORD encodes the op code, the number of mode clocks and the number
	 * of wait states to be used by the Fast Read x-y-z command.
	 */
	u32			settings_dword;
	u32			settings_shift;

	/* The SPI protocol for this Fast Read x-y-z command. */
	enum spi_nor_protocol	proto;
};

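/*
 * Worked example (hypothetical half-word, not taken from any real flash):
 * a Fast Read settings half-word of 0x0b08 would decode as
 *
 *	opcode          = (0x0b08 >> 8) & 0xff;	-> 0x0b
 *	num_mode_clocks = (0x0b08 >> 5) & 0x07;	-> 0
 *	num_wait_states = (0x0b08 >> 0) & 0x1f;	-> 8
 *
 * i.e. a Fast Read command using op code 0Bh, no mode clocks and 8 dummy
 * cycles. See spi_nor_set_read_settings_from_bfpt() below for the actual
 * decoding.
 */
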
struct sfdp_bfpt_erase {
	/*
	 * The half-word at offset <shift> in DWORD <dword> encodes the
	 * op code and erase sector size to be used by Sector Erase commands.
	 */
	u32			dword;
	u32			shift;
};

#define SMPT_CMD_ADDRESS_LEN_MASK		GENMASK(23, 22)
#define SMPT_CMD_ADDRESS_LEN_0			(0x0UL << 22)
#define SMPT_CMD_ADDRESS_LEN_3			(0x1UL << 22)
#define SMPT_CMD_ADDRESS_LEN_4			(0x2UL << 22)
#define SMPT_CMD_ADDRESS_LEN_USE_CURRENT	(0x3UL << 22)

#define SMPT_CMD_READ_DUMMY_MASK		GENMASK(19, 16)
#define SMPT_CMD_READ_DUMMY_SHIFT		16
#define SMPT_CMD_READ_DUMMY(_cmd) \
	(((_cmd) & SMPT_CMD_READ_DUMMY_MASK) >> SMPT_CMD_READ_DUMMY_SHIFT)
#define SMPT_CMD_READ_DUMMY_IS_VARIABLE		0xfUL

#define SMPT_CMD_READ_DATA_MASK			GENMASK(31, 24)
#define SMPT_CMD_READ_DATA_SHIFT		24
#define SMPT_CMD_READ_DATA(_cmd) \
	(((_cmd) & SMPT_CMD_READ_DATA_MASK) >> SMPT_CMD_READ_DATA_SHIFT)

#define SMPT_CMD_OPCODE_MASK			GENMASK(15, 8)
#define SMPT_CMD_OPCODE_SHIFT			8
#define SMPT_CMD_OPCODE(_cmd) \
	(((_cmd) & SMPT_CMD_OPCODE_MASK) >> SMPT_CMD_OPCODE_SHIFT)

#define SMPT_MAP_REGION_COUNT_MASK		GENMASK(23, 16)
#define SMPT_MAP_REGION_COUNT_SHIFT		16
#define SMPT_MAP_REGION_COUNT(_header) \
	((((_header) & SMPT_MAP_REGION_COUNT_MASK) >> \
	  SMPT_MAP_REGION_COUNT_SHIFT) + 1)

#define SMPT_MAP_ID_MASK			GENMASK(15, 8)
#define SMPT_MAP_ID_SHIFT			8
#define SMPT_MAP_ID(_header) \
	(((_header) & SMPT_MAP_ID_MASK) >> SMPT_MAP_ID_SHIFT)

#define SMPT_MAP_REGION_SIZE_MASK		GENMASK(31, 8)
#define SMPT_MAP_REGION_SIZE_SHIFT		8
#define SMPT_MAP_REGION_SIZE(_region) \
	(((((_region) & SMPT_MAP_REGION_SIZE_MASK) >> \
	   SMPT_MAP_REGION_SIZE_SHIFT) + 1) * 256)

#define SMPT_MAP_REGION_ERASE_TYPE_MASK		GENMASK(3, 0)
#define SMPT_MAP_REGION_ERASE_TYPE(_region) \
	((_region) & SMPT_MAP_REGION_ERASE_TYPE_MASK)

#define SMPT_DESC_TYPE_MAP			BIT(1)
#define SMPT_DESC_END				BIT(0)

#define SFDP_4BAIT_DWORD_MAX	2

struct sfdp_4bait {
	/* The hardware capability. */
	u32		hwcaps;

	/*
	 * The <supported_bit> bit in DWORD1 of the 4BAIT tells us whether
	 * the associated 4-byte address op code is supported.
	 */
	u32		supported_bit;
};

/**
 * spi_nor_read_raw() - raw read of serial flash memory. The read_opcode,
 *			addr_width and read_dummy members of the 'struct
 *			spi_nor' should be set beforehand.
 * @nor:	pointer to a 'struct spi_nor'
 * @addr:	offset in the serial flash memory
 * @len:	number of bytes to read
 * @buf:	buffer where the data is copied into (dma-safe memory)
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_read_raw(struct spi_nor *nor, u32 addr, size_t len, u8 *buf)
{
	ssize_t ret;

	while (len) {
		ret = spi_nor_read_data(nor, addr, len, buf);
		if (ret < 0)
			return ret;
		if (!ret || ret > len)
			return -EIO;

		buf += ret;
		addr += ret;
		len -= ret;
	}
	return 0;
}

/**
 * spi_nor_read_sfdp() - read Serial Flash Discoverable Parameters.
 * @nor:	pointer to a 'struct spi_nor'
 * @addr:	offset in the SFDP area to start reading data from
 * @len:	number of bytes to read
 * @buf:	buffer where the SFDP data are copied into (dma-safe memory)
 *
 * Whatever the actual numbers of bytes for address and dummy cycles are
 * for (Fast) Read commands, the Read SFDP (5Ah) instruction is always
 * followed by a 3-byte address and 8 dummy clock cycles.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_read_sfdp(struct spi_nor *nor, u32 addr,
			     size_t len, void *buf)
{
	u8 addr_width, read_opcode, read_dummy;
	int ret;

	read_opcode = nor->read_opcode;
	addr_width = nor->addr_width;
	read_dummy = nor->read_dummy;

	nor->read_opcode = SPINOR_OP_RDSFDP;
	nor->addr_width = 3;
	nor->read_dummy = 8;

	ret = spi_nor_read_raw(nor, addr, len, buf);

	nor->read_opcode = read_opcode;
	nor->addr_width = addr_width;
	nor->read_dummy = read_dummy;

	return ret;
}

/**
 * spi_nor_read_sfdp_dma_unsafe() - read Serial Flash Discoverable Parameters.
 * @nor:	pointer to a 'struct spi_nor'
 * @addr:	offset in the SFDP area to start reading data from
 * @len:	number of bytes to read
 * @buf:	buffer where the SFDP data are copied into
 *
 * Wrap spi_nor_read_sfdp() using a kmalloc'ed bounce buffer since @buf is not
 * guaranteed to be dma-safe.
 *
 * Return: -ENOMEM if kmalloc() fails, the return code of spi_nor_read_sfdp()
 *	   otherwise.
 */
static int spi_nor_read_sfdp_dma_unsafe(struct spi_nor *nor, u32 addr,
					size_t len, void *buf)
{
	void *dma_safe_buf;
	int ret;

	dma_safe_buf = kmalloc(len, GFP_KERNEL);
	if (!dma_safe_buf)
		return -ENOMEM;

	ret = spi_nor_read_sfdp(nor, addr, len, dma_safe_buf);
	memcpy(buf, dma_safe_buf, len);
	kfree(dma_safe_buf);

	return ret;
}

static void
spi_nor_set_read_settings_from_bfpt(struct spi_nor_read_command *read,
				    u16 half,
				    enum spi_nor_protocol proto)
{
	read->num_mode_clocks = (half >> 5) & 0x07;
	read->num_wait_states = (half >> 0) & 0x1f;
	read->opcode = (half >> 8) & 0xff;
	read->proto = proto;
}

static const struct sfdp_bfpt_read sfdp_bfpt_reads[] = {
	/* Fast Read 1-1-2 */
	{
		SNOR_HWCAPS_READ_1_1_2,
		BFPT_DWORD(1), BIT(16),	/* Supported bit */
		BFPT_DWORD(4), 0,	/* Settings */
		SNOR_PROTO_1_1_2,
	},

	/* Fast Read 1-2-2 */
	{
		SNOR_HWCAPS_READ_1_2_2,
		BFPT_DWORD(1), BIT(20),	/* Supported bit */
		BFPT_DWORD(4), 16,	/* Settings */
		SNOR_PROTO_1_2_2,
	},

	/* Fast Read 2-2-2 */
	{
		SNOR_HWCAPS_READ_2_2_2,
		BFPT_DWORD(5), BIT(0),	/* Supported bit */
		BFPT_DWORD(6), 16,	/* Settings */
		SNOR_PROTO_2_2_2,
	},

	/* Fast Read 1-1-4 */
	{
		SNOR_HWCAPS_READ_1_1_4,
		BFPT_DWORD(1), BIT(22),	/* Supported bit */
		BFPT_DWORD(3), 16,	/* Settings */
		SNOR_PROTO_1_1_4,
	},

	/* Fast Read 1-4-4 */
	{
		SNOR_HWCAPS_READ_1_4_4,
		BFPT_DWORD(1), BIT(21),	/* Supported bit */
		BFPT_DWORD(3), 0,	/* Settings */
		SNOR_PROTO_1_4_4,
	},

	/* Fast Read 4-4-4 */
	{
		SNOR_HWCAPS_READ_4_4_4,
		BFPT_DWORD(5), BIT(4),	/* Supported bit */
		BFPT_DWORD(7), 16,	/* Settings */
		SNOR_PROTO_4_4_4,
	},
};

static const struct sfdp_bfpt_erase sfdp_bfpt_erases[] = {
	/* Erase Type 1 in DWORD8 bits[15:0] */
	{BFPT_DWORD(8), 0},

	/* Erase Type 2 in DWORD8 bits[31:16] */
	{BFPT_DWORD(8), 16},

	/* Erase Type 3 in DWORD9 bits[15:0] */
	{BFPT_DWORD(9), 0},

	/* Erase Type 4 in DWORD9 bits[31:16] */
	{BFPT_DWORD(9), 16},
};

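/*
 * Worked example (hypothetical half-word, not taken from any real flash):
 * an Erase Type half-word of 0x200c would decode as
 *
 *	erasesize = 1U << (0x200c & 0xff);	-> 1 << 0x0c = 4 KiB
 *	opcode    = (0x200c >> 8) & 0xff;	-> 0x20
 *
 * i.e. a 4 KiB Sector Erase using op code 20h. A half-word of 0 means the
 * Erase Type is not supported. See the Sector Erase handling in
 * spi_nor_parse_bfpt() below for the actual decoding.
 */
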
/**
 * spi_nor_set_erase_settings_from_bfpt() - set erase type settings from BFPT
 * @erase:	pointer to a structure that describes a SPI NOR erase type
 * @size:	the size of the sector/block erased by the erase type
 * @opcode:	the SPI command op code to erase the sector/block
 * @i:		erase type index as sorted in the Basic Flash Parameter Table
 *
 * The supported Erase Types will be sorted at init in ascending order, with
 * the smallest Erase Type size being the first member in the erase_type array
 * of the spi_nor_erase_map structure. Save the Erase Type index as sorted in
 * the Basic Flash Parameter Table since it will be used later on to
 * synchronize with the supported Erase Types defined in SFDP optional tables.
 */
static void
spi_nor_set_erase_settings_from_bfpt(struct spi_nor_erase_type *erase,
				     u32 size, u8 opcode, u8 i)
{
	erase->idx = i;
	spi_nor_set_erase_type(erase, size, opcode);
}

/**
 * spi_nor_map_cmp_erase_type() - compare the map's erase types by size
 * @l:	member in the left half of the map's erase_type array
 * @r:	member in the right half of the map's erase_type array
 *
 * Comparison function used in the sort() call to sort in ascending order the
 * map's erase types, the smallest erase type size being the first member in
 * the sorted erase_type array.
 *
 * Return: the result of @l->size - @r->size
 */
static int spi_nor_map_cmp_erase_type(const void *l, const void *r)
{
	const struct spi_nor_erase_type *left = l, *right = r;

	return left->size - right->size;
}

/**
 * spi_nor_sort_erase_mask() - sort erase mask
 * @map:	the erase map of the SPI NOR
 * @erase_mask:	the erase type mask to be sorted
 *
 * Replicate the sort done for the map's erase types in BFPT: sort the erase
 * mask in ascending order with the smallest erase type size starting from
 * BIT(0) in the sorted erase mask.
 *
 * Return: sorted erase mask.
 */
static u8 spi_nor_sort_erase_mask(struct spi_nor_erase_map *map, u8 erase_mask)
{
	struct spi_nor_erase_type *erase_type = map->erase_type;
	int i;
	u8 sorted_erase_mask = 0;

	if (!erase_mask)
		return 0;

	/* Replicate the sort done for the map's erase types. */
	for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
		if (erase_type[i].size && erase_mask & BIT(erase_type[i].idx))
			sorted_erase_mask |= BIT(i);

	return sorted_erase_mask;
}

/**
 * spi_nor_regions_sort_erase_types() - sort erase types in each region
 * @map:	the erase map of the SPI NOR
 *
 * Function assumes that the erase types defined in the erase map are already
 * sorted in ascending order, with the smallest erase type size being the first
 * member in the erase_type array. It replicates the sort done for the map's
 * erase types. Each region's erase bitmask will indicate which erase types are
 * supported from the sorted erase types defined in the erase map.
 * Sort all the regions' erase types at init in order to speed up the process
 * of finding the best erase command at runtime.
 */
static void spi_nor_regions_sort_erase_types(struct spi_nor_erase_map *map)
{
	struct spi_nor_erase_region *region = map->regions;
	u8 region_erase_mask, sorted_erase_mask;

	while (region) {
		region_erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;

		sorted_erase_mask = spi_nor_sort_erase_mask(map,
							    region_erase_mask);

		/* Overwrite erase mask. */
		region->offset = (region->offset & ~SNOR_ERASE_TYPE_MASK) |
				 sorted_erase_mask;

		region = spi_nor_region_next(region);
	}
}

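/*
 * Worked example (hypothetical sizes): assume the BFPT advertises only two
 * Erase Types, Type 1 (idx 0) of 64 KiB and Type 2 (idx 1) of 4 KiB. After
 * the sort, erase_type[0] holds the 4 KiB type (idx 1) and erase_type[1]
 * holds the 64 KiB type (idx 0). An unsorted mask of BIT(0) (BFPT Type 1,
 * 64 KiB) therefore becomes BIT(1) once run through
 * spi_nor_sort_erase_mask() above.
 */
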
/**
 * spi_nor_parse_bfpt() - read and parse the Basic Flash Parameter Table.
 * @nor:		pointer to a 'struct spi_nor'
 * @bfpt_header:	pointer to the 'struct sfdp_parameter_header' describing
 *			the Basic Flash Parameter Table length and version
 * @params:		pointer to the 'struct spi_nor_flash_parameter' to be
 *			filled
 *
 * The Basic Flash Parameter Table is the main and only mandatory table as
 * defined by the SFDP (JESD216) specification.
 * It provides us with the total size (memory density) of the data array and
 * the number of address bytes for Fast Read, Page Program and Sector Erase
 * commands.
 * For Fast READ commands, it also gives the number of mode clock cycles and
 * wait states (regrouped in the number of dummy clock cycles) for each
 * supported instruction op code.
 * For Page Program, the page size is now available since JESD216 rev A,
 * however the supported instruction op codes are still not provided.
 * For Sector Erase commands, this table stores the supported instruction op
 * codes and the associated sector sizes.
 * Finally, the Quad Enable Requirements (QER) are also available since JESD216
 * rev A. The QER bits encode the manufacturer dependent procedure to be
 * executed to set the Quad Enable (QE) bit in some internal register of the
 * Quad SPI memory. Indeed the QE bit, when it exists, must be set before
 * sending any Quad SPI command to the memory. Actually, setting the QE bit
 * tells the memory to reassign its WP# and HOLD#/RESET# pins to functions IO2
 * and IO3 hence enabling 4 (Quad) I/O lines.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_parse_bfpt(struct spi_nor *nor,
			      const struct sfdp_parameter_header *bfpt_header,
			      struct spi_nor_flash_parameter *params)
{
	struct spi_nor_erase_map *map = &params->erase_map;
	struct spi_nor_erase_type *erase_type = map->erase_type;
	struct sfdp_bfpt bfpt;
	size_t len;
	int i, cmd, err;
	u32 addr, val;
	u16 half;
	u8 erase_mask;

	/* JESD216 Basic Flash Parameter Table length is at least 9 DWORDs. */
	if (bfpt_header->length < BFPT_DWORD_MAX_JESD216)
		return -EINVAL;

	/* Read the Basic Flash Parameter Table. */
	len = min_t(size_t, sizeof(bfpt),
		    bfpt_header->length * sizeof(u32));
	addr = SFDP_PARAM_HEADER_PTP(bfpt_header);
	memset(&bfpt, 0, sizeof(bfpt));
	err = spi_nor_read_sfdp_dma_unsafe(nor, addr, len, &bfpt);
	if (err < 0)
		return err;

	/* Fix endianness of the BFPT DWORDs. */
	le32_to_cpu_array(bfpt.dwords, BFPT_DWORD_MAX);

	/* Number of address bytes. */
	switch (bfpt.dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) {
	case BFPT_DWORD1_ADDRESS_BYTES_3_ONLY:
	case BFPT_DWORD1_ADDRESS_BYTES_3_OR_4:
		nor->addr_width = 3;
		break;

	case BFPT_DWORD1_ADDRESS_BYTES_4_ONLY:
		nor->addr_width = 4;
		break;

	default:
		break;
	}

	/* Flash Memory Density (in bits). */
	val = bfpt.dwords[BFPT_DWORD(2)];
	if (val & BIT(31)) {
		val &= ~BIT(31);

		/*
		 * Prevent overflows on params->size. Anyway, a NOR of 2^64
		 * bits is unlikely to exist so this error probably means
		 * the BFPT we are reading is corrupted/wrong.
		 */
		if (val > 63)
			return -EINVAL;

		params->size = 1ULL << val;
	} else {
		params->size = val + 1;
	}
	params->size >>= 3; /* Convert to bytes. */

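	/*
	 * Worked examples (hypothetical DWORD2 values): with BIT(31) clear,
	 * the DWORD holds the density in bits minus 1, so 0x03ffffff means
	 * 0x04000000 bits = 8 MiB. With BIT(31) set, the low bits give N for
	 * a density of 2^N bits, so 0x80000021 means 2^33 bits = 1 GiB.
	 */
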
	/* Fast Read settings. */
	for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_reads); i++) {
		const struct sfdp_bfpt_read *rd = &sfdp_bfpt_reads[i];
		struct spi_nor_read_command *read;

		if (!(bfpt.dwords[rd->supported_dword] & rd->supported_bit)) {
			params->hwcaps.mask &= ~rd->hwcaps;
			continue;
		}

		params->hwcaps.mask |= rd->hwcaps;
		cmd = spi_nor_hwcaps_read2cmd(rd->hwcaps);
		read = &params->reads[cmd];
		half = bfpt.dwords[rd->settings_dword] >> rd->settings_shift;
		spi_nor_set_read_settings_from_bfpt(read, half, rd->proto);
	}

	/*
	 * Sector Erase settings. Reinitialize the uniform erase map using the
	 * Erase Types defined in the BFPT.
	 */
	erase_mask = 0;
	memset(&params->erase_map, 0, sizeof(params->erase_map));
	for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_erases); i++) {
		const struct sfdp_bfpt_erase *er = &sfdp_bfpt_erases[i];
		u32 erasesize;
		u8 opcode;

		half = bfpt.dwords[er->dword] >> er->shift;
		erasesize = half & 0xff;

		/* erasesize == 0 means this Erase Type is not supported. */
		if (!erasesize)
			continue;

		erasesize = 1U << erasesize;
		opcode = (half >> 8) & 0xff;
		erase_mask |= BIT(i);
		spi_nor_set_erase_settings_from_bfpt(&erase_type[i], erasesize,
						     opcode, i);
	}
	spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
	/*
	 * Sort all the map's Erase Types in ascending order with the smallest
	 * erase size being the first member in the erase_type array.
	 */
	sort(erase_type, SNOR_ERASE_TYPE_MAX, sizeof(erase_type[0]),
	     spi_nor_map_cmp_erase_type, NULL);
	/*
	 * Sort the erase types in the uniform region in order to update the
	 * uniform_erase_type bitmask. The bitmask will be used later on when
	 * selecting the uniform erase.
	 */
	spi_nor_regions_sort_erase_types(map);
	map->uniform_erase_type = map->uniform_region.offset &
				  SNOR_ERASE_TYPE_MASK;

	/* Stop here if not JESD216 rev A or later. */
	if (bfpt_header->length == BFPT_DWORD_MAX_JESD216)
		return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt,
						params);

	/* Page size: this field specifies 'N' so the page size = 2^N bytes. */
	val = bfpt.dwords[BFPT_DWORD(11)];
	val &= BFPT_DWORD11_PAGE_SIZE_MASK;
	val >>= BFPT_DWORD11_PAGE_SIZE_SHIFT;
	params->page_size = 1U << val;

	/* Quad Enable Requirements. */
	switch (bfpt.dwords[BFPT_DWORD(15)] & BFPT_DWORD15_QER_MASK) {
	case BFPT_DWORD15_QER_NONE:
		params->quad_enable = NULL;
		break;

	case BFPT_DWORD15_QER_SR2_BIT1_BUGGY:
		/*
		 * Writing only one byte to the Status Register has the
		 * side-effect of clearing Status Register 2.
		 */
	case BFPT_DWORD15_QER_SR2_BIT1_NO_RD:
		/*
		 * Read Configuration Register (35h) instruction is not
		 * supported.
		 */
		nor->flags |= SNOR_F_HAS_16BIT_SR | SNOR_F_NO_READ_CR;
		params->quad_enable = spi_nor_sr2_bit1_quad_enable;
		break;

	case BFPT_DWORD15_QER_SR1_BIT6:
		nor->flags &= ~SNOR_F_HAS_16BIT_SR;
		params->quad_enable = spi_nor_sr1_bit6_quad_enable;
		break;

	case BFPT_DWORD15_QER_SR2_BIT7:
		nor->flags &= ~SNOR_F_HAS_16BIT_SR;
		params->quad_enable = spi_nor_sr2_bit7_quad_enable;
		break;

	case BFPT_DWORD15_QER_SR2_BIT1:
		/*
		 * JESD216 rev B or later does not specify whether writing only
		 * one byte to the Status Register clears Status Register 2 or
		 * not, so let's be cautious and keep the default assumption of
		 * a 16-bit Write Status (01h) command.
		 */
		nor->flags |= SNOR_F_HAS_16BIT_SR;

		params->quad_enable = spi_nor_sr2_bit1_quad_enable;
		break;

	default:
		dev_dbg(nor->dev, "BFPT QER reserved value used\n");
		break;
	}

	/* Soft Reset support. */
	if (bfpt.dwords[BFPT_DWORD(16)] & BFPT_DWORD16_SWRST_EN_RST)
		nor->flags |= SNOR_F_SOFT_RESET;

	/* Stop here if not JESD216 rev C or later. */
	if (bfpt_header->length == BFPT_DWORD_MAX_JESD216B)
		return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt,
						params);
	/* 8D-8D-8D command extension. */
	switch (bfpt.dwords[BFPT_DWORD(18)] & BFPT_DWORD18_CMD_EXT_MASK) {
	case BFPT_DWORD18_CMD_EXT_REP:
		nor->cmd_ext_type = SPI_NOR_EXT_REPEAT;
		break;

	case BFPT_DWORD18_CMD_EXT_INV:
		nor->cmd_ext_type = SPI_NOR_EXT_INVERT;
		break;

	case BFPT_DWORD18_CMD_EXT_RES:
		dev_dbg(nor->dev, "Reserved command extension used\n");
		break;

	case BFPT_DWORD18_CMD_EXT_16B:
		dev_dbg(nor->dev, "16-bit opcodes not supported\n");
		return -EOPNOTSUPP;
	}

	return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt, params);
}

/**
 * spi_nor_smpt_addr_width() - return the address width used in the
 *			       configuration detection command.
 * @nor:	pointer to a 'struct spi_nor'
 * @settings:	configuration detection command descriptor, dword1
 */
static u8 spi_nor_smpt_addr_width(const struct spi_nor *nor, const u32 settings)
{
	switch (settings & SMPT_CMD_ADDRESS_LEN_MASK) {
	case SMPT_CMD_ADDRESS_LEN_0:
		return 0;
	case SMPT_CMD_ADDRESS_LEN_3:
		return 3;
	case SMPT_CMD_ADDRESS_LEN_4:
		return 4;
	case SMPT_CMD_ADDRESS_LEN_USE_CURRENT:
	default:
		return nor->addr_width;
	}
}

/**
 * spi_nor_smpt_read_dummy() - return the configuration detection command read
 *			       latency, in clock cycles.
 * @nor:	pointer to a 'struct spi_nor'
 * @settings:	configuration detection command descriptor, dword1
 *
 * Return: the number of dummy cycles for an SMPT read
 */
static u8 spi_nor_smpt_read_dummy(const struct spi_nor *nor, const u32 settings)
{
	u8 read_dummy = SMPT_CMD_READ_DUMMY(settings);

	if (read_dummy == SMPT_CMD_READ_DUMMY_IS_VARIABLE)
		return nor->read_dummy;
	return read_dummy;
}

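/*
 * Worked example (hypothetical command descriptor, not from any real flash):
 * a detection command descriptor DWORD of 0x01886500 would decode as
 *
 *	SMPT_CMD_OPCODE()      -> 0x65	(read op code)
 *	SMPT_CMD_READ_DUMMY()  -> 8	(dummy clock cycles)
 *	address length bits    -> SMPT_CMD_ADDRESS_LEN_4 (4-byte address)
 *	SMPT_CMD_READ_DATA()   -> 0x01	(mask applied to the byte read back)
 *
 * i.e. spi_nor_get_map_in_use() below would issue op code 65h with a 4-byte
 * address and 8 dummy cycles, read one byte and test its bit 0 to build the
 * configuration ID.
 */
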
/**
 * spi_nor_get_map_in_use() - get the configuration map in use
 * @nor:	pointer to a 'struct spi_nor'
 * @smpt:	pointer to the sector map parameter table
 * @smpt_len:	sector map parameter table length
 *
 * Return: pointer to the map in use, ERR_PTR(-errno) otherwise.
 */
static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
					 u8 smpt_len)
{
	const u32 *ret;
	u8 *buf;
	u32 addr;
	int err;
	u8 i;
	u8 addr_width, read_opcode, read_dummy;
	u8 read_data_mask, map_id;

	/* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
	buf = kmalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	addr_width = nor->addr_width;
	read_dummy = nor->read_dummy;
	read_opcode = nor->read_opcode;

	map_id = 0;
	/* Determine if there are any optional Detection Command Descriptors */
	for (i = 0; i < smpt_len; i += 2) {
		if (smpt[i] & SMPT_DESC_TYPE_MAP)
			break;

		read_data_mask = SMPT_CMD_READ_DATA(smpt[i]);
		nor->addr_width = spi_nor_smpt_addr_width(nor, smpt[i]);
		nor->read_dummy = spi_nor_smpt_read_dummy(nor, smpt[i]);
		nor->read_opcode = SMPT_CMD_OPCODE(smpt[i]);
		addr = smpt[i + 1];

		err = spi_nor_read_raw(nor, addr, 1, buf);
		if (err) {
			ret = ERR_PTR(err);
			goto out;
		}

		/*
		 * Build an index value that is used to select the Sector Map
		 * Configuration that is currently in use.
		 */
		map_id = map_id << 1 | !!(*buf & read_data_mask);
	}

	/*
	 * If command descriptors are provided, they always precede map
	 * descriptors in the table. There is no need to start the iteration
	 * over the smpt array all over again.
	 *
	 * Find the matching configuration map.
	 */
	ret = ERR_PTR(-EINVAL);
	while (i < smpt_len) {
		if (SMPT_MAP_ID(smpt[i]) == map_id) {
			ret = smpt + i;
			break;
		}

		/*
		 * If there are no more configuration map descriptors and no
		 * configuration ID matched the configuration identifier, the
		 * sector address map is unknown.
		 */
		if (smpt[i] & SMPT_DESC_END)
			break;

		/* increment the table index to the next map */
		i += SMPT_MAP_REGION_COUNT(smpt[i]) + 1;
	}

	/* fall through */
out:
	kfree(buf);
	nor->addr_width = addr_width;
	nor->read_dummy = read_dummy;
	nor->read_opcode = read_opcode;
	return ret;
}

static void spi_nor_region_mark_end(struct spi_nor_erase_region *region)
{
	region->offset |= SNOR_LAST_REGION;
}

static void spi_nor_region_mark_overlay(struct spi_nor_erase_region *region)
{
	region->offset |= SNOR_OVERLAID_REGION;
}

/**
 * spi_nor_region_check_overlay() - set overlay bit when the region is overlaid
 * @region:	pointer to a structure that describes a SPI NOR erase region
 * @erase:	pointer to a structure that describes a SPI NOR erase type
 * @erase_type:	erase type bitmask
 */
static void
spi_nor_region_check_overlay(struct spi_nor_erase_region *region,
			     const struct spi_nor_erase_type *erase,
			     const u8 erase_type)
{
	int i;

	for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
		if (!(erase_type & BIT(i)))
			continue;
		if (region->size & erase[i].size_mask) {
			spi_nor_region_mark_overlay(region);
			return;
		}
	}
}

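/*
 * Worked example (hypothetical region DWORD, not from any real flash): a
 * map descriptor region DWORD of 0x00ffff04 would decode as
 *
 *	SMPT_MAP_REGION_SIZE()       -> (0x00ffff + 1) * 256 = 16 MiB
 *	SMPT_MAP_REGION_ERASE_TYPE() -> BIT(2), i.e. only BFPT Erase Type 3
 *
 * i.e. a 16 MiB region that can only be erased with the third BFPT Erase
 * Type. These values feed spi_nor_init_non_uniform_erase_map() below.
 */
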
/**
 * spi_nor_init_non_uniform_erase_map() - initialize the non-uniform erase map
 * @nor:	pointer to a 'struct spi_nor'
 * @params:	pointer to a duplicate 'struct spi_nor_flash_parameter' that is
 *		used for storing SFDP parsed data
 * @smpt:	pointer to the sector map parameter table
 *
 * Return: 0 on success, -errno otherwise.
 */
static int
spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
				   struct spi_nor_flash_parameter *params,
				   const u32 *smpt)
{
	struct spi_nor_erase_map *map = &params->erase_map;
	struct spi_nor_erase_type *erase = map->erase_type;
	struct spi_nor_erase_region *region;
	u64 offset;
	u32 region_count;
	int i, j;
	u8 uniform_erase_type, save_uniform_erase_type;
	u8 erase_type, regions_erase_type;

	region_count = SMPT_MAP_REGION_COUNT(*smpt);
	/*
	 * The regions will be freed when the driver detaches from the
	 * device.
	 */
	region = devm_kcalloc(nor->dev, region_count, sizeof(*region),
			      GFP_KERNEL);
	if (!region)
		return -ENOMEM;
	map->regions = region;

	uniform_erase_type = 0xff;
	regions_erase_type = 0;
	offset = 0;
	/* Populate regions. */
	for (i = 0; i < region_count; i++) {
		j = i + 1; /* index for the region dword */
		region[i].size = SMPT_MAP_REGION_SIZE(smpt[j]);
		erase_type = SMPT_MAP_REGION_ERASE_TYPE(smpt[j]);
		region[i].offset = offset | erase_type;

		spi_nor_region_check_overlay(&region[i], erase, erase_type);

		/*
		 * Save the erase types that are supported in all regions and
		 * can erase the entire flash memory.
		 */
		uniform_erase_type &= erase_type;

		/*
		 * regions_erase_type mask will indicate all the erase types
		 * supported in this configuration map.
		 */
		regions_erase_type |= erase_type;

		offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) +
			 region[i].size;
	}

	save_uniform_erase_type = map->uniform_erase_type;
	map->uniform_erase_type = spi_nor_sort_erase_mask(map,
							  uniform_erase_type);

	if (!regions_erase_type) {
		/*
		 * Roll back to the previous uniform_erase_type mask, SMPT is
		 * broken.
		 */
		map->uniform_erase_type = save_uniform_erase_type;
		return -EINVAL;
	}

	/*
	 * BFPT advertises all the erase types supported by all the possible
	 * map configurations. Mask out the erase types that are not supported
	 * by the current map configuration.
	 */
	for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
		if (!(regions_erase_type & BIT(erase[i].idx)))
			spi_nor_set_erase_type(&erase[i], 0, 0xFF);

	spi_nor_region_mark_end(&region[i - 1]);

	return 0;
}

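/*
 * Illustration of spi_nor_init_non_uniform_erase_map() above (hypothetical
 * two-region map): if region 0 supports Erase Types {1, 2} and region 1
 * supports Erase Types {2, 3}, then regions_erase_type ends up as {1, 2, 3}
 * while uniform_erase_type is the intersection {2}, i.e. the only Erase
 * Type that can erase the whole device uniformly.
 */
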
/**
 * spi_nor_parse_smpt() - parse Sector Map Parameter Table
 * @nor:	pointer to a 'struct spi_nor'
 * @smpt_header: sector map parameter table header
 * @params:	pointer to a duplicate 'struct spi_nor_flash_parameter'
 *		that is used for storing SFDP parsed data
 *
 * This table is optional, but when available, we parse it to identify the
 * location and size of sectors within the main data array of the flash memory
 * device and to identify which Erase Types are supported by each sector.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_parse_smpt(struct spi_nor *nor,
			      const struct sfdp_parameter_header *smpt_header,
			      struct spi_nor_flash_parameter *params)
{
	const u32 *sector_map;
	u32 *smpt;
	size_t len;
	u32 addr;
	int ret;

	/* Read the Sector Map Parameter Table. */
	len = smpt_header->length * sizeof(*smpt);
	smpt = kmalloc(len, GFP_KERNEL);
	if (!smpt)
		return -ENOMEM;

	addr = SFDP_PARAM_HEADER_PTP(smpt_header);
	ret = spi_nor_read_sfdp(nor, addr, len, smpt);
	if (ret)
		goto out;

	/* Fix endianness of the SMPT DWORDs. */
	le32_to_cpu_array(smpt, smpt_header->length);

	sector_map = spi_nor_get_map_in_use(nor, smpt, smpt_header->length);
	if (IS_ERR(sector_map)) {
		ret = PTR_ERR(sector_map);
		goto out;
	}

	ret = spi_nor_init_non_uniform_erase_map(nor, params, sector_map);
	if (ret)
		goto out;

	spi_nor_regions_sort_erase_types(&params->erase_map);
	/* fall through */
out:
	kfree(smpt);
	return ret;
}

/**
 * spi_nor_parse_4bait() - parse the 4-Byte Address Instruction Table
 * @nor:		pointer to a 'struct spi_nor'.
 * @param_header:	pointer to the 'struct sfdp_parameter_header' describing
 *			the 4-Byte Address Instruction Table length and version.
 * @params:		pointer to the 'struct spi_nor_flash_parameter' to be
 *			filled.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_parse_4bait(struct spi_nor *nor,
			       const struct sfdp_parameter_header *param_header,
			       struct spi_nor_flash_parameter *params)
{
	static const struct sfdp_4bait reads[] = {
		{ SNOR_HWCAPS_READ,		BIT(0) },
		{ SNOR_HWCAPS_READ_FAST,	BIT(1) },
		{ SNOR_HWCAPS_READ_1_1_2,	BIT(2) },
		{ SNOR_HWCAPS_READ_1_2_2,	BIT(3) },
		{ SNOR_HWCAPS_READ_1_1_4,	BIT(4) },
		{ SNOR_HWCAPS_READ_1_4_4,	BIT(5) },
		{ SNOR_HWCAPS_READ_1_1_1_DTR,	BIT(13) },
		{ SNOR_HWCAPS_READ_1_2_2_DTR,	BIT(14) },
		{ SNOR_HWCAPS_READ_1_4_4_DTR,	BIT(15) },
	};
	static const struct sfdp_4bait programs[] = {
		{ SNOR_HWCAPS_PP,		BIT(6) },
		{ SNOR_HWCAPS_PP_1_1_4,		BIT(7) },
		{ SNOR_HWCAPS_PP_1_4_4,		BIT(8) },
	};
	static const struct sfdp_4bait erases[SNOR_ERASE_TYPE_MAX] = {
		{ 0u /* not used */,		BIT(9) },
		{ 0u /* not used */,		BIT(10) },
		{ 0u /* not used */,		BIT(11) },
		{ 0u /* not used */,		BIT(12) },
	};
	struct spi_nor_pp_command *params_pp = params->page_programs;
	struct spi_nor_erase_map *map = &params->erase_map;
	struct spi_nor_erase_type *erase_type = map->erase_type;
	u32 *dwords;
	size_t len;
	u32 addr, discard_hwcaps, read_hwcaps, pp_hwcaps, erase_mask;
	int i, ret;

	if (param_header->major != SFDP_JESD216_MAJOR ||
	    param_header->length < SFDP_4BAIT_DWORD_MAX)
		return -EINVAL;

	/* Read the 4-byte Address Instruction Table. */
	len = sizeof(*dwords) * SFDP_4BAIT_DWORD_MAX;

	/* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
	dwords = kmalloc(len, GFP_KERNEL);
	if (!dwords)
		return -ENOMEM;

	addr = SFDP_PARAM_HEADER_PTP(param_header);
	ret = spi_nor_read_sfdp(nor, addr, len, dwords);
	if (ret)
		goto out;

	/* Fix endianness of the 4BAIT DWORDs. */
	le32_to_cpu_array(dwords, SFDP_4BAIT_DWORD_MAX);

	/*
	 * Compute the subset of (Fast) Read commands for which the 4-byte
	 * version is supported.
	 */
	discard_hwcaps = 0;
	read_hwcaps = 0;
	for (i = 0; i < ARRAY_SIZE(reads); i++) {
		const struct sfdp_4bait *read = &reads[i];

		discard_hwcaps |= read->hwcaps;
		if ((params->hwcaps.mask & read->hwcaps) &&
		    (dwords[0] & read->supported_bit))
			read_hwcaps |= read->hwcaps;
	}

	/*
	 * Compute the subset of Page Program commands for which the 4-byte
	 * version is supported.
	 */
	pp_hwcaps = 0;
	for (i = 0; i < ARRAY_SIZE(programs); i++) {
		const struct sfdp_4bait *program = &programs[i];

		/*
		 * The 4 Byte Address Instruction (Optional) Table is the only
		 * SFDP table that indicates support for Page Program Commands.
		 * Bypass the params->hwcaps.mask and consider 4BAIT the biggest
		 * authority for specifying Page Program support.
		 */
		discard_hwcaps |= program->hwcaps;
		if (dwords[0] & program->supported_bit)
			pp_hwcaps |= program->hwcaps;
	}

	/*
	 * Compute the subset of Sector Erase commands for which the 4-byte
	 * version is supported.
	 */
	erase_mask = 0;
	for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
		const struct sfdp_4bait *erase = &erases[i];

		if (dwords[0] & erase->supported_bit)
			erase_mask |= BIT(i);
	}

	/* Replicate the sort done for the map's erase types in BFPT. */
	erase_mask = spi_nor_sort_erase_mask(map, erase_mask);

	/*
	 * We need at least one 4-byte op code per read, program and erase
	 * operation; the .read(), .write() and .erase() hooks share the
	 * nor->addr_width value.
	 */
	if (!read_hwcaps || !pp_hwcaps || !erase_mask)
		goto out;

	/*
	 * Discard all operations from the 4-byte instruction set which are
	 * not supported by this memory.
	 */
	params->hwcaps.mask &= ~discard_hwcaps;
	params->hwcaps.mask |= (read_hwcaps | pp_hwcaps);

	/* Use the 4-byte address instruction set. */
	for (i = 0; i < SNOR_CMD_READ_MAX; i++) {
		struct spi_nor_read_command *read_cmd = &params->reads[i];

		read_cmd->opcode = spi_nor_convert_3to4_read(read_cmd->opcode);
	}

	/* 4BAIT is the only SFDP table that indicates page program support. */
	if (pp_hwcaps & SNOR_HWCAPS_PP) {
		spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP],
					SPINOR_OP_PP_4B, SNOR_PROTO_1_1_1);
		/*
		 * Since xSPI Page Program opcode is backward compatible with
		 * Legacy SPI, use Legacy SPI opcode there as well.
		 */
		spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_8_8_8_DTR],
					SPINOR_OP_PP_4B, SNOR_PROTO_8_8_8_DTR);
	}
	if (pp_hwcaps & SNOR_HWCAPS_PP_1_1_4)
		spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_1_1_4],
					SPINOR_OP_PP_1_1_4_4B,
					SNOR_PROTO_1_1_4);
	if (pp_hwcaps & SNOR_HWCAPS_PP_1_4_4)
		spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_1_4_4],
					SPINOR_OP_PP_1_4_4_4B,
					SNOR_PROTO_1_4_4);

	for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
		if (erase_mask & BIT(i))
			erase_type[i].opcode = (dwords[1] >>
						erase_type[i].idx * 8) & 0xFF;
		else
			spi_nor_set_erase_type(&erase_type[i], 0u, 0xFF);
	}

	/*
	 * We set SNOR_F_HAS_4BAIT in order to skip spi_nor_set_4byte_opcodes()
	 * later because we already did the conversion to 4-byte opcodes. Also,
	 * the latter function implements a legacy quirk for the erase size of
	 * Spansion memory. However this quirk is no longer needed with new
	 * SFDP compliant memories.
	 */
	nor->addr_width = 4;
	nor->flags |= SNOR_F_4B_OPCODES | SNOR_F_HAS_4BAIT;

	/* fall through */
out:
	kfree(dwords);
	return ret;
}

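/*
 * Worked example (hypothetical 4BAIT DWORD2 value, not from any real
 * flash): with dwords[1] = 0xdc5c0021, the 4-byte erase op codes extracted
 * in spi_nor_parse_4bait() above would be 0x21 for BFPT Erase Type 1
 * (idx 0), 0x00 for Type 2 (idx 1), 0x5c for Type 3 (idx 2) and 0xdc for
 * Type 4 (idx 3), each taken from byte idx of the DWORD.
 */
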
#define PROFILE1_DWORD1_RDSR_ADDR_BYTES		BIT(29)
#define PROFILE1_DWORD1_RDSR_DUMMY		BIT(28)
#define PROFILE1_DWORD1_RD_FAST_CMD		GENMASK(15, 8)
#define PROFILE1_DWORD4_DUMMY_200MHZ		GENMASK(11, 7)
#define PROFILE1_DWORD5_DUMMY_166MHZ		GENMASK(31, 27)
#define PROFILE1_DWORD5_DUMMY_133MHZ		GENMASK(21, 17)
#define PROFILE1_DWORD5_DUMMY_100MHZ		GENMASK(11, 7)

/**
 * spi_nor_parse_profile1() - parse the xSPI Profile 1.0 table
 * @nor:		pointer to a 'struct spi_nor'
 * @profile1_header:	pointer to the 'struct sfdp_parameter_header' describing
 *			the Profile 1.0 Table length and version.
 * @params:		pointer to the 'struct spi_nor_flash_parameter' to be
 *			filled.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_parse_profile1(struct spi_nor *nor,
				  const struct sfdp_parameter_header *profile1_header,
				  struct spi_nor_flash_parameter *params)
{
	u32 *dwords, addr;
	size_t len;
	int ret;
	u8 dummy, opcode;

	len = profile1_header->length * sizeof(*dwords);
	dwords = kmalloc(len, GFP_KERNEL);
	if (!dwords)
		return -ENOMEM;

	addr = SFDP_PARAM_HEADER_PTP(profile1_header);
	ret = spi_nor_read_sfdp(nor, addr, len, dwords);
	if (ret)
		goto out;

	le32_to_cpu_array(dwords, profile1_header->length);

	/* Get 8D-8D-8D fast read opcode and dummy cycles. */
	opcode = FIELD_GET(PROFILE1_DWORD1_RD_FAST_CMD, dwords[0]);

	/* Set the Read Status Register dummy cycles and dummy address bytes. */
	if (dwords[0] & PROFILE1_DWORD1_RDSR_DUMMY)
		params->rdsr_dummy = 8;
	else
		params->rdsr_dummy = 4;

	if (dwords[0] & PROFILE1_DWORD1_RDSR_ADDR_BYTES)
		params->rdsr_addr_nbytes = 4;
	else
		params->rdsr_addr_nbytes = 0;

	/*
	 * We don't know what speed the controller is running at. Find the
	 * dummy cycles for the fastest frequency the flash can run at to be
	 * sure we are never short of dummy cycles. A value of 0 means the
	 * frequency is not supported.
	 *
	 * Default to PROFILE1_DUMMY_DEFAULT if we don't find anything, and let
	 * flashes set the correct value if needed in their fixup hooks.
	 */
	dummy = FIELD_GET(PROFILE1_DWORD4_DUMMY_200MHZ, dwords[3]);
	if (!dummy)
		dummy = FIELD_GET(PROFILE1_DWORD5_DUMMY_166MHZ, dwords[4]);
	if (!dummy)
		dummy = FIELD_GET(PROFILE1_DWORD5_DUMMY_133MHZ, dwords[4]);
	if (!dummy)
		dummy = FIELD_GET(PROFILE1_DWORD5_DUMMY_100MHZ, dwords[4]);
	if (!dummy)
		dev_dbg(nor->dev,
			"Can't find dummy cycles from Profile 1.0 table\n");

	/* Round up to an even value to avoid tripping controllers up. */
	dummy = round_up(dummy, 2);

	/* Update the fast read settings. */
	spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_8_8_8_DTR],
				  0, dummy, opcode,
				  SNOR_PROTO_8_8_8_DTR);

out:
	kfree(dwords);
	return ret;
}

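/*
 * Illustration of the dummy cycle fallback in spi_nor_parse_profile1()
 * above (hypothetical table contents): if the 200 MHz field of Profile 1.0
 * DWORD4 reads 0 and the 166 MHz field of DWORD5 reads 13, the parser picks
 * 13 and then rounds it up to 14 so that controllers which only support an
 * even number of dummy cycles are not tripped up.
 */
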
#define SCCR_DWORD22_OCTAL_DTR_EN_VOLATILE	BIT(31)

/**
 * spi_nor_parse_sccr() - Parse the Status, Control and Configuration Register
 *			  Map.
 * @nor:		pointer to a 'struct spi_nor'
 * @sccr_header:	pointer to the 'struct sfdp_parameter_header' describing
 *			the SCCR Map table length and version.
 * @params:		pointer to the 'struct spi_nor_flash_parameter' to be
 *			filled.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_parse_sccr(struct spi_nor *nor,
			      const struct sfdp_parameter_header *sccr_header,
			      struct spi_nor_flash_parameter *params)
{
	u32 *dwords, addr;
	size_t len;
	int ret;

	len = sccr_header->length * sizeof(*dwords);
	dwords = kmalloc(len, GFP_KERNEL);
	if (!dwords)
		return -ENOMEM;

	addr = SFDP_PARAM_HEADER_PTP(sccr_header);
	ret = spi_nor_read_sfdp(nor, addr, len, dwords);
	if (ret)
		goto out;

	le32_to_cpu_array(dwords, sccr_header->length);

	if (FIELD_GET(SCCR_DWORD22_OCTAL_DTR_EN_VOLATILE, dwords[22]))
		nor->flags |= SNOR_F_IO_MODE_EN_VOLATILE;

out:
	kfree(dwords);
	return ret;
}

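/*
 * Rough layout of the SFDP area parsed below (per JESD216; the offsets are
 * the standard ones, while the number of headers and the table locations
 * are device specific):
 *
 *	0x00: SFDP header ("SFDP" signature, revision, NPH)
 *	0x08: parameter header 0, always the mandatory BFPT header
 *	0x10: optional parameter headers 1..NPH (SMPT, 4BAIT, Profile 1.0,
 *	      SCCR Map, ...)
 *	 ...: the parameter tables themselves, each located by its header's
 *	      parameter table pointer
 */
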
/**
 * spi_nor_parse_sfdp() - parse the Serial Flash Discoverable Parameters.
 * @nor:	pointer to a 'struct spi_nor'
 * @params:	pointer to the 'struct spi_nor_flash_parameter' to be
 *		filled
 *
 * The Serial Flash Discoverable Parameters are described by the JEDEC JESD216
 * specification. This is a standard which tends to be supported by almost all
 * (Q)SPI memory manufacturers. Those tables allow us to learn at runtime the
 * main parameters needed to perform basic SPI flash operations such as Fast
 * Read, Page Program or Sector Erase commands.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_parse_sfdp(struct spi_nor *nor,
		       struct spi_nor_flash_parameter *params)
{
	const struct sfdp_parameter_header *param_header, *bfpt_header;
	struct sfdp_parameter_header *param_headers = NULL;
	struct sfdp_header header;
	struct device *dev = nor->dev;
	size_t psize;
	int i, err;

	/* Get the SFDP header. */
	err = spi_nor_read_sfdp_dma_unsafe(nor, 0, sizeof(header), &header);
	if (err < 0)
		return err;

	/* Check the SFDP header version. */
	if (le32_to_cpu(header.signature) != SFDP_SIGNATURE ||
	    header.major != SFDP_JESD216_MAJOR)
		return -EINVAL;

	/*
	 * Verify that the first and only mandatory parameter header is a
	 * Basic Flash Parameter Table header as specified in JESD216.
	 */
	bfpt_header = &header.bfpt_header;
	if (SFDP_PARAM_HEADER_ID(bfpt_header) != SFDP_BFPT_ID ||
	    bfpt_header->major != SFDP_JESD216_MAJOR)
		return -EINVAL;

	/*
	 * Allocate memory then read all parameter headers with a single
	 * Read SFDP command. These parameter headers will actually be parsed
	 * twice: a first time to get the latest revision of the basic flash
	 * parameter table, then a second time to handle the supported optional
	 * tables.
	 * Hence we read the parameter headers once for all to reduce the
	 * processing time. Also we use kmalloc() instead of devm_kmalloc()
	 * because we don't need to keep these parameter headers: the allocated
	 * memory is always released with kfree() before exiting this function.
	 */
	if (header.nph) {
		psize = header.nph * sizeof(*param_headers);

		param_headers = kmalloc(psize, GFP_KERNEL);
		if (!param_headers)
			return -ENOMEM;

		err = spi_nor_read_sfdp(nor, sizeof(header),
					psize, param_headers);
		if (err < 0) {
			dev_dbg(dev, "failed to read SFDP parameter headers\n");
			goto exit;
		}
	}

	/*
	 * Check other parameter headers to get the latest revision of
	 * the basic flash parameter table.
	 */
	for (i = 0; i < header.nph; i++) {
		param_header = &param_headers[i];

		if (SFDP_PARAM_HEADER_ID(param_header) == SFDP_BFPT_ID &&
		    param_header->major == SFDP_JESD216_MAJOR &&
		    (param_header->minor > bfpt_header->minor ||
		     (param_header->minor == bfpt_header->minor &&
		      param_header->length > bfpt_header->length)))
			bfpt_header = param_header;
	}

	err = spi_nor_parse_bfpt(nor, bfpt_header, params);
	if (err)
		goto exit;

	/* Parse optional parameter tables. */
	for (i = 0; i < header.nph; i++) {
		param_header = &param_headers[i];

		switch (SFDP_PARAM_HEADER_ID(param_header)) {
		case SFDP_SECTOR_MAP_ID:
			err = spi_nor_parse_smpt(nor, param_header, params);
			break;

		case SFDP_4BAIT_ID:
			err = spi_nor_parse_4bait(nor, param_header, params);
			break;

		case SFDP_PROFILE1_ID:
			err = spi_nor_parse_profile1(nor, param_header, params);
			break;

		case SFDP_SCCR_MAP_ID:
			err = spi_nor_parse_sccr(nor, param_header, params);
			break;

		default:
			break;
		}

		if (err) {
			dev_warn(dev, "Failed to parse optional parameter table: %04x\n",
				 SFDP_PARAM_HEADER_ID(param_header));
			/*
			 * Let's not drop all information we extracted so far
			 * if optional table parsers fail. In case of failure,
			 * each optional parser is responsible for rolling back
			 * to the previously known spi_nor data.
			 */
			err = 0;
		}
	}

exit:
	kfree(param_headers);
	return err;
}