1 /* 2 * Common Flash Interface support: 3 * AMD & Fujitsu Standard Vendor Command Set (ID 0x0002) 4 * 5 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp> 6 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com> 7 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com> 8 * 9 * 2_by_8 routines added by Simon Munton 10 * 11 * 4_by_16 work by Carolyn J. Smith 12 * 13 * XIP support hooks by Vitaly Wool (based on code for Intel flash 14 * by Nicolas Pitre) 15 * 16 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0 17 * 18 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com 19 * 20 * This code is GPL 21 */ 22 23 #include <linux/module.h> 24 #include <linux/types.h> 25 #include <linux/kernel.h> 26 #include <linux/sched.h> 27 #include <asm/io.h> 28 #include <asm/byteorder.h> 29 30 #include <linux/errno.h> 31 #include <linux/slab.h> 32 #include <linux/delay.h> 33 #include <linux/interrupt.h> 34 #include <linux/reboot.h> 35 #include <linux/of.h> 36 #include <linux/of_platform.h> 37 #include <linux/mtd/map.h> 38 #include <linux/mtd/mtd.h> 39 #include <linux/mtd/cfi.h> 40 #include <linux/mtd/xip.h> 41 42 #define AMD_BOOTLOC_BUG 43 #define FORCE_WORD_WRITE 0 44 45 #define MAX_RETRIES 3 46 47 #define SST49LF004B 0x0060 48 #define SST49LF040B 0x0050 49 #define SST49LF008A 0x005a 50 #define AT49BV6416 0x00d6 51 52 /* 53 * Status Register bit description. Used by flash devices that don't 54 * support DQ polling (e.g. HyperFlash) 55 */ 56 #define CFI_SR_DRB BIT(7) 57 #define CFI_SR_ESB BIT(5) 58 #define CFI_SR_PSB BIT(4) 59 #define CFI_SR_WBASB BIT(3) 60 #define CFI_SR_SLSB BIT(1) 61 62 static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); 63 static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); 64 #if !FORCE_WORD_WRITE 65 static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); 66 #endif 67 static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *); 68 static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *); 69 static void cfi_amdstd_sync (struct mtd_info *); 70 static int cfi_amdstd_suspend (struct mtd_info *); 71 static void cfi_amdstd_resume (struct mtd_info *); 72 static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *); 73 static int cfi_amdstd_get_fact_prot_info(struct mtd_info *, size_t, 74 size_t *, struct otp_info *); 75 static int cfi_amdstd_get_user_prot_info(struct mtd_info *, size_t, 76 size_t *, struct otp_info *); 77 static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); 78 static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *, loff_t, size_t, 79 size_t *, u_char *); 80 static int cfi_amdstd_read_user_prot_reg(struct mtd_info *, loff_t, size_t, 81 size_t *, u_char *); 82 static int cfi_amdstd_write_user_prot_reg(struct mtd_info *, loff_t, size_t, 83 size_t *, u_char *); 84 static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *, loff_t, size_t); 85 86 static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, 87 size_t *retlen, const u_char *buf); 88 89 static void cfi_amdstd_destroy(struct mtd_info *); 90 91 struct mtd_info *cfi_cmdset_0002(struct map_info *, int); 92 static struct mtd_info *cfi_amdstd_setup (struct mtd_info *); 93 94 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode); 95 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr); 96 #include "fwh_lock.h" 97 98 static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); 99 static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); 100 101 static int cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); 102 static int cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); 103 static int cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len); 104 105 static struct mtd_chip_driver cfi_amdstd_chipdrv = { 106 .probe = NULL, /* Not usable directly */ 107 .destroy = cfi_amdstd_destroy, 108 .name = "cfi_cmdset_0002", 109 .module = THIS_MODULE 110 }; 111 112 /* 113 * Use status register to poll for Erase/write completion when DQ is not 114 * supported. This is indicated by Bit[1:0] of SoftwareFeatures field in 115 * CFI Primary Vendor-Specific Extended Query table 1.5 116 */ 117 static int cfi_use_status_reg(struct cfi_private *cfi) 118 { 119 struct cfi_pri_amdstd *extp = cfi->cmdset_priv; 120 u8 poll_mask = CFI_POLL_STATUS_REG | CFI_POLL_DQ; 121 122 return extp->MinorVersion >= '5' && 123 (extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG; 124 } 125 126 static int cfi_check_err_status(struct map_info *map, struct flchip *chip, 127 unsigned long adr) 128 { 129 struct cfi_private *cfi = map->fldrv_priv; 130 map_word status; 131 132 if (!cfi_use_status_reg(cfi)) 133 return 0; 134 135 cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi, 136 cfi->device_type, NULL); 137 status = map_read(map, adr); 138 139 /* The error bits are invalid while the chip's busy */ 140 if (!map_word_bitsset(map, status, CMD(CFI_SR_DRB))) 141 return 0; 142 143 if (map_word_bitsset(map, status, CMD(0x3a))) { 144 unsigned long chipstatus = MERGESTATUS(status); 145 146 if (chipstatus & CFI_SR_ESB) 147 pr_err("%s erase operation failed, status %lx\n", 148 map->name, chipstatus); 149 if (chipstatus & CFI_SR_PSB) 150 pr_err("%s program operation failed, status %lx\n", 151 map->name, chipstatus); 152 if (chipstatus & CFI_SR_WBASB) 153 pr_err("%s buffer program command aborted, status %lx\n", 154 map->name, chipstatus); 155 if (chipstatus & CFI_SR_SLSB) 156 pr_err("%s sector write protected, status %lx\n", 157 map->name, chipstatus); 158 159 /* Erase/Program status bits are set on the operation failure */ 160 if (chipstatus & (CFI_SR_ESB | CFI_SR_PSB)) 161 return 1; 162 } 163 return 0; 164 } 165 166 /* #define DEBUG_CFI_FEATURES */ 167 168 169 #ifdef DEBUG_CFI_FEATURES 170 static void cfi_tell_features(struct cfi_pri_amdstd *extp) 171 { 172 const char* erase_suspend[3] = { 173 "Not supported", "Read only", "Read/write" 174 }; 175 const char* top_bottom[6] = { 176 "No WP", "8x8KiB sectors at top & bottom, no WP", 177 "Bottom boot", "Top boot", 178 "Uniform, Bottom WP", "Uniform, Top WP" 179 }; 180 181 printk(" Silicon revision: %d\n", extp->SiliconRevision >> 1); 182 printk(" Address sensitive unlock: %s\n", 183 (extp->SiliconRevision & 1) ? "Not required" : "Required"); 184 185 if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend)) 186 printk(" Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]); 187 else 188 printk(" Erase Suspend: Unknown value %d\n", extp->EraseSuspend); 189 190 if (extp->BlkProt == 0) 191 printk(" Block protection: Not supported\n"); 192 else 193 printk(" Block protection: %d sectors per group\n", extp->BlkProt); 194 195 196 printk(" Temporary block unprotect: %s\n", 197 extp->TmpBlkUnprotect ? "Supported" : "Not supported"); 198 printk(" Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot); 199 printk(" Number of simultaneous operations: %d\n", extp->SimultaneousOps); 200 printk(" Burst mode: %s\n", 201 extp->BurstMode ? "Supported" : "Not supported"); 202 if (extp->PageMode == 0) 203 printk(" Page mode: Not supported\n"); 204 else 205 printk(" Page mode: %d word page\n", extp->PageMode << 2); 206 207 printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n", 208 extp->VppMin >> 4, extp->VppMin & 0xf); 209 printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n", 210 extp->VppMax >> 4, extp->VppMax & 0xf); 211 212 if (extp->TopBottom < ARRAY_SIZE(top_bottom)) 213 printk(" Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]); 214 else 215 printk(" Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom); 216 } 217 #endif 218 219 #ifdef AMD_BOOTLOC_BUG 220 /* Wheee. Bring me the head of someone at AMD. */ 221 static void fixup_amd_bootblock(struct mtd_info *mtd) 222 { 223 struct map_info *map = mtd->priv; 224 struct cfi_private *cfi = map->fldrv_priv; 225 struct cfi_pri_amdstd *extp = cfi->cmdset_priv; 226 __u8 major = extp->MajorVersion; 227 __u8 minor = extp->MinorVersion; 228 229 if (((major << 8) | minor) < 0x3131) { 230 /* CFI version 1.0 => don't trust bootloc */ 231 232 pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n", 233 map->name, cfi->mfr, cfi->id); 234 235 /* AFAICS all 29LV400 with a bottom boot block have a device ID 236 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode. 237 * These were badly detected as they have the 0x80 bit set 238 * so treat them as a special case. 239 */ 240 if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) && 241 242 /* Macronix added CFI to their 2nd generation 243 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD, 244 * Fujitsu, Spansion, EON, ESI and older Macronix) 245 * has CFI. 246 * 247 * Therefore also check the manufacturer. 248 * This reduces the risk of false detection due to 249 * the 8-bit device ID. 250 */ 251 (cfi->mfr == CFI_MFR_MACRONIX)) { 252 pr_debug("%s: Macronix MX29LV400C with bottom boot block" 253 " detected\n", map->name); 254 extp->TopBottom = 2; /* bottom boot */ 255 } else 256 if (cfi->id & 0x80) { 257 printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id); 258 extp->TopBottom = 3; /* top boot */ 259 } else { 260 extp->TopBottom = 2; /* bottom boot */ 261 } 262 263 pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;" 264 " deduced %s from Device ID\n", map->name, major, minor, 265 extp->TopBottom == 2 ? "bottom" : "top"); 266 } 267 } 268 #endif 269 270 #if !FORCE_WORD_WRITE 271 static void fixup_use_write_buffers(struct mtd_info *mtd) 272 { 273 struct map_info *map = mtd->priv; 274 struct cfi_private *cfi = map->fldrv_priv; 275 if (cfi->cfiq->BufWriteTimeoutTyp) { 276 pr_debug("Using buffer write method\n"); 277 mtd->_write = cfi_amdstd_write_buffers; 278 } 279 } 280 #endif /* !FORCE_WORD_WRITE */ 281 282 /* Atmel chips don't use the same PRI format as AMD chips */ 283 static void fixup_convert_atmel_pri(struct mtd_info *mtd) 284 { 285 struct map_info *map = mtd->priv; 286 struct cfi_private *cfi = map->fldrv_priv; 287 struct cfi_pri_amdstd *extp = cfi->cmdset_priv; 288 struct cfi_pri_atmel atmel_pri; 289 290 memcpy(&atmel_pri, extp, sizeof(atmel_pri)); 291 memset((char *)extp + 5, 0, sizeof(*extp) - 5); 292 293 if (atmel_pri.Features & 0x02) 294 extp->EraseSuspend = 2; 295 296 /* Some chips got it backwards... */ 297 if (cfi->id == AT49BV6416) { 298 if (atmel_pri.BottomBoot) 299 extp->TopBottom = 3; 300 else 301 extp->TopBottom = 2; 302 } else { 303 if (atmel_pri.BottomBoot) 304 extp->TopBottom = 2; 305 else 306 extp->TopBottom = 3; 307 } 308 309 /* burst write mode not supported */ 310 cfi->cfiq->BufWriteTimeoutTyp = 0; 311 cfi->cfiq->BufWriteTimeoutMax = 0; 312 } 313 314 static void fixup_use_secsi(struct mtd_info *mtd) 315 { 316 /* Setup for chips with a secsi area */ 317 mtd->_read_user_prot_reg = cfi_amdstd_secsi_read; 318 mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read; 319 } 320 321 static void fixup_use_erase_chip(struct mtd_info *mtd) 322 { 323 struct map_info *map = mtd->priv; 324 struct cfi_private *cfi = map->fldrv_priv; 325 if ((cfi->cfiq->NumEraseRegions == 1) && 326 ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) { 327 mtd->_erase = cfi_amdstd_erase_chip; 328 } 329 330 } 331 332 /* 333 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors 334 * locked by default. 335 */ 336 static void fixup_use_atmel_lock(struct mtd_info *mtd) 337 { 338 mtd->_lock = cfi_atmel_lock; 339 mtd->_unlock = cfi_atmel_unlock; 340 mtd->flags |= MTD_POWERUP_LOCK; 341 } 342 343 static void fixup_old_sst_eraseregion(struct mtd_info *mtd) 344 { 345 struct map_info *map = mtd->priv; 346 struct cfi_private *cfi = map->fldrv_priv; 347 348 /* 349 * These flashes report two separate eraseblock regions based on the 350 * sector_erase-size and block_erase-size, although they both operate on the 351 * same memory. This is not allowed according to CFI, so we just pick the 352 * sector_erase-size. 353 */ 354 cfi->cfiq->NumEraseRegions = 1; 355 } 356 357 static void fixup_sst39vf(struct mtd_info *mtd) 358 { 359 struct map_info *map = mtd->priv; 360 struct cfi_private *cfi = map->fldrv_priv; 361 362 fixup_old_sst_eraseregion(mtd); 363 364 cfi->addr_unlock1 = 0x5555; 365 cfi->addr_unlock2 = 0x2AAA; 366 } 367 368 static void fixup_sst39vf_rev_b(struct mtd_info *mtd) 369 { 370 struct map_info *map = mtd->priv; 371 struct cfi_private *cfi = map->fldrv_priv; 372 373 fixup_old_sst_eraseregion(mtd); 374 375 cfi->addr_unlock1 = 0x555; 376 cfi->addr_unlock2 = 0x2AA; 377 378 cfi->sector_erase_cmd = CMD(0x50); 379 } 380 381 static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd) 382 { 383 struct map_info *map = mtd->priv; 384 struct cfi_private *cfi = map->fldrv_priv; 385 386 fixup_sst39vf_rev_b(mtd); 387 388 /* 389 * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where 390 * it should report a size of 8KBytes (0x0020*256). 391 */ 392 cfi->cfiq->EraseRegionInfo[0] = 0x002003ff; 393 pr_warn("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n", 394 mtd->name); 395 } 396 397 static void fixup_s29gl064n_sectors(struct mtd_info *mtd) 398 { 399 struct map_info *map = mtd->priv; 400 struct cfi_private *cfi = map->fldrv_priv; 401 402 if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) { 403 cfi->cfiq->EraseRegionInfo[0] |= 0x0040; 404 pr_warn("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n", 405 mtd->name); 406 } 407 } 408 409 static void fixup_s29gl032n_sectors(struct mtd_info *mtd) 410 { 411 struct map_info *map = mtd->priv; 412 struct cfi_private *cfi = map->fldrv_priv; 413 414 if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) { 415 cfi->cfiq->EraseRegionInfo[1] &= ~0x0040; 416 pr_warn("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n", 417 mtd->name); 418 } 419 } 420 421 static void fixup_s29ns512p_sectors(struct mtd_info *mtd) 422 { 423 struct map_info *map = mtd->priv; 424 struct cfi_private *cfi = map->fldrv_priv; 425 426 /* 427 * S29NS512P flash uses more than 8bits to report number of sectors, 428 * which is not permitted by CFI. 429 */ 430 cfi->cfiq->EraseRegionInfo[0] = 0x020001ff; 431 pr_warn("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n", 432 mtd->name); 433 } 434 435 /* Used to fix CFI-Tables of chips without Extended Query Tables */ 436 static struct cfi_fixup cfi_nopri_fixup_table[] = { 437 { CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */ 438 { CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */ 439 { CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */ 440 { CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */ 441 { CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */ 442 { CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */ 443 { CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */ 444 { CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */ 445 { 0, 0, NULL } 446 }; 447 448 static struct cfi_fixup cfi_fixup_table[] = { 449 { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri }, 450 #ifdef AMD_BOOTLOC_BUG 451 { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock }, 452 { CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock }, 453 { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock }, 454 #endif 455 { CFI_MFR_AMD, 0x0050, fixup_use_secsi }, 456 { CFI_MFR_AMD, 0x0053, fixup_use_secsi }, 457 { CFI_MFR_AMD, 0x0055, fixup_use_secsi }, 458 { CFI_MFR_AMD, 0x0056, fixup_use_secsi }, 459 { CFI_MFR_AMD, 0x005C, fixup_use_secsi }, 460 { CFI_MFR_AMD, 0x005F, fixup_use_secsi }, 461 { CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors }, 462 { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors }, 463 { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors }, 464 { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors }, 465 { CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors }, 466 { CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */ 467 { CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */ 468 { CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */ 469 { CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */ 470 #if !FORCE_WORD_WRITE 471 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers }, 472 #endif 473 { 0, 0, NULL } 474 }; 475 static struct cfi_fixup jedec_fixup_table[] = { 476 { CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock }, 477 { CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock }, 478 { CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock }, 479 { 0, 0, NULL } 480 }; 481 482 static struct cfi_fixup fixup_table[] = { 483 /* The CFI vendor ids and the JEDEC vendor IDs appear 484 * to be common. It is like the devices id's are as 485 * well. This table is to pick all cases where 486 * we know that is the case. 487 */ 488 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip }, 489 { CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock }, 490 { 0, 0, NULL } 491 }; 492 493 494 static void cfi_fixup_major_minor(struct cfi_private *cfi, 495 struct cfi_pri_amdstd *extp) 496 { 497 if (cfi->mfr == CFI_MFR_SAMSUNG) { 498 if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') || 499 (extp->MajorVersion == '3' && extp->MinorVersion == '3')) { 500 /* 501 * Samsung K8P2815UQB and K8D6x16UxM chips 502 * report major=0 / minor=0. 503 * K8D3x16UxC chips report major=3 / minor=3. 504 */ 505 printk(KERN_NOTICE " Fixing Samsung's Amd/Fujitsu" 506 " Extended Query version to 1.%c\n", 507 extp->MinorVersion); 508 extp->MajorVersion = '1'; 509 } 510 } 511 512 /* 513 * SST 38VF640x chips report major=0xFF / minor=0xFF. 514 */ 515 if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) { 516 extp->MajorVersion = '1'; 517 extp->MinorVersion = '0'; 518 } 519 } 520 521 static int is_m29ew(struct cfi_private *cfi) 522 { 523 if (cfi->mfr == CFI_MFR_INTEL && 524 ((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) || 525 (cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e))) 526 return 1; 527 return 0; 528 } 529 530 /* 531 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20: 532 * Some revisions of the M29EW suffer from erase suspend hang ups. In 533 * particular, it can occur when the sequence 534 * Erase Confirm -> Suspend -> Program -> Resume 535 * causes a lockup due to internal timing issues. The consequence is that the 536 * erase cannot be resumed without inserting a dummy command after programming 537 * and prior to resuming. [...] The work-around is to issue a dummy write cycle 538 * that writes an F0 command code before the RESUME command. 539 */ 540 static void cfi_fixup_m29ew_erase_suspend(struct map_info *map, 541 unsigned long adr) 542 { 543 struct cfi_private *cfi = map->fldrv_priv; 544 /* before resume, insert a dummy 0xF0 cycle for Micron M29EW devices */ 545 if (is_m29ew(cfi)) 546 map_write(map, CMD(0xF0), adr); 547 } 548 549 /* 550 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22: 551 * 552 * Some revisions of the M29EW (for example, A1 and A2 step revisions) 553 * are affected by a problem that could cause a hang up when an ERASE SUSPEND 554 * command is issued after an ERASE RESUME operation without waiting for a 555 * minimum delay. The result is that once the ERASE seems to be completed 556 * (no bits are toggling), the contents of the Flash memory block on which 557 * the erase was ongoing could be inconsistent with the expected values 558 * (typically, the array value is stuck to the 0xC0, 0xC4, 0x80, or 0x84 559 * values), causing a consequent failure of the ERASE operation. 560 * The occurrence of this issue could be high, especially when file system 561 * operations on the Flash are intensive. As a result, it is recommended 562 * that a patch be applied. Intensive file system operations can cause many 563 * calls to the garbage routine to free Flash space (also by erasing physical 564 * Flash blocks) and as a result, many consecutive SUSPEND and RESUME 565 * commands can occur. The problem disappears when a delay is inserted after 566 * the RESUME command by using the udelay() function available in Linux. 567 * The DELAY value must be tuned based on the customer's platform. 568 * The maximum value that fixes the problem in all cases is 500us. 569 * But, in our experience, a delay of 30 µs to 50 µs is sufficient 570 * in most cases. 571 * We have chosen 500µs because this latency is acceptable. 572 */ 573 static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi) 574 { 575 /* 576 * Resolving the Delay After Resume Issue see Micron TN-13-07 577 * Worst case delay must be 500µs but 30-50µs should be ok as well 578 */ 579 if (is_m29ew(cfi)) 580 cfi_udelay(500); 581 } 582 583 struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary) 584 { 585 struct cfi_private *cfi = map->fldrv_priv; 586 struct device_node __maybe_unused *np = map->device_node; 587 struct mtd_info *mtd; 588 int i; 589 590 mtd = kzalloc(sizeof(*mtd), GFP_KERNEL); 591 if (!mtd) 592 return NULL; 593 mtd->priv = map; 594 mtd->type = MTD_NORFLASH; 595 596 /* Fill in the default mtd operations */ 597 mtd->_erase = cfi_amdstd_erase_varsize; 598 mtd->_write = cfi_amdstd_write_words; 599 mtd->_read = cfi_amdstd_read; 600 mtd->_sync = cfi_amdstd_sync; 601 mtd->_suspend = cfi_amdstd_suspend; 602 mtd->_resume = cfi_amdstd_resume; 603 mtd->_read_user_prot_reg = cfi_amdstd_read_user_prot_reg; 604 mtd->_read_fact_prot_reg = cfi_amdstd_read_fact_prot_reg; 605 mtd->_get_fact_prot_info = cfi_amdstd_get_fact_prot_info; 606 mtd->_get_user_prot_info = cfi_amdstd_get_user_prot_info; 607 mtd->_write_user_prot_reg = cfi_amdstd_write_user_prot_reg; 608 mtd->_lock_user_prot_reg = cfi_amdstd_lock_user_prot_reg; 609 mtd->flags = MTD_CAP_NORFLASH; 610 mtd->name = map->name; 611 mtd->writesize = 1; 612 mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; 613 614 pr_debug("MTD %s(): write buffer size %d\n", __func__, 615 mtd->writebufsize); 616 617 mtd->_panic_write = cfi_amdstd_panic_write; 618 mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot; 619 620 if (cfi->cfi_mode==CFI_MODE_CFI){ 621 unsigned char bootloc; 622 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR; 623 struct cfi_pri_amdstd *extp; 624 625 extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu"); 626 if (extp) { 627 /* 628 * It's a real CFI chip, not one for which the probe 629 * routine faked a CFI structure. 630 */ 631 cfi_fixup_major_minor(cfi, extp); 632 633 /* 634 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5 635 * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19 636 * http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf 637 * http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf 638 * http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf 639 */ 640 if (extp->MajorVersion != '1' || 641 (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) { 642 printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query " 643 "version %c.%c (%#02x/%#02x).\n", 644 extp->MajorVersion, extp->MinorVersion, 645 extp->MajorVersion, extp->MinorVersion); 646 kfree(extp); 647 kfree(mtd); 648 return NULL; 649 } 650 651 printk(KERN_INFO " Amd/Fujitsu Extended Query version %c.%c.\n", 652 extp->MajorVersion, extp->MinorVersion); 653 654 /* Install our own private info structure */ 655 cfi->cmdset_priv = extp; 656 657 /* Apply cfi device specific fixups */ 658 cfi_fixup(mtd, cfi_fixup_table); 659 660 #ifdef DEBUG_CFI_FEATURES 661 /* Tell the user about it in lots of lovely detail */ 662 cfi_tell_features(extp); 663 #endif 664 665 #ifdef CONFIG_OF 666 if (np && of_property_read_bool( 667 np, "use-advanced-sector-protection") 668 && extp->BlkProtUnprot == 8) { 669 printk(KERN_INFO " Advanced Sector Protection (PPB Locking) supported\n"); 670 mtd->_lock = cfi_ppb_lock; 671 mtd->_unlock = cfi_ppb_unlock; 672 mtd->_is_locked = cfi_ppb_is_locked; 673 } 674 #endif 675 676 bootloc = extp->TopBottom; 677 if ((bootloc < 2) || (bootloc > 5)) { 678 printk(KERN_WARNING "%s: CFI contains unrecognised boot " 679 "bank location (%d). Assuming bottom.\n", 680 map->name, bootloc); 681 bootloc = 2; 682 } 683 684 if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) { 685 printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name); 686 687 for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) { 688 int j = (cfi->cfiq->NumEraseRegions-1)-i; 689 690 swap(cfi->cfiq->EraseRegionInfo[i], 691 cfi->cfiq->EraseRegionInfo[j]); 692 } 693 } 694 /* Set the default CFI lock/unlock addresses */ 695 cfi->addr_unlock1 = 0x555; 696 cfi->addr_unlock2 = 0x2aa; 697 } 698 cfi_fixup(mtd, cfi_nopri_fixup_table); 699 700 if (!cfi->addr_unlock1 || !cfi->addr_unlock2) { 701 kfree(mtd); 702 return NULL; 703 } 704 705 } /* CFI mode */ 706 else if (cfi->cfi_mode == CFI_MODE_JEDEC) { 707 /* Apply jedec specific fixups */ 708 cfi_fixup(mtd, jedec_fixup_table); 709 } 710 /* Apply generic fixups */ 711 cfi_fixup(mtd, fixup_table); 712 713 for (i=0; i< cfi->numchips; i++) { 714 cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp; 715 cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp; 716 cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp; 717 /* 718 * First calculate the timeout max according to timeout field 719 * of struct cfi_ident that probed from chip's CFI aera, if 720 * available. Specify a minimum of 2000us, in case the CFI data 721 * is wrong. 722 */ 723 if (cfi->cfiq->BufWriteTimeoutTyp && 724 cfi->cfiq->BufWriteTimeoutMax) 725 cfi->chips[i].buffer_write_time_max = 726 1 << (cfi->cfiq->BufWriteTimeoutTyp + 727 cfi->cfiq->BufWriteTimeoutMax); 728 else 729 cfi->chips[i].buffer_write_time_max = 0; 730 731 cfi->chips[i].buffer_write_time_max = 732 max(cfi->chips[i].buffer_write_time_max, 2000); 733 734 cfi->chips[i].ref_point_counter = 0; 735 init_waitqueue_head(&(cfi->chips[i].wq)); 736 } 737 738 map->fldrv = &cfi_amdstd_chipdrv; 739 740 return cfi_amdstd_setup(mtd); 741 } 742 struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002"))); 743 struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002"))); 744 EXPORT_SYMBOL_GPL(cfi_cmdset_0002); 745 EXPORT_SYMBOL_GPL(cfi_cmdset_0006); 746 EXPORT_SYMBOL_GPL(cfi_cmdset_0701); 747 748 static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd) 749 { 750 struct map_info *map = mtd->priv; 751 struct cfi_private *cfi = map->fldrv_priv; 752 unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave; 753 unsigned long offset = 0; 754 int i,j; 755 756 printk(KERN_NOTICE "number of %s chips: %d\n", 757 (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips); 758 /* Select the correct geometry setup */ 759 mtd->size = devsize * cfi->numchips; 760 761 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips; 762 mtd->eraseregions = kmalloc_array(mtd->numeraseregions, 763 sizeof(struct mtd_erase_region_info), 764 GFP_KERNEL); 765 if (!mtd->eraseregions) 766 goto setup_err; 767 768 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) { 769 unsigned long ernum, ersize; 770 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave; 771 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1; 772 773 if (mtd->erasesize < ersize) { 774 mtd->erasesize = ersize; 775 } 776 for (j=0; j<cfi->numchips; j++) { 777 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset; 778 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize; 779 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum; 780 } 781 offset += (ersize * ernum); 782 } 783 if (offset != devsize) { 784 /* Argh */ 785 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize); 786 goto setup_err; 787 } 788 789 __module_get(THIS_MODULE); 790 register_reboot_notifier(&mtd->reboot_notifier); 791 return mtd; 792 793 setup_err: 794 kfree(mtd->eraseregions); 795 kfree(mtd); 796 kfree(cfi->cmdset_priv); 797 return NULL; 798 } 799 800 /* 801 * Return true if the chip is ready. 802 * 803 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any 804 * non-suspended sector) and is indicated by no toggle bits toggling. 805 * 806 * Note that anything more complicated than checking if no bits are toggling 807 * (including checking DQ5 for an error status) is tricky to get working 808 * correctly and is therefore not done (particularly with interleaved chips 809 * as each chip must be checked independently of the others). 810 */ 811 static int __xipram chip_ready(struct map_info *map, struct flchip *chip, 812 unsigned long addr) 813 { 814 struct cfi_private *cfi = map->fldrv_priv; 815 map_word d, t; 816 817 if (cfi_use_status_reg(cfi)) { 818 map_word ready = CMD(CFI_SR_DRB); 819 /* 820 * For chips that support status register, check device 821 * ready bit 822 */ 823 cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi, 824 cfi->device_type, NULL); 825 d = map_read(map, addr); 826 827 return map_word_andequal(map, d, ready, ready); 828 } 829 830 d = map_read(map, addr); 831 t = map_read(map, addr); 832 833 return map_word_equal(map, d, t); 834 } 835 836 /* 837 * Return true if the chip is ready and has the correct value. 838 * 839 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any 840 * non-suspended sector) and it is indicated by no bits toggling. 841 * 842 * Error are indicated by toggling bits or bits held with the wrong value, 843 * or with bits toggling. 844 * 845 * Note that anything more complicated than checking if no bits are toggling 846 * (including checking DQ5 for an error status) is tricky to get working 847 * correctly and is therefore not done (particularly with interleaved chips 848 * as each chip must be checked independently of the others). 849 * 850 */ 851 static int __xipram chip_good(struct map_info *map, struct flchip *chip, 852 unsigned long addr, map_word expected) 853 { 854 struct cfi_private *cfi = map->fldrv_priv; 855 map_word oldd, curd; 856 857 if (cfi_use_status_reg(cfi)) { 858 map_word ready = CMD(CFI_SR_DRB); 859 860 /* 861 * For chips that support status register, check device 862 * ready bit 863 */ 864 cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi, 865 cfi->device_type, NULL); 866 curd = map_read(map, addr); 867 868 return map_word_andequal(map, curd, ready, ready); 869 } 870 871 oldd = map_read(map, addr); 872 curd = map_read(map, addr); 873 874 return map_word_equal(map, oldd, curd) && 875 map_word_equal(map, curd, expected); 876 } 877 878 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode) 879 { 880 DECLARE_WAITQUEUE(wait, current); 881 struct cfi_private *cfi = map->fldrv_priv; 882 unsigned long timeo; 883 struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv; 884 885 resettime: 886 timeo = jiffies + HZ; 887 retry: 888 switch (chip->state) { 889 890 case FL_STATUS: 891 for (;;) { 892 if (chip_ready(map, chip, adr)) 893 break; 894 895 if (time_after(jiffies, timeo)) { 896 printk(KERN_ERR "Waiting for chip to be ready timed out.\n"); 897 return -EIO; 898 } 899 mutex_unlock(&chip->mutex); 900 cfi_udelay(1); 901 mutex_lock(&chip->mutex); 902 /* Someone else might have been playing with it. */ 903 goto retry; 904 } 905 906 case FL_READY: 907 case FL_CFI_QUERY: 908 case FL_JEDEC_QUERY: 909 return 0; 910 911 case FL_ERASING: 912 if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) || 913 !(mode == FL_READY || mode == FL_POINT || 914 (mode == FL_WRITING && (cfip->EraseSuspend & 0x2)))) 915 goto sleep; 916 917 /* Do not allow suspend iff read/write to EB address */ 918 if ((adr & chip->in_progress_block_mask) == 919 chip->in_progress_block_addr) 920 goto sleep; 921 922 /* Erase suspend */ 923 /* It's harmless to issue the Erase-Suspend and Erase-Resume 924 * commands when the erase algorithm isn't in progress. */ 925 map_write(map, CMD(0xB0), chip->in_progress_block_addr); 926 chip->oldstate = FL_ERASING; 927 chip->state = FL_ERASE_SUSPENDING; 928 chip->erase_suspended = 1; 929 for (;;) { 930 if (chip_ready(map, chip, adr)) 931 break; 932 933 if (time_after(jiffies, timeo)) { 934 /* Should have suspended the erase by now. 935 * Send an Erase-Resume command as either 936 * there was an error (so leave the erase 937 * routine to recover from it) or we trying to 938 * use the erase-in-progress sector. */ 939 put_chip(map, chip, adr); 940 printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__); 941 return -EIO; 942 } 943 944 mutex_unlock(&chip->mutex); 945 cfi_udelay(1); 946 mutex_lock(&chip->mutex); 947 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING. 948 So we can just loop here. */ 949 } 950 chip->state = FL_READY; 951 return 0; 952 953 case FL_XIP_WHILE_ERASING: 954 if (mode != FL_READY && mode != FL_POINT && 955 (!cfip || !(cfip->EraseSuspend&2))) 956 goto sleep; 957 chip->oldstate = chip->state; 958 chip->state = FL_READY; 959 return 0; 960 961 case FL_SHUTDOWN: 962 /* The machine is rebooting */ 963 return -EIO; 964 965 case FL_POINT: 966 /* Only if there's no operation suspended... */ 967 if (mode == FL_READY && chip->oldstate == FL_READY) 968 return 0; 969 /* fall through */ 970 971 default: 972 sleep: 973 set_current_state(TASK_UNINTERRUPTIBLE); 974 add_wait_queue(&chip->wq, &wait); 975 mutex_unlock(&chip->mutex); 976 schedule(); 977 remove_wait_queue(&chip->wq, &wait); 978 mutex_lock(&chip->mutex); 979 goto resettime; 980 } 981 } 982 983 984 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr) 985 { 986 struct cfi_private *cfi = map->fldrv_priv; 987 988 switch(chip->oldstate) { 989 case FL_ERASING: 990 cfi_fixup_m29ew_erase_suspend(map, 991 chip->in_progress_block_addr); 992 map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr); 993 cfi_fixup_m29ew_delay_after_resume(cfi); 994 chip->oldstate = FL_READY; 995 chip->state = FL_ERASING; 996 break; 997 998 case FL_XIP_WHILE_ERASING: 999 chip->state = chip->oldstate; 1000 chip->oldstate = FL_READY; 1001 break; 1002 1003 case FL_READY: 1004 case FL_STATUS: 1005 break; 1006 default: 1007 printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate); 1008 } 1009 wake_up(&chip->wq); 1010 } 1011 1012 #ifdef CONFIG_MTD_XIP 1013 1014 /* 1015 * No interrupt what so ever can be serviced while the flash isn't in array 1016 * mode. This is ensured by the xip_disable() and xip_enable() functions 1017 * enclosing any code path where the flash is known not to be in array mode. 1018 * And within a XIP disabled code path, only functions marked with __xipram 1019 * may be called and nothing else (it's a good thing to inspect generated 1020 * assembly to make sure inline functions were actually inlined and that gcc 1021 * didn't emit calls to its own support functions). Also configuring MTD CFI 1022 * support to a single buswidth and a single interleave is also recommended. 1023 */ 1024 1025 static void xip_disable(struct map_info *map, struct flchip *chip, 1026 unsigned long adr) 1027 { 1028 /* TODO: chips with no XIP use should ignore and return */ 1029 (void) map_read(map, adr); /* ensure mmu mapping is up to date */ 1030 local_irq_disable(); 1031 } 1032 1033 static void __xipram xip_enable(struct map_info *map, struct flchip *chip, 1034 unsigned long adr) 1035 { 1036 struct cfi_private *cfi = map->fldrv_priv; 1037 1038 if (chip->state != FL_POINT && chip->state != FL_READY) { 1039 map_write(map, CMD(0xf0), adr); 1040 chip->state = FL_READY; 1041 } 1042 (void) map_read(map, adr); 1043 xip_iprefetch(); 1044 local_irq_enable(); 1045 } 1046 1047 /* 1048 * When a delay is required for the flash operation to complete, the 1049 * xip_udelay() function is polling for both the given timeout and pending 1050 * (but still masked) hardware interrupts. Whenever there is an interrupt 1051 * pending then the flash erase operation is suspended, array mode restored 1052 * and interrupts unmasked. Task scheduling might also happen at that 1053 * point. The CPU eventually returns from the interrupt or the call to 1054 * schedule() and the suspended flash operation is resumed for the remaining 1055 * of the delay period. 1056 * 1057 * Warning: this function _will_ fool interrupt latency tracing tools. 1058 */ 1059 1060 static void __xipram xip_udelay(struct map_info *map, struct flchip *chip, 1061 unsigned long adr, int usec) 1062 { 1063 struct cfi_private *cfi = map->fldrv_priv; 1064 struct cfi_pri_amdstd *extp = cfi->cmdset_priv; 1065 map_word status, OK = CMD(0x80); 1066 unsigned long suspended, start = xip_currtime(); 1067 flstate_t oldstate; 1068 1069 do { 1070 cpu_relax(); 1071 if (xip_irqpending() && extp && 1072 ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) && 1073 (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) { 1074 /* 1075 * Let's suspend the erase operation when supported. 1076 * Note that we currently don't try to suspend 1077 * interleaved chips if there is already another 1078 * operation suspended (imagine what happens 1079 * when one chip was already done with the current 1080 * operation while another chip suspended it, then 1081 * we resume the whole thing at once). Yes, it 1082 * can happen! 1083 */ 1084 map_write(map, CMD(0xb0), adr); 1085 usec -= xip_elapsed_since(start); 1086 suspended = xip_currtime(); 1087 do { 1088 if (xip_elapsed_since(suspended) > 100000) { 1089 /* 1090 * The chip doesn't want to suspend 1091 * after waiting for 100 msecs. 1092 * This is a critical error but there 1093 * is not much we can do here. 1094 */ 1095 return; 1096 } 1097 status = map_read(map, adr); 1098 } while (!map_word_andequal(map, status, OK, OK)); 1099 1100 /* Suspend succeeded */ 1101 oldstate = chip->state; 1102 if (!map_word_bitsset(map, status, CMD(0x40))) 1103 break; 1104 chip->state = FL_XIP_WHILE_ERASING; 1105 chip->erase_suspended = 1; 1106 map_write(map, CMD(0xf0), adr); 1107 (void) map_read(map, adr); 1108 xip_iprefetch(); 1109 local_irq_enable(); 1110 mutex_unlock(&chip->mutex); 1111 xip_iprefetch(); 1112 cond_resched(); 1113 1114 /* 1115 * We're back. However someone else might have 1116 * decided to go write to the chip if we are in 1117 * a suspended erase state. If so let's wait 1118 * until it's done. 1119 */ 1120 mutex_lock(&chip->mutex); 1121 while (chip->state != FL_XIP_WHILE_ERASING) { 1122 DECLARE_WAITQUEUE(wait, current); 1123 set_current_state(TASK_UNINTERRUPTIBLE); 1124 add_wait_queue(&chip->wq, &wait); 1125 mutex_unlock(&chip->mutex); 1126 schedule(); 1127 remove_wait_queue(&chip->wq, &wait); 1128 mutex_lock(&chip->mutex); 1129 } 1130 /* Disallow XIP again */ 1131 local_irq_disable(); 1132 1133 /* Correct Erase Suspend Hangups for M29EW */ 1134 cfi_fixup_m29ew_erase_suspend(map, adr); 1135 /* Resume the write or erase operation */ 1136 map_write(map, cfi->sector_erase_cmd, adr); 1137 chip->state = oldstate; 1138 start = xip_currtime(); 1139 } else if (usec >= 1000000/HZ) { 1140 /* 1141 * Try to save on CPU power when waiting delay 1142 * is at least a system timer tick period. 1143 * No need to be extremely accurate here. 1144 */ 1145 xip_cpu_idle(); 1146 } 1147 status = map_read(map, adr); 1148 } while (!map_word_andequal(map, status, OK, OK) 1149 && xip_elapsed_since(start) < usec); 1150 } 1151 1152 #define UDELAY(map, chip, adr, usec) xip_udelay(map, chip, adr, usec) 1153 1154 /* 1155 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while 1156 * the flash is actively programming or erasing since we have to poll for 1157 * the operation to complete anyway. We can't do that in a generic way with 1158 * a XIP setup so do it before the actual flash operation in this case 1159 * and stub it out from INVALIDATE_CACHE_UDELAY. 1160 */ 1161 #define XIP_INVAL_CACHED_RANGE(map, from, size) \ 1162 INVALIDATE_CACHED_RANGE(map, from, size) 1163 1164 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \ 1165 UDELAY(map, chip, adr, usec) 1166 1167 /* 1168 * Extra notes: 1169 * 1170 * Activating this XIP support changes the way the code works a bit. For 1171 * example the code to suspend the current process when concurrent access 1172 * happens is never executed because xip_udelay() will always return with the 1173 * same chip state as it was entered with. This is why there is no care for 1174 * the presence of add_wait_queue() or schedule() calls from within a couple 1175 * xip_disable()'d areas of code, like in do_erase_oneblock for example. 1176 * The queueing and scheduling are always happening within xip_udelay(). 1177 * 1178 * Similarly, get_chip() and put_chip() just happen to always be executed 1179 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state 1180 * is in array mode, therefore never executing many cases therein and not 1181 * causing any problem with XIP. 1182 */ 1183 1184 #else 1185 1186 #define xip_disable(map, chip, adr) 1187 #define xip_enable(map, chip, adr) 1188 #define XIP_INVAL_CACHED_RANGE(x...) 1189 1190 #define UDELAY(map, chip, adr, usec) \ 1191 do { \ 1192 mutex_unlock(&chip->mutex); \ 1193 cfi_udelay(usec); \ 1194 mutex_lock(&chip->mutex); \ 1195 } while (0) 1196 1197 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \ 1198 do { \ 1199 mutex_unlock(&chip->mutex); \ 1200 INVALIDATE_CACHED_RANGE(map, adr, len); \ 1201 cfi_udelay(usec); \ 1202 mutex_lock(&chip->mutex); \ 1203 } while (0) 1204 1205 #endif 1206 1207 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf) 1208 { 1209 unsigned long cmd_addr; 1210 struct cfi_private *cfi = map->fldrv_priv; 1211 int ret; 1212 1213 adr += chip->start; 1214 1215 /* Ensure cmd read/writes are aligned. */ 1216 cmd_addr = adr & ~(map_bankwidth(map)-1); 1217 1218 mutex_lock(&chip->mutex); 1219 ret = get_chip(map, chip, cmd_addr, FL_READY); 1220 if (ret) { 1221 mutex_unlock(&chip->mutex); 1222 return ret; 1223 } 1224 1225 if (chip->state != FL_POINT && chip->state != FL_READY) { 1226 map_write(map, CMD(0xf0), cmd_addr); 1227 chip->state = FL_READY; 1228 } 1229 1230 map_copy_from(map, buf, adr, len); 1231 1232 put_chip(map, chip, cmd_addr); 1233 1234 mutex_unlock(&chip->mutex); 1235 return 0; 1236 } 1237 1238 1239 static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) 1240 { 1241 struct map_info *map = mtd->priv; 1242 struct cfi_private *cfi = map->fldrv_priv; 1243 unsigned long ofs; 1244 int chipnum; 1245 int ret = 0; 1246 1247 /* ofs: offset within the first chip that the first read should start */ 1248 chipnum = (from >> cfi->chipshift); 1249 ofs = from - (chipnum << cfi->chipshift); 1250 1251 while (len) { 1252 unsigned long thislen; 1253 1254 if (chipnum >= cfi->numchips) 1255 break; 1256 1257 if ((len + ofs -1) >> cfi->chipshift) 1258 thislen = (1<<cfi->chipshift) - ofs; 1259 else 1260 thislen = len; 1261 1262 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf); 1263 if (ret) 1264 break; 1265 1266 *retlen += thislen; 1267 len -= thislen; 1268 buf += thislen; 1269 1270 ofs = 0; 1271 chipnum++; 1272 } 1273 return ret; 1274 } 1275 1276 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip, 1277 loff_t adr, size_t len, u_char *buf, size_t grouplen); 1278 1279 static inline void otp_enter(struct map_info *map, struct flchip *chip, 1280 loff_t adr, size_t len) 1281 { 1282 struct cfi_private *cfi = map->fldrv_priv; 1283 1284 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 1285 cfi->device_type, NULL); 1286 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 1287 cfi->device_type, NULL); 1288 cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, 1289 cfi->device_type, NULL); 1290 1291 INVALIDATE_CACHED_RANGE(map, chip->start + adr, len); 1292 } 1293 1294 static inline void otp_exit(struct map_info *map, struct flchip *chip, 1295 loff_t adr, size_t len) 1296 { 1297 struct cfi_private *cfi = map->fldrv_priv; 1298 1299 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 1300 cfi->device_type, NULL); 1301 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 1302 cfi->device_type, NULL); 1303 cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, 1304 cfi->device_type, NULL); 1305 cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, 1306 cfi->device_type, NULL); 1307 1308 INVALIDATE_CACHED_RANGE(map, chip->start + adr, len); 1309 } 1310 1311 static inline int do_read_secsi_onechip(struct map_info *map, 1312 struct flchip *chip, loff_t adr, 1313 size_t len, u_char *buf, 1314 size_t grouplen) 1315 { 1316 DECLARE_WAITQUEUE(wait, current); 1317 1318 retry: 1319 mutex_lock(&chip->mutex); 1320 1321 if (chip->state != FL_READY){ 1322 set_current_state(TASK_UNINTERRUPTIBLE); 1323 add_wait_queue(&chip->wq, &wait); 1324 1325 mutex_unlock(&chip->mutex); 1326 1327 schedule(); 1328 remove_wait_queue(&chip->wq, &wait); 1329 1330 goto retry; 1331 } 1332 1333 adr += chip->start; 1334 1335 chip->state = FL_READY; 1336 1337 otp_enter(map, chip, adr, len); 1338 map_copy_from(map, buf, adr, len); 1339 otp_exit(map, chip, adr, len); 1340 1341 wake_up(&chip->wq); 1342 mutex_unlock(&chip->mutex); 1343 1344 return 0; 1345 } 1346 1347 static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) 1348 { 1349 struct map_info *map = mtd->priv; 1350 struct cfi_private *cfi = map->fldrv_priv; 1351 unsigned long ofs; 1352 int chipnum; 1353 int ret = 0; 1354 1355 /* ofs: offset within the first chip that the first read should start */ 1356 /* 8 secsi bytes per chip */ 1357 chipnum=from>>3; 1358 ofs=from & 7; 1359 1360 while (len) { 1361 unsigned long thislen; 1362 1363 if (chipnum >= cfi->numchips) 1364 break; 1365 1366 if ((len + ofs -1) >> 3) 1367 thislen = (1<<3) - ofs; 1368 else 1369 thislen = len; 1370 1371 ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, 1372 thislen, buf, 0); 1373 if (ret) 1374 break; 1375 1376 *retlen += thislen; 1377 len -= thislen; 1378 buf += thislen; 1379 1380 ofs = 0; 1381 chipnum++; 1382 } 1383 return ret; 1384 } 1385 1386 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, 1387 unsigned long adr, map_word datum, 1388 int mode); 1389 1390 static int do_otp_write(struct map_info *map, struct flchip *chip, loff_t adr, 1391 size_t len, u_char *buf, size_t grouplen) 1392 { 1393 int ret; 1394 while (len) { 1395 unsigned long bus_ofs = adr & ~(map_bankwidth(map)-1); 1396 int gap = adr - bus_ofs; 1397 int n = min_t(int, len, map_bankwidth(map) - gap); 1398 map_word datum = map_word_ff(map); 1399 1400 if (n != map_bankwidth(map)) { 1401 /* partial write of a word, load old contents */ 1402 otp_enter(map, chip, bus_ofs, map_bankwidth(map)); 1403 datum = map_read(map, bus_ofs); 1404 otp_exit(map, chip, bus_ofs, map_bankwidth(map)); 1405 } 1406 1407 datum = map_word_load_partial(map, datum, buf, gap, n); 1408 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE); 1409 if (ret) 1410 return ret; 1411 1412 adr += n; 1413 buf += n; 1414 len -= n; 1415 } 1416 1417 return 0; 1418 } 1419 1420 static int do_otp_lock(struct map_info *map, struct flchip *chip, loff_t adr, 1421 size_t len, u_char *buf, size_t grouplen) 1422 { 1423 struct cfi_private *cfi = map->fldrv_priv; 1424 uint8_t lockreg; 1425 unsigned long timeo; 1426 int ret; 1427 1428 /* make sure area matches group boundaries */ 1429 if ((adr != 0) || (len != grouplen)) 1430 return -EINVAL; 1431 1432 mutex_lock(&chip->mutex); 1433 ret = get_chip(map, chip, chip->start, FL_LOCKING); 1434 if (ret) { 1435 mutex_unlock(&chip->mutex); 1436 return ret; 1437 } 1438 chip->state = FL_LOCKING; 1439 1440 /* Enter lock register command */ 1441 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 1442 cfi->device_type, NULL); 1443 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 1444 cfi->device_type, NULL); 1445 cfi_send_gen_cmd(0x40, cfi->addr_unlock1, chip->start, map, cfi, 1446 cfi->device_type, NULL); 1447 1448 /* read lock register */ 1449 lockreg = cfi_read_query(map, 0); 1450 1451 /* set bit 0 to protect extended memory block */ 1452 lockreg &= ~0x01; 1453 1454 /* set bit 0 to protect extended memory block */ 1455 /* write lock register */ 1456 map_write(map, CMD(0xA0), chip->start); 1457 map_write(map, CMD(lockreg), chip->start); 1458 1459 /* wait for chip to become ready */ 1460 timeo = jiffies + msecs_to_jiffies(2); 1461 for (;;) { 1462 if (chip_ready(map, chip, adr)) 1463 break; 1464 1465 if (time_after(jiffies, timeo)) { 1466 pr_err("Waiting for chip to be ready timed out.\n"); 1467 ret = -EIO; 1468 break; 1469 } 1470 UDELAY(map, chip, 0, 1); 1471 } 1472 1473 /* exit protection commands */ 1474 map_write(map, CMD(0x90), chip->start); 1475 map_write(map, CMD(0x00), chip->start); 1476 1477 chip->state = FL_READY; 1478 put_chip(map, chip, chip->start); 1479 mutex_unlock(&chip->mutex); 1480 1481 return ret; 1482 } 1483 1484 static int cfi_amdstd_otp_walk(struct mtd_info *mtd, loff_t from, size_t len, 1485 size_t *retlen, u_char *buf, 1486 otp_op_t action, int user_regs) 1487 { 1488 struct map_info *map = mtd->priv; 1489 struct cfi_private *cfi = map->fldrv_priv; 1490 int ofs_factor = cfi->interleave * cfi->device_type; 1491 unsigned long base; 1492 int chipnum; 1493 struct flchip *chip; 1494 uint8_t otp, lockreg; 1495 int ret; 1496 1497 size_t user_size, factory_size, otpsize; 1498 loff_t user_offset, factory_offset, otpoffset; 1499 int user_locked = 0, otplocked; 1500 1501 *retlen = 0; 1502 1503 for (chipnum = 0; chipnum < cfi->numchips; chipnum++) { 1504 chip = &cfi->chips[chipnum]; 1505 factory_size = 0; 1506 user_size = 0; 1507 1508 /* Micron M29EW family */ 1509 if (is_m29ew(cfi)) { 1510 base = chip->start; 1511 1512 /* check whether secsi area is factory locked 1513 or user lockable */ 1514 mutex_lock(&chip->mutex); 1515 ret = get_chip(map, chip, base, FL_CFI_QUERY); 1516 if (ret) { 1517 mutex_unlock(&chip->mutex); 1518 return ret; 1519 } 1520 cfi_qry_mode_on(base, map, cfi); 1521 otp = cfi_read_query(map, base + 0x3 * ofs_factor); 1522 cfi_qry_mode_off(base, map, cfi); 1523 put_chip(map, chip, base); 1524 mutex_unlock(&chip->mutex); 1525 1526 if (otp & 0x80) { 1527 /* factory locked */ 1528 factory_offset = 0; 1529 factory_size = 0x100; 1530 } else { 1531 /* customer lockable */ 1532 user_offset = 0; 1533 user_size = 0x100; 1534 1535 mutex_lock(&chip->mutex); 1536 ret = get_chip(map, chip, base, FL_LOCKING); 1537 if (ret) { 1538 mutex_unlock(&chip->mutex); 1539 return ret; 1540 } 1541 1542 /* Enter lock register command */ 1543 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, 1544 chip->start, map, cfi, 1545 cfi->device_type, NULL); 1546 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, 1547 chip->start, map, cfi, 1548 cfi->device_type, NULL); 1549 cfi_send_gen_cmd(0x40, cfi->addr_unlock1, 1550 chip->start, map, cfi, 1551 cfi->device_type, NULL); 1552 /* read lock register */ 1553 lockreg = cfi_read_query(map, 0); 1554 /* exit protection commands */ 1555 map_write(map, CMD(0x90), chip->start); 1556 map_write(map, CMD(0x00), chip->start); 1557 put_chip(map, chip, chip->start); 1558 mutex_unlock(&chip->mutex); 1559 1560 user_locked = ((lockreg & 0x01) == 0x00); 1561 } 1562 } 1563 1564 otpsize = user_regs ? user_size : factory_size; 1565 if (!otpsize) 1566 continue; 1567 otpoffset = user_regs ? user_offset : factory_offset; 1568 otplocked = user_regs ? user_locked : 1; 1569 1570 if (!action) { 1571 /* return otpinfo */ 1572 struct otp_info *otpinfo; 1573 len -= sizeof(*otpinfo); 1574 if (len <= 0) 1575 return -ENOSPC; 1576 otpinfo = (struct otp_info *)buf; 1577 otpinfo->start = from; 1578 otpinfo->length = otpsize; 1579 otpinfo->locked = otplocked; 1580 buf += sizeof(*otpinfo); 1581 *retlen += sizeof(*otpinfo); 1582 from += otpsize; 1583 } else if ((from < otpsize) && (len > 0)) { 1584 size_t size; 1585 size = (len < otpsize - from) ? len : otpsize - from; 1586 ret = action(map, chip, otpoffset + from, size, buf, 1587 otpsize); 1588 if (ret < 0) 1589 return ret; 1590 1591 buf += size; 1592 len -= size; 1593 *retlen += size; 1594 from = 0; 1595 } else { 1596 from -= otpsize; 1597 } 1598 } 1599 return 0; 1600 } 1601 1602 static int cfi_amdstd_get_fact_prot_info(struct mtd_info *mtd, size_t len, 1603 size_t *retlen, struct otp_info *buf) 1604 { 1605 return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf, 1606 NULL, 0); 1607 } 1608 1609 static int cfi_amdstd_get_user_prot_info(struct mtd_info *mtd, size_t len, 1610 size_t *retlen, struct otp_info *buf) 1611 { 1612 return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf, 1613 NULL, 1); 1614 } 1615 1616 static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, 1617 size_t len, size_t *retlen, 1618 u_char *buf) 1619 { 1620 return cfi_amdstd_otp_walk(mtd, from, len, retlen, 1621 buf, do_read_secsi_onechip, 0); 1622 } 1623 1624 static int cfi_amdstd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, 1625 size_t len, size_t *retlen, 1626 u_char *buf) 1627 { 1628 return cfi_amdstd_otp_walk(mtd, from, len, retlen, 1629 buf, do_read_secsi_onechip, 1); 1630 } 1631 1632 static int cfi_amdstd_write_user_prot_reg(struct mtd_info *mtd, loff_t from, 1633 size_t len, size_t *retlen, 1634 u_char *buf) 1635 { 1636 return cfi_amdstd_otp_walk(mtd, from, len, retlen, buf, 1637 do_otp_write, 1); 1638 } 1639 1640 static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, 1641 size_t len) 1642 { 1643 size_t retlen; 1644 return cfi_amdstd_otp_walk(mtd, from, len, &retlen, NULL, 1645 do_otp_lock, 1); 1646 } 1647 1648 static int __xipram do_write_oneword_once(struct map_info *map, 1649 struct flchip *chip, 1650 unsigned long adr, map_word datum, 1651 int mode, struct cfi_private *cfi) 1652 { 1653 unsigned long timeo = jiffies + HZ; 1654 /* 1655 * We use a 1ms + 1 jiffies generic timeout for writes (most devices 1656 * have a max write time of a few hundreds usec). However, we should 1657 * use the maximum timeout value given by the chip at probe time 1658 * instead. Unfortunately, struct flchip does have a field for 1659 * maximum timeout, only for typical which can be far too short 1660 * depending of the conditions. The ' + 1' is to avoid having a 1661 * timeout of 0 jiffies if HZ is smaller than 1000. 1662 */ 1663 unsigned long uWriteTimeout = (HZ / 1000) + 1; 1664 int ret = 0; 1665 1666 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1667 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 1668 cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1669 map_write(map, datum, adr); 1670 chip->state = mode; 1671 1672 INVALIDATE_CACHE_UDELAY(map, chip, 1673 adr, map_bankwidth(map), 1674 chip->word_write_time); 1675 1676 /* See comment above for timeout value. */ 1677 timeo = jiffies + uWriteTimeout; 1678 for (;;) { 1679 if (chip->state != mode) { 1680 /* Someone's suspended the write. Sleep */ 1681 DECLARE_WAITQUEUE(wait, current); 1682 1683 set_current_state(TASK_UNINTERRUPTIBLE); 1684 add_wait_queue(&chip->wq, &wait); 1685 mutex_unlock(&chip->mutex); 1686 schedule(); 1687 remove_wait_queue(&chip->wq, &wait); 1688 timeo = jiffies + (HZ / 2); /* FIXME */ 1689 mutex_lock(&chip->mutex); 1690 continue; 1691 } 1692 1693 /* 1694 * We check "time_after" and "!chip_good" before checking 1695 * "chip_good" to avoid the failure due to scheduling. 1696 */ 1697 if (time_after(jiffies, timeo) && 1698 !chip_good(map, chip, adr, datum)) { 1699 xip_enable(map, chip, adr); 1700 printk(KERN_WARNING "MTD %s(): software timeout\n", __func__); 1701 xip_disable(map, chip, adr); 1702 ret = -EIO; 1703 break; 1704 } 1705 1706 if (chip_good(map, chip, adr, datum)) { 1707 if (cfi_check_err_status(map, chip, adr)) 1708 ret = -EIO; 1709 break; 1710 } 1711 1712 /* Latency issues. Drop the lock, wait a while and retry */ 1713 UDELAY(map, chip, adr, 1); 1714 } 1715 1716 return ret; 1717 } 1718 1719 static int __xipram do_write_oneword_start(struct map_info *map, 1720 struct flchip *chip, 1721 unsigned long adr, int mode) 1722 { 1723 int ret; 1724 1725 mutex_lock(&chip->mutex); 1726 1727 ret = get_chip(map, chip, adr, mode); 1728 if (ret) { 1729 mutex_unlock(&chip->mutex); 1730 return ret; 1731 } 1732 1733 if (mode == FL_OTP_WRITE) 1734 otp_enter(map, chip, adr, map_bankwidth(map)); 1735 1736 return ret; 1737 } 1738 1739 static void __xipram do_write_oneword_done(struct map_info *map, 1740 struct flchip *chip, 1741 unsigned long adr, int mode) 1742 { 1743 if (mode == FL_OTP_WRITE) 1744 otp_exit(map, chip, adr, map_bankwidth(map)); 1745 1746 chip->state = FL_READY; 1747 DISABLE_VPP(map); 1748 put_chip(map, chip, adr); 1749 1750 mutex_unlock(&chip->mutex); 1751 } 1752 1753 static int __xipram do_write_oneword_retry(struct map_info *map, 1754 struct flchip *chip, 1755 unsigned long adr, map_word datum, 1756 int mode) 1757 { 1758 struct cfi_private *cfi = map->fldrv_priv; 1759 int ret = 0; 1760 map_word oldd; 1761 int retry_cnt = 0; 1762 1763 /* 1764 * Check for a NOP for the case when the datum to write is already 1765 * present - it saves time and works around buggy chips that corrupt 1766 * data at other locations when 0xff is written to a location that 1767 * already contains 0xff. 1768 */ 1769 oldd = map_read(map, adr); 1770 if (map_word_equal(map, oldd, datum)) { 1771 pr_debug("MTD %s(): NOP\n", __func__); 1772 return ret; 1773 } 1774 1775 XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map)); 1776 ENABLE_VPP(map); 1777 xip_disable(map, chip, adr); 1778 1779 retry: 1780 ret = do_write_oneword_once(map, chip, adr, datum, mode, cfi); 1781 if (ret) { 1782 /* reset on all failures. */ 1783 map_write(map, CMD(0xF0), chip->start); 1784 /* FIXME - should have reset delay before continuing */ 1785 1786 if (++retry_cnt <= MAX_RETRIES) { 1787 ret = 0; 1788 goto retry; 1789 } 1790 } 1791 xip_enable(map, chip, adr); 1792 1793 return ret; 1794 } 1795 1796 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, 1797 unsigned long adr, map_word datum, 1798 int mode) 1799 { 1800 int ret; 1801 1802 adr += chip->start; 1803 1804 pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", __func__, adr, 1805 datum.x[0]); 1806 1807 ret = do_write_oneword_start(map, chip, adr, mode); 1808 if (ret) 1809 return ret; 1810 1811 ret = do_write_oneword_retry(map, chip, adr, datum, mode); 1812 1813 do_write_oneword_done(map, chip, adr, mode); 1814 1815 return ret; 1816 } 1817 1818 1819 static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len, 1820 size_t *retlen, const u_char *buf) 1821 { 1822 struct map_info *map = mtd->priv; 1823 struct cfi_private *cfi = map->fldrv_priv; 1824 int ret; 1825 int chipnum; 1826 unsigned long ofs, chipstart; 1827 DECLARE_WAITQUEUE(wait, current); 1828 1829 chipnum = to >> cfi->chipshift; 1830 ofs = to - (chipnum << cfi->chipshift); 1831 chipstart = cfi->chips[chipnum].start; 1832 1833 /* If it's not bus-aligned, do the first byte write */ 1834 if (ofs & (map_bankwidth(map)-1)) { 1835 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1); 1836 int i = ofs - bus_ofs; 1837 int n = 0; 1838 map_word tmp_buf; 1839 1840 retry: 1841 mutex_lock(&cfi->chips[chipnum].mutex); 1842 1843 if (cfi->chips[chipnum].state != FL_READY) { 1844 set_current_state(TASK_UNINTERRUPTIBLE); 1845 add_wait_queue(&cfi->chips[chipnum].wq, &wait); 1846 1847 mutex_unlock(&cfi->chips[chipnum].mutex); 1848 1849 schedule(); 1850 remove_wait_queue(&cfi->chips[chipnum].wq, &wait); 1851 goto retry; 1852 } 1853 1854 /* Load 'tmp_buf' with old contents of flash */ 1855 tmp_buf = map_read(map, bus_ofs+chipstart); 1856 1857 mutex_unlock(&cfi->chips[chipnum].mutex); 1858 1859 /* Number of bytes to copy from buffer */ 1860 n = min_t(int, len, map_bankwidth(map)-i); 1861 1862 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n); 1863 1864 ret = do_write_oneword(map, &cfi->chips[chipnum], 1865 bus_ofs, tmp_buf, FL_WRITING); 1866 if (ret) 1867 return ret; 1868 1869 ofs += n; 1870 buf += n; 1871 (*retlen) += n; 1872 len -= n; 1873 1874 if (ofs >> cfi->chipshift) { 1875 chipnum ++; 1876 ofs = 0; 1877 if (chipnum == cfi->numchips) 1878 return 0; 1879 } 1880 } 1881 1882 /* We are now aligned, write as much as possible */ 1883 while(len >= map_bankwidth(map)) { 1884 map_word datum; 1885 1886 datum = map_word_load(map, buf); 1887 1888 ret = do_write_oneword(map, &cfi->chips[chipnum], 1889 ofs, datum, FL_WRITING); 1890 if (ret) 1891 return ret; 1892 1893 ofs += map_bankwidth(map); 1894 buf += map_bankwidth(map); 1895 (*retlen) += map_bankwidth(map); 1896 len -= map_bankwidth(map); 1897 1898 if (ofs >> cfi->chipshift) { 1899 chipnum ++; 1900 ofs = 0; 1901 if (chipnum == cfi->numchips) 1902 return 0; 1903 chipstart = cfi->chips[chipnum].start; 1904 } 1905 } 1906 1907 /* Write the trailing bytes if any */ 1908 if (len & (map_bankwidth(map)-1)) { 1909 map_word tmp_buf; 1910 1911 retry1: 1912 mutex_lock(&cfi->chips[chipnum].mutex); 1913 1914 if (cfi->chips[chipnum].state != FL_READY) { 1915 set_current_state(TASK_UNINTERRUPTIBLE); 1916 add_wait_queue(&cfi->chips[chipnum].wq, &wait); 1917 1918 mutex_unlock(&cfi->chips[chipnum].mutex); 1919 1920 schedule(); 1921 remove_wait_queue(&cfi->chips[chipnum].wq, &wait); 1922 goto retry1; 1923 } 1924 1925 tmp_buf = map_read(map, ofs + chipstart); 1926 1927 mutex_unlock(&cfi->chips[chipnum].mutex); 1928 1929 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len); 1930 1931 ret = do_write_oneword(map, &cfi->chips[chipnum], 1932 ofs, tmp_buf, FL_WRITING); 1933 if (ret) 1934 return ret; 1935 1936 (*retlen) += len; 1937 } 1938 1939 return 0; 1940 } 1941 1942 #if !FORCE_WORD_WRITE 1943 static int __xipram do_write_buffer_wait(struct map_info *map, 1944 struct flchip *chip, unsigned long adr, 1945 map_word datum) 1946 { 1947 unsigned long timeo; 1948 unsigned long u_write_timeout; 1949 int ret = 0; 1950 1951 /* 1952 * Timeout is calculated according to CFI data, if available. 1953 * See more comments in cfi_cmdset_0002(). 1954 */ 1955 u_write_timeout = usecs_to_jiffies(chip->buffer_write_time_max); 1956 timeo = jiffies + u_write_timeout; 1957 1958 for (;;) { 1959 if (chip->state != FL_WRITING) { 1960 /* Someone's suspended the write. Sleep */ 1961 DECLARE_WAITQUEUE(wait, current); 1962 1963 set_current_state(TASK_UNINTERRUPTIBLE); 1964 add_wait_queue(&chip->wq, &wait); 1965 mutex_unlock(&chip->mutex); 1966 schedule(); 1967 remove_wait_queue(&chip->wq, &wait); 1968 timeo = jiffies + (HZ / 2); /* FIXME */ 1969 mutex_lock(&chip->mutex); 1970 continue; 1971 } 1972 1973 /* 1974 * We check "time_after" and "!chip_good" before checking 1975 * "chip_good" to avoid the failure due to scheduling. 1976 */ 1977 if (time_after(jiffies, timeo) && 1978 !chip_good(map, chip, adr, datum)) { 1979 pr_err("MTD %s(): software timeout, address:0x%.8lx.\n", 1980 __func__, adr); 1981 ret = -EIO; 1982 break; 1983 } 1984 1985 if (chip_good(map, chip, adr, datum)) { 1986 if (cfi_check_err_status(map, chip, adr)) 1987 ret = -EIO; 1988 break; 1989 } 1990 1991 /* Latency issues. Drop the lock, wait a while and retry */ 1992 UDELAY(map, chip, adr, 1); 1993 } 1994 1995 return ret; 1996 } 1997 1998 static void __xipram do_write_buffer_reset(struct map_info *map, 1999 struct flchip *chip, 2000 struct cfi_private *cfi) 2001 { 2002 /* 2003 * Recovery from write-buffer programming failures requires 2004 * the write-to-buffer-reset sequence. Since the last part 2005 * of the sequence also works as a normal reset, we can run 2006 * the same commands regardless of why we are here. 2007 * See e.g. 2008 * http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf 2009 */ 2010 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 2011 cfi->device_type, NULL); 2012 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 2013 cfi->device_type, NULL); 2014 cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi, 2015 cfi->device_type, NULL); 2016 2017 /* FIXME - should have reset delay before continuing */ 2018 } 2019 2020 /* 2021 * FIXME: interleaved mode not tested, and probably not supported! 2022 */ 2023 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, 2024 unsigned long adr, const u_char *buf, 2025 int len) 2026 { 2027 struct cfi_private *cfi = map->fldrv_priv; 2028 int ret; 2029 unsigned long cmd_adr; 2030 int z, words; 2031 map_word datum; 2032 2033 adr += chip->start; 2034 cmd_adr = adr; 2035 2036 mutex_lock(&chip->mutex); 2037 ret = get_chip(map, chip, adr, FL_WRITING); 2038 if (ret) { 2039 mutex_unlock(&chip->mutex); 2040 return ret; 2041 } 2042 2043 datum = map_word_load(map, buf); 2044 2045 pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", 2046 __func__, adr, datum.x[0]); 2047 2048 XIP_INVAL_CACHED_RANGE(map, adr, len); 2049 ENABLE_VPP(map); 2050 xip_disable(map, chip, cmd_adr); 2051 2052 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2053 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 2054 2055 /* Write Buffer Load */ 2056 map_write(map, CMD(0x25), cmd_adr); 2057 2058 chip->state = FL_WRITING_TO_BUFFER; 2059 2060 /* Write length of data to come */ 2061 words = len / map_bankwidth(map); 2062 map_write(map, CMD(words - 1), cmd_adr); 2063 /* Write data */ 2064 z = 0; 2065 while(z < words * map_bankwidth(map)) { 2066 datum = map_word_load(map, buf); 2067 map_write(map, datum, adr + z); 2068 2069 z += map_bankwidth(map); 2070 buf += map_bankwidth(map); 2071 } 2072 z -= map_bankwidth(map); 2073 2074 adr += z; 2075 2076 /* Write Buffer Program Confirm: GO GO GO */ 2077 map_write(map, CMD(0x29), cmd_adr); 2078 chip->state = FL_WRITING; 2079 2080 INVALIDATE_CACHE_UDELAY(map, chip, 2081 adr, map_bankwidth(map), 2082 chip->word_write_time); 2083 2084 ret = do_write_buffer_wait(map, chip, adr, datum); 2085 if (ret) 2086 do_write_buffer_reset(map, chip, cfi); 2087 2088 xip_enable(map, chip, adr); 2089 2090 chip->state = FL_READY; 2091 DISABLE_VPP(map); 2092 put_chip(map, chip, adr); 2093 mutex_unlock(&chip->mutex); 2094 2095 return ret; 2096 } 2097 2098 2099 static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len, 2100 size_t *retlen, const u_char *buf) 2101 { 2102 struct map_info *map = mtd->priv; 2103 struct cfi_private *cfi = map->fldrv_priv; 2104 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; 2105 int ret; 2106 int chipnum; 2107 unsigned long ofs; 2108 2109 chipnum = to >> cfi->chipshift; 2110 ofs = to - (chipnum << cfi->chipshift); 2111 2112 /* If it's not bus-aligned, do the first word write */ 2113 if (ofs & (map_bankwidth(map)-1)) { 2114 size_t local_len = (-ofs)&(map_bankwidth(map)-1); 2115 if (local_len > len) 2116 local_len = len; 2117 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift), 2118 local_len, retlen, buf); 2119 if (ret) 2120 return ret; 2121 ofs += local_len; 2122 buf += local_len; 2123 len -= local_len; 2124 2125 if (ofs >> cfi->chipshift) { 2126 chipnum ++; 2127 ofs = 0; 2128 if (chipnum == cfi->numchips) 2129 return 0; 2130 } 2131 } 2132 2133 /* Write buffer is worth it only if more than one word to write... */ 2134 while (len >= map_bankwidth(map) * 2) { 2135 /* We must not cross write block boundaries */ 2136 int size = wbufsize - (ofs & (wbufsize-1)); 2137 2138 if (size > len) 2139 size = len; 2140 if (size % map_bankwidth(map)) 2141 size -= size % map_bankwidth(map); 2142 2143 ret = do_write_buffer(map, &cfi->chips[chipnum], 2144 ofs, buf, size); 2145 if (ret) 2146 return ret; 2147 2148 ofs += size; 2149 buf += size; 2150 (*retlen) += size; 2151 len -= size; 2152 2153 if (ofs >> cfi->chipshift) { 2154 chipnum ++; 2155 ofs = 0; 2156 if (chipnum == cfi->numchips) 2157 return 0; 2158 } 2159 } 2160 2161 if (len) { 2162 size_t retlen_dregs = 0; 2163 2164 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift), 2165 len, &retlen_dregs, buf); 2166 2167 *retlen += retlen_dregs; 2168 return ret; 2169 } 2170 2171 return 0; 2172 } 2173 #endif /* !FORCE_WORD_WRITE */ 2174 2175 /* 2176 * Wait for the flash chip to become ready to write data 2177 * 2178 * This is only called during the panic_write() path. When panic_write() 2179 * is called, the kernel is in the process of a panic, and will soon be 2180 * dead. Therefore we don't take any locks, and attempt to get access 2181 * to the chip as soon as possible. 2182 */ 2183 static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip, 2184 unsigned long adr) 2185 { 2186 struct cfi_private *cfi = map->fldrv_priv; 2187 int retries = 10; 2188 int i; 2189 2190 /* 2191 * If the driver thinks the chip is idle, and no toggle bits 2192 * are changing, then the chip is actually idle for sure. 2193 */ 2194 if (chip->state == FL_READY && chip_ready(map, chip, adr)) 2195 return 0; 2196 2197 /* 2198 * Try several times to reset the chip and then wait for it 2199 * to become idle. The upper limit of a few milliseconds of 2200 * delay isn't a big problem: the kernel is dying anyway. It 2201 * is more important to save the messages. 2202 */ 2203 while (retries > 0) { 2204 const unsigned long timeo = (HZ / 1000) + 1; 2205 2206 /* send the reset command */ 2207 map_write(map, CMD(0xF0), chip->start); 2208 2209 /* wait for the chip to become ready */ 2210 for (i = 0; i < jiffies_to_usecs(timeo); i++) { 2211 if (chip_ready(map, chip, adr)) 2212 return 0; 2213 2214 udelay(1); 2215 } 2216 2217 retries--; 2218 } 2219 2220 /* the chip never became ready */ 2221 return -EBUSY; 2222 } 2223 2224 /* 2225 * Write out one word of data to a single flash chip during a kernel panic 2226 * 2227 * This is only called during the panic_write() path. When panic_write() 2228 * is called, the kernel is in the process of a panic, and will soon be 2229 * dead. Therefore we don't take any locks, and attempt to get access 2230 * to the chip as soon as possible. 2231 * 2232 * The implementation of this routine is intentionally similar to 2233 * do_write_oneword(), in order to ease code maintenance. 2234 */ 2235 static int do_panic_write_oneword(struct map_info *map, struct flchip *chip, 2236 unsigned long adr, map_word datum) 2237 { 2238 const unsigned long uWriteTimeout = (HZ / 1000) + 1; 2239 struct cfi_private *cfi = map->fldrv_priv; 2240 int retry_cnt = 0; 2241 map_word oldd; 2242 int ret; 2243 int i; 2244 2245 adr += chip->start; 2246 2247 ret = cfi_amdstd_panic_wait(map, chip, adr); 2248 if (ret) 2249 return ret; 2250 2251 pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n", 2252 __func__, adr, datum.x[0]); 2253 2254 /* 2255 * Check for a NOP for the case when the datum to write is already 2256 * present - it saves time and works around buggy chips that corrupt 2257 * data at other locations when 0xff is written to a location that 2258 * already contains 0xff. 2259 */ 2260 oldd = map_read(map, adr); 2261 if (map_word_equal(map, oldd, datum)) { 2262 pr_debug("MTD %s(): NOP\n", __func__); 2263 goto op_done; 2264 } 2265 2266 ENABLE_VPP(map); 2267 2268 retry: 2269 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2270 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 2271 cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2272 map_write(map, datum, adr); 2273 2274 for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) { 2275 if (chip_ready(map, chip, adr)) 2276 break; 2277 2278 udelay(1); 2279 } 2280 2281 if (!chip_good(map, chip, adr, datum) || 2282 cfi_check_err_status(map, chip, adr)) { 2283 /* reset on all failures. */ 2284 map_write(map, CMD(0xF0), chip->start); 2285 /* FIXME - should have reset delay before continuing */ 2286 2287 if (++retry_cnt <= MAX_RETRIES) 2288 goto retry; 2289 2290 ret = -EIO; 2291 } 2292 2293 op_done: 2294 DISABLE_VPP(map); 2295 return ret; 2296 } 2297 2298 /* 2299 * Write out some data during a kernel panic 2300 * 2301 * This is used by the mtdoops driver to save the dying messages from a 2302 * kernel which has panic'd. 2303 * 2304 * This routine ignores all of the locking used throughout the rest of the 2305 * driver, in order to ensure that the data gets written out no matter what 2306 * state this driver (and the flash chip itself) was in when the kernel crashed. 2307 * 2308 * The implementation of this routine is intentionally similar to 2309 * cfi_amdstd_write_words(), in order to ease code maintenance. 2310 */ 2311 static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, 2312 size_t *retlen, const u_char *buf) 2313 { 2314 struct map_info *map = mtd->priv; 2315 struct cfi_private *cfi = map->fldrv_priv; 2316 unsigned long ofs, chipstart; 2317 int ret; 2318 int chipnum; 2319 2320 chipnum = to >> cfi->chipshift; 2321 ofs = to - (chipnum << cfi->chipshift); 2322 chipstart = cfi->chips[chipnum].start; 2323 2324 /* If it's not bus aligned, do the first byte write */ 2325 if (ofs & (map_bankwidth(map) - 1)) { 2326 unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1); 2327 int i = ofs - bus_ofs; 2328 int n = 0; 2329 map_word tmp_buf; 2330 2331 ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs); 2332 if (ret) 2333 return ret; 2334 2335 /* Load 'tmp_buf' with old contents of flash */ 2336 tmp_buf = map_read(map, bus_ofs + chipstart); 2337 2338 /* Number of bytes to copy from buffer */ 2339 n = min_t(int, len, map_bankwidth(map) - i); 2340 2341 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n); 2342 2343 ret = do_panic_write_oneword(map, &cfi->chips[chipnum], 2344 bus_ofs, tmp_buf); 2345 if (ret) 2346 return ret; 2347 2348 ofs += n; 2349 buf += n; 2350 (*retlen) += n; 2351 len -= n; 2352 2353 if (ofs >> cfi->chipshift) { 2354 chipnum++; 2355 ofs = 0; 2356 if (chipnum == cfi->numchips) 2357 return 0; 2358 } 2359 } 2360 2361 /* We are now aligned, write as much as possible */ 2362 while (len >= map_bankwidth(map)) { 2363 map_word datum; 2364 2365 datum = map_word_load(map, buf); 2366 2367 ret = do_panic_write_oneword(map, &cfi->chips[chipnum], 2368 ofs, datum); 2369 if (ret) 2370 return ret; 2371 2372 ofs += map_bankwidth(map); 2373 buf += map_bankwidth(map); 2374 (*retlen) += map_bankwidth(map); 2375 len -= map_bankwidth(map); 2376 2377 if (ofs >> cfi->chipshift) { 2378 chipnum++; 2379 ofs = 0; 2380 if (chipnum == cfi->numchips) 2381 return 0; 2382 2383 chipstart = cfi->chips[chipnum].start; 2384 } 2385 } 2386 2387 /* Write the trailing bytes if any */ 2388 if (len & (map_bankwidth(map) - 1)) { 2389 map_word tmp_buf; 2390 2391 ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs); 2392 if (ret) 2393 return ret; 2394 2395 tmp_buf = map_read(map, ofs + chipstart); 2396 2397 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len); 2398 2399 ret = do_panic_write_oneword(map, &cfi->chips[chipnum], 2400 ofs, tmp_buf); 2401 if (ret) 2402 return ret; 2403 2404 (*retlen) += len; 2405 } 2406 2407 return 0; 2408 } 2409 2410 2411 /* 2412 * Handle devices with one erase region, that only implement 2413 * the chip erase command. 2414 */ 2415 static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) 2416 { 2417 struct cfi_private *cfi = map->fldrv_priv; 2418 unsigned long timeo = jiffies + HZ; 2419 unsigned long int adr; 2420 DECLARE_WAITQUEUE(wait, current); 2421 int ret; 2422 int retry_cnt = 0; 2423 2424 adr = cfi->addr_unlock1; 2425 2426 mutex_lock(&chip->mutex); 2427 ret = get_chip(map, chip, adr, FL_ERASING); 2428 if (ret) { 2429 mutex_unlock(&chip->mutex); 2430 return ret; 2431 } 2432 2433 pr_debug("MTD %s(): ERASE 0x%.8lx\n", 2434 __func__, chip->start); 2435 2436 XIP_INVAL_CACHED_RANGE(map, adr, map->size); 2437 ENABLE_VPP(map); 2438 xip_disable(map, chip, adr); 2439 2440 retry: 2441 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2442 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 2443 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2444 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2445 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 2446 cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2447 2448 chip->state = FL_ERASING; 2449 chip->erase_suspended = 0; 2450 chip->in_progress_block_addr = adr; 2451 chip->in_progress_block_mask = ~(map->size - 1); 2452 2453 INVALIDATE_CACHE_UDELAY(map, chip, 2454 adr, map->size, 2455 chip->erase_time*500); 2456 2457 timeo = jiffies + (HZ*20); 2458 2459 for (;;) { 2460 if (chip->state != FL_ERASING) { 2461 /* Someone's suspended the erase. Sleep */ 2462 set_current_state(TASK_UNINTERRUPTIBLE); 2463 add_wait_queue(&chip->wq, &wait); 2464 mutex_unlock(&chip->mutex); 2465 schedule(); 2466 remove_wait_queue(&chip->wq, &wait); 2467 mutex_lock(&chip->mutex); 2468 continue; 2469 } 2470 if (chip->erase_suspended) { 2471 /* This erase was suspended and resumed. 2472 Adjust the timeout */ 2473 timeo = jiffies + (HZ*20); /* FIXME */ 2474 chip->erase_suspended = 0; 2475 } 2476 2477 if (chip_good(map, chip, adr, map_word_ff(map))) { 2478 if (cfi_check_err_status(map, chip, adr)) 2479 ret = -EIO; 2480 break; 2481 } 2482 2483 if (time_after(jiffies, timeo)) { 2484 printk(KERN_WARNING "MTD %s(): software timeout\n", 2485 __func__); 2486 ret = -EIO; 2487 break; 2488 } 2489 2490 /* Latency issues. Drop the lock, wait a while and retry */ 2491 UDELAY(map, chip, adr, 1000000/HZ); 2492 } 2493 /* Did we succeed? */ 2494 if (ret) { 2495 /* reset on all failures. */ 2496 map_write(map, CMD(0xF0), chip->start); 2497 /* FIXME - should have reset delay before continuing */ 2498 2499 if (++retry_cnt <= MAX_RETRIES) { 2500 ret = 0; 2501 goto retry; 2502 } 2503 } 2504 2505 chip->state = FL_READY; 2506 xip_enable(map, chip, adr); 2507 DISABLE_VPP(map); 2508 put_chip(map, chip, adr); 2509 mutex_unlock(&chip->mutex); 2510 2511 return ret; 2512 } 2513 2514 2515 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk) 2516 { 2517 struct cfi_private *cfi = map->fldrv_priv; 2518 unsigned long timeo = jiffies + HZ; 2519 DECLARE_WAITQUEUE(wait, current); 2520 int ret; 2521 int retry_cnt = 0; 2522 2523 adr += chip->start; 2524 2525 mutex_lock(&chip->mutex); 2526 ret = get_chip(map, chip, adr, FL_ERASING); 2527 if (ret) { 2528 mutex_unlock(&chip->mutex); 2529 return ret; 2530 } 2531 2532 pr_debug("MTD %s(): ERASE 0x%.8lx\n", 2533 __func__, adr); 2534 2535 XIP_INVAL_CACHED_RANGE(map, adr, len); 2536 ENABLE_VPP(map); 2537 xip_disable(map, chip, adr); 2538 2539 retry: 2540 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2541 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 2542 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2543 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2544 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 2545 map_write(map, cfi->sector_erase_cmd, adr); 2546 2547 chip->state = FL_ERASING; 2548 chip->erase_suspended = 0; 2549 chip->in_progress_block_addr = adr; 2550 chip->in_progress_block_mask = ~(len - 1); 2551 2552 INVALIDATE_CACHE_UDELAY(map, chip, 2553 adr, len, 2554 chip->erase_time*500); 2555 2556 timeo = jiffies + (HZ*20); 2557 2558 for (;;) { 2559 if (chip->state != FL_ERASING) { 2560 /* Someone's suspended the erase. Sleep */ 2561 set_current_state(TASK_UNINTERRUPTIBLE); 2562 add_wait_queue(&chip->wq, &wait); 2563 mutex_unlock(&chip->mutex); 2564 schedule(); 2565 remove_wait_queue(&chip->wq, &wait); 2566 mutex_lock(&chip->mutex); 2567 continue; 2568 } 2569 if (chip->erase_suspended) { 2570 /* This erase was suspended and resumed. 2571 Adjust the timeout */ 2572 timeo = jiffies + (HZ*20); /* FIXME */ 2573 chip->erase_suspended = 0; 2574 } 2575 2576 if (chip_good(map, chip, adr, map_word_ff(map))) { 2577 if (cfi_check_err_status(map, chip, adr)) 2578 ret = -EIO; 2579 break; 2580 } 2581 2582 if (time_after(jiffies, timeo)) { 2583 printk(KERN_WARNING "MTD %s(): software timeout\n", 2584 __func__); 2585 ret = -EIO; 2586 break; 2587 } 2588 2589 /* Latency issues. Drop the lock, wait a while and retry */ 2590 UDELAY(map, chip, adr, 1000000/HZ); 2591 } 2592 /* Did we succeed? */ 2593 if (ret) { 2594 /* reset on all failures. */ 2595 map_write(map, CMD(0xF0), chip->start); 2596 /* FIXME - should have reset delay before continuing */ 2597 2598 if (++retry_cnt <= MAX_RETRIES) { 2599 ret = 0; 2600 goto retry; 2601 } 2602 } 2603 2604 chip->state = FL_READY; 2605 xip_enable(map, chip, adr); 2606 DISABLE_VPP(map); 2607 put_chip(map, chip, adr); 2608 mutex_unlock(&chip->mutex); 2609 return ret; 2610 } 2611 2612 2613 static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr) 2614 { 2615 return cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr, 2616 instr->len, NULL); 2617 } 2618 2619 2620 static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr) 2621 { 2622 struct map_info *map = mtd->priv; 2623 struct cfi_private *cfi = map->fldrv_priv; 2624 2625 if (instr->addr != 0) 2626 return -EINVAL; 2627 2628 if (instr->len != mtd->size) 2629 return -EINVAL; 2630 2631 return do_erase_chip(map, &cfi->chips[0]); 2632 } 2633 2634 static int do_atmel_lock(struct map_info *map, struct flchip *chip, 2635 unsigned long adr, int len, void *thunk) 2636 { 2637 struct cfi_private *cfi = map->fldrv_priv; 2638 int ret; 2639 2640 mutex_lock(&chip->mutex); 2641 ret = get_chip(map, chip, adr + chip->start, FL_LOCKING); 2642 if (ret) 2643 goto out_unlock; 2644 chip->state = FL_LOCKING; 2645 2646 pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len); 2647 2648 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 2649 cfi->device_type, NULL); 2650 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 2651 cfi->device_type, NULL); 2652 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, 2653 cfi->device_type, NULL); 2654 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 2655 cfi->device_type, NULL); 2656 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 2657 cfi->device_type, NULL); 2658 map_write(map, CMD(0x40), chip->start + adr); 2659 2660 chip->state = FL_READY; 2661 put_chip(map, chip, adr + chip->start); 2662 ret = 0; 2663 2664 out_unlock: 2665 mutex_unlock(&chip->mutex); 2666 return ret; 2667 } 2668 2669 static int do_atmel_unlock(struct map_info *map, struct flchip *chip, 2670 unsigned long adr, int len, void *thunk) 2671 { 2672 struct cfi_private *cfi = map->fldrv_priv; 2673 int ret; 2674 2675 mutex_lock(&chip->mutex); 2676 ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING); 2677 if (ret) 2678 goto out_unlock; 2679 chip->state = FL_UNLOCKING; 2680 2681 pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len); 2682 2683 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 2684 cfi->device_type, NULL); 2685 map_write(map, CMD(0x70), adr); 2686 2687 chip->state = FL_READY; 2688 put_chip(map, chip, adr + chip->start); 2689 ret = 0; 2690 2691 out_unlock: 2692 mutex_unlock(&chip->mutex); 2693 return ret; 2694 } 2695 2696 static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 2697 { 2698 return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL); 2699 } 2700 2701 static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 2702 { 2703 return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL); 2704 } 2705 2706 /* 2707 * Advanced Sector Protection - PPB (Persistent Protection Bit) locking 2708 */ 2709 2710 struct ppb_lock { 2711 struct flchip *chip; 2712 unsigned long adr; 2713 int locked; 2714 }; 2715 2716 #define DO_XXLOCK_ONEBLOCK_LOCK ((void *)1) 2717 #define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *)2) 2718 #define DO_XXLOCK_ONEBLOCK_GETLOCK ((void *)3) 2719 2720 static int __maybe_unused do_ppb_xxlock(struct map_info *map, 2721 struct flchip *chip, 2722 unsigned long adr, int len, void *thunk) 2723 { 2724 struct cfi_private *cfi = map->fldrv_priv; 2725 unsigned long timeo; 2726 int ret; 2727 2728 adr += chip->start; 2729 mutex_lock(&chip->mutex); 2730 ret = get_chip(map, chip, adr, FL_LOCKING); 2731 if (ret) { 2732 mutex_unlock(&chip->mutex); 2733 return ret; 2734 } 2735 2736 pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n", __func__, adr, len); 2737 2738 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 2739 cfi->device_type, NULL); 2740 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 2741 cfi->device_type, NULL); 2742 /* PPB entry command */ 2743 cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi, 2744 cfi->device_type, NULL); 2745 2746 if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) { 2747 chip->state = FL_LOCKING; 2748 map_write(map, CMD(0xA0), adr); 2749 map_write(map, CMD(0x00), adr); 2750 } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) { 2751 /* 2752 * Unlocking of one specific sector is not supported, so we 2753 * have to unlock all sectors of this device instead 2754 */ 2755 chip->state = FL_UNLOCKING; 2756 map_write(map, CMD(0x80), chip->start); 2757 map_write(map, CMD(0x30), chip->start); 2758 } else if (thunk == DO_XXLOCK_ONEBLOCK_GETLOCK) { 2759 chip->state = FL_JEDEC_QUERY; 2760 /* Return locked status: 0->locked, 1->unlocked */ 2761 ret = !cfi_read_query(map, adr); 2762 } else 2763 BUG(); 2764 2765 /* 2766 * Wait for some time as unlocking of all sectors takes quite long 2767 */ 2768 timeo = jiffies + msecs_to_jiffies(2000); /* 2s max (un)locking */ 2769 for (;;) { 2770 if (chip_ready(map, chip, adr)) 2771 break; 2772 2773 if (time_after(jiffies, timeo)) { 2774 printk(KERN_ERR "Waiting for chip to be ready timed out.\n"); 2775 ret = -EIO; 2776 break; 2777 } 2778 2779 UDELAY(map, chip, adr, 1); 2780 } 2781 2782 /* Exit BC commands */ 2783 map_write(map, CMD(0x90), chip->start); 2784 map_write(map, CMD(0x00), chip->start); 2785 2786 chip->state = FL_READY; 2787 put_chip(map, chip, adr); 2788 mutex_unlock(&chip->mutex); 2789 2790 return ret; 2791 } 2792 2793 static int __maybe_unused cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, 2794 uint64_t len) 2795 { 2796 return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len, 2797 DO_XXLOCK_ONEBLOCK_LOCK); 2798 } 2799 2800 static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, 2801 uint64_t len) 2802 { 2803 struct mtd_erase_region_info *regions = mtd->eraseregions; 2804 struct map_info *map = mtd->priv; 2805 struct cfi_private *cfi = map->fldrv_priv; 2806 struct ppb_lock *sect; 2807 unsigned long adr; 2808 loff_t offset; 2809 uint64_t length; 2810 int chipnum; 2811 int i; 2812 int sectors; 2813 int ret; 2814 int max_sectors; 2815 2816 /* 2817 * PPB unlocking always unlocks all sectors of the flash chip. 2818 * We need to re-lock all previously locked sectors. So lets 2819 * first check the locking status of all sectors and save 2820 * it for future use. 2821 */ 2822 max_sectors = 0; 2823 for (i = 0; i < mtd->numeraseregions; i++) 2824 max_sectors += regions[i].numblocks; 2825 2826 sect = kcalloc(max_sectors, sizeof(struct ppb_lock), GFP_KERNEL); 2827 if (!sect) 2828 return -ENOMEM; 2829 2830 /* 2831 * This code to walk all sectors is a slightly modified version 2832 * of the cfi_varsize_frob() code. 2833 */ 2834 i = 0; 2835 chipnum = 0; 2836 adr = 0; 2837 sectors = 0; 2838 offset = 0; 2839 length = mtd->size; 2840 2841 while (length) { 2842 int size = regions[i].erasesize; 2843 2844 /* 2845 * Only test sectors that shall not be unlocked. The other 2846 * sectors shall be unlocked, so lets keep their locking 2847 * status at "unlocked" (locked=0) for the final re-locking. 2848 */ 2849 if ((offset < ofs) || (offset >= (ofs + len))) { 2850 sect[sectors].chip = &cfi->chips[chipnum]; 2851 sect[sectors].adr = adr; 2852 sect[sectors].locked = do_ppb_xxlock( 2853 map, &cfi->chips[chipnum], adr, 0, 2854 DO_XXLOCK_ONEBLOCK_GETLOCK); 2855 } 2856 2857 adr += size; 2858 offset += size; 2859 length -= size; 2860 2861 if (offset == regions[i].offset + size * regions[i].numblocks) 2862 i++; 2863 2864 if (adr >> cfi->chipshift) { 2865 if (offset >= (ofs + len)) 2866 break; 2867 adr = 0; 2868 chipnum++; 2869 2870 if (chipnum >= cfi->numchips) 2871 break; 2872 } 2873 2874 sectors++; 2875 if (sectors >= max_sectors) { 2876 printk(KERN_ERR "Only %d sectors for PPB locking supported!\n", 2877 max_sectors); 2878 kfree(sect); 2879 return -EINVAL; 2880 } 2881 } 2882 2883 /* Now unlock the whole chip */ 2884 ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len, 2885 DO_XXLOCK_ONEBLOCK_UNLOCK); 2886 if (ret) { 2887 kfree(sect); 2888 return ret; 2889 } 2890 2891 /* 2892 * PPB unlocking always unlocks all sectors of the flash chip. 2893 * We need to re-lock all previously locked sectors. 2894 */ 2895 for (i = 0; i < sectors; i++) { 2896 if (sect[i].locked) 2897 do_ppb_xxlock(map, sect[i].chip, sect[i].adr, 0, 2898 DO_XXLOCK_ONEBLOCK_LOCK); 2899 } 2900 2901 kfree(sect); 2902 return ret; 2903 } 2904 2905 static int __maybe_unused cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, 2906 uint64_t len) 2907 { 2908 return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len, 2909 DO_XXLOCK_ONEBLOCK_GETLOCK) ? 1 : 0; 2910 } 2911 2912 static void cfi_amdstd_sync (struct mtd_info *mtd) 2913 { 2914 struct map_info *map = mtd->priv; 2915 struct cfi_private *cfi = map->fldrv_priv; 2916 int i; 2917 struct flchip *chip; 2918 int ret = 0; 2919 DECLARE_WAITQUEUE(wait, current); 2920 2921 for (i=0; !ret && i<cfi->numchips; i++) { 2922 chip = &cfi->chips[i]; 2923 2924 retry: 2925 mutex_lock(&chip->mutex); 2926 2927 switch(chip->state) { 2928 case FL_READY: 2929 case FL_STATUS: 2930 case FL_CFI_QUERY: 2931 case FL_JEDEC_QUERY: 2932 chip->oldstate = chip->state; 2933 chip->state = FL_SYNCING; 2934 /* No need to wake_up() on this state change - 2935 * as the whole point is that nobody can do anything 2936 * with the chip now anyway. 2937 */ 2938 /* fall through */ 2939 case FL_SYNCING: 2940 mutex_unlock(&chip->mutex); 2941 break; 2942 2943 default: 2944 /* Not an idle state */ 2945 set_current_state(TASK_UNINTERRUPTIBLE); 2946 add_wait_queue(&chip->wq, &wait); 2947 2948 mutex_unlock(&chip->mutex); 2949 2950 schedule(); 2951 2952 remove_wait_queue(&chip->wq, &wait); 2953 2954 goto retry; 2955 } 2956 } 2957 2958 /* Unlock the chips again */ 2959 2960 for (i--; i >=0; i--) { 2961 chip = &cfi->chips[i]; 2962 2963 mutex_lock(&chip->mutex); 2964 2965 if (chip->state == FL_SYNCING) { 2966 chip->state = chip->oldstate; 2967 wake_up(&chip->wq); 2968 } 2969 mutex_unlock(&chip->mutex); 2970 } 2971 } 2972 2973 2974 static int cfi_amdstd_suspend(struct mtd_info *mtd) 2975 { 2976 struct map_info *map = mtd->priv; 2977 struct cfi_private *cfi = map->fldrv_priv; 2978 int i; 2979 struct flchip *chip; 2980 int ret = 0; 2981 2982 for (i=0; !ret && i<cfi->numchips; i++) { 2983 chip = &cfi->chips[i]; 2984 2985 mutex_lock(&chip->mutex); 2986 2987 switch(chip->state) { 2988 case FL_READY: 2989 case FL_STATUS: 2990 case FL_CFI_QUERY: 2991 case FL_JEDEC_QUERY: 2992 chip->oldstate = chip->state; 2993 chip->state = FL_PM_SUSPENDED; 2994 /* No need to wake_up() on this state change - 2995 * as the whole point is that nobody can do anything 2996 * with the chip now anyway. 2997 */ 2998 case FL_PM_SUSPENDED: 2999 break; 3000 3001 default: 3002 ret = -EAGAIN; 3003 break; 3004 } 3005 mutex_unlock(&chip->mutex); 3006 } 3007 3008 /* Unlock the chips again */ 3009 3010 if (ret) { 3011 for (i--; i >=0; i--) { 3012 chip = &cfi->chips[i]; 3013 3014 mutex_lock(&chip->mutex); 3015 3016 if (chip->state == FL_PM_SUSPENDED) { 3017 chip->state = chip->oldstate; 3018 wake_up(&chip->wq); 3019 } 3020 mutex_unlock(&chip->mutex); 3021 } 3022 } 3023 3024 return ret; 3025 } 3026 3027 3028 static void cfi_amdstd_resume(struct mtd_info *mtd) 3029 { 3030 struct map_info *map = mtd->priv; 3031 struct cfi_private *cfi = map->fldrv_priv; 3032 int i; 3033 struct flchip *chip; 3034 3035 for (i=0; i<cfi->numchips; i++) { 3036 3037 chip = &cfi->chips[i]; 3038 3039 mutex_lock(&chip->mutex); 3040 3041 if (chip->state == FL_PM_SUSPENDED) { 3042 chip->state = FL_READY; 3043 map_write(map, CMD(0xF0), chip->start); 3044 wake_up(&chip->wq); 3045 } 3046 else 3047 printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n"); 3048 3049 mutex_unlock(&chip->mutex); 3050 } 3051 } 3052 3053 3054 /* 3055 * Ensure that the flash device is put back into read array mode before 3056 * unloading the driver or rebooting. On some systems, rebooting while 3057 * the flash is in query/program/erase mode will prevent the CPU from 3058 * fetching the bootloader code, requiring a hard reset or power cycle. 3059 */ 3060 static int cfi_amdstd_reset(struct mtd_info *mtd) 3061 { 3062 struct map_info *map = mtd->priv; 3063 struct cfi_private *cfi = map->fldrv_priv; 3064 int i, ret; 3065 struct flchip *chip; 3066 3067 for (i = 0; i < cfi->numchips; i++) { 3068 3069 chip = &cfi->chips[i]; 3070 3071 mutex_lock(&chip->mutex); 3072 3073 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN); 3074 if (!ret) { 3075 map_write(map, CMD(0xF0), chip->start); 3076 chip->state = FL_SHUTDOWN; 3077 put_chip(map, chip, chip->start); 3078 } 3079 3080 mutex_unlock(&chip->mutex); 3081 } 3082 3083 return 0; 3084 } 3085 3086 3087 static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val, 3088 void *v) 3089 { 3090 struct mtd_info *mtd; 3091 3092 mtd = container_of(nb, struct mtd_info, reboot_notifier); 3093 cfi_amdstd_reset(mtd); 3094 return NOTIFY_DONE; 3095 } 3096 3097 3098 static void cfi_amdstd_destroy(struct mtd_info *mtd) 3099 { 3100 struct map_info *map = mtd->priv; 3101 struct cfi_private *cfi = map->fldrv_priv; 3102 3103 cfi_amdstd_reset(mtd); 3104 unregister_reboot_notifier(&mtd->reboot_notifier); 3105 kfree(cfi->cmdset_priv); 3106 kfree(cfi->cfiq); 3107 kfree(cfi); 3108 kfree(mtd->eraseregions); 3109 } 3110 3111 MODULE_LICENSE("GPL"); 3112 MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al."); 3113 MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips"); 3114 MODULE_ALIAS("cfi_cmdset_0006"); 3115 MODULE_ALIAS("cfi_cmdset_0701"); 3116