1 /* 2 * Common Flash Interface support: 3 * AMD & Fujitsu Standard Vendor Command Set (ID 0x0002) 4 * 5 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp> 6 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com> 7 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com> 8 * 9 * 2_by_8 routines added by Simon Munton 10 * 11 * 4_by_16 work by Carolyn J. Smith 12 * 13 * XIP support hooks by Vitaly Wool (based on code for Intel flash 14 * by Nicolas Pitre) 15 * 16 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0 17 * 18 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com 19 * 20 * This code is GPL 21 */ 22 23 #include <linux/module.h> 24 #include <linux/types.h> 25 #include <linux/kernel.h> 26 #include <linux/sched.h> 27 #include <asm/io.h> 28 #include <asm/byteorder.h> 29 30 #include <linux/errno.h> 31 #include <linux/slab.h> 32 #include <linux/delay.h> 33 #include <linux/interrupt.h> 34 #include <linux/reboot.h> 35 #include <linux/of.h> 36 #include <linux/of_platform.h> 37 #include <linux/mtd/map.h> 38 #include <linux/mtd/mtd.h> 39 #include <linux/mtd/cfi.h> 40 #include <linux/mtd/xip.h> 41 42 #define AMD_BOOTLOC_BUG 43 #define FORCE_WORD_WRITE 0 44 45 #define MAX_RETRIES 3 46 47 #define SST49LF004B 0x0060 48 #define SST49LF040B 0x0050 49 #define SST49LF008A 0x005a 50 #define AT49BV6416 0x00d6 51 52 /* 53 * Status Register bit description. Used by flash devices that don't 54 * support DQ polling (e.g. HyperFlash) 55 */ 56 #define CFI_SR_DRB BIT(7) 57 #define CFI_SR_ESB BIT(5) 58 #define CFI_SR_PSB BIT(4) 59 #define CFI_SR_WBASB BIT(3) 60 #define CFI_SR_SLSB BIT(1) 61 62 static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); 63 static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); 64 #if !FORCE_WORD_WRITE 65 static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); 66 #endif 67 static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *); 68 static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *); 69 static void cfi_amdstd_sync (struct mtd_info *); 70 static int cfi_amdstd_suspend (struct mtd_info *); 71 static void cfi_amdstd_resume (struct mtd_info *); 72 static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *); 73 static int cfi_amdstd_get_fact_prot_info(struct mtd_info *, size_t, 74 size_t *, struct otp_info *); 75 static int cfi_amdstd_get_user_prot_info(struct mtd_info *, size_t, 76 size_t *, struct otp_info *); 77 static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); 78 static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *, loff_t, size_t, 79 size_t *, u_char *); 80 static int cfi_amdstd_read_user_prot_reg(struct mtd_info *, loff_t, size_t, 81 size_t *, u_char *); 82 static int cfi_amdstd_write_user_prot_reg(struct mtd_info *, loff_t, size_t, 83 size_t *, u_char *); 84 static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *, loff_t, size_t); 85 86 static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, 87 size_t *retlen, const u_char *buf); 88 89 static void cfi_amdstd_destroy(struct mtd_info *); 90 91 struct mtd_info *cfi_cmdset_0002(struct map_info *, int); 92 static struct mtd_info *cfi_amdstd_setup (struct mtd_info *); 93 94 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode); 95 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr); 96 #include "fwh_lock.h" 97 98 static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); 99 static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); 100 101 static int cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); 102 static int cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); 103 static int cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len); 104 105 static struct mtd_chip_driver cfi_amdstd_chipdrv = { 106 .probe = NULL, /* Not usable directly */ 107 .destroy = cfi_amdstd_destroy, 108 .name = "cfi_cmdset_0002", 109 .module = THIS_MODULE 110 }; 111 112 /* 113 * Use status register to poll for Erase/write completion when DQ is not 114 * supported. This is indicated by Bit[1:0] of SoftwareFeatures field in 115 * CFI Primary Vendor-Specific Extended Query table 1.5 116 */ 117 static int cfi_use_status_reg(struct cfi_private *cfi) 118 { 119 struct cfi_pri_amdstd *extp = cfi->cmdset_priv; 120 u8 poll_mask = CFI_POLL_STATUS_REG | CFI_POLL_DQ; 121 122 return extp->MinorVersion >= '5' && 123 (extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG; 124 } 125 126 static void cfi_check_err_status(struct map_info *map, struct flchip *chip, 127 unsigned long adr) 128 { 129 struct cfi_private *cfi = map->fldrv_priv; 130 map_word status; 131 132 if (!cfi_use_status_reg(cfi)) 133 return; 134 135 cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi, 136 cfi->device_type, NULL); 137 status = map_read(map, adr); 138 139 if (map_word_bitsset(map, status, CMD(0x3a))) { 140 unsigned long chipstatus = MERGESTATUS(status); 141 142 if (chipstatus & CFI_SR_ESB) 143 pr_err("%s erase operation failed, status %lx\n", 144 map->name, chipstatus); 145 if (chipstatus & CFI_SR_PSB) 146 pr_err("%s program operation failed, status %lx\n", 147 map->name, chipstatus); 148 if (chipstatus & CFI_SR_WBASB) 149 pr_err("%s buffer program command aborted, status %lx\n", 150 map->name, chipstatus); 151 if (chipstatus & CFI_SR_SLSB) 152 pr_err("%s sector write protected, status %lx\n", 153 map->name, chipstatus); 154 } 155 } 156 157 /* #define DEBUG_CFI_FEATURES */ 158 159 160 #ifdef DEBUG_CFI_FEATURES 161 static void cfi_tell_features(struct cfi_pri_amdstd *extp) 162 { 163 const char* erase_suspend[3] = { 164 "Not supported", "Read only", "Read/write" 165 }; 166 const char* top_bottom[6] = { 167 "No WP", "8x8KiB sectors at top & bottom, no WP", 168 "Bottom boot", "Top boot", 169 "Uniform, Bottom WP", "Uniform, Top WP" 170 }; 171 172 printk(" Silicon revision: %d\n", extp->SiliconRevision >> 1); 173 printk(" Address sensitive unlock: %s\n", 174 (extp->SiliconRevision & 1) ? "Not required" : "Required"); 175 176 if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend)) 177 printk(" Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]); 178 else 179 printk(" Erase Suspend: Unknown value %d\n", extp->EraseSuspend); 180 181 if (extp->BlkProt == 0) 182 printk(" Block protection: Not supported\n"); 183 else 184 printk(" Block protection: %d sectors per group\n", extp->BlkProt); 185 186 187 printk(" Temporary block unprotect: %s\n", 188 extp->TmpBlkUnprotect ? "Supported" : "Not supported"); 189 printk(" Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot); 190 printk(" Number of simultaneous operations: %d\n", extp->SimultaneousOps); 191 printk(" Burst mode: %s\n", 192 extp->BurstMode ? "Supported" : "Not supported"); 193 if (extp->PageMode == 0) 194 printk(" Page mode: Not supported\n"); 195 else 196 printk(" Page mode: %d word page\n", extp->PageMode << 2); 197 198 printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n", 199 extp->VppMin >> 4, extp->VppMin & 0xf); 200 printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n", 201 extp->VppMax >> 4, extp->VppMax & 0xf); 202 203 if (extp->TopBottom < ARRAY_SIZE(top_bottom)) 204 printk(" Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]); 205 else 206 printk(" Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom); 207 } 208 #endif 209 210 #ifdef AMD_BOOTLOC_BUG 211 /* Wheee. Bring me the head of someone at AMD. */ 212 static void fixup_amd_bootblock(struct mtd_info *mtd) 213 { 214 struct map_info *map = mtd->priv; 215 struct cfi_private *cfi = map->fldrv_priv; 216 struct cfi_pri_amdstd *extp = cfi->cmdset_priv; 217 __u8 major = extp->MajorVersion; 218 __u8 minor = extp->MinorVersion; 219 220 if (((major << 8) | minor) < 0x3131) { 221 /* CFI version 1.0 => don't trust bootloc */ 222 223 pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n", 224 map->name, cfi->mfr, cfi->id); 225 226 /* AFAICS all 29LV400 with a bottom boot block have a device ID 227 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode. 228 * These were badly detected as they have the 0x80 bit set 229 * so treat them as a special case. 230 */ 231 if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) && 232 233 /* Macronix added CFI to their 2nd generation 234 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD, 235 * Fujitsu, Spansion, EON, ESI and older Macronix) 236 * has CFI. 237 * 238 * Therefore also check the manufacturer. 239 * This reduces the risk of false detection due to 240 * the 8-bit device ID. 241 */ 242 (cfi->mfr == CFI_MFR_MACRONIX)) { 243 pr_debug("%s: Macronix MX29LV400C with bottom boot block" 244 " detected\n", map->name); 245 extp->TopBottom = 2; /* bottom boot */ 246 } else 247 if (cfi->id & 0x80) { 248 printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id); 249 extp->TopBottom = 3; /* top boot */ 250 } else { 251 extp->TopBottom = 2; /* bottom boot */ 252 } 253 254 pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;" 255 " deduced %s from Device ID\n", map->name, major, minor, 256 extp->TopBottom == 2 ? "bottom" : "top"); 257 } 258 } 259 #endif 260 261 #if !FORCE_WORD_WRITE 262 static void fixup_use_write_buffers(struct mtd_info *mtd) 263 { 264 struct map_info *map = mtd->priv; 265 struct cfi_private *cfi = map->fldrv_priv; 266 if (cfi->cfiq->BufWriteTimeoutTyp) { 267 pr_debug("Using buffer write method\n"); 268 mtd->_write = cfi_amdstd_write_buffers; 269 } 270 } 271 #endif /* !FORCE_WORD_WRITE */ 272 273 /* Atmel chips don't use the same PRI format as AMD chips */ 274 static void fixup_convert_atmel_pri(struct mtd_info *mtd) 275 { 276 struct map_info *map = mtd->priv; 277 struct cfi_private *cfi = map->fldrv_priv; 278 struct cfi_pri_amdstd *extp = cfi->cmdset_priv; 279 struct cfi_pri_atmel atmel_pri; 280 281 memcpy(&atmel_pri, extp, sizeof(atmel_pri)); 282 memset((char *)extp + 5, 0, sizeof(*extp) - 5); 283 284 if (atmel_pri.Features & 0x02) 285 extp->EraseSuspend = 2; 286 287 /* Some chips got it backwards... */ 288 if (cfi->id == AT49BV6416) { 289 if (atmel_pri.BottomBoot) 290 extp->TopBottom = 3; 291 else 292 extp->TopBottom = 2; 293 } else { 294 if (atmel_pri.BottomBoot) 295 extp->TopBottom = 2; 296 else 297 extp->TopBottom = 3; 298 } 299 300 /* burst write mode not supported */ 301 cfi->cfiq->BufWriteTimeoutTyp = 0; 302 cfi->cfiq->BufWriteTimeoutMax = 0; 303 } 304 305 static void fixup_use_secsi(struct mtd_info *mtd) 306 { 307 /* Setup for chips with a secsi area */ 308 mtd->_read_user_prot_reg = cfi_amdstd_secsi_read; 309 mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read; 310 } 311 312 static void fixup_use_erase_chip(struct mtd_info *mtd) 313 { 314 struct map_info *map = mtd->priv; 315 struct cfi_private *cfi = map->fldrv_priv; 316 if ((cfi->cfiq->NumEraseRegions == 1) && 317 ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) { 318 mtd->_erase = cfi_amdstd_erase_chip; 319 } 320 321 } 322 323 /* 324 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors 325 * locked by default. 326 */ 327 static void fixup_use_atmel_lock(struct mtd_info *mtd) 328 { 329 mtd->_lock = cfi_atmel_lock; 330 mtd->_unlock = cfi_atmel_unlock; 331 mtd->flags |= MTD_POWERUP_LOCK; 332 } 333 334 static void fixup_old_sst_eraseregion(struct mtd_info *mtd) 335 { 336 struct map_info *map = mtd->priv; 337 struct cfi_private *cfi = map->fldrv_priv; 338 339 /* 340 * These flashes report two separate eraseblock regions based on the 341 * sector_erase-size and block_erase-size, although they both operate on the 342 * same memory. This is not allowed according to CFI, so we just pick the 343 * sector_erase-size. 344 */ 345 cfi->cfiq->NumEraseRegions = 1; 346 } 347 348 static void fixup_sst39vf(struct mtd_info *mtd) 349 { 350 struct map_info *map = mtd->priv; 351 struct cfi_private *cfi = map->fldrv_priv; 352 353 fixup_old_sst_eraseregion(mtd); 354 355 cfi->addr_unlock1 = 0x5555; 356 cfi->addr_unlock2 = 0x2AAA; 357 } 358 359 static void fixup_sst39vf_rev_b(struct mtd_info *mtd) 360 { 361 struct map_info *map = mtd->priv; 362 struct cfi_private *cfi = map->fldrv_priv; 363 364 fixup_old_sst_eraseregion(mtd); 365 366 cfi->addr_unlock1 = 0x555; 367 cfi->addr_unlock2 = 0x2AA; 368 369 cfi->sector_erase_cmd = CMD(0x50); 370 } 371 372 static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd) 373 { 374 struct map_info *map = mtd->priv; 375 struct cfi_private *cfi = map->fldrv_priv; 376 377 fixup_sst39vf_rev_b(mtd); 378 379 /* 380 * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where 381 * it should report a size of 8KBytes (0x0020*256). 382 */ 383 cfi->cfiq->EraseRegionInfo[0] = 0x002003ff; 384 pr_warn("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n", 385 mtd->name); 386 } 387 388 static void fixup_s29gl064n_sectors(struct mtd_info *mtd) 389 { 390 struct map_info *map = mtd->priv; 391 struct cfi_private *cfi = map->fldrv_priv; 392 393 if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) { 394 cfi->cfiq->EraseRegionInfo[0] |= 0x0040; 395 pr_warn("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n", 396 mtd->name); 397 } 398 } 399 400 static void fixup_s29gl032n_sectors(struct mtd_info *mtd) 401 { 402 struct map_info *map = mtd->priv; 403 struct cfi_private *cfi = map->fldrv_priv; 404 405 if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) { 406 cfi->cfiq->EraseRegionInfo[1] &= ~0x0040; 407 pr_warn("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n", 408 mtd->name); 409 } 410 } 411 412 static void fixup_s29ns512p_sectors(struct mtd_info *mtd) 413 { 414 struct map_info *map = mtd->priv; 415 struct cfi_private *cfi = map->fldrv_priv; 416 417 /* 418 * S29NS512P flash uses more than 8bits to report number of sectors, 419 * which is not permitted by CFI. 420 */ 421 cfi->cfiq->EraseRegionInfo[0] = 0x020001ff; 422 pr_warn("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n", 423 mtd->name); 424 } 425 426 /* Used to fix CFI-Tables of chips without Extended Query Tables */ 427 static struct cfi_fixup cfi_nopri_fixup_table[] = { 428 { CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */ 429 { CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */ 430 { CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */ 431 { CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */ 432 { CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */ 433 { CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */ 434 { CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */ 435 { CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */ 436 { 0, 0, NULL } 437 }; 438 439 static struct cfi_fixup cfi_fixup_table[] = { 440 { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri }, 441 #ifdef AMD_BOOTLOC_BUG 442 { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock }, 443 { CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock }, 444 { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock }, 445 #endif 446 { CFI_MFR_AMD, 0x0050, fixup_use_secsi }, 447 { CFI_MFR_AMD, 0x0053, fixup_use_secsi }, 448 { CFI_MFR_AMD, 0x0055, fixup_use_secsi }, 449 { CFI_MFR_AMD, 0x0056, fixup_use_secsi }, 450 { CFI_MFR_AMD, 0x005C, fixup_use_secsi }, 451 { CFI_MFR_AMD, 0x005F, fixup_use_secsi }, 452 { CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors }, 453 { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors }, 454 { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors }, 455 { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors }, 456 { CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors }, 457 { CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */ 458 { CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */ 459 { CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */ 460 { CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */ 461 #if !FORCE_WORD_WRITE 462 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers }, 463 #endif 464 { 0, 0, NULL } 465 }; 466 static struct cfi_fixup jedec_fixup_table[] = { 467 { CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock }, 468 { CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock }, 469 { CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock }, 470 { 0, 0, NULL } 471 }; 472 473 static struct cfi_fixup fixup_table[] = { 474 /* The CFI vendor ids and the JEDEC vendor IDs appear 475 * to be common. It is like the devices id's are as 476 * well. This table is to pick all cases where 477 * we know that is the case. 478 */ 479 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip }, 480 { CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock }, 481 { 0, 0, NULL } 482 }; 483 484 485 static void cfi_fixup_major_minor(struct cfi_private *cfi, 486 struct cfi_pri_amdstd *extp) 487 { 488 if (cfi->mfr == CFI_MFR_SAMSUNG) { 489 if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') || 490 (extp->MajorVersion == '3' && extp->MinorVersion == '3')) { 491 /* 492 * Samsung K8P2815UQB and K8D6x16UxM chips 493 * report major=0 / minor=0. 494 * K8D3x16UxC chips report major=3 / minor=3. 495 */ 496 printk(KERN_NOTICE " Fixing Samsung's Amd/Fujitsu" 497 " Extended Query version to 1.%c\n", 498 extp->MinorVersion); 499 extp->MajorVersion = '1'; 500 } 501 } 502 503 /* 504 * SST 38VF640x chips report major=0xFF / minor=0xFF. 505 */ 506 if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) { 507 extp->MajorVersion = '1'; 508 extp->MinorVersion = '0'; 509 } 510 } 511 512 static int is_m29ew(struct cfi_private *cfi) 513 { 514 if (cfi->mfr == CFI_MFR_INTEL && 515 ((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) || 516 (cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e))) 517 return 1; 518 return 0; 519 } 520 521 /* 522 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20: 523 * Some revisions of the M29EW suffer from erase suspend hang ups. In 524 * particular, it can occur when the sequence 525 * Erase Confirm -> Suspend -> Program -> Resume 526 * causes a lockup due to internal timing issues. The consequence is that the 527 * erase cannot be resumed without inserting a dummy command after programming 528 * and prior to resuming. [...] The work-around is to issue a dummy write cycle 529 * that writes an F0 command code before the RESUME command. 530 */ 531 static void cfi_fixup_m29ew_erase_suspend(struct map_info *map, 532 unsigned long adr) 533 { 534 struct cfi_private *cfi = map->fldrv_priv; 535 /* before resume, insert a dummy 0xF0 cycle for Micron M29EW devices */ 536 if (is_m29ew(cfi)) 537 map_write(map, CMD(0xF0), adr); 538 } 539 540 /* 541 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22: 542 * 543 * Some revisions of the M29EW (for example, A1 and A2 step revisions) 544 * are affected by a problem that could cause a hang up when an ERASE SUSPEND 545 * command is issued after an ERASE RESUME operation without waiting for a 546 * minimum delay. The result is that once the ERASE seems to be completed 547 * (no bits are toggling), the contents of the Flash memory block on which 548 * the erase was ongoing could be inconsistent with the expected values 549 * (typically, the array value is stuck to the 0xC0, 0xC4, 0x80, or 0x84 550 * values), causing a consequent failure of the ERASE operation. 551 * The occurrence of this issue could be high, especially when file system 552 * operations on the Flash are intensive. As a result, it is recommended 553 * that a patch be applied. Intensive file system operations can cause many 554 * calls to the garbage routine to free Flash space (also by erasing physical 555 * Flash blocks) and as a result, many consecutive SUSPEND and RESUME 556 * commands can occur. The problem disappears when a delay is inserted after 557 * the RESUME command by using the udelay() function available in Linux. 558 * The DELAY value must be tuned based on the customer's platform. 559 * The maximum value that fixes the problem in all cases is 500us. 560 * But, in our experience, a delay of 30 µs to 50 µs is sufficient 561 * in most cases. 562 * We have chosen 500µs because this latency is acceptable. 563 */ 564 static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi) 565 { 566 /* 567 * Resolving the Delay After Resume Issue see Micron TN-13-07 568 * Worst case delay must be 500µs but 30-50µs should be ok as well 569 */ 570 if (is_m29ew(cfi)) 571 cfi_udelay(500); 572 } 573 574 struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary) 575 { 576 struct cfi_private *cfi = map->fldrv_priv; 577 struct device_node __maybe_unused *np = map->device_node; 578 struct mtd_info *mtd; 579 int i; 580 581 mtd = kzalloc(sizeof(*mtd), GFP_KERNEL); 582 if (!mtd) 583 return NULL; 584 mtd->priv = map; 585 mtd->type = MTD_NORFLASH; 586 587 /* Fill in the default mtd operations */ 588 mtd->_erase = cfi_amdstd_erase_varsize; 589 mtd->_write = cfi_amdstd_write_words; 590 mtd->_read = cfi_amdstd_read; 591 mtd->_sync = cfi_amdstd_sync; 592 mtd->_suspend = cfi_amdstd_suspend; 593 mtd->_resume = cfi_amdstd_resume; 594 mtd->_read_user_prot_reg = cfi_amdstd_read_user_prot_reg; 595 mtd->_read_fact_prot_reg = cfi_amdstd_read_fact_prot_reg; 596 mtd->_get_fact_prot_info = cfi_amdstd_get_fact_prot_info; 597 mtd->_get_user_prot_info = cfi_amdstd_get_user_prot_info; 598 mtd->_write_user_prot_reg = cfi_amdstd_write_user_prot_reg; 599 mtd->_lock_user_prot_reg = cfi_amdstd_lock_user_prot_reg; 600 mtd->flags = MTD_CAP_NORFLASH; 601 mtd->name = map->name; 602 mtd->writesize = 1; 603 mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; 604 605 pr_debug("MTD %s(): write buffer size %d\n", __func__, 606 mtd->writebufsize); 607 608 mtd->_panic_write = cfi_amdstd_panic_write; 609 mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot; 610 611 if (cfi->cfi_mode==CFI_MODE_CFI){ 612 unsigned char bootloc; 613 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR; 614 struct cfi_pri_amdstd *extp; 615 616 extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu"); 617 if (extp) { 618 /* 619 * It's a real CFI chip, not one for which the probe 620 * routine faked a CFI structure. 621 */ 622 cfi_fixup_major_minor(cfi, extp); 623 624 /* 625 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5 626 * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19 627 * http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf 628 * http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf 629 * http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf 630 */ 631 if (extp->MajorVersion != '1' || 632 (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) { 633 printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query " 634 "version %c.%c (%#02x/%#02x).\n", 635 extp->MajorVersion, extp->MinorVersion, 636 extp->MajorVersion, extp->MinorVersion); 637 kfree(extp); 638 kfree(mtd); 639 return NULL; 640 } 641 642 printk(KERN_INFO " Amd/Fujitsu Extended Query version %c.%c.\n", 643 extp->MajorVersion, extp->MinorVersion); 644 645 /* Install our own private info structure */ 646 cfi->cmdset_priv = extp; 647 648 /* Apply cfi device specific fixups */ 649 cfi_fixup(mtd, cfi_fixup_table); 650 651 #ifdef DEBUG_CFI_FEATURES 652 /* Tell the user about it in lots of lovely detail */ 653 cfi_tell_features(extp); 654 #endif 655 656 #ifdef CONFIG_OF 657 if (np && of_property_read_bool( 658 np, "use-advanced-sector-protection") 659 && extp->BlkProtUnprot == 8) { 660 printk(KERN_INFO " Advanced Sector Protection (PPB Locking) supported\n"); 661 mtd->_lock = cfi_ppb_lock; 662 mtd->_unlock = cfi_ppb_unlock; 663 mtd->_is_locked = cfi_ppb_is_locked; 664 } 665 #endif 666 667 bootloc = extp->TopBottom; 668 if ((bootloc < 2) || (bootloc > 5)) { 669 printk(KERN_WARNING "%s: CFI contains unrecognised boot " 670 "bank location (%d). Assuming bottom.\n", 671 map->name, bootloc); 672 bootloc = 2; 673 } 674 675 if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) { 676 printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name); 677 678 for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) { 679 int j = (cfi->cfiq->NumEraseRegions-1)-i; 680 681 swap(cfi->cfiq->EraseRegionInfo[i], 682 cfi->cfiq->EraseRegionInfo[j]); 683 } 684 } 685 /* Set the default CFI lock/unlock addresses */ 686 cfi->addr_unlock1 = 0x555; 687 cfi->addr_unlock2 = 0x2aa; 688 } 689 cfi_fixup(mtd, cfi_nopri_fixup_table); 690 691 if (!cfi->addr_unlock1 || !cfi->addr_unlock2) { 692 kfree(mtd); 693 return NULL; 694 } 695 696 } /* CFI mode */ 697 else if (cfi->cfi_mode == CFI_MODE_JEDEC) { 698 /* Apply jedec specific fixups */ 699 cfi_fixup(mtd, jedec_fixup_table); 700 } 701 /* Apply generic fixups */ 702 cfi_fixup(mtd, fixup_table); 703 704 for (i=0; i< cfi->numchips; i++) { 705 cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp; 706 cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp; 707 cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp; 708 /* 709 * First calculate the timeout max according to timeout field 710 * of struct cfi_ident that probed from chip's CFI aera, if 711 * available. Specify a minimum of 2000us, in case the CFI data 712 * is wrong. 713 */ 714 if (cfi->cfiq->BufWriteTimeoutTyp && 715 cfi->cfiq->BufWriteTimeoutMax) 716 cfi->chips[i].buffer_write_time_max = 717 1 << (cfi->cfiq->BufWriteTimeoutTyp + 718 cfi->cfiq->BufWriteTimeoutMax); 719 else 720 cfi->chips[i].buffer_write_time_max = 0; 721 722 cfi->chips[i].buffer_write_time_max = 723 max(cfi->chips[i].buffer_write_time_max, 2000); 724 725 cfi->chips[i].ref_point_counter = 0; 726 init_waitqueue_head(&(cfi->chips[i].wq)); 727 } 728 729 map->fldrv = &cfi_amdstd_chipdrv; 730 731 return cfi_amdstd_setup(mtd); 732 } 733 struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002"))); 734 struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002"))); 735 EXPORT_SYMBOL_GPL(cfi_cmdset_0002); 736 EXPORT_SYMBOL_GPL(cfi_cmdset_0006); 737 EXPORT_SYMBOL_GPL(cfi_cmdset_0701); 738 739 static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd) 740 { 741 struct map_info *map = mtd->priv; 742 struct cfi_private *cfi = map->fldrv_priv; 743 unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave; 744 unsigned long offset = 0; 745 int i,j; 746 747 printk(KERN_NOTICE "number of %s chips: %d\n", 748 (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips); 749 /* Select the correct geometry setup */ 750 mtd->size = devsize * cfi->numchips; 751 752 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips; 753 mtd->eraseregions = kmalloc_array(mtd->numeraseregions, 754 sizeof(struct mtd_erase_region_info), 755 GFP_KERNEL); 756 if (!mtd->eraseregions) 757 goto setup_err; 758 759 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) { 760 unsigned long ernum, ersize; 761 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave; 762 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1; 763 764 if (mtd->erasesize < ersize) { 765 mtd->erasesize = ersize; 766 } 767 for (j=0; j<cfi->numchips; j++) { 768 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset; 769 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize; 770 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum; 771 } 772 offset += (ersize * ernum); 773 } 774 if (offset != devsize) { 775 /* Argh */ 776 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize); 777 goto setup_err; 778 } 779 780 __module_get(THIS_MODULE); 781 register_reboot_notifier(&mtd->reboot_notifier); 782 return mtd; 783 784 setup_err: 785 kfree(mtd->eraseregions); 786 kfree(mtd); 787 kfree(cfi->cmdset_priv); 788 kfree(cfi->cfiq); 789 return NULL; 790 } 791 792 /* 793 * Return true if the chip is ready. 794 * 795 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any 796 * non-suspended sector) and is indicated by no toggle bits toggling. 797 * 798 * Note that anything more complicated than checking if no bits are toggling 799 * (including checking DQ5 for an error status) is tricky to get working 800 * correctly and is therefore not done (particularly with interleaved chips 801 * as each chip must be checked independently of the others). 802 */ 803 static int __xipram chip_ready(struct map_info *map, struct flchip *chip, 804 unsigned long addr) 805 { 806 struct cfi_private *cfi = map->fldrv_priv; 807 map_word d, t; 808 809 if (cfi_use_status_reg(cfi)) { 810 map_word ready = CMD(CFI_SR_DRB); 811 /* 812 * For chips that support status register, check device 813 * ready bit 814 */ 815 cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi, 816 cfi->device_type, NULL); 817 d = map_read(map, addr); 818 819 return map_word_andequal(map, d, ready, ready); 820 } 821 822 d = map_read(map, addr); 823 t = map_read(map, addr); 824 825 return map_word_equal(map, d, t); 826 } 827 828 /* 829 * Return true if the chip is ready and has the correct value. 830 * 831 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any 832 * non-suspended sector) and it is indicated by no bits toggling. 833 * 834 * Error are indicated by toggling bits or bits held with the wrong value, 835 * or with bits toggling. 836 * 837 * Note that anything more complicated than checking if no bits are toggling 838 * (including checking DQ5 for an error status) is tricky to get working 839 * correctly and is therefore not done (particularly with interleaved chips 840 * as each chip must be checked independently of the others). 841 * 842 */ 843 static int __xipram chip_good(struct map_info *map, struct flchip *chip, 844 unsigned long addr, map_word expected) 845 { 846 struct cfi_private *cfi = map->fldrv_priv; 847 map_word oldd, curd; 848 849 if (cfi_use_status_reg(cfi)) { 850 map_word ready = CMD(CFI_SR_DRB); 851 map_word err = CMD(CFI_SR_PSB | CFI_SR_ESB); 852 /* 853 * For chips that support status register, check device 854 * ready bit and Erase/Program status bit to know if 855 * operation succeeded. 856 */ 857 cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi, 858 cfi->device_type, NULL); 859 curd = map_read(map, addr); 860 861 if (map_word_andequal(map, curd, ready, ready)) 862 return !map_word_bitsset(map, curd, err); 863 864 return 0; 865 } 866 867 oldd = map_read(map, addr); 868 curd = map_read(map, addr); 869 870 return map_word_equal(map, oldd, curd) && 871 map_word_equal(map, curd, expected); 872 } 873 874 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode) 875 { 876 DECLARE_WAITQUEUE(wait, current); 877 struct cfi_private *cfi = map->fldrv_priv; 878 unsigned long timeo; 879 struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv; 880 881 resettime: 882 timeo = jiffies + HZ; 883 retry: 884 switch (chip->state) { 885 886 case FL_STATUS: 887 for (;;) { 888 if (chip_ready(map, chip, adr)) 889 break; 890 891 if (time_after(jiffies, timeo)) { 892 printk(KERN_ERR "Waiting for chip to be ready timed out.\n"); 893 return -EIO; 894 } 895 mutex_unlock(&chip->mutex); 896 cfi_udelay(1); 897 mutex_lock(&chip->mutex); 898 /* Someone else might have been playing with it. */ 899 goto retry; 900 } 901 902 case FL_READY: 903 case FL_CFI_QUERY: 904 case FL_JEDEC_QUERY: 905 return 0; 906 907 case FL_ERASING: 908 if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) || 909 !(mode == FL_READY || mode == FL_POINT || 910 (mode == FL_WRITING && (cfip->EraseSuspend & 0x2)))) 911 goto sleep; 912 913 /* Do not allow suspend iff read/write to EB address */ 914 if ((adr & chip->in_progress_block_mask) == 915 chip->in_progress_block_addr) 916 goto sleep; 917 918 /* Erase suspend */ 919 /* It's harmless to issue the Erase-Suspend and Erase-Resume 920 * commands when the erase algorithm isn't in progress. */ 921 map_write(map, CMD(0xB0), chip->in_progress_block_addr); 922 chip->oldstate = FL_ERASING; 923 chip->state = FL_ERASE_SUSPENDING; 924 chip->erase_suspended = 1; 925 for (;;) { 926 if (chip_ready(map, chip, adr)) 927 break; 928 929 if (time_after(jiffies, timeo)) { 930 /* Should have suspended the erase by now. 931 * Send an Erase-Resume command as either 932 * there was an error (so leave the erase 933 * routine to recover from it) or we trying to 934 * use the erase-in-progress sector. */ 935 put_chip(map, chip, adr); 936 printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__); 937 return -EIO; 938 } 939 940 mutex_unlock(&chip->mutex); 941 cfi_udelay(1); 942 mutex_lock(&chip->mutex); 943 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING. 944 So we can just loop here. */ 945 } 946 chip->state = FL_READY; 947 return 0; 948 949 case FL_XIP_WHILE_ERASING: 950 if (mode != FL_READY && mode != FL_POINT && 951 (!cfip || !(cfip->EraseSuspend&2))) 952 goto sleep; 953 chip->oldstate = chip->state; 954 chip->state = FL_READY; 955 return 0; 956 957 case FL_SHUTDOWN: 958 /* The machine is rebooting */ 959 return -EIO; 960 961 case FL_POINT: 962 /* Only if there's no operation suspended... */ 963 if (mode == FL_READY && chip->oldstate == FL_READY) 964 return 0; 965 /* fall through */ 966 967 default: 968 sleep: 969 set_current_state(TASK_UNINTERRUPTIBLE); 970 add_wait_queue(&chip->wq, &wait); 971 mutex_unlock(&chip->mutex); 972 schedule(); 973 remove_wait_queue(&chip->wq, &wait); 974 mutex_lock(&chip->mutex); 975 goto resettime; 976 } 977 } 978 979 980 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr) 981 { 982 struct cfi_private *cfi = map->fldrv_priv; 983 984 switch(chip->oldstate) { 985 case FL_ERASING: 986 cfi_fixup_m29ew_erase_suspend(map, 987 chip->in_progress_block_addr); 988 map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr); 989 cfi_fixup_m29ew_delay_after_resume(cfi); 990 chip->oldstate = FL_READY; 991 chip->state = FL_ERASING; 992 break; 993 994 case FL_XIP_WHILE_ERASING: 995 chip->state = chip->oldstate; 996 chip->oldstate = FL_READY; 997 break; 998 999 case FL_READY: 1000 case FL_STATUS: 1001 break; 1002 default: 1003 printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate); 1004 } 1005 wake_up(&chip->wq); 1006 } 1007 1008 #ifdef CONFIG_MTD_XIP 1009 1010 /* 1011 * No interrupt what so ever can be serviced while the flash isn't in array 1012 * mode. This is ensured by the xip_disable() and xip_enable() functions 1013 * enclosing any code path where the flash is known not to be in array mode. 1014 * And within a XIP disabled code path, only functions marked with __xipram 1015 * may be called and nothing else (it's a good thing to inspect generated 1016 * assembly to make sure inline functions were actually inlined and that gcc 1017 * didn't emit calls to its own support functions). Also configuring MTD CFI 1018 * support to a single buswidth and a single interleave is also recommended. 1019 */ 1020 1021 static void xip_disable(struct map_info *map, struct flchip *chip, 1022 unsigned long adr) 1023 { 1024 /* TODO: chips with no XIP use should ignore and return */ 1025 (void) map_read(map, adr); /* ensure mmu mapping is up to date */ 1026 local_irq_disable(); 1027 } 1028 1029 static void __xipram xip_enable(struct map_info *map, struct flchip *chip, 1030 unsigned long adr) 1031 { 1032 struct cfi_private *cfi = map->fldrv_priv; 1033 1034 if (chip->state != FL_POINT && chip->state != FL_READY) { 1035 map_write(map, CMD(0xf0), adr); 1036 chip->state = FL_READY; 1037 } 1038 (void) map_read(map, adr); 1039 xip_iprefetch(); 1040 local_irq_enable(); 1041 } 1042 1043 /* 1044 * When a delay is required for the flash operation to complete, the 1045 * xip_udelay() function is polling for both the given timeout and pending 1046 * (but still masked) hardware interrupts. Whenever there is an interrupt 1047 * pending then the flash erase operation is suspended, array mode restored 1048 * and interrupts unmasked. Task scheduling might also happen at that 1049 * point. The CPU eventually returns from the interrupt or the call to 1050 * schedule() and the suspended flash operation is resumed for the remaining 1051 * of the delay period. 1052 * 1053 * Warning: this function _will_ fool interrupt latency tracing tools. 1054 */ 1055 1056 static void __xipram xip_udelay(struct map_info *map, struct flchip *chip, 1057 unsigned long adr, int usec) 1058 { 1059 struct cfi_private *cfi = map->fldrv_priv; 1060 struct cfi_pri_amdstd *extp = cfi->cmdset_priv; 1061 map_word status, OK = CMD(0x80); 1062 unsigned long suspended, start = xip_currtime(); 1063 flstate_t oldstate; 1064 1065 do { 1066 cpu_relax(); 1067 if (xip_irqpending() && extp && 1068 ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) && 1069 (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) { 1070 /* 1071 * Let's suspend the erase operation when supported. 1072 * Note that we currently don't try to suspend 1073 * interleaved chips if there is already another 1074 * operation suspended (imagine what happens 1075 * when one chip was already done with the current 1076 * operation while another chip suspended it, then 1077 * we resume the whole thing at once). Yes, it 1078 * can happen! 1079 */ 1080 map_write(map, CMD(0xb0), adr); 1081 usec -= xip_elapsed_since(start); 1082 suspended = xip_currtime(); 1083 do { 1084 if (xip_elapsed_since(suspended) > 100000) { 1085 /* 1086 * The chip doesn't want to suspend 1087 * after waiting for 100 msecs. 1088 * This is a critical error but there 1089 * is not much we can do here. 1090 */ 1091 return; 1092 } 1093 status = map_read(map, adr); 1094 } while (!map_word_andequal(map, status, OK, OK)); 1095 1096 /* Suspend succeeded */ 1097 oldstate = chip->state; 1098 if (!map_word_bitsset(map, status, CMD(0x40))) 1099 break; 1100 chip->state = FL_XIP_WHILE_ERASING; 1101 chip->erase_suspended = 1; 1102 map_write(map, CMD(0xf0), adr); 1103 (void) map_read(map, adr); 1104 xip_iprefetch(); 1105 local_irq_enable(); 1106 mutex_unlock(&chip->mutex); 1107 xip_iprefetch(); 1108 cond_resched(); 1109 1110 /* 1111 * We're back. However someone else might have 1112 * decided to go write to the chip if we are in 1113 * a suspended erase state. If so let's wait 1114 * until it's done. 1115 */ 1116 mutex_lock(&chip->mutex); 1117 while (chip->state != FL_XIP_WHILE_ERASING) { 1118 DECLARE_WAITQUEUE(wait, current); 1119 set_current_state(TASK_UNINTERRUPTIBLE); 1120 add_wait_queue(&chip->wq, &wait); 1121 mutex_unlock(&chip->mutex); 1122 schedule(); 1123 remove_wait_queue(&chip->wq, &wait); 1124 mutex_lock(&chip->mutex); 1125 } 1126 /* Disallow XIP again */ 1127 local_irq_disable(); 1128 1129 /* Correct Erase Suspend Hangups for M29EW */ 1130 cfi_fixup_m29ew_erase_suspend(map, adr); 1131 /* Resume the write or erase operation */ 1132 map_write(map, cfi->sector_erase_cmd, adr); 1133 chip->state = oldstate; 1134 start = xip_currtime(); 1135 } else if (usec >= 1000000/HZ) { 1136 /* 1137 * Try to save on CPU power when waiting delay 1138 * is at least a system timer tick period. 1139 * No need to be extremely accurate here. 1140 */ 1141 xip_cpu_idle(); 1142 } 1143 status = map_read(map, adr); 1144 } while (!map_word_andequal(map, status, OK, OK) 1145 && xip_elapsed_since(start) < usec); 1146 } 1147 1148 #define UDELAY(map, chip, adr, usec) xip_udelay(map, chip, adr, usec) 1149 1150 /* 1151 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while 1152 * the flash is actively programming or erasing since we have to poll for 1153 * the operation to complete anyway. We can't do that in a generic way with 1154 * a XIP setup so do it before the actual flash operation in this case 1155 * and stub it out from INVALIDATE_CACHE_UDELAY. 1156 */ 1157 #define XIP_INVAL_CACHED_RANGE(map, from, size) \ 1158 INVALIDATE_CACHED_RANGE(map, from, size) 1159 1160 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \ 1161 UDELAY(map, chip, adr, usec) 1162 1163 /* 1164 * Extra notes: 1165 * 1166 * Activating this XIP support changes the way the code works a bit. For 1167 * example the code to suspend the current process when concurrent access 1168 * happens is never executed because xip_udelay() will always return with the 1169 * same chip state as it was entered with. This is why there is no care for 1170 * the presence of add_wait_queue() or schedule() calls from within a couple 1171 * xip_disable()'d areas of code, like in do_erase_oneblock for example. 1172 * The queueing and scheduling are always happening within xip_udelay(). 1173 * 1174 * Similarly, get_chip() and put_chip() just happen to always be executed 1175 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state 1176 * is in array mode, therefore never executing many cases therein and not 1177 * causing any problem with XIP. 1178 */ 1179 1180 #else 1181 1182 #define xip_disable(map, chip, adr) 1183 #define xip_enable(map, chip, adr) 1184 #define XIP_INVAL_CACHED_RANGE(x...) 1185 1186 #define UDELAY(map, chip, adr, usec) \ 1187 do { \ 1188 mutex_unlock(&chip->mutex); \ 1189 cfi_udelay(usec); \ 1190 mutex_lock(&chip->mutex); \ 1191 } while (0) 1192 1193 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \ 1194 do { \ 1195 mutex_unlock(&chip->mutex); \ 1196 INVALIDATE_CACHED_RANGE(map, adr, len); \ 1197 cfi_udelay(usec); \ 1198 mutex_lock(&chip->mutex); \ 1199 } while (0) 1200 1201 #endif 1202 1203 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf) 1204 { 1205 unsigned long cmd_addr; 1206 struct cfi_private *cfi = map->fldrv_priv; 1207 int ret; 1208 1209 adr += chip->start; 1210 1211 /* Ensure cmd read/writes are aligned. */ 1212 cmd_addr = adr & ~(map_bankwidth(map)-1); 1213 1214 mutex_lock(&chip->mutex); 1215 ret = get_chip(map, chip, cmd_addr, FL_READY); 1216 if (ret) { 1217 mutex_unlock(&chip->mutex); 1218 return ret; 1219 } 1220 1221 if (chip->state != FL_POINT && chip->state != FL_READY) { 1222 map_write(map, CMD(0xf0), cmd_addr); 1223 chip->state = FL_READY; 1224 } 1225 1226 map_copy_from(map, buf, adr, len); 1227 1228 put_chip(map, chip, cmd_addr); 1229 1230 mutex_unlock(&chip->mutex); 1231 return 0; 1232 } 1233 1234 1235 static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) 1236 { 1237 struct map_info *map = mtd->priv; 1238 struct cfi_private *cfi = map->fldrv_priv; 1239 unsigned long ofs; 1240 int chipnum; 1241 int ret = 0; 1242 1243 /* ofs: offset within the first chip that the first read should start */ 1244 chipnum = (from >> cfi->chipshift); 1245 ofs = from - (chipnum << cfi->chipshift); 1246 1247 while (len) { 1248 unsigned long thislen; 1249 1250 if (chipnum >= cfi->numchips) 1251 break; 1252 1253 if ((len + ofs -1) >> cfi->chipshift) 1254 thislen = (1<<cfi->chipshift) - ofs; 1255 else 1256 thislen = len; 1257 1258 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf); 1259 if (ret) 1260 break; 1261 1262 *retlen += thislen; 1263 len -= thislen; 1264 buf += thislen; 1265 1266 ofs = 0; 1267 chipnum++; 1268 } 1269 return ret; 1270 } 1271 1272 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip, 1273 loff_t adr, size_t len, u_char *buf, size_t grouplen); 1274 1275 static inline void otp_enter(struct map_info *map, struct flchip *chip, 1276 loff_t adr, size_t len) 1277 { 1278 struct cfi_private *cfi = map->fldrv_priv; 1279 1280 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 1281 cfi->device_type, NULL); 1282 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 1283 cfi->device_type, NULL); 1284 cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, 1285 cfi->device_type, NULL); 1286 1287 INVALIDATE_CACHED_RANGE(map, chip->start + adr, len); 1288 } 1289 1290 static inline void otp_exit(struct map_info *map, struct flchip *chip, 1291 loff_t adr, size_t len) 1292 { 1293 struct cfi_private *cfi = map->fldrv_priv; 1294 1295 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 1296 cfi->device_type, NULL); 1297 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 1298 cfi->device_type, NULL); 1299 cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, 1300 cfi->device_type, NULL); 1301 cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, 1302 cfi->device_type, NULL); 1303 1304 INVALIDATE_CACHED_RANGE(map, chip->start + adr, len); 1305 } 1306 1307 static inline int do_read_secsi_onechip(struct map_info *map, 1308 struct flchip *chip, loff_t adr, 1309 size_t len, u_char *buf, 1310 size_t grouplen) 1311 { 1312 DECLARE_WAITQUEUE(wait, current); 1313 1314 retry: 1315 mutex_lock(&chip->mutex); 1316 1317 if (chip->state != FL_READY){ 1318 set_current_state(TASK_UNINTERRUPTIBLE); 1319 add_wait_queue(&chip->wq, &wait); 1320 1321 mutex_unlock(&chip->mutex); 1322 1323 schedule(); 1324 remove_wait_queue(&chip->wq, &wait); 1325 1326 goto retry; 1327 } 1328 1329 adr += chip->start; 1330 1331 chip->state = FL_READY; 1332 1333 otp_enter(map, chip, adr, len); 1334 map_copy_from(map, buf, adr, len); 1335 otp_exit(map, chip, adr, len); 1336 1337 wake_up(&chip->wq); 1338 mutex_unlock(&chip->mutex); 1339 1340 return 0; 1341 } 1342 1343 static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) 1344 { 1345 struct map_info *map = mtd->priv; 1346 struct cfi_private *cfi = map->fldrv_priv; 1347 unsigned long ofs; 1348 int chipnum; 1349 int ret = 0; 1350 1351 /* ofs: offset within the first chip that the first read should start */ 1352 /* 8 secsi bytes per chip */ 1353 chipnum=from>>3; 1354 ofs=from & 7; 1355 1356 while (len) { 1357 unsigned long thislen; 1358 1359 if (chipnum >= cfi->numchips) 1360 break; 1361 1362 if ((len + ofs -1) >> 3) 1363 thislen = (1<<3) - ofs; 1364 else 1365 thislen = len; 1366 1367 ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, 1368 thislen, buf, 0); 1369 if (ret) 1370 break; 1371 1372 *retlen += thislen; 1373 len -= thislen; 1374 buf += thislen; 1375 1376 ofs = 0; 1377 chipnum++; 1378 } 1379 return ret; 1380 } 1381 1382 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, 1383 unsigned long adr, map_word datum, 1384 int mode); 1385 1386 static int do_otp_write(struct map_info *map, struct flchip *chip, loff_t adr, 1387 size_t len, u_char *buf, size_t grouplen) 1388 { 1389 int ret; 1390 while (len) { 1391 unsigned long bus_ofs = adr & ~(map_bankwidth(map)-1); 1392 int gap = adr - bus_ofs; 1393 int n = min_t(int, len, map_bankwidth(map) - gap); 1394 map_word datum = map_word_ff(map); 1395 1396 if (n != map_bankwidth(map)) { 1397 /* partial write of a word, load old contents */ 1398 otp_enter(map, chip, bus_ofs, map_bankwidth(map)); 1399 datum = map_read(map, bus_ofs); 1400 otp_exit(map, chip, bus_ofs, map_bankwidth(map)); 1401 } 1402 1403 datum = map_word_load_partial(map, datum, buf, gap, n); 1404 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE); 1405 if (ret) 1406 return ret; 1407 1408 adr += n; 1409 buf += n; 1410 len -= n; 1411 } 1412 1413 return 0; 1414 } 1415 1416 static int do_otp_lock(struct map_info *map, struct flchip *chip, loff_t adr, 1417 size_t len, u_char *buf, size_t grouplen) 1418 { 1419 struct cfi_private *cfi = map->fldrv_priv; 1420 uint8_t lockreg; 1421 unsigned long timeo; 1422 int ret; 1423 1424 /* make sure area matches group boundaries */ 1425 if ((adr != 0) || (len != grouplen)) 1426 return -EINVAL; 1427 1428 mutex_lock(&chip->mutex); 1429 ret = get_chip(map, chip, chip->start, FL_LOCKING); 1430 if (ret) { 1431 mutex_unlock(&chip->mutex); 1432 return ret; 1433 } 1434 chip->state = FL_LOCKING; 1435 1436 /* Enter lock register command */ 1437 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 1438 cfi->device_type, NULL); 1439 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 1440 cfi->device_type, NULL); 1441 cfi_send_gen_cmd(0x40, cfi->addr_unlock1, chip->start, map, cfi, 1442 cfi->device_type, NULL); 1443 1444 /* read lock register */ 1445 lockreg = cfi_read_query(map, 0); 1446 1447 /* set bit 0 to protect extended memory block */ 1448 lockreg &= ~0x01; 1449 1450 /* set bit 0 to protect extended memory block */ 1451 /* write lock register */ 1452 map_write(map, CMD(0xA0), chip->start); 1453 map_write(map, CMD(lockreg), chip->start); 1454 1455 /* wait for chip to become ready */ 1456 timeo = jiffies + msecs_to_jiffies(2); 1457 for (;;) { 1458 if (chip_ready(map, chip, adr)) 1459 break; 1460 1461 if (time_after(jiffies, timeo)) { 1462 pr_err("Waiting for chip to be ready timed out.\n"); 1463 ret = -EIO; 1464 break; 1465 } 1466 UDELAY(map, chip, 0, 1); 1467 } 1468 1469 /* exit protection commands */ 1470 map_write(map, CMD(0x90), chip->start); 1471 map_write(map, CMD(0x00), chip->start); 1472 1473 chip->state = FL_READY; 1474 put_chip(map, chip, chip->start); 1475 mutex_unlock(&chip->mutex); 1476 1477 return ret; 1478 } 1479 1480 static int cfi_amdstd_otp_walk(struct mtd_info *mtd, loff_t from, size_t len, 1481 size_t *retlen, u_char *buf, 1482 otp_op_t action, int user_regs) 1483 { 1484 struct map_info *map = mtd->priv; 1485 struct cfi_private *cfi = map->fldrv_priv; 1486 int ofs_factor = cfi->interleave * cfi->device_type; 1487 unsigned long base; 1488 int chipnum; 1489 struct flchip *chip; 1490 uint8_t otp, lockreg; 1491 int ret; 1492 1493 size_t user_size, factory_size, otpsize; 1494 loff_t user_offset, factory_offset, otpoffset; 1495 int user_locked = 0, otplocked; 1496 1497 *retlen = 0; 1498 1499 for (chipnum = 0; chipnum < cfi->numchips; chipnum++) { 1500 chip = &cfi->chips[chipnum]; 1501 factory_size = 0; 1502 user_size = 0; 1503 1504 /* Micron M29EW family */ 1505 if (is_m29ew(cfi)) { 1506 base = chip->start; 1507 1508 /* check whether secsi area is factory locked 1509 or user lockable */ 1510 mutex_lock(&chip->mutex); 1511 ret = get_chip(map, chip, base, FL_CFI_QUERY); 1512 if (ret) { 1513 mutex_unlock(&chip->mutex); 1514 return ret; 1515 } 1516 cfi_qry_mode_on(base, map, cfi); 1517 otp = cfi_read_query(map, base + 0x3 * ofs_factor); 1518 cfi_qry_mode_off(base, map, cfi); 1519 put_chip(map, chip, base); 1520 mutex_unlock(&chip->mutex); 1521 1522 if (otp & 0x80) { 1523 /* factory locked */ 1524 factory_offset = 0; 1525 factory_size = 0x100; 1526 } else { 1527 /* customer lockable */ 1528 user_offset = 0; 1529 user_size = 0x100; 1530 1531 mutex_lock(&chip->mutex); 1532 ret = get_chip(map, chip, base, FL_LOCKING); 1533 if (ret) { 1534 mutex_unlock(&chip->mutex); 1535 return ret; 1536 } 1537 1538 /* Enter lock register command */ 1539 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, 1540 chip->start, map, cfi, 1541 cfi->device_type, NULL); 1542 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, 1543 chip->start, map, cfi, 1544 cfi->device_type, NULL); 1545 cfi_send_gen_cmd(0x40, cfi->addr_unlock1, 1546 chip->start, map, cfi, 1547 cfi->device_type, NULL); 1548 /* read lock register */ 1549 lockreg = cfi_read_query(map, 0); 1550 /* exit protection commands */ 1551 map_write(map, CMD(0x90), chip->start); 1552 map_write(map, CMD(0x00), chip->start); 1553 put_chip(map, chip, chip->start); 1554 mutex_unlock(&chip->mutex); 1555 1556 user_locked = ((lockreg & 0x01) == 0x00); 1557 } 1558 } 1559 1560 otpsize = user_regs ? user_size : factory_size; 1561 if (!otpsize) 1562 continue; 1563 otpoffset = user_regs ? user_offset : factory_offset; 1564 otplocked = user_regs ? user_locked : 1; 1565 1566 if (!action) { 1567 /* return otpinfo */ 1568 struct otp_info *otpinfo; 1569 len -= sizeof(*otpinfo); 1570 if (len <= 0) 1571 return -ENOSPC; 1572 otpinfo = (struct otp_info *)buf; 1573 otpinfo->start = from; 1574 otpinfo->length = otpsize; 1575 otpinfo->locked = otplocked; 1576 buf += sizeof(*otpinfo); 1577 *retlen += sizeof(*otpinfo); 1578 from += otpsize; 1579 } else if ((from < otpsize) && (len > 0)) { 1580 size_t size; 1581 size = (len < otpsize - from) ? len : otpsize - from; 1582 ret = action(map, chip, otpoffset + from, size, buf, 1583 otpsize); 1584 if (ret < 0) 1585 return ret; 1586 1587 buf += size; 1588 len -= size; 1589 *retlen += size; 1590 from = 0; 1591 } else { 1592 from -= otpsize; 1593 } 1594 } 1595 return 0; 1596 } 1597 1598 static int cfi_amdstd_get_fact_prot_info(struct mtd_info *mtd, size_t len, 1599 size_t *retlen, struct otp_info *buf) 1600 { 1601 return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf, 1602 NULL, 0); 1603 } 1604 1605 static int cfi_amdstd_get_user_prot_info(struct mtd_info *mtd, size_t len, 1606 size_t *retlen, struct otp_info *buf) 1607 { 1608 return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf, 1609 NULL, 1); 1610 } 1611 1612 static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, 1613 size_t len, size_t *retlen, 1614 u_char *buf) 1615 { 1616 return cfi_amdstd_otp_walk(mtd, from, len, retlen, 1617 buf, do_read_secsi_onechip, 0); 1618 } 1619 1620 static int cfi_amdstd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, 1621 size_t len, size_t *retlen, 1622 u_char *buf) 1623 { 1624 return cfi_amdstd_otp_walk(mtd, from, len, retlen, 1625 buf, do_read_secsi_onechip, 1); 1626 } 1627 1628 static int cfi_amdstd_write_user_prot_reg(struct mtd_info *mtd, loff_t from, 1629 size_t len, size_t *retlen, 1630 u_char *buf) 1631 { 1632 return cfi_amdstd_otp_walk(mtd, from, len, retlen, buf, 1633 do_otp_write, 1); 1634 } 1635 1636 static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, 1637 size_t len) 1638 { 1639 size_t retlen; 1640 return cfi_amdstd_otp_walk(mtd, from, len, &retlen, NULL, 1641 do_otp_lock, 1); 1642 } 1643 1644 static int __xipram do_write_oneword_once(struct map_info *map, 1645 struct flchip *chip, 1646 unsigned long adr, map_word datum, 1647 int mode, struct cfi_private *cfi) 1648 { 1649 unsigned long timeo = jiffies + HZ; 1650 /* 1651 * We use a 1ms + 1 jiffies generic timeout for writes (most devices 1652 * have a max write time of a few hundreds usec). However, we should 1653 * use the maximum timeout value given by the chip at probe time 1654 * instead. Unfortunately, struct flchip does have a field for 1655 * maximum timeout, only for typical which can be far too short 1656 * depending of the conditions. The ' + 1' is to avoid having a 1657 * timeout of 0 jiffies if HZ is smaller than 1000. 1658 */ 1659 unsigned long uWriteTimeout = (HZ / 1000) + 1; 1660 int ret = 0; 1661 1662 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1663 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 1664 cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1665 map_write(map, datum, adr); 1666 chip->state = mode; 1667 1668 INVALIDATE_CACHE_UDELAY(map, chip, 1669 adr, map_bankwidth(map), 1670 chip->word_write_time); 1671 1672 /* See comment above for timeout value. */ 1673 timeo = jiffies + uWriteTimeout; 1674 for (;;) { 1675 if (chip->state != mode) { 1676 /* Someone's suspended the write. Sleep */ 1677 DECLARE_WAITQUEUE(wait, current); 1678 1679 set_current_state(TASK_UNINTERRUPTIBLE); 1680 add_wait_queue(&chip->wq, &wait); 1681 mutex_unlock(&chip->mutex); 1682 schedule(); 1683 remove_wait_queue(&chip->wq, &wait); 1684 timeo = jiffies + (HZ / 2); /* FIXME */ 1685 mutex_lock(&chip->mutex); 1686 continue; 1687 } 1688 1689 /* 1690 * We check "time_after" and "!chip_good" before checking 1691 * "chip_good" to avoid the failure due to scheduling. 1692 */ 1693 if (time_after(jiffies, timeo) && 1694 !chip_good(map, chip, adr, datum)) { 1695 xip_enable(map, chip, adr); 1696 printk(KERN_WARNING "MTD %s(): software timeout\n", __func__); 1697 xip_disable(map, chip, adr); 1698 ret = -EIO; 1699 break; 1700 } 1701 1702 if (chip_good(map, chip, adr, datum)) 1703 break; 1704 1705 /* Latency issues. Drop the lock, wait a while and retry */ 1706 UDELAY(map, chip, adr, 1); 1707 } 1708 1709 return ret; 1710 } 1711 1712 static int __xipram do_write_oneword_start(struct map_info *map, 1713 struct flchip *chip, 1714 unsigned long adr, int mode) 1715 { 1716 int ret = 0; 1717 1718 mutex_lock(&chip->mutex); 1719 1720 ret = get_chip(map, chip, adr, mode); 1721 if (ret) { 1722 mutex_unlock(&chip->mutex); 1723 return ret; 1724 } 1725 1726 if (mode == FL_OTP_WRITE) 1727 otp_enter(map, chip, adr, map_bankwidth(map)); 1728 1729 return ret; 1730 } 1731 1732 static void __xipram do_write_oneword_done(struct map_info *map, 1733 struct flchip *chip, 1734 unsigned long adr, int mode) 1735 { 1736 if (mode == FL_OTP_WRITE) 1737 otp_exit(map, chip, adr, map_bankwidth(map)); 1738 1739 chip->state = FL_READY; 1740 DISABLE_VPP(map); 1741 put_chip(map, chip, adr); 1742 1743 mutex_unlock(&chip->mutex); 1744 } 1745 1746 static int __xipram do_write_oneword_retry(struct map_info *map, 1747 struct flchip *chip, 1748 unsigned long adr, map_word datum, 1749 int mode) 1750 { 1751 struct cfi_private *cfi = map->fldrv_priv; 1752 int ret = 0; 1753 map_word oldd; 1754 int retry_cnt = 0; 1755 1756 /* 1757 * Check for a NOP for the case when the datum to write is already 1758 * present - it saves time and works around buggy chips that corrupt 1759 * data at other locations when 0xff is written to a location that 1760 * already contains 0xff. 1761 */ 1762 oldd = map_read(map, adr); 1763 if (map_word_equal(map, oldd, datum)) { 1764 pr_debug("MTD %s(): NOP\n", __func__); 1765 return ret; 1766 } 1767 1768 XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map)); 1769 ENABLE_VPP(map); 1770 xip_disable(map, chip, adr); 1771 1772 retry: 1773 ret = do_write_oneword_once(map, chip, adr, datum, mode, cfi); 1774 if (ret) { 1775 /* reset on all failures. */ 1776 cfi_check_err_status(map, chip, adr); 1777 map_write(map, CMD(0xF0), chip->start); 1778 /* FIXME - should have reset delay before continuing */ 1779 1780 if (++retry_cnt <= MAX_RETRIES) { 1781 ret = 0; 1782 goto retry; 1783 } 1784 } 1785 xip_enable(map, chip, adr); 1786 1787 return ret; 1788 } 1789 1790 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, 1791 unsigned long adr, map_word datum, 1792 int mode) 1793 { 1794 int ret = 0; 1795 1796 adr += chip->start; 1797 1798 pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", __func__, adr, 1799 datum.x[0]); 1800 1801 ret = do_write_oneword_start(map, chip, adr, mode); 1802 if (ret) 1803 return ret; 1804 1805 ret = do_write_oneword_retry(map, chip, adr, datum, mode); 1806 1807 do_write_oneword_done(map, chip, adr, mode); 1808 1809 return ret; 1810 } 1811 1812 1813 static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len, 1814 size_t *retlen, const u_char *buf) 1815 { 1816 struct map_info *map = mtd->priv; 1817 struct cfi_private *cfi = map->fldrv_priv; 1818 int ret = 0; 1819 int chipnum; 1820 unsigned long ofs, chipstart; 1821 DECLARE_WAITQUEUE(wait, current); 1822 1823 chipnum = to >> cfi->chipshift; 1824 ofs = to - (chipnum << cfi->chipshift); 1825 chipstart = cfi->chips[chipnum].start; 1826 1827 /* If it's not bus-aligned, do the first byte write */ 1828 if (ofs & (map_bankwidth(map)-1)) { 1829 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1); 1830 int i = ofs - bus_ofs; 1831 int n = 0; 1832 map_word tmp_buf; 1833 1834 retry: 1835 mutex_lock(&cfi->chips[chipnum].mutex); 1836 1837 if (cfi->chips[chipnum].state != FL_READY) { 1838 set_current_state(TASK_UNINTERRUPTIBLE); 1839 add_wait_queue(&cfi->chips[chipnum].wq, &wait); 1840 1841 mutex_unlock(&cfi->chips[chipnum].mutex); 1842 1843 schedule(); 1844 remove_wait_queue(&cfi->chips[chipnum].wq, &wait); 1845 goto retry; 1846 } 1847 1848 /* Load 'tmp_buf' with old contents of flash */ 1849 tmp_buf = map_read(map, bus_ofs+chipstart); 1850 1851 mutex_unlock(&cfi->chips[chipnum].mutex); 1852 1853 /* Number of bytes to copy from buffer */ 1854 n = min_t(int, len, map_bankwidth(map)-i); 1855 1856 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n); 1857 1858 ret = do_write_oneword(map, &cfi->chips[chipnum], 1859 bus_ofs, tmp_buf, FL_WRITING); 1860 if (ret) 1861 return ret; 1862 1863 ofs += n; 1864 buf += n; 1865 (*retlen) += n; 1866 len -= n; 1867 1868 if (ofs >> cfi->chipshift) { 1869 chipnum ++; 1870 ofs = 0; 1871 if (chipnum == cfi->numchips) 1872 return 0; 1873 } 1874 } 1875 1876 /* We are now aligned, write as much as possible */ 1877 while(len >= map_bankwidth(map)) { 1878 map_word datum; 1879 1880 datum = map_word_load(map, buf); 1881 1882 ret = do_write_oneword(map, &cfi->chips[chipnum], 1883 ofs, datum, FL_WRITING); 1884 if (ret) 1885 return ret; 1886 1887 ofs += map_bankwidth(map); 1888 buf += map_bankwidth(map); 1889 (*retlen) += map_bankwidth(map); 1890 len -= map_bankwidth(map); 1891 1892 if (ofs >> cfi->chipshift) { 1893 chipnum ++; 1894 ofs = 0; 1895 if (chipnum == cfi->numchips) 1896 return 0; 1897 chipstart = cfi->chips[chipnum].start; 1898 } 1899 } 1900 1901 /* Write the trailing bytes if any */ 1902 if (len & (map_bankwidth(map)-1)) { 1903 map_word tmp_buf; 1904 1905 retry1: 1906 mutex_lock(&cfi->chips[chipnum].mutex); 1907 1908 if (cfi->chips[chipnum].state != FL_READY) { 1909 set_current_state(TASK_UNINTERRUPTIBLE); 1910 add_wait_queue(&cfi->chips[chipnum].wq, &wait); 1911 1912 mutex_unlock(&cfi->chips[chipnum].mutex); 1913 1914 schedule(); 1915 remove_wait_queue(&cfi->chips[chipnum].wq, &wait); 1916 goto retry1; 1917 } 1918 1919 tmp_buf = map_read(map, ofs + chipstart); 1920 1921 mutex_unlock(&cfi->chips[chipnum].mutex); 1922 1923 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len); 1924 1925 ret = do_write_oneword(map, &cfi->chips[chipnum], 1926 ofs, tmp_buf, FL_WRITING); 1927 if (ret) 1928 return ret; 1929 1930 (*retlen) += len; 1931 } 1932 1933 return 0; 1934 } 1935 1936 #if !FORCE_WORD_WRITE 1937 static int __xipram do_write_buffer_wait(struct map_info *map, 1938 struct flchip *chip, unsigned long adr, 1939 map_word datum) 1940 { 1941 unsigned long timeo; 1942 unsigned long u_write_timeout; 1943 int ret = 0; 1944 1945 /* 1946 * Timeout is calculated according to CFI data, if available. 1947 * See more comments in cfi_cmdset_0002(). 1948 */ 1949 u_write_timeout = usecs_to_jiffies(chip->buffer_write_time_max); 1950 timeo = jiffies + u_write_timeout; 1951 1952 for (;;) { 1953 if (chip->state != FL_WRITING) { 1954 /* Someone's suspended the write. Sleep */ 1955 DECLARE_WAITQUEUE(wait, current); 1956 1957 set_current_state(TASK_UNINTERRUPTIBLE); 1958 add_wait_queue(&chip->wq, &wait); 1959 mutex_unlock(&chip->mutex); 1960 schedule(); 1961 remove_wait_queue(&chip->wq, &wait); 1962 timeo = jiffies + (HZ / 2); /* FIXME */ 1963 mutex_lock(&chip->mutex); 1964 continue; 1965 } 1966 1967 /* 1968 * We check "time_after" and "!chip_good" before checking 1969 * "chip_good" to avoid the failure due to scheduling. 1970 */ 1971 if (time_after(jiffies, timeo) && 1972 !chip_good(map, chip, adr, datum)) { 1973 ret = -EIO; 1974 break; 1975 } 1976 1977 if (chip_good(map, chip, adr, datum)) 1978 break; 1979 1980 /* Latency issues. Drop the lock, wait a while and retry */ 1981 UDELAY(map, chip, adr, 1); 1982 } 1983 1984 return ret; 1985 } 1986 1987 static void __xipram do_write_buffer_reset(struct map_info *map, 1988 struct flchip *chip, 1989 struct cfi_private *cfi) 1990 { 1991 /* 1992 * Recovery from write-buffer programming failures requires 1993 * the write-to-buffer-reset sequence. Since the last part 1994 * of the sequence also works as a normal reset, we can run 1995 * the same commands regardless of why we are here. 1996 * See e.g. 1997 * http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf 1998 */ 1999 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 2000 cfi->device_type, NULL); 2001 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 2002 cfi->device_type, NULL); 2003 cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi, 2004 cfi->device_type, NULL); 2005 2006 /* FIXME - should have reset delay before continuing */ 2007 } 2008 2009 /* 2010 * FIXME: interleaved mode not tested, and probably not supported! 2011 */ 2012 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, 2013 unsigned long adr, const u_char *buf, 2014 int len) 2015 { 2016 struct cfi_private *cfi = map->fldrv_priv; 2017 int ret = -EIO; 2018 unsigned long cmd_adr; 2019 int z, words; 2020 map_word datum; 2021 2022 adr += chip->start; 2023 cmd_adr = adr; 2024 2025 mutex_lock(&chip->mutex); 2026 ret = get_chip(map, chip, adr, FL_WRITING); 2027 if (ret) { 2028 mutex_unlock(&chip->mutex); 2029 return ret; 2030 } 2031 2032 datum = map_word_load(map, buf); 2033 2034 pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", 2035 __func__, adr, datum.x[0]); 2036 2037 XIP_INVAL_CACHED_RANGE(map, adr, len); 2038 ENABLE_VPP(map); 2039 xip_disable(map, chip, cmd_adr); 2040 2041 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2042 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 2043 2044 /* Write Buffer Load */ 2045 map_write(map, CMD(0x25), cmd_adr); 2046 2047 chip->state = FL_WRITING_TO_BUFFER; 2048 2049 /* Write length of data to come */ 2050 words = len / map_bankwidth(map); 2051 map_write(map, CMD(words - 1), cmd_adr); 2052 /* Write data */ 2053 z = 0; 2054 while(z < words * map_bankwidth(map)) { 2055 datum = map_word_load(map, buf); 2056 map_write(map, datum, adr + z); 2057 2058 z += map_bankwidth(map); 2059 buf += map_bankwidth(map); 2060 } 2061 z -= map_bankwidth(map); 2062 2063 adr += z; 2064 2065 /* Write Buffer Program Confirm: GO GO GO */ 2066 map_write(map, CMD(0x29), cmd_adr); 2067 chip->state = FL_WRITING; 2068 2069 INVALIDATE_CACHE_UDELAY(map, chip, 2070 adr, map_bankwidth(map), 2071 chip->word_write_time); 2072 2073 ret = do_write_buffer_wait(map, chip, adr, datum); 2074 if (ret) { 2075 cfi_check_err_status(map, chip, adr); 2076 do_write_buffer_reset(map, chip, cfi); 2077 pr_err("MTD %s(): software timeout, address:0x%.8lx.\n", 2078 __func__, adr); 2079 } 2080 2081 xip_enable(map, chip, adr); 2082 2083 chip->state = FL_READY; 2084 DISABLE_VPP(map); 2085 put_chip(map, chip, adr); 2086 mutex_unlock(&chip->mutex); 2087 2088 return ret; 2089 } 2090 2091 2092 static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len, 2093 size_t *retlen, const u_char *buf) 2094 { 2095 struct map_info *map = mtd->priv; 2096 struct cfi_private *cfi = map->fldrv_priv; 2097 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; 2098 int ret = 0; 2099 int chipnum; 2100 unsigned long ofs; 2101 2102 chipnum = to >> cfi->chipshift; 2103 ofs = to - (chipnum << cfi->chipshift); 2104 2105 /* If it's not bus-aligned, do the first word write */ 2106 if (ofs & (map_bankwidth(map)-1)) { 2107 size_t local_len = (-ofs)&(map_bankwidth(map)-1); 2108 if (local_len > len) 2109 local_len = len; 2110 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift), 2111 local_len, retlen, buf); 2112 if (ret) 2113 return ret; 2114 ofs += local_len; 2115 buf += local_len; 2116 len -= local_len; 2117 2118 if (ofs >> cfi->chipshift) { 2119 chipnum ++; 2120 ofs = 0; 2121 if (chipnum == cfi->numchips) 2122 return 0; 2123 } 2124 } 2125 2126 /* Write buffer is worth it only if more than one word to write... */ 2127 while (len >= map_bankwidth(map) * 2) { 2128 /* We must not cross write block boundaries */ 2129 int size = wbufsize - (ofs & (wbufsize-1)); 2130 2131 if (size > len) 2132 size = len; 2133 if (size % map_bankwidth(map)) 2134 size -= size % map_bankwidth(map); 2135 2136 ret = do_write_buffer(map, &cfi->chips[chipnum], 2137 ofs, buf, size); 2138 if (ret) 2139 return ret; 2140 2141 ofs += size; 2142 buf += size; 2143 (*retlen) += size; 2144 len -= size; 2145 2146 if (ofs >> cfi->chipshift) { 2147 chipnum ++; 2148 ofs = 0; 2149 if (chipnum == cfi->numchips) 2150 return 0; 2151 } 2152 } 2153 2154 if (len) { 2155 size_t retlen_dregs = 0; 2156 2157 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift), 2158 len, &retlen_dregs, buf); 2159 2160 *retlen += retlen_dregs; 2161 return ret; 2162 } 2163 2164 return 0; 2165 } 2166 #endif /* !FORCE_WORD_WRITE */ 2167 2168 /* 2169 * Wait for the flash chip to become ready to write data 2170 * 2171 * This is only called during the panic_write() path. When panic_write() 2172 * is called, the kernel is in the process of a panic, and will soon be 2173 * dead. Therefore we don't take any locks, and attempt to get access 2174 * to the chip as soon as possible. 2175 */ 2176 static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip, 2177 unsigned long adr) 2178 { 2179 struct cfi_private *cfi = map->fldrv_priv; 2180 int retries = 10; 2181 int i; 2182 2183 /* 2184 * If the driver thinks the chip is idle, and no toggle bits 2185 * are changing, then the chip is actually idle for sure. 2186 */ 2187 if (chip->state == FL_READY && chip_ready(map, chip, adr)) 2188 return 0; 2189 2190 /* 2191 * Try several times to reset the chip and then wait for it 2192 * to become idle. The upper limit of a few milliseconds of 2193 * delay isn't a big problem: the kernel is dying anyway. It 2194 * is more important to save the messages. 2195 */ 2196 while (retries > 0) { 2197 const unsigned long timeo = (HZ / 1000) + 1; 2198 2199 /* send the reset command */ 2200 map_write(map, CMD(0xF0), chip->start); 2201 2202 /* wait for the chip to become ready */ 2203 for (i = 0; i < jiffies_to_usecs(timeo); i++) { 2204 if (chip_ready(map, chip, adr)) 2205 return 0; 2206 2207 udelay(1); 2208 } 2209 2210 retries--; 2211 } 2212 2213 /* the chip never became ready */ 2214 return -EBUSY; 2215 } 2216 2217 /* 2218 * Write out one word of data to a single flash chip during a kernel panic 2219 * 2220 * This is only called during the panic_write() path. When panic_write() 2221 * is called, the kernel is in the process of a panic, and will soon be 2222 * dead. Therefore we don't take any locks, and attempt to get access 2223 * to the chip as soon as possible. 2224 * 2225 * The implementation of this routine is intentionally similar to 2226 * do_write_oneword(), in order to ease code maintenance. 2227 */ 2228 static int do_panic_write_oneword(struct map_info *map, struct flchip *chip, 2229 unsigned long adr, map_word datum) 2230 { 2231 const unsigned long uWriteTimeout = (HZ / 1000) + 1; 2232 struct cfi_private *cfi = map->fldrv_priv; 2233 int retry_cnt = 0; 2234 map_word oldd; 2235 int ret = 0; 2236 int i; 2237 2238 adr += chip->start; 2239 2240 ret = cfi_amdstd_panic_wait(map, chip, adr); 2241 if (ret) 2242 return ret; 2243 2244 pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n", 2245 __func__, adr, datum.x[0]); 2246 2247 /* 2248 * Check for a NOP for the case when the datum to write is already 2249 * present - it saves time and works around buggy chips that corrupt 2250 * data at other locations when 0xff is written to a location that 2251 * already contains 0xff. 2252 */ 2253 oldd = map_read(map, adr); 2254 if (map_word_equal(map, oldd, datum)) { 2255 pr_debug("MTD %s(): NOP\n", __func__); 2256 goto op_done; 2257 } 2258 2259 ENABLE_VPP(map); 2260 2261 retry: 2262 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2263 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 2264 cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2265 map_write(map, datum, adr); 2266 2267 for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) { 2268 if (chip_ready(map, chip, adr)) 2269 break; 2270 2271 udelay(1); 2272 } 2273 2274 if (!chip_good(map, chip, adr, datum)) { 2275 /* reset on all failures. */ 2276 cfi_check_err_status(map, chip, adr); 2277 map_write(map, CMD(0xF0), chip->start); 2278 /* FIXME - should have reset delay before continuing */ 2279 2280 if (++retry_cnt <= MAX_RETRIES) 2281 goto retry; 2282 2283 ret = -EIO; 2284 } 2285 2286 op_done: 2287 DISABLE_VPP(map); 2288 return ret; 2289 } 2290 2291 /* 2292 * Write out some data during a kernel panic 2293 * 2294 * This is used by the mtdoops driver to save the dying messages from a 2295 * kernel which has panic'd. 2296 * 2297 * This routine ignores all of the locking used throughout the rest of the 2298 * driver, in order to ensure that the data gets written out no matter what 2299 * state this driver (and the flash chip itself) was in when the kernel crashed. 2300 * 2301 * The implementation of this routine is intentionally similar to 2302 * cfi_amdstd_write_words(), in order to ease code maintenance. 2303 */ 2304 static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, 2305 size_t *retlen, const u_char *buf) 2306 { 2307 struct map_info *map = mtd->priv; 2308 struct cfi_private *cfi = map->fldrv_priv; 2309 unsigned long ofs, chipstart; 2310 int ret = 0; 2311 int chipnum; 2312 2313 chipnum = to >> cfi->chipshift; 2314 ofs = to - (chipnum << cfi->chipshift); 2315 chipstart = cfi->chips[chipnum].start; 2316 2317 /* If it's not bus aligned, do the first byte write */ 2318 if (ofs & (map_bankwidth(map) - 1)) { 2319 unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1); 2320 int i = ofs - bus_ofs; 2321 int n = 0; 2322 map_word tmp_buf; 2323 2324 ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs); 2325 if (ret) 2326 return ret; 2327 2328 /* Load 'tmp_buf' with old contents of flash */ 2329 tmp_buf = map_read(map, bus_ofs + chipstart); 2330 2331 /* Number of bytes to copy from buffer */ 2332 n = min_t(int, len, map_bankwidth(map) - i); 2333 2334 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n); 2335 2336 ret = do_panic_write_oneword(map, &cfi->chips[chipnum], 2337 bus_ofs, tmp_buf); 2338 if (ret) 2339 return ret; 2340 2341 ofs += n; 2342 buf += n; 2343 (*retlen) += n; 2344 len -= n; 2345 2346 if (ofs >> cfi->chipshift) { 2347 chipnum++; 2348 ofs = 0; 2349 if (chipnum == cfi->numchips) 2350 return 0; 2351 } 2352 } 2353 2354 /* We are now aligned, write as much as possible */ 2355 while (len >= map_bankwidth(map)) { 2356 map_word datum; 2357 2358 datum = map_word_load(map, buf); 2359 2360 ret = do_panic_write_oneword(map, &cfi->chips[chipnum], 2361 ofs, datum); 2362 if (ret) 2363 return ret; 2364 2365 ofs += map_bankwidth(map); 2366 buf += map_bankwidth(map); 2367 (*retlen) += map_bankwidth(map); 2368 len -= map_bankwidth(map); 2369 2370 if (ofs >> cfi->chipshift) { 2371 chipnum++; 2372 ofs = 0; 2373 if (chipnum == cfi->numchips) 2374 return 0; 2375 2376 chipstart = cfi->chips[chipnum].start; 2377 } 2378 } 2379 2380 /* Write the trailing bytes if any */ 2381 if (len & (map_bankwidth(map) - 1)) { 2382 map_word tmp_buf; 2383 2384 ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs); 2385 if (ret) 2386 return ret; 2387 2388 tmp_buf = map_read(map, ofs + chipstart); 2389 2390 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len); 2391 2392 ret = do_panic_write_oneword(map, &cfi->chips[chipnum], 2393 ofs, tmp_buf); 2394 if (ret) 2395 return ret; 2396 2397 (*retlen) += len; 2398 } 2399 2400 return 0; 2401 } 2402 2403 2404 /* 2405 * Handle devices with one erase region, that only implement 2406 * the chip erase command. 2407 */ 2408 static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) 2409 { 2410 struct cfi_private *cfi = map->fldrv_priv; 2411 unsigned long timeo = jiffies + HZ; 2412 unsigned long int adr; 2413 DECLARE_WAITQUEUE(wait, current); 2414 int ret = 0; 2415 int retry_cnt = 0; 2416 2417 adr = cfi->addr_unlock1; 2418 2419 mutex_lock(&chip->mutex); 2420 ret = get_chip(map, chip, adr, FL_ERASING); 2421 if (ret) { 2422 mutex_unlock(&chip->mutex); 2423 return ret; 2424 } 2425 2426 pr_debug("MTD %s(): ERASE 0x%.8lx\n", 2427 __func__, chip->start); 2428 2429 XIP_INVAL_CACHED_RANGE(map, adr, map->size); 2430 ENABLE_VPP(map); 2431 xip_disable(map, chip, adr); 2432 2433 retry: 2434 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2435 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 2436 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2437 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2438 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 2439 cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2440 2441 chip->state = FL_ERASING; 2442 chip->erase_suspended = 0; 2443 chip->in_progress_block_addr = adr; 2444 chip->in_progress_block_mask = ~(map->size - 1); 2445 2446 INVALIDATE_CACHE_UDELAY(map, chip, 2447 adr, map->size, 2448 chip->erase_time*500); 2449 2450 timeo = jiffies + (HZ*20); 2451 2452 for (;;) { 2453 if (chip->state != FL_ERASING) { 2454 /* Someone's suspended the erase. Sleep */ 2455 set_current_state(TASK_UNINTERRUPTIBLE); 2456 add_wait_queue(&chip->wq, &wait); 2457 mutex_unlock(&chip->mutex); 2458 schedule(); 2459 remove_wait_queue(&chip->wq, &wait); 2460 mutex_lock(&chip->mutex); 2461 continue; 2462 } 2463 if (chip->erase_suspended) { 2464 /* This erase was suspended and resumed. 2465 Adjust the timeout */ 2466 timeo = jiffies + (HZ*20); /* FIXME */ 2467 chip->erase_suspended = 0; 2468 } 2469 2470 if (chip_good(map, chip, adr, map_word_ff(map))) 2471 break; 2472 2473 if (time_after(jiffies, timeo)) { 2474 printk(KERN_WARNING "MTD %s(): software timeout\n", 2475 __func__); 2476 ret = -EIO; 2477 break; 2478 } 2479 2480 /* Latency issues. Drop the lock, wait a while and retry */ 2481 UDELAY(map, chip, adr, 1000000/HZ); 2482 } 2483 /* Did we succeed? */ 2484 if (ret) { 2485 /* reset on all failures. */ 2486 cfi_check_err_status(map, chip, adr); 2487 map_write(map, CMD(0xF0), chip->start); 2488 /* FIXME - should have reset delay before continuing */ 2489 2490 if (++retry_cnt <= MAX_RETRIES) { 2491 ret = 0; 2492 goto retry; 2493 } 2494 } 2495 2496 chip->state = FL_READY; 2497 xip_enable(map, chip, adr); 2498 DISABLE_VPP(map); 2499 put_chip(map, chip, adr); 2500 mutex_unlock(&chip->mutex); 2501 2502 return ret; 2503 } 2504 2505 2506 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk) 2507 { 2508 struct cfi_private *cfi = map->fldrv_priv; 2509 unsigned long timeo = jiffies + HZ; 2510 DECLARE_WAITQUEUE(wait, current); 2511 int ret = 0; 2512 int retry_cnt = 0; 2513 2514 adr += chip->start; 2515 2516 mutex_lock(&chip->mutex); 2517 ret = get_chip(map, chip, adr, FL_ERASING); 2518 if (ret) { 2519 mutex_unlock(&chip->mutex); 2520 return ret; 2521 } 2522 2523 pr_debug("MTD %s(): ERASE 0x%.8lx\n", 2524 __func__, adr); 2525 2526 XIP_INVAL_CACHED_RANGE(map, adr, len); 2527 ENABLE_VPP(map); 2528 xip_disable(map, chip, adr); 2529 2530 retry: 2531 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2532 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 2533 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2534 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2535 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 2536 map_write(map, cfi->sector_erase_cmd, adr); 2537 2538 chip->state = FL_ERASING; 2539 chip->erase_suspended = 0; 2540 chip->in_progress_block_addr = adr; 2541 chip->in_progress_block_mask = ~(len - 1); 2542 2543 INVALIDATE_CACHE_UDELAY(map, chip, 2544 adr, len, 2545 chip->erase_time*500); 2546 2547 timeo = jiffies + (HZ*20); 2548 2549 for (;;) { 2550 if (chip->state != FL_ERASING) { 2551 /* Someone's suspended the erase. Sleep */ 2552 set_current_state(TASK_UNINTERRUPTIBLE); 2553 add_wait_queue(&chip->wq, &wait); 2554 mutex_unlock(&chip->mutex); 2555 schedule(); 2556 remove_wait_queue(&chip->wq, &wait); 2557 mutex_lock(&chip->mutex); 2558 continue; 2559 } 2560 if (chip->erase_suspended) { 2561 /* This erase was suspended and resumed. 2562 Adjust the timeout */ 2563 timeo = jiffies + (HZ*20); /* FIXME */ 2564 chip->erase_suspended = 0; 2565 } 2566 2567 if (chip_good(map, chip, adr, map_word_ff(map))) 2568 break; 2569 2570 if (time_after(jiffies, timeo)) { 2571 printk(KERN_WARNING "MTD %s(): software timeout\n", 2572 __func__); 2573 ret = -EIO; 2574 break; 2575 } 2576 2577 /* Latency issues. Drop the lock, wait a while and retry */ 2578 UDELAY(map, chip, adr, 1000000/HZ); 2579 } 2580 /* Did we succeed? */ 2581 if (ret) { 2582 /* reset on all failures. */ 2583 cfi_check_err_status(map, chip, adr); 2584 map_write(map, CMD(0xF0), chip->start); 2585 /* FIXME - should have reset delay before continuing */ 2586 2587 if (++retry_cnt <= MAX_RETRIES) { 2588 ret = 0; 2589 goto retry; 2590 } 2591 } 2592 2593 chip->state = FL_READY; 2594 xip_enable(map, chip, adr); 2595 DISABLE_VPP(map); 2596 put_chip(map, chip, adr); 2597 mutex_unlock(&chip->mutex); 2598 return ret; 2599 } 2600 2601 2602 static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr) 2603 { 2604 return cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr, 2605 instr->len, NULL); 2606 } 2607 2608 2609 static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr) 2610 { 2611 struct map_info *map = mtd->priv; 2612 struct cfi_private *cfi = map->fldrv_priv; 2613 2614 if (instr->addr != 0) 2615 return -EINVAL; 2616 2617 if (instr->len != mtd->size) 2618 return -EINVAL; 2619 2620 return do_erase_chip(map, &cfi->chips[0]); 2621 } 2622 2623 static int do_atmel_lock(struct map_info *map, struct flchip *chip, 2624 unsigned long adr, int len, void *thunk) 2625 { 2626 struct cfi_private *cfi = map->fldrv_priv; 2627 int ret; 2628 2629 mutex_lock(&chip->mutex); 2630 ret = get_chip(map, chip, adr + chip->start, FL_LOCKING); 2631 if (ret) 2632 goto out_unlock; 2633 chip->state = FL_LOCKING; 2634 2635 pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len); 2636 2637 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 2638 cfi->device_type, NULL); 2639 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 2640 cfi->device_type, NULL); 2641 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, 2642 cfi->device_type, NULL); 2643 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 2644 cfi->device_type, NULL); 2645 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 2646 cfi->device_type, NULL); 2647 map_write(map, CMD(0x40), chip->start + adr); 2648 2649 chip->state = FL_READY; 2650 put_chip(map, chip, adr + chip->start); 2651 ret = 0; 2652 2653 out_unlock: 2654 mutex_unlock(&chip->mutex); 2655 return ret; 2656 } 2657 2658 static int do_atmel_unlock(struct map_info *map, struct flchip *chip, 2659 unsigned long adr, int len, void *thunk) 2660 { 2661 struct cfi_private *cfi = map->fldrv_priv; 2662 int ret; 2663 2664 mutex_lock(&chip->mutex); 2665 ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING); 2666 if (ret) 2667 goto out_unlock; 2668 chip->state = FL_UNLOCKING; 2669 2670 pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len); 2671 2672 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 2673 cfi->device_type, NULL); 2674 map_write(map, CMD(0x70), adr); 2675 2676 chip->state = FL_READY; 2677 put_chip(map, chip, adr + chip->start); 2678 ret = 0; 2679 2680 out_unlock: 2681 mutex_unlock(&chip->mutex); 2682 return ret; 2683 } 2684 2685 static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 2686 { 2687 return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL); 2688 } 2689 2690 static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 2691 { 2692 return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL); 2693 } 2694 2695 /* 2696 * Advanced Sector Protection - PPB (Persistent Protection Bit) locking 2697 */ 2698 2699 struct ppb_lock { 2700 struct flchip *chip; 2701 unsigned long adr; 2702 int locked; 2703 }; 2704 2705 #define DO_XXLOCK_ONEBLOCK_LOCK ((void *)1) 2706 #define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *)2) 2707 #define DO_XXLOCK_ONEBLOCK_GETLOCK ((void *)3) 2708 2709 static int __maybe_unused do_ppb_xxlock(struct map_info *map, 2710 struct flchip *chip, 2711 unsigned long adr, int len, void *thunk) 2712 { 2713 struct cfi_private *cfi = map->fldrv_priv; 2714 unsigned long timeo; 2715 int ret; 2716 2717 adr += chip->start; 2718 mutex_lock(&chip->mutex); 2719 ret = get_chip(map, chip, adr, FL_LOCKING); 2720 if (ret) { 2721 mutex_unlock(&chip->mutex); 2722 return ret; 2723 } 2724 2725 pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n", __func__, adr, len); 2726 2727 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 2728 cfi->device_type, NULL); 2729 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 2730 cfi->device_type, NULL); 2731 /* PPB entry command */ 2732 cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi, 2733 cfi->device_type, NULL); 2734 2735 if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) { 2736 chip->state = FL_LOCKING; 2737 map_write(map, CMD(0xA0), adr); 2738 map_write(map, CMD(0x00), adr); 2739 } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) { 2740 /* 2741 * Unlocking of one specific sector is not supported, so we 2742 * have to unlock all sectors of this device instead 2743 */ 2744 chip->state = FL_UNLOCKING; 2745 map_write(map, CMD(0x80), chip->start); 2746 map_write(map, CMD(0x30), chip->start); 2747 } else if (thunk == DO_XXLOCK_ONEBLOCK_GETLOCK) { 2748 chip->state = FL_JEDEC_QUERY; 2749 /* Return locked status: 0->locked, 1->unlocked */ 2750 ret = !cfi_read_query(map, adr); 2751 } else 2752 BUG(); 2753 2754 /* 2755 * Wait for some time as unlocking of all sectors takes quite long 2756 */ 2757 timeo = jiffies + msecs_to_jiffies(2000); /* 2s max (un)locking */ 2758 for (;;) { 2759 if (chip_ready(map, chip, adr)) 2760 break; 2761 2762 if (time_after(jiffies, timeo)) { 2763 printk(KERN_ERR "Waiting for chip to be ready timed out.\n"); 2764 ret = -EIO; 2765 break; 2766 } 2767 2768 UDELAY(map, chip, adr, 1); 2769 } 2770 2771 /* Exit BC commands */ 2772 map_write(map, CMD(0x90), chip->start); 2773 map_write(map, CMD(0x00), chip->start); 2774 2775 chip->state = FL_READY; 2776 put_chip(map, chip, adr); 2777 mutex_unlock(&chip->mutex); 2778 2779 return ret; 2780 } 2781 2782 static int __maybe_unused cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, 2783 uint64_t len) 2784 { 2785 return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len, 2786 DO_XXLOCK_ONEBLOCK_LOCK); 2787 } 2788 2789 static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, 2790 uint64_t len) 2791 { 2792 struct mtd_erase_region_info *regions = mtd->eraseregions; 2793 struct map_info *map = mtd->priv; 2794 struct cfi_private *cfi = map->fldrv_priv; 2795 struct ppb_lock *sect; 2796 unsigned long adr; 2797 loff_t offset; 2798 uint64_t length; 2799 int chipnum; 2800 int i; 2801 int sectors; 2802 int ret; 2803 int max_sectors; 2804 2805 /* 2806 * PPB unlocking always unlocks all sectors of the flash chip. 2807 * We need to re-lock all previously locked sectors. So lets 2808 * first check the locking status of all sectors and save 2809 * it for future use. 2810 */ 2811 max_sectors = 0; 2812 for (i = 0; i < mtd->numeraseregions; i++) 2813 max_sectors += regions[i].numblocks; 2814 2815 sect = kcalloc(max_sectors, sizeof(struct ppb_lock), GFP_KERNEL); 2816 if (!sect) 2817 return -ENOMEM; 2818 2819 /* 2820 * This code to walk all sectors is a slightly modified version 2821 * of the cfi_varsize_frob() code. 2822 */ 2823 i = 0; 2824 chipnum = 0; 2825 adr = 0; 2826 sectors = 0; 2827 offset = 0; 2828 length = mtd->size; 2829 2830 while (length) { 2831 int size = regions[i].erasesize; 2832 2833 /* 2834 * Only test sectors that shall not be unlocked. The other 2835 * sectors shall be unlocked, so lets keep their locking 2836 * status at "unlocked" (locked=0) for the final re-locking. 2837 */ 2838 if ((offset < ofs) || (offset >= (ofs + len))) { 2839 sect[sectors].chip = &cfi->chips[chipnum]; 2840 sect[sectors].adr = adr; 2841 sect[sectors].locked = do_ppb_xxlock( 2842 map, &cfi->chips[chipnum], adr, 0, 2843 DO_XXLOCK_ONEBLOCK_GETLOCK); 2844 } 2845 2846 adr += size; 2847 offset += size; 2848 length -= size; 2849 2850 if (offset == regions[i].offset + size * regions[i].numblocks) 2851 i++; 2852 2853 if (adr >> cfi->chipshift) { 2854 if (offset >= (ofs + len)) 2855 break; 2856 adr = 0; 2857 chipnum++; 2858 2859 if (chipnum >= cfi->numchips) 2860 break; 2861 } 2862 2863 sectors++; 2864 if (sectors >= max_sectors) { 2865 printk(KERN_ERR "Only %d sectors for PPB locking supported!\n", 2866 max_sectors); 2867 kfree(sect); 2868 return -EINVAL; 2869 } 2870 } 2871 2872 /* Now unlock the whole chip */ 2873 ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len, 2874 DO_XXLOCK_ONEBLOCK_UNLOCK); 2875 if (ret) { 2876 kfree(sect); 2877 return ret; 2878 } 2879 2880 /* 2881 * PPB unlocking always unlocks all sectors of the flash chip. 2882 * We need to re-lock all previously locked sectors. 2883 */ 2884 for (i = 0; i < sectors; i++) { 2885 if (sect[i].locked) 2886 do_ppb_xxlock(map, sect[i].chip, sect[i].adr, 0, 2887 DO_XXLOCK_ONEBLOCK_LOCK); 2888 } 2889 2890 kfree(sect); 2891 return ret; 2892 } 2893 2894 static int __maybe_unused cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, 2895 uint64_t len) 2896 { 2897 return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len, 2898 DO_XXLOCK_ONEBLOCK_GETLOCK) ? 1 : 0; 2899 } 2900 2901 static void cfi_amdstd_sync (struct mtd_info *mtd) 2902 { 2903 struct map_info *map = mtd->priv; 2904 struct cfi_private *cfi = map->fldrv_priv; 2905 int i; 2906 struct flchip *chip; 2907 int ret = 0; 2908 DECLARE_WAITQUEUE(wait, current); 2909 2910 for (i=0; !ret && i<cfi->numchips; i++) { 2911 chip = &cfi->chips[i]; 2912 2913 retry: 2914 mutex_lock(&chip->mutex); 2915 2916 switch(chip->state) { 2917 case FL_READY: 2918 case FL_STATUS: 2919 case FL_CFI_QUERY: 2920 case FL_JEDEC_QUERY: 2921 chip->oldstate = chip->state; 2922 chip->state = FL_SYNCING; 2923 /* No need to wake_up() on this state change - 2924 * as the whole point is that nobody can do anything 2925 * with the chip now anyway. 2926 */ 2927 /* fall through */ 2928 case FL_SYNCING: 2929 mutex_unlock(&chip->mutex); 2930 break; 2931 2932 default: 2933 /* Not an idle state */ 2934 set_current_state(TASK_UNINTERRUPTIBLE); 2935 add_wait_queue(&chip->wq, &wait); 2936 2937 mutex_unlock(&chip->mutex); 2938 2939 schedule(); 2940 2941 remove_wait_queue(&chip->wq, &wait); 2942 2943 goto retry; 2944 } 2945 } 2946 2947 /* Unlock the chips again */ 2948 2949 for (i--; i >=0; i--) { 2950 chip = &cfi->chips[i]; 2951 2952 mutex_lock(&chip->mutex); 2953 2954 if (chip->state == FL_SYNCING) { 2955 chip->state = chip->oldstate; 2956 wake_up(&chip->wq); 2957 } 2958 mutex_unlock(&chip->mutex); 2959 } 2960 } 2961 2962 2963 static int cfi_amdstd_suspend(struct mtd_info *mtd) 2964 { 2965 struct map_info *map = mtd->priv; 2966 struct cfi_private *cfi = map->fldrv_priv; 2967 int i; 2968 struct flchip *chip; 2969 int ret = 0; 2970 2971 for (i=0; !ret && i<cfi->numchips; i++) { 2972 chip = &cfi->chips[i]; 2973 2974 mutex_lock(&chip->mutex); 2975 2976 switch(chip->state) { 2977 case FL_READY: 2978 case FL_STATUS: 2979 case FL_CFI_QUERY: 2980 case FL_JEDEC_QUERY: 2981 chip->oldstate = chip->state; 2982 chip->state = FL_PM_SUSPENDED; 2983 /* No need to wake_up() on this state change - 2984 * as the whole point is that nobody can do anything 2985 * with the chip now anyway. 2986 */ 2987 case FL_PM_SUSPENDED: 2988 break; 2989 2990 default: 2991 ret = -EAGAIN; 2992 break; 2993 } 2994 mutex_unlock(&chip->mutex); 2995 } 2996 2997 /* Unlock the chips again */ 2998 2999 if (ret) { 3000 for (i--; i >=0; i--) { 3001 chip = &cfi->chips[i]; 3002 3003 mutex_lock(&chip->mutex); 3004 3005 if (chip->state == FL_PM_SUSPENDED) { 3006 chip->state = chip->oldstate; 3007 wake_up(&chip->wq); 3008 } 3009 mutex_unlock(&chip->mutex); 3010 } 3011 } 3012 3013 return ret; 3014 } 3015 3016 3017 static void cfi_amdstd_resume(struct mtd_info *mtd) 3018 { 3019 struct map_info *map = mtd->priv; 3020 struct cfi_private *cfi = map->fldrv_priv; 3021 int i; 3022 struct flchip *chip; 3023 3024 for (i=0; i<cfi->numchips; i++) { 3025 3026 chip = &cfi->chips[i]; 3027 3028 mutex_lock(&chip->mutex); 3029 3030 if (chip->state == FL_PM_SUSPENDED) { 3031 chip->state = FL_READY; 3032 map_write(map, CMD(0xF0), chip->start); 3033 wake_up(&chip->wq); 3034 } 3035 else 3036 printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n"); 3037 3038 mutex_unlock(&chip->mutex); 3039 } 3040 } 3041 3042 3043 /* 3044 * Ensure that the flash device is put back into read array mode before 3045 * unloading the driver or rebooting. On some systems, rebooting while 3046 * the flash is in query/program/erase mode will prevent the CPU from 3047 * fetching the bootloader code, requiring a hard reset or power cycle. 3048 */ 3049 static int cfi_amdstd_reset(struct mtd_info *mtd) 3050 { 3051 struct map_info *map = mtd->priv; 3052 struct cfi_private *cfi = map->fldrv_priv; 3053 int i, ret; 3054 struct flchip *chip; 3055 3056 for (i = 0; i < cfi->numchips; i++) { 3057 3058 chip = &cfi->chips[i]; 3059 3060 mutex_lock(&chip->mutex); 3061 3062 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN); 3063 if (!ret) { 3064 map_write(map, CMD(0xF0), chip->start); 3065 chip->state = FL_SHUTDOWN; 3066 put_chip(map, chip, chip->start); 3067 } 3068 3069 mutex_unlock(&chip->mutex); 3070 } 3071 3072 return 0; 3073 } 3074 3075 3076 static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val, 3077 void *v) 3078 { 3079 struct mtd_info *mtd; 3080 3081 mtd = container_of(nb, struct mtd_info, reboot_notifier); 3082 cfi_amdstd_reset(mtd); 3083 return NOTIFY_DONE; 3084 } 3085 3086 3087 static void cfi_amdstd_destroy(struct mtd_info *mtd) 3088 { 3089 struct map_info *map = mtd->priv; 3090 struct cfi_private *cfi = map->fldrv_priv; 3091 3092 cfi_amdstd_reset(mtd); 3093 unregister_reboot_notifier(&mtd->reboot_notifier); 3094 kfree(cfi->cmdset_priv); 3095 kfree(cfi->cfiq); 3096 kfree(cfi); 3097 kfree(mtd->eraseregions); 3098 } 3099 3100 MODULE_LICENSE("GPL"); 3101 MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al."); 3102 MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips"); 3103 MODULE_ALIAS("cfi_cmdset_0006"); 3104 MODULE_ALIAS("cfi_cmdset_0701"); 3105