1 /* 2 * Common Flash Interface support: 3 * AMD & Fujitsu Standard Vendor Command Set (ID 0x0002) 4 * 5 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp> 6 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com> 7 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com> 8 * 9 * 2_by_8 routines added by Simon Munton 10 * 11 * 4_by_16 work by Carolyn J. Smith 12 * 13 * XIP support hooks by Vitaly Wool (based on code for Intel flash 14 * by Nicolas Pitre) 15 * 16 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0 17 * 18 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com 19 * 20 * This code is GPL 21 */ 22 23 #include <linux/module.h> 24 #include <linux/types.h> 25 #include <linux/kernel.h> 26 #include <linux/sched.h> 27 #include <linux/init.h> 28 #include <asm/io.h> 29 #include <asm/byteorder.h> 30 31 #include <linux/errno.h> 32 #include <linux/slab.h> 33 #include <linux/delay.h> 34 #include <linux/interrupt.h> 35 #include <linux/reboot.h> 36 #include <linux/mtd/map.h> 37 #include <linux/mtd/mtd.h> 38 #include <linux/mtd/cfi.h> 39 #include <linux/mtd/xip.h> 40 41 #define AMD_BOOTLOC_BUG 42 #define FORCE_WORD_WRITE 0 43 44 #define MAX_WORD_RETRIES 3 45 46 #define SST49LF004B 0x0060 47 #define SST49LF040B 0x0050 48 #define SST49LF008A 0x005a 49 #define AT49BV6416 0x00d6 50 51 static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); 52 static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); 53 static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); 54 static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *); 55 static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *); 56 static void cfi_amdstd_sync (struct mtd_info *); 57 static int cfi_amdstd_suspend (struct mtd_info *); 58 static void cfi_amdstd_resume (struct mtd_info *); 59 static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *); 60 static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); 61 62 static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, 63 size_t *retlen, const u_char *buf); 64 65 static void cfi_amdstd_destroy(struct mtd_info *); 66 67 struct mtd_info *cfi_cmdset_0002(struct map_info *, int); 68 static struct mtd_info *cfi_amdstd_setup (struct mtd_info *); 69 70 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode); 71 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr); 72 #include "fwh_lock.h" 73 74 static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); 75 static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); 76 77 static struct mtd_chip_driver cfi_amdstd_chipdrv = { 78 .probe = NULL, /* Not usable directly */ 79 .destroy = cfi_amdstd_destroy, 80 .name = "cfi_cmdset_0002", 81 .module = THIS_MODULE 82 }; 83 84 85 /* #define DEBUG_CFI_FEATURES */ 86 87 88 #ifdef DEBUG_CFI_FEATURES 89 static void cfi_tell_features(struct cfi_pri_amdstd *extp) 90 { 91 const char* erase_suspend[3] = { 92 "Not supported", "Read only", "Read/write" 93 }; 94 const char* top_bottom[6] = { 95 "No WP", "8x8KiB sectors at top & bottom, no WP", 96 "Bottom boot", "Top boot", 97 "Uniform, Bottom WP", "Uniform, Top WP" 98 }; 99 100 printk(" Silicon revision: %d\n", extp->SiliconRevision >> 1); 101 printk(" Address sensitive unlock: %s\n", 102 (extp->SiliconRevision & 1) ? "Not required" : "Required"); 103 104 if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend)) 105 printk(" Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]); 106 else 107 printk(" Erase Suspend: Unknown value %d\n", extp->EraseSuspend); 108 109 if (extp->BlkProt == 0) 110 printk(" Block protection: Not supported\n"); 111 else 112 printk(" Block protection: %d sectors per group\n", extp->BlkProt); 113 114 115 printk(" Temporary block unprotect: %s\n", 116 extp->TmpBlkUnprotect ? "Supported" : "Not supported"); 117 printk(" Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot); 118 printk(" Number of simultaneous operations: %d\n", extp->SimultaneousOps); 119 printk(" Burst mode: %s\n", 120 extp->BurstMode ? "Supported" : "Not supported"); 121 if (extp->PageMode == 0) 122 printk(" Page mode: Not supported\n"); 123 else 124 printk(" Page mode: %d word page\n", extp->PageMode << 2); 125 126 printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n", 127 extp->VppMin >> 4, extp->VppMin & 0xf); 128 printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n", 129 extp->VppMax >> 4, extp->VppMax & 0xf); 130 131 if (extp->TopBottom < ARRAY_SIZE(top_bottom)) 132 printk(" Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]); 133 else 134 printk(" Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom); 135 } 136 #endif 137 138 #ifdef AMD_BOOTLOC_BUG 139 /* Wheee. Bring me the head of someone at AMD. */ 140 static void fixup_amd_bootblock(struct mtd_info *mtd) 141 { 142 struct map_info *map = mtd->priv; 143 struct cfi_private *cfi = map->fldrv_priv; 144 struct cfi_pri_amdstd *extp = cfi->cmdset_priv; 145 __u8 major = extp->MajorVersion; 146 __u8 minor = extp->MinorVersion; 147 148 if (((major << 8) | minor) < 0x3131) { 149 /* CFI version 1.0 => don't trust bootloc */ 150 151 pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n", 152 map->name, cfi->mfr, cfi->id); 153 154 /* AFAICS all 29LV400 with a bottom boot block have a device ID 155 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode. 156 * These were badly detected as they have the 0x80 bit set 157 * so treat them as a special case. 158 */ 159 if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) && 160 161 /* Macronix added CFI to their 2nd generation 162 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD, 163 * Fujitsu, Spansion, EON, ESI and older Macronix) 164 * has CFI. 165 * 166 * Therefore also check the manufacturer. 167 * This reduces the risk of false detection due to 168 * the 8-bit device ID. 169 */ 170 (cfi->mfr == CFI_MFR_MACRONIX)) { 171 pr_debug("%s: Macronix MX29LV400C with bottom boot block" 172 " detected\n", map->name); 173 extp->TopBottom = 2; /* bottom boot */ 174 } else 175 if (cfi->id & 0x80) { 176 printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id); 177 extp->TopBottom = 3; /* top boot */ 178 } else { 179 extp->TopBottom = 2; /* bottom boot */ 180 } 181 182 pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;" 183 " deduced %s from Device ID\n", map->name, major, minor, 184 extp->TopBottom == 2 ? "bottom" : "top"); 185 } 186 } 187 #endif 188 189 static void fixup_use_write_buffers(struct mtd_info *mtd) 190 { 191 struct map_info *map = mtd->priv; 192 struct cfi_private *cfi = map->fldrv_priv; 193 if (cfi->cfiq->BufWriteTimeoutTyp) { 194 pr_debug("Using buffer write method\n" ); 195 mtd->_write = cfi_amdstd_write_buffers; 196 } 197 } 198 199 /* Atmel chips don't use the same PRI format as AMD chips */ 200 static void fixup_convert_atmel_pri(struct mtd_info *mtd) 201 { 202 struct map_info *map = mtd->priv; 203 struct cfi_private *cfi = map->fldrv_priv; 204 struct cfi_pri_amdstd *extp = cfi->cmdset_priv; 205 struct cfi_pri_atmel atmel_pri; 206 207 memcpy(&atmel_pri, extp, sizeof(atmel_pri)); 208 memset((char *)extp + 5, 0, sizeof(*extp) - 5); 209 210 if (atmel_pri.Features & 0x02) 211 extp->EraseSuspend = 2; 212 213 /* Some chips got it backwards... */ 214 if (cfi->id == AT49BV6416) { 215 if (atmel_pri.BottomBoot) 216 extp->TopBottom = 3; 217 else 218 extp->TopBottom = 2; 219 } else { 220 if (atmel_pri.BottomBoot) 221 extp->TopBottom = 2; 222 else 223 extp->TopBottom = 3; 224 } 225 226 /* burst write mode not supported */ 227 cfi->cfiq->BufWriteTimeoutTyp = 0; 228 cfi->cfiq->BufWriteTimeoutMax = 0; 229 } 230 231 static void fixup_use_secsi(struct mtd_info *mtd) 232 { 233 /* Setup for chips with a secsi area */ 234 mtd->_read_user_prot_reg = cfi_amdstd_secsi_read; 235 mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read; 236 } 237 238 static void fixup_use_erase_chip(struct mtd_info *mtd) 239 { 240 struct map_info *map = mtd->priv; 241 struct cfi_private *cfi = map->fldrv_priv; 242 if ((cfi->cfiq->NumEraseRegions == 1) && 243 ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) { 244 mtd->_erase = cfi_amdstd_erase_chip; 245 } 246 247 } 248 249 /* 250 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors 251 * locked by default. 252 */ 253 static void fixup_use_atmel_lock(struct mtd_info *mtd) 254 { 255 mtd->_lock = cfi_atmel_lock; 256 mtd->_unlock = cfi_atmel_unlock; 257 mtd->flags |= MTD_POWERUP_LOCK; 258 } 259 260 static void fixup_old_sst_eraseregion(struct mtd_info *mtd) 261 { 262 struct map_info *map = mtd->priv; 263 struct cfi_private *cfi = map->fldrv_priv; 264 265 /* 266 * These flashes report two separate eraseblock regions based on the 267 * sector_erase-size and block_erase-size, although they both operate on the 268 * same memory. This is not allowed according to CFI, so we just pick the 269 * sector_erase-size. 270 */ 271 cfi->cfiq->NumEraseRegions = 1; 272 } 273 274 static void fixup_sst39vf(struct mtd_info *mtd) 275 { 276 struct map_info *map = mtd->priv; 277 struct cfi_private *cfi = map->fldrv_priv; 278 279 fixup_old_sst_eraseregion(mtd); 280 281 cfi->addr_unlock1 = 0x5555; 282 cfi->addr_unlock2 = 0x2AAA; 283 } 284 285 static void fixup_sst39vf_rev_b(struct mtd_info *mtd) 286 { 287 struct map_info *map = mtd->priv; 288 struct cfi_private *cfi = map->fldrv_priv; 289 290 fixup_old_sst_eraseregion(mtd); 291 292 cfi->addr_unlock1 = 0x555; 293 cfi->addr_unlock2 = 0x2AA; 294 295 cfi->sector_erase_cmd = CMD(0x50); 296 } 297 298 static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd) 299 { 300 struct map_info *map = mtd->priv; 301 struct cfi_private *cfi = map->fldrv_priv; 302 303 fixup_sst39vf_rev_b(mtd); 304 305 /* 306 * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where 307 * it should report a size of 8KBytes (0x0020*256). 308 */ 309 cfi->cfiq->EraseRegionInfo[0] = 0x002003ff; 310 pr_warning("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n", mtd->name); 311 } 312 313 static void fixup_s29gl064n_sectors(struct mtd_info *mtd) 314 { 315 struct map_info *map = mtd->priv; 316 struct cfi_private *cfi = map->fldrv_priv; 317 318 if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) { 319 cfi->cfiq->EraseRegionInfo[0] |= 0x0040; 320 pr_warning("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n", mtd->name); 321 } 322 } 323 324 static void fixup_s29gl032n_sectors(struct mtd_info *mtd) 325 { 326 struct map_info *map = mtd->priv; 327 struct cfi_private *cfi = map->fldrv_priv; 328 329 if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) { 330 cfi->cfiq->EraseRegionInfo[1] &= ~0x0040; 331 pr_warning("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n", mtd->name); 332 } 333 } 334 335 static void fixup_s29ns512p_sectors(struct mtd_info *mtd) 336 { 337 struct map_info *map = mtd->priv; 338 struct cfi_private *cfi = map->fldrv_priv; 339 340 /* 341 * S29NS512P flash uses more than 8bits to report number of sectors, 342 * which is not permitted by CFI. 343 */ 344 cfi->cfiq->EraseRegionInfo[0] = 0x020001ff; 345 pr_warning("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n", mtd->name); 346 } 347 348 /* Used to fix CFI-Tables of chips without Extended Query Tables */ 349 static struct cfi_fixup cfi_nopri_fixup_table[] = { 350 { CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */ 351 { CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */ 352 { CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */ 353 { CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */ 354 { CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */ 355 { CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */ 356 { CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */ 357 { CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */ 358 { 0, 0, NULL } 359 }; 360 361 static struct cfi_fixup cfi_fixup_table[] = { 362 { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri }, 363 #ifdef AMD_BOOTLOC_BUG 364 { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock }, 365 { CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock }, 366 { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock }, 367 #endif 368 { CFI_MFR_AMD, 0x0050, fixup_use_secsi }, 369 { CFI_MFR_AMD, 0x0053, fixup_use_secsi }, 370 { CFI_MFR_AMD, 0x0055, fixup_use_secsi }, 371 { CFI_MFR_AMD, 0x0056, fixup_use_secsi }, 372 { CFI_MFR_AMD, 0x005C, fixup_use_secsi }, 373 { CFI_MFR_AMD, 0x005F, fixup_use_secsi }, 374 { CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors }, 375 { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors }, 376 { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors }, 377 { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors }, 378 { CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors }, 379 { CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */ 380 { CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */ 381 { CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */ 382 { CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */ 383 #if !FORCE_WORD_WRITE 384 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers }, 385 #endif 386 { 0, 0, NULL } 387 }; 388 static struct cfi_fixup jedec_fixup_table[] = { 389 { CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock }, 390 { CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock }, 391 { CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock }, 392 { 0, 0, NULL } 393 }; 394 395 static struct cfi_fixup fixup_table[] = { 396 /* The CFI vendor ids and the JEDEC vendor IDs appear 397 * to be common. It is like the devices id's are as 398 * well. This table is to pick all cases where 399 * we know that is the case. 400 */ 401 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip }, 402 { CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock }, 403 { 0, 0, NULL } 404 }; 405 406 407 static void cfi_fixup_major_minor(struct cfi_private *cfi, 408 struct cfi_pri_amdstd *extp) 409 { 410 if (cfi->mfr == CFI_MFR_SAMSUNG) { 411 if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') || 412 (extp->MajorVersion == '3' && extp->MinorVersion == '3')) { 413 /* 414 * Samsung K8P2815UQB and K8D6x16UxM chips 415 * report major=0 / minor=0. 416 * K8D3x16UxC chips report major=3 / minor=3. 417 */ 418 printk(KERN_NOTICE " Fixing Samsung's Amd/Fujitsu" 419 " Extended Query version to 1.%c\n", 420 extp->MinorVersion); 421 extp->MajorVersion = '1'; 422 } 423 } 424 425 /* 426 * SST 38VF640x chips report major=0xFF / minor=0xFF. 427 */ 428 if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) { 429 extp->MajorVersion = '1'; 430 extp->MinorVersion = '0'; 431 } 432 } 433 434 static int is_m29ew(struct cfi_private *cfi) 435 { 436 if (cfi->mfr == CFI_MFR_INTEL && 437 ((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) || 438 (cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e))) 439 return 1; 440 return 0; 441 } 442 443 /* 444 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20: 445 * Some revisions of the M29EW suffer from erase suspend hang ups. In 446 * particular, it can occur when the sequence 447 * Erase Confirm -> Suspend -> Program -> Resume 448 * causes a lockup due to internal timing issues. The consequence is that the 449 * erase cannot be resumed without inserting a dummy command after programming 450 * and prior to resuming. [...] The work-around is to issue a dummy write cycle 451 * that writes an F0 command code before the RESUME command. 452 */ 453 static void cfi_fixup_m29ew_erase_suspend(struct map_info *map, 454 unsigned long adr) 455 { 456 struct cfi_private *cfi = map->fldrv_priv; 457 /* before resume, insert a dummy 0xF0 cycle for Micron M29EW devices */ 458 if (is_m29ew(cfi)) 459 map_write(map, CMD(0xF0), adr); 460 } 461 462 /* 463 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22: 464 * 465 * Some revisions of the M29EW (for example, A1 and A2 step revisions) 466 * are affected by a problem that could cause a hang up when an ERASE SUSPEND 467 * command is issued after an ERASE RESUME operation without waiting for a 468 * minimum delay. The result is that once the ERASE seems to be completed 469 * (no bits are toggling), the contents of the Flash memory block on which 470 * the erase was ongoing could be inconsistent with the expected values 471 * (typically, the array value is stuck to the 0xC0, 0xC4, 0x80, or 0x84 472 * values), causing a consequent failure of the ERASE operation. 473 * The occurrence of this issue could be high, especially when file system 474 * operations on the Flash are intensive. As a result, it is recommended 475 * that a patch be applied. Intensive file system operations can cause many 476 * calls to the garbage routine to free Flash space (also by erasing physical 477 * Flash blocks) and as a result, many consecutive SUSPEND and RESUME 478 * commands can occur. The problem disappears when a delay is inserted after 479 * the RESUME command by using the udelay() function available in Linux. 480 * The DELAY value must be tuned based on the customer's platform. 481 * The maximum value that fixes the problem in all cases is 500us. 482 * But, in our experience, a delay of 30 µs to 50 µs is sufficient 483 * in most cases. 484 * We have chosen 500µs because this latency is acceptable. 485 */ 486 static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi) 487 { 488 /* 489 * Resolving the Delay After Resume Issue see Micron TN-13-07 490 * Worst case delay must be 500µs but 30-50µs should be ok as well 491 */ 492 if (is_m29ew(cfi)) 493 cfi_udelay(500); 494 } 495 496 struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary) 497 { 498 struct cfi_private *cfi = map->fldrv_priv; 499 struct mtd_info *mtd; 500 int i; 501 502 mtd = kzalloc(sizeof(*mtd), GFP_KERNEL); 503 if (!mtd) { 504 printk(KERN_WARNING "Failed to allocate memory for MTD device\n"); 505 return NULL; 506 } 507 mtd->priv = map; 508 mtd->type = MTD_NORFLASH; 509 510 /* Fill in the default mtd operations */ 511 mtd->_erase = cfi_amdstd_erase_varsize; 512 mtd->_write = cfi_amdstd_write_words; 513 mtd->_read = cfi_amdstd_read; 514 mtd->_sync = cfi_amdstd_sync; 515 mtd->_suspend = cfi_amdstd_suspend; 516 mtd->_resume = cfi_amdstd_resume; 517 mtd->flags = MTD_CAP_NORFLASH; 518 mtd->name = map->name; 519 mtd->writesize = 1; 520 mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; 521 522 pr_debug("MTD %s(): write buffer size %d\n", __func__, 523 mtd->writebufsize); 524 525 mtd->_panic_write = cfi_amdstd_panic_write; 526 mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot; 527 528 if (cfi->cfi_mode==CFI_MODE_CFI){ 529 unsigned char bootloc; 530 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR; 531 struct cfi_pri_amdstd *extp; 532 533 extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu"); 534 if (extp) { 535 /* 536 * It's a real CFI chip, not one for which the probe 537 * routine faked a CFI structure. 538 */ 539 cfi_fixup_major_minor(cfi, extp); 540 541 /* 542 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5 543 * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19 544 * http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf 545 * http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf 546 * http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf 547 */ 548 if (extp->MajorVersion != '1' || 549 (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) { 550 printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query " 551 "version %c.%c (%#02x/%#02x).\n", 552 extp->MajorVersion, extp->MinorVersion, 553 extp->MajorVersion, extp->MinorVersion); 554 kfree(extp); 555 kfree(mtd); 556 return NULL; 557 } 558 559 printk(KERN_INFO " Amd/Fujitsu Extended Query version %c.%c.\n", 560 extp->MajorVersion, extp->MinorVersion); 561 562 /* Install our own private info structure */ 563 cfi->cmdset_priv = extp; 564 565 /* Apply cfi device specific fixups */ 566 cfi_fixup(mtd, cfi_fixup_table); 567 568 #ifdef DEBUG_CFI_FEATURES 569 /* Tell the user about it in lots of lovely detail */ 570 cfi_tell_features(extp); 571 #endif 572 573 bootloc = extp->TopBottom; 574 if ((bootloc < 2) || (bootloc > 5)) { 575 printk(KERN_WARNING "%s: CFI contains unrecognised boot " 576 "bank location (%d). Assuming bottom.\n", 577 map->name, bootloc); 578 bootloc = 2; 579 } 580 581 if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) { 582 printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name); 583 584 for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) { 585 int j = (cfi->cfiq->NumEraseRegions-1)-i; 586 __u32 swap; 587 588 swap = cfi->cfiq->EraseRegionInfo[i]; 589 cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j]; 590 cfi->cfiq->EraseRegionInfo[j] = swap; 591 } 592 } 593 /* Set the default CFI lock/unlock addresses */ 594 cfi->addr_unlock1 = 0x555; 595 cfi->addr_unlock2 = 0x2aa; 596 } 597 cfi_fixup(mtd, cfi_nopri_fixup_table); 598 599 if (!cfi->addr_unlock1 || !cfi->addr_unlock2) { 600 kfree(mtd); 601 return NULL; 602 } 603 604 } /* CFI mode */ 605 else if (cfi->cfi_mode == CFI_MODE_JEDEC) { 606 /* Apply jedec specific fixups */ 607 cfi_fixup(mtd, jedec_fixup_table); 608 } 609 /* Apply generic fixups */ 610 cfi_fixup(mtd, fixup_table); 611 612 for (i=0; i< cfi->numchips; i++) { 613 cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp; 614 cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp; 615 cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp; 616 cfi->chips[i].ref_point_counter = 0; 617 init_waitqueue_head(&(cfi->chips[i].wq)); 618 } 619 620 map->fldrv = &cfi_amdstd_chipdrv; 621 622 return cfi_amdstd_setup(mtd); 623 } 624 struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002"))); 625 struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002"))); 626 EXPORT_SYMBOL_GPL(cfi_cmdset_0002); 627 EXPORT_SYMBOL_GPL(cfi_cmdset_0006); 628 EXPORT_SYMBOL_GPL(cfi_cmdset_0701); 629 630 static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd) 631 { 632 struct map_info *map = mtd->priv; 633 struct cfi_private *cfi = map->fldrv_priv; 634 unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave; 635 unsigned long offset = 0; 636 int i,j; 637 638 printk(KERN_NOTICE "number of %s chips: %d\n", 639 (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips); 640 /* Select the correct geometry setup */ 641 mtd->size = devsize * cfi->numchips; 642 643 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips; 644 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) 645 * mtd->numeraseregions, GFP_KERNEL); 646 if (!mtd->eraseregions) { 647 printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n"); 648 goto setup_err; 649 } 650 651 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) { 652 unsigned long ernum, ersize; 653 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave; 654 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1; 655 656 if (mtd->erasesize < ersize) { 657 mtd->erasesize = ersize; 658 } 659 for (j=0; j<cfi->numchips; j++) { 660 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset; 661 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize; 662 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum; 663 } 664 offset += (ersize * ernum); 665 } 666 if (offset != devsize) { 667 /* Argh */ 668 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize); 669 goto setup_err; 670 } 671 672 __module_get(THIS_MODULE); 673 register_reboot_notifier(&mtd->reboot_notifier); 674 return mtd; 675 676 setup_err: 677 kfree(mtd->eraseregions); 678 kfree(mtd); 679 kfree(cfi->cmdset_priv); 680 kfree(cfi->cfiq); 681 return NULL; 682 } 683 684 /* 685 * Return true if the chip is ready. 686 * 687 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any 688 * non-suspended sector) and is indicated by no toggle bits toggling. 689 * 690 * Note that anything more complicated than checking if no bits are toggling 691 * (including checking DQ5 for an error status) is tricky to get working 692 * correctly and is therefore not done (particularly with interleaved chips 693 * as each chip must be checked independently of the others). 694 */ 695 static int __xipram chip_ready(struct map_info *map, unsigned long addr) 696 { 697 map_word d, t; 698 699 d = map_read(map, addr); 700 t = map_read(map, addr); 701 702 return map_word_equal(map, d, t); 703 } 704 705 /* 706 * Return true if the chip is ready and has the correct value. 707 * 708 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any 709 * non-suspended sector) and it is indicated by no bits toggling. 710 * 711 * Error are indicated by toggling bits or bits held with the wrong value, 712 * or with bits toggling. 713 * 714 * Note that anything more complicated than checking if no bits are toggling 715 * (including checking DQ5 for an error status) is tricky to get working 716 * correctly and is therefore not done (particularly with interleaved chips 717 * as each chip must be checked independently of the others). 718 * 719 */ 720 static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected) 721 { 722 map_word oldd, curd; 723 724 oldd = map_read(map, addr); 725 curd = map_read(map, addr); 726 727 return map_word_equal(map, oldd, curd) && 728 map_word_equal(map, curd, expected); 729 } 730 731 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode) 732 { 733 DECLARE_WAITQUEUE(wait, current); 734 struct cfi_private *cfi = map->fldrv_priv; 735 unsigned long timeo; 736 struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv; 737 738 resettime: 739 timeo = jiffies + HZ; 740 retry: 741 switch (chip->state) { 742 743 case FL_STATUS: 744 for (;;) { 745 if (chip_ready(map, adr)) 746 break; 747 748 if (time_after(jiffies, timeo)) { 749 printk(KERN_ERR "Waiting for chip to be ready timed out.\n"); 750 return -EIO; 751 } 752 mutex_unlock(&chip->mutex); 753 cfi_udelay(1); 754 mutex_lock(&chip->mutex); 755 /* Someone else might have been playing with it. */ 756 goto retry; 757 } 758 759 case FL_READY: 760 case FL_CFI_QUERY: 761 case FL_JEDEC_QUERY: 762 return 0; 763 764 case FL_ERASING: 765 if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) || 766 !(mode == FL_READY || mode == FL_POINT || 767 (mode == FL_WRITING && (cfip->EraseSuspend & 0x2)))) 768 goto sleep; 769 770 /* We could check to see if we're trying to access the sector 771 * that is currently being erased. However, no user will try 772 * anything like that so we just wait for the timeout. */ 773 774 /* Erase suspend */ 775 /* It's harmless to issue the Erase-Suspend and Erase-Resume 776 * commands when the erase algorithm isn't in progress. */ 777 map_write(map, CMD(0xB0), chip->in_progress_block_addr); 778 chip->oldstate = FL_ERASING; 779 chip->state = FL_ERASE_SUSPENDING; 780 chip->erase_suspended = 1; 781 for (;;) { 782 if (chip_ready(map, adr)) 783 break; 784 785 if (time_after(jiffies, timeo)) { 786 /* Should have suspended the erase by now. 787 * Send an Erase-Resume command as either 788 * there was an error (so leave the erase 789 * routine to recover from it) or we trying to 790 * use the erase-in-progress sector. */ 791 put_chip(map, chip, adr); 792 printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__); 793 return -EIO; 794 } 795 796 mutex_unlock(&chip->mutex); 797 cfi_udelay(1); 798 mutex_lock(&chip->mutex); 799 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING. 800 So we can just loop here. */ 801 } 802 chip->state = FL_READY; 803 return 0; 804 805 case FL_XIP_WHILE_ERASING: 806 if (mode != FL_READY && mode != FL_POINT && 807 (!cfip || !(cfip->EraseSuspend&2))) 808 goto sleep; 809 chip->oldstate = chip->state; 810 chip->state = FL_READY; 811 return 0; 812 813 case FL_SHUTDOWN: 814 /* The machine is rebooting */ 815 return -EIO; 816 817 case FL_POINT: 818 /* Only if there's no operation suspended... */ 819 if (mode == FL_READY && chip->oldstate == FL_READY) 820 return 0; 821 822 default: 823 sleep: 824 set_current_state(TASK_UNINTERRUPTIBLE); 825 add_wait_queue(&chip->wq, &wait); 826 mutex_unlock(&chip->mutex); 827 schedule(); 828 remove_wait_queue(&chip->wq, &wait); 829 mutex_lock(&chip->mutex); 830 goto resettime; 831 } 832 } 833 834 835 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr) 836 { 837 struct cfi_private *cfi = map->fldrv_priv; 838 839 switch(chip->oldstate) { 840 case FL_ERASING: 841 cfi_fixup_m29ew_erase_suspend(map, 842 chip->in_progress_block_addr); 843 map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr); 844 cfi_fixup_m29ew_delay_after_resume(cfi); 845 chip->oldstate = FL_READY; 846 chip->state = FL_ERASING; 847 break; 848 849 case FL_XIP_WHILE_ERASING: 850 chip->state = chip->oldstate; 851 chip->oldstate = FL_READY; 852 break; 853 854 case FL_READY: 855 case FL_STATUS: 856 break; 857 default: 858 printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate); 859 } 860 wake_up(&chip->wq); 861 } 862 863 #ifdef CONFIG_MTD_XIP 864 865 /* 866 * No interrupt what so ever can be serviced while the flash isn't in array 867 * mode. This is ensured by the xip_disable() and xip_enable() functions 868 * enclosing any code path where the flash is known not to be in array mode. 869 * And within a XIP disabled code path, only functions marked with __xipram 870 * may be called and nothing else (it's a good thing to inspect generated 871 * assembly to make sure inline functions were actually inlined and that gcc 872 * didn't emit calls to its own support functions). Also configuring MTD CFI 873 * support to a single buswidth and a single interleave is also recommended. 874 */ 875 876 static void xip_disable(struct map_info *map, struct flchip *chip, 877 unsigned long adr) 878 { 879 /* TODO: chips with no XIP use should ignore and return */ 880 (void) map_read(map, adr); /* ensure mmu mapping is up to date */ 881 local_irq_disable(); 882 } 883 884 static void __xipram xip_enable(struct map_info *map, struct flchip *chip, 885 unsigned long adr) 886 { 887 struct cfi_private *cfi = map->fldrv_priv; 888 889 if (chip->state != FL_POINT && chip->state != FL_READY) { 890 map_write(map, CMD(0xf0), adr); 891 chip->state = FL_READY; 892 } 893 (void) map_read(map, adr); 894 xip_iprefetch(); 895 local_irq_enable(); 896 } 897 898 /* 899 * When a delay is required for the flash operation to complete, the 900 * xip_udelay() function is polling for both the given timeout and pending 901 * (but still masked) hardware interrupts. Whenever there is an interrupt 902 * pending then the flash erase operation is suspended, array mode restored 903 * and interrupts unmasked. Task scheduling might also happen at that 904 * point. The CPU eventually returns from the interrupt or the call to 905 * schedule() and the suspended flash operation is resumed for the remaining 906 * of the delay period. 907 * 908 * Warning: this function _will_ fool interrupt latency tracing tools. 909 */ 910 911 static void __xipram xip_udelay(struct map_info *map, struct flchip *chip, 912 unsigned long adr, int usec) 913 { 914 struct cfi_private *cfi = map->fldrv_priv; 915 struct cfi_pri_amdstd *extp = cfi->cmdset_priv; 916 map_word status, OK = CMD(0x80); 917 unsigned long suspended, start = xip_currtime(); 918 flstate_t oldstate; 919 920 do { 921 cpu_relax(); 922 if (xip_irqpending() && extp && 923 ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) && 924 (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) { 925 /* 926 * Let's suspend the erase operation when supported. 927 * Note that we currently don't try to suspend 928 * interleaved chips if there is already another 929 * operation suspended (imagine what happens 930 * when one chip was already done with the current 931 * operation while another chip suspended it, then 932 * we resume the whole thing at once). Yes, it 933 * can happen! 934 */ 935 map_write(map, CMD(0xb0), adr); 936 usec -= xip_elapsed_since(start); 937 suspended = xip_currtime(); 938 do { 939 if (xip_elapsed_since(suspended) > 100000) { 940 /* 941 * The chip doesn't want to suspend 942 * after waiting for 100 msecs. 943 * This is a critical error but there 944 * is not much we can do here. 945 */ 946 return; 947 } 948 status = map_read(map, adr); 949 } while (!map_word_andequal(map, status, OK, OK)); 950 951 /* Suspend succeeded */ 952 oldstate = chip->state; 953 if (!map_word_bitsset(map, status, CMD(0x40))) 954 break; 955 chip->state = FL_XIP_WHILE_ERASING; 956 chip->erase_suspended = 1; 957 map_write(map, CMD(0xf0), adr); 958 (void) map_read(map, adr); 959 xip_iprefetch(); 960 local_irq_enable(); 961 mutex_unlock(&chip->mutex); 962 xip_iprefetch(); 963 cond_resched(); 964 965 /* 966 * We're back. However someone else might have 967 * decided to go write to the chip if we are in 968 * a suspended erase state. If so let's wait 969 * until it's done. 970 */ 971 mutex_lock(&chip->mutex); 972 while (chip->state != FL_XIP_WHILE_ERASING) { 973 DECLARE_WAITQUEUE(wait, current); 974 set_current_state(TASK_UNINTERRUPTIBLE); 975 add_wait_queue(&chip->wq, &wait); 976 mutex_unlock(&chip->mutex); 977 schedule(); 978 remove_wait_queue(&chip->wq, &wait); 979 mutex_lock(&chip->mutex); 980 } 981 /* Disallow XIP again */ 982 local_irq_disable(); 983 984 /* Correct Erase Suspend Hangups for M29EW */ 985 cfi_fixup_m29ew_erase_suspend(map, adr); 986 /* Resume the write or erase operation */ 987 map_write(map, cfi->sector_erase_cmd, adr); 988 chip->state = oldstate; 989 start = xip_currtime(); 990 } else if (usec >= 1000000/HZ) { 991 /* 992 * Try to save on CPU power when waiting delay 993 * is at least a system timer tick period. 994 * No need to be extremely accurate here. 995 */ 996 xip_cpu_idle(); 997 } 998 status = map_read(map, adr); 999 } while (!map_word_andequal(map, status, OK, OK) 1000 && xip_elapsed_since(start) < usec); 1001 } 1002 1003 #define UDELAY(map, chip, adr, usec) xip_udelay(map, chip, adr, usec) 1004 1005 /* 1006 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while 1007 * the flash is actively programming or erasing since we have to poll for 1008 * the operation to complete anyway. We can't do that in a generic way with 1009 * a XIP setup so do it before the actual flash operation in this case 1010 * and stub it out from INVALIDATE_CACHE_UDELAY. 1011 */ 1012 #define XIP_INVAL_CACHED_RANGE(map, from, size) \ 1013 INVALIDATE_CACHED_RANGE(map, from, size) 1014 1015 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \ 1016 UDELAY(map, chip, adr, usec) 1017 1018 /* 1019 * Extra notes: 1020 * 1021 * Activating this XIP support changes the way the code works a bit. For 1022 * example the code to suspend the current process when concurrent access 1023 * happens is never executed because xip_udelay() will always return with the 1024 * same chip state as it was entered with. This is why there is no care for 1025 * the presence of add_wait_queue() or schedule() calls from within a couple 1026 * xip_disable()'d areas of code, like in do_erase_oneblock for example. 1027 * The queueing and scheduling are always happening within xip_udelay(). 1028 * 1029 * Similarly, get_chip() and put_chip() just happen to always be executed 1030 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state 1031 * is in array mode, therefore never executing many cases therein and not 1032 * causing any problem with XIP. 1033 */ 1034 1035 #else 1036 1037 #define xip_disable(map, chip, adr) 1038 #define xip_enable(map, chip, adr) 1039 #define XIP_INVAL_CACHED_RANGE(x...) 1040 1041 #define UDELAY(map, chip, adr, usec) \ 1042 do { \ 1043 mutex_unlock(&chip->mutex); \ 1044 cfi_udelay(usec); \ 1045 mutex_lock(&chip->mutex); \ 1046 } while (0) 1047 1048 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \ 1049 do { \ 1050 mutex_unlock(&chip->mutex); \ 1051 INVALIDATE_CACHED_RANGE(map, adr, len); \ 1052 cfi_udelay(usec); \ 1053 mutex_lock(&chip->mutex); \ 1054 } while (0) 1055 1056 #endif 1057 1058 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf) 1059 { 1060 unsigned long cmd_addr; 1061 struct cfi_private *cfi = map->fldrv_priv; 1062 int ret; 1063 1064 adr += chip->start; 1065 1066 /* Ensure cmd read/writes are aligned. */ 1067 cmd_addr = adr & ~(map_bankwidth(map)-1); 1068 1069 mutex_lock(&chip->mutex); 1070 ret = get_chip(map, chip, cmd_addr, FL_READY); 1071 if (ret) { 1072 mutex_unlock(&chip->mutex); 1073 return ret; 1074 } 1075 1076 if (chip->state != FL_POINT && chip->state != FL_READY) { 1077 map_write(map, CMD(0xf0), cmd_addr); 1078 chip->state = FL_READY; 1079 } 1080 1081 map_copy_from(map, buf, adr, len); 1082 1083 put_chip(map, chip, cmd_addr); 1084 1085 mutex_unlock(&chip->mutex); 1086 return 0; 1087 } 1088 1089 1090 static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) 1091 { 1092 struct map_info *map = mtd->priv; 1093 struct cfi_private *cfi = map->fldrv_priv; 1094 unsigned long ofs; 1095 int chipnum; 1096 int ret = 0; 1097 1098 /* ofs: offset within the first chip that the first read should start */ 1099 chipnum = (from >> cfi->chipshift); 1100 ofs = from - (chipnum << cfi->chipshift); 1101 1102 while (len) { 1103 unsigned long thislen; 1104 1105 if (chipnum >= cfi->numchips) 1106 break; 1107 1108 if ((len + ofs -1) >> cfi->chipshift) 1109 thislen = (1<<cfi->chipshift) - ofs; 1110 else 1111 thislen = len; 1112 1113 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf); 1114 if (ret) 1115 break; 1116 1117 *retlen += thislen; 1118 len -= thislen; 1119 buf += thislen; 1120 1121 ofs = 0; 1122 chipnum++; 1123 } 1124 return ret; 1125 } 1126 1127 1128 static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf) 1129 { 1130 DECLARE_WAITQUEUE(wait, current); 1131 unsigned long timeo = jiffies + HZ; 1132 struct cfi_private *cfi = map->fldrv_priv; 1133 1134 retry: 1135 mutex_lock(&chip->mutex); 1136 1137 if (chip->state != FL_READY){ 1138 set_current_state(TASK_UNINTERRUPTIBLE); 1139 add_wait_queue(&chip->wq, &wait); 1140 1141 mutex_unlock(&chip->mutex); 1142 1143 schedule(); 1144 remove_wait_queue(&chip->wq, &wait); 1145 timeo = jiffies + HZ; 1146 1147 goto retry; 1148 } 1149 1150 adr += chip->start; 1151 1152 chip->state = FL_READY; 1153 1154 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1155 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 1156 cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1157 1158 map_copy_from(map, buf, adr, len); 1159 1160 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1161 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 1162 cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1163 cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1164 1165 wake_up(&chip->wq); 1166 mutex_unlock(&chip->mutex); 1167 1168 return 0; 1169 } 1170 1171 static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) 1172 { 1173 struct map_info *map = mtd->priv; 1174 struct cfi_private *cfi = map->fldrv_priv; 1175 unsigned long ofs; 1176 int chipnum; 1177 int ret = 0; 1178 1179 /* ofs: offset within the first chip that the first read should start */ 1180 /* 8 secsi bytes per chip */ 1181 chipnum=from>>3; 1182 ofs=from & 7; 1183 1184 while (len) { 1185 unsigned long thislen; 1186 1187 if (chipnum >= cfi->numchips) 1188 break; 1189 1190 if ((len + ofs -1) >> 3) 1191 thislen = (1<<3) - ofs; 1192 else 1193 thislen = len; 1194 1195 ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf); 1196 if (ret) 1197 break; 1198 1199 *retlen += thislen; 1200 len -= thislen; 1201 buf += thislen; 1202 1203 ofs = 0; 1204 chipnum++; 1205 } 1206 return ret; 1207 } 1208 1209 1210 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum) 1211 { 1212 struct cfi_private *cfi = map->fldrv_priv; 1213 unsigned long timeo = jiffies + HZ; 1214 /* 1215 * We use a 1ms + 1 jiffies generic timeout for writes (most devices 1216 * have a max write time of a few hundreds usec). However, we should 1217 * use the maximum timeout value given by the chip at probe time 1218 * instead. Unfortunately, struct flchip does have a field for 1219 * maximum timeout, only for typical which can be far too short 1220 * depending of the conditions. The ' + 1' is to avoid having a 1221 * timeout of 0 jiffies if HZ is smaller than 1000. 1222 */ 1223 unsigned long uWriteTimeout = ( HZ / 1000 ) + 1; 1224 int ret = 0; 1225 map_word oldd; 1226 int retry_cnt = 0; 1227 1228 adr += chip->start; 1229 1230 mutex_lock(&chip->mutex); 1231 ret = get_chip(map, chip, adr, FL_WRITING); 1232 if (ret) { 1233 mutex_unlock(&chip->mutex); 1234 return ret; 1235 } 1236 1237 pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", 1238 __func__, adr, datum.x[0] ); 1239 1240 /* 1241 * Check for a NOP for the case when the datum to write is already 1242 * present - it saves time and works around buggy chips that corrupt 1243 * data at other locations when 0xff is written to a location that 1244 * already contains 0xff. 1245 */ 1246 oldd = map_read(map, adr); 1247 if (map_word_equal(map, oldd, datum)) { 1248 pr_debug("MTD %s(): NOP\n", 1249 __func__); 1250 goto op_done; 1251 } 1252 1253 XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map)); 1254 ENABLE_VPP(map); 1255 xip_disable(map, chip, adr); 1256 retry: 1257 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1258 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 1259 cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1260 map_write(map, datum, adr); 1261 chip->state = FL_WRITING; 1262 1263 INVALIDATE_CACHE_UDELAY(map, chip, 1264 adr, map_bankwidth(map), 1265 chip->word_write_time); 1266 1267 /* See comment above for timeout value. */ 1268 timeo = jiffies + uWriteTimeout; 1269 for (;;) { 1270 if (chip->state != FL_WRITING) { 1271 /* Someone's suspended the write. Sleep */ 1272 DECLARE_WAITQUEUE(wait, current); 1273 1274 set_current_state(TASK_UNINTERRUPTIBLE); 1275 add_wait_queue(&chip->wq, &wait); 1276 mutex_unlock(&chip->mutex); 1277 schedule(); 1278 remove_wait_queue(&chip->wq, &wait); 1279 timeo = jiffies + (HZ / 2); /* FIXME */ 1280 mutex_lock(&chip->mutex); 1281 continue; 1282 } 1283 1284 if (time_after(jiffies, timeo) && !chip_ready(map, adr)){ 1285 xip_enable(map, chip, adr); 1286 printk(KERN_WARNING "MTD %s(): software timeout\n", __func__); 1287 xip_disable(map, chip, adr); 1288 break; 1289 } 1290 1291 if (chip_ready(map, adr)) 1292 break; 1293 1294 /* Latency issues. Drop the lock, wait a while and retry */ 1295 UDELAY(map, chip, adr, 1); 1296 } 1297 /* Did we succeed? */ 1298 if (!chip_good(map, adr, datum)) { 1299 /* reset on all failures. */ 1300 map_write( map, CMD(0xF0), chip->start ); 1301 /* FIXME - should have reset delay before continuing */ 1302 1303 if (++retry_cnt <= MAX_WORD_RETRIES) 1304 goto retry; 1305 1306 ret = -EIO; 1307 } 1308 xip_enable(map, chip, adr); 1309 op_done: 1310 chip->state = FL_READY; 1311 DISABLE_VPP(map); 1312 put_chip(map, chip, adr); 1313 mutex_unlock(&chip->mutex); 1314 1315 return ret; 1316 } 1317 1318 1319 static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len, 1320 size_t *retlen, const u_char *buf) 1321 { 1322 struct map_info *map = mtd->priv; 1323 struct cfi_private *cfi = map->fldrv_priv; 1324 int ret = 0; 1325 int chipnum; 1326 unsigned long ofs, chipstart; 1327 DECLARE_WAITQUEUE(wait, current); 1328 1329 chipnum = to >> cfi->chipshift; 1330 ofs = to - (chipnum << cfi->chipshift); 1331 chipstart = cfi->chips[chipnum].start; 1332 1333 /* If it's not bus-aligned, do the first byte write */ 1334 if (ofs & (map_bankwidth(map)-1)) { 1335 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1); 1336 int i = ofs - bus_ofs; 1337 int n = 0; 1338 map_word tmp_buf; 1339 1340 retry: 1341 mutex_lock(&cfi->chips[chipnum].mutex); 1342 1343 if (cfi->chips[chipnum].state != FL_READY) { 1344 set_current_state(TASK_UNINTERRUPTIBLE); 1345 add_wait_queue(&cfi->chips[chipnum].wq, &wait); 1346 1347 mutex_unlock(&cfi->chips[chipnum].mutex); 1348 1349 schedule(); 1350 remove_wait_queue(&cfi->chips[chipnum].wq, &wait); 1351 goto retry; 1352 } 1353 1354 /* Load 'tmp_buf' with old contents of flash */ 1355 tmp_buf = map_read(map, bus_ofs+chipstart); 1356 1357 mutex_unlock(&cfi->chips[chipnum].mutex); 1358 1359 /* Number of bytes to copy from buffer */ 1360 n = min_t(int, len, map_bankwidth(map)-i); 1361 1362 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n); 1363 1364 ret = do_write_oneword(map, &cfi->chips[chipnum], 1365 bus_ofs, tmp_buf); 1366 if (ret) 1367 return ret; 1368 1369 ofs += n; 1370 buf += n; 1371 (*retlen) += n; 1372 len -= n; 1373 1374 if (ofs >> cfi->chipshift) { 1375 chipnum ++; 1376 ofs = 0; 1377 if (chipnum == cfi->numchips) 1378 return 0; 1379 } 1380 } 1381 1382 /* We are now aligned, write as much as possible */ 1383 while(len >= map_bankwidth(map)) { 1384 map_word datum; 1385 1386 datum = map_word_load(map, buf); 1387 1388 ret = do_write_oneword(map, &cfi->chips[chipnum], 1389 ofs, datum); 1390 if (ret) 1391 return ret; 1392 1393 ofs += map_bankwidth(map); 1394 buf += map_bankwidth(map); 1395 (*retlen) += map_bankwidth(map); 1396 len -= map_bankwidth(map); 1397 1398 if (ofs >> cfi->chipshift) { 1399 chipnum ++; 1400 ofs = 0; 1401 if (chipnum == cfi->numchips) 1402 return 0; 1403 chipstart = cfi->chips[chipnum].start; 1404 } 1405 } 1406 1407 /* Write the trailing bytes if any */ 1408 if (len & (map_bankwidth(map)-1)) { 1409 map_word tmp_buf; 1410 1411 retry1: 1412 mutex_lock(&cfi->chips[chipnum].mutex); 1413 1414 if (cfi->chips[chipnum].state != FL_READY) { 1415 set_current_state(TASK_UNINTERRUPTIBLE); 1416 add_wait_queue(&cfi->chips[chipnum].wq, &wait); 1417 1418 mutex_unlock(&cfi->chips[chipnum].mutex); 1419 1420 schedule(); 1421 remove_wait_queue(&cfi->chips[chipnum].wq, &wait); 1422 goto retry1; 1423 } 1424 1425 tmp_buf = map_read(map, ofs + chipstart); 1426 1427 mutex_unlock(&cfi->chips[chipnum].mutex); 1428 1429 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len); 1430 1431 ret = do_write_oneword(map, &cfi->chips[chipnum], 1432 ofs, tmp_buf); 1433 if (ret) 1434 return ret; 1435 1436 (*retlen) += len; 1437 } 1438 1439 return 0; 1440 } 1441 1442 1443 /* 1444 * FIXME: interleaved mode not tested, and probably not supported! 1445 */ 1446 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, 1447 unsigned long adr, const u_char *buf, 1448 int len) 1449 { 1450 struct cfi_private *cfi = map->fldrv_priv; 1451 unsigned long timeo = jiffies + HZ; 1452 /* see comments in do_write_oneword() regarding uWriteTimeo. */ 1453 unsigned long uWriteTimeout = ( HZ / 1000 ) + 1; 1454 int ret = -EIO; 1455 unsigned long cmd_adr; 1456 int z, words; 1457 map_word datum; 1458 1459 adr += chip->start; 1460 cmd_adr = adr; 1461 1462 mutex_lock(&chip->mutex); 1463 ret = get_chip(map, chip, adr, FL_WRITING); 1464 if (ret) { 1465 mutex_unlock(&chip->mutex); 1466 return ret; 1467 } 1468 1469 datum = map_word_load(map, buf); 1470 1471 pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", 1472 __func__, adr, datum.x[0] ); 1473 1474 XIP_INVAL_CACHED_RANGE(map, adr, len); 1475 ENABLE_VPP(map); 1476 xip_disable(map, chip, cmd_adr); 1477 1478 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1479 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 1480 1481 /* Write Buffer Load */ 1482 map_write(map, CMD(0x25), cmd_adr); 1483 1484 chip->state = FL_WRITING_TO_BUFFER; 1485 1486 /* Write length of data to come */ 1487 words = len / map_bankwidth(map); 1488 map_write(map, CMD(words - 1), cmd_adr); 1489 /* Write data */ 1490 z = 0; 1491 while(z < words * map_bankwidth(map)) { 1492 datum = map_word_load(map, buf); 1493 map_write(map, datum, adr + z); 1494 1495 z += map_bankwidth(map); 1496 buf += map_bankwidth(map); 1497 } 1498 z -= map_bankwidth(map); 1499 1500 adr += z; 1501 1502 /* Write Buffer Program Confirm: GO GO GO */ 1503 map_write(map, CMD(0x29), cmd_adr); 1504 chip->state = FL_WRITING; 1505 1506 INVALIDATE_CACHE_UDELAY(map, chip, 1507 adr, map_bankwidth(map), 1508 chip->word_write_time); 1509 1510 timeo = jiffies + uWriteTimeout; 1511 1512 for (;;) { 1513 if (chip->state != FL_WRITING) { 1514 /* Someone's suspended the write. Sleep */ 1515 DECLARE_WAITQUEUE(wait, current); 1516 1517 set_current_state(TASK_UNINTERRUPTIBLE); 1518 add_wait_queue(&chip->wq, &wait); 1519 mutex_unlock(&chip->mutex); 1520 schedule(); 1521 remove_wait_queue(&chip->wq, &wait); 1522 timeo = jiffies + (HZ / 2); /* FIXME */ 1523 mutex_lock(&chip->mutex); 1524 continue; 1525 } 1526 1527 if (time_after(jiffies, timeo) && !chip_ready(map, adr)) 1528 break; 1529 1530 if (chip_ready(map, adr)) { 1531 xip_enable(map, chip, adr); 1532 goto op_done; 1533 } 1534 1535 /* Latency issues. Drop the lock, wait a while and retry */ 1536 UDELAY(map, chip, adr, 1); 1537 } 1538 1539 /* 1540 * Recovery from write-buffer programming failures requires 1541 * the write-to-buffer-reset sequence. Since the last part 1542 * of the sequence also works as a normal reset, we can run 1543 * the same commands regardless of why we are here. 1544 * See e.g. 1545 * http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf 1546 */ 1547 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 1548 cfi->device_type, NULL); 1549 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 1550 cfi->device_type, NULL); 1551 cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi, 1552 cfi->device_type, NULL); 1553 xip_enable(map, chip, adr); 1554 /* FIXME - should have reset delay before continuing */ 1555 1556 printk(KERN_WARNING "MTD %s(): software timeout\n", 1557 __func__ ); 1558 1559 ret = -EIO; 1560 op_done: 1561 chip->state = FL_READY; 1562 DISABLE_VPP(map); 1563 put_chip(map, chip, adr); 1564 mutex_unlock(&chip->mutex); 1565 1566 return ret; 1567 } 1568 1569 1570 static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len, 1571 size_t *retlen, const u_char *buf) 1572 { 1573 struct map_info *map = mtd->priv; 1574 struct cfi_private *cfi = map->fldrv_priv; 1575 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; 1576 int ret = 0; 1577 int chipnum; 1578 unsigned long ofs; 1579 1580 chipnum = to >> cfi->chipshift; 1581 ofs = to - (chipnum << cfi->chipshift); 1582 1583 /* If it's not bus-aligned, do the first word write */ 1584 if (ofs & (map_bankwidth(map)-1)) { 1585 size_t local_len = (-ofs)&(map_bankwidth(map)-1); 1586 if (local_len > len) 1587 local_len = len; 1588 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift), 1589 local_len, retlen, buf); 1590 if (ret) 1591 return ret; 1592 ofs += local_len; 1593 buf += local_len; 1594 len -= local_len; 1595 1596 if (ofs >> cfi->chipshift) { 1597 chipnum ++; 1598 ofs = 0; 1599 if (chipnum == cfi->numchips) 1600 return 0; 1601 } 1602 } 1603 1604 /* Write buffer is worth it only if more than one word to write... */ 1605 while (len >= map_bankwidth(map) * 2) { 1606 /* We must not cross write block boundaries */ 1607 int size = wbufsize - (ofs & (wbufsize-1)); 1608 1609 if (size > len) 1610 size = len; 1611 if (size % map_bankwidth(map)) 1612 size -= size % map_bankwidth(map); 1613 1614 ret = do_write_buffer(map, &cfi->chips[chipnum], 1615 ofs, buf, size); 1616 if (ret) 1617 return ret; 1618 1619 ofs += size; 1620 buf += size; 1621 (*retlen) += size; 1622 len -= size; 1623 1624 if (ofs >> cfi->chipshift) { 1625 chipnum ++; 1626 ofs = 0; 1627 if (chipnum == cfi->numchips) 1628 return 0; 1629 } 1630 } 1631 1632 if (len) { 1633 size_t retlen_dregs = 0; 1634 1635 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift), 1636 len, &retlen_dregs, buf); 1637 1638 *retlen += retlen_dregs; 1639 return ret; 1640 } 1641 1642 return 0; 1643 } 1644 1645 /* 1646 * Wait for the flash chip to become ready to write data 1647 * 1648 * This is only called during the panic_write() path. When panic_write() 1649 * is called, the kernel is in the process of a panic, and will soon be 1650 * dead. Therefore we don't take any locks, and attempt to get access 1651 * to the chip as soon as possible. 1652 */ 1653 static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip, 1654 unsigned long adr) 1655 { 1656 struct cfi_private *cfi = map->fldrv_priv; 1657 int retries = 10; 1658 int i; 1659 1660 /* 1661 * If the driver thinks the chip is idle, and no toggle bits 1662 * are changing, then the chip is actually idle for sure. 1663 */ 1664 if (chip->state == FL_READY && chip_ready(map, adr)) 1665 return 0; 1666 1667 /* 1668 * Try several times to reset the chip and then wait for it 1669 * to become idle. The upper limit of a few milliseconds of 1670 * delay isn't a big problem: the kernel is dying anyway. It 1671 * is more important to save the messages. 1672 */ 1673 while (retries > 0) { 1674 const unsigned long timeo = (HZ / 1000) + 1; 1675 1676 /* send the reset command */ 1677 map_write(map, CMD(0xF0), chip->start); 1678 1679 /* wait for the chip to become ready */ 1680 for (i = 0; i < jiffies_to_usecs(timeo); i++) { 1681 if (chip_ready(map, adr)) 1682 return 0; 1683 1684 udelay(1); 1685 } 1686 } 1687 1688 /* the chip never became ready */ 1689 return -EBUSY; 1690 } 1691 1692 /* 1693 * Write out one word of data to a single flash chip during a kernel panic 1694 * 1695 * This is only called during the panic_write() path. When panic_write() 1696 * is called, the kernel is in the process of a panic, and will soon be 1697 * dead. Therefore we don't take any locks, and attempt to get access 1698 * to the chip as soon as possible. 1699 * 1700 * The implementation of this routine is intentionally similar to 1701 * do_write_oneword(), in order to ease code maintenance. 1702 */ 1703 static int do_panic_write_oneword(struct map_info *map, struct flchip *chip, 1704 unsigned long adr, map_word datum) 1705 { 1706 const unsigned long uWriteTimeout = (HZ / 1000) + 1; 1707 struct cfi_private *cfi = map->fldrv_priv; 1708 int retry_cnt = 0; 1709 map_word oldd; 1710 int ret = 0; 1711 int i; 1712 1713 adr += chip->start; 1714 1715 ret = cfi_amdstd_panic_wait(map, chip, adr); 1716 if (ret) 1717 return ret; 1718 1719 pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n", 1720 __func__, adr, datum.x[0]); 1721 1722 /* 1723 * Check for a NOP for the case when the datum to write is already 1724 * present - it saves time and works around buggy chips that corrupt 1725 * data at other locations when 0xff is written to a location that 1726 * already contains 0xff. 1727 */ 1728 oldd = map_read(map, adr); 1729 if (map_word_equal(map, oldd, datum)) { 1730 pr_debug("MTD %s(): NOP\n", __func__); 1731 goto op_done; 1732 } 1733 1734 ENABLE_VPP(map); 1735 1736 retry: 1737 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1738 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 1739 cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1740 map_write(map, datum, adr); 1741 1742 for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) { 1743 if (chip_ready(map, adr)) 1744 break; 1745 1746 udelay(1); 1747 } 1748 1749 if (!chip_good(map, adr, datum)) { 1750 /* reset on all failures. */ 1751 map_write(map, CMD(0xF0), chip->start); 1752 /* FIXME - should have reset delay before continuing */ 1753 1754 if (++retry_cnt <= MAX_WORD_RETRIES) 1755 goto retry; 1756 1757 ret = -EIO; 1758 } 1759 1760 op_done: 1761 DISABLE_VPP(map); 1762 return ret; 1763 } 1764 1765 /* 1766 * Write out some data during a kernel panic 1767 * 1768 * This is used by the mtdoops driver to save the dying messages from a 1769 * kernel which has panic'd. 1770 * 1771 * This routine ignores all of the locking used throughout the rest of the 1772 * driver, in order to ensure that the data gets written out no matter what 1773 * state this driver (and the flash chip itself) was in when the kernel crashed. 1774 * 1775 * The implementation of this routine is intentionally similar to 1776 * cfi_amdstd_write_words(), in order to ease code maintenance. 1777 */ 1778 static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, 1779 size_t *retlen, const u_char *buf) 1780 { 1781 struct map_info *map = mtd->priv; 1782 struct cfi_private *cfi = map->fldrv_priv; 1783 unsigned long ofs, chipstart; 1784 int ret = 0; 1785 int chipnum; 1786 1787 chipnum = to >> cfi->chipshift; 1788 ofs = to - (chipnum << cfi->chipshift); 1789 chipstart = cfi->chips[chipnum].start; 1790 1791 /* If it's not bus aligned, do the first byte write */ 1792 if (ofs & (map_bankwidth(map) - 1)) { 1793 unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1); 1794 int i = ofs - bus_ofs; 1795 int n = 0; 1796 map_word tmp_buf; 1797 1798 ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs); 1799 if (ret) 1800 return ret; 1801 1802 /* Load 'tmp_buf' with old contents of flash */ 1803 tmp_buf = map_read(map, bus_ofs + chipstart); 1804 1805 /* Number of bytes to copy from buffer */ 1806 n = min_t(int, len, map_bankwidth(map) - i); 1807 1808 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n); 1809 1810 ret = do_panic_write_oneword(map, &cfi->chips[chipnum], 1811 bus_ofs, tmp_buf); 1812 if (ret) 1813 return ret; 1814 1815 ofs += n; 1816 buf += n; 1817 (*retlen) += n; 1818 len -= n; 1819 1820 if (ofs >> cfi->chipshift) { 1821 chipnum++; 1822 ofs = 0; 1823 if (chipnum == cfi->numchips) 1824 return 0; 1825 } 1826 } 1827 1828 /* We are now aligned, write as much as possible */ 1829 while (len >= map_bankwidth(map)) { 1830 map_word datum; 1831 1832 datum = map_word_load(map, buf); 1833 1834 ret = do_panic_write_oneword(map, &cfi->chips[chipnum], 1835 ofs, datum); 1836 if (ret) 1837 return ret; 1838 1839 ofs += map_bankwidth(map); 1840 buf += map_bankwidth(map); 1841 (*retlen) += map_bankwidth(map); 1842 len -= map_bankwidth(map); 1843 1844 if (ofs >> cfi->chipshift) { 1845 chipnum++; 1846 ofs = 0; 1847 if (chipnum == cfi->numchips) 1848 return 0; 1849 1850 chipstart = cfi->chips[chipnum].start; 1851 } 1852 } 1853 1854 /* Write the trailing bytes if any */ 1855 if (len & (map_bankwidth(map) - 1)) { 1856 map_word tmp_buf; 1857 1858 ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs); 1859 if (ret) 1860 return ret; 1861 1862 tmp_buf = map_read(map, ofs + chipstart); 1863 1864 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len); 1865 1866 ret = do_panic_write_oneword(map, &cfi->chips[chipnum], 1867 ofs, tmp_buf); 1868 if (ret) 1869 return ret; 1870 1871 (*retlen) += len; 1872 } 1873 1874 return 0; 1875 } 1876 1877 1878 /* 1879 * Handle devices with one erase region, that only implement 1880 * the chip erase command. 1881 */ 1882 static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) 1883 { 1884 struct cfi_private *cfi = map->fldrv_priv; 1885 unsigned long timeo = jiffies + HZ; 1886 unsigned long int adr; 1887 DECLARE_WAITQUEUE(wait, current); 1888 int ret = 0; 1889 1890 adr = cfi->addr_unlock1; 1891 1892 mutex_lock(&chip->mutex); 1893 ret = get_chip(map, chip, adr, FL_WRITING); 1894 if (ret) { 1895 mutex_unlock(&chip->mutex); 1896 return ret; 1897 } 1898 1899 pr_debug("MTD %s(): ERASE 0x%.8lx\n", 1900 __func__, chip->start ); 1901 1902 XIP_INVAL_CACHED_RANGE(map, adr, map->size); 1903 ENABLE_VPP(map); 1904 xip_disable(map, chip, adr); 1905 1906 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1907 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 1908 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1909 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1910 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 1911 cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1912 1913 chip->state = FL_ERASING; 1914 chip->erase_suspended = 0; 1915 chip->in_progress_block_addr = adr; 1916 1917 INVALIDATE_CACHE_UDELAY(map, chip, 1918 adr, map->size, 1919 chip->erase_time*500); 1920 1921 timeo = jiffies + (HZ*20); 1922 1923 for (;;) { 1924 if (chip->state != FL_ERASING) { 1925 /* Someone's suspended the erase. Sleep */ 1926 set_current_state(TASK_UNINTERRUPTIBLE); 1927 add_wait_queue(&chip->wq, &wait); 1928 mutex_unlock(&chip->mutex); 1929 schedule(); 1930 remove_wait_queue(&chip->wq, &wait); 1931 mutex_lock(&chip->mutex); 1932 continue; 1933 } 1934 if (chip->erase_suspended) { 1935 /* This erase was suspended and resumed. 1936 Adjust the timeout */ 1937 timeo = jiffies + (HZ*20); /* FIXME */ 1938 chip->erase_suspended = 0; 1939 } 1940 1941 if (chip_ready(map, adr)) 1942 break; 1943 1944 if (time_after(jiffies, timeo)) { 1945 printk(KERN_WARNING "MTD %s(): software timeout\n", 1946 __func__ ); 1947 break; 1948 } 1949 1950 /* Latency issues. Drop the lock, wait a while and retry */ 1951 UDELAY(map, chip, adr, 1000000/HZ); 1952 } 1953 /* Did we succeed? */ 1954 if (!chip_good(map, adr, map_word_ff(map))) { 1955 /* reset on all failures. */ 1956 map_write( map, CMD(0xF0), chip->start ); 1957 /* FIXME - should have reset delay before continuing */ 1958 1959 ret = -EIO; 1960 } 1961 1962 chip->state = FL_READY; 1963 xip_enable(map, chip, adr); 1964 DISABLE_VPP(map); 1965 put_chip(map, chip, adr); 1966 mutex_unlock(&chip->mutex); 1967 1968 return ret; 1969 } 1970 1971 1972 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk) 1973 { 1974 struct cfi_private *cfi = map->fldrv_priv; 1975 unsigned long timeo = jiffies + HZ; 1976 DECLARE_WAITQUEUE(wait, current); 1977 int ret = 0; 1978 1979 adr += chip->start; 1980 1981 mutex_lock(&chip->mutex); 1982 ret = get_chip(map, chip, adr, FL_ERASING); 1983 if (ret) { 1984 mutex_unlock(&chip->mutex); 1985 return ret; 1986 } 1987 1988 pr_debug("MTD %s(): ERASE 0x%.8lx\n", 1989 __func__, adr ); 1990 1991 XIP_INVAL_CACHED_RANGE(map, adr, len); 1992 ENABLE_VPP(map); 1993 xip_disable(map, chip, adr); 1994 1995 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1996 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 1997 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1998 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1999 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 2000 map_write(map, cfi->sector_erase_cmd, adr); 2001 2002 chip->state = FL_ERASING; 2003 chip->erase_suspended = 0; 2004 chip->in_progress_block_addr = adr; 2005 2006 INVALIDATE_CACHE_UDELAY(map, chip, 2007 adr, len, 2008 chip->erase_time*500); 2009 2010 timeo = jiffies + (HZ*20); 2011 2012 for (;;) { 2013 if (chip->state != FL_ERASING) { 2014 /* Someone's suspended the erase. Sleep */ 2015 set_current_state(TASK_UNINTERRUPTIBLE); 2016 add_wait_queue(&chip->wq, &wait); 2017 mutex_unlock(&chip->mutex); 2018 schedule(); 2019 remove_wait_queue(&chip->wq, &wait); 2020 mutex_lock(&chip->mutex); 2021 continue; 2022 } 2023 if (chip->erase_suspended) { 2024 /* This erase was suspended and resumed. 2025 Adjust the timeout */ 2026 timeo = jiffies + (HZ*20); /* FIXME */ 2027 chip->erase_suspended = 0; 2028 } 2029 2030 if (chip_ready(map, adr)) { 2031 xip_enable(map, chip, adr); 2032 break; 2033 } 2034 2035 if (time_after(jiffies, timeo)) { 2036 xip_enable(map, chip, adr); 2037 printk(KERN_WARNING "MTD %s(): software timeout\n", 2038 __func__ ); 2039 break; 2040 } 2041 2042 /* Latency issues. Drop the lock, wait a while and retry */ 2043 UDELAY(map, chip, adr, 1000000/HZ); 2044 } 2045 /* Did we succeed? */ 2046 if (!chip_good(map, adr, map_word_ff(map))) { 2047 /* reset on all failures. */ 2048 map_write( map, CMD(0xF0), chip->start ); 2049 /* FIXME - should have reset delay before continuing */ 2050 2051 ret = -EIO; 2052 } 2053 2054 chip->state = FL_READY; 2055 DISABLE_VPP(map); 2056 put_chip(map, chip, adr); 2057 mutex_unlock(&chip->mutex); 2058 return ret; 2059 } 2060 2061 2062 static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr) 2063 { 2064 unsigned long ofs, len; 2065 int ret; 2066 2067 ofs = instr->addr; 2068 len = instr->len; 2069 2070 ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL); 2071 if (ret) 2072 return ret; 2073 2074 instr->state = MTD_ERASE_DONE; 2075 mtd_erase_callback(instr); 2076 2077 return 0; 2078 } 2079 2080 2081 static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr) 2082 { 2083 struct map_info *map = mtd->priv; 2084 struct cfi_private *cfi = map->fldrv_priv; 2085 int ret = 0; 2086 2087 if (instr->addr != 0) 2088 return -EINVAL; 2089 2090 if (instr->len != mtd->size) 2091 return -EINVAL; 2092 2093 ret = do_erase_chip(map, &cfi->chips[0]); 2094 if (ret) 2095 return ret; 2096 2097 instr->state = MTD_ERASE_DONE; 2098 mtd_erase_callback(instr); 2099 2100 return 0; 2101 } 2102 2103 static int do_atmel_lock(struct map_info *map, struct flchip *chip, 2104 unsigned long adr, int len, void *thunk) 2105 { 2106 struct cfi_private *cfi = map->fldrv_priv; 2107 int ret; 2108 2109 mutex_lock(&chip->mutex); 2110 ret = get_chip(map, chip, adr + chip->start, FL_LOCKING); 2111 if (ret) 2112 goto out_unlock; 2113 chip->state = FL_LOCKING; 2114 2115 pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len); 2116 2117 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 2118 cfi->device_type, NULL); 2119 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 2120 cfi->device_type, NULL); 2121 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, 2122 cfi->device_type, NULL); 2123 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 2124 cfi->device_type, NULL); 2125 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 2126 cfi->device_type, NULL); 2127 map_write(map, CMD(0x40), chip->start + adr); 2128 2129 chip->state = FL_READY; 2130 put_chip(map, chip, adr + chip->start); 2131 ret = 0; 2132 2133 out_unlock: 2134 mutex_unlock(&chip->mutex); 2135 return ret; 2136 } 2137 2138 static int do_atmel_unlock(struct map_info *map, struct flchip *chip, 2139 unsigned long adr, int len, void *thunk) 2140 { 2141 struct cfi_private *cfi = map->fldrv_priv; 2142 int ret; 2143 2144 mutex_lock(&chip->mutex); 2145 ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING); 2146 if (ret) 2147 goto out_unlock; 2148 chip->state = FL_UNLOCKING; 2149 2150 pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len); 2151 2152 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 2153 cfi->device_type, NULL); 2154 map_write(map, CMD(0x70), adr); 2155 2156 chip->state = FL_READY; 2157 put_chip(map, chip, adr + chip->start); 2158 ret = 0; 2159 2160 out_unlock: 2161 mutex_unlock(&chip->mutex); 2162 return ret; 2163 } 2164 2165 static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 2166 { 2167 return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL); 2168 } 2169 2170 static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 2171 { 2172 return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL); 2173 } 2174 2175 2176 static void cfi_amdstd_sync (struct mtd_info *mtd) 2177 { 2178 struct map_info *map = mtd->priv; 2179 struct cfi_private *cfi = map->fldrv_priv; 2180 int i; 2181 struct flchip *chip; 2182 int ret = 0; 2183 DECLARE_WAITQUEUE(wait, current); 2184 2185 for (i=0; !ret && i<cfi->numchips; i++) { 2186 chip = &cfi->chips[i]; 2187 2188 retry: 2189 mutex_lock(&chip->mutex); 2190 2191 switch(chip->state) { 2192 case FL_READY: 2193 case FL_STATUS: 2194 case FL_CFI_QUERY: 2195 case FL_JEDEC_QUERY: 2196 chip->oldstate = chip->state; 2197 chip->state = FL_SYNCING; 2198 /* No need to wake_up() on this state change - 2199 * as the whole point is that nobody can do anything 2200 * with the chip now anyway. 2201 */ 2202 case FL_SYNCING: 2203 mutex_unlock(&chip->mutex); 2204 break; 2205 2206 default: 2207 /* Not an idle state */ 2208 set_current_state(TASK_UNINTERRUPTIBLE); 2209 add_wait_queue(&chip->wq, &wait); 2210 2211 mutex_unlock(&chip->mutex); 2212 2213 schedule(); 2214 2215 remove_wait_queue(&chip->wq, &wait); 2216 2217 goto retry; 2218 } 2219 } 2220 2221 /* Unlock the chips again */ 2222 2223 for (i--; i >=0; i--) { 2224 chip = &cfi->chips[i]; 2225 2226 mutex_lock(&chip->mutex); 2227 2228 if (chip->state == FL_SYNCING) { 2229 chip->state = chip->oldstate; 2230 wake_up(&chip->wq); 2231 } 2232 mutex_unlock(&chip->mutex); 2233 } 2234 } 2235 2236 2237 static int cfi_amdstd_suspend(struct mtd_info *mtd) 2238 { 2239 struct map_info *map = mtd->priv; 2240 struct cfi_private *cfi = map->fldrv_priv; 2241 int i; 2242 struct flchip *chip; 2243 int ret = 0; 2244 2245 for (i=0; !ret && i<cfi->numchips; i++) { 2246 chip = &cfi->chips[i]; 2247 2248 mutex_lock(&chip->mutex); 2249 2250 switch(chip->state) { 2251 case FL_READY: 2252 case FL_STATUS: 2253 case FL_CFI_QUERY: 2254 case FL_JEDEC_QUERY: 2255 chip->oldstate = chip->state; 2256 chip->state = FL_PM_SUSPENDED; 2257 /* No need to wake_up() on this state change - 2258 * as the whole point is that nobody can do anything 2259 * with the chip now anyway. 2260 */ 2261 case FL_PM_SUSPENDED: 2262 break; 2263 2264 default: 2265 ret = -EAGAIN; 2266 break; 2267 } 2268 mutex_unlock(&chip->mutex); 2269 } 2270 2271 /* Unlock the chips again */ 2272 2273 if (ret) { 2274 for (i--; i >=0; i--) { 2275 chip = &cfi->chips[i]; 2276 2277 mutex_lock(&chip->mutex); 2278 2279 if (chip->state == FL_PM_SUSPENDED) { 2280 chip->state = chip->oldstate; 2281 wake_up(&chip->wq); 2282 } 2283 mutex_unlock(&chip->mutex); 2284 } 2285 } 2286 2287 return ret; 2288 } 2289 2290 2291 static void cfi_amdstd_resume(struct mtd_info *mtd) 2292 { 2293 struct map_info *map = mtd->priv; 2294 struct cfi_private *cfi = map->fldrv_priv; 2295 int i; 2296 struct flchip *chip; 2297 2298 for (i=0; i<cfi->numchips; i++) { 2299 2300 chip = &cfi->chips[i]; 2301 2302 mutex_lock(&chip->mutex); 2303 2304 if (chip->state == FL_PM_SUSPENDED) { 2305 chip->state = FL_READY; 2306 map_write(map, CMD(0xF0), chip->start); 2307 wake_up(&chip->wq); 2308 } 2309 else 2310 printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n"); 2311 2312 mutex_unlock(&chip->mutex); 2313 } 2314 } 2315 2316 2317 /* 2318 * Ensure that the flash device is put back into read array mode before 2319 * unloading the driver or rebooting. On some systems, rebooting while 2320 * the flash is in query/program/erase mode will prevent the CPU from 2321 * fetching the bootloader code, requiring a hard reset or power cycle. 2322 */ 2323 static int cfi_amdstd_reset(struct mtd_info *mtd) 2324 { 2325 struct map_info *map = mtd->priv; 2326 struct cfi_private *cfi = map->fldrv_priv; 2327 int i, ret; 2328 struct flchip *chip; 2329 2330 for (i = 0; i < cfi->numchips; i++) { 2331 2332 chip = &cfi->chips[i]; 2333 2334 mutex_lock(&chip->mutex); 2335 2336 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN); 2337 if (!ret) { 2338 map_write(map, CMD(0xF0), chip->start); 2339 chip->state = FL_SHUTDOWN; 2340 put_chip(map, chip, chip->start); 2341 } 2342 2343 mutex_unlock(&chip->mutex); 2344 } 2345 2346 return 0; 2347 } 2348 2349 2350 static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val, 2351 void *v) 2352 { 2353 struct mtd_info *mtd; 2354 2355 mtd = container_of(nb, struct mtd_info, reboot_notifier); 2356 cfi_amdstd_reset(mtd); 2357 return NOTIFY_DONE; 2358 } 2359 2360 2361 static void cfi_amdstd_destroy(struct mtd_info *mtd) 2362 { 2363 struct map_info *map = mtd->priv; 2364 struct cfi_private *cfi = map->fldrv_priv; 2365 2366 cfi_amdstd_reset(mtd); 2367 unregister_reboot_notifier(&mtd->reboot_notifier); 2368 kfree(cfi->cmdset_priv); 2369 kfree(cfi->cfiq); 2370 kfree(cfi); 2371 kfree(mtd->eraseregions); 2372 } 2373 2374 MODULE_LICENSE("GPL"); 2375 MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al."); 2376 MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips"); 2377 MODULE_ALIAS("cfi_cmdset_0006"); 2378 MODULE_ALIAS("cfi_cmdset_0701"); 2379