1 /* 2 * Common Flash Interface support: 3 * AMD & Fujitsu Standard Vendor Command Set (ID 0x0002) 4 * 5 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp> 6 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com> 7 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com> 8 * 9 * 2_by_8 routines added by Simon Munton 10 * 11 * 4_by_16 work by Carolyn J. Smith 12 * 13 * XIP support hooks by Vitaly Wool (based on code for Intel flash 14 * by Nicolas Pitre) 15 * 16 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0 17 * 18 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com 19 * 20 * This code is GPL 21 */ 22 23 #include <linux/module.h> 24 #include <linux/types.h> 25 #include <linux/kernel.h> 26 #include <linux/sched.h> 27 #include <asm/io.h> 28 #include <asm/byteorder.h> 29 30 #include <linux/errno.h> 31 #include <linux/slab.h> 32 #include <linux/delay.h> 33 #include <linux/interrupt.h> 34 #include <linux/reboot.h> 35 #include <linux/of.h> 36 #include <linux/of_platform.h> 37 #include <linux/mtd/map.h> 38 #include <linux/mtd/mtd.h> 39 #include <linux/mtd/cfi.h> 40 #include <linux/mtd/xip.h> 41 42 #define AMD_BOOTLOC_BUG 43 #define FORCE_WORD_WRITE 0 44 45 #define MAX_WORD_RETRIES 3 46 47 #define SST49LF004B 0x0060 48 #define SST49LF040B 0x0050 49 #define SST49LF008A 0x005a 50 #define AT49BV6416 0x00d6 51 52 static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); 53 static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); 54 static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); 55 static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *); 56 static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *); 57 static void cfi_amdstd_sync (struct mtd_info *); 58 static int cfi_amdstd_suspend (struct mtd_info *); 59 static void cfi_amdstd_resume (struct mtd_info *); 60 static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *); 61 static int cfi_amdstd_get_fact_prot_info(struct mtd_info *, size_t, 62 size_t *, struct otp_info *); 63 static int cfi_amdstd_get_user_prot_info(struct mtd_info *, size_t, 64 size_t *, struct otp_info *); 65 static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); 66 static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *, loff_t, size_t, 67 size_t *, u_char *); 68 static int cfi_amdstd_read_user_prot_reg(struct mtd_info *, loff_t, size_t, 69 size_t *, u_char *); 70 static int cfi_amdstd_write_user_prot_reg(struct mtd_info *, loff_t, size_t, 71 size_t *, u_char *); 72 static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *, loff_t, size_t); 73 74 static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, 75 size_t *retlen, const u_char *buf); 76 77 static void cfi_amdstd_destroy(struct mtd_info *); 78 79 struct mtd_info *cfi_cmdset_0002(struct map_info *, int); 80 static struct mtd_info *cfi_amdstd_setup (struct mtd_info *); 81 82 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode); 83 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr); 84 #include "fwh_lock.h" 85 86 static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); 87 static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); 88 89 static int cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); 90 static int 
cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); 91 static int cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len); 92 93 static struct mtd_chip_driver cfi_amdstd_chipdrv = { 94 .probe = NULL, /* Not usable directly */ 95 .destroy = cfi_amdstd_destroy, 96 .name = "cfi_cmdset_0002", 97 .module = THIS_MODULE 98 }; 99 100 101 /* #define DEBUG_CFI_FEATURES */ 102 103 104 #ifdef DEBUG_CFI_FEATURES 105 static void cfi_tell_features(struct cfi_pri_amdstd *extp) 106 { 107 const char* erase_suspend[3] = { 108 "Not supported", "Read only", "Read/write" 109 }; 110 const char* top_bottom[6] = { 111 "No WP", "8x8KiB sectors at top & bottom, no WP", 112 "Bottom boot", "Top boot", 113 "Uniform, Bottom WP", "Uniform, Top WP" 114 }; 115 116 printk(" Silicon revision: %d\n", extp->SiliconRevision >> 1); 117 printk(" Address sensitive unlock: %s\n", 118 (extp->SiliconRevision & 1) ? "Not required" : "Required"); 119 120 if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend)) 121 printk(" Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]); 122 else 123 printk(" Erase Suspend: Unknown value %d\n", extp->EraseSuspend); 124 125 if (extp->BlkProt == 0) 126 printk(" Block protection: Not supported\n"); 127 else 128 printk(" Block protection: %d sectors per group\n", extp->BlkProt); 129 130 131 printk(" Temporary block unprotect: %s\n", 132 extp->TmpBlkUnprotect ? "Supported" : "Not supported"); 133 printk(" Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot); 134 printk(" Number of simultaneous operations: %d\n", extp->SimultaneousOps); 135 printk(" Burst mode: %s\n", 136 extp->BurstMode ? "Supported" : "Not supported"); 137 if (extp->PageMode == 0) 138 printk(" Page mode: Not supported\n"); 139 else 140 printk(" Page mode: %d word page\n", extp->PageMode << 2); 141 142 printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n", 143 extp->VppMin >> 4, extp->VppMin & 0xf); 144 printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n", 145 extp->VppMax >> 4, extp->VppMax & 0xf); 146 147 if (extp->TopBottom < ARRAY_SIZE(top_bottom)) 148 printk(" Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]); 149 else 150 printk(" Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom); 151 } 152 #endif 153 154 #ifdef AMD_BOOTLOC_BUG 155 /* Wheee. Bring me the head of someone at AMD. */ 156 static void fixup_amd_bootblock(struct mtd_info *mtd) 157 { 158 struct map_info *map = mtd->priv; 159 struct cfi_private *cfi = map->fldrv_priv; 160 struct cfi_pri_amdstd *extp = cfi->cmdset_priv; 161 __u8 major = extp->MajorVersion; 162 __u8 minor = extp->MinorVersion; 163 164 if (((major << 8) | minor) < 0x3131) { 165 /* CFI version 1.0 => don't trust bootloc */ 166 167 pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n", 168 map->name, cfi->mfr, cfi->id); 169 170 /* AFAICS all 29LV400 with a bottom boot block have a device ID 171 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode. 172 * These were badly detected as they have the 0x80 bit set 173 * so treat them as a special case. 174 */ 175 if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) && 176 177 /* Macronix added CFI to their 2nd generation 178 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD, 179 * Fujitsu, Spansion, EON, ESI and older Macronix) 180 * has CFI. 181 * 182 * Therefore also check the manufacturer. 183 * This reduces the risk of false detection due to 184 * the 8-bit device ID. 
185 */ 186 (cfi->mfr == CFI_MFR_MACRONIX)) { 187 pr_debug("%s: Macronix MX29LV400C with bottom boot block" 188 " detected\n", map->name); 189 extp->TopBottom = 2; /* bottom boot */ 190 } else 191 if (cfi->id & 0x80) { 192 printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id); 193 extp->TopBottom = 3; /* top boot */ 194 } else { 195 extp->TopBottom = 2; /* bottom boot */ 196 } 197 198 pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;" 199 " deduced %s from Device ID\n", map->name, major, minor, 200 extp->TopBottom == 2 ? "bottom" : "top"); 201 } 202 } 203 #endif 204 205 static void fixup_use_write_buffers(struct mtd_info *mtd) 206 { 207 struct map_info *map = mtd->priv; 208 struct cfi_private *cfi = map->fldrv_priv; 209 if (cfi->cfiq->BufWriteTimeoutTyp) { 210 pr_debug("Using buffer write method\n" ); 211 mtd->_write = cfi_amdstd_write_buffers; 212 } 213 } 214 215 /* Atmel chips don't use the same PRI format as AMD chips */ 216 static void fixup_convert_atmel_pri(struct mtd_info *mtd) 217 { 218 struct map_info *map = mtd->priv; 219 struct cfi_private *cfi = map->fldrv_priv; 220 struct cfi_pri_amdstd *extp = cfi->cmdset_priv; 221 struct cfi_pri_atmel atmel_pri; 222 223 memcpy(&atmel_pri, extp, sizeof(atmel_pri)); 224 memset((char *)extp + 5, 0, sizeof(*extp) - 5); 225 226 if (atmel_pri.Features & 0x02) 227 extp->EraseSuspend = 2; 228 229 /* Some chips got it backwards... */ 230 if (cfi->id == AT49BV6416) { 231 if (atmel_pri.BottomBoot) 232 extp->TopBottom = 3; 233 else 234 extp->TopBottom = 2; 235 } else { 236 if (atmel_pri.BottomBoot) 237 extp->TopBottom = 2; 238 else 239 extp->TopBottom = 3; 240 } 241 242 /* burst write mode not supported */ 243 cfi->cfiq->BufWriteTimeoutTyp = 0; 244 cfi->cfiq->BufWriteTimeoutMax = 0; 245 } 246 247 static void fixup_use_secsi(struct mtd_info *mtd) 248 { 249 /* Setup for chips with a secsi area */ 250 mtd->_read_user_prot_reg = cfi_amdstd_secsi_read; 251 mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read; 252 } 253 254 static void fixup_use_erase_chip(struct mtd_info *mtd) 255 { 256 struct map_info *map = mtd->priv; 257 struct cfi_private *cfi = map->fldrv_priv; 258 if ((cfi->cfiq->NumEraseRegions == 1) && 259 ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) { 260 mtd->_erase = cfi_amdstd_erase_chip; 261 } 262 263 } 264 265 /* 266 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors 267 * locked by default. 268 */ 269 static void fixup_use_atmel_lock(struct mtd_info *mtd) 270 { 271 mtd->_lock = cfi_atmel_lock; 272 mtd->_unlock = cfi_atmel_unlock; 273 mtd->flags |= MTD_POWERUP_LOCK; 274 } 275 276 static void fixup_old_sst_eraseregion(struct mtd_info *mtd) 277 { 278 struct map_info *map = mtd->priv; 279 struct cfi_private *cfi = map->fldrv_priv; 280 281 /* 282 * These flashes report two separate eraseblock regions based on the 283 * sector_erase-size and block_erase-size, although they both operate on the 284 * same memory. This is not allowed according to CFI, so we just pick the 285 * sector_erase-size. 
286 */ 287 cfi->cfiq->NumEraseRegions = 1; 288 } 289 290 static void fixup_sst39vf(struct mtd_info *mtd) 291 { 292 struct map_info *map = mtd->priv; 293 struct cfi_private *cfi = map->fldrv_priv; 294 295 fixup_old_sst_eraseregion(mtd); 296 297 cfi->addr_unlock1 = 0x5555; 298 cfi->addr_unlock2 = 0x2AAA; 299 } 300 301 static void fixup_sst39vf_rev_b(struct mtd_info *mtd) 302 { 303 struct map_info *map = mtd->priv; 304 struct cfi_private *cfi = map->fldrv_priv; 305 306 fixup_old_sst_eraseregion(mtd); 307 308 cfi->addr_unlock1 = 0x555; 309 cfi->addr_unlock2 = 0x2AA; 310 311 cfi->sector_erase_cmd = CMD(0x50); 312 } 313 314 static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd) 315 { 316 struct map_info *map = mtd->priv; 317 struct cfi_private *cfi = map->fldrv_priv; 318 319 fixup_sst39vf_rev_b(mtd); 320 321 /* 322 * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where 323 * it should report a size of 8KBytes (0x0020*256). 324 */ 325 cfi->cfiq->EraseRegionInfo[0] = 0x002003ff; 326 pr_warning("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n", mtd->name); 327 } 328 329 static void fixup_s29gl064n_sectors(struct mtd_info *mtd) 330 { 331 struct map_info *map = mtd->priv; 332 struct cfi_private *cfi = map->fldrv_priv; 333 334 if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) { 335 cfi->cfiq->EraseRegionInfo[0] |= 0x0040; 336 pr_warning("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n", mtd->name); 337 } 338 } 339 340 static void fixup_s29gl032n_sectors(struct mtd_info *mtd) 341 { 342 struct map_info *map = mtd->priv; 343 struct cfi_private *cfi = map->fldrv_priv; 344 345 if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) { 346 cfi->cfiq->EraseRegionInfo[1] &= ~0x0040; 347 pr_warning("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n", mtd->name); 348 } 349 } 350 351 static void fixup_s29ns512p_sectors(struct mtd_info *mtd) 352 { 353 struct map_info *map = mtd->priv; 354 struct cfi_private *cfi = map->fldrv_priv; 355 356 /* 357 * S29NS512P flash uses more than 8bits to report number of sectors, 358 * which is not permitted by CFI. 
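 * For reference, this uses the standard CFI erase-region encoding that
 * cfi_amdstd_setup() decodes below: bits 15:0 hold the number of erase
 * blocks minus one, bits 31:16 the block size in units of 256 bytes.
 * The replacement value therefore reads as
 *
 *     0x020001ff -> (0x01ff + 1) = 512 sectors of 0x0200 * 256 = 128 KiB
 *
 * which matches the 512 Mbit part.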
359 */ 360 cfi->cfiq->EraseRegionInfo[0] = 0x020001ff; 361 pr_warning("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n", mtd->name); 362 } 363 364 /* Used to fix CFI-Tables of chips without Extended Query Tables */ 365 static struct cfi_fixup cfi_nopri_fixup_table[] = { 366 { CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */ 367 { CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */ 368 { CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */ 369 { CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */ 370 { CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */ 371 { CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */ 372 { CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */ 373 { CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */ 374 { 0, 0, NULL } 375 }; 376 377 static struct cfi_fixup cfi_fixup_table[] = { 378 { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri }, 379 #ifdef AMD_BOOTLOC_BUG 380 { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock }, 381 { CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock }, 382 { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock }, 383 #endif 384 { CFI_MFR_AMD, 0x0050, fixup_use_secsi }, 385 { CFI_MFR_AMD, 0x0053, fixup_use_secsi }, 386 { CFI_MFR_AMD, 0x0055, fixup_use_secsi }, 387 { CFI_MFR_AMD, 0x0056, fixup_use_secsi }, 388 { CFI_MFR_AMD, 0x005C, fixup_use_secsi }, 389 { CFI_MFR_AMD, 0x005F, fixup_use_secsi }, 390 { CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors }, 391 { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors }, 392 { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors }, 393 { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors }, 394 { CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors }, 395 { CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */ 396 { CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */ 397 { CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */ 398 { CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */ 399 #if !FORCE_WORD_WRITE 400 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers }, 401 #endif 402 { 0, 0, NULL } 403 }; 404 static struct cfi_fixup jedec_fixup_table[] = { 405 { CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock }, 406 { CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock }, 407 { CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock }, 408 { 0, 0, NULL } 409 }; 410 411 static struct cfi_fixup fixup_table[] = { 412 /* The CFI vendor ids and the JEDEC vendor IDs appear 413 * to be common. It is like the devices id's are as 414 * well. This table is to pick all cases where 415 * we know that is the case. 416 */ 417 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip }, 418 { CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock }, 419 { 0, 0, NULL } 420 }; 421 422 423 static void cfi_fixup_major_minor(struct cfi_private *cfi, 424 struct cfi_pri_amdstd *extp) 425 { 426 if (cfi->mfr == CFI_MFR_SAMSUNG) { 427 if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') || 428 (extp->MajorVersion == '3' && extp->MinorVersion == '3')) { 429 /* 430 * Samsung K8P2815UQB and K8D6x16UxM chips 431 * report major=0 / minor=0. 432 * K8D3x16UxC chips report major=3 / minor=3. 433 */ 434 printk(KERN_NOTICE " Fixing Samsung's Amd/Fujitsu" 435 " Extended Query version to 1.%c\n", 436 extp->MinorVersion); 437 extp->MajorVersion = '1'; 438 } 439 } 440 441 /* 442 * SST 38VF640x chips report major=0xFF / minor=0xFF. 
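 * (The (cfi->id >> 4) == 0x0536 test below is a compact way of matching all
 * four 38VF640x device IDs listed in cfi_fixup_table, 0x536a..0x536d,
 * since e.g. 0x536a >> 4 == 0x0536.)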
443 */ 444 if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) { 445 extp->MajorVersion = '1'; 446 extp->MinorVersion = '0'; 447 } 448 } 449 450 static int is_m29ew(struct cfi_private *cfi) 451 { 452 if (cfi->mfr == CFI_MFR_INTEL && 453 ((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) || 454 (cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e))) 455 return 1; 456 return 0; 457 } 458 459 /* 460 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20: 461 * Some revisions of the M29EW suffer from erase suspend hang ups. In 462 * particular, it can occur when the sequence 463 * Erase Confirm -> Suspend -> Program -> Resume 464 * causes a lockup due to internal timing issues. The consequence is that the 465 * erase cannot be resumed without inserting a dummy command after programming 466 * and prior to resuming. [...] The work-around is to issue a dummy write cycle 467 * that writes an F0 command code before the RESUME command. 468 */ 469 static void cfi_fixup_m29ew_erase_suspend(struct map_info *map, 470 unsigned long adr) 471 { 472 struct cfi_private *cfi = map->fldrv_priv; 473 /* before resume, insert a dummy 0xF0 cycle for Micron M29EW devices */ 474 if (is_m29ew(cfi)) 475 map_write(map, CMD(0xF0), adr); 476 } 477 478 /* 479 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22: 480 * 481 * Some revisions of the M29EW (for example, A1 and A2 step revisions) 482 * are affected by a problem that could cause a hang up when an ERASE SUSPEND 483 * command is issued after an ERASE RESUME operation without waiting for a 484 * minimum delay. The result is that once the ERASE seems to be completed 485 * (no bits are toggling), the contents of the Flash memory block on which 486 * the erase was ongoing could be inconsistent with the expected values 487 * (typically, the array value is stuck to the 0xC0, 0xC4, 0x80, or 0x84 488 * values), causing a consequent failure of the ERASE operation. 489 * The occurrence of this issue could be high, especially when file system 490 * operations on the Flash are intensive. As a result, it is recommended 491 * that a patch be applied. Intensive file system operations can cause many 492 * calls to the garbage routine to free Flash space (also by erasing physical 493 * Flash blocks) and as a result, many consecutive SUSPEND and RESUME 494 * commands can occur. The problem disappears when a delay is inserted after 495 * the RESUME command by using the udelay() function available in Linux. 496 * The DELAY value must be tuned based on the customer's platform. 497 * The maximum value that fixes the problem in all cases is 500us. 498 * But, in our experience, a delay of 30 µs to 50 µs is sufficient 499 * in most cases. 500 * We have chosen 500µs because this latency is acceptable. 
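 * As a rough sketch, the erase-resume path in put_chip() below therefore
 * ends up looking like (adr = chip->in_progress_block_addr):
 *
 *     cfi_fixup_m29ew_erase_suspend(map, adr);     // dummy 0xF0 before RESUME
 *     map_write(map, cfi->sector_erase_cmd, adr);  // ERASE RESUME
 *     cfi_fixup_m29ew_delay_after_resume(cfi);     // udelay(500) on M29EW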
501 */ 502 static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi) 503 { 504 /* 505 * Resolving the Delay After Resume Issue see Micron TN-13-07 506 * Worst case delay must be 500µs but 30-50µs should be ok as well 507 */ 508 if (is_m29ew(cfi)) 509 cfi_udelay(500); 510 } 511 512 struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary) 513 { 514 struct cfi_private *cfi = map->fldrv_priv; 515 struct device_node __maybe_unused *np = map->device_node; 516 struct mtd_info *mtd; 517 int i; 518 519 mtd = kzalloc(sizeof(*mtd), GFP_KERNEL); 520 if (!mtd) 521 return NULL; 522 mtd->priv = map; 523 mtd->type = MTD_NORFLASH; 524 525 /* Fill in the default mtd operations */ 526 mtd->_erase = cfi_amdstd_erase_varsize; 527 mtd->_write = cfi_amdstd_write_words; 528 mtd->_read = cfi_amdstd_read; 529 mtd->_sync = cfi_amdstd_sync; 530 mtd->_suspend = cfi_amdstd_suspend; 531 mtd->_resume = cfi_amdstd_resume; 532 mtd->_read_user_prot_reg = cfi_amdstd_read_user_prot_reg; 533 mtd->_read_fact_prot_reg = cfi_amdstd_read_fact_prot_reg; 534 mtd->_get_fact_prot_info = cfi_amdstd_get_fact_prot_info; 535 mtd->_get_user_prot_info = cfi_amdstd_get_user_prot_info; 536 mtd->_write_user_prot_reg = cfi_amdstd_write_user_prot_reg; 537 mtd->_lock_user_prot_reg = cfi_amdstd_lock_user_prot_reg; 538 mtd->flags = MTD_CAP_NORFLASH; 539 mtd->name = map->name; 540 mtd->writesize = 1; 541 mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; 542 543 pr_debug("MTD %s(): write buffer size %d\n", __func__, 544 mtd->writebufsize); 545 546 mtd->_panic_write = cfi_amdstd_panic_write; 547 mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot; 548 549 if (cfi->cfi_mode==CFI_MODE_CFI){ 550 unsigned char bootloc; 551 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR; 552 struct cfi_pri_amdstd *extp; 553 554 extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu"); 555 if (extp) { 556 /* 557 * It's a real CFI chip, not one for which the probe 558 * routine faked a CFI structure. 
559 */ 560 cfi_fixup_major_minor(cfi, extp); 561 562 /* 563 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5 564 * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19 565 * http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf 566 * http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf 567 * http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf 568 */ 569 if (extp->MajorVersion != '1' || 570 (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) { 571 printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query " 572 "version %c.%c (%#02x/%#02x).\n", 573 extp->MajorVersion, extp->MinorVersion, 574 extp->MajorVersion, extp->MinorVersion); 575 kfree(extp); 576 kfree(mtd); 577 return NULL; 578 } 579 580 printk(KERN_INFO " Amd/Fujitsu Extended Query version %c.%c.\n", 581 extp->MajorVersion, extp->MinorVersion); 582 583 /* Install our own private info structure */ 584 cfi->cmdset_priv = extp; 585 586 /* Apply cfi device specific fixups */ 587 cfi_fixup(mtd, cfi_fixup_table); 588 589 #ifdef DEBUG_CFI_FEATURES 590 /* Tell the user about it in lots of lovely detail */ 591 cfi_tell_features(extp); 592 #endif 593 594 #ifdef CONFIG_OF 595 if (np && of_property_read_bool( 596 np, "use-advanced-sector-protection") 597 && extp->BlkProtUnprot == 8) { 598 printk(KERN_INFO " Advanced Sector Protection (PPB Locking) supported\n"); 599 mtd->_lock = cfi_ppb_lock; 600 mtd->_unlock = cfi_ppb_unlock; 601 mtd->_is_locked = cfi_ppb_is_locked; 602 } 603 #endif 604 605 bootloc = extp->TopBottom; 606 if ((bootloc < 2) || (bootloc > 5)) { 607 printk(KERN_WARNING "%s: CFI contains unrecognised boot " 608 "bank location (%d). Assuming bottom.\n", 609 map->name, bootloc); 610 bootloc = 2; 611 } 612 613 if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) { 614 printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name); 615 616 for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) { 617 int j = (cfi->cfiq->NumEraseRegions-1)-i; 618 __u32 swap; 619 620 swap = cfi->cfiq->EraseRegionInfo[i]; 621 cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j]; 622 cfi->cfiq->EraseRegionInfo[j] = swap; 623 } 624 } 625 /* Set the default CFI lock/unlock addresses */ 626 cfi->addr_unlock1 = 0x555; 627 cfi->addr_unlock2 = 0x2aa; 628 } 629 cfi_fixup(mtd, cfi_nopri_fixup_table); 630 631 if (!cfi->addr_unlock1 || !cfi->addr_unlock2) { 632 kfree(mtd); 633 return NULL; 634 } 635 636 } /* CFI mode */ 637 else if (cfi->cfi_mode == CFI_MODE_JEDEC) { 638 /* Apply jedec specific fixups */ 639 cfi_fixup(mtd, jedec_fixup_table); 640 } 641 /* Apply generic fixups */ 642 cfi_fixup(mtd, fixup_table); 643 644 for (i=0; i< cfi->numchips; i++) { 645 cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp; 646 cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp; 647 cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp; 648 /* 649 * First calculate the timeout max according to timeout field 650 * of struct cfi_ident that probed from chip's CFI aera, if 651 * available. Specify a minimum of 2000us, in case the CFI data 652 * is wrong. 
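 * As a worked example with illustrative (not chip-specific) values: the
 * CFI timeout fields are log2 encoded, so BufWriteTimeoutTyp = 7 and
 * BufWriteTimeoutMax = 4 give 1 << (7 + 4) = 2048 us here, and the max()
 * a few lines below enforces the 2000 us floor in any case.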
653 */ 654 if (cfi->cfiq->BufWriteTimeoutTyp && 655 cfi->cfiq->BufWriteTimeoutMax) 656 cfi->chips[i].buffer_write_time_max = 657 1 << (cfi->cfiq->BufWriteTimeoutTyp + 658 cfi->cfiq->BufWriteTimeoutMax); 659 else 660 cfi->chips[i].buffer_write_time_max = 0; 661 662 cfi->chips[i].buffer_write_time_max = 663 max(cfi->chips[i].buffer_write_time_max, 2000); 664 665 cfi->chips[i].ref_point_counter = 0; 666 init_waitqueue_head(&(cfi->chips[i].wq)); 667 } 668 669 map->fldrv = &cfi_amdstd_chipdrv; 670 671 return cfi_amdstd_setup(mtd); 672 } 673 struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002"))); 674 struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002"))); 675 EXPORT_SYMBOL_GPL(cfi_cmdset_0002); 676 EXPORT_SYMBOL_GPL(cfi_cmdset_0006); 677 EXPORT_SYMBOL_GPL(cfi_cmdset_0701); 678 679 static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd) 680 { 681 struct map_info *map = mtd->priv; 682 struct cfi_private *cfi = map->fldrv_priv; 683 unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave; 684 unsigned long offset = 0; 685 int i,j; 686 687 printk(KERN_NOTICE "number of %s chips: %d\n", 688 (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips); 689 /* Select the correct geometry setup */ 690 mtd->size = devsize * cfi->numchips; 691 692 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips; 693 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) 694 * mtd->numeraseregions, GFP_KERNEL); 695 if (!mtd->eraseregions) 696 goto setup_err; 697 698 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) { 699 unsigned long ernum, ersize; 700 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave; 701 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1; 702 703 if (mtd->erasesize < ersize) { 704 mtd->erasesize = ersize; 705 } 706 for (j=0; j<cfi->numchips; j++) { 707 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset; 708 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize; 709 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum; 710 } 711 offset += (ersize * ernum); 712 } 713 if (offset != devsize) { 714 /* Argh */ 715 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize); 716 goto setup_err; 717 } 718 719 __module_get(THIS_MODULE); 720 register_reboot_notifier(&mtd->reboot_notifier); 721 return mtd; 722 723 setup_err: 724 kfree(mtd->eraseregions); 725 kfree(mtd); 726 kfree(cfi->cmdset_priv); 727 kfree(cfi->cfiq); 728 return NULL; 729 } 730 731 /* 732 * Return true if the chip is ready. 733 * 734 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any 735 * non-suspended sector) and is indicated by no toggle bits toggling. 736 * 737 * Note that anything more complicated than checking if no bits are toggling 738 * (including checking DQ5 for an error status) is tricky to get working 739 * correctly and is therefore not done (particularly with interleaved chips 740 * as each chip must be checked independently of the others). 741 */ 742 static int __xipram chip_ready(struct map_info *map, unsigned long addr) 743 { 744 map_word d, t; 745 746 d = map_read(map, addr); 747 t = map_read(map, addr); 748 749 return map_word_equal(map, d, t); 750 } 751 752 /* 753 * Return true if the chip is ready and has the correct value. 
754 * 755 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any 756 * non-suspended sector) and it is indicated by no bits toggling. 757 * 758 * Error are indicated by toggling bits or bits held with the wrong value, 759 * or with bits toggling. 760 * 761 * Note that anything more complicated than checking if no bits are toggling 762 * (including checking DQ5 for an error status) is tricky to get working 763 * correctly and is therefore not done (particularly with interleaved chips 764 * as each chip must be checked independently of the others). 765 * 766 */ 767 static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected) 768 { 769 map_word oldd, curd; 770 771 oldd = map_read(map, addr); 772 curd = map_read(map, addr); 773 774 return map_word_equal(map, oldd, curd) && 775 map_word_equal(map, curd, expected); 776 } 777 778 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode) 779 { 780 DECLARE_WAITQUEUE(wait, current); 781 struct cfi_private *cfi = map->fldrv_priv; 782 unsigned long timeo; 783 struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv; 784 785 resettime: 786 timeo = jiffies + HZ; 787 retry: 788 switch (chip->state) { 789 790 case FL_STATUS: 791 for (;;) { 792 if (chip_ready(map, adr)) 793 break; 794 795 if (time_after(jiffies, timeo)) { 796 printk(KERN_ERR "Waiting for chip to be ready timed out.\n"); 797 return -EIO; 798 } 799 mutex_unlock(&chip->mutex); 800 cfi_udelay(1); 801 mutex_lock(&chip->mutex); 802 /* Someone else might have been playing with it. */ 803 goto retry; 804 } 805 806 case FL_READY: 807 case FL_CFI_QUERY: 808 case FL_JEDEC_QUERY: 809 return 0; 810 811 case FL_ERASING: 812 if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) || 813 !(mode == FL_READY || mode == FL_POINT || 814 (mode == FL_WRITING && (cfip->EraseSuspend & 0x2)))) 815 goto sleep; 816 817 /* We could check to see if we're trying to access the sector 818 * that is currently being erased. However, no user will try 819 * anything like that so we just wait for the timeout. */ 820 821 /* Erase suspend */ 822 /* It's harmless to issue the Erase-Suspend and Erase-Resume 823 * commands when the erase algorithm isn't in progress. */ 824 map_write(map, CMD(0xB0), chip->in_progress_block_addr); 825 chip->oldstate = FL_ERASING; 826 chip->state = FL_ERASE_SUSPENDING; 827 chip->erase_suspended = 1; 828 for (;;) { 829 if (chip_ready(map, adr)) 830 break; 831 832 if (time_after(jiffies, timeo)) { 833 /* Should have suspended the erase by now. 834 * Send an Erase-Resume command as either 835 * there was an error (so leave the erase 836 * routine to recover from it) or we trying to 837 * use the erase-in-progress sector. */ 838 put_chip(map, chip, adr); 839 printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__); 840 return -EIO; 841 } 842 843 mutex_unlock(&chip->mutex); 844 cfi_udelay(1); 845 mutex_lock(&chip->mutex); 846 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING. 847 So we can just loop here. */ 848 } 849 chip->state = FL_READY; 850 return 0; 851 852 case FL_XIP_WHILE_ERASING: 853 if (mode != FL_READY && mode != FL_POINT && 854 (!cfip || !(cfip->EraseSuspend&2))) 855 goto sleep; 856 chip->oldstate = chip->state; 857 chip->state = FL_READY; 858 return 0; 859 860 case FL_SHUTDOWN: 861 /* The machine is rebooting */ 862 return -EIO; 863 864 case FL_POINT: 865 /* Only if there's no operation suspended... 
*/ 866 if (mode == FL_READY && chip->oldstate == FL_READY) 867 return 0; 868 869 default: 870 sleep: 871 set_current_state(TASK_UNINTERRUPTIBLE); 872 add_wait_queue(&chip->wq, &wait); 873 mutex_unlock(&chip->mutex); 874 schedule(); 875 remove_wait_queue(&chip->wq, &wait); 876 mutex_lock(&chip->mutex); 877 goto resettime; 878 } 879 } 880 881 882 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr) 883 { 884 struct cfi_private *cfi = map->fldrv_priv; 885 886 switch(chip->oldstate) { 887 case FL_ERASING: 888 cfi_fixup_m29ew_erase_suspend(map, 889 chip->in_progress_block_addr); 890 map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr); 891 cfi_fixup_m29ew_delay_after_resume(cfi); 892 chip->oldstate = FL_READY; 893 chip->state = FL_ERASING; 894 break; 895 896 case FL_XIP_WHILE_ERASING: 897 chip->state = chip->oldstate; 898 chip->oldstate = FL_READY; 899 break; 900 901 case FL_READY: 902 case FL_STATUS: 903 break; 904 default: 905 printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate); 906 } 907 wake_up(&chip->wq); 908 } 909 910 #ifdef CONFIG_MTD_XIP 911 912 /* 913 * No interrupt what so ever can be serviced while the flash isn't in array 914 * mode. This is ensured by the xip_disable() and xip_enable() functions 915 * enclosing any code path where the flash is known not to be in array mode. 916 * And within a XIP disabled code path, only functions marked with __xipram 917 * may be called and nothing else (it's a good thing to inspect generated 918 * assembly to make sure inline functions were actually inlined and that gcc 919 * didn't emit calls to its own support functions). Also configuring MTD CFI 920 * support to a single buswidth and a single interleave is also recommended. 921 */ 922 923 static void xip_disable(struct map_info *map, struct flchip *chip, 924 unsigned long adr) 925 { 926 /* TODO: chips with no XIP use should ignore and return */ 927 (void) map_read(map, adr); /* ensure mmu mapping is up to date */ 928 local_irq_disable(); 929 } 930 931 static void __xipram xip_enable(struct map_info *map, struct flchip *chip, 932 unsigned long adr) 933 { 934 struct cfi_private *cfi = map->fldrv_priv; 935 936 if (chip->state != FL_POINT && chip->state != FL_READY) { 937 map_write(map, CMD(0xf0), adr); 938 chip->state = FL_READY; 939 } 940 (void) map_read(map, adr); 941 xip_iprefetch(); 942 local_irq_enable(); 943 } 944 945 /* 946 * When a delay is required for the flash operation to complete, the 947 * xip_udelay() function is polling for both the given timeout and pending 948 * (but still masked) hardware interrupts. Whenever there is an interrupt 949 * pending then the flash erase operation is suspended, array mode restored 950 * and interrupts unmasked. Task scheduling might also happen at that 951 * point. The CPU eventually returns from the interrupt or the call to 952 * schedule() and the suspended flash operation is resumed for the remaining 953 * of the delay period. 954 * 955 * Warning: this function _will_ fool interrupt latency tracing tools. 
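 * A typical caller in this file reaches it through the UDELAY() /
 * INVALIDATE_CACHE_UDELAY() macros, e.g. from the word write path:
 *
 *     INVALIDATE_CACHE_UDELAY(map, chip, adr, map_bankwidth(map),
 *                             chip->word_write_time);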
956 */ 957 958 static void __xipram xip_udelay(struct map_info *map, struct flchip *chip, 959 unsigned long adr, int usec) 960 { 961 struct cfi_private *cfi = map->fldrv_priv; 962 struct cfi_pri_amdstd *extp = cfi->cmdset_priv; 963 map_word status, OK = CMD(0x80); 964 unsigned long suspended, start = xip_currtime(); 965 flstate_t oldstate; 966 967 do { 968 cpu_relax(); 969 if (xip_irqpending() && extp && 970 ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) && 971 (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) { 972 /* 973 * Let's suspend the erase operation when supported. 974 * Note that we currently don't try to suspend 975 * interleaved chips if there is already another 976 * operation suspended (imagine what happens 977 * when one chip was already done with the current 978 * operation while another chip suspended it, then 979 * we resume the whole thing at once). Yes, it 980 * can happen! 981 */ 982 map_write(map, CMD(0xb0), adr); 983 usec -= xip_elapsed_since(start); 984 suspended = xip_currtime(); 985 do { 986 if (xip_elapsed_since(suspended) > 100000) { 987 /* 988 * The chip doesn't want to suspend 989 * after waiting for 100 msecs. 990 * This is a critical error but there 991 * is not much we can do here. 992 */ 993 return; 994 } 995 status = map_read(map, adr); 996 } while (!map_word_andequal(map, status, OK, OK)); 997 998 /* Suspend succeeded */ 999 oldstate = chip->state; 1000 if (!map_word_bitsset(map, status, CMD(0x40))) 1001 break; 1002 chip->state = FL_XIP_WHILE_ERASING; 1003 chip->erase_suspended = 1; 1004 map_write(map, CMD(0xf0), adr); 1005 (void) map_read(map, adr); 1006 xip_iprefetch(); 1007 local_irq_enable(); 1008 mutex_unlock(&chip->mutex); 1009 xip_iprefetch(); 1010 cond_resched(); 1011 1012 /* 1013 * We're back. However someone else might have 1014 * decided to go write to the chip if we are in 1015 * a suspended erase state. If so let's wait 1016 * until it's done. 1017 */ 1018 mutex_lock(&chip->mutex); 1019 while (chip->state != FL_XIP_WHILE_ERASING) { 1020 DECLARE_WAITQUEUE(wait, current); 1021 set_current_state(TASK_UNINTERRUPTIBLE); 1022 add_wait_queue(&chip->wq, &wait); 1023 mutex_unlock(&chip->mutex); 1024 schedule(); 1025 remove_wait_queue(&chip->wq, &wait); 1026 mutex_lock(&chip->mutex); 1027 } 1028 /* Disallow XIP again */ 1029 local_irq_disable(); 1030 1031 /* Correct Erase Suspend Hangups for M29EW */ 1032 cfi_fixup_m29ew_erase_suspend(map, adr); 1033 /* Resume the write or erase operation */ 1034 map_write(map, cfi->sector_erase_cmd, adr); 1035 chip->state = oldstate; 1036 start = xip_currtime(); 1037 } else if (usec >= 1000000/HZ) { 1038 /* 1039 * Try to save on CPU power when waiting delay 1040 * is at least a system timer tick period. 1041 * No need to be extremely accurate here. 1042 */ 1043 xip_cpu_idle(); 1044 } 1045 status = map_read(map, adr); 1046 } while (!map_word_andequal(map, status, OK, OK) 1047 && xip_elapsed_since(start) < usec); 1048 } 1049 1050 #define UDELAY(map, chip, adr, usec) xip_udelay(map, chip, adr, usec) 1051 1052 /* 1053 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while 1054 * the flash is actively programming or erasing since we have to poll for 1055 * the operation to complete anyway. We can't do that in a generic way with 1056 * a XIP setup so do it before the actual flash operation in this case 1057 * and stub it out from INVALIDATE_CACHE_UDELAY. 
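 * Concretely: in this XIP build INVALIDATE_CACHE_UDELAY() below expands to
 * plain UDELAY(), whereas the non-XIP variants further down keep the
 * INVALIDATE_CACHED_RANGE() call inside the unlocked delay window.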
1058 */ 1059 #define XIP_INVAL_CACHED_RANGE(map, from, size) \ 1060 INVALIDATE_CACHED_RANGE(map, from, size) 1061 1062 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \ 1063 UDELAY(map, chip, adr, usec) 1064 1065 /* 1066 * Extra notes: 1067 * 1068 * Activating this XIP support changes the way the code works a bit. For 1069 * example the code to suspend the current process when concurrent access 1070 * happens is never executed because xip_udelay() will always return with the 1071 * same chip state as it was entered with. This is why there is no care for 1072 * the presence of add_wait_queue() or schedule() calls from within a couple 1073 * xip_disable()'d areas of code, like in do_erase_oneblock for example. 1074 * The queueing and scheduling are always happening within xip_udelay(). 1075 * 1076 * Similarly, get_chip() and put_chip() just happen to always be executed 1077 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state 1078 * is in array mode, therefore never executing many cases therein and not 1079 * causing any problem with XIP. 1080 */ 1081 1082 #else 1083 1084 #define xip_disable(map, chip, adr) 1085 #define xip_enable(map, chip, adr) 1086 #define XIP_INVAL_CACHED_RANGE(x...) 1087 1088 #define UDELAY(map, chip, adr, usec) \ 1089 do { \ 1090 mutex_unlock(&chip->mutex); \ 1091 cfi_udelay(usec); \ 1092 mutex_lock(&chip->mutex); \ 1093 } while (0) 1094 1095 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \ 1096 do { \ 1097 mutex_unlock(&chip->mutex); \ 1098 INVALIDATE_CACHED_RANGE(map, adr, len); \ 1099 cfi_udelay(usec); \ 1100 mutex_lock(&chip->mutex); \ 1101 } while (0) 1102 1103 #endif 1104 1105 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf) 1106 { 1107 unsigned long cmd_addr; 1108 struct cfi_private *cfi = map->fldrv_priv; 1109 int ret; 1110 1111 adr += chip->start; 1112 1113 /* Ensure cmd read/writes are aligned. 
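 * (e.g. with a 2-byte bank width, adr 0x1003 yields cmd_addr 0x1002)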
*/ 1114 cmd_addr = adr & ~(map_bankwidth(map)-1); 1115 1116 mutex_lock(&chip->mutex); 1117 ret = get_chip(map, chip, cmd_addr, FL_READY); 1118 if (ret) { 1119 mutex_unlock(&chip->mutex); 1120 return ret; 1121 } 1122 1123 if (chip->state != FL_POINT && chip->state != FL_READY) { 1124 map_write(map, CMD(0xf0), cmd_addr); 1125 chip->state = FL_READY; 1126 } 1127 1128 map_copy_from(map, buf, adr, len); 1129 1130 put_chip(map, chip, cmd_addr); 1131 1132 mutex_unlock(&chip->mutex); 1133 return 0; 1134 } 1135 1136 1137 static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) 1138 { 1139 struct map_info *map = mtd->priv; 1140 struct cfi_private *cfi = map->fldrv_priv; 1141 unsigned long ofs; 1142 int chipnum; 1143 int ret = 0; 1144 1145 /* ofs: offset within the first chip that the first read should start */ 1146 chipnum = (from >> cfi->chipshift); 1147 ofs = from - (chipnum << cfi->chipshift); 1148 1149 while (len) { 1150 unsigned long thislen; 1151 1152 if (chipnum >= cfi->numchips) 1153 break; 1154 1155 if ((len + ofs -1) >> cfi->chipshift) 1156 thislen = (1<<cfi->chipshift) - ofs; 1157 else 1158 thislen = len; 1159 1160 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf); 1161 if (ret) 1162 break; 1163 1164 *retlen += thislen; 1165 len -= thislen; 1166 buf += thislen; 1167 1168 ofs = 0; 1169 chipnum++; 1170 } 1171 return ret; 1172 } 1173 1174 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip, 1175 loff_t adr, size_t len, u_char *buf, size_t grouplen); 1176 1177 static inline void otp_enter(struct map_info *map, struct flchip *chip, 1178 loff_t adr, size_t len) 1179 { 1180 struct cfi_private *cfi = map->fldrv_priv; 1181 1182 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 1183 cfi->device_type, NULL); 1184 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 1185 cfi->device_type, NULL); 1186 cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, 1187 cfi->device_type, NULL); 1188 1189 INVALIDATE_CACHED_RANGE(map, chip->start + adr, len); 1190 } 1191 1192 static inline void otp_exit(struct map_info *map, struct flchip *chip, 1193 loff_t adr, size_t len) 1194 { 1195 struct cfi_private *cfi = map->fldrv_priv; 1196 1197 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 1198 cfi->device_type, NULL); 1199 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 1200 cfi->device_type, NULL); 1201 cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, 1202 cfi->device_type, NULL); 1203 cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, 1204 cfi->device_type, NULL); 1205 1206 INVALIDATE_CACHED_RANGE(map, chip->start + adr, len); 1207 } 1208 1209 static inline int do_read_secsi_onechip(struct map_info *map, 1210 struct flchip *chip, loff_t adr, 1211 size_t len, u_char *buf, 1212 size_t grouplen) 1213 { 1214 DECLARE_WAITQUEUE(wait, current); 1215 unsigned long timeo = jiffies + HZ; 1216 1217 retry: 1218 mutex_lock(&chip->mutex); 1219 1220 if (chip->state != FL_READY){ 1221 set_current_state(TASK_UNINTERRUPTIBLE); 1222 add_wait_queue(&chip->wq, &wait); 1223 1224 mutex_unlock(&chip->mutex); 1225 1226 schedule(); 1227 remove_wait_queue(&chip->wq, &wait); 1228 timeo = jiffies + HZ; 1229 1230 goto retry; 1231 } 1232 1233 adr += chip->start; 1234 1235 chip->state = FL_READY; 1236 1237 otp_enter(map, chip, adr, len); 1238 map_copy_from(map, buf, adr, len); 1239 otp_exit(map, chip, adr, len); 1240 1241 wake_up(&chip->wq); 1242 
mutex_unlock(&chip->mutex); 1243 1244 return 0; 1245 } 1246 1247 static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) 1248 { 1249 struct map_info *map = mtd->priv; 1250 struct cfi_private *cfi = map->fldrv_priv; 1251 unsigned long ofs; 1252 int chipnum; 1253 int ret = 0; 1254 1255 /* ofs: offset within the first chip that the first read should start */ 1256 /* 8 secsi bytes per chip */ 1257 chipnum=from>>3; 1258 ofs=from & 7; 1259 1260 while (len) { 1261 unsigned long thislen; 1262 1263 if (chipnum >= cfi->numchips) 1264 break; 1265 1266 if ((len + ofs -1) >> 3) 1267 thislen = (1<<3) - ofs; 1268 else 1269 thislen = len; 1270 1271 ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, 1272 thislen, buf, 0); 1273 if (ret) 1274 break; 1275 1276 *retlen += thislen; 1277 len -= thislen; 1278 buf += thislen; 1279 1280 ofs = 0; 1281 chipnum++; 1282 } 1283 return ret; 1284 } 1285 1286 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, 1287 unsigned long adr, map_word datum, 1288 int mode); 1289 1290 static int do_otp_write(struct map_info *map, struct flchip *chip, loff_t adr, 1291 size_t len, u_char *buf, size_t grouplen) 1292 { 1293 int ret; 1294 while (len) { 1295 unsigned long bus_ofs = adr & ~(map_bankwidth(map)-1); 1296 int gap = adr - bus_ofs; 1297 int n = min_t(int, len, map_bankwidth(map) - gap); 1298 map_word datum; 1299 1300 if (n != map_bankwidth(map)) { 1301 /* partial write of a word, load old contents */ 1302 otp_enter(map, chip, bus_ofs, map_bankwidth(map)); 1303 datum = map_read(map, bus_ofs); 1304 otp_exit(map, chip, bus_ofs, map_bankwidth(map)); 1305 } 1306 1307 datum = map_word_load_partial(map, datum, buf, gap, n); 1308 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE); 1309 if (ret) 1310 return ret; 1311 1312 adr += n; 1313 buf += n; 1314 len -= n; 1315 } 1316 1317 return 0; 1318 } 1319 1320 static int do_otp_lock(struct map_info *map, struct flchip *chip, loff_t adr, 1321 size_t len, u_char *buf, size_t grouplen) 1322 { 1323 struct cfi_private *cfi = map->fldrv_priv; 1324 uint8_t lockreg; 1325 unsigned long timeo; 1326 int ret; 1327 1328 /* make sure area matches group boundaries */ 1329 if ((adr != 0) || (len != grouplen)) 1330 return -EINVAL; 1331 1332 mutex_lock(&chip->mutex); 1333 ret = get_chip(map, chip, chip->start, FL_LOCKING); 1334 if (ret) { 1335 mutex_unlock(&chip->mutex); 1336 return ret; 1337 } 1338 chip->state = FL_LOCKING; 1339 1340 /* Enter lock register command */ 1341 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 1342 cfi->device_type, NULL); 1343 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 1344 cfi->device_type, NULL); 1345 cfi_send_gen_cmd(0x40, cfi->addr_unlock1, chip->start, map, cfi, 1346 cfi->device_type, NULL); 1347 1348 /* read lock register */ 1349 lockreg = cfi_read_query(map, 0); 1350 1351 /* set bit 0 to protect extended memory block */ 1352 lockreg &= ~0x01; 1353 1354 /* set bit 0 to protect extended memory block */ 1355 /* write lock register */ 1356 map_write(map, CMD(0xA0), chip->start); 1357 map_write(map, CMD(lockreg), chip->start); 1358 1359 /* wait for chip to become ready */ 1360 timeo = jiffies + msecs_to_jiffies(2); 1361 for (;;) { 1362 if (chip_ready(map, adr)) 1363 break; 1364 1365 if (time_after(jiffies, timeo)) { 1366 pr_err("Waiting for chip to be ready timed out.\n"); 1367 ret = -EIO; 1368 break; 1369 } 1370 UDELAY(map, chip, 0, 1); 1371 } 1372 1373 /* exit protection commands */ 1374 
map_write(map, CMD(0x90), chip->start); 1375 map_write(map, CMD(0x00), chip->start); 1376 1377 chip->state = FL_READY; 1378 put_chip(map, chip, chip->start); 1379 mutex_unlock(&chip->mutex); 1380 1381 return ret; 1382 } 1383 1384 static int cfi_amdstd_otp_walk(struct mtd_info *mtd, loff_t from, size_t len, 1385 size_t *retlen, u_char *buf, 1386 otp_op_t action, int user_regs) 1387 { 1388 struct map_info *map = mtd->priv; 1389 struct cfi_private *cfi = map->fldrv_priv; 1390 int ofs_factor = cfi->interleave * cfi->device_type; 1391 unsigned long base; 1392 int chipnum; 1393 struct flchip *chip; 1394 uint8_t otp, lockreg; 1395 int ret; 1396 1397 size_t user_size, factory_size, otpsize; 1398 loff_t user_offset, factory_offset, otpoffset; 1399 int user_locked = 0, otplocked; 1400 1401 *retlen = 0; 1402 1403 for (chipnum = 0; chipnum < cfi->numchips; chipnum++) { 1404 chip = &cfi->chips[chipnum]; 1405 factory_size = 0; 1406 user_size = 0; 1407 1408 /* Micron M29EW family */ 1409 if (is_m29ew(cfi)) { 1410 base = chip->start; 1411 1412 /* check whether secsi area is factory locked 1413 or user lockable */ 1414 mutex_lock(&chip->mutex); 1415 ret = get_chip(map, chip, base, FL_CFI_QUERY); 1416 if (ret) { 1417 mutex_unlock(&chip->mutex); 1418 return ret; 1419 } 1420 cfi_qry_mode_on(base, map, cfi); 1421 otp = cfi_read_query(map, base + 0x3 * ofs_factor); 1422 cfi_qry_mode_off(base, map, cfi); 1423 put_chip(map, chip, base); 1424 mutex_unlock(&chip->mutex); 1425 1426 if (otp & 0x80) { 1427 /* factory locked */ 1428 factory_offset = 0; 1429 factory_size = 0x100; 1430 } else { 1431 /* customer lockable */ 1432 user_offset = 0; 1433 user_size = 0x100; 1434 1435 mutex_lock(&chip->mutex); 1436 ret = get_chip(map, chip, base, FL_LOCKING); 1437 if (ret) { 1438 mutex_unlock(&chip->mutex); 1439 return ret; 1440 } 1441 1442 /* Enter lock register command */ 1443 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, 1444 chip->start, map, cfi, 1445 cfi->device_type, NULL); 1446 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, 1447 chip->start, map, cfi, 1448 cfi->device_type, NULL); 1449 cfi_send_gen_cmd(0x40, cfi->addr_unlock1, 1450 chip->start, map, cfi, 1451 cfi->device_type, NULL); 1452 /* read lock register */ 1453 lockreg = cfi_read_query(map, 0); 1454 /* exit protection commands */ 1455 map_write(map, CMD(0x90), chip->start); 1456 map_write(map, CMD(0x00), chip->start); 1457 put_chip(map, chip, chip->start); 1458 mutex_unlock(&chip->mutex); 1459 1460 user_locked = ((lockreg & 0x01) == 0x00); 1461 } 1462 } 1463 1464 otpsize = user_regs ? user_size : factory_size; 1465 if (!otpsize) 1466 continue; 1467 otpoffset = user_regs ? user_offset : factory_offset; 1468 otplocked = user_regs ? user_locked : 1; 1469 1470 if (!action) { 1471 /* return otpinfo */ 1472 struct otp_info *otpinfo; 1473 len -= sizeof(*otpinfo); 1474 if (len <= 0) 1475 return -ENOSPC; 1476 otpinfo = (struct otp_info *)buf; 1477 otpinfo->start = from; 1478 otpinfo->length = otpsize; 1479 otpinfo->locked = otplocked; 1480 buf += sizeof(*otpinfo); 1481 *retlen += sizeof(*otpinfo); 1482 from += otpsize; 1483 } else if ((from < otpsize) && (len > 0)) { 1484 size_t size; 1485 size = (len < otpsize - from) ? 
len : otpsize - from; 1486 ret = action(map, chip, otpoffset + from, size, buf, 1487 otpsize); 1488 if (ret < 0) 1489 return ret; 1490 1491 buf += size; 1492 len -= size; 1493 *retlen += size; 1494 from = 0; 1495 } else { 1496 from -= otpsize; 1497 } 1498 } 1499 return 0; 1500 } 1501 1502 static int cfi_amdstd_get_fact_prot_info(struct mtd_info *mtd, size_t len, 1503 size_t *retlen, struct otp_info *buf) 1504 { 1505 return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf, 1506 NULL, 0); 1507 } 1508 1509 static int cfi_amdstd_get_user_prot_info(struct mtd_info *mtd, size_t len, 1510 size_t *retlen, struct otp_info *buf) 1511 { 1512 return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf, 1513 NULL, 1); 1514 } 1515 1516 static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, 1517 size_t len, size_t *retlen, 1518 u_char *buf) 1519 { 1520 return cfi_amdstd_otp_walk(mtd, from, len, retlen, 1521 buf, do_read_secsi_onechip, 0); 1522 } 1523 1524 static int cfi_amdstd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, 1525 size_t len, size_t *retlen, 1526 u_char *buf) 1527 { 1528 return cfi_amdstd_otp_walk(mtd, from, len, retlen, 1529 buf, do_read_secsi_onechip, 1); 1530 } 1531 1532 static int cfi_amdstd_write_user_prot_reg(struct mtd_info *mtd, loff_t from, 1533 size_t len, size_t *retlen, 1534 u_char *buf) 1535 { 1536 return cfi_amdstd_otp_walk(mtd, from, len, retlen, buf, 1537 do_otp_write, 1); 1538 } 1539 1540 static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, 1541 size_t len) 1542 { 1543 size_t retlen; 1544 return cfi_amdstd_otp_walk(mtd, from, len, &retlen, NULL, 1545 do_otp_lock, 1); 1546 } 1547 1548 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, 1549 unsigned long adr, map_word datum, 1550 int mode) 1551 { 1552 struct cfi_private *cfi = map->fldrv_priv; 1553 unsigned long timeo = jiffies + HZ; 1554 /* 1555 * We use a 1ms + 1 jiffies generic timeout for writes (most devices 1556 * have a max write time of a few hundreds usec). However, we should 1557 * use the maximum timeout value given by the chip at probe time 1558 * instead. Unfortunately, struct flchip does have a field for 1559 * maximum timeout, only for typical which can be far too short 1560 * depending of the conditions. The ' + 1' is to avoid having a 1561 * timeout of 0 jiffies if HZ is smaller than 1000. 1562 */ 1563 unsigned long uWriteTimeout = ( HZ / 1000 ) + 1; 1564 int ret = 0; 1565 map_word oldd; 1566 int retry_cnt = 0; 1567 1568 adr += chip->start; 1569 1570 mutex_lock(&chip->mutex); 1571 ret = get_chip(map, chip, adr, mode); 1572 if (ret) { 1573 mutex_unlock(&chip->mutex); 1574 return ret; 1575 } 1576 1577 pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", 1578 __func__, adr, datum.x[0] ); 1579 1580 if (mode == FL_OTP_WRITE) 1581 otp_enter(map, chip, adr, map_bankwidth(map)); 1582 1583 /* 1584 * Check for a NOP for the case when the datum to write is already 1585 * present - it saves time and works around buggy chips that corrupt 1586 * data at other locations when 0xff is written to a location that 1587 * already contains 0xff. 
1588 */ 1589 oldd = map_read(map, adr); 1590 if (map_word_equal(map, oldd, datum)) { 1591 pr_debug("MTD %s(): NOP\n", 1592 __func__); 1593 goto op_done; 1594 } 1595 1596 XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map)); 1597 ENABLE_VPP(map); 1598 xip_disable(map, chip, adr); 1599 1600 retry: 1601 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1602 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 1603 cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1604 map_write(map, datum, adr); 1605 chip->state = mode; 1606 1607 INVALIDATE_CACHE_UDELAY(map, chip, 1608 adr, map_bankwidth(map), 1609 chip->word_write_time); 1610 1611 /* See comment above for timeout value. */ 1612 timeo = jiffies + uWriteTimeout; 1613 for (;;) { 1614 if (chip->state != mode) { 1615 /* Someone's suspended the write. Sleep */ 1616 DECLARE_WAITQUEUE(wait, current); 1617 1618 set_current_state(TASK_UNINTERRUPTIBLE); 1619 add_wait_queue(&chip->wq, &wait); 1620 mutex_unlock(&chip->mutex); 1621 schedule(); 1622 remove_wait_queue(&chip->wq, &wait); 1623 timeo = jiffies + (HZ / 2); /* FIXME */ 1624 mutex_lock(&chip->mutex); 1625 continue; 1626 } 1627 1628 if (time_after(jiffies, timeo) && !chip_ready(map, adr)){ 1629 xip_enable(map, chip, adr); 1630 printk(KERN_WARNING "MTD %s(): software timeout\n", __func__); 1631 xip_disable(map, chip, adr); 1632 break; 1633 } 1634 1635 if (chip_ready(map, adr)) 1636 break; 1637 1638 /* Latency issues. Drop the lock, wait a while and retry */ 1639 UDELAY(map, chip, adr, 1); 1640 } 1641 /* Did we succeed? */ 1642 if (!chip_good(map, adr, datum)) { 1643 /* reset on all failures. */ 1644 map_write( map, CMD(0xF0), chip->start ); 1645 /* FIXME - should have reset delay before continuing */ 1646 1647 if (++retry_cnt <= MAX_WORD_RETRIES) 1648 goto retry; 1649 1650 ret = -EIO; 1651 } 1652 xip_enable(map, chip, adr); 1653 op_done: 1654 if (mode == FL_OTP_WRITE) 1655 otp_exit(map, chip, adr, map_bankwidth(map)); 1656 chip->state = FL_READY; 1657 DISABLE_VPP(map); 1658 put_chip(map, chip, adr); 1659 mutex_unlock(&chip->mutex); 1660 1661 return ret; 1662 } 1663 1664 1665 static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len, 1666 size_t *retlen, const u_char *buf) 1667 { 1668 struct map_info *map = mtd->priv; 1669 struct cfi_private *cfi = map->fldrv_priv; 1670 int ret = 0; 1671 int chipnum; 1672 unsigned long ofs, chipstart; 1673 DECLARE_WAITQUEUE(wait, current); 1674 1675 chipnum = to >> cfi->chipshift; 1676 ofs = to - (chipnum << cfi->chipshift); 1677 chipstart = cfi->chips[chipnum].start; 1678 1679 /* If it's not bus-aligned, do the first byte write */ 1680 if (ofs & (map_bankwidth(map)-1)) { 1681 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1); 1682 int i = ofs - bus_ofs; 1683 int n = 0; 1684 map_word tmp_buf; 1685 1686 retry: 1687 mutex_lock(&cfi->chips[chipnum].mutex); 1688 1689 if (cfi->chips[chipnum].state != FL_READY) { 1690 set_current_state(TASK_UNINTERRUPTIBLE); 1691 add_wait_queue(&cfi->chips[chipnum].wq, &wait); 1692 1693 mutex_unlock(&cfi->chips[chipnum].mutex); 1694 1695 schedule(); 1696 remove_wait_queue(&cfi->chips[chipnum].wq, &wait); 1697 goto retry; 1698 } 1699 1700 /* Load 'tmp_buf' with old contents of flash */ 1701 tmp_buf = map_read(map, bus_ofs+chipstart); 1702 1703 mutex_unlock(&cfi->chips[chipnum].mutex); 1704 1705 /* Number of bytes to copy from buffer */ 1706 n = min_t(int, len, map_bankwidth(map)-i); 1707 1708 tmp_buf 
= map_word_load_partial(map, tmp_buf, buf, i, n); 1709 1710 ret = do_write_oneword(map, &cfi->chips[chipnum], 1711 bus_ofs, tmp_buf, FL_WRITING); 1712 if (ret) 1713 return ret; 1714 1715 ofs += n; 1716 buf += n; 1717 (*retlen) += n; 1718 len -= n; 1719 1720 if (ofs >> cfi->chipshift) { 1721 chipnum ++; 1722 ofs = 0; 1723 if (chipnum == cfi->numchips) 1724 return 0; 1725 } 1726 } 1727 1728 /* We are now aligned, write as much as possible */ 1729 while(len >= map_bankwidth(map)) { 1730 map_word datum; 1731 1732 datum = map_word_load(map, buf); 1733 1734 ret = do_write_oneword(map, &cfi->chips[chipnum], 1735 ofs, datum, FL_WRITING); 1736 if (ret) 1737 return ret; 1738 1739 ofs += map_bankwidth(map); 1740 buf += map_bankwidth(map); 1741 (*retlen) += map_bankwidth(map); 1742 len -= map_bankwidth(map); 1743 1744 if (ofs >> cfi->chipshift) { 1745 chipnum ++; 1746 ofs = 0; 1747 if (chipnum == cfi->numchips) 1748 return 0; 1749 chipstart = cfi->chips[chipnum].start; 1750 } 1751 } 1752 1753 /* Write the trailing bytes if any */ 1754 if (len & (map_bankwidth(map)-1)) { 1755 map_word tmp_buf; 1756 1757 retry1: 1758 mutex_lock(&cfi->chips[chipnum].mutex); 1759 1760 if (cfi->chips[chipnum].state != FL_READY) { 1761 set_current_state(TASK_UNINTERRUPTIBLE); 1762 add_wait_queue(&cfi->chips[chipnum].wq, &wait); 1763 1764 mutex_unlock(&cfi->chips[chipnum].mutex); 1765 1766 schedule(); 1767 remove_wait_queue(&cfi->chips[chipnum].wq, &wait); 1768 goto retry1; 1769 } 1770 1771 tmp_buf = map_read(map, ofs + chipstart); 1772 1773 mutex_unlock(&cfi->chips[chipnum].mutex); 1774 1775 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len); 1776 1777 ret = do_write_oneword(map, &cfi->chips[chipnum], 1778 ofs, tmp_buf, FL_WRITING); 1779 if (ret) 1780 return ret; 1781 1782 (*retlen) += len; 1783 } 1784 1785 return 0; 1786 } 1787 1788 1789 /* 1790 * FIXME: interleaved mode not tested, and probably not supported! 1791 */ 1792 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, 1793 unsigned long adr, const u_char *buf, 1794 int len) 1795 { 1796 struct cfi_private *cfi = map->fldrv_priv; 1797 unsigned long timeo = jiffies + HZ; 1798 /* 1799 * Timeout is calculated according to CFI data, if available. 1800 * See more comments in cfi_cmdset_0002(). 
1801 */ 1802 unsigned long uWriteTimeout = 1803 usecs_to_jiffies(chip->buffer_write_time_max); 1804 int ret = -EIO; 1805 unsigned long cmd_adr; 1806 int z, words; 1807 map_word datum; 1808 1809 adr += chip->start; 1810 cmd_adr = adr; 1811 1812 mutex_lock(&chip->mutex); 1813 ret = get_chip(map, chip, adr, FL_WRITING); 1814 if (ret) { 1815 mutex_unlock(&chip->mutex); 1816 return ret; 1817 } 1818 1819 datum = map_word_load(map, buf); 1820 1821 pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", 1822 __func__, adr, datum.x[0] ); 1823 1824 XIP_INVAL_CACHED_RANGE(map, adr, len); 1825 ENABLE_VPP(map); 1826 xip_disable(map, chip, cmd_adr); 1827 1828 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1829 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 1830 1831 /* Write Buffer Load */ 1832 map_write(map, CMD(0x25), cmd_adr); 1833 1834 chip->state = FL_WRITING_TO_BUFFER; 1835 1836 /* Write length of data to come */ 1837 words = len / map_bankwidth(map); 1838 map_write(map, CMD(words - 1), cmd_adr); 1839 /* Write data */ 1840 z = 0; 1841 while(z < words * map_bankwidth(map)) { 1842 datum = map_word_load(map, buf); 1843 map_write(map, datum, adr + z); 1844 1845 z += map_bankwidth(map); 1846 buf += map_bankwidth(map); 1847 } 1848 z -= map_bankwidth(map); 1849 1850 adr += z; 1851 1852 /* Write Buffer Program Confirm: GO GO GO */ 1853 map_write(map, CMD(0x29), cmd_adr); 1854 chip->state = FL_WRITING; 1855 1856 INVALIDATE_CACHE_UDELAY(map, chip, 1857 adr, map_bankwidth(map), 1858 chip->word_write_time); 1859 1860 timeo = jiffies + uWriteTimeout; 1861 1862 for (;;) { 1863 if (chip->state != FL_WRITING) { 1864 /* Someone's suspended the write. Sleep */ 1865 DECLARE_WAITQUEUE(wait, current); 1866 1867 set_current_state(TASK_UNINTERRUPTIBLE); 1868 add_wait_queue(&chip->wq, &wait); 1869 mutex_unlock(&chip->mutex); 1870 schedule(); 1871 remove_wait_queue(&chip->wq, &wait); 1872 timeo = jiffies + (HZ / 2); /* FIXME */ 1873 mutex_lock(&chip->mutex); 1874 continue; 1875 } 1876 1877 if (time_after(jiffies, timeo) && !chip_ready(map, adr)) 1878 break; 1879 1880 if (chip_ready(map, adr)) { 1881 xip_enable(map, chip, adr); 1882 goto op_done; 1883 } 1884 1885 /* Latency issues. Drop the lock, wait a while and retry */ 1886 UDELAY(map, chip, adr, 1); 1887 } 1888 1889 /* 1890 * Recovery from write-buffer programming failures requires 1891 * the write-to-buffer-reset sequence. Since the last part 1892 * of the sequence also works as a normal reset, we can run 1893 * the same commands regardless of why we are here. 1894 * See e.g. 
1895 * http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf 1896 */ 1897 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 1898 cfi->device_type, NULL); 1899 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 1900 cfi->device_type, NULL); 1901 cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi, 1902 cfi->device_type, NULL); 1903 xip_enable(map, chip, adr); 1904 /* FIXME - should have reset delay before continuing */ 1905 1906 printk(KERN_WARNING "MTD %s(): software timeout, address:0x%.8lx.\n", 1907 __func__, adr); 1908 1909 ret = -EIO; 1910 op_done: 1911 chip->state = FL_READY; 1912 DISABLE_VPP(map); 1913 put_chip(map, chip, adr); 1914 mutex_unlock(&chip->mutex); 1915 1916 return ret; 1917 } 1918 1919 1920 static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len, 1921 size_t *retlen, const u_char *buf) 1922 { 1923 struct map_info *map = mtd->priv; 1924 struct cfi_private *cfi = map->fldrv_priv; 1925 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; 1926 int ret = 0; 1927 int chipnum; 1928 unsigned long ofs; 1929 1930 chipnum = to >> cfi->chipshift; 1931 ofs = to - (chipnum << cfi->chipshift); 1932 1933 /* If it's not bus-aligned, do the first word write */ 1934 if (ofs & (map_bankwidth(map)-1)) { 1935 size_t local_len = (-ofs)&(map_bankwidth(map)-1); 1936 if (local_len > len) 1937 local_len = len; 1938 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift), 1939 local_len, retlen, buf); 1940 if (ret) 1941 return ret; 1942 ofs += local_len; 1943 buf += local_len; 1944 len -= local_len; 1945 1946 if (ofs >> cfi->chipshift) { 1947 chipnum ++; 1948 ofs = 0; 1949 if (chipnum == cfi->numchips) 1950 return 0; 1951 } 1952 } 1953 1954 /* Write buffer is worth it only if more than one word to write... */ 1955 while (len >= map_bankwidth(map) * 2) { 1956 /* We must not cross write block boundaries */ 1957 int size = wbufsize - (ofs & (wbufsize-1)); 1958 1959 if (size > len) 1960 size = len; 1961 if (size % map_bankwidth(map)) 1962 size -= size % map_bankwidth(map); 1963 1964 ret = do_write_buffer(map, &cfi->chips[chipnum], 1965 ofs, buf, size); 1966 if (ret) 1967 return ret; 1968 1969 ofs += size; 1970 buf += size; 1971 (*retlen) += size; 1972 len -= size; 1973 1974 if (ofs >> cfi->chipshift) { 1975 chipnum ++; 1976 ofs = 0; 1977 if (chipnum == cfi->numchips) 1978 return 0; 1979 } 1980 } 1981 1982 if (len) { 1983 size_t retlen_dregs = 0; 1984 1985 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift), 1986 len, &retlen_dregs, buf); 1987 1988 *retlen += retlen_dregs; 1989 return ret; 1990 } 1991 1992 return 0; 1993 } 1994 1995 /* 1996 * Wait for the flash chip to become ready to write data 1997 * 1998 * This is only called during the panic_write() path. When panic_write() 1999 * is called, the kernel is in the process of a panic, and will soon be 2000 * dead. Therefore we don't take any locks, and attempt to get access 2001 * to the chip as soon as possible. 2002 */ 2003 static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip, 2004 unsigned long adr) 2005 { 2006 struct cfi_private *cfi = map->fldrv_priv; 2007 int retries = 10; 2008 int i; 2009 2010 /* 2011 * If the driver thinks the chip is idle, and no toggle bits 2012 * are changing, then the chip is actually idle for sure. 
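	 *
	 * "No toggle bits changing" relies on the usual AMD DQ6/DQ2 toggle
	 * scheme: chip_ready(), defined earlier in this file, essentially
	 * reads the same location twice and reports the chip idle when the
	 * two reads match, roughly (illustrative sketch only):
	 *
	 *	d = map_read(map, adr);
	 *	t = map_read(map, adr);
	 *	return map_word_equal(map, d, t);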
2013 */ 2014 if (chip->state == FL_READY && chip_ready(map, adr)) 2015 return 0; 2016 2017 /* 2018 * Try several times to reset the chip and then wait for it 2019 * to become idle. The upper limit of a few milliseconds of 2020 * delay isn't a big problem: the kernel is dying anyway. It 2021 * is more important to save the messages. 2022 */ 2023 while (retries > 0) { 2024 const unsigned long timeo = (HZ / 1000) + 1; 2025 2026 /* send the reset command */ 2027 map_write(map, CMD(0xF0), chip->start); 2028 2029 /* wait for the chip to become ready */ 2030 for (i = 0; i < jiffies_to_usecs(timeo); i++) { 2031 if (chip_ready(map, adr)) 2032 return 0; 2033 2034 udelay(1); 2035 } 2036 2037 retries--; 2038 } 2039 2040 /* the chip never became ready */ 2041 return -EBUSY; 2042 } 2043 2044 /* 2045 * Write out one word of data to a single flash chip during a kernel panic 2046 * 2047 * This is only called during the panic_write() path. When panic_write() 2048 * is called, the kernel is in the process of a panic, and will soon be 2049 * dead. Therefore we don't take any locks, and attempt to get access 2050 * to the chip as soon as possible. 2051 * 2052 * The implementation of this routine is intentionally similar to 2053 * do_write_oneword(), in order to ease code maintenance. 2054 */ 2055 static int do_panic_write_oneword(struct map_info *map, struct flchip *chip, 2056 unsigned long adr, map_word datum) 2057 { 2058 const unsigned long uWriteTimeout = (HZ / 1000) + 1; 2059 struct cfi_private *cfi = map->fldrv_priv; 2060 int retry_cnt = 0; 2061 map_word oldd; 2062 int ret = 0; 2063 int i; 2064 2065 adr += chip->start; 2066 2067 ret = cfi_amdstd_panic_wait(map, chip, adr); 2068 if (ret) 2069 return ret; 2070 2071 pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n", 2072 __func__, adr, datum.x[0]); 2073 2074 /* 2075 * Check for a NOP for the case when the datum to write is already 2076 * present - it saves time and works around buggy chips that corrupt 2077 * data at other locations when 0xff is written to a location that 2078 * already contains 0xff. 2079 */ 2080 oldd = map_read(map, adr); 2081 if (map_word_equal(map, oldd, datum)) { 2082 pr_debug("MTD %s(): NOP\n", __func__); 2083 goto op_done; 2084 } 2085 2086 ENABLE_VPP(map); 2087 2088 retry: 2089 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2090 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 2091 cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2092 map_write(map, datum, adr); 2093 2094 for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) { 2095 if (chip_ready(map, adr)) 2096 break; 2097 2098 udelay(1); 2099 } 2100 2101 if (!chip_good(map, adr, datum)) { 2102 /* reset on all failures. */ 2103 map_write(map, CMD(0xF0), chip->start); 2104 /* FIXME - should have reset delay before continuing */ 2105 2106 if (++retry_cnt <= MAX_WORD_RETRIES) 2107 goto retry; 2108 2109 ret = -EIO; 2110 } 2111 2112 op_done: 2113 DISABLE_VPP(map); 2114 return ret; 2115 } 2116 2117 /* 2118 * Write out some data during a kernel panic 2119 * 2120 * This is used by the mtdoops driver to save the dying messages from a 2121 * kernel which has panic'd. 2122 * 2123 * This routine ignores all of the locking used throughout the rest of the 2124 * driver, in order to ensure that the data gets written out no matter what 2125 * state this driver (and the flash chip itself) was in when the kernel crashed. 
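 *
 * As an illustrative (not authoritative) sketch of how this path is
 * normally reached: mtdoops, while the kernel is panicking, calls the
 * generic MTD helper
 *
 *	mtd_panic_write(mtd, to, len, &retlen, buf);
 *
 * which dispatches to the ->_panic_write() hook that this driver
 * installs, i.e. this function.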
2126 * 2127 * The implementation of this routine is intentionally similar to 2128 * cfi_amdstd_write_words(), in order to ease code maintenance. 2129 */ 2130 static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, 2131 size_t *retlen, const u_char *buf) 2132 { 2133 struct map_info *map = mtd->priv; 2134 struct cfi_private *cfi = map->fldrv_priv; 2135 unsigned long ofs, chipstart; 2136 int ret = 0; 2137 int chipnum; 2138 2139 chipnum = to >> cfi->chipshift; 2140 ofs = to - (chipnum << cfi->chipshift); 2141 chipstart = cfi->chips[chipnum].start; 2142 2143 /* If it's not bus aligned, do the first byte write */ 2144 if (ofs & (map_bankwidth(map) - 1)) { 2145 unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1); 2146 int i = ofs - bus_ofs; 2147 int n = 0; 2148 map_word tmp_buf; 2149 2150 ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs); 2151 if (ret) 2152 return ret; 2153 2154 /* Load 'tmp_buf' with old contents of flash */ 2155 tmp_buf = map_read(map, bus_ofs + chipstart); 2156 2157 /* Number of bytes to copy from buffer */ 2158 n = min_t(int, len, map_bankwidth(map) - i); 2159 2160 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n); 2161 2162 ret = do_panic_write_oneword(map, &cfi->chips[chipnum], 2163 bus_ofs, tmp_buf); 2164 if (ret) 2165 return ret; 2166 2167 ofs += n; 2168 buf += n; 2169 (*retlen) += n; 2170 len -= n; 2171 2172 if (ofs >> cfi->chipshift) { 2173 chipnum++; 2174 ofs = 0; 2175 if (chipnum == cfi->numchips) 2176 return 0; 2177 } 2178 } 2179 2180 /* We are now aligned, write as much as possible */ 2181 while (len >= map_bankwidth(map)) { 2182 map_word datum; 2183 2184 datum = map_word_load(map, buf); 2185 2186 ret = do_panic_write_oneword(map, &cfi->chips[chipnum], 2187 ofs, datum); 2188 if (ret) 2189 return ret; 2190 2191 ofs += map_bankwidth(map); 2192 buf += map_bankwidth(map); 2193 (*retlen) += map_bankwidth(map); 2194 len -= map_bankwidth(map); 2195 2196 if (ofs >> cfi->chipshift) { 2197 chipnum++; 2198 ofs = 0; 2199 if (chipnum == cfi->numchips) 2200 return 0; 2201 2202 chipstart = cfi->chips[chipnum].start; 2203 } 2204 } 2205 2206 /* Write the trailing bytes if any */ 2207 if (len & (map_bankwidth(map) - 1)) { 2208 map_word tmp_buf; 2209 2210 ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs); 2211 if (ret) 2212 return ret; 2213 2214 tmp_buf = map_read(map, ofs + chipstart); 2215 2216 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len); 2217 2218 ret = do_panic_write_oneword(map, &cfi->chips[chipnum], 2219 ofs, tmp_buf); 2220 if (ret) 2221 return ret; 2222 2223 (*retlen) += len; 2224 } 2225 2226 return 0; 2227 } 2228 2229 2230 /* 2231 * Handle devices with one erase region, that only implement 2232 * the chip erase command. 
2233 */ 2234 static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) 2235 { 2236 struct cfi_private *cfi = map->fldrv_priv; 2237 unsigned long timeo = jiffies + HZ; 2238 unsigned long int adr; 2239 DECLARE_WAITQUEUE(wait, current); 2240 int ret = 0; 2241 2242 adr = cfi->addr_unlock1; 2243 2244 mutex_lock(&chip->mutex); 2245 ret = get_chip(map, chip, adr, FL_WRITING); 2246 if (ret) { 2247 mutex_unlock(&chip->mutex); 2248 return ret; 2249 } 2250 2251 pr_debug("MTD %s(): ERASE 0x%.8lx\n", 2252 __func__, chip->start ); 2253 2254 XIP_INVAL_CACHED_RANGE(map, adr, map->size); 2255 ENABLE_VPP(map); 2256 xip_disable(map, chip, adr); 2257 2258 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2259 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 2260 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2261 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2262 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 2263 cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2264 2265 chip->state = FL_ERASING; 2266 chip->erase_suspended = 0; 2267 chip->in_progress_block_addr = adr; 2268 2269 INVALIDATE_CACHE_UDELAY(map, chip, 2270 adr, map->size, 2271 chip->erase_time*500); 2272 2273 timeo = jiffies + (HZ*20); 2274 2275 for (;;) { 2276 if (chip->state != FL_ERASING) { 2277 /* Someone's suspended the erase. Sleep */ 2278 set_current_state(TASK_UNINTERRUPTIBLE); 2279 add_wait_queue(&chip->wq, &wait); 2280 mutex_unlock(&chip->mutex); 2281 schedule(); 2282 remove_wait_queue(&chip->wq, &wait); 2283 mutex_lock(&chip->mutex); 2284 continue; 2285 } 2286 if (chip->erase_suspended) { 2287 /* This erase was suspended and resumed. 2288 Adjust the timeout */ 2289 timeo = jiffies + (HZ*20); /* FIXME */ 2290 chip->erase_suspended = 0; 2291 } 2292 2293 if (chip_ready(map, adr)) 2294 break; 2295 2296 if (time_after(jiffies, timeo)) { 2297 printk(KERN_WARNING "MTD %s(): software timeout\n", 2298 __func__ ); 2299 break; 2300 } 2301 2302 /* Latency issues. Drop the lock, wait a while and retry */ 2303 UDELAY(map, chip, adr, 1000000/HZ); 2304 } 2305 /* Did we succeed? */ 2306 if (!chip_good(map, adr, map_word_ff(map))) { 2307 /* reset on all failures. 
*/ 2308 map_write( map, CMD(0xF0), chip->start ); 2309 /* FIXME - should have reset delay before continuing */ 2310 2311 ret = -EIO; 2312 } 2313 2314 chip->state = FL_READY; 2315 xip_enable(map, chip, adr); 2316 DISABLE_VPP(map); 2317 put_chip(map, chip, adr); 2318 mutex_unlock(&chip->mutex); 2319 2320 return ret; 2321 } 2322 2323 2324 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk) 2325 { 2326 struct cfi_private *cfi = map->fldrv_priv; 2327 unsigned long timeo = jiffies + HZ; 2328 DECLARE_WAITQUEUE(wait, current); 2329 int ret = 0; 2330 2331 adr += chip->start; 2332 2333 mutex_lock(&chip->mutex); 2334 ret = get_chip(map, chip, adr, FL_ERASING); 2335 if (ret) { 2336 mutex_unlock(&chip->mutex); 2337 return ret; 2338 } 2339 2340 pr_debug("MTD %s(): ERASE 0x%.8lx\n", 2341 __func__, adr ); 2342 2343 XIP_INVAL_CACHED_RANGE(map, adr, len); 2344 ENABLE_VPP(map); 2345 xip_disable(map, chip, adr); 2346 2347 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2348 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 2349 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2350 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2351 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 2352 map_write(map, cfi->sector_erase_cmd, adr); 2353 2354 chip->state = FL_ERASING; 2355 chip->erase_suspended = 0; 2356 chip->in_progress_block_addr = adr; 2357 2358 INVALIDATE_CACHE_UDELAY(map, chip, 2359 adr, len, 2360 chip->erase_time*500); 2361 2362 timeo = jiffies + (HZ*20); 2363 2364 for (;;) { 2365 if (chip->state != FL_ERASING) { 2366 /* Someone's suspended the erase. Sleep */ 2367 set_current_state(TASK_UNINTERRUPTIBLE); 2368 add_wait_queue(&chip->wq, &wait); 2369 mutex_unlock(&chip->mutex); 2370 schedule(); 2371 remove_wait_queue(&chip->wq, &wait); 2372 mutex_lock(&chip->mutex); 2373 continue; 2374 } 2375 if (chip->erase_suspended) { 2376 /* This erase was suspended and resumed. 2377 Adjust the timeout */ 2378 timeo = jiffies + (HZ*20); /* FIXME */ 2379 chip->erase_suspended = 0; 2380 } 2381 2382 if (chip_ready(map, adr)) { 2383 xip_enable(map, chip, adr); 2384 break; 2385 } 2386 2387 if (time_after(jiffies, timeo)) { 2388 xip_enable(map, chip, adr); 2389 printk(KERN_WARNING "MTD %s(): software timeout\n", 2390 __func__ ); 2391 break; 2392 } 2393 2394 /* Latency issues. Drop the lock, wait a while and retry */ 2395 UDELAY(map, chip, adr, 1000000/HZ); 2396 } 2397 /* Did we succeed? */ 2398 if (!chip_good(map, adr, map_word_ff(map))) { 2399 /* reset on all failures. 
	 */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}


static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}


static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	if (instr->addr != 0)
		return -EINVAL;

	if (instr->len != mtd->size)
		return -EINVAL;

	ret = do_erase_chip(map, &cfi->chips[0]);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

static int do_atmel_lock(struct map_info *map, struct flchip *chip,
			 unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_LOCKING;

	pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x40), chip->start + adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

 out_unlock:
	mutex_unlock(&chip->mutex);
	return ret;
}

static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
			   unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_UNLOCKING;

	pr_debug("MTD %s(): UNLOCK 0x%08lx len %d\n", __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x70), adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

 out_unlock:
	mutex_unlock(&chip->mutex);
	return ret;
}

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
}

static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
}

/*
 * Advanced Sector Protection - PPB (Persistent Protection Bit) locking
 */

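/*
 * Both the Atmel handlers above and the PPB handlers below are reached
 * through the generic MTD (un)lock interface rather than being called
 * directly. A minimal illustrative sketch (the mtd pointer and the
 * 128KiB sector size are hypothetical):
 *
 *	uint64_t sector = 128 * 1024;
 *
 *	err = mtd_lock(mtd, 0, sector);		// protect the first sector
 *	locked = mtd_is_locked(mtd, 0, sector);	// > 0 if still protected
 *	err = mtd_unlock(mtd, 0, sector);	// note: PPB unlock clears the
 *						// whole chip, then re-locks
 *						// the other sectors
 */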
struct ppb_lock { 2532 struct flchip *chip; 2533 loff_t offset; 2534 int locked; 2535 }; 2536 2537 #define MAX_SECTORS 512 2538 2539 #define DO_XXLOCK_ONEBLOCK_LOCK ((void *)1) 2540 #define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *)2) 2541 #define DO_XXLOCK_ONEBLOCK_GETLOCK ((void *)3) 2542 2543 static int __maybe_unused do_ppb_xxlock(struct map_info *map, 2544 struct flchip *chip, 2545 unsigned long adr, int len, void *thunk) 2546 { 2547 struct cfi_private *cfi = map->fldrv_priv; 2548 unsigned long timeo; 2549 int ret; 2550 2551 mutex_lock(&chip->mutex); 2552 ret = get_chip(map, chip, adr + chip->start, FL_LOCKING); 2553 if (ret) { 2554 mutex_unlock(&chip->mutex); 2555 return ret; 2556 } 2557 2558 pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n", __func__, adr, len); 2559 2560 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 2561 cfi->device_type, NULL); 2562 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 2563 cfi->device_type, NULL); 2564 /* PPB entry command */ 2565 cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi, 2566 cfi->device_type, NULL); 2567 2568 if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) { 2569 chip->state = FL_LOCKING; 2570 map_write(map, CMD(0xA0), chip->start + adr); 2571 map_write(map, CMD(0x00), chip->start + adr); 2572 } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) { 2573 /* 2574 * Unlocking of one specific sector is not supported, so we 2575 * have to unlock all sectors of this device instead 2576 */ 2577 chip->state = FL_UNLOCKING; 2578 map_write(map, CMD(0x80), chip->start); 2579 map_write(map, CMD(0x30), chip->start); 2580 } else if (thunk == DO_XXLOCK_ONEBLOCK_GETLOCK) { 2581 chip->state = FL_JEDEC_QUERY; 2582 /* Return locked status: 0->locked, 1->unlocked */ 2583 ret = !cfi_read_query(map, adr); 2584 } else 2585 BUG(); 2586 2587 /* 2588 * Wait for some time as unlocking of all sectors takes quite long 2589 */ 2590 timeo = jiffies + msecs_to_jiffies(2000); /* 2s max (un)locking */ 2591 for (;;) { 2592 if (chip_ready(map, adr)) 2593 break; 2594 2595 if (time_after(jiffies, timeo)) { 2596 printk(KERN_ERR "Waiting for chip to be ready timed out.\n"); 2597 ret = -EIO; 2598 break; 2599 } 2600 2601 UDELAY(map, chip, adr, 1); 2602 } 2603 2604 /* Exit BC commands */ 2605 map_write(map, CMD(0x90), chip->start); 2606 map_write(map, CMD(0x00), chip->start); 2607 2608 chip->state = FL_READY; 2609 put_chip(map, chip, adr + chip->start); 2610 mutex_unlock(&chip->mutex); 2611 2612 return ret; 2613 } 2614 2615 static int __maybe_unused cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, 2616 uint64_t len) 2617 { 2618 return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len, 2619 DO_XXLOCK_ONEBLOCK_LOCK); 2620 } 2621 2622 static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, 2623 uint64_t len) 2624 { 2625 struct mtd_erase_region_info *regions = mtd->eraseregions; 2626 struct map_info *map = mtd->priv; 2627 struct cfi_private *cfi = map->fldrv_priv; 2628 struct ppb_lock *sect; 2629 unsigned long adr; 2630 loff_t offset; 2631 uint64_t length; 2632 int chipnum; 2633 int i; 2634 int sectors; 2635 int ret; 2636 2637 /* 2638 * PPB unlocking always unlocks all sectors of the flash chip. 2639 * We need to re-lock all previously locked sectors. So lets 2640 * first check the locking status of all sectors and save 2641 * it for future use. 
2642 */ 2643 sect = kzalloc(MAX_SECTORS * sizeof(struct ppb_lock), GFP_KERNEL); 2644 if (!sect) 2645 return -ENOMEM; 2646 2647 /* 2648 * This code to walk all sectors is a slightly modified version 2649 * of the cfi_varsize_frob() code. 2650 */ 2651 i = 0; 2652 chipnum = 0; 2653 adr = 0; 2654 sectors = 0; 2655 offset = 0; 2656 length = mtd->size; 2657 2658 while (length) { 2659 int size = regions[i].erasesize; 2660 2661 /* 2662 * Only test sectors that shall not be unlocked. The other 2663 * sectors shall be unlocked, so lets keep their locking 2664 * status at "unlocked" (locked=0) for the final re-locking. 2665 */ 2666 if ((adr < ofs) || (adr >= (ofs + len))) { 2667 sect[sectors].chip = &cfi->chips[chipnum]; 2668 sect[sectors].offset = offset; 2669 sect[sectors].locked = do_ppb_xxlock( 2670 map, &cfi->chips[chipnum], adr, 0, 2671 DO_XXLOCK_ONEBLOCK_GETLOCK); 2672 } 2673 2674 adr += size; 2675 offset += size; 2676 length -= size; 2677 2678 if (offset == regions[i].offset + size * regions[i].numblocks) 2679 i++; 2680 2681 if (adr >> cfi->chipshift) { 2682 adr = 0; 2683 chipnum++; 2684 2685 if (chipnum >= cfi->numchips) 2686 break; 2687 } 2688 2689 sectors++; 2690 if (sectors >= MAX_SECTORS) { 2691 printk(KERN_ERR "Only %d sectors for PPB locking supported!\n", 2692 MAX_SECTORS); 2693 kfree(sect); 2694 return -EINVAL; 2695 } 2696 } 2697 2698 /* Now unlock the whole chip */ 2699 ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len, 2700 DO_XXLOCK_ONEBLOCK_UNLOCK); 2701 if (ret) { 2702 kfree(sect); 2703 return ret; 2704 } 2705 2706 /* 2707 * PPB unlocking always unlocks all sectors of the flash chip. 2708 * We need to re-lock all previously locked sectors. 2709 */ 2710 for (i = 0; i < sectors; i++) { 2711 if (sect[i].locked) 2712 do_ppb_xxlock(map, sect[i].chip, sect[i].offset, 0, 2713 DO_XXLOCK_ONEBLOCK_LOCK); 2714 } 2715 2716 kfree(sect); 2717 return ret; 2718 } 2719 2720 static int __maybe_unused cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, 2721 uint64_t len) 2722 { 2723 return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len, 2724 DO_XXLOCK_ONEBLOCK_GETLOCK) ? 1 : 0; 2725 } 2726 2727 static void cfi_amdstd_sync (struct mtd_info *mtd) 2728 { 2729 struct map_info *map = mtd->priv; 2730 struct cfi_private *cfi = map->fldrv_priv; 2731 int i; 2732 struct flchip *chip; 2733 int ret = 0; 2734 DECLARE_WAITQUEUE(wait, current); 2735 2736 for (i=0; !ret && i<cfi->numchips; i++) { 2737 chip = &cfi->chips[i]; 2738 2739 retry: 2740 mutex_lock(&chip->mutex); 2741 2742 switch(chip->state) { 2743 case FL_READY: 2744 case FL_STATUS: 2745 case FL_CFI_QUERY: 2746 case FL_JEDEC_QUERY: 2747 chip->oldstate = chip->state; 2748 chip->state = FL_SYNCING; 2749 /* No need to wake_up() on this state change - 2750 * as the whole point is that nobody can do anything 2751 * with the chip now anyway. 
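			 *
			 * Note that control deliberately falls through to
			 * the FL_SYNCING case below.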
2752 */ 2753 case FL_SYNCING: 2754 mutex_unlock(&chip->mutex); 2755 break; 2756 2757 default: 2758 /* Not an idle state */ 2759 set_current_state(TASK_UNINTERRUPTIBLE); 2760 add_wait_queue(&chip->wq, &wait); 2761 2762 mutex_unlock(&chip->mutex); 2763 2764 schedule(); 2765 2766 remove_wait_queue(&chip->wq, &wait); 2767 2768 goto retry; 2769 } 2770 } 2771 2772 /* Unlock the chips again */ 2773 2774 for (i--; i >=0; i--) { 2775 chip = &cfi->chips[i]; 2776 2777 mutex_lock(&chip->mutex); 2778 2779 if (chip->state == FL_SYNCING) { 2780 chip->state = chip->oldstate; 2781 wake_up(&chip->wq); 2782 } 2783 mutex_unlock(&chip->mutex); 2784 } 2785 } 2786 2787 2788 static int cfi_amdstd_suspend(struct mtd_info *mtd) 2789 { 2790 struct map_info *map = mtd->priv; 2791 struct cfi_private *cfi = map->fldrv_priv; 2792 int i; 2793 struct flchip *chip; 2794 int ret = 0; 2795 2796 for (i=0; !ret && i<cfi->numchips; i++) { 2797 chip = &cfi->chips[i]; 2798 2799 mutex_lock(&chip->mutex); 2800 2801 switch(chip->state) { 2802 case FL_READY: 2803 case FL_STATUS: 2804 case FL_CFI_QUERY: 2805 case FL_JEDEC_QUERY: 2806 chip->oldstate = chip->state; 2807 chip->state = FL_PM_SUSPENDED; 2808 /* No need to wake_up() on this state change - 2809 * as the whole point is that nobody can do anything 2810 * with the chip now anyway. 2811 */ 2812 case FL_PM_SUSPENDED: 2813 break; 2814 2815 default: 2816 ret = -EAGAIN; 2817 break; 2818 } 2819 mutex_unlock(&chip->mutex); 2820 } 2821 2822 /* Unlock the chips again */ 2823 2824 if (ret) { 2825 for (i--; i >=0; i--) { 2826 chip = &cfi->chips[i]; 2827 2828 mutex_lock(&chip->mutex); 2829 2830 if (chip->state == FL_PM_SUSPENDED) { 2831 chip->state = chip->oldstate; 2832 wake_up(&chip->wq); 2833 } 2834 mutex_unlock(&chip->mutex); 2835 } 2836 } 2837 2838 return ret; 2839 } 2840 2841 2842 static void cfi_amdstd_resume(struct mtd_info *mtd) 2843 { 2844 struct map_info *map = mtd->priv; 2845 struct cfi_private *cfi = map->fldrv_priv; 2846 int i; 2847 struct flchip *chip; 2848 2849 for (i=0; i<cfi->numchips; i++) { 2850 2851 chip = &cfi->chips[i]; 2852 2853 mutex_lock(&chip->mutex); 2854 2855 if (chip->state == FL_PM_SUSPENDED) { 2856 chip->state = FL_READY; 2857 map_write(map, CMD(0xF0), chip->start); 2858 wake_up(&chip->wq); 2859 } 2860 else 2861 printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n"); 2862 2863 mutex_unlock(&chip->mutex); 2864 } 2865 } 2866 2867 2868 /* 2869 * Ensure that the flash device is put back into read array mode before 2870 * unloading the driver or rebooting. On some systems, rebooting while 2871 * the flash is in query/program/erase mode will prevent the CPU from 2872 * fetching the bootloader code, requiring a hard reset or power cycle. 
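 *
 * The reset below is therefore wired up twice: cfi_amdstd_reboot() runs
 * it from the reboot notifier hung off mtd->reboot_notifier, and
 * cfi_amdstd_destroy() runs it once more when the driver is torn down.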
 */
static int cfi_amdstd_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;
	struct flchip *chip;

	for (i = 0; i < cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
		if (!ret) {
			map_write(map, CMD(0xF0), chip->start);
			chip->state = FL_SHUTDOWN;
			put_chip(map, chip, chip->start);
		}

		mutex_unlock(&chip->mutex);
	}

	return 0;
}


static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
			     void *v)
{
	struct mtd_info *mtd;

	mtd = container_of(nb, struct mtd_info, reboot_notifier);
	cfi_amdstd_reset(mtd);
	return NOTIFY_DONE;
}


static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi_amdstd_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi);
	kfree(mtd->eraseregions);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
MODULE_ALIAS("cfi_cmdset_0006");
MODULE_ALIAS("cfi_cmdset_0701");
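
/*
 * For reference, a minimal illustrative sketch (not part of this driver)
 * of how cfi_cmdset_0002() is normally reached: a map driver describes
 * the flash window in a struct map_info and runs the generic CFI probe,
 * which picks this command set when the chip reports primary vendor
 * command set 0x0002 (or one of the aliases above). The map and its
 * fields here are hypothetical:
 *
 *	struct mtd_info *mtd;
 *
 *	simple_map_init(&my_map);
 *	mtd = do_map_probe("cfi_probe", &my_map);
 *	if (mtd)
 *		mtd_device_register(mtd, NULL, 0);
 */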