/*
 * Common Flash Interface support:
 * AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 *
 * This code is GPL
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>

#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_WORD_RETRIES 3

#define SST49LF004B	0x0060
#define SST49LF040B	0x0050
#define SST49LF008A	0x005a
#define AT49BV6416	0x00d6

static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
static int cfi_amdstd_get_fact_prot_info(struct mtd_info *, size_t,
					 size_t *, struct otp_info *);
static int cfi_amdstd_get_user_prot_info(struct mtd_info *, size_t,
					 size_t *, struct otp_info *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *, loff_t, size_t,
					 size_t *, u_char *);
static int cfi_amdstd_read_user_prot_reg(struct mtd_info *, loff_t, size_t,
					 size_t *, u_char *);
static int cfi_amdstd_write_user_prot_reg(struct mtd_info *, loff_t, size_t,
					  size_t *, u_char *);
static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *, loff_t, size_t);

static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static int cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};


/* #define DEBUG_CFI_FEATURES */


#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	const char* erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char* top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	printk(" Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk(" Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");

	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
		printk(" Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
	else
		printk(" Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

	if (extp->BlkProt == 0)
		printk(" Block protection: Not supported\n");
	else
		printk(" Block protection: %d sectors per group\n", extp->BlkProt);


	printk(" Temporary block unprotect: %s\n",
	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
	printk(" Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
	printk(" Number of simultaneous operations: %d\n", extp->SimultaneousOps);
	printk(" Burst mode: %s\n",
	       extp->BurstMode ? "Supported" : "Not supported");
	if (extp->PageMode == 0)
		printk(" Page mode: Not supported\n");
	else
		printk(" Page mode: %d word page\n", extp->PageMode << 2);

	printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMin >> 4, extp->VppMin & 0xf);
	printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMax >> 4, extp->VppMax & 0xf);

	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
		printk(" Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
	else
		printk(" Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif

#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
static void fixup_amd_bootblock(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	__u8 major = extp->MajorVersion;
	__u8 minor = extp->MinorVersion;

	if (((major << 8) | minor) < 0x3131) {
		/* CFI version 1.0 => don't trust bootloc */

		pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
			map->name, cfi->mfr, cfi->id);

		/* AFAICS all 29LV400 with a bottom boot block have a device ID
		 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
		 * These were badly detected as they have the 0x80 bit set
		 * so treat them as a special case.
		 */
		if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&

			/* Macronix added CFI to their 2nd generation
			 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
			 * Fujitsu, Spansion, EON, ESI and older Macronix)
			 * has CFI.
			 *
			 * Therefore also check the manufacturer.
			 * This reduces the risk of false detection due to
			 * the 8-bit device ID.
			 */
			(cfi->mfr == CFI_MFR_MACRONIX)) {
			pr_debug("%s: Macronix MX29LV400C with bottom boot block"
				" detected\n", map->name);
			extp->TopBottom = 2;	/* bottom boot */
		} else
		if (cfi->id & 0x80) {
			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
			extp->TopBottom = 3;	/* top boot */
		} else {
			extp->TopBottom = 2;	/* bottom boot */
		}

		pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;"
			" deduced %s from Device ID\n", map->name, major, minor,
			extp->TopBottom == 2 ? "bottom" : "top");
	}
}
#endif

static void fixup_use_write_buffers(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		pr_debug("Using buffer write method\n" );
		mtd->_write = cfi_amdstd_write_buffers;
	}
}

/* Atmel chips don't use the same PRI format as AMD chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;

	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	if (atmel_pri.Features & 0x02)
		extp->EraseSuspend = 2;

	/* Some chips got it backwards... */
	if (cfi->id == AT49BV6416) {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 3;
		else
			extp->TopBottom = 2;
	} else {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 2;
		else
			extp->TopBottom = 3;
	}

	/* burst write mode not supported */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_use_secsi(struct mtd_info *mtd)
{
	/* Setup for chips with a secsi area */
	mtd->_read_user_prot_reg = cfi_amdstd_secsi_read;
	mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read;
}

static void fixup_use_erase_chip(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if ((cfi->cfiq->NumEraseRegions == 1) &&
	    ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
		mtd->_erase = cfi_amdstd_erase_chip;
	}

}

/*
 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
 * locked by default.
 */
static void fixup_use_atmel_lock(struct mtd_info *mtd)
{
	mtd->_lock = cfi_atmel_lock;
	mtd->_unlock = cfi_atmel_unlock;
	mtd->flags |= MTD_POWERUP_LOCK;
}

static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/*
	 * These flashes report two separate eraseblock regions based on the
	 * sector_erase-size and block_erase-size, although they both operate on the
	 * same memory. This is not allowed according to CFI, so we just pick the
	 * sector_erase-size.
	 */
	cfi->cfiq->NumEraseRegions = 1;
}

static void fixup_sst39vf(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_old_sst_eraseregion(mtd);

	cfi->addr_unlock1 = 0x5555;
	cfi->addr_unlock2 = 0x2AAA;
}

static void fixup_sst39vf_rev_b(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_old_sst_eraseregion(mtd);

	cfi->addr_unlock1 = 0x555;
	cfi->addr_unlock2 = 0x2AA;

	cfi->sector_erase_cmd = CMD(0x50);
}

static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_sst39vf_rev_b(mtd);

	/*
	 * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where
	 * it should report a size of 8KBytes (0x0020*256).
	 */
	cfi->cfiq->EraseRegionInfo[0] = 0x002003ff;
	pr_warn("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n",
		mtd->name);
}

static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
		cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
		pr_warn("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n",
			mtd->name);
	}
}

static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
		cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
		pr_warn("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n",
			mtd->name);
	}
}

static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/*
	 * S29NS512P flash uses more than 8bits to report number of sectors,
	 * which is not permitted by CFI.
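	 * (EraseRegionInfo packs a region as: bits 0-15 = number of sectors
	 * minus 1, bits 16-31 = sector size divided by 256, so the value
	 * 0x020001ff used below means 512 sectors of 128 KiB.)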
	 */
	cfi->cfiq->EraseRegionInfo[0] = 0x020001ff;
	pr_warn("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n",
		mtd->name);
}

/* Used to fix CFI-Tables of chips without Extended Query Tables */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
	{ CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
	{ CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */
	{ CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */
	{ CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */
	{ CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */
	{ CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */
	{ CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */
	{ CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */
	{ 0, 0, NULL }
};

static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock },
	{ CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock },
	{ CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock },
#endif
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors },
	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
	{ CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors },
	{ CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
	{ CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
	{ CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
	{ CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
	{ 0, 0, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
	{ CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock },
	{ CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock },
	{ CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock },
	{ 0, 0, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear
	 * to be common.  It looks like the device IDs are as
	 * well.  This table is to pick all cases where
	 * we know that is the case.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip },
	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock },
	{ 0, 0, NULL }
};


static void cfi_fixup_major_minor(struct cfi_private *cfi,
				  struct cfi_pri_amdstd *extp)
{
	if (cfi->mfr == CFI_MFR_SAMSUNG) {
		if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') ||
		    (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
			/*
			 * Samsung K8P2815UQB and K8D6x16UxM chips
			 * report major=0 / minor=0.
			 * K8D3x16UxC chips report major=3 / minor=3.
			 */
			printk(KERN_NOTICE " Fixing Samsung's Amd/Fujitsu"
			       " Extended Query version to 1.%c\n",
			       extp->MinorVersion);
			extp->MajorVersion = '1';
		}
	}

	/*
	 * SST 38VF640x chips report major=0xFF / minor=0xFF.
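	 * Fake a valid 1.0 version so that the extended query table still
	 * passes the version check in cfi_cmdset_0002().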
	 */
	if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) {
		extp->MajorVersion = '1';
		extp->MinorVersion = '0';
	}
}

static int is_m29ew(struct cfi_private *cfi)
{
	if (cfi->mfr == CFI_MFR_INTEL &&
	    ((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) ||
	     (cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e)))
		return 1;
	return 0;
}

/*
 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20:
 * Some revisions of the M29EW suffer from erase suspend hang ups. In
 * particular, it can occur when the sequence
 * Erase Confirm -> Suspend -> Program -> Resume
 * causes a lockup due to internal timing issues. The consequence is that the
 * erase cannot be resumed without inserting a dummy command after programming
 * and prior to resuming. [...] The work-around is to issue a dummy write cycle
 * that writes an F0 command code before the RESUME command.
 */
static void cfi_fixup_m29ew_erase_suspend(struct map_info *map,
					  unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	/* before resume, insert a dummy 0xF0 cycle for Micron M29EW devices */
	if (is_m29ew(cfi))
		map_write(map, CMD(0xF0), adr);
}

/*
 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22:
 *
 * Some revisions of the M29EW (for example, A1 and A2 step revisions)
 * are affected by a problem that could cause a hang up when an ERASE SUSPEND
 * command is issued after an ERASE RESUME operation without waiting for a
 * minimum delay. The result is that once the ERASE seems to be completed
 * (no bits are toggling), the contents of the Flash memory block on which
 * the erase was ongoing could be inconsistent with the expected values
 * (typically, the array value is stuck to the 0xC0, 0xC4, 0x80, or 0x84
 * values), causing a consequent failure of the ERASE operation.
 * The occurrence of this issue could be high, especially when file system
 * operations on the Flash are intensive. As a result, it is recommended
 * that a patch be applied. Intensive file system operations can cause many
 * calls to the garbage routine to free Flash space (also by erasing physical
 * Flash blocks) and as a result, many consecutive SUSPEND and RESUME
 * commands can occur. The problem disappears when a delay is inserted after
 * the RESUME command by using the udelay() function available in Linux.
 * The DELAY value must be tuned based on the customer's platform.
 * The maximum value that fixes the problem in all cases is 500us.
 * But, in our experience, a delay of 30 µs to 50 µs is sufficient
 * in most cases.
 * We have chosen 500µs because this latency is acceptable.
 */
static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
{
	/*
	 * Resolving the Delay After Resume Issue see Micron TN-13-07
	 * Worst case delay must be 500µs but 30-50µs should be ok as well
	 */
	if (is_m29ew(cfi))
		cfi_udelay(500);
}

struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct device_node __maybe_unused *np = map->device_node;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd)
		return NULL;
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->_erase = cfi_amdstd_erase_varsize;
	mtd->_write = cfi_amdstd_write_words;
	mtd->_read = cfi_amdstd_read;
	mtd->_sync = cfi_amdstd_sync;
	mtd->_suspend = cfi_amdstd_suspend;
	mtd->_resume = cfi_amdstd_resume;
	mtd->_read_user_prot_reg = cfi_amdstd_read_user_prot_reg;
	mtd->_read_fact_prot_reg = cfi_amdstd_read_fact_prot_reg;
	mtd->_get_fact_prot_info = cfi_amdstd_get_fact_prot_info;
	mtd->_get_user_prot_info = cfi_amdstd_get_user_prot_info;
	mtd->_write_user_prot_reg = cfi_amdstd_write_user_prot_reg;
	mtd->_lock_user_prot_reg = cfi_amdstd_lock_user_prot_reg;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->name = map->name;
	mtd->writesize = 1;
	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;

	pr_debug("MTD %s(): write buffer size %d\n", __func__,
		 mtd->writebufsize);

	mtd->_panic_write = cfi_amdstd_panic_write;
	mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;

	if (cfi->cfi_mode==CFI_MODE_CFI){
		unsigned char bootloc;
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (extp) {
			/*
			 * It's a real CFI chip, not one for which the probe
			 * routine faked a CFI structure.
			 */
			cfi_fixup_major_minor(cfi, extp);

			/*
			 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5
			 * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
			 *      http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
			 *      http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
			 *      http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf
			 */
			if (extp->MajorVersion != '1' ||
			    (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) {
				printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query "
				       "version %c.%c (%#02x/%#02x).\n",
				       extp->MajorVersion, extp->MinorVersion,
				       extp->MajorVersion, extp->MinorVersion);
				kfree(extp);
				kfree(mtd);
				return NULL;
			}

			printk(KERN_INFO " Amd/Fujitsu Extended Query version %c.%c.\n",
			       extp->MajorVersion, extp->MinorVersion);

			/* Install our own private info structure */
			cfi->cmdset_priv = extp;

			/* Apply cfi device specific fixups */
			cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
			/* Tell the user about it in lots of lovely detail */
			cfi_tell_features(extp);
#endif

#ifdef CONFIG_OF
			if (np && of_property_read_bool(
				    np, "use-advanced-sector-protection")
			    && extp->BlkProtUnprot == 8) {
				printk(KERN_INFO " Advanced Sector Protection (PPB Locking) supported\n");
				mtd->_lock = cfi_ppb_lock;
				mtd->_unlock = cfi_ppb_unlock;
				mtd->_is_locked = cfi_ppb_is_locked;
			}
#endif

			bootloc = extp->TopBottom;
			if ((bootloc < 2) || (bootloc > 5)) {
				printk(KERN_WARNING "%s: CFI contains unrecognised boot "
				       "bank location (%d). Assuming bottom.\n",
				       map->name, bootloc);
				bootloc = 2;
			}

			if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
				printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);

				for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
					int j = (cfi->cfiq->NumEraseRegions-1)-i;

					swap(cfi->cfiq->EraseRegionInfo[i],
					     cfi->cfiq->EraseRegionInfo[j]);
				}
			}
			/* Set the default CFI lock/unlock addresses */
			cfi->addr_unlock1 = 0x555;
			cfi->addr_unlock2 = 0x2aa;
		}
		cfi_fixup(mtd, cfi_nopri_fixup_table);

		if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
			kfree(mtd);
			return NULL;
		}

	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		/*
		 * First calculate the timeout max according to timeout field
		 * of struct cfi_ident that probed from chip's CFI area, if
		 * available. Specify a minimum of 2000us, in case the CFI data
		 * is wrong.
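		 * (For example, BufWriteTimeoutTyp = 7 and BufWriteTimeoutMax = 3
		 * would give a ceiling of 1 << (7 + 3) = 1024 us.)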
		 */
		if (cfi->cfiq->BufWriteTimeoutTyp &&
		    cfi->cfiq->BufWriteTimeoutMax)
			cfi->chips[i].buffer_write_time_max =
				1 << (cfi->cfiq->BufWriteTimeoutTyp +
				      cfi->cfiq->BufWriteTimeoutMax);
		else
			cfi->chips[i].buffer_write_time_max = 0;

		cfi->chips[i].buffer_write_time_max =
			max(cfi->chips[i].buffer_write_time_max, 2000);

		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}
struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
EXPORT_SYMBOL_GPL(cfi_cmdset_0701);

static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i,j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
				    * mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions)
		goto setup_err;

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	kfree(mtd->eraseregions);
	kfree(mtd);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	return NULL;
}

/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, unsigned long addr)
{
	map_word d, t;

	d = map_read(map, addr);
	t = map_read(map, addr);

	return map_word_equal(map, d, t);
}

/*
 * Return true if the chip is ready and has the correct value.
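 * ("Correct value" means that two successive reads of the address return
 * identical data and that data equals the expected word, e.g. the datum
 * that was just programmed.)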
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and it is indicated by no bits toggling.
 *
 * Errors are indicated by toggling bits, or by bits held at the wrong value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 *
 */
static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
{
	map_word oldd, curd;

	oldd = map_read(map, addr);
	curd = map_read(map, addr);

	return	map_word_equal(map, oldd, curd) &&
		map_word_equal(map, curd, expected);
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				return -EIO;
			}
			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		    (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
			goto sleep;

		/* We could check to see if we're trying to access the sector
		 * that is currently being erased. However, no user will try
		 * anything like that so we just wait for the timeout. */

		/* Erase suspend */
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we are trying
				 * to use the erase-in-progress sector. */
				put_chip(map, chip, adr);
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend&2)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_SHUTDOWN:
		/* The machine is rebooting */
		return -EIO;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		goto resettime;
	}
}


static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch(chip->oldstate) {
	case FL_ERASING:
		cfi_fixup_m29ew_erase_suspend(map,
			chip->in_progress_block_addr);
		map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
		cfi_fixup_m29ew_delay_after_resume(cfi);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode. This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Also, configuring MTD CFI
 * support to a single buswidth and a single interleave is recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function is polling for both the given timeout and pending
 * (but still masked) hardware interrupts. Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked. Task scheduling might also happen at that
 * point. The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate;

	do {
		cpu_relax();
		if (xip_irqpending() && extp &&
		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase operation when supported.
			 * Note that we currently don't try to suspend
			 * interleaved chips if there is already another
			 * operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once). Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (!map_word_bitsset(map, status, CMD(0x40)))
				break;
			chip->state = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
			map_write(map, CMD(0xf0), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			mutex_unlock(&chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back. However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state. If so let's wait
			 * until it's done.
			 */
			mutex_lock(&chip->mutex);
			while (chip->state != FL_XIP_WHILE_ERASING) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				mutex_unlock(&chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				mutex_lock(&chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Correct Erase Suspend Hangups for M29EW */
			cfi_fixup_m29ew_erase_suspend(map, adr);
			/* Resume the write or erase operation */
			map_write(map, cfi->sector_erase_cmd, adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway. We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit. For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with. This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple
 * xip_disable()'d areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#endif

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
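	/* map_bankwidth() is the bus width in bytes, so masking with
	 * ~(bankwidth - 1) rounds adr down to a bus-aligned address. */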
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	mutex_unlock(&chip->mutex);
	return 0;
}


static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip at which the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
			loff_t adr, size_t len, u_char *buf, size_t grouplen);

static inline void otp_enter(struct map_info *map, struct flchip *chip,
			     loff_t adr, size_t len)
{
	struct cfi_private *cfi = map->fldrv_priv;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);

	INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
}

static inline void otp_exit(struct map_info *map, struct flchip *chip,
			    loff_t adr, size_t len)
{
	struct cfi_private *cfi = map->fldrv_priv;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);

	INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
}

static inline int do_read_secsi_onechip(struct map_info *map,
					struct flchip *chip, loff_t adr,
					size_t len, u_char *buf,
					size_t grouplen)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;

 retry:
	mutex_lock(&chip->mutex);

	if (chip->state != FL_READY){
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		mutex_unlock(&chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

	otp_enter(map, chip, adr, len);
	map_copy_from(map, buf, adr, len);
	otp_exit(map, chip, adr, len);

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);

	return 0;
}

static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip at which the first read should start */
	/* 8 secsi bytes per chip */
	chipnum=from>>3;
	ofs=from & 7;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> 3)
			thislen = (1<<3) - ofs;
		else
			thislen = len;

		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs,
					    thislen, buf, 0);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum,
				     int mode);

static int do_otp_write(struct map_info *map, struct flchip *chip, loff_t adr,
			size_t len, u_char *buf, size_t grouplen)
{
	int ret;
	while (len) {
		unsigned long bus_ofs = adr & ~(map_bankwidth(map)-1);
		int gap = adr - bus_ofs;
		int n = min_t(int, len, map_bankwidth(map) - gap);
		map_word datum = map_word_ff(map);

		if (n != map_bankwidth(map)) {
			/* partial write of a word, load old contents */
			otp_enter(map, chip, bus_ofs, map_bankwidth(map));
			datum = map_read(map, bus_ofs);
			otp_exit(map, chip, bus_ofs, map_bankwidth(map));
		}

		datum = map_word_load_partial(map, datum, buf, gap, n);
		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
		if (ret)
			return ret;

		adr += n;
		buf += n;
		len -= n;
	}

	return 0;
}

static int do_otp_lock(struct map_info *map, struct flchip *chip, loff_t adr,
		       size_t len, u_char *buf, size_t grouplen)
{
	struct cfi_private *cfi = map->fldrv_priv;
	uint8_t lockreg;
	unsigned long timeo;
	int ret;

	/* make sure area matches group boundaries */
	if ((adr != 0) || (len != grouplen))
		return -EINVAL;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, chip->start, FL_LOCKING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}
	chip->state = FL_LOCKING;

	/* Enter lock register command */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x40, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);

	/* read lock register */
	lockreg = cfi_read_query(map, 0);

	/* set bit 0 to protect extended memory block */
	lockreg &= ~0x01;

	/* write lock register */
	map_write(map, CMD(0xA0), chip->start);
	map_write(map, CMD(lockreg), chip->start);

	/* wait for chip to become ready */
	timeo = jiffies + msecs_to_jiffies(2);
	for (;;) {
		if (chip_ready(map, adr))
			break;

		if (time_after(jiffies, timeo)) {
			pr_err("Waiting for chip to be ready timed out.\n");
			ret = -EIO;
			break;
		}
		UDELAY(map, chip, 0, 1);
	}

	/* exit protection commands */
	map_write(map, CMD(0x90), chip->start);
	map_write(map, CMD(0x00), chip->start);

	chip->state = FL_READY;
	put_chip(map, chip, chip->start);
	mutex_unlock(&chip->mutex);

	return ret;
}

static int cfi_amdstd_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
			       size_t *retlen, u_char *buf,
			       otp_op_t action, int user_regs)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ofs_factor = cfi->interleave * cfi->device_type;
	unsigned long base;
	int chipnum;
	struct flchip *chip;
	uint8_t otp, lockreg;
	int ret;

	size_t user_size, factory_size, otpsize;
	loff_t user_offset, factory_offset, otpoffset;
	int user_locked = 0, otplocked;

	*retlen = 0;

	for (chipnum = 0; chipnum < cfi->numchips; chipnum++) {
		chip = &cfi->chips[chipnum];
		factory_size = 0;
		user_size = 0;

		/* Micron M29EW family */
		if (is_m29ew(cfi)) {
			base = chip->start;

			/* check whether secsi area is factory locked
			   or user lockable */
			mutex_lock(&chip->mutex);
			ret = get_chip(map, chip, base, FL_CFI_QUERY);
			if (ret) {
				mutex_unlock(&chip->mutex);
				return ret;
			}
			cfi_qry_mode_on(base, map, cfi);
			otp = cfi_read_query(map, base + 0x3 * ofs_factor);
			cfi_qry_mode_off(base, map, cfi);
			put_chip(map, chip, base);
			mutex_unlock(&chip->mutex);

			if (otp & 0x80) {
				/* factory locked */
				factory_offset = 0;
				factory_size = 0x100;
			} else {
				/* customer lockable */
				user_offset = 0;
				user_size = 0x100;

				mutex_lock(&chip->mutex);
				ret = get_chip(map, chip, base, FL_LOCKING);
				if (ret) {
					mutex_unlock(&chip->mutex);
					return ret;
				}

				/* Enter lock register command */
				cfi_send_gen_cmd(0xAA, cfi->addr_unlock1,
						 chip->start, map, cfi,
						 cfi->device_type, NULL);
				cfi_send_gen_cmd(0x55, cfi->addr_unlock2,
						 chip->start, map, cfi,
						 cfi->device_type, NULL);
				cfi_send_gen_cmd(0x40, cfi->addr_unlock1,
						 chip->start, map, cfi,
						 cfi->device_type, NULL);
				/* read lock register */
				lockreg = cfi_read_query(map, 0);
				/* exit protection commands */
				map_write(map, CMD(0x90), chip->start);
				map_write(map, CMD(0x00), chip->start);
				put_chip(map, chip, chip->start);
				mutex_unlock(&chip->mutex);

				user_locked = ((lockreg & 0x01) == 0x00);
			}
		}

		otpsize = user_regs ? user_size : factory_size;
		if (!otpsize)
			continue;
		otpoffset = user_regs ? user_offset : factory_offset;
		otplocked = user_regs ? user_locked : 1;

		if (!action) {
			/* return otpinfo */
			struct otp_info *otpinfo;
			len -= sizeof(*otpinfo);
			if (len <= 0)
				return -ENOSPC;
			otpinfo = (struct otp_info *)buf;
			otpinfo->start = from;
			otpinfo->length = otpsize;
			otpinfo->locked = otplocked;
			buf += sizeof(*otpinfo);
			*retlen += sizeof(*otpinfo);
			from += otpsize;
		} else if ((from < otpsize) && (len > 0)) {
			size_t size;
			size = (len < otpsize - from) ? len : otpsize - from;
			ret = action(map, chip, otpoffset + from, size, buf,
				     otpsize);
			if (ret < 0)
				return ret;

			buf += size;
			len -= size;
			*retlen += size;
			from = 0;
		} else {
			from -= otpsize;
		}
	}
	return 0;
}

static int cfi_amdstd_get_fact_prot_info(struct mtd_info *mtd, size_t len,
					 size_t *retlen, struct otp_info *buf)
{
	return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
				   NULL, 0);
}

static int cfi_amdstd_get_user_prot_info(struct mtd_info *mtd, size_t len,
					 size_t *retlen, struct otp_info *buf)
{
	return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
				   NULL, 1);
}

static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
					 size_t len, size_t *retlen,
					 u_char *buf)
{
	return cfi_amdstd_otp_walk(mtd, from, len, retlen,
				   buf, do_read_secsi_onechip, 0);
}

static int cfi_amdstd_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
					 size_t len, size_t *retlen,
					 u_char *buf)
{
	return cfi_amdstd_otp_walk(mtd, from, len, retlen,
				   buf, do_read_secsi_onechip, 1);
}

static int cfi_amdstd_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
					  size_t len, size_t *retlen,
					  u_char *buf)
{
	return cfi_amdstd_otp_walk(mtd, from, len, retlen, buf,
				   do_otp_write, 1);
}

static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
					 size_t len)
{
	size_t retlen;
	return cfi_amdstd_otp_walk(mtd, from, len, &retlen, NULL,
				   do_otp_lock, 1);
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum,
				     int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/*
	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
	 * have a max write time of a few hundred usec). However, we should
	 * use the maximum timeout value given by the chip at probe time
	 * instead.  Unfortunately, struct flchip does have a field for
	 * maximum timeout, only for typical, which can be far too short
	 * depending on the conditions.  The ' + 1' is to avoid having a
	 * timeout of 0 jiffies if HZ is smaller than 1000.
	 */
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = 0;
	map_word oldd;
	int retry_cnt = 0;

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
		 __func__, adr, datum.x[0] );

	if (mode == FL_OTP_WRITE)
		otp_enter(map, chip, adr, map_bankwidth(map));

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		pr_debug("MTD %s(): NOP\n",
			 __func__);
		goto op_done;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

 retry:
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);
	chip->state = mode;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	/* See comment above for timeout value. */
	timeo = jiffies + uWriteTimeout;
	for (;;) {
		if (chip->state != mode) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
			xip_disable(map, chip, adr);
			break;
		}

		if (chip_ready(map, adr))
			break;

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, datum)) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_WORD_RETRIES)
			goto retry;

		ret = -EIO;
	}
	xip_enable(map, chip, adr);
 op_done:
	if (mode == FL_OTP_WRITE)
		otp_exit(map, chip, adr, map_bankwidth(map));
	chip->state = FL_READY;
	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}


static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, chipstart;
	DECLARE_WAITQUEUE(wait, current);

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int i = ofs - bus_ofs;
		int n = 0;
		map_word tmp_buf;

 retry:
		mutex_lock(&cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			mutex_unlock(&cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
			goto retry;
		}

		/* Load 'tmp_buf' with old contents of flash */
		tmp_buf = map_read(map, bus_ofs+chipstart);

		mutex_unlock(&cfi->chips[chipnum].mutex);

		/* Number of bytes to copy from buffer */
		n = min_t(int, len, map_bankwidth(map)-i);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, tmp_buf, FL_WRITING);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;
		len -= n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* We are now aligned, write as much as possible */
	while(len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing bytes if any */
	if (len & (map_bankwidth(map)-1)) {
		map_word tmp_buf;

 retry1:
		mutex_lock(&cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			mutex_unlock(&cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
			goto retry1;
		}

		tmp_buf = map_read(map, ofs + chipstart);

		mutex_unlock(&cfi->chips[chipnum].mutex);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, tmp_buf, FL_WRITING);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}


/*
 * FIXME: interleaved mode not tested, and probably not supported!
 */
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf,
				    int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/*
	 * Timeout is calculated according to CFI data, if available.
	 * See more comments in cfi_cmdset_0002().
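	 * (chip->buffer_write_time_max was clamped to a minimum of 2000 us
	 * when the chip was probed in cfi_cmdset_0002().)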
1803 */ 1804 unsigned long uWriteTimeout = 1805 usecs_to_jiffies(chip->buffer_write_time_max); 1806 int ret = -EIO; 1807 unsigned long cmd_adr; 1808 int z, words; 1809 map_word datum; 1810 1811 adr += chip->start; 1812 cmd_adr = adr; 1813 1814 mutex_lock(&chip->mutex); 1815 ret = get_chip(map, chip, adr, FL_WRITING); 1816 if (ret) { 1817 mutex_unlock(&chip->mutex); 1818 return ret; 1819 } 1820 1821 datum = map_word_load(map, buf); 1822 1823 pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", 1824 __func__, adr, datum.x[0] ); 1825 1826 XIP_INVAL_CACHED_RANGE(map, adr, len); 1827 ENABLE_VPP(map); 1828 xip_disable(map, chip, cmd_adr); 1829 1830 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 1831 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 1832 1833 /* Write Buffer Load */ 1834 map_write(map, CMD(0x25), cmd_adr); 1835 1836 chip->state = FL_WRITING_TO_BUFFER; 1837 1838 /* Write length of data to come */ 1839 words = len / map_bankwidth(map); 1840 map_write(map, CMD(words - 1), cmd_adr); 1841 /* Write data */ 1842 z = 0; 1843 while(z < words * map_bankwidth(map)) { 1844 datum = map_word_load(map, buf); 1845 map_write(map, datum, adr + z); 1846 1847 z += map_bankwidth(map); 1848 buf += map_bankwidth(map); 1849 } 1850 z -= map_bankwidth(map); 1851 1852 adr += z; 1853 1854 /* Write Buffer Program Confirm: GO GO GO */ 1855 map_write(map, CMD(0x29), cmd_adr); 1856 chip->state = FL_WRITING; 1857 1858 INVALIDATE_CACHE_UDELAY(map, chip, 1859 adr, map_bankwidth(map), 1860 chip->word_write_time); 1861 1862 timeo = jiffies + uWriteTimeout; 1863 1864 for (;;) { 1865 if (chip->state != FL_WRITING) { 1866 /* Someone's suspended the write. Sleep */ 1867 DECLARE_WAITQUEUE(wait, current); 1868 1869 set_current_state(TASK_UNINTERRUPTIBLE); 1870 add_wait_queue(&chip->wq, &wait); 1871 mutex_unlock(&chip->mutex); 1872 schedule(); 1873 remove_wait_queue(&chip->wq, &wait); 1874 timeo = jiffies + (HZ / 2); /* FIXME */ 1875 mutex_lock(&chip->mutex); 1876 continue; 1877 } 1878 1879 if (time_after(jiffies, timeo) && !chip_ready(map, adr)) 1880 break; 1881 1882 if (chip_ready(map, adr)) { 1883 xip_enable(map, chip, adr); 1884 goto op_done; 1885 } 1886 1887 /* Latency issues. Drop the lock, wait a while and retry */ 1888 UDELAY(map, chip, adr, 1); 1889 } 1890 1891 /* 1892 * Recovery from write-buffer programming failures requires 1893 * the write-to-buffer-reset sequence. Since the last part 1894 * of the sequence also works as a normal reset, we can run 1895 * the same commands regardless of why we are here. 1896 * See e.g. 
1897 * http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf 1898 */ 1899 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 1900 cfi->device_type, NULL); 1901 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 1902 cfi->device_type, NULL); 1903 cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi, 1904 cfi->device_type, NULL); 1905 xip_enable(map, chip, adr); 1906 /* FIXME - should have reset delay before continuing */ 1907 1908 printk(KERN_WARNING "MTD %s(): software timeout, address:0x%.8lx.\n", 1909 __func__, adr); 1910 1911 ret = -EIO; 1912 op_done: 1913 chip->state = FL_READY; 1914 DISABLE_VPP(map); 1915 put_chip(map, chip, adr); 1916 mutex_unlock(&chip->mutex); 1917 1918 return ret; 1919 } 1920 1921 1922 static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len, 1923 size_t *retlen, const u_char *buf) 1924 { 1925 struct map_info *map = mtd->priv; 1926 struct cfi_private *cfi = map->fldrv_priv; 1927 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; 1928 int ret = 0; 1929 int chipnum; 1930 unsigned long ofs; 1931 1932 chipnum = to >> cfi->chipshift; 1933 ofs = to - (chipnum << cfi->chipshift); 1934 1935 /* If it's not bus-aligned, do the first word write */ 1936 if (ofs & (map_bankwidth(map)-1)) { 1937 size_t local_len = (-ofs)&(map_bankwidth(map)-1); 1938 if (local_len > len) 1939 local_len = len; 1940 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift), 1941 local_len, retlen, buf); 1942 if (ret) 1943 return ret; 1944 ofs += local_len; 1945 buf += local_len; 1946 len -= local_len; 1947 1948 if (ofs >> cfi->chipshift) { 1949 chipnum ++; 1950 ofs = 0; 1951 if (chipnum == cfi->numchips) 1952 return 0; 1953 } 1954 } 1955 1956 /* Write buffer is worth it only if more than one word to write... */ 1957 while (len >= map_bankwidth(map) * 2) { 1958 /* We must not cross write block boundaries */ 1959 int size = wbufsize - (ofs & (wbufsize-1)); 1960 1961 if (size > len) 1962 size = len; 1963 if (size % map_bankwidth(map)) 1964 size -= size % map_bankwidth(map); 1965 1966 ret = do_write_buffer(map, &cfi->chips[chipnum], 1967 ofs, buf, size); 1968 if (ret) 1969 return ret; 1970 1971 ofs += size; 1972 buf += size; 1973 (*retlen) += size; 1974 len -= size; 1975 1976 if (ofs >> cfi->chipshift) { 1977 chipnum ++; 1978 ofs = 0; 1979 if (chipnum == cfi->numchips) 1980 return 0; 1981 } 1982 } 1983 1984 if (len) { 1985 size_t retlen_dregs = 0; 1986 1987 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift), 1988 len, &retlen_dregs, buf); 1989 1990 *retlen += retlen_dregs; 1991 return ret; 1992 } 1993 1994 return 0; 1995 } 1996 1997 /* 1998 * Wait for the flash chip to become ready to write data 1999 * 2000 * This is only called during the panic_write() path. When panic_write() 2001 * is called, the kernel is in the process of a panic, and will soon be 2002 * dead. Therefore we don't take any locks, and attempt to get access 2003 * to the chip as soon as possible. 2004 */ 2005 static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip, 2006 unsigned long adr) 2007 { 2008 struct cfi_private *cfi = map->fldrv_priv; 2009 int retries = 10; 2010 int i; 2011 2012 /* 2013 * If the driver thinks the chip is idle, and no toggle bits 2014 * are changing, then the chip is actually idle for sure. 
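 *
 * "Toggle bits" refers to the AMD-style status mechanism: while an embedded
 * program or erase algorithm runs, DQ6 (and DQ2 during erase) flips on every
 * read of the device, so two back-to-back reads of the same address differ.
 * chip_ready() relies on exactly that, roughly:
 *
 *   d = map_read(map, adr);
 *   t = map_read(map, adr);
 *   idle  <=>  map_word_equal(map, d, t)
 *
 * Once the two reads match, the internal state machine has finished (or was
 * never busy) and it is safe to start issuing commands without any locks.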
2015 */ 2016 if (chip->state == FL_READY && chip_ready(map, adr)) 2017 return 0; 2018 2019 /* 2020 * Try several times to reset the chip and then wait for it 2021 * to become idle. The upper limit of a few milliseconds of 2022 * delay isn't a big problem: the kernel is dying anyway. It 2023 * is more important to save the messages. 2024 */ 2025 while (retries > 0) { 2026 const unsigned long timeo = (HZ / 1000) + 1; 2027 2028 /* send the reset command */ 2029 map_write(map, CMD(0xF0), chip->start); 2030 2031 /* wait for the chip to become ready */ 2032 for (i = 0; i < jiffies_to_usecs(timeo); i++) { 2033 if (chip_ready(map, adr)) 2034 return 0; 2035 2036 udelay(1); 2037 } 2038 2039 retries--; 2040 } 2041 2042 /* the chip never became ready */ 2043 return -EBUSY; 2044 } 2045 2046 /* 2047 * Write out one word of data to a single flash chip during a kernel panic 2048 * 2049 * This is only called during the panic_write() path. When panic_write() 2050 * is called, the kernel is in the process of a panic, and will soon be 2051 * dead. Therefore we don't take any locks, and attempt to get access 2052 * to the chip as soon as possible. 2053 * 2054 * The implementation of this routine is intentionally similar to 2055 * do_write_oneword(), in order to ease code maintenance. 2056 */ 2057 static int do_panic_write_oneword(struct map_info *map, struct flchip *chip, 2058 unsigned long adr, map_word datum) 2059 { 2060 const unsigned long uWriteTimeout = (HZ / 1000) + 1; 2061 struct cfi_private *cfi = map->fldrv_priv; 2062 int retry_cnt = 0; 2063 map_word oldd; 2064 int ret = 0; 2065 int i; 2066 2067 adr += chip->start; 2068 2069 ret = cfi_amdstd_panic_wait(map, chip, adr); 2070 if (ret) 2071 return ret; 2072 2073 pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n", 2074 __func__, adr, datum.x[0]); 2075 2076 /* 2077 * Check for a NOP for the case when the datum to write is already 2078 * present - it saves time and works around buggy chips that corrupt 2079 * data at other locations when 0xff is written to a location that 2080 * already contains 0xff. 2081 */ 2082 oldd = map_read(map, adr); 2083 if (map_word_equal(map, oldd, datum)) { 2084 pr_debug("MTD %s(): NOP\n", __func__); 2085 goto op_done; 2086 } 2087 2088 ENABLE_VPP(map); 2089 2090 retry: 2091 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2092 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 2093 cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2094 map_write(map, datum, adr); 2095 2096 for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) { 2097 if (chip_ready(map, adr)) 2098 break; 2099 2100 udelay(1); 2101 } 2102 2103 if (!chip_good(map, adr, datum)) { 2104 /* reset on all failures. */ 2105 map_write(map, CMD(0xF0), chip->start); 2106 /* FIXME - should have reset delay before continuing */ 2107 2108 if (++retry_cnt <= MAX_WORD_RETRIES) 2109 goto retry; 2110 2111 ret = -EIO; 2112 } 2113 2114 op_done: 2115 DISABLE_VPP(map); 2116 return ret; 2117 } 2118 2119 /* 2120 * Write out some data during a kernel panic 2121 * 2122 * This is used by the mtdoops driver to save the dying messages from a 2123 * kernel which has panic'd. 2124 * 2125 * This routine ignores all of the locking used throughout the rest of the 2126 * driver, in order to ensure that the data gets written out no matter what 2127 * state this driver (and the flash chip itself) was in when the kernel crashed. 
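 *
 * mtdoops reaches this code through mtd_panic_write(); by that point the
 * kernel is panicking and must not sleep, so taking chip->mutex or waiting
 * on chip->wq is not an option and cfi_amdstd_panic_wait() busy-polls
 * instead.  A caller looks roughly like this (sketch only):
 *
 *   size_t retlen;
 *   mtd_panic_write(mtd, offset, count, &retlen, buf);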
2128 * 2129 * The implementation of this routine is intentionally similar to 2130 * cfi_amdstd_write_words(), in order to ease code maintenance. 2131 */ 2132 static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, 2133 size_t *retlen, const u_char *buf) 2134 { 2135 struct map_info *map = mtd->priv; 2136 struct cfi_private *cfi = map->fldrv_priv; 2137 unsigned long ofs, chipstart; 2138 int ret = 0; 2139 int chipnum; 2140 2141 chipnum = to >> cfi->chipshift; 2142 ofs = to - (chipnum << cfi->chipshift); 2143 chipstart = cfi->chips[chipnum].start; 2144 2145 /* If it's not bus aligned, do the first byte write */ 2146 if (ofs & (map_bankwidth(map) - 1)) { 2147 unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1); 2148 int i = ofs - bus_ofs; 2149 int n = 0; 2150 map_word tmp_buf; 2151 2152 ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs); 2153 if (ret) 2154 return ret; 2155 2156 /* Load 'tmp_buf' with old contents of flash */ 2157 tmp_buf = map_read(map, bus_ofs + chipstart); 2158 2159 /* Number of bytes to copy from buffer */ 2160 n = min_t(int, len, map_bankwidth(map) - i); 2161 2162 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n); 2163 2164 ret = do_panic_write_oneword(map, &cfi->chips[chipnum], 2165 bus_ofs, tmp_buf); 2166 if (ret) 2167 return ret; 2168 2169 ofs += n; 2170 buf += n; 2171 (*retlen) += n; 2172 len -= n; 2173 2174 if (ofs >> cfi->chipshift) { 2175 chipnum++; 2176 ofs = 0; 2177 if (chipnum == cfi->numchips) 2178 return 0; 2179 } 2180 } 2181 2182 /* We are now aligned, write as much as possible */ 2183 while (len >= map_bankwidth(map)) { 2184 map_word datum; 2185 2186 datum = map_word_load(map, buf); 2187 2188 ret = do_panic_write_oneword(map, &cfi->chips[chipnum], 2189 ofs, datum); 2190 if (ret) 2191 return ret; 2192 2193 ofs += map_bankwidth(map); 2194 buf += map_bankwidth(map); 2195 (*retlen) += map_bankwidth(map); 2196 len -= map_bankwidth(map); 2197 2198 if (ofs >> cfi->chipshift) { 2199 chipnum++; 2200 ofs = 0; 2201 if (chipnum == cfi->numchips) 2202 return 0; 2203 2204 chipstart = cfi->chips[chipnum].start; 2205 } 2206 } 2207 2208 /* Write the trailing bytes if any */ 2209 if (len & (map_bankwidth(map) - 1)) { 2210 map_word tmp_buf; 2211 2212 ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs); 2213 if (ret) 2214 return ret; 2215 2216 tmp_buf = map_read(map, ofs + chipstart); 2217 2218 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len); 2219 2220 ret = do_panic_write_oneword(map, &cfi->chips[chipnum], 2221 ofs, tmp_buf); 2222 if (ret) 2223 return ret; 2224 2225 (*retlen) += len; 2226 } 2227 2228 return 0; 2229 } 2230 2231 2232 /* 2233 * Handle devices with one erase region, that only implement 2234 * the chip erase command. 
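 *
 * do_erase_chip() below issues the standard six-cycle full-chip erase
 * sequence at the CFI unlock addresses:
 *
 *   0xAA -> addr_unlock1
 *   0x55 -> addr_unlock2
 *   0x80 -> addr_unlock1    (erase setup)
 *   0xAA -> addr_unlock1
 *   0x55 -> addr_unlock2
 *   0x10 -> addr_unlock1    (chip erase confirm)
 *
 * and then polls until the whole array reads back erased, i.e. chip_good()
 * succeeds against map_word_ff().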
2235 */ 2236 static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) 2237 { 2238 struct cfi_private *cfi = map->fldrv_priv; 2239 unsigned long timeo = jiffies + HZ; 2240 unsigned long int adr; 2241 DECLARE_WAITQUEUE(wait, current); 2242 int ret = 0; 2243 2244 adr = cfi->addr_unlock1; 2245 2246 mutex_lock(&chip->mutex); 2247 ret = get_chip(map, chip, adr, FL_WRITING); 2248 if (ret) { 2249 mutex_unlock(&chip->mutex); 2250 return ret; 2251 } 2252 2253 pr_debug("MTD %s(): ERASE 0x%.8lx\n", 2254 __func__, chip->start ); 2255 2256 XIP_INVAL_CACHED_RANGE(map, adr, map->size); 2257 ENABLE_VPP(map); 2258 xip_disable(map, chip, adr); 2259 2260 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2261 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 2262 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2263 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2264 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 2265 cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2266 2267 chip->state = FL_ERASING; 2268 chip->erase_suspended = 0; 2269 chip->in_progress_block_addr = adr; 2270 2271 INVALIDATE_CACHE_UDELAY(map, chip, 2272 adr, map->size, 2273 chip->erase_time*500); 2274 2275 timeo = jiffies + (HZ*20); 2276 2277 for (;;) { 2278 if (chip->state != FL_ERASING) { 2279 /* Someone's suspended the erase. Sleep */ 2280 set_current_state(TASK_UNINTERRUPTIBLE); 2281 add_wait_queue(&chip->wq, &wait); 2282 mutex_unlock(&chip->mutex); 2283 schedule(); 2284 remove_wait_queue(&chip->wq, &wait); 2285 mutex_lock(&chip->mutex); 2286 continue; 2287 } 2288 if (chip->erase_suspended) { 2289 /* This erase was suspended and resumed. 2290 Adjust the timeout */ 2291 timeo = jiffies + (HZ*20); /* FIXME */ 2292 chip->erase_suspended = 0; 2293 } 2294 2295 if (chip_ready(map, adr)) 2296 break; 2297 2298 if (time_after(jiffies, timeo)) { 2299 printk(KERN_WARNING "MTD %s(): software timeout\n", 2300 __func__ ); 2301 break; 2302 } 2303 2304 /* Latency issues. Drop the lock, wait a while and retry */ 2305 UDELAY(map, chip, adr, 1000000/HZ); 2306 } 2307 /* Did we succeed? */ 2308 if (!chip_good(map, adr, map_word_ff(map))) { 2309 /* reset on all failures. 
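   The F0h (read/reset) command aborts the stuck embedded
   operation and returns the chip to read-array mode, so a retry or a
   plain read starts from a sane state; datasheets also want a short
   delay after reset before new commands are issued, hence the FIXME
   below.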
*/ 2310 map_write( map, CMD(0xF0), chip->start ); 2311 /* FIXME - should have reset delay before continuing */ 2312 2313 ret = -EIO; 2314 } 2315 2316 chip->state = FL_READY; 2317 xip_enable(map, chip, adr); 2318 DISABLE_VPP(map); 2319 put_chip(map, chip, adr); 2320 mutex_unlock(&chip->mutex); 2321 2322 return ret; 2323 } 2324 2325 2326 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk) 2327 { 2328 struct cfi_private *cfi = map->fldrv_priv; 2329 unsigned long timeo = jiffies + HZ; 2330 DECLARE_WAITQUEUE(wait, current); 2331 int ret = 0; 2332 2333 adr += chip->start; 2334 2335 mutex_lock(&chip->mutex); 2336 ret = get_chip(map, chip, adr, FL_ERASING); 2337 if (ret) { 2338 mutex_unlock(&chip->mutex); 2339 return ret; 2340 } 2341 2342 pr_debug("MTD %s(): ERASE 0x%.8lx\n", 2343 __func__, adr ); 2344 2345 XIP_INVAL_CACHED_RANGE(map, adr, len); 2346 ENABLE_VPP(map); 2347 xip_disable(map, chip, adr); 2348 2349 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2350 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 2351 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2352 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); 2353 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); 2354 map_write(map, cfi->sector_erase_cmd, adr); 2355 2356 chip->state = FL_ERASING; 2357 chip->erase_suspended = 0; 2358 chip->in_progress_block_addr = adr; 2359 2360 INVALIDATE_CACHE_UDELAY(map, chip, 2361 adr, len, 2362 chip->erase_time*500); 2363 2364 timeo = jiffies + (HZ*20); 2365 2366 for (;;) { 2367 if (chip->state != FL_ERASING) { 2368 /* Someone's suspended the erase. Sleep */ 2369 set_current_state(TASK_UNINTERRUPTIBLE); 2370 add_wait_queue(&chip->wq, &wait); 2371 mutex_unlock(&chip->mutex); 2372 schedule(); 2373 remove_wait_queue(&chip->wq, &wait); 2374 mutex_lock(&chip->mutex); 2375 continue; 2376 } 2377 if (chip->erase_suspended) { 2378 /* This erase was suspended and resumed. 2379 Adjust the timeout */ 2380 timeo = jiffies + (HZ*20); /* FIXME */ 2381 chip->erase_suspended = 0; 2382 } 2383 2384 if (chip_ready(map, adr)) { 2385 xip_enable(map, chip, adr); 2386 break; 2387 } 2388 2389 if (time_after(jiffies, timeo)) { 2390 xip_enable(map, chip, adr); 2391 printk(KERN_WARNING "MTD %s(): software timeout\n", 2392 __func__ ); 2393 break; 2394 } 2395 2396 /* Latency issues. Drop the lock, wait a while and retry */ 2397 UDELAY(map, chip, adr, 1000000/HZ); 2398 } 2399 /* Did we succeed? */ 2400 if (!chip_good(map, adr, map_word_ff(map))) { 2401 /* reset on all failures. 
*/ 2402 map_write( map, CMD(0xF0), chip->start ); 2403 /* FIXME - should have reset delay before continuing */ 2404 2405 ret = -EIO; 2406 } 2407 2408 chip->state = FL_READY; 2409 DISABLE_VPP(map); 2410 put_chip(map, chip, adr); 2411 mutex_unlock(&chip->mutex); 2412 return ret; 2413 } 2414 2415 2416 static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr) 2417 { 2418 unsigned long ofs, len; 2419 int ret; 2420 2421 ofs = instr->addr; 2422 len = instr->len; 2423 2424 ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL); 2425 if (ret) 2426 return ret; 2427 2428 instr->state = MTD_ERASE_DONE; 2429 mtd_erase_callback(instr); 2430 2431 return 0; 2432 } 2433 2434 2435 static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr) 2436 { 2437 struct map_info *map = mtd->priv; 2438 struct cfi_private *cfi = map->fldrv_priv; 2439 int ret = 0; 2440 2441 if (instr->addr != 0) 2442 return -EINVAL; 2443 2444 if (instr->len != mtd->size) 2445 return -EINVAL; 2446 2447 ret = do_erase_chip(map, &cfi->chips[0]); 2448 if (ret) 2449 return ret; 2450 2451 instr->state = MTD_ERASE_DONE; 2452 mtd_erase_callback(instr); 2453 2454 return 0; 2455 } 2456 2457 static int do_atmel_lock(struct map_info *map, struct flchip *chip, 2458 unsigned long adr, int len, void *thunk) 2459 { 2460 struct cfi_private *cfi = map->fldrv_priv; 2461 int ret; 2462 2463 mutex_lock(&chip->mutex); 2464 ret = get_chip(map, chip, adr + chip->start, FL_LOCKING); 2465 if (ret) 2466 goto out_unlock; 2467 chip->state = FL_LOCKING; 2468 2469 pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len); 2470 2471 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 2472 cfi->device_type, NULL); 2473 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 2474 cfi->device_type, NULL); 2475 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, 2476 cfi->device_type, NULL); 2477 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 2478 cfi->device_type, NULL); 2479 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 2480 cfi->device_type, NULL); 2481 map_write(map, CMD(0x40), chip->start + adr); 2482 2483 chip->state = FL_READY; 2484 put_chip(map, chip, adr + chip->start); 2485 ret = 0; 2486 2487 out_unlock: 2488 mutex_unlock(&chip->mutex); 2489 return ret; 2490 } 2491 2492 static int do_atmel_unlock(struct map_info *map, struct flchip *chip, 2493 unsigned long adr, int len, void *thunk) 2494 { 2495 struct cfi_private *cfi = map->fldrv_priv; 2496 int ret; 2497 2498 mutex_lock(&chip->mutex); 2499 ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING); 2500 if (ret) 2501 goto out_unlock; 2502 chip->state = FL_UNLOCKING; 2503 2504 pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len); 2505 2506 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 2507 cfi->device_type, NULL); 2508 map_write(map, CMD(0x70), adr); 2509 2510 chip->state = FL_READY; 2511 put_chip(map, chip, adr + chip->start); 2512 ret = 0; 2513 2514 out_unlock: 2515 mutex_unlock(&chip->mutex); 2516 return ret; 2517 } 2518 2519 static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 2520 { 2521 return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL); 2522 } 2523 2524 static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) 2525 { 2526 return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL); 2527 } 2528 2529 /* 2530 * Advanced Sector Protection - PPB (Persistent Protection Bit) locking 2531 */ 2532 2533 
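/*
 * do_ppb_xxlock() below drives the PPB command set.  After the usual
 * 0xAA/0x55 unlock cycles, 0xC0 enters PPB command mode; within that mode:
 *
 *   lock one sector:    0xA0, 0x00 written to the sector address
 *   clear ALL PPBs:     0x80, 0x30 written to the chip base
 *   read lock status:   CFI query read at the sector (0 = locked)
 *   exit PPB mode:      0x90, 0x00 written to the chip base
 *
 * Because the "clear" operation wipes every PPB on the chip, cfi_ppb_unlock()
 * has to remember which sectors were locked beforehand and lock them again
 * afterwards; struct ppb_lock is the per-sector record used for that.
 */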
struct ppb_lock { 2534 struct flchip *chip; 2535 loff_t offset; 2536 int locked; 2537 }; 2538 2539 #define MAX_SECTORS 512 2540 2541 #define DO_XXLOCK_ONEBLOCK_LOCK ((void *)1) 2542 #define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *)2) 2543 #define DO_XXLOCK_ONEBLOCK_GETLOCK ((void *)3) 2544 2545 static int __maybe_unused do_ppb_xxlock(struct map_info *map, 2546 struct flchip *chip, 2547 unsigned long adr, int len, void *thunk) 2548 { 2549 struct cfi_private *cfi = map->fldrv_priv; 2550 unsigned long timeo; 2551 int ret; 2552 2553 mutex_lock(&chip->mutex); 2554 ret = get_chip(map, chip, adr + chip->start, FL_LOCKING); 2555 if (ret) { 2556 mutex_unlock(&chip->mutex); 2557 return ret; 2558 } 2559 2560 pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n", __func__, adr, len); 2561 2562 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, 2563 cfi->device_type, NULL); 2564 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, 2565 cfi->device_type, NULL); 2566 /* PPB entry command */ 2567 cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi, 2568 cfi->device_type, NULL); 2569 2570 if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) { 2571 chip->state = FL_LOCKING; 2572 map_write(map, CMD(0xA0), chip->start + adr); 2573 map_write(map, CMD(0x00), chip->start + adr); 2574 } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) { 2575 /* 2576 * Unlocking of one specific sector is not supported, so we 2577 * have to unlock all sectors of this device instead 2578 */ 2579 chip->state = FL_UNLOCKING; 2580 map_write(map, CMD(0x80), chip->start); 2581 map_write(map, CMD(0x30), chip->start); 2582 } else if (thunk == DO_XXLOCK_ONEBLOCK_GETLOCK) { 2583 chip->state = FL_JEDEC_QUERY; 2584 /* Return locked status: 0->locked, 1->unlocked */ 2585 ret = !cfi_read_query(map, adr); 2586 } else 2587 BUG(); 2588 2589 /* 2590 * Wait for some time as unlocking of all sectors takes quite long 2591 */ 2592 timeo = jiffies + msecs_to_jiffies(2000); /* 2s max (un)locking */ 2593 for (;;) { 2594 if (chip_ready(map, adr)) 2595 break; 2596 2597 if (time_after(jiffies, timeo)) { 2598 printk(KERN_ERR "Waiting for chip to be ready timed out.\n"); 2599 ret = -EIO; 2600 break; 2601 } 2602 2603 UDELAY(map, chip, adr, 1); 2604 } 2605 2606 /* Exit BC commands */ 2607 map_write(map, CMD(0x90), chip->start); 2608 map_write(map, CMD(0x00), chip->start); 2609 2610 chip->state = FL_READY; 2611 put_chip(map, chip, adr + chip->start); 2612 mutex_unlock(&chip->mutex); 2613 2614 return ret; 2615 } 2616 2617 static int __maybe_unused cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, 2618 uint64_t len) 2619 { 2620 return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len, 2621 DO_XXLOCK_ONEBLOCK_LOCK); 2622 } 2623 2624 static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, 2625 uint64_t len) 2626 { 2627 struct mtd_erase_region_info *regions = mtd->eraseregions; 2628 struct map_info *map = mtd->priv; 2629 struct cfi_private *cfi = map->fldrv_priv; 2630 struct ppb_lock *sect; 2631 unsigned long adr; 2632 loff_t offset; 2633 uint64_t length; 2634 int chipnum; 2635 int i; 2636 int sectors; 2637 int ret; 2638 2639 /* 2640 * PPB unlocking always unlocks all sectors of the flash chip. 2641 * We need to re-lock all previously locked sectors. So lets 2642 * first check the locking status of all sectors and save 2643 * it for future use. 
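 *
 * For every sector that lies outside the [ofs, ofs + len) range being
 * unlocked, the loop below records
 *
 *   sect[n].chip   = the chip the sector lives on
 *   sect[n].offset = the sector offset, for the later re-lock
 *   sect[n].locked = do_ppb_xxlock(..., DO_XXLOCK_ONEBLOCK_GETLOCK)
 *
 * After the single "unlock everything" call, each entry whose .locked flag
 * is set is locked again, one sector at a time.  The scratch array is
 * bounded by MAX_SECTORS entries.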
2644 */ 2645 sect = kzalloc(MAX_SECTORS * sizeof(struct ppb_lock), GFP_KERNEL); 2646 if (!sect) 2647 return -ENOMEM; 2648 2649 /* 2650 * This code to walk all sectors is a slightly modified version 2651 * of the cfi_varsize_frob() code. 2652 */ 2653 i = 0; 2654 chipnum = 0; 2655 adr = 0; 2656 sectors = 0; 2657 offset = 0; 2658 length = mtd->size; 2659 2660 while (length) { 2661 int size = regions[i].erasesize; 2662 2663 /* 2664 * Only test sectors that shall not be unlocked. The other 2665 * sectors shall be unlocked, so lets keep their locking 2666 * status at "unlocked" (locked=0) for the final re-locking. 2667 */ 2668 if ((adr < ofs) || (adr >= (ofs + len))) { 2669 sect[sectors].chip = &cfi->chips[chipnum]; 2670 sect[sectors].offset = offset; 2671 sect[sectors].locked = do_ppb_xxlock( 2672 map, &cfi->chips[chipnum], adr, 0, 2673 DO_XXLOCK_ONEBLOCK_GETLOCK); 2674 } 2675 2676 adr += size; 2677 offset += size; 2678 length -= size; 2679 2680 if (offset == regions[i].offset + size * regions[i].numblocks) 2681 i++; 2682 2683 if (adr >> cfi->chipshift) { 2684 adr = 0; 2685 chipnum++; 2686 2687 if (chipnum >= cfi->numchips) 2688 break; 2689 } 2690 2691 sectors++; 2692 if (sectors >= MAX_SECTORS) { 2693 printk(KERN_ERR "Only %d sectors for PPB locking supported!\n", 2694 MAX_SECTORS); 2695 kfree(sect); 2696 return -EINVAL; 2697 } 2698 } 2699 2700 /* Now unlock the whole chip */ 2701 ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len, 2702 DO_XXLOCK_ONEBLOCK_UNLOCK); 2703 if (ret) { 2704 kfree(sect); 2705 return ret; 2706 } 2707 2708 /* 2709 * PPB unlocking always unlocks all sectors of the flash chip. 2710 * We need to re-lock all previously locked sectors. 2711 */ 2712 for (i = 0; i < sectors; i++) { 2713 if (sect[i].locked) 2714 do_ppb_xxlock(map, sect[i].chip, sect[i].offset, 0, 2715 DO_XXLOCK_ONEBLOCK_LOCK); 2716 } 2717 2718 kfree(sect); 2719 return ret; 2720 } 2721 2722 static int __maybe_unused cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, 2723 uint64_t len) 2724 { 2725 return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len, 2726 DO_XXLOCK_ONEBLOCK_GETLOCK) ? 1 : 0; 2727 } 2728 2729 static void cfi_amdstd_sync (struct mtd_info *mtd) 2730 { 2731 struct map_info *map = mtd->priv; 2732 struct cfi_private *cfi = map->fldrv_priv; 2733 int i; 2734 struct flchip *chip; 2735 int ret = 0; 2736 DECLARE_WAITQUEUE(wait, current); 2737 2738 for (i=0; !ret && i<cfi->numchips; i++) { 2739 chip = &cfi->chips[i]; 2740 2741 retry: 2742 mutex_lock(&chip->mutex); 2743 2744 switch(chip->state) { 2745 case FL_READY: 2746 case FL_STATUS: 2747 case FL_CFI_QUERY: 2748 case FL_JEDEC_QUERY: 2749 chip->oldstate = chip->state; 2750 chip->state = FL_SYNCING; 2751 /* No need to wake_up() on this state change - 2752 * as the whole point is that nobody can do anything 2753 * with the chip now anyway. 
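 *
 * Note the deliberate fall-through into the FL_SYNCING case below:
 * once the chip is marked FL_SYNCING (or already was), all that is
 * left to do is drop its mutex and move on to the next chip.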
2754 */ 2755 case FL_SYNCING: 2756 mutex_unlock(&chip->mutex); 2757 break; 2758 2759 default: 2760 /* Not an idle state */ 2761 set_current_state(TASK_UNINTERRUPTIBLE); 2762 add_wait_queue(&chip->wq, &wait); 2763 2764 mutex_unlock(&chip->mutex); 2765 2766 schedule(); 2767 2768 remove_wait_queue(&chip->wq, &wait); 2769 2770 goto retry; 2771 } 2772 } 2773 2774 /* Unlock the chips again */ 2775 2776 for (i--; i >=0; i--) { 2777 chip = &cfi->chips[i]; 2778 2779 mutex_lock(&chip->mutex); 2780 2781 if (chip->state == FL_SYNCING) { 2782 chip->state = chip->oldstate; 2783 wake_up(&chip->wq); 2784 } 2785 mutex_unlock(&chip->mutex); 2786 } 2787 } 2788 2789 2790 static int cfi_amdstd_suspend(struct mtd_info *mtd) 2791 { 2792 struct map_info *map = mtd->priv; 2793 struct cfi_private *cfi = map->fldrv_priv; 2794 int i; 2795 struct flchip *chip; 2796 int ret = 0; 2797 2798 for (i=0; !ret && i<cfi->numchips; i++) { 2799 chip = &cfi->chips[i]; 2800 2801 mutex_lock(&chip->mutex); 2802 2803 switch(chip->state) { 2804 case FL_READY: 2805 case FL_STATUS: 2806 case FL_CFI_QUERY: 2807 case FL_JEDEC_QUERY: 2808 chip->oldstate = chip->state; 2809 chip->state = FL_PM_SUSPENDED; 2810 /* No need to wake_up() on this state change - 2811 * as the whole point is that nobody can do anything 2812 * with the chip now anyway. 2813 */ 2814 case FL_PM_SUSPENDED: 2815 break; 2816 2817 default: 2818 ret = -EAGAIN; 2819 break; 2820 } 2821 mutex_unlock(&chip->mutex); 2822 } 2823 2824 /* Unlock the chips again */ 2825 2826 if (ret) { 2827 for (i--; i >=0; i--) { 2828 chip = &cfi->chips[i]; 2829 2830 mutex_lock(&chip->mutex); 2831 2832 if (chip->state == FL_PM_SUSPENDED) { 2833 chip->state = chip->oldstate; 2834 wake_up(&chip->wq); 2835 } 2836 mutex_unlock(&chip->mutex); 2837 } 2838 } 2839 2840 return ret; 2841 } 2842 2843 2844 static void cfi_amdstd_resume(struct mtd_info *mtd) 2845 { 2846 struct map_info *map = mtd->priv; 2847 struct cfi_private *cfi = map->fldrv_priv; 2848 int i; 2849 struct flchip *chip; 2850 2851 for (i=0; i<cfi->numchips; i++) { 2852 2853 chip = &cfi->chips[i]; 2854 2855 mutex_lock(&chip->mutex); 2856 2857 if (chip->state == FL_PM_SUSPENDED) { 2858 chip->state = FL_READY; 2859 map_write(map, CMD(0xF0), chip->start); 2860 wake_up(&chip->wq); 2861 } 2862 else 2863 printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n"); 2864 2865 mutex_unlock(&chip->mutex); 2866 } 2867 } 2868 2869 2870 /* 2871 * Ensure that the flash device is put back into read array mode before 2872 * unloading the driver or rebooting. On some systems, rebooting while 2873 * the flash is in query/program/erase mode will prevent the CPU from 2874 * fetching the bootloader code, requiring a hard reset or power cycle. 
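 *
 * cfi_amdstd_reset() below is therefore called from both the reboot
 * notifier (cfi_amdstd_reboot) and cfi_amdstd_destroy().  For each chip it
 * takes the chip with get_chip(..., FL_SHUTDOWN), writes the F0h read/reset
 * command to the chip base so the device answers array reads again when the
 * boot ROM fetches from it, and marks the chip FL_SHUTDOWN.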
2875 */ 2876 static int cfi_amdstd_reset(struct mtd_info *mtd) 2877 { 2878 struct map_info *map = mtd->priv; 2879 struct cfi_private *cfi = map->fldrv_priv; 2880 int i, ret; 2881 struct flchip *chip; 2882 2883 for (i = 0; i < cfi->numchips; i++) { 2884 2885 chip = &cfi->chips[i]; 2886 2887 mutex_lock(&chip->mutex); 2888 2889 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN); 2890 if (!ret) { 2891 map_write(map, CMD(0xF0), chip->start); 2892 chip->state = FL_SHUTDOWN; 2893 put_chip(map, chip, chip->start); 2894 } 2895 2896 mutex_unlock(&chip->mutex); 2897 } 2898 2899 return 0; 2900 } 2901 2902 2903 static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val, 2904 void *v) 2905 { 2906 struct mtd_info *mtd; 2907 2908 mtd = container_of(nb, struct mtd_info, reboot_notifier); 2909 cfi_amdstd_reset(mtd); 2910 return NOTIFY_DONE; 2911 } 2912 2913 2914 static void cfi_amdstd_destroy(struct mtd_info *mtd) 2915 { 2916 struct map_info *map = mtd->priv; 2917 struct cfi_private *cfi = map->fldrv_priv; 2918 2919 cfi_amdstd_reset(mtd); 2920 unregister_reboot_notifier(&mtd->reboot_notifier); 2921 kfree(cfi->cmdset_priv); 2922 kfree(cfi->cfiq); 2923 kfree(cfi); 2924 kfree(mtd->eraseregions); 2925 } 2926 2927 MODULE_LICENSE("GPL"); 2928 MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al."); 2929 MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips"); 2930 MODULE_ALIAS("cfi_cmdset_0006"); 2931 MODULE_ALIAS("cfi_cmdset_0701"); 2932
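/*
 * Usage sketch (not part of this driver): a minimal, hypothetical map driver
 * showing how a NOR chip ends up being handled by cfi_cmdset_0002().  The
 * name, physical base, size and bus width below are invented for
 * illustration only, and the block is compiled out; a real board or
 * physmap-style setup provides the equivalent wiring.
 */
#if 0
static struct map_info example_nor_map = {
	.name      = "example-nor",	/* hypothetical */
	.phys      = 0x08000000,	/* hypothetical physical base */
	.size      = 0x00800000,	/* hypothetical 8 MiB part */
	.bankwidth = 2,			/* 16-bit bus */
};

static int __init example_nor_init(void)
{
	struct mtd_info *mtd;

	example_nor_map.virt = ioremap(example_nor_map.phys,
				       example_nor_map.size);
	if (!example_nor_map.virt)
		return -ENOMEM;
	simple_map_init(&example_nor_map);

	/*
	 * cfi_probe reads the CFI query table from the chip; when the primary
	 * vendor command set is 0x0002 it hands the device to cfi_cmdset_0002()
	 * above, which fills in the mtd_info read/write/erase methods.
	 */
	mtd = do_map_probe("cfi_probe", &example_nor_map);
	if (!mtd) {
		iounmap(example_nor_map.virt);
		return -ENODEV;
	}
	mtd->owner = THIS_MODULE;
	return mtd_device_register(mtd, NULL, 0);
}
/* A real map driver would pair this with module_init()/module_exit(). */
#endif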