1 /*
2  * Common Flash Interface support:
3  *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
4  *
5  * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
6  * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
7  * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
8  *
9  * 2_by_8 routines added by Simon Munton
10  *
11  * 4_by_16 work by Carolyn J. Smith
12  *
13  * XIP support hooks by Vitaly Wool (based on code for Intel flash
14  * by Nicolas Pitre)
15  *
16  * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
17  *
18  * This code is GPL
19  *
20  * $Id: cfi_cmdset_0002.c,v 1.122 2005/11/07 11:14:22 gleixner Exp $
21  *
22  */
23 
24 #include <linux/module.h>
25 #include <linux/types.h>
26 #include <linux/kernel.h>
27 #include <linux/sched.h>
28 #include <linux/init.h>
29 #include <asm/io.h>
30 #include <asm/byteorder.h>
31 
32 #include <linux/errno.h>
33 #include <linux/slab.h>
34 #include <linux/delay.h>
35 #include <linux/interrupt.h>
36 #include <linux/mtd/compatmac.h>
37 #include <linux/mtd/map.h>
38 #include <linux/mtd/mtd.h>
39 #include <linux/mtd/cfi.h>
40 #include <linux/mtd/xip.h>
41 
42 #define AMD_BOOTLOC_BUG
43 #define FORCE_WORD_WRITE 0
44 
45 #define MAX_WORD_RETRIES 3
46 
47 #define MANUFACTURER_AMD	0x0001
48 #define MANUFACTURER_ATMEL	0x001F
49 #define MANUFACTURER_SST	0x00BF
50 #define SST49LF004B	        0x0060
51 #define SST49LF040B	        0x0050
52 #define SST49LF008A		0x005a
53 #define AT49BV6416		0x00d6
54 
55 static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
56 static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
57 static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
58 static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
59 static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
60 static void cfi_amdstd_sync (struct mtd_info *);
61 static int cfi_amdstd_suspend (struct mtd_info *);
62 static void cfi_amdstd_resume (struct mtd_info *);
63 static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
64 
65 static void cfi_amdstd_destroy(struct mtd_info *);
66 
67 struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
68 static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);
69 
70 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
71 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
72 #include "fwh_lock.h"
73 
74 static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
75 static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
76 
77 static struct mtd_chip_driver cfi_amdstd_chipdrv = {
78 	.probe		= NULL, /* Not usable directly */
79 	.destroy	= cfi_amdstd_destroy,
80 	.name		= "cfi_cmdset_0002",
81 	.module		= THIS_MODULE
82 };
83 
84 
85 /* #define DEBUG_CFI_FEATURES */
86 
87 
88 #ifdef DEBUG_CFI_FEATURES
89 static void cfi_tell_features(struct cfi_pri_amdstd *extp)
90 {
91 	const char* erase_suspend[3] = {
92 		"Not supported", "Read only", "Read/write"
93 	};
94 	const char* top_bottom[6] = {
95 		"No WP", "8x8KiB sectors at top & bottom, no WP",
96 		"Bottom boot", "Top boot",
97 		"Uniform, Bottom WP", "Uniform, Top WP"
98 	};
99 
100 	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
101 	printk("  Address sensitive unlock: %s\n",
102 	       (extp->SiliconRevision & 1) ? "Not required" : "Required");
103 
104 	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
105 		printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
106 	else
107 		printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);
108 
109 	if (extp->BlkProt == 0)
110 		printk("  Block protection: Not supported\n");
111 	else
112 		printk("  Block protection: %d sectors per group\n", extp->BlkProt);
113 
114 
115 	printk("  Temporary block unprotect: %s\n",
116 	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
117 	printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
118 	printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
119 	printk("  Burst mode: %s\n",
120 	       extp->BurstMode ? "Supported" : "Not supported");
121 	if (extp->PageMode == 0)
122 		printk("  Page mode: Not supported\n");
123 	else
124 		printk("  Page mode: %d word page\n", extp->PageMode << 2);
125 
126 	printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
127 	       extp->VppMin >> 4, extp->VppMin & 0xf);
128 	printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
129 	       extp->VppMax >> 4, extp->VppMax & 0xf);
130 
131 	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
132 		printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
133 	else
134 		printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
135 }
136 #endif
137 
138 #ifdef AMD_BOOTLOC_BUG
139 /* Wheee. Bring me the head of someone at AMD. */
140 static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
141 {
142 	struct map_info *map = mtd->priv;
143 	struct cfi_private *cfi = map->fldrv_priv;
144 	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
145 	__u8 major = extp->MajorVersion;
146 	__u8 minor = extp->MinorVersion;
147 
148 	if (((major << 8) | minor) < 0x3131) {
149 		/* CFI version 1.0 => don't trust bootloc */
150 		if (cfi->id & 0x80) {
151 			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
152 			extp->TopBottom = 3;	/* top boot */
153 		} else {
154 			extp->TopBottom = 2;	/* bottom boot */
155 		}
156 	}
157 }
158 #endif
159 
160 static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
161 {
162 	struct map_info *map = mtd->priv;
163 	struct cfi_private *cfi = map->fldrv_priv;
164 	if (cfi->cfiq->BufWriteTimeoutTyp) {
165 		DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" );
166 		mtd->write = cfi_amdstd_write_buffers;
167 	}
168 }
169 
170 /* Atmel chips don't use the same PRI format as AMD chips */
171 static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
172 {
173 	struct map_info *map = mtd->priv;
174 	struct cfi_private *cfi = map->fldrv_priv;
175 	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
176 	struct cfi_pri_atmel atmel_pri;
177 
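	/* Save the Atmel-format PRI, then keep only the first 5 bytes of
	 * the AMD structure (the "PRI" signature plus major/minor version),
	 * zero the rest, and translate the fields we care about below. */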
178 	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
179 	memset((char *)extp + 5, 0, sizeof(*extp) - 5);
180 
181 	if (atmel_pri.Features & 0x02)
182 		extp->EraseSuspend = 2;
183 
184 	if (atmel_pri.BottomBoot)
185 		extp->TopBottom = 2;
186 	else
187 		extp->TopBottom = 3;
188 }
189 
190 static void fixup_use_secsi(struct mtd_info *mtd, void *param)
191 {
192 	/* Setup for chips with a secsi area */
193 	mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
194 	mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
195 }
196 
197 static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
198 {
199 	struct map_info *map = mtd->priv;
200 	struct cfi_private *cfi = map->fldrv_priv;
201 	if ((cfi->cfiq->NumEraseRegions == 1) &&
202 		((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
203 		mtd->erase = cfi_amdstd_erase_chip;
204 	}
205 
206 }
207 
208 /*
209  * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
210  * locked by default.
211  */
212 static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
213 {
214 	mtd->lock = cfi_atmel_lock;
215 	mtd->unlock = cfi_atmel_unlock;
216 	mtd->flags |= MTD_STUPID_LOCK;
217 }
218 
219 static struct cfi_fixup cfi_fixup_table[] = {
220 #ifdef AMD_BOOTLOC_BUG
221 	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
222 #endif
223 	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
224 	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
225 	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, },
226 	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
227 	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
228 	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
229 #if !FORCE_WORD_WRITE
230 	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
231 #endif
232 	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
233 	{ 0, 0, NULL, NULL }
234 };
235 static struct cfi_fixup jedec_fixup_table[] = {
236 	{ MANUFACTURER_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
237 	{ MANUFACTURER_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
238 	{ MANUFACTURER_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
239 	{ 0, 0, NULL, NULL }
240 };
241 
242 static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear to be
	 * common, and it looks like the device IDs are as well.  This
	 * table picks all the cases where we know that to be true.
	 */
248 	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
249 	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock, NULL },
250 	{ 0, 0, NULL, NULL }
251 };
252 
253 
254 struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
255 {
256 	struct cfi_private *cfi = map->fldrv_priv;
257 	struct mtd_info *mtd;
258 	int i;
259 
260 	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
261 	if (!mtd) {
262 		printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
263 		return NULL;
264 	}
265 	mtd->priv = map;
266 	mtd->type = MTD_NORFLASH;
267 
268 	/* Fill in the default mtd operations */
269 	mtd->erase   = cfi_amdstd_erase_varsize;
270 	mtd->write   = cfi_amdstd_write_words;
271 	mtd->read    = cfi_amdstd_read;
272 	mtd->sync    = cfi_amdstd_sync;
273 	mtd->suspend = cfi_amdstd_suspend;
274 	mtd->resume  = cfi_amdstd_resume;
275 	mtd->flags   = MTD_CAP_NORFLASH;
276 	mtd->name    = map->name;
277 	mtd->writesize = 1;
278 
279 	if (cfi->cfi_mode==CFI_MODE_CFI){
280 		unsigned char bootloc;
281 		/*
282 		 * It's a real CFI chip, not one for which the probe
283 		 * routine faked a CFI structure. So we read the feature
284 		 * table from it.
285 		 */
286 		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
287 		struct cfi_pri_amdstd *extp;
288 
289 		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
290 		if (!extp) {
291 			kfree(mtd);
292 			return NULL;
293 		}
294 
295 		if (extp->MajorVersion != '1' ||
296 		    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
297 			printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
298 			       "version %c.%c.\n",  extp->MajorVersion,
299 			       extp->MinorVersion);
300 			kfree(extp);
301 			kfree(mtd);
302 			return NULL;
303 		}
304 
305 		/* Install our own private info structure */
306 		cfi->cmdset_priv = extp;
307 
308 		/* Apply cfi device specific fixups */
309 		cfi_fixup(mtd, cfi_fixup_table);
310 
311 #ifdef DEBUG_CFI_FEATURES
312 		/* Tell the user about it in lots of lovely detail */
313 		cfi_tell_features(extp);
314 #endif
315 
316 		bootloc = extp->TopBottom;
317 		if ((bootloc != 2) && (bootloc != 3)) {
318 			printk(KERN_WARNING "%s: CFI does not contain boot "
319 			       "bank location. Assuming top.\n", map->name);
320 			bootloc = 2;
321 		}
322 
323 		if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
324 			printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);
325 
326 			for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
327 				int j = (cfi->cfiq->NumEraseRegions-1)-i;
328 				__u32 swap;
329 
330 				swap = cfi->cfiq->EraseRegionInfo[i];
331 				cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
332 				cfi->cfiq->EraseRegionInfo[j] = swap;
333 			}
334 		}
335 		/* Set the default CFI lock/unlock addresses */
336 		cfi->addr_unlock1 = 0x555;
337 		cfi->addr_unlock2 = 0x2aa;
338 		/* Modify the unlock address if we are in compatibility mode */
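		/* (In byte-compatibility mode the device decodes one more
		 * address line, so the datasheet unlock addresses shift
		 * from 0x555/0x2aa to 0xaaa/0x555.) */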
339 		if (	/* x16 in x8 mode */
340 			((cfi->device_type == CFI_DEVICETYPE_X8) &&
341 				(cfi->cfiq->InterfaceDesc == 2)) ||
342 			/* x32 in x16 mode */
343 			((cfi->device_type == CFI_DEVICETYPE_X16) &&
344 				(cfi->cfiq->InterfaceDesc == 4)))
345 		{
346 			cfi->addr_unlock1 = 0xaaa;
347 			cfi->addr_unlock2 = 0x555;
348 		}
349 
350 	} /* CFI mode */
351 	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
352 		/* Apply jedec specific fixups */
353 		cfi_fixup(mtd, jedec_fixup_table);
354 	}
355 	/* Apply generic fixups */
356 	cfi_fixup(mtd, fixup_table);
357 
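	/* CFI reports typical timeouts as powers of two: word and buffer
	 * writes in microseconds, block erase in milliseconds, hence the
	 * 1 << shifts below. */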
358 	for (i=0; i< cfi->numchips; i++) {
359 		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
360 		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
361 		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
362 		cfi->chips[i].ref_point_counter = 0;
363 		init_waitqueue_head(&(cfi->chips[i].wq));
364 	}
365 
366 	map->fldrv = &cfi_amdstd_chipdrv;
367 
368 	return cfi_amdstd_setup(mtd);
369 }
370 EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
371 
372 static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
373 {
374 	struct map_info *map = mtd->priv;
375 	struct cfi_private *cfi = map->fldrv_priv;
376 	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
377 	unsigned long offset = 0;
378 	int i,j;
379 
380 	printk(KERN_NOTICE "number of %s chips: %d\n",
381 	       (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
382 	/* Select the correct geometry setup */
383 	mtd->size = devsize * cfi->numchips;
384 
385 	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
386 	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
387 				    * mtd->numeraseregions, GFP_KERNEL);
388 	if (!mtd->eraseregions) {
389 		printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
390 		goto setup_err;
391 	}
392 
393 	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
394 		unsigned long ernum, ersize;
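		/* Each erase region descriptor packs two fields: bits 31..16
		 * give the block size in 256-byte units and bits 15..0 give
		 * the number of blocks minus one, hence the arithmetic below. */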
395 		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
396 		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
397 
398 		if (mtd->erasesize < ersize) {
399 			mtd->erasesize = ersize;
400 		}
401 		for (j=0; j<cfi->numchips; j++) {
402 			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
403 			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
404 			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
405 		}
406 		offset += (ersize * ernum);
407 	}
408 	if (offset != devsize) {
409 		/* Argh */
410 		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
411 		goto setup_err;
412 	}
413 #if 0
414 	// debug
415 	for (i=0; i<mtd->numeraseregions;i++){
416 		printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
417 		       i,mtd->eraseregions[i].offset,
418 		       mtd->eraseregions[i].erasesize,
419 		       mtd->eraseregions[i].numblocks);
420 	}
421 #endif
422 
423 	/* FIXME: erase-suspend-program is broken.  See
424 	   http://lists.infradead.org/pipermail/linux-mtd/2003-December/009001.html */
425 	printk(KERN_NOTICE "cfi_cmdset_0002: Disabling erase-suspend-program due to code brokenness.\n");
426 
427 	__module_get(THIS_MODULE);
428 	return mtd;
429 
430  setup_err:
431 	if(mtd) {
432 		kfree(mtd->eraseregions);
433 		kfree(mtd);
434 	}
435 	kfree(cfi->cmdset_priv);
436 	kfree(cfi->cfiq);
437 	return NULL;
438 }
439 
440 /*
441  * Return true if the chip is ready.
442  *
443  * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
444  * non-suspended sector) and is indicated by no toggle bits toggling.
445  *
446  * Note that anything more complicated than checking if no bits are toggling
447  * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done	(particularly with interleaved chips
 * as each chip must be checked independently of the others).
450  */
451 static int __xipram chip_ready(struct map_info *map, unsigned long addr)
452 {
453 	map_word d, t;
454 
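	/* Read the same location twice: while an embedded program/erase
	 * operation is running the chip toggles status bits (DQ6, and DQ2
	 * in some modes) on successive reads, so two identical reads mean
	 * the chip is back in (or never left) array mode. */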
455 	d = map_read(map, addr);
456 	t = map_read(map, addr);
457 
458 	return map_word_equal(map, d, t);
459 }
460 
461 /*
462  * Return true if the chip is ready and has the correct value.
463  *
464  * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
465  * non-suspended sector) and it is indicated by no bits toggling.
466  *
 * Errors are indicated by toggling bits, or by bits held at the wrong
 * value.
469  *
470  * Note that anything more complicated than checking if no bits are toggling
471  * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done	(particularly with interleaved chips
 * as each chip must be checked independently of the others).
474  *
475  */
476 static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
477 {
478 	map_word oldd, curd;
479 
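	/* As in chip_ready(), two identical reads mean no bits are toggling;
	 * additionally require that the data matches the expected value. */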
480 	oldd = map_read(map, addr);
481 	curd = map_read(map, addr);
482 
483 	return	map_word_equal(map, oldd, curd) &&
484 		map_word_equal(map, curd, expected);
485 }
486 
487 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
488 {
489 	DECLARE_WAITQUEUE(wait, current);
490 	struct cfi_private *cfi = map->fldrv_priv;
491 	unsigned long timeo;
492 	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;
493 
494  resettime:
495 	timeo = jiffies + HZ;
496  retry:
497 	switch (chip->state) {
498 
499 	case FL_STATUS:
500 		for (;;) {
501 			if (chip_ready(map, adr))
502 				break;
503 
504 			if (time_after(jiffies, timeo)) {
505 				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
506 				spin_unlock(chip->mutex);
507 				return -EIO;
508 			}
509 			spin_unlock(chip->mutex);
510 			cfi_udelay(1);
511 			spin_lock(chip->mutex);
512 			/* Someone else might have been playing with it. */
513 			goto retry;
514 		}
515 
516 	case FL_READY:
517 	case FL_CFI_QUERY:
518 	case FL_JEDEC_QUERY:
519 		return 0;
520 
521 	case FL_ERASING:
522 		if (mode == FL_WRITING) /* FIXME: Erase-suspend-program appears broken. */
523 			goto sleep;
524 
		if (!(   mode == FL_READY
		      || mode == FL_POINT
		      || !cfip
		      || (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))
		      || (mode == FL_WRITING && (cfip->EraseSuspend & 0x1))))
			goto sleep;
532 
533 		/* We could check to see if we're trying to access the sector
534 		 * that is currently being erased. However, no user will try
535 		 * anything like that so we just wait for the timeout. */
536 
537 		/* Erase suspend */
538 		/* It's harmless to issue the Erase-Suspend and Erase-Resume
539 		 * commands when the erase algorithm isn't in progress. */
540 		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
541 		chip->oldstate = FL_ERASING;
542 		chip->state = FL_ERASE_SUSPENDING;
543 		chip->erase_suspended = 1;
544 		for (;;) {
545 			if (chip_ready(map, adr))
546 				break;
547 
548 			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we are
				 * trying to use the erase-in-progress sector. */
554 				map_write(map, CMD(0x30), chip->in_progress_block_addr);
555 				chip->state = FL_ERASING;
556 				chip->oldstate = FL_READY;
557 				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
558 				return -EIO;
559 			}
560 
561 			spin_unlock(chip->mutex);
562 			cfi_udelay(1);
563 			spin_lock(chip->mutex);
564 			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
565 			   So we can just loop here. */
566 		}
567 		chip->state = FL_READY;
568 		return 0;
569 
570 	case FL_XIP_WHILE_ERASING:
571 		if (mode != FL_READY && mode != FL_POINT &&
572 		    (!cfip || !(cfip->EraseSuspend&2)))
573 			goto sleep;
574 		chip->oldstate = chip->state;
575 		chip->state = FL_READY;
576 		return 0;
577 
578 	case FL_POINT:
579 		/* Only if there's no operation suspended... */
580 		if (mode == FL_READY && chip->oldstate == FL_READY)
581 			return 0;
582 
583 	default:
584 	sleep:
585 		set_current_state(TASK_UNINTERRUPTIBLE);
586 		add_wait_queue(&chip->wq, &wait);
587 		spin_unlock(chip->mutex);
588 		schedule();
589 		remove_wait_queue(&chip->wq, &wait);
590 		spin_lock(chip->mutex);
591 		goto resettime;
592 	}
593 }
594 
595 
596 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
597 {
598 	struct cfi_private *cfi = map->fldrv_priv;
599 
600 	switch(chip->oldstate) {
601 	case FL_ERASING:
602 		chip->state = chip->oldstate;
603 		map_write(map, CMD(0x30), chip->in_progress_block_addr);
604 		chip->oldstate = FL_READY;
605 		chip->state = FL_ERASING;
606 		break;
607 
608 	case FL_XIP_WHILE_ERASING:
609 		chip->state = chip->oldstate;
610 		chip->oldstate = FL_READY;
611 		break;
612 
613 	case FL_READY:
614 	case FL_STATUS:
615 		/* We should really make set_vpp() count, rather than doing this */
616 		DISABLE_VPP(map);
617 		break;
618 	default:
619 		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
620 	}
621 	wake_up(&chip->wq);
622 }
623 
624 #ifdef CONFIG_MTD_XIP
625 
626 /*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within an XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
635  */
636 
637 static void xip_disable(struct map_info *map, struct flchip *chip,
638 			unsigned long adr)
639 {
640 	/* TODO: chips with no XIP use should ignore and return */
641 	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
642 	local_irq_disable();
643 }
644 
645 static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
646 				unsigned long adr)
647 {
648 	struct cfi_private *cfi = map->fldrv_priv;
649 
650 	if (chip->state != FL_POINT && chip->state != FL_READY) {
651 		map_write(map, CMD(0xf0), adr);
652 		chip->state = FL_READY;
653 	}
654 	(void) map_read(map, adr);
655 	xip_iprefetch();
656 	local_irq_enable();
657 }
658 
659 /*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function polls for both expiry of the given timeout and
 * for pending (but still masked) hardware interrupts.  Whenever there is
 * an interrupt pending, the flash erase operation is suspended, array mode
 * restored and interrupts unmasked.  Task scheduling might also happen at
 * that point.  The CPU eventually returns from the interrupt or the call
 * to schedule() and the suspended flash operation is resumed for the
 * remainder of the delay period.
668  *
669  * Warning: this function _will_ fool interrupt latency tracing tools.
670  */
671 
672 static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
673 				unsigned long adr, int usec)
674 {
675 	struct cfi_private *cfi = map->fldrv_priv;
676 	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
677 	map_word status, OK = CMD(0x80);
678 	unsigned long suspended, start = xip_currtime();
679 	flstate_t oldstate;
680 
681 	do {
682 		cpu_relax();
683 		if (xip_irqpending() && extp &&
		    (chip->state == FL_ERASING && (extp->EraseSuspend & 2)) &&
685 		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
686 			/*
687 			 * Let's suspend the erase operation when supported.
688 			 * Note that we currently don't try to suspend
689 			 * interleaved chips if there is already another
690 			 * operation suspended (imagine what happens
691 			 * when one chip was already done with the current
692 			 * operation while another chip suspended it, then
693 			 * we resume the whole thing at once).  Yes, it
694 			 * can happen!
695 			 */
696 			map_write(map, CMD(0xb0), adr);
697 			usec -= xip_elapsed_since(start);
698 			suspended = xip_currtime();
699 			do {
700 				if (xip_elapsed_since(suspended) > 100000) {
701 					/*
702 					 * The chip doesn't want to suspend
703 					 * after waiting for 100 msecs.
704 					 * This is a critical error but there
705 					 * is not much we can do here.
706 					 */
707 					return;
708 				}
709 				status = map_read(map, adr);
710 			} while (!map_word_andequal(map, status, OK, OK));
711 
712 			/* Suspend succeeded */
713 			oldstate = chip->state;
714 			if (!map_word_bitsset(map, status, CMD(0x40)))
715 				break;
716 			chip->state = FL_XIP_WHILE_ERASING;
717 			chip->erase_suspended = 1;
718 			map_write(map, CMD(0xf0), adr);
719 			(void) map_read(map, adr);
720 			asm volatile (".rep 8; nop; .endr");
721 			local_irq_enable();
722 			spin_unlock(chip->mutex);
723 			asm volatile (".rep 8; nop; .endr");
724 			cond_resched();
725 
726 			/*
727 			 * We're back.  However someone else might have
728 			 * decided to go write to the chip if we are in
729 			 * a suspended erase state.  If so let's wait
730 			 * until it's done.
731 			 */
732 			spin_lock(chip->mutex);
733 			while (chip->state != FL_XIP_WHILE_ERASING) {
734 				DECLARE_WAITQUEUE(wait, current);
735 				set_current_state(TASK_UNINTERRUPTIBLE);
736 				add_wait_queue(&chip->wq, &wait);
737 				spin_unlock(chip->mutex);
738 				schedule();
739 				remove_wait_queue(&chip->wq, &wait);
740 				spin_lock(chip->mutex);
741 			}
742 			/* Disallow XIP again */
743 			local_irq_disable();
744 
745 			/* Resume the write or erase operation */
746 			map_write(map, CMD(0x30), adr);
747 			chip->state = oldstate;
748 			start = xip_currtime();
749 		} else if (usec >= 1000000/HZ) {
750 			/*
751 			 * Try to save on CPU power when waiting delay
752 			 * is at least a system timer tick period.
753 			 * No need to be extremely accurate here.
754 			 */
755 			xip_cpu_idle();
756 		}
757 		status = map_read(map, adr);
758 	} while (!map_word_andequal(map, status, OK, OK)
759 		 && xip_elapsed_since(start) < usec);
760 }
761 
762 #define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)
763 
764 /*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * an XIP setup, so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
770  */
771 #define XIP_INVAL_CACHED_RANGE(map, from, size)  \
772 	INVALIDATE_CACHED_RANGE(map, from, size)
773 
774 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
775 	UDELAY(map, chip, adr, usec)
776 
777 /*
778  * Extra notes:
779  *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with
 * the same chip state as it was entered with.  This is why no special care
 * is taken for the presence of add_wait_queue() or schedule() calls from
 * within a couple of xip_disable()'d areas of code, like in
 * do_erase_oneblock for example.  The queueing and scheduling always
 * happen within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*), i.e. while the
 * flash is in array mode, therefore never executing many cases therein
 * and not causing any problem with XIP.
792  */
793 
794 #else
795 
796 #define xip_disable(map, chip, adr)
797 #define xip_enable(map, chip, adr)
798 #define XIP_INVAL_CACHED_RANGE(x...)
799 
800 #define UDELAY(map, chip, adr, usec)  \
801 do {  \
802 	spin_unlock(chip->mutex);  \
803 	cfi_udelay(usec);  \
804 	spin_lock(chip->mutex);  \
805 } while (0)
806 
807 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
808 do {  \
809 	spin_unlock(chip->mutex);  \
810 	INVALIDATE_CACHED_RANGE(map, adr, len);  \
811 	cfi_udelay(usec);  \
812 	spin_lock(chip->mutex);  \
813 } while (0)
814 
815 #endif
816 
817 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
818 {
819 	unsigned long cmd_addr;
820 	struct cfi_private *cfi = map->fldrv_priv;
821 	int ret;
822 
823 	adr += chip->start;
824 
825 	/* Ensure cmd read/writes are aligned. */
826 	cmd_addr = adr & ~(map_bankwidth(map)-1);
827 
828 	spin_lock(chip->mutex);
829 	ret = get_chip(map, chip, cmd_addr, FL_READY);
830 	if (ret) {
831 		spin_unlock(chip->mutex);
832 		return ret;
833 	}
834 
835 	if (chip->state != FL_POINT && chip->state != FL_READY) {
836 		map_write(map, CMD(0xf0), cmd_addr);
837 		chip->state = FL_READY;
838 	}
839 
840 	map_copy_from(map, buf, adr, len);
841 
842 	put_chip(map, chip, cmd_addr);
843 
844 	spin_unlock(chip->mutex);
845 	return 0;
846 }
847 
848 
849 static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
850 {
851 	struct map_info *map = mtd->priv;
852 	struct cfi_private *cfi = map->fldrv_priv;
853 	unsigned long ofs;
854 	int chipnum;
855 	int ret = 0;
856 
857 	/* ofs: offset within the first chip that the first read should start */
858 
859 	chipnum = (from >> cfi->chipshift);
860 	ofs = from - (chipnum <<  cfi->chipshift);
861 
862 
863 	*retlen = 0;
864 
865 	while (len) {
866 		unsigned long thislen;
867 
868 		if (chipnum >= cfi->numchips)
869 			break;
870 
871 		if ((len + ofs -1) >> cfi->chipshift)
872 			thislen = (1<<cfi->chipshift) - ofs;
873 		else
874 			thislen = len;
875 
876 		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
877 		if (ret)
878 			break;
879 
880 		*retlen += thislen;
881 		len -= thislen;
882 		buf += thislen;
883 
884 		ofs = 0;
885 		chipnum++;
886 	}
887 	return ret;
888 }
889 
890 
891 static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
892 {
893 	DECLARE_WAITQUEUE(wait, current);
894 	unsigned long timeo = jiffies + HZ;
895 	struct cfi_private *cfi = map->fldrv_priv;
896 
897  retry:
898 	spin_lock(chip->mutex);
899 
900 	if (chip->state != FL_READY){
901 #if 0
902 		printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
903 #endif
904 		set_current_state(TASK_UNINTERRUPTIBLE);
905 		add_wait_queue(&chip->wq, &wait);
906 
907 		spin_unlock(chip->mutex);
908 
909 		schedule();
910 		remove_wait_queue(&chip->wq, &wait);
911 #if 0
912 		if(signal_pending(current))
913 			return -EINTR;
914 #endif
915 		timeo = jiffies + HZ;
916 
917 		goto retry;
918 	}
919 
920 	adr += chip->start;
921 
922 	chip->state = FL_READY;
923 
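	/* Enter the SecSi (Secured Silicon) sector overlay: standard unlock
	 * cycles followed by the 0x88 entry command.  The matching 0x90/0x00
	 * sequence below exits the overlay and returns to the array. */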
924 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
925 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
926 	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
927 
928 	map_copy_from(map, buf, adr, len);
929 
930 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
931 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
932 	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
933 	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
934 
935 	wake_up(&chip->wq);
936 	spin_unlock(chip->mutex);
937 
938 	return 0;
939 }
940 
941 static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
942 {
943 	struct map_info *map = mtd->priv;
944 	struct cfi_private *cfi = map->fldrv_priv;
945 	unsigned long ofs;
946 	int chipnum;
947 	int ret = 0;
948 
949 
950 	/* ofs: offset within the first chip that the first read should start */
951 
952 	/* 8 secsi bytes per chip */
953 	chipnum=from>>3;
954 	ofs=from & 7;
955 
956 
957 	*retlen = 0;
958 
959 	while (len) {
960 		unsigned long thislen;
961 
962 		if (chipnum >= cfi->numchips)
963 			break;
964 
965 		if ((len + ofs -1) >> 3)
966 			thislen = (1<<3) - ofs;
967 		else
968 			thislen = len;
969 
970 		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
971 		if (ret)
972 			break;
973 
974 		*retlen += thislen;
975 		len -= thislen;
976 		buf += thislen;
977 
978 		ofs = 0;
979 		chipnum++;
980 	}
981 	return ret;
982 }
983 
984 
985 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
986 {
987 	struct cfi_private *cfi = map->fldrv_priv;
988 	unsigned long timeo = jiffies + HZ;
989 	/*
990 	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
991 	 * have a max write time of a few hundreds usec). However, we should
992 	 * use the maximum timeout value given by the chip at probe time
993 	 * instead.  Unfortunately, struct flchip does have a field for
994 	 * maximum timeout, only for typical which can be far too short
995 	 * depending of the conditions.	 The ' + 1' is to avoid having a
996 	 * timeout of 0 jiffies if HZ is smaller than 1000.
997 	 */
998 	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
999 	int ret = 0;
1000 	map_word oldd;
1001 	int retry_cnt = 0;
1002 
1003 	adr += chip->start;
1004 
1005 	spin_lock(chip->mutex);
1006 	ret = get_chip(map, chip, adr, FL_WRITING);
1007 	if (ret) {
1008 		spin_unlock(chip->mutex);
1009 		return ret;
1010 	}
1011 
1012 	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
1013 	       __func__, adr, datum.x[0] );
1014 
1015 	/*
1016 	 * Check for a NOP for the case when the datum to write is already
1017 	 * present - it saves time and works around buggy chips that corrupt
1018 	 * data at other locations when 0xff is written to a location that
1019 	 * already contains 0xff.
1020 	 */
1021 	oldd = map_read(map, adr);
1022 	if (map_word_equal(map, oldd, datum)) {
1023 		DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n",
1024 		       __func__);
1025 		goto op_done;
1026 	}
1027 
1028 	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1029 	ENABLE_VPP(map);
1030 	xip_disable(map, chip, adr);
1031  retry:
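	/* Standard AMD three-cycle program sequence: 0xAA @ unlock1,
	 * 0x55 @ unlock2, 0xA0 (program) @ unlock1, then the datum written
	 * to the target address itself. */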
1032 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1033 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1034 	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1035 	map_write(map, datum, adr);
1036 	chip->state = FL_WRITING;
1037 
1038 	INVALIDATE_CACHE_UDELAY(map, chip,
1039 				adr, map_bankwidth(map),
1040 				chip->word_write_time);
1041 
1042 	/* See comment above for timeout value. */
1043 	timeo = jiffies + uWriteTimeout;
1044 	for (;;) {
1045 		if (chip->state != FL_WRITING) {
1046 			/* Someone's suspended the write. Sleep */
1047 			DECLARE_WAITQUEUE(wait, current);
1048 
1049 			set_current_state(TASK_UNINTERRUPTIBLE);
1050 			add_wait_queue(&chip->wq, &wait);
1051 			spin_unlock(chip->mutex);
1052 			schedule();
1053 			remove_wait_queue(&chip->wq, &wait);
1054 			timeo = jiffies + (HZ / 2); /* FIXME */
1055 			spin_lock(chip->mutex);
1056 			continue;
1057 		}
1058 
1059 		if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
1060 			xip_enable(map, chip, adr);
1061 			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
1062 			xip_disable(map, chip, adr);
1063 			break;
1064 		}
1065 
1066 		if (chip_ready(map, adr))
1067 			break;
1068 
1069 		/* Latency issues. Drop the lock, wait a while and retry */
1070 		UDELAY(map, chip, adr, 1);
1071 	}
1072 	/* Did we succeed? */
1073 	if (!chip_good(map, adr, datum)) {
1074 		/* reset on all failures. */
1075 		map_write( map, CMD(0xF0), chip->start );
1076 		/* FIXME - should have reset delay before continuing */
1077 
1078 		if (++retry_cnt <= MAX_WORD_RETRIES)
1079 			goto retry;
1080 
1081 		ret = -EIO;
1082 	}
1083 	xip_enable(map, chip, adr);
1084  op_done:
1085 	chip->state = FL_READY;
1086 	put_chip(map, chip, adr);
1087 	spin_unlock(chip->mutex);
1088 
1089 	return ret;
1090 }
1091 
1092 
1093 static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
1094 				  size_t *retlen, const u_char *buf)
1095 {
1096 	struct map_info *map = mtd->priv;
1097 	struct cfi_private *cfi = map->fldrv_priv;
1098 	int ret = 0;
1099 	int chipnum;
1100 	unsigned long ofs, chipstart;
1101 	DECLARE_WAITQUEUE(wait, current);
1102 
1103 	*retlen = 0;
1104 	if (!len)
1105 		return 0;
1106 
1107 	chipnum = to >> cfi->chipshift;
1108 	ofs = to  - (chipnum << cfi->chipshift);
1109 	chipstart = cfi->chips[chipnum].start;
1110 
1111 	/* If it's not bus-aligned, do the first byte write */
1112 	if (ofs & (map_bankwidth(map)-1)) {
1113 		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1114 		int i = ofs - bus_ofs;
1115 		int n = 0;
1116 		map_word tmp_buf;
1117 
1118  retry:
1119 		spin_lock(cfi->chips[chipnum].mutex);
1120 
1121 		if (cfi->chips[chipnum].state != FL_READY) {
1122 #if 0
1123 			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
1124 #endif
1125 			set_current_state(TASK_UNINTERRUPTIBLE);
1126 			add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1127 
1128 			spin_unlock(cfi->chips[chipnum].mutex);
1129 
1130 			schedule();
1131 			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
1132 #if 0
1133 			if(signal_pending(current))
1134 				return -EINTR;
1135 #endif
1136 			goto retry;
1137 		}
1138 
1139 		/* Load 'tmp_buf' with old contents of flash */
1140 		tmp_buf = map_read(map, bus_ofs+chipstart);
1141 
1142 		spin_unlock(cfi->chips[chipnum].mutex);
1143 
1144 		/* Number of bytes to copy from buffer */
1145 		n = min_t(int, len, map_bankwidth(map)-i);
1146 
1147 		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
1148 
1149 		ret = do_write_oneword(map, &cfi->chips[chipnum],
1150 				       bus_ofs, tmp_buf);
1151 		if (ret)
1152 			return ret;
1153 
1154 		ofs += n;
1155 		buf += n;
1156 		(*retlen) += n;
1157 		len -= n;
1158 
1159 		if (ofs >> cfi->chipshift) {
1160 			chipnum ++;
1161 			ofs = 0;
1162 			if (chipnum == cfi->numchips)
1163 				return 0;
1164 		}
1165 	}
1166 
1167 	/* We are now aligned, write as much as possible */
1168 	while(len >= map_bankwidth(map)) {
1169 		map_word datum;
1170 
1171 		datum = map_word_load(map, buf);
1172 
1173 		ret = do_write_oneword(map, &cfi->chips[chipnum],
1174 				       ofs, datum);
1175 		if (ret)
1176 			return ret;
1177 
1178 		ofs += map_bankwidth(map);
1179 		buf += map_bankwidth(map);
1180 		(*retlen) += map_bankwidth(map);
1181 		len -= map_bankwidth(map);
1182 
1183 		if (ofs >> cfi->chipshift) {
1184 			chipnum ++;
1185 			ofs = 0;
1186 			if (chipnum == cfi->numchips)
1187 				return 0;
1188 			chipstart = cfi->chips[chipnum].start;
1189 		}
1190 	}
1191 
1192 	/* Write the trailing bytes if any */
1193 	if (len & (map_bankwidth(map)-1)) {
1194 		map_word tmp_buf;
1195 
1196  retry1:
1197 		spin_lock(cfi->chips[chipnum].mutex);
1198 
1199 		if (cfi->chips[chipnum].state != FL_READY) {
1200 #if 0
1201 			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
1202 #endif
1203 			set_current_state(TASK_UNINTERRUPTIBLE);
1204 			add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1205 
1206 			spin_unlock(cfi->chips[chipnum].mutex);
1207 
1208 			schedule();
1209 			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
1210 #if 0
1211 			if(signal_pending(current))
1212 				return -EINTR;
1213 #endif
1214 			goto retry1;
1215 		}
1216 
1217 		tmp_buf = map_read(map, ofs + chipstart);
1218 
1219 		spin_unlock(cfi->chips[chipnum].mutex);
1220 
1221 		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
1222 
1223 		ret = do_write_oneword(map, &cfi->chips[chipnum],
1224 				ofs, tmp_buf);
1225 		if (ret)
1226 			return ret;
1227 
1228 		(*retlen) += len;
1229 	}
1230 
1231 	return 0;
1232 }
1233 
1234 
1235 /*
1236  * FIXME: interleaved mode not tested, and probably not supported!
1237  */
1238 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1239 				    unsigned long adr, const u_char *buf,
1240 				    int len)
1241 {
1242 	struct cfi_private *cfi = map->fldrv_priv;
1243 	unsigned long timeo = jiffies + HZ;
	/* see comments in do_write_oneword() regarding uWriteTimeout. */
1245 	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
1246 	int ret = -EIO;
1247 	unsigned long cmd_adr;
1248 	int z, words;
1249 	map_word datum;
1250 
1251 	adr += chip->start;
1252 	cmd_adr = adr;
1253 
1254 	spin_lock(chip->mutex);
1255 	ret = get_chip(map, chip, adr, FL_WRITING);
1256 	if (ret) {
1257 		spin_unlock(chip->mutex);
1258 		return ret;
1259 	}
1260 
1261 	datum = map_word_load(map, buf);
1262 
1263 	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
1264 	       __func__, adr, datum.x[0] );
1265 
1266 	XIP_INVAL_CACHED_RANGE(map, adr, len);
1267 	ENABLE_VPP(map);
1268 	xip_disable(map, chip, cmd_adr);
1269 
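	/* Write-buffer programming: after the unlock cycles, 0x25 (Write to
	 * Buffer) goes to the sector address, followed by the word count
	 * minus one, the data words, and finally 0x29 (Program Buffer to
	 * Flash) to start the embedded program operation. */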
1270 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1271 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1272 	//cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1273 
1274 	/* Write Buffer Load */
1275 	map_write(map, CMD(0x25), cmd_adr);
1276 
1277 	chip->state = FL_WRITING_TO_BUFFER;
1278 
1279 	/* Write length of data to come */
1280 	words = len / map_bankwidth(map);
1281 	map_write(map, CMD(words - 1), cmd_adr);
1282 	/* Write data */
1283 	z = 0;
1284 	while(z < words * map_bankwidth(map)) {
1285 		datum = map_word_load(map, buf);
1286 		map_write(map, datum, adr + z);
1287 
1288 		z += map_bankwidth(map);
1289 		buf += map_bankwidth(map);
1290 	}
1291 	z -= map_bankwidth(map);
1292 
1293 	adr += z;
1294 
1295 	/* Write Buffer Program Confirm: GO GO GO */
1296 	map_write(map, CMD(0x29), cmd_adr);
1297 	chip->state = FL_WRITING;
1298 
1299 	INVALIDATE_CACHE_UDELAY(map, chip,
1300 				adr, map_bankwidth(map),
1301 				chip->word_write_time);
1302 
1303 	timeo = jiffies + uWriteTimeout;
1304 
1305 	for (;;) {
1306 		if (chip->state != FL_WRITING) {
1307 			/* Someone's suspended the write. Sleep */
1308 			DECLARE_WAITQUEUE(wait, current);
1309 
1310 			set_current_state(TASK_UNINTERRUPTIBLE);
1311 			add_wait_queue(&chip->wq, &wait);
1312 			spin_unlock(chip->mutex);
1313 			schedule();
1314 			remove_wait_queue(&chip->wq, &wait);
1315 			timeo = jiffies + (HZ / 2); /* FIXME */
1316 			spin_lock(chip->mutex);
1317 			continue;
1318 		}
1319 
1320 		if (time_after(jiffies, timeo) && !chip_ready(map, adr))
1321 			break;
1322 
1323 		if (chip_ready(map, adr)) {
1324 			xip_enable(map, chip, adr);
1325 			goto op_done;
1326 		}
1327 
1328 		/* Latency issues. Drop the lock, wait a while and retry */
1329 		UDELAY(map, chip, adr, 1);
1330 	}
1331 
1332 	/* reset on all failures. */
1333 	map_write( map, CMD(0xF0), chip->start );
1334 	xip_enable(map, chip, adr);
1335 	/* FIXME - should have reset delay before continuing */
1336 
1337 	printk(KERN_WARNING "MTD %s(): software timeout\n",
1338 	       __func__ );
1339 
1340 	ret = -EIO;
1341  op_done:
1342 	chip->state = FL_READY;
1343 	put_chip(map, chip, adr);
1344 	spin_unlock(chip->mutex);
1345 
1346 	return ret;
1347 }
1348 
1349 
1350 static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
1351 				    size_t *retlen, const u_char *buf)
1352 {
1353 	struct map_info *map = mtd->priv;
1354 	struct cfi_private *cfi = map->fldrv_priv;
1355 	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1356 	int ret = 0;
1357 	int chipnum;
1358 	unsigned long ofs;
1359 
1360 	*retlen = 0;
1361 	if (!len)
1362 		return 0;
1363 
1364 	chipnum = to >> cfi->chipshift;
1365 	ofs = to  - (chipnum << cfi->chipshift);
1366 
1367 	/* If it's not bus-aligned, do the first word write */
1368 	if (ofs & (map_bankwidth(map)-1)) {
1369 		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
1370 		if (local_len > len)
1371 			local_len = len;
1372 		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
1373 					     local_len, retlen, buf);
1374 		if (ret)
1375 			return ret;
1376 		ofs += local_len;
1377 		buf += local_len;
1378 		len -= local_len;
1379 
1380 		if (ofs >> cfi->chipshift) {
1381 			chipnum ++;
1382 			ofs = 0;
1383 			if (chipnum == cfi->numchips)
1384 				return 0;
1385 		}
1386 	}
1387 
	/* Write buffer is worth it only if there's more than one word to write... */
1389 	while (len >= map_bankwidth(map) * 2) {
1390 		/* We must not cross write block boundaries */
1391 		int size = wbufsize - (ofs & (wbufsize-1));
1392 
1393 		if (size > len)
1394 			size = len;
1395 		if (size % map_bankwidth(map))
1396 			size -= size % map_bankwidth(map);
1397 
1398 		ret = do_write_buffer(map, &cfi->chips[chipnum],
1399 				      ofs, buf, size);
1400 		if (ret)
1401 			return ret;
1402 
1403 		ofs += size;
1404 		buf += size;
1405 		(*retlen) += size;
1406 		len -= size;
1407 
1408 		if (ofs >> cfi->chipshift) {
1409 			chipnum ++;
1410 			ofs = 0;
1411 			if (chipnum == cfi->numchips)
1412 				return 0;
1413 		}
1414 	}
1415 
1416 	if (len) {
1417 		size_t retlen_dregs = 0;
1418 
1419 		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
1420 					     len, &retlen_dregs, buf);
1421 
1422 		*retlen += retlen_dregs;
1423 		return ret;
1424 	}
1425 
1426 	return 0;
1427 }
1428 
1429 
1430 /*
1431  * Handle devices with one erase region, that only implement
1432  * the chip erase command.
1433  */
1434 static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
1435 {
1436 	struct cfi_private *cfi = map->fldrv_priv;
1437 	unsigned long timeo = jiffies + HZ;
1438 	unsigned long int adr;
1439 	DECLARE_WAITQUEUE(wait, current);
1440 	int ret = 0;
1441 
1442 	adr = cfi->addr_unlock1;
1443 
1444 	spin_lock(chip->mutex);
1445 	ret = get_chip(map, chip, adr, FL_WRITING);
1446 	if (ret) {
1447 		spin_unlock(chip->mutex);
1448 		return ret;
1449 	}
1450 
1451 	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
1452 	       __func__, chip->start );
1453 
1454 	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
1455 	ENABLE_VPP(map);
1456 	xip_disable(map, chip, adr);
1457 
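	/* Six-cycle AMD chip-erase sequence: unlock (0xAA/0x55), erase
	 * setup (0x80), unlock again, then 0x10 = chip erase. */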
1458 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1459 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1460 	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1461 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1462 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1463 	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1464 
1465 	chip->state = FL_ERASING;
1466 	chip->erase_suspended = 0;
1467 	chip->in_progress_block_addr = adr;
1468 
1469 	INVALIDATE_CACHE_UDELAY(map, chip,
1470 				adr, map->size,
1471 				chip->erase_time*500);
1472 
1473 	timeo = jiffies + (HZ*20);
1474 
1475 	for (;;) {
1476 		if (chip->state != FL_ERASING) {
1477 			/* Someone's suspended the erase. Sleep */
1478 			set_current_state(TASK_UNINTERRUPTIBLE);
1479 			add_wait_queue(&chip->wq, &wait);
1480 			spin_unlock(chip->mutex);
1481 			schedule();
1482 			remove_wait_queue(&chip->wq, &wait);
1483 			spin_lock(chip->mutex);
1484 			continue;
1485 		}
1486 		if (chip->erase_suspended) {
1487 			/* This erase was suspended and resumed.
1488 			   Adjust the timeout */
1489 			timeo = jiffies + (HZ*20); /* FIXME */
1490 			chip->erase_suspended = 0;
1491 		}
1492 
1493 		if (chip_ready(map, adr))
1494 			break;
1495 
1496 		if (time_after(jiffies, timeo)) {
1497 			printk(KERN_WARNING "MTD %s(): software timeout\n",
1498 				__func__ );
1499 			break;
1500 		}
1501 
1502 		/* Latency issues. Drop the lock, wait a while and retry */
1503 		UDELAY(map, chip, adr, 1000000/HZ);
1504 	}
1505 	/* Did we succeed? */
1506 	if (!chip_good(map, adr, map_word_ff(map))) {
1507 		/* reset on all failures. */
1508 		map_write( map, CMD(0xF0), chip->start );
1509 		/* FIXME - should have reset delay before continuing */
1510 
1511 		ret = -EIO;
1512 	}
1513 
1514 	chip->state = FL_READY;
1515 	xip_enable(map, chip, adr);
1516 	put_chip(map, chip, adr);
1517 	spin_unlock(chip->mutex);
1518 
1519 	return ret;
1520 }
1521 
1522 
1523 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
1524 {
1525 	struct cfi_private *cfi = map->fldrv_priv;
1526 	unsigned long timeo = jiffies + HZ;
1527 	DECLARE_WAITQUEUE(wait, current);
1528 	int ret = 0;
1529 
1530 	adr += chip->start;
1531 
1532 	spin_lock(chip->mutex);
1533 	ret = get_chip(map, chip, adr, FL_ERASING);
1534 	if (ret) {
1535 		spin_unlock(chip->mutex);
1536 		return ret;
1537 	}
1538 
1539 	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
1540 	       __func__, adr );
1541 
1542 	XIP_INVAL_CACHED_RANGE(map, adr, len);
1543 	ENABLE_VPP(map);
1544 	xip_disable(map, chip, adr);
1545 
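	/* Same six-cycle sequence as chip erase, except the final 0x30 is
	 * written to the sector address so only that block is erased. */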
1546 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1547 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1548 	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1549 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1550 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1551 	map_write(map, CMD(0x30), adr);
1552 
1553 	chip->state = FL_ERASING;
1554 	chip->erase_suspended = 0;
1555 	chip->in_progress_block_addr = adr;
1556 
1557 	INVALIDATE_CACHE_UDELAY(map, chip,
1558 				adr, len,
1559 				chip->erase_time*500);
1560 
1561 	timeo = jiffies + (HZ*20);
1562 
1563 	for (;;) {
1564 		if (chip->state != FL_ERASING) {
1565 			/* Someone's suspended the erase. Sleep */
1566 			set_current_state(TASK_UNINTERRUPTIBLE);
1567 			add_wait_queue(&chip->wq, &wait);
1568 			spin_unlock(chip->mutex);
1569 			schedule();
1570 			remove_wait_queue(&chip->wq, &wait);
1571 			spin_lock(chip->mutex);
1572 			continue;
1573 		}
1574 		if (chip->erase_suspended) {
1575 			/* This erase was suspended and resumed.
1576 			   Adjust the timeout */
1577 			timeo = jiffies + (HZ*20); /* FIXME */
1578 			chip->erase_suspended = 0;
1579 		}
1580 
1581 		if (chip_ready(map, adr)) {
1582 			xip_enable(map, chip, adr);
1583 			break;
1584 		}
1585 
1586 		if (time_after(jiffies, timeo)) {
1587 			xip_enable(map, chip, adr);
1588 			printk(KERN_WARNING "MTD %s(): software timeout\n",
1589 				__func__ );
1590 			break;
1591 		}
1592 
1593 		/* Latency issues. Drop the lock, wait a while and retry */
1594 		UDELAY(map, chip, adr, 1000000/HZ);
1595 	}
1596 	/* Did we succeed? */
1597 	if (!chip_good(map, adr, map_word_ff(map))) {
1598 		/* reset on all failures. */
1599 		map_write( map, CMD(0xF0), chip->start );
1600 		/* FIXME - should have reset delay before continuing */
1601 
1602 		ret = -EIO;
1603 	}
1604 
1605 	chip->state = FL_READY;
1606 	put_chip(map, chip, adr);
1607 	spin_unlock(chip->mutex);
1608 	return ret;
1609 }
1610 
1611 
1612 static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1613 {
1614 	unsigned long ofs, len;
1615 	int ret;
1616 
1617 	ofs = instr->addr;
1618 	len = instr->len;
1619 
1620 	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1621 	if (ret)
1622 		return ret;
1623 
1624 	instr->state = MTD_ERASE_DONE;
1625 	mtd_erase_callback(instr);
1626 
1627 	return 0;
1628 }
1629 
1630 
1631 static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
1632 {
1633 	struct map_info *map = mtd->priv;
1634 	struct cfi_private *cfi = map->fldrv_priv;
1635 	int ret = 0;
1636 
1637 	if (instr->addr != 0)
1638 		return -EINVAL;
1639 
1640 	if (instr->len != mtd->size)
1641 		return -EINVAL;
1642 
1643 	ret = do_erase_chip(map, &cfi->chips[0]);
1644 	if (ret)
1645 		return ret;
1646 
1647 	instr->state = MTD_ERASE_DONE;
1648 	mtd_erase_callback(instr);
1649 
1650 	return 0;
1651 }
1652 
1653 static int do_atmel_lock(struct map_info *map, struct flchip *chip,
1654 			 unsigned long adr, int len, void *thunk)
1655 {
1656 	struct cfi_private *cfi = map->fldrv_priv;
1657 	int ret;
1658 
1659 	spin_lock(chip->mutex);
1660 	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
1661 	if (ret)
1662 		goto out_unlock;
1663 	chip->state = FL_LOCKING;
1664 
1665 	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
1666 	      __func__, adr, len);
1667 
1668 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1669 			 cfi->device_type, NULL);
1670 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
1671 			 cfi->device_type, NULL);
1672 	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
1673 			 cfi->device_type, NULL);
1674 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1675 			 cfi->device_type, NULL);
1676 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
1677 			 cfi->device_type, NULL);
1678 	map_write(map, CMD(0x40), chip->start + adr);
1679 
1680 	chip->state = FL_READY;
1681 	put_chip(map, chip, adr + chip->start);
1682 	ret = 0;
1683 
1684 out_unlock:
1685 	spin_unlock(chip->mutex);
1686 	return ret;
1687 }
1688 
1689 static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
1690 			   unsigned long adr, int len, void *thunk)
1691 {
1692 	struct cfi_private *cfi = map->fldrv_priv;
1693 	int ret;
1694 
1695 	spin_lock(chip->mutex);
1696 	ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
1697 	if (ret)
1698 		goto out_unlock;
1699 	chip->state = FL_UNLOCKING;
1700 
	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): UNLOCK 0x%08lx len %d\n",
1702 	      __func__, adr, len);
1703 
1704 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1705 			 cfi->device_type, NULL);
1706 	map_write(map, CMD(0x70), adr);
1707 
1708 	chip->state = FL_READY;
1709 	put_chip(map, chip, adr + chip->start);
1710 	ret = 0;
1711 
1712 out_unlock:
1713 	spin_unlock(chip->mutex);
1714 	return ret;
1715 }
1716 
1717 static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1718 {
1719 	return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
1720 }
1721 
1722 static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1723 {
1724 	return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
1725 }
1726 
1727 
1728 static void cfi_amdstd_sync (struct mtd_info *mtd)
1729 {
1730 	struct map_info *map = mtd->priv;
1731 	struct cfi_private *cfi = map->fldrv_priv;
1732 	int i;
1733 	struct flchip *chip;
1734 	int ret = 0;
1735 	DECLARE_WAITQUEUE(wait, current);
1736 
1737 	for (i=0; !ret && i<cfi->numchips; i++) {
1738 		chip = &cfi->chips[i];
1739 
1740 	retry:
1741 		spin_lock(chip->mutex);
1742 
1743 		switch(chip->state) {
1744 		case FL_READY:
1745 		case FL_STATUS:
1746 		case FL_CFI_QUERY:
1747 		case FL_JEDEC_QUERY:
1748 			chip->oldstate = chip->state;
1749 			chip->state = FL_SYNCING;
1750 			/* No need to wake_up() on this state change -
1751 			 * as the whole point is that nobody can do anything
1752 			 * with the chip now anyway.
1753 			 */
1754 		case FL_SYNCING:
1755 			spin_unlock(chip->mutex);
1756 			break;
1757 
1758 		default:
1759 			/* Not an idle state */
1760 			add_wait_queue(&chip->wq, &wait);
1761 
1762 			spin_unlock(chip->mutex);
1763 
1764 			schedule();
1765 
1766 			remove_wait_queue(&chip->wq, &wait);
1767 
1768 			goto retry;
1769 		}
1770 	}
1771 
1772 	/* Unlock the chips again */
1773 
1774 	for (i--; i >=0; i--) {
1775 		chip = &cfi->chips[i];
1776 
1777 		spin_lock(chip->mutex);
1778 
1779 		if (chip->state == FL_SYNCING) {
1780 			chip->state = chip->oldstate;
1781 			wake_up(&chip->wq);
1782 		}
1783 		spin_unlock(chip->mutex);
1784 	}
1785 }
1786 
1787 
1788 static int cfi_amdstd_suspend(struct mtd_info *mtd)
1789 {
1790 	struct map_info *map = mtd->priv;
1791 	struct cfi_private *cfi = map->fldrv_priv;
1792 	int i;
1793 	struct flchip *chip;
1794 	int ret = 0;
1795 
1796 	for (i=0; !ret && i<cfi->numchips; i++) {
1797 		chip = &cfi->chips[i];
1798 
1799 		spin_lock(chip->mutex);
1800 
1801 		switch(chip->state) {
1802 		case FL_READY:
1803 		case FL_STATUS:
1804 		case FL_CFI_QUERY:
1805 		case FL_JEDEC_QUERY:
1806 			chip->oldstate = chip->state;
1807 			chip->state = FL_PM_SUSPENDED;
1808 			/* No need to wake_up() on this state change -
1809 			 * as the whole point is that nobody can do anything
1810 			 * with the chip now anyway.
1811 			 */
1812 		case FL_PM_SUSPENDED:
1813 			break;
1814 
1815 		default:
1816 			ret = -EAGAIN;
1817 			break;
1818 		}
1819 		spin_unlock(chip->mutex);
1820 	}
1821 
1822 	/* Unlock the chips again */
1823 
1824 	if (ret) {
1825 		for (i--; i >=0; i--) {
1826 			chip = &cfi->chips[i];
1827 
1828 			spin_lock(chip->mutex);
1829 
1830 			if (chip->state == FL_PM_SUSPENDED) {
1831 				chip->state = chip->oldstate;
1832 				wake_up(&chip->wq);
1833 			}
1834 			spin_unlock(chip->mutex);
1835 		}
1836 	}
1837 
1838 	return ret;
1839 }
1840 
1841 
1842 static void cfi_amdstd_resume(struct mtd_info *mtd)
1843 {
1844 	struct map_info *map = mtd->priv;
1845 	struct cfi_private *cfi = map->fldrv_priv;
1846 	int i;
1847 	struct flchip *chip;
1848 
1849 	for (i=0; i<cfi->numchips; i++) {
1850 
1851 		chip = &cfi->chips[i];
1852 
1853 		spin_lock(chip->mutex);
1854 
1855 		if (chip->state == FL_PM_SUSPENDED) {
1856 			chip->state = FL_READY;
1857 			map_write(map, CMD(0xF0), chip->start);
1858 			wake_up(&chip->wq);
1859 		}
1860 		else
1861 			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
1862 
1863 		spin_unlock(chip->mutex);
1864 	}
1865 }
1866 
1867 static void cfi_amdstd_destroy(struct mtd_info *mtd)
1868 {
1869 	struct map_info *map = mtd->priv;
1870 	struct cfi_private *cfi = map->fldrv_priv;
1871 
1872 	kfree(cfi->cmdset_priv);
1873 	kfree(cfi->cfiq);
1874 	kfree(cfi);
1875 	kfree(mtd->eraseregions);
1876 }
1877 
1878 MODULE_LICENSE("GPL");
1879 MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
1880 MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
1881