/*
 * Common Flash Interface support:
 *   ST Advanced Architecture Command Set (ID 0x0020)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0020.c,v 1.22 2005/11/07 11:14:22 gleixner Exp $
 *
 * 10/10/2000	Nicolas Pitre <nico@cam.org>
 *	- completely revamped method functions so they are aware and
 *	  independent of the flash geometry (buswidth, interleave, etc.)
 *	- scalability vs code size is completely set at compile-time
 *	  (see include/linux/mtd/cfi.h for selection)
 *	- optimized write buffer method
 * 06/21/2002	Joern Engel <joern@wh.fh-wedel.de> and others
 *	- modified Intel Command Set 0x0001 to support ST Advanced Architecture
 *	  (command set 0x0020)
 *	- added a writev function
 * 07/13/2005	Joern Engel <joern@wh.fh-wedel.de>
 *	- Plugged memory leak in cfi_staa_writev().
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>


static int cfi_staa_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_staa_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen);
static int cfi_staa_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_staa_sync (struct mtd_info *);
static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_staa_suspend (struct mtd_info *);
static void cfi_staa_resume (struct mtd_info *);

static void cfi_staa_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0020(struct map_info *, int);

static struct mtd_info *cfi_staa_setup (struct map_info *);

static struct mtd_chip_driver cfi_staa_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_staa_destroy,
	.name		= "cfi_cmdset_0020",
	.module		= THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk("  Feature/Command Support: %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:         %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:      %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:    %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:       %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:    %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:     %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:   %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	for (i=9; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk("     - Unknown Bit %X:      supported\n", i);
	}

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X:               supported\n", i);
	}

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Valid Bit Active:     %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}

	/* BCD-coded bytes: volts in the high nibble, tenths in the low nibble */
	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

/* This routine is made available to other mtd code via
 * inter_module_register.  It must only be accessed through
 * inter_module_get which will bump the use count of this module.  The
 * addresses passed back in cfi are valid as long as the use count of
 * this module is non-zero, i.e. between inter_module_get and
 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */
struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int i;

	if (cfi->cfi_mode) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = (struct cfi_pri_intelext*)cfi_read_pri(map, adr, sizeof(*extp), "ST Microelectronics");
		if (!extp)
			return NULL;

		if (extp->MajorVersion != '1' ||
		    (extp->MinorVersion < '0' || extp->MinorVersion > '3')) {
			printk(KERN_ERR "  Unknown ST Microelectronics"
			       " Extended Query version %c.%c.\n",
			       extp->MajorVersion, extp->MinorVersion);
			kfree(extp);
			return NULL;
		}

		/* Do some byteswapping if necessary */
		extp->FeatureSupport = cfi32_to_cpu(extp->FeatureSupport);
		extp->BlkStatusRegMask = cfi32_to_cpu(extp->BlkStatusRegMask);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;
	}

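	/*
	 * Nominal per-chip timing defaults. buffer_write_time is used as a
	 * microsecond delay via cfi_udelay() in do_write_buffer() and is
	 * re-tuned there at runtime; the other two appear unused within
	 * this driver.
	 */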
	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 128;
		cfi->chips[i].buffer_write_time = 128;
		cfi->chips[i].erase_time = 1024;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	return cfi_staa_setup(map);
}
EXPORT_SYMBOL_GPL(cfi_cmdset_0020);

static struct mtd_info *cfi_staa_setup(struct map_info *map)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		kfree(cfi->cmdset_priv);
		return NULL;
	}

	mtd->priv = map;
	mtd->type = MTD_NORFLASH;
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		kfree(cfi->cmdset_priv);
		kfree(mtd);
		return NULL;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
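		/*
		 * Each 32-bit CFI erase region descriptor encodes the block
		 * size in its upper 16 bits (in units of 256 bytes) and the
		 * number of blocks minus one in its lower 16 bits; the shift
		 * and mask below unpack exactly that, scaled by the
		 * interleave.
		 */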
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		kfree(mtd->eraseregions);
		kfree(cfi->cmdset_priv);
		kfree(mtd);
		return NULL;
	}

	for (i = 0; i < mtd->numeraseregions; i++) {
		printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i, mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

	/* Also select the correct geometry setup too */
	mtd->erase = cfi_staa_erase_varsize;
	mtd->read = cfi_staa_read;
	mtd->write = cfi_staa_write_buffers;
	mtd->writev = cfi_staa_writev;
	mtd->sync = cfi_staa_sync;
	mtd->lock = cfi_staa_lock;
	mtd->unlock = cfi_staa_unlock;
	mtd->suspend = cfi_staa_suspend;
	mtd->resume = cfi_staa_resume;
	mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE;
	mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */
	map->fldrv = &cfi_staa_chipdrv;
	__module_get(THIS_MODULE);
	mtd->name = map->name;
	return mtd;
}


static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	map_word status, status_OK;
	unsigned long timeo;
	DECLARE_WAITQUEUE(wait, current);
	int suspended = 0;
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);
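	/*
	 * 0x80 is SR.7, the Write State Machine "ready" bit; CMD() replicates
	 * it across every interleaved device, so the map_word_andequal()
	 * polls below succeed only once all chips on the bus report ready.
	 */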

	timeo = jiffies + HZ;
 retry:
	spin_lock_bh(chip->mutex);

	/* Check that the chip's ready to talk to us.
	 * If it's in FL_ERASING state, suspend it and make it talk now.
	 */
	switch (chip->state) {
	case FL_ERASING:
		if (!(((struct cfi_pri_intelext *)cfi->cmdset_priv)->FeatureSupport & 2))
			goto sleep; /* We don't support erase suspend */

		map_write (map, CMD(0xb0), cmd_addr);
		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), cmd_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		//		printk("Erase suspending at 0x%lx\n", cmd_addr);
		for (;;) {
			status = map_read(map, cmd_addr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh */
				map_write(map, CMD(0xd0), cmd_addr);
				/* make sure we're in 'read status' mode */
				map_write(map, CMD(0x70), cmd_addr);
				chip->state = FL_ERASING;
				spin_unlock_bh(chip->mutex);
				printk(KERN_ERR "Chip not ready after erase "
				       "suspended: status = 0x%lx\n", status.x[0]);
				return -EIO;
			}

			spin_unlock_bh(chip->mutex);
			cfi_udelay(1);
			spin_lock_bh(chip->mutex);
		}

		suspended = 1;
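		/* Back to Read Array mode (0xFF) so the map_copy_from()
		   below fetches data rather than status. */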
		map_write(map, CMD(0xff), cmd_addr);
		chip->state = FL_READY;
		break;

#if 0
	case FL_WRITING:
		/* Not quite yet */
#endif

	case FL_READY:
		break;

	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		map_write(map, CMD(0x70), cmd_addr);
		chip->state = FL_STATUS;

	case FL_STATUS:
		status = map_read(map, cmd_addr);
		if (map_word_andequal(map, status, status_OK, status_OK)) {
			map_write(map, CMD(0xff), cmd_addr);
			chip->state = FL_READY;
			break;
		}

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
	sleep:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock_bh(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	map_copy_from(map, buf, adr, len);

	if (suspended) {
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), cmd_addr);
		map_write(map, CMD(0x70), cmd_addr);
	}

	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);
	return 0;
}

static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum <<  cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

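		/* Clamp this transfer at the chip boundary if the request
		   spans chips; the remainder continues on the next chip. */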
		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
				  unsigned long adr, const u_char *buf, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long cmd_adr, timeo;
	DECLARE_WAITQUEUE(wait, current);
	int wbufsize, z;

	/* M58LW064A requires bus alignment for buffer writes -- saw */
	if (adr & (map_bankwidth(map)-1))
		return -EINVAL;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	cmd_adr = adr & ~(wbufsize-1);
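	/* Commands for a buffer write are issued at the write-buffer-aligned
	   address; wbufsize is a power of two (interleave << MaxBufWriteSize),
	   so the mask rounds cmd_adr down to the start of the buffer. */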

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
 retry:

#ifdef DEBUG_CFI_FEATURES
	printk("%s: chip->state[%d]\n", __FUNCTION__, chip->state);
#endif
	spin_lock_bh(chip->mutex);

	/* Check that the chip's ready to talk to us.
	 * Later, we can actually think about interrupting it
	 * if it's in FL_ERASING state.
	 * Not just yet, though.
	 */
	switch (chip->state) {
	case FL_READY:
		break;

	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
#ifdef DEBUG_CFI_FEATURES
		printk("%s: 1 status[%lx]\n", __FUNCTION__, map_read(map, cmd_adr).x[0]);
#endif

	case FL_STATUS:
		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;
		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n",
			       status.x[0], map_read(map, cmd_adr).x[0]);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock_bh(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	map_write(map, CMD(0xe8), cmd_adr);
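	/* 0xE8 = Write to Buffer. After this command the device returns
	   extended status on reads; bit 7 set means the buffer is available,
	   which is what the poll below waits for. */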
	chip->state = FL_WRITING_TO_BUFFER;

	z = 0;
	for (;;) {
		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		spin_lock_bh(chip->mutex);

		if (++z > 100) {
			/* Argh. Not ready for write to buffer */
			DISABLE_VPP(map);
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]);
			return -EIO;
		}
	}

	/* Write the word count minus one; the chip expects a zero-based
	   count of the bus words to follow */
	map_write(map, CMD(len/map_bankwidth(map)-1), cmd_adr);

	/* Write data */
	for (z = 0; z < len;
	     z += map_bankwidth(map), buf += map_bankwidth(map)) {
		map_word d;
		d = map_word_load(map, buf);
		map_write(map, d, adr+z);
	}
	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	spin_unlock_bh(chip->mutex);
	cfi_udelay(chip->buffer_write_time);
	spin_lock_bh(chip->mutex);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock_bh(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock_bh(chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			/* clear status */
			map_write(map, CMD(0x50), cmd_adr);
			/* put back into read status register mode */
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			DISABLE_VPP(map);
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		z++;
		spin_lock_bh(chip->mutex);
	}
	if (!z) {
		chip->buffer_write_time--;
		if (!chip->buffer_write_time)
			chip->buffer_write_time++;
	}
	if (z > 1)
		chip->buffer_write_time++;

	/* Done and happy. */
	DISABLE_VPP(map);
	chip->state = FL_STATUS;

	/* check for errors: 'lock bit', 'VPP', 'dead cell'/'unerased cell' or 'incorrect cmd' -- saw */
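	/* 0x3a covers SR.1 (block locked), SR.3 (VPP low), SR.4 (program
	   failure) and SR.5 (erase failure) in the Intel/ST status layout. */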
	if (map_word_bitsset(map, status, CMD(0x3a))) {
#ifdef DEBUG_CFI_FEATURES
		printk("%s: 2 status[%lx]\n", __FUNCTION__, status.x[0]);
#endif
		/* clear status */
		map_write(map, CMD(0x50), cmd_adr);
		/* put back into read status register mode */
		map_write(map, CMD(0x70), adr);
		wake_up(&chip->wq);
		spin_unlock_bh(chip->mutex);
		return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO;
	}
	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);

	return 0;
}

static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
				   size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to  - (chipnum << cfi->chipshift);

#ifdef DEBUG_CFI_FEATURES
	printk("%s: map_bankwidth(map)[%x]\n", __FUNCTION__, map_bankwidth(map));
	printk("%s: chipnum[%x] wbufsize[%x]\n", __FUNCTION__, chipnum, wbufsize);
	printk("%s: ofs[%lx] len[%zx]\n", __FUNCTION__, ofs, len);
#endif

	/* Write buffer is worth it only if more than one word to write... */
	while (len > 0) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	return 0;
}

/*
 * Writev for ECC-Flashes is a little more complicated. We need to maintain
 * a small buffer for this.
 * XXX: If the buffer size is not a power of 2, this will break
 */
#define ECCBUF_SIZE (mtd->writesize)
#define ECCBUF_DIV(x) ((x) & ~(ECCBUF_SIZE - 1))
#define ECCBUF_MOD(x) ((x) &  (ECCBUF_SIZE - 1))
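/*
 * Worked example, assuming mtd->writesize == 8: for a 13-byte vector,
 * ECCBUF_DIV(13) == 8 bytes are written straight to flash and
 * ECCBUF_MOD(13) == 5 bytes are held back in the bounce buffer until a
 * later vector (or the final flush) completes the ECC page.
 */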
static int
cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen)
{
	unsigned long i;
	size_t	 totlen = 0, thislen = 0;
	int	 ret = 0;
	size_t	 buflen = 0;
	char	*buffer;

	if (!ECCBUF_SIZE) {
		/* We should fall back to a general writev implementation.
		 * Until that is written, just break.
		 */
		return -EIO;
	}
	buffer = kmalloc(ECCBUF_SIZE, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	for (i=0; i<count; i++) {
		size_t elem_len = vecs[i].iov_len;
		void *elem_base = vecs[i].iov_base;
		if (!elem_len) /* FIXME: Might be unnecessary. Check that */
			continue;
		if (buflen) { /* cut off head */
			if (buflen + elem_len < ECCBUF_SIZE) { /* just accumulate */
				memcpy(buffer+buflen, elem_base, elem_len);
				buflen += elem_len;
				continue;
			}
			memcpy(buffer+buflen, elem_base, ECCBUF_SIZE-buflen);
			ret = mtd->write(mtd, to, ECCBUF_SIZE, &thislen, buffer);
			totlen += thislen;
			if (ret || thislen != ECCBUF_SIZE)
				goto write_error;
			elem_len -= thislen-buflen;
			elem_base += thislen-buflen;
			to += ECCBUF_SIZE;
		}
		if (ECCBUF_DIV(elem_len)) { /* write clean aligned data */
			ret = mtd->write(mtd, to, ECCBUF_DIV(elem_len), &thislen, elem_base);
			totlen += thislen;
			if (ret || thislen != ECCBUF_DIV(elem_len))
				goto write_error;
			to += thislen;
		}
		buflen = ECCBUF_MOD(elem_len); /* cut off tail */
		if (buflen) {
			memset(buffer, 0xff, ECCBUF_SIZE);
			memcpy(buffer, elem_base + ECCBUF_DIV(elem_len), buflen);
		}
	}
	if (buflen) { /* flush last page, even if not full */
		/* This is sometimes intended behaviour, really */
		ret = mtd->write(mtd, to, buflen, &thislen, buffer);
		totlen += thislen;
		if (ret || thislen != buflen)
			goto write_error;
	}
write_error:
	if (retlen)
		*retlen = totlen;
	kfree(buffer);
	return ret;
}


static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo;
	int retries = 3;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	spin_lock_bh(chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;

	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock_bh(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;
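	/* Block erase on these parts typically takes on the order of a
	   second (hence the 20s poll timeout below), so sleep for a full
	   second before even looking at the status register. */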

	spin_unlock_bh(chip->mutex);
	msleep(1000);
	spin_lock_bh(chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*20);
	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock_bh(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ*20); /* FIXME */
			spin_lock_bh(chip->mutex);
			continue;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			spin_unlock_bh(chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		spin_lock_bh(chip->mutex);
	}

	DISABLE_VPP(map);
	ret = 0;

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned char chipstatus = status.x[0];
		if (!map_word_equal(map, status, CMD(chipstatus))) {
			int i, w;
			for (w=0; w<map_words(map); w++) {
				for (i = 0; i<cfi_interleave(cfi); i++) {
					chipstatus |= status.x[w] >>
						(i * cfi->device_type * 8);
				}
			}
			printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
			       status.x[0], chipstatus);
		}
		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);

		if ((chipstatus & 0x30) == 0x30) {
			printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* Voltage */
			printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x20) {
			if (retries--) {
				printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
				timeo = jiffies + HZ;
				chip->state = FL_STATUS;
				spin_unlock_bh(chip->mutex);
				goto retry;
			}
			printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
			ret = -EIO;
		}
	}

	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);
	return ret;
}

static int cfi_staa_erase_varsize(struct mtd_info *mtd,
				  struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr, len;
	int chipnum, ret = 0;
	int i, first;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	if (instr->addr > mtd->size)
		return -EINVAL;

	if ((instr->len + instr->addr) > mtd->size)
		return -EINVAL;

	/* Check that both start and end of the requested erase are
	 * aligned with the erasesize at the appropriate addresses.
	 */

	i = 0;

	/* Skip all erase regions which are ended before the start of
	   the requested erase. Actually, to save on the calculations,
	   we skip to the first erase region which starts after the
	   start of the requested erase, and then go back one.
	*/

	while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
	       i++;
	i--;

	/* OK, now i is pointing at the erase region in which this
	   erase request starts. Check the start of the requested
	   erase range is aligned with the erase size which is in
	   effect here.
	*/

	if (instr->addr & (regions[i].erasesize-1))
		return -EINVAL;

	/* Remember the erase region we start on */
	first = i;

	/* Next, check that the end of the requested erase is aligned
	 * with the erase region at that address.
	 */

	while (i<mtd->numeraseregions && (instr->addr + instr->len) >= regions[i].offset)
		i++;

	/* As before, drop back one to point at the region in which
	   the address actually falls
	*/
	i--;

	if ((instr->addr + instr->len) & (regions[i].erasesize-1))
		return -EINVAL;

	chipnum = instr->addr >> cfi->chipshift;
	adr = instr->addr - (chipnum << cfi->chipshift);
	len = instr->len;

	i=first;

	while(len) {
		ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);

		if (ret)
			return ret;

		adr += regions[i].erasesize;
		len -= regions[i].erasesize;

		if (adr % (1 << cfi->chipshift) == ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) % (1 << cfi->chipshift)))
			i++;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

static void cfi_staa_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		spin_lock_bh(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_SYNCING:
			spin_unlock_bh(chip->mutex);
			break;

		default:
			/* Not an idle state */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			spin_unlock_bh(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		spin_lock_bh(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		spin_unlock_bh(chip->mutex);
	}
}

static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	spin_lock_bh(chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;

	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in lock\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock_bh(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
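	/* Set Block Lock Bit: 0x60 (Lock Setup) followed by 0x01
	   (Lock Confirm) at the block's address. */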
	map_write(map, CMD(0x60), adr);
	map_write(map, CMD(0x01), adr);
	chip->state = FL_LOCKING;

	spin_unlock_bh(chip->mutex);
	msleep(1000);
	spin_lock_bh(chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*2);
	for (;;) {

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			spin_unlock_bh(chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		spin_lock_bh(chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	DISABLE_VPP(map);
	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);
	return 0;
}

static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret = 0;
#ifdef DEBUG_LOCK_BITS
	int ofs_factor = cfi->interleave * cfi->device_type;
#endif

	if (ofs & (mtd->erasesize - 1))
		return -EINVAL;

	if (len & (mtd->erasesize -1))
		return -EINVAL;

	if ((len + ofs) > mtd->size)
		return -EINVAL;

	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

	while(len) {

#ifdef DEBUG_LOCK_BITS
		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		printk("before lock: block status register is %x\n", cfi_read_query(map, adr+(2*ofs_factor)));
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

		ret = do_lock_oneblock(map, &cfi->chips[chipnum], adr);

#ifdef DEBUG_LOCK_BITS
		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		printk("after lock: block status register is %x\n", cfi_read_query(map, adr+(2*ofs_factor)));
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

		if (ret)
			return ret;

		adr += mtd->erasesize;
		len -= mtd->erasesize;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}
	return 0;
}

static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	spin_lock_bh(chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;

	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock_bh(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
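	/* Clear Block Lock Bit: 0x60 (Lock Setup) followed by 0xD0
	   (Unlock Confirm). */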
	map_write(map, CMD(0x60), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_UNLOCKING;

	spin_unlock_bh(chip->mutex);
	msleep(1000);
	spin_lock_bh(chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*2);
	for (;;) {

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			spin_unlock_bh(chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		spin_lock_bh(chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	DISABLE_VPP(map);
	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);
	return 0;
}

static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret = 0;
#ifdef DEBUG_LOCK_BITS
	int ofs_factor = cfi->interleave * cfi->device_type;
#endif

	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);
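	/* Only one Clear Block Lock Bits cycle is issued here: on these ST
	 * parts the 0x60/0xD0 sequence apparently clears the lock bits of
	 * the whole chip, so there is no per-block loop as in
	 * cfi_staa_lock().
	 */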

#ifdef DEBUG_LOCK_BITS
	{
		unsigned long temp_adr = adr;
		unsigned long temp_len = len;

		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		while (temp_len) {
			printk("before unlock %lx: block status register is %x\n", temp_adr, cfi_read_query(map, temp_adr+(2*ofs_factor)));
			temp_adr += mtd->erasesize;
			temp_len -= mtd->erasesize;
		}
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
	}
#endif

	ret = do_unlock_oneblock(map, &cfi->chips[chipnum], adr);

#ifdef DEBUG_LOCK_BITS
	cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
	printk("after unlock: block status register is %x\n", cfi_read_query(map, adr+(2*ofs_factor)));
	cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

	return ret;
}

static int cfi_staa_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock_bh(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		spin_unlock_bh(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			spin_lock_bh(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			spin_unlock_bh(chip->mutex);
		}
	}

	return ret;
}

static void cfi_staa_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock_bh(chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			map_write(map, CMD(0xFF), chip->start);
			chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		spin_unlock_bh(chip->mutex);
	}
}

static void cfi_staa_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	kfree(cfi->cmdset_priv);
	kfree(cfi);
}

MODULE_LICENSE("GPL");