/*
 * Common Flash Interface support:
 *   ST Advanced Architecture Command Set (ID 0x0020)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0020.c,v 1.22 2005/11/07 11:14:22 gleixner Exp $
 *
 * 10/10/2000	Nicolas Pitre <nico@cam.org>
 * 	- completely revamped method functions so they are aware and
 * 	  independent of the flash geometry (buswidth, interleave, etc.)
 * 	- scalability vs code size is completely set at compile-time
 * 	  (see include/linux/mtd/cfi.h for selection)
 *	- optimized write buffer method
 * 06/21/2002	Joern Engel <joern@wh.fh-wedel.de> and others
 *	- modified Intel Command Set 0x0001 to support ST Advanced Architecture
 *	  (command set 0x0020)
 *	- added a writev function
 * 07/13/2005	Joern Engel <joern@wh.fh-wedel.de>
 * 	- Plugged memory leak in cfi_staa_writev().
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>


static int cfi_staa_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_staa_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen);
static int cfi_staa_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_staa_sync (struct mtd_info *);
static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_staa_suspend (struct mtd_info *);
static void cfi_staa_resume (struct mtd_info *);

static void cfi_staa_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0020(struct map_info *, int);

static struct mtd_info *cfi_staa_setup (struct map_info *);

static struct mtd_chip_driver cfi_staa_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_staa_destroy,
	.name		= "cfi_cmdset_0020",
	.module		= THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk("  Feature/Command Support: %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:         %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:      %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:    %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:       %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:    %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:     %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:   %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	for (i=9; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk("     - Unknown Bit %X:      supported\n", i);
	}

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X:               supported\n", i);
	}

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Valid Bit Active:     %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}

	/* VccOptimal/VppOptimal are single BCD bytes: volts in the high
	   nibble, tenths in the low nibble, hence the >> 4 */
	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

/* This routine is made available to other mtd code via
 * inter_module_register.  It must only be accessed through
 * inter_module_get which will bump the use count of this module.  The
 * addresses passed back in cfi are valid as long as the use count of
 * this module is non-zero, i.e. between inter_module_get and
 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */
struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int i;

	if (cfi->cfi_mode) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = (struct cfi_pri_intelext*)cfi_read_pri(map, adr, sizeof(*extp), "ST Microelectronics");
		if (!extp)
			return NULL;

		if (extp->MajorVersion != '1' ||
		    (extp->MinorVersion < '0' || extp->MinorVersion > '3')) {
			printk(KERN_ERR "  Unknown ST Microelectronics"
			       " Extended Query version %c.%c.\n",
			       extp->MajorVersion, extp->MinorVersion);
			kfree(extp);
			return NULL;
		}

		/* Do some byteswapping if necessary */
		extp->FeatureSupport = cfi32_to_cpu(extp->FeatureSupport);
		extp->BlkStatusRegMask = cfi32_to_cpu(extp->BlkStatusRegMask);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;
	}

	for (i = 0; i < cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 128;
		cfi->chips[i].buffer_write_time = 128;
		cfi->chips[i].erase_time = 1024;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	return cfi_staa_setup(map);
}
EXPORT_SYMBOL_GPL(cfi_cmdset_0020);

static struct mtd_info *cfi_staa_setup(struct map_info *map)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		kfree(cfi->cmdset_priv);
		return NULL;
	}

	mtd->priv = map;
	mtd->type = MTD_NORFLASH;
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		kfree(cfi->cmdset_priv);
		kfree(mtd);
		return NULL;
	}

	for (i = 0; i < cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j = 0; j < cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		kfree(mtd->eraseregions);
		kfree(cfi->cmdset_priv);
		kfree(mtd);
		return NULL;
	}

	for (i = 0; i < mtd->numeraseregions; i++) {
		printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i, mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

	/* Also select the correct geometry setup too */
	mtd->erase = cfi_staa_erase_varsize;
	mtd->read = cfi_staa_read;
	mtd->write = cfi_staa_write_buffers;
	mtd->writev = cfi_staa_writev;
	mtd->sync = cfi_staa_sync;
	mtd->lock = cfi_staa_lock;
	mtd->unlock = cfi_staa_unlock;
	mtd->suspend = cfi_staa_suspend;
	mtd->resume = cfi_staa_resume;
	mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE;
	mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */
	map->fldrv = &cfi_staa_chipdrv;
	__module_get(THIS_MODULE);
	mtd->name = map->name;
	return mtd;
}


static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	map_word status, status_OK;
	unsigned long timeo;
	DECLARE_WAITQUEUE(wait, current);
	int suspended = 0;
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);
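	/*
	 * 0x80 is SR.7, the "write state machine ready" bit of the
	 * Intel/ST-style status register. CMD() replicates the value
	 * across every interleaved chip on the bus, so a single
	 * map_word_andequal() test checks that all chips are ready.
	 */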

	timeo = jiffies + HZ;
 retry:
	spin_lock_bh(chip->mutex);

	/* Check that the chip's ready to talk to us.
	 * If it's in FL_ERASING state, suspend it and make it talk now.
	 */
	switch (chip->state) {
	case FL_ERASING:
		if (!(((struct cfi_pri_intelext *)cfi->cmdset_priv)->FeatureSupport & 2))
			goto sleep; /* We don't support erase suspend */

		map_write (map, CMD(0xb0), cmd_addr);
		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), cmd_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		//		printk("Erase suspending at 0x%lx\n", cmd_addr);
		for (;;) {
			status = map_read(map, cmd_addr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh */
				map_write(map, CMD(0xd0), cmd_addr);
				/* make sure we're in 'read status' mode */
				map_write(map, CMD(0x70), cmd_addr);
				chip->state = FL_ERASING;
				spin_unlock_bh(chip->mutex);
				printk(KERN_ERR "Chip not ready after erase "
				       "suspended: status = 0x%lx\n", status.x[0]);
				return -EIO;
			}

			spin_unlock_bh(chip->mutex);
			cfi_udelay(1);
			spin_lock_bh(chip->mutex);
		}

		suspended = 1;
		map_write(map, CMD(0xff), cmd_addr);
		chip->state = FL_READY;
		break;

#if 0
	case FL_WRITING:
		/* Not quite yet */
#endif

	case FL_READY:
		break;

	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		map_write(map, CMD(0x70), cmd_addr);
		chip->state = FL_STATUS;
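		/* Fall through */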

	case FL_STATUS:
		status = map_read(map, cmd_addr);
		if (map_word_andequal(map, status, status_OK, status_OK)) {
			map_write(map, CMD(0xff), cmd_addr);
			chip->state = FL_READY;
			break;
		}

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
	sleep:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock_bh(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	map_copy_from(map, buf, adr, len);

	if (suspended) {
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), cmd_addr);
		map_write(map, CMD(0x70), cmd_addr);
	}

	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);
	return 0;
}

static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);
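	/*
	 * Worked example (hypothetical numbers): with 8 MiB chips,
	 * chipshift is 23, so from == 0x900000 gives chipnum == 1 and
	 * ofs == 0x100000, i.e. 1 MiB into the second chip.
	 */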

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
				  unsigned long adr, const u_char *buf, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long cmd_adr, timeo;
	DECLARE_WAITQUEUE(wait, current);
	int wbufsize, z;

	/* M58LW064A requires bus alignment for buffer writes -- saw */
	if (adr & (map_bankwidth(map)-1))
		return -EINVAL;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
 retry:

#ifdef DEBUG_CFI_FEATURES
	printk("%s: chip->state[%d]\n", __FUNCTION__, chip->state);
#endif
	spin_lock_bh(chip->mutex);

	/* Check that the chip's ready to talk to us.
	 * Later, we can actually think about interrupting it
	 * if it's in FL_ERASING state.
	 * Not just yet, though.
	 */
	switch (chip->state) {
	case FL_READY:
		break;

	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
#ifdef DEBUG_CFI_FEATURES
		printk("%s: 1 status[%lx]\n", __FUNCTION__, map_read(map, cmd_adr).x[0]);
#endif
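		/* Fall through */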

	case FL_STATUS:
		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;
		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n",
			       status.x[0], map_read(map, cmd_adr).x[0]);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock_bh(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

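	/*
	 * Buffer-write sequence: issue 0xE8 (Write to Buffer) and poll
	 * the status register until the chip signals a buffer is
	 * available, then write the word count minus one, the data
	 * itself, and finally 0xD0 to confirm and start programming.
	 */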
	ENABLE_VPP(map);
	map_write(map, CMD(0xe8), cmd_adr);
	chip->state = FL_WRITING_TO_BUFFER;

	z = 0;
	for (;;) {
		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		spin_lock_bh(chip->mutex);

		if (++z > 100) {
			/* Argh. Not ready for write to buffer */
			DISABLE_VPP(map);
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]);
			return -EIO;
		}
	}

	/* Write length of data to come */
	map_write(map, CMD(len/map_bankwidth(map)-1), cmd_adr);

	/* Write data */
	for (z = 0; z < len;
	     z += map_bankwidth(map), buf += map_bankwidth(map)) {
		map_word d;
		d = map_word_load(map, buf);
		map_write(map, d, adr+z);
	}
	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	spin_unlock_bh(chip->mutex);
	cfi_udelay(chip->buffer_write_time);
	spin_lock_bh(chip->mutex);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock_bh(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock_bh(chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			/* clear status */
			map_write(map, CMD(0x50), cmd_adr);
			/* put back into read status register mode */
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			DISABLE_VPP(map);
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		z++;
		spin_lock_bh(chip->mutex);
	}
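	/*
	 * Simple adaptive tuning of the programmed delay: if the chip
	 * was already done on the first status poll, the delay was too
	 * long, so trim it (never below 1us); if we had to poll more
	 * than once, it was too short, so stretch it.
	 */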
	if (!z) {
		chip->buffer_write_time--;
		if (!chip->buffer_write_time)
			chip->buffer_write_time++;
	}
	if (z > 1)
		chip->buffer_write_time++;

	/* Done and happy. */
	DISABLE_VPP(map);
	chip->state = FL_STATUS;

	/* check for errors: 'lock bit', 'VPP', 'dead cell'/'unerased cell' or 'incorrect cmd' -- saw */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
#ifdef DEBUG_CFI_FEATURES
		printk("%s: 2 status[%lx]\n", __FUNCTION__, status.x[0]);
#endif
		/* clear status */
		map_write(map, CMD(0x50), cmd_adr);
		/* put back into read status register mode */
		map_write(map, CMD(0x70), adr);
		wake_up(&chip->wq);
		spin_unlock_bh(chip->mutex);
		return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO;
	}
	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);

	return 0;
}

static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
				   size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

#ifdef DEBUG_CFI_FEATURES
	printk("%s: map_bankwidth(map)[%x]\n", __FUNCTION__, map_bankwidth(map));
	printk("%s: chipnum[%x] wbufsize[%x]\n", __FUNCTION__, chipnum, wbufsize);
	printk("%s: ofs[%lx] len[%zx]\n", __FUNCTION__, ofs, len);
#endif

	/* Write buffer is worth it only if more than one word to write... */
	while (len > 0) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	return 0;
}

/*
 * Writev for ECC-Flashes is a little more complicated. We need to maintain
 * a small buffer for this.
 * XXX: If the buffer size is not a power of 2, this will break
 */
#define ECCBUF_SIZE (mtd->writesize)
#define ECCBUF_DIV(x) ((x) & ~(ECCBUF_SIZE - 1))
#define ECCBUF_MOD(x) ((x) &  (ECCBUF_SIZE - 1))
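/*
 * Worked example (assuming the writesize of 8 set in cfi_staa_setup):
 * for x == 27, ECCBUF_DIV(27) == 24 (the largest multiple of 8 not
 * above it) and ECCBUF_MOD(27) == 3, so ECCBUF_DIV(x) + ECCBUF_MOD(x)
 * == x. The bit masks only decompose x this way when ECCBUF_SIZE is a
 * power of 2.
 */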
static int
cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen)
{
	unsigned long i;
	size_t	 totlen = 0, thislen;
	int	 ret = 0;
	size_t	 buflen = 0;
	char	 *buffer;

	if (!ECCBUF_SIZE) {
		/* We should fall back to a general writev implementation.
		 * Until that is written, just break.
		 */
		return -EIO;
	}
	buffer = kmalloc(ECCBUF_SIZE, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	for (i=0; i<count; i++) {
		size_t elem_len = vecs[i].iov_len;
		void *elem_base = vecs[i].iov_base;
		if (!elem_len) /* FIXME: Might be unnecessary. Check that */
			continue;
		if (buflen) { /* cut off head */
			if (buflen + elem_len < ECCBUF_SIZE) { /* just accumulate */
				memcpy(buffer+buflen, elem_base, elem_len);
				buflen += elem_len;
				continue;
			}
			memcpy(buffer+buflen, elem_base, ECCBUF_SIZE-buflen);
			ret = mtd->write(mtd, to, ECCBUF_SIZE, &thislen, buffer);
			totlen += thislen;
			if (ret || thislen != ECCBUF_SIZE)
				goto write_error;
			elem_len -= thislen-buflen;
			elem_base += thislen-buflen;
			to += ECCBUF_SIZE;
		}
		if (ECCBUF_DIV(elem_len)) { /* write clean aligned data */
			ret = mtd->write(mtd, to, ECCBUF_DIV(elem_len), &thislen, elem_base);
			totlen += thislen;
			if (ret || thislen != ECCBUF_DIV(elem_len))
				goto write_error;
			to += thislen;
		}
		buflen = ECCBUF_MOD(elem_len); /* cut off tail */
		if (buflen) {
			memset(buffer, 0xff, ECCBUF_SIZE);
			memcpy(buffer, elem_base + thislen, buflen);
		}
	}
	if (buflen) { /* flush last page, even if not full */
		/* This is sometimes intended behaviour, really */
		ret = mtd->write(mtd, to, buflen, &thislen, buffer);
		totlen += thislen;
		if (ret || thislen != buflen)
			goto write_error;
	}
write_error:
	if (retlen)
		*retlen = totlen;
	kfree(buffer);
	return ret;
}


static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo;
	int retries = 3;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	spin_lock_bh(chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
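		/* Fall through */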

	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock_bh(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;
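	/*
	 * 0x20/0xD0 is the two-cycle block-erase command: 0x20 is the
	 * erase setup, 0xD0 the confirm. An unmatched half of the pair
	 * shows up as a command-sequence error in the status register
	 * (the 0x30 pattern checked further down).
	 */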

	spin_unlock_bh(chip->mutex);
	msleep(1000);
	spin_lock_bh(chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*20);
	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock_bh(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ*20); /* FIXME */
			spin_lock_bh(chip->mutex);
			continue;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			spin_unlock_bh(chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		spin_lock_bh(chip->mutex);
	}

	DISABLE_VPP(map);
	ret = 0;

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

	/* check for lock bit */
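	/*
	 * Mask 0x3a covers the status-register error bits: SR.1 (block
	 * locked), SR.3 (VPP low), SR.4 (program failure) and SR.5
	 * (erase failure). If the per-chip status bytes of an
	 * interleaved set disagree, they are OR-merged below so that
	 * any error on any chip is reported.
	 */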
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned char chipstatus = status.x[0];
		if (!map_word_equal(map, status, CMD(chipstatus))) {
			int i, w;
			for (w=0; w<map_words(map); w++) {
				for (i = 0; i<cfi_interleave(cfi); i++) {
					chipstatus |= status.x[w] >> (cfi->device_type * 8);
				}
			}
			printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
			       status.x[0], chipstatus);
		}
		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);

		if ((chipstatus & 0x30) == 0x30) {
			printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* Voltage */
			printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x20) {
			if (retries--) {
				printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
				timeo = jiffies + HZ;
				chip->state = FL_STATUS;
				spin_unlock_bh(chip->mutex);
				goto retry;
			}
			printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
			ret = -EIO;
		}
	}

	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);
	return ret;
}

static int cfi_staa_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr, len;
	int chipnum, ret = 0;
	int i, first;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	if (instr->addr > mtd->size)
		return -EINVAL;

	if ((instr->len + instr->addr) > mtd->size)
		return -EINVAL;

	/* Check that both start and end of the requested erase are
	 * aligned with the erasesize at the appropriate addresses.
	 */

	i = 0;

	/* Skip all erase regions which are ended before the start of
	   the requested erase. Actually, to save on the calculations,
	   we skip to the first erase region which starts after the
	   start of the requested erase, and then go back one.
	*/

	while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
		i++;
	i--;

	/* OK, now i is pointing at the erase region in which this
	   erase request starts. Check the start of the requested
	   erase range is aligned with the erase size which is in
	   effect here.
	*/

	if (instr->addr & (regions[i].erasesize-1))
		return -EINVAL;

	/* Remember the erase region we start on */
	first = i;

	/* Next, check that the end of the requested erase is aligned
	 * with the erase region at that address.
	 */

	while (i<mtd->numeraseregions && (instr->addr + instr->len) >= regions[i].offset)
		i++;

	/* As before, drop back one to point at the region in which
	   the address actually falls
	*/
	i--;

	if ((instr->addr + instr->len) & (regions[i].erasesize-1))
		return -EINVAL;

	chipnum = instr->addr >> cfi->chipshift;
	adr = instr->addr - (chipnum << cfi->chipshift);
	len = instr->len;

	i = first;

	while (len) {
		ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);

		if (ret)
			return ret;

		adr += regions[i].erasesize;
		len -= regions[i].erasesize;

		if (adr % (1<< cfi->chipshift) == ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift)))
			i++;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

static void cfi_staa_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		spin_lock_bh(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
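			/* Fall through */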
		case FL_SYNCING:
			spin_unlock_bh(chip->mutex);
			break;

		default:
			/* Not an idle state */
			add_wait_queue(&chip->wq, &wait);

			spin_unlock_bh(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >= 0; i--) {
		chip = &cfi->chips[i];

		spin_lock_bh(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		spin_unlock_bh(chip->mutex);
	}
}

static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	spin_lock_bh(chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
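		/* Fall through */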

	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in lock\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock_bh(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
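	/* Two-cycle block lock: 0x60 is the lock setup command,
	   0x01 the "set lock bit" confirm. */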
	map_write(map, CMD(0x60), adr);
	map_write(map, CMD(0x01), adr);
	chip->state = FL_LOCKING;

	spin_unlock_bh(chip->mutex);
	msleep(1000);
	spin_lock_bh(chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*2);
	for (;;) {

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			spin_unlock_bh(chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		spin_lock_bh(chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	DISABLE_VPP(map);
	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);
	return 0;
}

static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret = 0;
#ifdef DEBUG_LOCK_BITS
	int ofs_factor = cfi->interleave * cfi->device_type;
#endif

	if (ofs & (mtd->erasesize - 1))
		return -EINVAL;

	if (len & (mtd->erasesize - 1))
		return -EINVAL;

	if ((len + ofs) > mtd->size)
		return -EINVAL;

	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

	while (len) {

#ifdef DEBUG_LOCK_BITS
		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		printk("before lock: block status register is %x\n", cfi_read_query(map, adr+(2*ofs_factor)));
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

		ret = do_lock_oneblock(map, &cfi->chips[chipnum], adr);

#ifdef DEBUG_LOCK_BITS
		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		printk("after lock: block status register is %x\n", cfi_read_query(map, adr+(2*ofs_factor)));
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

		if (ret)
			return ret;

		adr += mtd->erasesize;
		len -= mtd->erasesize;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}
	return 0;
}

static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	spin_lock_bh(chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
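		/* Fall through */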

	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			spin_unlock_bh(chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock_bh(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
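	/* Two-cycle block unlock: 0x60 is the lock setup command,
	   0xD0 the "clear lock bit" confirm. */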
	map_write(map, CMD(0x60), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_UNLOCKING;

	spin_unlock_bh(chip->mutex);
	msleep(1000);
	spin_lock_bh(chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*2);
	for (;;) {

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			spin_unlock_bh(chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		cfi_udelay(1);
		spin_lock_bh(chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	DISABLE_VPP(map);
	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);
	return 0;
}

static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret = 0;
#ifdef DEBUG_LOCK_BITS
	int ofs_factor = cfi->interleave * cfi->device_type;
#endif

	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

#ifdef DEBUG_LOCK_BITS
	{
		unsigned long temp_adr = adr;
		unsigned long temp_len = len;

		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		while (temp_len) {
			printk("before unlock %lx: block status register is %x\n", temp_adr, cfi_read_query(map, temp_adr+(2*ofs_factor)));
			temp_adr += mtd->erasesize;
			temp_len -= mtd->erasesize;
		}
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
	}
#endif

	ret = do_unlock_oneblock(map, &cfi->chips[chipnum], adr);

#ifdef DEBUG_LOCK_BITS
	cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
	printk("after unlock: block status register is %x\n", cfi_read_query(map, adr+(2*ofs_factor)));
	cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

	return ret;
}

static int cfi_staa_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock_bh(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
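			/* Fall through */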
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		spin_unlock_bh(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >= 0; i--) {
			chip = &cfi->chips[i];

			spin_lock_bh(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			spin_unlock_bh(chip->mutex);
		}
	}

	return ret;
}

static void cfi_staa_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock_bh(chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			map_write(map, CMD(0xFF), 0);
			chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		spin_unlock_bh(chip->mutex);
	}
}

static void cfi_staa_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	kfree(cfi->cmdset_priv);
	kfree(cfi);
}

MODULE_LICENSE("GPL");