1 /*
2  * Common Flash Interface support:
3  *   ST Advanced Architecture Command Set (ID 0x0020)
4  *
5  * (C) 2000 Red Hat. GPL'd
6  *
7  * $Id: cfi_cmdset_0020.c,v 1.22 2005/11/07 11:14:22 gleixner Exp $
8  *
9  * 10/10/2000	Nicolas Pitre <nico@cam.org>
10  * 	- completely revamped method functions so they are aware and
11  * 	  independent of the flash geometry (buswidth, interleave, etc.)
12  * 	- scalability vs code size is completely set at compile-time
13  * 	  (see include/linux/mtd/cfi.h for selection)
14  *	- optimized write buffer method
15  * 06/21/2002	Joern Engel <joern@wh.fh-wedel.de> and others
16  *	- modified Intel Command Set 0x0001 to support ST Advanced Architecture
17  *	  (command set 0x0020)
18  *	- added a writev function
19  * 07/13/2005	Joern Engel <joern@wh.fh-wedel.de>
20  * 	- Plugged memory leak in cfi_staa_writev().
21  */
22 
23 #include <linux/module.h>
24 #include <linux/types.h>
25 #include <linux/kernel.h>
26 #include <linux/sched.h>
27 #include <linux/init.h>
28 #include <asm/io.h>
29 #include <asm/byteorder.h>
30 
31 #include <linux/errno.h>
32 #include <linux/slab.h>
33 #include <linux/delay.h>
34 #include <linux/interrupt.h>
35 #include <linux/mtd/map.h>
36 #include <linux/mtd/cfi.h>
37 #include <linux/mtd/mtd.h>
38 #include <linux/mtd/compatmac.h>
39 
40 
41 static int cfi_staa_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
42 static int cfi_staa_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
43 static int cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
44 		unsigned long count, loff_t to, size_t *retlen);
45 static int cfi_staa_erase_varsize(struct mtd_info *, struct erase_info *);
46 static void cfi_staa_sync (struct mtd_info *);
47 static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
48 static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
49 static int cfi_staa_suspend (struct mtd_info *);
50 static void cfi_staa_resume (struct mtd_info *);
51 
52 static void cfi_staa_destroy(struct mtd_info *);
53 
54 struct mtd_info *cfi_cmdset_0020(struct map_info *, int);
55 
56 static struct mtd_info *cfi_staa_setup (struct map_info *);
57 
58 static struct mtd_chip_driver cfi_staa_chipdrv = {
59 	.probe		= NULL, /* Not usable directly */
60 	.destroy	= cfi_staa_destroy,
61 	.name		= "cfi_cmdset_0020",
62 	.module		= THIS_MODULE
63 };
64 
65 /* #define DEBUG_LOCK_BITS */
66 //#define DEBUG_CFI_FEATURES
67 
68 #ifdef DEBUG_CFI_FEATURES
69 static void cfi_tell_features(struct cfi_pri_intelext *extp)
70 {
71         int i;
72         printk("  Feature/Command Support: %4.4X\n", extp->FeatureSupport);
73 	printk("     - Chip Erase:         %s\n", extp->FeatureSupport&1?"supported":"unsupported");
74 	printk("     - Suspend Erase:      %s\n", extp->FeatureSupport&2?"supported":"unsupported");
75 	printk("     - Suspend Program:    %s\n", extp->FeatureSupport&4?"supported":"unsupported");
76 	printk("     - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
77 	printk("     - Queued Erase:       %s\n", extp->FeatureSupport&16?"supported":"unsupported");
78 	printk("     - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
79 	printk("     - Protection Bits:    %s\n", extp->FeatureSupport&64?"supported":"unsupported");
80 	printk("     - Page-mode read:     %s\n", extp->FeatureSupport&128?"supported":"unsupported");
81 	printk("     - Synchronous read:   %s\n", extp->FeatureSupport&256?"supported":"unsupported");
82 	for (i=9; i<32; i++) {
83 		if (extp->FeatureSupport & (1<<i))
84 			printk("     - Unknown Bit %X:      supported\n", i);
85 	}
86 
87 	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
88 	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
89 	for (i=1; i<8; i++) {
90 		if (extp->SuspendCmdSupport & (1<<i))
91 			printk("     - Unknown Bit %X:               supported\n", i);
92 	}
93 
94 	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
95 	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
96 	printk("     - Valid Bit Active:     %s\n", extp->BlkStatusRegMask&2?"yes":"no");
97 	for (i=2; i<16; i++) {
98 		if (extp->BlkStatusRegMask & (1<<i))
99 			printk("     - Unknown Bit %X Active: yes\n",i);
100 	}
101 
102 	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
103 	       extp->VccOptimal >> 8, extp->VccOptimal & 0xf);
104 	if (extp->VppOptimal)
105 		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
106 		       extp->VppOptimal >> 8, extp->VppOptimal & 0xf);
107 }
108 #endif
109 
110 /* This routine is made available to other mtd code via
111  * inter_module_register.  It must only be accessed through
112  * inter_module_get which will bump the use count of this module.  The
113  * addresses passed back in cfi are valid as long as the use count of
114  * this module is non-zero, i.e. between inter_module_get and
115  * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
116  */
117 struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary)
118 {
119 	struct cfi_private *cfi = map->fldrv_priv;
120 	int i;
121 
122 	if (cfi->cfi_mode) {
123 		/*
124 		 * It's a real CFI chip, not one for which the probe
125 		 * routine faked a CFI structure. So we read the feature
126 		 * table from it.
127 		 */
		__u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
129 		struct cfi_pri_intelext *extp;
130 
131 		extp = (struct cfi_pri_intelext*)cfi_read_pri(map, adr, sizeof(*extp), "ST Microelectronics");
132 		if (!extp)
133 			return NULL;
134 
135 		if (extp->MajorVersion != '1' ||
136 		    (extp->MinorVersion < '0' || extp->MinorVersion > '3')) {
137 			printk(KERN_ERR "  Unknown ST Microelectronics"
138 			       " Extended Query version %c.%c.\n",
139 			       extp->MajorVersion, extp->MinorVersion);
140 			kfree(extp);
141 			return NULL;
142 		}
143 
144 		/* Do some byteswapping if necessary */
145 		extp->FeatureSupport = cfi32_to_cpu(extp->FeatureSupport);
146 		extp->BlkStatusRegMask = cfi32_to_cpu(extp->BlkStatusRegMask);
147 
148 #ifdef DEBUG_CFI_FEATURES
149 		/* Tell the user about it in lots of lovely detail */
150 		cfi_tell_features(extp);
151 #endif
152 
153 		/* Install our own private info structure */
154 		cfi->cmdset_priv = extp;
155 	}
156 
	/* Use conservative default program/erase latencies for every chip */
	for (i = 0; i < cfi->numchips; i++) {
158 		cfi->chips[i].word_write_time = 128;
159 		cfi->chips[i].buffer_write_time = 128;
160 		cfi->chips[i].erase_time = 1024;
161 	}
162 
163 	return cfi_staa_setup(map);
164 }
165 
166 static struct mtd_info *cfi_staa_setup(struct map_info *map)
167 {
168 	struct cfi_private *cfi = map->fldrv_priv;
169 	struct mtd_info *mtd;
170 	unsigned long offset = 0;
171 	int i,j;
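	/* cfiq->DevSize is the log2 of a single chip's size in bytes */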
172 	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
173 
174 	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
175 	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
176 
177 	if (!mtd) {
178 		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
179 		kfree(cfi->cmdset_priv);
180 		return NULL;
181 	}
182 
183 	memset(mtd, 0, sizeof(*mtd));
184 	mtd->priv = map;
185 	mtd->type = MTD_NORFLASH;
186 	mtd->size = devsize * cfi->numchips;
187 
188 	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
189 	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
190 			* mtd->numeraseregions, GFP_KERNEL);
191 	if (!mtd->eraseregions) {
192 		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
193 		kfree(cfi->cmdset_priv);
194 		kfree(mtd);
195 		return NULL;
196 	}
197 
198 	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
199 		unsigned long ernum, ersize;
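		/* Each EraseRegionInfo word encodes: bits 0-15 the number of
		   blocks in the region minus one, bits 16-31 the block size
		   divided by 256. */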
200 		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
201 		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
202 
203 		if (mtd->erasesize < ersize) {
204 			mtd->erasesize = ersize;
205 		}
206 		for (j=0; j<cfi->numchips; j++) {
207 			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
208 			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
209 			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
210 		}
211 		offset += (ersize * ernum);
212 		}
213 
214 		if (offset != devsize) {
215 			/* Argh */
216 			printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
217 			kfree(mtd->eraseregions);
218 			kfree(cfi->cmdset_priv);
219 			kfree(mtd);
220 			return NULL;
221 		}
222 
223 		for (i=0; i<mtd->numeraseregions;i++){
224 			printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
225 			       i,mtd->eraseregions[i].offset,
226 			       mtd->eraseregions[i].erasesize,
227 			       mtd->eraseregions[i].numblocks);
228 		}
229 
	/* Also select the correct geometry setup */
231 	mtd->erase = cfi_staa_erase_varsize;
232 	mtd->read = cfi_staa_read;
	mtd->write = cfi_staa_write_buffers;
234 	mtd->writev = cfi_staa_writev;
235 	mtd->sync = cfi_staa_sync;
236 	mtd->lock = cfi_staa_lock;
237 	mtd->unlock = cfi_staa_unlock;
238 	mtd->suspend = cfi_staa_suspend;
239 	mtd->resume = cfi_staa_resume;
240 	mtd->flags = MTD_CAP_NORFLASH;
241 	mtd->flags |= MTD_ECC; /* FIXME: Not all STMicro flashes have this */
242 	mtd->eccsize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */
243 	map->fldrv = &cfi_staa_chipdrv;
244 	__module_get(THIS_MODULE);
245 	mtd->name = map->name;
246 	return mtd;
247 }
248 
249 
250 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
251 {
252 	map_word status, status_OK;
253 	unsigned long timeo;
254 	DECLARE_WAITQUEUE(wait, current);
255 	int suspended = 0;
256 	unsigned long cmd_addr;
257 	struct cfi_private *cfi = map->fldrv_priv;
258 
259 	adr += chip->start;
260 
261 	/* Ensure cmd read/writes are aligned. */
262 	cmd_addr = adr & ~(map_bankwidth(map)-1);
263 
264 	/* Let's determine this according to the interleave only once */
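	/* 0x80 is SR.7, the write state machine ready bit; CMD() replicates it across the interleave */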
265 	status_OK = CMD(0x80);
266 
267 	timeo = jiffies + HZ;
268  retry:
269 	spin_lock_bh(chip->mutex);
270 
271 	/* Check that the chip's ready to talk to us.
272 	 * If it's in FL_ERASING state, suspend it and make it talk now.
273 	 */
274 	switch (chip->state) {
275 	case FL_ERASING:
276 		if (!(((struct cfi_pri_intelext *)cfi->cmdset_priv)->FeatureSupport & 2))
277 			goto sleep; /* We don't support erase suspend */
278 
		map_write(map, CMD(0xb0), cmd_addr);	/* Erase Suspend */
280 		/* If the flash has finished erasing, then 'erase suspend'
281 		 * appears to make some (28F320) flash devices switch to
282 		 * 'read' mode.  Make sure that we switch to 'read status'
283 		 * mode so we get the right data. --rmk
284 		 */
285 		map_write(map, CMD(0x70), cmd_addr);
286 		chip->oldstate = FL_ERASING;
287 		chip->state = FL_ERASE_SUSPENDING;
288 		//		printk("Erase suspending at 0x%lx\n", cmd_addr);
289 		for (;;) {
290 			status = map_read(map, cmd_addr);
291 			if (map_word_andequal(map, status, status_OK, status_OK))
292 				break;
293 
294 			if (time_after(jiffies, timeo)) {
295 				/* Urgh */
296 				map_write(map, CMD(0xd0), cmd_addr);
297 				/* make sure we're in 'read status' mode */
298 				map_write(map, CMD(0x70), cmd_addr);
299 				chip->state = FL_ERASING;
300 				spin_unlock_bh(chip->mutex);
301 				printk(KERN_ERR "Chip not ready after erase "
302 				       "suspended: status = 0x%lx\n", status.x[0]);
303 				return -EIO;
304 			}
305 
306 			spin_unlock_bh(chip->mutex);
307 			cfi_udelay(1);
308 			spin_lock_bh(chip->mutex);
309 		}
310 
311 		suspended = 1;
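		/* 0xff returns the suspended chip to Read Array mode */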
312 		map_write(map, CMD(0xff), cmd_addr);
313 		chip->state = FL_READY;
314 		break;
315 
316 #if 0
317 	case FL_WRITING:
318 		/* Not quite yet */
319 #endif
320 
321 	case FL_READY:
322 		break;
323 
324 	case FL_CFI_QUERY:
325 	case FL_JEDEC_QUERY:
326 		map_write(map, CMD(0x70), cmd_addr);
327 		chip->state = FL_STATUS;
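		/* Fall through */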
328 
329 	case FL_STATUS:
330 		status = map_read(map, cmd_addr);
331 		if (map_word_andequal(map, status, status_OK, status_OK)) {
332 			map_write(map, CMD(0xff), cmd_addr);
333 			chip->state = FL_READY;
334 			break;
335 		}
336 
337 		/* Urgh. Chip not yet ready to talk to us. */
338 		if (time_after(jiffies, timeo)) {
339 			spin_unlock_bh(chip->mutex);
340 			printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]);
341 			return -EIO;
342 		}
343 
344 		/* Latency issues. Drop the lock, wait a while and retry */
345 		spin_unlock_bh(chip->mutex);
346 		cfi_udelay(1);
347 		goto retry;
348 
349 	default:
350 	sleep:
351 		/* Stick ourselves on a wait queue to be woken when
352 		   someone changes the status */
353 		set_current_state(TASK_UNINTERRUPTIBLE);
354 		add_wait_queue(&chip->wq, &wait);
355 		spin_unlock_bh(chip->mutex);
356 		schedule();
357 		remove_wait_queue(&chip->wq, &wait);
358 		timeo = jiffies + HZ;
359 		goto retry;
360 	}
361 
362 	map_copy_from(map, buf, adr, len);
363 
364 	if (suspended) {
365 		chip->state = chip->oldstate;
366 		/* What if one interleaved chip has finished and the
367 		   other hasn't? The old code would leave the finished
368 		   one in READY mode. That's bad, and caused -EROFS
369 		   errors to be returned from do_erase_oneblock because
370 		   that's the only bit it checked for at the time.
371 		   As the state machine appears to explicitly allow
372 		   sending the 0x70 (Read Status) command to an erasing
373 		   chip and expecting it to be ignored, that's what we
374 		   do. */
375 		map_write(map, CMD(0xd0), cmd_addr);
376 		map_write(map, CMD(0x70), cmd_addr);
377 	}
378 
379 	wake_up(&chip->wq);
380 	spin_unlock_bh(chip->mutex);
381 	return 0;
382 }
383 
384 static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
385 {
386 	struct map_info *map = mtd->priv;
387 	struct cfi_private *cfi = map->fldrv_priv;
388 	unsigned long ofs;
389 	int chipnum;
390 	int ret = 0;
391 
392 	/* ofs: offset within the first chip that the first read should start */
393 	chipnum = (from >> cfi->chipshift);
394 	ofs = from - (chipnum <<  cfi->chipshift);
395 
396 	*retlen = 0;
397 
398 	while (len) {
399 		unsigned long thislen;
400 
401 		if (chipnum >= cfi->numchips)
402 			break;
403 
404 		if ((len + ofs -1) >> cfi->chipshift)
405 			thislen = (1<<cfi->chipshift) - ofs;
406 		else
407 			thislen = len;
408 
409 		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
410 		if (ret)
411 			break;
412 
413 		*retlen += thislen;
414 		len -= thislen;
415 		buf += thislen;
416 
417 		ofs = 0;
418 		chipnum++;
419 	}
420 	return ret;
421 }
422 
423 static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
424 				  unsigned long adr, const u_char *buf, int len)
425 {
426 	struct cfi_private *cfi = map->fldrv_priv;
427 	map_word status, status_OK;
428 	unsigned long cmd_adr, timeo;
429 	DECLARE_WAITQUEUE(wait, current);
430 	int wbufsize, z;
431 
	/* M58LW064A requires bus alignment for buffer writes -- saw */
	if (adr & (map_bankwidth(map)-1))
		return -EINVAL;

	/* cfiq->MaxBufWriteSize is the log2 of the per-chip buffer size in bytes */
	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	cmd_adr = adr & ~(wbufsize-1);
439 
440 	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);
442 
443 	timeo = jiffies + HZ;
444  retry:
445 
446 #ifdef DEBUG_CFI_FEATURES
447        printk("%s: chip->state[%d]\n", __FUNCTION__, chip->state);
448 #endif
449 	spin_lock_bh(chip->mutex);
450 
451 	/* Check that the chip's ready to talk to us.
452 	 * Later, we can actually think about interrupting it
453 	 * if it's in FL_ERASING state.
454 	 * Not just yet, though.
455 	 */
456 	switch (chip->state) {
457 	case FL_READY:
458 		break;
459 
460 	case FL_CFI_QUERY:
461 	case FL_JEDEC_QUERY:
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
#ifdef DEBUG_CFI_FEATURES
		printk("%s: 1 status[%lx]\n", __FUNCTION__, map_read(map, cmd_adr).x[0]);
466 #endif
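		/* Fall through */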
467 
468 	case FL_STATUS:
469 		status = map_read(map, cmd_adr);
470 		if (map_word_andequal(map, status, status_OK, status_OK))
471 			break;
472 		/* Urgh. Chip not yet ready to talk to us. */
473 		if (time_after(jiffies, timeo)) {
474 			spin_unlock_bh(chip->mutex);
475                         printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n",
476                                status.x[0], map_read(map, cmd_adr).x[0]);
477 			return -EIO;
478 		}
479 
480 		/* Latency issues. Drop the lock, wait a while and retry */
481 		spin_unlock_bh(chip->mutex);
482 		cfi_udelay(1);
483 		goto retry;
484 
485 	default:
486 		/* Stick ourselves on a wait queue to be woken when
487 		   someone changes the status */
488 		set_current_state(TASK_UNINTERRUPTIBLE);
489 		add_wait_queue(&chip->wq, &wait);
490 		spin_unlock_bh(chip->mutex);
491 		schedule();
492 		remove_wait_queue(&chip->wq, &wait);
493 		timeo = jiffies + HZ;
494 		goto retry;
495 	}
496 
497 	ENABLE_VPP(map);
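	/* 0xe8 is the Write to Buffer (buffered program) setup command */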
498 	map_write(map, CMD(0xe8), cmd_adr);
499 	chip->state = FL_WRITING_TO_BUFFER;
500 
501 	z = 0;
502 	for (;;) {
503 		status = map_read(map, cmd_adr);
504 		if (map_word_andequal(map, status, status_OK, status_OK))
505 			break;
506 
507 		spin_unlock_bh(chip->mutex);
508 		cfi_udelay(1);
509 		spin_lock_bh(chip->mutex);
510 
511 		if (++z > 100) {
512 			/* Argh. Not ready for write to buffer */
513 			DISABLE_VPP(map);
			map_write(map, CMD(0x70), cmd_adr);
515 			chip->state = FL_STATUS;
516 			spin_unlock_bh(chip->mutex);
517 			printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]);
518 			return -EIO;
519 		}
520 	}
521 
	/* Write the word count for the buffer program, encoded as N-1 */
	map_write(map, CMD(len/map_bankwidth(map)-1), cmd_adr);
524 
525 	/* Write data */
526 	for (z = 0; z < len;
527 	     z += map_bankwidth(map), buf += map_bankwidth(map)) {
528 		map_word d;
529 		d = map_word_load(map, buf);
530 		map_write(map, d, adr+z);
531 	}
532 	/* GO GO GO */
533 	map_write(map, CMD(0xd0), cmd_adr);
534 	chip->state = FL_WRITING;
535 
536 	spin_unlock_bh(chip->mutex);
537 	cfi_udelay(chip->buffer_write_time);
538 	spin_lock_bh(chip->mutex);
539 
540 	timeo = jiffies + (HZ/2);
541 	z = 0;
542 	for (;;) {
543 		if (chip->state != FL_WRITING) {
544 			/* Someone's suspended the write. Sleep */
545 			set_current_state(TASK_UNINTERRUPTIBLE);
546 			add_wait_queue(&chip->wq, &wait);
547 			spin_unlock_bh(chip->mutex);
548 			schedule();
549 			remove_wait_queue(&chip->wq, &wait);
550 			timeo = jiffies + (HZ / 2); /* FIXME */
551 			spin_lock_bh(chip->mutex);
552 			continue;
553 		}
554 
555 		status = map_read(map, cmd_adr);
556 		if (map_word_andequal(map, status, status_OK, status_OK))
557 			break;
558 
559 		/* OK Still waiting */
560 		if (time_after(jiffies, timeo)) {
			/* clear status */
			map_write(map, CMD(0x50), cmd_adr);
			/* put back into read status register mode */
			map_write(map, CMD(0x70), adr);
565 			chip->state = FL_STATUS;
566 			DISABLE_VPP(map);
567 			spin_unlock_bh(chip->mutex);
568 			printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
569 			return -EIO;
570 		}
571 
572 		/* Latency issues. Drop the lock, wait a while and retry */
573 		spin_unlock_bh(chip->mutex);
574 		cfi_udelay(1);
575 		z++;
576 		spin_lock_bh(chip->mutex);
577 	}
578 	if (!z) {
579 		chip->buffer_write_time--;
580 		if (!chip->buffer_write_time)
581 			chip->buffer_write_time++;
582 	}
583 	if (z > 1)
584 		chip->buffer_write_time++;
585 
586 	/* Done and happy. */
587 	DISABLE_VPP(map);
588 	chip->state = FL_STATUS;
589 
	/* check for errors: 'lock bit', 'VPP', 'dead cell'/'unerased cell' or 'incorrect cmd' -- saw */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
592 #ifdef DEBUG_CFI_FEATURES
593 		printk("%s: 2 status[%lx]\n", __FUNCTION__, status.x[0]);
594 #endif
595 		/* clear status */
596 		map_write(map, CMD(0x50), cmd_adr);
597 		/* put back into read status register mode */
598 		map_write(map, CMD(0x70), adr);
599 		wake_up(&chip->wq);
600 		spin_unlock_bh(chip->mutex);
601 		return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO;
602 	}
603 	wake_up(&chip->wq);
604 	spin_unlock_bh(chip->mutex);
605 
	return 0;
607 }
608 
609 static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
610 				       size_t len, size_t *retlen, const u_char *buf)
611 {
612 	struct map_info *map = mtd->priv;
613 	struct cfi_private *cfi = map->fldrv_priv;
614 	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
615 	int ret = 0;
616 	int chipnum;
617 	unsigned long ofs;
618 
619 	*retlen = 0;
620 	if (!len)
621 		return 0;
622 
623 	chipnum = to >> cfi->chipshift;
624 	ofs = to  - (chipnum << cfi->chipshift);
625 
626 #ifdef DEBUG_CFI_FEATURES
627         printk("%s: map_bankwidth(map)[%x]\n", __FUNCTION__, map_bankwidth(map));
628         printk("%s: chipnum[%x] wbufsize[%x]\n", __FUNCTION__, chipnum, wbufsize);
629         printk("%s: ofs[%x] len[%x]\n", __FUNCTION__, ofs, len);
630 #endif
631 
	/* Write buffer is worth it only if more than one word to write... */
	while (len > 0) {
634 		/* We must not cross write block boundaries */
635 		int size = wbufsize - (ofs & (wbufsize-1));
636 
		if (size > len)
			size = len;

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
642 		if (ret)
643 			return ret;
644 
645 		ofs += size;
646 		buf += size;
647 		(*retlen) += size;
648 		len -= size;
649 
650 		if (ofs >> cfi->chipshift) {
651 			chipnum ++;
652 			ofs = 0;
653 			if (chipnum == cfi->numchips)
654 				return 0;
655 		}
656 	}
657 
658 	return 0;
659 }
660 
661 /*
662  * Writev for ECC-Flashes is a little more complicated. We need to maintain
663  * a small buffer for this.
 * XXX: If the buffer size is not a power of two, this will break
665  */
666 #define ECCBUF_SIZE (mtd->eccsize)
667 #define ECCBUF_DIV(x) ((x) & ~(ECCBUF_SIZE - 1))
668 #define ECCBUF_MOD(x) ((x) &  (ECCBUF_SIZE - 1))
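/* Both macros assume ECCBUF_SIZE is a power of two: ECCBUF_DIV rounds down
   to a whole number of ECC pages, ECCBUF_MOD gives the remainder. */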
669 static int
670 cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
671 		unsigned long count, loff_t to, size_t *retlen)
672 {
673 	unsigned long i;
674 	size_t	 totlen = 0, thislen;
675 	int	 ret = 0;
676 	size_t	 buflen = 0;
	char *buffer;
678 
679 	if (!ECCBUF_SIZE) {
680 		/* We should fall back to a general writev implementation.
681 		 * Until that is written, just break.
682 		 */
683 		return -EIO;
684 	}
685 	buffer = kmalloc(ECCBUF_SIZE, GFP_KERNEL);
686 	if (!buffer)
687 		return -ENOMEM;
688 
689 	for (i=0; i<count; i++) {
690 		size_t elem_len = vecs[i].iov_len;
691 		void *elem_base = vecs[i].iov_base;
692 		if (!elem_len) /* FIXME: Might be unnecessary. Check that */
693 			continue;
694 		if (buflen) { /* cut off head */
695 			if (buflen + elem_len < ECCBUF_SIZE) { /* just accumulate */
696 				memcpy(buffer+buflen, elem_base, elem_len);
697 				buflen += elem_len;
698 				continue;
699 			}
700 			memcpy(buffer+buflen, elem_base, ECCBUF_SIZE-buflen);
701 			ret = mtd->write(mtd, to, ECCBUF_SIZE, &thislen, buffer);
702 			totlen += thislen;
703 			if (ret || thislen != ECCBUF_SIZE)
704 				goto write_error;
705 			elem_len -= thislen-buflen;
706 			elem_base += thislen-buflen;
707 			to += ECCBUF_SIZE;
708 		}
709 		if (ECCBUF_DIV(elem_len)) { /* write clean aligned data */
710 			ret = mtd->write(mtd, to, ECCBUF_DIV(elem_len), &thislen, elem_base);
711 			totlen += thislen;
712 			if (ret || thislen != ECCBUF_DIV(elem_len))
713 				goto write_error;
714 			to += thislen;
715 		}
		buflen = ECCBUF_MOD(elem_len); /* cut off tail */
		if (buflen) {
			memset(buffer, 0xff, ECCBUF_SIZE);
			/* The tail starts after the ECCBUF_DIV(elem_len) bytes
			   consumed by the aligned write above (zero if none);
			   thislen may be stale here, so don't use it */
			memcpy(buffer, elem_base + ECCBUF_DIV(elem_len), buflen);
		}
721 	}
722 	if (buflen) { /* flush last page, even if not full */
723 		/* This is sometimes intended behaviour, really */
724 		ret = mtd->write(mtd, to, buflen, &thislen, buffer);
725 		totlen += thislen;
		if (ret || thislen != buflen)
727 			goto write_error;
728 	}
729 write_error:
730 	if (retlen)
731 		*retlen = totlen;
732 	kfree(buffer);
733 	return ret;
734 }
735 
736 
737 static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
738 {
739 	struct cfi_private *cfi = map->fldrv_priv;
740 	map_word status, status_OK;
741 	unsigned long timeo;
742 	int retries = 3;
743 	DECLARE_WAITQUEUE(wait, current);
744 	int ret = 0;
745 
746 	adr += chip->start;
747 
748 	/* Let's determine this according to the interleave only once */
749 	status_OK = CMD(0x80);
750 
751 	timeo = jiffies + HZ;
752 retry:
753 	spin_lock_bh(chip->mutex);
754 
755 	/* Check that the chip's ready to talk to us. */
756 	switch (chip->state) {
757 	case FL_CFI_QUERY:
758 	case FL_JEDEC_QUERY:
759 	case FL_READY:
760 		map_write(map, CMD(0x70), adr);
761 		chip->state = FL_STATUS;
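		/* Fall through */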
762 
763 	case FL_STATUS:
764 		status = map_read(map, adr);
765 		if (map_word_andequal(map, status, status_OK, status_OK))
766 			break;
767 
768 		/* Urgh. Chip not yet ready to talk to us. */
769 		if (time_after(jiffies, timeo)) {
770 			spin_unlock_bh(chip->mutex);
771 			printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
772 			return -EIO;
773 		}
774 
775 		/* Latency issues. Drop the lock, wait a while and retry */
776 		spin_unlock_bh(chip->mutex);
777 		cfi_udelay(1);
778 		goto retry;
779 
780 	default:
781 		/* Stick ourselves on a wait queue to be woken when
782 		   someone changes the status */
783 		set_current_state(TASK_UNINTERRUPTIBLE);
784 		add_wait_queue(&chip->wq, &wait);
785 		spin_unlock_bh(chip->mutex);
786 		schedule();
787 		remove_wait_queue(&chip->wq, &wait);
788 		timeo = jiffies + HZ;
789 		goto retry;
790 	}
791 
792 	ENABLE_VPP(map);
793 	/* Clear the status register first */
794 	map_write(map, CMD(0x50), adr);
795 
	/* Now erase: 0x20 is the Block Erase setup command, 0xD0 the confirm */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
799 	chip->state = FL_ERASING;
800 
801 	spin_unlock_bh(chip->mutex);
802 	msleep(1000);
803 	spin_lock_bh(chip->mutex);
804 
805 	/* FIXME. Use a timer to check this, and return immediately. */
806 	/* Once the state machine's known to be working I'll do that */
807 
808 	timeo = jiffies + (HZ*20);
809 	for (;;) {
810 		if (chip->state != FL_ERASING) {
811 			/* Someone's suspended the erase. Sleep */
812 			set_current_state(TASK_UNINTERRUPTIBLE);
813 			add_wait_queue(&chip->wq, &wait);
814 			spin_unlock_bh(chip->mutex);
815 			schedule();
816 			remove_wait_queue(&chip->wq, &wait);
817 			timeo = jiffies + (HZ*20); /* FIXME */
818 			spin_lock_bh(chip->mutex);
819 			continue;
820 		}
821 
822 		status = map_read(map, adr);
823 		if (map_word_andequal(map, status, status_OK, status_OK))
824 			break;
825 
826 		/* OK Still waiting */
827 		if (time_after(jiffies, timeo)) {
828 			map_write(map, CMD(0x70), adr);
829 			chip->state = FL_STATUS;
830 			printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
831 			DISABLE_VPP(map);
832 			spin_unlock_bh(chip->mutex);
833 			return -EIO;
834 		}
835 
836 		/* Latency issues. Drop the lock, wait a while and retry */
837 		spin_unlock_bh(chip->mutex);
838 		cfi_udelay(1);
839 		spin_lock_bh(chip->mutex);
840 	}
841 
842 	DISABLE_VPP(map);
843 	ret = 0;
844 
845 	/* We've broken this before. It doesn't hurt to be safe */
846 	map_write(map, CMD(0x70), adr);
847 	chip->state = FL_STATUS;
848 	status = map_read(map, adr);
849 
850 	/* check for lock bit */
851 	if (map_word_bitsset(map, status, CMD(0x3a))) {
852 		unsigned char chipstatus = status.x[0];
853 		if (!map_word_equal(map, status, CMD(chipstatus))) {
854 			int i, w;
855 			for (w=0; w<map_words(map); w++) {
856 				for (i = 0; i<cfi_interleave(cfi); i++) {
					chipstatus |= status.x[w] >> (i * cfi->device_type * 8);
858 				}
859 			}
860 			printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
861 			       status.x[0], chipstatus);
862 		}
863 		/* Reset the error bits */
864 		map_write(map, CMD(0x50), adr);
865 		map_write(map, CMD(0x70), adr);
866 
867 		if ((chipstatus & 0x30) == 0x30) {
868 			printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
869 			ret = -EIO;
870 		} else if (chipstatus & 0x02) {
871 			/* Protection bit set */
872 			ret = -EROFS;
873 		} else if (chipstatus & 0x8) {
874 			/* Voltage */
875 			printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
876 			ret = -EIO;
877 		} else if (chipstatus & 0x20) {
878 			if (retries--) {
879 				printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
880 				timeo = jiffies + HZ;
881 				chip->state = FL_STATUS;
882 				spin_unlock_bh(chip->mutex);
883 				goto retry;
884 			}
885 			printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
886 			ret = -EIO;
887 		}
888 	}
889 
890 	wake_up(&chip->wq);
891 	spin_unlock_bh(chip->mutex);
892 	return ret;
893 }
894 
static int cfi_staa_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
897 	struct cfi_private *cfi = map->fldrv_priv;
898 	unsigned long adr, len;
899 	int chipnum, ret = 0;
900 	int i, first;
901 	struct mtd_erase_region_info *regions = mtd->eraseregions;
902 
903 	if (instr->addr > mtd->size)
904 		return -EINVAL;
905 
906 	if ((instr->len + instr->addr) > mtd->size)
907 		return -EINVAL;
908 
909 	/* Check that both start and end of the requested erase are
910 	 * aligned with the erasesize at the appropriate addresses.
911 	 */
912 
913 	i = 0;
914 
915 	/* Skip all erase regions which are ended before the start of
916 	   the requested erase. Actually, to save on the calculations,
917 	   we skip to the first erase region which starts after the
918 	   start of the requested erase, and then go back one.
919 	*/
920 
921 	while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
922 	       i++;
923 	i--;
924 
925 	/* OK, now i is pointing at the erase region in which this
926 	   erase request starts. Check the start of the requested
927 	   erase range is aligned with the erase size which is in
928 	   effect here.
929 	*/
930 
931 	if (instr->addr & (regions[i].erasesize-1))
932 		return -EINVAL;
933 
934 	/* Remember the erase region we start on */
935 	first = i;
936 
937 	/* Next, check that the end of the requested erase is aligned
938 	 * with the erase region at that address.
939 	 */
940 
941 	while (i<mtd->numeraseregions && (instr->addr + instr->len) >= regions[i].offset)
942 		i++;
943 
944 	/* As before, drop back one to point at the region in which
945 	   the address actually falls
946 	*/
947 	i--;
948 
949 	if ((instr->addr + instr->len) & (regions[i].erasesize-1))
950 		return -EINVAL;
951 
952 	chipnum = instr->addr >> cfi->chipshift;
953 	adr = instr->addr - (chipnum << cfi->chipshift);
954 	len = instr->len;
955 
956 	i=first;
957 
958 	while(len) {
959 		ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);
960 
961 		if (ret)
962 			return ret;
963 
964 		adr += regions[i].erasesize;
965 		len -= regions[i].erasesize;
966 
		/* Step to the next erase region once adr (modulo the chip
		   size) reaches the end of the current one */
		if (adr % (1 << cfi->chipshift) == ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) % (1 << cfi->chipshift)))
			i++;
969 
970 		if (adr >> cfi->chipshift) {
971 			adr = 0;
972 			chipnum++;
973 
			if (chipnum >= cfi->numchips)
				break;
976 		}
977 	}
978 
979 	instr->state = MTD_ERASE_DONE;
980 	mtd_erase_callback(instr);
981 
982 	return 0;
983 }
984 
985 static void cfi_staa_sync (struct mtd_info *mtd)
986 {
987 	struct map_info *map = mtd->priv;
988 	struct cfi_private *cfi = map->fldrv_priv;
989 	int i;
990 	struct flchip *chip;
991 	int ret = 0;
992 	DECLARE_WAITQUEUE(wait, current);
993 
994 	for (i=0; !ret && i<cfi->numchips; i++) {
995 		chip = &cfi->chips[i];
996 
997 	retry:
998 		spin_lock_bh(chip->mutex);
999 
1000 		switch(chip->state) {
1001 		case FL_READY:
1002 		case FL_STATUS:
1003 		case FL_CFI_QUERY:
1004 		case FL_JEDEC_QUERY:
1005 			chip->oldstate = chip->state;
1006 			chip->state = FL_SYNCING;
1007 			/* No need to wake_up() on this state change -
1008 			 * as the whole point is that nobody can do anything
1009 			 * with the chip now anyway.
1010 			 */
1011 		case FL_SYNCING:
1012 			spin_unlock_bh(chip->mutex);
1013 			break;
1014 
1015 		default:
1016 			/* Not an idle state */
1017 			add_wait_queue(&chip->wq, &wait);
1018 
1019 			spin_unlock_bh(chip->mutex);
1020 			schedule();
			remove_wait_queue(&chip->wq, &wait);
1022 
1023 			goto retry;
1024 		}
1025 	}
1026 
1027 	/* Unlock the chips again */
1028 
1029 	for (i--; i >=0; i--) {
1030 		chip = &cfi->chips[i];
1031 
1032 		spin_lock_bh(chip->mutex);
1033 
1034 		if (chip->state == FL_SYNCING) {
1035 			chip->state = chip->oldstate;
1036 			wake_up(&chip->wq);
1037 		}
1038 		spin_unlock_bh(chip->mutex);
1039 	}
1040 }
1041 
1042 static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
1043 {
1044 	struct cfi_private *cfi = map->fldrv_priv;
1045 	map_word status, status_OK;
1046 	unsigned long timeo = jiffies + HZ;
1047 	DECLARE_WAITQUEUE(wait, current);
1048 
1049 	adr += chip->start;
1050 
1051 	/* Let's determine this according to the interleave only once */
1052 	status_OK = CMD(0x80);
1053 
1054 	timeo = jiffies + HZ;
1055 retry:
1056 	spin_lock_bh(chip->mutex);
1057 
1058 	/* Check that the chip's ready to talk to us. */
1059 	switch (chip->state) {
1060 	case FL_CFI_QUERY:
1061 	case FL_JEDEC_QUERY:
1062 	case FL_READY:
1063 		map_write(map, CMD(0x70), adr);
1064 		chip->state = FL_STATUS;
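		/* Fall through */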
1065 
1066 	case FL_STATUS:
1067 		status = map_read(map, adr);
1068 		if (map_word_andequal(map, status, status_OK, status_OK))
1069 			break;
1070 
1071 		/* Urgh. Chip not yet ready to talk to us. */
1072 		if (time_after(jiffies, timeo)) {
1073 			spin_unlock_bh(chip->mutex);
1074 			printk(KERN_ERR "waiting for chip to be ready timed out in lock\n");
1075 			return -EIO;
1076 		}
1077 
1078 		/* Latency issues. Drop the lock, wait a while and retry */
1079 		spin_unlock_bh(chip->mutex);
1080 		cfi_udelay(1);
1081 		goto retry;
1082 
1083 	default:
1084 		/* Stick ourselves on a wait queue to be woken when
1085 		   someone changes the status */
1086 		set_current_state(TASK_UNINTERRUPTIBLE);
1087 		add_wait_queue(&chip->wq, &wait);
1088 		spin_unlock_bh(chip->mutex);
1089 		schedule();
1090 		remove_wait_queue(&chip->wq, &wait);
1091 		timeo = jiffies + HZ;
1092 		goto retry;
1093 	}
1094 
1095 	ENABLE_VPP(map);
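	/* 0x60 followed by 0x01 is the Set Block Lock Bit command sequence */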
1096 	map_write(map, CMD(0x60), adr);
1097 	map_write(map, CMD(0x01), adr);
1098 	chip->state = FL_LOCKING;
1099 
1100 	spin_unlock_bh(chip->mutex);
1101 	msleep(1000);
1102 	spin_lock_bh(chip->mutex);
1103 
1104 	/* FIXME. Use a timer to check this, and return immediately. */
1105 	/* Once the state machine's known to be working I'll do that */
1106 
1107 	timeo = jiffies + (HZ*2);
1108 	for (;;) {
1109 
1110 		status = map_read(map, adr);
1111 		if (map_word_andequal(map, status, status_OK, status_OK))
1112 			break;
1113 
1114 		/* OK Still waiting */
1115 		if (time_after(jiffies, timeo)) {
1116 			map_write(map, CMD(0x70), adr);
1117 			chip->state = FL_STATUS;
1118 			printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
1119 			DISABLE_VPP(map);
1120 			spin_unlock_bh(chip->mutex);
1121 			return -EIO;
1122 		}
1123 
1124 		/* Latency issues. Drop the lock, wait a while and retry */
1125 		spin_unlock_bh(chip->mutex);
1126 		cfi_udelay(1);
1127 		spin_lock_bh(chip->mutex);
1128 	}
1129 
1130 	/* Done and happy. */
1131 	chip->state = FL_STATUS;
1132 	DISABLE_VPP(map);
1133 	wake_up(&chip->wq);
1134 	spin_unlock_bh(chip->mutex);
1135 	return 0;
1136 }
1137 static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1138 {
1139 	struct map_info *map = mtd->priv;
1140 	struct cfi_private *cfi = map->fldrv_priv;
1141 	unsigned long adr;
1142 	int chipnum, ret = 0;
1143 #ifdef DEBUG_LOCK_BITS
1144 	int ofs_factor = cfi->interleave * cfi->device_type;
1145 #endif
1146 
1147 	if (ofs & (mtd->erasesize - 1))
1148 		return -EINVAL;
1149 
1150 	if (len & (mtd->erasesize -1))
1151 		return -EINVAL;
1152 
1153 	if ((len + ofs) > mtd->size)
1154 		return -EINVAL;
1155 
1156 	chipnum = ofs >> cfi->chipshift;
1157 	adr = ofs - (chipnum << cfi->chipshift);
1158 
1159 	while(len) {
1160 
1161 #ifdef DEBUG_LOCK_BITS
1162 		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1163 		printk("before lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
1164 		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1165 #endif
1166 
1167 		ret = do_lock_oneblock(map, &cfi->chips[chipnum], adr);
1168 
1169 #ifdef DEBUG_LOCK_BITS
1170 		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1171 		printk("after lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
1172 		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1173 #endif
1174 
1175 		if (ret)
1176 			return ret;
1177 
1178 		adr += mtd->erasesize;
1179 		len -= mtd->erasesize;
1180 
1181 		if (adr >> cfi->chipshift) {
1182 			adr = 0;
1183 			chipnum++;
1184 
			if (chipnum >= cfi->numchips)
				break;
1187 		}
1188 	}
1189 	return 0;
1190 }
1191 static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
1192 {
1193 	struct cfi_private *cfi = map->fldrv_priv;
1194 	map_word status, status_OK;
1195 	unsigned long timeo = jiffies + HZ;
1196 	DECLARE_WAITQUEUE(wait, current);
1197 
1198 	adr += chip->start;
1199 
1200 	/* Let's determine this according to the interleave only once */
1201 	status_OK = CMD(0x80);
1202 
1203 	timeo = jiffies + HZ;
1204 retry:
1205 	spin_lock_bh(chip->mutex);
1206 
1207 	/* Check that the chip's ready to talk to us. */
1208 	switch (chip->state) {
1209 	case FL_CFI_QUERY:
1210 	case FL_JEDEC_QUERY:
1211 	case FL_READY:
1212 		map_write(map, CMD(0x70), adr);
1213 		chip->state = FL_STATUS;
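		/* Fall through */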
1214 
1215 	case FL_STATUS:
1216 		status = map_read(map, adr);
1217 		if (map_word_andequal(map, status, status_OK, status_OK))
1218 			break;
1219 
1220 		/* Urgh. Chip not yet ready to talk to us. */
1221 		if (time_after(jiffies, timeo)) {
1222 			spin_unlock_bh(chip->mutex);
1223 			printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n");
1224 			return -EIO;
1225 		}
1226 
1227 		/* Latency issues. Drop the lock, wait a while and retry */
1228 		spin_unlock_bh(chip->mutex);
1229 		cfi_udelay(1);
1230 		goto retry;
1231 
1232 	default:
1233 		/* Stick ourselves on a wait queue to be woken when
1234 		   someone changes the status */
1235 		set_current_state(TASK_UNINTERRUPTIBLE);
1236 		add_wait_queue(&chip->wq, &wait);
1237 		spin_unlock_bh(chip->mutex);
1238 		schedule();
1239 		remove_wait_queue(&chip->wq, &wait);
1240 		timeo = jiffies + HZ;
1241 		goto retry;
1242 	}
1243 
1244 	ENABLE_VPP(map);
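	/* 0x60 followed by 0xD0 is the Clear Block Lock Bits command sequence */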
1245 	map_write(map, CMD(0x60), adr);
1246 	map_write(map, CMD(0xD0), adr);
1247 	chip->state = FL_UNLOCKING;
1248 
1249 	spin_unlock_bh(chip->mutex);
1250 	msleep(1000);
1251 	spin_lock_bh(chip->mutex);
1252 
1253 	/* FIXME. Use a timer to check this, and return immediately. */
1254 	/* Once the state machine's known to be working I'll do that */
1255 
1256 	timeo = jiffies + (HZ*2);
1257 	for (;;) {
1258 
1259 		status = map_read(map, adr);
1260 		if (map_word_andequal(map, status, status_OK, status_OK))
1261 			break;
1262 
1263 		/* OK Still waiting */
1264 		if (time_after(jiffies, timeo)) {
1265 			map_write(map, CMD(0x70), adr);
1266 			chip->state = FL_STATUS;
1267 			printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
1268 			DISABLE_VPP(map);
1269 			spin_unlock_bh(chip->mutex);
1270 			return -EIO;
1271 		}
1272 
		/* Latency issues. Drop the lock, wait a while and retry */
1274 		spin_unlock_bh(chip->mutex);
1275 		cfi_udelay(1);
1276 		spin_lock_bh(chip->mutex);
1277 	}
1278 
1279 	/* Done and happy. */
1280 	chip->state = FL_STATUS;
1281 	DISABLE_VPP(map);
1282 	wake_up(&chip->wq);
1283 	spin_unlock_bh(chip->mutex);
1284 	return 0;
1285 }
1286 static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1287 {
1288 	struct map_info *map = mtd->priv;
1289 	struct cfi_private *cfi = map->fldrv_priv;
1290 	unsigned long adr;
1291 	int chipnum, ret = 0;
1292 #ifdef DEBUG_LOCK_BITS
1293 	int ofs_factor = cfi->interleave * cfi->device_type;
1294 #endif
1295 
1296 	chipnum = ofs >> cfi->chipshift;
1297 	adr = ofs - (chipnum << cfi->chipshift);
1298 
1299 #ifdef DEBUG_LOCK_BITS
1300 	{
1301 		unsigned long temp_adr = adr;
1302 		unsigned long temp_len = len;
1303 
1304 		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		while (temp_len) {
			printk("before unlock %lx: block status register is %x\n",temp_adr,cfi_read_query(map, temp_adr+(2*ofs_factor)));
1307 			temp_adr += mtd->erasesize;
1308 			temp_len -= mtd->erasesize;
1309 		}
1310 		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1311 	}
1312 #endif
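	/* Clear Block Lock Bits appears to act on every block of the chip at
	   once, which presumably is why a single call suffices here instead
	   of looping over each erase block in the requested range. */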
1313 
1314 	ret = do_unlock_oneblock(map, &cfi->chips[chipnum], adr);
1315 
1316 #ifdef DEBUG_LOCK_BITS
1317 	cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1318 	printk("after unlock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
1319 	cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1320 #endif
1321 
1322 	return ret;
1323 }
1324 
1325 static int cfi_staa_suspend(struct mtd_info *mtd)
1326 {
1327 	struct map_info *map = mtd->priv;
1328 	struct cfi_private *cfi = map->fldrv_priv;
1329 	int i;
1330 	struct flchip *chip;
1331 	int ret = 0;
1332 
1333 	for (i=0; !ret && i<cfi->numchips; i++) {
1334 		chip = &cfi->chips[i];
1335 
1336 		spin_lock_bh(chip->mutex);
1337 
1338 		switch(chip->state) {
1339 		case FL_READY:
1340 		case FL_STATUS:
1341 		case FL_CFI_QUERY:
1342 		case FL_JEDEC_QUERY:
1343 			chip->oldstate = chip->state;
1344 			chip->state = FL_PM_SUSPENDED;
1345 			/* No need to wake_up() on this state change -
1346 			 * as the whole point is that nobody can do anything
1347 			 * with the chip now anyway.
1348 			 */
1349 		case FL_PM_SUSPENDED:
1350 			break;
1351 
1352 		default:
1353 			ret = -EAGAIN;
1354 			break;
1355 		}
1356 		spin_unlock_bh(chip->mutex);
1357 	}
1358 
1359 	/* Unlock the chips again */
1360 
1361 	if (ret) {
1362 		for (i--; i >=0; i--) {
1363 			chip = &cfi->chips[i];
1364 
1365 			spin_lock_bh(chip->mutex);
1366 
1367 			if (chip->state == FL_PM_SUSPENDED) {
1368 				/* No need to force it into a known state here,
1369 				   because we're returning failure, and it didn't
1370 				   get power cycled */
1371 				chip->state = chip->oldstate;
1372 				wake_up(&chip->wq);
1373 			}
1374 			spin_unlock_bh(chip->mutex);
1375 		}
1376 	}
1377 
1378 	return ret;
1379 }
1380 
1381 static void cfi_staa_resume(struct mtd_info *mtd)
1382 {
1383 	struct map_info *map = mtd->priv;
1384 	struct cfi_private *cfi = map->fldrv_priv;
1385 	int i;
1386 	struct flchip *chip;
1387 
1388 	for (i=0; i<cfi->numchips; i++) {
1389 
1390 		chip = &cfi->chips[i];
1391 
1392 		spin_lock_bh(chip->mutex);
1393 
1394 		/* Go to known state. Chip may have been power cycled */
1395 		if (chip->state == FL_PM_SUSPENDED) {
			map_write(map, CMD(0xFF), chip->start);
1397 			chip->state = FL_READY;
1398 			wake_up(&chip->wq);
1399 		}
1400 
1401 		spin_unlock_bh(chip->mutex);
1402 	}
1403 }
1404 
1405 static void cfi_staa_destroy(struct mtd_info *mtd)
1406 {
1407 	struct map_info *map = mtd->priv;
1408 	struct cfi_private *cfi = map->fldrv_priv;
1409 	kfree(cfi->cmdset_priv);
1410 	kfree(cfi);
1411 }
1412 
1413 static char im_name[]="cfi_cmdset_0020";
1414 
1415 static int __init cfi_staa_init(void)
1416 {
1417 	inter_module_register(im_name, THIS_MODULE, &cfi_cmdset_0020);
1418 	return 0;
1419 }
1420 
1421 static void __exit cfi_staa_exit(void)
1422 {
1423 	inter_module_unregister(im_name);
1424 }
1425 
1426 module_init(cfi_staa_init);
1427 module_exit(cfi_staa_exit);
1428 
1429 MODULE_LICENSE("GPL");
1430