/*
 * Common Flash Interface support:
 *   ST Advanced Architecture Command Set (ID 0x0020)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * 10/10/2000	Nicolas Pitre <nico@fluxnic.net>
 * 	- completely revamped method functions so they are aware and
 * 	  independent of the flash geometry (buswidth, interleave, etc.)
 * 	- scalability vs code size is completely set at compile-time
 * 	  (see include/linux/mtd/cfi.h for selection)
 *	- optimized write buffer method
 * 06/21/2002	Joern Engel <joern@wh.fh-wedel.de> and others
 *	- modified Intel Command Set 0x0001 to support ST Advanced Architecture
 *	  (command set 0x0020)
 *	- added a writev function
 * 07/13/2005	Joern Engel <joern@wh.fh-wedel.de>
 * 	- Plugged memory leak in cfi_staa_writev().
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/mtd.h>


static int cfi_staa_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_staa_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen);
static int cfi_staa_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_staa_sync (struct mtd_info *);
static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_staa_suspend (struct mtd_info *);
static void cfi_staa_resume (struct mtd_info *);

static void cfi_staa_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0020(struct map_info *, int);

static struct mtd_info *cfi_staa_setup (struct map_info *);

static struct mtd_chip_driver cfi_staa_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_staa_destroy,
	.name		= "cfi_cmdset_0020",
	.module		= THIS_MODULE
};
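
/*
 * Typical use (an illustrative sketch, not lifted from any particular
 * map driver): a board map driver that has CFI-probed its chip and
 * found primary command set 0x0020 hands its struct map_info to
 * cfi_cmdset_0020() and registers the resulting MTD:
 *
 *	struct mtd_info *mtd = cfi_cmdset_0020(map, 1);
 *	if (mtd)
 *		mtd_device_register(mtd, NULL, 0);
 *
 * In practice this entry point is usually reached indirectly through
 * do_map_probe("cfi_probe", map) and the generic CFI chip driver
 * rather than being called by hand.
 */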

/* #define DEBUG_LOCK_BITS */
//#define DEBUG_CFI_FEATURES

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Feature/Command Support: %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:         %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:      %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:    %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:       %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:    %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:     %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:   %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	for (i=9; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk("     - Unknown Bit %X:      supported\n", i);
	}

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X:               supported\n", i);
	}

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Valid Bit Active:     %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}

	/* VccOptimal/VppOptimal are BCD: high nibble volts, low nibble tenths */
	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

/* This routine is made available to other mtd code via
 * EXPORT_SYMBOL_GPL.  The addresses passed back in cfi are valid as
 * long as the use count of this module is non-zero, i.e. while the
 * reference taken with __module_get() below is still held.
 * Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */
struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int i;

	if (cfi->cfi_mode) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = (struct cfi_pri_intelext*)cfi_read_pri(map, adr, sizeof(*extp), "ST Microelectronics");
		if (!extp)
			return NULL;

		if (extp->MajorVersion != '1' ||
		    (extp->MinorVersion < '0' || extp->MinorVersion > '3')) {
			printk(KERN_ERR "  Unknown ST Microelectronics"
			       " Extended Query version %c.%c.\n",
			       extp->MajorVersion, extp->MinorVersion);
			kfree(extp);
			return NULL;
		}

		/* Do some byteswapping if necessary */
		extp->FeatureSupport = cfi32_to_cpu(map, extp->FeatureSupport);
		extp->BlkStatusRegMask = cfi32_to_cpu(map,
						extp->BlkStatusRegMask);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;
	}

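	/*
	 * Conservative initial timing hints (used as microsecond delay
	 * seeds); do_write_buffer() tunes buffer_write_time up or down
	 * at runtime according to how long the chip actually took.
	 */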
	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 128;
		cfi->chips[i].buffer_write_time = 128;
		cfi->chips[i].erase_time = 1024;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	return cfi_staa_setup(map);
}
EXPORT_SYMBOL_GPL(cfi_cmdset_0020);

static struct mtd_info *cfi_staa_setup(struct map_info *map)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		kfree(cfi->cmdset_priv);
		return NULL;
	}

	mtd->priv = map;
	mtd->type = MTD_NORFLASH;
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		kfree(cfi->cmdset_priv);
		kfree(mtd);
		return NULL;
	}

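	/*
	 * Each 32-bit CFI EraseRegionInfo word encodes the region as
	 * (block count - 1) in bits 0-15 and the block size in 256-byte
	 * units in bits 16-31.  A hypothetical 0x0100001f thus means 32
	 * blocks of 64KiB: (info >> 8) & ~0xff recovers 0x100 * 256 =
	 * 0x10000 bytes, and (info & 0xffff) + 1 gives 32 blocks.
	 */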
	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		kfree(mtd->eraseregions);
		kfree(cfi->cmdset_priv);
		kfree(mtd);
		return NULL;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "%d: offset=0x%llx,size=0x%x,blocks=%d\n",
		       i, (unsigned long long)mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

	/* Select the correct geometry setup */
	mtd->_erase = cfi_staa_erase_varsize;
	mtd->_read = cfi_staa_read;
	mtd->_write = cfi_staa_write_buffers;
	mtd->_writev = cfi_staa_writev;
	mtd->_sync = cfi_staa_sync;
	mtd->_lock = cfi_staa_lock;
	mtd->_unlock = cfi_staa_unlock;
	mtd->_suspend = cfi_staa_suspend;
	mtd->_resume = cfi_staa_resume;
	mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE;
	mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */
	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	map->fldrv = &cfi_staa_chipdrv;
	__module_get(THIS_MODULE);
	mtd->name = map->name;
	return mtd;
}


static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	map_word status, status_OK;
	unsigned long timeo;
	DECLARE_WAITQUEUE(wait, current);
	int suspended = 0;
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	/* Let's determine this according to the interleave only once */
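	/*
	 * CMD(0x80) replicates the status register's "WSM ready" bit
	 * across every interleaved chip in the bus word; with two
	 * 16-bit chips on a 32-bit bus, for example, it expands to
	 * 0x00800080, so one map_word_andequal() tests both chips.
	 */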
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
 retry:
	mutex_lock(&chip->mutex);

	/* Check that the chip's ready to talk to us.
	 * If it's in FL_ERASING state, suspend it and make it talk now.
	 */
	switch (chip->state) {
	case FL_ERASING:
		if (!(((struct cfi_pri_intelext *)cfi->cmdset_priv)->FeatureSupport & 2))
			goto sleep; /* We don't support erase suspend */

		map_write (map, CMD(0xb0), cmd_addr);
		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), cmd_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		//		printk("Erase suspending at 0x%lx\n", cmd_addr);
		for (;;) {
			status = map_read(map, cmd_addr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh */
				map_write(map, CMD(0xd0), cmd_addr);
				/* make sure we're in 'read status' mode */
				map_write(map, CMD(0x70), cmd_addr);
				chip->state = FL_ERASING;
				wake_up(&chip->wq);
				mutex_unlock(&chip->mutex);
				printk(KERN_ERR "Chip not ready after erase "
				       "suspended: status = 0x%lx\n", status.x[0]);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
		}

		suspended = 1;
		map_write(map, CMD(0xff), cmd_addr);
		chip->state = FL_READY;
		break;

#if 0
	case FL_WRITING:
		/* Not quite yet */
#endif

	case FL_READY:
		break;

	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		map_write(map, CMD(0x70), cmd_addr);
		chip->state = FL_STATUS;

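		/* Fall through */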
	case FL_STATUS:
		status = map_read(map, cmd_addr);
		if (map_word_andequal(map, status, status_OK, status_OK)) {
			map_write(map, CMD(0xff), cmd_addr);
			chip->state = FL_READY;
			break;
		}

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
	sleep:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	map_copy_from(map, buf, adr, len);

	if (suspended) {
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), cmd_addr);
		map_write(map, CMD(0x70), cmd_addr);
	}

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);
	return 0;
}

static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum <<  cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
				  unsigned long adr, const u_char *buf, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long cmd_adr, timeo;
	DECLARE_WAITQUEUE(wait, current);
	int wbufsize, z;

        /* M58LW064A requires bus alignment for buffer writes -- saw */
        if (adr & (map_bankwidth(map)-1))
            return -EINVAL;

        wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
        adr += chip->start;
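	/*
	 * Buffer commands must be issued at the base address of the
	 * write buffer containing adr; wbufsize is a power of two (the
	 * interleave-scaled 2^MaxBufWriteSize from the CFI query), so
	 * masking off the low bits finds that boundary.
	 */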
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once */
        status_OK = CMD(0x80);

	timeo = jiffies + HZ;
 retry:

#ifdef DEBUG_CFI_FEATURES
       printk("%s: chip->state[%d]\n", __func__, chip->state);
#endif
	mutex_lock(&chip->mutex);

	/* Check that the chip's ready to talk to us.
	 * Later, we can actually think about interrupting it
	 * if it's in FL_ERASING state.
	 * Not just yet, though.
	 */
	switch (chip->state) {
	case FL_READY:
		break;

	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		map_write(map, CMD(0x70), cmd_adr);
                chip->state = FL_STATUS;
#ifdef DEBUG_CFI_FEATURES
	printk("%s: 1 status[%lx]\n", __func__, map_read(map, cmd_adr).x[0]);
#endif

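		/* Fall through */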
	case FL_STATUS:
		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;
		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
                        printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n",
                               status.x[0], map_read(map, cmd_adr).x[0]);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	map_write(map, CMD(0xe8), cmd_adr);
	chip->state = FL_WRITING_TO_BUFFER;

	z = 0;
	for (;;) {
		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		mutex_lock(&chip->mutex);

		if (++z > 100) {
			/* Argh. Not ready for write to buffer */
			DISABLE_VPP(map);
                        map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]);
			return -EIO;
		}
	}

	/* Write length of data to come */
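	/*
	 * The count is given as (number of bus-width words) - 1: a
	 * 32-byte burst on a 4-byte-wide bus, say, is programmed as
	 * CMD(7).
	 */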
	map_write(map, CMD(len/map_bankwidth(map)-1), cmd_adr);

	/* Write data */
	for (z = 0; z < len;
	     z += map_bankwidth(map), buf += map_bankwidth(map)) {
		map_word d;
		d = map_word_load(map, buf);
		map_write(map, d, adr+z);
	}
	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	mutex_unlock(&chip->mutex);
	cfi_udelay(chip->buffer_write_time);
	mutex_lock(&chip->mutex);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
                        /* clear status */
                        map_write(map, CMD(0x50), cmd_adr);
                        /* put back into read status register mode */
                        map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			DISABLE_VPP(map);
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		z++;
		mutex_lock(&chip->mutex);
	}
	if (!z) {
		chip->buffer_write_time--;
		if (!chip->buffer_write_time)
			chip->buffer_write_time++;
	}
	if (z > 1)
		chip->buffer_write_time++;

	/* Done and happy. */
	DISABLE_VPP(map);
	chip->state = FL_STATUS;

        /* check for errors: 'lock bit', 'VPP', 'dead cell'/'unerased cell' or 'incorrect cmd' -- saw */
        if (map_word_bitsset(map, status, CMD(0x3a))) {
#ifdef DEBUG_CFI_FEATURES
		printk("%s: 2 status[%lx]\n", __func__, status.x[0]);
#endif
		/* clear status */
		map_write(map, CMD(0x50), cmd_adr);
		/* put back into read status register mode */
		map_write(map, CMD(0x70), adr);
		wake_up(&chip->wq);
		mutex_unlock(&chip->mutex);
		return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO;
	}
	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);

        return 0;
}

static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
				       size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	chipnum = to >> cfi->chipshift;
	ofs = to  - (chipnum << cfi->chipshift);

#ifdef DEBUG_CFI_FEATURES
	printk("%s: map_bankwidth(map)[%x]\n", __func__, map_bankwidth(map));
	printk("%s: chipnum[%x] wbufsize[%x]\n", __func__, chipnum, wbufsize);
	printk("%s: ofs[%lx] len[%zx]\n", __func__, ofs, len);
#endif

        /* Write buffer is worth it only if more than one word to write... */
        while (len > 0) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

                if (size > len)
                    size = len;

                ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	return 0;
}

/*
 * Writev for ECC-Flashes is a little more complicated. We need to maintain
 * a small buffer for this.
 * XXX: If the buffer size is not a power of 2, this masking arithmetic
 * will break.
 */
#define ECCBUF_SIZE (mtd->writesize)
#define ECCBUF_DIV(x) ((x) & ~(ECCBUF_SIZE - 1))
#define ECCBUF_MOD(x) ((x) &  (ECCBUF_SIZE - 1))
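/*
 * Example, given the writesize of 8 set in cfi_staa_setup(): a 13-byte
 * vector element splits into ECCBUF_DIV(13) == 8 bytes that are written
 * out directly and ECCBUF_MOD(13) == 5 tail bytes that are carried in
 * the bounce buffer until a later element (or the final flush) completes
 * the page.
 */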
static int
cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen)
{
	unsigned long i;
	size_t	 totlen = 0, thislen;
	int	 ret = 0;
	size_t	 buflen = 0;
	char *buffer;

	if (!ECCBUF_SIZE) {
		/* We should fall back to a general writev implementation.
		 * Until that is written, just break.
		 */
		return -EIO;
	}
	buffer = kmalloc(ECCBUF_SIZE, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	for (i=0; i<count; i++) {
		size_t elem_len = vecs[i].iov_len;
		void *elem_base = vecs[i].iov_base;
		if (!elem_len) /* FIXME: Might be unnecessary. Check that */
			continue;
		if (buflen) { /* cut off head */
			if (buflen + elem_len < ECCBUF_SIZE) { /* just accumulate */
				memcpy(buffer+buflen, elem_base, elem_len);
				buflen += elem_len;
				continue;
			}
			memcpy(buffer+buflen, elem_base, ECCBUF_SIZE-buflen);
			ret = mtd_write(mtd, to, ECCBUF_SIZE, &thislen,
					buffer);
			totlen += thislen;
			if (ret || thislen != ECCBUF_SIZE)
				goto write_error;
			elem_len -= thislen-buflen;
			elem_base += thislen-buflen;
			to += ECCBUF_SIZE;
		}
		if (ECCBUF_DIV(elem_len)) { /* write clean aligned data */
			ret = mtd_write(mtd, to, ECCBUF_DIV(elem_len),
					&thislen, elem_base);
			totlen += thislen;
			if (ret || thislen != ECCBUF_DIV(elem_len))
				goto write_error;
			to += thislen;
		}
		buflen = ECCBUF_MOD(elem_len); /* cut off tail */
		if (buflen) {
			memset(buffer, 0xff, ECCBUF_SIZE);
			memcpy(buffer, elem_base + ECCBUF_DIV(elem_len), buflen);
		}
	}
	if (buflen) { /* flush last page, even if not full */
		/* This is sometimes intended behaviour, really */
		ret = mtd_write(mtd, to, buflen, &thislen, buffer);
		totlen += thislen;
		if (ret || thislen != ECCBUF_SIZE)
			goto write_error;
	}
write_error:
	if (retlen)
		*retlen = totlen;
	kfree(buffer);
	return ret;
}


static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo;
	int retries = 3;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	mutex_lock(&chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;

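		/* Fall through */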
	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;

	mutex_unlock(&chip->mutex);
	msleep(1000);
	mutex_lock(&chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*20);
	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ*20); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			mutex_unlock(&chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		mutex_lock(&chip->mutex);
	}

	DISABLE_VPP(map);
	ret = 0;

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

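	/*
	 * Status register error bits tested below (mask 0x3a):
	 *   bit 1 (0x02)  block locked / protection error
	 *   bit 3 (0x08)  VPP low
	 *   bit 4 (0x10)  program failure
	 *   bit 5 (0x20)  erase failure
	 * Bits 4 and 5 set together indicate a bogus command sequence.
	 */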
	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned char chipstatus = status.x[0];
		if (!map_word_equal(map, status, CMD(chipstatus))) {
			int i, w;
			for (w=0; w<map_words(map); w++) {
				for (i = 0; i<cfi_interleave(cfi); i++) {
					chipstatus |= status.x[w] >> (cfi->device_type * 8);
				}
			}
			printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
			       status.x[0], chipstatus);
		}
		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);

		if ((chipstatus & 0x30) == 0x30) {
			printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* Voltage */
			printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x20) {
			if (retries--) {
				printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
				timeo = jiffies + HZ;
				chip->state = FL_STATUS;
				mutex_unlock(&chip->mutex);
				goto retry;
			}
			printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
			ret = -EIO;
		}
	}

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);
	return ret;
}

static int cfi_staa_erase_varsize(struct mtd_info *mtd,
				  struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr, len;
	int chipnum, ret = 0;
	int i, first;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	/* Check that both start and end of the requested erase are
	 * aligned with the erasesize at the appropriate addresses.
	 */

	i = 0;

	/* Skip all erase regions which are ended before the start of
	   the requested erase. Actually, to save on the calculations,
	   we skip to the first erase region which starts after the
	   start of the requested erase, and then go back one.
	*/

	while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
	       i++;
	i--;

	/* OK, now i is pointing at the erase region in which this
	   erase request starts. Check the start of the requested
	   erase range is aligned with the erase size which is in
	   effect here.
	*/

	if (instr->addr & (regions[i].erasesize-1))
		return -EINVAL;

	/* Remember the erase region we start on */
	first = i;

	/* Next, check that the end of the requested erase is aligned
	 * with the erase region at that address.
	 */

	while (i<mtd->numeraseregions && (instr->addr + instr->len) >= regions[i].offset)
		i++;

	/* As before, drop back one to point at the region in which
	   the address actually falls
	*/
	i--;

	if ((instr->addr + instr->len) & (regions[i].erasesize-1))
		return -EINVAL;

	chipnum = instr->addr >> cfi->chipshift;
	adr = instr->addr - (chipnum << cfi->chipshift);
	len = instr->len;

	i=first;

	while(len) {
		ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);

		if (ret)
			return ret;

		adr += regions[i].erasesize;
		len -= regions[i].erasesize;

		if (adr % (1<< cfi->chipshift) == (((unsigned long)regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift)))
			i++;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

static void cfi_staa_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		mutex_lock(&chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
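			/* Fall through */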
		case FL_SYNCING:
			mutex_unlock(&chip->mutex);
			break;

		default:
			/* Not an idle state */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		mutex_unlock(&chip->mutex);
	}
}

static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	mutex_lock(&chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;

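		/* Fall through */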
	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in lock\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
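	/* Set Block Lock-Bit: config setup (0x60) followed by 0x01 */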
	map_write(map, CMD(0x60), adr);
	map_write(map, CMD(0x01), adr);
	chip->state = FL_LOCKING;

	mutex_unlock(&chip->mutex);
	msleep(1000);
	mutex_lock(&chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*2);
	for (;;) {

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			mutex_unlock(&chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		mutex_lock(&chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	DISABLE_VPP(map);
	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);
	return 0;
}

static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret = 0;
#ifdef DEBUG_LOCK_BITS
	int ofs_factor = cfi->interleave * cfi->device_type;
#endif

	if (ofs & (mtd->erasesize - 1))
		return -EINVAL;

	if (len & (mtd->erasesize -1))
		return -EINVAL;

	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

	while(len) {

#ifdef DEBUG_LOCK_BITS
		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		printk("before lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

		ret = do_lock_oneblock(map, &cfi->chips[chipnum], adr);

#ifdef DEBUG_LOCK_BITS
		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
		printk("after lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

		if (ret)
			return ret;

		adr += mtd->erasesize;
		len -= mtd->erasesize;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}
	return 0;
}

static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	timeo = jiffies + HZ;
retry:
	mutex_lock(&chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
	case FL_READY:
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;

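		/* Fall through */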
	case FL_STATUS:
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			mutex_unlock(&chip->mutex);
			printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n");
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		goto retry;

	default:
		/* Stick ourselves on a wait queue to be woken when
		   someone changes the status */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		timeo = jiffies + HZ;
		goto retry;
	}

	ENABLE_VPP(map);
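	/*
	 * Clear Block Lock-Bits: 0x60 followed by 0xD0.  On Intel-style
	 * parts this sequence clears the lock bits of all blocks at
	 * once, which would explain why cfi_staa_unlock() below issues
	 * it only once instead of looping over the range.
	 */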
	map_write(map, CMD(0x60), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_UNLOCKING;

	mutex_unlock(&chip->mutex);
	msleep(1000);
	mutex_lock(&chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*2);
	for (;;) {

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
			DISABLE_VPP(map);
			mutex_unlock(&chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		mutex_unlock(&chip->mutex);
		cfi_udelay(1);
		mutex_lock(&chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	DISABLE_VPP(map);
	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);
	return 0;
}

static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret = 0;
#ifdef DEBUG_LOCK_BITS
	int ofs_factor = cfi->interleave * cfi->device_type;
#endif

	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

#ifdef DEBUG_LOCK_BITS
	{
		unsigned long temp_adr = adr;
		unsigned long temp_len = len;

		cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
                while (temp_len) {
			printk("before unlock %lx: block status register is %x\n",temp_adr,cfi_read_query(map, temp_adr+(2*ofs_factor)));
			temp_adr += mtd->erasesize;
			temp_len -= mtd->erasesize;
		}
		cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
	}
#endif

	ret = do_unlock_oneblock(map, &cfi->chips[chipnum], adr);

#ifdef DEBUG_LOCK_BITS
	cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
	printk("after unlock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
	cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

	return ret;
}

static int cfi_staa_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
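			/* Fall through */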
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			mutex_lock(&chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			mutex_unlock(&chip->mutex);
		}
	}

	return ret;
}

static void cfi_staa_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			map_write(map, CMD(0xFF), 0);
			chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		mutex_unlock(&chip->mutex);
	}
}

static void cfi_staa_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	kfree(cfi->cmdset_priv);
	kfree(cfi);
}

MODULE_LICENSE("GPL");