xref: /openbmc/linux/drivers/mtd/sm_ftl.c (revision e2d413f9)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright © 2009 - Maxim Levitsky
4  * SmartMedia/xD translation layer
5  */
6 
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/random.h>
10 #include <linux/hdreg.h>
11 #include <linux/kthread.h>
12 #include <linux/freezer.h>
13 #include <linux/sysfs.h>
14 #include <linux/bitops.h>
15 #include <linux/slab.h>
16 #include <linux/mtd/nand_ecc.h>
17 #include "nand/raw/sm_common.h"
18 #include "sm_ftl.h"
19 
20 
21 
22 static struct workqueue_struct *cache_flush_workqueue;
23 
24 static int cache_timeout = 1000;
25 module_param(cache_timeout, int, S_IRUGO);
26 MODULE_PARM_DESC(cache_timeout,
27 	"Timeout (in ms) for cache flush (1000 ms default)");
28 
29 static int debug;
30 module_param(debug, int, S_IRUGO | S_IWUSR);
31 MODULE_PARM_DESC(debug, "Debug level (0-2)");
32 
33 
34 /* ------------------- sysfs attributes ---------------------------------- */
35 struct sm_sysfs_attribute {
36 	struct device_attribute dev_attr;
37 	char *data;
38 	int len;
39 };
40 
41 static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
42 		     char *buf)
43 {
44 	struct sm_sysfs_attribute *sm_attr =
45 		container_of(attr, struct sm_sysfs_attribute, dev_attr);
46 
47 	strncpy(buf, sm_attr->data, sm_attr->len);
48 	return sm_attr->len;
49 }
50 
51 
52 #define NUM_ATTRIBUTES 1
53 #define SM_CIS_VENDOR_OFFSET 0x59
54 static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
55 {
56 	struct attribute_group *attr_group;
57 	struct attribute **attributes;
58 	struct sm_sysfs_attribute *vendor_attribute;
59 	char *vendor;
60 
61 	vendor = kstrndup(ftl->cis_buffer + SM_CIS_VENDOR_OFFSET,
62 			  SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET, GFP_KERNEL);
63 	if (!vendor)
64 		goto error1;
65 
66 	/* Initialize sysfs attributes */
67 	vendor_attribute =
68 		kzalloc(sizeof(struct sm_sysfs_attribute), GFP_KERNEL);
69 	if (!vendor_attribute)
70 		goto error2;
71 
72 	sysfs_attr_init(&vendor_attribute->dev_attr.attr);
73 
74 	vendor_attribute->data = vendor;
75 	vendor_attribute->len = strlen(vendor);
76 	vendor_attribute->dev_attr.attr.name = "vendor";
77 	vendor_attribute->dev_attr.attr.mode = S_IRUGO;
78 	vendor_attribute->dev_attr.show = sm_attr_show;
79 
80 
81 	/* Create array of pointers to the attributes */
82 	attributes = kcalloc(NUM_ATTRIBUTES + 1, sizeof(struct attribute *),
83 								GFP_KERNEL);
84 	if (!attributes)
85 		goto error3;
86 	attributes[0] = &vendor_attribute->dev_attr.attr;
87 
88 	/* Finally create the attribute group */
89 	attr_group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
90 	if (!attr_group)
91 		goto error4;
92 	attr_group->attrs = attributes;
93 	return attr_group;
94 error4:
95 	kfree(attributes);
96 error3:
97 	kfree(vendor_attribute);
98 error2:
99 	kfree(vendor);
100 error1:
101 	return NULL;
102 }
103 
104 static void sm_delete_sysfs_attributes(struct sm_ftl *ftl)
105 {
106 	struct attribute **attributes = ftl->disk_attributes->attrs;
107 	int i;
108 
109 	for (i = 0; attributes[i] ; i++) {
110 
111 		struct device_attribute *dev_attr = container_of(attributes[i],
112 			struct device_attribute, attr);
113 
114 		struct sm_sysfs_attribute *sm_attr =
115 			container_of(dev_attr,
116 				struct sm_sysfs_attribute, dev_attr);
117 
118 		kfree(sm_attr->data);
119 		kfree(sm_attr);
120 	}
121 
122 	kfree(ftl->disk_attributes->attrs);
123 	kfree(ftl->disk_attributes);
124 }
125 
126 
127 /* ----------------------- oob helpers -------------------------------------- */
128 
129 static int sm_get_lba(uint8_t *lba)
130 {
131 	/* check fixed bits */
132 	if ((lba[0] & 0xF8) != 0x10)
133 		return -2;
134 
135 	/* check parity - endianness doesn't matter */
136 	if (hweight16(*(uint16_t *)lba) & 1)
137 		return -2;
138 
139 	return (lba[1] >> 1) | ((lba[0] & 0x07) << 7);
140 }
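
/*
 * Worked decode example (values chosen for illustration): for the
 * on-media bytes lba[0] = 0x12, lba[1] = 0x59 the fixed-bits check passes
 * (0x12 & 0xF8 == 0x10), the parity check passes (hweight16() counts
 * 2 + 4 = 6 set bits, an even number), and the function returns
 * (0x59 >> 1) | ((0x12 & 0x07) << 7) = 44 | 256 = 300.
 */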
141 
142 
143 /*
144  * Read the LBA associated with a block
145  * returns -1 if the block is erased
146  * returns -2 if an error happens
147  */
148 static int sm_read_lba(struct sm_oob *oob)
149 {
150 	static const uint32_t erased_pattern[4] = {
151 		0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
152 
153 	uint16_t lba_test;
154 	int lba;
155 
156 	/* First test for erased block */
157 	if (!memcmp(oob, erased_pattern, SM_OOB_SIZE))
158 		return -1;
159 
160 	/* Now check if both copies of the LBA differ too much */
161 	lba_test = *(uint16_t *)oob->lba_copy1 ^ *(uint16_t *)oob->lba_copy2;
162 	if (lba_test && !is_power_of_2(lba_test))
163 		return -2;
164 
165 	/* And read it */
166 	lba = sm_get_lba(oob->lba_copy1);
167 
168 	if (lba == -2)
169 		lba = sm_get_lba(oob->lba_copy2);
170 
171 	return lba;
172 }
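
/*
 * Note on the consistency check in sm_read_lba(): lba_test is the XOR of
 * the two LBA copies, so it has more than one bit set only when the
 * copies disagree in more than one bit position.  A single bitflip (for
 * instance copies {0x12, 0x59} and {0x12, 0x58}) XORs to a power of two
 * and is still accepted; the code then falls back to the second copy if
 * sm_get_lba() rejects the first one.
 */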
173 
174 static void sm_write_lba(struct sm_oob *oob, uint16_t lba)
175 {
176 	uint8_t tmp[2];
177 
178 	WARN_ON(lba >= 1000);
179 
180 	tmp[0] = 0x10 | ((lba >> 7) & 0x07);
181 	tmp[1] = (lba << 1) & 0xFF;
182 
183 	if (hweight16(*(uint16_t *)tmp) & 0x01)
184 		tmp[1] |= 1;
185 
186 	oob->lba_copy1[0] = oob->lba_copy2[0] = tmp[0];
187 	oob->lba_copy1[1] = oob->lba_copy2[1] = tmp[1];
188 }
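
/*
 * Worked encode example (values chosen for illustration): lba = 300 gives
 * tmp[0] = 0x10 | ((300 >> 7) & 0x07) = 0x12 and
 * tmp[1] = (300 << 1) & 0xFF = 0x58.  The 16-bit value then has five set
 * bits (odd), so the parity bit is added and tmp[1] becomes 0x59; both
 * LBA copies in the oob end up holding the bytes 0x12 0x59.
 */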
189 
190 
191 /* Make offset from parts */
192 static loff_t sm_mkoffset(struct sm_ftl *ftl, int zone, int block, int boffset)
193 {
194 	WARN_ON(boffset & (SM_SECTOR_SIZE - 1));
195 	WARN_ON(zone < 0 || zone >= ftl->zone_count);
196 	WARN_ON(block >= ftl->zone_size);
197 	WARN_ON(boffset >= ftl->block_size);
198 
199 	if (block == -1)
200 		return -1;
201 
202 	return (zone * SM_MAX_ZONE_SIZE + block) * ftl->block_size + boffset;
203 }
204 
205 /* Breaks offset into parts */
206 static void sm_break_offset(struct sm_ftl *ftl, loff_t loffset,
207 			    int *zone, int *block, int *boffset)
208 {
209 	u64 offset = loffset;
210 	*boffset = do_div(offset, ftl->block_size);
211 	*block = do_div(offset, ftl->max_lba);
212 	*zone = offset >= ftl->zone_count ? -1 : offset;
213 }
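
/*
 * Worked example, assuming the large-card geometry set up in
 * sm_get_media_info() (block_size = 32 * 512 = 16384 bytes,
 * max_lba = 1000): sm_break_offset() splits the logical byte offset
 * 16777216 into boffset = 0, block (LBA) = 24 and zone = 1, since
 * 16777216 / 16384 = 1024 = 1 * 1000 + 24.  Note the asymmetry with
 * sm_mkoffset(), which rebuilds a physical address using
 * SM_MAX_ZONE_SIZE physical blocks per zone rather than max_lba
 * logical blocks.
 */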
214 
215 /* ---------------------- low level IO ------------------------------------- */
216 
217 static int sm_correct_sector(uint8_t *buffer, struct sm_oob *oob)
218 {
219 	uint8_t ecc[3];
220 
221 	__nand_calculate_ecc(buffer, SM_SMALL_PAGE, ecc,
222 			     IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC));
223 	if (__nand_correct_data(buffer, ecc, oob->ecc1, SM_SMALL_PAGE,
224 				IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC)) < 0)
225 		return -EIO;
226 
227 	buffer += SM_SMALL_PAGE;
228 
229 	__nand_calculate_ecc(buffer, SM_SMALL_PAGE, ecc,
230 			     IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC));
231 	if (__nand_correct_data(buffer, ecc, oob->ecc2, SM_SMALL_PAGE,
232 				IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC)) < 0)
233 		return -EIO;
234 	return 0;
235 }
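
/*
 * Each 512-byte sector is protected as two independent SM_SMALL_PAGE
 * (256-byte) halves, each with its own Hamming ECC stored in oob->ecc1
 * and oob->ecc2, so a single-bit error in either half can be corrected
 * on its own.
 */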
236 
237 /* Reads a sector + oob */
238 static int sm_read_sector(struct sm_ftl *ftl,
239 			  int zone, int block, int boffset,
240 			  uint8_t *buffer, struct sm_oob *oob)
241 {
242 	struct mtd_info *mtd = ftl->trans->mtd;
243 	struct mtd_oob_ops ops;
244 	struct sm_oob tmp_oob;
245 	int ret = -EIO;
246 	int try = 0;
247 
248 	/* The FTL can contain -1 entries that by default read back as all 0xFF bits */
249 	if (block == -1) {
250 		if (buffer)
251 			memset(buffer, 0xFF, SM_SECTOR_SIZE);
252 		return 0;
253 	}
254 
255 	/* User might not need the oob, but we do for data verification */
256 	if (!oob)
257 		oob = &tmp_oob;
258 
259 	ops.mode = ftl->smallpagenand ? MTD_OPS_RAW : MTD_OPS_PLACE_OOB;
260 	ops.ooboffs = 0;
261 	ops.ooblen = SM_OOB_SIZE;
262 	ops.oobbuf = (void *)oob;
263 	ops.len = SM_SECTOR_SIZE;
264 	ops.datbuf = buffer;
265 
266 again:
267 	if (try++) {
268 		/* Avoid infinite recursion on CIS reads, sm_recheck_media
269 			won't help anyway */
270 		if (zone == 0 && block == ftl->cis_block && boffset ==
271 			ftl->cis_boffset)
272 			return ret;
273 
274 		/* Test if media is stable */
275 		if (try == 3 || sm_recheck_media(ftl))
276 			return ret;
277 	}
278 
279 	/* Unfortunately, oob read will _always_ succeed,
280 		despite card removal..... */
281 	ret = mtd_read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
282 
283 	/* Test for unknown errors */
284 	if (ret != 0 && !mtd_is_bitflip_or_eccerr(ret)) {
285 		dbg("read of block %d at zone %d, failed due to error (%d)",
286 			block, zone, ret);
287 		goto again;
288 	}
289 
290 	/* Do a basic test on the oob, to guard against returned garbage */
291 	if (oob->reserved != 0xFFFFFFFF && !is_power_of_2(~oob->reserved))
292 		goto again;
293 
294 	/* This should never happen, unless there is a bug in the mtd driver */
295 	WARN_ON(ops.oobretlen != SM_OOB_SIZE);
296 	WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);
297 
298 	if (!buffer)
299 		return 0;
300 
301 	/* Test if sector marked as bad */
302 	if (!sm_sector_valid(oob)) {
303 		dbg("read of block %d at zone %d, failed because it is marked"
304 			" as bad", block, zone);
305 		goto again;
306 	}
307 
308 	/* Test ECC */
309 	if (mtd_is_eccerr(ret) ||
310 		(ftl->smallpagenand && sm_correct_sector(buffer, oob))) {
311 
312 		dbg("read of block %d at zone %d, failed due to ECC error",
313 			block, zone);
314 		goto again;
315 	}
316 
317 	return 0;
318 }
319 
320 /* Writes a sector to media */
321 static int sm_write_sector(struct sm_ftl *ftl,
322 			   int zone, int block, int boffset,
323 			   uint8_t *buffer, struct sm_oob *oob)
324 {
325 	struct mtd_oob_ops ops;
326 	struct mtd_info *mtd = ftl->trans->mtd;
327 	int ret;
328 
329 	BUG_ON(ftl->readonly);
330 
331 	if (zone == 0 && (block == ftl->cis_block || block == 0)) {
332 		dbg("attempted to write the CIS!");
333 		return -EIO;
334 	}
335 
336 	if (ftl->unstable)
337 		return -EIO;
338 
339 	ops.mode = ftl->smallpagenand ? MTD_OPS_RAW : MTD_OPS_PLACE_OOB;
340 	ops.len = SM_SECTOR_SIZE;
341 	ops.datbuf = buffer;
342 	ops.ooboffs = 0;
343 	ops.ooblen = SM_OOB_SIZE;
344 	ops.oobbuf = (void *)oob;
345 
346 	ret = mtd_write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
347 
348 	/* Now we assume that hardware will catch write bitflip errors */
349 
350 	if (ret) {
351 		dbg("write to block %d at zone %d, failed with error %d",
352 			block, zone, ret);
353 
354 		sm_recheck_media(ftl);
355 		return ret;
356 	}
357 
358 	/* This should never happen, unless there is a bug in the driver */
359 	WARN_ON(ops.oobretlen != SM_OOB_SIZE);
360 	WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);
361 
362 	return 0;
363 }
364 
365 /* ------------------------ block IO ------------------------------------- */
366 
367 /* Write a block using the data, the LBA, and the invalid sector bitmap */
368 static int sm_write_block(struct sm_ftl *ftl, uint8_t *buf,
369 			  int zone, int block, int lba,
370 			  unsigned long invalid_bitmap)
371 {
372 	struct sm_oob oob;
373 	int boffset;
374 	int retry = 0;
375 
376 	/* Initialize the oob with requested values */
377 	memset(&oob, 0xFF, SM_OOB_SIZE);
378 	sm_write_lba(&oob, lba);
379 restart:
380 	if (ftl->unstable)
381 		return -EIO;
382 
383 	for (boffset = 0; boffset < ftl->block_size;
384 				boffset += SM_SECTOR_SIZE) {
385 
386 		oob.data_status = 0xFF;
387 
388 		if (test_bit(boffset / SM_SECTOR_SIZE, &invalid_bitmap)) {
389 
390 			sm_printk("sector %d of block at LBA %d of zone %d"
391 				" couldn't be read, marking it as invalid",
392 				boffset / SM_SECTOR_SIZE, lba, zone);
393 
394 			oob.data_status = 0;
395 		}
396 
397 		if (ftl->smallpagenand) {
398 			__nand_calculate_ecc(buf + boffset, SM_SMALL_PAGE,
399 					oob.ecc1,
400 					IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC));
401 
402 			__nand_calculate_ecc(buf + boffset + SM_SMALL_PAGE,
403 					SM_SMALL_PAGE, oob.ecc2,
404 					IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC));
405 		}
406 		if (!sm_write_sector(ftl, zone, block, boffset,
407 							buf + boffset, &oob))
408 			continue;
409 
410 		if (!retry) {
411 
412 			/* If the write fails, try to erase the block */
413 			/* This is safe, because we never write in blocks
414 				that contain valuable data.
415 				This is intended to repair blocks that are marked
416 				as erased but aren't fully erased */
417 
418 			if (sm_erase_block(ftl, zone, block, 0))
419 				return -EIO;
420 
421 			retry = 1;
422 			goto restart;
423 		} else {
424 			sm_mark_block_bad(ftl, zone, block);
425 			return -EIO;
426 		}
427 	}
428 	return 0;
429 }
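
/*
 * Sectors flagged in invalid_bitmap (those that could not be read back
 * while assembling the block) are still written above, but with
 * oob.data_status cleared to zero, so later reads will treat them as
 * bad.  For small-page NAND the Hamming ECC bytes are generated in
 * software here because the sector is written in MTD_OPS_RAW mode.
 */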
430 
431 
432 /* Mark whole block at offset 'offs' as bad. */
433 static void sm_mark_block_bad(struct sm_ftl *ftl, int zone, int block)
434 {
435 	struct sm_oob oob;
436 	int boffset;
437 
438 	memset(&oob, 0xFF, SM_OOB_SIZE);
439 	oob.block_status = 0xF0;
440 
441 	if (ftl->unstable)
442 		return;
443 
444 	if (sm_recheck_media(ftl))
445 		return;
446 
447 	sm_printk("marking block %d of zone %d as bad", block, zone);
448 
449 	/* We aren't checking the return value, because we don't care */
450 	/* This also fails on fake xD cards, but I guess these won't expose
451 		any bad blocks till they fail completely */
452 	for (boffset = 0; boffset < ftl->block_size; boffset += SM_SECTOR_SIZE)
453 		sm_write_sector(ftl, zone, block, boffset, NULL, &oob);
454 }
455 
456 /*
457  * Erase a block within a zone
458  * If erase succeeds, it updates free block fifo, otherwise marks block as bad
459  */
460 static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
461 			  int put_free)
462 {
463 	struct ftl_zone *zone = &ftl->zones[zone_num];
464 	struct mtd_info *mtd = ftl->trans->mtd;
465 	struct erase_info erase;
466 
467 	erase.addr = sm_mkoffset(ftl, zone_num, block, 0);
468 	erase.len = ftl->block_size;
469 
470 	if (ftl->unstable)
471 		return -EIO;
472 
473 	BUG_ON(ftl->readonly);
474 
475 	if (zone_num == 0 && (block == ftl->cis_block || block == 0)) {
476 		sm_printk("attempted to erase the CIS!");
477 		return -EIO;
478 	}
479 
480 	if (mtd_erase(mtd, &erase)) {
481 		sm_printk("erase of block %d in zone %d failed",
482 							block, zone_num);
483 		goto error;
484 	}
485 
486 	if (put_free)
487 		kfifo_in(&zone->free_sectors,
488 			(const unsigned char *)&block, sizeof(block));
489 
490 	return 0;
491 error:
492 	sm_mark_block_bad(ftl, zone_num, block);
493 	return -EIO;
494 }
495 
496 /* Thoroughly test that block is valid. */
497 static int sm_check_block(struct sm_ftl *ftl, int zone, int block)
498 {
499 	int boffset;
500 	struct sm_oob oob;
501 	int lbas[] = { -3, 0, 0, 0 };
502 	int i = 0;
503 	int test_lba;
504 
505 
506 	/* First just check that the block doesn't look fishy */
507 	/* Only blocks that are valid or are sliced in two parts
508 		are accepted */
509 	for (boffset = 0; boffset < ftl->block_size;
510 					boffset += SM_SECTOR_SIZE) {
511 
512 		/* This shouldn't happen anyway */
513 		if (sm_read_sector(ftl, zone, block, boffset, NULL, &oob))
514 			return -2;
515 
516 		test_lba = sm_read_lba(&oob);
517 
518 		if (lbas[i] != test_lba)
519 			lbas[++i] = test_lba;
520 
521 		/* If we found three different LBAs, something is fishy */
522 		if (i == 3)
523 			return -EIO;
524 	}
525 
526 	/* If the block is sliced (partially erased usually) erase it */
527 	if (i == 2) {
528 		sm_erase_block(ftl, zone, block, 1);
529 		return 1;
530 	}
531 
532 	return 0;
533 }
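
/*
 * How the scan above works: lbas[0] starts at -3, a value sm_read_lba()
 * can never return, and i is bumped every time a sector's LBA differs
 * from the previously recorded value.  i == 1 means all sectors agree,
 * i == 2 means the block consists of two runs (typically valid data
 * followed by erased sectors) and gets erased, and i == 3 means at least
 * three different values were seen, which is rejected as corruption.
 */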
534 
535 /* ----------------- media scanning --------------------------------- */
536 static const struct chs_entry chs_table[] = {
537 	{ 1,    125,  4,  4  },
538 	{ 2,    125,  4,  8  },
539 	{ 4,    250,  4,  8  },
540 	{ 8,    250,  4,  16 },
541 	{ 16,   500,  4,  16 },
542 	{ 32,   500,  8,  16 },
543 	{ 64,   500,  8,  32 },
544 	{ 128,  500,  16, 32 },
545 	{ 256,  1000, 16, 32 },
546 	{ 512,  1015, 32, 63 },
547 	{ 1024, 985,  33, 63 },
548 	{ 2048, 985,  33, 63 },
549 	{ 0 },
550 };
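
/*
 * Example, for a 128 MiB card: the row { 128, 500, 16, 32 } advertises
 * 500 * 16 * 32 = 256000 sectors of 512 bytes = 125 MiB, which matches
 * the capacity the FTL exports for that card (8 zones * 1000 LBAs *
 * 16 KiB per block = 125 MiB); the remaining physical blocks are spares.
 */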
551 
552 
553 static const uint8_t cis_signature[] = {
554 	0x01, 0x03, 0xD9, 0x01, 0xFF, 0x18, 0x02, 0xDF, 0x01, 0x20
555 };
556 /* Find out media parameters.
557  * This ideally has to be based on the NAND id, but for now the device size is enough */
558 static int sm_get_media_info(struct sm_ftl *ftl, struct mtd_info *mtd)
559 {
560 	int i;
561 	int size_in_megs = mtd->size / (1024 * 1024);
562 
563 	ftl->readonly = mtd->type == MTD_ROM;
564 
565 	/* Manual settings for very old devices */
566 	ftl->zone_count = 1;
567 	ftl->smallpagenand = 0;
568 
569 	switch (size_in_megs) {
570 		/* 1 MiB flash/rom SmartMedia card (256 byte pages) */
571 		/* 1 MiB flash/rom SmartMedia card (256 byte pages)*/
572 		ftl->zone_size = 256;
573 		ftl->max_lba = 250;
574 		ftl->block_size = 8 * SM_SECTOR_SIZE;
575 		ftl->smallpagenand = 1;
576 
577 		break;
578 	case 2:
579 		/* 2 MiB flash SmartMedia (256 byte pages) */
580 		if (mtd->writesize == SM_SMALL_PAGE) {
581 			ftl->zone_size = 512;
582 			ftl->max_lba = 500;
583 			ftl->block_size = 8 * SM_SECTOR_SIZE;
584 			ftl->smallpagenand = 1;
585 		/* 2 MiB rom SmartMedia */
586 		} else {
587 
588 			if (!ftl->readonly)
589 				return -ENODEV;
590 
591 			ftl->zone_size = 256;
592 			ftl->max_lba = 250;
593 			ftl->block_size = 16 * SM_SECTOR_SIZE;
594 		}
595 		break;
596 	case 4:
597 		/* 4 MiB flash/rom SmartMedia device */
598 		ftl->zone_size = 512;
599 		ftl->max_lba = 500;
600 		ftl->block_size = 16 * SM_SECTOR_SIZE;
601 		break;
602 	case 8:
603 		/* 8 MiB flash/rom SmartMedia device */
604 		ftl->zone_size = 1024;
605 		ftl->max_lba = 1000;
606 		ftl->block_size = 16 * SM_SECTOR_SIZE;
607 	}
608 
609 	/* Minimum xD size is 16 MiB. Also, all xD cards have standard zone
610 	   sizes. SmartMedia cards exist up to 128 MiB and have the same layout */
611 	if (size_in_megs >= 16) {
612 		ftl->zone_count = size_in_megs / 16;
613 		ftl->zone_size = 1024;
614 		ftl->max_lba = 1000;
615 		ftl->block_size = 32 * SM_SECTOR_SIZE;
616 	}
617 
618 	/* Test for proper write, erase and oob sizes */
619 	if (mtd->erasesize > ftl->block_size)
620 		return -ENODEV;
621 
622 	if (mtd->writesize > SM_SECTOR_SIZE)
623 		return -ENODEV;
624 
625 	if (ftl->smallpagenand && mtd->oobsize < SM_SMALL_OOB_SIZE)
626 		return -ENODEV;
627 
628 	if (!ftl->smallpagenand && mtd->oobsize < SM_OOB_SIZE)
629 		return -ENODEV;
630 
631 	/* We use OOB */
632 	if (!mtd_has_oob(mtd))
633 		return -ENODEV;
634 
635 	/* Find geometry information */
636 	for (i = 0 ; i < ARRAY_SIZE(chs_table) ; i++) {
637 		if (chs_table[i].size == size_in_megs) {
638 			ftl->cylinders = chs_table[i].cyl;
639 			ftl->heads = chs_table[i].head;
640 			ftl->sectors = chs_table[i].sec;
641 			return 0;
642 		}
643 	}
644 
645 	sm_printk("media has unknown size : %dMiB", size_in_megs);
646 	ftl->cylinders = 985;
647 	ftl->heads = 33;
648 	ftl->sectors = 63;
649 	return 0;
650 }
651 
652 /* Validate the CIS */
653 static int sm_read_cis(struct sm_ftl *ftl)
654 {
655 	struct sm_oob oob;
656 
657 	if (sm_read_sector(ftl,
658 		0, ftl->cis_block, ftl->cis_boffset, ftl->cis_buffer, &oob))
659 			return -EIO;
660 
661 	if (!sm_sector_valid(&oob) || !sm_block_valid(&oob))
662 		return -EIO;
663 
664 	if (!memcmp(ftl->cis_buffer + ftl->cis_page_offset,
665 			cis_signature, sizeof(cis_signature))) {
666 		return 0;
667 	}
668 
669 	return -EIO;
670 }
671 
672 /* Scan the media for the CIS */
673 static int sm_find_cis(struct sm_ftl *ftl)
674 {
675 	struct sm_oob oob;
676 	int block, boffset;
677 	int block_found = 0;
678 	int cis_found = 0;
679 
680 	/* Search for first valid block */
681 	for (block = 0 ; block < ftl->zone_size - ftl->max_lba ; block++) {
682 
683 		if (sm_read_sector(ftl, 0, block, 0, NULL, &oob))
684 			continue;
685 
686 		if (!sm_block_valid(&oob))
687 			continue;
688 		block_found = 1;
689 		break;
690 	}
691 
692 	if (!block_found)
693 		return -EIO;
694 
695 	/* Search for first valid sector in this block */
696 	for (boffset = 0 ; boffset < ftl->block_size;
697 						boffset += SM_SECTOR_SIZE) {
698 
699 		if (sm_read_sector(ftl, 0, block, boffset, NULL, &oob))
700 			continue;
701 
702 		if (!sm_sector_valid(&oob))
703 			continue;
704 		break;
705 	}
706 
707 	if (boffset == ftl->block_size)
708 		return -EIO;
709 
710 	ftl->cis_block = block;
711 	ftl->cis_boffset = boffset;
712 	ftl->cis_page_offset = 0;
713 
714 	cis_found = !sm_read_cis(ftl);
715 
716 	if (!cis_found) {
717 		ftl->cis_page_offset = SM_SMALL_PAGE;
718 		cis_found = !sm_read_cis(ftl);
719 	}
720 
721 	if (cis_found) {
722 		dbg("CIS block found at offset %x",
723 			block * ftl->block_size +
724 				boffset + ftl->cis_page_offset);
725 		return 0;
726 	}
727 	return -EIO;
728 }
729 
730 /* Basic test to determine if the underlying mtd device is functional */
731 static int sm_recheck_media(struct sm_ftl *ftl)
732 {
733 	if (sm_read_cis(ftl)) {
734 
735 		if (!ftl->unstable) {
736 			sm_printk("media unstable, not allowing writes");
737 			ftl->unstable = 1;
738 		}
739 		return -EIO;
740 	}
741 	return 0;
742 }
743 
744 /* Initialize a FTL zone */
745 static int sm_init_zone(struct sm_ftl *ftl, int zone_num)
746 {
747 	struct ftl_zone *zone = &ftl->zones[zone_num];
748 	struct sm_oob oob;
749 	uint16_t block;
750 	int lba;
751 	int i = 0;
752 	int len;
753 
754 	dbg("initializing zone %d", zone_num);
755 
756 	/* Allocate memory for FTL table */
757 	zone->lba_to_phys_table = kmalloc_array(ftl->max_lba, 2, GFP_KERNEL);
758 
759 	if (!zone->lba_to_phys_table)
760 		return -ENOMEM;
761 	memset(zone->lba_to_phys_table, -1, ftl->max_lba * 2);
762 
763 
764 	/* Allocate memory for free sectors FIFO */
765 	if (kfifo_alloc(&zone->free_sectors, ftl->zone_size * 2, GFP_KERNEL)) {
766 		kfree(zone->lba_to_phys_table);
767 		return -ENOMEM;
768 	}
769 
770 	/* Now scan the zone */
771 	for (block = 0 ; block < ftl->zone_size ; block++) {
772 
773 		/* Skip blocks till the CIS (including) */
774 		if (zone_num == 0 && block <= ftl->cis_block)
775 			continue;
776 
777 		/* Read the oob of first sector */
778 		if (sm_read_sector(ftl, zone_num, block, 0, NULL, &oob)) {
779 			kfifo_free(&zone->free_sectors);
780 			kfree(zone->lba_to_phys_table);
781 			return -EIO;
782 		}
783 
784 		/* Test to see if the block is erased. It is enough to test the
785 			first sector, because an erase happens in one shot */
786 		if (sm_block_erased(&oob)) {
787 			kfifo_in(&zone->free_sectors,
788 				(unsigned char *)&block, 2);
789 			continue;
790 		}
791 
792 		/* If block is marked as bad, skip it */
793 		/* This assumes we can trust the first sector */
794 		/* However, the way the block valid status is defined ensures a
795 			very low probability of failure here */
796 		if (!sm_block_valid(&oob)) {
797 			dbg("PH %04d <-> <marked bad>", block);
798 			continue;
799 		}
800 
801 
802 		lba = sm_read_lba(&oob);
803 
804 		/* An invalid LBA means that the block is damaged. */
805 		/* We can try to erase it, or mark it as bad, but
806 			let's leave that to a recovery application */
807 		if (lba == -2 || lba >= ftl->max_lba) {
808 			dbg("PH %04d <-> LBA %04d(bad)", block, lba);
809 			continue;
810 		}
811 
812 
813 		/* If there is no collision,
814 			just put the sector in the FTL table */
815 		if (zone->lba_to_phys_table[lba] < 0) {
816 			dbg_verbose("PH %04d <-> LBA %04d", block, lba);
817 			zone->lba_to_phys_table[lba] = block;
818 			continue;
819 		}
820 
821 		sm_printk("collision"
822 			" of LBA %d between blocks %d and %d in zone %d",
823 			lba, zone->lba_to_phys_table[lba], block, zone_num);
824 
825 		/* Test that this block is valid*/
826 		if (sm_check_block(ftl, zone_num, block))
827 			continue;
828 
829 		/* Test now the old block */
830 		if (sm_check_block(ftl, zone_num,
831 					zone->lba_to_phys_table[lba])) {
832 			zone->lba_to_phys_table[lba] = block;
833 			continue;
834 		}
835 
836 		/* If both blocks are valid and share the same LBA, it means that
837 			they hold different versions of the same data. It is not
838 			known which is more recent, thus just erase one of them
839 		*/
840 		sm_printk("both blocks are valid, erasing the latter");
841 		sm_erase_block(ftl, zone_num, block, 1);
842 	}
843 
844 	dbg("zone initialized");
845 	zone->initialized = 1;
846 
847 	/* No free sectors means that the zone is heavily damaged; writes won't
848 		work, but it can still be (partially) read */
849 	if (!kfifo_len(&zone->free_sectors)) {
850 		sm_printk("no free blocks in zone %d", zone_num);
851 		return 0;
852 	}
853 
854 	/* Randomize first block we write to */
855 	get_random_bytes(&i, 2);
856 	i %= (kfifo_len(&zone->free_sectors) / 2);
857 
858 	while (i--) {
859 		len = kfifo_out(&zone->free_sectors,
860 					(unsigned char *)&block, 2);
861 		WARN_ON(len != 2);
862 		kfifo_in(&zone->free_sectors, (const unsigned char *)&block, 2);
863 	}
864 	return 0;
865 }
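
/*
 * The FIFO rotation at the end of sm_init_zone() spreads wear: a random
 * number of entries is popped from the free-sectors FIFO and pushed
 * straight back, so repeated mounts of the same card do not keep
 * handing out the same spare block for the first write.
 */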
866 
867 /* Get and automatically initialize an FTL mapping for one zone */
868 static struct ftl_zone *sm_get_zone(struct sm_ftl *ftl, int zone_num)
869 {
870 	struct ftl_zone *zone;
871 	int error;
872 
873 	BUG_ON(zone_num >= ftl->zone_count);
874 	zone = &ftl->zones[zone_num];
875 
876 	if (!zone->initialized) {
877 		error = sm_init_zone(ftl, zone_num);
878 
879 		if (error)
880 			return ERR_PTR(error);
881 	}
882 	return zone;
883 }
884 
885 
886 /* ----------------- cache handling ------------------------------------------*/
887 
888 /* Initialize the one block cache */
889 static void sm_cache_init(struct sm_ftl *ftl)
890 {
891 	ftl->cache_data_invalid_bitmap = 0xFFFFFFFF;
892 	ftl->cache_clean = 1;
893 	ftl->cache_zone = -1;
894 	ftl->cache_block = -1;
895 	/*memset(ftl->cache_data, 0xAA, ftl->block_size);*/
896 }
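
/*
 * The write cache holds exactly one block: cache_zone/cache_block
 * identify the LBA being assembled, and cache_data_invalid_bitmap has
 * one bit per sector, set while that sector has not yet been supplied
 * by the caller.  For example, with 32-sector blocks a single 512-byte
 * write to sector 5 clears bit 5 and leaves the other 31 bits set until
 * sm_cache_flush() backfills them from the old physical block.
 */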
897 
898 /* Put sector in one block cache */
899 static void sm_cache_put(struct sm_ftl *ftl, char *buffer, int boffset)
900 {
901 	memcpy(ftl->cache_data + boffset, buffer, SM_SECTOR_SIZE);
902 	clear_bit(boffset / SM_SECTOR_SIZE, &ftl->cache_data_invalid_bitmap);
903 	ftl->cache_clean = 0;
904 }
905 
906 /* Read a sector from the cache */
907 static int sm_cache_get(struct sm_ftl *ftl, char *buffer, int boffset)
908 {
909 	if (test_bit(boffset / SM_SECTOR_SIZE,
910 		&ftl->cache_data_invalid_bitmap))
911 			return -1;
912 
913 	memcpy(buffer, ftl->cache_data + boffset, SM_SECTOR_SIZE);
914 	return 0;
915 }
916 
917 /* Write the cache to hardware */
918 static int sm_cache_flush(struct sm_ftl *ftl)
919 {
920 	struct ftl_zone *zone;
921 
922 	int sector_num;
923 	uint16_t write_sector;
924 	int zone_num = ftl->cache_zone;
925 	int block_num;
926 
927 	if (ftl->cache_clean)
928 		return 0;
929 
930 	if (ftl->unstable)
931 		return -EIO;
932 
933 	BUG_ON(zone_num < 0);
934 	zone = &ftl->zones[zone_num];
935 	block_num = zone->lba_to_phys_table[ftl->cache_block];
936 
937 
938 	/* Try to read all unread areas of the cache block*/
939 	for_each_set_bit(sector_num, &ftl->cache_data_invalid_bitmap,
940 		ftl->block_size / SM_SECTOR_SIZE) {
941 
942 		if (!sm_read_sector(ftl,
943 			zone_num, block_num, sector_num * SM_SECTOR_SIZE,
944 			ftl->cache_data + sector_num * SM_SECTOR_SIZE, NULL))
945 				clear_bit(sector_num,
946 					&ftl->cache_data_invalid_bitmap);
947 	}
948 restart:
949 
950 	if (ftl->unstable)
951 		return -EIO;
952 
953 	/* If there are no spare blocks, */
954 	/* we could still continue by erasing/writing the current block,
955 		but for such worn-out media it isn't worth the trouble
956 			and the danger */
957 	if (kfifo_out(&zone->free_sectors,
958 				(unsigned char *)&write_sector, 2) != 2) {
959 		dbg("no free sectors for write!");
960 		return -EIO;
961 	}
962 
963 
964 	if (sm_write_block(ftl, ftl->cache_data, zone_num, write_sector,
965 		ftl->cache_block, ftl->cache_data_invalid_bitmap))
966 			goto restart;
967 
968 	/* Update the FTL table */
969 	zone->lba_to_phys_table[ftl->cache_block] = write_sector;
970 
971 	/* Write successful, so erase and free the old block */
972 	if (block_num > 0)
973 		sm_erase_block(ftl, zone_num, block_num, 1);
974 
975 	sm_cache_init(ftl);
976 	return 0;
977 }
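
/*
 * Flush flow summary: sectors the caller never wrote are first read back
 * from the block currently mapped to the cached LBA, the assembled block
 * is written to a physical block taken from the free-sectors FIFO, the
 * FTL table is repointed, and only then is the old physical block erased
 * and returned to the FIFO.  If the write fails, the restart path above
 * simply retries with the next free block.
 */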
978 
979 
980 /* flush timer, runs a second after last write */
981 static void sm_cache_flush_timer(struct timer_list *t)
982 {
983 	struct sm_ftl *ftl = from_timer(ftl, t, timer);
984 	queue_work(cache_flush_workqueue, &ftl->flush_work);
985 }
986 
987 /* cache flush work, kicked by timer */
988 static void sm_cache_flush_work(struct work_struct *work)
989 {
990 	struct sm_ftl *ftl = container_of(work, struct sm_ftl, flush_work);
991 	mutex_lock(&ftl->mutex);
992 	sm_cache_flush(ftl);
993 	mutex_unlock(&ftl->mutex);
994 	return;
995 }
996 
997 /* ---------------- outside interface -------------------------------------- */
998 
999 /* outside interface: read a sector */
1000 static int sm_read(struct mtd_blktrans_dev *dev,
1001 		   unsigned long sect_no, char *buf)
1002 {
1003 	struct sm_ftl *ftl = dev->priv;
1004 	struct ftl_zone *zone;
1005 	int error = 0, in_cache = 0;
1006 	int zone_num, block, boffset;
1007 
1008 	sm_break_offset(ftl, sect_no << 9, &zone_num, &block, &boffset);
1009 	mutex_lock(&ftl->mutex);
1010 
1011 
1012 	zone = sm_get_zone(ftl, zone_num);
1013 	if (IS_ERR(zone)) {
1014 		error = PTR_ERR(zone);
1015 		goto unlock;
1016 	}
1017 
1018 	/* Have to look at cache first */
1019 	if (ftl->cache_zone == zone_num && ftl->cache_block == block) {
1020 		in_cache = 1;
1021 		if (!sm_cache_get(ftl, buf, boffset))
1022 			goto unlock;
1023 	}
1024 
1025 	/* Translate the block and return if it doesn't exist in the table */
1026 	block = zone->lba_to_phys_table[block];
1027 
1028 	if (block == -1) {
1029 		memset(buf, 0xFF, SM_SECTOR_SIZE);
1030 		goto unlock;
1031 	}
1032 
1033 	if (sm_read_sector(ftl, zone_num, block, boffset, buf, NULL)) {
1034 		error = -EIO;
1035 		goto unlock;
1036 	}
1037 
1038 	if (in_cache)
1039 		sm_cache_put(ftl, buf, boffset);
1040 unlock:
1041 	mutex_unlock(&ftl->mutex);
1042 	return error;
1043 }
1044 
1045 /* outside interface: write a sector */
1046 static int sm_write(struct mtd_blktrans_dev *dev,
1047 				unsigned long sec_no, char *buf)
1048 {
1049 	struct sm_ftl *ftl = dev->priv;
1050 	struct ftl_zone *zone;
1051 	int error = 0, zone_num, block, boffset;
1052 
1053 	BUG_ON(ftl->readonly);
1054 	sm_break_offset(ftl, sec_no << 9, &zone_num, &block, &boffset);
1055 
1056 	/* No need for the flush thread to run now */
1057 	del_timer(&ftl->timer);
1058 	mutex_lock(&ftl->mutex);
1059 
1060 	zone = sm_get_zone(ftl, zone_num);
1061 	if (IS_ERR(zone)) {
1062 		error = PTR_ERR(zone);
1063 		goto unlock;
1064 	}
1065 
1066 	/* If entry is not in cache, flush it */
1067 	if (ftl->cache_block != block || ftl->cache_zone != zone_num) {
1068 
1069 		error = sm_cache_flush(ftl);
1070 		if (error)
1071 			goto unlock;
1072 
1073 		ftl->cache_block = block;
1074 		ftl->cache_zone = zone_num;
1075 	}
1076 
1077 	sm_cache_put(ftl, buf, boffset);
1078 unlock:
1079 	mod_timer(&ftl->timer, jiffies + msecs_to_jiffies(cache_timeout));
1080 	mutex_unlock(&ftl->mutex);
1081 	return error;
1082 }
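
/*
 * Write path note: sm_write() only fills the one-block cache.  The media
 * is written either when a write lands on a different block (the
 * explicit sm_cache_flush() above), or when the timer re-armed at the
 * end of sm_write() fires cache_timeout ms (1000 by default) after the
 * last write and the workqueue flushes the cache.
 */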
1083 
1084 /* outside interface: flush everything */
1085 static int sm_flush(struct mtd_blktrans_dev *dev)
1086 {
1087 	struct sm_ftl *ftl = dev->priv;
1088 	int retval;
1089 
1090 	mutex_lock(&ftl->mutex);
1091 	retval = sm_cache_flush(ftl);
1092 	mutex_unlock(&ftl->mutex);
1093 	return retval;
1094 }
1095 
1096 /* outside interface: device is released */
1097 static void sm_release(struct mtd_blktrans_dev *dev)
1098 {
1099 	struct sm_ftl *ftl = dev->priv;
1100 
1101 	mutex_lock(&ftl->mutex);
1102 	del_timer_sync(&ftl->timer);
1103 	cancel_work_sync(&ftl->flush_work);
1104 	sm_cache_flush(ftl);
1105 	mutex_unlock(&ftl->mutex);
1106 }
1107 
1108 /* outside interface: get geometry */
1109 static int sm_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
1110 {
1111 	struct sm_ftl *ftl = dev->priv;
1112 	geo->heads = ftl->heads;
1113 	geo->sectors = ftl->sectors;
1114 	geo->cylinders = ftl->cylinders;
1115 	return 0;
1116 }
1117 
1118 /* external interface: main initialization function */
1119 static void sm_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
1120 {
1121 	struct mtd_blktrans_dev *trans;
1122 	struct sm_ftl *ftl;
1123 
1124 	/* Allocate & initialize our private structure */
1125 	ftl = kzalloc(sizeof(struct sm_ftl), GFP_KERNEL);
1126 	if (!ftl)
1127 		goto error1;
1128 
1129 
1130 	mutex_init(&ftl->mutex);
1131 	timer_setup(&ftl->timer, sm_cache_flush_timer, 0);
1132 	INIT_WORK(&ftl->flush_work, sm_cache_flush_work);
1133 
1134 	/* Read media information */
1135 	if (sm_get_media_info(ftl, mtd)) {
1136 		dbg("found unsupported mtd device, aborting");
1137 		goto error2;
1138 	}
1139 
1140 
1141 	/* Allocate temporary CIS buffer for read retry support */
1142 	ftl->cis_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL);
1143 	if (!ftl->cis_buffer)
1144 		goto error2;
1145 
1146 	/* Allocate zone array, it will be initialized on demand */
1147 	ftl->zones = kcalloc(ftl->zone_count, sizeof(struct ftl_zone),
1148 								GFP_KERNEL);
1149 	if (!ftl->zones)
1150 		goto error3;
1151 
1152 	/* Allocate the cache */
1153 	ftl->cache_data = kzalloc(ftl->block_size, GFP_KERNEL);
1154 
1155 	if (!ftl->cache_data)
1156 		goto error4;
1157 
1158 	sm_cache_init(ftl);
1159 
1160 
1161 	/* Allocate upper layer structure and initialize it */
1162 	trans = kzalloc(sizeof(struct mtd_blktrans_dev), GFP_KERNEL);
1163 	if (!trans)
1164 		goto error5;
1165 
1166 	ftl->trans = trans;
1167 	trans->priv = ftl;
1168 
1169 	trans->tr = tr;
1170 	trans->mtd = mtd;
1171 	trans->devnum = -1;
1172 	trans->size = (ftl->block_size * ftl->max_lba * ftl->zone_count) >> 9;
1173 	trans->readonly = ftl->readonly;
1174 
1175 	if (sm_find_cis(ftl)) {
1176 		dbg("CIS not found on mtd device, aborting");
1177 		goto error6;
1178 	}
1179 
1180 	ftl->disk_attributes = sm_create_sysfs_attributes(ftl);
1181 	if (!ftl->disk_attributes)
1182 		goto error6;
1183 	trans->disk_attributes = ftl->disk_attributes;
1184 
1185 	sm_printk("Found %d MiB xD/SmartMedia FTL on mtd%d",
1186 		(int)(mtd->size / (1024 * 1024)), mtd->index);
1187 
1188 	dbg("FTL layout:");
1189 	dbg("%d zone(s), each consists of %d blocks (+%d spares)",
1190 		ftl->zone_count, ftl->max_lba,
1191 		ftl->zone_size - ftl->max_lba);
1192 	dbg("each block consists of %d bytes",
1193 		ftl->block_size);
1194 
1195 	/* Register device */
1196 	/* Register device*/
1197 	if (add_mtd_blktrans_dev(trans)) {
1198 		dbg("error in mtdblktrans layer");
1199 		goto error6;
1200 	}
1201 	return;
1202 error6:
1203 	kfree(trans);
1204 error5:
1205 	kfree(ftl->cache_data);
1206 error4:
1207 	kfree(ftl->zones);
1208 error3:
1209 	kfree(ftl->cis_buffer);
1210 error2:
1211 	kfree(ftl);
1212 error1:
1213 	return;
1214 }
1215 
1216 /* main interface: device {surprise,} removal */
1217 static void sm_remove_dev(struct mtd_blktrans_dev *dev)
1218 {
1219 	struct sm_ftl *ftl = dev->priv;
1220 	int i;
1221 
1222 	del_mtd_blktrans_dev(dev);
1223 	ftl->trans = NULL;
1224 
1225 	for (i = 0 ; i < ftl->zone_count; i++) {
1226 
1227 		if (!ftl->zones[i].initialized)
1228 			continue;
1229 
1230 		kfree(ftl->zones[i].lba_to_phys_table);
1231 		kfifo_free(&ftl->zones[i].free_sectors);
1232 	}
1233 
1234 	sm_delete_sysfs_attributes(ftl);
1235 	kfree(ftl->cis_buffer);
1236 	kfree(ftl->zones);
1237 	kfree(ftl->cache_data);
1238 	kfree(ftl);
1239 }
1240 
1241 static struct mtd_blktrans_ops sm_ftl_ops = {
1242 	.name		= "smblk",
1243 	.major		= 0,
1244 	.part_bits	= SM_FTL_PARTN_BITS,
1245 	.blksize	= SM_SECTOR_SIZE,
1246 	.getgeo		= sm_getgeo,
1247 
1248 	.add_mtd	= sm_add_mtd,
1249 	.remove_dev	= sm_remove_dev,
1250 
1251 	.readsect	= sm_read,
1252 	.writesect	= sm_write,
1253 
1254 	.flush		= sm_flush,
1255 	.release	= sm_release,
1256 
1257 	.owner		= THIS_MODULE,
1258 };
1259 
1260 static __init int sm_module_init(void)
1261 {
1262 	int error = 0;
1263 
1264 	cache_flush_workqueue = create_freezable_workqueue("smflush");
1265 	if (!cache_flush_workqueue)
1266 		return -ENOMEM;
1267 
1268 	error = register_mtd_blktrans(&sm_ftl_ops);
1269 	if (error)
1270 		destroy_workqueue(cache_flush_workqueue);
1271 	return error;
1272 
1273 }
1274 
1275 static void __exit sm_module_exit(void)
1276 {
1277 	destroy_workqueue(cache_flush_workqueue);
1278 	deregister_mtd_blktrans(&sm_ftl_ops);
1279 }
1280 
1281 module_init(sm_module_init);
1282 module_exit(sm_module_exit);
1283 
1284 MODULE_LICENSE("GPL");
1285 MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
1286 MODULE_DESCRIPTION("Smartmedia/xD mtd translation layer");
1287