xref: /openbmc/linux/drivers/mtd/devices/block2mtd.c (revision 1da177e4)
/*
 * $Id: block2mtd.c,v 1.23 2005/01/05 17:05:46 dwmw2 Exp $
 *
 * block2mtd.c - create an mtd from a block device
 *
 * Copyright (C) 2001,2002	Simon Evans <spse@secret.org.uk>
 * Copyright (C) 2004		Gareth Bult <Gareth@Encryptec.net>
 * Copyright (C) 2004,2005	Jörn Engel <joern@wh.fh-wedel.de>
 *
 * Licence: GPL
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/buffer_head.h>

#define VERSION "$Revision: 1.23 $"


#define ERROR(fmt, args...) printk(KERN_ERR "block2mtd: " fmt "\n" , ## args)
#define INFO(fmt, args...) printk(KERN_INFO "block2mtd: " fmt "\n" , ## args)

/* Info for the block device */
struct block2mtd_dev {
	struct list_head list;
	struct block_device *blkdev;
	struct mtd_info mtd;
	struct semaphore write_mutex;
};


/* Static info about the MTD, used in cleanup_module */
static LIST_HEAD(blkmtd_device_list);

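/*
 * All reads, writes and erases below go through the page cache of the
 * underlying block device.  cache_readahead() speculatively pulls in up
 * to PAGE_READAHEAD pages beyond the requested index so that the
 * read_cache_page() calls issued by page_readahead() usually find their
 * data already resident.
 */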
#define PAGE_READAHEAD 64
static void cache_readahead(struct address_space *mapping, int index)
{
	filler_t *filler = (filler_t*)mapping->a_ops->readpage;
	int i, pagei;
	unsigned ret = 0;
	unsigned long end_index;
	struct page *page;
	LIST_HEAD(page_pool);
	struct inode *inode = mapping->host;
	loff_t isize = i_size_read(inode);

	if (!isize) {
		INFO("iSize=0 in cache_readahead");
		return;
	}

	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

	read_lock_irq(&mapping->tree_lock);
	for (i = 0; i < PAGE_READAHEAD; i++) {
		pagei = index + i;
		if (pagei > end_index) {
			INFO("Overrun end of disk in cache readahead");
			break;
		}
		page = radix_tree_lookup(&mapping->page_tree, pagei);
		if (page && (!i))
			break;
		if (page)
			continue;
		read_unlock_irq(&mapping->tree_lock);
		page = page_cache_alloc_cold(mapping);
		read_lock_irq(&mapping->tree_lock);
		if (!page)
			break;
		page->index = pagei;
		list_add(&page->lru, &page_pool);
		ret++;
	}
	read_unlock_irq(&mapping->tree_lock);
	if (ret)
		read_cache_pages(mapping, &page_pool, filler, NULL);
}

static struct page* page_readahead(struct address_space *mapping, int index)
{
	filler_t *filler = (filler_t*)mapping->a_ops->readpage;
	//do_page_cache_readahead(mapping, index, XXX, 64);
	cache_readahead(mapping, index);
	return read_cache_page(mapping, index, filler, NULL);
}

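/*
 * Note on erase semantics: "erasing" only means making the affected
 * page-cache pages read as all 0xff.  A page whose contents are already
 * all-ones is left untouched, so erasing an already-clean region does
 * not dirty any pages and causes no writeback.
 */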
/* erase a specified part of the device */
static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
{
	struct address_space *mapping = dev->blkdev->bd_inode->i_mapping;
	struct page *page;
	int index = to >> PAGE_SHIFT;	// page index
	int pages = len >> PAGE_SHIFT;
	u_long *p;
	u_long *max;

	while (pages) {
		page = page_readahead(mapping, index);
		if (!page)
			return -ENOMEM;
		if (IS_ERR(page))
			return PTR_ERR(page);

		/* max is in u_long units, so it covers exactly one page */
		max = (u_long*)page_address(page) + PAGE_SIZE / sizeof(u_long);
		for (p=(u_long*)page_address(page); p<max; p++)
			if (*p != -1UL) {
				lock_page(page);
				memset(page_address(page), 0xff, PAGE_SIZE);
				set_page_dirty(page);
				unlock_page(page);
				break;
			}

		page_cache_release(page);
		pages--;
		index++;
	}
	return 0;
}
static int block2mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct block2mtd_dev *dev = mtd->priv;
	size_t from = instr->addr;
	size_t len = instr->len;
	int err;

	instr->state = MTD_ERASING;
	down(&dev->write_mutex);
	err = _block2mtd_erase(dev, from, len);
	up(&dev->write_mutex);
	if (err) {
		ERROR("erase failed err = %d", err);
		instr->state = MTD_ERASE_FAILED;
	} else
		instr->state = MTD_ERASE_DONE;

	mtd_erase_callback(instr);
	return err;
}

static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, u_char *buf)
{
	struct block2mtd_dev *dev = mtd->priv;
	struct page *page;
	int index = from >> PAGE_SHIFT;
	int offset = from & (PAGE_SIZE-1);	// offset within the page
	int cpylen;

	if (from > mtd->size)
		return -EINVAL;
	if (from + len > mtd->size)
		len = mtd->size - from;

	if (retlen)
		*retlen = 0;

	while (len) {
		if ((offset + len) > PAGE_SIZE)
			cpylen = PAGE_SIZE - offset;	// multiple pages
		else
			cpylen = len;	// this page
		len = len - cpylen;

		//	Get page
		page = page_readahead(dev->blkdev->bd_inode->i_mapping, index);
		if (!page)
			return -ENOMEM;
		if (IS_ERR(page))
			return PTR_ERR(page);

		memcpy(buf, page_address(page) + offset, cpylen);
		page_cache_release(page);

		if (retlen)
			*retlen += cpylen;
		buf += cpylen;
		offset = 0;
		index++;
	}
	return 0;
}

/* write data to the underlying device */
static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
		loff_t to, size_t len, size_t *retlen)
{
	struct page *page;
	struct address_space *mapping = dev->blkdev->bd_inode->i_mapping;
	int index = to >> PAGE_SHIFT;	// page index
	int offset = to & ~PAGE_MASK;	// page offset
	int cpylen;

	if (retlen)
		*retlen = 0;
	while (len) {
		if ((offset+len) > PAGE_SIZE)
			cpylen = PAGE_SIZE - offset;	// multiple pages
		else
			cpylen = len;			// this page
		len = len - cpylen;

		//	Get page
		page = page_readahead(mapping, index);
		if (!page)
			return -ENOMEM;
		if (IS_ERR(page))
			return PTR_ERR(page);

		if (memcmp(page_address(page)+offset, buf, cpylen)) {
			lock_page(page);
			memcpy(page_address(page) + offset, buf, cpylen);
			set_page_dirty(page);
			unlock_page(page);
		}
		page_cache_release(page);

		if (retlen)
			*retlen += cpylen;

		buf += cpylen;
		offset = 0;
		index++;
	}
	return 0;
}
static int block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct block2mtd_dev *dev = mtd->priv;
	int err;

	if (!len)
		return 0;
	if (to >= mtd->size)
		return -ENOSPC;
	if (to + len > mtd->size)
		len = mtd->size - to;

	down(&dev->write_mutex);
	err = _block2mtd_write(dev, buf, to, len, retlen);
	up(&dev->write_mutex);
	if (err > 0)
		err = 0;
	return err;
}

/* sync the device - wait until the write queue is empty */
static void block2mtd_sync(struct mtd_info *mtd)
{
	struct block2mtd_dev *dev = mtd->priv;
	sync_blockdev(dev->blkdev);
}

static void block2mtd_free_device(struct block2mtd_dev *dev)
{
	if (!dev)
		return;

	kfree(dev->mtd.name);

	if (dev->blkdev) {
		invalidate_inode_pages(dev->blkdev->bd_inode->i_mapping);
		close_bdev_excl(dev->blkdev);
	}

	kfree(dev);
}

/* FIXME: ensure that mtd->size % erase_size == 0 */
static struct block2mtd_dev *add_device(char *devname, int erase_size)
{
	struct block_device *bdev;
	struct block2mtd_dev *dev;

	if (!devname)
		return NULL;

	dev = kmalloc(sizeof(struct block2mtd_dev), GFP_KERNEL);
	if (!dev)
		return NULL;
	memset(dev, 0, sizeof(*dev));

	/* Get a handle on the device */
	bdev = open_bdev_excl(devname, O_RDWR, NULL);
	if (IS_ERR(bdev)) {
		ERROR("error: cannot open device %s", devname);
		goto devinit_err;
	}
	dev->blkdev = bdev;

	if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
		ERROR("attempting to use an MTD device as a block device");
		goto devinit_err;
	}

	init_MUTEX(&dev->write_mutex);

	/* Setup the MTD structure */
	/* make the name contain the block device name */
	dev->mtd.name = kmalloc(sizeof("block2mtd: ") + strlen(devname),
			GFP_KERNEL);
	if (!dev->mtd.name)
		goto devinit_err;

	sprintf(dev->mtd.name, "block2mtd: %s", devname);

	dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
	dev->mtd.erasesize = erase_size;
	dev->mtd.type = MTD_RAM;
	dev->mtd.flags = MTD_CAP_RAM;
	dev->mtd.erase = block2mtd_erase;
	dev->mtd.write = block2mtd_write;
	dev->mtd.writev = default_mtd_writev;
	dev->mtd.sync = block2mtd_sync;
	dev->mtd.read = block2mtd_read;
	dev->mtd.readv = default_mtd_readv;
	dev->mtd.priv = dev;
	dev->mtd.owner = THIS_MODULE;

	if (add_mtd_device(&dev->mtd)) {
		/* Device didn't get added, so free the entry */
		goto devinit_err;
	}
	list_add(&dev->list, &blkmtd_device_list);
	INFO("mtd%d: [%s] erase_size = %dKiB [%d]", dev->mtd.index,
			dev->mtd.name + strlen("block2mtd: "),
			dev->mtd.erasesize >> 10, dev->mtd.erasesize);
	return dev;

devinit_err:
	block2mtd_free_device(dev);
	return NULL;
}

static int ustrtoul(const char *cp, char **endp, unsigned int base)
{
	unsigned long result = simple_strtoul(cp, endp, base);
	switch (**endp) {
	case 'G' :
		result *= 1024;
		/* fall through */
	case 'M':
		result *= 1024;
		/* fall through */
	case 'k':
		result *= 1024;
	/* By dwmw2 editorial decree, "ki", "Mi" or "Gi" are to be used. */
		if ((*endp)[1] == 'i')
			(*endp) += 2;
	}
	return result;
}

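/*
 * Worked examples for the size parsing above (the bare suffix must be
 * followed by 'i', otherwise parse_num32() below rejects the leftover
 * character):
 *
 *   "4096"  -> 4096
 *   "128ki" -> 131072    (128 * 1024)
 *   "4Mi"   -> 4194304   (4 * 1024 * 1024)
 */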
static int parse_num32(u32 *num32, const char *token)
{
	char *endp;
	unsigned long n;

	n = ustrtoul(token, &endp, 0);
	if (*endp)
		return -EINVAL;

	*num32 = n;
	return 0;
}

static int parse_name(char **pname, const char *token, size_t limit)
{
	size_t len;
	char *name;

	len = strlen(token) + 1;
	if (len > limit)
		return -ENOSPC;

	name = kmalloc(len, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	strcpy(name, token);

	*pname = name;
	return 0;
}

static inline void kill_final_newline(char *str)
{
	char *newline = strrchr(str, '\n');
	if (newline && !newline[1])
		*newline = 0;
}

/* ERROR() already adds the "block2mtd: " prefix and a newline */
#define parse_err(fmt, args...) do {	\
	ERROR(fmt, ## args);		\
	return 0;			\
} while (0)

static int block2mtd_setup(const char *val, struct kernel_param *kp)
{
	char buf[80+12], *str=buf; /* 80 for device, 12 for erase size */
	char *token[2];
	char *name;
	u32 erase_size = PAGE_SIZE;
	int i, ret;

	if (strnlen(val, sizeof(buf)) >= sizeof(buf))
		parse_err("parameter too long");

	strcpy(str, val);
	kill_final_newline(str);

	for (i=0; i<2; i++)
		token[i] = strsep(&str, ",");

	if (str)
		parse_err("too many arguments");

	if (!token[0])
		parse_err("no argument");

	ret = parse_name(&name, token[0], 80);
	if (ret == -ENOMEM)
		parse_err("out of memory");
	if (ret == -ENOSPC)
		parse_err("name too long");
	if (ret)
		return 0;

	if (token[1]) {
		ret = parse_num32(&erase_size, token[1]);
		if (ret)
			parse_err("illegal erase size");
	}

	add_device(name, erase_size);

	return 0;
}

module_param_call(block2mtd, block2mtd_setup, NULL, NULL, 0200);
MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>]\"");

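/*
 * Example invocation (the device path is purely illustrative):
 *
 *   modprobe block2mtd block2mtd=/dev/sdb1,64ki
 *
 * registers an MTD_RAM device backed by /dev/sdb1 with a 64KiB erase
 * size; if no erase size is given, PAGE_SIZE is used.  When the driver
 * is built in, the same string can be passed on the kernel command line
 * as block2mtd.block2mtd=<dev>[,<erasesize>].
 */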
static int __init block2mtd_init(void)
{
	INFO("version " VERSION);
	return 0;
}

static void __exit block2mtd_exit(void)
{
	struct list_head *pos, *next;

	/* Remove the MTD devices */
	list_for_each_safe(pos, next, &blkmtd_device_list) {
		struct block2mtd_dev *dev = list_entry(pos, typeof(*dev), list);
		block2mtd_sync(&dev->mtd);
		del_mtd_device(&dev->mtd);
		INFO("mtd%d: [%s] removed", dev->mtd.index,
				dev->mtd.name + strlen("block2mtd: "));
		list_del(&dev->list);
		block2mtd_free_device(dev);
	}
}

module_init(block2mtd_init);
module_exit(block2mtd_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Simon Evans <spse@secret.org.uk> and others");
MODULE_DESCRIPTION("Emulate an MTD using a block device");