/*
 * Simple MTD partitioning layer
 *
 * Copyright © 2000 Nicolas Pitre <nico@fluxnic.net>
 * Copyright © 2002 Thomas Gleixner <gleixner@linutronix.de>
 * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 *
 */

#ifndef __UBOOT__
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kmod.h>
#endif

#include <common.h>
#include <malloc.h>
#include <asm/errno.h>
#include <linux/compat.h>
#include <ubi_uboot.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/err.h>

#include "mtdcore.h"

/* Our partition linked list */
static LIST_HEAD(mtd_partitions);
#ifndef __UBOOT__
static DEFINE_MUTEX(mtd_partitions_mutex);
#else
DEFINE_MUTEX(mtd_partitions_mutex);
#endif

/* Our partition node structure */
struct mtd_part {
	struct mtd_info mtd;
	struct mtd_info *master;
	uint64_t offset;
	struct list_head list;
};

/*
 * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
 * the pointer to that structure with this macro.
 */
#define PART(x)  ((struct mtd_part *)(x))
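
/*
 * Illustrative note (not part of this driver): the PART() cast is only valid
 * because the embedded 'mtd' is the first member of struct mtd_part. A
 * container_of()-based helper expressing the same relationship, which would
 * also survive the member being moved, could look like this (later kernels
 * adopted such a helper):
 *
 *	static inline struct mtd_part *mtd_to_part(struct mtd_info *mtd)
 *	{
 *		return container_of(mtd, struct mtd_part, mtd);
 *	}
 */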


#ifdef __UBOOT__
/* from mm/util.c */

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
#endif
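
/*
 * Usage sketch (illustrative only): duplicating a caller-supplied name so the
 * copy can be kfree()d independently of its source, exactly as
 * allocate_partition() below does with part->name.
 *
 *	char *name = kstrdup(part->name, GFP_KERNEL);
 *	if (!name)
 *		return -ENOMEM;
 */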

/*
 * MTD methods which simply translate the effective address and pass through
 * to the _real_ device.
 */

static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	struct mtd_ecc_stats stats;
	int res;

	stats = part->master->ecc_stats;
	res = part->master->_read(part->master, from + part->offset, len,
				  retlen, buf);
	if (unlikely(mtd_is_eccerr(res)))
		mtd->ecc_stats.failed +=
			part->master->ecc_stats.failed - stats.failed;
	else
		mtd->ecc_stats.corrected +=
			part->master->ecc_stats.corrected - stats.corrected;
	return res;
}
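
/*
 * Caller-side sketch (illustrative only, not part of this driver): mtd_read()
 * on a partition reports correctable bitflips as -EUCLEAN and uncorrectable
 * sectors as -EBADMSG; the delta bookkeeping above is what keeps the
 * per-partition ecc_stats consistent with these return codes.
 *
 *	size_t retlen;
 *	int err = mtd_read(mtd, 0, len, &retlen, buf);
 *
 *	if (mtd_is_bitflip(err))
 *		err = 0;	(data was corrected; consider scrubbing)
 *	else if (mtd_is_eccerr(err))
 *		return err;	(data is unreliable)
 */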

#ifndef __UBOOT__
static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, void **virt, resource_size_t *phys)
{
	struct mtd_part *part = PART(mtd);

	return part->master->_point(part->master, from + part->offset, len,
				    retlen, virt, phys);
}

static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_part *part = PART(mtd);

	return part->master->_unpoint(part->master, from + part->offset, len);
}
#endif

static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
					    unsigned long len,
					    unsigned long offset,
					    unsigned long flags)
{
	struct mtd_part *part = PART(mtd);

	offset += part->offset;
	return part->master->_get_unmapped_area(part->master, len, offset,
						flags);
}

static int part_read_oob(struct mtd_info *mtd, loff_t from,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = PART(mtd);
	int res;

	if (from >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && from + ops->len > mtd->size)
		return -EINVAL;

	/*
	 * If OOB is also requested, make sure that we do not read past the end
	 * of this partition.
	 */
	if (ops->oobbuf) {
		size_t len, pages;

		if (ops->mode == MTD_OPS_AUTO_OOB)
			len = mtd->oobavail;
		else
			len = mtd->oobsize;
		pages = mtd_div_by_ws(mtd->size, mtd);
		pages -= mtd_div_by_ws(from, mtd);
		if (ops->ooboffs + ops->ooblen > pages * len)
			return -EINVAL;
	}

	res = part->master->_read_oob(part->master, from + part->offset, ops);
	if (unlikely(res)) {
		if (mtd_is_bitflip(res))
			mtd->ecc_stats.corrected++;
		if (mtd_is_eccerr(res))
			mtd->ecc_stats.failed++;
	}
	return res;
}
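
/*
 * Caller-side sketch (illustrative only): the bounds checks above operate on
 * a struct mtd_oob_ops filled in roughly like this before calling
 * mtd_read_oob():
 *
 *	struct mtd_oob_ops ops = {
 *		.mode	 = MTD_OPS_AUTO_OOB,	(only the free OOB bytes)
 *		.datbuf	 = NULL,		(OOB only, no main-area data)
 *		.oobbuf	 = oob_buf,
 *		.ooblen	 = mtd->oobavail,
 *		.ooboffs = 0,
 *	};
 *	int err = mtd_read_oob(mtd, page_addr, &ops);
 *
 * With .datbuf set, .len is checked against the partition size; with .oobbuf
 * set, .ooboffs + .ooblen must fit in the OOB space of the remaining pages.
 */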

static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_read_user_prot_reg(part->master, from, len,
						 retlen, buf);
}

static int part_get_user_prot_info(struct mtd_info *mtd, size_t len,
				   size_t *retlen, struct otp_info *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_get_user_prot_info(part->master, len, retlen,
						 buf);
}

static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_read_fact_prot_reg(part->master, from, len,
						 retlen, buf);
}

static int part_get_fact_prot_info(struct mtd_info *mtd, size_t len,
				   size_t *retlen, struct otp_info *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_get_fact_prot_info(part->master, len, retlen,
						 buf);
}

static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_write(part->master, to + part->offset, len,
				    retlen, buf);
}

static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_panic_write(part->master, to + part->offset, len,
					  retlen, buf);
}

static int part_write_oob(struct mtd_info *mtd, loff_t to,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = PART(mtd);

	if (to >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && to + ops->len > mtd->size)
		return -EINVAL;
	return part->master->_write_oob(part->master, to + part->offset, ops);
}

static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_write_user_prot_reg(part->master, from, len,
						  retlen, buf);
}

static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_lock_user_prot_reg(part->master, from, len);
}

#ifndef __UBOOT__
static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_writev(part->master, vecs, count,
				     to + part->offset, retlen);
}
#endif

static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_part *part = PART(mtd);
	int ret;

	instr->addr += part->offset;
	ret = part->master->_erase(part->master, instr);
	if (ret) {
		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}
	return ret;
}

void mtd_erase_callback(struct erase_info *instr)
{
	if (instr->mtd->_erase == part_erase) {
		struct mtd_part *part = PART(instr->mtd);

		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}
	if (instr->callback)
		instr->callback(instr);
}
EXPORT_SYMBOL_GPL(mtd_erase_callback);
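
/*
 * Caller-side sketch (illustrative only): part_erase() rebases instr->addr
 * into the master's address space, and mtd_erase_callback() translates addr
 * and fail_addr back before any completion callback runs, so callers deal
 * exclusively in partition-relative offsets:
 *
 *	struct erase_info instr = {
 *		.mtd  = mtd,
 *		.addr = block * mtd->erasesize,	(partition-relative)
 *		.len  = mtd->erasesize,
 *	};
 *	int err = mtd_erase(mtd, &instr);
 *
 * On failure, instr.fail_addr (unless it is MTD_FAIL_ADDR_UNKNOWN) is again
 * relative to this partition.
 */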

static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_lock(part->master, ofs + part->offset, len);
}

static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_unlock(part->master, ofs + part->offset, len);
}

static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_is_locked(part->master, ofs + part->offset, len);
}

static void part_sync(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	part->master->_sync(part->master);
}

#ifndef __UBOOT__
static int part_suspend(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_suspend(part->master);
}

static void part_resume(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	part->master->_resume(part->master);
}
#endif

static int part_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = PART(mtd);
	ofs += part->offset;
	return part->master->_block_isreserved(part->master, ofs);
}

static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = PART(mtd);
	ofs += part->offset;
	return part->master->_block_isbad(part->master, ofs);
}

static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = PART(mtd);
	int res;

	ofs += part->offset;
	res = part->master->_block_markbad(part->master, ofs);
	if (!res)
		mtd->ecc_stats.badblocks++;
	return res;
}

static inline void free_partition(struct mtd_part *p)
{
	kfree(p->mtd.name);
	kfree(p);
}

/*
 * This function unregisters and destroys all slave MTD objects which are
 * attached to the given master MTD object.
 */

int del_mtd_partitions(struct mtd_info *master)
{
	struct mtd_part *slave, *next;
	int ret, err = 0;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if (slave->master == master) {
			ret = del_mtd_device(&slave->mtd);
			if (ret < 0) {
				err = ret;
				continue;
			}
			list_del(&slave->list);
			free_partition(slave);
		}
	mutex_unlock(&mtd_partitions_mutex);

	return err;
}

static struct mtd_part *allocate_partition(struct mtd_info *master,
			const struct mtd_partition *part, int partno,
			uint64_t cur_offset)
{
	struct mtd_part *slave;
	char *name;

	/* allocate the partition structure */
	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	name = kstrdup(part->name, GFP_KERNEL);
	if (!name || !slave) {
		printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
		       master->name);
		kfree(name);
		kfree(slave);
		return ERR_PTR(-ENOMEM);
	}

	/* set up the MTD object for this partition */
	slave->mtd.type = master->type;
	slave->mtd.flags = master->flags & ~part->mask_flags;
	slave->mtd.size = part->size;
	slave->mtd.writesize = master->writesize;
	slave->mtd.writebufsize = master->writebufsize;
	slave->mtd.oobsize = master->oobsize;
	slave->mtd.oobavail = master->oobavail;
	slave->mtd.subpage_sft = master->subpage_sft;

	slave->mtd.name = name;
	slave->mtd.owner = master->owner;
#ifndef __UBOOT__
	slave->mtd.backing_dev_info = master->backing_dev_info;

	/* NOTE:  we don't arrange MTDs as a tree; it'd be error-prone
	 * to have the same data be in two different partitions.
	 */
	slave->mtd.dev.parent = master->dev.parent;
#endif

	slave->mtd._read = part_read;
	slave->mtd._write = part_write;

	if (master->_panic_write)
		slave->mtd._panic_write = part_panic_write;

#ifndef __UBOOT__
	if (master->_point && master->_unpoint) {
		slave->mtd._point = part_point;
		slave->mtd._unpoint = part_unpoint;
	}
#endif

	if (master->_get_unmapped_area)
		slave->mtd._get_unmapped_area = part_get_unmapped_area;
	if (master->_read_oob)
		slave->mtd._read_oob = part_read_oob;
	if (master->_write_oob)
		slave->mtd._write_oob = part_write_oob;
	if (master->_read_user_prot_reg)
		slave->mtd._read_user_prot_reg = part_read_user_prot_reg;
	if (master->_read_fact_prot_reg)
		slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg;
	if (master->_write_user_prot_reg)
		slave->mtd._write_user_prot_reg = part_write_user_prot_reg;
	if (master->_lock_user_prot_reg)
		slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg;
	if (master->_get_user_prot_info)
		slave->mtd._get_user_prot_info = part_get_user_prot_info;
	if (master->_get_fact_prot_info)
		slave->mtd._get_fact_prot_info = part_get_fact_prot_info;
	if (master->_sync)
		slave->mtd._sync = part_sync;
#ifndef __UBOOT__
	if (!partno && !master->dev.class && master->_suspend &&
	    master->_resume) {
		slave->mtd._suspend = part_suspend;
		slave->mtd._resume = part_resume;
	}
	if (master->_writev)
		slave->mtd._writev = part_writev;
#endif
	if (master->_lock)
		slave->mtd._lock = part_lock;
	if (master->_unlock)
		slave->mtd._unlock = part_unlock;
	if (master->_is_locked)
		slave->mtd._is_locked = part_is_locked;
	if (master->_block_isreserved)
		slave->mtd._block_isreserved = part_block_isreserved;
	if (master->_block_isbad)
		slave->mtd._block_isbad = part_block_isbad;
	if (master->_block_markbad)
		slave->mtd._block_markbad = part_block_markbad;
	slave->mtd._erase = part_erase;
	slave->master = master;
	slave->offset = part->offset;

	if (slave->offset == MTDPART_OFS_APPEND)
		slave->offset = cur_offset;
	if (slave->offset == MTDPART_OFS_NXTBLK) {
		slave->offset = cur_offset;
		if (mtd_mod_by_eb(cur_offset, master) != 0) {
			/* Round up to next erasesize */
			slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
			debug("Moving partition %d: "
			       "0x%012llx -> 0x%012llx\n", partno,
			       (unsigned long long)cur_offset, (unsigned long long)slave->offset);
		}
	}
	if (slave->offset == MTDPART_OFS_RETAIN) {
		slave->offset = cur_offset;
		if (master->size - slave->offset >= slave->mtd.size) {
			slave->mtd.size = master->size - slave->offset
							- slave->mtd.size;
		} else {
			debug("mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
				part->name, master->size - slave->offset,
				slave->mtd.size);
			/* register to preserve ordering */
			goto out_register;
		}
	}
	if (slave->mtd.size == MTDPART_SIZ_FULL)
		slave->mtd.size = master->size - slave->offset;

	debug("0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
		(unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);

	/* let's do some sanity checks */
	if (slave->offset >= master->size) {
		/* let's register it anyway to preserve ordering */
		slave->offset = 0;
		slave->mtd.size = 0;
		printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
			part->name);
		goto out_register;
	}
	if (slave->offset + slave->mtd.size > master->size) {
		slave->mtd.size = master->size - slave->offset;
		printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
			part->name, master->name, (unsigned long long)slave->mtd.size);
	}
	if (master->numeraseregions > 1) {
		/* Deal with variable erase size stuff */
		int i, max = master->numeraseregions;
		u64 end = slave->offset + slave->mtd.size;
		struct mtd_erase_region_info *regions = master->eraseregions;

		/* Find the first erase region that is part of this
		 * partition. */
		for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
			;
		/* The loop searched for the region _behind_ the first one */
		if (i > 0)
			i--;

		/* Pick biggest erasesize */
		for (; i < max && regions[i].offset < end; i++) {
			if (slave->mtd.erasesize < regions[i].erasesize) {
				slave->mtd.erasesize = regions[i].erasesize;
			}
		}
		BUG_ON(slave->mtd.erasesize == 0);
	} else {
		/* Single erase size */
		slave->mtd.erasesize = master->erasesize;
	}

	if ((slave->mtd.flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->offset, &slave->mtd)) {
		/* Doesn't start on a boundary of major erase size */
		/* FIXME: Let it be writable if it is on a boundary of
		 * _minor_ erase size though */
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
			part->name);
	}
	if ((slave->mtd.flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) {
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
			part->name);
	}

	slave->mtd.ecclayout = master->ecclayout;
	slave->mtd.ecc_step_size = master->ecc_step_size;
	slave->mtd.ecc_strength = master->ecc_strength;
	slave->mtd.bitflip_threshold = master->bitflip_threshold;

	if (master->_block_isbad) {
		uint64_t offs = 0;

		while (offs < slave->mtd.size) {
			if (mtd_block_isbad(master, offs + slave->offset))
				slave->mtd.ecc_stats.badblocks++;
			offs += slave->mtd.erasesize;
		}
	}

out_register:
	return slave;
}

#ifndef __UBOOT__
int mtd_add_partition(struct mtd_info *master, const char *name,
		      long long offset, long long length)
{
	struct mtd_partition part;
	struct mtd_part *p, *new;
	uint64_t start, end;
	int ret = 0;

	/* a concrete offset is required; the magic placement constants
	 * cannot be used here */
	if (offset == MTDPART_OFS_APPEND ||
	    offset == MTDPART_OFS_NXTBLK)
		return -EINVAL;

	if (length == MTDPART_SIZ_FULL)
		length = master->size - offset;

	if (length <= 0)
		return -EINVAL;

	part.name = name;
	part.size = length;
	part.offset = offset;
	part.mask_flags = 0;
	part.ecclayout = NULL;

	new = allocate_partition(master, &part, -1, offset);
	if (IS_ERR(new))
		return PTR_ERR(new);

	start = offset;
	end = offset + length;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry(p, &mtd_partitions, list)
		if (p->master == master) {
			if ((start >= p->offset) &&
			    (start < (p->offset + p->mtd.size)))
				goto err_inv;

			if ((end >= p->offset) &&
			    (end < (p->offset + p->mtd.size)))
				goto err_inv;
		}

	list_add(&new->list, &mtd_partitions);
	mutex_unlock(&mtd_partitions_mutex);

	add_mtd_device(&new->mtd);

	return ret;
err_inv:
	mutex_unlock(&mtd_partitions_mutex);
	free_partition(new);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(mtd_add_partition);

int mtd_del_partition(struct mtd_info *master, int partno)
{
	struct mtd_part *slave, *next;
	int ret = -EINVAL;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if ((slave->master == master) &&
		    (slave->mtd.index == partno)) {
			ret = del_mtd_device(&slave->mtd);
			if (ret < 0)
				break;

			list_del(&slave->list);
			free_partition(slave);
			break;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_del_partition);
#endif
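
/*
 * Usage sketch (Linux-only path above; the names are illustrative): carve a
 * runtime partition out of a master device, then remove it again by its mtd
 * index. MTDPART_OFS_APPEND and MTDPART_OFS_NXTBLK are rejected by
 * mtd_add_partition() because a concrete offset is required, while
 * MTDPART_SIZ_FULL claims everything from the offset to the end of the
 * device.
 *
 *	err = mtd_add_partition(master, "scratch", 4 * 1024 * 1024,
 *				MTDPART_SIZ_FULL);
 *	err = mtd_del_partition(master, partno);
 */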

/*
 * This function, given a master MTD object and a partition table, creates
 * and registers slave MTD objects which are bound to the master according to
 * the partition definitions.
 *
 * We don't register the master, or expect the caller to have done so,
 * for reasons of data integrity.
 */

int add_mtd_partitions(struct mtd_info *master,
		       const struct mtd_partition *parts,
		       int nbparts)
{
	struct mtd_part *slave;
	uint64_t cur_offset = 0;
	int i;

#ifdef __UBOOT__
	/*
	 * Need to init the list here, since the static LIST_HEAD()
	 * initializer does not work on platforms where relocation has
	 * problems (like MIPS & PPC).
	 */
	if (mtd_partitions.next == NULL)
		INIT_LIST_HEAD(&mtd_partitions);
#endif

	debug("Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);

	for (i = 0; i < nbparts; i++) {
		slave = allocate_partition(master, parts + i, i, cur_offset);
		if (IS_ERR(slave))
			return PTR_ERR(slave);

		mutex_lock(&mtd_partitions_mutex);
		list_add(&slave->list, &mtd_partitions);
		mutex_unlock(&mtd_partitions_mutex);

		add_mtd_device(&slave->mtd);

		cur_offset = slave->offset + slave->mtd.size;
	}

	return 0;
}
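
/*
 * Usage sketch (illustrative only; the table contents are made up): a typical
 * static partition table handed to add_mtd_partitions(). MTDPART_OFS_APPEND
 * places a partition directly after the previous one, and MTDPART_SIZ_FULL
 * extends the last one to the end of the device, both resolved in
 * allocate_partition() above.
 *
 *	static const struct mtd_partition demo_parts[] = {
 *		{ .name = "spl",    .offset = 0,                  .size = 256 * 1024 },
 *		{ .name = "env",    .offset = MTDPART_OFS_APPEND, .size = 128 * 1024 },
 *		{ .name = "rootfs", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL },
 *	};
 *
 *	err = add_mtd_partitions(master, demo_parts, ARRAY_SIZE(demo_parts));
 */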

#ifndef __UBOOT__
static DEFINE_SPINLOCK(part_parser_lock);
static LIST_HEAD(part_parsers);

static struct mtd_part_parser *get_partition_parser(const char *name)
{
	struct mtd_part_parser *p, *ret = NULL;

	spin_lock(&part_parser_lock);

	list_for_each_entry(p, &part_parsers, list)
		if (!strcmp(p->name, name) && try_module_get(p->owner)) {
			ret = p;
			break;
		}

	spin_unlock(&part_parser_lock);

	return ret;
}

#define put_partition_parser(p) do { module_put((p)->owner); } while (0)

void register_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_add(&p->list, &part_parsers);
	spin_unlock(&part_parser_lock);
}
EXPORT_SYMBOL_GPL(register_mtd_parser);

void deregister_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_del(&p->list);
	spin_unlock(&part_parser_lock);
}
EXPORT_SYMBOL_GPL(deregister_mtd_parser);

/*
 * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
 * are changing this array!
 */
static const char * const default_mtd_part_types[] = {
	"cmdlinepart",
	"ofpart",
	NULL
};

/**
 * parse_mtd_partitions - parse MTD partitions
 * @master: the master partition (describes whole MTD device)
 * @types: names of partition parsers to try or %NULL
 * @pparts: array of partitions found is returned here
 * @data: MTD partition parser-specific data
 *
 * This function tries to find partitions on MTD device @master. It uses the
 * MTD partition parsers specified in @types; if @types is %NULL, the default
 * list of parsers is used. The default list currently contains only the
 * "cmdlinepart" and "ofpart" parsers.
 * Note: if there is more than one parser in @types, only the partitions
 * found by the first parser that reports any are used.
 *
 * This function may return:
 * o a negative error code in case of failure
 * o zero if no partitions were found
 * o a positive number of found partitions, in which case on exit @pparts will
 *   point to an array containing this number of &struct mtd_partition objects.
 */
int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
			 struct mtd_partition **pparts,
			 struct mtd_part_parser_data *data)
{
	struct mtd_part_parser *parser;
	int ret = 0;

	if (!types)
		types = default_mtd_part_types;

	for ( ; ret <= 0 && *types; types++) {
		parser = get_partition_parser(*types);
		if (!parser && !request_module("%s", *types))
			parser = get_partition_parser(*types);
		if (!parser)
			continue;
		ret = (*parser->parse_fn)(master, pparts, data);
		put_partition_parser(parser);
		if (ret > 0) {
			printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
			       ret, parser->name, master->name);
			break;
		}
	}
	return ret;
}
#endif
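
/*
 * Usage sketch (for the Linux-only path above, illustrative): with @types
 * passed as NULL the default "cmdlinepart"/"ofpart" list is tried, and a
 * positive return hands back a kmalloc()ed partition array the caller must
 * free.
 *
 *	struct mtd_partition *parts;
 *	int nr = parse_mtd_partitions(master, NULL, &parts, NULL);
 *
 *	if (nr > 0) {
 *		err = add_mtd_partitions(master, parts, nr);
 *		kfree(parts);
 *	}
 */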

int mtd_is_partition(const struct mtd_info *mtd)
{
	struct mtd_part *part;
	int ispart = 0;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry(part, &mtd_partitions, list)
		if (&part->mtd == mtd) {
			ispart = 1;
			break;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return ispart;
}
EXPORT_SYMBOL_GPL(mtd_is_partition);

/* Returns the size of the entire flash chip */
uint64_t mtd_get_device_size(const struct mtd_info *mtd)
{
	if (!mtd_is_partition(mtd))
		return mtd->size;

	return PART(mtd)->master->size;
}
EXPORT_SYMBOL_GPL(mtd_get_device_size);
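
/*
 * Usage sketch (illustrative only): mtd->size is the partition size, whereas
 * mtd_get_device_size() always reports the whole chip; callers such as UBI
 * use this to scale bad-block reservations per device rather than per
 * partition.
 *
 *	uint64_t whole = mtd_get_device_size(mtd);
 *
 *	if (mtd_is_partition(mtd))
 *		debug("partition of %llu bytes on a %llu-byte device\n",
 *		      (unsigned long long)mtd->size,
 *		      (unsigned long long)whole);
 */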
836