/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org> et al.
 *
 */

#ifndef __MTD_MTD_H__
#define __MTD_MTD_H__

#ifndef __UBOOT__
#include <linux/types.h>
#include <linux/uio.h>
#include <linux/notifier.h>
#include <linux/device.h>

#include <mtd/mtd-abi.h>

#include <asm/div64.h>
#else
#include <linux/compat.h>
#include <mtd/mtd-abi.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <div64.h>
#if IS_ENABLED(CONFIG_DM)
#include <dm/device.h>
#endif

#define MAX_MTD_DEVICES 32
#endif

#define MTD_ERASE_PENDING	0x01
#define MTD_ERASING		0x02
#define MTD_ERASE_SUSPEND	0x04
#define MTD_ERASE_DONE		0x08
#define MTD_ERASE_FAILED	0x10

#define MTD_FAIL_ADDR_UNKNOWN -1LL

/*
 * If the erase fails, fail_addr might indicate exactly which block failed. If
 * fail_addr = MTD_FAIL_ADDR_UNKNOWN, the failure was not at the device level
 * or was not specific to any particular block.
 */
struct erase_info {
	struct mtd_info *mtd;
	uint64_t addr;
	uint64_t len;
	uint64_t fail_addr;
	u_long time;
	u_long retries;
	unsigned dev;
	unsigned cell;
	void (*callback) (struct erase_info *self);
	u_long priv;
	u_char state;
	struct erase_info *next;
	int scrub;
};
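
/*
 * Example (illustrative sketch, not part of the original header): erasing a
 * single block with mtd_erase(), declared further down in this file. The
 * device pointer "mtd" and the block offset "blk_ofs" are hypothetical
 * caller-provided values.
 *
 *	struct erase_info instr;
 *
 *	memset(&instr, 0, sizeof(instr));
 *	instr.mtd  = mtd;
 *	instr.addr = blk_ofs;			// must be erasesize-aligned
 *	instr.len  = mtd->erasesize;
 *
 *	if (mtd_erase(mtd, &instr))
 *		printf("erase failed at 0x%llx\n",
 *		       (unsigned long long)instr.fail_addr);
 */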

struct mtd_erase_region_info {
	uint64_t offset;		/* At which this region starts, from the beginning of the MTD */
	uint32_t erasesize;		/* For this region */
	uint32_t numblocks;		/* Number of blocks of erasesize in this region */
	unsigned long *lockmap;		/* If keeping bitmap of locks */
};

/**
 * struct mtd_oob_ops - oob operation operands
 * @mode:	operation mode
 *
 * @len:	number of data bytes to write/read
 *
 * @retlen:	number of data bytes written/read
 *
 * @ooblen:	number of oob bytes to write/read
 * @oobretlen:	number of oob bytes written/read
 * @ooboffs:	offset of oob data in the oob area (only relevant when
 *		mode = MTD_OPS_PLACE_OOB or MTD_OPS_RAW)
 * @datbuf:	data buffer - if NULL only oob data are read/written
 * @oobbuf:	oob data buffer
 */
struct mtd_oob_ops {
	unsigned int	mode;
	size_t		len;
	size_t		retlen;
	size_t		ooblen;
	size_t		oobretlen;
	uint32_t	ooboffs;
	uint8_t		*datbuf;
	uint8_t		*oobbuf;
};
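
/*
 * Example (illustrative sketch, not part of the original header): reading one
 * page of data together with its free OOB bytes via mtd_read_oob(), declared
 * below. The buffers "data_buf"/"oob_buf" and the offset "page_ofs" are
 * hypothetical.
 *
 *	struct mtd_oob_ops ops = {
 *		.mode	= MTD_OPS_AUTO_OOB,
 *		.len	= mtd->writesize,
 *		.ooblen	= mtd->oobavail,
 *		.datbuf	= data_buf,
 *		.oobbuf	= oob_buf,
 *	};
 *	int ret = mtd_read_oob(mtd, page_ofs, &ops);
 *
 *	// ops.retlen and ops.oobretlen report what was actually transferred.
 *	if (mtd_is_bitflip(ret))
 *		ret = 0;	// bitflips were corrected, data is still valid
 */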

#ifdef CONFIG_SYS_NAND_MAX_OOBFREE
#define MTD_MAX_OOBFREE_ENTRIES_LARGE	CONFIG_SYS_NAND_MAX_OOBFREE
#else
#define MTD_MAX_OOBFREE_ENTRIES_LARGE	32
#endif

#ifdef CONFIG_SYS_NAND_MAX_ECCPOS
#define MTD_MAX_ECCPOS_ENTRIES_LARGE	CONFIG_SYS_NAND_MAX_ECCPOS
#else
#define MTD_MAX_ECCPOS_ENTRIES_LARGE	680
#endif
/**
 * struct mtd_oob_region - oob region definition
 * @offset: region offset
 * @length: region length
 *
 * This structure describes a region of the OOB area, and is used
 * to retrieve ECC or free bytes sections.
 * Each section is defined by an offset within the OOB area and a
 * length.
 */
struct mtd_oob_region {
	u32 offset;
	u32 length;
};

/*
 * struct mtd_ooblayout_ops - NAND OOB layout operations
 * @ecc: function returning an ECC region in the OOB area.
 *	 Should return -ERANGE if %section exceeds the total number of
 *	 ECC sections.
 * @free: function returning a free region in the OOB area.
 *	  Should return -ERANGE if %section exceeds the total number of
 *	  free sections.
 */
struct mtd_ooblayout_ops {
	int (*ecc)(struct mtd_info *mtd, int section,
		   struct mtd_oob_region *oobecc);
	int (*free)(struct mtd_info *mtd, int section,
		    struct mtd_oob_region *oobfree);
};
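
/*
 * Example (illustrative sketch, not part of the original header): a driver
 * describing an OOB layout that keeps the ECC bytes at the end of the OOB
 * area and leaves the bytes after the bad-block markers free. The sizes and
 * the "demo_" names are hypothetical.
 *
 *	static int demo_ooblayout_ecc(struct mtd_info *mtd, int section,
 *				      struct mtd_oob_region *oobecc)
 *	{
 *		if (section)
 *			return -ERANGE;
 *		oobecc->offset = mtd->oobsize - 24;
 *		oobecc->length = 24;
 *		return 0;
 *	}
 *
 *	static int demo_ooblayout_free(struct mtd_info *mtd, int section,
 *				       struct mtd_oob_region *oobfree)
 *	{
 *		if (section)
 *			return -ERANGE;
 *		oobfree->offset = 2;		// skip the bad-block marker
 *		oobfree->length = mtd->oobsize - 24 - 2;
 *		return 0;
 *	}
 *
 *	static const struct mtd_ooblayout_ops demo_ooblayout_ops = {
 *		.ecc	= demo_ooblayout_ecc,
 *		.free	= demo_ooblayout_free,
 *	};
 *
 *	// hooked up from the driver with:
 *	//	mtd_set_ooblayout(mtd, &demo_ooblayout_ops);
 */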

/*
 * Internal ECC layout control structure. For historical reasons, there is a
 * similar, smaller struct nand_ecclayout_user (in mtd-abi.h) that is retained
 * for export to user-space via the ECCGETLAYOUT ioctl.
 * struct nand_ecclayout can be expanded in the future simply by adjusting the
 * macros above.
 */
struct nand_ecclayout {
	__u32 eccbytes;
	__u32 eccpos[MTD_MAX_ECCPOS_ENTRIES_LARGE];
	__u32 oobavail;
	struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES_LARGE];
};

struct module;	/* only needed for owner field in mtd_info */

struct mtd_info {
	u_char type;
	uint32_t flags;
	uint64_t size;	 // Total size of the MTD

	/* "Major" erase size for the device. Naïve users may take this
	 * to be the only erase size available, or may use the more detailed
	 * information below if they desire
	 */
	uint32_t erasesize;
	/* Minimal writable flash unit size. For NOR flash it is 1 (even
	 * though individual bits can be cleared); for NAND flash it is
	 * one NAND page (or a half or a quarter of it); for ECC-ed NOR
	 * it is the ECC block size; etc. It is illegal to have writesize = 0.
	 * Any driver registering a struct mtd_info must ensure a writesize of
	 * 1 or larger.
	 */
	uint32_t writesize;

	/*
	 * Size of the write buffer used by the MTD. MTD devices having a write
	 * buffer can write multiple writesize chunks at a time. E.g. while
	 * writing 4 * writesize bytes to a device with 2 * writesize bytes
	 * buffer the MTD driver can (but doesn't have to) do 2 writesize
	 * operations, but not 4. Currently, all NANDs have writebufsize
	 * equivalent to writesize (NAND page size). Some NOR flashes do have
	 * writebufsize greater than writesize.
	 */
	uint32_t writebufsize;

	uint32_t oobsize;   // Amount of OOB data per page (e.g. 16)
	uint32_t oobavail;  // Available (free) OOB bytes per page

	/*
	 * If erasesize is a power of 2 then the shift is stored in
	 * erasesize_shift otherwise erasesize_shift is zero. Ditto writesize.
	 */
	unsigned int erasesize_shift;
	unsigned int writesize_shift;
	/* Masks based on erasesize_shift and writesize_shift */
	unsigned int erasesize_mask;
	unsigned int writesize_mask;

	/*
	 * read ops return -EUCLEAN if max number of bitflips corrected on any
	 * one region comprising an ecc step equals or exceeds this value.
	 * Settable by driver, else defaults to ecc_strength.  User can override
	 * in sysfs.  N.B. The meaning of the -EUCLEAN return code has changed;
	 * see Documentation/ABI/testing/sysfs-class-mtd for more detail.
	 */
	unsigned int bitflip_threshold;

	// Kernel-only stuff starts here.
#ifndef __UBOOT__
	const char *name;
#else
	char *name;
#endif
	int index;

	/* OOB layout description */
	const struct mtd_ooblayout_ops *ooblayout;

	/* ECC layout structure pointer - read only! */
	struct nand_ecclayout *ecclayout;

	/* the ecc step size. */
	unsigned int ecc_step_size;

	/* max number of correctable bit errors per ecc step */
	unsigned int ecc_strength;

	/* Data for variable erase regions. If numeraseregions is zero,
	 * it means that the whole device has erasesize as given above.
	 */
	int numeraseregions;
	struct mtd_erase_region_info *eraseregions;

	/*
	 * Do not call via these pointers, use corresponding mtd_*()
	 * wrappers instead.
	 */
	int (*_erase) (struct mtd_info *mtd, struct erase_info *instr);
#ifndef __UBOOT__
	int (*_point) (struct mtd_info *mtd, loff_t from, size_t len,
		       size_t *retlen, void **virt, resource_size_t *phys);
	int (*_unpoint) (struct mtd_info *mtd, loff_t from, size_t len);
#endif
	unsigned long (*_get_unmapped_area) (struct mtd_info *mtd,
					     unsigned long len,
					     unsigned long offset,
					     unsigned long flags);
	int (*_read) (struct mtd_info *mtd, loff_t from, size_t len,
		      size_t *retlen, u_char *buf);
	int (*_write) (struct mtd_info *mtd, loff_t to, size_t len,
		       size_t *retlen, const u_char *buf);
	int (*_panic_write) (struct mtd_info *mtd, loff_t to, size_t len,
			     size_t *retlen, const u_char *buf);
	int (*_read_oob) (struct mtd_info *mtd, loff_t from,
			  struct mtd_oob_ops *ops);
	int (*_write_oob) (struct mtd_info *mtd, loff_t to,
			   struct mtd_oob_ops *ops);
	int (*_get_fact_prot_info) (struct mtd_info *mtd, size_t len,
				    size_t *retlen, struct otp_info *buf);
	int (*_read_fact_prot_reg) (struct mtd_info *mtd, loff_t from,
				    size_t len, size_t *retlen, u_char *buf);
	int (*_get_user_prot_info) (struct mtd_info *mtd, size_t len,
				    size_t *retlen, struct otp_info *buf);
	int (*_read_user_prot_reg) (struct mtd_info *mtd, loff_t from,
				    size_t len, size_t *retlen, u_char *buf);
	int (*_write_user_prot_reg) (struct mtd_info *mtd, loff_t to,
				     size_t len, size_t *retlen, u_char *buf);
	int (*_lock_user_prot_reg) (struct mtd_info *mtd, loff_t from,
				    size_t len);
#ifndef __UBOOT__
	int (*_writev) (struct mtd_info *mtd, const struct kvec *vecs,
			unsigned long count, loff_t to, size_t *retlen);
#endif
	void (*_sync) (struct mtd_info *mtd);
	int (*_lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
	int (*_unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
	int (*_is_locked) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
	int (*_block_isreserved) (struct mtd_info *mtd, loff_t ofs);
	int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs);
	int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs);
#ifndef __UBOOT__
	int (*_suspend) (struct mtd_info *mtd);
	void (*_resume) (struct mtd_info *mtd);
	void (*_reboot) (struct mtd_info *mtd);
#endif
	/*
	 * If the driver is something smart, like UBI, it may need to maintain
	 * its own reference counting. The functions below are for the driver
	 * only.
	 */
	int (*_get_device) (struct mtd_info *mtd);
	void (*_put_device) (struct mtd_info *mtd);

#ifndef __UBOOT__
	/* Backing device capabilities for this device
	 * - provides mmap capabilities
	 */
	struct backing_dev_info *backing_dev_info;

	struct notifier_block reboot_notifier;  /* default mode before reboot */
#endif

	/* ECC status information */
	struct mtd_ecc_stats ecc_stats;
	/* Subpage shift (NAND) */
	int subpage_sft;

	void *priv;

	struct module *owner;
#ifndef __UBOOT__
	struct device dev;
#else
	struct udevice *dev;
#endif
	int usecount;

	/* MTD devices do not have any parent. MTD partitions do. */
	struct mtd_info *parent;

	/*
	 * Offset of the partition relative to the parent offset.
	 * Is 0 for real MTD devices (i.e. not partitions).
	 */
	u64 offset;

	/*
	 * List node used to add an MTD partition to the parent
	 * partition list.
	 */
	struct list_head node;

	/*
	 * List of partitions attached to this MTD device (the parent
	 * MTD device can itself be a partition).
	 */
	struct list_head partitions;
};

#if IS_ENABLED(CONFIG_DM)
static inline void mtd_set_of_node(struct mtd_info *mtd,
				   const struct device_node *np)
{
	mtd->dev->node.np = np;
}

static inline const struct device_node *mtd_get_of_node(struct mtd_info *mtd)
{
	return mtd->dev->node.np;
}
#else
struct device_node;

static inline void mtd_set_of_node(struct mtd_info *mtd,
				   const struct device_node *np)
{
}

static inline const struct device_node *mtd_get_of_node(struct mtd_info *mtd)
{
	return NULL;
}
#endif

static inline bool mtd_is_partition(const struct mtd_info *mtd)
{
	return mtd->parent;
}

static inline bool mtd_has_partitions(const struct mtd_info *mtd)
{
	return !list_empty(&mtd->partitions);
}

int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
		      struct mtd_oob_region *oobecc);
int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
				 int *section,
				 struct mtd_oob_region *oobregion);
int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
			       const u8 *oobbuf, int start, int nbytes);
int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
			       u8 *oobbuf, int start, int nbytes);
int mtd_ooblayout_free(struct mtd_info *mtd, int section,
		       struct mtd_oob_region *oobfree);
int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
				const u8 *oobbuf, int start, int nbytes);
int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
				u8 *oobbuf, int start, int nbytes);
int mtd_ooblayout_count_freebytes(struct mtd_info *mtd);
int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd);
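
/*
 * Example (illustrative sketch, not part of the original header): walking
 * every free OOB section reported by the layout until the helper returns
 * -ERANGE. "mtd" is a hypothetical, already-probed device.
 *
 *	struct mtd_oob_region region;
 *	int section = 0;
 *
 *	while (!mtd_ooblayout_free(mtd, section++, &region))
 *		printf("free OOB: offset %u, length %u\n",
 *		       region.offset, region.length);
 */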

static inline void mtd_set_ooblayout(struct mtd_info *mtd,
				     const struct mtd_ooblayout_ops *ooblayout)
{
	mtd->ooblayout = ooblayout;
}

static inline int mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
{
	return ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize;
}

int mtd_erase(struct mtd_info *mtd, struct erase_info *instr);
#ifndef __UBOOT__
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	      void **virt, resource_size_t *phys);
int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
#endif
unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
				    unsigned long offset, unsigned long flags);
int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	     u_char *buf);
int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
	      const u_char *buf);
int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
		    const u_char *buf);
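
/*
 * Example (illustrative sketch, not part of the original header): reading
 * "len" bytes from offset "from" and tolerating corrected bitflips. All
 * names are hypothetical caller-provided values.
 *
 *	size_t retlen;
 *	int ret = mtd_read(mtd, from, len, &retlen, buf);
 *
 *	if (ret && !mtd_is_bitflip(ret))
 *		return ret;		// real I/O or uncorrectable ECC error
 *	// "retlen" bytes of "buf" are now valid
 */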

int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops);
int mtd_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops);

int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf);
int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf);
int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf);
int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf);
int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
			    size_t *retlen, u_char *buf);
int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len);

#ifndef __UBOOT__
int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
	       unsigned long count, loff_t to, size_t *retlen);
#endif

static inline void mtd_sync(struct mtd_info *mtd)
{
	if (mtd->_sync)
		mtd->_sync(mtd);
}

int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);
int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs);
int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs);
int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs);
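
/*
 * Example (illustrative sketch, not part of the original header): walking a
 * NAND region one erase block at a time and skipping blocks that are marked
 * bad. "start" and "end" are hypothetical, erasesize-aligned offsets.
 *
 *	loff_t ofs;
 *
 *	for (ofs = start; ofs < end; ofs += mtd->erasesize) {
 *		if (mtd_block_isbad(mtd, ofs)) {
 *			printf("skipping bad block at 0x%llx\n",
 *			       (unsigned long long)ofs);
 *			continue;
 *		}
 *		// read/erase/program this block
 *	}
 */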

#ifndef __UBOOT__
static inline int mtd_suspend(struct mtd_info *mtd)
{
	return mtd->_suspend ? mtd->_suspend(mtd) : 0;
}

static inline void mtd_resume(struct mtd_info *mtd)
{
	if (mtd->_resume)
		mtd->_resume(mtd);
}
#endif

static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
{
	if (mtd->erasesize_shift)
		return sz >> mtd->erasesize_shift;
	do_div(sz, mtd->erasesize);
	return sz;
}

static inline uint32_t mtd_mod_by_eb(uint64_t sz, struct mtd_info *mtd)
{
	if (mtd->erasesize_shift)
		return sz & mtd->erasesize_mask;
	return do_div(sz, mtd->erasesize);
}

static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd)
{
	if (mtd->writesize_shift)
		return sz >> mtd->writesize_shift;
	do_div(sz, mtd->writesize);
	return sz;
}

static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd)
{
	if (mtd->writesize_shift)
		return sz & mtd->writesize_mask;
	return do_div(sz, mtd->writesize);
}
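
/*
 * Example (illustrative sketch, not part of the original header): the helpers
 * above avoid expensive 64-bit divisions whenever the sizes are powers of
 * two. Converting a hypothetical offset "ofs" into an erase-block index and
 * checking its alignment:
 *
 *	uint32_t block = mtd_div_by_eb(ofs, mtd);
 *
 *	if (mtd_mod_by_eb(ofs, mtd))
 *		printf("0x%llx is not erase-block aligned\n",
 *		       (unsigned long long)ofs);
 *	else
 *		printf("0x%llx starts erase block %u\n",
 *		       (unsigned long long)ofs, block);
 */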

static inline int mtd_has_oob(const struct mtd_info *mtd)
{
	return mtd->_read_oob && mtd->_write_oob;
}

static inline int mtd_type_is_nand(const struct mtd_info *mtd)
{
	return mtd->type == MTD_NANDFLASH || mtd->type == MTD_MLCNANDFLASH;
}

static inline int mtd_can_have_bb(const struct mtd_info *mtd)
{
	return !!mtd->_block_isbad;
}

	/* Kernel-side ioctl definitions */

struct mtd_partition;
struct mtd_part_parser_data;

extern int mtd_device_parse_register(struct mtd_info *mtd,
				     const char * const *part_probe_types,
				     struct mtd_part_parser_data *parser_data,
				     const struct mtd_partition *defparts,
				     int defnr_parts);
#define mtd_device_register(master, parts, nr_parts)	\
	mtd_device_parse_register(master, NULL, NULL, parts, nr_parts)
extern int mtd_device_unregister(struct mtd_info *master);
extern struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num);
extern int __get_mtd_device(struct mtd_info *mtd);
extern void __put_mtd_device(struct mtd_info *mtd);
extern struct mtd_info *get_mtd_device_nm(const char *name);
extern void put_mtd_device(struct mtd_info *mtd);
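
/*
 * Example (illustrative sketch, not part of the original header): looking up
 * a device by name and dropping the reference once done. The name "nand0" is
 * hypothetical.
 *
 *	struct mtd_info *mtd = get_mtd_device_nm("nand0");
 *
 *	if (IS_ERR(mtd))
 *		return PTR_ERR(mtd);
 *	// ... use the device ...
 *	put_mtd_device(mtd);
 */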


#ifndef __UBOOT__
struct mtd_notifier {
	void (*add)(struct mtd_info *mtd);
	void (*remove)(struct mtd_info *mtd);
	struct list_head list;
};


extern void register_mtd_user (struct mtd_notifier *new);
extern int unregister_mtd_user (struct mtd_notifier *old);
#endif
void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size);
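
/*
 * Example (illustrative sketch, not part of the original header): requesting
 * a buffer of up to one erase block and accepting whatever smaller size the
 * allocator could actually provide.
 *
 *	size_t size = mtd->erasesize;
 *	void *buf = mtd_kmalloc_up_to(mtd, &size);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	// "size" now holds the length that was actually allocated
 */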

#ifdef CONFIG_MTD_PARTITIONS
void mtd_erase_callback(struct erase_info *instr);
#else
static inline void mtd_erase_callback(struct erase_info *instr)
{
	if (instr->callback)
		instr->callback(instr);
}
#endif

static inline int mtd_is_bitflip(int err) {
	return err == -EUCLEAN;
}

static inline int mtd_is_eccerr(int err) {
	return err == -EBADMSG;
}

static inline int mtd_is_bitflip_or_eccerr(int err) {
	return mtd_is_bitflip(err) || mtd_is_eccerr(err);
}
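
/*
 * Example (illustrative sketch, not part of the original header): telling the
 * two ECC-related read results apart. "ret" is a hypothetical return value
 * from mtd_read() or mtd_read_oob().
 *
 *	if (mtd_is_eccerr(ret))
 *		printf("uncorrectable ECC error, data is unreliable\n");
 *	else if (mtd_is_bitflip(ret))
 *		printf("bitflips corrected, consider scrubbing this block\n");
 */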

unsigned mtd_mmap_capabilities(struct mtd_info *mtd);

#ifdef __UBOOT__
/* drivers/mtd/mtdcore.h */
int add_mtd_device(struct mtd_info *mtd);
int del_mtd_device(struct mtd_info *mtd);
int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
int del_mtd_partitions(struct mtd_info *);

struct mtd_info *__mtd_next_device(int i);
#define mtd_for_each_device(mtd)			\
	for ((mtd) = __mtd_next_device(0);		\
	     (mtd) != NULL;				\
	     (mtd) = __mtd_next_device(mtd->index + 1))
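
/*
 * Example (illustrative sketch, not part of the original header): listing
 * every registered MTD device.
 *
 *	struct mtd_info *mtd;
 *
 *	mtd_for_each_device(mtd)
 *		printf("mtd%d: %s, %llu bytes\n", mtd->index, mtd->name,
 *		       (unsigned long long)mtd->size);
 */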

int mtd_arg_off(const char *arg, int *idx, loff_t *off, loff_t *size,
		loff_t *maxsize, int devtype, uint64_t chipsize);
int mtd_arg_off_size(int argc, char *const argv[], int *idx, loff_t *off,
		     loff_t *size, loff_t *maxsize, int devtype,
		     uint64_t chipsize);

/* drivers/mtd/mtdcore.c */
void mtd_get_len_incl_bad(struct mtd_info *mtd, uint64_t offset,
			  const uint64_t length, uint64_t *len_incl_bad,
			  int *truncated);

/* drivers/mtd/mtd_uboot.c */
int mtd_search_alternate_name(const char *mtdname, char *altname,
			      unsigned int max_len);

#endif
#endif /* __MTD_MTD_H__ */