12b27bdccSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
21e51764aSArtem Bityutskiy /*
31e51764aSArtem Bityutskiy * This file is part of UBIFS.
41e51764aSArtem Bityutskiy *
51e51764aSArtem Bityutskiy * Copyright (C) 2006-2008 Nokia Corporation.
61e51764aSArtem Bityutskiy *
71e51764aSArtem Bityutskiy * Authors: Adrian Hunter
81e51764aSArtem Bityutskiy * Artem Bityutskiy (Битюцкий Артём)
91e51764aSArtem Bityutskiy */
101e51764aSArtem Bityutskiy
111e51764aSArtem Bityutskiy /*
121e51764aSArtem Bityutskiy * This file implements garbage collection. The procedure for garbage collection
 * is different depending on whether a LEB is an index LEB (contains index
141e51764aSArtem Bityutskiy * nodes) or not. For non-index LEBs, garbage collection finds a LEB which
151e51764aSArtem Bityutskiy * contains a lot of dirty space (obsolete nodes), and copies the non-obsolete
161e51764aSArtem Bityutskiy * nodes to the journal, at which point the garbage-collected LEB is free to be
171e51764aSArtem Bityutskiy * reused. For index LEBs, garbage collection marks the non-obsolete index nodes
181e51764aSArtem Bityutskiy * dirty in the TNC, and after the next commit, the garbage-collected LEB is
191e51764aSArtem Bityutskiy * to be reused. Garbage collection will cause the number of dirty index nodes
201e51764aSArtem Bityutskiy * to grow, however sufficient space is reserved for the index to ensure the
211e51764aSArtem Bityutskiy * commit will never run out of space.
227078202eSArtem Bityutskiy *
237078202eSArtem Bityutskiy * Notes about dead watermark. At current UBIFS implementation we assume that
247078202eSArtem Bityutskiy * LEBs which have less than @c->dead_wm bytes of free + dirty space are full
257078202eSArtem Bityutskiy * and not worth garbage-collecting. The dead watermark is one min. I/O unit
267078202eSArtem Bityutskiy * size, or min. UBIFS node size, depending on what is greater. Indeed, UBIFS
277078202eSArtem Bityutskiy * Garbage Collector has to synchronize the GC head's write buffer before
287078202eSArtem Bityutskiy * returning, so this is about wasting one min. I/O unit. However, UBIFS GC can
297078202eSArtem Bityutskiy * actually reclaim even very small pieces of dirty space by garbage collecting
307078202eSArtem Bityutskiy * enough dirty LEBs, but we do not bother doing this at this implementation.
317078202eSArtem Bityutskiy *
327078202eSArtem Bityutskiy * Notes about dark watermark. The results of GC work depends on how big are
337078202eSArtem Bityutskiy * the UBIFS nodes GC deals with. Large nodes make GC waste more space. Indeed,
347078202eSArtem Bityutskiy * if GC move data from LEB A to LEB B and nodes in LEB A are large, GC would
357078202eSArtem Bityutskiy * have to waste large pieces of free space at the end of LEB B, because nodes
367078202eSArtem Bityutskiy * from LEB A would not fit. And the worst situation is when all nodes are of
377078202eSArtem Bityutskiy * maximum size. So dark watermark is the amount of free + dirty space in LEB
 * which is guaranteed to be reclaimable. If LEB has less space, the GC might
397078202eSArtem Bityutskiy * be unable to reclaim it. So, LEBs with free + dirty greater than dark
4028e5dfd8SSascha Hauer * watermark are "good" LEBs from GC's point of view. The other LEBs are not so
417078202eSArtem Bityutskiy * good, and GC takes extra care when moving them.
421e51764aSArtem Bityutskiy */
431e51764aSArtem Bityutskiy
445a0e3ad6STejun Heo #include <linux/slab.h>
451e51764aSArtem Bityutskiy #include <linux/pagemap.h>
462c761270SDave Chinner #include <linux/list_sort.h>
471e51764aSArtem Bityutskiy #include "ubifs.h"
481e51764aSArtem Bityutskiy
491e51764aSArtem Bityutskiy /*
50025dfdafSFrederik Schwarzer * GC may need to move more than one LEB to make progress. The below constants
511e51764aSArtem Bityutskiy * define "soft" and "hard" limits on the number of LEBs the garbage collector
521e51764aSArtem Bityutskiy * may move.
531e51764aSArtem Bityutskiy */
541e51764aSArtem Bityutskiy #define SOFT_LEBS_LIMIT 4
551e51764aSArtem Bityutskiy #define HARD_LEBS_LIMIT 32
561e51764aSArtem Bityutskiy
/**
 * switch_gc_head - switch the garbage collection journal head.
 * @c: UBIFS file-system description object
 *
 * This function switches the GC head to the next LEB which is reserved in
 * @c->gc_lnum. Returns %0 in case of success, %-EAGAIN if commit is required,
 * and other negative error code in case of failures.
 */
static int switch_gc_head(struct ubifs_info *c)
{
	int err, gc_lnum = c->gc_lnum;
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;

	/* A spare LEB must have been reserved for the new GC head */
	ubifs_assert(c, gc_lnum != -1);
	dbg_gc("switch GC head from LEB %d:%d to LEB %d (waste %d bytes)",
	       wbuf->lnum, wbuf->offs + wbuf->used, gc_lnum,
	       c->leb_size - wbuf->offs - wbuf->used);

	/* Flush the old GC head so all moved nodes are on the media */
	err = ubifs_wbuf_sync_nolock(wbuf);
	if (err)
		return err;

	/*
	 * The GC write-buffer was synchronized, we may safely unmap
	 * 'c->gc_lnum'.
	 */
	err = ubifs_leb_unmap(c, gc_lnum);
	if (err)
		return err;

	/* Make the reserved LEB a bud of the GC journal head */
	err = ubifs_add_bud_to_log(c, GCHD, gc_lnum, 0);
	if (err)
		return err;

	/* The reserved LEB is consumed - a new one must be found later */
	c->gc_lnum = -1;
	err = ubifs_wbuf_seek_nolock(wbuf, gc_lnum, 0);
	return err;
}
951e51764aSArtem Bityutskiy
/**
 * data_nodes_cmp - compare 2 data nodes.
 * @priv: UBIFS file-system description object
 * @a: first data node
 * @b: second data node
 *
 * This function compares data nodes @a and @b. Returns %1 if @a has greater
 * inode or block number, and %-1 otherwise. Returns %0 only when @a and @b
 * refer to the same list entry.
 */
static int data_nodes_cmp(void *priv, const struct list_head *a,
			  const struct list_head *b)
{
	ino_t inuma, inumb;
	struct ubifs_info *c = priv;
	struct ubifs_scan_node *sa, *sb;

	/* This may be called many times by list_sort() - be scheduler-friendly */
	cond_resched();
	if (a == b)
		return 0;

	sa = list_entry(a, struct ubifs_scan_node, list);
	sb = list_entry(b, struct ubifs_scan_node, list);

	/* Only data nodes may appear on the list being sorted */
	ubifs_assert(c, key_type(c, &sa->key) == UBIFS_DATA_KEY);
	ubifs_assert(c, key_type(c, &sb->key) == UBIFS_DATA_KEY);
	ubifs_assert(c, sa->type == UBIFS_DATA_NODE);
	ubifs_assert(c, sb->type == UBIFS_DATA_NODE);

	inuma = key_inum(c, &sa->key);
	inumb = key_inum(c, &sb->key);

	if (inuma == inumb) {
		/* Same inode - order by block number for bulk-read */
		unsigned int blka = key_block(c, &sa->key);
		unsigned int blkb = key_block(c, &sb->key);

		if (blka <= blkb)
			return -1;
	} else if (inuma <= inumb)
		return -1;

	return 1;
}
138f10770f5SArtem Bityutskiy
/**
 * nondata_nodes_cmp - compare 2 non-data nodes.
 * @priv: UBIFS file-system description object
 * @a: first node
 * @b: second node
 *
 * This function compares nodes @a and @b. It makes sure that inode nodes go
 * first and sorted by length in descending order. Directory entry nodes go
 * after inode nodes and are sorted in ascending hash value order.
 */
static int nondata_nodes_cmp(void *priv, const struct list_head *a,
			     const struct list_head *b)
{
	ino_t inuma, inumb;
	struct ubifs_info *c = priv;
	struct ubifs_scan_node *sa, *sb;

	/* This may be called many times by list_sort() - be scheduler-friendly */
	cond_resched();
	if (a == b)
		return 0;

	sa = list_entry(a, struct ubifs_scan_node, list);
	sb = list_entry(b, struct ubifs_scan_node, list);

	/* Data nodes were already separated out by sort_nodes() */
	ubifs_assert(c, key_type(c, &sa->key) != UBIFS_DATA_KEY &&
		     key_type(c, &sb->key) != UBIFS_DATA_KEY);
	ubifs_assert(c, sa->type != UBIFS_DATA_NODE &&
		     sb->type != UBIFS_DATA_NODE);

	/* Inodes go before directory entries */
	if (sa->type == UBIFS_INO_NODE) {
		if (sb->type == UBIFS_INO_NODE)
			/* Both inodes - larger one first */
			return sb->len - sa->len;
		return -1;
	}
	if (sb->type == UBIFS_INO_NODE)
		return 1;

	/* At this point both must be (extended attribute) entry nodes */
	ubifs_assert(c, key_type(c, &sa->key) == UBIFS_DENT_KEY ||
		     key_type(c, &sa->key) == UBIFS_XENT_KEY);
	ubifs_assert(c, key_type(c, &sb->key) == UBIFS_DENT_KEY ||
		     key_type(c, &sb->key) == UBIFS_XENT_KEY);
	ubifs_assert(c, sa->type == UBIFS_DENT_NODE ||
		     sa->type == UBIFS_XENT_NODE);
	ubifs_assert(c, sb->type == UBIFS_DENT_NODE ||
		     sb->type == UBIFS_XENT_NODE);

	inuma = key_inum(c, &sa->key);
	inumb = key_inum(c, &sb->key);

	if (inuma == inumb) {
		/* Same parent inode - order by name hash */
		uint32_t hasha = key_hash(c, &sa->key);
		uint32_t hashb = key_hash(c, &sb->key);

		if (hasha <= hashb)
			return -1;
	} else if (inuma <= inumb)
		return -1;

	return 1;
}
200f10770f5SArtem Bityutskiy
/**
 * sort_nodes - sort nodes for GC.
 * @c: UBIFS file-system description object
 * @sleb: describes nodes to sort and contains the result on exit
 * @nondata: contains non-data nodes on exit
 * @min: minimum node size is returned here
 *
 * This function sorts the list of inodes to garbage collect. First of all, it
 * kills obsolete nodes and separates data and non-data nodes to the
 * @sleb->nodes and @nondata lists correspondingly.
 *
 * Data nodes are then sorted in block number order - this is important for
 * bulk-read; data nodes with lower inode number go before data nodes with
 * higher inode number, and data nodes with lower block number go before data
 * nodes with higher block number;
 *
 * Non-data nodes are sorted as follows.
 *   o First go inode nodes - they are sorted in descending length order.
 *   o Then go directory entry nodes - they are sorted in hash order, which
 *     should supposedly optimize 'readdir()'. Direntry nodes with lower parent
 *     inode number go before direntry nodes with higher parent inode number,
 *     and direntry nodes with lower name hash values go before direntry nodes
 *     with higher name hash values.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int sort_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
		      struct list_head *nondata, int *min)
{
	int err;
	struct ubifs_scan_node *snod, *tmp;

	*min = INT_MAX;

	/* Separate data nodes and non-data nodes */
	list_for_each_entry_safe(snod, tmp, &sleb->nodes, list) {
		ubifs_assert(c, snod->type == UBIFS_INO_NODE ||
			     snod->type == UBIFS_DATA_NODE ||
			     snod->type == UBIFS_DENT_NODE ||
			     snod->type == UBIFS_XENT_NODE ||
			     snod->type == UBIFS_TRUN_NODE ||
			     snod->type == UBIFS_AUTH_NODE);

		if (snod->type != UBIFS_INO_NODE &&
		    snod->type != UBIFS_DATA_NODE &&
		    snod->type != UBIFS_DENT_NODE &&
		    snod->type != UBIFS_XENT_NODE) {
			/* Probably truncation node, zap it */
			list_del(&snod->list);
			kfree(snod);
			continue;
		}

		ubifs_assert(c, key_type(c, &snod->key) == UBIFS_DATA_KEY ||
			     key_type(c, &snod->key) == UBIFS_INO_KEY ||
			     key_type(c, &snod->key) == UBIFS_DENT_KEY ||
			     key_type(c, &snod->key) == UBIFS_XENT_KEY);

		/* Ask the TNC whether this node is still referenced */
		err = ubifs_tnc_has_node(c, &snod->key, 0, sleb->lnum,
					 snod->offs, 0);
		if (err < 0)
			return err;

		if (!err) {
			/* The node is obsolete, remove it from the list */
			list_del(&snod->list);
			kfree(snod);
			continue;
		}

		/* Track the smallest live node for the first-fit heuristic */
		if (snod->len < *min)
			*min = snod->len;

		if (key_type(c, &snod->key) != UBIFS_DATA_KEY)
			list_move_tail(&snod->list, nondata);
	}

	/* Sort data and non-data nodes */
	list_sort(c, &sleb->nodes, &data_nodes_cmp);
	list_sort(c, nondata, &nondata_nodes_cmp);

	/* Debugging-only order verification (no-ops in non-debug builds) */
	err = dbg_check_data_nodes_order(c, &sleb->nodes);
	if (err)
		return err;
	err = dbg_check_nondata_nodes_order(c, nondata);
	if (err)
		return err;
	return 0;
}
291f10770f5SArtem Bityutskiy
/**
 * move_node - move a node.
 * @c: UBIFS file-system description object
 * @sleb: describes the LEB to move nodes from
 * @snod: the node to move
 * @wbuf: write-buffer to move node to
 *
 * This function moves node @snod to @wbuf, changes TNC correspondingly, and
 * destroys @snod. Returns zero in case of success and a negative error code in
 * case of failure.
 */
static int move_node(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
		     struct ubifs_scan_node *snod, struct ubifs_wbuf *wbuf)
{
	/* Record the destination before the write advances the buffer */
	int err, new_lnum = wbuf->lnum, new_offs = wbuf->offs + wbuf->used;

	cond_resched();
	err = ubifs_wbuf_write_nolock(wbuf, snod->node, snod->len);
	if (err)
		/* snod stays on the list so the caller can re-join/clean up */
		return err;

	/* Re-point the TNC entry from the old location to the new one */
	err = ubifs_tnc_replace(c, &snod->key, sleb->lnum,
				snod->offs, new_lnum, new_offs,
				snod->len);
	/* @snod is destroyed even if the TNC update failed */
	list_del(&snod->list);
	kfree(snod);
	return err;
}
3201e51764aSArtem Bityutskiy
321f10770f5SArtem Bityutskiy /**
322f10770f5SArtem Bityutskiy * move_nodes - move nodes.
323f10770f5SArtem Bityutskiy * @c: UBIFS file-system description object
324f10770f5SArtem Bityutskiy * @sleb: describes the LEB to move nodes from
325f10770f5SArtem Bityutskiy *
326f10770f5SArtem Bityutskiy * This function moves valid nodes from data LEB described by @sleb to the GC
327f10770f5SArtem Bityutskiy * journal head. This function returns zero in case of success, %-EAGAIN if
328f10770f5SArtem Bityutskiy * commit is required, and other negative error codes in case of other
329f10770f5SArtem Bityutskiy * failures.
3301e51764aSArtem Bityutskiy */
move_nodes(struct ubifs_info * c,struct ubifs_scan_leb * sleb)331f10770f5SArtem Bityutskiy static int move_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb)
332f10770f5SArtem Bityutskiy {
333f10770f5SArtem Bityutskiy int err, min;
334f10770f5SArtem Bityutskiy LIST_HEAD(nondata);
335f10770f5SArtem Bityutskiy struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
3361e51764aSArtem Bityutskiy
3371e51764aSArtem Bityutskiy if (wbuf->lnum == -1) {
3381e51764aSArtem Bityutskiy /*
3391e51764aSArtem Bityutskiy * The GC journal head is not set, because it is the first GC
3401e51764aSArtem Bityutskiy * invocation since mount.
3411e51764aSArtem Bityutskiy */
3421e51764aSArtem Bityutskiy err = switch_gc_head(c);
3431e51764aSArtem Bityutskiy if (err)
344f10770f5SArtem Bityutskiy return err;
3451e51764aSArtem Bityutskiy }
3461e51764aSArtem Bityutskiy
347f10770f5SArtem Bityutskiy err = sort_nodes(c, sleb, &nondata, &min);
348f10770f5SArtem Bityutskiy if (err)
349f10770f5SArtem Bityutskiy goto out;
350f10770f5SArtem Bityutskiy
3511e51764aSArtem Bityutskiy /* Write nodes to their new location. Use the first-fit strategy */
3521e51764aSArtem Bityutskiy while (1) {
3536f06d96fSSascha Hauer int avail, moved = 0;
354f10770f5SArtem Bityutskiy struct ubifs_scan_node *snod, *tmp;
3551e51764aSArtem Bityutskiy
356f10770f5SArtem Bityutskiy /* Move data nodes */
357f10770f5SArtem Bityutskiy list_for_each_entry_safe(snod, tmp, &sleb->nodes, list) {
3586f06d96fSSascha Hauer avail = c->leb_size - wbuf->offs - wbuf->used -
3596f06d96fSSascha Hauer ubifs_auth_node_sz(c);
360f10770f5SArtem Bityutskiy if (snod->len > avail)
361f10770f5SArtem Bityutskiy /*
362f10770f5SArtem Bityutskiy * Do not skip data nodes in order to optimize
363f10770f5SArtem Bityutskiy * bulk-read.
364f10770f5SArtem Bityutskiy */
365f10770f5SArtem Bityutskiy break;
366f10770f5SArtem Bityutskiy
3676f06d96fSSascha Hauer err = ubifs_shash_update(c, c->jheads[GCHD].log_hash,
3686f06d96fSSascha Hauer snod->node, snod->len);
3696f06d96fSSascha Hauer if (err)
3706f06d96fSSascha Hauer goto out;
3716f06d96fSSascha Hauer
372f10770f5SArtem Bityutskiy err = move_node(c, sleb, snod, wbuf);
373f10770f5SArtem Bityutskiy if (err)
374f10770f5SArtem Bityutskiy goto out;
3756f06d96fSSascha Hauer moved = 1;
376f10770f5SArtem Bityutskiy }
377f10770f5SArtem Bityutskiy
378f10770f5SArtem Bityutskiy /* Move non-data nodes */
379f10770f5SArtem Bityutskiy list_for_each_entry_safe(snod, tmp, &nondata, list) {
3806f06d96fSSascha Hauer avail = c->leb_size - wbuf->offs - wbuf->used -
3816f06d96fSSascha Hauer ubifs_auth_node_sz(c);
3821e51764aSArtem Bityutskiy if (avail < min)
3831e51764aSArtem Bityutskiy break;
3841e51764aSArtem Bityutskiy
385f10770f5SArtem Bityutskiy if (snod->len > avail) {
386f10770f5SArtem Bityutskiy /*
387f10770f5SArtem Bityutskiy * Keep going only if this is an inode with
388f10770f5SArtem Bityutskiy * some data. Otherwise stop and switch the GC
389f10770f5SArtem Bityutskiy * head. IOW, we assume that data-less inode
390f10770f5SArtem Bityutskiy * nodes and direntry nodes are roughly of the
391f10770f5SArtem Bityutskiy * same size.
392f10770f5SArtem Bityutskiy */
393f10770f5SArtem Bityutskiy if (key_type(c, &snod->key) == UBIFS_DENT_KEY ||
394f10770f5SArtem Bityutskiy snod->len == UBIFS_INO_NODE_SZ)
395f10770f5SArtem Bityutskiy break;
3961e51764aSArtem Bityutskiy continue;
3971e51764aSArtem Bityutskiy }
3981e51764aSArtem Bityutskiy
3996f06d96fSSascha Hauer err = ubifs_shash_update(c, c->jheads[GCHD].log_hash,
4006f06d96fSSascha Hauer snod->node, snod->len);
4016f06d96fSSascha Hauer if (err)
4026f06d96fSSascha Hauer goto out;
4036f06d96fSSascha Hauer
404f10770f5SArtem Bityutskiy err = move_node(c, sleb, snod, wbuf);
405f10770f5SArtem Bityutskiy if (err)
406f10770f5SArtem Bityutskiy goto out;
4076f06d96fSSascha Hauer moved = 1;
4086f06d96fSSascha Hauer }
4096f06d96fSSascha Hauer
4106f06d96fSSascha Hauer if (ubifs_authenticated(c) && moved) {
4116f06d96fSSascha Hauer struct ubifs_auth_node *auth;
4126f06d96fSSascha Hauer
4136f06d96fSSascha Hauer auth = kmalloc(ubifs_auth_node_sz(c), GFP_NOFS);
4146f06d96fSSascha Hauer if (!auth) {
4156f06d96fSSascha Hauer err = -ENOMEM;
4166f06d96fSSascha Hauer goto out;
4176f06d96fSSascha Hauer }
4186f06d96fSSascha Hauer
4196f06d96fSSascha Hauer err = ubifs_prepare_auth_node(c, auth,
4206f06d96fSSascha Hauer c->jheads[GCHD].log_hash);
4216f06d96fSSascha Hauer if (err) {
4226f06d96fSSascha Hauer kfree(auth);
4236f06d96fSSascha Hauer goto out;
4246f06d96fSSascha Hauer }
4256f06d96fSSascha Hauer
4266f06d96fSSascha Hauer err = ubifs_wbuf_write_nolock(wbuf, auth,
4276f06d96fSSascha Hauer ubifs_auth_node_sz(c));
4286f06d96fSSascha Hauer if (err) {
4296f06d96fSSascha Hauer kfree(auth);
4306f06d96fSSascha Hauer goto out;
4316f06d96fSSascha Hauer }
4326f06d96fSSascha Hauer
4336f06d96fSSascha Hauer ubifs_add_dirt(c, wbuf->lnum, ubifs_auth_node_sz(c));
434f10770f5SArtem Bityutskiy }
435f10770f5SArtem Bityutskiy
436f10770f5SArtem Bityutskiy if (list_empty(&sleb->nodes) && list_empty(&nondata))
4371e51764aSArtem Bityutskiy break;
4381e51764aSArtem Bityutskiy
4391e51764aSArtem Bityutskiy /*
4401e51764aSArtem Bityutskiy * Waste the rest of the space in the LEB and switch to the
4411e51764aSArtem Bityutskiy * next LEB.
4421e51764aSArtem Bityutskiy */
4431e51764aSArtem Bityutskiy err = switch_gc_head(c);
4441e51764aSArtem Bityutskiy if (err)
4451e51764aSArtem Bityutskiy goto out;
4461e51764aSArtem Bityutskiy }
4471e51764aSArtem Bityutskiy
4481e51764aSArtem Bityutskiy return 0;
4491e51764aSArtem Bityutskiy
4501e51764aSArtem Bityutskiy out:
451f10770f5SArtem Bityutskiy list_splice_tail(&nondata, &sleb->nodes);
4521e51764aSArtem Bityutskiy return err;
4531e51764aSArtem Bityutskiy }
4541e51764aSArtem Bityutskiy
4551e51764aSArtem Bityutskiy /**
4561e51764aSArtem Bityutskiy * gc_sync_wbufs - sync write-buffers for GC.
4571e51764aSArtem Bityutskiy * @c: UBIFS file-system description object
4581e51764aSArtem Bityutskiy *
4591e51764aSArtem Bityutskiy * We must guarantee that obsoleting nodes are on flash. Unfortunately they may
4601e51764aSArtem Bityutskiy * be in a write-buffer instead. That is, a node could be written to a
4611e51764aSArtem Bityutskiy * write-buffer, obsoleting another node in a LEB that is GC'd. If that LEB is
4621e51764aSArtem Bityutskiy * erased before the write-buffer is sync'd and then there is an unclean
4631e51764aSArtem Bityutskiy * unmount, then an existing node is lost. To avoid this, we sync all
4641e51764aSArtem Bityutskiy * write-buffers.
4651e51764aSArtem Bityutskiy *
4661e51764aSArtem Bityutskiy * This function returns %0 on success or a negative error code on failure.
4671e51764aSArtem Bityutskiy */
gc_sync_wbufs(struct ubifs_info * c)4681e51764aSArtem Bityutskiy static int gc_sync_wbufs(struct ubifs_info *c)
4691e51764aSArtem Bityutskiy {
4701e51764aSArtem Bityutskiy int err, i;
4711e51764aSArtem Bityutskiy
4721e51764aSArtem Bityutskiy for (i = 0; i < c->jhead_cnt; i++) {
4731e51764aSArtem Bityutskiy if (i == GCHD)
4741e51764aSArtem Bityutskiy continue;
4751e51764aSArtem Bityutskiy err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
4761e51764aSArtem Bityutskiy if (err)
4771e51764aSArtem Bityutskiy return err;
4781e51764aSArtem Bityutskiy }
4791e51764aSArtem Bityutskiy return 0;
4801e51764aSArtem Bityutskiy }
4811e51764aSArtem Bityutskiy
4821e51764aSArtem Bityutskiy /**
4831e51764aSArtem Bityutskiy * ubifs_garbage_collect_leb - garbage-collect a logical eraseblock.
4841e51764aSArtem Bityutskiy * @c: UBIFS file-system description object
4851e51764aSArtem Bityutskiy * @lp: describes the LEB to garbage collect
4861e51764aSArtem Bityutskiy *
4871e51764aSArtem Bityutskiy * This function garbage-collects an LEB and returns one of the @LEB_FREED,
4881e51764aSArtem Bityutskiy * @LEB_RETAINED, etc positive codes in case of success, %-EAGAIN if commit is
4891e51764aSArtem Bityutskiy * required, and other negative error codes in case of failures.
4901e51764aSArtem Bityutskiy */
ubifs_garbage_collect_leb(struct ubifs_info * c,struct ubifs_lprops * lp)4911e51764aSArtem Bityutskiy int ubifs_garbage_collect_leb(struct ubifs_info *c, struct ubifs_lprops *lp)
4921e51764aSArtem Bityutskiy {
4931e51764aSArtem Bityutskiy struct ubifs_scan_leb *sleb;
4941e51764aSArtem Bityutskiy struct ubifs_scan_node *snod;
4951e51764aSArtem Bityutskiy struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
4961e51764aSArtem Bityutskiy int err = 0, lnum = lp->lnum;
4971e51764aSArtem Bityutskiy
4986eb61d58SRichard Weinberger ubifs_assert(c, c->gc_lnum != -1 || wbuf->offs + wbuf->used == 0 ||
4991e51764aSArtem Bityutskiy c->need_recovery);
5006eb61d58SRichard Weinberger ubifs_assert(c, c->gc_lnum != lnum);
5016eb61d58SRichard Weinberger ubifs_assert(c, wbuf->lnum != lnum);
5021e51764aSArtem Bityutskiy
5032405f594SArtem Bityutskiy if (lp->free + lp->dirty == c->leb_size) {
5042405f594SArtem Bityutskiy /* Special case - a free LEB */
5052405f594SArtem Bityutskiy dbg_gc("LEB %d is free, return it", lp->lnum);
5066eb61d58SRichard Weinberger ubifs_assert(c, !(lp->flags & LPROPS_INDEX));
5072405f594SArtem Bityutskiy
5082405f594SArtem Bityutskiy if (lp->free != c->leb_size) {
5092405f594SArtem Bityutskiy /*
5102405f594SArtem Bityutskiy * Write buffers must be sync'd before unmapping
5112405f594SArtem Bityutskiy * freeable LEBs, because one of them may contain data
512312c39bdSRichard Weinberger * which obsoletes something in 'lp->lnum'.
5132405f594SArtem Bityutskiy */
5142405f594SArtem Bityutskiy err = gc_sync_wbufs(c);
5152405f594SArtem Bityutskiy if (err)
5162405f594SArtem Bityutskiy return err;
5172405f594SArtem Bityutskiy err = ubifs_change_one_lp(c, lp->lnum, c->leb_size,
5182405f594SArtem Bityutskiy 0, 0, 0, 0);
5192405f594SArtem Bityutskiy if (err)
5202405f594SArtem Bityutskiy return err;
5212405f594SArtem Bityutskiy }
5222405f594SArtem Bityutskiy err = ubifs_leb_unmap(c, lp->lnum);
5232405f594SArtem Bityutskiy if (err)
5242405f594SArtem Bityutskiy return err;
5252405f594SArtem Bityutskiy
5262405f594SArtem Bityutskiy if (c->gc_lnum == -1) {
5272405f594SArtem Bityutskiy c->gc_lnum = lnum;
5282405f594SArtem Bityutskiy return LEB_RETAINED;
5292405f594SArtem Bityutskiy }
5302405f594SArtem Bityutskiy
5312405f594SArtem Bityutskiy return LEB_FREED;
5322405f594SArtem Bityutskiy }
5332405f594SArtem Bityutskiy
5341e51764aSArtem Bityutskiy /*
5351e51764aSArtem Bityutskiy * We scan the entire LEB even though we only really need to scan up to
5361e51764aSArtem Bityutskiy * (c->leb_size - lp->free).
5371e51764aSArtem Bityutskiy */
538348709baSArtem Bityutskiy sleb = ubifs_scan(c, lnum, 0, c->sbuf, 0);
5391e51764aSArtem Bityutskiy if (IS_ERR(sleb))
5401e51764aSArtem Bityutskiy return PTR_ERR(sleb);
5411e51764aSArtem Bityutskiy
5426eb61d58SRichard Weinberger ubifs_assert(c, !list_empty(&sleb->nodes));
5431e51764aSArtem Bityutskiy snod = list_entry(sleb->nodes.next, struct ubifs_scan_node, list);
5441e51764aSArtem Bityutskiy
5451e51764aSArtem Bityutskiy if (snod->type == UBIFS_IDX_NODE) {
5461e51764aSArtem Bityutskiy struct ubifs_gced_idx_leb *idx_gc;
5471e51764aSArtem Bityutskiy
5481e51764aSArtem Bityutskiy dbg_gc("indexing LEB %d (free %d, dirty %d)",
5491e51764aSArtem Bityutskiy lnum, lp->free, lp->dirty);
5501e51764aSArtem Bityutskiy list_for_each_entry(snod, &sleb->nodes, list) {
5511e51764aSArtem Bityutskiy struct ubifs_idx_node *idx = snod->node;
5521e51764aSArtem Bityutskiy int level = le16_to_cpu(idx->level);
5531e51764aSArtem Bityutskiy
5546eb61d58SRichard Weinberger ubifs_assert(c, snod->type == UBIFS_IDX_NODE);
5551e51764aSArtem Bityutskiy key_read(c, ubifs_idx_key(c, idx), &snod->key);
5561e51764aSArtem Bityutskiy err = ubifs_dirty_idx_node(c, &snod->key, level, lnum,
5571e51764aSArtem Bityutskiy snod->offs);
5581e51764aSArtem Bityutskiy if (err)
5591e51764aSArtem Bityutskiy goto out;
5601e51764aSArtem Bityutskiy }
5611e51764aSArtem Bityutskiy
5621e51764aSArtem Bityutskiy idx_gc = kmalloc(sizeof(struct ubifs_gced_idx_leb), GFP_NOFS);
5631e51764aSArtem Bityutskiy if (!idx_gc) {
5641e51764aSArtem Bityutskiy err = -ENOMEM;
5651e51764aSArtem Bityutskiy goto out;
5661e51764aSArtem Bityutskiy }
5671e51764aSArtem Bityutskiy
5681e51764aSArtem Bityutskiy idx_gc->lnum = lnum;
5691e51764aSArtem Bityutskiy idx_gc->unmap = 0;
5701e51764aSArtem Bityutskiy list_add(&idx_gc->list, &c->idx_gc);
5711e51764aSArtem Bityutskiy
5721e51764aSArtem Bityutskiy /*
5731e51764aSArtem Bityutskiy * Don't release the LEB until after the next commit, because
574227c75c9SAdrian Hunter * it may contain data which is needed for recovery. So
5751e51764aSArtem Bityutskiy * although we freed this LEB, it will become usable only after
5761e51764aSArtem Bityutskiy * the commit.
5771e51764aSArtem Bityutskiy */
5781e51764aSArtem Bityutskiy err = ubifs_change_one_lp(c, lnum, c->leb_size, 0, 0,
5791e51764aSArtem Bityutskiy LPROPS_INDEX, 1);
5801e51764aSArtem Bityutskiy if (err)
5811e51764aSArtem Bityutskiy goto out;
5821e51764aSArtem Bityutskiy err = LEB_FREED_IDX;
5831e51764aSArtem Bityutskiy } else {
5841e51764aSArtem Bityutskiy dbg_gc("data LEB %d (free %d, dirty %d)",
5851e51764aSArtem Bityutskiy lnum, lp->free, lp->dirty);
5861e51764aSArtem Bityutskiy
5871e51764aSArtem Bityutskiy err = move_nodes(c, sleb);
5881e51764aSArtem Bityutskiy if (err)
5896dcfac4fSAdrian Hunter goto out_inc_seq;
5901e51764aSArtem Bityutskiy
5911e51764aSArtem Bityutskiy err = gc_sync_wbufs(c);
5921e51764aSArtem Bityutskiy if (err)
5936dcfac4fSAdrian Hunter goto out_inc_seq;
5941e51764aSArtem Bityutskiy
5951e51764aSArtem Bityutskiy err = ubifs_change_one_lp(c, lnum, c->leb_size, 0, 0, 0, 0);
5961e51764aSArtem Bityutskiy if (err)
5976dcfac4fSAdrian Hunter goto out_inc_seq;
5981e51764aSArtem Bityutskiy
599601c0bc4SAdrian Hunter /* Allow for races with TNC */
600601c0bc4SAdrian Hunter c->gced_lnum = lnum;
601601c0bc4SAdrian Hunter smp_wmb();
602601c0bc4SAdrian Hunter c->gc_seq += 1;
603601c0bc4SAdrian Hunter smp_wmb();
604601c0bc4SAdrian Hunter
6051e51764aSArtem Bityutskiy if (c->gc_lnum == -1) {
6061e51764aSArtem Bityutskiy c->gc_lnum = lnum;
6071e51764aSArtem Bityutskiy err = LEB_RETAINED;
6081e51764aSArtem Bityutskiy } else {
6091e51764aSArtem Bityutskiy err = ubifs_wbuf_sync_nolock(wbuf);
6101e51764aSArtem Bityutskiy if (err)
6111e51764aSArtem Bityutskiy goto out;
6121e51764aSArtem Bityutskiy
6131e51764aSArtem Bityutskiy err = ubifs_leb_unmap(c, lnum);
6141e51764aSArtem Bityutskiy if (err)
6151e51764aSArtem Bityutskiy goto out;
6161e51764aSArtem Bityutskiy
6171e51764aSArtem Bityutskiy err = LEB_FREED;
6181e51764aSArtem Bityutskiy }
6191e51764aSArtem Bityutskiy }
6201e51764aSArtem Bityutskiy
6211e51764aSArtem Bityutskiy out:
6221e51764aSArtem Bityutskiy ubifs_scan_destroy(sleb);
6231e51764aSArtem Bityutskiy return err;
6246dcfac4fSAdrian Hunter
6256dcfac4fSAdrian Hunter out_inc_seq:
6266dcfac4fSAdrian Hunter /* We may have moved at least some nodes so allow for races with TNC */
6276dcfac4fSAdrian Hunter c->gced_lnum = lnum;
6286dcfac4fSAdrian Hunter smp_wmb();
6296dcfac4fSAdrian Hunter c->gc_seq += 1;
6306dcfac4fSAdrian Hunter smp_wmb();
6316dcfac4fSAdrian Hunter goto out;
6321e51764aSArtem Bityutskiy }
6331e51764aSArtem Bityutskiy
6341e51764aSArtem Bityutskiy /**
6351e51764aSArtem Bityutskiy * ubifs_garbage_collect - UBIFS garbage collector.
6361e51764aSArtem Bityutskiy * @c: UBIFS file-system description object
6371e51764aSArtem Bityutskiy * @anyway: do GC even if there are free LEBs
6381e51764aSArtem Bityutskiy *
6391e51764aSArtem Bityutskiy * This function does out-of-place garbage collection. The return codes are:
6401e51764aSArtem Bityutskiy * o positive LEB number if the LEB has been freed and may be used;
6411e51764aSArtem Bityutskiy * o %-EAGAIN if the caller has to run commit;
6421e51764aSArtem Bityutskiy * o %-ENOSPC if GC failed to make any progress;
6431e51764aSArtem Bityutskiy * o other negative error codes in case of other errors.
6441e51764aSArtem Bityutskiy *
6451e51764aSArtem Bityutskiy * Garbage collector writes data to the journal when GC'ing data LEBs, and just
6461e51764aSArtem Bityutskiy * marking indexing nodes dirty when GC'ing indexing LEBs. Thus, at some point
6471e51764aSArtem Bityutskiy * commit may be required. But commit cannot be run from inside GC, because the
6481e51764aSArtem Bityutskiy * caller might be holding the commit lock, so %-EAGAIN is returned instead;
6491e51764aSArtem Bityutskiy * And this error code means that the caller has to run commit, and re-run GC
6501e51764aSArtem Bityutskiy * if there is still no free space.
6511e51764aSArtem Bityutskiy *
6521e51764aSArtem Bityutskiy * There are many reasons why this function may return %-EAGAIN:
6531e51764aSArtem Bityutskiy * o the log is full and there is no space to write an LEB reference for
6541e51764aSArtem Bityutskiy * @c->gc_lnum;
6551e51764aSArtem Bityutskiy * o the journal is too large and exceeds size limitations;
6561e51764aSArtem Bityutskiy * o GC moved indexing LEBs, but they can be used only after the commit;
6571e51764aSArtem Bityutskiy * o the shrinker fails to find clean znodes to free and requests the commit;
6581e51764aSArtem Bityutskiy * o etc.
6591e51764aSArtem Bityutskiy *
6601e51764aSArtem Bityutskiy * Note, if the file-system is close to be full, this function may return
6611e51764aSArtem Bityutskiy * %-EAGAIN infinitely, so the caller has to limit amount of re-invocations of
6621e51764aSArtem Bityutskiy * the function. E.g., this happens if the limits on the journal size are too
6631e51764aSArtem Bityutskiy * tough and GC writes too much to the journal before an LEB is freed. This
6641e51764aSArtem Bityutskiy * might also mean that the journal is too large, and the TNC becomes to big,
6651e51764aSArtem Bityutskiy * so that the shrinker is constantly called, finds not clean znodes to free,
6661e51764aSArtem Bityutskiy * and requests commit. Well, this may also happen if the journal is all right,
6671e51764aSArtem Bityutskiy * but another kernel process consumes too much memory. Anyway, infinite
6681e51764aSArtem Bityutskiy * %-EAGAIN may happen, but in some extreme/misconfiguration cases.
6691e51764aSArtem Bityutskiy */
ubifs_garbage_collect(struct ubifs_info * c,int anyway)6701e51764aSArtem Bityutskiy int ubifs_garbage_collect(struct ubifs_info *c, int anyway)
6711e51764aSArtem Bityutskiy {
6721e51764aSArtem Bityutskiy int i, err, ret, min_space = c->dead_wm;
6731e51764aSArtem Bityutskiy struct ubifs_lprops lp;
6741e51764aSArtem Bityutskiy struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
6751e51764aSArtem Bityutskiy
6761e51764aSArtem Bityutskiy ubifs_assert_cmt_locked(c);
6776eb61d58SRichard Weinberger ubifs_assert(c, !c->ro_media && !c->ro_mount);
6781e51764aSArtem Bityutskiy
6791e51764aSArtem Bityutskiy if (ubifs_gc_should_commit(c))
6801e51764aSArtem Bityutskiy return -EAGAIN;
6811e51764aSArtem Bityutskiy
6821e51764aSArtem Bityutskiy mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
6831e51764aSArtem Bityutskiy
6842680d722SArtem Bityutskiy if (c->ro_error) {
6851e51764aSArtem Bityutskiy ret = -EROFS;
6861e51764aSArtem Bityutskiy goto out_unlock;
6871e51764aSArtem Bityutskiy }
6881e51764aSArtem Bityutskiy
6891e51764aSArtem Bityutskiy /* We expect the write-buffer to be empty on entry */
6906eb61d58SRichard Weinberger ubifs_assert(c, !wbuf->used);
6911e51764aSArtem Bityutskiy
6921e51764aSArtem Bityutskiy for (i = 0; ; i++) {
693e71d1a59Swang.bo116@zte.com.cn int space_before, space_after;
6941e51764aSArtem Bityutskiy
69588618feeSBaokun Li /* Maybe continue after find and break before find */
69688618feeSBaokun Li lp.lnum = -1;
69788618feeSBaokun Li
6981e51764aSArtem Bityutskiy cond_resched();
6991e51764aSArtem Bityutskiy
7001e51764aSArtem Bityutskiy /* Give the commit an opportunity to run */
7011e51764aSArtem Bityutskiy if (ubifs_gc_should_commit(c)) {
7021e51764aSArtem Bityutskiy ret = -EAGAIN;
7031e51764aSArtem Bityutskiy break;
7041e51764aSArtem Bityutskiy }
7051e51764aSArtem Bityutskiy
7061e51764aSArtem Bityutskiy if (i > SOFT_LEBS_LIMIT && !list_empty(&c->idx_gc)) {
7071e51764aSArtem Bityutskiy /*
7081e51764aSArtem Bityutskiy * We've done enough iterations. Indexing LEBs were
7091e51764aSArtem Bityutskiy * moved and will be available after the commit.
7101e51764aSArtem Bityutskiy */
7111e51764aSArtem Bityutskiy dbg_gc("soft limit, some index LEBs GC'ed, -EAGAIN");
7121e51764aSArtem Bityutskiy ubifs_commit_required(c);
7131e51764aSArtem Bityutskiy ret = -EAGAIN;
7141e51764aSArtem Bityutskiy break;
7151e51764aSArtem Bityutskiy }
7161e51764aSArtem Bityutskiy
7171e51764aSArtem Bityutskiy if (i > HARD_LEBS_LIMIT) {
7181e51764aSArtem Bityutskiy /*
7191e51764aSArtem Bityutskiy * We've moved too many LEBs and have not made
7201e51764aSArtem Bityutskiy * progress, give up.
7211e51764aSArtem Bityutskiy */
7221e51764aSArtem Bityutskiy dbg_gc("hard limit, -ENOSPC");
7231e51764aSArtem Bityutskiy ret = -ENOSPC;
7241e51764aSArtem Bityutskiy break;
7251e51764aSArtem Bityutskiy }
7261e51764aSArtem Bityutskiy
7271e51764aSArtem Bityutskiy /*
7281e51764aSArtem Bityutskiy * Empty and freeable LEBs can turn up while we waited for
7291e51764aSArtem Bityutskiy * the wbuf lock, or while we have been running GC. In that
7301e51764aSArtem Bityutskiy * case, we should just return one of those instead of
7311e51764aSArtem Bityutskiy * continuing to GC dirty LEBs. Hence we request
7321e51764aSArtem Bityutskiy * 'ubifs_find_dirty_leb()' to return an empty LEB if it can.
7331e51764aSArtem Bityutskiy */
7341e51764aSArtem Bityutskiy ret = ubifs_find_dirty_leb(c, &lp, min_space, anyway ? 0 : 1);
7351e51764aSArtem Bityutskiy if (ret) {
7361e51764aSArtem Bityutskiy if (ret == -ENOSPC)
7371e51764aSArtem Bityutskiy dbg_gc("no more dirty LEBs");
7381e51764aSArtem Bityutskiy break;
7391e51764aSArtem Bityutskiy }
7401e51764aSArtem Bityutskiy
74179fda517SArtem Bityutskiy dbg_gc("found LEB %d: free %d, dirty %d, sum %d (min. space %d)",
74279fda517SArtem Bityutskiy lp.lnum, lp.free, lp.dirty, lp.free + lp.dirty,
74379fda517SArtem Bityutskiy min_space);
7441e51764aSArtem Bityutskiy
7451e51764aSArtem Bityutskiy space_before = c->leb_size - wbuf->offs - wbuf->used;
7461e51764aSArtem Bityutskiy if (wbuf->lnum == -1)
7471e51764aSArtem Bityutskiy space_before = 0;
7481e51764aSArtem Bityutskiy
7491e51764aSArtem Bityutskiy ret = ubifs_garbage_collect_leb(c, &lp);
7501e51764aSArtem Bityutskiy if (ret < 0) {
751efe1881fSArtem Bityutskiy if (ret == -EAGAIN) {
7521e51764aSArtem Bityutskiy /*
753efe1881fSArtem Bityutskiy * This is not error, so we have to return the
754efe1881fSArtem Bityutskiy * LEB to lprops. But if 'ubifs_return_leb()'
755efe1881fSArtem Bityutskiy * fails, its failure code is propagated to the
756efe1881fSArtem Bityutskiy * caller instead of the original '-EAGAIN'.
7571e51764aSArtem Bityutskiy */
7581e51764aSArtem Bityutskiy err = ubifs_return_leb(c, lp.lnum);
759*50cb4373SBaokun Li if (err) {
7601e51764aSArtem Bityutskiy ret = err;
761*50cb4373SBaokun Li /*
762*50cb4373SBaokun Li * An LEB may always be "taken",
763*50cb4373SBaokun Li * so setting ubifs to read-only,
764*50cb4373SBaokun Li * and then executing sync wbuf will
765*50cb4373SBaokun Li * return -EROFS and enter the "out"
766*50cb4373SBaokun Li * error branch.
767*50cb4373SBaokun Li */
768*50cb4373SBaokun Li ubifs_ro_mode(c, ret);
769*50cb4373SBaokun Li }
7700d765021SBaokun Li /* Maybe double return LEB if goto out */
7710d765021SBaokun Li lp.lnum = -1;
7721e51764aSArtem Bityutskiy break;
7731e51764aSArtem Bityutskiy }
7741e51764aSArtem Bityutskiy goto out;
7751e51764aSArtem Bityutskiy }
7761e51764aSArtem Bityutskiy
7771e51764aSArtem Bityutskiy if (ret == LEB_FREED) {
7781e51764aSArtem Bityutskiy /* An LEB has been freed and is ready for use */
7791e51764aSArtem Bityutskiy dbg_gc("LEB %d freed, return", lp.lnum);
7801e51764aSArtem Bityutskiy ret = lp.lnum;
7811e51764aSArtem Bityutskiy break;
7821e51764aSArtem Bityutskiy }
7831e51764aSArtem Bityutskiy
7841e51764aSArtem Bityutskiy if (ret == LEB_FREED_IDX) {
7851e51764aSArtem Bityutskiy /*
7861e51764aSArtem Bityutskiy * This was an indexing LEB and it cannot be
7871e51764aSArtem Bityutskiy * immediately used. And instead of requesting the
7881e51764aSArtem Bityutskiy * commit straight away, we try to garbage collect some
7891e51764aSArtem Bityutskiy * more.
7901e51764aSArtem Bityutskiy */
7911e51764aSArtem Bityutskiy dbg_gc("indexing LEB %d freed, continue", lp.lnum);
7921e51764aSArtem Bityutskiy continue;
7931e51764aSArtem Bityutskiy }
7941e51764aSArtem Bityutskiy
7956eb61d58SRichard Weinberger ubifs_assert(c, ret == LEB_RETAINED);
7961e51764aSArtem Bityutskiy space_after = c->leb_size - wbuf->offs - wbuf->used;
7971e51764aSArtem Bityutskiy dbg_gc("LEB %d retained, freed %d bytes", lp.lnum,
7981e51764aSArtem Bityutskiy space_after - space_before);
7991e51764aSArtem Bityutskiy
8001e51764aSArtem Bityutskiy if (space_after > space_before) {
8011e51764aSArtem Bityutskiy /* GC makes progress, keep working */
8021e51764aSArtem Bityutskiy min_space >>= 1;
8031e51764aSArtem Bityutskiy if (min_space < c->dead_wm)
8041e51764aSArtem Bityutskiy min_space = c->dead_wm;
8051e51764aSArtem Bityutskiy continue;
8061e51764aSArtem Bityutskiy }
8071e51764aSArtem Bityutskiy
8081e51764aSArtem Bityutskiy dbg_gc("did not make progress");
8091e51764aSArtem Bityutskiy
8101e51764aSArtem Bityutskiy /*
8111e51764aSArtem Bityutskiy * GC moved an LEB bud have not done any progress. This means
8121e51764aSArtem Bityutskiy * that the previous GC head LEB contained too few free space
8131e51764aSArtem Bityutskiy * and the LEB which was GC'ed contained only large nodes which
8141e51764aSArtem Bityutskiy * did not fit that space.
8151e51764aSArtem Bityutskiy *
8161e51764aSArtem Bityutskiy * We can do 2 things:
8171e51764aSArtem Bityutskiy * 1. pick another LEB in a hope it'll contain a small node
8181e51764aSArtem Bityutskiy * which will fit the space we have at the end of current GC
8191e51764aSArtem Bityutskiy * head LEB, but there is no guarantee, so we try this out
8201e51764aSArtem Bityutskiy * unless we have already been working for too long;
8211e51764aSArtem Bityutskiy * 2. request an LEB with more dirty space, which will force
8221e51764aSArtem Bityutskiy * 'ubifs_find_dirty_leb()' to start scanning the lprops
8231e51764aSArtem Bityutskiy * table, instead of just picking one from the heap
8241e51764aSArtem Bityutskiy * (previously it already picked the dirtiest LEB).
8251e51764aSArtem Bityutskiy */
8261e51764aSArtem Bityutskiy if (i < SOFT_LEBS_LIMIT) {
8271e51764aSArtem Bityutskiy dbg_gc("try again");
8281e51764aSArtem Bityutskiy continue;
8291e51764aSArtem Bityutskiy }
8301e51764aSArtem Bityutskiy
8311e51764aSArtem Bityutskiy min_space <<= 1;
8321e51764aSArtem Bityutskiy if (min_space > c->dark_wm)
8331e51764aSArtem Bityutskiy min_space = c->dark_wm;
8341e51764aSArtem Bityutskiy dbg_gc("set min. space to %d", min_space);
8351e51764aSArtem Bityutskiy }
8361e51764aSArtem Bityutskiy
8371e51764aSArtem Bityutskiy if (ret == -ENOSPC && !list_empty(&c->idx_gc)) {
8381e51764aSArtem Bityutskiy dbg_gc("no space, some index LEBs GC'ed, -EAGAIN");
8391e51764aSArtem Bityutskiy ubifs_commit_required(c);
8401e51764aSArtem Bityutskiy ret = -EAGAIN;
8411e51764aSArtem Bityutskiy }
8421e51764aSArtem Bityutskiy
8431e51764aSArtem Bityutskiy err = ubifs_wbuf_sync_nolock(wbuf);
8441e51764aSArtem Bityutskiy if (!err)
8451e51764aSArtem Bityutskiy err = ubifs_leb_unmap(c, c->gc_lnum);
8461e51764aSArtem Bityutskiy if (err) {
8471e51764aSArtem Bityutskiy ret = err;
8481e51764aSArtem Bityutskiy goto out;
8491e51764aSArtem Bityutskiy }
8501e51764aSArtem Bityutskiy out_unlock:
8511e51764aSArtem Bityutskiy mutex_unlock(&wbuf->io_mutex);
8521e51764aSArtem Bityutskiy return ret;
8531e51764aSArtem Bityutskiy
8541e51764aSArtem Bityutskiy out:
8556eb61d58SRichard Weinberger ubifs_assert(c, ret < 0);
8566eb61d58SRichard Weinberger ubifs_assert(c, ret != -ENOSPC && ret != -EAGAIN);
8571e51764aSArtem Bityutskiy ubifs_wbuf_sync_nolock(wbuf);
8585ffef88fSArtem Bityutskiy ubifs_ro_mode(c, ret);
8591e51764aSArtem Bityutskiy mutex_unlock(&wbuf->io_mutex);
86088618feeSBaokun Li if (lp.lnum != -1)
8611e51764aSArtem Bityutskiy ubifs_return_leb(c, lp.lnum);
8621e51764aSArtem Bityutskiy return ret;
8631e51764aSArtem Bityutskiy }
8641e51764aSArtem Bityutskiy
8651e51764aSArtem Bityutskiy /**
8661e51764aSArtem Bityutskiy * ubifs_gc_start_commit - garbage collection at start of commit.
8671e51764aSArtem Bityutskiy * @c: UBIFS file-system description object
8681e51764aSArtem Bityutskiy *
8691e51764aSArtem Bityutskiy * If a LEB has only dirty and free space, then we may safely unmap it and make
8701e51764aSArtem Bityutskiy * it free. Note, we cannot do this with indexing LEBs because dirty space may
8711e51764aSArtem Bityutskiy * correspond index nodes that are required for recovery. In that case, the
8721e51764aSArtem Bityutskiy * LEB cannot be unmapped until after the next commit.
8731e51764aSArtem Bityutskiy *
8741e51764aSArtem Bityutskiy * This function returns %0 upon success and a negative error code upon failure.
8751e51764aSArtem Bityutskiy */
ubifs_gc_start_commit(struct ubifs_info * c)8761e51764aSArtem Bityutskiy int ubifs_gc_start_commit(struct ubifs_info *c)
8771e51764aSArtem Bityutskiy {
8781e51764aSArtem Bityutskiy struct ubifs_gced_idx_leb *idx_gc;
8791e51764aSArtem Bityutskiy const struct ubifs_lprops *lp;
8801e51764aSArtem Bityutskiy int err = 0, flags;
8811e51764aSArtem Bityutskiy
8821e51764aSArtem Bityutskiy ubifs_get_lprops(c);
8831e51764aSArtem Bityutskiy
8841e51764aSArtem Bityutskiy /*
8851e51764aSArtem Bityutskiy * Unmap (non-index) freeable LEBs. Note that recovery requires that all
8861e51764aSArtem Bityutskiy * wbufs are sync'd before this, which is done in 'do_commit()'.
8871e51764aSArtem Bityutskiy */
8881e51764aSArtem Bityutskiy while (1) {
8891e51764aSArtem Bityutskiy lp = ubifs_fast_find_freeable(c);
8901e51764aSArtem Bityutskiy if (!lp)
8911e51764aSArtem Bityutskiy break;
8926eb61d58SRichard Weinberger ubifs_assert(c, !(lp->flags & LPROPS_TAKEN));
8936eb61d58SRichard Weinberger ubifs_assert(c, !(lp->flags & LPROPS_INDEX));
8941e51764aSArtem Bityutskiy err = ubifs_leb_unmap(c, lp->lnum);
8951e51764aSArtem Bityutskiy if (err)
8961e51764aSArtem Bityutskiy goto out;
8971e51764aSArtem Bityutskiy lp = ubifs_change_lp(c, lp, c->leb_size, 0, lp->flags, 0);
8988d47aef4SHirofumi Nakagawa if (IS_ERR(lp)) {
8991e51764aSArtem Bityutskiy err = PTR_ERR(lp);
9001e51764aSArtem Bityutskiy goto out;
9011e51764aSArtem Bityutskiy }
9026eb61d58SRichard Weinberger ubifs_assert(c, !(lp->flags & LPROPS_TAKEN));
9036eb61d58SRichard Weinberger ubifs_assert(c, !(lp->flags & LPROPS_INDEX));
9041e51764aSArtem Bityutskiy }
9051e51764aSArtem Bityutskiy
9061e51764aSArtem Bityutskiy /* Mark GC'd index LEBs OK to unmap after this commit finishes */
9071e51764aSArtem Bityutskiy list_for_each_entry(idx_gc, &c->idx_gc, list)
9081e51764aSArtem Bityutskiy idx_gc->unmap = 1;
9091e51764aSArtem Bityutskiy
9101e51764aSArtem Bityutskiy /* Record index freeable LEBs for unmapping after commit */
9111e51764aSArtem Bityutskiy while (1) {
9121e51764aSArtem Bityutskiy lp = ubifs_fast_find_frdi_idx(c);
9138d47aef4SHirofumi Nakagawa if (IS_ERR(lp)) {
9141e51764aSArtem Bityutskiy err = PTR_ERR(lp);
9151e51764aSArtem Bityutskiy goto out;
9161e51764aSArtem Bityutskiy }
9171e51764aSArtem Bityutskiy if (!lp)
9181e51764aSArtem Bityutskiy break;
9191e51764aSArtem Bityutskiy idx_gc = kmalloc(sizeof(struct ubifs_gced_idx_leb), GFP_NOFS);
9201e51764aSArtem Bityutskiy if (!idx_gc) {
9211e51764aSArtem Bityutskiy err = -ENOMEM;
9221e51764aSArtem Bityutskiy goto out;
9231e51764aSArtem Bityutskiy }
9246eb61d58SRichard Weinberger ubifs_assert(c, !(lp->flags & LPROPS_TAKEN));
9256eb61d58SRichard Weinberger ubifs_assert(c, lp->flags & LPROPS_INDEX);
9261e51764aSArtem Bityutskiy /* Don't release the LEB until after the next commit */
9271e51764aSArtem Bityutskiy flags = (lp->flags | LPROPS_TAKEN) ^ LPROPS_INDEX;
9281e51764aSArtem Bityutskiy lp = ubifs_change_lp(c, lp, c->leb_size, 0, flags, 1);
9298d47aef4SHirofumi Nakagawa if (IS_ERR(lp)) {
9301e51764aSArtem Bityutskiy err = PTR_ERR(lp);
9311e51764aSArtem Bityutskiy kfree(idx_gc);
9321e51764aSArtem Bityutskiy goto out;
9331e51764aSArtem Bityutskiy }
9346eb61d58SRichard Weinberger ubifs_assert(c, lp->flags & LPROPS_TAKEN);
9356eb61d58SRichard Weinberger ubifs_assert(c, !(lp->flags & LPROPS_INDEX));
9361e51764aSArtem Bityutskiy idx_gc->lnum = lp->lnum;
9371e51764aSArtem Bityutskiy idx_gc->unmap = 1;
9381e51764aSArtem Bityutskiy list_add(&idx_gc->list, &c->idx_gc);
9391e51764aSArtem Bityutskiy }
9401e51764aSArtem Bityutskiy out:
9411e51764aSArtem Bityutskiy ubifs_release_lprops(c);
9421e51764aSArtem Bityutskiy return err;
9431e51764aSArtem Bityutskiy }
9441e51764aSArtem Bityutskiy
9451e51764aSArtem Bityutskiy /**
9461e51764aSArtem Bityutskiy * ubifs_gc_end_commit - garbage collection at end of commit.
9471e51764aSArtem Bityutskiy * @c: UBIFS file-system description object
9481e51764aSArtem Bityutskiy *
9491e51764aSArtem Bityutskiy * This function completes out-of-place garbage collection of index LEBs.
9501e51764aSArtem Bityutskiy */
ubifs_gc_end_commit(struct ubifs_info * c)9511e51764aSArtem Bityutskiy int ubifs_gc_end_commit(struct ubifs_info *c)
9521e51764aSArtem Bityutskiy {
9531e51764aSArtem Bityutskiy struct ubifs_gced_idx_leb *idx_gc, *tmp;
9541e51764aSArtem Bityutskiy struct ubifs_wbuf *wbuf;
9551e51764aSArtem Bityutskiy int err = 0;
9561e51764aSArtem Bityutskiy
9571e51764aSArtem Bityutskiy wbuf = &c->jheads[GCHD].wbuf;
9581e51764aSArtem Bityutskiy mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
9591e51764aSArtem Bityutskiy list_for_each_entry_safe(idx_gc, tmp, &c->idx_gc, list)
9601e51764aSArtem Bityutskiy if (idx_gc->unmap) {
9611e51764aSArtem Bityutskiy dbg_gc("LEB %d", idx_gc->lnum);
9621e51764aSArtem Bityutskiy err = ubifs_leb_unmap(c, idx_gc->lnum);
9631e51764aSArtem Bityutskiy if (err)
9641e51764aSArtem Bityutskiy goto out;
9651e51764aSArtem Bityutskiy err = ubifs_change_one_lp(c, idx_gc->lnum, LPROPS_NC,
9661e51764aSArtem Bityutskiy LPROPS_NC, 0, LPROPS_TAKEN, -1);
9671e51764aSArtem Bityutskiy if (err)
9681e51764aSArtem Bityutskiy goto out;
9691e51764aSArtem Bityutskiy list_del(&idx_gc->list);
9701e51764aSArtem Bityutskiy kfree(idx_gc);
9711e51764aSArtem Bityutskiy }
9721e51764aSArtem Bityutskiy out:
9731e51764aSArtem Bityutskiy mutex_unlock(&wbuf->io_mutex);
9741e51764aSArtem Bityutskiy return err;
9751e51764aSArtem Bityutskiy }
9761e51764aSArtem Bityutskiy
9771e51764aSArtem Bityutskiy /**
9781e51764aSArtem Bityutskiy * ubifs_destroy_idx_gc - destroy idx_gc list.
9791e51764aSArtem Bityutskiy * @c: UBIFS file-system description object
9801e51764aSArtem Bityutskiy *
981b466f17dSAdrian Hunter * This function destroys the @c->idx_gc list. It is called when unmounting
982b466f17dSAdrian Hunter * so locks are not needed. Returns zero in case of success and a negative
983b466f17dSAdrian Hunter * error code in case of failure.
9841e51764aSArtem Bityutskiy */
ubifs_destroy_idx_gc(struct ubifs_info * c)985b466f17dSAdrian Hunter void ubifs_destroy_idx_gc(struct ubifs_info *c)
9861e51764aSArtem Bityutskiy {
9871e51764aSArtem Bityutskiy while (!list_empty(&c->idx_gc)) {
9881e51764aSArtem Bityutskiy struct ubifs_gced_idx_leb *idx_gc;
9891e51764aSArtem Bityutskiy
9901e51764aSArtem Bityutskiy idx_gc = list_entry(c->idx_gc.next, struct ubifs_gced_idx_leb,
9911e51764aSArtem Bityutskiy list);
992b466f17dSAdrian Hunter c->idx_gc_cnt -= 1;
9931e51764aSArtem Bityutskiy list_del(&idx_gc->list);
9941e51764aSArtem Bityutskiy kfree(idx_gc);
9951e51764aSArtem Bityutskiy }
9961e51764aSArtem Bityutskiy }
9971e51764aSArtem Bityutskiy
9981e51764aSArtem Bityutskiy /**
9991e51764aSArtem Bityutskiy * ubifs_get_idx_gc_leb - get a LEB from GC'd index LEB list.
10001e51764aSArtem Bityutskiy * @c: UBIFS file-system description object
10011e51764aSArtem Bityutskiy *
10021e51764aSArtem Bityutskiy * Called during start commit so locks are not needed.
10031e51764aSArtem Bityutskiy */
ubifs_get_idx_gc_leb(struct ubifs_info * c)10041e51764aSArtem Bityutskiy int ubifs_get_idx_gc_leb(struct ubifs_info *c)
10051e51764aSArtem Bityutskiy {
10061e51764aSArtem Bityutskiy struct ubifs_gced_idx_leb *idx_gc;
10071e51764aSArtem Bityutskiy int lnum;
10081e51764aSArtem Bityutskiy
10091e51764aSArtem Bityutskiy if (list_empty(&c->idx_gc))
10101e51764aSArtem Bityutskiy return -ENOSPC;
10111e51764aSArtem Bityutskiy idx_gc = list_entry(c->idx_gc.next, struct ubifs_gced_idx_leb, list);
10121e51764aSArtem Bityutskiy lnum = idx_gc->lnum;
10131e51764aSArtem Bityutskiy /* c->idx_gc_cnt is updated by the caller when lprops are updated */
10141e51764aSArtem Bityutskiy list_del(&idx_gc->list);
10151e51764aSArtem Bityutskiy kfree(idx_gc);
10161e51764aSArtem Bityutskiy return lnum;
10171e51764aSArtem Bityutskiy }
1018