// SPDX-License-Identifier: GPL-2.0+
/*
 * NILFS segment usage file.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Koji Sato.
 * Revised by Ryusuke Konishi.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/errno.h>
#include "mdt.h"
#include "sufile.h"

#include <trace/events/nilfs2.h>

/**
 * struct nilfs_sufile_info - on-memory private data of sufile
 * @mi: on-memory private data of metadata file
 * @ncleansegs: number of clean segments
 * @allocmin: lower limit of allocatable segment range
 * @allocmax: upper limit of allocatable segment range
 */
struct nilfs_sufile_info {
	struct nilfs_mdt_info mi;
	unsigned long ncleansegs;/* number of clean segments */
	__u64 allocmin;		/* lower limit of allocatable segment range */
	__u64 allocmax;		/* upper limit of allocatable segment range */
};

static inline struct nilfs_sufile_info *NILFS_SUI(struct inode *sufile)
{
	return (struct nilfs_sufile_info *)NILFS_MDT(sufile);
}

static inline unsigned long
nilfs_sufile_segment_usages_per_block(const struct inode *sufile)
{
	return NILFS_MDT(sufile)->mi_entries_per_block;
}

static unsigned long
nilfs_sufile_get_blkoff(const struct inode *sufile, __u64 segnum)
{
	__u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;

	do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
	return (unsigned long)t;
}

static unsigned long
nilfs_sufile_get_offset(const struct inode *sufile, __u64 segnum)
{
	__u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;

	return do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
}

static unsigned long
nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr,
				     __u64 max)
{
	return min_t(unsigned long,
		     nilfs_sufile_segment_usages_per_block(sufile) -
		     nilfs_sufile_get_offset(sufile, curr),
		     max - curr + 1);
}
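
/*
 * Illustrative note (not part of the original file): the two helpers above
 * map a segment number to a sufile block and an in-block index with
 *
 *	blkoff = (segnum + F) / N
 *	offset = (segnum + F) % N
 *
 * where F is mi_first_entry_offset and N is the number of segment usage
 * entries per block.  With made-up values F = 2 and N = 256, segment 600
 * would live in block (600 + 2) / 256 = 2 at index (600 + 2) % 256 = 90.
 * The real values depend on the block size and the entry size.
 */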

static struct nilfs_segment_usage *
nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum,
				     struct buffer_head *bh, void *kaddr)
{
	return kaddr + bh_offset(bh) +
		nilfs_sufile_get_offset(sufile, segnum) *
		NILFS_MDT(sufile)->mi_entry_size;
}

static inline int nilfs_sufile_get_header_block(struct inode *sufile,
						struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile, 0, 0, NULL, bhp);
}

static inline int
nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
				     int create, struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile,
				   nilfs_sufile_get_blkoff(sufile, segnum),
				   create, NULL, bhp);
}

static int nilfs_sufile_delete_segment_usage_block(struct inode *sufile,
						   __u64 segnum)
{
	return nilfs_mdt_delete_block(sufile,
				      nilfs_sufile_get_blkoff(sufile, segnum));
}

static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
				     u64 ncleanadd, u64 ndirtyadd)
{
	struct nilfs_sufile_header *header;
	void *kaddr;

	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
	le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(header_bh);
}

/**
 * nilfs_sufile_get_ncleansegs - return the number of clean segments
 * @sufile: inode of segment usage file
 */
unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile)
{
	return NILFS_SUI(sufile)->ncleansegs;
}

/**
 * nilfs_sufile_updatev - modify multiple segment usages at a time
 * @sufile: inode of segment usage file
 * @segnumv: array of segment numbers
 * @nsegs: size of @segnumv array
 * @create: creation flag
 * @ndone: place to store number of modified segments on @segnumv
 * @dofunc: primitive operation for the update
 *
 * Description: nilfs_sufile_updatev() repeatedly calls @dofunc
 * against the given array of segments.  The @dofunc is called with
 * buffers of a header block and the sufile block in which the target
 * segment usage entry is contained.  If @ndone is given, the number
 * of successfully modified segments from the head is stored in the
 * place @ndone points to.
 *
 * Return Value: On success, zero is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - Given segment usage is in hole block (may be returned if
 * @create is zero)
 *
 * %-EINVAL - Invalid segment usage number
 */
int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs,
			 int create, size_t *ndone,
			 void (*dofunc)(struct inode *, __u64,
					struct buffer_head *,
					struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	unsigned long blkoff, prev_blkoff;
	__u64 *seg;
	size_t nerr = 0, n = 0;
	int ret = 0;

	if (unlikely(nsegs == 0))
		goto out;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	for (seg = segnumv; seg < segnumv + nsegs; seg++) {
		if (unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) {
			nilfs_warn(sufile->i_sb,
				   "%s: invalid segment number: %llu",
				   __func__, (unsigned long long)*seg);
			nerr++;
		}
	}
	if (nerr > 0) {
		ret = -EINVAL;
		goto out_sem;
	}

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	seg = segnumv;
	blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
	ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
	if (ret < 0)
		goto out_header;

	for (;;) {
		dofunc(sufile, *seg, header_bh, bh);

		if (++seg >= segnumv + nsegs)
			break;
		prev_blkoff = blkoff;
		blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
		if (blkoff == prev_blkoff)
			continue;

		/* get different block */
		brelse(bh);
		ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
		if (unlikely(ret < 0))
			goto out_header;
	}
	brelse(bh);

out_header:
	n = seg - segnumv;
	brelse(header_bh);
out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
out:
	if (ndone)
		*ndone = n;
	return ret;
}
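
/*
 * Illustrative sketch (not part of the original file): callers usually reach
 * nilfs_sufile_updatev() through thin wrappers that bind a specific @dofunc.
 * Assuming an array of segment numbers collected by the garbage collector,
 * freeing them in one pass could look roughly like:
 *
 *	size_t ndone;
 *	int err = nilfs_sufile_updatev(sufile, segnumv, nsegs, 0, &ndone,
 *				       nilfs_sufile_do_free);
 *	if (err)
 *		pr_debug("freed only %zu of %zu segments\n", ndone, nsegs);
 *
 * @create is zero here because the segment usage entries being freed must
 * already exist on disk; a hole would then surface as -ENOENT.
 */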

int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
			void (*dofunc)(struct inode *, __u64,
				       struct buffer_head *,
				       struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	int ret;

	if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
		nilfs_warn(sufile->i_sb, "%s: invalid segment number: %llu",
			   __func__, (unsigned long long)segnum);
		return -EINVAL;
	}
	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, create, &bh);
	if (!ret) {
		dofunc(sufile, segnum, header_bh, bh);
		brelse(bh);
	}
	brelse(header_bh);

out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

/**
 * nilfs_sufile_set_alloc_range - limit range of segment to be allocated
 * @sufile: inode of segment usage file
 * @start: minimum segment number of allocatable region (inclusive)
 * @end: maximum segment number of allocatable region (inclusive)
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-ERANGE - invalid segment region
 */
int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end)
{
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	__u64 nsegs;
	int ret = -ERANGE;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	nsegs = nilfs_sufile_get_nsegments(sufile);

	if (start <= end && end < nsegs) {
		sui->allocmin = start;
		sui->allocmax = end;
		ret = 0;
	}
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
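
/*
 * Illustrative sketch (not part of the original file): restricting new
 * allocations to the first half of the device, for example ahead of an
 * online shrink, assuming @nsegs holds the current segment count:
 *
 *	err = nilfs_sufile_set_alloc_range(sufile, 0, nsegs / 2 - 1);
 *
 * Passing start > end or end >= nsegs yields -ERANGE and leaves the
 * previously configured range untouched.
 */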

/**
 * nilfs_sufile_alloc - allocate a segment
 * @sufile: inode of segment usage file
 * @segnump: pointer to segment number
 *
 * Description: nilfs_sufile_alloc() allocates a clean segment.
 *
 * Return Value: On success, 0 is returned and the segment number of the
 * allocated segment is stored in the place pointed to by @segnump. On error,
 * one of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - No clean segment left.
 */
int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
{
	struct buffer_head *header_bh, *su_bh;
	struct nilfs_sufile_header *header;
	struct nilfs_segment_usage *su;
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	__u64 segnum, maxsegnum, last_alloc;
	void *kaddr;
	unsigned long nsegments, nsus, cnt;
	int ret, j;

	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	last_alloc = le64_to_cpu(header->sh_last_alloc);
	kunmap_atomic(kaddr);

	nsegments = nilfs_sufile_get_nsegments(sufile);
	maxsegnum = sui->allocmax;
	segnum = last_alloc + 1;
	if (segnum < sui->allocmin || segnum > sui->allocmax)
		segnum = sui->allocmin;

	for (cnt = 0; cnt < nsegments; cnt += nsus) {
		if (segnum > maxsegnum) {
			if (cnt < sui->allocmax - sui->allocmin + 1) {
				/*
				 * wrap around in the limited region.
				 * if allocation started from
				 * sui->allocmin, this never happens.
				 */
				segnum = sui->allocmin;
				maxsegnum = last_alloc;
			} else if (segnum > sui->allocmin &&
				   sui->allocmax + 1 < nsegments) {
				segnum = sui->allocmax + 1;
				maxsegnum = nsegments - 1;
			} else if (sui->allocmin > 0) {
				segnum = 0;
				maxsegnum = sui->allocmin - 1;
			} else {
				break; /* never happens */
			}
		}
		trace_nilfs2_segment_usage_check(sufile, segnum, cnt);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1,
							   &su_bh);
		if (ret < 0)
			goto out_header;
		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);

		nsus = nilfs_sufile_segment_usages_in_block(
			sufile, segnum, maxsegnum);
		for (j = 0; j < nsus; j++, su = (void *)su + susz, segnum++) {
			if (!nilfs_segment_usage_clean(su))
				continue;
			/* found a clean segment */
			nilfs_segment_usage_set_dirty(su);
			kunmap_atomic(kaddr);

			kaddr = kmap_atomic(header_bh->b_page);
			header = kaddr + bh_offset(header_bh);
			le64_add_cpu(&header->sh_ncleansegs, -1);
			le64_add_cpu(&header->sh_ndirtysegs, 1);
			header->sh_last_alloc = cpu_to_le64(segnum);
			kunmap_atomic(kaddr);

			sui->ncleansegs--;
			mark_buffer_dirty(header_bh);
			mark_buffer_dirty(su_bh);
			nilfs_mdt_mark_dirty(sufile);
			brelse(su_bh);
			*segnump = segnum;

			trace_nilfs2_segment_usage_allocated(sufile, segnum);

			goto out_header;
		}

		kunmap_atomic(kaddr);
		brelse(su_bh);
	}

	/* no segments left */
	ret = -ENOSPC;

out_header:
	brelse(header_bh);

out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
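
/*
 * Illustrative sketch (not part of the original file): a caller such as the
 * segment constructor would typically reserve a fresh segment like this,
 * assuming @sufile was obtained from the_nilfs:
 *
 *	__u64 segnum;
 *	int err = nilfs_sufile_alloc(sufile, &segnum);
 *
 *	if (err == -ENOSPC)
 *		;	// every segment in the allocatable range is in use
 *	else if (!err)
 *		;	// segnum names a segment already marked dirty above
 *
 * The scan starts right after sh_last_alloc and wraps within
 * [allocmin, allocmax] before falling back to the rest of the device.
 */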

void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
				 struct buffer_head *header_bh,
				 struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (unlikely(!nilfs_segment_usage_clean(su))) {
		nilfs_warn(sufile->i_sb, "%s: segment %llu must be clean",
			   __func__, (unsigned long long)segnum);
		kunmap_atomic(kaddr);
		return;
	}
	nilfs_segment_usage_set_dirty(su);
	kunmap_atomic(kaddr);

	nilfs_sufile_mod_counter(header_bh, -1, 1);
	NILFS_SUI(sufile)->ncleansegs--;

	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}

void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
			   struct buffer_head *header_bh,
			   struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int clean, dirty;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (su->su_flags == cpu_to_le32(BIT(NILFS_SEGMENT_USAGE_DIRTY)) &&
	    su->su_nblocks == cpu_to_le32(0)) {
		kunmap_atomic(kaddr);
		return;
	}
	clean = nilfs_segment_usage_clean(su);
	dirty = nilfs_segment_usage_dirty(su);

	/* make the segment garbage */
	su->su_lastmod = cpu_to_le64(0);
	su->su_nblocks = cpu_to_le32(0);
	su->su_flags = cpu_to_le32(BIT(NILFS_SEGMENT_USAGE_DIRTY));
	kunmap_atomic(kaddr);

	nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
	NILFS_SUI(sufile)->ncleansegs -= clean;

	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}

void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
			  struct buffer_head *header_bh,
			  struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int sudirty;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_clean(su)) {
		nilfs_warn(sufile->i_sb, "%s: segment %llu is already clean",
			   __func__, (unsigned long long)segnum);
		kunmap_atomic(kaddr);
		return;
	}
	WARN_ON(nilfs_segment_usage_error(su));
	WARN_ON(!nilfs_segment_usage_dirty(su));

	sudirty = nilfs_segment_usage_dirty(su);
	nilfs_segment_usage_set_clean(su);
	kunmap_atomic(kaddr);
	mark_buffer_dirty(su_bh);

	nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
	NILFS_SUI(sufile)->ncleansegs++;

	nilfs_mdt_mark_dirty(sufile);

	trace_nilfs2_segment_usage_freed(sufile, segnum);
}

/**
 * nilfs_sufile_mark_dirty - mark the buffer having a segment usage dirty
 * @sufile: inode of segment usage file
 * @segnum: segment number
 */
int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
{
	struct buffer_head *bh;
	void *kaddr;
	struct nilfs_segment_usage *su;
	int ret;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
	if (ret)
		goto out_sem;

	kaddr = kmap_atomic(bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
	if (unlikely(nilfs_segment_usage_error(su))) {
		struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;

		kunmap_atomic(kaddr);
		brelse(bh);
		if (nilfs_segment_is_active(nilfs, segnum)) {
			nilfs_error(sufile->i_sb,
				    "active segment %llu is erroneous",
				    (unsigned long long)segnum);
		} else {
			/*
			 * Segments marked erroneous are never allocated by
			 * nilfs_sufile_alloc(); only active segments, i.e.,
			 * the segments indexed by ns_segnum or ns_nextnum,
			 * can be erroneous here.
			 */
			WARN_ON_ONCE(1);
		}
		ret = -EIO;
	} else {
		nilfs_segment_usage_set_dirty(su);
		kunmap_atomic(kaddr);
		mark_buffer_dirty(bh);
		nilfs_mdt_mark_dirty(sufile);
		brelse(bh);
	}
out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

/**
 * nilfs_sufile_set_segment_usage - set usage of a segment
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @nblocks: number of live blocks in the segment
 * @modtime: modification time (optional)
 */
int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
				   unsigned long nblocks, time64_t modtime)
{
	struct buffer_head *bh;
	struct nilfs_segment_usage *su;
	void *kaddr;
	int ret;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
	if (modtime) {
		/*
		 * Check segusage error and set su_lastmod only when updating
		 * this entry with a valid timestamp, not for cancellation.
		 */
		WARN_ON_ONCE(nilfs_segment_usage_error(su));
		su->su_lastmod = cpu_to_le64(modtime);
	}
	su->su_nblocks = cpu_to_le32(nblocks);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(bh);
	nilfs_mdt_mark_dirty(sufile);
	brelse(bh);

out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
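
/*
 * Illustrative sketch (not part of the original file): at the end of segment
 * construction the writer records how many blocks ended up in the segment it
 * just filled, roughly:
 *
 *	err = nilfs_sufile_set_segment_usage(sufile, segnum, nblocks,
 *					     ktime_get_real_seconds());
 *
 * Passing a zero @modtime updates only su_nblocks and skips both the
 * timestamp and the error-flag check, which the comment above associates
 * with the cancellation path of a tentative update.
 */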

/**
 * nilfs_sufile_get_stat - get segment usage statistics
 * @sufile: inode of segment usage file
 * @sustat: pointer to a structure of segment usage statistics
 *
 * Description: nilfs_sufile_get_stat() returns information about segment
 * usage.
 *
 * Return Value: On success, 0 is returned, and segment usage information is
 * stored in the place pointed to by @sustat. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
{
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	void *kaddr;
	int ret;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
	sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs);
	sustat->ss_ctime = nilfs->ns_ctime;
	sustat->ss_nongc_ctime = nilfs->ns_nongc_ctime;
	spin_lock(&nilfs->ns_last_segment_lock);
	sustat->ss_prot_seq = nilfs->ns_prot_seq;
	spin_unlock(&nilfs->ns_last_segment_lock);
	kunmap_atomic(kaddr);
	brelse(header_bh);

out_sem:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
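
/*
 * Illustrative sketch (not part of the original file): a reader such as the
 * segment usage statistics ioctl handler fills a caller-supplied structure
 * in one call:
 *
 *	struct nilfs_sustat sustat;
 *	int err = nilfs_sufile_get_stat(sufile, &sustat);
 *
 * On success, sustat.ss_ncleansegs mirrors the on-disk header counter, and
 * ss_prot_seq is sampled under ns_last_segment_lock so it is consistent
 * with the most recently logged segment.
 */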

void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
			       struct buffer_head *header_bh,
			       struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int suclean;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_error(su)) {
		kunmap_atomic(kaddr);
		return;
	}
	suclean = nilfs_segment_usage_clean(su);
	nilfs_segment_usage_set_error(su);
	kunmap_atomic(kaddr);

	if (suclean) {
		nilfs_sufile_mod_counter(header_bh, -1, 0);
		NILFS_SUI(sufile)->ncleansegs--;
	}
	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}

/**
 * nilfs_sufile_truncate_range - truncate range of segment array
 * @sufile: inode of segment usage file
 * @start: start segment number (inclusive)
 * @end: end segment number (inclusive)
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid number of segments specified
 *
 * %-EBUSY - Dirty or active segments are present in the range
 */
static int nilfs_sufile_truncate_range(struct inode *sufile,
				       __u64 start, __u64 end)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh;
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su, *su2;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	unsigned long segusages_per_block;
	unsigned long nsegs, ncleaned;
	__u64 segnum;
	void *kaddr;
	ssize_t n, nc;
	int ret;
	int j;

	nsegs = nilfs_sufile_get_nsegments(sufile);

	ret = -EINVAL;
	if (start > end || start >= nsegs)
		goto out;

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out;

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	ncleaned = 0;

	for (segnum = start; segnum <= end; segnum += n) {
		n = min_t(unsigned long,
			  segusages_per_block -
			  nilfs_sufile_get_offset(sufile, segnum),
			  end - segnum + 1);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out_header;
			/* hole */
			continue;
		}
		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		su2 = su;
		for (j = 0; j < n; j++, su = (void *)su + susz) {
			if ((le32_to_cpu(su->su_flags) &
			     ~BIT(NILFS_SEGMENT_USAGE_ERROR)) ||
			    nilfs_segment_is_active(nilfs, segnum + j)) {
				ret = -EBUSY;
				kunmap_atomic(kaddr);
				brelse(su_bh);
				goto out_header;
			}
		}
		nc = 0;
		for (su = su2, j = 0; j < n; j++, su = (void *)su + susz) {
			if (nilfs_segment_usage_error(su)) {
				nilfs_segment_usage_set_clean(su);
				nc++;
			}
		}
		kunmap_atomic(kaddr);
		if (nc > 0) {
			mark_buffer_dirty(su_bh);
			ncleaned += nc;
		}
		brelse(su_bh);

		if (n == segusages_per_block) {
			/* make hole */
			nilfs_sufile_delete_segment_usage_block(sufile, segnum);
		}
	}
	ret = 0;

out_header:
	if (ncleaned > 0) {
		NILFS_SUI(sufile)->ncleansegs += ncleaned;
		nilfs_sufile_mod_counter(header_bh, ncleaned, 0);
		nilfs_mdt_mark_dirty(sufile);
	}
	brelse(header_bh);
out:
	return ret;
}

/**
 * nilfs_sufile_resize - resize segment array
 * @sufile: inode of segment usage file
 * @newnsegs: new number of segments
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - Enough free space is not left for shrinking
 *
 * %-EBUSY - Dirty or active segments exist in the region to be truncated
 */
int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	void *kaddr;
	unsigned long nsegs, nrsvsegs;
	int ret = 0;

	down_write(&NILFS_MDT(sufile)->mi_sem);

	nsegs = nilfs_sufile_get_nsegments(sufile);
	if (nsegs == newnsegs)
		goto out;

	ret = -ENOSPC;
	nrsvsegs = nilfs_nrsvsegs(nilfs, newnsegs);
	if (newnsegs < nsegs && nsegs - newnsegs + nrsvsegs > sui->ncleansegs)
		goto out;

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out;

	if (newnsegs > nsegs) {
		sui->ncleansegs += newnsegs - nsegs;
	} else /* newnsegs < nsegs */ {
		ret = nilfs_sufile_truncate_range(sufile, newnsegs, nsegs - 1);
		if (ret < 0)
			goto out_header;

		sui->ncleansegs -= nsegs - newnsegs;

		/*
		 * If the sufile is successfully truncated, immediately adjust
		 * the segment allocation space while locking the semaphore
		 * "mi_sem" so that nilfs_sufile_alloc() never allocates
		 * segments in the truncated space.
		 */
		sui->allocmax = newnsegs - 1;
		sui->allocmin = 0;
	}

	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(sufile);
	nilfs_set_nsegments(nilfs, newnsegs);

out_header:
	brelse(header_bh);
out:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
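
/*
 * Illustrative sketch (not part of the original file): the resize path ends
 * up here after converting the requested device size into a segment count:
 *
 *	err = nilfs_sufile_resize(sufile, newnsegs);
 *
 * Shrinking (newnsegs below the current count) only succeeds when the tail
 * region holds nothing but clean or error segments and enough clean
 * segments remain to cover the reserve; otherwise -EBUSY or -ENOSPC is
 * returned, as documented above.
 */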

/**
 * nilfs_sufile_get_suinfo - get segment usage information
 * @sufile: inode of segment usage file
 * @segnum: segment number to start looking
 * @buf: array of suinfo
 * @sisz: byte size of suinfo
 * @nsi: size of suinfo array
 *
 * Description: nilfs_sufile_get_suinfo() copies segment usage information,
 * starting from the segment @segnum, into the array @buf.
 *
 * Return Value: On success, the number of segment usage entries stored in
 * @buf is returned.  On error, one of the following negative error codes is
 * returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
				unsigned int sisz, size_t nsi)
{
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su;
	struct nilfs_suinfo *si = buf;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	void *kaddr;
	unsigned long nsegs, segusages_per_block;
	ssize_t n;
	int ret, i, j;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	nsegs = min_t(unsigned long,
		      nilfs_sufile_get_nsegments(sufile) - segnum,
		      nsi);
	for (i = 0; i < nsegs; i += n, segnum += n) {
		n = min_t(unsigned long,
			  segusages_per_block -
			  nilfs_sufile_get_offset(sufile, segnum),
			  nsegs - i);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out;
			/* hole */
			memset(si, 0, sisz * n);
			si = (void *)si + sisz * n;
			continue;
		}

		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		for (j = 0; j < n;
		     j++, su = (void *)su + susz, si = (void *)si + sisz) {
			si->sui_lastmod = le64_to_cpu(su->su_lastmod);
			si->sui_nblocks = le32_to_cpu(su->su_nblocks);
			si->sui_flags = le32_to_cpu(su->su_flags) &
				~BIT(NILFS_SEGMENT_USAGE_ACTIVE);
			if (nilfs_segment_is_active(nilfs, segnum + j))
				si->sui_flags |=
					BIT(NILFS_SEGMENT_USAGE_ACTIVE);
		}
		kunmap_atomic(kaddr);
		brelse(su_bh);
	}
	ret = nsegs;

out:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
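
/*
 * Illustrative sketch (not part of the original file): readers page through
 * the segment array with repeated calls; NSI below is an arbitrary batch
 * size chosen only for this example:
 *
 *	struct nilfs_suinfo si[NSI];
 *	ssize_t n = nilfs_sufile_get_suinfo(sufile, segnum, si,
 *					    sizeof(si[0]), NSI);
 *
 * A return value smaller than NSI means the end of the segment array was
 * reached; entries that fall into sufile holes come back zero-filled.
 */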

/**
 * nilfs_sufile_set_suinfo - sets segment usage info
 * @sufile: inode of segment usage file
 * @buf: array of suinfo_update
 * @supsz: byte size of suinfo_update
 * @nsup: size of suinfo_update array
 *
 * Description: Takes an array of nilfs_suinfo_update structs and updates
 * segment usage accordingly. Only the fields indicated by the sup_flags
 * are updated.
 *
 * Return Value: On success, 0 is returned. On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid values in input (segment number, flags or nblocks)
 */
ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf,
				unsigned int supsz, size_t nsup)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh, *bh;
	struct nilfs_suinfo_update *sup, *supend = buf + supsz * nsup;
	struct nilfs_segment_usage *su;
	void *kaddr;
	unsigned long blkoff, prev_blkoff;
	int cleansi, cleansu, dirtysi, dirtysu;
	long ncleaned = 0, ndirtied = 0;
	int ret = 0;

	if (unlikely(nsup == 0))
		return ret;

	for (sup = buf; sup < supend; sup = (void *)sup + supsz) {
		if (sup->sup_segnum >= nilfs->ns_nsegments
			|| (sup->sup_flags &
				(~0UL << __NR_NILFS_SUINFO_UPDATE_FIELDS))
			|| (nilfs_suinfo_update_nblocks(sup) &&
				sup->sup_sui.sui_nblocks >
				nilfs->ns_blocks_per_segment))
			return -EINVAL;
	}

	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	sup = buf;
	blkoff = nilfs_sufile_get_blkoff(sufile, sup->sup_segnum);
	ret = nilfs_mdt_get_block(sufile, blkoff, 1, NULL, &bh);
	if (ret < 0)
		goto out_header;

	for (;;) {
		kaddr = kmap_atomic(bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, sup->sup_segnum, bh, kaddr);

		if (nilfs_suinfo_update_lastmod(sup))
			su->su_lastmod = cpu_to_le64(sup->sup_sui.sui_lastmod);

		if (nilfs_suinfo_update_nblocks(sup))
			su->su_nblocks = cpu_to_le32(sup->sup_sui.sui_nblocks);

		if (nilfs_suinfo_update_flags(sup)) {
			/*
			 * Active flag is a virtual flag projected by running
			 * nilfs kernel code - drop it not to write it to
			 * disk.
			 */
			sup->sup_sui.sui_flags &=
					~BIT(NILFS_SEGMENT_USAGE_ACTIVE);

			cleansi = nilfs_suinfo_clean(&sup->sup_sui);
			cleansu = nilfs_segment_usage_clean(su);
			dirtysi = nilfs_suinfo_dirty(&sup->sup_sui);
			dirtysu = nilfs_segment_usage_dirty(su);

			if (cleansi && !cleansu)
				++ncleaned;
			else if (!cleansi && cleansu)
				--ncleaned;

			if (dirtysi && !dirtysu)
				++ndirtied;
			else if (!dirtysi && dirtysu)
				--ndirtied;

			su->su_flags = cpu_to_le32(sup->sup_sui.sui_flags);
		}

		kunmap_atomic(kaddr);

		sup = (void *)sup + supsz;
		if (sup >= supend)
			break;

		prev_blkoff = blkoff;
		blkoff = nilfs_sufile_get_blkoff(sufile, sup->sup_segnum);
		if (blkoff == prev_blkoff)
			continue;

		/* get different block */
		mark_buffer_dirty(bh);
		put_bh(bh);
		ret = nilfs_mdt_get_block(sufile, blkoff, 1, NULL, &bh);
		if (unlikely(ret < 0))
			goto out_mark;
	}
	mark_buffer_dirty(bh);
	put_bh(bh);

out_mark:
	if (ncleaned || ndirtied) {
		nilfs_sufile_mod_counter(header_bh, (u64)ncleaned,
					 (u64)ndirtied);
		NILFS_SUI(sufile)->ncleansegs += ncleaned;
	}
	nilfs_mdt_mark_dirty(sufile);
out_header:
	put_bh(header_bh);
out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
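
/*
 * Illustrative sketch (not part of the original file): userland recovery or
 * tuning tools normally drive this through an ioctl.  Rebuilding one
 * entry's block count in-kernel could look roughly like the following,
 * where the flag constant is assumed to come from the nilfs2 uapi headers:
 *
 *	struct nilfs_suinfo_update sup = {
 *		.sup_segnum = segnum,
 *		.sup_flags = BIT(NILFS_SUINFO_UPDATE_NBLOCKS),
 *		.sup_sui.sui_nblocks = counted_blocks,
 *	};
 *	err = nilfs_sufile_set_suinfo(sufile, &sup, sizeof(sup), 1);
 *
 * sup_flags names exactly which members of sup_sui are applied; members
 * without their flag set are left untouched on disk.
 */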
104200e9ffcdSAndreas Rohner
104300e9ffcdSAndreas Rohner /**
104482e11e85SAndreas Rohner * nilfs_sufile_trim_fs() - trim ioctl handle function
104582e11e85SAndreas Rohner * @sufile: inode of segment usage file
104682e11e85SAndreas Rohner * @range: fstrim_range structure
104782e11e85SAndreas Rohner *
104882e11e85SAndreas Rohner * start: First Byte to trim
104982e11e85SAndreas Rohner * len: number of Bytes to trim from start
105082e11e85SAndreas Rohner * minlen: minimum extent length in Bytes
105182e11e85SAndreas Rohner *
105282e11e85SAndreas Rohner * Decription: nilfs_sufile_trim_fs goes through all segments containing bytes
105382e11e85SAndreas Rohner * from start to start+len. start is rounded up to the next block boundary
105482e11e85SAndreas Rohner * and start+len is rounded down. For each clean segment blkdev_issue_discard
105582e11e85SAndreas Rohner * function is invoked.
105682e11e85SAndreas Rohner *
105782e11e85SAndreas Rohner * Return Value: On success, 0 is returned or negative error code, otherwise.
105882e11e85SAndreas Rohner */
nilfs_sufile_trim_fs(struct inode * sufile,struct fstrim_range * range)105982e11e85SAndreas Rohner int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range)
106082e11e85SAndreas Rohner {
106182e11e85SAndreas Rohner struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
106282e11e85SAndreas Rohner struct buffer_head *su_bh;
106382e11e85SAndreas Rohner struct nilfs_segment_usage *su;
106482e11e85SAndreas Rohner void *kaddr;
106582e11e85SAndreas Rohner size_t n, i, susz = NILFS_MDT(sufile)->mi_entry_size;
106682e11e85SAndreas Rohner sector_t seg_start, seg_end, start_block, end_block;
106782e11e85SAndreas Rohner sector_t start = 0, nblocks = 0;
106882e11e85SAndreas Rohner u64 segnum, segnum_end, minlen, len, max_blocks, ndiscarded = 0;
106982e11e85SAndreas Rohner int ret = 0;
107082e11e85SAndreas Rohner unsigned int sects_per_block;
107182e11e85SAndreas Rohner
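	/*
	 * Unit conversions: range->start/len/minlen are given in bytes,
	 * segment usage accounting works in filesystem blocks, and the
	 * discard requests below are issued in device sectors.
	 */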
107282e11e85SAndreas Rohner sects_per_block = (1 << nilfs->ns_blocksize_bits) /
107382e11e85SAndreas Rohner bdev_logical_block_size(nilfs->ns_bdev);
107482e11e85SAndreas Rohner len = range->len >> nilfs->ns_blocksize_bits;
107582e11e85SAndreas Rohner minlen = range->minlen >> nilfs->ns_blocksize_bits;
107682e11e85SAndreas Rohner max_blocks = ((u64)nilfs->ns_nsegments * nilfs->ns_blocks_per_segment);
107782e11e85SAndreas Rohner
107882e11e85SAndreas Rohner if (!len || range->start >= max_blocks << nilfs->ns_blocksize_bits)
107982e11e85SAndreas Rohner return -EINVAL;
108082e11e85SAndreas Rohner
108182e11e85SAndreas Rohner start_block = (range->start + nilfs->ns_blocksize - 1) >>
108282e11e85SAndreas Rohner nilfs->ns_blocksize_bits;
108382e11e85SAndreas Rohner
108482e11e85SAndreas Rohner /*
108582e11e85SAndreas Rohner * range->len can be very large (actually, it is set to
108682e11e85SAndreas Rohner * ULLONG_MAX by default) - truncate upper end of the range
108782e11e85SAndreas Rohner * carefully so as not to overflow.
108882e11e85SAndreas Rohner */
108982e11e85SAndreas Rohner if (max_blocks - start_block < len)
109082e11e85SAndreas Rohner end_block = max_blocks - 1;
109182e11e85SAndreas Rohner else
109282e11e85SAndreas Rohner end_block = start_block + len - 1;
109382e11e85SAndreas Rohner
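	/* Map the trimmed block range onto the segments containing it. */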
109482e11e85SAndreas Rohner segnum = nilfs_get_segnum_of_block(nilfs, start_block);
109582e11e85SAndreas Rohner segnum_end = nilfs_get_segnum_of_block(nilfs, end_block);
109682e11e85SAndreas Rohner
109782e11e85SAndreas Rohner down_read(&NILFS_MDT(sufile)->mi_sem);
109882e11e85SAndreas Rohner
109982e11e85SAndreas Rohner while (segnum <= segnum_end) {
110082e11e85SAndreas Rohner n = nilfs_sufile_segment_usages_in_block(sufile, segnum,
110182e11e85SAndreas Rohner segnum_end);
110282e11e85SAndreas Rohner
110382e11e85SAndreas Rohner ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
110482e11e85SAndreas Rohner &su_bh);
110582e11e85SAndreas Rohner if (ret < 0) {
110682e11e85SAndreas Rohner if (ret != -ENOENT)
110782e11e85SAndreas Rohner goto out_sem;
110882e11e85SAndreas Rohner /* hole */
110982e11e85SAndreas Rohner segnum += n;
111082e11e85SAndreas Rohner continue;
111182e11e85SAndreas Rohner }
111282e11e85SAndreas Rohner
111382e11e85SAndreas Rohner kaddr = kmap_atomic(su_bh->b_page);
111482e11e85SAndreas Rohner su = nilfs_sufile_block_get_segment_usage(sufile, segnum,
111582e11e85SAndreas Rohner su_bh, kaddr);
111682e11e85SAndreas Rohner for (i = 0; i < n; ++i, ++segnum, su = (void *)su + susz) {
111782e11e85SAndreas Rohner if (!nilfs_segment_usage_clean(su))
111882e11e85SAndreas Rohner continue;
111982e11e85SAndreas Rohner
112082e11e85SAndreas Rohner nilfs_get_segment_range(nilfs, segnum, &seg_start,
112182e11e85SAndreas Rohner &seg_end);
112282e11e85SAndreas Rohner
112382e11e85SAndreas Rohner if (!nblocks) {
112482e11e85SAndreas Rohner /* start new extent */
112582e11e85SAndreas Rohner start = seg_start;
112682e11e85SAndreas Rohner nblocks = seg_end - seg_start + 1;
112782e11e85SAndreas Rohner continue;
112882e11e85SAndreas Rohner }
112982e11e85SAndreas Rohner
113082e11e85SAndreas Rohner if (start + nblocks == seg_start) {
113182e11e85SAndreas Rohner /* add to previous extent */
113282e11e85SAndreas Rohner nblocks += seg_end - seg_start + 1;
113382e11e85SAndreas Rohner continue;
113482e11e85SAndreas Rohner }
113582e11e85SAndreas Rohner
113682e11e85SAndreas Rohner /* discard previous extent */
113782e11e85SAndreas Rohner if (start < start_block) {
113882e11e85SAndreas Rohner nblocks -= start_block - start;
113982e11e85SAndreas Rohner start = start_block;
114082e11e85SAndreas Rohner }
114182e11e85SAndreas Rohner
114282e11e85SAndreas Rohner if (nblocks >= minlen) {
114382e11e85SAndreas Rohner kunmap_atomic(kaddr);
114482e11e85SAndreas Rohner
114582e11e85SAndreas Rohner ret = blkdev_issue_discard(nilfs->ns_bdev,
114682e11e85SAndreas Rohner start * sects_per_block,
114782e11e85SAndreas Rohner nblocks * sects_per_block,
114844abff2cSChristoph Hellwig GFP_NOFS);
114982e11e85SAndreas Rohner if (ret < 0) {
115082e11e85SAndreas Rohner put_bh(su_bh);
115182e11e85SAndreas Rohner goto out_sem;
115282e11e85SAndreas Rohner }
115382e11e85SAndreas Rohner
115482e11e85SAndreas Rohner ndiscarded += nblocks;
115582e11e85SAndreas Rohner kaddr = kmap_atomic(su_bh->b_page);
115682e11e85SAndreas Rohner su = nilfs_sufile_block_get_segment_usage(
115782e11e85SAndreas Rohner sufile, segnum, su_bh, kaddr);
115882e11e85SAndreas Rohner }
115982e11e85SAndreas Rohner
116082e11e85SAndreas Rohner /* start new extent */
116182e11e85SAndreas Rohner start = seg_start;
116282e11e85SAndreas Rohner nblocks = seg_end - seg_start + 1;
116382e11e85SAndreas Rohner }
116482e11e85SAndreas Rohner kunmap_atomic(kaddr);
116582e11e85SAndreas Rohner put_bh(su_bh);
116682e11e85SAndreas Rohner }
116782e11e85SAndreas Rohner
116982e11e85SAndreas Rohner if (nblocks) {
117082e11e85SAndreas Rohner /* discard last extent */
117182e11e85SAndreas Rohner if (start < start_block) {
117282e11e85SAndreas Rohner nblocks -= start_block - start;
117382e11e85SAndreas Rohner start = start_block;
117482e11e85SAndreas Rohner }
117582e11e85SAndreas Rohner if (start + nblocks > end_block + 1)
117682e11e85SAndreas Rohner nblocks = end_block - start + 1;
117782e11e85SAndreas Rohner
117882e11e85SAndreas Rohner if (nblocks >= minlen) {
117982e11e85SAndreas Rohner ret = blkdev_issue_discard(nilfs->ns_bdev,
118082e11e85SAndreas Rohner start * sects_per_block,
118182e11e85SAndreas Rohner nblocks * sects_per_block,
118244abff2cSChristoph Hellwig GFP_NOFS);
118382e11e85SAndreas Rohner if (!ret)
118482e11e85SAndreas Rohner ndiscarded += nblocks;
118582e11e85SAndreas Rohner }
118682e11e85SAndreas Rohner }
118782e11e85SAndreas Rohner
118882e11e85SAndreas Rohner out_sem:
118982e11e85SAndreas Rohner up_read(&NILFS_MDT(sufile)->mi_sem);
119082e11e85SAndreas Rohner
119182e11e85SAndreas Rohner range->len = ndiscarded << nilfs->ns_blocksize_bits;
119282e11e85SAndreas Rohner return ret;
119382e11e85SAndreas Rohner }
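
/*
 * Illustrative sketch: a minimal FITRIM-style caller (the real handler
 * lives in ioctl.c).  The wrapper name trim_fs_example() and the exact
 * capability/granularity handling are hypothetical and shown for
 * illustration only.
 */
static int trim_fs_example(struct the_nilfs *nilfs, struct fstrim_range *range)
{
	/* bail out early if the device cannot discard at all */
	if (!bdev_max_discard_sectors(nilfs->ns_bdev))
		return -EOPNOTSUPP;

	/* never trim extents smaller than the device discard granularity */
	range->minlen = max_t(u64, range->minlen,
			      bdev_discard_granularity(nilfs->ns_bdev));

	/*
	 * Walk the clean segments overlapping [start, start + len) and
	 * discard them; on return range->len holds the bytes discarded.
	 */
	return nilfs_sufile_trim_fs(nilfs->ns_sufile, range);
}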
119482e11e85SAndreas Rohner
119582e11e85SAndreas Rohner /**
1196f1e89c86SRyusuke Konishi * nilfs_sufile_read - read or get sufile inode
1197f1e89c86SRyusuke Konishi * @sb: super block instance
1198f1e89c86SRyusuke Konishi * @susize: size of a segment usage entry
11998707df38SRyusuke Konishi * @raw_inode: on-disk sufile inode
1200f1e89c86SRyusuke Konishi * @inodep: buffer to store the inode
12018707df38SRyusuke Konishi */
1202f1e89c86SRyusuke Konishi int nilfs_sufile_read(struct super_block *sb, size_t susize,
1203f1e89c86SRyusuke Konishi struct nilfs_inode *raw_inode, struct inode **inodep)
12048707df38SRyusuke Konishi {
1205f1e89c86SRyusuke Konishi struct inode *sufile;
1206f1e89c86SRyusuke Konishi struct nilfs_sufile_info *sui;
1207aa474a22SRyusuke Konishi struct buffer_head *header_bh;
1208aa474a22SRyusuke Konishi struct nilfs_sufile_header *header;
1209aa474a22SRyusuke Konishi void *kaddr;
1210f1e89c86SRyusuke Konishi int err;
1211aa474a22SRyusuke Konishi
12120ec060d1SRyusuke Konishi if (susize > sb->s_blocksize) {
1213a1d0747aSJoe Perches nilfs_err(sb, "too large segment usage size: %zu bytes",
1214a1d0747aSJoe Perches susize);
12150ec060d1SRyusuke Konishi return -EINVAL;
12160ec060d1SRyusuke Konishi } else if (susize < NILFS_MIN_SEGMENT_USAGE_SIZE) {
1217a1d0747aSJoe Perches nilfs_err(sb, "too small segment usage size: %zu bytes",
1218a1d0747aSJoe Perches susize);
12190ec060d1SRyusuke Konishi return -EINVAL;
12200ec060d1SRyusuke Konishi }
12210ec060d1SRyusuke Konishi
1222f1e89c86SRyusuke Konishi sufile = nilfs_iget_locked(sb, NULL, NILFS_SUFILE_INO);
1223f1e89c86SRyusuke Konishi if (unlikely(!sufile))
1224f1e89c86SRyusuke Konishi return -ENOMEM;
1225f1e89c86SRyusuke Konishi if (!(sufile->i_state & I_NEW))
1226f1e89c86SRyusuke Konishi goto out;
1227aa474a22SRyusuke Konishi
1228f1e89c86SRyusuke Konishi err = nilfs_mdt_init(sufile, NILFS_MDT_GFP, sizeof(*sui));
1229f1e89c86SRyusuke Konishi if (err)
1230f1e89c86SRyusuke Konishi goto failed;
1231f1e89c86SRyusuke Konishi
1232f1e89c86SRyusuke Konishi nilfs_mdt_set_entry_size(sufile, susize,
1233f1e89c86SRyusuke Konishi sizeof(struct nilfs_sufile_header));
1234f1e89c86SRyusuke Konishi
1235f1e89c86SRyusuke Konishi err = nilfs_read_inode_common(sufile, raw_inode);
1236f1e89c86SRyusuke Konishi if (err)
1237f1e89c86SRyusuke Konishi goto failed;
1238f1e89c86SRyusuke Konishi
1239f1e89c86SRyusuke Konishi err = nilfs_sufile_get_header_block(sufile, &header_bh);
1240f1e89c86SRyusuke Konishi if (err)
1241f1e89c86SRyusuke Konishi goto failed;
1242f1e89c86SRyusuke Konishi
1243f1e89c86SRyusuke Konishi sui = NILFS_SUI(sufile);
12447b9c0976SCong Wang kaddr = kmap_atomic(header_bh->b_page);
1245aa474a22SRyusuke Konishi header = kaddr + bh_offset(header_bh);
1246aa474a22SRyusuke Konishi sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
12477b9c0976SCong Wang kunmap_atomic(kaddr);
1248aa474a22SRyusuke Konishi brelse(header_bh);
12498707df38SRyusuke Konishi
1250619205daSRyusuke Konishi sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1;
1251619205daSRyusuke Konishi sui->allocmin = 0;
1252619205daSRyusuke Konishi
1253f1e89c86SRyusuke Konishi unlock_new_inode(sufile);
1254f1e89c86SRyusuke Konishi out:
1255f1e89c86SRyusuke Konishi *inodep = sufile;
1256f1e89c86SRyusuke Konishi return 0;
1257f1e89c86SRyusuke Konishi failed:
1258f1e89c86SRyusuke Konishi iget_failed(sufile);
1259f1e89c86SRyusuke Konishi return err;
126079739565SRyusuke Konishi }
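
/*
 * Illustrative sketch: how a mount-time caller might instantiate the
 * sufile from its on-disk inode and read back the clean segment count
 * cached by nilfs_sufile_read().  The wrapper name load_sufile_example()
 * is hypothetical and shown for illustration only.
 */
static int load_sufile_example(struct super_block *sb, size_t susize,
			       struct nilfs_inode *raw_inode,
			       struct inode **sufilep)
{
	int err;

	err = nilfs_sufile_read(sb, susize, raw_inode, sufilep);
	if (err)
		return err;

	/* ncleansegs was loaded from the sufile header block above */
	nilfs_info(sb, "sufile loaded: %lu clean segments",
		   nilfs_sufile_get_ncleansegs(*sufilep));
	return 0;
}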
1261