// SPDX-License-Identifier: GPL-2.0+
/*
 * NILFS segment usage file.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Koji Sato.
 * Revised by Ryusuke Konishi.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/errno.h>
#include "mdt.h"
#include "sufile.h"

#include <trace/events/nilfs2.h>

/**
 * struct nilfs_sufile_info - on-memory private data of sufile
 * @mi: on-memory private data of metadata file
 * @ncleansegs: number of clean segments
 * @allocmin: lower limit of allocatable segment range
 * @allocmax: upper limit of allocatable segment range
 */
struct nilfs_sufile_info {
	struct nilfs_mdt_info mi;
	unsigned long ncleansegs;/* number of clean segments */
	__u64 allocmin;		/* lower limit of allocatable segment range */
	__u64 allocmax;		/* upper limit of allocatable segment range */
};

static inline struct nilfs_sufile_info *NILFS_SUI(struct inode *sufile)
{
	return (struct nilfs_sufile_info *)NILFS_MDT(sufile);
}

static inline unsigned long
nilfs_sufile_segment_usages_per_block(const struct inode *sufile)
{
	return NILFS_MDT(sufile)->mi_entries_per_block;
}

static unsigned long
nilfs_sufile_get_blkoff(const struct inode *sufile, __u64 segnum)
{
	__u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;

	do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
	return (unsigned long)t;
}

static unsigned long
nilfs_sufile_get_offset(const struct inode *sufile, __u64 segnum)
{
	__u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;

	return do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
}
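
/*
 * Worked example (illustrative numbers): with 4096-byte blocks and
 * 16-byte segment usage entries, mi_entries_per_block is 256.  Assuming
 * mi_first_entry_offset is 2 (the sufile header occupying the start of
 * block 0 takes the first two entry slots), segment 300 maps to
 *
 *	blkoff = (300 + 2) / 256 = 1	(second sufile block)
 *	offset = (300 + 2) % 256 = 46	(47th entry in that block)
 *
 * do_div() is used because segment numbers are 64-bit even on 32-bit
 * hosts; it divides its first argument in place and returns the
 * remainder.
 */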

static unsigned long
nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr,
				     __u64 max)
{
	return min_t(unsigned long,
		     nilfs_sufile_segment_usages_per_block(sufile) -
		     nilfs_sufile_get_offset(sufile, curr),
		     max - curr + 1);
}

static struct nilfs_segment_usage *
nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum,
				     struct buffer_head *bh, void *kaddr)
{
	return kaddr + bh_offset(bh) +
		nilfs_sufile_get_offset(sufile, segnum) *
		NILFS_MDT(sufile)->mi_entry_size;
}

static inline int nilfs_sufile_get_header_block(struct inode *sufile,
						struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile, 0, 0, NULL, bhp);
}

static inline int
nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
				     int create, struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile,
				   nilfs_sufile_get_blkoff(sufile, segnum),
				   create, NULL, bhp);
}

static int nilfs_sufile_delete_segment_usage_block(struct inode *sufile,
						   __u64 segnum)
{
	return nilfs_mdt_delete_block(sufile,
				      nilfs_sufile_get_blkoff(sufile, segnum));
}

static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
				     u64 ncleanadd, u64 ndirtyadd)
{
	struct nilfs_sufile_header *header;
	void *kaddr;

	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
	le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(header_bh);
}
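
/*
 * The deltas are declared u64, but callers pass negative values as
 * (u64)-1; two's-complement wraparound makes le64_add_cpu() subtract.
 * Typical call, as issued by nilfs_sufile_do_free() below:
 *
 *	nilfs_sufile_mod_counter(header_bh, 1, (u64)-1);
 *
 * which increments sh_ncleansegs and decrements sh_ndirtysegs by one.
 */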

/**
 * nilfs_sufile_get_ncleansegs - return the number of clean segments
 * @sufile: inode of segment usage file
 */
unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile)
{
	return NILFS_SUI(sufile)->ncleansegs;
}

/**
 * nilfs_sufile_updatev - modify multiple segment usages at a time
 * @sufile: inode of segment usage file
 * @segnumv: array of segment numbers
 * @nsegs: size of @segnumv array
 * @create: creation flag
 * @ndone: place to store the number of modified segments of @segnumv
 * @dofunc: primitive operation for the update
 *
 * Description: nilfs_sufile_updatev() repeatedly calls @dofunc
 * against the given array of segments.  The @dofunc is called with
 * buffers of a header block and the sufile block in which the target
 * segment usage entry is contained.  If @ndone is given, the number
 * of successfully modified segments from the head is stored in the
 * place @ndone points to.
 *
 * Return Value: On success, zero is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - The given segment usage is in a hole block (may be
 *            returned if @create is zero)
 *
 * %-EINVAL - Invalid segment usage number
 */
int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs,
			 int create, size_t *ndone,
			 void (*dofunc)(struct inode *, __u64,
					struct buffer_head *,
					struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	unsigned long blkoff, prev_blkoff;
	__u64 *seg;
	size_t nerr = 0, n = 0;
	int ret = 0;

	if (unlikely(nsegs == 0))
		goto out;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	for (seg = segnumv; seg < segnumv + nsegs; seg++) {
		if (unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) {
			nilfs_warn(sufile->i_sb,
				   "%s: invalid segment number: %llu",
				   __func__, (unsigned long long)*seg);
			nerr++;
		}
	}
	if (nerr > 0) {
		ret = -EINVAL;
		goto out_sem;
	}

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	seg = segnumv;
	blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
	ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
	if (ret < 0)
		goto out_header;

	for (;;) {
		dofunc(sufile, *seg, header_bh, bh);

		if (++seg >= segnumv + nsegs)
			break;
		prev_blkoff = blkoff;
		blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
		if (blkoff == prev_blkoff)
			continue;

		/* get different block */
		brelse(bh);
		ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
		if (unlikely(ret < 0))
			goto out_header;
	}
	brelse(bh);

 out_header:
	n = seg - segnumv;
	brelse(header_bh);
 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
 out:
	if (ndone)
		*ndone = n;
	return ret;
}

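/**
 * nilfs_sufile_update - modify a single segment usage
 * @sufile: inode of segment usage file
 * @segnum: segment number to be modified
 * @create: creation flag
 * @dofunc: primitive operation for the update
 *
 * Description: Single-segment counterpart of nilfs_sufile_updatev().
 * @dofunc is called once with the header block and the sufile block
 * containing the usage entry of @segnum.
 *
 * Return Value: On success, zero is returned.  On error, one of the
 * negative error codes listed for nilfs_sufile_updatev() is returned.
 */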
int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
			void (*dofunc)(struct inode *, __u64,
				       struct buffer_head *,
				       struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	int ret;

	if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
		nilfs_warn(sufile->i_sb, "%s: invalid segment number: %llu",
			   __func__, (unsigned long long)segnum);
		return -EINVAL;
	}
	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, create, &bh);
	if (!ret) {
		dofunc(sufile, segnum, header_bh, bh);
		brelse(bh);
	}
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
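
/*
 * The nilfs_sufile_do_*() primitives below are plugged into the two
 * drivers above; a sketch of how the inline wrappers in sufile.h are
 * assumed to combine them:
 *
 *	nilfs_sufile_scrap(sufile, segnum)
 *		=> nilfs_sufile_update(sufile, segnum, 1,
 *				       nilfs_sufile_do_scrap);
 *	nilfs_sufile_freev(sufile, segnumv, nsegs, ndone)
 *		=> nilfs_sufile_updatev(sufile, segnumv, nsegs, 0, ndone,
 *					nilfs_sufile_do_free);
 */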

/**
 * nilfs_sufile_set_alloc_range - limit range of segments to be allocated
 * @sufile: inode of segment usage file
 * @start: minimum segment number of allocatable region (inclusive)
 * @end: maximum segment number of allocatable region (inclusive)
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-ERANGE - invalid segment region
 */
int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end)
{
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	__u64 nsegs;
	int ret = -ERANGE;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	nsegs = nilfs_sufile_get_nsegments(sufile);

	if (start <= end && end < nsegs) {
		sui->allocmin = start;
		sui->allocmax = end;
		ret = 0;
	}
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
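
/*
 * Example (illustrative): on a 1000-segment device,
 *
 *	nilfs_sufile_set_alloc_range(sufile, 100, 899);
 *
 * confines nilfs_sufile_alloc() to segments 100..899, whereas the
 * arguments (200, 100) or (900, 1000) would fail with -ERANGE.
 */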

/**
 * nilfs_sufile_alloc - allocate a segment
 * @sufile: inode of segment usage file
 * @segnump: pointer to segment number
 *
 * Description: nilfs_sufile_alloc() allocates a clean segment.
 *
 * Return Value: On success, 0 is returned and the segment number of the
 * allocated segment is stored in the place pointed by @segnump. On error, one
 * of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - No clean segment left.
 */
int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
{
	struct buffer_head *header_bh, *su_bh;
	struct nilfs_sufile_header *header;
	struct nilfs_segment_usage *su;
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	__u64 segnum, maxsegnum, last_alloc;
	void *kaddr;
	unsigned long nsegments, nsus, cnt;
	int ret, j;

	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	last_alloc = le64_to_cpu(header->sh_last_alloc);
	kunmap_atomic(kaddr);

	nsegments = nilfs_sufile_get_nsegments(sufile);
	maxsegnum = sui->allocmax;
	segnum = last_alloc + 1;
	if (segnum < sui->allocmin || segnum > sui->allocmax)
		segnum = sui->allocmin;

	for (cnt = 0; cnt < nsegments; cnt += nsus) {
		if (segnum > maxsegnum) {
			if (cnt < sui->allocmax - sui->allocmin + 1) {
				/*
				 * Wrap around in the limited region.
				 * If allocation started from
				 * sui->allocmin, this never happens.
				 */
				segnum = sui->allocmin;
				maxsegnum = last_alloc;
			} else if (segnum > sui->allocmin &&
				   sui->allocmax + 1 < nsegments) {
				segnum = sui->allocmax + 1;
				maxsegnum = nsegments - 1;
			} else if (sui->allocmin > 0) {
				segnum = 0;
				maxsegnum = sui->allocmin - 1;
			} else {
				break; /* never happens */
			}
		}
		trace_nilfs2_segment_usage_check(sufile, segnum, cnt);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1,
							   &su_bh);
		if (ret < 0)
			goto out_header;
		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);

		nsus = nilfs_sufile_segment_usages_in_block(
			sufile, segnum, maxsegnum);
		for (j = 0; j < nsus; j++, su = (void *)su + susz, segnum++) {
			if (!nilfs_segment_usage_clean(su))
				continue;
			/* found a clean segment */
			nilfs_segment_usage_set_dirty(su);
			kunmap_atomic(kaddr);

			kaddr = kmap_atomic(header_bh->b_page);
			header = kaddr + bh_offset(header_bh);
			le64_add_cpu(&header->sh_ncleansegs, -1);
			le64_add_cpu(&header->sh_ndirtysegs, 1);
			header->sh_last_alloc = cpu_to_le64(segnum);
			kunmap_atomic(kaddr);

			sui->ncleansegs--;
			mark_buffer_dirty(header_bh);
			mark_buffer_dirty(su_bh);
			nilfs_mdt_mark_dirty(sufile);
			brelse(su_bh);
			*segnump = segnum;

			trace_nilfs2_segment_usage_allocated(sufile, segnum);

			goto out_header;
		}

		kunmap_atomic(kaddr);
		brelse(su_bh);
	}

	/* no segments left */
	ret = -ENOSPC;

 out_header:
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
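
/*
 * Worked example of the scan order (illustrative numbers): with
 * allocmin = 100, allocmax = 899 and last_alloc = 897, the search
 * starts at segment 898, runs up to 899, then wraps to 100..897 before
 * spilling outside the configured range; with the default range
 * (allocmin = 0, allocmax = nsegments - 1) the scan is a single
 * wrap-around pass over the whole device.
 */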

void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
				 struct buffer_head *header_bh,
				 struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (unlikely(!nilfs_segment_usage_clean(su))) {
		nilfs_warn(sufile->i_sb, "%s: segment %llu must be clean",
			   __func__, (unsigned long long)segnum);
		kunmap_atomic(kaddr);
		return;
	}
	nilfs_segment_usage_set_dirty(su);
	kunmap_atomic(kaddr);

	nilfs_sufile_mod_counter(header_bh, -1, 1);
	NILFS_SUI(sufile)->ncleansegs--;

	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}

void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
			   struct buffer_head *header_bh,
			   struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int clean, dirty;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (su->su_flags == cpu_to_le32(BIT(NILFS_SEGMENT_USAGE_DIRTY)) &&
	    su->su_nblocks == cpu_to_le32(0)) {
		kunmap_atomic(kaddr);
		return;
	}
	clean = nilfs_segment_usage_clean(su);
	dirty = nilfs_segment_usage_dirty(su);

	/* make the segment garbage */
	su->su_lastmod = cpu_to_le64(0);
	su->su_nblocks = cpu_to_le32(0);
	su->su_flags = cpu_to_le32(BIT(NILFS_SEGMENT_USAGE_DIRTY));
	kunmap_atomic(kaddr);

	nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
	NILFS_SUI(sufile)->ncleansegs -= clean;

	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}

void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
			  struct buffer_head *header_bh,
			  struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int sudirty;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_clean(su)) {
		nilfs_warn(sufile->i_sb, "%s: segment %llu is already clean",
			   __func__, (unsigned long long)segnum);
		kunmap_atomic(kaddr);
		return;
	}
	WARN_ON(nilfs_segment_usage_error(su));
	WARN_ON(!nilfs_segment_usage_dirty(su));

	sudirty = nilfs_segment_usage_dirty(su);
	nilfs_segment_usage_set_clean(su);
	kunmap_atomic(kaddr);
	mark_buffer_dirty(su_bh);

	nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
	NILFS_SUI(sufile)->ncleansegs++;

	nilfs_mdt_mark_dirty(sufile);

	trace_nilfs2_segment_usage_freed(sufile, segnum);
}

/**
 * nilfs_sufile_mark_dirty - mark the buffer having a segment usage dirty
 * @sufile: inode of segment usage file
 * @segnum: segment number
 *
 * Return Value: On success, 0 is returned.  On error, a negative error
 * code is returned.
 */
int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
{
	struct buffer_head *bh;
	void *kaddr;
	struct nilfs_segment_usage *su;
	int ret;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
	if (!ret) {
		mark_buffer_dirty(bh);
		nilfs_mdt_mark_dirty(sufile);
		kaddr = kmap_atomic(bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
		nilfs_segment_usage_set_dirty(su);
		kunmap_atomic(kaddr);
		brelse(bh);
	}
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

/**
 * nilfs_sufile_set_segment_usage - set usage of a segment
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @nblocks: number of live blocks in the segment
 * @modtime: modification time (optional)
 *
 * Return Value: On success, 0 is returned.  On error, a negative error
 * code is returned.
 */
int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
				   unsigned long nblocks, time64_t modtime)
{
	struct buffer_head *bh;
	struct nilfs_segment_usage *su;
	void *kaddr;
	int ret;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
	WARN_ON(nilfs_segment_usage_error(su));
	if (modtime)
		su->su_lastmod = cpu_to_le64(modtime);
	su->su_nblocks = cpu_to_le32(nblocks);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(bh);
	nilfs_mdt_mark_dirty(sufile);
	brelse(bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
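
/*
 * Illustrative call (a sketch; the caller shown is an assumption about
 * the segment constructor, not code from this file): recording that a
 * just-written segment holds nblocks live blocks would look like
 *
 *	ret = nilfs_sufile_set_segment_usage(sufile, segnum, nblocks,
 *					     ktime_get_real_seconds());
 *
 * Passing 0 as the modification time leaves su_lastmod untouched.
 */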

/**
 * nilfs_sufile_get_stat - get segment usage statistics
 * @sufile: inode of segment usage file
 * @sustat: pointer to a structure of segment usage statistics
 *
 * Description: nilfs_sufile_get_stat() returns information about segment
 * usage.
 *
 * Return Value: On success, 0 is returned, and segment usage information is
 * stored in the place pointed by @sustat. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
{
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	void *kaddr;
	int ret;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
	sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs);
	sustat->ss_ctime = nilfs->ns_ctime;
	sustat->ss_nongc_ctime = nilfs->ns_nongc_ctime;
	spin_lock(&nilfs->ns_last_segment_lock);
	sustat->ss_prot_seq = nilfs->ns_prot_seq;
	spin_unlock(&nilfs->ns_last_segment_lock);
	kunmap_atomic(kaddr);
	brelse(header_bh);

 out_sem:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
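
/*
 * Usage sketch (illustrative; the caller is an assumption): a reader of
 * free-space statistics fills a struct nilfs_sustat this way:
 *
 *	struct nilfs_sustat sustat;
 *
 *	ret = nilfs_sufile_get_stat(sufile, &sustat);
 *	if (!ret)
 *		pr_info("%llu of %llu segments are clean\n",
 *			sustat.ss_ncleansegs, sustat.ss_nsegs);
 */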

void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
			       struct buffer_head *header_bh,
			       struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int suclean;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_error(su)) {
		kunmap_atomic(kaddr);
		return;
	}
	suclean = nilfs_segment_usage_clean(su);
	nilfs_segment_usage_set_error(su);
	kunmap_atomic(kaddr);

	if (suclean) {
		nilfs_sufile_mod_counter(header_bh, -1, 0);
		NILFS_SUI(sufile)->ncleansegs--;
	}
	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}

/**
 * nilfs_sufile_truncate_range - truncate range of segment array
 * @sufile: inode of segment usage file
 * @start: start segment number (inclusive)
 * @end: end segment number (inclusive)
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid number of segments specified
 *
 * %-EBUSY - Dirty or active segments are present in the range
 */
static int nilfs_sufile_truncate_range(struct inode *sufile,
				       __u64 start, __u64 end)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh;
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su, *su2;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	unsigned long segusages_per_block;
	unsigned long nsegs, ncleaned;
	__u64 segnum;
	void *kaddr;
	ssize_t n, nc;
	int ret;
	int j;

	nsegs = nilfs_sufile_get_nsegments(sufile);

	ret = -EINVAL;
	if (start > end || start >= nsegs)
		goto out;

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out;

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	ncleaned = 0;

	for (segnum = start; segnum <= end; segnum += n) {
		n = min_t(unsigned long,
			  segusages_per_block -
				  nilfs_sufile_get_offset(sufile, segnum),
			  end - segnum + 1);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out_header;
			/* hole */
			continue;
		}
		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		su2 = su;
		for (j = 0; j < n; j++, su = (void *)su + susz) {
			if ((le32_to_cpu(su->su_flags) &
			     ~BIT(NILFS_SEGMENT_USAGE_ERROR)) ||
			    nilfs_segment_is_active(nilfs, segnum + j)) {
				ret = -EBUSY;
				kunmap_atomic(kaddr);
				brelse(su_bh);
				goto out_header;
			}
		}
		nc = 0;
		for (su = su2, j = 0; j < n; j++, su = (void *)su + susz) {
			if (nilfs_segment_usage_error(su)) {
				nilfs_segment_usage_set_clean(su);
				nc++;
			}
		}
		kunmap_atomic(kaddr);
		if (nc > 0) {
			mark_buffer_dirty(su_bh);
			ncleaned += nc;
		}
		brelse(su_bh);

		if (n == segusages_per_block) {
			/* make hole */
			nilfs_sufile_delete_segment_usage_block(sufile, segnum);
		}
	}
	ret = 0;

out_header:
	if (ncleaned > 0) {
		NILFS_SUI(sufile)->ncleansegs += ncleaned;
		nilfs_sufile_mod_counter(header_bh, ncleaned, 0);
		nilfs_mdt_mark_dirty(sufile);
	}
	brelse(header_bh);
out:
	return ret;
}

/**
 * nilfs_sufile_resize - resize segment array
 * @sufile: inode of segment usage file
 * @newnsegs: new number of segments
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - Not enough free space is left for shrinking
 *
 * %-EBUSY - Dirty or active segments exist in the region to be truncated
 */
int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	void *kaddr;
	unsigned long nsegs, nrsvsegs;
	int ret = 0;

	down_write(&NILFS_MDT(sufile)->mi_sem);

	nsegs = nilfs_sufile_get_nsegments(sufile);
	if (nsegs == newnsegs)
		goto out;

	ret = -ENOSPC;
	nrsvsegs = nilfs_nrsvsegs(nilfs, newnsegs);
	if (newnsegs < nsegs && nsegs - newnsegs + nrsvsegs > sui->ncleansegs)
		goto out;

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out;

	if (newnsegs > nsegs) {
		sui->ncleansegs += newnsegs - nsegs;
	} else /* newnsegs < nsegs */ {
		ret = nilfs_sufile_truncate_range(sufile, newnsegs, nsegs - 1);
		if (ret < 0)
			goto out_header;

		sui->ncleansegs -= nsegs - newnsegs;

		/*
		 * If the sufile is successfully truncated, immediately adjust
		 * the segment allocation space while locking the semaphore
		 * "mi_sem" so that nilfs_sufile_alloc() never allocates
		 * segments in the truncated space.
		 */
		sui->allocmax = newnsegs - 1;
		sui->allocmin = 0;
	}

	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(sufile);
	nilfs_set_nsegments(nilfs, newnsegs);

out_header:
	brelse(header_bh);
out:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
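
/*
 * Shrink-check example (illustrative numbers): shrinking a 1000-segment
 * device to 800 segments with nrsvsegs = 8 requires
 *
 *	1000 - 800 + 8 = 208
 *
 * clean segments; with fewer, the resize fails with -ENOSPC before any
 * truncation is attempted.
 */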

/**
 * nilfs_sufile_get_suinfo - get segment usage information
 * @sufile: inode of segment usage file
 * @segnum: segment number to start looking
 * @buf: array of suinfo
 * @sisz: byte size of suinfo
 * @nsi: size of suinfo array
 *
 * Description: nilfs_sufile_get_suinfo() copies the segment usage
 * information of up to @nsi segments, starting at @segnum, into @buf.
 *
 * Return Value: On success, the number of segment usage entries stored in
 * @buf is returned. On error, one of the following negative error codes is
 * returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
				unsigned int sisz, size_t nsi)
{
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su;
	struct nilfs_suinfo *si = buf;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	void *kaddr;
	unsigned long nsegs, segusages_per_block;
	ssize_t n;
	int ret, i, j;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	nsegs = min_t(unsigned long,
		      nilfs_sufile_get_nsegments(sufile) - segnum,
		      nsi);
	for (i = 0; i < nsegs; i += n, segnum += n) {
		n = min_t(unsigned long,
			  segusages_per_block -
				  nilfs_sufile_get_offset(sufile, segnum),
			  nsegs - i);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out;
			/* hole */
			memset(si, 0, sisz * n);
			si = (void *)si + sisz * n;
			continue;
		}

		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		for (j = 0; j < n;
		     j++, su = (void *)su + susz, si = (void *)si + sisz) {
			si->sui_lastmod = le64_to_cpu(su->su_lastmod);
			si->sui_nblocks = le32_to_cpu(su->su_nblocks);
			si->sui_flags = le32_to_cpu(su->su_flags) &
				~BIT(NILFS_SEGMENT_USAGE_ACTIVE);
			if (nilfs_segment_is_active(nilfs, segnum + j))
				si->sui_flags |=
					BIT(NILFS_SEGMENT_USAGE_ACTIVE);
		}
		kunmap_atomic(kaddr);
		brelse(su_bh);
	}
	ret = nsegs;

 out:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
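
/*
 * Caller loop sketch (illustrative; the chunk size is arbitrary):
 *
 *	struct nilfs_suinfo si[64];
 *	__u64 segnum = 0;
 *	ssize_t n;
 *
 *	while ((n = nilfs_sufile_get_suinfo(sufile, segnum, si,
 *					    sizeof(si[0]), 64)) > 0)
 *		segnum += n;
 *
 * Entries that fall in a hole block are returned zero-filled, which
 * reads as a clean, never-used segment.
 */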

/**
 * nilfs_sufile_set_suinfo - sets segment usage info
 * @sufile: inode of segment usage file
 * @buf: array of suinfo_update
 * @supsz: byte size of suinfo_update
 * @nsup: size of suinfo_update array
 *
 * Description: Takes an array of nilfs_suinfo_update structs and updates
 * segment usage accordingly. Only the fields indicated by the sup_flags
 * are updated.
 *
 * Return Value: On success, 0 is returned. On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid values in input (segment number, flags or nblocks)
 */
ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf,
				unsigned int supsz, size_t nsup)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh, *bh;
	struct nilfs_suinfo_update *sup, *supend = buf + supsz * nsup;
	struct nilfs_segment_usage *su;
	void *kaddr;
	unsigned long blkoff, prev_blkoff;
	int cleansi, cleansu, dirtysi, dirtysu;
	long ncleaned = 0, ndirtied = 0;
	int ret = 0;

	if (unlikely(nsup == 0))
		return ret;

	for (sup = buf; sup < supend; sup = (void *)sup + supsz) {
		if (sup->sup_segnum >= nilfs->ns_nsegments
			|| (sup->sup_flags &
				(~0UL << __NR_NILFS_SUINFO_UPDATE_FIELDS))
			|| (nilfs_suinfo_update_nblocks(sup) &&
				sup->sup_sui.sui_nblocks >
				nilfs->ns_blocks_per_segment))
			return -EINVAL;
	}

	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	sup = buf;
	blkoff = nilfs_sufile_get_blkoff(sufile, sup->sup_segnum);
	ret = nilfs_mdt_get_block(sufile, blkoff, 1, NULL, &bh);
	if (ret < 0)
		goto out_header;

	for (;;) {
		kaddr = kmap_atomic(bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, sup->sup_segnum, bh, kaddr);

		if (nilfs_suinfo_update_lastmod(sup))
			su->su_lastmod = cpu_to_le64(sup->sup_sui.sui_lastmod);

		if (nilfs_suinfo_update_nblocks(sup))
			su->su_nblocks = cpu_to_le32(sup->sup_sui.sui_nblocks);

		if (nilfs_suinfo_update_flags(sup)) {
			/*
			 * The active flag is a virtual flag projected by the
			 * running nilfs kernel code; drop it so that it is
			 * not written to disk.
			 */
			sup->sup_sui.sui_flags &=
					~BIT(NILFS_SEGMENT_USAGE_ACTIVE);

			cleansi = nilfs_suinfo_clean(&sup->sup_sui);
			cleansu = nilfs_segment_usage_clean(su);
			dirtysi = nilfs_suinfo_dirty(&sup->sup_sui);
			dirtysu = nilfs_segment_usage_dirty(su);

			if (cleansi && !cleansu)
				++ncleaned;
			else if (!cleansi && cleansu)
				--ncleaned;

			if (dirtysi && !dirtysu)
				++ndirtied;
			else if (!dirtysi && dirtysu)
				--ndirtied;

			su->su_flags = cpu_to_le32(sup->sup_sui.sui_flags);
		}

		kunmap_atomic(kaddr);

		sup = (void *)sup + supsz;
		if (sup >= supend)
			break;

		prev_blkoff = blkoff;
		blkoff = nilfs_sufile_get_blkoff(sufile, sup->sup_segnum);
		if (blkoff == prev_blkoff)
			continue;

		/* get different block */
		mark_buffer_dirty(bh);
		put_bh(bh);
		ret = nilfs_mdt_get_block(sufile, blkoff, 1, NULL, &bh);
		if (unlikely(ret < 0))
			goto out_mark;
	}
	mark_buffer_dirty(bh);
	put_bh(bh);

 out_mark:
	if (ncleaned || ndirtied) {
		nilfs_sufile_mod_counter(header_bh, (u64)ncleaned,
				(u64)ndirtied);
		NILFS_SUI(sufile)->ncleansegs += ncleaned;
	}
	nilfs_mdt_mark_dirty(sufile);
 out_header:
	put_bh(header_bh);
 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
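
/*
 * Update sketch (illustrative; field and flag names are assumed from the
 * uapi struct nilfs_suinfo_update): recording 100 live blocks for
 * segment 42 without touching its timestamp or flags would look like
 *
 *	struct nilfs_suinfo_update sup = {
 *		.sup_segnum = 42,
 *		.sup_flags = BIT(NILFS_SUINFO_UPDATE_NBLOCKS),
 *		.sup_sui.sui_nblocks = 100,
 *	};
 *
 *	ret = nilfs_sufile_set_suinfo(sufile, &sup, sizeof(sup), 1);
 */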

/**
 * nilfs_sufile_trim_fs() - trim ioctl handler function
 * @sufile: inode of segment usage file
 * @range: fstrim_range structure
 *
 * start:	first byte to trim
 * len:		number of bytes to trim from start
 * minlen:	minimum extent length in bytes
 *
 * Description: nilfs_sufile_trim_fs() goes through all segments containing
 * bytes from start to start+len. start is rounded up to the next block
 * boundary and start+len is rounded down. For each clean segment, the
 * blkdev_issue_discard function is invoked.
 *
 * Return Value: On success, 0 is returned. On error, a negative error code
 * is returned.
 */
int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su;
	void *kaddr;
	size_t n, i, susz = NILFS_MDT(sufile)->mi_entry_size;
	sector_t seg_start, seg_end, start_block, end_block;
	sector_t start = 0, nblocks = 0;
	u64 segnum, segnum_end, minlen, len, max_blocks, ndiscarded = 0;
	int ret = 0;
	unsigned int sects_per_block;

	sects_per_block = (1 << nilfs->ns_blocksize_bits) /
			bdev_logical_block_size(nilfs->ns_bdev);
	len = range->len >> nilfs->ns_blocksize_bits;
	minlen = range->minlen >> nilfs->ns_blocksize_bits;
	max_blocks = ((u64)nilfs->ns_nsegments * nilfs->ns_blocks_per_segment);

	if (!len || range->start >= max_blocks << nilfs->ns_blocksize_bits)
		return -EINVAL;

	start_block = (range->start + nilfs->ns_blocksize - 1) >>
			nilfs->ns_blocksize_bits;

	/*
	 * range->len can be very large (actually, it is set to
	 * ULLONG_MAX by default) - truncate upper end of the range
	 * carefully so as not to overflow.
	 */
	if (max_blocks - start_block < len)
		end_block = max_blocks - 1;
	else
		end_block = start_block + len - 1;

	segnum = nilfs_get_segnum_of_block(nilfs, start_block);
	segnum_end = nilfs_get_segnum_of_block(nilfs, end_block);

	down_read(&NILFS_MDT(sufile)->mi_sem);

	while (segnum <= segnum_end) {
		n = nilfs_sufile_segment_usages_in_block(sufile, segnum,
				segnum_end);

		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out_sem;
			/* hole */
			segnum += n;
			continue;
		}

		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(sufile, segnum,
				su_bh, kaddr);
		for (i = 0; i < n; ++i, ++segnum, su = (void *)su + susz) {
			if (!nilfs_segment_usage_clean(su))
				continue;

			nilfs_get_segment_range(nilfs, segnum, &seg_start,
						&seg_end);

			if (!nblocks) {
				/* start new extent */
				start = seg_start;
				nblocks = seg_end - seg_start + 1;
				continue;
			}

			if (start + nblocks == seg_start) {
				/* add to previous extent */
				nblocks += seg_end - seg_start + 1;
				continue;
			}

			/* discard previous extent */
			if (start < start_block) {
				nblocks -= start_block - start;
				start = start_block;
			}

			if (nblocks >= minlen) {
				kunmap_atomic(kaddr);

				ret = blkdev_issue_discard(nilfs->ns_bdev,
						start * sects_per_block,
						nblocks * sects_per_block,
						GFP_NOFS);
				if (ret < 0) {
					put_bh(su_bh);
					goto out_sem;
				}

				ndiscarded += nblocks;
				kaddr = kmap_atomic(su_bh->b_page);
				su = nilfs_sufile_block_get_segment_usage(
					sufile, segnum, su_bh, kaddr);
			}

			/* start new extent */
			start = seg_start;
			nblocks = seg_end - seg_start + 1;
		}
		kunmap_atomic(kaddr);
		put_bh(su_bh);
	}

	if (nblocks) {
		/* discard last extent */
		if (start < start_block) {
			nblocks -= start_block - start;
			start = start_block;
		}
		if (start + nblocks > end_block + 1)
			nblocks = end_block - start + 1;

		if (nblocks >= minlen) {
			ret = blkdev_issue_discard(nilfs->ns_bdev,
					start * sects_per_block,
					nblocks * sects_per_block,
					GFP_NOFS);
			if (!ret)
				ndiscarded += nblocks;
		}
	}

out_sem:
	up_read(&NILFS_MDT(sufile)->mi_sem);

	range->len = ndiscarded << nilfs->ns_blocksize_bits;
	return ret;
}
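
/*
 * Unit-conversion example (illustrative numbers): with 4096-byte
 * filesystem blocks (ns_blocksize_bits = 12) on a device with 512-byte
 * logical sectors, sects_per_block = 4096 / 512 = 8.  A clean extent of
 * 256 blocks starting at block 1024 is then discarded as sectors
 * 8192..10239, and those 256 blocks are reported back to the caller as
 * 256 << 12 bytes in range->len.
 */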

/**
 * nilfs_sufile_read - read or get sufile inode
 * @sb: super block instance
 * @susize: size of a segment usage entry
 * @raw_inode: on-disk sufile inode
 * @inodep: buffer to store the inode
 *
 * Return Value: On success, 0 is returned and the sufile inode is stored in
 * the place pointed by @inodep. On error, a negative error code is returned
 * (%-EINVAL if @susize is out of range).
 */
int nilfs_sufile_read(struct super_block *sb, size_t susize,
		      struct nilfs_inode *raw_inode, struct inode **inodep)
{
	struct inode *sufile;
	struct nilfs_sufile_info *sui;
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	void *kaddr;
	int err;

	if (susize > sb->s_blocksize) {
		nilfs_err(sb, "too large segment usage size: %zu bytes",
			  susize);
		return -EINVAL;
	} else if (susize < NILFS_MIN_SEGMENT_USAGE_SIZE) {
		nilfs_err(sb, "too small segment usage size: %zu bytes",
			  susize);
		return -EINVAL;
	}

	sufile = nilfs_iget_locked(sb, NULL, NILFS_SUFILE_INO);
	if (unlikely(!sufile))
		return -ENOMEM;
	if (!(sufile->i_state & I_NEW))
		goto out;

	err = nilfs_mdt_init(sufile, NILFS_MDT_GFP, sizeof(*sui));
	if (err)
		goto failed;

	nilfs_mdt_set_entry_size(sufile, susize,
				 sizeof(struct nilfs_sufile_header));

	err = nilfs_read_inode_common(sufile, raw_inode);
	if (err)
		goto failed;

	err = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (err)
		goto failed;

	sui = NILFS_SUI(sufile);
	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	kunmap_atomic(kaddr);
	brelse(header_bh);

	sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1;
	sui->allocmin = 0;

	unlock_new_inode(sufile);
 out:
	*inodep = sufile;
	return 0;
 failed:
	iget_failed(sufile);
	return err;
}
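
/*
 * Mount-time sketch (illustrative; the caller and variable names are
 * assumptions, the superblock field is from the on-disk format): the
 * superblock loader reads the entry size recorded on disk and passes it
 * here together with the raw sufile inode, roughly as
 *
 *	err = nilfs_sufile_read(sb, le16_to_cpu(sbp->s_segment_usage_size),
 *				&raw_inode, &nilfs->ns_sufile);
 */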
1233