// SPDX-License-Identifier: GPL-2.0+
/*
 * NILFS disk address translation.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Koji Sato.
 */

#include <linux/types.h>
#include <linux/buffer_head.h>
#include <linux/string.h>
#include <linux/errno.h>
#include "nilfs.h"
#include "mdt.h"
#include "alloc.h"
#include "dat.h"


#define NILFS_CNO_MIN	((__u64)1)
#define NILFS_CNO_MAX	(~(__u64)0)

/**
 * struct nilfs_dat_info - on-memory private data of DAT file
 * @mi: on-memory private data of metadata file
 * @palloc_cache: persistent object allocator cache of DAT file
 * @shadow: shadow map of DAT file
 */
struct nilfs_dat_info {
	struct nilfs_mdt_info mi;
	struct nilfs_palloc_cache palloc_cache;
	struct nilfs_shadow_map shadow;
};

static inline struct nilfs_dat_info *NILFS_DAT_I(struct inode *dat)
{
	return (struct nilfs_dat_info *)NILFS_MDT(dat);
}

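/*
 * DAT entries are modified with a prepare/commit/abort protocol: a
 * prepare function loads the buffer holding the on-disk entry, and the
 * caller then either commits the modification, marking the buffer
 * dirty, or aborts it, releasing the buffer unchanged.
 */

/**
 * nilfs_dat_prepare_entry - prepare to update a DAT entry
 * @dat: DAT file inode
 * @req: request whose pr_entry_nr names the entry to update
 * @create: nonzero to create the entry block if it does not exist
 *
 * Looks up the block containing the requested DAT entry and stores its
 * buffer head in req->pr_entry_bh.
 */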
static int nilfs_dat_prepare_entry(struct inode *dat,
				   struct nilfs_palloc_req *req, int create)
{
	return nilfs_palloc_get_entry_block(dat, req->pr_entry_nr,
					    create, &req->pr_entry_bh);
}

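/**
 * nilfs_dat_commit_entry - commit an update to a DAT entry
 * @dat: DAT file inode
 * @req: request with the prepared entry buffer
 *
 * Marks the entry buffer and the DAT inode dirty, then releases the
 * buffer obtained by nilfs_dat_prepare_entry().
 */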
static void nilfs_dat_commit_entry(struct inode *dat,
				   struct nilfs_palloc_req *req)
{
	mark_buffer_dirty(req->pr_entry_bh);
	nilfs_mdt_mark_dirty(dat);
	brelse(req->pr_entry_bh);
}

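/**
 * nilfs_dat_abort_entry - abort an update to a DAT entry
 * @dat: DAT file inode
 * @req: request with the prepared entry buffer
 *
 * Releases the entry buffer without marking anything dirty.
 */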
static void nilfs_dat_abort_entry(struct inode *dat,
				  struct nilfs_palloc_req *req)
{
	brelse(req->pr_entry_bh);
}

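/**
 * nilfs_dat_prepare_alloc - prepare to allocate a virtual block number
 * @dat: DAT file inode
 * @req: allocation request
 *
 * Reserves an unused entry in the persistent allocator and loads the
 * block that will hold it, rolling the reservation back on failure.
 */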
int nilfs_dat_prepare_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	int ret;

	ret = nilfs_palloc_prepare_alloc_entry(dat, req);
	if (ret < 0)
		return ret;

	ret = nilfs_dat_prepare_entry(dat, req, 1);
	if (ret < 0)
		nilfs_palloc_abort_alloc_entry(dat, req);

	return ret;
}

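/**
 * nilfs_dat_commit_alloc - finish allocating a virtual block number
 * @dat: DAT file inode
 * @req: allocation request
 *
 * Initializes the new entry with the full checkpoint range
 * [NILFS_CNO_MIN, NILFS_CNO_MAX] and no assigned block, then commits
 * both the allocator state and the entry buffer.
 */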
void nilfs_dat_commit_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_end = cpu_to_le64(NILFS_CNO_MAX);
	entry->de_blocknr = cpu_to_le64(0);
	kunmap_atomic(kaddr);

	nilfs_palloc_commit_alloc_entry(dat, req);
	nilfs_dat_commit_entry(dat, req);
}

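/**
 * nilfs_dat_abort_alloc - abort allocation of a virtual block number
 * @dat: DAT file inode
 * @req: allocation request
 *
 * Releases the entry buffer and cancels the reservation made by
 * nilfs_dat_prepare_alloc().
 */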
void nilfs_dat_abort_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	nilfs_dat_abort_entry(dat, req);
	nilfs_palloc_abort_alloc_entry(dat, req);
}

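/**
 * nilfs_dat_commit_free - finish deallocating a virtual block number
 * @dat: DAT file inode
 * @req: deallocation request
 *
 * Resets the entry to the unallocated state (an empty checkpoint range
 * and no block number) and returns it to the persistent allocator.  If
 * the descriptor or bitmap buffer is missing from the request, the
 * filesystem is marked inconsistent instead, as this indicates a
 * duplicate use of the virtual block number.
 */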
static void nilfs_dat_commit_free(struct inode *dat,
				  struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_end = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_blocknr = cpu_to_le64(0);
	kunmap_atomic(kaddr);

	nilfs_dat_commit_entry(dat, req);

	if (unlikely(req->pr_desc_bh == NULL || req->pr_bitmap_bh == NULL)) {
		nilfs_error(dat->i_sb,
			    "state inconsistency probably due to duplicate use of vblocknr = %llu",
			    (unsigned long long)req->pr_entry_nr);
		return;
	}
	nilfs_palloc_commit_free_entry(dat, req);
}

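/**
 * nilfs_dat_prepare_start - prepare to assign a block to a virtual block number
 * @dat: DAT file inode
 * @req: request whose pr_entry_nr names the entry to update
 *
 * Loads the entry buffer; the entry is expected to exist, so -ENOENT
 * here indicates metadata inconsistency and triggers a warning.
 */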
int nilfs_dat_prepare_start(struct inode *dat, struct nilfs_palloc_req *req)
{
	int ret;

	ret = nilfs_dat_prepare_entry(dat, req, 0);
	WARN_ON(ret == -ENOENT);
	return ret;
}

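/**
 * nilfs_dat_commit_start - finish assigning a block to a virtual block number
 * @dat: DAT file inode
 * @req: request with the prepared entry buffer
 * @blocknr: disk block number to assign
 *
 * Records @blocknr in the entry and starts its lifetime at the current
 * checkpoint number.
 */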
void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
			    sector_t blocknr)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat));
	entry->de_blocknr = cpu_to_le64(blocknr);
	kunmap_atomic(kaddr);

	nilfs_dat_commit_entry(dat, req);
}

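/**
 * nilfs_dat_prepare_end - prepare to end the lifetime of a virtual block number
 * @dat: DAT file inode
 * @req: request whose pr_entry_nr names the entry to update
 *
 * Loads the entry buffer and, if the entry has no block assigned,
 * additionally prepares to free it, since ending the lifetime of an
 * unassigned entry deallocates it.
 */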
int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	sector_t blocknr;
	void *kaddr;
	int ret;

	ret = nilfs_dat_prepare_entry(dat, req, 0);
	if (ret < 0) {
		WARN_ON(ret == -ENOENT);
		return ret;
	}

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr);

	if (blocknr == 0) {
		ret = nilfs_palloc_prepare_free_entry(dat, req);
		if (ret < 0) {
			nilfs_dat_abort_entry(dat, req);
			return ret;
		}
	}

	return 0;
}

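/**
 * nilfs_dat_commit_end - finish ending the lifetime of a virtual block number
 * @dat: DAT file inode
 * @req: request with the prepared entry buffer
 * @dead: nonzero to leave the lifetime empty, marking the entry dead
 *
 * Closes the entry's lifetime at the current checkpoint number, or
 * collapses it to an empty range if @dead is set.  An entry with no
 * assigned block is freed; otherwise only the entry update is committed.
 */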
void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
			  int dead)
{
	struct nilfs_dat_entry *entry;
	__u64 start, end;
	sector_t blocknr;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	end = start = le64_to_cpu(entry->de_start);
	if (!dead) {
		end = nilfs_mdt_cno(dat);
		WARN_ON(start > end);
	}
	entry->de_end = cpu_to_le64(end);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr);

	if (blocknr == 0)
		nilfs_dat_commit_free(dat, req);
	else
		nilfs_dat_commit_entry(dat, req);
}

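/**
 * nilfs_dat_abort_end - abort ending the lifetime of a virtual block number
 * @dat: DAT file inode
 * @req: request with the prepared entry buffer
 *
 * Rolls back nilfs_dat_prepare_end(): the free prepared for a
 * just-started, unassigned entry is cancelled, and the entry buffer is
 * released.
 */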
void nilfs_dat_abort_end(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	__u64 start;
	sector_t blocknr;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	start = le64_to_cpu(entry->de_start);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr);

	if (start == nilfs_mdt_cno(dat) && blocknr == 0)
		nilfs_palloc_abort_free_entry(dat, req);
	nilfs_dat_abort_entry(dat, req);
}

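/**
 * nilfs_dat_prepare_update - prepare to reassign a virtual block number
 * @dat: DAT file inode
 * @oldreq: request for the entry whose lifetime is ending
 * @newreq: allocation request for the replacement entry
 *
 * Combines nilfs_dat_prepare_end() on the old entry with
 * nilfs_dat_prepare_alloc() for its replacement, undoing the first
 * step if the second fails.
 */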
int nilfs_dat_prepare_update(struct inode *dat,
			     struct nilfs_palloc_req *oldreq,
			     struct nilfs_palloc_req *newreq)
{
	int ret;

	ret = nilfs_dat_prepare_end(dat, oldreq);
	if (!ret) {
		ret = nilfs_dat_prepare_alloc(dat, newreq);
		if (ret < 0)
			nilfs_dat_abort_end(dat, oldreq);
	}
	return ret;
}

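/**
 * nilfs_dat_commit_update - commit the reassignment of a virtual block number
 * @dat: DAT file inode
 * @oldreq: request for the entry whose lifetime is ending
 * @newreq: allocation request for the replacement entry
 * @dead: nonzero to leave the old entry's lifetime empty, marking it dead
 */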
void nilfs_dat_commit_update(struct inode *dat,
			     struct nilfs_palloc_req *oldreq,
			     struct nilfs_palloc_req *newreq, int dead)
{
	nilfs_dat_commit_end(dat, oldreq, dead);
	nilfs_dat_commit_alloc(dat, newreq);
}

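/**
 * nilfs_dat_abort_update - abort the reassignment of a virtual block number
 * @dat: DAT file inode
 * @oldreq: request for the entry whose lifetime was to end
 * @newreq: allocation request for the replacement entry
 */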
void nilfs_dat_abort_update(struct inode *dat,
			    struct nilfs_palloc_req *oldreq,
			    struct nilfs_palloc_req *newreq)
{
	nilfs_dat_abort_end(dat, oldreq);
	nilfs_dat_abort_alloc(dat, newreq);
}

/**
 * nilfs_dat_mark_dirty - mark the buffer of a DAT entry dirty
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 *
 * Description: nilfs_dat_mark_dirty() marks the buffer containing the
 * DAT entry of @vblocknr dirty so that the entry is written back to
 * disk.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_dat_mark_dirty(struct inode *dat, __u64 vblocknr)
{
	struct nilfs_palloc_req req;
	int ret;

	req.pr_entry_nr = vblocknr;
	ret = nilfs_dat_prepare_entry(dat, &req, 0);
	if (ret == 0)
		nilfs_dat_commit_entry(dat, &req);
	return ret;
}

/**
 * nilfs_dat_freev - free virtual block numbers
 * @dat: DAT file inode
 * @vblocknrs: array of virtual block numbers
 * @nitems: number of virtual block numbers
 *
 * Description: nilfs_dat_freev() frees the virtual block numbers specified by
 * @vblocknrs and @nitems.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - The virtual block number has not been allocated.
 */
int nilfs_dat_freev(struct inode *dat, __u64 *vblocknrs, size_t nitems)
{
	return nilfs_palloc_freev(dat, vblocknrs, nitems);
}

/**
 * nilfs_dat_move - change the block number associated with a virtual block
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknr: block number
 *
 * Description: nilfs_dat_move() changes the block number associated with
 * @vblocknr to @blocknr.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - The DAT entry for @vblocknr is invalid (no block is
 * currently assigned to it).
 */
int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	void *kaddr;
	int ret;

	ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
	if (ret < 0)
		return ret;

	/*
	 * The given disk block number (blocknr) is not yet written to
	 * the device at this point.
	 *
	 * To prevent nilfs_dat_translate() from returning the
	 * uncommitted block number, this makes a copy of the entry
	 * buffer and redirects nilfs_dat_translate() to the copy.
	 */
	if (!buffer_nilfs_redirected(entry_bh)) {
		ret = nilfs_mdt_freeze_buffer(dat, entry_bh);
		if (ret) {
			brelse(entry_bh);
			return ret;
		}
	}

	kaddr = kmap_atomic(entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
	if (unlikely(entry->de_blocknr == cpu_to_le64(0))) {
		nilfs_crit(dat->i_sb,
			   "%s: invalid vblocknr = %llu, [%llu, %llu)",
			   __func__, (unsigned long long)vblocknr,
			   (unsigned long long)le64_to_cpu(entry->de_start),
			   (unsigned long long)le64_to_cpu(entry->de_end));
		kunmap_atomic(kaddr);
		brelse(entry_bh);
		return -EINVAL;
	}
	WARN_ON(blocknr == 0);
	entry->de_blocknr = cpu_to_le64(blocknr);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(entry_bh);
	nilfs_mdt_mark_dirty(dat);

	brelse(entry_bh);

	return 0;
}

/**
 * nilfs_dat_translate - translate a virtual block number to a block number
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknrp: pointer to a block number
 *
 * Description: nilfs_dat_translate() maps the virtual block number @vblocknr
 * to the corresponding block number.
 *
 * Return Value: On success, 0 is returned and the block number associated
 * with @vblocknr is stored in the place pointed to by @blocknrp. On error,
 * one of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - A block number associated with @vblocknr does not exist.
 */
int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
{
	struct buffer_head *entry_bh, *bh;
	struct nilfs_dat_entry *entry;
	sector_t blocknr;
	void *kaddr;
	int ret;

	ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
	if (ret < 0)
		return ret;

	if (!nilfs_doing_gc() && buffer_nilfs_redirected(entry_bh)) {
		bh = nilfs_mdt_get_frozen_buffer(dat, entry_bh);
		if (bh) {
			WARN_ON(!buffer_uptodate(bh));
			brelse(entry_bh);
			entry_bh = bh;
		}
	}

	kaddr = kmap_atomic(entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
	blocknr = le64_to_cpu(entry->de_blocknr);
	if (blocknr == 0) {
		ret = -ENOENT;
		goto out;
	}
	*blocknrp = blocknr;

 out:
	kunmap_atomic(kaddr);
	brelse(entry_bh);
	return ret;
}

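/**
 * nilfs_dat_get_vinfo - get information on virtual block addresses
 * @dat: DAT file inode
 * @buf: array of nilfs_vinfo structures, each holding a virtual block
 *	 number to look up on input
 * @visz: size of a single nilfs_vinfo structure
 * @nvi: number of structures in @buf
 *
 * For each requested virtual block number, fills in its lifetime
 * (vi_start and vi_end) and its assigned disk block number.  Consecutive
 * requests that fall in the same entry block are served from a single
 * block read.
 *
 * Return Value: number of entries processed (@nvi) on success, or a
 * negative error code on failure.
 */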
ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned int visz,
			    size_t nvi)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	struct nilfs_vinfo *vinfo = buf;
	__u64 first, last;
	void *kaddr;
	unsigned long entries_per_block = NILFS_MDT(dat)->mi_entries_per_block;
	int i, j, n, ret;

	for (i = 0; i < nvi; i += n) {
		ret = nilfs_palloc_get_entry_block(dat, vinfo->vi_vblocknr,
						   0, &entry_bh);
		if (ret < 0)
			return ret;
		kaddr = kmap_atomic(entry_bh->b_page);
		/* first and last virtual block numbers in this block */
		first = vinfo->vi_vblocknr;
		do_div(first, entries_per_block);
		first *= entries_per_block;
		last = first + entries_per_block - 1;
		for (j = i, n = 0;
		     j < nvi && vinfo->vi_vblocknr >= first &&
			     vinfo->vi_vblocknr <= last;
		     j++, n++, vinfo = (void *)vinfo + visz) {
			entry = nilfs_palloc_block_get_entry(
				dat, vinfo->vi_vblocknr, entry_bh, kaddr);
			vinfo->vi_start = le64_to_cpu(entry->de_start);
			vinfo->vi_end = le64_to_cpu(entry->de_end);
			vinfo->vi_blocknr = le64_to_cpu(entry->de_blocknr);
		}
		kunmap_atomic(kaddr);
		brelse(entry_bh);
	}

	return nvi;
}

/**
 * nilfs_dat_read - read or get the DAT inode
 * @sb: super block instance
 * @entry_size: size of a DAT entry
 * @raw_inode: on-disk DAT inode
 * @inodep: buffer to store the inode
 *
 * Return Value: On success, 0 is returned and the DAT inode is stored in
 * the place pointed to by @inodep. On error, a negative error code is
 * returned, such as %-EINVAL for an invalid DAT entry size or %-ENOMEM
 * when memory is insufficient.
 */
int nilfs_dat_read(struct super_block *sb, size_t entry_size,
		   struct nilfs_inode *raw_inode, struct inode **inodep)
{
	static struct lock_class_key dat_lock_key;
	struct inode *dat;
	struct nilfs_dat_info *di;
	int err;

	if (entry_size > sb->s_blocksize) {
		nilfs_err(sb, "too large DAT entry size: %zu bytes",
			  entry_size);
		return -EINVAL;
	} else if (entry_size < NILFS_MIN_DAT_ENTRY_SIZE) {
		nilfs_err(sb, "too small DAT entry size: %zu bytes",
			  entry_size);
		return -EINVAL;
	}

	dat = nilfs_iget_locked(sb, NULL, NILFS_DAT_INO);
	if (unlikely(!dat))
		return -ENOMEM;
	if (!(dat->i_state & I_NEW))
		goto out;

	err = nilfs_mdt_init(dat, NILFS_MDT_GFP, sizeof(*di));
	if (err)
		goto failed;

	err = nilfs_palloc_init_blockgroup(dat, entry_size);
	if (err)
		goto failed;

	di = NILFS_DAT_I(dat);
	lockdep_set_class(&di->mi.mi_sem, &dat_lock_key);
	nilfs_palloc_setup_cache(dat, &di->palloc_cache);
	err = nilfs_mdt_setup_shadow_map(dat, &di->shadow);
	if (err)
		goto failed;

	err = nilfs_read_inode_common(dat, raw_inode);
	if (err)
		goto failed;

	unlock_new_inode(dat);
 out:
	*inodep = dat;
	return 0;
 failed:
	iget_failed(dat);
	return err;
}
523