// SPDX-License-Identifier: GPL-2.0+
/*
 * Buffer/page management specific to NILFS
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi and Seiji Kihara.
 */

#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/page-flags.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/gfp.h>
#include "nilfs.h"
#include "page.h"
#include "mdt.h"


#define NILFS_BUFFER_INHERENT_BITS					\
	(BIT(BH_Uptodate) | BIT(BH_Mapped) | BIT(BH_NILFS_Node) |	\
	 BIT(BH_NILFS_Volatile) | BIT(BH_NILFS_Checked))

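/*
 * __nilfs_get_page_block - get the buffer head for a block within a page
 * @page: locked page to look up the buffer in
 * @block: absolute block number to look up
 * @index: index of @page in its page cache
 * @blkbits: block size shift of the inode
 * @b_state: initial buffer state bits for newly created empty buffers
 *
 * Creates empty buffers on @page if it has none, then returns the buffer
 * head corresponding to @block with its reference count incremented.
 */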
static struct buffer_head *
__nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index,
		       int blkbits, unsigned long b_state)
{
	unsigned long first_block;
	struct buffer_head *bh;

	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << blkbits, b_state);

	first_block = (unsigned long)index << (PAGE_SHIFT - blkbits);
	bh = nilfs_page_get_nth_block(page, block - first_block);

	touch_buffer(bh);
	wait_on_buffer(bh);
	return bh;
}

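/**
 * nilfs_grab_buffer - get a buffer for a block, creating it if needed
 * @inode: inode that owns the block
 * @mapping: page cache to search
 * @blkoff: block offset within the inode
 * @b_state: initial buffer state bits for newly created buffers
 *
 * Grabs and locks the page cache page that covers @blkoff and returns the
 * matching buffer head.  Returns NULL if the page could not be obtained.
 */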
struct buffer_head *nilfs_grab_buffer(struct inode *inode,
				      struct address_space *mapping,
				      unsigned long blkoff,
				      unsigned long b_state)
{
	int blkbits = inode->i_blkbits;
	pgoff_t index = blkoff >> (PAGE_SHIFT - blkbits);
	struct page *page;
	struct buffer_head *bh;

	page = grab_cache_page(mapping, index);
	if (unlikely(!page))
		return NULL;

	bh = __nilfs_get_page_block(page, blkoff, index, blkbits, b_state);
	if (unlikely(!bh)) {
		unlock_page(page);
		put_page(page);
		return NULL;
	}
	return bh;
}

/**
 * nilfs_forget_buffer - discard dirty state
 * @bh: buffer head of the buffer to be discarded
 */
void nilfs_forget_buffer(struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	const unsigned long clear_bits =
		(BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) |
		 BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
		 BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected) |
		 BIT(BH_Delay));

	lock_buffer(bh);
	set_mask_bits(&bh->b_state, clear_bits, 0);
	if (nilfs_page_buffers_clean(page))
		__nilfs_clear_page_dirty(page);

	bh->b_blocknr = -1;
	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);
	unlock_buffer(bh);
	brelse(bh);
}

/**
 * nilfs_copy_buffer -- copy buffer data and flags
 * @dbh: destination buffer
 * @sbh: source buffer
 */
void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh)
{
	void *kaddr0, *kaddr1;
	unsigned long bits;
	struct page *spage = sbh->b_page, *dpage = dbh->b_page;
	struct buffer_head *bh;

	kaddr0 = kmap_atomic(spage);
	kaddr1 = kmap_atomic(dpage);
	memcpy(kaddr1 + bh_offset(dbh), kaddr0 + bh_offset(sbh), sbh->b_size);
	kunmap_atomic(kaddr1);
	kunmap_atomic(kaddr0);

	dbh->b_state = sbh->b_state & NILFS_BUFFER_INHERENT_BITS;
	dbh->b_blocknr = sbh->b_blocknr;
	dbh->b_bdev = sbh->b_bdev;

	bh = dbh;
	bits = sbh->b_state & (BIT(BH_Uptodate) | BIT(BH_Mapped));
	while ((bh = bh->b_this_page) != dbh) {
		lock_buffer(bh);
		bits &= bh->b_state;
		unlock_buffer(bh);
	}
	if (bits & BIT(BH_Uptodate))
		SetPageUptodate(dpage);
	else
		ClearPageUptodate(dpage);
	if (bits & BIT(BH_Mapped))
		SetPageMappedToDisk(dpage);
	else
		ClearPageMappedToDisk(dpage);
}

/**
 * nilfs_page_buffers_clean - check whether a page has dirty buffers
 * @page: page to be checked
 *
 * nilfs_page_buffers_clean() returns zero if the page has dirty buffers.
 * Otherwise, it returns a non-zero value.
 */
int nilfs_page_buffers_clean(struct page *page)
{
	struct buffer_head *bh, *head;

	bh = head = page_buffers(page);
	do {
		if (buffer_dirty(bh))
			return 0;
		bh = bh->b_this_page;
	} while (bh != head);
	return 1;
}

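/*
 * nilfs_page_bug - dump diagnostic information about a broken page
 *
 * Prints the reference count, index, flags, and owner inode of @page,
 * followed by the state of every buffer head attached to it.
 */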
void nilfs_page_bug(struct page *page)
{
	struct address_space *m;
	unsigned long ino;

	if (unlikely(!page)) {
		printk(KERN_CRIT "NILFS_PAGE_BUG(NULL)\n");
		return;
	}

	m = page->mapping;
	ino = m ? m->host->i_ino : 0;

	printk(KERN_CRIT "NILFS_PAGE_BUG(%p): cnt=%d index#=%llu flags=0x%lx "
	       "mapping=%p ino=%lu\n",
	       page, page_ref_count(page),
	       (unsigned long long)page->index, page->flags, m, ino);

	if (page_has_buffers(page)) {
		struct buffer_head *bh, *head;
		int i = 0;

		bh = head = page_buffers(page);
		do {
			printk(KERN_CRIT
			       " BH[%d] %p: cnt=%d block#=%llu state=0x%lx\n",
			       i++, bh, atomic_read(&bh->b_count),
			       (unsigned long long)bh->b_blocknr, bh->b_state);
			bh = bh->b_this_page;
		} while (bh != head);
	}
}

/**
 * nilfs_copy_page -- copy the page with buffers
 * @dst: destination page
 * @src: source page
 * @copy_dirty: flag whether to copy dirty states on the page's buffer heads.
 *
 * This function is for both data pages and btnode pages.  The dirty flag
 * must be handled by the caller.  The page must not be under i/o.
 * Both src and dst pages must be locked.
 */
static void nilfs_copy_page(struct page *dst, struct page *src, int copy_dirty)
{
	struct buffer_head *dbh, *dbufs, *sbh;
	unsigned long mask = NILFS_BUFFER_INHERENT_BITS;

	BUG_ON(PageWriteback(dst));

	sbh = page_buffers(src);
	if (!page_has_buffers(dst))
		create_empty_buffers(dst, sbh->b_size, 0);

	if (copy_dirty)
		mask |= BIT(BH_Dirty);

	dbh = dbufs = page_buffers(dst);
	do {
		lock_buffer(sbh);
		lock_buffer(dbh);
		dbh->b_state = sbh->b_state & mask;
		dbh->b_blocknr = sbh->b_blocknr;
		dbh->b_bdev = sbh->b_bdev;
		sbh = sbh->b_this_page;
		dbh = dbh->b_this_page;
	} while (dbh != dbufs);

	copy_highpage(dst, src);

	if (PageUptodate(src) && !PageUptodate(dst))
		SetPageUptodate(dst);
	else if (!PageUptodate(src) && PageUptodate(dst))
		ClearPageUptodate(dst);
	if (PageMappedToDisk(src) && !PageMappedToDisk(dst))
		SetPageMappedToDisk(dst);
	else if (!PageMappedToDisk(src) && PageMappedToDisk(dst))
		ClearPageMappedToDisk(dst);

	do {
		unlock_buffer(sbh);
		unlock_buffer(dbh);
		sbh = sbh->b_this_page;
		dbh = dbh->b_this_page;
	} while (dbh != dbufs);
}

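/**
 * nilfs_copy_dirty_pages - copy dirty pages to another page cache
 * @dmap: destination page cache
 * @smap: source page cache
 *
 * Copies every folio tagged dirty in @smap to @dmap, allocating the
 * destination folios as needed, and marks each copy dirty.
 *
 * Returns 0 on success, or a negative error code if a destination folio
 * could not be obtained.
 */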
int nilfs_copy_dirty_pages(struct address_space *dmap,
			   struct address_space *smap)
{
	struct folio_batch fbatch;
	unsigned int i;
	pgoff_t index = 0;
	int err = 0;

	folio_batch_init(&fbatch);
repeat:
	if (!filemap_get_folios_tag(smap, &index, (pgoff_t)-1,
				    PAGECACHE_TAG_DIRTY, &fbatch))
		return 0;

	for (i = 0; i < folio_batch_count(&fbatch); i++) {
		struct folio *folio = fbatch.folios[i], *dfolio;

		folio_lock(folio);
		if (unlikely(!folio_test_dirty(folio)))
			NILFS_PAGE_BUG(&folio->page, "inconsistent dirty state");

		dfolio = filemap_grab_folio(dmap, folio->index);
		if (unlikely(IS_ERR(dfolio))) {
			/* No empty page is added to the page cache */
			folio_unlock(folio);
			err = PTR_ERR(dfolio);
			break;
		}
		if (unlikely(!folio_buffers(folio)))
			NILFS_PAGE_BUG(&folio->page,
				       "found empty page in dat page cache");

		nilfs_copy_page(&dfolio->page, &folio->page, 1);
		filemap_dirty_folio(folio_mapping(dfolio), dfolio);

		folio_unlock(dfolio);
		folio_put(dfolio);
		folio_unlock(folio);
	}
	folio_batch_release(&fbatch);
	cond_resched();

	if (likely(!err))
		goto repeat;
	return err;
}

/**
 * nilfs_copy_back_pages -- copy back pages to original cache from shadow cache
 * @dmap: destination page cache
 * @smap: source page cache
 *
 * No pages must be added to the cache during this process.
 * This must be ensured by the caller.
 */
void nilfs_copy_back_pages(struct address_space *dmap,
			   struct address_space *smap)
{
	struct folio_batch fbatch;
	unsigned int i, n;
	pgoff_t start = 0;

	folio_batch_init(&fbatch);
repeat:
	n = filemap_get_folios(smap, &start, ~0UL, &fbatch);
	if (!n)
		return;

	for (i = 0; i < folio_batch_count(&fbatch); i++) {
		struct folio *folio = fbatch.folios[i], *dfolio;
		pgoff_t index = folio->index;

		folio_lock(folio);
		dfolio = filemap_lock_folio(dmap, index);
		if (!IS_ERR(dfolio)) {
			/* overwrite existing folio in the destination cache */
			WARN_ON(folio_test_dirty(dfolio));
			nilfs_copy_page(&dfolio->page, &folio->page, 0);
			folio_unlock(dfolio);
			folio_put(dfolio);
			/* Do we not need to remove folio from smap here? */
		} else {
			struct folio *f;

			/* move the folio to the destination cache */
			xa_lock_irq(&smap->i_pages);
			f = __xa_erase(&smap->i_pages, index);
			WARN_ON(folio != f);
			smap->nrpages--;
			xa_unlock_irq(&smap->i_pages);

			xa_lock_irq(&dmap->i_pages);
			f = __xa_store(&dmap->i_pages, index, folio, GFP_NOFS);
			if (unlikely(f)) {
				/* Probably -ENOMEM */
				folio->mapping = NULL;
				folio_put(folio);
			} else {
				folio->mapping = dmap;
				dmap->nrpages++;
				if (folio_test_dirty(folio))
					__xa_set_mark(&dmap->i_pages, index,
						      PAGECACHE_TAG_DIRTY);
			}
			xa_unlock_irq(&dmap->i_pages);
		}
		folio_unlock(folio);
	}
	folio_batch_release(&fbatch);
	cond_resched();

	goto repeat;
}

/**
 * nilfs_clear_dirty_pages - discard dirty pages in address space
 * @mapping: address space with dirty pages for discarding
 * @silent: suppress [true] or print [false] warning messages
 */
void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent)
{
	struct folio_batch fbatch;
	unsigned int i;
	pgoff_t index = 0;

	folio_batch_init(&fbatch);

	while (filemap_get_folios_tag(mapping, &index, (pgoff_t)-1,
				      PAGECACHE_TAG_DIRTY, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			folio_lock(folio);

			/*
			 * This folio may have been removed from the address
			 * space by truncation or invalidation when the lock
			 * was acquired.  Skip processing in that case.
			 */
			if (likely(folio->mapping == mapping))
				nilfs_clear_dirty_page(&folio->page, silent);

			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
}

/**
 * nilfs_clear_dirty_page - discard dirty page
 * @page: dirty page that will be discarded
 * @silent: suppress [true] or print [false] warning messages
 */
void nilfs_clear_dirty_page(struct page *page, bool silent)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;

	BUG_ON(!PageLocked(page));

	if (!silent)
		nilfs_warn(sb, "discard dirty page: offset=%lld, ino=%lu",
			   page_offset(page), inode->i_ino);

	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);
	ClearPageChecked(page);

	if (page_has_buffers(page)) {
		struct buffer_head *bh, *head;
		const unsigned long clear_bits =
			(BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) |
			 BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
			 BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected) |
			 BIT(BH_Delay));

		bh = head = page_buffers(page);
		do {
			lock_buffer(bh);
			if (!silent)
				nilfs_warn(sb,
					   "discard dirty block: blocknr=%llu, size=%zu",
					   (u64)bh->b_blocknr, bh->b_size);

			set_mask_bits(&bh->b_state, clear_bits, 0);
			unlock_buffer(bh);
		} while (bh = bh->b_this_page, bh != head);
	}

	__nilfs_clear_page_dirty(page);
}

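/*
 * nilfs_page_count_clean_buffers - count non-dirty buffers in a byte range
 *
 * Returns the number of buffer heads on @page that overlap the byte range
 * [@from, @to) and do not have their dirty flag set.
 */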
unsigned int nilfs_page_count_clean_buffers(struct page *page,
					    unsigned int from, unsigned int to)
{
	unsigned int block_start, block_end;
	struct buffer_head *bh, *head;
	unsigned int nc = 0;

	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + bh->b_size;
		if (block_end > from && block_start < to && !buffer_dirty(bh))
			nc++;
	}
	return nc;
}

/*
 * NILFS2 needs clear_page_dirty() in the following two cases:
 *
 * 1) For B-tree node pages and data pages of DAT file, NILFS2 clears dirty
 *    flag of pages when it copies back pages from shadow cache to the
 *    original cache.
 *
 * 2) Some B-tree operations like insertion or deletion may dispose buffers
 *    in dirty state, and this needs to cancel the dirty state of their pages.
 */
int __nilfs_clear_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;

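	/*
	 * If the page belongs to a mapping, clear its dirty tag in the
	 * page cache's XArray under the i_pages lock before letting
	 * clear_page_dirty_for_io() clear the page's own dirty flag and
	 * fix up the dirty accounting.
	 */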
	if (mapping) {
		xa_lock_irq(&mapping->i_pages);
		if (test_bit(PG_dirty, &page->flags)) {
			__xa_clear_mark(&mapping->i_pages, page_index(page),
					PAGECACHE_TAG_DIRTY);
			xa_unlock_irq(&mapping->i_pages);
			return clear_page_dirty_for_io(page);
		}
		xa_unlock_irq(&mapping->i_pages);
		return 0;
	}
	return TestClearPageDirty(page);
}

/**
 * nilfs_find_uncommitted_extent - find extent of uncommitted data
 * @inode: inode
 * @start_blk: start block offset (in)
 * @blkoff: start offset of the found extent (out)
 *
 * This function searches for an extent of buffers marked "delayed" which
 * starts from a block offset equal to or larger than @start_blk.  If
 * such an extent is found, this function stores the start offset in
 * @blkoff and returns its length in blocks.  Otherwise, zero is
 * returned.
 */
unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
					    sector_t start_blk,
					    sector_t *blkoff)
{
	unsigned int i, nr_folios;
	pgoff_t index;
	unsigned long length = 0;
	struct folio_batch fbatch;
	struct folio *folio;

	if (inode->i_mapping->nrpages == 0)
		return 0;

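	/* index of the folio in the page cache that covers start_blk */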
	index = start_blk >> (PAGE_SHIFT - inode->i_blkbits);

	folio_batch_init(&fbatch);

repeat:
	nr_folios = filemap_get_folios_contig(inode->i_mapping, &index,
					      ULONG_MAX, &fbatch);
	if (nr_folios == 0)
		return length;

	i = 0;
	do {
		folio = fbatch.folios[i];

		folio_lock(folio);
		if (folio_buffers(folio)) {
			struct buffer_head *bh, *head;
			sector_t b;

			b = folio->index << (PAGE_SHIFT - inode->i_blkbits);
			bh = head = folio_buffers(folio);
			do {
				if (b < start_blk)
					continue;
				if (buffer_delay(bh)) {
					if (length == 0)
						*blkoff = b;
					length++;
				} else if (length > 0) {
					goto out_locked;
				}
			} while (++b, bh = bh->b_this_page, bh != head);
		} else {
			if (length > 0)
				goto out_locked;
		}
		folio_unlock(folio);

	} while (++i < nr_folios);

	folio_batch_release(&fbatch);
	cond_resched();
	goto repeat;

out_locked:
	folio_unlock(folio);
	folio_batch_release(&fbatch);
	return length;
}