1a1d312deSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
2aa0b42b7SRandy Dunlap /*
31da177e4SLinus Torvalds * aops.c - NTFS kernel address space operations and page cache handling.
41da177e4SLinus Torvalds *
5ce1bafa0SAnton Altaparmakov * Copyright (c) 2001-2014 Anton Altaparmakov and Tuxera Inc.
61da177e4SLinus Torvalds * Copyright (c) 2002 Richard Russon
71da177e4SLinus Torvalds */
81da177e4SLinus Torvalds
91da177e4SLinus Torvalds #include <linux/errno.h>
1078264bd9SAnton Altaparmakov #include <linux/fs.h>
115a0e3ad6STejun Heo #include <linux/gfp.h>
121da177e4SLinus Torvalds #include <linux/mm.h>
131da177e4SLinus Torvalds #include <linux/pagemap.h>
141da177e4SLinus Torvalds #include <linux/swap.h>
151da177e4SLinus Torvalds #include <linux/buffer_head.h>
161da177e4SLinus Torvalds #include <linux/writeback.h>
17b4012a98SAndrew Morton #include <linux/bit_spinlock.h>
18be297968SChristoph Hellwig #include <linux/bio.h>
191da177e4SLinus Torvalds
201da177e4SLinus Torvalds #include "aops.h"
211da177e4SLinus Torvalds #include "attrib.h"
221da177e4SLinus Torvalds #include "debug.h"
231da177e4SLinus Torvalds #include "inode.h"
241da177e4SLinus Torvalds #include "mft.h"
251da177e4SLinus Torvalds #include "runlist.h"
261da177e4SLinus Torvalds #include "types.h"
271da177e4SLinus Torvalds #include "ntfs.h"
281da177e4SLinus Torvalds
291da177e4SLinus Torvalds /**
301da177e4SLinus Torvalds * ntfs_end_buffer_async_read - async io completion for reading attributes
311da177e4SLinus Torvalds * @bh: buffer head on which io is completed
321da177e4SLinus Torvalds * @uptodate: whether @bh is now uptodate or not
331da177e4SLinus Torvalds *
341da177e4SLinus Torvalds * Asynchronous I/O completion handler for reading pages belonging to the
351da177e4SLinus Torvalds * attribute address space of an inode. The inodes can either be files or
361da177e4SLinus Torvalds * directories or they can be fake inodes describing some attribute.
371da177e4SLinus Torvalds *
381da177e4SLinus Torvalds * If NInoMstProtected(), perform the post read mst fixups when all IO on the
391da177e4SLinus Torvalds * page has been completed and mark the page uptodate or set the error bit on
401da177e4SLinus Torvalds * the page. To determine the size of the records that need fixing up, we
411da177e4SLinus Torvalds * cheat a little bit by setting the index_block_size in ntfs_inode to the ntfs
421da177e4SLinus Torvalds * record size, and index_block_size_bits, to the log(base 2) of the ntfs
431da177e4SLinus Torvalds * record size.
441da177e4SLinus Torvalds */
ntfs_end_buffer_async_read(struct buffer_head * bh,int uptodate)451da177e4SLinus Torvalds static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
461da177e4SLinus Torvalds {
471da177e4SLinus Torvalds unsigned long flags;
48e604635cSAnton Altaparmakov struct buffer_head *first, *tmp;
491da177e4SLinus Torvalds struct page *page;
50f6098cf4SAnton Altaparmakov struct inode *vi;
511da177e4SLinus Torvalds ntfs_inode *ni;
521da177e4SLinus Torvalds int page_uptodate = 1;
531da177e4SLinus Torvalds
541da177e4SLinus Torvalds page = bh->b_page;
55f6098cf4SAnton Altaparmakov vi = page->mapping->host;
56f6098cf4SAnton Altaparmakov ni = NTFS_I(vi);
571da177e4SLinus Torvalds
581da177e4SLinus Torvalds if (likely(uptodate)) {
59f6098cf4SAnton Altaparmakov loff_t i_size;
60f6098cf4SAnton Altaparmakov s64 file_ofs, init_size;
611da177e4SLinus Torvalds
621da177e4SLinus Torvalds set_buffer_uptodate(bh);
631da177e4SLinus Torvalds
	/* Byte offset of this buffer within the attribute data. */
6409cbfeafSKirill A. Shutemov file_ofs = ((s64)page->index << PAGE_SHIFT) +
651da177e4SLinus Torvalds bh_offset(bh);
6607a4e2daSAnton Altaparmakov read_lock_irqsave(&ni->size_lock, flags);
67f6098cf4SAnton Altaparmakov init_size = ni->initialized_size;
68f6098cf4SAnton Altaparmakov i_size = i_size_read(vi);
6907a4e2daSAnton Altaparmakov read_unlock_irqrestore(&ni->size_lock, flags);
70f6098cf4SAnton Altaparmakov if (unlikely(init_size > i_size)) {
71f6098cf4SAnton Altaparmakov /* Race with shrinking truncate. */
72f6098cf4SAnton Altaparmakov init_size = i_size;
73f6098cf4SAnton Altaparmakov }
741da177e4SLinus Torvalds /* Check for the current buffer head overflowing. */
75f6098cf4SAnton Altaparmakov if (unlikely(file_ofs + bh->b_size > init_size)) {
76f6098cf4SAnton Altaparmakov int ofs;
77eebd2aa3SChristoph Lameter void *kaddr;
781da177e4SLinus Torvalds
	/* Zero the part of the buffer beyond the initialized size. */
79f6098cf4SAnton Altaparmakov ofs = 0;
80f6098cf4SAnton Altaparmakov if (file_ofs < init_size)
81f6098cf4SAnton Altaparmakov ofs = init_size - file_ofs;
82a3ac1414SCong Wang kaddr = kmap_atomic(page);
83eebd2aa3SChristoph Lameter memset(kaddr + bh_offset(bh) + ofs, 0,
84eebd2aa3SChristoph Lameter bh->b_size - ofs);
85eebd2aa3SChristoph Lameter flush_dcache_page(page);
86a3ac1414SCong Wang kunmap_atomic(kaddr);
871da177e4SLinus Torvalds }
881da177e4SLinus Torvalds } else {
891da177e4SLinus Torvalds clear_buffer_uptodate(bh);
90e604635cSAnton Altaparmakov SetPageError(page);
91f6098cf4SAnton Altaparmakov ntfs_error(ni->vol->sb, "Buffer I/O error, logical block "
92f6098cf4SAnton Altaparmakov "0x%llx.", (unsigned long long)bh->b_blocknr);
931da177e4SLinus Torvalds }
	/*
	 * Completion accounting for all buffers on this page is serialized
	 * by the first buffer head's b_uptodate_lock.
	 */
94e604635cSAnton Altaparmakov first = page_buffers(page);
95f1e67e35SThomas Gleixner spin_lock_irqsave(&first->b_uptodate_lock, flags);
961da177e4SLinus Torvalds clear_buffer_async_read(bh);
971da177e4SLinus Torvalds unlock_buffer(bh);
	/*
	 * Walk all buffers on the page; if any is still under async read we
	 * are not the last completion, so bail out via still_busy.
	 */
981da177e4SLinus Torvalds tmp = bh;
991da177e4SLinus Torvalds do {
1001da177e4SLinus Torvalds if (!buffer_uptodate(tmp))
1011da177e4SLinus Torvalds page_uptodate = 0;
1021da177e4SLinus Torvalds if (buffer_async_read(tmp)) {
1031da177e4SLinus Torvalds if (likely(buffer_locked(tmp)))
1041da177e4SLinus Torvalds goto still_busy;
1051da177e4SLinus Torvalds /* Async buffers must be locked. */
1061da177e4SLinus Torvalds BUG();
1071da177e4SLinus Torvalds }
1081da177e4SLinus Torvalds tmp = tmp->b_this_page;
1091da177e4SLinus Torvalds } while (tmp != bh);
110f1e67e35SThomas Gleixner spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
1111da177e4SLinus Torvalds /*
1121da177e4SLinus Torvalds * If none of the buffers had errors then we can set the page uptodate,
1131da177e4SLinus Torvalds * but we first have to perform the post read mst fixups, if the
1141da177e4SLinus Torvalds * attribute is mst protected, i.e. if NInoMstProtected(ni) is true.
1151da177e4SLinus Torvalds * Note we ignore fixup errors as those are detected when
1161da177e4SLinus Torvalds * map_mft_record() is called which gives us per record granularity
1171da177e4SLinus Torvalds * rather than per page granularity.
1181da177e4SLinus Torvalds */
1191da177e4SLinus Torvalds if (!NInoMstProtected(ni)) {
1201da177e4SLinus Torvalds if (likely(page_uptodate && !PageError(page)))
1211da177e4SLinus Torvalds SetPageUptodate(page);
1221da177e4SLinus Torvalds } else {
123f6098cf4SAnton Altaparmakov u8 *kaddr;
1241da177e4SLinus Torvalds unsigned int i, recs;
1251da177e4SLinus Torvalds u32 rec_size;
1261da177e4SLinus Torvalds
	/* Apply the mst fixups to every ntfs record in the page. */
1271da177e4SLinus Torvalds rec_size = ni->itype.index.block_size;
12809cbfeafSKirill A. Shutemov recs = PAGE_SIZE / rec_size;
1291da177e4SLinus Torvalds /* Should have been verified before we got here... */
1301da177e4SLinus Torvalds BUG_ON(!recs);
131a3ac1414SCong Wang kaddr = kmap_atomic(page);
1321da177e4SLinus Torvalds for (i = 0; i < recs; i++)
133f6098cf4SAnton Altaparmakov post_read_mst_fixup((NTFS_RECORD*)(kaddr +
1341da177e4SLinus Torvalds i * rec_size), rec_size);
135a3ac1414SCong Wang kunmap_atomic(kaddr);
1361da177e4SLinus Torvalds flush_dcache_page(page);
137b6ad6c52SAnton Altaparmakov if (likely(page_uptodate && !PageError(page)))
1381da177e4SLinus Torvalds SetPageUptodate(page);
1391da177e4SLinus Torvalds }
1401da177e4SLinus Torvalds unlock_page(page);
1411da177e4SLinus Torvalds return;
1421da177e4SLinus Torvalds still_busy:
143f1e67e35SThomas Gleixner spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
1441da177e4SLinus Torvalds return;
1451da177e4SLinus Torvalds }
1461da177e4SLinus Torvalds
1471da177e4SLinus Torvalds /**
1481da177e4SLinus Torvalds * ntfs_read_block - fill a @page of an address space with data
1491da177e4SLinus Torvalds * @page: page cache page to fill with data
1501da177e4SLinus Torvalds *
1511da177e4SLinus Torvalds * Fill the page @page of the address space belonging to the @page->host inode.
1521da177e4SLinus Torvalds * We read each buffer asynchronously and when all buffers are read in, our io
1531da177e4SLinus Torvalds * completion handler ntfs_end_buffer_async_read(), if required, automatically
1541da177e4SLinus Torvalds * applies the mst fixups to the page before finally marking it uptodate and
1551da177e4SLinus Torvalds * unlocking it.
1561da177e4SLinus Torvalds *
1571da177e4SLinus Torvalds * We only enforce allocated_size limit because i_size is checked for in
1581da177e4SLinus Torvalds * generic_file_read().
1591da177e4SLinus Torvalds *
1601da177e4SLinus Torvalds * Return 0 on success and -errno on error.
1611da177e4SLinus Torvalds *
162933906f8SMatthew Wilcox (Oracle) * Contains an adapted version of fs/buffer.c::block_read_full_folio().
1631da177e4SLinus Torvalds */
ntfs_read_block(struct page * page)1641da177e4SLinus Torvalds static int ntfs_read_block(struct page *page)
1651da177e4SLinus Torvalds {
166f6098cf4SAnton Altaparmakov loff_t i_size;
1671da177e4SLinus Torvalds VCN vcn;
1681da177e4SLinus Torvalds LCN lcn;
169f6098cf4SAnton Altaparmakov s64 init_size;
170f6098cf4SAnton Altaparmakov struct inode *vi;
1711da177e4SLinus Torvalds ntfs_inode *ni;
1721da177e4SLinus Torvalds ntfs_volume *vol;
1731da177e4SLinus Torvalds runlist_element *rl;
1741da177e4SLinus Torvalds struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
1751da177e4SLinus Torvalds sector_t iblock, lblock, zblock;
17607a4e2daSAnton Altaparmakov unsigned long flags;
1771da177e4SLinus Torvalds unsigned int blocksize, vcn_ofs;
1781da177e4SLinus Torvalds int i, nr;
1791da177e4SLinus Torvalds unsigned char blocksize_bits;
1801da177e4SLinus Torvalds
181f6098cf4SAnton Altaparmakov vi = page->mapping->host;
182f6098cf4SAnton Altaparmakov ni = NTFS_I(vi);
1831da177e4SLinus Torvalds vol = ni->vol;
1841da177e4SLinus Torvalds
1851da177e4SLinus Torvalds /* $MFT/$DATA must have its complete runlist in memory at all times. */
1861da177e4SLinus Torvalds BUG_ON(!ni->runlist.rl && !ni->mft_no && !NInoAttr(ni));
1871da177e4SLinus Torvalds
18878af34f0SAnton Altaparmakov blocksize = vol->sb->s_blocksize;
18978af34f0SAnton Altaparmakov blocksize_bits = vol->sb->s_blocksize_bits;
1901da177e4SLinus Torvalds
191a01ac532SAnton Altaparmakov if (!page_has_buffers(page)) {
1921da177e4SLinus Torvalds create_empty_buffers(page, blocksize, 0);
193a01ac532SAnton Altaparmakov if (unlikely(!page_has_buffers(page))) {
1941da177e4SLinus Torvalds unlock_page(page);
1951da177e4SLinus Torvalds return -ENOMEM;
1961da177e4SLinus Torvalds }
197a01ac532SAnton Altaparmakov }
198a01ac532SAnton Altaparmakov bh = head = page_buffers(page);
199a01ac532SAnton Altaparmakov BUG_ON(!bh);
2001da177e4SLinus Torvalds
201f6098cf4SAnton Altaparmakov /*
202f6098cf4SAnton Altaparmakov * We may be racing with truncate. To avoid some of the problems we
203f6098cf4SAnton Altaparmakov * now take a snapshot of the various sizes and use those for the whole
204f6098cf4SAnton Altaparmakov * of the function. In case of an extending truncate it just means we
205f6098cf4SAnton Altaparmakov * may leave some buffers unmapped which are now allocated. This is
206f6098cf4SAnton Altaparmakov * not a problem since these buffers will just get mapped when a write
207f6098cf4SAnton Altaparmakov * occurs. In case of a shrinking truncate, we will detect this later
208f6098cf4SAnton Altaparmakov * on due to the runlist being incomplete and if the page is being
209f6098cf4SAnton Altaparmakov * fully truncated, truncate will throw it away as soon as we unlock
210f6098cf4SAnton Altaparmakov * it so no need to worry what we do with it.
211f6098cf4SAnton Altaparmakov */
21209cbfeafSKirill A. Shutemov iblock = (s64)page->index << (PAGE_SHIFT - blocksize_bits);
21307a4e2daSAnton Altaparmakov read_lock_irqsave(&ni->size_lock, flags);
2141da177e4SLinus Torvalds lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits;
215f6098cf4SAnton Altaparmakov init_size = ni->initialized_size;
216f6098cf4SAnton Altaparmakov i_size = i_size_read(vi);
21707a4e2daSAnton Altaparmakov read_unlock_irqrestore(&ni->size_lock, flags);
218f6098cf4SAnton Altaparmakov if (unlikely(init_size > i_size)) {
219f6098cf4SAnton Altaparmakov /* Race with shrinking truncate. */
220f6098cf4SAnton Altaparmakov init_size = i_size;
221f6098cf4SAnton Altaparmakov }
222f6098cf4SAnton Altaparmakov zblock = (init_size + blocksize - 1) >> blocksize_bits;
2231da177e4SLinus Torvalds
2241da177e4SLinus Torvalds /* Loop through all the buffers in the page. */
2251da177e4SLinus Torvalds rl = NULL;
2261da177e4SLinus Torvalds nr = i = 0;
2271da177e4SLinus Torvalds do {
228e3bf460fSNate Diller int err = 0;
2291da177e4SLinus Torvalds
2301da177e4SLinus Torvalds if (unlikely(buffer_uptodate(bh)))
2311da177e4SLinus Torvalds continue;
		/* Buffer is already mapped; just queue it for reading below. */
2321da177e4SLinus Torvalds if (unlikely(buffer_mapped(bh))) {
2331da177e4SLinus Torvalds arr[nr++] = bh;
2341da177e4SLinus Torvalds continue;
2351da177e4SLinus Torvalds }
2361da177e4SLinus Torvalds bh->b_bdev = vol->sb->s_bdev;
2371da177e4SLinus Torvalds /* Is the block within the allowed limits? */
2381da177e4SLinus Torvalds if (iblock < lblock) {
239c49c3111SRichard Knutsson bool is_retry = false;
2401da177e4SLinus Torvalds
2411da177e4SLinus Torvalds /* Convert iblock into corresponding vcn and offset. */
2421da177e4SLinus Torvalds vcn = (VCN)iblock << blocksize_bits >>
2431da177e4SLinus Torvalds vol->cluster_size_bits;
2441da177e4SLinus Torvalds vcn_ofs = ((VCN)iblock << blocksize_bits) &
2451da177e4SLinus Torvalds vol->cluster_size_mask;
2461da177e4SLinus Torvalds if (!rl) {
2471da177e4SLinus Torvalds lock_retry_remap:
2481da177e4SLinus Torvalds down_read(&ni->runlist.lock);
2491da177e4SLinus Torvalds rl = ni->runlist.rl;
2501da177e4SLinus Torvalds }
2511da177e4SLinus Torvalds if (likely(rl != NULL)) {
2521da177e4SLinus Torvalds /* Seek to element containing target vcn. */
2531da177e4SLinus Torvalds while (rl->length && rl[1].vcn <= vcn)
2541da177e4SLinus Torvalds rl++;
2551da177e4SLinus Torvalds lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
2561da177e4SLinus Torvalds } else
2571da177e4SLinus Torvalds lcn = LCN_RL_NOT_MAPPED;
2581da177e4SLinus Torvalds /* Successful remap. */
2591da177e4SLinus Torvalds if (lcn >= 0) {
2601da177e4SLinus Torvalds /* Setup buffer head to correct block. */
2611da177e4SLinus Torvalds bh->b_blocknr = ((lcn << vol->cluster_size_bits)
2621da177e4SLinus Torvalds + vcn_ofs) >> blocksize_bits;
2631da177e4SLinus Torvalds set_buffer_mapped(bh);
2641da177e4SLinus Torvalds /* Only read initialized data blocks. */
2651da177e4SLinus Torvalds if (iblock < zblock) {
2661da177e4SLinus Torvalds arr[nr++] = bh;
2671da177e4SLinus Torvalds continue;
2681da177e4SLinus Torvalds }
2691da177e4SLinus Torvalds /* Fully non-initialized data block, zero it. */
2701da177e4SLinus Torvalds goto handle_zblock;
2711da177e4SLinus Torvalds }
2721da177e4SLinus Torvalds /* It is a hole, need to zero it. */
2731da177e4SLinus Torvalds if (lcn == LCN_HOLE)
2741da177e4SLinus Torvalds goto handle_hole;
2751da177e4SLinus Torvalds /* If first try and runlist unmapped, map and retry. */
2761da177e4SLinus Torvalds if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
277c49c3111SRichard Knutsson is_retry = true;
2781da177e4SLinus Torvalds /*
2791da177e4SLinus Torvalds * Attempt to map runlist, dropping lock for
2801da177e4SLinus Torvalds * the duration.
2811da177e4SLinus Torvalds */
2821da177e4SLinus Torvalds up_read(&ni->runlist.lock);
2831da177e4SLinus Torvalds err = ntfs_map_runlist(ni, vcn);
2841da177e4SLinus Torvalds if (likely(!err))
2851da177e4SLinus Torvalds goto lock_retry_remap;
2861da177e4SLinus Torvalds rl = NULL;
2879f993fe4SAnton Altaparmakov } else if (!rl)
2889f993fe4SAnton Altaparmakov up_read(&ni->runlist.lock);
2898273d5d4SAnton Altaparmakov /*
2908273d5d4SAnton Altaparmakov * If buffer is outside the runlist, treat it as a
2918273d5d4SAnton Altaparmakov * hole. This can happen due to concurrent truncate
2928273d5d4SAnton Altaparmakov * for example.
2938273d5d4SAnton Altaparmakov */
2948273d5d4SAnton Altaparmakov if (err == -ENOENT || lcn == LCN_ENOENT) {
2958273d5d4SAnton Altaparmakov err = 0;
2968273d5d4SAnton Altaparmakov goto handle_hole;
2978273d5d4SAnton Altaparmakov }
2981da177e4SLinus Torvalds /* Hard error, zero out region. */
2998273d5d4SAnton Altaparmakov if (!err)
3008273d5d4SAnton Altaparmakov err = -EIO;
3011da177e4SLinus Torvalds bh->b_blocknr = -1;
3021da177e4SLinus Torvalds SetPageError(page);
3031da177e4SLinus Torvalds ntfs_error(vol->sb, "Failed to read from inode 0x%lx, "
3041da177e4SLinus Torvalds "attribute type 0x%x, vcn 0x%llx, "
3051da177e4SLinus Torvalds "offset 0x%x because its location on "
3061da177e4SLinus Torvalds "disk could not be determined%s "
3078273d5d4SAnton Altaparmakov "(error code %i).", ni->mft_no,
3081da177e4SLinus Torvalds ni->type, (unsigned long long)vcn,
3091da177e4SLinus Torvalds vcn_ofs, is_retry ? " even after "
3108273d5d4SAnton Altaparmakov "retrying" : "", err);
3111da177e4SLinus Torvalds }
3121da177e4SLinus Torvalds /*
3131da177e4SLinus Torvalds * Either iblock was outside lblock limits or
3141da177e4SLinus Torvalds * ntfs_rl_vcn_to_lcn() returned error. Just zero that portion
3151da177e4SLinus Torvalds * of the page and set the buffer uptodate.
3161da177e4SLinus Torvalds */
3171da177e4SLinus Torvalds handle_hole:
3181da177e4SLinus Torvalds bh->b_blocknr = -1UL;
3191da177e4SLinus Torvalds clear_buffer_mapped(bh);
3201da177e4SLinus Torvalds handle_zblock:
321eebd2aa3SChristoph Lameter zero_user(page, i * blocksize, blocksize);
3228273d5d4SAnton Altaparmakov if (likely(!err))
3231da177e4SLinus Torvalds set_buffer_uptodate(bh);
3241da177e4SLinus Torvalds } while (i++, iblock++, (bh = bh->b_this_page) != head);
3251da177e4SLinus Torvalds
3261da177e4SLinus Torvalds /* Release the lock if we took it. */
3271da177e4SLinus Torvalds if (rl)
3281da177e4SLinus Torvalds up_read(&ni->runlist.lock);
3291da177e4SLinus Torvalds
3301da177e4SLinus Torvalds /* Check we have at least one buffer ready for i/o. */
3311da177e4SLinus Torvalds if (nr) {
3321da177e4SLinus Torvalds struct buffer_head *tbh;
3331da177e4SLinus Torvalds
3341da177e4SLinus Torvalds /* Lock the buffers. */
3351da177e4SLinus Torvalds for (i = 0; i < nr; i++) {
3361da177e4SLinus Torvalds tbh = arr[i];
3371da177e4SLinus Torvalds lock_buffer(tbh);
3381da177e4SLinus Torvalds tbh->b_end_io = ntfs_end_buffer_async_read;
3391da177e4SLinus Torvalds set_buffer_async_read(tbh);
3401da177e4SLinus Torvalds }
3411da177e4SLinus Torvalds /* Finally, start i/o on the buffers. */
3421da177e4SLinus Torvalds for (i = 0; i < nr; i++) {
3431da177e4SLinus Torvalds tbh = arr[i];
3441da177e4SLinus Torvalds if (likely(!buffer_uptodate(tbh)))
3451420c4a5SBart Van Assche submit_bh(REQ_OP_READ, tbh);
3461da177e4SLinus Torvalds else
3471da177e4SLinus Torvalds ntfs_end_buffer_async_read(tbh, 1);
3481da177e4SLinus Torvalds }
3491da177e4SLinus Torvalds return 0;
3501da177e4SLinus Torvalds }
3511da177e4SLinus Torvalds /* No i/o was scheduled on any of the buffers. */
3521da177e4SLinus Torvalds if (likely(!PageError(page)))
3531da177e4SLinus Torvalds SetPageUptodate(page);
3541da177e4SLinus Torvalds else /* Signal synchronous i/o error. */
3551da177e4SLinus Torvalds nr = -EIO;
3561da177e4SLinus Torvalds unlock_page(page);
3571da177e4SLinus Torvalds return nr;
3581da177e4SLinus Torvalds }
3591da177e4SLinus Torvalds
3601da177e4SLinus Torvalds /**
361933906f8SMatthew Wilcox (Oracle) * ntfs_read_folio - fill a @folio of a @file with data from the device
362933906f8SMatthew Wilcox (Oracle) * @file: open file to which the folio @folio belongs or NULL
363933906f8SMatthew Wilcox (Oracle) * @folio: page cache folio to fill with data
3641da177e4SLinus Torvalds *
365933906f8SMatthew Wilcox (Oracle) * For non-resident attributes, ntfs_read_folio() fills the @folio of the open
366933906f8SMatthew Wilcox (Oracle) * file @file by calling the ntfs version of the generic block_read_full_folio()
3671da177e4SLinus Torvalds * function, ntfs_read_block(), which in turn creates and reads in the buffers
368933906f8SMatthew Wilcox (Oracle) * associated with the folio asynchronously.
3691da177e4SLinus Torvalds *
370933906f8SMatthew Wilcox (Oracle) * For resident attributes, OTOH, ntfs_read_folio() fills @folio by copying the
3711da177e4SLinus Torvalds * data from the mft record (which at this stage is most likely in memory) and
3721da177e4SLinus Torvalds * fills the remainder with zeroes. Thus, in this case, I/O is synchronous, as
3731da177e4SLinus Torvalds * even if the mft record is not cached at this point in time, we need to wait
3741da177e4SLinus Torvalds * for it to be read in before we can do the copy.
3751da177e4SLinus Torvalds *
3761da177e4SLinus Torvalds * Return 0 on success and -errno on error.
3771da177e4SLinus Torvalds */
ntfs_read_folio(struct file * file,struct folio * folio)378933906f8SMatthew Wilcox (Oracle) static int ntfs_read_folio(struct file *file, struct folio *folio)
3791da177e4SLinus Torvalds {
380933906f8SMatthew Wilcox (Oracle) struct page *page = &folio->page;
381f6098cf4SAnton Altaparmakov loff_t i_size;
382f6098cf4SAnton Altaparmakov struct inode *vi;
3831da177e4SLinus Torvalds ntfs_inode *ni, *base_ni;
384bfab36e8SAnton Altaparmakov u8 *addr;
3851da177e4SLinus Torvalds ntfs_attr_search_ctx *ctx;
3861da177e4SLinus Torvalds MFT_RECORD *mrec;
387b6ad6c52SAnton Altaparmakov unsigned long flags;
3881da177e4SLinus Torvalds u32 attr_len;
3891da177e4SLinus Torvalds int err = 0;
3901da177e4SLinus Torvalds
	/*
	 * Restart point: we come back here if a concurrent write converts the
	 * attribute to non-resident after we mapped the mft record below.
	 */
391905685f6SAnton Altaparmakov retry_readpage:
3921da177e4SLinus Torvalds BUG_ON(!PageLocked(page));
393ebab8990SAnton Altaparmakov vi = page->mapping->host;
394ebab8990SAnton Altaparmakov i_size = i_size_read(vi);
395ebab8990SAnton Altaparmakov /* Is the page fully outside i_size? (truncate in progress) */
39609cbfeafSKirill A. Shutemov if (unlikely(page->index >= (i_size + PAGE_SIZE - 1) >>
39709cbfeafSKirill A. Shutemov PAGE_SHIFT)) {
39809cbfeafSKirill A. Shutemov zero_user(page, 0, PAGE_SIZE);
399ebab8990SAnton Altaparmakov ntfs_debug("Read outside i_size - truncated?");
400ebab8990SAnton Altaparmakov goto done;
401ebab8990SAnton Altaparmakov }
4021da177e4SLinus Torvalds /*
4031da177e4SLinus Torvalds * This can potentially happen because we clear PageUptodate() during
4041da177e4SLinus Torvalds * ntfs_writepage() of MstProtected() attributes.
4051da177e4SLinus Torvalds */
4061da177e4SLinus Torvalds if (PageUptodate(page)) {
4071da177e4SLinus Torvalds unlock_page(page);
4081da177e4SLinus Torvalds return 0;
4091da177e4SLinus Torvalds }
410f6098cf4SAnton Altaparmakov ni = NTFS_I(vi);
4111da177e4SLinus Torvalds /*
412311120ecSAnton Altaparmakov * Only $DATA attributes can be encrypted and only unnamed $DATA
413311120ecSAnton Altaparmakov * attributes can be compressed. Index root can have the flags set but
414311120ecSAnton Altaparmakov * this means to create compressed/encrypted files, not that the
4154e64c886SAnton Altaparmakov * attribute is compressed/encrypted. Note we need to check for
4164e64c886SAnton Altaparmakov * AT_INDEX_ALLOCATION since this is the type of both directory and
4174e64c886SAnton Altaparmakov * index inodes.
4181da177e4SLinus Torvalds */
4194e64c886SAnton Altaparmakov if (ni->type != AT_INDEX_ALLOCATION) {
420311120ecSAnton Altaparmakov /* If attribute is encrypted, deny access, just like NT4. */
4211da177e4SLinus Torvalds if (NInoEncrypted(ni)) {
422311120ecSAnton Altaparmakov BUG_ON(ni->type != AT_DATA);
4231da177e4SLinus Torvalds err = -EACCES;
4241da177e4SLinus Torvalds goto err_out;
4251da177e4SLinus Torvalds }
4261da177e4SLinus Torvalds /* Compressed data streams are handled in compress.c. */
427311120ecSAnton Altaparmakov if (NInoNonResident(ni) && NInoCompressed(ni)) {
428311120ecSAnton Altaparmakov BUG_ON(ni->type != AT_DATA);
429311120ecSAnton Altaparmakov BUG_ON(ni->name_len);
4301da177e4SLinus Torvalds return ntfs_read_compressed_block(page);
4311da177e4SLinus Torvalds }
432311120ecSAnton Altaparmakov }
433311120ecSAnton Altaparmakov /* NInoNonResident() == NInoIndexAllocPresent() */
434311120ecSAnton Altaparmakov if (NInoNonResident(ni)) {
435311120ecSAnton Altaparmakov /* Normal, non-resident data stream. */
4361da177e4SLinus Torvalds return ntfs_read_block(page);
4371da177e4SLinus Torvalds }
4381da177e4SLinus Torvalds /*
4391da177e4SLinus Torvalds * Attribute is resident, implying it is not compressed or encrypted.
4401da177e4SLinus Torvalds * This also means the attribute is smaller than an mft record and
4411da177e4SLinus Torvalds * hence smaller than a page, so can simply zero out any pages with
442311120ecSAnton Altaparmakov * index above 0. Note the attribute can actually be marked compressed
443311120ecSAnton Altaparmakov * but if it is resident the actual data is not compressed so we are
444311120ecSAnton Altaparmakov * ok to ignore the compressed flag here.
4451da177e4SLinus Torvalds */
446b6ad6c52SAnton Altaparmakov if (unlikely(page->index > 0)) {
44709cbfeafSKirill A. Shutemov zero_user(page, 0, PAGE_SIZE);
4481da177e4SLinus Torvalds goto done;
4491da177e4SLinus Torvalds }
	/* Attribute extents live in the base mft record; find the base inode. */
4501da177e4SLinus Torvalds if (!NInoAttr(ni))
4511da177e4SLinus Torvalds base_ni = ni;
4521da177e4SLinus Torvalds else
4531da177e4SLinus Torvalds base_ni = ni->ext.base_ntfs_ino;
4541da177e4SLinus Torvalds /* Map, pin, and lock the mft record. */
4551da177e4SLinus Torvalds mrec = map_mft_record(base_ni);
4561da177e4SLinus Torvalds if (IS_ERR(mrec)) {
4571da177e4SLinus Torvalds err = PTR_ERR(mrec);
4581da177e4SLinus Torvalds goto err_out;
4591da177e4SLinus Torvalds }
460905685f6SAnton Altaparmakov /*
461905685f6SAnton Altaparmakov * If a parallel write made the attribute non-resident, drop the mft
462933906f8SMatthew Wilcox (Oracle) * record and retry the read_folio.
463905685f6SAnton Altaparmakov */
464905685f6SAnton Altaparmakov if (unlikely(NInoNonResident(ni))) {
465905685f6SAnton Altaparmakov unmap_mft_record(base_ni);
466905685f6SAnton Altaparmakov goto retry_readpage;
467905685f6SAnton Altaparmakov }
4681da177e4SLinus Torvalds ctx = ntfs_attr_get_search_ctx(base_ni, mrec);
4691da177e4SLinus Torvalds if (unlikely(!ctx)) {
4701da177e4SLinus Torvalds err = -ENOMEM;
4711da177e4SLinus Torvalds goto unm_err_out;
4721da177e4SLinus Torvalds }
4731da177e4SLinus Torvalds err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
4741da177e4SLinus Torvalds CASE_SENSITIVE, 0, NULL, 0, ctx);
4751da177e4SLinus Torvalds if (unlikely(err))
4761da177e4SLinus Torvalds goto put_unm_err_out;
4771da177e4SLinus Torvalds attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);
	/* Clamp the copy length against initialized_size and i_size. */
478b6ad6c52SAnton Altaparmakov read_lock_irqsave(&ni->size_lock, flags);
479b6ad6c52SAnton Altaparmakov if (unlikely(attr_len > ni->initialized_size))
480b6ad6c52SAnton Altaparmakov attr_len = ni->initialized_size;
481f6098cf4SAnton Altaparmakov i_size = i_size_read(vi);
482b6ad6c52SAnton Altaparmakov read_unlock_irqrestore(&ni->size_lock, flags);
483f6098cf4SAnton Altaparmakov if (unlikely(attr_len > i_size)) {
484f6098cf4SAnton Altaparmakov /* Race with shrinking truncate. */
485f6098cf4SAnton Altaparmakov attr_len = i_size;
486f6098cf4SAnton Altaparmakov }
487a3ac1414SCong Wang addr = kmap_atomic(page);
4881da177e4SLinus Torvalds /* Copy the data to the page. */
489bfab36e8SAnton Altaparmakov memcpy(addr, (u8*)ctx->attr +
4901da177e4SLinus Torvalds le16_to_cpu(ctx->attr->data.resident.value_offset),
4911da177e4SLinus Torvalds attr_len);
4921da177e4SLinus Torvalds /* Zero the remainder of the page. */
49309cbfeafSKirill A. Shutemov memset(addr + attr_len, 0, PAGE_SIZE - attr_len);
4941da177e4SLinus Torvalds flush_dcache_page(page);
495a3ac1414SCong Wang kunmap_atomic(addr);
4961da177e4SLinus Torvalds put_unm_err_out:
4971da177e4SLinus Torvalds ntfs_attr_put_search_ctx(ctx);
4981da177e4SLinus Torvalds unm_err_out:
4991da177e4SLinus Torvalds unmap_mft_record(base_ni);
5001da177e4SLinus Torvalds done:
5011da177e4SLinus Torvalds SetPageUptodate(page);
5021da177e4SLinus Torvalds err_out:
5031da177e4SLinus Torvalds unlock_page(page);
5041da177e4SLinus Torvalds return err;
5051da177e4SLinus Torvalds }
5061da177e4SLinus Torvalds
5071da177e4SLinus Torvalds #ifdef NTFS_RW
5081da177e4SLinus Torvalds
5091da177e4SLinus Torvalds /**
5101da177e4SLinus Torvalds * ntfs_write_block - write a @page to the backing store
5111da177e4SLinus Torvalds * @page: page cache page to write out
5121da177e4SLinus Torvalds * @wbc: writeback control structure
5131da177e4SLinus Torvalds *
5141da177e4SLinus Torvalds * This function is for writing pages belonging to non-resident, non-mst
5151da177e4SLinus Torvalds * protected attributes to their backing store.
5161da177e4SLinus Torvalds *
5171da177e4SLinus Torvalds * For a page with buffers, map and write the dirty buffers asynchronously
5181da177e4SLinus Torvalds * under page writeback. For a page without buffers, create buffers for the
5191da177e4SLinus Torvalds * page, then proceed as above.
5201da177e4SLinus Torvalds *
5211da177e4SLinus Torvalds * If a page doesn't have buffers the page dirty state is definitive. If a page
5221da177e4SLinus Torvalds * does have buffers, the page dirty state is just a hint, and the buffer dirty
5231da177e4SLinus Torvalds * state is definitive. (A hint which has rules: dirty buffers against a clean
5241da177e4SLinus Torvalds * page is illegal. Other combinations are legal and need to be handled. In
5251da177e4SLinus Torvalds * particular a dirty page containing clean buffers for example.)
5261da177e4SLinus Torvalds *
5271da177e4SLinus Torvalds * Return 0 on success and -errno on error.
5281da177e4SLinus Torvalds *
529*53418a18SMatthew Wilcox (Oracle) * Based on ntfs_read_block() and __block_write_full_folio().
5301da177e4SLinus Torvalds */
static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
{
	VCN vcn;
	LCN lcn;
	s64 initialized_size;
	loff_t i_size;
	sector_t block, dblock, iblock;
	struct inode *vi;
	ntfs_inode *ni;
	ntfs_volume *vol;
	runlist_element *rl;
	struct buffer_head *bh, *head;
	unsigned long flags;
	unsigned int blocksize, vcn_ofs;
	int err;
	bool need_end_writeback;
	unsigned char blocksize_bits;

	vi = page->mapping->host;
	ni = NTFS_I(vi);
	vol = ni->vol;

	ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
			"0x%lx.", ni->mft_no, ni->type, page->index);

	/*
	 * Only non-resident, non-mst-protected attributes are handled here;
	 * mst protected attributes are written by ntfs_write_mst_block().
	 */
	BUG_ON(!NInoNonResident(ni));
	BUG_ON(NInoMstProtected(ni));
	blocksize = vol->sb->s_blocksize;
	blocksize_bits = vol->sb->s_blocksize_bits;
	if (!page_has_buffers(page)) {
		BUG_ON(!PageUptodate(page));
		create_empty_buffers(page, blocksize,
				(1 << BH_Uptodate) | (1 << BH_Dirty));
		if (unlikely(!page_has_buffers(page))) {
			ntfs_warning(vol->sb, "Error allocating page "
					"buffers. Redirtying page so we try "
					"again later.");
			/*
			 * Put the page back on mapping->dirty_pages, but leave
			 * its buffers' dirty state as-is.
			 */
			redirty_page_for_writepage(wbc, page);
			unlock_page(page);
			return 0;
		}
	}
	bh = head = page_buffers(page);
	BUG_ON(!bh);

	/* NOTE: Different naming scheme to ntfs_read_block()! */

	/* The first block in the page. */
	block = (s64)page->index << (PAGE_SHIFT - blocksize_bits);

	/*
	 * Take a consistent snapshot of the data size and the initialized
	 * size under the size lock so the two values agree with each other.
	 */
	read_lock_irqsave(&ni->size_lock, flags);
	i_size = i_size_read(vi);
	initialized_size = ni->initialized_size;
	read_unlock_irqrestore(&ni->size_lock, flags);

	/* The first out of bounds block for the data size. */
	dblock = (i_size + blocksize - 1) >> blocksize_bits;

	/* The last (fully or partially) initialized block. */
	iblock = initialized_size >> blocksize_bits;

	/*
	 * Be very careful. We have no exclusion from block_dirty_folio
	 * here, and the (potentially unmapped) buffers may become dirty at
	 * any time. If a buffer becomes dirty here after we've inspected it
	 * then we just miss that fact, and the page stays dirty.
	 *
	 * Buffers outside i_size may be dirtied by block_dirty_folio;
	 * handle that here by just cleaning them.
	 */

	/*
	 * Loop through all the buffers in the page, mapping all the dirty
	 * buffers to disk addresses and handling any aliases from the
	 * underlying block device's mapping.
	 */
	rl = NULL;
	err = 0;
	do {
		bool is_retry = false;

		if (unlikely(block >= dblock)) {
			/*
			 * Mapped buffers outside i_size will occur, because
			 * this page can be outside i_size when there is a
			 * truncate in progress. The contents of such buffers
			 * were zeroed by ntfs_writepage().
			 *
			 * FIXME: What about the small race window where
			 * ntfs_writepage() has not done any clearing because
			 * the page was within i_size but before we get here,
			 * vmtruncate() modifies i_size?
			 */
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}

		/* Clean buffers are not written out, so no need to map them. */
		if (!buffer_dirty(bh))
			continue;

		/* Make sure we have enough initialized size. */
		if (unlikely((block >= iblock) &&
				(initialized_size < i_size))) {
			/*
			 * If this page is fully outside initialized
			 * size, zero out all pages between the current
			 * initialized size and the current page. Just
			 * use ntfs_read_folio() to do the zeroing
			 * transparently.
			 */
			if (block > iblock) {
				// TODO:
				// For each page do:
				// - read_cache_page()
				// Again for each page do:
				// - wait_on_page_locked()
				// - Check (PageUptodate(page) &&
				//   !PageError(page))
				// Update initialized size in the attribute and
				// in the inode.
				// Again, for each page do:
				// - block_dirty_folio();
				// - put_page()
				// We don't need to wait on the writes.
				// Update iblock.
			}
			/*
			 * The current page straddles initialized size. Zero
			 * all non-uptodate buffers and set them uptodate (and
			 * dirty?). Note, there aren't any non-uptodate buffers
			 * if the page is uptodate.
			 * FIXME: For an uptodate page, the buffers may need to
			 * be written out because they were not initialized on
			 * disk before.
			 */
			if (!PageUptodate(page)) {
				// TODO:
				// Zero any non-uptodate buffers up to i_size.
				// Set them uptodate and dirty.
			}
			// TODO:
			// Update initialized size in the attribute and in the
			// inode (up to i_size).
			// Update iblock.
			// FIXME: This is inefficient. Try to batch the two
			// size changes to happen in one go.
			ntfs_error(vol->sb, "Writing beyond initialized size "
					"is not supported yet. Sorry.");
			err = -EOPNOTSUPP;
			break;
			// Do NOT set_buffer_new() BUT DO clear buffer range
			// outside write request range.
			// set_buffer_uptodate() on complete buffers as well as
			// set_buffer_dirty().
		}

		/* No need to map buffers that are already mapped. */
		if (buffer_mapped(bh))
			continue;

		/* Unmapped, dirty buffer. Need to map it. */
		bh->b_bdev = vol->sb->s_bdev;

		/* Convert block into corresponding vcn and offset. */
		vcn = (VCN)block << blocksize_bits;
		vcn_ofs = vcn & vol->cluster_size_mask;
		vcn >>= vol->cluster_size_bits;
		if (!rl) {
lock_retry_remap:
			down_read(&ni->runlist.lock);
			rl = ni->runlist.rl;
		}
		if (likely(rl != NULL)) {
			/* Seek to element containing target vcn. */
			while (rl->length && rl[1].vcn <= vcn)
				rl++;
			lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
		} else
			lcn = LCN_RL_NOT_MAPPED;
		/* Successful remap. */
		if (lcn >= 0) {
			/* Setup buffer head to point to correct block. */
			bh->b_blocknr = ((lcn << vol->cluster_size_bits) +
					vcn_ofs) >> blocksize_bits;
			set_buffer_mapped(bh);
			continue;
		}
		/* It is a hole, need to instantiate it. */
		if (lcn == LCN_HOLE) {
			u8 *kaddr;
			unsigned long *bpos, *bend;

			/*
			 * Check if the buffer is zero, scanning it one
			 * unsigned long at a time.
			 */
			kaddr = kmap_atomic(page);
			bpos = (unsigned long *)(kaddr + bh_offset(bh));
			bend = (unsigned long *)((u8*)bpos + blocksize);
			do {
				if (unlikely(*bpos))
					break;
			} while (likely(++bpos < bend));
			kunmap_atomic(kaddr);
			if (bpos == bend) {
				/*
				 * Buffer is zero and sparse, no need to write
				 * it.
				 */
				bh->b_blocknr = -1;
				clear_buffer_dirty(bh);
				continue;
			}
			// TODO: Instantiate the hole.
			// clear_buffer_new(bh);
			// clean_bdev_bh_alias(bh);
			ntfs_error(vol->sb, "Writing into sparse regions is "
					"not supported yet. Sorry.");
			err = -EOPNOTSUPP;
			break;
		}
		/* If first try and runlist unmapped, map and retry. */
		if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
			is_retry = true;
			/*
			 * Attempt to map runlist, dropping lock for
			 * the duration.
			 */
			up_read(&ni->runlist.lock);
			err = ntfs_map_runlist(ni, vcn);
			if (likely(!err))
				goto lock_retry_remap;
			rl = NULL;
		} else if (!rl)
			up_read(&ni->runlist.lock);
		/*
		 * If buffer is outside the runlist, truncate has cut it out
		 * of the runlist. Just clean and clear the buffer and set it
		 * uptodate so it can get discarded by the VM.
		 */
		if (err == -ENOENT || lcn == LCN_ENOENT) {
			bh->b_blocknr = -1;
			clear_buffer_dirty(bh);
			zero_user(page, bh_offset(bh), blocksize);
			set_buffer_uptodate(bh);
			err = 0;
			continue;
		}
		/* Failed to map the buffer, even after retrying. */
		if (!err)
			err = -EIO;
		/* No known on-disk location for this buffer. */
		bh->b_blocknr = -1;
		ntfs_error(vol->sb, "Failed to write to inode 0x%lx, "
				"attribute type 0x%x, vcn 0x%llx, offset 0x%x "
				"because its location on disk could not be "
				"determined%s (error code %i).", ni->mft_no,
				ni->type, (unsigned long long)vcn,
				vcn_ofs, is_retry ? " even after "
				"retrying" : "", err);
		break;
	} while (block++, (bh = bh->b_this_page) != head);

	/* Release the lock if we took it. */
	if (rl)
		up_read(&ni->runlist.lock);

	/* For the error case, need to reset bh to the beginning. */
	bh = head;

	/* Just an optimization, so ->read_folio() is not called later. */
	if (unlikely(!PageUptodate(page))) {
		int uptodate = 1;
		do {
			if (!buffer_uptodate(bh)) {
				uptodate = 0;
				bh = head;
				break;
			}
		} while ((bh = bh->b_this_page) != head);
		if (uptodate)
			SetPageUptodate(page);
	}

	/* Setup all mapped, dirty buffers for async write i/o. */
	do {
		if (buffer_mapped(bh) && buffer_dirty(bh)) {
			lock_buffer(bh);
			if (test_clear_buffer_dirty(bh)) {
				BUG_ON(!buffer_uptodate(bh));
				mark_buffer_async_write(bh);
			} else
				unlock_buffer(bh);
		} else if (unlikely(err)) {
			/*
			 * For the error case. The buffer may have been set
			 * dirty during attachment to a dirty page.
			 */
			if (err != -ENOMEM)
				clear_buffer_dirty(bh);
		}
	} while ((bh = bh->b_this_page) != head);

	if (unlikely(err)) {
		// TODO: Remove the -EOPNOTSUPP check later on...
		if (unlikely(err == -EOPNOTSUPP))
			err = 0;
		else if (err == -ENOMEM) {
			ntfs_warning(vol->sb, "Error allocating memory. "
					"Redirtying page so we try again "
					"later.");
			/*
			 * Put the page back on mapping->dirty_pages, but
			 * leave its buffer's dirty state as-is.
			 */
			redirty_page_for_writepage(wbc, page);
			err = 0;
		} else
			SetPageError(page);
	}

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);	/* Keeps try_to_free_buffers() away. */

	/* Submit the prepared buffers for i/o. */
	need_end_writeback = true;
	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			submit_bh(REQ_OP_WRITE, bh);
			need_end_writeback = false;
		}
		bh = next;
	} while (bh != head);
	unlock_page(page);

	/* If no i/o was started, need to end_page_writeback(). */
	if (unlikely(need_end_writeback))
		end_page_writeback(page);

	ntfs_debug("Done.");
	return err;
}
8761da177e4SLinus Torvalds
8771da177e4SLinus Torvalds /**
8781da177e4SLinus Torvalds * ntfs_write_mst_block - write a @page to the backing store
8791da177e4SLinus Torvalds * @page: page cache page to write out
8801da177e4SLinus Torvalds * @wbc: writeback control structure
8811da177e4SLinus Torvalds *
8821da177e4SLinus Torvalds * This function is for writing pages belonging to non-resident, mst protected
8831da177e4SLinus Torvalds * attributes to their backing store. The only supported attributes are index
8841da177e4SLinus Torvalds * allocation and $MFT/$DATA. Both directory inodes and index inodes are
8851da177e4SLinus Torvalds * supported for the index allocation case.
8861da177e4SLinus Torvalds *
8871da177e4SLinus Torvalds * The page must remain locked for the duration of the write because we apply
8881da177e4SLinus Torvalds * the mst fixups, write, and then undo the fixups, so if we were to unlock the
8891da177e4SLinus Torvalds * page before undoing the fixups, any other user of the page will see the
8901da177e4SLinus Torvalds * page contents as corrupt.
8911da177e4SLinus Torvalds *
8921da177e4SLinus Torvalds * We clear the page uptodate flag for the duration of the function to ensure
8931da177e4SLinus Torvalds * exclusion for the $MFT/$DATA case against someone mapping an mft record we
8941da177e4SLinus Torvalds * are about to apply the mst fixups to.
8951da177e4SLinus Torvalds *
8961da177e4SLinus Torvalds * Return 0 on success and -errno on error.
8971da177e4SLinus Torvalds *
8981da177e4SLinus Torvalds * Based on ntfs_write_block(), ntfs_mft_writepage(), and
8991da177e4SLinus Torvalds * write_mft_record_nolock().
9001da177e4SLinus Torvalds */
ntfs_write_mst_block(struct page * page,struct writeback_control * wbc)9011da177e4SLinus Torvalds static int ntfs_write_mst_block(struct page *page,
9021da177e4SLinus Torvalds struct writeback_control *wbc)
9031da177e4SLinus Torvalds {
9041da177e4SLinus Torvalds sector_t block, dblock, rec_block;
9051da177e4SLinus Torvalds struct inode *vi = page->mapping->host;
9061da177e4SLinus Torvalds ntfs_inode *ni = NTFS_I(vi);
9071da177e4SLinus Torvalds ntfs_volume *vol = ni->vol;
9081da177e4SLinus Torvalds u8 *kaddr;
9091da177e4SLinus Torvalds unsigned int rec_size = ni->itype.index.block_size;
910ac4ecf96SKees Cook ntfs_inode *locked_nis[PAGE_SIZE / NTFS_BLOCK_SIZE];
9111da177e4SLinus Torvalds struct buffer_head *bh, *head, *tbh, *rec_start_bh;
912d53ee322SAnton Altaparmakov struct buffer_head *bhs[MAX_BUF_PER_PAGE];
9131da177e4SLinus Torvalds runlist_element *rl;
914d53ee322SAnton Altaparmakov int i, nr_locked_nis, nr_recs, nr_bhs, max_bhs, bhs_per_rec, err, err2;
915d53ee322SAnton Altaparmakov unsigned bh_size, rec_size_bits;
916c49c3111SRichard Knutsson bool sync, is_mft, page_is_dirty, rec_is_dirty;
917d53ee322SAnton Altaparmakov unsigned char bh_size_bits;
9181da177e4SLinus Torvalds
919ac4ecf96SKees Cook if (WARN_ON(rec_size < NTFS_BLOCK_SIZE))
920ac4ecf96SKees Cook return -EINVAL;
921ac4ecf96SKees Cook
9221da177e4SLinus Torvalds ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
9231da177e4SLinus Torvalds "0x%lx.", vi->i_ino, ni->type, page->index);
9241da177e4SLinus Torvalds BUG_ON(!NInoNonResident(ni));
9251da177e4SLinus Torvalds BUG_ON(!NInoMstProtected(ni));
9261da177e4SLinus Torvalds is_mft = (S_ISREG(vi->i_mode) && !vi->i_ino);
9271da177e4SLinus Torvalds /*
9281da177e4SLinus Torvalds * NOTE: ntfs_write_mst_block() would be called for $MFTMirr if a page
9291da177e4SLinus Torvalds * in its page cache were to be marked dirty. However this should
9301da177e4SLinus Torvalds * never happen with the current driver and considering we do not
9311da177e4SLinus Torvalds * handle this case here we do want to BUG(), at least for now.
9321da177e4SLinus Torvalds */
9331da177e4SLinus Torvalds BUG_ON(!(is_mft || S_ISDIR(vi->i_mode) ||
9341da177e4SLinus Torvalds (NInoAttr(ni) && ni->type == AT_INDEX_ALLOCATION)));
93578af34f0SAnton Altaparmakov bh_size = vol->sb->s_blocksize;
93678af34f0SAnton Altaparmakov bh_size_bits = vol->sb->s_blocksize_bits;
93709cbfeafSKirill A. Shutemov max_bhs = PAGE_SIZE / bh_size;
9381da177e4SLinus Torvalds BUG_ON(!max_bhs);
939d53ee322SAnton Altaparmakov BUG_ON(max_bhs > MAX_BUF_PER_PAGE);
9401da177e4SLinus Torvalds
9411da177e4SLinus Torvalds /* Were we called for sync purposes? */
9421da177e4SLinus Torvalds sync = (wbc->sync_mode == WB_SYNC_ALL);
9431da177e4SLinus Torvalds
9441da177e4SLinus Torvalds /* Make sure we have mapped buffers. */
9451da177e4SLinus Torvalds bh = head = page_buffers(page);
9461da177e4SLinus Torvalds BUG_ON(!bh);
9471da177e4SLinus Torvalds
9481da177e4SLinus Torvalds rec_size_bits = ni->itype.index.block_size_bits;
94909cbfeafSKirill A. Shutemov BUG_ON(!(PAGE_SIZE >> rec_size_bits));
9501da177e4SLinus Torvalds bhs_per_rec = rec_size >> bh_size_bits;
9511da177e4SLinus Torvalds BUG_ON(!bhs_per_rec);
9521da177e4SLinus Torvalds
9531da177e4SLinus Torvalds /* The first block in the page. */
9541da177e4SLinus Torvalds rec_block = block = (sector_t)page->index <<
95509cbfeafSKirill A. Shutemov (PAGE_SHIFT - bh_size_bits);
9561da177e4SLinus Torvalds
9571da177e4SLinus Torvalds /* The first out of bounds block for the data size. */
95807a4e2daSAnton Altaparmakov dblock = (i_size_read(vi) + bh_size - 1) >> bh_size_bits;
9591da177e4SLinus Torvalds
9601da177e4SLinus Torvalds rl = NULL;
9611da177e4SLinus Torvalds err = err2 = nr_bhs = nr_recs = nr_locked_nis = 0;
962c49c3111SRichard Knutsson page_is_dirty = rec_is_dirty = false;
9631da177e4SLinus Torvalds rec_start_bh = NULL;
9641da177e4SLinus Torvalds do {
965c49c3111SRichard Knutsson bool is_retry = false;
9661da177e4SLinus Torvalds
9671da177e4SLinus Torvalds if (likely(block < rec_block)) {
9681da177e4SLinus Torvalds if (unlikely(block >= dblock)) {
9691da177e4SLinus Torvalds clear_buffer_dirty(bh);
970946929d8SAnton Altaparmakov set_buffer_uptodate(bh);
9711da177e4SLinus Torvalds continue;
9721da177e4SLinus Torvalds }
9731da177e4SLinus Torvalds /*
9741da177e4SLinus Torvalds * This block is not the first one in the record. We
9751da177e4SLinus Torvalds * ignore the buffer's dirty state because we could
9761da177e4SLinus Torvalds * have raced with a parallel mark_ntfs_record_dirty().
9771da177e4SLinus Torvalds */
9781da177e4SLinus Torvalds if (!rec_is_dirty)
9791da177e4SLinus Torvalds continue;
9801da177e4SLinus Torvalds if (unlikely(err2)) {
9811da177e4SLinus Torvalds if (err2 != -ENOMEM)
9821da177e4SLinus Torvalds clear_buffer_dirty(bh);
9831da177e4SLinus Torvalds continue;
9841da177e4SLinus Torvalds }
9851da177e4SLinus Torvalds } else /* if (block == rec_block) */ {
9861da177e4SLinus Torvalds BUG_ON(block > rec_block);
9871da177e4SLinus Torvalds /* This block is the first one in the record. */
9881da177e4SLinus Torvalds rec_block += bhs_per_rec;
9891da177e4SLinus Torvalds err2 = 0;
9901da177e4SLinus Torvalds if (unlikely(block >= dblock)) {
9911da177e4SLinus Torvalds clear_buffer_dirty(bh);
9921da177e4SLinus Torvalds continue;
9931da177e4SLinus Torvalds }
9941da177e4SLinus Torvalds if (!buffer_dirty(bh)) {
9951da177e4SLinus Torvalds /* Clean records are not written out. */
996c49c3111SRichard Knutsson rec_is_dirty = false;
9971da177e4SLinus Torvalds continue;
9981da177e4SLinus Torvalds }
999c49c3111SRichard Knutsson rec_is_dirty = true;
10001da177e4SLinus Torvalds rec_start_bh = bh;
10011da177e4SLinus Torvalds }
10021da177e4SLinus Torvalds /* Need to map the buffer if it is not mapped already. */
10031da177e4SLinus Torvalds if (unlikely(!buffer_mapped(bh))) {
10041da177e4SLinus Torvalds VCN vcn;
10051da177e4SLinus Torvalds LCN lcn;
10061da177e4SLinus Torvalds unsigned int vcn_ofs;
10071da177e4SLinus Torvalds
1008481d0374SAnton Altaparmakov bh->b_bdev = vol->sb->s_bdev;
10091da177e4SLinus Torvalds /* Obtain the vcn and offset of the current block. */
10101da177e4SLinus Torvalds vcn = (VCN)block << bh_size_bits;
10111da177e4SLinus Torvalds vcn_ofs = vcn & vol->cluster_size_mask;
10121da177e4SLinus Torvalds vcn >>= vol->cluster_size_bits;
10131da177e4SLinus Torvalds if (!rl) {
10141da177e4SLinus Torvalds lock_retry_remap:
10151da177e4SLinus Torvalds down_read(&ni->runlist.lock);
10161da177e4SLinus Torvalds rl = ni->runlist.rl;
10171da177e4SLinus Torvalds }
10181da177e4SLinus Torvalds if (likely(rl != NULL)) {
10191da177e4SLinus Torvalds /* Seek to element containing target vcn. */
10201da177e4SLinus Torvalds while (rl->length && rl[1].vcn <= vcn)
10211da177e4SLinus Torvalds rl++;
10221da177e4SLinus Torvalds lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
10231da177e4SLinus Torvalds } else
10241da177e4SLinus Torvalds lcn = LCN_RL_NOT_MAPPED;
10251da177e4SLinus Torvalds /* Successful remap. */
10261da177e4SLinus Torvalds if (likely(lcn >= 0)) {
10271da177e4SLinus Torvalds /* Setup buffer head to correct block. */
10281da177e4SLinus Torvalds bh->b_blocknr = ((lcn <<
10291da177e4SLinus Torvalds vol->cluster_size_bits) +
10301da177e4SLinus Torvalds vcn_ofs) >> bh_size_bits;
10311da177e4SLinus Torvalds set_buffer_mapped(bh);
10321da177e4SLinus Torvalds } else {
10331da177e4SLinus Torvalds /*
10341da177e4SLinus Torvalds * Remap failed. Retry to map the runlist once
10351da177e4SLinus Torvalds * unless we are working on $MFT which always
10361da177e4SLinus Torvalds * has the whole of its runlist in memory.
10371da177e4SLinus Torvalds */
10381da177e4SLinus Torvalds if (!is_mft && !is_retry &&
10391da177e4SLinus Torvalds lcn == LCN_RL_NOT_MAPPED) {
1040c49c3111SRichard Knutsson is_retry = true;
10411da177e4SLinus Torvalds /*
10421da177e4SLinus Torvalds * Attempt to map runlist, dropping
10431da177e4SLinus Torvalds * lock for the duration.
10441da177e4SLinus Torvalds */
10451da177e4SLinus Torvalds up_read(&ni->runlist.lock);
10461da177e4SLinus Torvalds err2 = ntfs_map_runlist(ni, vcn);
10471da177e4SLinus Torvalds if (likely(!err2))
10481da177e4SLinus Torvalds goto lock_retry_remap;
10491da177e4SLinus Torvalds if (err2 == -ENOMEM)
1050c49c3111SRichard Knutsson page_is_dirty = true;
10511da177e4SLinus Torvalds lcn = err2;
10529f993fe4SAnton Altaparmakov } else {
10531da177e4SLinus Torvalds err2 = -EIO;
10549f993fe4SAnton Altaparmakov if (!rl)
10559f993fe4SAnton Altaparmakov up_read(&ni->runlist.lock);
10569f993fe4SAnton Altaparmakov }
10571da177e4SLinus Torvalds /* Hard error. Abort writing this record. */
10581da177e4SLinus Torvalds if (!err || err == -ENOMEM)
10591da177e4SLinus Torvalds err = err2;
10601da177e4SLinus Torvalds bh->b_blocknr = -1;
10611da177e4SLinus Torvalds ntfs_error(vol->sb, "Cannot write ntfs record "
10621da177e4SLinus Torvalds "0x%llx (inode 0x%lx, "
10631da177e4SLinus Torvalds "attribute type 0x%x) because "
10641da177e4SLinus Torvalds "its location on disk could "
10651da177e4SLinus Torvalds "not be determined (error "
10668907547dSRandy Dunlap "code %lli).",
10678907547dSRandy Dunlap (long long)block <<
10681da177e4SLinus Torvalds bh_size_bits >>
10691da177e4SLinus Torvalds vol->mft_record_size_bits,
10701da177e4SLinus Torvalds ni->mft_no, ni->type,
10711da177e4SLinus Torvalds (long long)lcn);
10721da177e4SLinus Torvalds /*
10731da177e4SLinus Torvalds * If this is not the first buffer, remove the
10741da177e4SLinus Torvalds * buffers in this record from the list of
10751da177e4SLinus Torvalds * buffers to write and clear their dirty bit
10761da177e4SLinus Torvalds * if not error -ENOMEM.
10771da177e4SLinus Torvalds */
10781da177e4SLinus Torvalds if (rec_start_bh != bh) {
10791da177e4SLinus Torvalds while (bhs[--nr_bhs] != rec_start_bh)
10801da177e4SLinus Torvalds ;
10811da177e4SLinus Torvalds if (err2 != -ENOMEM) {
10821da177e4SLinus Torvalds do {
10831da177e4SLinus Torvalds clear_buffer_dirty(
10841da177e4SLinus Torvalds rec_start_bh);
10851da177e4SLinus Torvalds } while ((rec_start_bh =
10861da177e4SLinus Torvalds rec_start_bh->
10871da177e4SLinus Torvalds b_this_page) !=
10881da177e4SLinus Torvalds bh);
10891da177e4SLinus Torvalds }
10901da177e4SLinus Torvalds }
10911da177e4SLinus Torvalds continue;
10921da177e4SLinus Torvalds }
10931da177e4SLinus Torvalds }
10941da177e4SLinus Torvalds BUG_ON(!buffer_uptodate(bh));
10951da177e4SLinus Torvalds BUG_ON(nr_bhs >= max_bhs);
10961da177e4SLinus Torvalds bhs[nr_bhs++] = bh;
10971da177e4SLinus Torvalds } while (block++, (bh = bh->b_this_page) != head);
10981da177e4SLinus Torvalds if (unlikely(rl))
10991da177e4SLinus Torvalds up_read(&ni->runlist.lock);
11001da177e4SLinus Torvalds /* If there were no dirty buffers, we are done. */
11011da177e4SLinus Torvalds if (!nr_bhs)
11021da177e4SLinus Torvalds goto done;
11031da177e4SLinus Torvalds /* Map the page so we can access its contents. */
11041da177e4SLinus Torvalds kaddr = kmap(page);
11051da177e4SLinus Torvalds /* Clear the page uptodate flag whilst the mst fixups are applied. */
11061da177e4SLinus Torvalds BUG_ON(!PageUptodate(page));
11071da177e4SLinus Torvalds ClearPageUptodate(page);
11081da177e4SLinus Torvalds for (i = 0; i < nr_bhs; i++) {
11091da177e4SLinus Torvalds unsigned int ofs;
11101da177e4SLinus Torvalds
11111da177e4SLinus Torvalds /* Skip buffers which are not at the beginning of records. */
11121da177e4SLinus Torvalds if (i % bhs_per_rec)
11131da177e4SLinus Torvalds continue;
11141da177e4SLinus Torvalds tbh = bhs[i];
11151da177e4SLinus Torvalds ofs = bh_offset(tbh);
11161da177e4SLinus Torvalds if (is_mft) {
11171da177e4SLinus Torvalds ntfs_inode *tni;
11181da177e4SLinus Torvalds unsigned long mft_no;
11191da177e4SLinus Torvalds
11201da177e4SLinus Torvalds /* Get the mft record number. */
112109cbfeafSKirill A. Shutemov mft_no = (((s64)page->index << PAGE_SHIFT) + ofs)
11221da177e4SLinus Torvalds >> rec_size_bits;
11231da177e4SLinus Torvalds /* Check whether to write this mft record. */
11241da177e4SLinus Torvalds tni = NULL;
11251da177e4SLinus Torvalds if (!ntfs_may_write_mft_record(vol, mft_no,
11261da177e4SLinus Torvalds (MFT_RECORD*)(kaddr + ofs), &tni)) {
11271da177e4SLinus Torvalds /*
11281da177e4SLinus Torvalds * The record should not be written. This
11291da177e4SLinus Torvalds * means we need to redirty the page before
11301da177e4SLinus Torvalds * returning.
11311da177e4SLinus Torvalds */
1132c49c3111SRichard Knutsson page_is_dirty = true;
11331da177e4SLinus Torvalds /*
11341da177e4SLinus Torvalds * Remove the buffers in this mft record from
11351da177e4SLinus Torvalds * the list of buffers to write.
11361da177e4SLinus Torvalds */
11371da177e4SLinus Torvalds do {
11381da177e4SLinus Torvalds bhs[i] = NULL;
11391da177e4SLinus Torvalds } while (++i % bhs_per_rec);
11401da177e4SLinus Torvalds continue;
11411da177e4SLinus Torvalds }
11421da177e4SLinus Torvalds /*
11431da177e4SLinus Torvalds * The record should be written. If a locked ntfs
11441da177e4SLinus Torvalds * inode was returned, add it to the array of locked
11451da177e4SLinus Torvalds * ntfs inodes.
11461da177e4SLinus Torvalds */
11471da177e4SLinus Torvalds if (tni)
11481da177e4SLinus Torvalds locked_nis[nr_locked_nis++] = tni;
11491da177e4SLinus Torvalds }
11501da177e4SLinus Torvalds /* Apply the mst protection fixups. */
11511da177e4SLinus Torvalds err2 = pre_write_mst_fixup((NTFS_RECORD*)(kaddr + ofs),
11521da177e4SLinus Torvalds rec_size);
11531da177e4SLinus Torvalds if (unlikely(err2)) {
11541da177e4SLinus Torvalds if (!err || err == -ENOMEM)
11551da177e4SLinus Torvalds err = -EIO;
11561da177e4SLinus Torvalds ntfs_error(vol->sb, "Failed to apply mst fixups "
11571da177e4SLinus Torvalds "(inode 0x%lx, attribute type 0x%x, "
11581da177e4SLinus Torvalds "page index 0x%lx, page offset 0x%x)!"
11591da177e4SLinus Torvalds " Unmount and run chkdsk.", vi->i_ino,
11601da177e4SLinus Torvalds ni->type, page->index, ofs);
11611da177e4SLinus Torvalds /*
11621da177e4SLinus Torvalds * Mark all the buffers in this record clean as we do
11631da177e4SLinus Torvalds * not want to write corrupt data to disk.
11641da177e4SLinus Torvalds */
11651da177e4SLinus Torvalds do {
11661da177e4SLinus Torvalds clear_buffer_dirty(bhs[i]);
11671da177e4SLinus Torvalds bhs[i] = NULL;
11681da177e4SLinus Torvalds } while (++i % bhs_per_rec);
11691da177e4SLinus Torvalds continue;
11701da177e4SLinus Torvalds }
11711da177e4SLinus Torvalds nr_recs++;
11721da177e4SLinus Torvalds }
11731da177e4SLinus Torvalds /* If no records are to be written out, we are done. */
11741da177e4SLinus Torvalds if (!nr_recs)
11751da177e4SLinus Torvalds goto unm_done;
11761da177e4SLinus Torvalds flush_dcache_page(page);
11771da177e4SLinus Torvalds /* Lock buffers and start synchronous write i/o on them. */
11781da177e4SLinus Torvalds for (i = 0; i < nr_bhs; i++) {
11791da177e4SLinus Torvalds tbh = bhs[i];
11801da177e4SLinus Torvalds if (!tbh)
11811da177e4SLinus Torvalds continue;
1182ca5de404SNick Piggin if (!trylock_buffer(tbh))
11831da177e4SLinus Torvalds BUG();
11841da177e4SLinus Torvalds /* The buffer dirty state is now irrelevant, just clean it. */
11851da177e4SLinus Torvalds clear_buffer_dirty(tbh);
11861da177e4SLinus Torvalds BUG_ON(!buffer_uptodate(tbh));
11871da177e4SLinus Torvalds BUG_ON(!buffer_mapped(tbh));
11881da177e4SLinus Torvalds get_bh(tbh);
11891da177e4SLinus Torvalds tbh->b_end_io = end_buffer_write_sync;
11901420c4a5SBart Van Assche submit_bh(REQ_OP_WRITE, tbh);
11911da177e4SLinus Torvalds }
11921da177e4SLinus Torvalds /* Synchronize the mft mirror now if not @sync. */
11931da177e4SLinus Torvalds if (is_mft && !sync)
11941da177e4SLinus Torvalds goto do_mirror;
11951da177e4SLinus Torvalds do_wait:
11961da177e4SLinus Torvalds /* Wait on i/o completion of buffers. */
11971da177e4SLinus Torvalds for (i = 0; i < nr_bhs; i++) {
11981da177e4SLinus Torvalds tbh = bhs[i];
11991da177e4SLinus Torvalds if (!tbh)
12001da177e4SLinus Torvalds continue;
12011da177e4SLinus Torvalds wait_on_buffer(tbh);
12021da177e4SLinus Torvalds if (unlikely(!buffer_uptodate(tbh))) {
12031da177e4SLinus Torvalds ntfs_error(vol->sb, "I/O error while writing ntfs "
12041da177e4SLinus Torvalds "record buffer (inode 0x%lx, "
12051da177e4SLinus Torvalds "attribute type 0x%x, page index "
12061da177e4SLinus Torvalds "0x%lx, page offset 0x%lx)! Unmount "
12071da177e4SLinus Torvalds "and run chkdsk.", vi->i_ino, ni->type,
12081da177e4SLinus Torvalds page->index, bh_offset(tbh));
12091da177e4SLinus Torvalds if (!err || err == -ENOMEM)
12101da177e4SLinus Torvalds err = -EIO;
12111da177e4SLinus Torvalds /*
12121da177e4SLinus Torvalds * Set the buffer uptodate so the page and buffer
12131da177e4SLinus Torvalds * states do not become out of sync.
12141da177e4SLinus Torvalds */
12151da177e4SLinus Torvalds set_buffer_uptodate(tbh);
12161da177e4SLinus Torvalds }
12171da177e4SLinus Torvalds }
12181da177e4SLinus Torvalds /* If @sync, now synchronize the mft mirror. */
12191da177e4SLinus Torvalds if (is_mft && sync) {
12201da177e4SLinus Torvalds do_mirror:
12211da177e4SLinus Torvalds for (i = 0; i < nr_bhs; i++) {
12221da177e4SLinus Torvalds unsigned long mft_no;
12231da177e4SLinus Torvalds unsigned int ofs;
12241da177e4SLinus Torvalds
12251da177e4SLinus Torvalds /*
12261da177e4SLinus Torvalds * Skip buffers which are not at the beginning of
12271da177e4SLinus Torvalds * records.
12281da177e4SLinus Torvalds */
12291da177e4SLinus Torvalds if (i % bhs_per_rec)
12301da177e4SLinus Torvalds continue;
12311da177e4SLinus Torvalds tbh = bhs[i];
12321da177e4SLinus Torvalds /* Skip removed buffers (and hence records). */
12331da177e4SLinus Torvalds if (!tbh)
12341da177e4SLinus Torvalds continue;
12351da177e4SLinus Torvalds ofs = bh_offset(tbh);
12361da177e4SLinus Torvalds /* Get the mft record number. */
123709cbfeafSKirill A. Shutemov mft_no = (((s64)page->index << PAGE_SHIFT) + ofs)
12381da177e4SLinus Torvalds >> rec_size_bits;
12391da177e4SLinus Torvalds if (mft_no < vol->mftmirr_size)
12401da177e4SLinus Torvalds ntfs_sync_mft_mirror(vol, mft_no,
12411da177e4SLinus Torvalds (MFT_RECORD*)(kaddr + ofs),
12421da177e4SLinus Torvalds sync);
12431da177e4SLinus Torvalds }
12441da177e4SLinus Torvalds if (!sync)
12451da177e4SLinus Torvalds goto do_wait;
12461da177e4SLinus Torvalds }
12471da177e4SLinus Torvalds /* Remove the mst protection fixups again. */
12481da177e4SLinus Torvalds for (i = 0; i < nr_bhs; i++) {
12491da177e4SLinus Torvalds if (!(i % bhs_per_rec)) {
12501da177e4SLinus Torvalds tbh = bhs[i];
12511da177e4SLinus Torvalds if (!tbh)
12521da177e4SLinus Torvalds continue;
12531da177e4SLinus Torvalds post_write_mst_fixup((NTFS_RECORD*)(kaddr +
12541da177e4SLinus Torvalds bh_offset(tbh)));
12551da177e4SLinus Torvalds }
12561da177e4SLinus Torvalds }
12571da177e4SLinus Torvalds flush_dcache_page(page);
12581da177e4SLinus Torvalds unm_done:
12591da177e4SLinus Torvalds /* Unlock any locked inodes. */
12601da177e4SLinus Torvalds while (nr_locked_nis-- > 0) {
12611da177e4SLinus Torvalds ntfs_inode *tni, *base_tni;
12621da177e4SLinus Torvalds
12631da177e4SLinus Torvalds tni = locked_nis[nr_locked_nis];
12641da177e4SLinus Torvalds /* Get the base inode. */
12654e5e529aSIngo Molnar mutex_lock(&tni->extent_lock);
12661da177e4SLinus Torvalds if (tni->nr_extents >= 0)
12671da177e4SLinus Torvalds base_tni = tni;
12681da177e4SLinus Torvalds else {
12691da177e4SLinus Torvalds base_tni = tni->ext.base_ntfs_ino;
12701da177e4SLinus Torvalds BUG_ON(!base_tni);
12711da177e4SLinus Torvalds }
12724e5e529aSIngo Molnar mutex_unlock(&tni->extent_lock);
12731da177e4SLinus Torvalds ntfs_debug("Unlocking %s inode 0x%lx.",
12741da177e4SLinus Torvalds tni == base_tni ? "base" : "extent",
12751da177e4SLinus Torvalds tni->mft_no);
12764e5e529aSIngo Molnar mutex_unlock(&tni->mrec_lock);
12771da177e4SLinus Torvalds atomic_dec(&tni->count);
12781da177e4SLinus Torvalds iput(VFS_I(base_tni));
12791da177e4SLinus Torvalds }
12801da177e4SLinus Torvalds SetPageUptodate(page);
12811da177e4SLinus Torvalds kunmap(page);
12821da177e4SLinus Torvalds done:
12831da177e4SLinus Torvalds if (unlikely(err && err != -ENOMEM)) {
12841da177e4SLinus Torvalds /*
12851da177e4SLinus Torvalds * Set page error if there is only one ntfs record in the page.
		 * Otherwise we would lose per-record granularity.
12871da177e4SLinus Torvalds */
128809cbfeafSKirill A. Shutemov if (ni->itype.index.block_size == PAGE_SIZE)
12891da177e4SLinus Torvalds SetPageError(page);
12901da177e4SLinus Torvalds NVolSetErrors(vol);
12911da177e4SLinus Torvalds }
12921da177e4SLinus Torvalds if (page_is_dirty) {
12931da177e4SLinus Torvalds ntfs_debug("Page still contains one or more dirty ntfs "
12941da177e4SLinus Torvalds "records. Redirtying the page starting at "
12951da177e4SLinus Torvalds "record 0x%lx.", page->index <<
129609cbfeafSKirill A. Shutemov (PAGE_SHIFT - rec_size_bits));
12971da177e4SLinus Torvalds redirty_page_for_writepage(wbc, page);
12981da177e4SLinus Torvalds unlock_page(page);
12991da177e4SLinus Torvalds } else {
13001da177e4SLinus Torvalds /*
13011da177e4SLinus Torvalds * Keep the VM happy. This must be done otherwise the
13021da177e4SLinus Torvalds * radix-tree tag PAGECACHE_TAG_DIRTY remains set even though
13031da177e4SLinus Torvalds * the page is clean.
13041da177e4SLinus Torvalds */
13051da177e4SLinus Torvalds BUG_ON(PageWriteback(page));
13061da177e4SLinus Torvalds set_page_writeback(page);
13071da177e4SLinus Torvalds unlock_page(page);
13081da177e4SLinus Torvalds end_page_writeback(page);
13091da177e4SLinus Torvalds }
13101da177e4SLinus Torvalds if (likely(!err))
13111da177e4SLinus Torvalds ntfs_debug("Done.");
13121da177e4SLinus Torvalds return err;
13131da177e4SLinus Torvalds }
13141da177e4SLinus Torvalds
13151da177e4SLinus Torvalds /**
13161da177e4SLinus Torvalds * ntfs_writepage - write a @page to the backing store
13171da177e4SLinus Torvalds * @page: page cache page to write out
13181da177e4SLinus Torvalds * @wbc: writeback control structure
13191da177e4SLinus Torvalds *
13201da177e4SLinus Torvalds * This is called from the VM when it wants to have a dirty ntfs page cache
13211da177e4SLinus Torvalds * page cleaned. The VM has already locked the page and marked it clean.
13221da177e4SLinus Torvalds *
13231da177e4SLinus Torvalds * For non-resident attributes, ntfs_writepage() writes the @page by calling
13241da177e4SLinus Torvalds * the ntfs version of the generic block_write_full_page() function,
13251da177e4SLinus Torvalds * ntfs_write_block(), which in turn if necessary creates and writes the
13261da177e4SLinus Torvalds * buffers associated with the page asynchronously.
13271da177e4SLinus Torvalds *
13281da177e4SLinus Torvalds * For resident attributes, OTOH, ntfs_writepage() writes the @page by copying
13291da177e4SLinus Torvalds * the data to the mft record (which at this stage is most likely in memory).
13301da177e4SLinus Torvalds * The mft record is then marked dirty and written out asynchronously via the
13311da177e4SLinus Torvalds * vfs inode dirty code path for the inode the mft record belongs to or via the
13321da177e4SLinus Torvalds * vm page dirty code path for the page the mft record is in.
13331da177e4SLinus Torvalds *
1334933906f8SMatthew Wilcox (Oracle) * Based on ntfs_read_folio() and fs/buffer.c::block_write_full_page().
13351da177e4SLinus Torvalds *
13361da177e4SLinus Torvalds * Return 0 on success and -errno on error.
13371da177e4SLinus Torvalds */
static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
{
	loff_t i_size;
	struct inode *vi = page->mapping->host;
	ntfs_inode *base_ni = NULL, *ni = NTFS_I(vi);
	char *addr;
	ntfs_attr_search_ctx *ctx = NULL;
	MFT_RECORD *m = NULL;
	u32 attr_len;
	int err;

retry_writepage:
	BUG_ON(!PageLocked(page));
	i_size = i_size_read(vi);
	/* Is the page fully outside i_size? (truncate in progress) */
	if (unlikely(page->index >= (i_size + PAGE_SIZE - 1) >>
			PAGE_SHIFT)) {
		struct folio *folio = page_folio(page);
		/*
		 * The page may have dirty, unmapped buffers.  Make them
		 * freeable here, so the page does not leak.
		 */
		block_invalidate_folio(folio, 0, folio_size(folio));
		folio_unlock(folio);
		ntfs_debug("Write outside i_size - truncated?");
		return 0;
	}
	/*
	 * Only $DATA attributes can be encrypted and only unnamed $DATA
	 * attributes can be compressed.  Index root can have the flags set but
	 * this means to create compressed/encrypted files, not that the
	 * attribute is compressed/encrypted.  Note we need to check for
	 * AT_INDEX_ALLOCATION since this is the type of both directory and
	 * index inodes.
	 */
	if (ni->type != AT_INDEX_ALLOCATION) {
		/* If file is encrypted, deny access, just like NT4. */
		if (NInoEncrypted(ni)) {
			unlock_page(page);
			BUG_ON(ni->type != AT_DATA);
			ntfs_debug("Denying write access to encrypted file.");
			return -EACCES;
		}
		/* Compressed data streams are handled in compress.c. */
		if (NInoNonResident(ni) && NInoCompressed(ni)) {
			BUG_ON(ni->type != AT_DATA);
			BUG_ON(ni->name_len);
			// TODO: Implement and replace this with
			// return ntfs_write_compressed_block(page);
			unlock_page(page);
			ntfs_error(vi->i_sb, "Writing to compressed files is "
					"not supported yet. Sorry.");
			return -EOPNOTSUPP;
		}
		// TODO: Implement and remove this check.
		if (NInoNonResident(ni) && NInoSparse(ni)) {
			unlock_page(page);
			ntfs_error(vi->i_sb, "Writing to sparse files is not "
					"supported yet. Sorry.");
			return -EOPNOTSUPP;
		}
	}
	/* NInoNonResident() == NInoIndexAllocPresent() */
	if (NInoNonResident(ni)) {
		/* We have to zero every time due to mmap-at-end-of-file. */
		if (page->index >= (i_size >> PAGE_SHIFT)) {
			/* The page straddles i_size.  Zero the tail beyond it. */
			unsigned int ofs = i_size & ~PAGE_MASK;
			zero_user_segment(page, ofs, PAGE_SIZE);
		}
		/* Handle mst protected attributes. */
		if (NInoMstProtected(ni))
			return ntfs_write_mst_block(page, wbc);
		/* Normal, non-resident data stream. */
		return ntfs_write_block(page, wbc);
	}
	/*
	 * Attribute is resident, implying it is not compressed, encrypted, or
	 * mst protected.  This also means the attribute is smaller than an mft
	 * record and hence smaller than a page, so can simply return error on
	 * any pages with index above 0.  Note the attribute can actually be
	 * marked compressed but if it is resident the actual data is not
	 * compressed so we are ok to ignore the compressed flag here.
	 */
	BUG_ON(page_has_buffers(page));
	BUG_ON(!PageUptodate(page));
	if (unlikely(page->index > 0)) {
		ntfs_error(vi->i_sb, "BUG()! page->index (0x%lx) > 0. "
				"Aborting write.", page->index);
		BUG_ON(PageWriteback(page));
		set_page_writeback(page);
		unlock_page(page);
		end_page_writeback(page);
		return -EIO;
	}
	/*
	 * Attribute inodes store their data in the mft record of their base
	 * inode, so resolve the base inode before mapping the mft record.
	 */
	if (!NInoAttr(ni))
		base_ni = ni;
	else
		base_ni = ni->ext.base_ntfs_ino;
	/* Map, pin, and lock the mft record. */
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		ctx = NULL;
		goto err_out;
	}
	/*
	 * If a parallel write made the attribute non-resident, drop the mft
	 * record and retry the writepage.
	 */
	if (unlikely(NInoNonResident(ni))) {
		unmap_mft_record(base_ni);
		goto retry_writepage;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto err_out;
	}
	/* Find this attribute's value inside the mapped mft record. */
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err))
		goto err_out;
	/*
	 * Keep the VM happy.  This must be done otherwise the radix-tree tag
	 * PAGECACHE_TAG_DIRTY remains set even though the page is clean.
	 */
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);
	unlock_page(page);
	attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);
	i_size = i_size_read(vi);
	if (unlikely(attr_len > i_size)) {
		/* Race with shrinking truncate or a failed truncate. */
		attr_len = i_size;
		/*
		 * If the truncate failed, fix it up now.  If a concurrent
		 * truncate, we do its job, so it does not have to do anything.
		 */
		err = ntfs_resident_attr_value_resize(ctx->mrec, ctx->attr,
				attr_len);
		/* Shrinking cannot fail. */
		BUG_ON(err);
	}
	addr = kmap_atomic(page);
	/* Copy the data from the page to the mft record. */
	memcpy((u8*)ctx->attr +
			le16_to_cpu(ctx->attr->data.resident.value_offset),
			addr, attr_len);
	/* Zero out of bounds area in the page cache page. */
	memset(addr + attr_len, 0, PAGE_SIZE - attr_len);
	kunmap_atomic(addr);
	flush_dcache_page(page);
	flush_dcache_mft_record_page(ctx->ntfs_ino);
	/* We are done with the page. */
	end_page_writeback(page);
	/* Finally, mark the mft record dirty, so it gets written back. */
	mark_mft_record_dirty(ctx->ntfs_ino);
	ntfs_attr_put_search_ctx(ctx);
	unmap_mft_record(base_ni);
	return 0;
err_out:
	if (err == -ENOMEM) {
		/* Transient failure: let writeback retry this page later. */
		ntfs_warning(vi->i_sb, "Error allocating memory. Redirtying "
				"page so we try again later.");
		/*
		 * Put the page back on mapping->dirty_pages, but leave its
		 * buffers' dirty state as-is.
		 */
		redirty_page_for_writepage(wbc, page);
		err = 0;
	} else {
		ntfs_error(vi->i_sb, "Resident attribute write failed with "
				"error %i.", err);
		SetPageError(page);
		NVolSetErrors(ni->vol);
	}
	unlock_page(page);
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	return err;
}
15231da177e4SLinus Torvalds
15241da177e4SLinus Torvalds #endif /* NTFS_RW */
15251da177e4SLinus Torvalds
15261da177e4SLinus Torvalds /**
15273f7fc6f2SAnton Altaparmakov * ntfs_bmap - map logical file block to physical device block
15283f7fc6f2SAnton Altaparmakov * @mapping: address space mapping to which the block to be mapped belongs
15293f7fc6f2SAnton Altaparmakov * @block: logical block to map to its physical device block
15303f7fc6f2SAnton Altaparmakov *
15313f7fc6f2SAnton Altaparmakov * For regular, non-resident files (i.e. not compressed and not encrypted), map
15323f7fc6f2SAnton Altaparmakov * the logical @block belonging to the file described by the address space
15333f7fc6f2SAnton Altaparmakov * mapping @mapping to its physical device block.
15343f7fc6f2SAnton Altaparmakov *
15353f7fc6f2SAnton Altaparmakov * The size of the block is equal to the @s_blocksize field of the super block
15363f7fc6f2SAnton Altaparmakov * of the mounted file system which is guaranteed to be smaller than or equal
15373f7fc6f2SAnton Altaparmakov * to the cluster size thus the block is guaranteed to fit entirely inside the
15383f7fc6f2SAnton Altaparmakov * cluster which means we do not need to care how many contiguous bytes are
15393f7fc6f2SAnton Altaparmakov * available after the beginning of the block.
15403f7fc6f2SAnton Altaparmakov *
15413f7fc6f2SAnton Altaparmakov * Return the physical device block if the mapping succeeded or 0 if the block
15423f7fc6f2SAnton Altaparmakov * is sparse or there was an error.
15433f7fc6f2SAnton Altaparmakov *
15443f7fc6f2SAnton Altaparmakov * Note: This is a problem if someone tries to run bmap() on $Boot system file
15453f7fc6f2SAnton Altaparmakov * as that really is in block zero but there is nothing we can do. bmap() is
15463f7fc6f2SAnton Altaparmakov * just broken in that respect (just like it cannot distinguish sparse from
15473f7fc6f2SAnton Altaparmakov * not available or error).
15483f7fc6f2SAnton Altaparmakov */
static sector_t ntfs_bmap(struct address_space *mapping, sector_t block)
{
	ntfs_inode *ni = NTFS_I(mapping->host);
	ntfs_volume *vol = ni->vol;
	unsigned long blocksize, flags;
	unsigned char blocksize_bits, cluster_size_shift;
	unsigned delta;
	loff_t i_size;
	s64 byte_ofs, init_size;
	LCN lcn;

	ntfs_debug("Entering for mft_no 0x%lx, logical block 0x%llx.",
			ni->mft_no, (unsigned long long)block);
	/*
	 * bmap() is only meaningful for plain, non-resident, unencrypted
	 * $DATA attributes.
	 */
	if (ni->type != AT_DATA || !NInoNonResident(ni) || NInoEncrypted(ni)) {
		ntfs_error(vol->sb, "BMAP does not make sense for %s "
				"attributes, returning 0.",
				(ni->type != AT_DATA) ? "non-data" :
				(!NInoNonResident(ni) ? "resident" :
				"encrypted"));
		return 0;
	}
	/* None of these can happen. */
	BUG_ON(NInoCompressed(ni));
	BUG_ON(NInoMstProtected(ni));
	blocksize = vol->sb->s_blocksize;
	blocksize_bits = vol->sb->s_blocksize_bits;
	byte_ofs = (s64)block << blocksize_bits;
	/* Snapshot the relevant sizes under the size lock. */
	read_lock_irqsave(&ni->size_lock, flags);
	init_size = ni->initialized_size;
	i_size = i_size_read(VFS_I(ni));
	read_unlock_irqrestore(&ni->size_lock, flags);
	/*
	 * A block at or beyond the initialized size, or one straddling it,
	 * is reported as a hole unless the initialized size equals the file
	 * size.
	 */
	if (unlikely(byte_ofs >= init_size || (init_size < i_size &&
			byte_ofs + blocksize > init_size))) {
		ntfs_debug("Done (returning hole).");
		return 0;
	}
	cluster_size_shift = vol->cluster_size_bits;
	down_read(&ni->runlist.lock);
	lcn = ntfs_attr_vcn_to_lcn_nolock(ni, byte_ofs >> cluster_size_shift,
			false);
	up_read(&ni->runlist.lock);
	if (lcn == LCN_HOLE || lcn == LCN_ENOENT) {
		/* Sparse cluster, or offset out of bounds: report a hole. */
		ntfs_debug("Done (returning hole).");
		return 0;
	}
	if (unlikely(lcn < 0)) {
		/*
		 * Compare as an int so older gcc does not emit a __cmpdi2()
		 * call (unavailable in the kernel) for a long long compare;
		 * @lcn is known to be between LCN_HOLE and LCN_EIO here.
		 */
		if ((int)lcn == LCN_ENOMEM)
			ntfs_error(vol->sb, "Not enough memory to complete "
					"mapping for inode 0x%lx. "
					"Returning 0.", ni->mft_no);
		else
			ntfs_error(vol->sb, "Failed to complete mapping for "
					"inode 0x%lx. Run chkdsk. "
					"Returning 0.", ni->mft_no);
		return 0;
	}
	/*
	 * The cluster is really allocated and fulfils all our criteria.
	 * Convert it to units of the device block size; the block fits
	 * entirely inside one cluster so no length handling is needed.
	 */
	delta = byte_ofs & vol->cluster_size_mask;
	if (unlikely(sizeof(block) < sizeof(lcn))) {
		/* sector_t is narrower than LCN: detect truncation. */
		lcn = ((lcn << cluster_size_shift) + delta) >> blocksize_bits;
		block = lcn;
		if (unlikely(block != lcn)) {
			ntfs_error(vol->sb, "Physical block 0x%llx is too "
					"large to be returned, returning 0.",
					(long long)lcn);
			return 0;
		}
	} else
		block = ((lcn << cluster_size_shift) + delta) >>
				blocksize_bits;
	ntfs_debug("Done (returning block 0x%llx).", (unsigned long long)lcn);
	return block;
}
16483f7fc6f2SAnton Altaparmakov
/*
 * ntfs_normal_aops - address space operations for normal inodes and attributes
 *
 * Note these are not used for compressed or mst protected inodes and
 * attributes.
 */
const struct address_space_operations ntfs_normal_aops = {
	.read_folio	= ntfs_read_folio,	/* Fill page with data. */
#ifdef NTFS_RW
	.writepage	= ntfs_writepage,	/* Write dirty page to disk. */
	.dirty_folio	= block_dirty_folio,	/* Dirty folio via its buffers. */
#endif /* NTFS_RW */
	.bmap		= ntfs_bmap,		/* Map file block to device block. */
	.migrate_folio	= buffer_migrate_folio,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};
1666ce1bafa0SAnton Altaparmakov
/*
 * ntfs_compressed_aops - address space operations for compressed inodes
 */
const struct address_space_operations ntfs_compressed_aops = {
	.read_folio	= ntfs_read_folio,	/* Fill page with data. */
#ifdef NTFS_RW
	.writepage	= ntfs_writepage,	/* Write dirty page to disk. */
	.dirty_folio	= block_dirty_folio,	/* Dirty folio via its buffers. */
#endif /* NTFS_RW */
	.migrate_folio	= buffer_migrate_folio,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};
16801da177e4SLinus Torvalds
/*
 * ntfs_mst_aops - general address space operations for mst protected inodes
 * and attributes
 */
const struct address_space_operations ntfs_mst_aops = {
	.read_folio	= ntfs_read_folio,	/* Fill page with data. */
#ifdef NTFS_RW
	.writepage	= ntfs_writepage,	/* Write dirty page to disk. */
	.dirty_folio	= filemap_dirty_folio,	/* Dirty folio without buffers. */
#endif /* NTFS_RW */
	.migrate_folio	= buffer_migrate_folio,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};
16951da177e4SLinus Torvalds
16961da177e4SLinus Torvalds #ifdef NTFS_RW
16971da177e4SLinus Torvalds
16981da177e4SLinus Torvalds /**
16991da177e4SLinus Torvalds * mark_ntfs_record_dirty - mark an ntfs record dirty
17001da177e4SLinus Torvalds * @page: page containing the ntfs record to mark dirty
17011da177e4SLinus Torvalds * @ofs: byte offset within @page at which the ntfs record begins
17021da177e4SLinus Torvalds *
17031da177e4SLinus Torvalds * Set the buffers and the page in which the ntfs record is located dirty.
17041da177e4SLinus Torvalds *
17051da177e4SLinus Torvalds * The latter also marks the vfs inode the ntfs record belongs to dirty
17061da177e4SLinus Torvalds * (I_DIRTY_PAGES only).
17071da177e4SLinus Torvalds *
17081da177e4SLinus Torvalds * If the page does not have buffers, we create them and set them uptodate.
17091da177e4SLinus Torvalds * The page may not be locked which is why we need to handle the buffers under
17101da177e4SLinus Torvalds * the mapping->private_lock. Once the buffers are marked dirty we no longer
17111da177e4SLinus Torvalds * need the lock since try_to_free_buffers() does not free dirty buffers.
17121da177e4SLinus Torvalds */
void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) {
	struct address_space *mapping = page->mapping;
	ntfs_inode *ni = NTFS_I(mapping->host);
	struct buffer_head *bh, *head, *buffers_to_free = NULL;
	unsigned int end, bh_size, bh_ofs;

	/* Caller must supply a fully uptodate page; we only dirty, never fill. */
	BUG_ON(!PageUptodate(page));
	/* [ofs, end) is the byte range of the ntfs record within the page. */
	end = ofs + ni->itype.index.block_size;
	bh_size = VFS_I(ni)->i_sb->s_blocksize;
	spin_lock(&mapping->private_lock);
	if (unlikely(!page_has_buffers(page))) {
		/*
		 * No buffers yet.  Drop the lock to allocate them (allocation
		 * may sleep; retry=true so it cannot fail), then retake it.
		 */
		spin_unlock(&mapping->private_lock);
		bh = head = alloc_page_buffers(page, bh_size, true);
		spin_lock(&mapping->private_lock);
		if (likely(!page_has_buffers(page))) {
			struct buffer_head *tail;

			/*
			 * The page is uptodate so the new buffers are, too.
			 * Close the circular list and attach it to the page.
			 */
			do {
				set_buffer_uptodate(bh);
				tail = bh;
				bh = bh->b_this_page;
			} while (bh);
			tail->b_this_page = head;
			attach_page_private(page, head);
		} else
			/*
			 * Someone else attached buffers while we slept in the
			 * allocator; release ours after dropping the lock.
			 */
			buffers_to_free = bh;
	}
	bh = head = page_buffers(page);
	BUG_ON(!bh);
	/* Dirty only the buffers overlapping the record's [ofs, end) range. */
	do {
		bh_ofs = bh_offset(bh);
		if (bh_ofs + bh_size <= ofs)
			continue;	/* Buffer entirely before the record. */
		if (unlikely(bh_ofs >= end))
			break;		/* Past the record; list is ordered. */
		set_buffer_dirty(bh);
	} while ((bh = bh->b_this_page) != head);
	spin_unlock(&mapping->private_lock);
	/*
	 * Dirty the folio itself (also marks the vfs inode I_DIRTY_PAGES).
	 * Safe without the lock: try_to_free_buffers() skips dirty buffers.
	 */
	filemap_dirty_folio(mapping, page_folio(page));
	if (unlikely(buffers_to_free)) {
		/* Free the unused buffer list we lost the race with above. */
		do {
			bh = buffers_to_free->b_this_page;
			free_buffer_head(buffers_to_free);
			buffers_to_free = bh;
		} while (buffers_to_free);
	}
}
17601da177e4SLinus Torvalds
17611da177e4SLinus Torvalds #endif /* NTFS_RW */
1762