data.c: 47e4937a4a7ca4184fd282791dfee76c6799966a (old) vs. 8d8a09b093d7073465c824f74caf315c073d3875 (new)
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2017-2018 HUAWEI, Inc.
  *             http://www.huawei.com/
  * Created by Gao Xiang <gaoxiang25@huawei.com>
  */
 #include "internal.h"
 #include <linux/prefetch.h>

--- 13 unchanged lines hidden ---

         }

         bio_for_each_segment_all(bvec, bio, iter_all) {
                 struct page *page = bvec->bv_page;

                 /* page is already locked */
                 DBG_BUGON(PageUptodate(page));

-                if (unlikely(err))
+                if (err)
                         SetPageError(page);
                 else
                         SetPageUptodate(page);

                 unlock_page(page);
                 /* page could be reclaimed now */
         }
         bio_put(bio);

--- 9 unchanged lines hidden ---
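Every hunk in this diff is the same mechanical change: an unlikely() annotation is dropped from a condition. In the kernel these macros expand to GCC's __builtin_expect() and only hint the expected branch direction to the compiler; they never change what the code does, which is why the conversion is a pure cleanup. A minimal standalone C sketch of the pattern follows (it mirrors the shape of the <linux/compiler.h> macros; parse() is an invented example function):

    #include <stdio.h>

    /* Same shape as the kernel's hints: the double negation turns any
     * truthy value into 0/1 before __builtin_expect() sees it. */
    #define likely(x)   __builtin_expect(!!(x), 1)
    #define unlikely(x) __builtin_expect(!!(x), 0)

    static int parse(const char *s)
    {
        if (unlikely(!s)) {     /* hint: the error path is cold */
            fprintf(stderr, "null input\n");
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        return parse("data") ? 1 : 0;
    }

With or without the hint the program behaves identically; the hint only steers which path the compiler lays out as the straight-line fall-through.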

         const gfp_t gfp = mapping_gfp_constraint(mapping, ~__GFP_FS) |
                 (nofail ? __GFP_NOFAIL : 0);
         unsigned int io_retries = nofail ? EROFS_IO_MAX_RETRIES_NOFAIL : 0;
         struct page *page;
         int err;

 repeat:
         page = find_or_create_page(mapping, blkaddr, gfp);
-        if (unlikely(!page)) {
+        if (!page) {
                 DBG_BUGON(nofail);
                 return ERR_PTR(-ENOMEM);
         }
         DBG_BUGON(!PageLocked(page));

         if (!PageUptodate(page)) {
                 struct bio *bio;

                 bio = erofs_grab_bio(sb, blkaddr, 1, sb, read_endio, nofail);
                 if (IS_ERR(bio)) {
                         DBG_BUGON(nofail);
                         err = PTR_ERR(bio);
                         goto err_out;
                 }

                 err = bio_add_page(bio, page, PAGE_SIZE, 0);
-                if (unlikely(err != PAGE_SIZE)) {
+                if (err != PAGE_SIZE) {
                         err = -EFAULT;
                         goto err_out;
                 }

                 __submit_bio(bio, REQ_OP_READ,
                              REQ_META | (prio ? REQ_PRIO : 0));

                 lock_page(page);

                 /* this page has been truncated by others */
-                if (unlikely(page->mapping != mapping)) {
+                if (page->mapping != mapping) {
 unlock_repeat:
                         unlock_page(page);
                         put_page(page);
                         goto repeat;
                 }

                 /* more likely a read error */
-                if (unlikely(!PageUptodate(page))) {
+                if (!PageUptodate(page)) {
                         if (io_retries) {
                                 --io_retries;
                                 goto unlock_repeat;
                         }
                         err = -EIO;
                         goto err_out;
                 }
         }

--- 14 unchanged lines hidden ---
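The page grabber above reports failure through the pointer itself: ERR_PTR(-ENOMEM) on allocation failure, and IS_ERR()/PTR_ERR() to test and decode the bio it gets back. That is the standard <linux/err.h> convention, whose IS_ERR_VALUE() test is, fittingly, itself wrapped in unlikely() in the kernel. A self-contained userspace imitation of the trick, with grab_buffer() as an invented stand-in:

    #include <errno.h>
    #include <stdio.h>

    /* Imitation of <linux/err.h>: small negative errno values live in
     * the top 4095 addresses, so one pointer return value carries
     * either a valid object or an error code. */
    #define MAX_ERRNO    4095
    #define ERR_PTR(err) ((void *)(long)(err))
    #define PTR_ERR(ptr) ((long)(ptr))
    #define IS_ERR(ptr)  ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

    static char buffer[4096];

    static void *grab_buffer(int simulate_oom)
    {
        if (simulate_oom)
            return ERR_PTR(-ENOMEM);    /* the error rides in the pointer */
        return buffer;
    }

    int main(void)
    {
        void *p = grab_buffer(1);

        if (IS_ERR(p))
            printf("failed: errno %ld\n", -PTR_ERR(p));
        return 0;
    }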

         u64 offset = map->m_la;
         struct erofs_vnode *vi = EROFS_V(inode);

         trace_erofs_map_blocks_flatmode_enter(inode, map, flags);

         nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
         lastblk = nblocks - is_inode_flat_inline(inode);

-        if (unlikely(offset >= inode->i_size)) {
+        if (offset >= inode->i_size) {
                 /* leave out-of-bound access unmapped */
                 map->m_flags = 0;
                 map->m_plen = 0;
                 goto out;
         }

         /* there is no hole in flatmode */
         map->m_flags = EROFS_MAP_MAPPED;

--- 33 unchanged lines hidden ---
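In the hunk above, the flat-mode mapper counts the file's blocks with DIV_ROUND_UP(inode->i_size, PAGE_SIZE), the kernel's usual ceiling division from <linux/kernel.h>, and treats any offset at or past i_size as unmapped. A quick standalone check of that arithmetic (the file size below is made up):

    #include <stdio.h>

    /* Same rounding helper the kernel defines in <linux/kernel.h>. */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned long long i_size = 10000;  /* hypothetical inode size */
        unsigned long page_size = 4096;

        /* 10000 bytes occupy ceil(10000 / 4096) = 3 pages */
        printf("nblocks = %llu\n", DIV_ROUND_UP(i_size, page_size));
        return 0;
    }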

 err_out:
         trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
         return err;
 }

 int erofs_map_blocks(struct inode *inode,
                      struct erofs_map_blocks *map, int flags)
 {
-        if (unlikely(is_inode_layout_compression(inode))) {
+        if (is_inode_layout_compression(inode)) {
                 int err = z_erofs_map_blocks_iter(inode, map, flags);

                 if (map->mpage) {
                         put_page(map->mpage);
                         map->mpage = NULL;
                 }
                 return err;
         }

--- 31 unchanged lines hidden ---
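On the compressed path above, z_erofs_map_blocks_iter() may leave a cached metadata page behind in map->mpage, so the dispatcher drops that reference with put_page() and clears the pointer before returning. A hypothetical userspace analogue of the put-and-NULL pattern (struct buf and buf_put() are invented):

    #include <stdio.h>
    #include <stdlib.h>

    /* Invented refcounted object standing in for a cached page. */
    struct buf {
        int refcount;
    };

    static void buf_put(struct buf *b)
    {
        if (--b->refcount == 0) {
            printf("released\n");
            free(b);
        }
    }

    int main(void)
    {
        struct buf *cached = malloc(sizeof(*cached));

        if (!cached)
            return 1;
        cached->refcount = 1;

        /* done with the cache slot: drop the reference, then clear
         * the pointer so no later path can put it a second time */
        if (cached) {
            buf_put(cached);
            cached = NULL;
        }
        return 0;
    }

Clearing the pointer right after the put is what makes the pattern safe to repeat on every exit path.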

         if (!bio) {
                 struct erofs_map_blocks map = {
                         .m_la = blknr_to_addr(current_block),
                 };
                 erofs_blk_t blknr;
                 unsigned int blkoff;

                 err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
-                if (unlikely(err))
+                if (err)
                         goto err_out;

                 /* zero out the holed page */
-                if (unlikely(!(map.m_flags & EROFS_MAP_MAPPED))) {
+                if (!(map.m_flags & EROFS_MAP_MAPPED)) {
                         zero_user_segment(page, 0, PAGE_SIZE);
                         SetPageUptodate(page);

                         /* imply err = 0, see erofs_map_blocks */
                         goto has_updated;
                 }

                 /* for RAW access mode, m_plen must be equal to m_llen */

--- 76 unchanged lines hidden ---
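A block the mapper reports as unmapped is a hole: the hunk above satisfies it by zero-filling the page with zero_user_segment() and marking it uptodate, with no device I/O at all. A hypothetical userspace analogue of that branch (the MAPPED flag, struct map_result, and read_block() are all invented):

    #include <stdio.h>
    #include <string.h>

    #define MAPPED 0x1    /* invented stand-in for EROFS_MAP_MAPPED */

    struct map_result {
        unsigned int flags;
    };

    /* Fill one block's worth of data; holes read back as zeros. */
    static void read_block(const struct map_result *map, char *buf, size_t len)
    {
        if (!(map->flags & MAPPED)) {
            memset(buf, 0, len);    /* no I/O needed for a hole */
            return;
        }
        /* ...a mapped block would be queued for a real read here... */
    }

    int main(void)
    {
        struct map_result hole = { .flags = 0 };
        char buf[8] = "garbage";

        read_block(&hole, buf, sizeof(buf));
        printf("first byte after a hole read: %d\n", buf[0]);    /* 0 */
        return 0;
    }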

 has_updated:
         unlock_page(page);

         /* if updated manually, contiguous pages have a gap */
         if (bio)
 submit_bio_out:
                 __submit_bio(bio, REQ_OP_READ, 0);

-        return unlikely(err) ? ERR_PTR(err) : NULL;
+        return err ? ERR_PTR(err) : NULL;
 }

 /*
  * since we don't have write or truncate flows, no inode
  * locking needs to be held at the moment.
  */
 static int erofs_raw_access_readpage(struct file *file, struct page *page)
 {

--- 45 unchanged lines hidden ---
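The readpage entry point above follows the usual address_space contract that read_endio() at the top of this file completes: the VFS hands the page over locked, and the filesystem unlocks it once the contents (or an error) have been recorded. A userspace analogue of that locked handoff, with a pthread mutex standing in for the page lock (struct toy_page and toy_readpage() are invented):

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    /* Toy "page": locked by the caller, unlocked by the reader once
     * its contents are settled, mirroring lock_page()/unlock_page(). */
    struct toy_page {
        pthread_mutex_t lock;
        char data[4096];
        int uptodate;
    };

    static int toy_readpage(struct toy_page *pg)
    {
        /* the caller already holds pg->lock */
        memset(pg->data, 0xab, sizeof(pg->data));    /* fake disk fill */
        pg->uptodate = 1;
        pthread_mutex_unlock(&pg->lock);    /* "page" may be reclaimed now */
        return 0;
    }

    int main(void)
    {
        struct toy_page pg = { .lock = PTHREAD_MUTEX_INITIALIZER };

        pthread_mutex_lock(&pg.lock);
        toy_readpage(&pg);
        printf("uptodate=%d\n", pg.uptodate);
        return 0;
    }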

                 }

                 /* pages could still be locked */
                 put_page(page);
         }
         DBG_BUGON(!list_empty(pages));

         /* the rare case (end in gaps) */
-        if (unlikely(bio))
+        if (bio)
                 __submit_bio(bio, REQ_OP_READ, 0);
         return 0;
 }

 static int erofs_get_block(struct inode *inode, sector_t iblock,
                            struct buffer_head *bh, int create)
 {
         struct erofs_map_blocks map = {

--- 35 unchanged lines hidden ---
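The final hunk's if (bio) guards the tail submission: the readpages loop accumulates physically contiguous blocks into a single bio and submits only when it hits a discontinuity, so a non-empty bio can survive the loop and must be flushed afterwards, which is the "rare case (end in gaps)" the comment refers to. A hypothetical userspace sketch of that batching shape (struct req and flush() are invented):

    #include <stdio.h>

    struct req {
        int start;    /* first block in the current batch */
        int nr;       /* number of contiguous blocks batched */
    };

    static void flush(struct req *r)
    {
        if (r->nr)
            printf("submit blocks %d..%d\n", r->start, r->start + r->nr - 1);
        r->nr = 0;
    }

    int main(void)
    {
        int blocks[] = { 3, 4, 5, 9, 10 };
        struct req r = { 0, 0 };

        for (unsigned int i = 0; i < sizeof(blocks) / sizeof(*blocks); i++) {
            if (r.nr && blocks[i] != r.start + r.nr)
                flush(&r);    /* gap: submit what was batched so far */
            if (!r.nr)
                r.start = blocks[i];
            r.nr++;
        }
        flush(&r);    /* end in gaps: the leftover batch still goes out */
        return 0;
    }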