// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018-2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#include "internal.h"
#include <asm/unaligned.h>
#include <trace/events/erofs.h>

static int z_erofs_do_map_blocks(struct inode *inode,
				 struct erofs_map_blocks *map,
				 int flags);

int z_erofs_fill_inode(struct inode *inode)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);

	if (!erofs_sb_has_big_pcluster(sbi) &&
	    !erofs_sb_has_ztailpacking(sbi) &&
	    vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY) {
		vi->z_advise = 0;
		vi->z_algorithmtype[0] = 0;
		vi->z_algorithmtype[1] = 0;
		vi->z_logical_clusterbits = LOG_BLOCK_SIZE;
		set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
	}
	inode->i_mapping->a_ops = &z_erofs_aops;
	return 0;
}

static int z_erofs_fill_inode_lazy(struct inode *inode)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct super_block *const sb = inode->i_sb;
	int err, headnr;
	erofs_off_t pos;
	struct page *page;
	void *kaddr;
	struct z_erofs_map_header *h;

	if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags)) {
		/*
		 * paired with smp_mb() at the end of the function to ensure
		 * fields will only be observed after the bit is set.
		 */
		smp_mb();
		return 0;
	}

	if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_Z_BIT, TASK_KILLABLE))
		return -ERESTARTSYS;

	err = 0;
	if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags))
		goto out_unlock;

	DBG_BUGON(!erofs_sb_has_big_pcluster(EROFS_SB(sb)) &&
		  !erofs_sb_has_ztailpacking(EROFS_SB(sb)) &&
		  vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY);

	pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize +
		    vi->xattr_isize, 8);
	page = erofs_get_meta_page(sb, erofs_blknr(pos));
	if (IS_ERR(page)) {
		err = PTR_ERR(page);
		goto out_unlock;
	}

	kaddr = kmap_atomic(page);

	h = kaddr + erofs_blkoff(pos);
	vi->z_advise = le16_to_cpu(h->h_advise);
	vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
	vi->z_algorithmtype[1] = h->h_algorithmtype >> 4;

	headnr = 0;
	if (vi->z_algorithmtype[0] >= Z_EROFS_COMPRESSION_MAX ||
	    vi->z_algorithmtype[++headnr] >= Z_EROFS_COMPRESSION_MAX) {
		erofs_err(sb, "unknown HEAD%u format %u for nid %llu, please upgrade kernel",
			  headnr + 1, vi->z_algorithmtype[headnr], vi->nid);
		err = -EOPNOTSUPP;
		goto unmap_done;
	}

	vi->z_logical_clusterbits = LOG_BLOCK_SIZE + (h->h_clusterbits & 7);
	if (!erofs_sb_has_big_pcluster(EROFS_SB(sb)) &&
	    vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
			    Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
		erofs_err(sb, "per-inode big pcluster without sb feature for nid %llu",
			  vi->nid);
		err = -EFSCORRUPTED;
		goto unmap_done;
	}
	if (vi->datalayout == EROFS_INODE_FLAT_COMPRESSION &&
	    !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1) ^
	    !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
		erofs_err(sb, "big pcluster head1/2 of compact indexes should be consistent for nid %llu",
			  vi->nid);
		err = -EFSCORRUPTED;
		goto unmap_done;
	}
	/* h points into the mapped page, so read h_idata_size before kunmap */
	if (vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER)
		vi->z_idata_size = le16_to_cpu(h->h_idata_size);
unmap_done:
	kunmap_atomic(kaddr);
	unlock_page(page);
	put_page(page);
	if (err)
		goto out_unlock;

	if (vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER) {
		struct erofs_map_blocks map = { .mpage = NULL };

		err = z_erofs_do_map_blocks(inode, &map,
					    EROFS_GET_BLOCKS_FINDTAIL);
		if (map.mpage)
			put_page(map.mpage);

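		/*
		 * The inline pcluster must be a non-empty extent which lies
		 * entirely within a single block: e.g. with 4KiB blocks,
		 * erofs_blkoff(m_pa) == 4000 plus m_plen == 200 would cross
		 * a block boundary (4000 + 200 > 4096) and is rejected.
		 */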
		if (!map.m_plen ||
		    erofs_blkoff(map.m_pa) + map.m_plen > EROFS_BLKSIZ) {
			erofs_err(sb, "invalid tail-packing pclustersize %llu",
				  map.m_plen);
			err = -EFSCORRUPTED;
		}
		if (err < 0)
			goto out_unlock;
	}
	/* paired with smp_mb() at the beginning of the function */
	smp_mb();
	set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
out_unlock:
	clear_and_wake_up_bit(EROFS_I_BL_Z_BIT, &vi->flags);
	return err;
}

struct z_erofs_maprecorder {
	struct inode *inode;
	struct erofs_map_blocks *map;
	void *kaddr;

	unsigned long lcn;
	/* compression extent information gathered */
	u8 type, headtype;
	u16 clusterofs;
	u16 delta[2];
	erofs_blk_t pblk, compressedlcs;
	erofs_off_t nextpackoff;
};

static int z_erofs_reload_indexes(struct z_erofs_maprecorder *m,
				  erofs_blk_t eblk)
{
	struct super_block *const sb = m->inode->i_sb;
	struct erofs_map_blocks *const map = m->map;
	struct page *mpage = map->mpage;

	if (mpage) {
		if (mpage->index == eblk) {
			if (!m->kaddr)
				m->kaddr = kmap_atomic(mpage);
			return 0;
		}

		if (m->kaddr) {
			kunmap_atomic(m->kaddr);
			m->kaddr = NULL;
		}
		put_page(mpage);
	}

	mpage = erofs_get_meta_page(sb, eblk);
	if (IS_ERR(mpage)) {
		map->mpage = NULL;
		return PTR_ERR(mpage);
	}
	m->kaddr = kmap_atomic(mpage);
	unlock_page(mpage);
	map->mpage = mpage;
	return 0;
}

static int legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
					 unsigned long lcn)
{
	struct inode *const inode = m->inode;
	struct erofs_inode *const vi = EROFS_I(inode);
	const erofs_off_t ibase = iloc(EROFS_I_SB(inode), vi->nid);
	const erofs_off_t pos =
		Z_EROFS_VLE_LEGACY_INDEX_ALIGN(ibase + vi->inode_isize +
					       vi->xattr_isize) +
		lcn * sizeof(struct z_erofs_vle_decompressed_index);
	struct z_erofs_vle_decompressed_index *di;
	unsigned int advise, type;
	int err;

	err = z_erofs_reload_indexes(m, erofs_blknr(pos));
	if (err)
		return err;

	m->nextpackoff = pos + sizeof(struct z_erofs_vle_decompressed_index);
	m->lcn = lcn;
	di = m->kaddr + erofs_blkoff(pos);

	advise = le16_to_cpu(di->di_advise);
	type = (advise >> Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT) &
		((1 << Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) - 1);
	switch (type) {
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		m->clusterofs = 1 << vi->z_logical_clusterbits;
		m->delta[0] = le16_to_cpu(di->di_u.delta[0]);
		if (m->delta[0] & Z_EROFS_VLE_DI_D0_CBLKCNT) {
			if (!(vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
					      Z_EROFS_ADVISE_BIG_PCLUSTER_2))) {
				DBG_BUGON(1);
				return -EFSCORRUPTED;
			}
			m->compressedlcs = m->delta[0] &
				~Z_EROFS_VLE_DI_D0_CBLKCNT;
			m->delta[0] = 1;
		}
		m->delta[1] = le16_to_cpu(di->di_u.delta[1]);
		break;
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2:
		m->clusterofs = le16_to_cpu(di->di_clusterofs);
		m->pblk = le32_to_cpu(di->di_u.blkaddr);
		break;
	default:
		DBG_BUGON(1);
		return -EOPNOTSUPP;
	}
	m->type = type;
	return 0;
}
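/*
 * A compacted index pack encodes vcnt lclusters in (vcnt << amortizedshift)
 * bytes.  The trailing __le32 of each pack holds the starting blkaddr of
 * the pack; the remaining bits are split evenly so that each lcluster gets
 * encodebits = ((vcnt << amortizedshift) - 4) * 8 / vcnt bits, i.e. 16 bits
 * for 4B packs (vcnt == 2) and 14 bits for 2B packs (vcnt == 16), laid out
 * as a 2-bit cluster type above the low lclusterbits bits (lo).
 */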
static unsigned int decode_compactedbits(unsigned int lobits,
					 unsigned int lomask,
					 u8 *in, unsigned int pos, u8 *type)
{
	const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7);
	const unsigned int lo = v & lomask;

	*type = (v >> lobits) & 3;
	return lo;
}

static int get_compacted_la_distance(unsigned int lclusterbits,
				     unsigned int encodebits,
				     unsigned int vcnt, u8 *in, int i)
{
	const unsigned int lomask = (1 << lclusterbits) - 1;
	unsigned int lo, d1 = 0;
	u8 type;

	DBG_BUGON(i >= vcnt);

	do {
		lo = decode_compactedbits(lclusterbits, lomask,
					  in, encodebits * i, &type);

		if (type != Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
			return d1;
		++d1;
	} while (++i < vcnt);

	/* vcnt - 1 (Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) items in this pack */
	if (!(lo & Z_EROFS_VLE_DI_D0_CBLKCNT))
		d1 += lo - 1;
	return d1;
}

static int unpack_compacted_index(struct z_erofs_maprecorder *m,
				  unsigned int amortizedshift,
				  erofs_off_t pos, bool lookahead)
{
	struct erofs_inode *const vi = EROFS_I(m->inode);
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	const unsigned int lomask = (1 << lclusterbits) - 1;
	unsigned int vcnt, base, lo, encodebits, nblk, eofs;
	int i;
	u8 *in, type;
	bool big_pcluster;

	if (1 << amortizedshift == 4)
		vcnt = 2;
	else if (1 << amortizedshift == 2 && lclusterbits == 12)
		vcnt = 16;
	else
		return -EOPNOTSUPP;

	/*
	 * Note that this isn't round_up(pos, ..): even if pos is already
	 * pack-aligned, nextpackoff still advances past the current pack.
	 */
	m->nextpackoff = round_down(pos, vcnt << amortizedshift) +
			 (vcnt << amortizedshift);
	big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
	encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
	eofs = erofs_blkoff(pos);
	base = round_down(eofs, vcnt << amortizedshift);
	in = m->kaddr + base;

	i = (eofs - base) >> amortizedshift;

	lo = decode_compactedbits(lclusterbits, lomask,
				  in, encodebits * i, &type);
	m->type = type;
	if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
		m->clusterofs = 1 << lclusterbits;

		/* figure out lookahead_distance: delta[1] if needed */
		if (lookahead)
			m->delta[1] = get_compacted_la_distance(lclusterbits,
						encodebits, vcnt, in, i);
		if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT) {
			if (!big_pcluster) {
				DBG_BUGON(1);
				return -EFSCORRUPTED;
			}
			m->compressedlcs = lo & ~Z_EROFS_VLE_DI_D0_CBLKCNT;
			m->delta[0] = 1;
			return 0;
		} else if (i + 1 != (int)vcnt) {
			m->delta[0] = lo;
			return 0;
		}
		/*
		 * The last lcluster in the pack is special: its lo field
		 * stores delta[1] rather than delta[0], so derive delta[0]
		 * from the previous lcluster indirectly.
		 */
		lo = decode_compactedbits(lclusterbits, lomask,
					  in, encodebits * (i - 1), &type);
		if (type != Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
			lo = 0;
		else if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT)
			lo = 1;
		m->delta[0] = lo + 1;
		return 0;
	}
	m->clusterofs = lo;
	m->delta[0] = 0;
	/* figure out blkaddr (pblk) for HEAD lclusters */
	if (!big_pcluster) {
		nblk = 1;
		while (i > 0) {
			--i;
			lo = decode_compactedbits(lclusterbits, lomask,
						  in, encodebits * i, &type);
			if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
				i -= lo;

			if (i >= 0)
				++nblk;
		}
	} else {
		nblk = 0;
		while (i > 0) {
			--i;
			lo = decode_compactedbits(lclusterbits, lomask,
						  in, encodebits * i, &type);
			if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
				if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT) {
					--i;
					nblk += lo & ~Z_EROFS_VLE_DI_D0_CBLKCNT;
					continue;
				}
				/* bigpcluster shouldn't have plain d0 == 1 */
				if (lo <= 1) {
					DBG_BUGON(1);
					return -EFSCORRUPTED;
				}
				i -= lo - 2;
				continue;
			}
			++nblk;
		}
	}
	in += (vcnt << amortizedshift) - sizeof(__le32);
	m->pblk = le32_to_cpu(*(__le32 *)in) + nblk;
	return 0;
}
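/*
 * Compacted indexes are laid out as up to three consecutive regions: an
 * initial run of 4B packs padding ebase up to 32-byte alignment, then the
 * 2B packs (only if COMPACTED_2B is advised), then 4B packs for the rest.
 * For example, with ebase % 32 == 8, the first (32 - 8) / 4 == 6 lclusters
 * are 4B-packed before 2B packing begins.
 */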
static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
					    unsigned long lcn, bool lookahead)
{
	struct inode *const inode = m->inode;
	struct erofs_inode *const vi = EROFS_I(inode);
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	const erofs_off_t ebase = ALIGN(iloc(EROFS_I_SB(inode), vi->nid) +
					vi->inode_isize + vi->xattr_isize, 8) +
		sizeof(struct z_erofs_map_header);
	const unsigned int totalidx = DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
	unsigned int compacted_4b_initial, compacted_2b;
	unsigned int amortizedshift;
	erofs_off_t pos;
	int err;

	if (lclusterbits != 12)
		return -EOPNOTSUPP;

	if (lcn >= totalidx)
		return -EINVAL;

	m->lcn = lcn;
	/* number of leading 4B packs needed to reach 32-byte alignment */
	compacted_4b_initial = (32 - ebase % 32) / 4;
	if (compacted_4b_initial == 32 / 4)
		compacted_4b_initial = 0;

	if ((vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) &&
	    compacted_4b_initial < totalidx)
		compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
	else
		compacted_2b = 0;

	pos = ebase;
	if (lcn < compacted_4b_initial) {
		amortizedshift = 2;
		goto out;
	}
	pos += compacted_4b_initial * 4;
	lcn -= compacted_4b_initial;

	if (lcn < compacted_2b) {
		amortizedshift = 1;
		goto out;
	}
	pos += compacted_2b * 2;
	lcn -= compacted_2b;
	amortizedshift = 2;
out:
	pos += lcn * (1 << amortizedshift);
	err = z_erofs_reload_indexes(m, erofs_blknr(pos));
	if (err)
		return err;
	return unpack_compacted_index(m, amortizedshift, pos, lookahead);
}

static int z_erofs_load_cluster_from_disk(struct z_erofs_maprecorder *m,
					  unsigned int lcn, bool lookahead)
{
	const unsigned int datamode = EROFS_I(m->inode)->datalayout;

	if (datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY)
		return legacy_load_cluster_from_disk(m, lcn);

	if (datamode == EROFS_INODE_FLAT_COMPRESSION)
		return compacted_load_cluster_from_disk(m, lcn, lookahead);

	return -EINVAL;
}
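/*
 * Step backwards from a NONHEAD lcluster to the HEAD lcluster which opens
 * its extent.  delta[0] gives the distance in lclusters; chains of NONHEAD
 * lclusters are followed recursively, e.g. lcn 10 with delta[0] == 4 leads
 * to lcn 6, and if that one is NONHEAD too, the walk continues from there.
 */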
static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
				   unsigned int lookback_distance)
{
	struct erofs_inode *const vi = EROFS_I(m->inode);
	struct erofs_map_blocks *const map = m->map;
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	unsigned long lcn = m->lcn;
	int err;

	if (lcn < lookback_distance) {
		erofs_err(m->inode->i_sb,
			  "bogus lookback distance @ nid %llu", vi->nid);
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}

	/* load extent head logical cluster if needed */
	lcn -= lookback_distance;
	err = z_erofs_load_cluster_from_disk(m, lcn, false);
	if (err)
		return err;

	switch (m->type) {
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		if (!m->delta[0]) {
			erofs_err(m->inode->i_sb,
				  "invalid lookback distance 0 @ nid %llu",
				  vi->nid);
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
		return z_erofs_extent_lookback(m, m->delta[0]);
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2:
		m->headtype = m->type;
		map->m_la = (lcn << lclusterbits) | m->clusterofs;
		break;
	default:
		erofs_err(m->inode->i_sb,
			  "unknown type %u @ lcn %lu of nid %llu",
			  m->type, lcn, vi->nid);
		DBG_BUGON(1);
		return -EOPNOTSUPP;
	}
	return 0;
}

static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
					    unsigned int initial_lcn)
{
	struct erofs_inode *const vi = EROFS_I(m->inode);
	struct erofs_map_blocks *const map = m->map;
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	unsigned long lcn;
	int err;

	DBG_BUGON(m->type != Z_EROFS_VLE_CLUSTER_TYPE_PLAIN &&
		  m->type != Z_EROFS_VLE_CLUSTER_TYPE_HEAD1 &&
		  m->type != Z_EROFS_VLE_CLUSTER_TYPE_HEAD2);
	DBG_BUGON(m->type != m->headtype);

	if (m->headtype == Z_EROFS_VLE_CLUSTER_TYPE_PLAIN ||
	    ((m->headtype == Z_EROFS_VLE_CLUSTER_TYPE_HEAD1) &&
	     !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1)) ||
	    ((m->headtype == Z_EROFS_VLE_CLUSTER_TYPE_HEAD2) &&
	     !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2))) {
		map->m_plen = 1 << lclusterbits;
		return 0;
	}
	lcn = m->lcn + 1;
	if (m->compressedlcs)
		goto out;

	err = z_erofs_load_cluster_from_disk(m, lcn, false);
	if (err)
		return err;

	/*
	 * If the 1st NONHEAD lcluster was handled in the initial pass
	 * without a valid compressedlcs, it must not be a CBLKCNT lcluster;
	 * otherwise an internal implementation error has been detected.
	 *
	 * The following code can handle it properly anyway, but let's
	 * BUG_ON in the debugging mode only for developers to notice that.
	 */
	DBG_BUGON(lcn == initial_lcn &&
		  m->type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD);

	switch (m->type) {
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2:
		/*
		 * if the 1st NONHEAD lcluster is actually PLAIN or HEAD type
		 * rather than CBLKCNT, it's a 1 lcluster-sized pcluster.
		 */
		m->compressedlcs = 1;
		break;
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		if (m->delta[0] != 1)
			goto err_bonus_cblkcnt;
		if (m->compressedlcs)
			break;
		fallthrough;
	default:
		erofs_err(m->inode->i_sb,
			  "cannot find CBLKCNT @ lcn %lu of nid %llu",
			  lcn, vi->nid);
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}
out:
	map->m_plen = m->compressedlcs << lclusterbits;
	return 0;
err_bonus_cblkcnt:
	erofs_err(m->inode->i_sb,
		  "bogus CBLKCNT @ lcn %lu of nid %llu",
		  lcn, vi->nid);
	DBG_BUGON(1);
	return -EFSCORRUPTED;
}
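/*
 * Extend map->m_llen forward to the next HEAD/PLAIN lcluster (or to EOF).
 * NONHEAD lclusters advance the scan by delta[1], the lookahead distance;
 * once a following HEAD/PLAIN lcluster is reached, its clusterofs marks
 * where the current extent's decompressed data ends.
 */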
static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
{
	struct inode *inode = m->inode;
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_map_blocks *map = m->map;
	unsigned int lclusterbits = vi->z_logical_clusterbits;
	u64 lcn = m->lcn, headlcn = map->m_la >> lclusterbits;
	int err;

	do {
		/* handle the last EOF pcluster (no next HEAD lcluster) */
		if ((lcn << lclusterbits) >= inode->i_size) {
			map->m_llen = inode->i_size - map->m_la;
			return 0;
		}

		err = z_erofs_load_cluster_from_disk(m, lcn, true);
		if (err)
			return err;

		if (m->type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
			DBG_BUGON(!m->delta[1] &&
				  m->clusterofs != 1 << lclusterbits);
		} else if (m->type == Z_EROFS_VLE_CLUSTER_TYPE_PLAIN ||
			   m->type == Z_EROFS_VLE_CLUSTER_TYPE_HEAD1 ||
			   m->type == Z_EROFS_VLE_CLUSTER_TYPE_HEAD2) {
			/* go on until the next HEAD lcluster */
			if (lcn != headlcn)
				break;
			m->delta[1] = 1;
		} else {
			erofs_err(inode->i_sb, "unknown type %u @ lcn %llu of nid %llu",
				  m->type, lcn, vi->nid);
			DBG_BUGON(1);
			return -EOPNOTSUPP;
		}
		lcn += m->delta[1];
	} while (m->delta[1]);

	map->m_llen = (lcn << lclusterbits) + m->clusterofs - map->m_la;
	return 0;
}
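/*
 * Core mapping routine: load the lcluster covering map->m_la (or, for
 * FINDTAIL, the last lcluster of the file), look back to its HEAD lcluster
 * if needed, then derive the physical extent (m_pa, m_plen) and the
 * logical extent length (m_llen).
 */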
static int z_erofs_do_map_blocks(struct inode *inode,
				 struct erofs_map_blocks *map,
				 int flags)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	bool ztailpacking = vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER;
	struct z_erofs_maprecorder m = {
		.inode = inode,
		.map = map,
	};
	int err = 0;
	unsigned int lclusterbits, endoff;
	unsigned long initial_lcn;
	unsigned long long ofs, end;

	lclusterbits = vi->z_logical_clusterbits;
	ofs = flags & EROFS_GET_BLOCKS_FINDTAIL ?
		inode->i_size - 1 : map->m_la;
	initial_lcn = ofs >> lclusterbits;
	endoff = ofs & ((1 << lclusterbits) - 1);

	err = z_erofs_load_cluster_from_disk(&m, initial_lcn, false);
	if (err)
		goto unmap_out;

	if (ztailpacking && (flags & EROFS_GET_BLOCKS_FINDTAIL))
		vi->z_idataoff = m.nextpackoff;

	map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_ENCODED;
	end = (m.lcn + 1ULL) << lclusterbits;

	switch (m.type) {
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2:
		if (endoff >= m.clusterofs) {
			m.headtype = m.type;
			map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
			break;
		}
		/* m.lcn should be >= 1 if endoff < m.clusterofs */
		if (!m.lcn) {
			erofs_err(inode->i_sb,
				  "invalid logical cluster 0 at nid %llu",
				  vi->nid);
			err = -EFSCORRUPTED;
			goto unmap_out;
		}
		end = (m.lcn << lclusterbits) | m.clusterofs;
		map->m_flags |= EROFS_MAP_FULL_MAPPED;
		m.delta[0] = 1;
		fallthrough;
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		/* get the corresponding first chunk */
		err = z_erofs_extent_lookback(&m, m.delta[0]);
		if (err)
			goto unmap_out;
		break;
	default:
		erofs_err(inode->i_sb,
			  "unknown type %u @ offset %llu of nid %llu",
			  m.type, ofs, vi->nid);
		err = -EOPNOTSUPP;
		goto unmap_out;
	}

	map->m_llen = end - map->m_la;

	if (flags & EROFS_GET_BLOCKS_FINDTAIL)
		vi->z_tailextent_headlcn = m.lcn;
	if (ztailpacking && m.lcn == vi->z_tailextent_headlcn) {
		map->m_flags |= EROFS_MAP_META;
		map->m_pa = vi->z_idataoff;
		map->m_plen = vi->z_idata_size;
	} else {
		map->m_pa = blknr_to_addr(m.pblk);
		err = z_erofs_get_extent_compressedlen(&m, initial_lcn);
		if (err)
			goto unmap_out;
	}

	if (m.headtype == Z_EROFS_VLE_CLUSTER_TYPE_PLAIN)
		map->m_algorithmformat = Z_EROFS_COMPRESSION_SHIFTED;
	else if (m.headtype == Z_EROFS_VLE_CLUSTER_TYPE_HEAD2)
		map->m_algorithmformat = vi->z_algorithmtype[1];
	else
		map->m_algorithmformat = vi->z_algorithmtype[0];

	if ((flags & EROFS_GET_BLOCKS_FIEMAP) ||
	    ((flags & EROFS_GET_BLOCKS_READMORE) &&
	     map->m_algorithmformat == Z_EROFS_COMPRESSION_LZMA &&
	     map->m_llen >= EROFS_BLKSIZ)) {
		err = z_erofs_get_extent_decompressedlen(&m);
		if (!err)
			map->m_flags |= EROFS_MAP_FULL_MAPPED;
	}
unmap_out:
	if (m.kaddr)
		kunmap_atomic(m.kaddr);

	erofs_dbg("%s, m_la %llu m_pa %llu m_llen %llu m_plen %llu m_flags 0%o",
		  __func__, map->m_la, map->m_pa,
		  map->m_llen, map->m_plen, map->m_flags);

	return err;
}

int z_erofs_map_blocks_iter(struct inode *inode,
			    struct erofs_map_blocks *map,
			    int flags)
{
	int err = 0;

	trace_z_erofs_map_blocks_iter_enter(inode, map, flags);

	/* when trying to read beyond EOF, leave it unmapped */
	if (map->m_la >= inode->i_size) {
		map->m_llen = map->m_la + 1 - inode->i_size;
		map->m_la = inode->i_size;
		map->m_flags = 0;
		goto out;
	}

	err = z_erofs_fill_inode_lazy(inode);
	if (err)
		goto out;

	err = z_erofs_do_map_blocks(inode, map, flags);
out:
	trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err);

	/* aggressively BUG_ON iff CONFIG_EROFS_FS_DEBUG is on */
	DBG_BUGON(err < 0 && err != -ENOMEM);
	return err;
}
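/*
 * Report-only iomap_begin for compressed inodes (e.g. FIEMAP): extents are
 * looked up with EROFS_GET_BLOCKS_FIEMAP and reported as mapped or hole
 * ranges; no data is actually read through this path.
 */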
static int z_erofs_iomap_begin_report(struct inode *inode, loff_t offset,
				      loff_t length, unsigned int flags,
				      struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	struct erofs_map_blocks map = { .m_la = offset };

	ret = z_erofs_map_blocks_iter(inode, &map, EROFS_GET_BLOCKS_FIEMAP);
	if (map.mpage)
		put_page(map.mpage);
	if (ret < 0)
		return ret;

	iomap->bdev = inode->i_sb->s_bdev;
	iomap->offset = map.m_la;
	iomap->length = map.m_llen;
	if (map.m_flags & EROFS_MAP_MAPPED) {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = map.m_pa;
	} else {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		/*
		 * There is no strict rule on how to describe extents past
		 * EOF, but the reported extent has to cover the requested
		 * range; otherwise iomap itself would loop endlessly on
		 * post-EOF queries.
		 */
		if (iomap->offset >= inode->i_size)
			iomap->length = length + offset - map.m_la;
	}
	iomap->flags = 0;
	return 0;
}

const struct iomap_ops z_erofs_iomap_report_ops = {
	.iomap_begin = z_erofs_iomap_begin_report,
};