// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018-2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#include "internal.h"
#include <asm/unaligned.h>
#include <trace/events/erofs.h>

int z_erofs_fill_inode(struct inode *inode)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);

	if (!erofs_sb_has_big_pcluster(sbi) &&
	    vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY) {
		vi->z_advise = 0;
		vi->z_algorithmtype[0] = 0;
		vi->z_algorithmtype[1] = 0;
		vi->z_logical_clusterbits = LOG_BLOCK_SIZE;
		set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
	}
	inode->i_mapping->a_ops = &z_erofs_aops;
	return 0;
}

static int z_erofs_fill_inode_lazy(struct inode *inode)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct super_block *const sb = inode->i_sb;
	int err, headnr;
	erofs_off_t pos;
	struct page *page;
	void *kaddr;
	struct z_erofs_map_header *h;

	if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags)) {
		/*
		 * paired with smp_mb() at the end of the function to ensure
		 * fields will only be observed after the bit is set.
		 */
		smp_mb();
		return 0;
	}

	if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_Z_BIT, TASK_KILLABLE))
		return -ERESTARTSYS;

	err = 0;
	if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags))
		goto out_unlock;

	DBG_BUGON(!erofs_sb_has_big_pcluster(EROFS_SB(sb)) &&
		  vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY);

	pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize +
		    vi->xattr_isize, 8);
	page = erofs_get_meta_page(sb, erofs_blknr(pos));
	if (IS_ERR(page)) {
		err = PTR_ERR(page);
		goto out_unlock;
	}

	kaddr = kmap_atomic(page);

	h = kaddr + erofs_blkoff(pos);
	vi->z_advise = le16_to_cpu(h->h_advise);
	vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
	vi->z_algorithmtype[1] = h->h_algorithmtype >> 4;

	headnr = 0;
	if (vi->z_algorithmtype[0] >= Z_EROFS_COMPRESSION_MAX ||
	    vi->z_algorithmtype[++headnr] >= Z_EROFS_COMPRESSION_MAX) {
		erofs_err(sb, "unknown HEAD%u format %u for nid %llu, please upgrade kernel",
			  headnr + 1, vi->z_algorithmtype[headnr], vi->nid);
		err = -EOPNOTSUPP;
		goto unmap_done;
	}

	vi->z_logical_clusterbits = LOG_BLOCK_SIZE + (h->h_clusterbits & 7);
	if (!erofs_sb_has_big_pcluster(EROFS_SB(sb)) &&
	    vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
			    Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
		erofs_err(sb, "per-inode big pcluster without sb feature for nid %llu",
			  vi->nid);
		err = -EFSCORRUPTED;
		goto unmap_done;
	}
	if (vi->datalayout == EROFS_INODE_FLAT_COMPRESSION &&
	    !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1) ^
	    !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
		erofs_err(sb, "big pcluster head1/2 of compact indexes should be consistent for nid %llu",
			  vi->nid);
		err = -EFSCORRUPTED;
		goto unmap_done;
	}
	/* paired with smp_mb() at the beginning of the function */
	smp_mb();
	set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
unmap_done:
	kunmap_atomic(kaddr);
	unlock_page(page);
	put_page(page);
out_unlock:
	clear_and_wake_up_bit(EROFS_I_BL_Z_BIT, &vi->flags);
	return err;
}
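
/*
 * Worked example (illustrative only; the exact on-disk header layout is
 * defined in erofs_fs.h): with h_algorithmtype == 0x10 and
 * h_clusterbits == 0, the decode above yields z_algorithmtype[0] == 0
 * (used for HEAD1 lclusters), z_algorithmtype[1] == 1 (used for HEAD2
 * lclusters) and z_logical_clusterbits == LOG_BLOCK_SIZE, i.e. one
 * logical cluster per filesystem block (4KiB when LOG_BLOCK_SIZE is 12).
 */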

struct z_erofs_maprecorder {
	struct inode *inode;
	struct erofs_map_blocks *map;
	void *kaddr;

	unsigned long lcn;
	/* compression extent information gathered */
	u8 type, headtype;
	u16 clusterofs;
	u16 delta[2];
	erofs_blk_t pblk, compressedlcs;
};

static int z_erofs_reload_indexes(struct z_erofs_maprecorder *m,
				  erofs_blk_t eblk)
{
	struct super_block *const sb = m->inode->i_sb;
	struct erofs_map_blocks *const map = m->map;
	struct page *mpage = map->mpage;

	if (mpage) {
		if (mpage->index == eblk) {
			if (!m->kaddr)
				m->kaddr = kmap_atomic(mpage);
			return 0;
		}

		if (m->kaddr) {
			kunmap_atomic(m->kaddr);
			m->kaddr = NULL;
		}
		put_page(mpage);
	}

	mpage = erofs_get_meta_page(sb, eblk);
	if (IS_ERR(mpage)) {
		map->mpage = NULL;
		return PTR_ERR(mpage);
	}
	m->kaddr = kmap_atomic(mpage);
	unlock_page(mpage);
	map->mpage = mpage;
	return 0;
}

static int legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
					 unsigned long lcn)
{
	struct inode *const inode = m->inode;
	struct erofs_inode *const vi = EROFS_I(inode);
	const erofs_off_t ibase = iloc(EROFS_I_SB(inode), vi->nid);
	const erofs_off_t pos =
		Z_EROFS_VLE_LEGACY_INDEX_ALIGN(ibase + vi->inode_isize +
					       vi->xattr_isize) +
		lcn * sizeof(struct z_erofs_vle_decompressed_index);
	struct z_erofs_vle_decompressed_index *di;
	unsigned int advise, type;
	int err;

	err = z_erofs_reload_indexes(m, erofs_blknr(pos));
	if (err)
		return err;

	m->lcn = lcn;
	di = m->kaddr + erofs_blkoff(pos);

	advise = le16_to_cpu(di->di_advise);
	type = (advise >> Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT) &
		((1 << Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) - 1);
	switch (type) {
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		m->clusterofs = 1 << vi->z_logical_clusterbits;
		m->delta[0] = le16_to_cpu(di->di_u.delta[0]);
		if (m->delta[0] & Z_EROFS_VLE_DI_D0_CBLKCNT) {
			if (!(vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
					      Z_EROFS_ADVISE_BIG_PCLUSTER_2))) {
				DBG_BUGON(1);
				return -EFSCORRUPTED;
			}
			m->compressedlcs = m->delta[0] &
				~Z_EROFS_VLE_DI_D0_CBLKCNT;
			m->delta[0] = 1;
		}
		m->delta[1] = le16_to_cpu(di->di_u.delta[1]);
		break;
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2:
		m->clusterofs = le16_to_cpu(di->di_clusterofs);
		m->pblk = le32_to_cpu(di->di_u.blkaddr);
		break;
	default:
		DBG_BUGON(1);
		return -EOPNOTSUPP;
	}
	m->type = type;
	return 0;
}

static unsigned int decode_compactedbits(unsigned int lobits,
					 unsigned int lomask,
					 u8 *in, unsigned int pos, u8 *type)
{
	const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7);
	const unsigned int lo = v & lomask;

	*type = (v >> lobits) & 3;
	return lo;
}

static int get_compacted_la_distance(unsigned int lclusterbits,
				     unsigned int encodebits,
				     unsigned int vcnt, u8 *in, int i)
{
	const unsigned int lomask = (1 << lclusterbits) - 1;
	unsigned int lo, d1 = 0;
	u8 type;

	DBG_BUGON(i >= vcnt);

	do {
		lo = decode_compactedbits(lclusterbits, lomask,
					  in, encodebits * i, &type);

		if (type != Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
			return d1;
		++d1;
	} while (++i < vcnt);

	/* vcnt - 1 (Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) item */
	if (!(lo & Z_EROFS_VLE_DI_D0_CBLKCNT))
		d1 += lo - 1;
	return d1;
}
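
/*
 * Note on the compacted packing consumed below (restating the arithmetic
 * in unpack_compacted_index(), nothing beyond it is assumed): each pack
 * holds vcnt per-lcluster bitfields followed by one __le32 base blkaddr
 * in its last 4 bytes.
 *  - compacted 4B: amortizedshift == 2, vcnt == 2, so a pack is 8 bytes
 *    and encodebits == (8 - 4) * 8 / 2 == 16 bits per lcluster;
 *  - compacted 2B: amortizedshift == 1, vcnt == 16 (lclusterbits must be
 *    12), so a pack is 32 bytes and encodebits == (32 - 4) * 8 / 16 == 14
 *    bits per lcluster (12 "lo" bits plus 2 type bits).
 */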

static int unpack_compacted_index(struct z_erofs_maprecorder *m,
				  unsigned int amortizedshift,
				  unsigned int eofs, bool lookahead)
{
	struct erofs_inode *const vi = EROFS_I(m->inode);
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	const unsigned int lomask = (1 << lclusterbits) - 1;
	unsigned int vcnt, base, lo, encodebits, nblk;
	int i;
	u8 *in, type;
	bool big_pcluster;

	if (1 << amortizedshift == 4)
		vcnt = 2;
	else if (1 << amortizedshift == 2 && lclusterbits == 12)
		vcnt = 16;
	else
		return -EOPNOTSUPP;

	big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
	encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
	base = round_down(eofs, vcnt << amortizedshift);
	in = m->kaddr + base;

	i = (eofs - base) >> amortizedshift;

	lo = decode_compactedbits(lclusterbits, lomask,
				  in, encodebits * i, &type);
	m->type = type;
	if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
		m->clusterofs = 1 << lclusterbits;

		/* figure out lookahead_distance: delta[1] if needed */
		if (lookahead)
			m->delta[1] = get_compacted_la_distance(lclusterbits,
						encodebits, vcnt, in, i);
		if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT) {
			if (!big_pcluster) {
				DBG_BUGON(1);
				return -EFSCORRUPTED;
			}
			m->compressedlcs = lo & ~Z_EROFS_VLE_DI_D0_CBLKCNT;
			m->delta[0] = 1;
			return 0;
		} else if (i + 1 != (int)vcnt) {
			m->delta[0] = lo;
			return 0;
		}
		/*
		 * The last lcluster in the pack is special: its lo field
		 * saves delta[1] rather than delta[0].  Hence, get delta[0]
		 * from the previous lcluster indirectly.
		 */
		lo = decode_compactedbits(lclusterbits, lomask,
					  in, encodebits * (i - 1), &type);
		if (type != Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
			lo = 0;
		else if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT)
			lo = 1;
		m->delta[0] = lo + 1;
		return 0;
	}
	m->clusterofs = lo;
	m->delta[0] = 0;
	/* figure out blkaddr (pblk) for HEAD lclusters */
	if (!big_pcluster) {
		nblk = 1;
		while (i > 0) {
			--i;
			lo = decode_compactedbits(lclusterbits, lomask,
						  in, encodebits * i, &type);
			if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
				i -= lo;

			if (i >= 0)
				++nblk;
		}
	} else {
		nblk = 0;
		while (i > 0) {
			--i;
			lo = decode_compactedbits(lclusterbits, lomask,
						  in, encodebits * i, &type);
			if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
				if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT) {
					--i;
					nblk += lo & ~Z_EROFS_VLE_DI_D0_CBLKCNT;
					continue;
				}
				/* bigpcluster shouldn't have plain d0 == 1 */
				if (lo <= 1) {
					DBG_BUGON(1);
					return -EFSCORRUPTED;
				}
				i -= lo - 2;
				continue;
			}
			++nblk;
		}
	}
	in += (vcnt << amortizedshift) - sizeof(__le32);
	m->pblk = le32_to_cpu(*(__le32 *)in) + nblk;
	return 0;
}
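
/*
 * Sketch of the index area walked below (restating the offset math in
 * compacted_load_cluster_from_disk(), no extra on-disk facts assumed):
 * starting at ebase, a few leading compacted-4B lclusters
 * (compacted_4b_initial) pad the stream out to a 32-byte boundary; then,
 * if COMPACTED_2B is advised, a run of 32-byte compacted-2B packs
 * (16 lclusters each); finally compacted-4B packs for the tail.  For
 * instance, with ebase % 32 == 8, compacted_4b_initial == (32 - 8) / 4
 * == 6, and with totalidx == 100, compacted_2b == rounddown(100 - 6, 16)
 * == 80, leaving 14 trailing lclusters encoded as compacted 4B.
 */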

static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
					    unsigned long lcn, bool lookahead)
{
	struct inode *const inode = m->inode;
	struct erofs_inode *const vi = EROFS_I(inode);
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	const erofs_off_t ebase = ALIGN(iloc(EROFS_I_SB(inode), vi->nid) +
					vi->inode_isize + vi->xattr_isize, 8) +
		sizeof(struct z_erofs_map_header);
	const unsigned int totalidx = DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
	unsigned int compacted_4b_initial, compacted_2b;
	unsigned int amortizedshift;
	erofs_off_t pos;
	int err;

	if (lclusterbits != 12)
		return -EOPNOTSUPP;

	if (lcn >= totalidx)
		return -EINVAL;

	m->lcn = lcn;
	/* used to align to the 32-byte (compacted_2b) boundary */
	compacted_4b_initial = (32 - ebase % 32) / 4;
	if (compacted_4b_initial == 32 / 4)
		compacted_4b_initial = 0;

	if ((vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) &&
	    compacted_4b_initial < totalidx)
		compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
	else
		compacted_2b = 0;

	pos = ebase;
	if (lcn < compacted_4b_initial) {
		amortizedshift = 2;
		goto out;
	}
	pos += compacted_4b_initial * 4;
	lcn -= compacted_4b_initial;

	if (lcn < compacted_2b) {
		amortizedshift = 1;
		goto out;
	}
	pos += compacted_2b * 2;
	lcn -= compacted_2b;
	amortizedshift = 2;
out:
	pos += lcn * (1 << amortizedshift);
	err = z_erofs_reload_indexes(m, erofs_blknr(pos));
	if (err)
		return err;
	return unpack_compacted_index(m, amortizedshift, erofs_blkoff(pos),
				      lookahead);
}

static int z_erofs_load_cluster_from_disk(struct z_erofs_maprecorder *m,
					  unsigned int lcn, bool lookahead)
{
	const unsigned int datamode = EROFS_I(m->inode)->datalayout;

	if (datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY)
		return legacy_load_cluster_from_disk(m, lcn);

	if (datamode == EROFS_INODE_FLAT_COMPRESSION)
		return compacted_load_cluster_from_disk(m, lcn, lookahead);

	return -EINVAL;
}

static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
				   unsigned int lookback_distance)
{
	struct erofs_inode *const vi = EROFS_I(m->inode);
	struct erofs_map_blocks *const map = m->map;
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	unsigned long lcn = m->lcn;
	int err;

	if (lcn < lookback_distance) {
		erofs_err(m->inode->i_sb,
			  "bogus lookback distance @ nid %llu", vi->nid);
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}

	/* load extent head logical cluster if needed */
	lcn -= lookback_distance;
	err = z_erofs_load_cluster_from_disk(m, lcn, false);
	if (err)
		return err;

	switch (m->type) {
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		if (!m->delta[0]) {
			erofs_err(m->inode->i_sb,
				  "invalid lookback distance 0 @ nid %llu",
				  vi->nid);
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
		return z_erofs_extent_lookback(m, m->delta[0]);
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2:
		m->headtype = m->type;
		map->m_la = (lcn << lclusterbits) | m->clusterofs;
		break;
	default:
		erofs_err(m->inode->i_sb,
			  "unknown type %u @ lcn %lu of nid %llu",
			  m->type, lcn, vi->nid);
		DBG_BUGON(1);
		return -EOPNOTSUPP;
	}
	return 0;
}
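
/*
 * Illustration of the lookback above with a hypothetical lcluster chain
 * (not taken from a real image): suppose lcn 4 is NONHEAD with
 * delta[0] == 2 and lcn 2 is NONHEAD with delta[0] == 2 as well; the
 * recursion first loads lcn 2, then lcn 0, which is a PLAIN/HEAD1/HEAD2
 * lcluster and therefore supplies m->headtype, m->pblk and the extent
 * start map->m_la == (0 << lclusterbits) | m->clusterofs.
 */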

static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
					    unsigned int initial_lcn)
{
	struct erofs_inode *const vi = EROFS_I(m->inode);
	struct erofs_map_blocks *const map = m->map;
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	unsigned long lcn;
	int err;

	DBG_BUGON(m->type != Z_EROFS_VLE_CLUSTER_TYPE_PLAIN &&
		  m->type != Z_EROFS_VLE_CLUSTER_TYPE_HEAD1 &&
		  m->type != Z_EROFS_VLE_CLUSTER_TYPE_HEAD2);
	DBG_BUGON(m->type != m->headtype);

	if (m->headtype == Z_EROFS_VLE_CLUSTER_TYPE_PLAIN ||
	    ((m->headtype == Z_EROFS_VLE_CLUSTER_TYPE_HEAD1) &&
	     !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1)) ||
	    ((m->headtype == Z_EROFS_VLE_CLUSTER_TYPE_HEAD2) &&
	     !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2))) {
		map->m_plen = 1 << lclusterbits;
		return 0;
	}
	lcn = m->lcn + 1;
	if (m->compressedlcs)
		goto out;

	err = z_erofs_load_cluster_from_disk(m, lcn, false);
	if (err)
		return err;

	/*
	 * If the 1st NONHEAD lcluster has already been handled initially
	 * without a valid compressedlcs, it at least mustn't be a CBLKCNT
	 * lcluster; otherwise an internal implementation error is detected.
	 *
	 * The following code can handle it properly anyway, but let's
	 * BUG_ON in debugging mode only so that developers notice it.
	 */
	DBG_BUGON(lcn == initial_lcn &&
		  m->type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD);

	switch (m->type) {
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2:
		/*
		 * if the 1st NONHEAD lcluster is actually PLAIN or HEAD type
		 * rather than CBLKCNT, it's a 1 lcluster-sized pcluster.
		 */
		m->compressedlcs = 1;
		break;
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		if (m->delta[0] != 1)
			goto err_bonus_cblkcnt;
		if (m->compressedlcs)
			break;
		fallthrough;
	default:
		erofs_err(m->inode->i_sb,
			  "cannot find CBLKCNT @ lcn %lu of nid %llu",
			  lcn, vi->nid);
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}
out:
	map->m_plen = m->compressedlcs << lclusterbits;
	return 0;
err_bonus_cblkcnt:
	erofs_err(m->inode->i_sb,
		  "bogus CBLKCNT @ lcn %lu of nid %llu",
		  lcn, vi->nid);
	DBG_BUGON(1);
	return -EFSCORRUPTED;
}

static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
{
	struct inode *inode = m->inode;
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_map_blocks *map = m->map;
	unsigned int lclusterbits = vi->z_logical_clusterbits;
	u64 lcn = m->lcn, headlcn = map->m_la >> lclusterbits;
	int err;

	do {
		/* handle the last EOF pcluster (no next HEAD lcluster) */
		if ((lcn << lclusterbits) >= inode->i_size) {
			map->m_llen = inode->i_size - map->m_la;
			return 0;
		}

		err = z_erofs_load_cluster_from_disk(m, lcn, true);
		if (err)
			return err;

		if (m->type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
			DBG_BUGON(!m->delta[1] &&
				  m->clusterofs != 1 << lclusterbits);
		} else if (m->type == Z_EROFS_VLE_CLUSTER_TYPE_PLAIN ||
			   m->type == Z_EROFS_VLE_CLUSTER_TYPE_HEAD1 ||
			   m->type == Z_EROFS_VLE_CLUSTER_TYPE_HEAD2) {
			/* go on until the next HEAD lcluster */
			if (lcn != headlcn)
				break;
			m->delta[1] = 1;
		} else {
			erofs_err(inode->i_sb, "unknown type %u @ lcn %llu of nid %llu",
				  m->type, lcn, vi->nid);
			DBG_BUGON(1);
			return -EOPNOTSUPP;
		}
		lcn += m->delta[1];
	} while (m->delta[1]);

	map->m_llen = (lcn << lclusterbits) + m->clusterofs - map->m_la;
	return 0;
}
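
/*
 * Usage sketch (hypothetical caller, not part of this file; "pos" and
 * "end" are illustrative names): the mapping routine below is typically
 * driven in a loop over logical offsets, reusing one struct
 * erofs_map_blocks so that map.mpage can be recycled by
 * z_erofs_reload_indexes(), roughly as:
 *
 *	struct erofs_map_blocks map = { .m_la = pos };
 *
 *	while (!err && map.m_la < end) {
 *		err = z_erofs_map_blocks_iter(inode, &map, 0);
 *		// on success, [map.m_la, map.m_la + map.m_llen) is the
 *		// logical extent and map.m_pa/map.m_plen the physical one
 *		map.m_la += map.m_llen;
 *	}
 *	if (map.mpage)
 *		put_page(map.mpage);
 */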

int z_erofs_map_blocks_iter(struct inode *inode,
			    struct erofs_map_blocks *map,
			    int flags)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct z_erofs_maprecorder m = {
		.inode = inode,
		.map = map,
	};
	int err = 0;
	unsigned int lclusterbits, endoff;
	unsigned long initial_lcn;
	unsigned long long ofs, end;

	trace_z_erofs_map_blocks_iter_enter(inode, map, flags);

	/* when trying to read beyond EOF, leave it unmapped */
	if (map->m_la >= inode->i_size) {
		map->m_llen = map->m_la + 1 - inode->i_size;
		map->m_la = inode->i_size;
		map->m_flags = 0;
		goto out;
	}

	err = z_erofs_fill_inode_lazy(inode);
	if (err)
		goto out;

	lclusterbits = vi->z_logical_clusterbits;
	ofs = map->m_la;
	initial_lcn = ofs >> lclusterbits;
	endoff = ofs & ((1 << lclusterbits) - 1);

	err = z_erofs_load_cluster_from_disk(&m, initial_lcn, false);
	if (err)
		goto unmap_out;

	map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_ENCODED;
	end = (m.lcn + 1ULL) << lclusterbits;

	switch (m.type) {
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2:
		if (endoff >= m.clusterofs) {
			m.headtype = m.type;
			map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
			break;
		}
		/* m.lcn should be >= 1 if endoff < m.clusterofs */
		if (!m.lcn) {
			erofs_err(inode->i_sb,
				  "invalid logical cluster 0 at nid %llu",
				  vi->nid);
			err = -EFSCORRUPTED;
			goto unmap_out;
		}
		end = (m.lcn << lclusterbits) | m.clusterofs;
		map->m_flags |= EROFS_MAP_FULL_MAPPED;
		m.delta[0] = 1;
		fallthrough;
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		/* get the corresponding first chunk */
		err = z_erofs_extent_lookback(&m, m.delta[0]);
		if (err)
			goto unmap_out;
		break;
	default:
		erofs_err(inode->i_sb,
			  "unknown type %u @ offset %llu of nid %llu",
			  m.type, ofs, vi->nid);
		err = -EOPNOTSUPP;
		goto unmap_out;
	}

	map->m_llen = end - map->m_la;
	map->m_pa = blknr_to_addr(m.pblk);

	err = z_erofs_get_extent_compressedlen(&m, initial_lcn);
	if (err)
		goto unmap_out;

	if (m.headtype == Z_EROFS_VLE_CLUSTER_TYPE_PLAIN)
		map->m_algorithmformat = Z_EROFS_COMPRESSION_SHIFTED;
	else if (m.headtype == Z_EROFS_VLE_CLUSTER_TYPE_HEAD2)
		map->m_algorithmformat = vi->z_algorithmtype[1];
	else
		map->m_algorithmformat = vi->z_algorithmtype[0];

	if ((flags & EROFS_GET_BLOCKS_FIEMAP) ||
	    ((flags & EROFS_GET_BLOCKS_READMORE) &&
	     map->m_algorithmformat == Z_EROFS_COMPRESSION_LZMA &&
	     map->m_llen >= EROFS_BLKSIZ)) {
		err = z_erofs_get_extent_decompressedlen(&m);
		if (!err)
			map->m_flags |= EROFS_MAP_FULL_MAPPED;
	}
unmap_out:
	if (m.kaddr)
		kunmap_atomic(m.kaddr);

out:
	erofs_dbg("%s, m_la %llu m_pa %llu m_llen %llu m_plen %llu m_flags 0%o",
		  __func__, map->m_la, map->m_pa,
		  map->m_llen, map->m_plen, map->m_flags);

	trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err);

	/* aggressively BUG_ON iff CONFIG_EROFS_FS_DEBUG is on */
	DBG_BUGON(err < 0 && err != -ENOMEM);
	return err;
}

static int z_erofs_iomap_begin_report(struct inode *inode, loff_t offset,
				      loff_t length, unsigned int flags,
				      struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	struct erofs_map_blocks map = { .m_la = offset };

	ret = z_erofs_map_blocks_iter(inode, &map, EROFS_GET_BLOCKS_FIEMAP);
	if (map.mpage)
		put_page(map.mpage);
	if (ret < 0)
		return ret;

	iomap->bdev = inode->i_sb->s_bdev;
	iomap->offset = map.m_la;
	iomap->length = map.m_llen;
	if (map.m_flags & EROFS_MAP_MAPPED) {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = map.m_pa;
	} else {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		/*
		 * There is no strict rule on how to describe extents past
		 * EOF, yet we need to do it as below; otherwise, iomap
		 * itself will get into an endless loop for post-EOF reads.
		 */
		if (iomap->offset >= inode->i_size)
			iomap->length = length + map.m_la - offset;
	}
	iomap->flags = 0;
	return 0;
}

const struct iomap_ops z_erofs_iomap_report_ops = {
	.iomap_begin = z_erofs_iomap_begin_report,
};
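
/*
 * Cross-reference (assumed from the fiemap wiring in fs/erofs/data.c of
 * the same kernel series rather than stated in this file): compressed
 * inodes report extents through the ops above, roughly as
 *
 *	iomap_fiemap(inode, fieinfo, start, len, &z_erofs_iomap_report_ops);
 *
 * whereas uncompressed inodes use the ordinary erofs_iomap_ops instead.
 */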