// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include "xattr.h"

#include <trace/events/erofs.h>

static void *erofs_read_inode(struct erofs_buf *buf,
			      struct inode *inode, unsigned int *ofs)
{
	struct super_block *sb = inode->i_sb;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_inode *vi = EROFS_I(inode);
	const erofs_off_t inode_loc = erofs_iloc(inode);

	erofs_blk_t blkaddr, nblks = 0;
	void *kaddr;
	struct erofs_inode_compact *dic;
	struct erofs_inode_extended *die, *copied = NULL;
	unsigned int ifmt;
	int err;

	blkaddr = erofs_blknr(sb, inode_loc);
	*ofs = erofs_blkoff(sb, inode_loc);

	kaddr = erofs_read_metabuf(buf, sb, blkaddr, EROFS_KMAP);
	if (IS_ERR(kaddr)) {
		erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld",
			  vi->nid, PTR_ERR(kaddr));
		return kaddr;
	}

	dic = kaddr + *ofs;
	ifmt = le16_to_cpu(dic->i_format);

	if (ifmt & ~EROFS_I_ALL) {
		erofs_err(inode->i_sb, "unsupported i_format %u of nid %llu",
			  ifmt, vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	vi->datalayout = erofs_inode_datalayout(ifmt);
	if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) {
		erofs_err(inode->i_sb, "unsupported datalayout %u of nid %llu",
			  vi->datalayout, vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	switch (erofs_inode_version(ifmt)) {
	case EROFS_INODE_LAYOUT_EXTENDED:
		vi->inode_isize = sizeof(struct erofs_inode_extended);
		/* check if the extended inode crosses a block boundary */
		if (*ofs + vi->inode_isize <= sb->s_blocksize) {
			*ofs += vi->inode_isize;
			die = (struct erofs_inode_extended *)dic;
		} else {
			const unsigned int gotten = sb->s_blocksize - *ofs;

			copied = kmalloc(vi->inode_isize, GFP_NOFS);
			if (!copied) {
				err = -ENOMEM;
				goto err_out;
			}
			memcpy(copied, dic, gotten);
			kaddr = erofs_read_metabuf(buf, sb, blkaddr + 1,
						   EROFS_KMAP);
			if (IS_ERR(kaddr)) {
				erofs_err(sb, "failed to get inode payload block (nid: %llu), err %ld",
					  vi->nid, PTR_ERR(kaddr));
				kfree(copied);
				return kaddr;
			}
			*ofs = vi->inode_isize - gotten;
			memcpy((u8 *)copied + gotten, kaddr, *ofs);
			die = copied;
		}
		vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount);

		inode->i_mode = le16_to_cpu(die->i_mode);
		switch (inode->i_mode & S_IFMT) {
		case S_IFREG:
		case S_IFDIR:
		case S_IFLNK:
			vi->raw_blkaddr = le32_to_cpu(die->i_u.raw_blkaddr);
			break;
		case S_IFCHR:
		case S_IFBLK:
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(die->i_u.rdev));
			break;
		case S_IFIFO:
		case S_IFSOCK:
			inode->i_rdev = 0;
			break;
		default:
			goto bogusimode;
		}
		i_uid_write(inode, le32_to_cpu(die->i_uid));
		i_gid_write(inode, le32_to_cpu(die->i_gid));
		set_nlink(inode, le32_to_cpu(die->i_nlink));

		/* extended inode has its own timestamp */
		inode->i_ctime.tv_sec = le64_to_cpu(die->i_mtime);
		inode->i_ctime.tv_nsec = le32_to_cpu(die->i_mtime_nsec);

		inode->i_size = le64_to_cpu(die->i_size);

		/* total blocks for compressed files */
		if (erofs_inode_is_data_compressed(vi->datalayout))
			nblks = le32_to_cpu(die->i_u.compressed_blocks);
		else if (vi->datalayout == EROFS_INODE_CHUNK_BASED)
			/* fill chunked inode summary info */
			vi->chunkformat = le16_to_cpu(die->i_u.c.format);
		kfree(copied);
		copied = NULL;
		break;
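	/*
	 * Compact (32-byte) on-disk inodes only carry 16-bit uid/gid/nlink
	 * fields, a 32-bit i_size and no timestamps of their own, so the
	 * superblock build time is used as a fallback below.
	 */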
	case EROFS_INODE_LAYOUT_COMPACT:
		vi->inode_isize = sizeof(struct erofs_inode_compact);
		*ofs += vi->inode_isize;
		vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount);

		inode->i_mode = le16_to_cpu(dic->i_mode);
		switch (inode->i_mode & S_IFMT) {
		case S_IFREG:
		case S_IFDIR:
		case S_IFLNK:
			vi->raw_blkaddr = le32_to_cpu(dic->i_u.raw_blkaddr);
			break;
		case S_IFCHR:
		case S_IFBLK:
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(dic->i_u.rdev));
			break;
		case S_IFIFO:
		case S_IFSOCK:
			inode->i_rdev = 0;
			break;
		default:
			goto bogusimode;
		}
		i_uid_write(inode, le16_to_cpu(dic->i_uid));
		i_gid_write(inode, le16_to_cpu(dic->i_gid));
		set_nlink(inode, le16_to_cpu(dic->i_nlink));

		/* use build time for compact inodes */
		inode->i_ctime.tv_sec = sbi->build_time;
		inode->i_ctime.tv_nsec = sbi->build_time_nsec;

		inode->i_size = le32_to_cpu(dic->i_size);
		if (erofs_inode_is_data_compressed(vi->datalayout))
			nblks = le32_to_cpu(dic->i_u.compressed_blocks);
		else if (vi->datalayout == EROFS_INODE_CHUNK_BASED)
			vi->chunkformat = le16_to_cpu(dic->i_u.c.format);
		break;
	default:
		erofs_err(inode->i_sb,
			  "unsupported on-disk inode version %u of nid %llu",
			  erofs_inode_version(ifmt), vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	if (vi->datalayout == EROFS_INODE_CHUNK_BASED) {
		if (vi->chunkformat & ~EROFS_CHUNK_FORMAT_ALL) {
			erofs_err(inode->i_sb,
				  "unsupported chunk format %x of nid %llu",
				  vi->chunkformat, vi->nid);
			err = -EOPNOTSUPP;
			goto err_out;
		}
		vi->chunkbits = sb->s_blocksize_bits +
			(vi->chunkformat & EROFS_CHUNK_FORMAT_BLKBITS_MASK);
	}
	inode->i_mtime.tv_sec = inode->i_ctime.tv_sec;
	inode->i_atime.tv_sec = inode->i_ctime.tv_sec;
	inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec;
	inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec;

	inode->i_flags &= ~S_DAX;
	if (test_opt(&sbi->opt, DAX_ALWAYS) && S_ISREG(inode->i_mode) &&
	    (vi->datalayout == EROFS_INODE_FLAT_PLAIN ||
	     vi->datalayout == EROFS_INODE_CHUNK_BASED))
		inode->i_flags |= S_DAX;

	if (!nblks)
		/* measure inode.i_blocks as generic filesystems */
		inode->i_blocks = round_up(inode->i_size, sb->s_blocksize) >> 9;
	else
		inode->i_blocks = nblks << (sb->s_blocksize_bits - 9);
	return kaddr;

bogusimode:
	erofs_err(inode->i_sb, "bogus i_mode (%o) @ nid %llu",
		  inode->i_mode, vi->nid);
	err = -EFSCORRUPTED;
err_out:
	DBG_BUGON(1);
	kfree(copied);
	erofs_put_metabuf(buf);
	return ERR_PTR(err);
}
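/*
 * Fast symlinks keep the target string inline in the same metadata block
 * as the inode base and xattrs: it is copied out once here and published
 * via ->i_link so that simple_get_link() can return it without further
 * I/O.  Targets that aren't inlined (or don't fit in one block) go
 * through page_get_link() instead.
 */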
static int erofs_fill_symlink(struct inode *inode, void *kaddr,
			      unsigned int m_pofs)
{
	struct erofs_inode *vi = EROFS_I(inode);
	unsigned int bsz = i_blocksize(inode);
	char *lnk;

	/* if it cannot be handled with fast symlink scheme */
	if (vi->datalayout != EROFS_INODE_FLAT_INLINE ||
	    inode->i_size >= bsz || inode->i_size < 0) {
		inode->i_op = &erofs_symlink_iops;
		return 0;
	}

	lnk = kmalloc(inode->i_size + 1, GFP_KERNEL);
	if (!lnk)
		return -ENOMEM;

	m_pofs += vi->xattr_isize;
	/* inline symlink data shouldn't cross a block boundary */
	if (m_pofs + inode->i_size > bsz) {
		kfree(lnk);
		erofs_err(inode->i_sb,
			  "inline data crosses block boundary @ nid %llu",
			  vi->nid);
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}
	memcpy(lnk, kaddr + m_pofs, inode->i_size);
	lnk[inode->i_size] = '\0';

	inode->i_link = lnk;
	inode->i_op = &erofs_fast_symlink_iops;
	return 0;
}

static int erofs_fill_inode(struct inode *inode)
{
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	void *kaddr;
	unsigned int ofs;
	int err = 0;

	trace_erofs_fill_inode(inode);

	/* read inode base data from disk */
	kaddr = erofs_read_inode(&buf, inode, &ofs);
	if (IS_ERR(kaddr))
		return PTR_ERR(kaddr);

	/* set up the new inode */
	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &erofs_generic_iops;
		if (erofs_inode_is_data_compressed(vi->datalayout))
			inode->i_fop = &generic_ro_fops;
		else
			inode->i_fop = &erofs_file_fops;
		break;
	case S_IFDIR:
		inode->i_op = &erofs_dir_iops;
		inode->i_fop = &erofs_dir_fops;
		inode_nohighmem(inode);
		break;
	case S_IFLNK:
		err = erofs_fill_symlink(inode, kaddr, ofs);
		if (err)
			goto out_unlock;
		inode_nohighmem(inode);
		break;
	case S_IFCHR:
	case S_IFBLK:
	case S_IFIFO:
	case S_IFSOCK:
		inode->i_op = &erofs_generic_iops;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		goto out_unlock;
	default:
		err = -EFSCORRUPTED;
		goto out_unlock;
	}

	if (erofs_inode_is_data_compressed(vi->datalayout)) {
#ifdef CONFIG_EROFS_FS_ZIP
		if (!erofs_is_fscache_mode(inode->i_sb) &&
		    inode->i_sb->s_blocksize_bits == PAGE_SHIFT) {
			inode->i_mapping->a_ops = &z_erofs_aops;
			err = 0;
			goto out_unlock;
		}
#endif
		err = -EOPNOTSUPP;
		goto out_unlock;
	}
	inode->i_mapping->a_ops = &erofs_raw_access_aops;
	mapping_set_large_folios(inode->i_mapping);
#ifdef CONFIG_EROFS_FS_ONDEMAND
	if (erofs_is_fscache_mode(inode->i_sb))
		inode->i_mapping->a_ops = &erofs_fscache_access_aops;
#endif

out_unlock:
	erofs_put_metabuf(&buf);
	return err;
}

/*
 * ino_t is 32 bits on a 32-bit arch. We have to squash the 64-bit value down
 * so that it will fit.
 */
static ino_t erofs_squash_ino(erofs_nid_t nid)
{
	ino_t ino = (ino_t)nid;

	if (sizeof(ino_t) < sizeof(erofs_nid_t))
		ino ^= nid >> (sizeof(erofs_nid_t) - sizeof(ino_t)) * 8;
	return ino;
}

static int erofs_iget5_eq(struct inode *inode, void *opaque)
{
	return EROFS_I(inode)->nid == *(erofs_nid_t *)opaque;
}

static int erofs_iget5_set(struct inode *inode, void *opaque)
{
	const erofs_nid_t nid = *(erofs_nid_t *)opaque;

	inode->i_ino = erofs_squash_ino(nid);
	EROFS_I(inode)->nid = nid;
	return 0;
}
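/*
 * Look up (or instantiate) the in-memory inode for @nid.  The squashed ino
 * may alias on 32-bit platforms, so iget5_locked() is used with a full nid
 * comparison instead of a plain iget_locked().
 */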
struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid)
{
	struct inode *inode;

	inode = iget5_locked(sb, erofs_squash_ino(nid), erofs_iget5_eq,
			     erofs_iget5_set, &nid);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (inode->i_state & I_NEW) {
		int err = erofs_fill_inode(inode);

		if (err) {
			iget_failed(inode);
			return ERR_PTR(err);
		}
		unlock_new_inode(inode);
	}
	return inode;
}

int erofs_getattr(struct mnt_idmap *idmap, const struct path *path,
		  struct kstat *stat, u32 request_mask,
		  unsigned int query_flags)
{
	struct inode *const inode = d_inode(path->dentry);

	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout))
		stat->attributes |= STATX_ATTR_COMPRESSED;

	stat->attributes |= STATX_ATTR_IMMUTABLE;
	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
				  STATX_ATTR_IMMUTABLE);

	generic_fillattr(idmap, inode, stat);
	return 0;
}

const struct inode_operations erofs_generic_iops = {
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_inode_acl = erofs_get_acl,
	.fiemap = erofs_fiemap,
};

const struct inode_operations erofs_symlink_iops = {
	.get_link = page_get_link,
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_inode_acl = erofs_get_acl,
};

const struct inode_operations erofs_fast_symlink_iops = {
	.get_link = simple_get_link,
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_inode_acl = erofs_get_acl,
};