// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include "xattr.h"

#include <trace/events/erofs.h>

/*
 * if inode is successfully read, return its inode page (or sometimes
 * the inode payload page if it's an extended inode) in order to fill
 * inline data if possible.
 *
 * On success the returned page is locked and referenced and *ofs points
 * just past the on-disk inode (i.e. at the inline payload, if any);
 * the caller is responsible for unlock_page()/put_page().  On failure
 * an ERR_PTR() is returned and no page reference is held.
 */
static struct page *erofs_read_inode(struct inode *inode,
				     unsigned int *ofs)
{
	struct super_block *sb = inode->i_sb;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_inode *vi = EROFS_I(inode);
	/* byte offset of the on-disk inode, derived from its nid */
	const erofs_off_t inode_loc = iloc(sbi, vi->nid);

	erofs_blk_t blkaddr, nblks = 0;
	struct page *page;
	struct erofs_inode_compact *dic;
	/* "copied" is only non-NULL for an extended inode crossing a page */
	struct erofs_inode_extended *die, *copied = NULL;
	unsigned int ifmt;
	int err;

	blkaddr = erofs_blknr(inode_loc);
	*ofs = erofs_blkoff(inode_loc);

	erofs_dbg("%s, reading inode nid %llu at %u of blkaddr %u",
		  __func__, vi->nid, *ofs, blkaddr);

	page = erofs_get_meta_page(sb, blkaddr);
	if (IS_ERR(page)) {
		erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld",
			  vi->nid, PTR_ERR(page));
		return page;
	}

	/* both layouts share the leading fields, so peek via the compact view */
	dic = page_address(page) + *ofs;
	ifmt = le16_to_cpu(dic->i_format);

	vi->datalayout = erofs_inode_datalayout(ifmt);
	if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) {
		erofs_err(inode->i_sb, "unsupported datalayout %u of nid %llu",
			  vi->datalayout, vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	switch (erofs_inode_version(ifmt)) {
	case EROFS_INODE_LAYOUT_EXTENDED:
		vi->inode_isize = sizeof(struct erofs_inode_extended);
		/* check if the inode acrosses page boundary */
		if (*ofs + vi->inode_isize <= PAGE_SIZE) {
			*ofs += vi->inode_isize;
			die = (struct erofs_inode_extended *)dic;
		} else {
			/* bytes of the inode that live in the current page */
			const unsigned int gotten = PAGE_SIZE - *ofs;

			/*
			 * Stitch the two on-disk halves together into a
			 * heap copy; GFP_NOFS since we are in an fs path.
			 */
			copied = kmalloc(vi->inode_isize, GFP_NOFS);
			if (!copied) {
				err = -ENOMEM;
				goto err_out;
			}
			memcpy(copied, dic, gotten);
			/* drop the first page before grabbing the next one */
			unlock_page(page);
			put_page(page);

			page = erofs_get_meta_page(sb, blkaddr + 1);
			if (IS_ERR(page)) {
				erofs_err(sb, "failed to get inode payload page (nid: %llu), err %ld",
					  vi->nid, PTR_ERR(page));
				kfree(copied);
				return page;
			}
			/* remainder length doubles as the new in-page offset */
			*ofs = vi->inode_isize - gotten;
			memcpy((u8 *)copied + gotten, page_address(page), *ofs);
			die = copied;
		}
		vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount);

		inode->i_mode = le16_to_cpu(die->i_mode);
		switch (inode->i_mode & S_IFMT) {
		case S_IFREG:
		case S_IFDIR:
		case S_IFLNK:
			vi->raw_blkaddr = le32_to_cpu(die->i_u.raw_blkaddr);
			break;
		case S_IFCHR:
		case S_IFBLK:
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(die->i_u.rdev));
			break;
		case S_IFIFO:
		case S_IFSOCK:
			inode->i_rdev = 0;
			break;
		default:
			goto bogusimode;
		}
		i_uid_write(inode, le32_to_cpu(die->i_uid));
		i_gid_write(inode, le32_to_cpu(die->i_gid));
		set_nlink(inode, le32_to_cpu(die->i_nlink));

		/* ns timestamp */
		inode->i_mtime.tv_sec = inode->i_ctime.tv_sec =
			le64_to_cpu(die->i_ctime);
		inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec =
			le32_to_cpu(die->i_ctime_nsec);

		inode->i_size = le64_to_cpu(die->i_size);

		/* total blocks for compressed files */
		if (erofs_inode_is_data_compressed(vi->datalayout))
			nblks = le32_to_cpu(die->i_u.compressed_blocks);

		/* no-op when the inode did not cross a page boundary */
		kfree(copied);
		break;
	case EROFS_INODE_LAYOUT_COMPACT:
		/* compact inodes never cross a block boundary on disk */
		vi->inode_isize = sizeof(struct erofs_inode_compact);
		*ofs += vi->inode_isize;
		vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount);

		inode->i_mode = le16_to_cpu(dic->i_mode);
		switch (inode->i_mode & S_IFMT) {
		case S_IFREG:
		case S_IFDIR:
		case S_IFLNK:
			vi->raw_blkaddr = le32_to_cpu(dic->i_u.raw_blkaddr);
			break;
		case S_IFCHR:
		case S_IFBLK:
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(dic->i_u.rdev));
			break;
		case S_IFIFO:
		case S_IFSOCK:
			inode->i_rdev = 0;
			break;
		default:
			goto bogusimode;
		}
		/* compact inodes store 16-bit uid/gid/nlink */
		i_uid_write(inode, le16_to_cpu(dic->i_uid));
		i_gid_write(inode, le16_to_cpu(dic->i_gid));
		set_nlink(inode, le16_to_cpu(dic->i_nlink));

		/* use build time to derive all file time */
		inode->i_mtime.tv_sec = inode->i_ctime.tv_sec =
			sbi->build_time;
		inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec =
			sbi->build_time_nsec;

		inode->i_size = le32_to_cpu(dic->i_size);
		if (erofs_inode_is_data_compressed(vi->datalayout))
			nblks = le32_to_cpu(dic->i_u.compressed_blocks);
		break;
	default:
		erofs_err(inode->i_sb,
			  "unsupported on-disk inode version %u of nid %llu",
			  erofs_inode_version(ifmt), vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	if (!nblks)
		/* measure inode.i_blocks as generic filesystems */
		inode->i_blocks = roundup(inode->i_size, EROFS_BLKSIZ) >> 9;
	else
		inode->i_blocks = nblks << LOG_SECTORS_PER_BLOCK;
	return page;

bogusimode:
	erofs_err(inode->i_sb, "bogus i_mode (%o) @ nid %llu",
		  inode->i_mode, vi->nid);
	err = -EFSCORRUPTED;
err_out:
	DBG_BUGON(1);
	kfree(copied);
	unlock_page(page);
	put_page(page);
	return ERR_PTR(err);
}

/*
 * Set up the symlink inode ops; for a small FLAT_INLINE symlink, copy the
 * target out of the (locked) inode page into i_link so later lookups can
 * use the fast-symlink path without touching the page cache.
 *
 * "data" is the mapped inode page; m_pofs is the offset just past the
 * on-disk inode as produced by erofs_read_inode().
 */
static int erofs_fill_symlink(struct inode *inode, void *data,
			      unsigned int m_pofs)
{
	struct erofs_inode *vi = EROFS_I(inode);
	char *lnk;

	/* if it cannot be handled with fast symlink scheme */
	if (vi->datalayout != EROFS_INODE_FLAT_INLINE ||
	    inode->i_size >= PAGE_SIZE) {
		inode->i_op = &erofs_symlink_iops;
		return 0;
	}

	/* +1 for the NUL terminator appended below */
	lnk = kmalloc(inode->i_size + 1, GFP_KERNEL);
	if (!lnk)
		return -ENOMEM;

	/* inline data starts after the in-inode xattr area */
	m_pofs += vi->xattr_isize;
	/* inline symlink data shouldn't cross page boundary as well */
	if (m_pofs + inode->i_size > PAGE_SIZE) {
		kfree(lnk);
		erofs_err(inode->i_sb,
			  "inline data cross block boundary @ nid %llu",
			  vi->nid);
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}

	memcpy(lnk, data + m_pofs, inode->i_size);
	lnk[inode->i_size] = '\0';

	inode->i_link = lnk;
	inode->i_op = &erofs_fast_symlink_iops;
	return 0;
}

/*
 * Fill a freshly-allocated in-memory inode from its on-disk form and
 * wire up the per-filetype inode/file/address-space operations.
 * Returns 0 or a negative errno.
 */
static int erofs_fill_inode(struct inode *inode, int isdir)
{
	struct erofs_inode *vi = EROFS_I(inode);
	struct page *page;
	unsigned int ofs;
	int err = 0;

	trace_erofs_fill_inode(inode, isdir);

	/* read inode base data from disk */
	page = erofs_read_inode(inode, &ofs);
	if (IS_ERR(page))
		return PTR_ERR(page);

	/* setup the new inode */
	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &erofs_generic_iops;
		inode->i_fop = &generic_ro_fops;
		break;
	case S_IFDIR:
		inode->i_op = &erofs_dir_iops;
		inode->i_fop = &erofs_dir_fops;
		break;
	case S_IFLNK:
		/* page is still locked here; symlink data is read in place */
		err = erofs_fill_symlink(inode, page_address(page), ofs);
		if (err)
			goto out_unlock;
		inode_nohighmem(inode);
		break;
	case S_IFCHR:
	case S_IFBLK:
	case S_IFIFO:
	case S_IFSOCK:
		inode->i_op = &erofs_generic_iops;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		/* special inodes have no data mapping to set up */
		goto out_unlock;
	default:
		err = -EFSCORRUPTED;
		goto out_unlock;
	}

	if (erofs_inode_is_data_compressed(vi->datalayout)) {
		err = z_erofs_fill_inode(inode);
		goto out_unlock;
	}
	inode->i_mapping->a_ops = &erofs_raw_access_aops;

out_unlock:
	unlock_page(page);
	put_page(page);
	return err;
}

/*
 * erofs nid is 64bits, but i_ino is 'unsigned long', therefore
 * we should do more for 32-bit platform to find the right inode.
282 */ 283 static int erofs_ilookup_test_actor(struct inode *inode, void *opaque) 284 { 285 const erofs_nid_t nid = *(erofs_nid_t *)opaque; 286 287 return EROFS_I(inode)->nid == nid; 288 } 289 290 static int erofs_iget_set_actor(struct inode *inode, void *opaque) 291 { 292 const erofs_nid_t nid = *(erofs_nid_t *)opaque; 293 294 inode->i_ino = erofs_inode_hash(nid); 295 return 0; 296 } 297 298 static inline struct inode *erofs_iget_locked(struct super_block *sb, 299 erofs_nid_t nid) 300 { 301 const unsigned long hashval = erofs_inode_hash(nid); 302 303 return iget5_locked(sb, hashval, erofs_ilookup_test_actor, 304 erofs_iget_set_actor, &nid); 305 } 306 307 struct inode *erofs_iget(struct super_block *sb, 308 erofs_nid_t nid, 309 bool isdir) 310 { 311 struct inode *inode = erofs_iget_locked(sb, nid); 312 313 if (!inode) 314 return ERR_PTR(-ENOMEM); 315 316 if (inode->i_state & I_NEW) { 317 int err; 318 struct erofs_inode *vi = EROFS_I(inode); 319 320 vi->nid = nid; 321 322 err = erofs_fill_inode(inode, isdir); 323 if (!err) 324 unlock_new_inode(inode); 325 else { 326 iget_failed(inode); 327 inode = ERR_PTR(err); 328 } 329 } 330 return inode; 331 } 332 333 int erofs_getattr(const struct path *path, struct kstat *stat, 334 u32 request_mask, unsigned int query_flags) 335 { 336 struct inode *const inode = d_inode(path->dentry); 337 338 if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) 339 stat->attributes |= STATX_ATTR_COMPRESSED; 340 341 stat->attributes |= STATX_ATTR_IMMUTABLE; 342 stat->attributes_mask |= (STATX_ATTR_COMPRESSED | 343 STATX_ATTR_IMMUTABLE); 344 345 generic_fillattr(inode, stat); 346 return 0; 347 } 348 349 const struct inode_operations erofs_generic_iops = { 350 .getattr = erofs_getattr, 351 .listxattr = erofs_listxattr, 352 .get_acl = erofs_get_acl, 353 }; 354 355 const struct inode_operations erofs_symlink_iops = { 356 .get_link = page_get_link, 357 .getattr = erofs_getattr, 358 .listxattr = erofs_listxattr, 359 .get_acl = 
erofs_get_acl, 360 }; 361 362 const struct inode_operations erofs_fast_symlink_iops = { 363 .get_link = simple_get_link, 364 .getattr = erofs_getattr, 365 .listxattr = erofs_listxattr, 366 .get_acl = erofs_get_acl, 367 }; 368 369