// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 * https://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include "xattr.h"

#include <trace/events/erofs.h>

/*
 * Read the on-disk inode for @inode and fill in its in-memory fields
 * (mode, uid/gid, nlink, size, timestamps, data layout, xattr size...).
 *
 * if inode is successfully read, return its inode page (or sometimes
 * the inode payload page if it's an extended inode) in order to fill
 * inline data if possible.  On return, *ofs is the offset just past
 * the inode core within that page (the caller still has to skip the
 * xattr area, see erofs_fill_symlink()).  On failure an ERR_PTR() is
 * returned and no page is held.
 */
static struct page *erofs_read_inode(struct inode *inode,
				     unsigned int *ofs)
{
	struct super_block *sb = inode->i_sb;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_inode *vi = EROFS_I(inode);
	/* byte address of the on-disk inode derived from its nid */
	const erofs_off_t inode_loc = iloc(sbi, vi->nid);

	erofs_blk_t blkaddr, nblks = 0;
	struct page *page;
	struct erofs_inode_compact *dic;
	/* "copied" is only used when an extended inode crosses a page */
	struct erofs_inode_extended *die, *copied = NULL;
	unsigned int ifmt;
	int err;

	blkaddr = erofs_blknr(inode_loc);
	*ofs = erofs_blkoff(inode_loc);

	erofs_dbg("%s, reading inode nid %llu at %u of blkaddr %u",
		  __func__, vi->nid, *ofs, blkaddr);

	/* page is returned locked; unlocked/put on every exit path below */
	page = erofs_get_meta_page(sb, blkaddr);
	if (IS_ERR(page)) {
		erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld",
			  vi->nid, PTR_ERR(page));
		return page;
	}

	/* both layouts start with the common compact header (i_format) */
	dic = page_address(page) + *ofs;
	ifmt = le16_to_cpu(dic->i_format);

	vi->datalayout = erofs_inode_datalayout(ifmt);
	if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) {
		erofs_err(inode->i_sb, "unsupported datalayout %u of nid %llu",
			  vi->datalayout, vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	switch (erofs_inode_version(ifmt)) {
	case EROFS_INODE_LAYOUT_EXTENDED:
		vi->inode_isize = sizeof(struct erofs_inode_extended);
		/* check if the inode crosses the page boundary */
		if (*ofs + vi->inode_isize <= PAGE_SIZE) {
			*ofs += vi->inode_isize;
			die = (struct erofs_inode_extended *)dic;
		} else {
			/* bytes of the inode available in the first page */
			const unsigned int gotten = PAGE_SIZE - *ofs;

			/*
			 * The inode straddles two blocks: stitch both
			 * halves together in a temporary buffer.
			 */
			copied = kmalloc(vi->inode_isize, GFP_NOFS);
			if (!copied) {
				err = -ENOMEM;
				goto err_out;
			}
			memcpy(copied, dic, gotten);
			/* done with the first page; switch to the next one */
			unlock_page(page);
			put_page(page);

			page = erofs_get_meta_page(sb, blkaddr + 1);
			if (IS_ERR(page)) {
				erofs_err(sb, "failed to get inode payload page (nid: %llu), err %ld",
					  vi->nid, PTR_ERR(page));
				kfree(copied);
				return page;
			}
			/* remainder of the inode lives at the page start */
			*ofs = vi->inode_isize - gotten;
			memcpy((u8 *)copied + gotten, page_address(page), *ofs);
			die = copied;
		}
		vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount);

		inode->i_mode = le16_to_cpu(die->i_mode);
		switch (inode->i_mode & S_IFMT) {
		case S_IFREG:
		case S_IFDIR:
		case S_IFLNK:
			vi->raw_blkaddr = le32_to_cpu(die->i_u.raw_blkaddr);
			break;
		case S_IFCHR:
		case S_IFBLK:
			/* i_u holds the encoded device number instead */
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(die->i_u.rdev));
			break;
		case S_IFIFO:
		case S_IFSOCK:
			inode->i_rdev = 0;
			break;
		default:
			goto bogusimode;
		}
		/* extended inodes carry full 32-bit uid/gid/nlink */
		i_uid_write(inode, le32_to_cpu(die->i_uid));
		i_gid_write(inode, le32_to_cpu(die->i_gid));
		set_nlink(inode, le32_to_cpu(die->i_nlink));

		/* extended inode has its own timestamp */
		inode->i_ctime.tv_sec = le64_to_cpu(die->i_ctime);
		inode->i_ctime.tv_nsec = le32_to_cpu(die->i_ctime_nsec);

		inode->i_size = le64_to_cpu(die->i_size);

		/* total blocks for compressed files */
		if (erofs_inode_is_data_compressed(vi->datalayout))
			nblks = le32_to_cpu(die->i_u.compressed_blocks);

		/* no-op unless the cross-boundary buffer was allocated */
		kfree(copied);
		break;
	case EROFS_INODE_LAYOUT_COMPACT:
		/* compact inodes never cross a block, no stitching needed */
		vi->inode_isize = sizeof(struct erofs_inode_compact);
		*ofs += vi->inode_isize;
		vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount);

		inode->i_mode = le16_to_cpu(dic->i_mode);
		switch (inode->i_mode & S_IFMT) {
		case S_IFREG:
		case S_IFDIR:
		case S_IFLNK:
			vi->raw_blkaddr = le32_to_cpu(dic->i_u.raw_blkaddr);
			break;
		case S_IFCHR:
		case S_IFBLK:
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(dic->i_u.rdev));
			break;
		case S_IFIFO:
		case S_IFSOCK:
			inode->i_rdev = 0;
			break;
		default:
			goto bogusimode;
		}
		/* compact inodes only store 16-bit uid/gid/nlink */
		i_uid_write(inode, le16_to_cpu(dic->i_uid));
		i_gid_write(inode, le16_to_cpu(dic->i_gid));
		set_nlink(inode, le16_to_cpu(dic->i_nlink));

		/* use build time for compact inodes */
		inode->i_ctime.tv_sec = sbi->build_time;
		inode->i_ctime.tv_nsec = sbi->build_time_nsec;

		inode->i_size = le32_to_cpu(dic->i_size);
		if (erofs_inode_is_data_compressed(vi->datalayout))
			nblks = le32_to_cpu(dic->i_u.compressed_blocks);
		break;
	default:
		erofs_err(inode->i_sb,
			  "unsupported on-disk inode version %u of nid %llu",
			  erofs_inode_version(ifmt), vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	/* erofs keeps a single on-disk timestamp; mirror it everywhere */
	inode->i_mtime.tv_sec = inode->i_ctime.tv_sec;
	inode->i_atime.tv_sec = inode->i_ctime.tv_sec;
	inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec;
	inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec;

	if (!nblks)
		/* measure inode.i_blocks as generic filesystems */
		inode->i_blocks = roundup(inode->i_size, EROFS_BLKSIZ) >> 9;
	else
		/* compressed files: i_blocks from on-disk block count */
		inode->i_blocks = nblks << LOG_SECTORS_PER_BLOCK;
	return page;

bogusimode:
	erofs_err(inode->i_sb, "bogus i_mode (%o) @ nid %llu",
		  inode->i_mode, vi->nid);
	err = -EFSCORRUPTED;
err_out:
	DBG_BUGON(1);
	/* copied is NULL unless the extended cross-boundary path ran */
	kfree(copied);
	unlock_page(page);
	put_page(page);
	return ERR_PTR(err);
}

/*
 * Decide between the fast (preloaded i_link) and the regular symlink
 * scheme.  @data is the mapped inode page returned by erofs_read_inode()
 * and @m_pofs the offset just past the inode core within it.
 *
 * Returns 0 on success, -ENOMEM on allocation failure or
 * -EFSCORRUPTED if the inline target would cross the block boundary.
 */
static int erofs_fill_symlink(struct inode *inode, void *data,
			      unsigned int m_pofs)
{
	struct erofs_inode *vi = EROFS_I(inode);
	char *lnk;

	/* if it cannot be handled with fast symlink scheme */
	if (vi->datalayout != EROFS_INODE_FLAT_INLINE ||
	    inode->i_size >= PAGE_SIZE) {
		inode->i_op = &erofs_symlink_iops;
		return 0;
	}

	/* +1 for the terminating NUL appended below */
	lnk = kmalloc(inode->i_size + 1, GFP_KERNEL);
	if (!lnk)
		return -ENOMEM;

	/* inline data starts after the xattr area */
	m_pofs += vi->xattr_isize;
	/* inline symlink data shouldn't cross page boundary as well */
	if (m_pofs + inode->i_size > PAGE_SIZE) {
		kfree(lnk);
		erofs_err(inode->i_sb,
			  "inline data cross block boundary @ nid %llu",
			  vi->nid);
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}

	memcpy(lnk, data + m_pofs, inode->i_size);
	lnk[inode->i_size] = '\0';

	inode->i_link = lnk;
	inode->i_op = &erofs_fast_symlink_iops;
	return 0;
}

/*
 * Read the on-disk inode and wire up the inode/file/address-space
 * operations according to the file type.  @isdir is only used for the
 * tracepoint here.  Returns 0 or a negative errno.
 */
static int erofs_fill_inode(struct inode *inode, int isdir)
{
	struct erofs_inode *vi = EROFS_I(inode);
	struct page *page;
	unsigned int ofs;
	int err = 0;

	trace_erofs_fill_inode(inode, isdir);

	/* read inode base data from disk */
	page = erofs_read_inode(inode, &ofs);
	if (IS_ERR(page))
		return PTR_ERR(page);

	/* setup the new inode */
	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &erofs_generic_iops;
		inode->i_fop = &generic_ro_fops;
		break;
	case S_IFDIR:
		inode->i_op = &erofs_dir_iops;
		inode->i_fop = &erofs_dir_fops;
		break;
	case S_IFLNK:
		err = erofs_fill_symlink(inode, page_address(page), ofs);
		if (err)
			goto out_unlock;
		inode_nohighmem(inode);
		break;
	case S_IFCHR:
	case S_IFBLK:
	case S_IFIFO:
	case S_IFSOCK:
		/* special files have no data, skip a_ops setup entirely */
		inode->i_op = &erofs_generic_iops;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		goto out_unlock;
	default:
		err = -EFSCORRUPTED;
		goto out_unlock;
	}

	if (erofs_inode_is_data_compressed(vi->datalayout)) {
		/* compressed data path installs its own a_ops */
		err = z_erofs_fill_inode(inode);
		goto out_unlock;
	}
	inode->i_mapping->a_ops = &erofs_raw_access_aops;

out_unlock:
	/* release the meta page held by erofs_read_inode() */
	unlock_page(page);
	put_page(page);
	return err;
}

/*
 * erofs nid is 64bits, but i_ino is 'unsigned long', therefore
 * we should do more for 32-bit platform to find the right inode.
283 */ 284 static int erofs_ilookup_test_actor(struct inode *inode, void *opaque) 285 { 286 const erofs_nid_t nid = *(erofs_nid_t *)opaque; 287 288 return EROFS_I(inode)->nid == nid; 289 } 290 291 static int erofs_iget_set_actor(struct inode *inode, void *opaque) 292 { 293 const erofs_nid_t nid = *(erofs_nid_t *)opaque; 294 295 inode->i_ino = erofs_inode_hash(nid); 296 return 0; 297 } 298 299 static inline struct inode *erofs_iget_locked(struct super_block *sb, 300 erofs_nid_t nid) 301 { 302 const unsigned long hashval = erofs_inode_hash(nid); 303 304 return iget5_locked(sb, hashval, erofs_ilookup_test_actor, 305 erofs_iget_set_actor, &nid); 306 } 307 308 struct inode *erofs_iget(struct super_block *sb, 309 erofs_nid_t nid, 310 bool isdir) 311 { 312 struct inode *inode = erofs_iget_locked(sb, nid); 313 314 if (!inode) 315 return ERR_PTR(-ENOMEM); 316 317 if (inode->i_state & I_NEW) { 318 int err; 319 struct erofs_inode *vi = EROFS_I(inode); 320 321 vi->nid = nid; 322 323 err = erofs_fill_inode(inode, isdir); 324 if (!err) 325 unlock_new_inode(inode); 326 else { 327 iget_failed(inode); 328 inode = ERR_PTR(err); 329 } 330 } 331 return inode; 332 } 333 334 int erofs_getattr(struct user_namespace *mnt_userns, const struct path *path, 335 struct kstat *stat, u32 request_mask, 336 unsigned int query_flags) 337 { 338 struct inode *const inode = d_inode(path->dentry); 339 340 if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) 341 stat->attributes |= STATX_ATTR_COMPRESSED; 342 343 stat->attributes |= STATX_ATTR_IMMUTABLE; 344 stat->attributes_mask |= (STATX_ATTR_COMPRESSED | 345 STATX_ATTR_IMMUTABLE); 346 347 generic_fillattr(&init_user_ns, inode, stat); 348 return 0; 349 } 350 351 const struct inode_operations erofs_generic_iops = { 352 .getattr = erofs_getattr, 353 .listxattr = erofs_listxattr, 354 .get_acl = erofs_get_acl, 355 }; 356 357 const struct inode_operations erofs_symlink_iops = { 358 .get_link = page_get_link, 359 .getattr = erofs_getattr, 360 
.listxattr = erofs_listxattr, 361 .get_acl = erofs_get_acl, 362 }; 363 364 const struct inode_operations erofs_fast_symlink_iops = { 365 .get_link = simple_get_link, 366 .getattr = erofs_getattr, 367 .listxattr = erofs_listxattr, 368 .get_acl = erofs_get_acl, 369 }; 370 371