--- node.h (6bfaf7b150f7dba04024b7b6420773c09606538c)
+++ node.h (ced2c7ea8e99b46755a270872cd5ba61c27cffad)

 /*
  * fs/f2fs/node.h
  *
  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
  *             http://www.samsung.com/
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as

 [... 286 unchanged lines hidden ...]

 	struct f2fs_node *dst_rn = F2FS_NODE(dst);
 	memcpy(&dst_rn->footer, &src_rn->footer, sizeof(struct node_footer));
 }

 static inline void fill_node_footer_blkaddr(struct page *page, block_t blkaddr)
 {
 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
 	struct f2fs_node *rn = F2FS_NODE(page);
-	size_t crc_offset = le32_to_cpu(ckpt->checksum_offset);
-	__u64 cp_ver = le64_to_cpu(ckpt->checkpoint_ver);
+	__u64 cp_ver = cur_cp_version(ckpt);

-	if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG)) {
-		__u64 crc = le32_to_cpu(*((__le32 *)
-				((unsigned char *)ckpt + crc_offset)));
-		cp_ver |= (crc << 32);
-	}
+	if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG))
+		cp_ver |= (cur_cp_crc(ckpt) << 32);
+
 	rn->footer.cp_ver = cpu_to_le64(cp_ver);
 	rn->footer.next_blkaddr = cpu_to_le32(blkaddr);
 }

 static inline bool is_recoverable_dnode(struct page *page)
 {
 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
-	size_t crc_offset = le32_to_cpu(ckpt->checksum_offset);
 	__u64 cp_ver = cur_cp_version(ckpt);

-	if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG)) {
-		__u64 crc = le32_to_cpu(*((__le32 *)
-				((unsigned char *)ckpt + crc_offset)));
-		cp_ver |= (crc << 32);
-	}
+	if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG))
+		cp_ver |= (cur_cp_crc(ckpt) << 32);
+
 	return cp_ver == cpver_of_node(page);
 }

 /*
  * f2fs assigns the following node offsets described as (num).
  * N = NIDS_PER_BLOCK
  *
  * Inode block (0)

 [... 126 unchanged lines hidden ...]