// SPDX-License-Identifier: GPL-2.0+
/*
 * direct.c - NILFS direct block pointer.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Koji Sato.
 */

#include <linux/errno.h>
#include "nilfs.h"
#include "page.h"
#include "direct.h"
#include "alloc.h"
#include "dat.h"

static inline __le64 *nilfs_direct_dptrs(const struct nilfs_bmap *direct)
{
	return (__le64 *)
		((struct nilfs_direct_node *)direct->b_u.u_data + 1);
}

static inline __u64
nilfs_direct_get_ptr(const struct nilfs_bmap *direct, __u64 key)
{
	return le64_to_cpu(*(nilfs_direct_dptrs(direct) + key));
}

static inline void nilfs_direct_set_ptr(struct nilfs_bmap *direct,
					__u64 key, __u64 ptr)
{
	*(nilfs_direct_dptrs(direct) + key) = cpu_to_le64(ptr);
}

static int nilfs_direct_lookup(const struct nilfs_bmap *direct,
			       __u64 key, int level, __u64 *ptrp)
{
	__u64 ptr;

	if (key > NILFS_DIRECT_KEY_MAX || level != 1)
		return -ENOENT;
	ptr = nilfs_direct_get_ptr(direct, key);
	if (ptr == NILFS_BMAP_INVALID_PTR)
		return -ENOENT;

	*ptrp = ptr;
	return 0;
}

static int nilfs_direct_lookup_contig(const struct nilfs_bmap *direct,
				      __u64 key, __u64 *ptrp,
				      unsigned int maxblocks)
{
	struct inode *dat = NULL;
	__u64 ptr, ptr2;
	sector_t blocknr;
	int ret, cnt;

	if (key > NILFS_DIRECT_KEY_MAX)
		return -ENOENT;
	ptr = nilfs_direct_get_ptr(direct, key);
	if (ptr == NILFS_BMAP_INVALID_PTR)
		return -ENOENT;

	if (NILFS_BMAP_USE_VBN(direct)) {
		dat = nilfs_bmap_get_dat(direct);
		ret = nilfs_dat_translate(dat, ptr, &blocknr);
		if (ret < 0)
			return ret;
		ptr = blocknr;
	}

	maxblocks = min_t(unsigned int, maxblocks,
			  NILFS_DIRECT_KEY_MAX - key + 1);
	for (cnt = 1; cnt < maxblocks &&
		     (ptr2 = nilfs_direct_get_ptr(direct, key + cnt)) !=
		     NILFS_BMAP_INVALID_PTR;
	     cnt++) {
		if (dat) {
			ret = nilfs_dat_translate(dat, ptr2, &blocknr);
			if (ret < 0)
				return ret;
			ptr2 = blocknr;
		}
		if (ptr2 != ptr + cnt)
			break;
	}
	*ptrp = ptr;
	return cnt;
}

static __u64
nilfs_direct_find_target_v(const struct nilfs_bmap *direct, __u64 key)
{
	__u64 ptr;

	ptr = nilfs_bmap_find_target_seq(direct, key);
	if (ptr != NILFS_BMAP_INVALID_PTR)
		/* sequential access */
		return ptr;

	/* block group */
	return nilfs_bmap_find_target_in_group(direct);
}

static int nilfs_direct_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
{
	union nilfs_bmap_ptr_req req;
	struct inode *dat = NULL;
	struct buffer_head *bh;
	int ret;

	if (key > NILFS_DIRECT_KEY_MAX)
		return -ENOENT;
	if (nilfs_direct_get_ptr(bmap, key) != NILFS_BMAP_INVALID_PTR)
		return -EEXIST;

	if (NILFS_BMAP_USE_VBN(bmap)) {
		req.bpr_ptr = nilfs_direct_find_target_v(bmap, key);
		dat = nilfs_bmap_get_dat(bmap);
	}
	ret = nilfs_bmap_prepare_alloc_ptr(bmap, &req, dat);
	if (!ret) {
		/* ptr must be a pointer to a buffer head. */
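		/*
		 * The buffer for the new block is flagged volatile here;
		 * nilfs_direct_propagate() uses this flag to skip the full
		 * DAT update for blocks that were just allocated and only
		 * mark their DAT entry dirty instead.
		 */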
		bh = (struct buffer_head *)((unsigned long)ptr);
		set_buffer_nilfs_volatile(bh);

		nilfs_bmap_commit_alloc_ptr(bmap, &req, dat);
		nilfs_direct_set_ptr(bmap, key, req.bpr_ptr);

		if (!nilfs_bmap_dirty(bmap))
			nilfs_bmap_set_dirty(bmap);

		if (NILFS_BMAP_USE_VBN(bmap))
			nilfs_bmap_set_target_v(bmap, key, req.bpr_ptr);

		nilfs_inode_add_blocks(bmap->b_inode, 1);
	}
	return ret;
}

static int nilfs_direct_delete(struct nilfs_bmap *bmap, __u64 key)
{
	union nilfs_bmap_ptr_req req;
	struct inode *dat;
	int ret;

	if (key > NILFS_DIRECT_KEY_MAX ||
	    nilfs_direct_get_ptr(bmap, key) == NILFS_BMAP_INVALID_PTR)
		return -ENOENT;

	dat = NILFS_BMAP_USE_VBN(bmap) ? nilfs_bmap_get_dat(bmap) : NULL;
	req.bpr_ptr = nilfs_direct_get_ptr(bmap, key);

	ret = nilfs_bmap_prepare_end_ptr(bmap, &req, dat);
	if (!ret) {
		nilfs_bmap_commit_end_ptr(bmap, &req, dat);
		nilfs_direct_set_ptr(bmap, key, NILFS_BMAP_INVALID_PTR);
		nilfs_inode_sub_blocks(bmap->b_inode, 1);
	}
	return ret;
}

static int nilfs_direct_seek_key(const struct nilfs_bmap *direct, __u64 start,
				 __u64 *keyp)
{
	__u64 key;

	for (key = start; key <= NILFS_DIRECT_KEY_MAX; key++) {
		if (nilfs_direct_get_ptr(direct, key) !=
		    NILFS_BMAP_INVALID_PTR) {
			*keyp = key;
			return 0;
		}
	}
	return -ENOENT;
}

static int nilfs_direct_last_key(const struct nilfs_bmap *direct, __u64 *keyp)
{
	__u64 key, lastkey;

	lastkey = NILFS_DIRECT_KEY_MAX + 1;
	for (key = NILFS_DIRECT_KEY_MIN; key <= NILFS_DIRECT_KEY_MAX; key++)
		if (nilfs_direct_get_ptr(direct, key) !=
		    NILFS_BMAP_INVALID_PTR)
			lastkey = key;

	if (lastkey == NILFS_DIRECT_KEY_MAX + 1)
		return -ENOENT;

	*keyp = lastkey;

	return 0;
}

static int nilfs_direct_check_insert(const struct nilfs_bmap *bmap, __u64 key)
{
	return key > NILFS_DIRECT_KEY_MAX;
}

static int nilfs_direct_gather_data(struct nilfs_bmap *direct,
				    __u64 *keys, __u64 *ptrs, int nitems)
{
	__u64 key;
	__u64 ptr;
	int n;

	if (nitems > NILFS_DIRECT_NBLOCKS)
		nitems = NILFS_DIRECT_NBLOCKS;
	n = 0;
	for (key = 0; key < nitems; key++) {
		ptr = nilfs_direct_get_ptr(direct, key);
		if (ptr != NILFS_BMAP_INVALID_PTR) {
			keys[n] = key;
			ptrs[n] = ptr;
			n++;
		}
	}
	return n;
}

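/*
 * nilfs_direct_delete_and_convert - delete a key and rebuild as a direct map
 * @bmap: bmap structure being converted in place
 * @key:  key to delete (left unmapped in the resulting direct map)
 * @keys: keys gathered from the current mapping
 * @ptrs: block pointers gathered from the current mapping
 * @n:    number of valid entries in @keys and @ptrs
 *
 * Deletes @key through the current mapping's operations, releases that
 * mapping's resources, and rewrites the bmap as a direct mapping filled
 * from the gathered key/pointer pairs.  This is used when a larger
 * (B-tree) mapping has shrunk enough to fit in the direct form.
 */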
int nilfs_direct_delete_and_convert(struct nilfs_bmap *bmap,
				    __u64 key, __u64 *keys, __u64 *ptrs, int n)
{
	__le64 *dptrs;
	int ret, i, j;

	/* no need to allocate any resource for conversion */

	/* delete */
	ret = bmap->b_ops->bop_delete(bmap, key);
	if (ret < 0)
		return ret;

	/* free resources */
	if (bmap->b_ops->bop_clear != NULL)
		bmap->b_ops->bop_clear(bmap);

	/* convert */
	dptrs = nilfs_direct_dptrs(bmap);
	for (i = 0, j = 0; i < NILFS_DIRECT_NBLOCKS; i++) {
		if ((j < n) && (i == keys[j])) {
			dptrs[i] = (i != key) ?
				cpu_to_le64(ptrs[j]) :
				NILFS_BMAP_INVALID_PTR;
			j++;
		} else
			dptrs[i] = NILFS_BMAP_INVALID_PTR;
	}

	nilfs_direct_init(bmap);
	return 0;
}

static int nilfs_direct_propagate(struct nilfs_bmap *bmap,
				  struct buffer_head *bh)
{
	struct nilfs_palloc_req oldreq, newreq;
	struct inode *dat;
	__u64 key;
	__u64 ptr;
	int ret;

	if (!NILFS_BMAP_USE_VBN(bmap))
		return 0;

	dat = nilfs_bmap_get_dat(bmap);
	key = nilfs_bmap_data_get_key(bmap, bh);
	ptr = nilfs_direct_get_ptr(bmap, key);
	if (!buffer_nilfs_volatile(bh)) {
		oldreq.pr_entry_nr = ptr;
		newreq.pr_entry_nr = ptr;
		ret = nilfs_dat_prepare_update(dat, &oldreq, &newreq);
		if (ret < 0)
			return ret;
		nilfs_dat_commit_update(dat, &oldreq, &newreq,
					bmap->b_ptr_type == NILFS_BMAP_PTR_VS);
		set_buffer_nilfs_volatile(bh);
		nilfs_direct_set_ptr(bmap, key, newreq.pr_entry_nr);
	} else
		ret = nilfs_dat_mark_dirty(dat, ptr);

	return ret;
}

static int nilfs_direct_assign_v(struct nilfs_bmap *direct,
				 __u64 key, __u64 ptr,
				 struct buffer_head **bh,
				 sector_t blocknr,
				 union nilfs_binfo *binfo)
{
	struct inode *dat = nilfs_bmap_get_dat(direct);
	union nilfs_bmap_ptr_req req;
	int ret;

	req.bpr_ptr = ptr;
	ret = nilfs_dat_prepare_start(dat, &req.bpr_req);
	if (!ret) {
		nilfs_dat_commit_start(dat, &req.bpr_req, blocknr);
		binfo->bi_v.bi_vblocknr = cpu_to_le64(ptr);
		binfo->bi_v.bi_blkoff = cpu_to_le64(key);
	}
	return ret;
}

static int nilfs_direct_assign_p(struct nilfs_bmap *direct,
				 __u64 key, __u64 ptr,
				 struct buffer_head **bh,
				 sector_t blocknr,
				 union nilfs_binfo *binfo)
{
	nilfs_direct_set_ptr(direct, key, blocknr);

	binfo->bi_dat.bi_blkoff = cpu_to_le64(key);
	binfo->bi_dat.bi_level = 0;

	return 0;
}

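/*
 * nilfs_direct_assign - assign an on-disk location to a mapped block
 *
 * Called during segment construction for each dirty buffer mapped by this
 * bmap: it records the block's new disk address (@blocknr) and fills
 * @binfo with the information written into the segment, using either the
 * virtual (DAT-based) or the physical variant depending on whether the
 * bmap uses virtual block numbers.
 */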
static int nilfs_direct_assign(struct nilfs_bmap *bmap,
			       struct buffer_head **bh,
			       sector_t blocknr,
			       union nilfs_binfo *binfo)
{
	__u64 key;
	__u64 ptr;

	key = nilfs_bmap_data_get_key(bmap, *bh);
	if (unlikely(key > NILFS_DIRECT_KEY_MAX)) {
		nilfs_msg(bmap->b_inode->i_sb, KERN_CRIT,
			  "%s (ino=%lu): invalid key: %llu", __func__,
			  bmap->b_inode->i_ino, (unsigned long long)key);
		return -EINVAL;
	}
	ptr = nilfs_direct_get_ptr(bmap, key);
	if (unlikely(ptr == NILFS_BMAP_INVALID_PTR)) {
		nilfs_msg(bmap->b_inode->i_sb, KERN_CRIT,
			  "%s (ino=%lu): invalid pointer: %llu", __func__,
			  bmap->b_inode->i_ino, (unsigned long long)ptr);
		return -EINVAL;
	}

	return NILFS_BMAP_USE_VBN(bmap) ?
		nilfs_direct_assign_v(bmap, key, ptr, bh, blocknr, binfo) :
		nilfs_direct_assign_p(bmap, key, ptr, bh, blocknr, binfo);
}

static const struct nilfs_bmap_operations nilfs_direct_ops = {
	.bop_lookup		=	nilfs_direct_lookup,
	.bop_lookup_contig	=	nilfs_direct_lookup_contig,
	.bop_insert		=	nilfs_direct_insert,
	.bop_delete		=	nilfs_direct_delete,
	.bop_clear		=	NULL,

	.bop_propagate		=	nilfs_direct_propagate,

	.bop_lookup_dirty_buffers	=	NULL,

	.bop_assign		=	nilfs_direct_assign,
	.bop_mark		=	NULL,

	.bop_seek_key		=	nilfs_direct_seek_key,
	.bop_last_key		=	nilfs_direct_last_key,

	.bop_check_insert	=	nilfs_direct_check_insert,
	.bop_check_delete	=	NULL,
	.bop_gather_data	=	nilfs_direct_gather_data,
};

int nilfs_direct_init(struct nilfs_bmap *bmap)
{
	bmap->b_ops = &nilfs_direct_ops;
	return 0;
}