/*
 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
 */

/*
 * Now we have all buffers that must be used in balancing of the tree
 * Further calculations can not cause schedule(), and thus the buffer
 * tree will be stable until the balancing will be finished
 * balance the tree according to the analysis made before,
 * and using buffers obtained after all above.
 */

#include <asm/uaccess.h>
#include <linux/time.h>
#include "reiserfs.h"
#include <linux/buffer_head.h>
#include <linux/kernel.h>

/*
 * Point a buffer_info at the left neighbor L[0] of the node being
 * balanced, together with its parent FL[0] and its position in that
 * parent.
 */
static inline void buffer_info_init_left(struct tree_balance *tb,
					 struct buffer_info *bi)
{
	bi->tb = tb;
	bi->bi_bh = tb->L[0];
	bi->bi_parent = tb->FL[0];
	bi->bi_position = get_left_neighbor_position(tb, 0);
}

/* Point a buffer_info at the right neighbor R[0] and its parent FR[0]. */
static inline void buffer_info_init_right(struct tree_balance *tb,
					  struct buffer_info *bi)
{
	bi->tb = tb;
	bi->bi_bh = tb->R[0];
	bi->bi_parent = tb->FR[0];
	bi->bi_position = get_right_neighbor_position(tb, 0);
}

/*
 * Point a buffer_info at S[0], the leaf at the end of the search path
 * tb->tb_path, and at its parent one level up the path.
 */
static inline void buffer_info_init_tbS0(struct tree_balance *tb,
					 struct buffer_info *bi)
{
	bi->tb = tb;
	bi->bi_bh = PATH_PLAST_BUFFER(tb->tb_path);
	bi->bi_parent = PATH_H_PPARENT(tb->tb_path, 0);
	bi->bi_position = PATH_H_POSITION(tb->tb_path, 1);
}

/*
 * Point a buffer_info at an arbitrary buffer (used for freshly
 * allocated S_new[] nodes, which have no parent yet).
 */
static inline void buffer_info_init_bh(struct tree_balance *tb,
				       struct buffer_info *bi,
				       struct buffer_head *bh)
{
	bi->tb = tb;
	bi->bi_bh = bh;
	bi->bi_parent = NULL;
	bi->bi_position = 0;
}

/*
 * Mark a buffer dirty within the current transaction.  The 'flag'
 * argument is unused here; it is kept so the leaf/internal/sb variants
 * below can share one signature.
 */
inline void do_balance_mark_leaf_dirty(struct tree_balance *tb,
				       struct buffer_head *bh, int flag)
{
	journal_mark_dirty(tb->transaction_handle, bh);
}

/* internal and super-block buffers are journaled the same way as leaves */
#define do_balance_mark_internal_dirty do_balance_mark_leaf_dirty
#define do_balance_mark_sb_dirty do_balance_mark_leaf_dirty

/*
 * summary:
 *  if deleting something ( tb->insert_size[0] < 0 )
 *	return(balance_leaf_when_delete()); (flag d handled here)
 *  else
 *	if lnum is larger
than 0 we put items into the left node 71 * if rnum is larger than 0 we put items into the right node 72 * if snum1 is larger than 0 we put items into the new node s1 73 * if snum2 is larger than 0 we put items into the new node s2 74 * Note that all *num* count new items being created. 75 */ 76 77 static void balance_leaf_when_delete_del(struct tree_balance *tb) 78 { 79 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); 80 int item_pos = PATH_LAST_POSITION(tb->tb_path); 81 struct buffer_info bi; 82 #ifdef CONFIG_REISERFS_CHECK 83 struct item_head *ih = item_head(tbS0, item_pos); 84 #endif 85 86 RFALSE(ih_item_len(ih) + IH_SIZE != -tb->insert_size[0], 87 "vs-12013: mode Delete, insert size %d, ih to be deleted %h", 88 -tb->insert_size[0], ih); 89 90 buffer_info_init_tbS0(tb, &bi); 91 leaf_delete_items(&bi, 0, item_pos, 1, -1); 92 93 if (!item_pos && tb->CFL[0]) { 94 if (B_NR_ITEMS(tbS0)) { 95 replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0, 0); 96 } else { 97 if (!PATH_H_POSITION(tb->tb_path, 1)) 98 replace_key(tb, tb->CFL[0], tb->lkey[0], 99 PATH_H_PPARENT(tb->tb_path, 0), 0); 100 } 101 } 102 103 RFALSE(!item_pos && !tb->CFL[0], 104 "PAP-12020: tb->CFL[0]==%p, tb->L[0]==%p", tb->CFL[0], 105 tb->L[0]); 106 } 107 108 /* cut item in S[0] */ 109 static void balance_leaf_when_delete_cut(struct tree_balance *tb) 110 { 111 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); 112 int item_pos = PATH_LAST_POSITION(tb->tb_path); 113 struct item_head *ih = item_head(tbS0, item_pos); 114 int pos_in_item = tb->tb_path->pos_in_item; 115 struct buffer_info bi; 116 buffer_info_init_tbS0(tb, &bi); 117 118 if (is_direntry_le_ih(ih)) { 119 /* 120 * UFS unlink semantics are such that you can only 121 * delete one directory entry at a time. 
122 * 123 * when we cut a directory tb->insert_size[0] means 124 * number of entries to be cut (always 1) 125 */ 126 tb->insert_size[0] = -1; 127 leaf_cut_from_buffer(&bi, item_pos, pos_in_item, 128 -tb->insert_size[0]); 129 130 RFALSE(!item_pos && !pos_in_item && !tb->CFL[0], 131 "PAP-12030: can not change delimiting key. CFL[0]=%p", 132 tb->CFL[0]); 133 134 if (!item_pos && !pos_in_item && tb->CFL[0]) 135 replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0, 0); 136 } else { 137 leaf_cut_from_buffer(&bi, item_pos, pos_in_item, 138 -tb->insert_size[0]); 139 140 RFALSE(!ih_item_len(ih), 141 "PAP-12035: cut must leave non-zero dynamic " 142 "length of item"); 143 } 144 } 145 146 static int balance_leaf_when_delete_left(struct tree_balance *tb) 147 { 148 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); 149 int n = B_NR_ITEMS(tbS0); 150 151 /* L[0] must be joined with S[0] */ 152 if (tb->lnum[0] == -1) { 153 /* R[0] must be also joined with S[0] */ 154 if (tb->rnum[0] == -1) { 155 if (tb->FR[0] == PATH_H_PPARENT(tb->tb_path, 0)) { 156 /* 157 * all contents of all the 158 * 3 buffers will be in L[0] 159 */ 160 if (PATH_H_POSITION(tb->tb_path, 1) == 0 && 161 1 < B_NR_ITEMS(tb->FR[0])) 162 replace_key(tb, tb->CFL[0], 163 tb->lkey[0], tb->FR[0], 1); 164 165 leaf_move_items(LEAF_FROM_S_TO_L, tb, n, -1, 166 NULL); 167 leaf_move_items(LEAF_FROM_R_TO_L, tb, 168 B_NR_ITEMS(tb->R[0]), -1, 169 NULL); 170 171 reiserfs_invalidate_buffer(tb, tbS0); 172 reiserfs_invalidate_buffer(tb, tb->R[0]); 173 174 return 0; 175 } 176 177 /* all contents of all the 3 buffers will be in R[0] */ 178 leaf_move_items(LEAF_FROM_S_TO_R, tb, n, -1, NULL); 179 leaf_move_items(LEAF_FROM_L_TO_R, tb, 180 B_NR_ITEMS(tb->L[0]), -1, NULL); 181 182 /* right_delimiting_key is correct in R[0] */ 183 replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0); 184 185 reiserfs_invalidate_buffer(tb, tbS0); 186 reiserfs_invalidate_buffer(tb, tb->L[0]); 187 188 return -1; 189 } 190 191 RFALSE(tb->rnum[0] != 0, 192 
"PAP-12045: rnum must be 0 (%d)", tb->rnum[0]); 193 /* all contents of L[0] and S[0] will be in L[0] */ 194 leaf_shift_left(tb, n, -1); 195 196 reiserfs_invalidate_buffer(tb, tbS0); 197 198 return 0; 199 } 200 201 /* 202 * a part of contents of S[0] will be in L[0] and 203 * the rest part of S[0] will be in R[0] 204 */ 205 206 RFALSE((tb->lnum[0] + tb->rnum[0] < n) || 207 (tb->lnum[0] + tb->rnum[0] > n + 1), 208 "PAP-12050: rnum(%d) and lnum(%d) and item " 209 "number(%d) in S[0] are not consistent", 210 tb->rnum[0], tb->lnum[0], n); 211 RFALSE((tb->lnum[0] + tb->rnum[0] == n) && 212 (tb->lbytes != -1 || tb->rbytes != -1), 213 "PAP-12055: bad rbytes (%d)/lbytes (%d) " 214 "parameters when items are not split", 215 tb->rbytes, tb->lbytes); 216 RFALSE((tb->lnum[0] + tb->rnum[0] == n + 1) && 217 (tb->lbytes < 1 || tb->rbytes != -1), 218 "PAP-12060: bad rbytes (%d)/lbytes (%d) " 219 "parameters when items are split", 220 tb->rbytes, tb->lbytes); 221 222 leaf_shift_left(tb, tb->lnum[0], tb->lbytes); 223 leaf_shift_right(tb, tb->rnum[0], tb->rbytes); 224 225 reiserfs_invalidate_buffer(tb, tbS0); 226 227 return 0; 228 } 229 230 /* 231 * Balance leaf node in case of delete or cut: insert_size[0] < 0 232 * 233 * lnum, rnum can have values >= -1 234 * -1 means that the neighbor must be joined with S 235 * 0 means that nothing should be done with the neighbor 236 * >0 means to shift entirely or partly the specified number of items 237 * to the neighbor 238 */ 239 static int balance_leaf_when_delete(struct tree_balance *tb, int flag) 240 { 241 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); 242 int item_pos = PATH_LAST_POSITION(tb->tb_path); 243 struct buffer_info bi; 244 int n; 245 struct item_head *ih; 246 247 RFALSE(tb->FR[0] && B_LEVEL(tb->FR[0]) != DISK_LEAF_NODE_LEVEL + 1, 248 "vs- 12000: level: wrong FR %z", tb->FR[0]); 249 RFALSE(tb->blknum[0] > 1, 250 "PAP-12005: tb->blknum == %d, can not be > 1", tb->blknum[0]); 251 RFALSE(!tb->blknum[0] && 
!PATH_H_PPARENT(tb->tb_path, 0), 252 "PAP-12010: tree can not be empty"); 253 254 ih = item_head(tbS0, item_pos); 255 buffer_info_init_tbS0(tb, &bi); 256 257 /* Delete or truncate the item */ 258 259 BUG_ON(flag != M_DELETE && flag != M_CUT); 260 if (flag == M_DELETE) 261 balance_leaf_when_delete_del(tb); 262 else /* M_CUT */ 263 balance_leaf_when_delete_cut(tb); 264 265 266 /* 267 * the rule is that no shifting occurs unless by shifting 268 * a node can be freed 269 */ 270 n = B_NR_ITEMS(tbS0); 271 272 273 /* L[0] takes part in balancing */ 274 if (tb->lnum[0]) 275 return balance_leaf_when_delete_left(tb); 276 277 if (tb->rnum[0] == -1) { 278 /* all contents of R[0] and S[0] will be in R[0] */ 279 leaf_shift_right(tb, n, -1); 280 reiserfs_invalidate_buffer(tb, tbS0); 281 return 0; 282 } 283 284 RFALSE(tb->rnum[0], 285 "PAP-12065: bad rnum parameter must be 0 (%d)", tb->rnum[0]); 286 return 0; 287 } 288 289 static void balance_leaf_insert_left(struct tree_balance *tb, 290 struct item_head *ih, const char *body) 291 { 292 int ret; 293 struct buffer_info bi; 294 int n = B_NR_ITEMS(tb->L[0]); 295 296 if (tb->item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) { 297 /* part of new item falls into L[0] */ 298 int new_item_len, shift; 299 int version; 300 301 ret = leaf_shift_left(tb, tb->lnum[0] - 1, -1); 302 303 /* Calculate item length to insert to S[0] */ 304 new_item_len = ih_item_len(ih) - tb->lbytes; 305 306 /* Calculate and check item length to insert to L[0] */ 307 put_ih_item_len(ih, ih_item_len(ih) - new_item_len); 308 309 RFALSE(ih_item_len(ih) <= 0, 310 "PAP-12080: there is nothing to insert into L[0]: " 311 "ih_item_len=%d", ih_item_len(ih)); 312 313 /* Insert new item into L[0] */ 314 buffer_info_init_left(tb, &bi); 315 leaf_insert_into_buf(&bi, n + tb->item_pos - ret, ih, body, 316 min_t(int, tb->zeroes_num, ih_item_len(ih))); 317 318 version = ih_version(ih); 319 320 /* 321 * Calculate key component, item length and body to 322 * insert into S[0] 323 */ 324 
shift = 0; 325 if (is_indirect_le_ih(ih)) 326 shift = tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT; 327 328 add_le_ih_k_offset(ih, tb->lbytes << shift); 329 330 put_ih_item_len(ih, new_item_len); 331 if (tb->lbytes > tb->zeroes_num) { 332 body += (tb->lbytes - tb->zeroes_num); 333 tb->zeroes_num = 0; 334 } else 335 tb->zeroes_num -= tb->lbytes; 336 337 RFALSE(ih_item_len(ih) <= 0, 338 "PAP-12085: there is nothing to insert into S[0]: " 339 "ih_item_len=%d", ih_item_len(ih)); 340 } else { 341 /* new item in whole falls into L[0] */ 342 /* Shift lnum[0]-1 items to L[0] */ 343 ret = leaf_shift_left(tb, tb->lnum[0] - 1, tb->lbytes); 344 345 /* Insert new item into L[0] */ 346 buffer_info_init_left(tb, &bi); 347 leaf_insert_into_buf(&bi, n + tb->item_pos - ret, ih, body, 348 tb->zeroes_num); 349 tb->insert_size[0] = 0; 350 tb->zeroes_num = 0; 351 } 352 } 353 354 static void balance_leaf_paste_left_shift_dirent(struct tree_balance *tb, 355 struct item_head *ih, 356 const char *body) 357 { 358 int n = B_NR_ITEMS(tb->L[0]); 359 struct buffer_info bi; 360 361 RFALSE(tb->zeroes_num, 362 "PAP-12090: invalid parameter in case of a directory"); 363 364 /* directory item */ 365 if (tb->lbytes > tb->pos_in_item) { 366 /* new directory entry falls into L[0] */ 367 struct item_head *pasted; 368 int ret, l_pos_in_item = tb->pos_in_item; 369 370 /* 371 * Shift lnum[0] - 1 items in whole. 
372 * Shift lbytes - 1 entries from given directory item 373 */ 374 ret = leaf_shift_left(tb, tb->lnum[0], tb->lbytes - 1); 375 if (ret && !tb->item_pos) { 376 pasted = item_head(tb->L[0], B_NR_ITEMS(tb->L[0]) - 1); 377 l_pos_in_item += ih_entry_count(pasted) - 378 (tb->lbytes - 1); 379 } 380 381 /* Append given directory entry to directory item */ 382 buffer_info_init_left(tb, &bi); 383 leaf_paste_in_buffer(&bi, n + tb->item_pos - ret, 384 l_pos_in_item, tb->insert_size[0], 385 body, tb->zeroes_num); 386 387 /* 388 * previous string prepared space for pasting new entry, 389 * following string pastes this entry 390 */ 391 392 /* 393 * when we have merge directory item, pos_in_item 394 * has been changed too 395 */ 396 397 /* paste new directory entry. 1 is entry number */ 398 leaf_paste_entries(&bi, n + tb->item_pos - ret, 399 l_pos_in_item, 1, 400 (struct reiserfs_de_head *) body, 401 body + DEH_SIZE, tb->insert_size[0]); 402 tb->insert_size[0] = 0; 403 } else { 404 /* new directory item doesn't fall into L[0] */ 405 /* 406 * Shift lnum[0]-1 items in whole. Shift lbytes 407 * directory entries from directory item number lnum[0] 408 */ 409 leaf_shift_left(tb, tb->lnum[0], tb->lbytes); 410 } 411 412 /* Calculate new position to append in item body */ 413 tb->pos_in_item -= tb->lbytes; 414 } 415 416 static void balance_leaf_paste_left_shift(struct tree_balance *tb, 417 struct item_head *ih, 418 const char *body) 419 { 420 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); 421 int n = B_NR_ITEMS(tb->L[0]); 422 struct buffer_info bi; 423 424 if (is_direntry_le_ih(item_head(tbS0, tb->item_pos))) { 425 balance_leaf_paste_left_shift_dirent(tb, ih, body); 426 return; 427 } 428 429 RFALSE(tb->lbytes <= 0, 430 "PAP-12095: there is nothing to shift to L[0]. 
" 431 "lbytes=%d", tb->lbytes); 432 RFALSE(tb->pos_in_item != ih_item_len(item_head(tbS0, tb->item_pos)), 433 "PAP-12100: incorrect position to paste: " 434 "item_len=%d, pos_in_item=%d", 435 ih_item_len(item_head(tbS0, tb->item_pos)), tb->pos_in_item); 436 437 /* appended item will be in L[0] in whole */ 438 if (tb->lbytes >= tb->pos_in_item) { 439 struct item_head *tbS0_pos_ih, *tbL0_ih; 440 struct item_head *tbS0_0_ih; 441 struct reiserfs_key *left_delim_key; 442 int ret, l_n, version, temp_l; 443 444 tbS0_pos_ih = item_head(tbS0, tb->item_pos); 445 tbS0_0_ih = item_head(tbS0, 0); 446 447 /* 448 * this bytes number must be appended 449 * to the last item of L[h] 450 */ 451 l_n = tb->lbytes - tb->pos_in_item; 452 453 /* Calculate new insert_size[0] */ 454 tb->insert_size[0] -= l_n; 455 456 RFALSE(tb->insert_size[0] <= 0, 457 "PAP-12105: there is nothing to paste into " 458 "L[0]. insert_size=%d", tb->insert_size[0]); 459 460 ret = leaf_shift_left(tb, tb->lnum[0], 461 ih_item_len(tbS0_pos_ih)); 462 463 tbL0_ih = item_head(tb->L[0], n + tb->item_pos - ret); 464 465 /* Append to body of item in L[0] */ 466 buffer_info_init_left(tb, &bi); 467 leaf_paste_in_buffer(&bi, n + tb->item_pos - ret, 468 ih_item_len(tbL0_ih), l_n, body, 469 min_t(int, l_n, tb->zeroes_num)); 470 471 /* 472 * 0-th item in S0 can be only of DIRECT type 473 * when l_n != 0 474 */ 475 temp_l = l_n; 476 477 RFALSE(ih_item_len(tbS0_0_ih), 478 "PAP-12106: item length must be 0"); 479 RFALSE(comp_short_le_keys(&tbS0_0_ih->ih_key, 480 leaf_key(tb->L[0], n + tb->item_pos - ret)), 481 "PAP-12107: items must be of the same file"); 482 483 if (is_indirect_le_ih(tbL0_ih)) { 484 int shift = tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT; 485 temp_l = l_n << shift; 486 } 487 /* update key of first item in S0 */ 488 version = ih_version(tbS0_0_ih); 489 add_le_key_k_offset(version, &tbS0_0_ih->ih_key, temp_l); 490 491 /* update left delimiting key */ 492 left_delim_key = internal_key(tb->CFL[0], tb->lkey[0]); 493 
add_le_key_k_offset(version, left_delim_key, temp_l); 494 495 /* 496 * Calculate new body, position in item and 497 * insert_size[0] 498 */ 499 if (l_n > tb->zeroes_num) { 500 body += (l_n - tb->zeroes_num); 501 tb->zeroes_num = 0; 502 } else 503 tb->zeroes_num -= l_n; 504 tb->pos_in_item = 0; 505 506 RFALSE(comp_short_le_keys(&tbS0_0_ih->ih_key, 507 leaf_key(tb->L[0], 508 B_NR_ITEMS(tb->L[0]) - 1)) || 509 !op_is_left_mergeable(leaf_key(tbS0, 0), tbS0->b_size) || 510 !op_is_left_mergeable(left_delim_key, tbS0->b_size), 511 "PAP-12120: item must be merge-able with left " 512 "neighboring item"); 513 } else { 514 /* only part of the appended item will be in L[0] */ 515 516 /* Calculate position in item for append in S[0] */ 517 tb->pos_in_item -= tb->lbytes; 518 519 RFALSE(tb->pos_in_item <= 0, 520 "PAP-12125: no place for paste. pos_in_item=%d", 521 tb->pos_in_item); 522 523 /* 524 * Shift lnum[0] - 1 items in whole. 525 * Shift lbytes - 1 byte from item number lnum[0] 526 */ 527 leaf_shift_left(tb, tb->lnum[0], tb->lbytes); 528 } 529 } 530 531 532 /* appended item will be in L[0] in whole */ 533 static void balance_leaf_paste_left_whole(struct tree_balance *tb, 534 struct item_head *ih, 535 const char *body) 536 { 537 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); 538 int n = B_NR_ITEMS(tb->L[0]); 539 struct buffer_info bi; 540 struct item_head *pasted; 541 int ret; 542 543 /* if we paste into first item of S[0] and it is left mergable */ 544 if (!tb->item_pos && 545 op_is_left_mergeable(leaf_key(tbS0, 0), tbS0->b_size)) { 546 /* 547 * then increment pos_in_item by the size of the 548 * last item in L[0] 549 */ 550 pasted = item_head(tb->L[0], n - 1); 551 if (is_direntry_le_ih(pasted)) 552 tb->pos_in_item += ih_entry_count(pasted); 553 else 554 tb->pos_in_item += ih_item_len(pasted); 555 } 556 557 /* 558 * Shift lnum[0] - 1 items in whole. 
559 * Shift lbytes - 1 byte from item number lnum[0] 560 */ 561 ret = leaf_shift_left(tb, tb->lnum[0], tb->lbytes); 562 563 /* Append to body of item in L[0] */ 564 buffer_info_init_left(tb, &bi); 565 leaf_paste_in_buffer(&bi, n + tb->item_pos - ret, tb->pos_in_item, 566 tb->insert_size[0], body, tb->zeroes_num); 567 568 /* if appended item is directory, paste entry */ 569 pasted = item_head(tb->L[0], n + tb->item_pos - ret); 570 if (is_direntry_le_ih(pasted)) 571 leaf_paste_entries(&bi, n + tb->item_pos - ret, 572 tb->pos_in_item, 1, 573 (struct reiserfs_de_head *)body, 574 body + DEH_SIZE, tb->insert_size[0]); 575 576 /* 577 * if appended item is indirect item, put unformatted node 578 * into un list 579 */ 580 if (is_indirect_le_ih(pasted)) 581 set_ih_free_space(pasted, 0); 582 583 tb->insert_size[0] = 0; 584 tb->zeroes_num = 0; 585 } 586 587 static void balance_leaf_paste_left(struct tree_balance *tb, 588 struct item_head *ih, const char *body) 589 { 590 /* we must shift the part of the appended item */ 591 if (tb->item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) 592 balance_leaf_paste_left_shift(tb, ih, body); 593 else 594 balance_leaf_paste_left_whole(tb, ih, body); 595 } 596 597 /* Shift lnum[0] items from S[0] to the left neighbor L[0] */ 598 static void balance_leaf_left(struct tree_balance *tb, struct item_head *ih, 599 const char *body, int flag) 600 { 601 if (tb->lnum[0] <= 0) 602 return; 603 604 /* new item or it part falls to L[0], shift it too */ 605 if (tb->item_pos < tb->lnum[0]) { 606 BUG_ON(flag != M_INSERT && flag != M_PASTE); 607 608 if (flag == M_INSERT) 609 balance_leaf_insert_left(tb, ih, body); 610 else /* M_PASTE */ 611 balance_leaf_paste_left(tb, ih, body); 612 } else 613 /* new item doesn't fall into L[0] */ 614 leaf_shift_left(tb, tb->lnum[0], tb->lbytes); 615 } 616 617 618 static void balance_leaf_insert_right(struct tree_balance *tb, 619 struct item_head *ih, const char *body) 620 { 621 622 struct buffer_head *tbS0 = 
PATH_PLAST_BUFFER(tb->tb_path); 623 int n = B_NR_ITEMS(tbS0); 624 struct buffer_info bi; 625 int ret; 626 627 /* new item or part of it doesn't fall into R[0] */ 628 if (n - tb->rnum[0] >= tb->item_pos) { 629 leaf_shift_right(tb, tb->rnum[0], tb->rbytes); 630 return; 631 } 632 633 /* new item or its part falls to R[0] */ 634 635 /* part of new item falls into R[0] */ 636 if (tb->item_pos == n - tb->rnum[0] + 1 && tb->rbytes != -1) { 637 loff_t old_key_comp, old_len, r_zeroes_number; 638 const char *r_body; 639 int version, shift; 640 loff_t offset; 641 642 leaf_shift_right(tb, tb->rnum[0] - 1, -1); 643 644 version = ih_version(ih); 645 646 /* Remember key component and item length */ 647 old_key_comp = le_ih_k_offset(ih); 648 old_len = ih_item_len(ih); 649 650 /* 651 * Calculate key component and item length to insert 652 * into R[0] 653 */ 654 shift = 0; 655 if (is_indirect_le_ih(ih)) 656 shift = tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT; 657 offset = le_ih_k_offset(ih) + ((old_len - tb->rbytes) << shift); 658 set_le_ih_k_offset(ih, offset); 659 put_ih_item_len(ih, tb->rbytes); 660 661 /* Insert part of the item into R[0] */ 662 buffer_info_init_right(tb, &bi); 663 if ((old_len - tb->rbytes) > tb->zeroes_num) { 664 r_zeroes_number = 0; 665 r_body = body + (old_len - tb->rbytes) - tb->zeroes_num; 666 } else { 667 r_body = body; 668 r_zeroes_number = tb->zeroes_num - 669 (old_len - tb->rbytes); 670 tb->zeroes_num -= r_zeroes_number; 671 } 672 673 leaf_insert_into_buf(&bi, 0, ih, r_body, r_zeroes_number); 674 675 /* Replace right delimiting key by first key in R[0] */ 676 replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0); 677 678 /* 679 * Calculate key component and item length to 680 * insert into S[0] 681 */ 682 set_le_ih_k_offset(ih, old_key_comp); 683 put_ih_item_len(ih, old_len - tb->rbytes); 684 685 tb->insert_size[0] -= tb->rbytes; 686 687 } else { 688 /* whole new item falls into R[0] */ 689 690 /* Shift rnum[0]-1 items to R[0] */ 691 ret = 
leaf_shift_right(tb, tb->rnum[0] - 1, tb->rbytes); 692 693 /* Insert new item into R[0] */ 694 buffer_info_init_right(tb, &bi); 695 leaf_insert_into_buf(&bi, tb->item_pos - n + tb->rnum[0] - 1, 696 ih, body, tb->zeroes_num); 697 698 if (tb->item_pos - n + tb->rnum[0] - 1 == 0) 699 replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0); 700 701 tb->zeroes_num = tb->insert_size[0] = 0; 702 } 703 } 704 705 706 static void balance_leaf_paste_right_shift_dirent(struct tree_balance *tb, 707 struct item_head *ih, const char *body) 708 { 709 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); 710 struct buffer_info bi; 711 int entry_count; 712 713 RFALSE(tb->zeroes_num, 714 "PAP-12145: invalid parameter in case of a directory"); 715 entry_count = ih_entry_count(item_head(tbS0, tb->item_pos)); 716 717 /* new directory entry falls into R[0] */ 718 if (entry_count - tb->rbytes < tb->pos_in_item) { 719 int paste_entry_position; 720 721 RFALSE(tb->rbytes - 1 >= entry_count || !tb->insert_size[0], 722 "PAP-12150: no enough of entries to shift to R[0]: " 723 "rbytes=%d, entry_count=%d", tb->rbytes, entry_count); 724 725 /* 726 * Shift rnum[0]-1 items in whole. 
727 * Shift rbytes-1 directory entries from directory 728 * item number rnum[0] 729 */ 730 leaf_shift_right(tb, tb->rnum[0], tb->rbytes - 1); 731 732 /* Paste given directory entry to directory item */ 733 paste_entry_position = tb->pos_in_item - entry_count + 734 tb->rbytes - 1; 735 buffer_info_init_right(tb, &bi); 736 leaf_paste_in_buffer(&bi, 0, paste_entry_position, 737 tb->insert_size[0], body, tb->zeroes_num); 738 739 /* paste entry */ 740 leaf_paste_entries(&bi, 0, paste_entry_position, 1, 741 (struct reiserfs_de_head *) body, 742 body + DEH_SIZE, tb->insert_size[0]); 743 744 /* change delimiting keys */ 745 if (paste_entry_position == 0) 746 replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0); 747 748 tb->insert_size[0] = 0; 749 tb->pos_in_item++; 750 } else { 751 /* new directory entry doesn't fall into R[0] */ 752 leaf_shift_right(tb, tb->rnum[0], tb->rbytes); 753 } 754 } 755 756 static void balance_leaf_paste_right_shift(struct tree_balance *tb, 757 struct item_head *ih, const char *body) 758 { 759 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); 760 int n_shift, n_rem, r_zeroes_number, version; 761 unsigned long temp_rem; 762 const char *r_body; 763 struct buffer_info bi; 764 765 /* we append to directory item */ 766 if (is_direntry_le_ih(item_head(tbS0, tb->item_pos))) { 767 balance_leaf_paste_right_shift_dirent(tb, ih, body); 768 return; 769 } 770 771 /* regular object */ 772 773 /* 774 * Calculate number of bytes which must be shifted 775 * from appended item 776 */ 777 n_shift = tb->rbytes - tb->insert_size[0]; 778 if (n_shift < 0) 779 n_shift = 0; 780 781 RFALSE(tb->pos_in_item != ih_item_len(item_head(tbS0, tb->item_pos)), 782 "PAP-12155: invalid position to paste. 
ih_item_len=%d, " 783 "pos_in_item=%d", tb->pos_in_item, 784 ih_item_len(item_head(tbS0, tb->item_pos))); 785 786 leaf_shift_right(tb, tb->rnum[0], n_shift); 787 788 /* 789 * Calculate number of bytes which must remain in body 790 * after appending to R[0] 791 */ 792 n_rem = tb->insert_size[0] - tb->rbytes; 793 if (n_rem < 0) 794 n_rem = 0; 795 796 temp_rem = n_rem; 797 798 version = ih_version(item_head(tb->R[0], 0)); 799 800 if (is_indirect_le_key(version, leaf_key(tb->R[0], 0))) { 801 int shift = tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT; 802 temp_rem = n_rem << shift; 803 } 804 805 add_le_key_k_offset(version, leaf_key(tb->R[0], 0), temp_rem); 806 add_le_key_k_offset(version, internal_key(tb->CFR[0], tb->rkey[0]), 807 temp_rem); 808 809 do_balance_mark_internal_dirty(tb, tb->CFR[0], 0); 810 811 /* Append part of body into R[0] */ 812 buffer_info_init_right(tb, &bi); 813 if (n_rem > tb->zeroes_num) { 814 r_zeroes_number = 0; 815 r_body = body + n_rem - tb->zeroes_num; 816 } else { 817 r_body = body; 818 r_zeroes_number = tb->zeroes_num - n_rem; 819 tb->zeroes_num -= r_zeroes_number; 820 } 821 822 leaf_paste_in_buffer(&bi, 0, n_shift, tb->insert_size[0] - n_rem, 823 r_body, r_zeroes_number); 824 825 if (is_indirect_le_ih(item_head(tb->R[0], 0))) 826 set_ih_free_space(item_head(tb->R[0], 0), 0); 827 828 tb->insert_size[0] = n_rem; 829 if (!n_rem) 830 tb->pos_in_item++; 831 } 832 833 static void balance_leaf_paste_right_whole(struct tree_balance *tb, 834 struct item_head *ih, const char *body) 835 { 836 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); 837 int n = B_NR_ITEMS(tbS0); 838 struct item_head *pasted; 839 struct buffer_info bi; 840 841 buffer_info_init_right(tb, &bi); 842 leaf_shift_right(tb, tb->rnum[0], tb->rbytes); 843 844 /* append item in R[0] */ 845 if (tb->pos_in_item >= 0) { 846 buffer_info_init_right(tb, &bi); 847 leaf_paste_in_buffer(&bi, tb->item_pos - n + tb->rnum[0], 848 tb->pos_in_item, tb->insert_size[0], body, 849 
tb->zeroes_num); 850 } 851 852 /* paste new entry, if item is directory item */ 853 pasted = item_head(tb->R[0], tb->item_pos - n + tb->rnum[0]); 854 if (is_direntry_le_ih(pasted) && tb->pos_in_item >= 0) { 855 leaf_paste_entries(&bi, tb->item_pos - n + tb->rnum[0], 856 tb->pos_in_item, 1, 857 (struct reiserfs_de_head *)body, 858 body + DEH_SIZE, tb->insert_size[0]); 859 860 if (!tb->pos_in_item) { 861 862 RFALSE(tb->item_pos - n + tb->rnum[0], 863 "PAP-12165: directory item must be first " 864 "item of node when pasting is in 0th position"); 865 866 /* update delimiting keys */ 867 replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0); 868 } 869 } 870 871 if (is_indirect_le_ih(pasted)) 872 set_ih_free_space(pasted, 0); 873 tb->zeroes_num = tb->insert_size[0] = 0; 874 } 875 876 static void balance_leaf_paste_right(struct tree_balance *tb, 877 struct item_head *ih, const char *body) 878 { 879 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); 880 int n = B_NR_ITEMS(tbS0); 881 882 /* new item doesn't fall into R[0] */ 883 if (n - tb->rnum[0] > tb->item_pos) { 884 leaf_shift_right(tb, tb->rnum[0], tb->rbytes); 885 return; 886 } 887 888 /* pasted item or part of it falls to R[0] */ 889 890 if (tb->item_pos == n - tb->rnum[0] && tb->rbytes != -1) 891 /* we must shift the part of the appended item */ 892 balance_leaf_paste_right_shift(tb, ih, body); 893 else 894 /* pasted item in whole falls into R[0] */ 895 balance_leaf_paste_right_whole(tb, ih, body); 896 } 897 898 /* shift rnum[0] items from S[0] to the right neighbor R[0] */ 899 static void balance_leaf_right(struct tree_balance *tb, struct item_head *ih, 900 const char *body, int flag) 901 { 902 if (tb->rnum[0] <= 0) 903 return; 904 905 BUG_ON(flag != M_INSERT && flag != M_PASTE); 906 907 if (flag == M_INSERT) 908 balance_leaf_insert_right(tb, ih, body); 909 else /* M_PASTE */ 910 balance_leaf_paste_right(tb, ih, body); 911 } 912 913 static void balance_leaf_new_nodes_insert(struct tree_balance *tb, 914 
					  struct item_head *ih,
					  const char *body,
					  struct item_head *insert_key,
					  struct buffer_head **insert_ptr,
					  int i)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	int n = B_NR_ITEMS(tbS0);
	struct buffer_info bi;
	int shift;

	/* new item or it part don't falls into S_new[i] */
	if (n - tb->snum[i] >= tb->item_pos) {
		leaf_move_items(LEAF_FROM_S_TO_SNEW, tb,
				tb->snum[i], tb->sbytes[i], tb->S_new[i]);
		return;
	}

	/* new item or it's part falls to first new node S_new[i] */

	/* part of new item falls into S_new[i] */
	if (tb->item_pos == n - tb->snum[i] + 1 && tb->sbytes[i] != -1) {
		int old_key_comp, old_len, r_zeroes_number;
		const char *r_body;
		int version;

		/* Move snum[i]-1 items from S[0] to S_new[i] */
		leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, tb->snum[i] - 1, -1,
				tb->S_new[i]);

		/* Remember key component and item length */
		version = ih_version(ih);
		old_key_comp = le_ih_k_offset(ih);
		old_len = ih_item_len(ih);

		/*
		 * Calculate key component and item length to insert
		 * into S_new[i]
		 */
		shift = 0;
		if (is_indirect_le_ih(ih))
			/* indirect key offsets are in bytes, lengths in pointers */
			shift = tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT;
		set_le_ih_k_offset(ih,
				   le_ih_k_offset(ih) +
				   ((old_len - tb->sbytes[i]) << shift));

		put_ih_item_len(ih, tb->sbytes[i]);

		/* Insert part of the item into S_new[i] before 0-th item */
		buffer_info_init_bh(tb, &bi, tb->S_new[i]);

		if ((old_len - tb->sbytes[i]) > tb->zeroes_num) {
			r_zeroes_number = 0;
			r_body = body + (old_len - tb->sbytes[i]) -
				 tb->zeroes_num;
		} else {
			r_body = body;
			r_zeroes_number = tb->zeroes_num - (old_len -
							    tb->sbytes[i]);
			tb->zeroes_num -= r_zeroes_number;
		}

		leaf_insert_into_buf(&bi, 0, ih, r_body, r_zeroes_number);

		/*
		 * Calculate key component and item length to
		 * insert into S[i]
		 */
		set_le_ih_k_offset(ih, old_key_comp);
		put_ih_item_len(ih, old_len - tb->sbytes[i]);
		tb->insert_size[0] -= tb->sbytes[i];
	} else {
		/* whole new item falls into S_new[i] */

		/*
		 * Shift snum[0] - 1 items to S_new[i]
		 * (sbytes[i] of split item)
		 */
		leaf_move_items(LEAF_FROM_S_TO_SNEW, tb,
				tb->snum[i] - 1, tb->sbytes[i], tb->S_new[i]);

		/* Insert new item into S_new[i] */
		buffer_info_init_bh(tb, &bi, tb->S_new[i]);
		leaf_insert_into_buf(&bi, tb->item_pos - n + tb->snum[i] - 1,
				     ih, body, tb->zeroes_num);

		tb->zeroes_num = tb->insert_size[0] = 0;
	}
}

/* we append to directory item */
static void balance_leaf_new_nodes_paste_dirent(struct tree_balance *tb,
						struct item_head *ih,
						const char *body,
						struct item_head *insert_key,
						struct buffer_head **insert_ptr,
						int i)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	struct item_head *aux_ih = item_head(tbS0, tb->item_pos);
	int entry_count = ih_entry_count(aux_ih);
	struct buffer_info bi;

	if (entry_count - tb->sbytes[i] < tb->pos_in_item &&
	    tb->pos_in_item <= entry_count) {
		/* new directory entry falls into S_new[i] */

		RFALSE(!tb->insert_size[0],
		       "PAP-12215: insert_size is already 0");
		RFALSE(tb->sbytes[i] - 1 >= entry_count,
		       "PAP-12220: there are no so much entries (%d), only %d",
		       tb->sbytes[i] - 1, entry_count);

		/*
		 * Shift snum[i]-1 items in whole.
		 * Shift sbytes[i] directory entries
		 * from directory item number snum[i]
		 */
		leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, tb->snum[i],
				tb->sbytes[i] - 1, tb->S_new[i]);

		/*
		 * Paste given directory entry to
		 * directory item
		 */
		buffer_info_init_bh(tb, &bi, tb->S_new[i]);
		leaf_paste_in_buffer(&bi, 0, tb->pos_in_item - entry_count +
				     tb->sbytes[i] - 1, tb->insert_size[0],
				     body, tb->zeroes_num);

		/* paste new directory entry */
		leaf_paste_entries(&bi, 0, tb->pos_in_item - entry_count +
				   tb->sbytes[i] - 1, 1,
				   (struct reiserfs_de_head *) body,
				   body + DEH_SIZE, tb->insert_size[0]);

		tb->insert_size[0] = 0;
		tb->pos_in_item++;
	} else {
		/* new directory entry doesn't fall into S_new[i] */
		leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, tb->snum[i],
				tb->sbytes[i], tb->S_new[i]);
	}

}

/*
 * Paste into a new node when only part of the appended bytes moves to
 * S_new[i]; handles regular/indirect items (directory items are routed
 * to balance_leaf_new_nodes_paste_dirent).
 */
static void balance_leaf_new_nodes_paste_shift(struct tree_balance *tb,
					       struct item_head *ih,
					       const char *body,
					       struct item_head *insert_key,
					       struct buffer_head **insert_ptr,
					       int i)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	struct item_head *aux_ih = item_head(tbS0, tb->item_pos);
	int n_shift, n_rem, r_zeroes_number, shift;
	const char *r_body;
	struct item_head *tmp;
	struct buffer_info bi;

	RFALSE(ih, "PAP-12210: ih must be 0");

	if (is_direntry_le_ih(aux_ih)) {
		balance_leaf_new_nodes_paste_dirent(tb, ih, body, insert_key,
						    insert_ptr, i);
		return;
	}

	/* regular object */


	RFALSE(tb->pos_in_item != ih_item_len(item_head(tbS0, tb->item_pos)) ||
	       tb->insert_size[0] <= 0,
	       "PAP-12225: item too short or insert_size <= 0");

	/*
	 * Calculate number of bytes which must be shifted from appended item
	 */
	n_shift = tb->sbytes[i] - tb->insert_size[0];
	if (n_shift < 0)
		n_shift = 0;

	leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, tb->snum[i], n_shift,
			tb->S_new[i]);

	/*
	 * Calculate number of bytes which must remain in body after
	 * append to S_new[i]
	 */
	n_rem = tb->insert_size[0] - tb->sbytes[i];
	if (n_rem < 0)
		n_rem = 0;

	/* Append part of body into S_new[0] */
	buffer_info_init_bh(tb, &bi, tb->S_new[i]);
	if (n_rem > tb->zeroes_num) {
		r_zeroes_number = 0;
		r_body = body + n_rem - tb->zeroes_num;
	} else {
		r_body = body;
		r_zeroes_number = tb->zeroes_num - n_rem;
		tb->zeroes_num -= r_zeroes_number;
	}

	leaf_paste_in_buffer(&bi, 0, n_shift, tb->insert_size[0] - n_rem,
			     r_body, r_zeroes_number);

	tmp = item_head(tb->S_new[i], 0);
	shift = 0;
	if (is_indirect_le_ih(tmp)) {
		set_ih_free_space(tmp, 0);
		/* key offsets of indirect items are scaled by block size */
		shift = tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT;
	}
	add_le_ih_k_offset(tmp, n_rem << shift);

	/* n_rem bytes are still to be pasted into what remains in S[0] */
	tb->insert_size[0] = n_rem;
	if (!n_rem)
		tb->pos_in_item++;
}

/*
 * Paste case: the whole affected item (with the appended bytes) moves
 * into S_new[i].
 */
static void balance_leaf_new_nodes_paste_whole(struct tree_balance *tb,
					       struct item_head *ih,
					       const char *body,
					       struct item_head *insert_key,
					       struct buffer_head **insert_ptr,
					       int i)

{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	int n = B_NR_ITEMS(tbS0);
	int leaf_mi;
	struct item_head *pasted;
	struct buffer_info bi;

#ifdef CONFIG_REISERFS_CHECK
	struct item_head *ih_check = item_head(tbS0, tb->item_pos);

	if (!is_direntry_le_ih(ih_check) &&
	    (tb->pos_in_item != ih_item_len(ih_check) ||
	    tb->insert_size[0] <= 0))
		reiserfs_panic(tb->tb_sb,
			       "PAP-12235",
			       "pos_in_item must be equal to ih_item_len");
#endif

	leaf_mi = leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, tb->snum[i],
				  tb->sbytes[i], tb->S_new[i]);

	RFALSE(leaf_mi,
	       "PAP-12240: unexpected value returned by leaf_move_items (%d)",
	       leaf_mi);

	/* paste into item */
	buffer_info_init_bh(tb, &bi, tb->S_new[i]);
	leaf_paste_in_buffer(&bi, tb->item_pos - n + tb->snum[i],
			     tb->pos_in_item, tb->insert_size[0],
			     body, tb->zeroes_num);

	pasted = item_head(tb->S_new[i], tb->item_pos - n +
			   tb->snum[i]);
	if (is_direntry_le_ih(pasted))
		leaf_paste_entries(&bi, tb->item_pos - n + tb->snum[i],
				   tb->pos_in_item, 1,
				   (struct reiserfs_de_head *)body,
				   body + DEH_SIZE, tb->insert_size[0]);

	/* if we paste to indirect item update ih_free_space */
	if (is_indirect_le_ih(pasted))
		set_ih_free_space(pasted, 0);

	tb->zeroes_num = tb->insert_size[0] = 0;

}

/*
 * Dispatch a paste into the new nodes: none of the pasted item, part of
 * it, or the whole of it may fall into S_new[i].
 */
static void balance_leaf_new_nodes_paste(struct tree_balance *tb,
					 struct item_head *ih,
					 const char *body,
					 struct item_head *insert_key,
					 struct buffer_head **insert_ptr,
					 int i)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	int n = B_NR_ITEMS(tbS0);

	/* pasted item doesn't fall into S_new[i] */
	if (n - tb->snum[i] > tb->item_pos) {
		leaf_move_items(LEAF_FROM_S_TO_SNEW, tb,
				tb->snum[i], tb->sbytes[i], tb->S_new[i]);
		return;
	}

	/* pasted item or part if it falls to S_new[i] */

	if (tb->item_pos == n - tb->snum[i] && tb->sbytes[i] != -1)
		/* we must shift part of the appended item */
		balance_leaf_new_nodes_paste_shift(tb, ih, body, insert_key,
						   insert_ptr, i);
	else
		/* item falls wholly into S_new[i] */
		balance_leaf_new_nodes_paste_whole(tb, ih, body, insert_key,
						   insert_ptr, i);
}

/* Fill new nodes that appear in place of S[0] */
static void balance_leaf_new_nodes(struct tree_balance *tb,
				   struct item_head *ih,
				   const char *body,
				   struct item_head *insert_key,
				   struct buffer_head **insert_ptr,
				   int flag)
{
	int i;
	for (i = tb->blknum[0] - 2; i >= 0; i--) {
		BUG_ON(flag != M_INSERT && flag != M_PASTE);

		RFALSE(!tb->snum[i],
		       "PAP-12200: snum[%d] == %d. Must be > 0", i,
		       tb->snum[i]);

		/* here we shift from S to S_new nodes */

		tb->S_new[i] = get_FEB(tb);

		/* initialized block type and tree level */
		set_blkh_level(B_BLK_HEAD(tb->S_new[i]), DISK_LEAF_NODE_LEVEL);

		if (flag == M_INSERT)
			balance_leaf_new_nodes_insert(tb, ih, body, insert_key,
						      insert_ptr, i);
		else	/* M_PASTE */
			balance_leaf_new_nodes_paste(tb, ih, body, insert_key,
						     insert_ptr, i);

		/* pass the new node's first key and pointer up a level */
		memcpy(insert_key + i, leaf_key(tb->S_new[i], 0), KEY_SIZE);
		insert_ptr[i] = tb->S_new[i];

		RFALSE(!buffer_journaled(tb->S_new[i])
		       || buffer_journal_dirty(tb->S_new[i])
		       || buffer_dirty(tb->S_new[i]),
		       "PAP-12247: S_new[%d] : (%b)",
		       i, tb->S_new[i]);
	}
}

/*
 * Insert the new item into what remains of S[0] after shifting to the
 * neighbors and the new nodes.
 */
static void balance_leaf_finish_node_insert(struct tree_balance *tb,
					    struct item_head *ih,
					    const char *body)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	struct buffer_info bi;
	buffer_info_init_tbS0(tb, &bi);
	leaf_insert_into_buf(&bi, tb->item_pos, ih, body, tb->zeroes_num);

	/* If we insert the first key change the delimiting key */
	if (tb->item_pos == 0) {
		if (tb->CFL[0])	/* can be 0 in reiserfsck */
			replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0, 0);

	}
}

/* paste a new directory entry into an item that remains in S[0] */
static void balance_leaf_finish_node_paste_dirent(struct tree_balance *tb,
						  struct item_head *ih,
						  const char *body)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	struct item_head *pasted = item_head(tbS0, tb->item_pos);
	struct buffer_info bi;

	if (tb->pos_in_item >= 0 && tb->pos_in_item <= ih_entry_count(pasted)) {
		RFALSE(!tb->insert_size[0],
		       "PAP-12260: insert_size is 0 already");

		/* prepare space */
		buffer_info_init_tbS0(tb, &bi);
		leaf_paste_in_buffer(&bi, tb->item_pos,
				     tb->pos_in_item,
				     tb->insert_size[0], body, tb->zeroes_num);

		/* paste entry */
		leaf_paste_entries(&bi, tb->item_pos, tb->pos_in_item, 1,
				   (struct reiserfs_de_head *)body,
				   body + DEH_SIZE, tb->insert_size[0]);

		/* entry became first in first item: fix left delimiting key */
		if (!tb->item_pos && !tb->pos_in_item) {
			RFALSE(!tb->CFL[0] || !tb->L[0],
			       "PAP-12270: CFL[0]/L[0] must be specified");
			if (tb->CFL[0])
				replace_key(tb, tb->CFL[0], tb->lkey[0],
					    tbS0, 0);
		}

		tb->insert_size[0] = 0;
	}
}

/* append the pasted bytes to the item that remains in S[0] */
static void balance_leaf_finish_node_paste(struct tree_balance *tb,
					   struct item_head *ih,
					   const char *body)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	struct buffer_info bi;
	struct item_head *pasted = item_head(tbS0, tb->item_pos);

	/* when directory, may be new entry already pasted */
	if (is_direntry_le_ih(pasted)) {
		balance_leaf_finish_node_paste_dirent(tb, ih, body);
		return;
	}

	/* regular object */

	if (tb->pos_in_item == ih_item_len(pasted)) {
		RFALSE(tb->insert_size[0] <= 0,
		       "PAP-12275: insert size must not be %d",
		       tb->insert_size[0]);
		buffer_info_init_tbS0(tb, &bi);
		leaf_paste_in_buffer(&bi, tb->item_pos,
				     tb->pos_in_item, tb->insert_size[0], body,
				     tb->zeroes_num);

		if (is_indirect_le_ih(pasted))
			set_ih_free_space(pasted, 0);

		tb->insert_size[0] = 0;
	}
#ifdef CONFIG_REISERFS_CHECK
	else if (tb->insert_size[0]) {
		print_cur_tb("12285");
		reiserfs_panic(tb->tb_sb, "PAP-12285",
			       "insert_size must be 0 (%d)", tb->insert_size[0]);
	}
#endif
}

/*
 * if the affected item was not wholly shifted then we
 * perform all necessary operations on that part or whole
 * of the affected item which remains in S
 */
static void balance_leaf_finish_node(struct tree_balance *tb,
				     struct item_head *ih,
				     const char *body, int flag)
{
	/* if we must insert or append into buffer S[0] */
	if (0 <= tb->item_pos && tb->item_pos < tb->s0num) {
		if (flag == M_INSERT)
			balance_leaf_finish_node_insert(tb, ih, body);
		else		/* M_PASTE */
			balance_leaf_finish_node_paste(tb, ih, body);
	}
}

/**
 * balance_leaf - reiserfs tree balancing algorithm
 * @tb: tree balance state
 * @ih: item header of inserted item (little endian)
 * @body: body of inserted item or bytes to paste
 * @flag: i - insert, d - delete, c - cut, p - paste (see do_balance)
 * passed back:
 * @insert_key: key to insert new nodes
 * @insert_ptr: array of nodes to insert at the next level
 *
 * In our processing of one level we sometimes determine what must be
 * inserted into the next higher level.  This insertion consists of a
 * key or two keys and their corresponding pointers.
 */
static int balance_leaf(struct tree_balance *tb, struct item_head *ih,
			const char *body, int flag,
			struct item_head *insert_key,
			struct buffer_head **insert_ptr)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);

	PROC_INFO_INC(tb->tb_sb, balance_at[0]);

	/* Make balance in case insert_size[0] < 0 */
	if (tb->insert_size[0] < 0)
		return balance_leaf_when_delete(tb, flag);

	tb->item_pos = PATH_LAST_POSITION(tb->tb_path),
	tb->pos_in_item = tb->tb_path->pos_in_item,
	tb->zeroes_num = 0;
	/* insert with no body supplied: the whole item is zero-filled */
	if (flag == M_INSERT && !body)
		tb->zeroes_num = ih_item_len(ih);

	/*
	 * for indirect item pos_in_item is measured in unformatted node
	 * pointers. Recalculate to bytes
	 */
	if (flag != M_INSERT
	    && is_indirect_le_ih(item_head(tbS0, tb->item_pos)))
		tb->pos_in_item *= UNFM_P_SIZE;

	balance_leaf_left(tb, ih, body, flag);

	/* tb->lnum[0] > 0 */
	/* Calculate new item position */
	tb->item_pos -= (tb->lnum[0] - ((tb->lbytes != -1) ?
					1 : 0));

	balance_leaf_right(tb, ih, body, flag);

	/* tb->rnum[0] > 0 */
	RFALSE(tb->blknum[0] > 3,
	       "PAP-12180: blknum can not be %d. It must be <= 3", tb->blknum[0]);
	RFALSE(tb->blknum[0] < 0,
	       "PAP-12185: blknum can not be %d. It must be >= 0", tb->blknum[0]);

	/*
	 * if while adding to a node we discover that it is possible to split
	 * it in two, and merge the left part into the left neighbor and the
	 * right part into the right neighbor, eliminating the node
	 */
	if (tb->blknum[0] == 0) {	/* node S[0] is empty now */

		RFALSE(!tb->lnum[0] || !tb->rnum[0],
		       "PAP-12190: lnum and rnum must not be zero");
		/*
		 * if insertion was done before 0-th position in R[0], right
		 * delimiting key of the tb->L[0]'s and left delimiting key are
		 * not set correctly
		 */
		if (tb->CFL[0]) {
			if (!tb->CFR[0])
				reiserfs_panic(tb->tb_sb, "vs-12195",
					       "CFR not initialized");
			copy_key(internal_key(tb->CFL[0], tb->lkey[0]),
				 internal_key(tb->CFR[0], tb->rkey[0]));
			do_balance_mark_internal_dirty(tb, tb->CFL[0], 0);
		}

		reiserfs_invalidate_buffer(tb, tbS0);
		return 0;
	}

	balance_leaf_new_nodes(tb, ih, body, insert_key, insert_ptr, flag);

	balance_leaf_finish_node(tb, ih, body, flag);

#ifdef CONFIG_REISERFS_CHECK
	if (flag == M_PASTE && tb->insert_size[0]) {
		print_cur_tb("12290");
		reiserfs_panic(tb->tb_sb,
			       "PAP-12290", "insert_size is still not 0 (%d)",
			       tb->insert_size[0]);
	}
#endif

	/* Leaf level of the tree is balanced (end of balance_leaf) */
	return 0;
}

/* Make empty node */
void make_empty_node(struct buffer_info *bi)
{
	struct block_head *blkh;

	RFALSE(bi->bi_bh == NULL, "PAP-12295: pointer to the buffer is NULL");

	blkh = B_BLK_HEAD(bi->bi_bh);
	set_blkh_nr_item(blkh, 0);
	set_blkh_free_space(blkh, MAX_CHILD_SIZE(bi->bi_bh));

	if (bi->bi_parent)
		B_N_CHILD(bi->bi_parent, bi->bi_position)->dc_size = 0;	/* Endian safe if 0 */
}

/* Get first empty buffer */
struct buffer_head *get_FEB(struct tree_balance *tb)
{
	int i;
	struct buffer_info bi;

	/* find the first preallocated buffer that has not been handed out */
	for (i = 0; i < MAX_FEB_SIZE; i++)
		if (tb->FEB[i] != NULL)
			break;

	if (i == MAX_FEB_SIZE)
		reiserfs_panic(tb->tb_sb, "vs-12300", "FEB list is empty");

	buffer_info_init_bh(tb, &bi, tb->FEB[i]);
	make_empty_node(&bi);
	set_buffer_uptodate(tb->FEB[i]);
	/* move it from the free list to the used list */
	tb->used[i] = tb->FEB[i];
	tb->FEB[i] = NULL;

	return tb->used[i];
}

/* This is now used because reiserfs_free_block has to be able to schedule. */
static void store_thrown(struct tree_balance *tb, struct buffer_head *bh)
{
	int i;

	if (buffer_dirty(bh))
		reiserfs_warning(tb->tb_sb, "reiserfs-12320",
				 "called with dirty buffer");
	for (i = 0; i < ARRAY_SIZE(tb->thrown); i++)
		if (!tb->thrown[i]) {
			tb->thrown[i] = bh;
			get_bh(bh);	/* free_thrown puts this */
			return;
		}
	reiserfs_warning(tb->tb_sb, "reiserfs-12321",
			 "too many thrown buffers");
}

/* release and free every block stashed by store_thrown() */
static void free_thrown(struct tree_balance *tb)
{
	int i;
	b_blocknr_t blocknr;
	for (i = 0; i < ARRAY_SIZE(tb->thrown); i++) {
		if (tb->thrown[i]) {
			blocknr = tb->thrown[i]->b_blocknr;
			if (buffer_dirty(tb->thrown[i]))
				/*
				 * NOTE(review): %d with a b_blocknr_t argument
				 * looks like a format mismatch — confirm
				 * against the printk format for b_blocknr_t.
				 */
				reiserfs_warning(tb->tb_sb, "reiserfs-12322",
						 "called with dirty buffer %d",
						 blocknr);
			brelse(tb->thrown[i]);	/* incremented in store_thrown */
			reiserfs_free_block(tb->transaction_handle, NULL,
					    blocknr, 0);
		}
	}
}

/* mark a node as free and queue it for release after balancing ends */
void reiserfs_invalidate_buffer(struct tree_balance *tb, struct buffer_head *bh)
{
	struct block_head *blkh;
	blkh = B_BLK_HEAD(bh);
	set_blkh_level(blkh, FREE_LEVEL);
	set_blkh_nr_item(blkh, 0);

	clear_buffer_dirty(bh);
	store_thrown(tb, bh);
}

/* Replace n_dest'th key in buffer dest by n_src'th key of buffer src.*/
void replace_key(struct tree_balance *tb, struct buffer_head *dest, int n_dest,
		 struct buffer_head *src, int n_src)
{

	RFALSE(dest == NULL || src == NULL,
	       "vs-12305: source or destination buffer is 0 (src=%p, dest=%p)",
	       src, dest);
	RFALSE(!B_IS_KEYS_LEVEL(dest),
	       "vs-12310: invalid level (%z) for destination buffer. dest must be leaf",
	       dest);
	RFALSE(n_dest < 0 || n_src < 0,
	       "vs-12315: src(%d) or dest(%d) key number < 0", n_src, n_dest);
	RFALSE(n_dest >= B_NR_ITEMS(dest) || n_src >= B_NR_ITEMS(src),
	       "vs-12320: src(%d(%d)) or dest(%d(%d)) key number is too big",
	       n_src, B_NR_ITEMS(src), n_dest, B_NR_ITEMS(dest));

	if (B_IS_ITEMS_LEVEL(src))
		/* source buffer contains leaf node */
		memcpy(internal_key(dest, n_dest), item_head(src, n_src),
		       KEY_SIZE);
	else
		memcpy(internal_key(dest, n_dest), internal_key(src, n_src),
		       KEY_SIZE);

	do_balance_mark_internal_dirty(tb, dest, 0);
}

/* position of the left neighbor's pointer within the common parent FL[h] */
int get_left_neighbor_position(struct tree_balance *tb, int h)
{
	int Sh_position = PATH_H_POSITION(tb->tb_path, h + 1);

	RFALSE(PATH_H_PPARENT(tb->tb_path, h) == NULL || tb->FL[h] == NULL,
	       "vs-12325: FL[%d](%p) or F[%d](%p) does not exist",
	       h, tb->FL[h], h, PATH_H_PPARENT(tb->tb_path, h));

	/* S is leftmost in its parent: left neighbor is FL's last child */
	if (Sh_position == 0)
		return B_NR_ITEMS(tb->FL[h]);
	else
		return Sh_position - 1;
}

/* position of the right neighbor's pointer within the common parent FR[h] */
int get_right_neighbor_position(struct tree_balance *tb, int h)
{
	int Sh_position = PATH_H_POSITION(tb->tb_path, h + 1);

	RFALSE(PATH_H_PPARENT(tb->tb_path, h) == NULL || tb->FR[h] == NULL,
	       "vs-12330: F[%d](%p) or FR[%d](%p) does not exist",
	       h, PATH_H_PPARENT(tb->tb_path, h), h, tb->FR[h]);

	/* S is rightmost in its parent: right neighbor is FR's first child */
	if (Sh_position == B_NR_ITEMS(PATH_H_PPARENT(tb->tb_path, h)))
		return 0;
	else
		return Sh_position + 1;
}

#ifdef CONFIG_REISERFS_CHECK

int is_reusable(struct super_block *s, b_blocknr_t block, int bit_value);
/* debug check: every child pointer of an internal node must be valid */
static void check_internal_node(struct super_block *s, struct buffer_head *bh,
				char *mes)
{
	struct disk_child *dc;
	int i;

	RFALSE(!bh, "PAP-12336: bh == 0");

	if (!bh || !B_IS_IN_TREE(bh))
		return;

	RFALSE(!buffer_dirty(bh) &&
	       !(buffer_journaled(bh) || buffer_journal_dirty(bh)),
	       "PAP-12337: buffer (%b) must be dirty", bh);
	dc = B_N_CHILD(bh, 0);

	for (i = 0; i <= B_NR_ITEMS(bh); i++, dc++) {
		if (!is_reusable(s, dc_block_number(dc), 1)) {
			print_cur_tb(mes);
			reiserfs_panic(s, "PAP-12338",
				       "invalid child pointer %y in %b",
				       dc, bh);
		}
	}
}

/* warn and return 1 if bh is locked (unprepared) or detached from the tree */
static int locked_or_not_in_tree(struct tree_balance *tb,
				 struct buffer_head *bh, char *which)
{
	if ((!buffer_journal_prepared(bh) && buffer_locked(bh)) ||
	    !B_IS_IN_TREE(bh)) {
		reiserfs_warning(tb->tb_sb, "vs-12339", "%s (%b)", which, bh);
		return 1;
	}
	return 0;
}

/* debug check run before balancing: all touched buffers must be usable */
static int check_before_balancing(struct tree_balance *tb)
{
	int retval = 0;

	if (REISERFS_SB(tb->tb_sb)->cur_tb) {
		reiserfs_panic(tb->tb_sb, "vs-12335", "suspect that schedule "
			       "occurred based on cur_tb not being null at "
			       "this point in code. do_balance cannot properly "
			       "handle concurrent tree accesses on a same "
			       "mount point.");
	}

	/*
	 * double check that buffers that we will modify are unlocked.
	 * (fix_nodes should already have prepped all of these for us).
	 */
	if (tb->lnum[0]) {
		retval |= locked_or_not_in_tree(tb, tb->L[0], "L[0]");
		retval |= locked_or_not_in_tree(tb, tb->FL[0], "FL[0]");
		retval |= locked_or_not_in_tree(tb, tb->CFL[0], "CFL[0]");
		check_leaf(tb->L[0]);
	}
	if (tb->rnum[0]) {
		retval |= locked_or_not_in_tree(tb, tb->R[0], "R[0]");
		retval |= locked_or_not_in_tree(tb, tb->FR[0], "FR[0]");
		retval |= locked_or_not_in_tree(tb, tb->CFR[0], "CFR[0]");
		check_leaf(tb->R[0]);
	}
	retval |= locked_or_not_in_tree(tb, PATH_PLAST_BUFFER(tb->tb_path),
					"S[0]");
	check_leaf(PATH_PLAST_BUFFER(tb->tb_path));

	return retval;
}

/*
 * debug check: the free space of L[0], R[0] and S[0] must agree with
 * the dc_size values recorded in their parents after the leaf level
 * has been balanced
 */
static void check_after_balance_leaf(struct tree_balance *tb)
{
	if (tb->lnum[0]) {
		if (B_FREE_SPACE(tb->L[0]) !=
		    MAX_CHILD_SIZE(tb->L[0]) -
		    dc_size(B_N_CHILD
			    (tb->FL[0], get_left_neighbor_position(tb, 0)))) {
			print_cur_tb("12221");
			reiserfs_panic(tb->tb_sb, "PAP-12355",
				       "shift to left was incorrect");
		}
	}
	if (tb->rnum[0]) {
		if (B_FREE_SPACE(tb->R[0]) !=
		    MAX_CHILD_SIZE(tb->R[0]) -
		    dc_size(B_N_CHILD
			    (tb->FR[0], get_right_neighbor_position(tb, 0)))) {
			print_cur_tb("12222");
			reiserfs_panic(tb->tb_sb, "PAP-12360",
				       "shift to right was incorrect");
		}
	}
	if (PATH_H_PBUFFER(tb->tb_path, 1) &&
	    (B_FREE_SPACE(PATH_H_PBUFFER(tb->tb_path, 0)) !=
	     (MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, 0)) -
	      dc_size(B_N_CHILD(PATH_H_PBUFFER(tb->tb_path, 1),
				PATH_H_POSITION(tb->tb_path, 1)))))) {
		int left = B_FREE_SPACE(PATH_H_PBUFFER(tb->tb_path, 0));
		int right = (MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, 0)) -
			     dc_size(B_N_CHILD(PATH_H_PBUFFER(tb->tb_path, 1),
					       PATH_H_POSITION(tb->tb_path,
							       1))));
		print_cur_tb("12223");
		reiserfs_warning(tb->tb_sb, "reiserfs-12363",
				 "B_FREE_SPACE (PATH_H_PBUFFER(tb->tb_path,0)) = %d; "
				 "MAX_CHILD_SIZE (%d) - dc_size( %y, %d ) [%d] = %d",
				 left,
				 MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, 0)),
				 PATH_H_PBUFFER(tb->tb_path, 1),
				 PATH_H_POSITION(tb->tb_path, 1),
				 dc_size(B_N_CHILD
					 (PATH_H_PBUFFER(tb->tb_path, 1),
					  PATH_H_POSITION(tb->tb_path, 1))),
				 right);
		reiserfs_panic(tb->tb_sb, "PAP-12365", "S is incorrect");
	}
}

/* debug check: sanity of the three leaves touched by balancing */
static void check_leaf_level(struct tree_balance *tb)
{
	check_leaf(tb->L[0]);
	check_leaf(tb->R[0]);
	check_leaf(PATH_PLAST_BUFFER(tb->tb_path));
}

/* debug check: every internal node changed on each balanced level */
static void check_internal_levels(struct tree_balance *tb)
{
	int h;

	/* check all internal nodes */
	for (h = 1; tb->insert_size[h]; h++) {
		check_internal_node(tb->tb_sb, PATH_H_PBUFFER(tb->tb_path, h),
				    "BAD BUFFER ON PATH");
		if (tb->lnum[h])
			check_internal_node(tb->tb_sb, tb->L[h], "BAD L");
		if (tb->rnum[h])
			check_internal_node(tb->tb_sb, tb->R[h], "BAD R");
	}

}

#endif

/*
 * Now we have all of the buffers that must be used in balancing of
 * the tree.  We rely on the assumption that schedule() will not occur
 * while do_balance works. ( Only interrupt handlers are acceptable.)
 * We balance the tree according to the analysis made before this,
 * using buffers already obtained.  For SMP support it will someday be
 * necessary to add ordered locking of tb.
 */

/*
 * Some interesting rules of balancing:
 * we delete a maximum of two nodes per level per balancing: we never
 * delete R, when we delete two of three nodes L, S, R then we move
 * them into R.
1765 * 1766 * we only delete L if we are deleting two nodes, if we delete only 1767 * one node we delete S 1768 * 1769 * if we shift leaves then we shift as much as we can: this is a 1770 * deliberate policy of extremism in node packing which results in 1771 * higher average utilization after repeated random balance operations 1772 * at the cost of more memory copies and more balancing as a result of 1773 * small insertions to full nodes. 1774 * 1775 * if we shift internal nodes we try to evenly balance the node 1776 * utilization, with consequent less balancing at the cost of lower 1777 * utilization. 1778 * 1779 * one could argue that the policy for directories in leaves should be 1780 * that of internal nodes, but we will wait until another day to 1781 * evaluate this.... It would be nice to someday measure and prove 1782 * these assumptions as to what is optimal.... 1783 */ 1784 1785 static inline void do_balance_starts(struct tree_balance *tb) 1786 { 1787 /* use print_cur_tb() to see initial state of struct tree_balance */ 1788 1789 /* store_print_tb (tb); */ 1790 1791 /* do not delete, just comment it out */ 1792 /* 1793 print_tb(flag, PATH_LAST_POSITION(tb->tb_path), 1794 tb->tb_path->pos_in_item, tb, "check"); 1795 */ 1796 RFALSE(check_before_balancing(tb), "PAP-12340: locked buffers in TB"); 1797 #ifdef CONFIG_REISERFS_CHECK 1798 REISERFS_SB(tb->tb_sb)->cur_tb = tb; 1799 #endif 1800 } 1801 1802 static inline void do_balance_completed(struct tree_balance *tb) 1803 { 1804 1805 #ifdef CONFIG_REISERFS_CHECK 1806 check_leaf_level(tb); 1807 check_internal_levels(tb); 1808 REISERFS_SB(tb->tb_sb)->cur_tb = NULL; 1809 #endif 1810 1811 /* 1812 * reiserfs_free_block is no longer schedule safe. 
So, we need to 1813 * put the buffers we want freed on the thrown list during do_balance, 1814 * and then free them now 1815 */ 1816 1817 REISERFS_SB(tb->tb_sb)->s_do_balance++; 1818 1819 /* release all nodes hold to perform the balancing */ 1820 unfix_nodes(tb); 1821 1822 free_thrown(tb); 1823 } 1824 1825 /* 1826 * do_balance - balance the tree 1827 * 1828 * @tb: tree_balance structure 1829 * @ih: item header of inserted item 1830 * @body: body of inserted item or bytes to paste 1831 * @flag: 'i' - insert, 'd' - delete, 'c' - cut, 'p' paste 1832 * 1833 * Cut means delete part of an item (includes removing an entry from a 1834 * directory). 1835 * 1836 * Delete means delete whole item. 1837 * 1838 * Insert means add a new item into the tree. 1839 * 1840 * Paste means to append to the end of an existing file or to 1841 * insert a directory entry. 1842 */ 1843 void do_balance(struct tree_balance *tb, struct item_head *ih, 1844 const char *body, int flag) 1845 { 1846 int child_pos; /* position of a child node in its parent */ 1847 int h; /* level of the tree being processed */ 1848 1849 /* 1850 * in our processing of one level we sometimes determine what 1851 * must be inserted into the next higher level. 
This insertion 1852 * consists of a key or two keys and their corresponding 1853 * pointers 1854 */ 1855 struct item_head insert_key[2]; 1856 1857 /* inserted node-ptrs for the next level */ 1858 struct buffer_head *insert_ptr[2]; 1859 1860 tb->tb_mode = flag; 1861 tb->need_balance_dirty = 0; 1862 1863 if (FILESYSTEM_CHANGED_TB(tb)) { 1864 reiserfs_panic(tb->tb_sb, "clm-6000", "fs generation has " 1865 "changed"); 1866 } 1867 /* if we have no real work to do */ 1868 if (!tb->insert_size[0]) { 1869 reiserfs_warning(tb->tb_sb, "PAP-12350", 1870 "insert_size == 0, mode == %c", flag); 1871 unfix_nodes(tb); 1872 return; 1873 } 1874 1875 atomic_inc(&fs_generation(tb->tb_sb)); 1876 do_balance_starts(tb); 1877 1878 /* 1879 * balance_leaf returns 0 except if combining L R and S into 1880 * one node. see balance_internal() for explanation of this 1881 * line of code. 1882 */ 1883 child_pos = PATH_H_B_ITEM_ORDER(tb->tb_path, 0) + 1884 balance_leaf(tb, ih, body, flag, insert_key, insert_ptr); 1885 1886 #ifdef CONFIG_REISERFS_CHECK 1887 check_after_balance_leaf(tb); 1888 #endif 1889 1890 /* Balance internal level of the tree. */ 1891 for (h = 1; h < MAX_HEIGHT && tb->insert_size[h]; h++) 1892 child_pos = balance_internal(tb, h, child_pos, insert_key, 1893 insert_ptr); 1894 1895 do_balance_completed(tb); 1896 } 1897