xfs_itable.c: acf403ecc4155153e5e2c1640be90fc166e56ba7 vs. 6d3ebaae7c20128bfa6965a8d5cee0d1deea8486
("-" lines appear only in the acf403ecc4155 version, "+" lines only in the 6d3ebaae7c20 version, unprefixed lines are common to both)
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
--- 19 unchanged lines hidden ---
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
-#include "xfs_dinode.h"

STATIC int
xfs_internal_inum(
	xfs_mount_t	*mp,
	xfs_ino_t	ino)
{
	return (ino == mp->m_sb.sb_rbmino || ino == mp->m_sb.sb_rsumino ||
		(xfs_sb_version_hasquota(&mp->m_sb) &&
--- 186 unchanged lines hidden ---

	/* Get the record, should always work */
	error = xfs_inobt_get_rec(cur, irec, &stat);
	if (error)
		return error;
	XFS_WANT_CORRUPTED_RETURN(stat == 1);

	/* Check if the record contains the inode in request */
-	if (irec->ir_startino + XFS_INODES_PER_CHUNK <= agino) {
-		*icount = 0;
-		return 0;
-	}
+	if (irec->ir_startino + XFS_INODES_PER_CHUNK <= agino)
+		return -EINVAL;

	idx = agino - irec->ir_startino + 1;
	if (idx < XFS_INODES_PER_CHUNK &&
	    (xfs_inobt_maskn(idx, XFS_INODES_PER_CHUNK - idx) & ~irec->ir_free)) {
		int	i;

		/* We got a right chunk with some left inodes allocated at it.
		 * Grab the chunk record.  Mark all the uninteresting inodes
--- 8 unchanged lines hidden ---
		*icount = XFS_INODES_PER_CHUNK - irec->ir_freecount;
	}

	return 0;
}

#define XFS_BULKSTAT_UBLEFT(ubleft)	((ubleft) >= statstruct_size)

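Both versions of xfs_bulkstat_grab_ichunk() above resume inside a partially processed inode chunk by testing a 64-bit free mask: bit i of ir_free is set when inode (ir_startino + i) is free, XFS_INOBT_MASK(i) selects a single slot, and xfs_inobt_maskn(idx, n) selects the n slots from idx upward, which lets the code ask whether anything at or after the resume point is still allocated. The standalone sketch below walks through that mask arithmetic outside the kernel; the helper names (mask1, maskn) and the sample values are illustrative assumptions, not kernel code.

```c
/*
 * Standalone sketch (not kernel code): how a 64-bit inobt free mask is
 * consumed when resuming a bulkstat inside an inode chunk.  Bit i set in
 * free_mask means inode (startino + i) is unallocated, mirroring ir_free
 * in struct xfs_inobt_rec_incore.  mask1()/maskn() model XFS_INOBT_MASK()
 * and xfs_inobt_maskn(); all names and values here are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define INODES_PER_CHUNK	64	/* XFS_INODES_PER_CHUNK */

static uint64_t mask1(int i)		/* ~ XFS_INOBT_MASK(i) */
{
	return (uint64_t)1 << i;
}

static uint64_t maskn(int i, int n)	/* ~ xfs_inobt_maskn(i, n) */
{
	uint64_t m = (n >= 64) ? ~0ULL : (((uint64_t)1 << n) - 1);

	return m << i;
}

int main(void)
{
	uint64_t startino = 128;	/* first inode of the chunk */
	/* bits 3, 7 and 40 clear: those three inodes are allocated */
	uint64_t free_mask = ~0ULL & ~(mask1(3) | mask1(7) | mask1(40));
	uint64_t agino = 131;		/* resume cursor: startino + 3 */
	int idx = (int)(agino - startino) + 1;	/* first slot after the cursor */

	/* Anything allocated at or beyond idx?  Same test grab_ichunk does. */
	if (idx < INODES_PER_CHUNK &&
	    (maskn(idx, INODES_PER_CHUNK - idx) & ~free_mask)) {
		for (int i = idx; i < INODES_PER_CHUNK; i++) {
			if (mask1(i) & free_mask)
				continue;	/* free slot, nothing to stat */
			printf("would stat inode %llu\n",
			       (unsigned long long)(startino + i));
		}
	}
	return 0;
}
```

The same single-slot test is what lets xfs_bulkstat_ag_ichunk() below skip free inodes with XFS_INOBT_MASK(chunkidx) & irbp->ir_free.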
-struct xfs_bulkstat_agichunk {
-	char		__user **ac_ubuffer;/* pointer into user's buffer */
-	int		ac_ubleft;	/* bytes left in user's buffer */
-	int		ac_ubelem;	/* spaces used in user's buffer */
-};
-
/*
 * Process inodes in chunk with a pointer to a formatter function
 * that will iget the inode and fill in the appropriate structure.
 */
-static int
+int
xfs_bulkstat_ag_ichunk(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	struct xfs_inobt_rec_incore *irbp,
	bulkstat_one_pf		formatter,
	size_t			statstruct_size,
-	struct xfs_bulkstat_agichunk *acp,
-	xfs_agino_t		*last_agino)
+	struct xfs_bulkstat_agichunk *acp)
{
+	xfs_ino_t		lastino = acp->ac_lastino;
	char			__user **ubufp = acp->ac_ubuffer;
-	int			chunkidx;
+	int			ubleft = acp->ac_ubleft;
+	int			ubelem = acp->ac_ubelem;
+	int			chunkidx, clustidx;
	int			error = 0;
-	xfs_agino_t		agino = irbp->ir_startino;
+	xfs_agino_t		agino;

-	for (chunkidx = 0; chunkidx < XFS_INODES_PER_CHUNK;
-	     chunkidx++, agino++) {
-		int		fmterror;
+	for (agino = irbp->ir_startino, chunkidx = clustidx = 0;
+	     XFS_BULKSTAT_UBLEFT(ubleft) &&
+	     irbp->ir_freecount < XFS_INODES_PER_CHUNK;
+	     chunkidx++, clustidx++, agino++) {
+		int		fmterror;	/* bulkstat formatter result */
		int		ubused;
+		xfs_ino_t	ino = XFS_AGINO_TO_INO(mp, agno, agino);

-		/* inode won't fit in buffer, we are done */
-		if (acp->ac_ubleft < statstruct_size)
-			break;
+		ASSERT(chunkidx < XFS_INODES_PER_CHUNK);

		/* Skip if this inode is free */
-		if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free)
+		if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free) {
+			lastino = ino;
			continue;
+		}

+		/*
+		 * Count used inodes as free so we can tell when the
+		 * chunk is used up.
+		 */
+		irbp->ir_freecount++;
+
		/* Get the inode and fill in a single buffer */
		ubused = statstruct_size;
-		error = formatter(mp, XFS_AGINO_TO_INO(mp, agno, agino),
-				  *ubufp, acp->ac_ubleft, &ubused, &fmterror);
-
-		if (fmterror == BULKSTAT_RV_GIVEUP ||
-		    (error && error != -ENOENT && error != -EINVAL)) {
-			acp->ac_ubleft = 0;
+		error = formatter(mp, ino, *ubufp, ubleft, &ubused, &fmterror);
+		if (fmterror == BULKSTAT_RV_NOTHING) {
+			if (error && error != -ENOENT && error != -EINVAL) {
+				ubleft = 0;
+				break;
+			}
+			lastino = ino;
+			continue;
+		}
+		if (fmterror == BULKSTAT_RV_GIVEUP) {
+			ubleft = 0;
			ASSERT(error);
			break;
		}
-
-		/* be careful not to leak error if at end of chunk */
-		if (fmterror == BULKSTAT_RV_NOTHING || error) {
-			error = 0;
-			continue;
-		}
-
-		*ubufp += ubused;
-		acp->ac_ubleft -= ubused;
-		acp->ac_ubelem++;
+		if (*ubufp)
+			*ubufp += ubused;
+		ubleft -= ubused;
+		ubelem++;
+		lastino = ino;
	}

-	/*
-	 * Post-update *last_agino. At this point, agino will always point one
-	 * inode past the last inode we processed successfully. Hence we
-	 * subtract that inode when setting the *last_agino cursor so that we
-	 * return the correct cookie to userspace. On the next bulkstat call,
-	 * the inode under the lastino cookie will be skipped as we have already
-	 * processed it here.
-	 */
-	*last_agino = agino - 1;
+	acp->ac_lastino = lastino;
+	acp->ac_ubleft = ubleft;
+	acp->ac_ubelem = ubelem;

	return error;
}

/*
 * Return stat information in bulk (by-inode) for the filesystem.
 */
int					/* error status */
--- 6 unchanged lines hidden ---
	char			__user *ubuffer, /* buffer with inode stats */
	int			*done)	/* 1 if there are more stats to get */
{
	xfs_buf_t		*agbp;	/* agi header buffer */
	xfs_agi_t		*agi;	/* agi header data */
	xfs_agino_t		agino;	/* inode # in allocation group */
	xfs_agnumber_t		agno;	/* allocation group number */
	xfs_btree_cur_t		*cur;	/* btree cursor for ialloc btree */
+	int			end_of_ag; /* set if we've seen the ag end */
+	int			error;	/* error code */
+	int			fmterror;/* bulkstat formatter result */
+	int			i;	/* loop index */
+	int			icount;	/* count of inodes good in irbuf */
	size_t			irbsize; /* size of irec buffer in bytes */
+	xfs_ino_t		ino;	/* inode number (filesystem) */
+	xfs_inobt_rec_incore_t	*irbp;	/* current irec buffer pointer */
	xfs_inobt_rec_incore_t	*irbuf;	/* start of irec buffer */
+	xfs_inobt_rec_incore_t	*irbufend; /* end of good irec buffer entries */
+	xfs_ino_t		lastino; /* last inode number returned */
	int			nirbuf;	/* size of irbuf */
+	int			rval;	/* return value error code */
+	int			tmp;	/* result value from btree calls */
	int			ubcount; /* size of user's buffer */
-	struct xfs_bulkstat_agichunk ac;
-	int			error = 0;
+	int			ubleft;	/* bytes left in user's buffer */
+	char			__user *ubufp;	/* pointer into user's buffer */
+	int			ubelem;	/* spaces used in user's buffer */

	/*
	 * Get the last inode value, see if there's nothing to do.
	 */
-	agno = XFS_INO_TO_AGNO(mp, *lastinop);
-	agino = XFS_INO_TO_AGINO(mp, *lastinop);
+	ino = (xfs_ino_t)*lastinop;
+	lastino = ino;
+	agno = XFS_INO_TO_AGNO(mp, ino);
+	agino = XFS_INO_TO_AGINO(mp, ino);
	if (agno >= mp->m_sb.sb_agcount ||
-	    *lastinop != XFS_AGINO_TO_INO(mp, agno, agino)) {
+	    ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
		*done = 1;
		*ubcountp = 0;
		return 0;
	}

	ubcount = *ubcountp; /* statstruct's */
-	ac.ac_ubuffer = &ubuffer;
-	ac.ac_ubleft = ubcount * statstruct_size; /* bytes */;
-	ac.ac_ubelem = 0;
-
-	*ubcountp = 0;
+	ubleft = ubcount * statstruct_size; /* bytes */
+	*ubcountp = ubelem = 0;
	*done = 0;
-
+	fmterror = 0;
+	ubufp = ubuffer;
	irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
	if (!irbuf)
		return -ENOMEM;

	nirbuf = irbsize / sizeof(*irbuf);

	/*
	 * Loop over the allocation groups, starting from the last
	 * inode returned; 0 means start of the allocation group.
	 */
-	while (agno < mp->m_sb.sb_agcount) {
-		struct xfs_inobt_rec_incore *irbp = irbuf;
-		struct xfs_inobt_rec_incore *irbufend = irbuf + nirbuf;
-		bool	end_of_ag = false;
-		int	icount = 0;
-		int	stat;
-
+	rval = 0;
+	while (XFS_BULKSTAT_UBLEFT(ubleft) && agno < mp->m_sb.sb_agcount) {
+		cond_resched();
		error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
		if (error)
			break;
		agi = XFS_BUF_TO_AGI(agbp);
		/*
		 * Allocate and initialize a btree cursor for ialloc btree.
		 */
		cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno,
					    XFS_BTNUM_INO);
+		irbp = irbuf;
+		irbufend = irbuf + nirbuf;
+		end_of_ag = 0;
+		icount = 0;
		if (agino > 0) {
			/*
			 * In the middle of an allocation group, we need to get
			 * the remainder of the chunk we're in.
			 */
			struct xfs_inobt_rec_incore r;

			error = xfs_bulkstat_grab_ichunk(cur, agino, &icount, &r);
			if (error)
-				goto del_cursor;
+				break;
			if (icount) {
				irbp->ir_startino = r.ir_startino;
				irbp->ir_freecount = r.ir_freecount;
				irbp->ir_free = r.ir_free;
				irbp++;
+				agino = r.ir_startino + XFS_INODES_PER_CHUNK;
			}
			/* Increment to the next record */
-			error = xfs_btree_increment(cur, 0, &stat);
+			error = xfs_btree_increment(cur, 0, &tmp);
		} else {
			/* Start of ag.  Lookup the first inode chunk */
-			error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &stat);
+			error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &tmp);
		}
-		if (error || stat == 0) {
-			end_of_ag = true;
-			goto del_cursor;
-		}
+		if (error)
+			break;

		/*
		 * Loop through inode btree records in this ag,
		 * until we run out of inodes or space in the buffer.
		 */
		while (irbp < irbufend && icount < ubcount) {
			struct xfs_inobt_rec_incore r;

-			error = xfs_inobt_get_rec(cur, &r, &stat);
-			if (error || stat == 0) {
-				end_of_ag = true;
-				goto del_cursor;
+			error = xfs_inobt_get_rec(cur, &r, &i);
+			if (error || i == 0) {
+				end_of_ag = 1;
+				break;
			}

			/*
			 * If this chunk has any allocated inodes, save it.
			 * Also start read-ahead now for this chunk.
			 */
			if (r.ir_freecount < XFS_INODES_PER_CHUNK) {
				xfs_bulkstat_ichunk_ra(mp, agno, &r);
				irbp->ir_startino = r.ir_startino;
				irbp->ir_freecount = r.ir_freecount;
				irbp->ir_free = r.ir_free;
				irbp++;
				icount += XFS_INODES_PER_CHUNK - r.ir_freecount;
			}
-			error = xfs_btree_increment(cur, 0, &stat);
-			if (error || stat == 0) {
-				end_of_ag = true;
-				goto del_cursor;
-			}
+			/*
+			 * Set agino to after this chunk and bump the cursor.
+			 */
+			agino = r.ir_startino + XFS_INODES_PER_CHUNK;
+			error = xfs_btree_increment(cur, 0, &tmp);
			cond_resched();
		}
-
		/*
-		 * Drop the btree buffers and the agi buffer as we can't hold any
-		 * of the locks these represent when calling iget. If there is a
-		 * pending error, then we are done.
+		 * Drop the btree buffers and the agi buffer.
+		 * We can't hold any of the locks these represent
+		 * when calling iget.
		 */
-del_cursor:
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
		xfs_buf_relse(agbp);
-		if (error)
-			break;
		/*
-		 * Now format all the good inodes into the user's buffer. The
-		 * call to xfs_bulkstat_ag_ichunk() sets up the agino pointer
-		 * for the next loop iteration.
+		 * Now format all the good inodes into the user's buffer.
		 */
		irbufend = irbp;
		for (irbp = irbuf;
-		     irbp < irbufend && ac.ac_ubleft >= statstruct_size;
-		     irbp++) {
+		     irbp < irbufend && XFS_BULKSTAT_UBLEFT(ubleft); irbp++) {
+			struct xfs_bulkstat_agichunk ac;
+
+			ac.ac_lastino = lastino;
+			ac.ac_ubuffer = &ubuffer;
+			ac.ac_ubleft = ubleft;
+			ac.ac_ubelem = ubelem;
			error = xfs_bulkstat_ag_ichunk(mp, agno, irbp,
-					formatter, statstruct_size, &ac,
-					&agino);
+					formatter, statstruct_size, &ac);
			if (error)
-				break;
+				rval = error;

+			lastino = ac.ac_lastino;
+			ubleft = ac.ac_ubleft;
+			ubelem = ac.ac_ubelem;
+
			cond_resched();
		}
-
		/*
-		 * If we've run out of space or had a formatting error, we
-		 * are now done
+		 * Set up for the next loop iteration.
		 */
-		if (ac.ac_ubleft < statstruct_size || error)
+		if (XFS_BULKSTAT_UBLEFT(ubleft)) {
+			if (end_of_ag) {
+				agno++;
+				agino = 0;
+			} else
+				agino = XFS_INO_TO_AGINO(mp, lastino);
+		} else
			break;
-
-		if (end_of_ag) {
-			agno++;
-			agino = 0;
-		}
	}
	/*
	 * Done, we're either out of filesystem or space to put the data.
	 */
	kmem_free(irbuf);
-	*ubcountp = ac.ac_ubelem;
-
+	*ubcountp = ubelem;
	/*
-	 * We found some inodes, so clear the error status and return them.
-	 * The lastino pointer will point directly at the inode that triggered
-	 * any error that occurred, so on the next call the error will be
-	 * triggered again and propagated to userspace as there will be no
-	 * formatted inodes in the buffer.
+	 * Found some inodes, return them now and return the error next time.
	 */
-	if (ac.ac_ubelem)
-		error = 0;
-
-	/*
-	 * If we ran out of filesystem, lastino will point off the end of
-	 * the filesystem so the next call will return immediately.
-	 */
-	*lastinop = XFS_AGINO_TO_INO(mp, agno, agino);
-	if (agno >= mp->m_sb.sb_agcount)
+	if (ubelem)
+		rval = 0;
+	if (agno >= mp->m_sb.sb_agcount) {
+		/*
+		 * If we ran out of filesystem, mark lastino as off
+		 * the end of the filesystem, so the next call
+		 * will return immediately.
+		 */
+		*lastinop = (xfs_ino_t)XFS_AGINO_TO_INO(mp, agno, 0);
		*done = 1;
+	} else
+		*lastinop = (xfs_ino_t)lastino;

-	return error;
+	return rval;
}

int
xfs_inumbers_fmt(
	void __user		*ubuffer, /* buffer to write to */
	const struct xfs_inogrp	*buffer, /* buffer to read from */
	long			count,	/* # of elements to read */
	long			*written) /* # of bytes written */
--- 114 unchanged lines hidden ---
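For context on how the cursors above reach callers: in the kernels these two versions come from, xfs_bulkstat() is driven by the XFS_IOC_FSBULKSTAT ioctl, where *lastinop corresponds to the caller-supplied lastip cursor, *ubcountp to ocount, and the formatter fills an array of struct xfs_bstat records in ubuffer. The sketch below is a minimal userspace illustration of that interface; it assumes the xfsprogs development headers are installed, and the header path, batch size of 64, and error handling are assumptions, not requirements.

```c
/*
 * Minimal userspace sketch: walk every inode in an XFS filesystem via
 * XFS_IOC_FSBULKSTAT, which lands in xfs_bulkstat() in the kernel.
 * Assumes <xfs/xfs.h> from xfsprogs provides XFS_IOC_FSBULKSTAT,
 * struct xfs_fsop_bulkreq and struct xfs_bstat; batch size and output
 * are illustrative only.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <xfs/xfs.h>

int main(int argc, char **argv)
{
	struct xfs_bstat buf[64];	/* "statstruct" array, 64 per call */
	struct xfs_fsop_bulkreq req;
	__u64 lastino = 0;		/* cursor: 0 = start of filesystem */
	__s32 ocount = 0;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <xfs mountpoint>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	req.lastip = &lastino;		/* kernel advances this cursor */
	req.icount = 64;		/* room for 64 xfs_bstat records */
	req.ubuffer = buf;
	req.ocount = &ocount;		/* how many records were filled */

	for (;;) {
		if (ioctl(fd, XFS_IOC_FSBULKSTAT, &req) < 0) {
			perror("XFS_IOC_FSBULKSTAT");
			break;
		}
		if (ocount == 0)	/* ran off the end of the filesystem */
			break;
		for (int i = 0; i < ocount; i++)
			printf("ino %llu size %lld\n",
			       (unsigned long long)buf[i].bs_ino,
			       (long long)buf[i].bs_size);
	}
	close(fd);
	return 0;
}
```

The loop terminates when ocount comes back as 0, which matches the *done path in both kernel versions above: once agno runs past sb_agcount, the next call returns immediately with no records.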