/**
 * attrib.c - NTFS attribute operations. Part of the Linux-NTFS project.
 *
 * Copyright (c) 2001-2012 Anton Altaparmakov and Tuxera Inc.
 * Copyright (c) 2002 Richard Russon
 *
 * This program/include file is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program/include file is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program (in the main directory of the Linux-NTFS
 * distribution in the file COPYING); if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/buffer_head.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>

#include "attrib.h"
#include "debug.h"
#include "layout.h"
#include "lcnalloc.h"
#include "malloc.h"
#include "mft.h"
#include "ntfs.h"
#include "types.h"

/**
 * ntfs_map_runlist_nolock - map (a part of) a runlist of an ntfs inode
 * @ni:		ntfs inode for which to map (part of) a runlist
 * @vcn:	map runlist part containing this vcn
 * @ctx:	active attribute search context if present or NULL if not
 *
 * Map the part of a runlist containing the @vcn of the ntfs inode @ni.
 *
 * If @ctx is specified, it is an active search context of @ni and its base mft
 * record. This is needed when ntfs_map_runlist_nolock() encounters unmapped
 * runlist fragments and allows their mapping. If you do not have the mft
 * record mapped, you can specify @ctx as NULL and ntfs_map_runlist_nolock()
 * will perform the necessary mapping and unmapping.
 *
 * Note, ntfs_map_runlist_nolock() saves the state of @ctx on entry and
 * restores it before returning. Thus, @ctx will be left pointing to the same
 * attribute on return as on entry. However, the actual pointers in @ctx may
 * point to different memory locations on return, so you must remember to reset
 * any cached pointers from the @ctx, i.e. after the call to
 * ntfs_map_runlist_nolock(), you will probably want to do:
 *	m = ctx->mrec;
 *	a = ctx->attr;
 * Assuming you cache ctx->attr in a variable @a of type ATTR_RECORD * and that
 * you cache ctx->mrec in a variable @m of type MFT_RECORD *.
 *
 * Return 0 on success and -errno on error. There is one special error code
 * which is not an error as such. This is -ENOENT. It means that @vcn is out
 * of bounds of the runlist.
 *
 * Note the runlist can be NULL after this function returns if @vcn is zero and
 * the attribute has zero allocated size, i.e. there simply is no runlist.
 *
 * WARNING: If @ctx is supplied, regardless of whether success or failure is
 *	    returned, you need to check IS_ERR(@ctx->mrec) and if 'true' the @ctx
 *	    is no longer valid, i.e. you need to either call
 *	    ntfs_attr_reinit_search_ctx() or ntfs_attr_put_search_ctx() on it.
 *	    In that case PTR_ERR(@ctx->mrec) will give you the error code for
 *	    why the mapping of the old inode failed.
 *
 * Locking: - The runlist described by @ni must be locked for writing on entry
 *	      and is locked on return. Note the runlist will be modified.
 *	    - If @ctx is NULL, the base mft record of @ni must not be mapped on
 *	      entry and it will be left unmapped on return.
 *	    - If @ctx is not NULL, the base mft record must be mapped on entry
 *	      and it will be left mapped on return.
 */
int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn, ntfs_attr_search_ctx *ctx)
{
	VCN end_vcn;
	unsigned long flags;
	ntfs_inode *base_ni;
	MFT_RECORD *m;
	ATTR_RECORD *a;
	runlist_element *rl;
	struct page *put_this_page = NULL;
	int err = 0;
	bool ctx_is_temporary, ctx_needs_reset;
	ntfs_attr_search_ctx old_ctx = { NULL, };

	ntfs_debug("Mapping runlist part containing vcn 0x%llx.",
			(unsigned long long)vcn);
	if (!NInoAttr(ni))
		base_ni = ni;
	else
		base_ni = ni->ext.base_ntfs_ino;
	if (!ctx) {
		ctx_is_temporary = ctx_needs_reset = true;
		m = map_mft_record(base_ni);
		if (IS_ERR(m))
			return PTR_ERR(m);
		ctx = ntfs_attr_get_search_ctx(base_ni, m);
		if (unlikely(!ctx)) {
			err = -ENOMEM;
			goto err_out;
		}
	} else {
		VCN allocated_size_vcn;

		BUG_ON(IS_ERR(ctx->mrec));
		a = ctx->attr;
		BUG_ON(!a->non_resident);
		ctx_is_temporary = false;
		end_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn);
		read_lock_irqsave(&ni->size_lock, flags);
		allocated_size_vcn = ni->allocated_size >>
				ni->vol->cluster_size_bits;
		read_unlock_irqrestore(&ni->size_lock, flags);
		if (!a->data.non_resident.lowest_vcn && end_vcn <= 0)
			end_vcn = allocated_size_vcn - 1;
		/*
		 * If we already have the attribute extent containing @vcn in
		 * @ctx, no need to look it up again. We slightly cheat in
		 * that if vcn exceeds the allocated size, we will refuse to
		 * map the runlist below, so there is definitely no need to get
		 * the right attribute extent.
		 */
		if (vcn >= allocated_size_vcn || (a->type == ni->type &&
				a->name_length == ni->name_len &&
				!memcmp((u8*)a + le16_to_cpu(a->name_offset),
				ni->name, ni->name_len) &&
				sle64_to_cpu(a->data.non_resident.lowest_vcn)
				<= vcn && end_vcn >= vcn))
			ctx_needs_reset = false;
		else {
			/* Save the old search context. */
			old_ctx = *ctx;
			/*
			 * If the currently mapped (extent) inode is not the
			 * base inode we will unmap it when we reinitialize the
			 * search context which means we need to get a
			 * reference to the page containing the mapped mft
			 * record so we do not accidentally drop changes to the
			 * mft record when it has not been marked dirty yet.
			 */
			if (old_ctx.base_ntfs_ino && old_ctx.ntfs_ino !=
					old_ctx.base_ntfs_ino) {
				put_this_page = old_ctx.ntfs_ino->page;
				get_page(put_this_page);
			}
			/*
			 * Reinitialize the search context so we can lookup the
			 * needed attribute extent.
			 */
			ntfs_attr_reinit_search_ctx(ctx);
			ctx_needs_reset = true;
		}
	}
	if (ctx_needs_reset) {
		err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
				CASE_SENSITIVE, vcn, NULL, 0, ctx);
		if (unlikely(err)) {
			if (err == -ENOENT)
				err = -EIO;
			goto err_out;
		}
		BUG_ON(!ctx->attr->non_resident);
	}
	a = ctx->attr;
	/*
	 * Only decompress the mapping pairs if @vcn is inside it. Otherwise
	 * we get into problems when we try to map an out of bounds vcn because
	 * we then try to map the already mapped runlist fragment and
	 * ntfs_mapping_pairs_decompress() fails.
	 */
	end_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn) + 1;
	if (unlikely(vcn && vcn >= end_vcn)) {
		err = -ENOENT;
		goto err_out;
	}
	rl = ntfs_mapping_pairs_decompress(ni->vol, a, ni->runlist.rl);
	if (IS_ERR(rl))
		err = PTR_ERR(rl);
	else
		ni->runlist.rl = rl;
err_out:
	if (ctx_is_temporary) {
		if (likely(ctx))
			ntfs_attr_put_search_ctx(ctx);
		unmap_mft_record(base_ni);
	} else if (ctx_needs_reset) {
		/*
		 * If there is no attribute list, restoring the search context
		 * is accomplished simply by copying the saved context back over
		 * the caller supplied context. If there is an attribute list,
		 * things are more complicated as we need to deal with mapping
		 * of mft records and resulting potential changes in pointers.
		 */
		if (NInoAttrList(base_ni)) {
			/*
			 * If the currently mapped (extent) inode is not the
			 * one we had before, we need to unmap it and map the
			 * old one.
			 */
			if (ctx->ntfs_ino != old_ctx.ntfs_ino) {
				/*
				 * If the currently mapped inode is not the
				 * base inode, unmap it.
				 */
				if (ctx->base_ntfs_ino && ctx->ntfs_ino !=
						ctx->base_ntfs_ino) {
					unmap_extent_mft_record(ctx->ntfs_ino);
					ctx->mrec = ctx->base_mrec;
					BUG_ON(!ctx->mrec);
				}
				/*
				 * If the old mapped inode is not the base
				 * inode, map it.
				 */
				if (old_ctx.base_ntfs_ino &&
						old_ctx.ntfs_ino !=
						old_ctx.base_ntfs_ino) {
retry_map:
					ctx->mrec = map_mft_record(
							old_ctx.ntfs_ino);
					/*
					 * Something bad has happened. If out
					 * of memory retry till it succeeds.
					 * Any other errors are fatal and we
					 * return the error code in ctx->mrec.
					 * Let the caller deal with it... We
					 * just need to fudge things so the
					 * caller can reinit and/or put the
					 * search context safely.
					 */
					if (IS_ERR(ctx->mrec)) {
						if (PTR_ERR(ctx->mrec) ==
								-ENOMEM) {
							schedule();
							goto retry_map;
						} else
							old_ctx.ntfs_ino =
								old_ctx.
								base_ntfs_ino;
					}
				}
			}
			/* Update the changed pointers in the saved context. */
			if (ctx->mrec != old_ctx.mrec) {
				if (!IS_ERR(ctx->mrec))
					old_ctx.attr = (ATTR_RECORD*)(
							(u8*)ctx->mrec +
							((u8*)old_ctx.attr -
							(u8*)old_ctx.mrec));
				old_ctx.mrec = ctx->mrec;
			}
		}
		/* Restore the search context to the saved one. */
		*ctx = old_ctx;
		/*
		 * We drop the reference on the page we took earlier. In the
		 * case that IS_ERR(ctx->mrec) is true this means we might lose
		 * some changes to the mft record that had been made between
		 * the last time it was marked dirty/written out and now. This
		 * at this stage is not a problem as the mapping error is fatal
		 * enough that the mft record cannot be written out anyway and
		 * the caller is very likely to shutdown the whole inode
		 * immediately and mark the volume dirty for chkdsk to pick up
		 * the pieces anyway.
		 */
		if (put_this_page)
			put_page(put_this_page);
	}
	return err;
}

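/*
 * Illustrative sketch, not part of the original source: after calling
 * ntfs_map_runlist_nolock() with a caller supplied search context @ctx, the
 * cached pointers have to be re-read and the WARNING above observed, e.g.:
 *
 *	err = ntfs_map_runlist_nolock(ni, vcn, ctx);
 *	if (IS_ERR(ctx->mrec)) {
 *		err = PTR_ERR(ctx->mrec);
 *		ntfs_attr_put_search_ctx(ctx);
 *		goto out;	(@ctx must not be used any further)
 *	}
 *	m = ctx->mrec;
 *	a = ctx->attr;
 *	if (err && err != -ENOENT)
 *		goto err_out;
 */
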
/**
 * ntfs_map_runlist - map (a part of) a runlist of an ntfs inode
 * @ni:		ntfs inode for which to map (part of) a runlist
 * @vcn:	map runlist part containing this vcn
 *
 * Map the part of a runlist containing the @vcn of the ntfs inode @ni.
 *
 * Return 0 on success and -errno on error. There is one special error code
 * which is not an error as such. This is -ENOENT. It means that @vcn is out
 * of bounds of the runlist.
 *
 * Locking: - The runlist must be unlocked on entry and is unlocked on return.
 *	    - This function takes the runlist lock for writing and may modify
 *	      the runlist.
 */
int ntfs_map_runlist(ntfs_inode *ni, VCN vcn)
{
	int err = 0;

	down_write(&ni->runlist.lock);
	/* Make sure someone else didn't do the work while we were sleeping. */
	if (likely(ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn) <=
			LCN_RL_NOT_MAPPED))
		err = ntfs_map_runlist_nolock(ni, vcn, NULL);
	up_write(&ni->runlist.lock);
	return err;
}

/**
 * ntfs_attr_vcn_to_lcn_nolock - convert a vcn into a lcn given an ntfs inode
 * @ni:			ntfs inode of the attribute whose runlist to search
 * @vcn:		vcn to convert
 * @write_locked:	true if the runlist is locked for writing
 *
 * Find the virtual cluster number @vcn in the runlist of the ntfs attribute
 * described by the ntfs inode @ni and return the corresponding logical cluster
 * number (lcn).
 *
 * If the @vcn is not mapped yet, the attempt is made to map the attribute
 * extent containing the @vcn and the vcn to lcn conversion is retried.
 *
 * If @write_locked is true the caller has locked the runlist for writing and
 * if false for reading.
 *
 * Since lcns must be >= 0, we use negative return codes with special meaning:
 *
 * Return code		Meaning / Description
 * ==========================================
 *  LCN_HOLE		Hole / not allocated on disk.
 *  LCN_ENOENT		There is no such vcn in the runlist, i.e. @vcn is out of bounds.
 *  LCN_ENOMEM		Not enough memory to map runlist.
 *  LCN_EIO		Critical error (runlist/file is corrupt, i/o error, etc).
 *
 * Locking: - The runlist must be locked on entry and is left locked on return.
 *	    - If @write_locked is 'false', i.e. the runlist is locked for reading,
 *	      the lock may be dropped inside the function so you cannot rely on
 *	      the runlist still being the same when this function returns.
 */
LCN ntfs_attr_vcn_to_lcn_nolock(ntfs_inode *ni, const VCN vcn,
		const bool write_locked)
{
	LCN lcn;
	unsigned long flags;
	bool is_retry = false;

	BUG_ON(!ni);
	ntfs_debug("Entering for i_ino 0x%lx, vcn 0x%llx, %s_locked.",
			ni->mft_no, (unsigned long long)vcn,
			write_locked ? "write" : "read");
	BUG_ON(!NInoNonResident(ni));
	BUG_ON(vcn < 0);
	if (!ni->runlist.rl) {
		read_lock_irqsave(&ni->size_lock, flags);
		if (!ni->allocated_size) {
			read_unlock_irqrestore(&ni->size_lock, flags);
			return LCN_ENOENT;
		}
		read_unlock_irqrestore(&ni->size_lock, flags);
	}
retry_remap:
	/* Convert vcn to lcn. If that fails map the runlist and retry once. */
	lcn = ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn);
	if (likely(lcn >= LCN_HOLE)) {
		ntfs_debug("Done, lcn 0x%llx.", (long long)lcn);
		return lcn;
	}
	if (lcn != LCN_RL_NOT_MAPPED) {
		if (lcn != LCN_ENOENT)
			lcn = LCN_EIO;
	} else if (!is_retry) {
		int err;

		if (!write_locked) {
			up_read(&ni->runlist.lock);
			down_write(&ni->runlist.lock);
			if (unlikely(ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn) !=
					LCN_RL_NOT_MAPPED)) {
				up_write(&ni->runlist.lock);
				down_read(&ni->runlist.lock);
				goto retry_remap;
			}
		}
		err = ntfs_map_runlist_nolock(ni, vcn, NULL);
		if (!write_locked) {
			up_write(&ni->runlist.lock);
			down_read(&ni->runlist.lock);
		}
		if (likely(!err)) {
			is_retry = true;
			goto retry_remap;
		}
		if (err == -ENOENT)
			lcn = LCN_ENOENT;
		else if (err == -ENOMEM)
			lcn = LCN_ENOMEM;
		else
			lcn = LCN_EIO;
	}
	if (lcn != LCN_ENOENT)
		ntfs_error(ni->vol->sb, "Failed with error code %lli.",
				(long long)lcn);
	return lcn;
}

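/*
 * Illustrative sketch, not part of the original source: a typical caller
 * holding the runlist lock for reading dispatches on the return value of
 * ntfs_attr_vcn_to_lcn_nolock() roughly as follows:
 *
 *	lcn = ntfs_attr_vcn_to_lcn_nolock(ni, vcn, false);
 *	if (lcn >= 0)
 *		read/write the cluster at @lcn;
 *	else if (lcn == LCN_HOLE)
 *		treat the run as sparse, i.e. return zeroes on read;
 *	else if (lcn == LCN_ENOENT)
 *		treat @vcn as beyond the end of the attribute;
 *	else
 *		fail with -ENOMEM (LCN_ENOMEM) or -EIO (LCN_EIO);
 */
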
/**
 * ntfs_attr_find_vcn_nolock - find a vcn in the runlist of an ntfs inode
 * @ni:		ntfs inode describing the runlist to search
 * @vcn:	vcn to find
 * @ctx:	active attribute search context if present or NULL if not
 *
 * Find the virtual cluster number @vcn in the runlist described by the ntfs
 * inode @ni and return the address of the runlist element containing the @vcn.
 *
 * If the @vcn is not mapped yet, the attempt is made to map the attribute
 * extent containing the @vcn and the vcn to lcn conversion is retried.
 *
 * If @ctx is specified, it is an active search context of @ni and its base mft
 * record. This is needed when ntfs_attr_find_vcn_nolock() encounters unmapped
 * runlist fragments and allows their mapping. If you do not have the mft
 * record mapped, you can specify @ctx as NULL and ntfs_attr_find_vcn_nolock()
 * will perform the necessary mapping and unmapping.
 *
 * Note, ntfs_attr_find_vcn_nolock() saves the state of @ctx on entry and
 * restores it before returning. Thus, @ctx will be left pointing to the same
 * attribute on return as on entry. However, the actual pointers in @ctx may
 * point to different memory locations on return, so you must remember to reset
 * any cached pointers from the @ctx, i.e. after the call to
 * ntfs_attr_find_vcn_nolock(), you will probably want to do:
 *	m = ctx->mrec;
 *	a = ctx->attr;
 * Assuming you cache ctx->attr in a variable @a of type ATTR_RECORD * and that
 * you cache ctx->mrec in a variable @m of type MFT_RECORD *.
 * Note you need to distinguish between the lcn of the returned runlist element
 * being >= 0 and LCN_HOLE. In the latter case you have to return zeroes on
 * read and allocate clusters on write.
 *
 * Return the runlist element containing the @vcn on success and
 * ERR_PTR(-errno) on error. You need to test the return value with IS_ERR()
 * to decide if the return is success or failure and PTR_ERR() to get to the
 * error code if IS_ERR() is true.
 *
 * The possible error return codes are:
 *	-ENOENT	- No such vcn in the runlist, i.e. @vcn is out of bounds.
 *	-ENOMEM	- Not enough memory to map runlist.
 *	-EIO	- Critical error (runlist/file is corrupt, i/o error, etc).
 *
 * WARNING: If @ctx is supplied, regardless of whether success or failure is
 *	    returned, you need to check IS_ERR(@ctx->mrec) and if 'true' the @ctx
 *	    is no longer valid, i.e. you need to either call
 *	    ntfs_attr_reinit_search_ctx() or ntfs_attr_put_search_ctx() on it.
 *	    In that case PTR_ERR(@ctx->mrec) will give you the error code for
 *	    why the mapping of the old inode failed.
 *
 * Locking: - The runlist described by @ni must be locked for writing on entry
 *	      and is locked on return. Note the runlist may be modified when
 *	      needed runlist fragments need to be mapped.
 *	    - If @ctx is NULL, the base mft record of @ni must not be mapped on
 *	      entry and it will be left unmapped on return.
 *	    - If @ctx is not NULL, the base mft record must be mapped on entry
 *	      and it will be left mapped on return.
 */
runlist_element *ntfs_attr_find_vcn_nolock(ntfs_inode *ni, const VCN vcn,
		ntfs_attr_search_ctx *ctx)
{
	unsigned long flags;
	runlist_element *rl;
	int err = 0;
	bool is_retry = false;

	BUG_ON(!ni);
	ntfs_debug("Entering for i_ino 0x%lx, vcn 0x%llx, with%s ctx.",
			ni->mft_no, (unsigned long long)vcn, ctx ? "" : "out");
	BUG_ON(!NInoNonResident(ni));
	BUG_ON(vcn < 0);
	if (!ni->runlist.rl) {
		read_lock_irqsave(&ni->size_lock, flags);
		if (!ni->allocated_size) {
			read_unlock_irqrestore(&ni->size_lock, flags);
			return ERR_PTR(-ENOENT);
		}
		read_unlock_irqrestore(&ni->size_lock, flags);
	}
retry_remap:
	rl = ni->runlist.rl;
	if (likely(rl && vcn >= rl[0].vcn)) {
		while (likely(rl->length)) {
			if (unlikely(vcn < rl[1].vcn)) {
				if (likely(rl->lcn >= LCN_HOLE)) {
					ntfs_debug("Done.");
					return rl;
				}
				break;
			}
			rl++;
		}
		if (likely(rl->lcn != LCN_RL_NOT_MAPPED)) {
			if (likely(rl->lcn == LCN_ENOENT))
				err = -ENOENT;
			else
				err = -EIO;
		}
	}
	if (!err && !is_retry) {
		/*
		 * If the search context is invalid we cannot map the unmapped
		 * region.
		 */
		if (IS_ERR(ctx->mrec))
			err = PTR_ERR(ctx->mrec);
		else {
			/*
			 * The @vcn is in an unmapped region, map the runlist
			 * and retry.
			 */
			err = ntfs_map_runlist_nolock(ni, vcn, ctx);
			if (likely(!err)) {
				is_retry = true;
				goto retry_remap;
			}
		}
		if (err == -EINVAL)
			err = -EIO;
	} else if (!err)
		err = -EIO;
	if (err != -ENOENT)
		ntfs_error(ni->vol->sb, "Failed with error code %i.", err);
	return ERR_PTR(err);
}

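/*
 * Illustrative sketch, not part of the original source: typical use of
 * ntfs_attr_find_vcn_nolock() with an active search context @ctx and the
 * runlist locked for writing by the caller (the WARNING above about
 * IS_ERR(@ctx->mrec) still applies on the error path):
 *
 *	rl = ntfs_attr_find_vcn_nolock(ni, vcn, ctx);
 *	if (IS_ERR(rl))
 *		return PTR_ERR(rl);	(-ENOENT, -ENOMEM, or -EIO)
 *	if (rl->lcn >= 0)
 *		lcn = rl->lcn + (vcn - rl->vcn);
 *	else
 *		handle LCN_HOLE, i.e. return zeroes on read and allocate
 *		clusters on write;
 */
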
/**
 * ntfs_attr_find - find (next) attribute in mft record
 * @type:	attribute type to find
 * @name:	attribute name to find (optional, i.e. NULL means don't care)
 * @name_len:	attribute name length (only needed if @name present)
 * @ic:		IGNORE_CASE or CASE_SENSITIVE (ignored if @name not present)
 * @val:	attribute value to find (optional, resident attributes only)
 * @val_len:	attribute value length
 * @ctx:	search context with mft record and attribute to search from
 *
 * You should not need to call this function directly. Use ntfs_attr_lookup()
 * instead.
 *
 * ntfs_attr_find() takes a search context @ctx as parameter and searches the
 * mft record specified by @ctx->mrec, beginning at @ctx->attr, for an
 * attribute of @type, optionally @name and @val.
 *
 * If the attribute is found, ntfs_attr_find() returns 0 and @ctx->attr will
 * point to the found attribute.
 *
 * If the attribute is not found, ntfs_attr_find() returns -ENOENT and
 * @ctx->attr will point to the attribute before which the attribute being
 * searched for would need to be inserted if such an action were to be desired.
 *
 * On actual error, ntfs_attr_find() returns -EIO. In this case @ctx->attr is
 * undefined and in particular do not rely on it not changing.
 *
 * If @ctx->is_first is 'true', the search begins with @ctx->attr itself. If it
 * is 'false', the search begins after @ctx->attr.
 *
 * If @ic is IGNORE_CASE, the @name comparison is not case sensitive and
 * @ctx->ntfs_ino must be set to the ntfs inode to which the mft record
 * @ctx->mrec belongs. This is so we can get at the ntfs volume and hence at
 * the upcase table. If @ic is CASE_SENSITIVE, the comparison is case
 * sensitive. When @name is present, @name_len is the @name length in Unicode
 * characters.
 *
 * If @name is not present (NULL), we assume that the unnamed attribute is
 * being searched for.
 *
 * Finally, the resident attribute value @val is looked for, if present. If
 * @val is not present (NULL), @val_len is ignored.
 *
 * ntfs_attr_find() only searches the specified mft record and it ignores the
 * presence of an attribute list attribute (unless it is the one being searched
 * for, obviously). If you need to take attribute lists into consideration,
 * use ntfs_attr_lookup() instead (see below). This also means that you cannot
 * use ntfs_attr_find() to search for extent records of non-resident
 * attributes, as extents with lowest_vcn != 0 are usually described by the
 * attribute list attribute only. - Note that it is possible that the first
 * extent is only in the attribute list while the last extent is in the base
 * mft record, so do not rely on being able to find the first extent in the
 * base mft record.
 *
 * Warning: Never use @val when looking for attribute types which can be
 *	    non-resident as this most likely will result in a crash!
 */
static int ntfs_attr_find(const ATTR_TYPE type, const ntfschar *name,
		const u32 name_len, const IGNORE_CASE_BOOL ic,
		const u8 *val, const u32 val_len, ntfs_attr_search_ctx *ctx)
{
	ATTR_RECORD *a;
	ntfs_volume *vol = ctx->ntfs_ino->vol;
	ntfschar *upcase = vol->upcase;
	u32 upcase_len = vol->upcase_len;

	/*
	 * Iterate over attributes in mft record starting at @ctx->attr, or the
	 * attribute following that, if @ctx->is_first is 'true'.
	 */
	if (ctx->is_first) {
		a = ctx->attr;
		ctx->is_first = false;
	} else
		a = (ATTR_RECORD*)((u8*)ctx->attr +
				le32_to_cpu(ctx->attr->length));
	for (;; a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length))) {
		if ((u8*)a < (u8*)ctx->mrec || (u8*)a > (u8*)ctx->mrec +
				le32_to_cpu(ctx->mrec->bytes_allocated))
			break;
		ctx->attr = a;
		if (unlikely(le32_to_cpu(a->type) > le32_to_cpu(type) ||
				a->type == AT_END))
			return -ENOENT;
		if (unlikely(!a->length))
			break;
		if (a->type != type)
			continue;
		/*
		 * If @name is present, compare the two names. If @name is
		 * missing, assume we want an unnamed attribute.
		 */
		if (!name) {
			/* The search failed if the found attribute is named. */
			if (a->name_length)
				return -ENOENT;
		} else if (!ntfs_are_names_equal(name, name_len,
				(ntfschar*)((u8*)a + le16_to_cpu(a->name_offset)),
				a->name_length, ic, upcase, upcase_len)) {
			register int rc;

			rc = ntfs_collate_names(name, name_len,
					(ntfschar*)((u8*)a +
					le16_to_cpu(a->name_offset)),
					a->name_length, 1, IGNORE_CASE,
					upcase, upcase_len);
			/*
			 * If @name collates before a->name, there is no
			 * matching attribute.
			 */
			if (rc == -1)
				return -ENOENT;
			/* If the strings are not equal, continue search. */
			if (rc)
				continue;
			rc = ntfs_collate_names(name, name_len,
					(ntfschar*)((u8*)a +
					le16_to_cpu(a->name_offset)),
					a->name_length, 1, CASE_SENSITIVE,
					upcase, upcase_len);
			if (rc == -1)
				return -ENOENT;
			if (rc)
				continue;
		}
		/*
		 * The names match or @name not present and attribute is
		 * unnamed. If no @val specified, we have found the attribute
		 * and are done.
		 */
		if (!val)
			return 0;
		/* @val is present; compare values. */
		else {
			register int rc;

			rc = memcmp(val, (u8*)a + le16_to_cpu(
					a->data.resident.value_offset),
					min_t(u32, val_len, le32_to_cpu(
					a->data.resident.value_length)));
			/*
			 * If @val collates before the current attribute's
			 * value, there is no matching attribute.
			 */
			if (!rc) {
				register u32 avl;

				avl = le32_to_cpu(
						a->data.resident.value_length);
				if (val_len == avl)
					return 0;
				if (val_len < avl)
					return -ENOENT;
			} else if (rc < 0)
				return -ENOENT;
		}
	}
	ntfs_error(vol->sb, "Inode is corrupt. Run chkdsk.");
	NVolSetErrors(vol);
	return -EIO;
}

/**
 * load_attribute_list - load an attribute list into memory
 * @vol:		ntfs volume from which to read
 * @runlist:		runlist of the attribute list
 * @al_start:		destination buffer
 * @size:		size of the destination buffer in bytes
 * @initialized_size:	initialized size of the attribute list
 *
 * Walk the runlist @runlist and load all clusters from it copying them into
 * the linear buffer @al. The maximum number of bytes copied to @al is @size
 * bytes. Note, @size does not need to be a multiple of the cluster size. If
 * @initialized_size is less than @size, the region in @al between
 * @initialized_size and @size will be zeroed and not read from disk.
 *
 * Return 0 on success or -errno on error.
 */
int load_attribute_list(ntfs_volume *vol, runlist *runlist, u8 *al_start,
		const s64 size, const s64 initialized_size)
{
	LCN lcn;
	u8 *al = al_start;
	u8 *al_end = al + initialized_size;
	runlist_element *rl;
	struct buffer_head *bh;
	struct super_block *sb;
	unsigned long block_size;
	unsigned long block, max_block;
	int err = 0;
	unsigned char block_size_bits;

	ntfs_debug("Entering.");
	if (!vol || !runlist || !al || size <= 0 || initialized_size < 0 ||
			initialized_size > size)
		return -EINVAL;
	if (!initialized_size) {
		memset(al, 0, size);
		return 0;
	}
	sb = vol->sb;
	block_size = sb->s_blocksize;
	block_size_bits = sb->s_blocksize_bits;
	down_read(&runlist->lock);
	rl = runlist->rl;
	if (!rl) {
		ntfs_error(sb, "Cannot read attribute list since runlist is "
				"missing.");
		goto err_out;
	}
	/* Read all clusters specified by the runlist one run at a time. */
	while (rl->length) {
		lcn = ntfs_rl_vcn_to_lcn(rl, rl->vcn);
		ntfs_debug("Reading vcn = 0x%llx, lcn = 0x%llx.",
				(unsigned long long)rl->vcn,
				(unsigned long long)lcn);
		/* The attribute list cannot be sparse. */
		if (lcn < 0) {
			ntfs_error(sb, "ntfs_rl_vcn_to_lcn() failed. Cannot "
					"read attribute list.");
			goto err_out;
		}
		block = lcn << vol->cluster_size_bits >> block_size_bits;
		/* Read the run from device in chunks of block_size bytes. */
		max_block = block + (rl->length << vol->cluster_size_bits >>
				block_size_bits);
		ntfs_debug("max_block = 0x%lx.", max_block);
		do {
			ntfs_debug("Reading block = 0x%lx.", block);
			bh = sb_bread(sb, block);
			if (!bh) {
				ntfs_error(sb, "sb_bread() failed. Cannot "
						"read attribute list.");
				goto err_out;
			}
			if (al + block_size >= al_end)
				goto do_final;
			memcpy(al, bh->b_data, block_size);
			brelse(bh);
			al += block_size;
		} while (++block < max_block);
		rl++;
	}
	if (initialized_size < size) {
initialize:
		memset(al_start + initialized_size, 0, size - initialized_size);
	}
done:
	up_read(&runlist->lock);
	return err;
do_final:
	if (al < al_end) {
		/*
		 * Partial block.
		 *
		 * Note: The attribute list can be smaller than its allocation
		 * by multiple clusters. This has been encountered by at least
		 * two people running Windows XP, thus we cannot do any
		 * truncation sanity checking here. (AIA)
		 */
		memcpy(al, bh->b_data, al_end - al);
		brelse(bh);
		if (initialized_size < size)
			goto initialize;
		goto done;
	}
	brelse(bh);
	/* Real overflow! */
	ntfs_error(sb, "Attribute list buffer overflow. Read attribute list "
			"is truncated.");
err_out:
	err = -EIO;
	goto done;
}

/**
 * ntfs_external_attr_find - find an attribute in the attribute list of an inode
 * @type:	attribute type to find
 * @name:	attribute name to find (optional, i.e. NULL means don't care)
 * @name_len:	attribute name length (only needed if @name present)
 * @ic:		IGNORE_CASE or CASE_SENSITIVE (ignored if @name not present)
 * @lowest_vcn:	lowest vcn to find (optional, non-resident attributes only)
 * @val:	attribute value to find (optional, resident attributes only)
 * @val_len:	attribute value length
 * @ctx:	search context with mft record and attribute to search from
 *
 * You should not need to call this function directly. Use ntfs_attr_lookup()
 * instead.
 *
 * Find an attribute by searching the attribute list for the corresponding
 * attribute list entry. Having found the entry, map the mft record if the
 * attribute is in a different mft record/inode, ntfs_attr_find() the attribute
 * in there and return it.
 *
 * On first search @ctx->ntfs_ino must be the base mft record and @ctx must
 * have been obtained from a call to ntfs_attr_get_search_ctx(). On subsequent
 * calls @ctx->ntfs_ino can be any extent inode, too (@ctx->base_ntfs_ino is
 * then the base inode).
 *
 * After finishing with the attribute/mft record you need to call
 * ntfs_attr_put_search_ctx() to cleanup the search context (unmapping any
 * mapped inodes, etc).
 *
 * If the attribute is found, ntfs_external_attr_find() returns 0 and
 * @ctx->attr will point to the found attribute. @ctx->mrec will point to the
 * mft record in which @ctx->attr is located and @ctx->al_entry will point to
 * the attribute list entry for the attribute.
 *
 * If the attribute is not found, ntfs_external_attr_find() returns -ENOENT and
 * @ctx->attr will point to the attribute in the base mft record before which
 * the attribute being searched for would need to be inserted if such an action
 * were to be desired. @ctx->mrec will point to the mft record in which
 * @ctx->attr is located and @ctx->al_entry will point to the attribute list
 * entry of the attribute before which the attribute being searched for would
 * need to be inserted if such an action were to be desired.
 *
 * Thus to insert the not found attribute, one wants to add the attribute to
 * @ctx->mrec (the base mft record) and if there is not enough space, the
 * attribute should be placed in a newly allocated extent mft record. The
 * attribute list entry for the inserted attribute should be inserted in the
 * attribute list attribute at @ctx->al_entry.
 *
 * On actual error, ntfs_external_attr_find() returns -EIO. In this case
 * @ctx->attr is undefined and in particular do not rely on it not changing.
 */
static int ntfs_external_attr_find(const ATTR_TYPE type,
		const ntfschar *name, const u32 name_len,
		const IGNORE_CASE_BOOL ic, const VCN lowest_vcn,
		const u8 *val, const u32 val_len, ntfs_attr_search_ctx *ctx)
{
	ntfs_inode *base_ni, *ni;
	ntfs_volume *vol;
	ATTR_LIST_ENTRY *al_entry, *next_al_entry;
	u8 *al_start, *al_end;
	ATTR_RECORD *a;
	ntfschar *al_name;
	u32 al_name_len;
	int err = 0;
	static const char *es = " Unmount and run chkdsk.";

	ni = ctx->ntfs_ino;
	base_ni = ctx->base_ntfs_ino;
	ntfs_debug("Entering for inode 0x%lx, type 0x%x.", ni->mft_no, type);
	if (!base_ni) {
		/* First call happens with the base mft record. */
		base_ni = ctx->base_ntfs_ino = ctx->ntfs_ino;
		ctx->base_mrec = ctx->mrec;
	}
	if (ni == base_ni)
		ctx->base_attr = ctx->attr;
	if (type == AT_END)
		goto not_found;
	vol = base_ni->vol;
	al_start = base_ni->attr_list;
	al_end = al_start + base_ni->attr_list_size;
	if (!ctx->al_entry)
		ctx->al_entry = (ATTR_LIST_ENTRY*)al_start;
	/*
	 * Iterate over entries in attribute list starting at @ctx->al_entry,
	 * or the entry following that, if @ctx->is_first is 'true'.
	 */
	if (ctx->is_first) {
		al_entry = ctx->al_entry;
		ctx->is_first = false;
	} else
		al_entry = (ATTR_LIST_ENTRY*)((u8*)ctx->al_entry +
				le16_to_cpu(ctx->al_entry->length));
	for (;; al_entry = next_al_entry) {
		/* Out of bounds check. */
		if ((u8*)al_entry < base_ni->attr_list ||
				(u8*)al_entry > al_end)
			break;	/* Inode is corrupt. */
		ctx->al_entry = al_entry;
		/* Catch the end of the attribute list. */
		if ((u8*)al_entry == al_end)
			goto not_found;
		if (!al_entry->length)
			break;
		if ((u8*)al_entry + 6 > al_end || (u8*)al_entry +
				le16_to_cpu(al_entry->length) > al_end)
			break;
		next_al_entry = (ATTR_LIST_ENTRY*)((u8*)al_entry +
				le16_to_cpu(al_entry->length));
		if (le32_to_cpu(al_entry->type) > le32_to_cpu(type))
			goto not_found;
		if (type != al_entry->type)
			continue;
		/*
		 * If @name is present, compare the two names. If @name is
		 * missing, assume we want an unnamed attribute.
		 */
		al_name_len = al_entry->name_length;
		al_name = (ntfschar*)((u8*)al_entry + al_entry->name_offset);
		if (!name) {
			if (al_name_len)
				goto not_found;
		} else if (!ntfs_are_names_equal(al_name, al_name_len, name,
				name_len, ic, vol->upcase, vol->upcase_len)) {
			register int rc;

			rc = ntfs_collate_names(name, name_len, al_name,
					al_name_len, 1, IGNORE_CASE,
					vol->upcase, vol->upcase_len);
			/*
			 * If @name collates before al_name, there is no
			 * matching attribute.
			 */
			if (rc == -1)
				goto not_found;
			/* If the strings are not equal, continue search. */
			if (rc)
				continue;
			/*
			 * FIXME: Reverse engineering showed 0, IGNORE_CASE but
			 * that is inconsistent with ntfs_attr_find(). The
			 * subsequent rc checks were also different. Perhaps I
			 * made a mistake in one of the two. Need to recheck
			 * which is correct or at least see what is going on...
			 * (AIA)
			 */
			rc = ntfs_collate_names(name, name_len, al_name,
					al_name_len, 1, CASE_SENSITIVE,
					vol->upcase, vol->upcase_len);
			if (rc == -1)
				goto not_found;
			if (rc)
				continue;
		}
		/*
		 * The names match or @name not present and attribute is
		 * unnamed. Now check @lowest_vcn. Continue search if the
		 * next attribute list entry still fits @lowest_vcn. Otherwise
		 * we have reached the right one or the search has failed.
		 */
		if (lowest_vcn && (u8*)next_al_entry >= al_start &&
				(u8*)next_al_entry + 6 < al_end &&
				(u8*)next_al_entry + le16_to_cpu(
					next_al_entry->length) <= al_end &&
				sle64_to_cpu(next_al_entry->lowest_vcn) <=
					lowest_vcn &&
				next_al_entry->type == al_entry->type &&
				next_al_entry->name_length == al_name_len &&
				ntfs_are_names_equal((ntfschar*)((u8*)
					next_al_entry +
					next_al_entry->name_offset),
					next_al_entry->name_length,
					al_name, al_name_len, CASE_SENSITIVE,
					vol->upcase, vol->upcase_len))
			continue;
		if (MREF_LE(al_entry->mft_reference) == ni->mft_no) {
			if (MSEQNO_LE(al_entry->mft_reference) != ni->seq_no) {
				ntfs_error(vol->sb, "Found stale mft "
						"reference in attribute list "
						"of base inode 0x%lx.%s",
						base_ni->mft_no, es);
				err = -EIO;
				break;
			}
		} else { /* Mft references do not match. */
			/* If there is a mapped record unmap it first. */
			if (ni != base_ni)
				unmap_extent_mft_record(ni);
			/* Do we want the base record back? */
			if (MREF_LE(al_entry->mft_reference) ==
					base_ni->mft_no) {
				ni = ctx->ntfs_ino = base_ni;
				ctx->mrec = ctx->base_mrec;
			} else {
				/* We want an extent record. */
				ctx->mrec = map_extent_mft_record(base_ni,
						le64_to_cpu(
						al_entry->mft_reference), &ni);
				if (IS_ERR(ctx->mrec)) {
					ntfs_error(vol->sb, "Failed to map "
							"extent mft record "
							"0x%lx of base inode "
							"0x%lx.%s",
							MREF_LE(al_entry->
							mft_reference),
							base_ni->mft_no, es);
					err = PTR_ERR(ctx->mrec);
					if (err == -ENOENT)
						err = -EIO;
					/* Cause @ctx to be sanitized below. */
					ni = NULL;
					break;
				}
				ctx->ntfs_ino = ni;
			}
			ctx->attr = (ATTR_RECORD*)((u8*)ctx->mrec +
					le16_to_cpu(ctx->mrec->attrs_offset));
		}
		/*
		 * ctx->vfs_ino, ctx->mrec, and ctx->attr now point to the
		 * mft record containing the attribute represented by the
		 * current al_entry.
		 */
		/*
		 * We could call into ntfs_attr_find() to find the right
		 * attribute in this mft record but this would be less
		 * efficient and not quite accurate as ntfs_attr_find() ignores
		 * the attribute instance numbers for example which become
		 * important when one plays with attribute lists. Also,
		 * because a proper match has been found in the attribute list
		 * entry above, the comparison can now be optimized. So it is
		 * worth re-implementing a simplified ntfs_attr_find() here.
		 */
		a = ctx->attr;
		/*
		 * Use a manual loop so we can still use break and continue
		 * with the same meanings as above.
		 */
do_next_attr_loop:
		if ((u8*)a < (u8*)ctx->mrec || (u8*)a > (u8*)ctx->mrec +
				le32_to_cpu(ctx->mrec->bytes_allocated))
			break;
		if (a->type == AT_END)
			break;
		if (!a->length)
			break;
		if (al_entry->instance != a->instance)
			goto do_next_attr;
		/*
		 * If the type and/or the name are mismatched between the
		 * attribute list entry and the attribute record, there is
		 * corruption so we break and return error EIO.
		 */
		if (al_entry->type != a->type)
			break;
		if (!ntfs_are_names_equal((ntfschar*)((u8*)a +
				le16_to_cpu(a->name_offset)), a->name_length,
				al_name, al_name_len, CASE_SENSITIVE,
				vol->upcase, vol->upcase_len))
			break;
		ctx->attr = a;
		/*
		 * If no @val specified or @val specified and it matches, we
		 * have found it!
		 */
		if (!val || (!a->non_resident && le32_to_cpu(
				a->data.resident.value_length) == val_len &&
				!memcmp((u8*)a +
				le16_to_cpu(a->data.resident.value_offset),
				val, val_len))) {
			ntfs_debug("Done, found.");
			return 0;
		}
do_next_attr:
		/* Proceed to the next attribute in the current mft record. */
		a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length));
		goto do_next_attr_loop;
	}
	if (!err) {
		ntfs_error(vol->sb, "Base inode 0x%lx contains corrupt "
				"attribute list attribute.%s", base_ni->mft_no,
				es);
		err = -EIO;
	}
	if (ni != base_ni) {
		if (ni)
			unmap_extent_mft_record(ni);
		ctx->ntfs_ino = base_ni;
		ctx->mrec = ctx->base_mrec;
		ctx->attr = ctx->base_attr;
	}
	if (err != -ENOMEM)
		NVolSetErrors(vol);
	return err;
not_found:
	/*
	 * If we were looking for AT_END, we reset the search context @ctx and
	 * use ntfs_attr_find() to seek to the end of the base mft record.
	 */
	if (type == AT_END) {
		ntfs_attr_reinit_search_ctx(ctx);
		return ntfs_attr_find(AT_END, name, name_len, ic, val, val_len,
				ctx);
	}
	/*
	 * The attribute was not found. Before we return, we want to ensure
	 * @ctx->mrec and @ctx->attr indicate the position at which the
	 * attribute should be inserted in the base mft record. Since we also
	 * want to preserve @ctx->al_entry we cannot reinitialize the search
	 * context using ntfs_attr_reinit_search_ctx() as this would set
	 * @ctx->al_entry to NULL. Thus we do the necessary bits manually (see
	 * ntfs_attr_init_search_ctx() below). Note, we _only_ preserve
	 * @ctx->al_entry as the remaining fields (base_*) are identical to
	 * their non base_ counterparts and we cannot set @ctx->base_attr
	 * correctly yet as we do not know what @ctx->attr will be set to by
	 * the call to ntfs_attr_find() below.
1122 */ 1123 if (ni != base_ni) 1124 unmap_extent_mft_record(ni); 1125 ctx->mrec = ctx->base_mrec; 1126 ctx->attr = (ATTR_RECORD*)((u8*)ctx->mrec + 1127 le16_to_cpu(ctx->mrec->attrs_offset)); 1128 ctx->is_first = true; 1129 ctx->ntfs_ino = base_ni; 1130 ctx->base_ntfs_ino = NULL; 1131 ctx->base_mrec = NULL; 1132 ctx->base_attr = NULL; 1133 /* 1134 * In case there are multiple matches in the base mft record, need to 1135 * keep enumerating until we get an attribute not found response (or 1136 * another error), otherwise we would keep returning the same attribute 1137 * over and over again and all programs using us for enumeration would 1138 * lock up in a tight loop. 1139 */ 1140 do { 1141 err = ntfs_attr_find(type, name, name_len, ic, val, val_len, 1142 ctx); 1143 } while (!err); 1144 ntfs_debug("Done, not found."); 1145 return err; 1146 } 1147 1148 /** 1149 * ntfs_attr_lookup - find an attribute in an ntfs inode 1150 * @type: attribute type to find 1151 * @name: attribute name to find (optional, i.e. NULL means don't care) 1152 * @name_len: attribute name length (only needed if @name present) 1153 * @ic: IGNORE_CASE or CASE_SENSITIVE (ignored if @name not present) 1154 * @lowest_vcn: lowest vcn to find (optional, non-resident attributes only) 1155 * @val: attribute value to find (optional, resident attributes only) 1156 * @val_len: attribute value length 1157 * @ctx: search context with mft record and attribute to search from 1158 * 1159 * Find an attribute in an ntfs inode. On first search @ctx->ntfs_ino must 1160 * be the base mft record and @ctx must have been obtained from a call to 1161 * ntfs_attr_get_search_ctx(). 1162 * 1163 * This function transparently handles attribute lists and @ctx is used to 1164 * continue searches where they were left off at. 1165 * 1166 * After finishing with the attribute/mft record you need to call 1167 * ntfs_attr_put_search_ctx() to cleanup the search context (unmapping any 1168 * mapped inodes, etc). 1169 * 1170 * Return 0 if the search was successful and -errno if not. 1171 * 1172 * When 0, @ctx->attr is the found attribute and it is in mft record 1173 * @ctx->mrec. If an attribute list attribute is present, @ctx->al_entry is 1174 * the attribute list entry of the found attribute. 1175 * 1176 * When -ENOENT, @ctx->attr is the attribute which collates just after the 1177 * attribute being searched for, i.e. if one wants to add the attribute to the 1178 * mft record this is the correct place to insert it into. If an attribute 1179 * list attribute is present, @ctx->al_entry is the attribute list entry which 1180 * collates just after the attribute list entry of the attribute being searched 1181 * for, i.e. if one wants to add the attribute to the mft record this is the 1182 * correct place to insert its attribute list entry into. 1183 * 1184 * When -errno != -ENOENT, an error occurred during the lookup. @ctx->attr is 1185 * then undefined and in particular you should not rely on it not changing. 1186 */ 1187 int ntfs_attr_lookup(const ATTR_TYPE type, const ntfschar *name, 1188 const u32 name_len, const IGNORE_CASE_BOOL ic, 1189 const VCN lowest_vcn, const u8 *val, const u32 val_len, 1190 ntfs_attr_search_ctx *ctx) 1191 { 1192 ntfs_inode *base_ni; 1193 1194 ntfs_debug("Entering."); 1195 BUG_ON(IS_ERR(ctx->mrec)); 1196 if (ctx->base_ntfs_ino) 1197 base_ni = ctx->base_ntfs_ino; 1198 else 1199 base_ni = ctx->ntfs_ino; 1200 /* Sanity check, just for debugging really. 
/**
 * ntfs_attr_init_search_ctx - initialize an attribute search context
 * @ctx:	attribute search context to initialize
 * @ni:		ntfs inode with which to initialize the search context
 * @mrec:	mft record with which to initialize the search context
 *
 * Initialize the attribute search context @ctx with @ni and @mrec.
 */
static inline void ntfs_attr_init_search_ctx(ntfs_attr_search_ctx *ctx,
		ntfs_inode *ni, MFT_RECORD *mrec)
{
	*ctx = (ntfs_attr_search_ctx) {
		.mrec = mrec,
		/* Sanity checks are performed elsewhere. */
		.attr = (ATTR_RECORD*)((u8*)mrec +
				le16_to_cpu(mrec->attrs_offset)),
		.is_first = true,
		.ntfs_ino = ni,
	};
}

/**
 * ntfs_attr_reinit_search_ctx - reinitialize an attribute search context
 * @ctx:	attribute search context to reinitialize
 *
 * Reinitialize the attribute search context @ctx, unmapping an associated
 * extent mft record if present, and initialize the search context again.
 *
 * This is used when a search for a new attribute is being started to reset
 * the search context to the beginning.
 */
void ntfs_attr_reinit_search_ctx(ntfs_attr_search_ctx *ctx)
{
	if (likely(!ctx->base_ntfs_ino)) {
		/* No attribute list. */
		ctx->is_first = true;
		/* Sanity checks are performed elsewhere. */
		ctx->attr = (ATTR_RECORD*)((u8*)ctx->mrec +
				le16_to_cpu(ctx->mrec->attrs_offset));
		/*
		 * This needs resetting due to ntfs_external_attr_find() which
		 * can leave it set despite having zeroed ctx->base_ntfs_ino.
		 */
		ctx->al_entry = NULL;
		return;
	} /* Attribute list. */
	if (ctx->ntfs_ino != ctx->base_ntfs_ino)
		unmap_extent_mft_record(ctx->ntfs_ino);
	ntfs_attr_init_search_ctx(ctx, ctx->base_ntfs_ino, ctx->base_mrec);
	return;
}

/**
 * ntfs_attr_get_search_ctx - allocate/initialize a new attribute search context
 * @ni:		ntfs inode with which to initialize the search context
 * @mrec:	mft record with which to initialize the search context
 *
 * Allocate a new attribute search context, initialize it with @ni and @mrec,
 * and return it. Return NULL if allocation failed.
 */
ntfs_attr_search_ctx *ntfs_attr_get_search_ctx(ntfs_inode *ni, MFT_RECORD *mrec)
{
	ntfs_attr_search_ctx *ctx;

	ctx = kmem_cache_alloc(ntfs_attr_ctx_cache, GFP_NOFS);
	if (ctx)
		ntfs_attr_init_search_ctx(ctx, ni, mrec);
	return ctx;
}

/**
 * ntfs_attr_put_search_ctx - release an attribute search context
 * @ctx:	attribute search context to free
 *
 * Release the attribute search context @ctx, unmapping an associated extent
 * mft record if present.
 */
void ntfs_attr_put_search_ctx(ntfs_attr_search_ctx *ctx)
{
	if (ctx->base_ntfs_ino && ctx->ntfs_ino != ctx->base_ntfs_ino)
		unmap_extent_mft_record(ctx->ntfs_ino);
	kmem_cache_free(ntfs_attr_ctx_cache, ctx);
	return;
}

#ifdef NTFS_RW

/**
 * ntfs_attr_find_in_attrdef - find an attribute in the $AttrDef system file
 * @vol:	ntfs volume to which the attribute belongs
 * @type:	attribute type which to find
 *
 * Search for the attribute definition record corresponding to the attribute
 * @type in the $AttrDef system file.
 *
 * Return the attribute type definition record if found and NULL if not found.
 */
static ATTR_DEF *ntfs_attr_find_in_attrdef(const ntfs_volume *vol,
		const ATTR_TYPE type)
{
	ATTR_DEF *ad;

	BUG_ON(!vol->attrdef);
	BUG_ON(!type);
	for (ad = vol->attrdef; (u8*)ad - (u8*)vol->attrdef <
			vol->attrdef_size && ad->type; ++ad) {
		/* We have not found it yet, carry on searching. */
		if (likely(le32_to_cpu(ad->type) < le32_to_cpu(type)))
			continue;
		/* We found the attribute; return it. */
		if (likely(ad->type == type))
			return ad;
		/* We have gone too far already. No point in continuing. */
		break;
	}
	/* Attribute not found. */
	ntfs_debug("Attribute type 0x%x not found in $AttrDef.",
			le32_to_cpu(type));
	return NULL;
}

/**
 * ntfs_attr_size_bounds_check - check a size of an attribute type for validity
 * @vol:	ntfs volume to which the attribute belongs
 * @type:	attribute type which to check
 * @size:	size which to check
 *
 * Check whether the @size in bytes is valid for an attribute of @type on the
 * ntfs volume @vol. This information is obtained from $AttrDef system file.
 *
 * Return 0 if valid, -ERANGE if not valid, or -ENOENT if the attribute is not
 * listed in $AttrDef.
 */
int ntfs_attr_size_bounds_check(const ntfs_volume *vol, const ATTR_TYPE type,
		const s64 size)
{
	ATTR_DEF *ad;

	BUG_ON(size < 0);
	/*
	 * $ATTRIBUTE_LIST has a maximum size of 256kiB, but this is not
	 * listed in $AttrDef.
	 */
	if (unlikely(type == AT_ATTRIBUTE_LIST && size > 256 * 1024))
		return -ERANGE;
	/* Get the $AttrDef entry for the attribute @type. */
	ad = ntfs_attr_find_in_attrdef(vol, type);
	if (unlikely(!ad))
		return -ENOENT;
	/* Do the bounds check. */
	if (((sle64_to_cpu(ad->min_size) > 0) &&
			size < sle64_to_cpu(ad->min_size)) ||
			((sle64_to_cpu(ad->max_size) > 0) && size >
			sle64_to_cpu(ad->max_size)))
		return -ERANGE;
	return 0;
}

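/*
 * Illustrative sketch, not part of the original source: a caller validating a
 * proposed new size for an attribute before resizing it might do:
 *
 *	err = ntfs_attr_size_bounds_check(vol, ni->type, new_size);
 *	if (err == -ERANGE)
 *		the size is out of bounds for this attribute type;
 *	else if (err == -ENOENT)
 *		the type is not listed in $AttrDef, treat as corruption;
 */
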
/**
 * ntfs_attr_can_be_non_resident - check if an attribute can be non-resident
 * @vol:	ntfs volume to which the attribute belongs
 * @type:	attribute type which to check
 *
 * Check whether the attribute of @type on the ntfs volume @vol is allowed to
 * be non-resident. This information is obtained from $AttrDef system file.
 *
 * Return 0 if the attribute is allowed to be non-resident, -EPERM if not, and
 * -ENOENT if the attribute is not listed in $AttrDef.
 */
int ntfs_attr_can_be_non_resident(const ntfs_volume *vol, const ATTR_TYPE type)
{
	ATTR_DEF *ad;

	/* Find the attribute definition record in $AttrDef. */
	ad = ntfs_attr_find_in_attrdef(vol, type);
	if (unlikely(!ad))
		return -ENOENT;
	/* Check the flags and return the result. */
	if (ad->flags & ATTR_DEF_RESIDENT)
		return -EPERM;
	return 0;
}

/**
 * ntfs_attr_can_be_resident - check if an attribute can be resident
 * @vol:	ntfs volume to which the attribute belongs
 * @type:	attribute type which to check
 *
 * Check whether the attribute of @type on the ntfs volume @vol is allowed to
 * be resident. This information is derived from our ntfs knowledge and may
 * not be completely accurate, especially when user defined attributes are
 * present. Basically we allow everything to be resident except for index
 * allocation and $EA attributes.
 *
 * Return 0 if the attribute is allowed to be resident and -EPERM if not.
 *
 * Warning: In the system file $MFT the attribute $Bitmap must be non-resident
 *	    otherwise windows will not boot (blue screen of death)! We cannot
 *	    check for this here as we do not know which inode's $Bitmap is
 *	    being asked about so the caller needs to special case this.
 */
int ntfs_attr_can_be_resident(const ntfs_volume *vol, const ATTR_TYPE type)
{
	if (type == AT_INDEX_ALLOCATION)
		return -EPERM;
	return 0;
}

/**
 * ntfs_attr_record_resize - resize an attribute record
 * @m:		mft record containing attribute record
 * @a:		attribute record to resize
 * @new_size:	new size in bytes to which to resize the attribute record @a
 *
 * Resize the attribute record @a, i.e. the resident part of the attribute, in
 * the mft record @m to @new_size bytes.
 *
 * Return 0 on success and -errno on error. The following error codes are
 * defined:
 *	-ENOSPC	- Not enough space in the mft record @m to perform the resize.
 *
 * Note: On error, no modifications have been performed whatsoever.
 *
 * Warning: If you make a record smaller without having copied all the data you
 *	    are interested in, the data may be overwritten.
 */
int ntfs_attr_record_resize(MFT_RECORD *m, ATTR_RECORD *a, u32 new_size)
{
	ntfs_debug("Entering for new_size %u.", new_size);
	/* Align to 8 bytes if it is not already done. */
	if (new_size & 7)
		new_size = (new_size + 7) & ~7;
	/* If the actual attribute length has changed, move things around. */
	if (new_size != le32_to_cpu(a->length)) {
		u32 new_muse = le32_to_cpu(m->bytes_in_use) -
				le32_to_cpu(a->length) + new_size;
		/* Not enough space in this mft record. */
		if (new_muse > le32_to_cpu(m->bytes_allocated))
			return -ENOSPC;
		/* Move attributes following @a to their new location. */
		memmove((u8*)a + new_size, (u8*)a + le32_to_cpu(a->length),
				le32_to_cpu(m->bytes_in_use) - ((u8*)a -
				(u8*)m) - le32_to_cpu(a->length));
		/* Adjust @m to reflect the change in used space. */
		m->bytes_in_use = cpu_to_le32(new_muse);
		/* Adjust @a to reflect the new size. */
		if (new_size >= offsetof(ATTR_REC, length) + sizeof(a->length))
			a->length = cpu_to_le32(new_size);
	}
	return 0;
}

/**
 * ntfs_resident_attr_value_resize - resize the value of a resident attribute
 * @m:		mft record containing attribute record
 * @a:		attribute record whose value to resize
 * @new_size:	new size in bytes to which to resize the attribute value of @a
 *
 * Resize the value of the attribute @a in the mft record @m to @new_size bytes.
 * If the value is made bigger, the newly allocated space is cleared.
 *
 * Return 0 on success and -errno on error. The following error codes are
 * defined:
 *	-ENOSPC	- Not enough space in the mft record @m to perform the resize.
 *
 * Note: On error, no modifications have been performed whatsoever.
 *
 * Warning: If you make a record smaller without having copied all the data you
 *	    are interested in, the data may be overwritten.
 */
int ntfs_resident_attr_value_resize(MFT_RECORD *m, ATTR_RECORD *a,
		const u32 new_size)
{
	u32 old_size;

	/* Resize the resident part of the attribute record. */
	if (ntfs_attr_record_resize(m, a,
			le16_to_cpu(a->data.resident.value_offset) + new_size))
		return -ENOSPC;
	/*
	 * The resize succeeded! If we made the attribute value bigger, clear
	 * the area between the old size and @new_size.
	 */
	old_size = le32_to_cpu(a->data.resident.value_length);
	if (new_size > old_size)
		memset((u8*)a + le16_to_cpu(a->data.resident.value_offset) +
				old_size, 0, new_size - old_size);
	/* Finally update the length of the attribute value. */
	a->data.resident.value_length = cpu_to_le32(new_size);
	return 0;
}

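/*
 * Illustrative sketch, not part of the original source: growing the value of
 * a resident attribute located through a search context, falling back when
 * the mft record has no room left:
 *
 *	err = ntfs_resident_attr_value_resize(ctx->mrec, ctx->attr, new_size);
 *	if (err == -ENOSPC)
 *		either make space in the mft record or convert the attribute
 *		to be non-resident, see ntfs_attr_make_non_resident() below;
 */
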
1532 * 1533 * Locking: - The caller must hold i_mutex on the inode. 1534 */ 1535 int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size) 1536 { 1537 s64 new_size; 1538 struct inode *vi = VFS_I(ni); 1539 ntfs_volume *vol = ni->vol; 1540 ntfs_inode *base_ni; 1541 MFT_RECORD *m; 1542 ATTR_RECORD *a; 1543 ntfs_attr_search_ctx *ctx; 1544 struct page *page; 1545 runlist_element *rl; 1546 u8 *kaddr; 1547 unsigned long flags; 1548 int mp_size, mp_ofs, name_ofs, arec_size, err, err2; 1549 u32 attr_size; 1550 u8 old_res_attr_flags; 1551 1552 /* Check that the attribute is allowed to be non-resident. */ 1553 err = ntfs_attr_can_be_non_resident(vol, ni->type); 1554 if (unlikely(err)) { 1555 if (err == -EPERM) 1556 ntfs_debug("Attribute is not allowed to be " 1557 "non-resident."); 1558 else 1559 ntfs_debug("Attribute not defined on the NTFS " 1560 "volume!"); 1561 return err; 1562 } 1563 /* 1564 * FIXME: Compressed and encrypted attributes are not supported when 1565 * writing and we should never have gotten here for them. 1566 */ 1567 BUG_ON(NInoCompressed(ni)); 1568 BUG_ON(NInoEncrypted(ni)); 1569 /* 1570 * The size needs to be aligned to a cluster boundary for allocation 1571 * purposes. 1572 */ 1573 new_size = (data_size + vol->cluster_size - 1) & 1574 ~(vol->cluster_size - 1); 1575 if (new_size > 0) { 1576 /* 1577 * Will need the page later and since the page lock nests 1578 * outside all ntfs locks, we need to get the page now. 1579 */ 1580 page = find_or_create_page(vi->i_mapping, 0, 1581 mapping_gfp_mask(vi->i_mapping)); 1582 if (unlikely(!page)) 1583 return -ENOMEM; 1584 /* Start by allocating clusters to hold the attribute value. */ 1585 rl = ntfs_cluster_alloc(vol, 0, new_size >> 1586 vol->cluster_size_bits, -1, DATA_ZONE, true); 1587 if (IS_ERR(rl)) { 1588 err = PTR_ERR(rl); 1589 ntfs_debug("Failed to allocate cluster%s, error code " 1590 "%i.", (new_size >> 1591 vol->cluster_size_bits) > 1 ? "s" : "", 1592 err); 1593 goto page_err_out; 1594 } 1595 } else { 1596 rl = NULL; 1597 page = NULL; 1598 } 1599 /* Determine the size of the mapping pairs array. */ 1600 mp_size = ntfs_get_size_for_mapping_pairs(vol, rl, 0, -1); 1601 if (unlikely(mp_size < 0)) { 1602 err = mp_size; 1603 ntfs_debug("Failed to get size for mapping pairs array, error " 1604 "code %i.", err); 1605 goto rl_err_out; 1606 } 1607 down_write(&ni->runlist.lock); 1608 if (!NInoAttr(ni)) 1609 base_ni = ni; 1610 else 1611 base_ni = ni->ext.base_ntfs_ino; 1612 m = map_mft_record(base_ni); 1613 if (IS_ERR(m)) { 1614 err = PTR_ERR(m); 1615 m = NULL; 1616 ctx = NULL; 1617 goto err_out; 1618 } 1619 ctx = ntfs_attr_get_search_ctx(base_ni, m); 1620 if (unlikely(!ctx)) { 1621 err = -ENOMEM; 1622 goto err_out; 1623 } 1624 err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len, 1625 CASE_SENSITIVE, 0, NULL, 0, ctx); 1626 if (unlikely(err)) { 1627 if (err == -ENOENT) 1628 err = -EIO; 1629 goto err_out; 1630 } 1631 m = ctx->mrec; 1632 a = ctx->attr; 1633 BUG_ON(NInoNonResident(ni)); 1634 BUG_ON(a->non_resident); 1635 /* 1636 * Calculate new offsets for the name and the mapping pairs array. 
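	 * The name goes straight after the non-resident attribute record
	 * header, which ends before compressed_size unless the attribute is
	 * sparse or compressed, and the mapping pairs array follows the name.
	 * Both offsets are rounded up to 8-byte boundaries, so for an unnamed,
	 * non-sparse, uncompressed attribute name_ofs and mp_ofs coincide.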
1637 */ 1638 if (NInoSparse(ni) || NInoCompressed(ni)) 1639 name_ofs = (offsetof(ATTR_REC, 1640 data.non_resident.compressed_size) + 1641 sizeof(a->data.non_resident.compressed_size) + 1642 7) & ~7; 1643 else 1644 name_ofs = (offsetof(ATTR_REC, 1645 data.non_resident.compressed_size) + 7) & ~7; 1646 mp_ofs = (name_ofs + a->name_length * sizeof(ntfschar) + 7) & ~7; 1647 /* 1648 * Determine the size of the resident part of the now non-resident 1649 * attribute record. 1650 */ 1651 arec_size = (mp_ofs + mp_size + 7) & ~7; 1652 /* 1653 * If the page is not uptodate bring it uptodate by copying from the 1654 * attribute value. 1655 */ 1656 attr_size = le32_to_cpu(a->data.resident.value_length); 1657 BUG_ON(attr_size != data_size); 1658 if (page && !PageUptodate(page)) { 1659 kaddr = kmap_atomic(page); 1660 memcpy(kaddr, (u8*)a + 1661 le16_to_cpu(a->data.resident.value_offset), 1662 attr_size); 1663 memset(kaddr + attr_size, 0, PAGE_SIZE - attr_size); 1664 kunmap_atomic(kaddr); 1665 flush_dcache_page(page); 1666 SetPageUptodate(page); 1667 } 1668 /* Backup the attribute flag. */ 1669 old_res_attr_flags = a->data.resident.flags; 1670 /* Resize the resident part of the attribute record. */ 1671 err = ntfs_attr_record_resize(m, a, arec_size); 1672 if (unlikely(err)) 1673 goto err_out; 1674 /* 1675 * Convert the resident part of the attribute record to describe a 1676 * non-resident attribute. 1677 */ 1678 a->non_resident = 1; 1679 /* Move the attribute name if it exists and update the offset. */ 1680 if (a->name_length) 1681 memmove((u8*)a + name_ofs, (u8*)a + le16_to_cpu(a->name_offset), 1682 a->name_length * sizeof(ntfschar)); 1683 a->name_offset = cpu_to_le16(name_ofs); 1684 /* Setup the fields specific to non-resident attributes. */ 1685 a->data.non_resident.lowest_vcn = 0; 1686 a->data.non_resident.highest_vcn = cpu_to_sle64((new_size - 1) >> 1687 vol->cluster_size_bits); 1688 a->data.non_resident.mapping_pairs_offset = cpu_to_le16(mp_ofs); 1689 memset(&a->data.non_resident.reserved, 0, 1690 sizeof(a->data.non_resident.reserved)); 1691 a->data.non_resident.allocated_size = cpu_to_sle64(new_size); 1692 a->data.non_resident.data_size = 1693 a->data.non_resident.initialized_size = 1694 cpu_to_sle64(attr_size); 1695 if (NInoSparse(ni) || NInoCompressed(ni)) { 1696 a->data.non_resident.compression_unit = 0; 1697 if (NInoCompressed(ni) || vol->major_ver < 3) 1698 a->data.non_resident.compression_unit = 4; 1699 a->data.non_resident.compressed_size = 1700 a->data.non_resident.allocated_size; 1701 } else 1702 a->data.non_resident.compression_unit = 0; 1703 /* Generate the mapping pairs array into the attribute record. */ 1704 err = ntfs_mapping_pairs_build(vol, (u8*)a + mp_ofs, 1705 arec_size - mp_ofs, rl, 0, -1, NULL); 1706 if (unlikely(err)) { 1707 ntfs_debug("Failed to build mapping pairs, error code %i.", 1708 err); 1709 goto undo_err_out; 1710 } 1711 /* Setup the in-memory attribute structure to be non-resident. */ 1712 ni->runlist.rl = rl; 1713 write_lock_irqsave(&ni->size_lock, flags); 1714 ni->allocated_size = new_size; 1715 if (NInoSparse(ni) || NInoCompressed(ni)) { 1716 ni->itype.compressed.size = ni->allocated_size; 1717 if (a->data.non_resident.compression_unit) { 1718 ni->itype.compressed.block_size = 1U << (a->data. 
1719 non_resident.compression_unit + 1720 vol->cluster_size_bits); 1721 ni->itype.compressed.block_size_bits = 1722 ffs(ni->itype.compressed.block_size) - 1723 1; 1724 ni->itype.compressed.block_clusters = 1U << 1725 a->data.non_resident.compression_unit; 1726 } else { 1727 ni->itype.compressed.block_size = 0; 1728 ni->itype.compressed.block_size_bits = 0; 1729 ni->itype.compressed.block_clusters = 0; 1730 } 1731 vi->i_blocks = ni->itype.compressed.size >> 9; 1732 } else 1733 vi->i_blocks = ni->allocated_size >> 9; 1734 write_unlock_irqrestore(&ni->size_lock, flags); 1735 /* 1736 * This needs to be last since the address space operations ->readpage 1737 * and ->writepage can run concurrently with us as they are not 1738 * serialized on i_mutex. Note, we are not allowed to fail once we flip 1739 * this switch, which is another reason to do this last. 1740 */ 1741 NInoSetNonResident(ni); 1742 /* Mark the mft record dirty, so it gets written back. */ 1743 flush_dcache_mft_record_page(ctx->ntfs_ino); 1744 mark_mft_record_dirty(ctx->ntfs_ino); 1745 ntfs_attr_put_search_ctx(ctx); 1746 unmap_mft_record(base_ni); 1747 up_write(&ni->runlist.lock); 1748 if (page) { 1749 set_page_dirty(page); 1750 unlock_page(page); 1751 put_page(page); 1752 } 1753 ntfs_debug("Done."); 1754 return 0; 1755 undo_err_out: 1756 /* Convert the attribute back into a resident attribute. */ 1757 a->non_resident = 0; 1758 /* Move the attribute name if it exists and update the offset. */ 1759 name_ofs = (offsetof(ATTR_RECORD, data.resident.reserved) + 1760 sizeof(a->data.resident.reserved) + 7) & ~7; 1761 if (a->name_length) 1762 memmove((u8*)a + name_ofs, (u8*)a + le16_to_cpu(a->name_offset), 1763 a->name_length * sizeof(ntfschar)); 1764 mp_ofs = (name_ofs + a->name_length * sizeof(ntfschar) + 7) & ~7; 1765 a->name_offset = cpu_to_le16(name_ofs); 1766 arec_size = (mp_ofs + attr_size + 7) & ~7; 1767 /* Resize the resident part of the attribute record. */ 1768 err2 = ntfs_attr_record_resize(m, a, arec_size); 1769 if (unlikely(err2)) { 1770 /* 1771 * This cannot happen (well if memory corruption is at work it 1772 * could happen in theory), but deal with it as well as we can. 1773 * If the old size is too small, truncate the attribute, 1774 * otherwise simply give it a larger allocated size. 1775 * FIXME: Should check whether chkdsk complains when the 1776 * allocated size is much bigger than the resident value size. 1777 */ 1778 arec_size = le32_to_cpu(a->length); 1779 if ((mp_ofs + attr_size) > arec_size) { 1780 err2 = attr_size; 1781 attr_size = arec_size - mp_ofs; 1782 ntfs_error(vol->sb, "Failed to undo partial resident " 1783 "to non-resident attribute " 1784 "conversion. Truncating inode 0x%lx, " 1785 "attribute type 0x%x from %i bytes to " 1786 "%i bytes to maintain metadata " 1787 "consistency. THIS MEANS YOU ARE " 1788 "LOSING %i BYTES DATA FROM THIS %s.", 1789 vi->i_ino, 1790 (unsigned)le32_to_cpu(ni->type), 1791 err2, attr_size, err2 - attr_size, 1792 ((ni->type == AT_DATA) && 1793 !ni->name_len) ? "FILE": "ATTRIBUTE"); 1794 write_lock_irqsave(&ni->size_lock, flags); 1795 ni->initialized_size = attr_size; 1796 i_size_write(vi, attr_size); 1797 write_unlock_irqrestore(&ni->size_lock, flags); 1798 } 1799 } 1800 /* Setup the fields specific to resident attributes. 
*/ 1801 a->data.resident.value_length = cpu_to_le32(attr_size); 1802 a->data.resident.value_offset = cpu_to_le16(mp_ofs); 1803 a->data.resident.flags = old_res_attr_flags; 1804 memset(&a->data.resident.reserved, 0, 1805 sizeof(a->data.resident.reserved)); 1806 /* Copy the data from the page back to the attribute value. */ 1807 if (page) { 1808 kaddr = kmap_atomic(page); 1809 memcpy((u8*)a + mp_ofs, kaddr, attr_size); 1810 kunmap_atomic(kaddr); 1811 } 1812 /* Setup the allocated size in the ntfs inode in case it changed. */ 1813 write_lock_irqsave(&ni->size_lock, flags); 1814 ni->allocated_size = arec_size - mp_ofs; 1815 write_unlock_irqrestore(&ni->size_lock, flags); 1816 /* Mark the mft record dirty, so it gets written back. */ 1817 flush_dcache_mft_record_page(ctx->ntfs_ino); 1818 mark_mft_record_dirty(ctx->ntfs_ino); 1819 err_out: 1820 if (ctx) 1821 ntfs_attr_put_search_ctx(ctx); 1822 if (m) 1823 unmap_mft_record(base_ni); 1824 ni->runlist.rl = NULL; 1825 up_write(&ni->runlist.lock); 1826 rl_err_out: 1827 if (rl) { 1828 if (ntfs_cluster_free_from_rl(vol, rl) < 0) { 1829 ntfs_error(vol->sb, "Failed to release allocated " 1830 "cluster(s) in error code path. Run " 1831 "chkdsk to recover the lost " 1832 "cluster(s)."); 1833 NVolSetErrors(vol); 1834 } 1835 ntfs_free(rl); 1836 page_err_out: 1837 unlock_page(page); 1838 put_page(page); 1839 } 1840 if (err == -EINVAL) 1841 err = -EIO; 1842 return err; 1843 } 1844 1845 /** 1846 * ntfs_attr_extend_allocation - extend the allocated space of an attribute 1847 * @ni: ntfs inode of the attribute whose allocation to extend 1848 * @new_alloc_size: new size in bytes to which to extend the allocation to 1849 * @new_data_size: new size in bytes to which to extend the data to 1850 * @data_start: beginning of region which is required to be non-sparse 1851 * 1852 * Extend the allocated space of an attribute described by the ntfs inode @ni 1853 * to @new_alloc_size bytes. If @data_start is -1, the whole extension may be 1854 * implemented as a hole in the file (as long as both the volume and the ntfs 1855 * inode @ni have sparse support enabled). If @data_start is >= 0, then the 1856 * region between the old allocated size and @data_start - 1 may be made sparse 1857 * but the regions between @data_start and @new_alloc_size must be backed by 1858 * actual clusters. 1859 * 1860 * If @new_data_size is -1, it is ignored. If it is >= 0, then the data size 1861 * of the attribute is extended to @new_data_size. Note that the i_size of the 1862 * vfs inode is not updated. Only the data size in the base attribute record 1863 * is updated. The caller has to update i_size separately if this is required. 1864 * WARNING: It is a BUG() for @new_data_size to be smaller than the old data 1865 * size as well as for @new_data_size to be greater than @new_alloc_size. 1866 * 1867 * For resident attributes this involves resizing the attribute record and if 1868 * necessary moving it and/or other attributes into extent mft records and/or 1869 * converting the attribute to a non-resident attribute which in turn involves 1870 * extending the allocation of a non-resident attribute as described below. 1871 * 1872 * For non-resident attributes this involves allocating clusters in the data 1873 * zone on the volume (except for regions that are being made sparse) and 1874 * extending the run list to describe the allocated clusters as well as 1875 * updating the mapping pairs array of the attribute. 
This in turn involves 1876 * resizing the attribute record and if necessary moving it and/or other 1877 * attributes into extent mft records and/or splitting the attribute record 1878 * into multiple extent attribute records. 1879 * 1880 * Also, the attribute list attribute is updated if present and in some of the 1881 * above cases (the ones where extent mft records/attributes come into play), 1882 * an attribute list attribute is created if not already present. 1883 * 1884 * Return the new allocated size on success and -errno on error. In the case 1885 * that an error is encountered but a partial extension at least up to 1886 * @data_start (if present) is possible, the allocation is partially extended 1887 * and this is returned. This means the caller must check the returned size to 1888 * determine if the extension was partial. If @data_start is -1 then partial 1889 * allocations are not performed. 1890 * 1891 * WARNING: Do not call ntfs_attr_extend_allocation() for $MFT/$DATA. 1892 * 1893 * Locking: This function takes the runlist lock of @ni for writing as well as 1894 * locking the mft record of the base ntfs inode. These locks are maintained 1895 * throughout execution of the function. These locks are required so that the 1896 * attribute can be resized safely and so that it can for example be converted 1897 * from resident to non-resident safely. 1898 * 1899 * TODO: At present attribute list attribute handling is not implemented. 1900 * 1901 * TODO: At present it is not safe to call this function for anything other 1902 * than the $DATA attribute(s) of an uncompressed and unencrypted file. 1903 */ 1904 s64 ntfs_attr_extend_allocation(ntfs_inode *ni, s64 new_alloc_size, 1905 const s64 new_data_size, const s64 data_start) 1906 { 1907 VCN vcn; 1908 s64 ll, allocated_size, start = data_start; 1909 struct inode *vi = VFS_I(ni); 1910 ntfs_volume *vol = ni->vol; 1911 ntfs_inode *base_ni; 1912 MFT_RECORD *m; 1913 ATTR_RECORD *a; 1914 ntfs_attr_search_ctx *ctx; 1915 runlist_element *rl, *rl2; 1916 unsigned long flags; 1917 int err, mp_size; 1918 u32 attr_len = 0; /* Silence stupid gcc warning. */ 1919 bool mp_rebuilt; 1920 1921 #ifdef DEBUG 1922 read_lock_irqsave(&ni->size_lock, flags); 1923 allocated_size = ni->allocated_size; 1924 read_unlock_irqrestore(&ni->size_lock, flags); 1925 ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, " 1926 "old_allocated_size 0x%llx, " 1927 "new_allocated_size 0x%llx, new_data_size 0x%llx, " 1928 "data_start 0x%llx.", vi->i_ino, 1929 (unsigned)le32_to_cpu(ni->type), 1930 (unsigned long long)allocated_size, 1931 (unsigned long long)new_alloc_size, 1932 (unsigned long long)new_data_size, 1933 (unsigned long long)start); 1934 #endif 1935 retry_extend: 1936 /* 1937 * For non-resident attributes, @start and @new_size need to be aligned 1938 * to cluster boundaries for allocation purposes. 1939 */ 1940 if (NInoNonResident(ni)) { 1941 if (start > 0) 1942 start &= ~(s64)vol->cluster_size_mask; 1943 new_alloc_size = (new_alloc_size + vol->cluster_size - 1) & 1944 ~(s64)vol->cluster_size_mask; 1945 } 1946 BUG_ON(new_data_size >= 0 && new_data_size > new_alloc_size); 1947 /* Check if new size is allowed in $AttrDef. */ 1948 err = ntfs_attr_size_bounds_check(vol, ni->type, new_alloc_size); 1949 if (unlikely(err)) { 1950 /* Only emit errors when the write will fail completely. 
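		   The extension itself fails either way, but if @data_start
		   lies inside the already allocated region the caller can
		   still perform a partial write there, so stay silent in
		   that case.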
*/ 1951 read_lock_irqsave(&ni->size_lock, flags); 1952 allocated_size = ni->allocated_size; 1953 read_unlock_irqrestore(&ni->size_lock, flags); 1954 if (start < 0 || start >= allocated_size) { 1955 if (err == -ERANGE) { 1956 ntfs_error(vol->sb, "Cannot extend allocation " 1957 "of inode 0x%lx, attribute " 1958 "type 0x%x, because the new " 1959 "allocation would exceed the " 1960 "maximum allowed size for " 1961 "this attribute type.", 1962 vi->i_ino, (unsigned) 1963 le32_to_cpu(ni->type)); 1964 } else { 1965 ntfs_error(vol->sb, "Cannot extend allocation " 1966 "of inode 0x%lx, attribute " 1967 "type 0x%x, because this " 1968 "attribute type is not " 1969 "defined on the NTFS volume. " 1970 "Possible corruption! You " 1971 "should run chkdsk!", 1972 vi->i_ino, (unsigned) 1973 le32_to_cpu(ni->type)); 1974 } 1975 } 1976 /* Translate error code to be POSIX conformant for write(2). */ 1977 if (err == -ERANGE) 1978 err = -EFBIG; 1979 else 1980 err = -EIO; 1981 return err; 1982 } 1983 if (!NInoAttr(ni)) 1984 base_ni = ni; 1985 else 1986 base_ni = ni->ext.base_ntfs_ino; 1987 /* 1988 * We will be modifying both the runlist (if non-resident) and the mft 1989 * record so lock them both down. 1990 */ 1991 down_write(&ni->runlist.lock); 1992 m = map_mft_record(base_ni); 1993 if (IS_ERR(m)) { 1994 err = PTR_ERR(m); 1995 m = NULL; 1996 ctx = NULL; 1997 goto err_out; 1998 } 1999 ctx = ntfs_attr_get_search_ctx(base_ni, m); 2000 if (unlikely(!ctx)) { 2001 err = -ENOMEM; 2002 goto err_out; 2003 } 2004 read_lock_irqsave(&ni->size_lock, flags); 2005 allocated_size = ni->allocated_size; 2006 read_unlock_irqrestore(&ni->size_lock, flags); 2007 /* 2008 * If non-resident, seek to the last extent. If resident, there is 2009 * only one extent, so seek to that. 2010 */ 2011 vcn = NInoNonResident(ni) ? allocated_size >> vol->cluster_size_bits : 2012 0; 2013 /* 2014 * Abort if someone did the work whilst we waited for the locks. If we 2015 * just converted the attribute from resident to non-resident it is 2016 * likely that exactly this has happened already. We cannot quite 2017 * abort if we need to update the data size. 2018 */ 2019 if (unlikely(new_alloc_size <= allocated_size)) { 2020 ntfs_debug("Allocated size already exceeds requested size."); 2021 new_alloc_size = allocated_size; 2022 if (new_data_size < 0) 2023 goto done; 2024 /* 2025 * We want the first attribute extent so that we can update the 2026 * data size. 2027 */ 2028 vcn = 0; 2029 } 2030 err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len, 2031 CASE_SENSITIVE, vcn, NULL, 0, ctx); 2032 if (unlikely(err)) { 2033 if (err == -ENOENT) 2034 err = -EIO; 2035 goto err_out; 2036 } 2037 m = ctx->mrec; 2038 a = ctx->attr; 2039 /* Use goto to reduce indentation. */ 2040 if (a->non_resident) 2041 goto do_non_resident_extend; 2042 BUG_ON(NInoNonResident(ni)); 2043 /* The total length of the attribute value. */ 2044 attr_len = le32_to_cpu(a->data.resident.value_length); 2045 /* 2046 * Extend the attribute record to be able to store the new attribute 2047 * size. ntfs_attr_record_resize() will not do anything if the size is 2048 * not changing. 2049 */ 2050 if (new_alloc_size < vol->mft_record_size && 2051 !ntfs_attr_record_resize(m, a, 2052 le16_to_cpu(a->data.resident.value_offset) + 2053 new_alloc_size)) { 2054 /* The resize succeeded! 
*/ 2055 write_lock_irqsave(&ni->size_lock, flags); 2056 ni->allocated_size = le32_to_cpu(a->length) - 2057 le16_to_cpu(a->data.resident.value_offset); 2058 write_unlock_irqrestore(&ni->size_lock, flags); 2059 if (new_data_size >= 0) { 2060 BUG_ON(new_data_size < attr_len); 2061 a->data.resident.value_length = 2062 cpu_to_le32((u32)new_data_size); 2063 } 2064 goto flush_done; 2065 } 2066 /* 2067 * We have to drop all the locks so we can call 2068 * ntfs_attr_make_non_resident(). This could be optimised by try- 2069 * locking the first page cache page and only if that fails dropping 2070 * the locks, locking the page, and redoing all the locking and 2071 * lookups. While this would be a huge optimisation, it is not worth 2072 * it as this is definitely a slow code path. 2073 */ 2074 ntfs_attr_put_search_ctx(ctx); 2075 unmap_mft_record(base_ni); 2076 up_write(&ni->runlist.lock); 2077 /* 2078 * Not enough space in the mft record, try to make the attribute 2079 * non-resident and if successful restart the extension process. 2080 */ 2081 err = ntfs_attr_make_non_resident(ni, attr_len); 2082 if (likely(!err)) 2083 goto retry_extend; 2084 /* 2085 * Could not make non-resident. If this is due to this not being 2086 * permitted for this attribute type or there not being enough space, 2087 * try to make other attributes non-resident. Otherwise fail. 2088 */ 2089 if (unlikely(err != -EPERM && err != -ENOSPC)) { 2090 /* Only emit errors when the write will fail completely. */ 2091 read_lock_irqsave(&ni->size_lock, flags); 2092 allocated_size = ni->allocated_size; 2093 read_unlock_irqrestore(&ni->size_lock, flags); 2094 if (start < 0 || start >= allocated_size) 2095 ntfs_error(vol->sb, "Cannot extend allocation of " 2096 "inode 0x%lx, attribute type 0x%x, " 2097 "because the conversion from resident " 2098 "to non-resident attribute failed " 2099 "with error code %i.", vi->i_ino, 2100 (unsigned)le32_to_cpu(ni->type), err); 2101 if (err != -ENOMEM) 2102 err = -EIO; 2103 goto conv_err_out; 2104 } 2105 /* TODO: Not implemented from here, abort. */ 2106 read_lock_irqsave(&ni->size_lock, flags); 2107 allocated_size = ni->allocated_size; 2108 read_unlock_irqrestore(&ni->size_lock, flags); 2109 if (start < 0 || start >= allocated_size) { 2110 if (err == -ENOSPC) 2111 ntfs_error(vol->sb, "Not enough space in the mft " 2112 "record/on disk for the non-resident " 2113 "attribute value. This case is not " 2114 "implemented yet."); 2115 else /* if (err == -EPERM) */ 2116 ntfs_error(vol->sb, "This attribute type may not be " 2117 "non-resident. This case is not " 2118 "implemented yet."); 2119 } 2120 err = -EOPNOTSUPP; 2121 goto conv_err_out; 2122 #if 0 2123 // TODO: Attempt to make other attributes non-resident. 2124 if (!err) 2125 goto do_resident_extend; 2126 /* 2127 * Both the attribute list attribute and the standard information 2128 * attribute must remain in the base inode. Thus, if this is one of 2129 * these attributes, we have to try to move other attributes out into 2130 * extent mft records instead. 2131 */ 2132 if (ni->type == AT_ATTRIBUTE_LIST || 2133 ni->type == AT_STANDARD_INFORMATION) { 2134 // TODO: Attempt to move other attributes into extent mft 2135 // records. 2136 err = -EOPNOTSUPP; 2137 if (!err) 2138 goto do_resident_extend; 2139 goto err_out; 2140 } 2141 // TODO: Attempt to move this attribute to an extent mft record, but 2142 // only if it is not already the only attribute in an mft record in 2143 // which case there would be nothing to gain. 
	err = -EOPNOTSUPP;
	if (!err)
		goto do_resident_extend;
	/* There is nothing we can do to make enough space. )-: */
	goto err_out;
#endif
do_non_resident_extend:
	BUG_ON(!NInoNonResident(ni));
	if (new_alloc_size == allocated_size) {
		BUG_ON(vcn);
		goto alloc_done;
	}
	/*
	 * If the data starts after the end of the old allocation, this is a
	 * $DATA attribute, and sparse attributes are enabled on the volume and
	 * for this inode, then create a sparse region between the old
	 * allocated size and the start of the data. Otherwise simply proceed
	 * with filling the whole space between the old allocated size and the
	 * new allocated size with clusters.
	 */
	if ((start >= 0 && start <= allocated_size) || ni->type != AT_DATA ||
			!NVolSparseEnabled(vol) || NInoSparseDisabled(ni))
		goto skip_sparse;
	// TODO: This is not implemented yet. We just fill in with real
	// clusters for now...
	ntfs_debug("Inserting holes is not implemented yet. Falling back to "
			"allocating real clusters instead.");
skip_sparse:
	rl = ni->runlist.rl;
	if (likely(rl)) {
		/* Seek to the end of the runlist. */
		while (rl->length)
			rl++;
	}
	/* If this attribute extent is not mapped, map it now. */
	if (unlikely(!rl || rl->lcn == LCN_RL_NOT_MAPPED ||
			(rl->lcn == LCN_ENOENT && rl > ni->runlist.rl &&
			(rl-1)->lcn == LCN_RL_NOT_MAPPED))) {
		if (!rl && !allocated_size)
			goto first_alloc;
		rl = ntfs_mapping_pairs_decompress(vol, a, ni->runlist.rl);
		if (IS_ERR(rl)) {
			err = PTR_ERR(rl);
			if (start < 0 || start >= allocated_size)
				ntfs_error(vol->sb, "Cannot extend allocation "
						"of inode 0x%lx, attribute "
						"type 0x%x, because the "
						"mapping of a runlist "
						"fragment failed with error "
						"code %i.", vi->i_ino,
						(unsigned)le32_to_cpu(ni->type),
						err);
			if (err != -ENOMEM)
				err = -EIO;
			goto err_out;
		}
		ni->runlist.rl = rl;
		/* Seek to the end of the runlist. */
		while (rl->length)
			rl++;
	}
	/*
	 * We now know the runlist of the last extent is mapped and @rl is at
	 * the end of the runlist. We want to begin allocating clusters
	 * starting at the last allocated cluster to reduce fragmentation. If
	 * there are no valid LCNs in the attribute we let the cluster
	 * allocator choose the starting cluster.
	 */
	/* If the last LCN is a hole or similar, seek back to last real LCN. */
	while (rl->lcn < 0 && rl > ni->runlist.rl)
		rl--;
first_alloc:
	// FIXME: Need to implement partial allocations so at least part of the
	// write can be performed when start >= 0. (Needed for POSIX write(2)
	// conformance.)
	rl2 = ntfs_cluster_alloc(vol, allocated_size >> vol->cluster_size_bits,
			(new_alloc_size - allocated_size) >>
			vol->cluster_size_bits, (rl && (rl->lcn >= 0)) ?
2222 rl->lcn + rl->length : -1, DATA_ZONE, true); 2223 if (IS_ERR(rl2)) { 2224 err = PTR_ERR(rl2); 2225 if (start < 0 || start >= allocated_size) 2226 ntfs_error(vol->sb, "Cannot extend allocation of " 2227 "inode 0x%lx, attribute type 0x%x, " 2228 "because the allocation of clusters " 2229 "failed with error code %i.", vi->i_ino, 2230 (unsigned)le32_to_cpu(ni->type), err); 2231 if (err != -ENOMEM && err != -ENOSPC) 2232 err = -EIO; 2233 goto err_out; 2234 } 2235 rl = ntfs_runlists_merge(ni->runlist.rl, rl2); 2236 if (IS_ERR(rl)) { 2237 err = PTR_ERR(rl); 2238 if (start < 0 || start >= allocated_size) 2239 ntfs_error(vol->sb, "Cannot extend allocation of " 2240 "inode 0x%lx, attribute type 0x%x, " 2241 "because the runlist merge failed " 2242 "with error code %i.", vi->i_ino, 2243 (unsigned)le32_to_cpu(ni->type), err); 2244 if (err != -ENOMEM) 2245 err = -EIO; 2246 if (ntfs_cluster_free_from_rl(vol, rl2)) { 2247 ntfs_error(vol->sb, "Failed to release allocated " 2248 "cluster(s) in error code path. Run " 2249 "chkdsk to recover the lost " 2250 "cluster(s)."); 2251 NVolSetErrors(vol); 2252 } 2253 ntfs_free(rl2); 2254 goto err_out; 2255 } 2256 ni->runlist.rl = rl; 2257 ntfs_debug("Allocated 0x%llx clusters.", (long long)(new_alloc_size - 2258 allocated_size) >> vol->cluster_size_bits); 2259 /* Find the runlist element with which the attribute extent starts. */ 2260 ll = sle64_to_cpu(a->data.non_resident.lowest_vcn); 2261 rl2 = ntfs_rl_find_vcn_nolock(rl, ll); 2262 BUG_ON(!rl2); 2263 BUG_ON(!rl2->length); 2264 BUG_ON(rl2->lcn < LCN_HOLE); 2265 mp_rebuilt = false; 2266 /* Get the size for the new mapping pairs array for this extent. */ 2267 mp_size = ntfs_get_size_for_mapping_pairs(vol, rl2, ll, -1); 2268 if (unlikely(mp_size <= 0)) { 2269 err = mp_size; 2270 if (start < 0 || start >= allocated_size) 2271 ntfs_error(vol->sb, "Cannot extend allocation of " 2272 "inode 0x%lx, attribute type 0x%x, " 2273 "because determining the size for the " 2274 "mapping pairs failed with error code " 2275 "%i.", vi->i_ino, 2276 (unsigned)le32_to_cpu(ni->type), err); 2277 err = -EIO; 2278 goto undo_alloc; 2279 } 2280 /* Extend the attribute record to fit the bigger mapping pairs array. */ 2281 attr_len = le32_to_cpu(a->length); 2282 err = ntfs_attr_record_resize(m, a, mp_size + 2283 le16_to_cpu(a->data.non_resident.mapping_pairs_offset)); 2284 if (unlikely(err)) { 2285 BUG_ON(err != -ENOSPC); 2286 // TODO: Deal with this by moving this extent to a new mft 2287 // record or by starting a new extent in a new mft record, 2288 // possibly by extending this extent partially and filling it 2289 // and creating a new extent for the remainder, or by making 2290 // other attributes non-resident and/or by moving other 2291 // attributes out of this mft record. 2292 if (start < 0 || start >= allocated_size) 2293 ntfs_error(vol->sb, "Not enough space in the mft " 2294 "record for the extended attribute " 2295 "record. This case is not " 2296 "implemented yet."); 2297 err = -EOPNOTSUPP; 2298 goto undo_alloc; 2299 } 2300 mp_rebuilt = true; 2301 /* Generate the mapping pairs array directly into the attr record. 
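	   The array is written in place at mapping_pairs_offset and covers
	   the runlist from this extent's lowest vcn (@ll) to the end of the
	   extended runlist.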
*/ 2302 err = ntfs_mapping_pairs_build(vol, (u8*)a + 2303 le16_to_cpu(a->data.non_resident.mapping_pairs_offset), 2304 mp_size, rl2, ll, -1, NULL); 2305 if (unlikely(err)) { 2306 if (start < 0 || start >= allocated_size) 2307 ntfs_error(vol->sb, "Cannot extend allocation of " 2308 "inode 0x%lx, attribute type 0x%x, " 2309 "because building the mapping pairs " 2310 "failed with error code %i.", vi->i_ino, 2311 (unsigned)le32_to_cpu(ni->type), err); 2312 err = -EIO; 2313 goto undo_alloc; 2314 } 2315 /* Update the highest_vcn. */ 2316 a->data.non_resident.highest_vcn = cpu_to_sle64((new_alloc_size >> 2317 vol->cluster_size_bits) - 1); 2318 /* 2319 * We now have extended the allocated size of the attribute. Reflect 2320 * this in the ntfs_inode structure and the attribute record. 2321 */ 2322 if (a->data.non_resident.lowest_vcn) { 2323 /* 2324 * We are not in the first attribute extent, switch to it, but 2325 * first ensure the changes will make it to disk later. 2326 */ 2327 flush_dcache_mft_record_page(ctx->ntfs_ino); 2328 mark_mft_record_dirty(ctx->ntfs_ino); 2329 ntfs_attr_reinit_search_ctx(ctx); 2330 err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len, 2331 CASE_SENSITIVE, 0, NULL, 0, ctx); 2332 if (unlikely(err)) 2333 goto restore_undo_alloc; 2334 /* @m is not used any more so no need to set it. */ 2335 a = ctx->attr; 2336 } 2337 write_lock_irqsave(&ni->size_lock, flags); 2338 ni->allocated_size = new_alloc_size; 2339 a->data.non_resident.allocated_size = cpu_to_sle64(new_alloc_size); 2340 /* 2341 * FIXME: This would fail if @ni is a directory, $MFT, or an index, 2342 * since those can have sparse/compressed set. For example can be 2343 * set compressed even though it is not compressed itself and in that 2344 * case the bit means that files are to be created compressed in the 2345 * directory... At present this is ok as this code is only called for 2346 * regular files, and only for their $DATA attribute(s). 2347 * FIXME: The calculation is wrong if we created a hole above. For now 2348 * it does not matter as we never create holes. 2349 */ 2350 if (NInoSparse(ni) || NInoCompressed(ni)) { 2351 ni->itype.compressed.size += new_alloc_size - allocated_size; 2352 a->data.non_resident.compressed_size = 2353 cpu_to_sle64(ni->itype.compressed.size); 2354 vi->i_blocks = ni->itype.compressed.size >> 9; 2355 } else 2356 vi->i_blocks = new_alloc_size >> 9; 2357 write_unlock_irqrestore(&ni->size_lock, flags); 2358 alloc_done: 2359 if (new_data_size >= 0) { 2360 BUG_ON(new_data_size < 2361 sle64_to_cpu(a->data.non_resident.data_size)); 2362 a->data.non_resident.data_size = cpu_to_sle64(new_data_size); 2363 } 2364 flush_done: 2365 /* Ensure the changes make it to disk. 
*/ 2366 flush_dcache_mft_record_page(ctx->ntfs_ino); 2367 mark_mft_record_dirty(ctx->ntfs_ino); 2368 done: 2369 ntfs_attr_put_search_ctx(ctx); 2370 unmap_mft_record(base_ni); 2371 up_write(&ni->runlist.lock); 2372 ntfs_debug("Done, new_allocated_size 0x%llx.", 2373 (unsigned long long)new_alloc_size); 2374 return new_alloc_size; 2375 restore_undo_alloc: 2376 if (start < 0 || start >= allocated_size) 2377 ntfs_error(vol->sb, "Cannot complete extension of allocation " 2378 "of inode 0x%lx, attribute type 0x%x, because " 2379 "lookup of first attribute extent failed with " 2380 "error code %i.", vi->i_ino, 2381 (unsigned)le32_to_cpu(ni->type), err); 2382 if (err == -ENOENT) 2383 err = -EIO; 2384 ntfs_attr_reinit_search_ctx(ctx); 2385 if (ntfs_attr_lookup(ni->type, ni->name, ni->name_len, CASE_SENSITIVE, 2386 allocated_size >> vol->cluster_size_bits, NULL, 0, 2387 ctx)) { 2388 ntfs_error(vol->sb, "Failed to find last attribute extent of " 2389 "attribute in error code path. Run chkdsk to " 2390 "recover."); 2391 write_lock_irqsave(&ni->size_lock, flags); 2392 ni->allocated_size = new_alloc_size; 2393 /* 2394 * FIXME: This would fail if @ni is a directory... See above. 2395 * FIXME: The calculation is wrong if we created a hole above. 2396 * For now it does not matter as we never create holes. 2397 */ 2398 if (NInoSparse(ni) || NInoCompressed(ni)) { 2399 ni->itype.compressed.size += new_alloc_size - 2400 allocated_size; 2401 vi->i_blocks = ni->itype.compressed.size >> 9; 2402 } else 2403 vi->i_blocks = new_alloc_size >> 9; 2404 write_unlock_irqrestore(&ni->size_lock, flags); 2405 ntfs_attr_put_search_ctx(ctx); 2406 unmap_mft_record(base_ni); 2407 up_write(&ni->runlist.lock); 2408 /* 2409 * The only thing that is now wrong is the allocated size of the 2410 * base attribute extent which chkdsk should be able to fix. 2411 */ 2412 NVolSetErrors(vol); 2413 return err; 2414 } 2415 ctx->attr->data.non_resident.highest_vcn = cpu_to_sle64( 2416 (allocated_size >> vol->cluster_size_bits) - 1); 2417 undo_alloc: 2418 ll = allocated_size >> vol->cluster_size_bits; 2419 if (ntfs_cluster_free(ni, ll, -1, ctx) < 0) { 2420 ntfs_error(vol->sb, "Failed to release allocated cluster(s) " 2421 "in error code path. Run chkdsk to recover " 2422 "the lost cluster(s)."); 2423 NVolSetErrors(vol); 2424 } 2425 m = ctx->mrec; 2426 a = ctx->attr; 2427 /* 2428 * If the runlist truncation fails and/or the search context is no 2429 * longer valid, we cannot resize the attribute record or build the 2430 * mapping pairs array thus we mark the inode bad so that no access to 2431 * the freed clusters can happen. 2432 */ 2433 if (ntfs_rl_truncate_nolock(vol, &ni->runlist, ll) || IS_ERR(m)) { 2434 ntfs_error(vol->sb, "Failed to %s in error code path. Run " 2435 "chkdsk to recover.", IS_ERR(m) ? 2436 "restore attribute search context" : 2437 "truncate attribute runlist"); 2438 NVolSetErrors(vol); 2439 } else if (mp_rebuilt) { 2440 if (ntfs_attr_record_resize(m, a, attr_len)) { 2441 ntfs_error(vol->sb, "Failed to restore attribute " 2442 "record in error code path. Run " 2443 "chkdsk to recover."); 2444 NVolSetErrors(vol); 2445 } else /* if (success) */ { 2446 if (ntfs_mapping_pairs_build(vol, (u8*)a + le16_to_cpu( 2447 a->data.non_resident. 2448 mapping_pairs_offset), attr_len - 2449 le16_to_cpu(a->data.non_resident. 2450 mapping_pairs_offset), rl2, ll, -1, 2451 NULL)) { 2452 ntfs_error(vol->sb, "Failed to restore " 2453 "mapping pairs array in error " 2454 "code path. 
Run chkdsk to "
						"recover.");
				NVolSetErrors(vol);
			}
			flush_dcache_mft_record_page(ctx->ntfs_ino);
			mark_mft_record_dirty(ctx->ntfs_ino);
		}
	}
err_out:
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	up_write(&ni->runlist.lock);
conv_err_out:
	ntfs_debug("Failed. Returning error code %i.", err);
	return err;
}

/**
 * ntfs_attr_set - fill (a part of) an attribute with a byte
 * @ni:		ntfs inode describing the attribute to fill
 * @ofs:	offset inside the attribute at which to start to fill
 * @cnt:	number of bytes to fill
 * @val:	the unsigned 8-bit value with which to fill the attribute
 *
 * Fill @cnt bytes of the attribute described by the ntfs inode @ni starting at
 * byte offset @ofs inside the attribute with the constant byte @val.
 *
 * This function is effectively like memset() applied to an ntfs attribute.
 * Note this function actually only operates on the page cache pages belonging
 * to the ntfs attribute and it marks them dirty after doing the memset().
 * Thus it relies on the vm dirty page write code paths to cause the modified
 * pages to be written to the mft record/disk.
 *
 * Return 0 on success and -errno on error. An error code of -ESPIPE means
 * that @ofs + @cnt were outside the end of the attribute and no write was
 * performed.
 */
int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
{
	ntfs_volume *vol = ni->vol;
	struct address_space *mapping;
	struct page *page;
	u8 *kaddr;
	pgoff_t idx, end;
	unsigned start_ofs, end_ofs, size;

	ntfs_debug("Entering for ofs 0x%llx, cnt 0x%llx, val 0x%hx.",
			(long long)ofs, (long long)cnt, val);
	BUG_ON(ofs < 0);
	BUG_ON(cnt < 0);
	if (!cnt)
		goto done;
	/*
	 * FIXME: Compressed and encrypted attributes are not supported when
	 * writing and we should never have gotten here for them.
	 */
	BUG_ON(NInoCompressed(ni));
	BUG_ON(NInoEncrypted(ni));
	mapping = VFS_I(ni)->i_mapping;
	/* Work out the starting index and page offset. */
	idx = ofs >> PAGE_SHIFT;
	start_ofs = ofs & ~PAGE_MASK;
	/* Work out the ending index and page offset. */
	end = ofs + cnt;
	end_ofs = end & ~PAGE_MASK;
	/* If the end is outside the inode size return -ESPIPE. */
	if (unlikely(end > i_size_read(VFS_I(ni)))) {
		ntfs_error(vol->sb, "Request exceeds end of attribute.");
		return -ESPIPE;
	}
	end >>= PAGE_SHIFT;
	/* If there is a first partial page, need to do it the slow way. */
	if (start_ofs) {
		page = read_mapping_page(mapping, idx, NULL);
		if (IS_ERR(page)) {
			ntfs_error(vol->sb, "Failed to read first partial "
					"page (error, index 0x%lx).", idx);
			return PTR_ERR(page);
		}
		/*
		 * If the last page is the same as the first page, need to
		 * limit the write to the end offset.
		 */
		size = PAGE_SIZE;
		if (idx == end)
			size = end_ofs;
		kaddr = kmap_atomic(page);
		memset(kaddr + start_ofs, val, size - start_ofs);
		flush_dcache_page(page);
		kunmap_atomic(kaddr);
		set_page_dirty(page);
		put_page(page);
		balance_dirty_pages_ratelimited(mapping);
		cond_resched();
		if (idx == end)
			goto done;
		idx++;
	}
	/* Do the whole pages the fast way. */
	for (; idx < end; idx++) {
		/* Find or create the current page.
(The page is locked.) */ 2557 page = grab_cache_page(mapping, idx); 2558 if (unlikely(!page)) { 2559 ntfs_error(vol->sb, "Insufficient memory to grab " 2560 "page (index 0x%lx).", idx); 2561 return -ENOMEM; 2562 } 2563 kaddr = kmap_atomic(page); 2564 memset(kaddr, val, PAGE_SIZE); 2565 flush_dcache_page(page); 2566 kunmap_atomic(kaddr); 2567 /* 2568 * If the page has buffers, mark them uptodate since buffer 2569 * state and not page state is definitive in 2.6 kernels. 2570 */ 2571 if (page_has_buffers(page)) { 2572 struct buffer_head *bh, *head; 2573 2574 bh = head = page_buffers(page); 2575 do { 2576 set_buffer_uptodate(bh); 2577 } while ((bh = bh->b_this_page) != head); 2578 } 2579 /* Now that buffers are uptodate, set the page uptodate, too. */ 2580 SetPageUptodate(page); 2581 /* 2582 * Set the page and all its buffers dirty and mark the inode 2583 * dirty, too. The VM will write the page later on. 2584 */ 2585 set_page_dirty(page); 2586 /* Finally unlock and release the page. */ 2587 unlock_page(page); 2588 put_page(page); 2589 balance_dirty_pages_ratelimited(mapping); 2590 cond_resched(); 2591 } 2592 /* If there is a last partial page, need to do it the slow way. */ 2593 if (end_ofs) { 2594 page = read_mapping_page(mapping, idx, NULL); 2595 if (IS_ERR(page)) { 2596 ntfs_error(vol->sb, "Failed to read last partial page " 2597 "(error, index 0x%lx).", idx); 2598 return PTR_ERR(page); 2599 } 2600 kaddr = kmap_atomic(page); 2601 memset(kaddr, val, end_ofs); 2602 flush_dcache_page(page); 2603 kunmap_atomic(kaddr); 2604 set_page_dirty(page); 2605 put_page(page); 2606 balance_dirty_pages_ratelimited(mapping); 2607 cond_resched(); 2608 } 2609 done: 2610 ntfs_debug("Done."); 2611 return 0; 2612 } 2613 2614 #endif /* NTFS_RW */ 2615