xref: /openbmc/linux/fs/ntfs/compress.c (revision 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2)
/**
 * compress.c - NTFS kernel compressed attributes handling.
 *		Part of the Linux-NTFS project.
 *
 * Copyright (c) 2001-2004 Anton Altaparmakov
 * Copyright (c) 2002 Richard Russon
 *
 * This program/include file is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program/include file is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program (in the main directory of the Linux-NTFS
 * distribution in the file COPYING); if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/vmalloc.h>

#include "attrib.h"
#include "inode.h"
#include "debug.h"
#include "ntfs.h"

/**
 * ntfs_compression_constants - enum of constants used in the compression code
 */
typedef enum {
	/* Token types and access mask. */
	NTFS_SYMBOL_TOKEN	=	0,
	NTFS_PHRASE_TOKEN	=	1,
	NTFS_TOKEN_MASK		=	1,

	/* Compression sub-block constants. */
	NTFS_SB_SIZE_MASK	=	0x0fff,
	NTFS_SB_SIZE		=	0x1000,
	NTFS_SB_IS_COMPRESSED	=	0x8000,

	/*
	 * The maximum compression block size is by definition 16 * the cluster
	 * size, with the maximum supported cluster size being 4kiB. Thus the
	 * maximum compression buffer size is 64kiB, so we use this when
	 * initializing the compression buffer.
	 */
	NTFS_MAX_CB_SIZE	= 64 * 1024,
} ntfs_compression_constants;
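
/*
 * Illustration (derived from the constants above and from ntfs_decompress()
 * below): each sub-block (sb) of a compression block starts with a 16-bit
 * little endian header.  Bits 0-11 (NTFS_SB_SIZE_MASK) hold the total size
 * of the sb, including the two header bytes, minus 3, and bit 15
 * (NTFS_SB_IS_COMPRESSED) flags a compressed sb, while bits 12-14 are
 * ignored by this code.  For example, a header of 0x8ffb denotes a
 * compressed sb occupying 0xffb + 3 = 0xffe bytes of the cb, and an
 * uncompressed sb must have a size field of 0x0fff, i.e. two header bytes
 * followed by exactly NTFS_SB_SIZE bytes of data.
 */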

/**
 * ntfs_compression_buffer - one buffer for the decompression engine
 */
static u8 *ntfs_compression_buffer = NULL;

/**
 * ntfs_cb_lock - spinlock which protects ntfs_compression_buffer
 */
static DEFINE_SPINLOCK(ntfs_cb_lock);
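
/*
 * Note that, because there is only this one, spinlock protected buffer,
 * decompression is serialized system wide: only one compression block can be
 * decompressed at any one time, and the holder of ntfs_cb_lock must not
 * sleep (see the note on ntfs_decompress() below).
 */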

/**
 * allocate_compression_buffers - allocate the decompression buffers
 *
 * Caller has to hold the ntfs_lock semaphore.
 *
 * Return 0 on success or -ENOMEM if the allocations failed.
 */
int allocate_compression_buffers(void)
{
	BUG_ON(ntfs_compression_buffer);

	ntfs_compression_buffer = vmalloc(NTFS_MAX_CB_SIZE);
	if (!ntfs_compression_buffer)
		return -ENOMEM;
	return 0;
}

/**
 * free_compression_buffers - free the decompression buffers
 *
 * Caller has to hold the ntfs_lock semaphore.
 */
void free_compression_buffers(void)
{
	BUG_ON(!ntfs_compression_buffer);
	vfree(ntfs_compression_buffer);
	ntfs_compression_buffer = NULL;
}
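
/*
 * Usage sketch (illustrative only, not part of this file): callers are
 * expected to refcount compression users under the ntfs_lock semaphore,
 * along the lines of:
 *
 *	down(&ntfs_lock);
 *	if (!ntfs_nr_compression_users++)
 *		err = allocate_compression_buffers();
 *	up(&ntfs_lock);
 *
 * with a matching decrement and free_compression_buffers() when the last
 * user goes away.  The ntfs_nr_compression_users counter is an assumption
 * here; see super.c for the actual bookkeeping.
 */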

/**
 * zero_partial_compressed_page - zero out of bounds compressed page region
 */
static void zero_partial_compressed_page(ntfs_inode *ni, struct page *page)
{
	u8 *kp = page_address(page);
	unsigned int kp_ofs;

	ntfs_debug("Zeroing page region outside initialized size.");
	if (((s64)page->index << PAGE_CACHE_SHIFT) >= ni->initialized_size) {
		/*
		 * FIXME: Using clear_page() will become wrong when we get
		 * PAGE_CACHE_SIZE != PAGE_SIZE but for now there is no problem.
		 */
		clear_page(kp);
		return;
	}
	kp_ofs = ni->initialized_size & ~PAGE_CACHE_MASK;
	memset(kp + kp_ofs, 0, PAGE_CACHE_SIZE - kp_ofs);
	return;
}
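
/*
 * Worked example (illustrative): with 4kiB pages and initialized_size =
 * 0x2600, the page at index 2 covers file bytes 0x2000-0x2fff, so kp_ofs =
 * 0x2600 & ~PAGE_CACHE_MASK = 0x600 and bytes 0x600-0xfff of the page are
 * zeroed.  A page at index 3 or above lies entirely beyond initialized_size
 * and is cleared completely.
 */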

/**
 * handle_bounds_compressed_page - test for & handle out of bounds compressed page
 */
static inline void handle_bounds_compressed_page(ntfs_inode *ni,
		struct page *page)
{
	if ((page->index >= (ni->initialized_size >> PAGE_CACHE_SHIFT)) &&
			(ni->initialized_size < VFS_I(ni)->i_size))
		zero_partial_compressed_page(ni, page);
	return;
}

/**
 * ntfs_decompress - decompress a compression block into an array of pages
 * @dest_pages:		destination array of pages
 * @dest_index:		current index into @dest_pages (IN/OUT)
 * @dest_ofs:		current offset within @dest_pages[@dest_index] (IN/OUT)
 * @dest_max_index:	maximum index into @dest_pages (IN)
 * @dest_max_ofs:	maximum offset within @dest_pages[@dest_max_index] (IN)
 * @xpage:		the target page (-1 if none) (IN)
 * @xpage_done:		set to 1 if xpage was completed successfully (IN/OUT)
 * @cb_start:		compression block to decompress (IN)
 * @cb_size:		size of compression block @cb_start in bytes (IN)
 *
 * The caller must have disabled preemption. ntfs_decompress() reenables it when
 * the critical section is finished.
 *
 * This decompresses the compression block @cb_start into the array of
 * destination pages @dest_pages starting at index @dest_index into @dest_pages
 * and at offset @dest_ofs into the page @dest_pages[@dest_index].
 *
 * When the page @dest_pages[@xpage] is completed, @xpage_done is set to 1.
 * If @xpage is -1 or @xpage has not been completed, @xpage_done is not
 * modified.
 *
 * @cb_start is a pointer to the compression block which needs decompressing
 * and @cb_size is the size of @cb_start in bytes (8-64kiB).
 *
 * Returns 0 on success or -EOVERFLOW on error in the compressed stream.
 * @xpage_done indicates whether the target page (@dest_pages[@xpage]) was
 * completed during the decompression of the compression block (@cb_start).
 *
 * Warning: This function *REQUIRES* PAGE_CACHE_SIZE >= 4096 or it will blow up
 * unpredictably! You have been warned!
 *
 * Note to hackers: This function may not sleep until it has finished accessing
 * the compression block @cb_start as it points into the global, spinlock
 * protected ntfs_compression_buffer.
 */
static int ntfs_decompress(struct page *dest_pages[], int *dest_index,
		int *dest_ofs, const int dest_max_index, const int dest_max_ofs,
		const int xpage, char *xpage_done, u8 *const cb_start,
		const u32 cb_size)
{
	/*
	 * Pointers into the compressed data, i.e. the compression block (cb),
	 * and the therein contained sub-blocks (sb).
	 */
	u8 *cb_end = cb_start + cb_size; /* End of cb. */
	u8 *cb = cb_start;	/* Current position in cb. */
	u8 *cb_sb_start = cb;	/* Beginning of the current sb in the cb. */
	u8 *cb_sb_end;		/* End of current sb / beginning of next sb. */

	/* Variables for uncompressed data / destination. */
	struct page *dp;	/* Current destination page being worked on. */
	u8 *dp_addr;		/* Current pointer into dp. */
	u8 *dp_sb_start;	/* Start of current sub-block in dp. */
	u8 *dp_sb_end;		/* End of current sb in dp (dp_sb_start +
				   NTFS_SB_SIZE). */
	u16 do_sb_start;	/* @dest_ofs when starting this sub-block. */
	u16 do_sb_end;		/* @dest_ofs of end of this sb (do_sb_start +
				   NTFS_SB_SIZE). */

	/* Variables for tag and token parsing. */
	u8 tag;			/* Current tag. */
	int token;		/* Loop counter for the eight tokens in tag. */

	/* Need this because we can't sleep, so need two stages. */
	int completed_pages[dest_max_index - *dest_index + 1];
	int nr_completed_pages = 0;

	/* Default error code. */
	int err = -EOVERFLOW;

	ntfs_debug("Entering, cb_size = 0x%x.", cb_size);
do_next_sb:
	ntfs_debug("Beginning sub-block at offset = 0x%zx in the cb.",
			cb - cb_start);
	/*
	 * Have we reached the end of the compression block or the end of the
	 * decompressed data?  The latter can happen for example if the current
	 * position in the compression block is one byte before its end so the
	 * first two checks do not detect it.
	 */
	if (cb == cb_end || !le16_to_cpup((le16*)cb) ||
			(*dest_index == dest_max_index &&
			*dest_ofs == dest_max_ofs)) {
		int i;

		ntfs_debug("Completed. Returning success (0).");
		err = 0;
return_error:
		/* We can sleep from now on, so we drop lock. */
		spin_unlock(&ntfs_cb_lock);
		/* Second stage: finalize completed pages. */
		if (nr_completed_pages > 0) {
			struct page *page = dest_pages[completed_pages[0]];
			ntfs_inode *ni = NTFS_I(page->mapping->host);

			for (i = 0; i < nr_completed_pages; i++) {
				int di = completed_pages[i];

				dp = dest_pages[di];
				/*
				 * If we are outside the initialized size, zero
				 * the out of bounds page range.
				 */
				handle_bounds_compressed_page(ni, dp);
				flush_dcache_page(dp);
				kunmap(dp);
				SetPageUptodate(dp);
				unlock_page(dp);
				if (di == xpage)
					*xpage_done = 1;
				else
					page_cache_release(dp);
				dest_pages[di] = NULL;
			}
		}
		return err;
	}

	/* Setup offsets for the current sub-block destination. */
	do_sb_start = *dest_ofs;
	do_sb_end = do_sb_start + NTFS_SB_SIZE;

	/* Check that we are still within allowed boundaries. */
	if (*dest_index == dest_max_index && do_sb_end > dest_max_ofs)
		goto return_overflow;

	/* Does the minimum size of a compressed sb overflow valid range? */
	if (cb + 6 > cb_end)
		goto return_overflow;

	/* Setup the current sub-block source pointers and validate range. */
	cb_sb_start = cb;
	cb_sb_end = cb_sb_start + (le16_to_cpup((le16*)cb) & NTFS_SB_SIZE_MASK)
			+ 3;
	if (cb_sb_end > cb_end)
		goto return_overflow;

	/* Get the current destination page. */
	dp = dest_pages[*dest_index];
	if (!dp) {
		/* No page present. Skip decompression of this sub-block. */
		cb = cb_sb_end;

		/* Advance destination position to next sub-block. */
		*dest_ofs = (*dest_ofs + NTFS_SB_SIZE) & ~PAGE_CACHE_MASK;
		if (!*dest_ofs && (++*dest_index > dest_max_index))
			goto return_overflow;
		goto do_next_sb;
	}

	/* We have a valid destination page. Setup the destination pointers. */
	dp_addr = (u8*)page_address(dp) + do_sb_start;

	/* Now, we are ready to process the current sub-block (sb). */
	if (!(le16_to_cpup((le16*)cb) & NTFS_SB_IS_COMPRESSED)) {
		ntfs_debug("Found uncompressed sub-block.");
		/* This sb is not compressed, just copy it into destination. */

		/* Advance source position to first data byte. */
		cb += 2;

		/* An uncompressed sb must be full size. */
		if (cb_sb_end - cb != NTFS_SB_SIZE)
			goto return_overflow;

		/* Copy the block and advance the source position. */
		memcpy(dp_addr, cb, NTFS_SB_SIZE);
		cb += NTFS_SB_SIZE;

		/* Advance destination position to next sub-block. */
		*dest_ofs += NTFS_SB_SIZE;
		if (!(*dest_ofs &= ~PAGE_CACHE_MASK)) {
finalize_page:
			/*
			 * First stage: add current page index to array of
			 * completed pages.
			 */
			completed_pages[nr_completed_pages++] = *dest_index;
			if (++*dest_index > dest_max_index)
				goto return_overflow;
		}
		goto do_next_sb;
	}
	ntfs_debug("Found compressed sub-block.");
	/* This sb is compressed, decompress it into destination. */

	/* Setup destination pointers. */
	dp_sb_start = dp_addr;
	dp_sb_end = dp_sb_start + NTFS_SB_SIZE;

	/* Forward to the first tag in the sub-block. */
	cb += 2;
do_next_tag:
	if (cb == cb_sb_end) {
		/* Check if the decompressed sub-block was not full-length. */
		if (dp_addr < dp_sb_end) {
			int nr_bytes = do_sb_end - *dest_ofs;

			ntfs_debug("Filling incomplete sub-block with "
					"zeroes.");
			/* Zero remainder and update destination position. */
			memset(dp_addr, 0, nr_bytes);
			*dest_ofs += nr_bytes;
		}
		/* We have finished the current sub-block. */
		if (!(*dest_ofs &= ~PAGE_CACHE_MASK))
			goto finalize_page;
		goto do_next_sb;
	}

	/* Check we are still in range. */
	if (cb > cb_sb_end || dp_addr > dp_sb_end)
		goto return_overflow;

	/* Get the next tag and advance to first token. */
	tag = *cb++;

	/* Parse the eight tokens described by the tag. */
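	/*
	 * Illustration: bit i of the tag selects the type of token i; the tag
	 * is shifted right once per iteration so the test via NTFS_TOKEN_MASK
	 * always looks at bit 0.  E.g. a tag of 0x05 (binary 00000101) means
	 * tokens 0 and 2 are phrase tokens and the remaining six are literal
	 * symbol bytes.
	 */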
	for (token = 0; token < 8; token++, tag >>= 1) {
		u16 lg, pt, length, max_non_overlap;
		register u16 i;
		u8 *dp_back_addr;

		/* Check if we are done / still in range. */
		if (cb >= cb_sb_end || dp_addr > dp_sb_end)
			break;

		/* Determine token type and parse appropriately. */
		if ((tag & NTFS_TOKEN_MASK) == NTFS_SYMBOL_TOKEN) {
			/*
			 * We have a symbol token, copy the symbol across, and
			 * advance the source and destination positions.
			 */
			*dp_addr++ = *cb++;
			++*dest_ofs;

			/* Continue with the next token. */
			continue;
		}

		/*
		 * We have a phrase token. Make sure it is not the first token
		 * in the sb as this is illegal and would confuse the code
		 * below.
		 */
		if (dp_addr == dp_sb_start)
			goto return_overflow;

		/*
		 * Determine the number of bytes to go back (p) and the number
		 * of bytes to copy (l). We use an optimized algorithm in which
		 * we first calculate log2(current destination position in sb),
		 * which allows determination of l and p in O(1) rather than
		 * O(n). We just need an arch-optimized log2() function now.
		 */
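		/*
		 * Worked example (illustrative): if 0x64 bytes have already
		 * been emitted into this sb, the loop below starts with i =
		 * 0x63 and shifts three times before i drops below 0x10, so
		 * lg = 3.  A phrase token pt = 0x1234 then yields p =
		 * (0x1234 >> 9) + 1 = 10 and l = (0x1234 & 0x1ff) + 3 = 55,
		 * i.e. go back 10 bytes and copy 55 bytes, an overlapping
		 * copy which is handled byte by byte further below.
		 */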
		lg = 0;
		for (i = *dest_ofs - do_sb_start - 1; i >= 0x10; i >>= 1)
			lg++;

		/* Get the phrase token into pt. */
		pt = le16_to_cpup((le16*)cb);

		/*
		 * Calculate starting position of the byte sequence in
		 * the destination using the fact that p = (pt >> (12 - lg)) + 1
		 * and make sure we don't go too far back.
		 */
		dp_back_addr = dp_addr - (pt >> (12 - lg)) - 1;
		if (dp_back_addr < dp_sb_start)
			goto return_overflow;

		/* Now calculate the length of the byte sequence. */
		length = (pt & (0xfff >> lg)) + 3;

		/* Advance destination position and verify it is in range. */
		*dest_ofs += length;
		if (*dest_ofs > do_sb_end)
			goto return_overflow;

		/* The number of non-overlapping bytes. */
		max_non_overlap = dp_addr - dp_back_addr;

		if (length <= max_non_overlap) {
			/* The byte sequence doesn't overlap, just copy it. */
			memcpy(dp_addr, dp_back_addr, length);

			/* Advance destination pointer. */
			dp_addr += length;
		} else {
			/*
			 * The byte sequence does overlap, copy non-overlapping
			 * part and then do a slow byte by byte copy for the
			 * overlapping part. Also, advance the destination
			 * pointer.
			 */
			memcpy(dp_addr, dp_back_addr, max_non_overlap);
			dp_addr += max_non_overlap;
			dp_back_addr += max_non_overlap;
			length -= max_non_overlap;
			while (length--)
				*dp_addr++ = *dp_back_addr++;
		}
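
		/*
		 * Note that the byte by byte copy is what makes
		 * self-overlapping phrases work: e.g. with p = 1 and l = 10
		 * each iteration re-reads the byte just written, replicating
		 * the last output byte ten times, which a single memcpy() or
		 * memmove() would not do.
		 */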

		/* Advance source position and continue with the next token. */
		cb += 2;
	}

	/* No tokens left in the current tag. Continue with the next tag. */
	goto do_next_tag;

return_overflow:
	ntfs_error(NULL, "Failed. Returning -EOVERFLOW.");
	goto return_error;
}

/**
 * ntfs_read_compressed_block - read a compressed block into the page cache
 * @page:	locked page in the compression block(s) we need to read
 *
 * When we are called the page has already been verified to be locked and the
 * attribute is known to be non-resident, not encrypted, but compressed.
 *
 * 1. Determine which compression block(s) @page is in.
 * 2. Get hold of all pages corresponding to this/these compression block(s).
 * 3. Read the (first) compression block.
 * 4. Decompress it into the corresponding pages.
 * 5. Throw the compressed data away and proceed to 3. for the next compression
 *    block or return success if no more compression blocks left.
 *
 * Warning: We have to be careful what we do about existing pages. They might
 * have been written to so that we would lose data if we were to just overwrite
 * them with the out-of-date uncompressed data.
 *
 * FIXME: For PAGE_CACHE_SIZE > cb_size we are not doing the Right Thing(TM) at
 * the end of the file I think. We need to detect this case and zero the out
 * of bounds remainder of the page in question and mark it as handled. At the
 * moment we would just return -EIO on such a page. This bug will only become
 * apparent if pages are above 8kiB and the NTFS volume only uses 512 byte
 * clusters so is probably not going to be seen by anyone. Still this should
 * be fixed. (AIA)
 *
 * FIXME: Again for PAGE_CACHE_SIZE > cb_size we are screwing up both in
 * handling sparse and compressed cbs. (AIA)
 *
 * FIXME: At the moment we don't do any zeroing out in the case that
 * initialized_size is less than data_size. This should be safe because of the
 * nature of the compression algorithm used. Just in case we check and output
 * an error message in read inode if the two sizes are not equal for a
 * compressed file. (AIA)
 */
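
/*
 * Worked example (illustrative): with 4kiB clusters, 4kiB pages and the
 * usual 16 cluster (64kiB) compression blocks, a read of page index 5 below
 * yields start_vcn = 0 and end_vcn = 16, i.e. one compression block (nr_cbs
 * = 1) covering nr_pages = 16 pages, of which pages[5] is the target page
 * (xpage = 5) that must be completed.
 */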
int ntfs_read_compressed_block(struct page *page)
{
	struct address_space *mapping = page->mapping;
	ntfs_inode *ni = NTFS_I(mapping->host);
	ntfs_volume *vol = ni->vol;
	struct super_block *sb = vol->sb;
	runlist_element *rl;
	unsigned long block_size = sb->s_blocksize;
	unsigned char block_size_bits = sb->s_blocksize_bits;
	u8 *cb, *cb_pos, *cb_end;
	struct buffer_head **bhs;
	unsigned long offset, index = page->index;
	u32 cb_size = ni->itype.compressed.block_size;
	u64 cb_size_mask = cb_size - 1UL;
	VCN vcn;
	LCN lcn;
	/* The first wanted vcn (minimum alignment is PAGE_CACHE_SIZE). */
	VCN start_vcn = (((s64)index << PAGE_CACHE_SHIFT) & ~cb_size_mask) >>
			vol->cluster_size_bits;
	/*
	 * The first vcn after the last wanted vcn (minimum alignment is again
	 * PAGE_CACHE_SIZE).
	 */
	VCN end_vcn = ((((s64)(index + 1UL) << PAGE_CACHE_SHIFT) + cb_size - 1)
			& ~cb_size_mask) >> vol->cluster_size_bits;
	/* Number of compression blocks (cbs) in the wanted vcn range. */
	unsigned int nr_cbs = (end_vcn - start_vcn) << vol->cluster_size_bits
			>> ni->itype.compressed.block_size_bits;
	/*
	 * Number of pages required to store the uncompressed data from all
	 * compression blocks (cbs) overlapping @page. Due to alignment
	 * guarantees of start_vcn and end_vcn, no need to round up here.
	 */
	unsigned int nr_pages = (end_vcn - start_vcn) <<
			vol->cluster_size_bits >> PAGE_CACHE_SHIFT;
	unsigned int xpage, max_page, cur_page, cur_ofs, i;
	unsigned int cb_clusters, cb_max_ofs;
	int block, max_block, cb_max_page, bhs_size, nr_bhs, err = 0;
	struct page **pages;
	unsigned char xpage_done = 0;

	ntfs_debug("Entering, page->index = 0x%lx, cb_size = 0x%x, nr_pages = "
			"%i.", index, cb_size, nr_pages);
	/*
	 * Bad things happen if we get here for anything that is not an
	 * unnamed $DATA attribute.
	 */
	BUG_ON(ni->type != AT_DATA);
	BUG_ON(ni->name_len);

	pages = kmalloc(nr_pages * sizeof(struct page *), GFP_NOFS);

	/* Allocate memory to store the buffer heads we need. */
	bhs_size = cb_size / block_size * sizeof(struct buffer_head *);
	bhs = kmalloc(bhs_size, GFP_NOFS);

	if (unlikely(!pages || !bhs)) {
		kfree(bhs);
		kfree(pages);
		SetPageError(page);
		unlock_page(page);
		ntfs_error(vol->sb, "Failed to allocate internal buffers.");
		return -ENOMEM;
	}

	/*
	 * We have already been given one page, this is the one we must do.
	 * Once again, the alignment guarantees keep it simple.
	 */
	offset = start_vcn << vol->cluster_size_bits >> PAGE_CACHE_SHIFT;
	xpage = index - offset;
	pages[xpage] = page;
	/*
	 * The remaining pages need to be allocated and inserted into the page
	 * cache, alignment guarantees keep all the below much simpler. (-8
	 */
	max_page = ((VFS_I(ni)->i_size + PAGE_CACHE_SIZE - 1) >>
			PAGE_CACHE_SHIFT) - offset;
	if (nr_pages < max_page)
		max_page = nr_pages;
	for (i = 0; i < max_page; i++, offset++) {
		if (i != xpage)
			pages[i] = grab_cache_page_nowait(mapping, offset);
		page = pages[i];
		if (page) {
			/*
			 * We only (re)read the page if it isn't already read
			 * in and/or dirty or we would be losing data or at
			 * least wasting our time.
			 */
			if (!PageDirty(page) && (!PageUptodate(page) ||
					PageError(page))) {
				ClearPageError(page);
				kmap(page);
				continue;
			}
			unlock_page(page);
			page_cache_release(page);
			pages[i] = NULL;
		}
	}

	/*
	 * We have all the destination pages we need to fill.  Now read the
	 * first compression block, mapping the runlist as necessary.
	 */
	cur_page = 0;
	cur_ofs = 0;
	cb_clusters = ni->itype.compressed.block_clusters;
do_next_cb:
	nr_cbs--;
	nr_bhs = 0;

	/* Read all cb buffer heads one cluster at a time. */
	rl = NULL;
	for (vcn = start_vcn, start_vcn += cb_clusters; vcn < start_vcn;
			vcn++) {
		BOOL is_retry = FALSE;

		if (!rl) {
lock_retry_remap:
			down_read(&ni->runlist.lock);
			rl = ni->runlist.rl;
		}
		if (likely(rl != NULL)) {
			/* Seek to element containing target vcn. */
			while (rl->length && rl[1].vcn <= vcn)
				rl++;
			lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
		} else
			lcn = LCN_RL_NOT_MAPPED;
		ntfs_debug("Reading vcn = 0x%llx, lcn = 0x%llx.",
				(unsigned long long)vcn,
				(unsigned long long)lcn);
		if (lcn < 0) {
			/*
			 * When we reach the first sparse cluster we have
			 * finished with the cb.
			 */
			if (lcn == LCN_HOLE)
				break;
			if (is_retry || lcn != LCN_RL_NOT_MAPPED)
				goto rl_err;
			is_retry = TRUE;
			/*
			 * Attempt to map runlist, dropping lock for the
			 * duration.
			 */
			up_read(&ni->runlist.lock);
			if (!ntfs_map_runlist(ni, vcn))
				goto lock_retry_remap;
			goto map_rl_err;
		}
		block = lcn << vol->cluster_size_bits >> block_size_bits;
		/* Read the lcn from device in chunks of block_size bytes. */
		max_block = block + (vol->cluster_size >> block_size_bits);
		do {
			ntfs_debug("block = 0x%x.", block);
			if (unlikely(!(bhs[nr_bhs] = sb_getblk(sb, block))))
				goto getblk_err;
			nr_bhs++;
		} while (++block < max_block);
	}

	/* Release the lock if we took it. */
	if (rl)
		up_read(&ni->runlist.lock);

	/* Setup and initiate io on all buffer heads. */
	for (i = 0; i < nr_bhs; i++) {
		struct buffer_head *tbh = bhs[i];

		if (unlikely(test_set_buffer_locked(tbh)))
			continue;
		if (unlikely(buffer_uptodate(tbh))) {
			unlock_buffer(tbh);
			continue;
		}
		get_bh(tbh);
		tbh->b_end_io = end_buffer_read_sync;
		submit_bh(READ, tbh);
	}

	/* Wait for io completion on all buffer heads. */
	for (i = 0; i < nr_bhs; i++) {
		struct buffer_head *tbh = bhs[i];

		if (buffer_uptodate(tbh))
			continue;
		wait_on_buffer(tbh);
		/*
		 * We need an optimization barrier here, otherwise we start
		 * hitting the below fixup code when accessing a loopback
		 * mounted ntfs partition. This indicates either there is a
		 * race condition in the loop driver or, more likely, gcc
		 * overoptimises the code without the barrier and it doesn't
		 * do the Right Thing(TM).
		 */
		barrier();
		if (unlikely(!buffer_uptodate(tbh))) {
			ntfs_warning(vol->sb, "Buffer is unlocked but not "
					"uptodate! Unplugging the disk queue "
					"and rescheduling.");
			get_bh(tbh);
			blk_run_address_space(mapping);
			schedule();
			put_bh(tbh);
			if (unlikely(!buffer_uptodate(tbh)))
				goto read_err;
			ntfs_warning(vol->sb, "Buffer is now uptodate. Good.");
		}
	}

	/*
	 * Get the compression buffer. We must not sleep any more
	 * until we are finished with it.
	 */
	spin_lock(&ntfs_cb_lock);
	cb = ntfs_compression_buffer;

	BUG_ON(!cb);

	cb_pos = cb;
	cb_end = cb + cb_size;

	/* Copy the buffer heads into the contiguous buffer. */
	for (i = 0; i < nr_bhs; i++) {
		memcpy(cb_pos, bhs[i]->b_data, block_size);
		cb_pos += block_size;
	}

	/*
	 * Just a precaution: a zero sub-block header terminates
	 * ntfs_decompress() (see the checks at do_next_sb), so place one
	 * after the data we copied in if there is room for it.
	 */
	if (cb_pos + 2 <= cb + cb_size)
		*(u16*)cb_pos = 0;

	/* Reset cb_pos back to the beginning. */
	cb_pos = cb;

	/* We now have both source (if present) and destination. */
	ntfs_debug("Successfully read the compression block.");

	/* The last page and maximum offset within it for the current cb. */
	cb_max_page = (cur_page << PAGE_CACHE_SHIFT) + cur_ofs + cb_size;
	cb_max_ofs = cb_max_page & ~PAGE_CACHE_MASK;
	cb_max_page >>= PAGE_CACHE_SHIFT;
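
	/*
	 * Worked example (illustrative): with 4kiB pages, cur_page = 0,
	 * cur_ofs = 0 and a 64kiB cb, cb_max_page becomes 16 and cb_max_ofs
	 * 0, i.e. the cb ends exactly on a page boundary.  With 64kiB pages
	 * and an 8kiB cb (512 byte clusters), cb_max_page stays 0 and
	 * cb_max_ofs = 0x2000 marks where in the final page this cb ends.
	 */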

	/* Catch end of file inside a compression block. */
	if (cb_max_page > max_page)
		cb_max_page = max_page;

	if (vcn == start_vcn - cb_clusters) {
		/* Sparse cb, zero out page range overlapping the cb. */
		ntfs_debug("Found sparse compression block.");
		/* We can sleep from now on, so we drop lock. */
		spin_unlock(&ntfs_cb_lock);
		if (cb_max_ofs)
			cb_max_page--;
		for (; cur_page < cb_max_page; cur_page++) {
			page = pages[cur_page];
			if (page) {
				/*
				 * FIXME: Using clear_page() will become wrong
				 * when we get PAGE_CACHE_SIZE != PAGE_SIZE but
				 * for now there is no problem.
				 */
				if (likely(!cur_ofs))
					clear_page(page_address(page));
				else
					memset(page_address(page) + cur_ofs, 0,
							PAGE_CACHE_SIZE -
							cur_ofs);
				flush_dcache_page(page);
				kunmap(page);
				SetPageUptodate(page);
				unlock_page(page);
				if (cur_page == xpage)
					xpage_done = 1;
				else
					page_cache_release(page);
				pages[cur_page] = NULL;
			}
			cb_pos += PAGE_CACHE_SIZE - cur_ofs;
			cur_ofs = 0;
			if (cb_pos >= cb_end)
				break;
		}
		/* If we have a partial final page, deal with it now. */
		if (cb_max_ofs && cb_pos < cb_end) {
			page = pages[cur_page];
			if (page)
				memset(page_address(page) + cur_ofs, 0,
						cb_max_ofs - cur_ofs);
			/*
			 * No need to update cb_pos at this stage:
			 *	cb_pos += cb_max_ofs - cur_ofs;
			 */
			cur_ofs = cb_max_ofs;
		}
	} else if (vcn == start_vcn) {
		/* We can't sleep so we need two stages. */
		unsigned int cur2_page = cur_page;
		unsigned int cur_ofs2 = cur_ofs;
		u8 *cb_pos2 = cb_pos;

		ntfs_debug("Found uncompressed compression block.");
		/* Uncompressed cb, copy it to the destination pages. */
		/*
		 * TODO: As a big optimization, we could detect this case
		 * before we read all the pages and use block_read_full_page()
		 * on all full pages instead (we still have to treat partial
		 * pages especially but at least we are getting rid of the
		 * synchronous io for the majority of pages).
		 * Or if we choose not to do the read-ahead/-behind stuff, we
		 * could just return block_read_full_page(pages[xpage]) as long
		 * as PAGE_CACHE_SIZE <= cb_size.
		 */
		if (cb_max_ofs)
			cb_max_page--;
		/* First stage: copy data into destination pages. */
		for (; cur_page < cb_max_page; cur_page++) {
			page = pages[cur_page];
			if (page)
				memcpy(page_address(page) + cur_ofs, cb_pos,
						PAGE_CACHE_SIZE - cur_ofs);
			cb_pos += PAGE_CACHE_SIZE - cur_ofs;
			cur_ofs = 0;
			if (cb_pos >= cb_end)
				break;
		}
		/* If we have a partial final page, deal with it now. */
		if (cb_max_ofs && cb_pos < cb_end) {
			page = pages[cur_page];
			if (page)
				memcpy(page_address(page) + cur_ofs, cb_pos,
						cb_max_ofs - cur_ofs);
			cb_pos += cb_max_ofs - cur_ofs;
			cur_ofs = cb_max_ofs;
		}
		/* We can sleep from now on, so drop lock. */
		spin_unlock(&ntfs_cb_lock);
		/* Second stage: finalize pages. */
		for (; cur2_page < cb_max_page; cur2_page++) {
			page = pages[cur2_page];
			if (page) {
				/*
				 * If we are outside the initialized size, zero
				 * the out of bounds page range.
				 */
				handle_bounds_compressed_page(ni, page);
				flush_dcache_page(page);
				kunmap(page);
				SetPageUptodate(page);
				unlock_page(page);
				if (cur2_page == xpage)
					xpage_done = 1;
				else
					page_cache_release(page);
				pages[cur2_page] = NULL;
			}
			cb_pos2 += PAGE_CACHE_SIZE - cur_ofs2;
			cur_ofs2 = 0;
			if (cb_pos2 >= cb_end)
				break;
		}
	} else {
		/* Compressed cb, decompress it into the destination page(s). */
		unsigned int prev_cur_page = cur_page;

		ntfs_debug("Found compressed compression block.");
		err = ntfs_decompress(pages, &cur_page, &cur_ofs,
				cb_max_page, cb_max_ofs, xpage, &xpage_done,
				cb_pos, cb_size - (cb_pos - cb));
		/*
		 * We can sleep from now on, lock already dropped by
		 * ntfs_decompress().
		 */
		if (err) {
			ntfs_error(vol->sb, "ntfs_decompress() failed in inode "
					"0x%lx with error code %i. Skipping "
					"this compression block.",
					ni->mft_no, -err);
			/* Release the unfinished pages. */
			for (; prev_cur_page < cur_page; prev_cur_page++) {
				page = pages[prev_cur_page];
				if (page) {
					if (prev_cur_page == xpage &&
							!xpage_done)
						SetPageError(page);
					flush_dcache_page(page);
					kunmap(page);
					unlock_page(page);
					if (prev_cur_page != xpage)
						page_cache_release(page);
					pages[prev_cur_page] = NULL;
				}
			}
		}
	}

	/* Release the buffer heads. */
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);

	/* Do we have more work to do? */
	if (nr_cbs)
		goto do_next_cb;

	/* We no longer need the list of buffer heads. */
	kfree(bhs);

	/* Clean up if we have any pages left. Should never happen. */
	for (cur_page = 0; cur_page < max_page; cur_page++) {
		page = pages[cur_page];
		if (page) {
			ntfs_error(vol->sb, "Still have pages left! "
					"Terminating them with extreme "
					"prejudice.  Inode 0x%lx, page index "
					"0x%lx.", ni->mft_no, page->index);
			if (cur_page == xpage && !xpage_done)
				SetPageError(page);
			flush_dcache_page(page);
			kunmap(page);
			unlock_page(page);
			if (cur_page != xpage)
				page_cache_release(page);
			pages[cur_page] = NULL;
		}
	}

	/* We no longer need the list of pages. */
	kfree(pages);

	/* If we have completed the requested page, we return success. */
	if (likely(xpage_done))
		return 0;

	ntfs_debug("Failed. Returning error code %s.", err == -EOVERFLOW ?
			"EOVERFLOW" : (!err ? "EIO" : "unknown error"));
	return err < 0 ? err : -EIO;

read_err:
	ntfs_error(vol->sb, "IO error while reading compressed data.");
	/* Release the buffer heads. */
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);
	goto err_out;

map_rl_err:
	ntfs_error(vol->sb, "ntfs_map_runlist() failed. Cannot read "
			"compression block.");
	goto err_out;

rl_err:
	up_read(&ni->runlist.lock);
	ntfs_error(vol->sb, "ntfs_rl_vcn_to_lcn() failed. Cannot read "
			"compression block.");
	goto err_out;

getblk_err:
	up_read(&ni->runlist.lock);
	ntfs_error(vol->sb, "getblk() failed. Cannot read compression block.");

err_out:
	kfree(bhs);
	for (i = cur_page; i < max_page; i++) {
		page = pages[i];
		if (page) {
			if (i == xpage && !xpage_done)
				SetPageError(page);
			flush_dcache_page(page);
			kunmap(page);
			unlock_page(page);
			if (i != xpage)
				page_cache_release(page);
		}
	}
	kfree(pages);
	return -EIO;
}