xref: /openbmc/linux/fs/squashfs/file.c (revision e11c4e08)
168252eb5SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
21701aecbSPhillip Lougher /*
31701aecbSPhillip Lougher  * Squashfs - a compressed read only filesystem for Linux
41701aecbSPhillip Lougher  *
51701aecbSPhillip Lougher  * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
6d7f2ff67SPhillip Lougher  * Phillip Lougher <phillip@squashfs.org.uk>
71701aecbSPhillip Lougher  *
81701aecbSPhillip Lougher  * file.c
91701aecbSPhillip Lougher  */
101701aecbSPhillip Lougher 
111701aecbSPhillip Lougher /*
121701aecbSPhillip Lougher  * This file contains code for handling regular files.  A regular file
131701aecbSPhillip Lougher  * consists of a sequence of contiguous compressed blocks, and/or a
141701aecbSPhillip Lougher  * compressed fragment block (tail-end packed block).   The compressed size
151701aecbSPhillip Lougher  * of each datablock is stored in a block list contained within the
161701aecbSPhillip Lougher  * file inode (itself stored in one or more compressed metadata blocks).
171701aecbSPhillip Lougher  *
181701aecbSPhillip Lougher  * To speed up access to datablocks when reading 'large' files (256 Mbytes or
191701aecbSPhillip Lougher  * larger), the code implements an index cache that caches the mapping from
201701aecbSPhillip Lougher  * block index to datablock location on disk.
211701aecbSPhillip Lougher  *
221701aecbSPhillip Lougher  * The index cache allows Squashfs to handle large files (up to 1.75 TiB) while
231701aecbSPhillip Lougher  * retaining a simple and space-efficient block list on disk.  The cache
241701aecbSPhillip Lougher  * is split into slots, caching up to eight 224 GiB files (128 KiB blocks).
251701aecbSPhillip Lougher  * Larger files use multiple slots, with 1.75 TiB files using all 8 slots.
261701aecbSPhillip Lougher  * The index cache is designed to be memory efficient, and by default uses
271701aecbSPhillip Lougher  * 16 KiB.
281701aecbSPhillip Lougher  */
291701aecbSPhillip Lougher 
301701aecbSPhillip Lougher #include <linux/fs.h>
311701aecbSPhillip Lougher #include <linux/vfs.h>
321701aecbSPhillip Lougher #include <linux/kernel.h>
331701aecbSPhillip Lougher #include <linux/slab.h>
341701aecbSPhillip Lougher #include <linux/string.h>
351701aecbSPhillip Lougher #include <linux/pagemap.h>
361701aecbSPhillip Lougher #include <linux/mutex.h>
371701aecbSPhillip Lougher 
381701aecbSPhillip Lougher #include "squashfs_fs.h"
391701aecbSPhillip Lougher #include "squashfs_fs_sb.h"
401701aecbSPhillip Lougher #include "squashfs_fs_i.h"
411701aecbSPhillip Lougher #include "squashfs.h"
428fc78b6fSHsin-Yi Wang #include "page_actor.h"
431701aecbSPhillip Lougher 
441701aecbSPhillip Lougher /*
451701aecbSPhillip Lougher  * Locate cache slot in range [offset, index] for specified inode.  If
461701aecbSPhillip Lougher  * there's more than one return the slot closest to index.
471701aecbSPhillip Lougher  */
locate_meta_index(struct inode * inode,int offset,int index)481701aecbSPhillip Lougher static struct meta_index *locate_meta_index(struct inode *inode, int offset,
491701aecbSPhillip Lougher 				int index)
501701aecbSPhillip Lougher {
511701aecbSPhillip Lougher 	struct meta_index *meta = NULL;
521701aecbSPhillip Lougher 	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
531701aecbSPhillip Lougher 	int i;
541701aecbSPhillip Lougher 
551701aecbSPhillip Lougher 	mutex_lock(&msblk->meta_index_mutex);
561701aecbSPhillip Lougher 
571701aecbSPhillip Lougher 	TRACE("locate_meta_index: index %d, offset %d\n", index, offset);
581701aecbSPhillip Lougher 
591701aecbSPhillip Lougher 	if (msblk->meta_index == NULL)
601701aecbSPhillip Lougher 		goto not_allocated;
611701aecbSPhillip Lougher 
621701aecbSPhillip Lougher 	for (i = 0; i < SQUASHFS_META_SLOTS; i++) {
631701aecbSPhillip Lougher 		if (msblk->meta_index[i].inode_number == inode->i_ino &&
641701aecbSPhillip Lougher 				msblk->meta_index[i].offset >= offset &&
651701aecbSPhillip Lougher 				msblk->meta_index[i].offset <= index &&
661701aecbSPhillip Lougher 				msblk->meta_index[i].locked == 0) {
671701aecbSPhillip Lougher 			TRACE("locate_meta_index: entry %d, offset %d\n", i,
681701aecbSPhillip Lougher 					msblk->meta_index[i].offset);
691701aecbSPhillip Lougher 			meta = &msblk->meta_index[i];
701701aecbSPhillip Lougher 			offset = meta->offset;
711701aecbSPhillip Lougher 		}
721701aecbSPhillip Lougher 	}
731701aecbSPhillip Lougher 
741701aecbSPhillip Lougher 	if (meta)
751701aecbSPhillip Lougher 		meta->locked = 1;
761701aecbSPhillip Lougher 
771701aecbSPhillip Lougher not_allocated:
781701aecbSPhillip Lougher 	mutex_unlock(&msblk->meta_index_mutex);
791701aecbSPhillip Lougher 
801701aecbSPhillip Lougher 	return meta;
811701aecbSPhillip Lougher }
821701aecbSPhillip Lougher 
831701aecbSPhillip Lougher 
/*
 * Find and initialise an empty cache slot for index offset.
 *
 * Returns the slot locked (meta->locked = 1), initialised for this inode
 * starting at the given offset, or NULL if allocation failed or every
 * slot is currently locked.
 */
static struct meta_index *empty_meta_index(struct inode *inode, int offset,
				int skip)
{
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
	struct meta_index *meta = NULL;
	int i;

	mutex_lock(&msblk->meta_index_mutex);

	TRACE("empty_meta_index: offset %d, skip %d\n", offset, skip);

	if (msblk->meta_index == NULL) {
		/*
		 * First time cache index has been used, allocate and
		 * initialise.  The cache index could be allocated at
		 * mount time but doing it here means it is allocated only
		 * if a 'large' file is read.
		 */
		msblk->meta_index = kcalloc(SQUASHFS_META_SLOTS,
			sizeof(*(msblk->meta_index)), GFP_KERNEL);
		if (msblk->meta_index == NULL) {
			ERROR("Failed to allocate meta_index\n");
			goto failed;
		}
		for (i = 0; i < SQUASHFS_META_SLOTS; i++) {
			msblk->meta_index[i].inode_number = 0;
			msblk->meta_index[i].locked = 0;
		}
		msblk->next_meta_index = 0;
	}

	/*
	 * Round-robin search for an unlocked slot starting at
	 * next_meta_index; i counts down so the search gives up after
	 * one full sweep (i reaches 0 with every slot locked).
	 */
	for (i = SQUASHFS_META_SLOTS; i &&
			msblk->meta_index[msblk->next_meta_index].locked; i--)
		msblk->next_meta_index = (msblk->next_meta_index + 1) %
			SQUASHFS_META_SLOTS;

	if (i == 0) {
		TRACE("empty_meta_index: failed!\n");
		goto failed;
	}

	TRACE("empty_meta_index: returned meta entry %d, %p\n",
			msblk->next_meta_index,
			&msblk->meta_index[msblk->next_meta_index]);

	meta = &msblk->meta_index[msblk->next_meta_index];
	msblk->next_meta_index = (msblk->next_meta_index + 1) %
			SQUASHFS_META_SLOTS;

	/* Claim the slot; it stays locked until release_meta_index() */
	meta->inode_number = inode->i_ino;
	meta->offset = offset;
	meta->skip = skip;
	meta->entries = 0;
	meta->locked = 1;

failed:
	mutex_unlock(&msblk->meta_index_mutex);
	return meta;
}
1461701aecbSPhillip Lougher 
1471701aecbSPhillip Lougher 
release_meta_index(struct inode * inode,struct meta_index * meta)1481701aecbSPhillip Lougher static void release_meta_index(struct inode *inode, struct meta_index *meta)
1491701aecbSPhillip Lougher {
1501701aecbSPhillip Lougher 	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
1511701aecbSPhillip Lougher 	mutex_lock(&msblk->meta_index_mutex);
1521701aecbSPhillip Lougher 	meta->locked = 0;
1531701aecbSPhillip Lougher 	mutex_unlock(&msblk->meta_index_mutex);
1541701aecbSPhillip Lougher }
1551701aecbSPhillip Lougher 
1561701aecbSPhillip Lougher 
1571701aecbSPhillip Lougher /*
1581701aecbSPhillip Lougher  * Read the next n blocks from the block list, starting from
1591701aecbSPhillip Lougher  * metadata block <start_block, offset>.
1601701aecbSPhillip Lougher  */
read_indexes(struct super_block * sb,int n,u64 * start_block,int * offset)1611701aecbSPhillip Lougher static long long read_indexes(struct super_block *sb, int n,
1621701aecbSPhillip Lougher 				u64 *start_block, int *offset)
1631701aecbSPhillip Lougher {
1641701aecbSPhillip Lougher 	int err, i;
1651701aecbSPhillip Lougher 	long long block = 0;
16609cbfeafSKirill A. Shutemov 	__le32 *blist = kmalloc(PAGE_SIZE, GFP_KERNEL);
1671701aecbSPhillip Lougher 
1681701aecbSPhillip Lougher 	if (blist == NULL) {
1691701aecbSPhillip Lougher 		ERROR("read_indexes: Failed to allocate block_list\n");
1701701aecbSPhillip Lougher 		return -ENOMEM;
1711701aecbSPhillip Lougher 	}
1721701aecbSPhillip Lougher 
1731701aecbSPhillip Lougher 	while (n) {
17409cbfeafSKirill A. Shutemov 		int blocks = min_t(int, n, PAGE_SIZE >> 2);
1751701aecbSPhillip Lougher 
1761701aecbSPhillip Lougher 		err = squashfs_read_metadata(sb, blist, start_block,
1771701aecbSPhillip Lougher 				offset, blocks << 2);
1781701aecbSPhillip Lougher 		if (err < 0) {
1791701aecbSPhillip Lougher 			ERROR("read_indexes: reading block [%llx:%x]\n",
1801701aecbSPhillip Lougher 				*start_block, *offset);
1811701aecbSPhillip Lougher 			goto failure;
1821701aecbSPhillip Lougher 		}
1831701aecbSPhillip Lougher 
1841701aecbSPhillip Lougher 		for (i = 0; i < blocks; i++) {
18501cfb793SLinus Torvalds 			int size = squashfs_block_size(blist[i]);
18601cfb793SLinus Torvalds 			if (size < 0) {
18701cfb793SLinus Torvalds 				err = size;
18801cfb793SLinus Torvalds 				goto failure;
18901cfb793SLinus Torvalds 			}
1901701aecbSPhillip Lougher 			block += SQUASHFS_COMPRESSED_SIZE_BLOCK(size);
1911701aecbSPhillip Lougher 		}
1921701aecbSPhillip Lougher 		n -= blocks;
1931701aecbSPhillip Lougher 	}
1941701aecbSPhillip Lougher 
1951701aecbSPhillip Lougher 	kfree(blist);
1961701aecbSPhillip Lougher 	return block;
1971701aecbSPhillip Lougher 
1981701aecbSPhillip Lougher failure:
1991701aecbSPhillip Lougher 	kfree(blist);
2001701aecbSPhillip Lougher 	return err;
2011701aecbSPhillip Lougher }
2021701aecbSPhillip Lougher 
2031701aecbSPhillip Lougher 
2041701aecbSPhillip Lougher /*
2051701aecbSPhillip Lougher  * Each cache index slot has SQUASHFS_META_ENTRIES, each of which
2061701aecbSPhillip Lougher  * can cache one index -> datablock/blocklist-block mapping.  We wish
2071701aecbSPhillip Lougher  * to distribute these over the length of the file, entry[0] maps index x,
2081701aecbSPhillip Lougher  * entry[1] maps index x + skip, entry[2] maps index x + 2 * skip, and so on.
2091701aecbSPhillip Lougher  * The larger the file, the greater the skip factor.  The skip factor is
2101701aecbSPhillip Lougher  * limited to the size of the metadata cache (SQUASHFS_CACHED_BLKS) to ensure
2111701aecbSPhillip Lougher  * the number of metadata blocks that need to be read fits into the cache.
2121701aecbSPhillip Lougher  * If the skip factor is limited in this way then the file will use multiple
2131701aecbSPhillip Lougher  * slots.
2141701aecbSPhillip Lougher  */
calculate_skip(u64 blocks)215d6e621deSPhillip Lougher static inline int calculate_skip(u64 blocks)
2161701aecbSPhillip Lougher {
217d6e621deSPhillip Lougher 	u64 skip = blocks / ((SQUASHFS_META_ENTRIES + 1)
2181701aecbSPhillip Lougher 		 * SQUASHFS_META_INDEXES);
219d6e621deSPhillip Lougher 	return min((u64) SQUASHFS_CACHED_BLKS - 1, skip + 1);
2201701aecbSPhillip Lougher }
2211701aecbSPhillip Lougher 
2221701aecbSPhillip Lougher 
/*
 * Search and grow the index cache for the specified inode, returning the
 * on-disk locations of the datablock and block list metadata block
 * <index_block, index_offset> for index (scaled to nearest cache index).
 *
 * Returns the block-list index that the returned locations correspond to
 * (which may be smaller than the requested index, as the cache works at
 * SQUASHFS_META_INDEXES * skip granularity), or a negative error code if
 * reading the block list failed.
 */
static int fill_meta_index(struct inode *inode, int index,
		u64 *index_block, int *index_offset, u64 *data_block)
{
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
	int skip = calculate_skip(i_size_read(inode) >> msblk->block_log);
	int offset = 0;
	struct meta_index *meta;
	struct meta_entry *meta_entry;
	/* Default to the start positions stored in the inode (index 0) */
	u64 cur_index_block = squashfs_i(inode)->block_list_start;
	int cur_offset = squashfs_i(inode)->offset;
	u64 cur_data_block = squashfs_i(inode)->start;
	int err, i;

	/*
	 * Scale index to cache index (cache slot entry)
	 */
	index /= SQUASHFS_META_INDEXES * skip;

	while (offset < index) {
		meta = locate_meta_index(inode, offset + 1, index);

		if (meta == NULL) {
			/* Nothing cached beyond offset: start a new slot */
			meta = empty_meta_index(inode, offset + 1, skip);
			if (meta == NULL)
				goto all_done;
		} else {
			/* Resume from the nearest cached entry <= index */
			offset = index < meta->offset + meta->entries ? index :
				meta->offset + meta->entries - 1;
			meta_entry = &meta->meta_entry[offset - meta->offset];
			cur_index_block = meta_entry->index_block +
				msblk->inode_table;
			cur_offset = meta_entry->offset;
			cur_data_block = meta_entry->data_block;
			TRACE("get_meta_index: offset %d, meta->offset %d, "
				"meta->entries %d\n", offset, meta->offset,
				meta->entries);
			TRACE("get_meta_index: index_block 0x%llx, offset 0x%x"
				" data_block 0x%llx\n", cur_index_block,
				cur_offset, cur_data_block);
		}

		/*
		 * If necessary grow cache slot by reading block list.  Cache
		 * slot is extended up to index or to the end of the slot, in
		 * which case further slots will be used.
		 */
		for (i = meta->offset + meta->entries; i <= index &&
				i < meta->offset + SQUASHFS_META_ENTRIES; i++) {
			/* Each cached entry spans this many block-list blocks */
			int blocks = skip * SQUASHFS_META_INDEXES;
			long long res = read_indexes(inode->i_sb, blocks,
					&cur_index_block, &cur_offset);

			if (res < 0) {
				if (meta->entries == 0)
					/*
					 * Don't leave an empty slot on read
					 * error allocated to this inode...
					 */
					meta->inode_number = 0;
				err = res;
				goto failed;
			}

			/* res is the compressed size of the blocks skipped */
			cur_data_block += res;
			meta_entry = &meta->meta_entry[i - meta->offset];
			meta_entry->index_block = cur_index_block -
				msblk->inode_table;
			meta_entry->offset = cur_offset;
			meta_entry->data_block = cur_data_block;
			meta->entries++;
			offset++;
		}

		TRACE("get_meta_index: meta->offset %d, meta->entries %d\n",
				meta->offset, meta->entries);

		release_meta_index(inode, meta);
	}

all_done:
	*index_block = cur_index_block;
	*index_offset = cur_offset;
	*data_block = cur_data_block;

	/*
	 * Scale cache index (cache slot entry) to index
	 */
	return offset * SQUASHFS_META_INDEXES * skip;

failed:
	release_meta_index(inode, meta);
	return err;
}
3211701aecbSPhillip Lougher 
3221701aecbSPhillip Lougher 
3231701aecbSPhillip Lougher /*
3241701aecbSPhillip Lougher  * Get the on-disk location and compressed size of the datablock
3251701aecbSPhillip Lougher  * specified by index.  Fill_meta_index() does most of the work.
3261701aecbSPhillip Lougher  */
read_blocklist(struct inode * inode,int index,u64 * block)3271701aecbSPhillip Lougher static int read_blocklist(struct inode *inode, int index, u64 *block)
3281701aecbSPhillip Lougher {
3291701aecbSPhillip Lougher 	u64 start;
3301701aecbSPhillip Lougher 	long long blks;
3311701aecbSPhillip Lougher 	int offset;
3321701aecbSPhillip Lougher 	__le32 size;
3331701aecbSPhillip Lougher 	int res = fill_meta_index(inode, index, &start, &offset, block);
3341701aecbSPhillip Lougher 
3351701aecbSPhillip Lougher 	TRACE("read_blocklist: res %d, index %d, start 0x%llx, offset"
3361701aecbSPhillip Lougher 		       " 0x%x, block 0x%llx\n", res, index, start, offset,
3371701aecbSPhillip Lougher 			*block);
3381701aecbSPhillip Lougher 
3391701aecbSPhillip Lougher 	if (res < 0)
3401701aecbSPhillip Lougher 		return res;
3411701aecbSPhillip Lougher 
3421701aecbSPhillip Lougher 	/*
3431701aecbSPhillip Lougher 	 * res contains the index of the mapping returned by fill_meta_index(),
3441701aecbSPhillip Lougher 	 * this will likely be less than the desired index (because the
3451701aecbSPhillip Lougher 	 * meta_index cache works at a higher granularity).  Read any
3461701aecbSPhillip Lougher 	 * extra block indexes needed.
3471701aecbSPhillip Lougher 	 */
3481701aecbSPhillip Lougher 	if (res < index) {
3491701aecbSPhillip Lougher 		blks = read_indexes(inode->i_sb, index - res, &start, &offset);
3501701aecbSPhillip Lougher 		if (blks < 0)
3511701aecbSPhillip Lougher 			return (int) blks;
3521701aecbSPhillip Lougher 		*block += blks;
3531701aecbSPhillip Lougher 	}
3541701aecbSPhillip Lougher 
3551701aecbSPhillip Lougher 	/*
3561701aecbSPhillip Lougher 	 * Read length of block specified by index.
3571701aecbSPhillip Lougher 	 */
3581701aecbSPhillip Lougher 	res = squashfs_read_metadata(inode->i_sb, &size, &start, &offset,
3591701aecbSPhillip Lougher 			sizeof(size));
3601701aecbSPhillip Lougher 	if (res < 0)
3611701aecbSPhillip Lougher 		return res;
36201cfb793SLinus Torvalds 	return squashfs_block_size(size);
3631701aecbSPhillip Lougher }
3641701aecbSPhillip Lougher 
squashfs_fill_page(struct page * page,struct squashfs_cache_entry * buffer,int offset,int avail)365cdbb65c4SLinus Torvalds void squashfs_fill_page(struct page *page, struct squashfs_cache_entry *buffer, int offset, int avail)
366cdbb65c4SLinus Torvalds {
367cdbb65c4SLinus Torvalds 	int copied;
368cdbb65c4SLinus Torvalds 	void *pageaddr;
369cdbb65c4SLinus Torvalds 
370cdbb65c4SLinus Torvalds 	pageaddr = kmap_atomic(page);
371cdbb65c4SLinus Torvalds 	copied = squashfs_copy_data(pageaddr, buffer, offset, avail);
372cdbb65c4SLinus Torvalds 	memset(pageaddr + copied, 0, PAGE_SIZE - copied);
373cdbb65c4SLinus Torvalds 	kunmap_atomic(pageaddr);
374cdbb65c4SLinus Torvalds 
375cdbb65c4SLinus Torvalds 	flush_dcache_page(page);
376cdbb65c4SLinus Torvalds 	if (copied == avail)
377cdbb65c4SLinus Torvalds 		SetPageUptodate(page);
378cdbb65c4SLinus Torvalds 	else
379cdbb65c4SLinus Torvalds 		SetPageError(page);
380cdbb65c4SLinus Torvalds }
381cdbb65c4SLinus Torvalds 
/*
 * Copy data into page cache.
 *
 * Copy the decompressed datablock in buffer into all the page cache pages
 * covering the same datablock as page.  If buffer is NULL the pages are
 * zero-filled (sparse block - avail is 0, so squashfs_fill_page() memsets
 * the whole page).  bytes is the amount of data, offset the starting
 * offset within buffer.
 */
void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer,
	int bytes, int offset)
{
	struct inode *inode = page->mapping->host;
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
	/* Page-index range [start_index, end_index] covered by the datablock */
	int i, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
	int start_index = page->index & ~mask, end_index = start_index | mask;

	/*
	 * Loop copying datablock into pages.  As the datablock likely covers
	 * many PAGE_SIZE pages (default block size is 128 KiB) explicitly
	 * grab the pages from the page cache, except for the page that we've
	 * been called to fill.
	 */
	for (i = start_index; i <= end_index && bytes > 0; i++,
			bytes -= PAGE_SIZE, offset += PAGE_SIZE) {
		struct page *push_page;
		int avail = buffer ? min_t(int, bytes, PAGE_SIZE) : 0;

		TRACE("bytes %d, i %d, available_bytes %d\n", bytes, i, avail);

		push_page = (i == page->index) ? page :
			grab_cache_page_nowait(page->mapping, i);

		/* Sibling page couldn't be grabbed (contended/OOM): skip it */
		if (!push_page)
			continue;

		/* Already valid data in the cache, don't overwrite it */
		if (PageUptodate(push_page))
			goto skip_page;

		squashfs_fill_page(push_page, buffer, offset, avail);
skip_page:
		unlock_page(push_page);
		/* Drop the reference taken by grab_cache_page_nowait() */
		if (i != page->index)
			put_page(push_page);
	}
}
4201701aecbSPhillip Lougher 
4215f55dbc0SPhillip Lougher /* Read datablock stored packed inside a fragment (tail-end packed block) */
squashfs_readpage_fragment(struct page * page,int expected)422a3f94cb9SPhillip Lougher static int squashfs_readpage_fragment(struct page *page, int expected)
4235f55dbc0SPhillip Lougher {
4245f55dbc0SPhillip Lougher 	struct inode *inode = page->mapping->host;
4255f55dbc0SPhillip Lougher 	struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb,
4265f55dbc0SPhillip Lougher 		squashfs_i(inode)->fragment_block,
4275f55dbc0SPhillip Lougher 		squashfs_i(inode)->fragment_size);
4285f55dbc0SPhillip Lougher 	int res = buffer->error;
4295f55dbc0SPhillip Lougher 
4305f55dbc0SPhillip Lougher 	if (res)
4315f55dbc0SPhillip Lougher 		ERROR("Unable to read page, block %llx, size %x\n",
4325f55dbc0SPhillip Lougher 			squashfs_i(inode)->fragment_block,
4335f55dbc0SPhillip Lougher 			squashfs_i(inode)->fragment_size);
4345f55dbc0SPhillip Lougher 	else
435a3f94cb9SPhillip Lougher 		squashfs_copy_cache(page, buffer, expected,
4365f55dbc0SPhillip Lougher 			squashfs_i(inode)->fragment_offset);
4375f55dbc0SPhillip Lougher 
4381701aecbSPhillip Lougher 	squashfs_cache_put(buffer);
4395f55dbc0SPhillip Lougher 	return res;
4405f55dbc0SPhillip Lougher }
4411701aecbSPhillip Lougher 
squashfs_readpage_sparse(struct page * page,int expected)442a3f94cb9SPhillip Lougher static int squashfs_readpage_sparse(struct page *page, int expected)
4435f55dbc0SPhillip Lougher {
444a3f94cb9SPhillip Lougher 	squashfs_copy_cache(page, NULL, expected, 0);
4455f55dbc0SPhillip Lougher 	return 0;
4465f55dbc0SPhillip Lougher }
4475f55dbc0SPhillip Lougher 
/*
 * ->read_folio: read a single folio of a regular file.  The data is in
 * one of three forms - a regular datablock, the tail-end fragment, or a
 * sparse (all zeros) block.  On any failure the page is zero-filled and
 * flagged with an error.
 */
static int squashfs_read_folio(struct file *file, struct folio *folio)
{
	struct page *page = &folio->page;
	struct inode *inode = page->mapping->host;
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
	/* Datablock index containing this page */
	int index = page->index >> (msblk->block_log - PAGE_SHIFT);
	int file_end = i_size_read(inode) >> msblk->block_log;
	/* Bytes of data in the datablock (the last block may be partial) */
	int expected = index == file_end ?
			(i_size_read(inode) & (msblk->block_size - 1)) :
			 msblk->block_size;
	int res = 0;
	void *pageaddr;

	TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
				page->index, squashfs_i(inode)->start);

	/* Page lies entirely beyond EOF: just zero-fill it */
	if (page->index >= ((i_size_read(inode) + PAGE_SIZE - 1) >>
					PAGE_SHIFT))
		goto out;

	if (index < file_end || squashfs_i(inode)->fragment_block ==
					SQUASHFS_INVALID_BLK) {
		u64 block = 0;

		res = read_blocklist(inode, index, &block);
		if (res < 0)
			goto error_out;

		if (res == 0)
			/* Zero compressed size indicates a sparse block */
			res = squashfs_readpage_sparse(page, expected);
		else
			res = squashfs_readpage_block(page, block, res, expected);
	} else
		/* Last block, stored packed in a shared fragment block */
		res = squashfs_readpage_fragment(page, expected);

	if (!res)
		return 0;

error_out:
	SetPageError(page);
out:
	pageaddr = kmap_atomic(page);
	memset(pageaddr, 0, PAGE_SIZE);
	kunmap_atomic(pageaddr);
	flush_dcache_page(page);
	if (res == 0)
		SetPageUptodate(page);
	unlock_page(page);

	return res;
}
4991701aecbSPhillip Lougher 
/*
 * Readahead of the pages covered by the tail-end fragment: copy data out
 * of the (decompressed) fragment cache entry into each page.
 *
 * On success the pages are unlocked and released here; on error they are
 * left locked for the caller (squashfs_readahead()) to clean up.
 * Returns the fragment cache entry's error status (0 on success).
 */
static int squashfs_readahead_fragment(struct page **page,
	unsigned int pages, unsigned int expected)
{
	struct inode *inode = page[0]->mapping->host;
	struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb,
		squashfs_i(inode)->fragment_block,
		squashfs_i(inode)->fragment_size);
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
	unsigned int n, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
	int error = buffer->error;

	if (error)
		goto out;

	/* Offset within the fragment buffer of the end of this file's data */
	expected += squashfs_i(inode)->fragment_offset;

	for (n = 0; n < pages; n++) {
		/* Byte offset of this page within the datablock */
		unsigned int base = (page[n]->index & mask) << PAGE_SHIFT;
		unsigned int offset = base + squashfs_i(inode)->fragment_offset;

		if (expected > offset) {
			unsigned int avail = min_t(unsigned int, expected -
				offset, PAGE_SIZE);

			squashfs_fill_page(page[n], buffer, offset, avail);
		}

		unlock_page(page[n]);
		put_page(page[n]);
	}

out:
	squashfs_cache_put(buffer);
	return error;
}
535b09a7a03SPhillip Lougher 
/*
 * ->readahead: read whole datablocks and decompress them directly into
 * the batch of page cache pages supplied by the readahead window.  Any
 * block that can't be handled here is skipped - its pages are left
 * !uptodate and will be read individually later via ->read_folio.
 */
static void squashfs_readahead(struct readahead_control *ractl)
{
	struct inode *inode = ractl->mapping->host;
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
	size_t mask = (1UL << msblk->block_log) - 1;
	unsigned short shift = msblk->block_log - PAGE_SHIFT;
	/* Readahead start, aligned down to a datablock boundary */
	loff_t start = readahead_pos(ractl) & ~mask;
	size_t len = readahead_length(ractl) + readahead_pos(ractl) - start;
	struct squashfs_page_actor *actor;
	unsigned int nr_pages = 0;
	struct page **pages;
	int i, file_end = i_size_read(inode) >> msblk->block_log;
	unsigned int max_pages = 1UL << shift;

	/* Expand the window so it covers whole datablocks */
	readahead_expand(ractl, start, (len | mask) + 1);

	pages = kmalloc_array(max_pages, sizeof(void *), GFP_KERNEL);
	if (!pages)
		return;

	for (;;) {
		pgoff_t index;
		int res, bsize;
		u64 block = 0;
		unsigned int expected;
		struct page *last_page;

		/*
		 * Bytes of data in the datablock (the last block may be
		 * partial).
		 *
		 * NOTE(review): 'start' is computed once before the loop
		 * and never advanced, so for windows spanning several
		 * blocks 'expected' is derived from the first block's
		 * position on every iteration - confirm a later batch
		 * hitting the final partial block can't be reached with
		 * a stale full-block 'expected' (the res == expected
		 * check below would then leave those pages !uptodate,
		 * falling back to ->read_folio).
		 */
		expected = start >> msblk->block_log == file_end ?
			   (i_size_read(inode) & (msblk->block_size - 1)) :
			    msblk->block_size;

		max_pages = (expected + PAGE_SIZE - 1) >> PAGE_SHIFT;

		nr_pages = __readahead_batch(ractl, pages, max_pages);
		if (!nr_pages)
			break;

		if (readahead_pos(ractl) >= i_size_read(inode))
			goto skip_pages;

		/* Datablock index covering the first page of the batch */
		index = pages[0]->index >> shift;

		/* The whole batch must lie within a single datablock */
		if ((pages[nr_pages - 1]->index >> shift) != index)
			goto skip_pages;

		if (index == file_end && squashfs_i(inode)->fragment_block !=
						SQUASHFS_INVALID_BLK) {
			res = squashfs_readahead_fragment(pages, nr_pages,
							  expected);
			if (res)
				goto skip_pages;
			continue;
		}

		bsize = read_blocklist(inode, index, &block);
		/* Sparse block (or error): let ->read_folio deal with it */
		if (bsize == 0)
			goto skip_pages;

		actor = squashfs_page_actor_init_special(msblk, pages, nr_pages,
							 expected);
		if (!actor)
			goto skip_pages;

		res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);

		last_page = squashfs_page_actor_free(actor);

		if (res == expected) {
			int bytes;

			/* Last page (if present) may have trailing bytes not filled */
			bytes = res % PAGE_SIZE;
			if (index == file_end && bytes && last_page)
				memzero_page(last_page, bytes,
					     PAGE_SIZE - bytes);

			for (i = 0; i < nr_pages; i++) {
				flush_dcache_page(pages[i]);
				SetPageUptodate(pages[i]);
			}
		}

		for (i = 0; i < nr_pages; i++) {
			unlock_page(pages[i]);
			put_page(pages[i]);
		}
	}

	kfree(pages);
	return;

skip_pages:
	/* Leave pages !uptodate; ->read_folio will retry them one by one */
	for (i = 0; i < nr_pages; i++) {
		unlock_page(pages[i]);
		put_page(pages[i]);
	}
	kfree(pages);
}
6341701aecbSPhillip Lougher 
/*
 * Address space operations for regular files.  Only read paths are
 * provided - Squashfs is a read only filesystem.
 */
const struct address_space_operations squashfs_aops = {
	.read_folio = squashfs_read_folio,
	.readahead = squashfs_readahead
};
639