// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/lzo.h>
#include <linux/refcount.h>
#include "compression.h"
#include "ctree.h"

#define LZO_LEN	4

/*
 * Btrfs LZO compression format
 *
 * Regular and inlined LZO compressed data extents consist of:
 *
 * 1.  Header
 *     Fixed size. LZO_LEN (4) bytes long, LE32.
 *     Records the total size (including the header) of compressed data.
 *
 * 2.  Segment(s)
 *     Variable size. Each segment includes one segment header, followed by
 *     the data payload.
 *     A regular LZO compressed extent can have one or more segments.
 *     An inlined LZO compressed extent can have only one segment.
 *     One segment represents at most one sector of uncompressed data.
 *
 * 2.1 Segment header
 *     Fixed size. LZO_LEN (4) bytes long, LE32.
 *     Records the total size of the segment (not including the header).
 *     A segment header never crosses a sector boundary, thus it's possible
 *     to have at most 3 padding zeros at the end of a sector.
 *
 * 2.2 Data payload
 *     Variable size. The upper size limit is lzo1x_worst_compress(sectorsize),
 *     which is 4419 for a 4KiB sectorsize.
 *
 * Example with 4K sectorsize:
 * Page 1:
 *          0     0x2   0x4   0x6   0x8   0xa   0xc   0xe     0x10
 * 0x0000   |  Header   | SegHdr 01 | Data payload 01 ...     |
 * ...
 * 0x0ff0   | SegHdr  N | Data payload  N     ...          |00|
 *                                                          ^^ padding zeros
 * Page 2:
 * 0x1000   | SegHdr N+1| Data payload N+1 ...                |
 */

struct workspace {
	void *mem;
	void *buf;	/* where decompressed data goes */
	void *cbuf;	/* where compressed data goes */
	struct list_head list;
};

static struct workspace_manager wsm;

void lzo_free_workspace(struct list_head *ws)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);

	kvfree(workspace->buf);
	kvfree(workspace->cbuf);
	kvfree(workspace->mem);
	kfree(workspace);
}

struct list_head *lzo_alloc_workspace(unsigned int level)
{
	struct workspace *workspace;

	workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
	if (!workspace)
		return ERR_PTR(-ENOMEM);

	workspace->mem = kvmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	workspace->buf = kvmalloc(lzo1x_worst_compress(PAGE_SIZE), GFP_KERNEL);
	workspace->cbuf = kvmalloc(lzo1x_worst_compress(PAGE_SIZE), GFP_KERNEL);
	if (!workspace->mem || !workspace->buf || !workspace->cbuf)
		goto fail;

	INIT_LIST_HEAD(&workspace->list);

	return &workspace->list;
fail:
	lzo_free_workspace(&workspace->list);
	return ERR_PTR(-ENOMEM);
}

static inline void write_compress_length(char *buf, size_t len)
{
	__le32 dlen;

	dlen = cpu_to_le32(len);
	memcpy(buf, &dlen, LZO_LEN);
}

static inline size_t read_compress_length(const char *buf)
{
	__le32 dlen;

	memcpy(&dlen, buf, LZO_LEN);
	return le32_to_cpu(dlen);
}
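
/*
 * Worked example of the format above (illustration only, the compressed
 * sizes are made up):
 *
 * 8KiB of uncompressed data with a 4KiB sectorsize produces two segments.
 * Assume sector 1 compresses to 2800 bytes and sector 2 to 3000 bytes:
 *
 *   offset 0:          Header   = 4 + (4 + 2800) + (4 + 3000) = 5812
 *   offset 4:          SegHdr 1 = 2800
 *   offset 8..2807:    payload 1
 *   offset 2808:       SegHdr 2 = 3000 (still fits in the sector, no padding)
 *   offset 2812..5811: payload 2
 *
 * Had SegHdr 2 landed at e.g. offset 4094, the last two bytes of the sector
 * would be padded with zeros and the header written at offset 4096 instead.
 */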

/*
 * Will do:
 *
 * - Write a segment header into the destination
 * - Copy the compressed buffer into the destination
 * - Make sure we have enough space in the last sector to fit a segment header
 *   If not, we will pad at most (LZO_LEN (4)) - 1 bytes of zeros.
 *
 * Will allocate new pages when needed.
 */
static int copy_compressed_data_to_page(char *compressed_data,
					size_t compressed_size,
					struct page **out_pages,
					u32 *cur_out,
					const u32 sectorsize)
{
	u32 sector_bytes_left;
	u32 orig_out;
	struct page *cur_page;
	char *kaddr;

	/*
	 * We never allow a segment header crossing sector boundary, previous
	 * run should ensure we have enough space left inside the sector.
	 */
	ASSERT((*cur_out / sectorsize) == (*cur_out + LZO_LEN - 1) / sectorsize);

	cur_page = out_pages[*cur_out / PAGE_SIZE];
	/* Allocate a new page */
	if (!cur_page) {
		cur_page = alloc_page(GFP_NOFS);
		if (!cur_page)
			return -ENOMEM;
		out_pages[*cur_out / PAGE_SIZE] = cur_page;
	}

	kaddr = kmap(cur_page);
	write_compress_length(kaddr + offset_in_page(*cur_out),
			      compressed_size);
	*cur_out += LZO_LEN;

	orig_out = *cur_out;

	/* Copy compressed data */
	while (*cur_out - orig_out < compressed_size) {
		u32 copy_len = min_t(u32, sectorsize - *cur_out % sectorsize,
				     orig_out + compressed_size - *cur_out);

		kunmap(cur_page);
		cur_page = out_pages[*cur_out / PAGE_SIZE];
		/* Allocate a new page */
		if (!cur_page) {
			cur_page = alloc_page(GFP_NOFS);
			if (!cur_page)
				return -ENOMEM;
			out_pages[*cur_out / PAGE_SIZE] = cur_page;
		}
		kaddr = kmap(cur_page);

		memcpy(kaddr + offset_in_page(*cur_out),
		       compressed_data + *cur_out - orig_out, copy_len);

		*cur_out += copy_len;
	}

	/*
	 * Check if we can fit the next segment header into the remaining space
	 * of the sector.
	 */
	sector_bytes_left = round_up(*cur_out, sectorsize) - *cur_out;
	if (sector_bytes_left >= LZO_LEN || sector_bytes_left == 0)
		goto out;

	/* The remaining size is not enough, pad it with zeros */
	memset(kaddr + offset_in_page(*cur_out), 0, sector_bytes_left);
	*cur_out += sector_bytes_left;

out:
	kunmap(cur_page);
	return 0;
}
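
/*
 * Compress the range [@start, @start + *@total_out) of @mapping into the
 * segmented format described at the top of this file, allocating output
 * pages in @pages as needed.
 *
 * Input is consumed one sector at a time and each sector becomes one
 * segment.  If the output is still larger than the input after two sectors,
 * the attempt is abandoned with -E2BIG.  On success, returns 0; the total
 * compressed size (header included) is written into the header and
 * *@total_out, *@total_in is set to the number of input bytes consumed and
 * *@out_pages to the number of output pages used.
 */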
int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
		u64 start, struct page **pages, unsigned long *out_pages,
		unsigned long *total_in, unsigned long *total_out)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	const u32 sectorsize = btrfs_sb(mapping->host->i_sb)->sectorsize;
	struct page *page_in = NULL;
	char *sizes_ptr;
	int ret = 0;
	/* Points to the file offset of input data */
	u64 cur_in = start;
	/* Points to the current output byte */
	u32 cur_out = 0;
	u32 len = *total_out;

	*out_pages = 0;
	*total_out = 0;
	*total_in = 0;

	/*
	 * Skip the header for now, we will later come back and write the total
	 * compressed size
	 */
	cur_out += LZO_LEN;
	while (cur_in < start + len) {
		char *data_in;
		const u32 sectorsize_mask = sectorsize - 1;
		u32 sector_off = (cur_in - start) & sectorsize_mask;
		u32 in_len;
		size_t out_len;

		/* Get the input page first */
		if (!page_in) {
			page_in = find_get_page(mapping, cur_in >> PAGE_SHIFT);
			ASSERT(page_in);
		}

		/* Compress at most one sector of data each time */
		in_len = min_t(u32, start + len - cur_in, sectorsize - sector_off);
		ASSERT(in_len);
		data_in = kmap(page_in);
		ret = lzo1x_1_compress(data_in + offset_in_page(cur_in), in_len,
				       workspace->cbuf, &out_len,
				       workspace->mem);
		kunmap(page_in);
		if (ret < 0) {
			pr_debug("BTRFS: lzo in loop returned %d\n", ret);
			ret = -EIO;
			goto out;
		}

		ret = copy_compressed_data_to_page(workspace->cbuf, out_len,
						   pages, &cur_out, sectorsize);
		if (ret < 0)
			goto out;

		cur_in += in_len;

		/*
		 * Check if we are making the data bigger after two sectors,
		 * and if so, give up.
		 */
		if (cur_in - start > sectorsize * 2 && cur_in - start < cur_out) {
			ret = -E2BIG;
			goto out;
		}

		/* Check if we have reached the page boundary */
		if (IS_ALIGNED(cur_in, PAGE_SIZE)) {
			put_page(page_in);
			page_in = NULL;
		}
	}

	/* Store the size of all chunks of compressed data */
	sizes_ptr = kmap_local_page(pages[0]);
	write_compress_length(sizes_ptr, cur_out);
	kunmap_local(sizes_ptr);

	ret = 0;
	*total_out = cur_out;
	*total_in = cur_in - start;
out:
	*out_pages = DIV_ROUND_UP(cur_out, PAGE_SIZE);
	return ret;
}

/*
 * Copy the compressed segment payload into @dest.
 *
 * For the payload there will be no padding, we just need to do page
 * switching.
 */
static void copy_compressed_segment(struct compressed_bio *cb,
				    char *dest, u32 len, u32 *cur_in)
{
	u32 orig_in = *cur_in;

	while (*cur_in < orig_in + len) {
		char *kaddr;
		struct page *cur_page;
		u32 copy_len = min_t(u32, PAGE_SIZE - offset_in_page(*cur_in),
					  orig_in + len - *cur_in);

		ASSERT(copy_len);
		cur_page = cb->compressed_pages[*cur_in / PAGE_SIZE];

		kaddr = kmap(cur_page);
		memcpy(dest + *cur_in - orig_in,
		       kaddr + offset_in_page(*cur_in),
		       copy_len);
		kunmap(cur_page);

		*cur_in += copy_len;
	}
}
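
/*
 * Decompress a whole LZO compressed extent described by @cb.
 *
 * Validates the total length recorded in the extent header, then walks the
 * segments: each segment payload (which may span page boundaries) is
 * gathered into the workspace, decompressed, and the result copied into the
 * inode pages via btrfs_decompress_buf2page().  Padding zeros at the end of
 * a sector are skipped.  On success, any part of the original bio not
 * covered by decompressed data is zero filled.
 */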
int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	const struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
	const u32 sectorsize = fs_info->sectorsize;
	char *kaddr;
	int ret;
	/* Compressed data length, can be unaligned */
	u32 len_in;
	/* Offset inside the compressed data */
	u32 cur_in = 0;
	/* Bytes decompressed so far */
	u32 cur_out = 0;

	kaddr = kmap(cb->compressed_pages[0]);
	len_in = read_compress_length(kaddr);
	kunmap(cb->compressed_pages[0]);
	cur_in += LZO_LEN;

	/*
	 * LZO header length check
	 *
	 * The total length should not exceed the maximum extent length,
	 * and all sectors should be used.
	 * If this happens, it means the compressed extent is corrupted.
	 */
	if (len_in > min_t(size_t, BTRFS_MAX_COMPRESSED, cb->compressed_len) ||
	    round_up(len_in, sectorsize) < cb->compressed_len) {
		btrfs_err(fs_info,
			"invalid lzo header, lzo len %u compressed len %u",
			len_in, cb->compressed_len);
		return -EUCLEAN;
	}

	/* Go through each lzo segment */
	while (cur_in < len_in) {
		struct page *cur_page;
		/* Length of the compressed segment */
		u32 seg_len;
		u32 sector_bytes_left;
		size_t out_len = lzo1x_worst_compress(sectorsize);

		/*
		 * We should always have enough space for one segment header
		 * inside the current sector.
		 */
		ASSERT(cur_in / sectorsize ==
		       (cur_in + LZO_LEN - 1) / sectorsize);
		cur_page = cb->compressed_pages[cur_in / PAGE_SIZE];
		ASSERT(cur_page);
		kaddr = kmap(cur_page);
		seg_len = read_compress_length(kaddr + offset_in_page(cur_in));
		kunmap(cur_page);
		cur_in += LZO_LEN;

		/* Copy the compressed segment payload into workspace */
		copy_compressed_segment(cb, workspace->cbuf, seg_len, &cur_in);

		/* Decompress the data */
		ret = lzo1x_decompress_safe(workspace->cbuf, seg_len,
					    workspace->buf, &out_len);
		if (ret != LZO_E_OK) {
			btrfs_err(fs_info, "failed to decompress");
			ret = -EIO;
			goto out;
		}

		/* Copy the data into inode pages */
		ret = btrfs_decompress_buf2page(workspace->buf, out_len, cb, cur_out);
		cur_out += out_len;

		/* All data read, exit */
		if (ret == 0)
			goto out;
		ret = 0;

		/* Check if the sector has enough space for a segment header */
		sector_bytes_left = sectorsize - (cur_in % sectorsize);
		if (sector_bytes_left >= LZO_LEN)
			continue;

		/* Skip the padding zeros */
		cur_in += sector_bytes_left;
	}
out:
	if (!ret)
		zero_fill_bio(cb->orig_bio);
	return ret;
}

int lzo_decompress(struct list_head *ws, unsigned char *data_in,
		struct page *dest_page, unsigned long start_byte, size_t srclen,
		size_t destlen)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	size_t in_len;
	size_t out_len;
	size_t max_segment_len = lzo1x_worst_compress(PAGE_SIZE);
	int ret = 0;
	char *kaddr;
	unsigned long bytes;

	if (srclen < LZO_LEN || srclen > max_segment_len + LZO_LEN * 2)
		return -EUCLEAN;

	in_len = read_compress_length(data_in);
	if (in_len != srclen)
		return -EUCLEAN;
	data_in += LZO_LEN;

	in_len = read_compress_length(data_in);
	if (in_len != srclen - LZO_LEN * 2) {
		ret = -EUCLEAN;
		goto out;
	}
	data_in += LZO_LEN;

	out_len = PAGE_SIZE;
	ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len);
	if (ret != LZO_E_OK) {
		pr_warn("BTRFS: decompress failed!\n");
		ret = -EIO;
		goto out;
	}

	if (out_len < start_byte) {
		ret = -EIO;
		goto out;
	}

	/*
	 * The caller is already checking against PAGE_SIZE, but let's
	 * move this check closer to the memcpy/memset.
	 */
	destlen = min_t(unsigned long, destlen, PAGE_SIZE);
	bytes = min_t(unsigned long, destlen, out_len - start_byte);

	kaddr = kmap_local_page(dest_page);
	memcpy(kaddr, workspace->buf + start_byte, bytes);

	/*
	 * btrfs_getblock is doing a zero on the tail of the page too,
	 * but this will cover anything missing from the decompressed
	 * data.
	 */
	if (bytes < destlen)
		memset(kaddr + bytes, 0, destlen - bytes);
	kunmap_local(kaddr);
out:
	return ret;
}

const struct btrfs_compress_op btrfs_lzo_compress = {
	.workspace_manager	= &wsm,
	.max_level		= 1,
	.default_level		= 1,
};
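
/*
 * LZO in btrfs has no tunable compression levels, hence the fixed maximum
 * and default level of 1 above.  The shared workspace manager is handed to
 * the generic compression code so that workspaces can be reused across
 * operations.
 */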