zdata.c (old: 47e4937a4a7ca4184fd282791dfee76c6799966a, new: 8d8a09b093d7073465c824f74caf315c073d3875)
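The only difference between the two revisions compared here is that the later one drops the likely()/unlikely() branch-prediction annotations from its conditionals. For context, these are the usual kernel wrappers around GCC's __builtin_expect(); a minimal sketch of the definitions in include/linux/compiler.h (profiling variants omitted; not part of this diff):

/* branch-prediction hints: tell the compiler which outcome is expected
 * so it can lay out the hot path first; they do not change semantics */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

Removing them leaves the control flow identical; only the static layout/prediction hint is gone. In the hunks below, changed lines appear as old (-) / new (+) pairs.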
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2018 HUAWEI, Inc.
4 * http://www.huawei.com/
5 * Created by Gao Xiang <gaoxiang25@huawei.com>
6 */
7#include "zdata.h"
8#include "compress.h"

--- 216 unchanged lines hidden ---

225
226 if (!page)
227 continue;
228
229 /* block other users from reclaiming or migrating the page */
230 if (!trylock_page(page))
231 return -EBUSY;
232
233 -	if (unlikely(page->mapping != mapping))
233 +	if (page->mapping != mapping)
234 continue;
235
236 /* barrier is implied in the following 'unlock_page' */
237 WRITE_ONCE(pcl->compressed_pages[i], NULL);
238 set_page_private(page, 0);
239 ClearPagePrivate(page);
240
241 unlock_page(page);

--- 111 unchanged lines hidden ---

353 pcl = container_of(grp, struct z_erofs_pcluster, obj);
354 if (clt->owned_head == &pcl->next || pcl == clt->tailpcl) {
355 DBG_BUGON(1);
356 erofs_workgroup_put(grp);
357 return ERR_PTR(-EFSCORRUPTED);
358 }
359
360 cl = z_erofs_primarycollection(pcl);
361 -	if (unlikely(cl->pageofs != (map->m_la & ~PAGE_MASK))) {
361 +	if (cl->pageofs != (map->m_la & ~PAGE_MASK)) {
362 DBG_BUGON(1);
363 erofs_workgroup_put(grp);
364 return ERR_PTR(-EFSCORRUPTED);
365 }
366
367 length = READ_ONCE(pcl->length);
368 if (length & Z_EROFS_PCLUSTER_FULL_LENGTH) {
369 if ((map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) > length) {

--- 31 unchanged lines hidden ---

401 struct erofs_map_blocks *map)
402{
403 struct z_erofs_pcluster *pcl;
404 struct z_erofs_collection *cl;
405 int err;
406
407 /* no available workgroup, let's allocate one */
408 pcl = kmem_cache_alloc(pcluster_cachep, GFP_NOFS);
409 -	if (unlikely(!pcl))
409 +	if (!pcl)
410 return ERR_PTR(-ENOMEM);
411
412 init_always(pcl);
413 pcl->obj.index = map->m_pa >> PAGE_SHIFT;
414
415 pcl->length = (map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) |
416 (map->m_flags & EROFS_MAP_FULL_MAPPED ?
417 Z_EROFS_PCLUSTER_FULL_LENGTH : 0);

--- 51 unchanged lines hidden ---

469 return -EINVAL;
470 }
471
472repeat:
473 cl = cllookup(clt, inode, map);
474 if (!cl) {
475 cl = clregister(clt, inode, map);
476
477 -	if (unlikely(cl == ERR_PTR(-EAGAIN)))
477 +	if (cl == ERR_PTR(-EAGAIN))
478 goto repeat;
479 }
480
481 if (IS_ERR(cl))
482 return PTR_ERR(cl);
483
484 z_erofs_pagevec_ctor_init(&clt->vector, Z_EROFS_NR_INLINE_PAGEVECS,
485 cl->pagevec, cl->vcnt);

--- 116 unchanged lines hidden ---

602 debugln("%s: [out-of-range] pos %llu", __func__, offset + cur);
603
604 if (z_erofs_collector_end(clt))
605 fe->backmost = false;
606
607 map->m_la = offset + cur;
608 map->m_llen = 0;
609 err = z_erofs_map_blocks_iter(inode, map, 0);
610 -	if (unlikely(err))
610 +	if (err)
611 goto err_out;
612
613restart_now:
614 -	if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED)))
614 +	if (!(map->m_flags & EROFS_MAP_MAPPED))
615 goto hitted;
616
617 err = z_erofs_collector_begin(clt, inode, map);
618 -	if (unlikely(err))
618 +	if (err)
619 goto err_out;
620
621 /* preload all compressed pages (maybe downgrade role if necessary) */
622 if (should_alloc_managed_pages(fe, sbi->cache_strategy, map->m_la))
623 cache_strategy = DELAYEDALLOC;
624 else
625 cache_strategy = DONTALLOC;
626
627 preload_compressed_pages(clt, MNGD_MAPPING(sbi),
628 cache_strategy, pagepool);
629
630 tight &= (clt->mode >= COLLECT_PRIMARY_HOOKED);
631hitted:
632 cur = end - min_t(unsigned int, offset + end - map->m_la, end);
633 -	if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED))) {
633 +	if (!(map->m_flags & EROFS_MAP_MAPPED)) {
634 zero_user_segment(page, cur, end);
635 goto next_part;
636 }
637
638 /* let's derive page type */
639 page_type = cur ? Z_EROFS_VLE_PAGE_TYPE_HEAD :
640 (!spiltted ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
641 (tight ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :

--- 6 unchanged lines hidden ---

648 err = z_erofs_attach_page(clt, page, page_type);
649 /* should allocate an additional staging page for pagevec */
650 if (err == -EAGAIN) {
651 struct page *const newpage =
652 __stagingpage_alloc(pagepool, GFP_NOFS);
653
654 err = z_erofs_attach_page(clt, newpage,
655 Z_EROFS_PAGE_TYPE_EXCLUSIVE);
656 -	if (likely(!err))
656 +	if (!err)
657 goto retry;
658 }
659
660 -	if (unlikely(err))
660 +	if (err)
661 goto err_out;
662
663 index = page->index - (map->m_la >> PAGE_SHIFT);
664
665 z_erofs_onlinepage_fixup(page, index, true);
666
667 /* bump up the number of spiltted parts of a page */
668 ++spiltted;

--- 49 unchanged lines hidden ---

718
719 bio_for_each_segment_all(bvec, bio, iter_all) {
720 struct page *page = bvec->bv_page;
721 bool cachemngd = false;
722
723 DBG_BUGON(PageUptodate(page));
724 DBG_BUGON(!page->mapping);
725
726 -	if (unlikely(!sbi && !z_erofs_page_is_staging(page))) {
726 +	if (!sbi && !z_erofs_page_is_staging(page)) {
727 sbi = EROFS_SB(page->mapping->host->i_sb);
728
729 if (time_to_inject(sbi, FAULT_READ_IO)) {
730 erofs_show_injection_info(FAULT_READ_IO);
731 err = BLK_STS_IOERR;
732 }
733 }
734
735 /* sbi should already be gotten if the page is managed */
736 if (sbi)
737 cachemngd = erofs_page_is_managed(sbi, page);
738
739 -	if (unlikely(err))
739 +	if (err)
740 SetPageError(page);
741 else if (cachemngd)
742 SetPageUptodate(page);
743
744 if (cachemngd)
745 unlock_page(page);
746 }
747

--- 19 unchanged lines hidden ---

767
768 might_sleep();
769 cl = z_erofs_primarycollection(pcl);
770 DBG_BUGON(!READ_ONCE(cl->nr_pages));
771
772 mutex_lock(&cl->lock);
773 nr_pages = cl->nr_pages;
774
775 -	if (likely(nr_pages <= Z_EROFS_VMAP_ONSTACK_PAGES)) {
775 +	if (nr_pages <= Z_EROFS_VMAP_ONSTACK_PAGES) {
776 pages = pages_onstack;
777 } else if (nr_pages <= Z_EROFS_VMAP_GLOBAL_PAGES &&
778 mutex_trylock(&z_pagemap_global_lock)) {
779 pages = z_pagemap_global;
780 } else {
781 gfp_t gfp_flags = GFP_KERNEL;
782
783 if (nr_pages > Z_EROFS_VMAP_GLOBAL_PAGES)
784 gfp_flags |= __GFP_NOFAIL;
785
786 pages = kvmalloc_array(nr_pages, sizeof(struct page *),
787 gfp_flags);
788
789 /* fallback to global pagemap for the lowmem scenario */
790 -	if (unlikely(!pages)) {
790 +	if (!pages) {
791 mutex_lock(&z_pagemap_global_lock);
792 pages = z_pagemap_global;
793 }
794 }
795
796 for (i = 0; i < nr_pages; ++i)
797 pages[i] = NULL;
798

--- 19 unchanged lines hidden ---

818 pagenr = z_erofs_onlinepage_index(page);
819
820 DBG_BUGON(pagenr >= nr_pages);
821
822 /*
823 * currently EROFS doesn't support multiref(dedup),
824 * so here erroring out one multiref page.
825 */
826 -	if (unlikely(pages[pagenr])) {
826 +	if (pages[pagenr]) {
827 DBG_BUGON(1);
828 SetPageError(pages[pagenr]);
829 z_erofs_onlinepage_endio(pages[pagenr]);
830 err = -EFSCORRUPTED;
831 }
832 pages[pagenr] = page;
833 }
834 z_erofs_pagevec_ctor_exit(&ctor, true);

--- 7 unchanged lines hidden ---

842 page = compressed_pages[i];
843
844 /* all compressed pages ought to be valid */
845 DBG_BUGON(!page);
846 DBG_BUGON(!page->mapping);
847
848 if (!z_erofs_page_is_staging(page)) {
849 if (erofs_page_is_managed(sbi, page)) {
850 -	if (unlikely(!PageUptodate(page)))
850 +	if (!PageUptodate(page))
851 err = -EIO;
852 continue;
853 }
854
855 /*
856 * only if non-head page can be selected
857 * for inplace decompression
858 */
859 pagenr = z_erofs_onlinepage_index(page);
860
861 DBG_BUGON(pagenr >= nr_pages);
862 -	if (unlikely(pages[pagenr])) {
862 +	if (pages[pagenr]) {
863 DBG_BUGON(1);
864 SetPageError(pages[pagenr]);
865 z_erofs_onlinepage_endio(pages[pagenr]);
866 err = -EFSCORRUPTED;
867 }
868 pages[pagenr] = page;
869
870 overlapped = true;
871 }
872
873 /* PG_error needs checking for inplaced and staging pages */
874 -	if (unlikely(PageError(page))) {
874 +	if (PageError(page)) {
875 DBG_BUGON(PageUptodate(page));
876 err = -EIO;
877 }
878 }
879
880 -	if (unlikely(err))
880 +	if (err)
881 goto out;
882
883 llen = pcl->length >> Z_EROFS_PCLUSTER_LENGTH_BIT;
884 if (nr_pages << PAGE_SHIFT >= cl->pageofs + llen) {
885 outputsize = llen;
886 partial = !(pcl->length & Z_EROFS_PCLUSTER_FULL_LENGTH);
887 } else {
888 outputsize = (nr_pages << PAGE_SHIFT) - cl->pageofs;

--- 32 unchanged lines hidden ---

921 continue;
922
923 DBG_BUGON(!page->mapping);
924
925 /* recycle all individual staging pages */
926 if (z_erofs_put_stagingpage(pagepool, page))
927 continue;
928
929 -	if (unlikely(err < 0))
929 +	if (err < 0)
930 SetPageError(page);
931
932 z_erofs_onlinepage_endio(page);
933 }
934
935 if (pages == z_pagemap_global)
936 mutex_unlock(&z_pagemap_global_lock);
937 -	else if (unlikely(pages != pages_onstack))
937 +	else if (pages != pages_onstack)
938 kvfree(pages);
939
940 cl->nr_pages = 0;
941 cl->vcnt = 0;
942
943 /* all cl locks MUST be taken before the following line */
944 WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL);
945

--- 261 unchanged lines hidden ---

1207 struct z_erofs_unzip_io *q[NR_JOBQUEUES];
1208 struct bio *bio;
1209 void *bi_private;
1210 /* since bio will be NULL, no need to initialize last_index */
1211 pgoff_t uninitialized_var(last_index);
1212 bool force_submit = false;
1213 unsigned int nr_bios;
1214
1215 -	if (unlikely(owned_head == Z_EROFS_PCLUSTER_TAIL))
1215 +	if (owned_head == Z_EROFS_PCLUSTER_TAIL)
1216 return false;
1217
1218 force_submit = false;
1219 bio = NULL;
1220 nr_bios = 0;
1221 bi_private = jobqueueset_init(sb, qtail, q, fgq, force_fg);
1222
1223 /* by default, all need io submission */

--- 209 unchanged lines hidden ---
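Every hunk above follows the same shape. Taking line 409 as a representative example (copied from the context above), the old and new forms differ only in the dropped hint, not in behaviour:

	/* 47e4937a (old): allocation failure flagged as a cold path */
	pcl = kmem_cache_alloc(pcluster_cachep, GFP_NOFS);
	if (unlikely(!pcl))
		return ERR_PTR(-ENOMEM);

	/* 8d8a09b0 (new): plain conditional, identical control flow */
	pcl = kmem_cache_alloc(pcluster_cachep, GFP_NOFS);
	if (!pcl)
		return ERR_PTR(-ENOMEM);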