Lines matching references to spool in mm/hugetlb.c. Each entry shows the source line number, the matching code, and the enclosing function; "argument" and "local" mark the lines where spool is declared as a function parameter or a local variable.
102 static inline bool subpool_is_free(struct hugepage_subpool *spool) in subpool_is_free() argument
104 if (spool->count) in subpool_is_free()
106 if (spool->max_hpages != -1) in subpool_is_free()
107 return spool->used_hpages == 0; in subpool_is_free()
108 if (spool->min_hpages != -1) in subpool_is_free()
109 return spool->rsv_hpages == spool->min_hpages; in subpool_is_free()
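Taken together, lines 102-109 are essentially the whole of subpool_is_free(). A sketch of the complete helper, with the two bare return statements that the listing omits (they do not mention spool) filled in as assumptions:

    /*
     * A subpool may be freed once nothing holds a reference to it and its
     * accounting has returned to the state established at creation time.
     */
    static inline bool subpool_is_free(struct hugepage_subpool *spool)
    {
        if (spool->count)
            return false;                   /* still referenced (assumed line) */
        if (spool->max_hpages != -1)
            return spool->used_hpages == 0;
        if (spool->min_hpages != -1)
            return spool->rsv_hpages == spool->min_hpages;

        return true;                        /* no limits configured (assumed line) */
    }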
114 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool, in unlock_or_release_subpool() argument
117 spin_unlock_irqrestore(&spool->lock, irq_flags); in unlock_or_release_subpool()
122 if (subpool_is_free(spool)) { in unlock_or_release_subpool()
123 if (spool->min_hpages != -1) in unlock_or_release_subpool()
124 hugetlb_acct_memory(spool->hstate, in unlock_or_release_subpool()
125 -spool->min_hpages); in unlock_or_release_subpool()
126 kfree(spool); in unlock_or_release_subpool()
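unlock_or_release_subpool() drops the subpool lock that the caller took with spin_lock_irqsave() and, if subpool_is_free() now holds, gives back the minimum-size reservation and frees the structure. A sketch of the full body; the second parameter's name follows its use at line 117, and the braces and comment are assumptions:

    static inline void unlock_or_release_subpool(struct hugepage_subpool *spool,
                                                 unsigned long irq_flags)
    {
        spin_unlock_irqrestore(&spool->lock, irq_flags);

        /*
         * No pages in use and no remaining handles: return any minimum-size
         * reservation to the global pool and free the subpool itself.
         */
        if (subpool_is_free(spool)) {
            if (spool->min_hpages != -1)
                hugetlb_acct_memory(spool->hstate,
                                    -spool->min_hpages);
            kfree(spool);
        }
    }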
133 struct hugepage_subpool *spool; in hugepage_new_subpool() local
135 spool = kzalloc(sizeof(*spool), GFP_KERNEL); in hugepage_new_subpool()
136 if (!spool) in hugepage_new_subpool()
139 spin_lock_init(&spool->lock); in hugepage_new_subpool()
140 spool->count = 1; in hugepage_new_subpool()
141 spool->max_hpages = max_hpages; in hugepage_new_subpool()
142 spool->hstate = h; in hugepage_new_subpool()
143 spool->min_hpages = min_hpages; in hugepage_new_subpool()
146 kfree(spool); in hugepage_new_subpool()
149 spool->rsv_hpages = min_hpages; in hugepage_new_subpool()
151 return spool; in hugepage_new_subpool()
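The kfree(spool) at line 146, sandwiched between the field initialisation and the rsv_hpages assignment at line 149, sits on an error path the listing does not show. A sketch of the likely constructor; the signature is inferred from the fields being set, and the hugetlb_acct_memory() guard on that error path is an assumption suggested by the matching -min_hpages adjustment in unlock_or_release_subpool():

    struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
                                                  long min_hpages)
    {
        struct hugepage_subpool *spool;

        spool = kzalloc(sizeof(*spool), GFP_KERNEL);
        if (!spool)
            return NULL;

        spin_lock_init(&spool->lock);
        spool->count = 1;                   /* dropped by hugepage_put_subpool() */
        spool->max_hpages = max_hpages;     /* -1 means no maximum */
        spool->hstate = h;
        spool->min_hpages = min_hpages;     /* -1 means no minimum */

        /* Assumed guard: charge the minimum reservation up front, or bail out. */
        if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
            kfree(spool);
            return NULL;
        }
        spool->rsv_hpages = min_hpages;

        return spool;
    }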
154 void hugepage_put_subpool(struct hugepage_subpool *spool) in hugepage_put_subpool() argument
158 spin_lock_irqsave(&spool->lock, flags); in hugepage_put_subpool()
159 BUG_ON(!spool->count); in hugepage_put_subpool()
160 spool->count--; in hugepage_put_subpool()
161 unlock_or_release_subpool(spool, flags); in hugepage_put_subpool()
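hugepage_put_subpool() is listed almost in full; only the local flags variable used by the irqsave/irqrestore pair is missing. It drops the reference taken by hugepage_new_subpool() (spool->count = 1) and lets unlock_or_release_subpool() decide whether the subpool can go away:

    void hugepage_put_subpool(struct hugepage_subpool *spool)
    {
        unsigned long flags;                /* assumed local for spin_lock_irqsave() */

        spin_lock_irqsave(&spool->lock, flags);
        BUG_ON(!spool->count);              /* caller must still hold a reference */
        spool->count--;
        /* Drops the lock and frees the subpool if this was the last user. */
        unlock_or_release_subpool(spool, flags);
    }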
172 static long hugepage_subpool_get_pages(struct hugepage_subpool *spool, in hugepage_subpool_get_pages() argument
177 if (!spool) in hugepage_subpool_get_pages()
180 spin_lock_irq(&spool->lock); in hugepage_subpool_get_pages()
182 if (spool->max_hpages != -1) { /* maximum size accounting */ in hugepage_subpool_get_pages()
183 if ((spool->used_hpages + delta) <= spool->max_hpages) in hugepage_subpool_get_pages()
184 spool->used_hpages += delta; in hugepage_subpool_get_pages()
192 if (spool->min_hpages != -1 && spool->rsv_hpages) { in hugepage_subpool_get_pages()
193 if (delta > spool->rsv_hpages) { in hugepage_subpool_get_pages()
198 ret = delta - spool->rsv_hpages; in hugepage_subpool_get_pages()
199 spool->rsv_hpages = 0; in hugepage_subpool_get_pages()
202 spool->rsv_hpages -= delta; in hugepage_subpool_get_pages()
207 spin_unlock_irq(&spool->lock); in hugepage_subpool_get_pages()
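hugepage_subpool_get_pages() is the charge side of subpool accounting: on success it returns how many of the delta pages still have to be charged to the global pool (0 if the subpool's reservation covers them all), and a negative value if the subpool's maximum would be exceeded. A sketch of the full function; the ret bookkeeping, the -ENOMEM branch and the unlock label are assumptions, since those lines do not mention spool:

    static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
                                           long delta)
    {
        long ret = delta;                   /* default: charge everything globally */

        if (!spool)
            return ret;                     /* no subpool: nothing to account here */

        spin_lock_irq(&spool->lock);

        if (spool->max_hpages != -1) {      /* maximum size accounting */
            if ((spool->used_hpages + delta) <= spool->max_hpages)
                spool->used_hpages += delta;
            else {
                ret = -ENOMEM;              /* assumed: over the subpool maximum */
                goto unlock_ret;
            }
        }

        /* minimum size accounting */
        if (spool->min_hpages != -1 && spool->rsv_hpages) {
            if (delta > spool->rsv_hpages) {
                /* Request exceeds the pages reserved for this subpool;
                 * only the difference needs a global charge. */
                ret = delta - spool->rsv_hpages;
                spool->rsv_hpages = 0;
            } else {
                ret = 0;                    /* fully covered by the reservation */
                spool->rsv_hpages -= delta;
            }
        }

    unlock_ret:
        spin_unlock_irq(&spool->lock);
        return ret;
    }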
217 static long hugepage_subpool_put_pages(struct hugepage_subpool *spool, in hugepage_subpool_put_pages() argument
223 if (!spool) in hugepage_subpool_put_pages()
226 spin_lock_irqsave(&spool->lock, flags); in hugepage_subpool_put_pages()
228 if (spool->max_hpages != -1) /* maximum size accounting */ in hugepage_subpool_put_pages()
229 spool->used_hpages -= delta; in hugepage_subpool_put_pages()
232 if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) { in hugepage_subpool_put_pages()
233 if (spool->rsv_hpages + delta <= spool->min_hpages) in hugepage_subpool_put_pages()
236 ret = spool->rsv_hpages + delta - spool->min_hpages; in hugepage_subpool_put_pages()
238 spool->rsv_hpages += delta; in hugepage_subpool_put_pages()
239 if (spool->rsv_hpages > spool->min_hpages) in hugepage_subpool_put_pages()
240 spool->rsv_hpages = spool->min_hpages; in hugepage_subpool_put_pages()
247 unlock_or_release_subpool(spool, flags); in hugepage_subpool_put_pages()
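hugepage_subpool_put_pages() mirrors the charge path: it first tops the subpool's reservation back up toward min_hpages, and its return value is the number of pages the caller may hand back to the global pool. The final unlock_or_release_subpool() call (line 247) can also free the subpool outright. A sketch with the unlisted locals, the ret assignments and the return filled in as assumptions:

    static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
                                           long delta)
    {
        long ret = delta;
        unsigned long flags;

        if (!spool)
            return delta;

        spin_lock_irqsave(&spool->lock, flags);

        if (spool->max_hpages != -1)        /* maximum size accounting */
            spool->used_hpages -= delta;

        /* minimum size accounting */
        if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
            if (spool->rsv_hpages + delta <= spool->min_hpages)
                ret = 0;                    /* everything refills the reservation */
            else
                ret = spool->rsv_hpages + delta - spool->min_hpages;

            spool->rsv_hpages += delta;
            if (spool->rsv_hpages > spool->min_hpages)
                spool->rsv_hpages = spool->min_hpages;
        }

        /* Drops the lock; may free the subpool if the last reference is gone. */
        unlock_or_release_subpool(spool, flags);

        return ret;
    }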
254 return HUGETLBFS_SB(inode->i_sb)->spool; in subpool_inode()
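subpool_inode() shows up only through its return statement. The callers below also rely on a subpool_vma() helper (lines 3056 and 4875) that never names spool directly and therefore does not appear in this listing; its body here is an assumption inferred from how it is used:

    static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
    {
        /* The subpool hangs off the hugetlbfs superblock info. */
        return HUGETLBFS_SB(inode->i_sb)->spool;
    }

    /* Assumed helper: resolve a mapping's subpool via its backing file's inode. */
    static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
    {
        return subpool_inode(file_inode(vma->vm_file));
    }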
936 struct hugepage_subpool *spool = subpool_inode(inode); in hugetlb_fix_reserve_counts() local
940 rsv_adjust = hugepage_subpool_get_pages(spool, 1); in hugetlb_fix_reserve_counts()
1901 struct hugepage_subpool *spool = hugetlb_folio_subpool(folio); in free_huge_folio() local
1930 if (hugepage_subpool_put_pages(spool, 1) == 0) in free_huge_folio()
3056 struct hugepage_subpool *spool = subpool_vma(vma); in alloc_hugetlb_folio() local
3083 gbl_chg = hugepage_subpool_get_pages(spool, 1); in alloc_hugetlb_folio()
3148 hugetlb_set_folio_subpool(folio, spool); in alloc_hugetlb_folio()
3163 rsv_adjust = hugepage_subpool_put_pages(spool, 1); in alloc_hugetlb_folio()
3182 hugepage_subpool_put_pages(spool, 1); in alloc_hugetlb_folio()
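alloc_hugetlb_folio() (lines 3056-3182) charges the subpool for one page and records the subpool on the folio at line 3148; that stored pointer is what lets free_huge_folio() (lines 1901 and 1930) uncharge the right subpool when the page is eventually freed. A condensed, hedged sketch of the pairing; everything outside the calls shown in the listing is assumed:

    /* Allocation path, condensed from alloc_hugetlb_folio(): */
    gbl_chg = hugepage_subpool_get_pages(spool, 1);
    if (gbl_chg < 0)
        return ERR_PTR(-ENOSPC);            /* assumed failure handling */
    hugetlb_set_folio_subpool(folio, spool); /* the folio remembers its subpool */

    /* Release path, condensed from free_huge_folio(): */
    spool = hugetlb_folio_subpool(folio);
    if (hugepage_subpool_put_pages(spool, 1) == 0)
        /* Zero: the subpool absorbed the page into its minimum reservation. */
        restore_reserve = true;             /* assumed follow-up, not in the listing */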
4875 struct hugepage_subpool *spool = subpool_vma(vma); in hugetlb_vm_op_close() local
4895 gbl_reserve = hugepage_subpool_put_pages(spool, reserve); in hugetlb_vm_op_close()
6716 struct hugepage_subpool *spool = subpool_inode(inode); in hugetlb_reserve_pages() local
6787 gbl_reserve = hugepage_subpool_get_pages(spool, chg); in hugetlb_reserve_pages()
6833 rsv_adjust = hugepage_subpool_put_pages(spool, in hugetlb_reserve_pages()
6850 (void)hugepage_subpool_put_pages(spool, chg); in hugetlb_reserve_pages()
6875 struct hugepage_subpool *spool = subpool_inode(inode); in hugetlb_unreserve_pages() local
6904 gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed)); in hugetlb_unreserve_pages()
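Lines 6787, 6833 and 6850 show the order hugetlb_reserve_pages() uses: charge the subpool first, then charge the global pool with whatever the subpool did not absorb, and unwind through the subpool if the global charge fails. A hedged sketch of that pattern packaged as a free-standing helper; the name reserve_via_subpool() and its error handling are illustrative, not taken from the listing:

    /* Illustrative only: mirrors the charge/unwind order in hugetlb_reserve_pages(). */
    static int reserve_via_subpool(struct hstate *h, struct hugepage_subpool *spool,
                                   long chg)
    {
        long gbl_reserve;

        /* Ask the subpool first; it returns what still needs global charging. */
        gbl_reserve = hugepage_subpool_get_pages(spool, chg);
        if (gbl_reserve < 0)
            return -ENOMEM;

        /* Charge only the uncovered remainder against the global hugepage pool. */
        if (hugetlb_acct_memory(h, gbl_reserve)) {
            /* Failure: hand the pages back to the subpool (cf. line 6850). */
            (void)hugepage_subpool_put_pages(spool, chg);
            return -ENOMEM;
        }

        return 0;
    }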