Cross-reference of struct xa_state ("xas") users in the tree. The numbers on each line below are source line numbers within the listed file; [all …] marks files with further matches elided.

/openbmc/linux/lib/

xarray.c  [xas_lock_type(), xas_unlock_type(), xas_squash_marks()]

     38  static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type)
     41          xas_lock_irq(xas);
     43          xas_lock_bh(xas);
     45          xas_lock(xas);
     48  static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type)
     51          xas_unlock_irq(xas);
     53          xas_unlock_bh(xas);
     55          xas_unlock(xas);
    126  static void xas_squash_marks(const struct xa_state *xas)
    129          unsigned int limit = xas->xa_offset + xas->xa_sibs + 1;
    [all …]

test_xarray.c  [xa_store_order(), check_xas_retry()]

     74  XA_STATE_ORDER(xas, xa, index, order);
     78          xas_lock(&xas);
     79          curr = xas_store(&xas, entry);
     80          xas_unlock(&xas);
     81  } while (xas_nomem(&xas, gfp));
    104  XA_STATE(xas, xa, 0);
    111  XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0));
    113  XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas)));
    114  XA_BUG_ON(xa, xas_retry(&xas, NULL));
    115  XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0)));
    [all …]

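The xa_store_order() lines above are the canonical XArray store loop: lock, attempt the store, unlock, and let xas_nomem() allocate on failure and request a retry. A minimal sketch of the same pattern, with illustrative names (store_with_retry is not from the file):

    #include <linux/xarray.h>

    /* Sketch: store "entry" at "index", retrying if allocation is needed. */
    static void *store_with_retry(struct xarray *xa, unsigned long index,
                                  void *entry, gfp_t gfp)
    {
            XA_STATE(xas, xa, index);
            void *curr;

            do {
                    xas_lock(&xas);
                    curr = xas_store(&xas, entry);  /* returns the old entry */
                    xas_unlock(&xas);
            } while (xas_nomem(&xas, gfp));  /* allocates, then asks us to retry */

            return curr;
    }
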
idr.c  [ida_alloc_range()]

    383  XA_STATE(xas, &ida->xa, min / IDA_BITMAP_BITS);
    395  xas_lock_irqsave(&xas, flags);
    397  bitmap = xas_find_marked(&xas, max / IDA_BITMAP_BITS, XA_FREE_MARK);
    398  if (xas.xa_index > min / IDA_BITMAP_BITS)
    400  if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
    408  if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
    412  xas_store(&xas, xa_mk_value(tmp));
    422  xas_store(&xas, bitmap);
    423  if (xas_error(&xas)) {
    431  if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
    [all …]

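ida_alloc_range() builds the IDA allocator on an XArray of bitmaps, finding space via the XA_FREE_MARK search shown above. Callers normally use the public interface rather than an xa_state directly; a short usage sketch (the ida name and range are illustrative):

    #include <linux/idr.h>

    static DEFINE_IDA(example_ida);     /* illustrative name */

    static int example_get_id(void)
    {
            /* Allocate the smallest free ID in [16, 255]. */
            int id = ida_alloc_range(&example_ida, 16, 255, GFP_KERNEL);

            if (id < 0)
                    return id;          /* -ENOMEM or -ENOSPC */
            /* ... use id ... */
            ida_free(&example_ida, id);
            return 0;
    }
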
iov_iter.c  [an iteration helper macro, iter_xarray_populate_pages(), iov_iter_extract_xarray_pages()]

     83  XA_STATE(xas, i->xarray, index); \
     87  xas_for_each(&xas, folio, ULONG_MAX) { \
     90          if (xas_retry(&xas, folio)) \
   1010  XA_STATE(xas, xa, index);
   1015  for (page = xas_load(&xas); page; page = xas_next(&xas)) {
   1016          if (xas_retry(&xas, page))
   1020          if (unlikely(page != xas_reload(&xas))) {
   1021                  xas_reset(&xas);
   1025          pages[ret] = find_subpage(page, xas.xa_index);
   1617  XA_STATE(xas, i->xarray, index);
   [all …]

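iter_xarray_populate_pages() walks the array under RCU alone, so every entry must be revalidated: xas_retry() skips internal retry entries, and a changed xas_reload() result forces xas_reset() and another pass. A sketch of that revalidation loop, under assumed illustrative names:

    #include <linux/xarray.h>
    #include <linux/rcupdate.h>

    /* Sketch: count entries from "index" onward under RCU, revalidating each. */
    static unsigned int count_entries(struct xarray *xa, unsigned long index)
    {
            XA_STATE(xas, xa, index);
            void *entry;
            unsigned int nr = 0;

            rcu_read_lock();
            for (entry = xas_load(&xas); entry; entry = xas_next(&xas)) {
                    if (xas_retry(&xas, entry))      /* raced with a node change */
                            continue;
                    if (entry != xas_reload(&xas)) { /* entry moved under us */
                            xas_reset(&xas);
                            continue;
                    }
                    nr++;
            }
            rcu_read_unlock();
            return nr;
    }
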
/openbmc/linux/include/linux/

xarray.h  [locking and mark helper macros]

   1406  #define xas_marked(xas, mark)   xa_marked((xas)->xa, (mark))
   1407  #define xas_trylock(xas)        xa_trylock((xas)->xa)
   1408  #define xas_lock(xas)           xa_lock((xas)->xa)
   1409  #define xas_unlock(xas)         xa_unlock((xas)->xa)
   1410  #define xas_lock_bh(xas)        xa_lock_bh((xas)->xa)
   1411  #define xas_unlock_bh(xas)      xa_unlock_bh((xas)->xa)
   1412  #define xas_lock_irq(xas)       xa_lock_irq((xas)->xa)
   1413  #define xas_unlock_irq(xas)     xa_unlock_irq((xas)->xa)
   1414  #define xas_lock_irqsave(xas, flags) \
   1415          xa_lock_irqsave((xas)->xa, flags)
   [all …]

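These macros simply forward to the matching xa_lock*() variants on the underlying array, so the caller picks the variant that matches the array's interrupt usage (plain, _bh for softirq users, _irq/_irqsave for hardirq users). A small sketch, assuming an array that interrupt handlers also touch (erase_irqsafe is an illustrative name):

    #include <linux/xarray.h>

    /* Sketch: erase an entry from an array that IRQ context also uses. */
    static void *erase_irqsafe(struct xarray *xa, unsigned long index)
    {
            XA_STATE(xas, xa, index);
            unsigned long flags;
            void *old;

            xas_lock_irqsave(&xas, flags);
            old = xas_store(&xas, NULL);    /* storing NULL erases the entry */
            xas_unlock_irqrestore(&xas, flags);
            return old;
    }
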
/openbmc/linux/fs/

dax.c  [dax_entry_waitqueue(), dax_wake_entry(), get_unlocked_entry()]

    143  static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
    147          unsigned long index = xas->xa_index;
    156          key->xa = xas->xa;
    159          hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
    181  static void dax_wake_entry(struct xa_state *xas, void *entry,
    187          wq = dax_entry_waitqueue(xas, entry, &key);
    209  static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
    219          entry = xas_find_conflict(xas);
    227          wq = dax_entry_waitqueue(xas, entry, &ewait.key);
    230          xas_unlock_irq(xas);
    [all …]

/openbmc/linux/tools/testing/radix-tree/

iteration_check.c  [my_item_insert(), tagged_iteration_fn()]

     23  XA_STATE(xas, xa, index);
     28  xas_lock(&xas);
     30  xas_set_order(&xas, index, order);
     32  if (xas_find_conflict(&xas))
     34  xas_store(&xas, item);
     35  xas_set_mark(&xas, TAG);
     38  xas_unlock(&xas);
     39  if (xas_nomem(&xas, GFP_KERNEL))
     69  XA_STATE(xas, &array, 0);
     75  xas_set(&xas, 0);
    [all …]

multiorder.c  [item_insert_order(), multiorder_iteration()]

     18  XA_STATE_ORDER(xas, xa, index, order);
     22          xas_lock(&xas);
     23          xas_store(&xas, item);
     24          xas_unlock(&xas);
     25  } while (xas_nomem(&xas, GFP_KERNEL));
     27  if (!xas_error(&xas))
     31  return xas_error(&xas);
     36  XA_STATE(xas, xa, 0);
     56  xas_set(&xas, j);
     57  xas_for_each(&xas, item, ULONG_MAX) {
    [all …]

test.c  [tag_tagged_items(), item_kill_tree()]

    176  XA_STATE(xas, xa, start);
    183  xas_lock_irq(&xas);
    184  xas_for_each_marked(&xas, item, end, iftag) {
    185          xas_set_mark(&xas, thentag);
    189          xas_pause(&xas);
    190          xas_unlock_irq(&xas);
    192          xas_lock_irq(&xas);
    194  xas_unlock_irq(&xas);
    257  XA_STATE(xas, xa, 0);
    260  xas_for_each(&xas, entry, ULONG_MAX) {
    [all …]

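tag_tagged_items() demonstrates the standard batching idiom for long marked walks: periodically xas_pause(), drop the lock, reschedule, and re-take it, after which the iteration resumes where it left off. A condensed sketch of that loop (the mark names and the batch size of 128 are illustrative):

    #include <linux/xarray.h>
    #include <linux/sched.h>

    /* Sketch: copy XA_MARK_0 to XA_MARK_1 on entries in [start, end],
     * dropping the lock every 128 entries to bound hold time.
     */
    static unsigned int tag_in_batches(struct xarray *xa, unsigned long start,
                                       unsigned long end)
    {
            XA_STATE(xas, xa, start);
            void *entry;
            unsigned int tagged = 0;

            xas_lock_irq(&xas);
            xas_for_each_marked(&xas, entry, end, XA_MARK_0) {
                    xas_set_mark(&xas, XA_MARK_1);
                    if (++tagged % 128)
                            continue;
                    xas_pause(&xas);        /* make dropping the lock safe */
                    xas_unlock_irq(&xas);
                    cond_resched();
                    xas_lock_irq(&xas);
            }
            xas_unlock_irq(&xas);
            return tagged;
    }
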
iteration_check_2.c  [iterator()]

     15  XA_STATE(xas, arg, 0);
     21  xas_set(&xas, 0);
     23  xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
     26  assert(xas.xa_index >= 100);

regression1.c  [find_get_pages()]

     82  XA_STATE(xas, &mt_tree, start);
     87  xas_for_each(&xas, page, ULONG_MAX) {
     88          if (xas_retry(&xas, page))
     99          if (unlikely(page != xas_reload(&xas)))
    108  xas_reset(&xas);

/openbmc/linux/fs/cachefiles/

ondemand.c  [cachefiles_ondemand_fd_release(), cachefiles_ondemand_fd_ioctl(), cachefiles_ondemand_copen()]

     26  XA_STATE(xas, NULL, 0);
     33  xas.xa = &cache->reqs;
     43  xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) {
     47          xas_store(&xas, NULL);
    125  XA_STATE(xas, &cache->reqs, id);
    134  req = xas_load(&xas);
    140  xas_store(&xas, NULL);
    170  XA_STATE(xas, &cache->reqs, 0);
    195  xas.xa_index = id;
    196  req = xas_load(&xas);
    [all …]

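cachefiles_ondemand_fd_ioctl() resolves an ID to a request and removes it by storing NULL at the same index under a single lock hold, making the lookup and the erase atomic. A sketch of that step with illustrative names:

    #include <linux/xarray.h>

    /* Sketch: atomically take ownership of the entry stored at "id". */
    static void *take_entry(struct xarray *xa, unsigned long id)
    {
            XA_STATE(xas, xa, id);
            void *entry;

            xas_lock(&xas);
            entry = xas_load(&xas);
            if (entry)
                    xas_store(&xas, NULL);  /* erase while still holding the lock */
            xas_unlock(&xas);
            return entry;
    }
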
/openbmc/linux/mm/

memfd.c  [memfd_tag_pins(), memfd_wait_for_pins()]

     32  static void memfd_tag_pins(struct xa_state *xas)
     40          xas_lock_irq(xas);
     41          xas_for_each(xas, page, ULONG_MAX) {
     49                  xas_set_mark(xas, MEMFD_TAG_PINNED);
     51                  xas_set(xas, page->index + cache_count);
     58                  xas_pause(xas);
     59                  xas_unlock_irq(xas);
     61                  xas_lock_irq(xas);
     63          xas_unlock_irq(xas);
     77  XA_STATE(xas, &mapping->i_pages, 0);
    [all …]

list_lru.c  [memcg_destroy_list_lru(), memcg_list_lru_alloc()]

    376  XA_STATE(xas, &lru->xa, 0);
    382  xas_lock_irq(&xas);
    383  xas_for_each(&xas, mlru, ULONG_MAX) {
    385          xas_store(&xas, NULL);
    387  xas_unlock_irq(&xas);
    481  XA_STATE(xas, &lru->xa, 0);
    510  xas_lock_irqsave(&xas, flags);
    515  xas_set(&xas, index);
    517  if (unlikely(index < 0 || xas_error(&xas) || xas_load(&xas))) {
    520          xas_store(&xas, mlru);
    [all …]

filemap.c  [page_cache_delete(), page_cache_delete_batch(), filemap_range_has_page()]

    129  XA_STATE(xas, &mapping->i_pages, folio->index);
    132  mapping_set_update(&xas, mapping);
    136  xas_set_order(&xas, folio->index, folio_order(folio));
    142  xas_store(&xas, shadow);
    143  xas_init_marks(&xas);
    282  XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index);
    287  mapping_set_update(&xas, mapping);
    288  xas_for_each(&xas, folio, ULONG_MAX) {
    314          xas_store(&xas, NULL);
    476  XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
    [all …]

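page_cache_delete() replaces the folio with a shadow entry (a value entry carrying reclaim information) rather than plain NULL, then wipes the slot's marks with xas_init_marks(). A sketch of replacing an entry with a value while clearing marks; the shadow encoding here is just a stand-in:

    #include <linux/xarray.h>

    /* Sketch: swap the entry at "index" for a small integer "shadow" value. */
    static void *replace_with_value(struct xarray *xa, unsigned long index,
                                    unsigned long shadow)
    {
            XA_STATE(xas, xa, index);
            void *old;

            xas_lock_irq(&xas);
            old = xas_store(&xas, xa_mk_value(shadow)); /* value, not a pointer */
            xas_init_marks(&xas);   /* drop any marks the old entry carried */
            xas_unlock_irq(&xas);
            return old;
    }
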
swap_state.c  [add_to_swap_cache()]

     91  XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio));
     95  xas_set_update(&xas, workingset_update_node);
    106  xas_lock_irq(&xas);
    107  xas_create_range(&xas);
    108  if (xas_error(&xas))
    111  VM_BUG_ON_FOLIO(xas.xa_index != idx + i, folio);
    112  old = xas_load(&xas);
    117  xas_store(&xas, folio);
    118  xas_next(&xas);
    124  xas_unlock_irq(&xas);
    [all …]

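add_to_swap_cache() stores one folio at 2^order consecutive indices: XA_STATE_ORDER primes the state, xas_create_range() preallocates nodes for the whole span, and an xas_store()/xas_next() loop fills the slots, all wrapped in the usual xas_nomem() retry. A simplified sketch of that shape, without the error unwinding:

    #include <linux/xarray.h>

    /* Sketch: place "entry" in all 1 << order slots starting at "index". */
    static int store_range(struct xarray *xa, unsigned long index,
                           unsigned int order, void *entry, gfp_t gfp)
    {
            XA_STATE_ORDER(xas, xa, index, order);
            unsigned long i, nr = 1UL << order;

            do {
                    xas_lock_irq(&xas);
                    xas_create_range(&xas); /* build nodes for the whole span */
                    if (xas_error(&xas))
                            goto unlock;
                    for (i = 0; i < nr; i++) {
                            xas_store(&xas, entry);
                            xas_next(&xas);
                    }
    unlock:
                    xas_unlock_irq(&xas);
            } while (xas_nomem(&xas, gfp));

            return xas_error(&xas);
    }
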
khugepaged.c  [collapse_file()]

   1791  XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
   1813  xas_lock_irq(&xas);
   1814  xas_create_range(&xas);
   1815  if (!xas_error(&xas))
   1817  xas_unlock_irq(&xas);
   1818  if (!xas_nomem(&xas, GFP_KERNEL)) {
   1825  xas_set(&xas, index);
   1826  page = xas_load(&xas);
   1828  VM_BUG_ON(index != xas.xa_index);
   1837  if (!xas_next_entry(&xas, end - 1)) {
   [all …]

page-writeback.c  [tag_pages_for_writeback(), __folio_start_writeback()]

   2366  XA_STATE(xas, &mapping->i_pages, start);
   2370  xas_lock_irq(&xas);
   2371  xas_for_each_marked(&xas, page, end, PAGECACHE_TAG_DIRTY) {
   2372          xas_set_mark(&xas, PAGECACHE_TAG_TOWRITE);
   2376          xas_pause(&xas);
   2377          xas_unlock_irq(&xas);
   2379          xas_lock_irq(&xas);
   2381  xas_unlock_irq(&xas);
   3019  XA_STATE(xas, &mapping->i_pages, folio_index(folio));
   3024  xas_lock_irqsave(&xas, flags);
   [all …]

/openbmc/linux/drivers/infiniband/sw/rxe/

rxe_mr.c  [rxe_mr_fill_pages_from_sgt(), rxe_mr_alloc()]

     96  XA_STATE(xas, &mr->page_list, 0);
    106  xas_lock(&xas);
    112  xas_set_err(&xas, -EINVAL);
    116  xas_store(&xas, page);
    117  if (xas_error(&xas))
    119  xas_next(&xas);
    123  xas_unlock(&xas);
    124  } while (xas_nomem(&xas, GFP_KERNEL));
    126  return xas_error(&xas);
    161  XA_STATE(xas, &mr->page_list, 0);
    [all …]

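rxe_mr_fill_pages_from_sgt() reports its own validation failure through the xa_state itself: xas_set_err() records the error, the loop breaks, and xas_nomem()/xas_error() at the bottom distinguish "retry after allocation" from "give up". A sketch of that shape (the NULL-page check is a placeholder for real validation):

    #include <linux/xarray.h>

    /* Sketch: store pages[0..n) at consecutive indices, aborting on a bad one. */
    static int fill_pages(struct xarray *xa, struct page **pages, unsigned int n)
    {
            XA_STATE(xas, xa, 0);
            unsigned int i = 0;     /* persists across xas_nomem() retries */

            do {
                    xas_lock(&xas);
                    while (i < n) {
                            if (!pages[i]) {        /* placeholder check */
                                    xas_set_err(&xas, -EINVAL);
                                    break;
                            }
                            xas_store(&xas, pages[i]);
                            if (xas_error(&xas))
                                    break;          /* likely -ENOMEM; retry */
                            xas_next(&xas);
                            i++;
                    }
                    xas_unlock(&xas);
            } while (xas_nomem(&xas, GFP_KERNEL));

            return xas_error(&xas);
    }
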
/openbmc/linux/drivers/infiniband/core/

ib_core_uverbs.c  [rdma_user_mmap_entry_insert_range()]

    268  XA_STATE(xas, &ucontext->mmap_xa, min_pgoff);
    294  xas_find_marked(&xas, max_pgoff, XA_FREE_MARK);
    295  if (xas.xa_node == XAS_RESTART)
    298  xa_first = xas.xa_index;
    308  xas_next_entry(&xas, xa_last - 1);
    309  if (xas.xa_node == XAS_BOUNDS || xas.xa_index >= xa_last)

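rdma_user_mmap_entry_insert_range() open-codes a search for a run of free indices using xas_find_marked() with XA_FREE_MARK. When a single free slot suffices, the same free-mark machinery is reachable through the higher-level xa_alloc(); a hedged sketch of that simpler route (the array name and limits are illustrative, and this is not what the driver itself does):

    #include <linux/xarray.h>

    /* An allocating array tracks free slots with XA_FREE_MARK internally. */
    static DEFINE_XARRAY_ALLOC(example_xa);     /* illustrative name */

    static int insert_anywhere(void *entry, u32 *out_id)
    {
            /* Find a free index in [0, 4095], store entry, return the index. */
            return xa_alloc(&example_xa, out_id, entry,
                            XA_LIMIT(0, 4095), GFP_KERNEL);
    }
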
/openbmc/linux/drivers/iommu/iommufd/

pages.c  [batch_from_xarray(), batch_from_xarray_clear(), clear_xarray()]

    523  XA_STATE(xas, xa, start_index);
    528  entry = xas_next(&xas);
    529  if (xas_retry(&xas, entry))
    544  XA_STATE(xas, xa, start_index);
    547  xas_lock(&xas);
    549  entry = xas_next(&xas);
    550  if (xas_retry(&xas, entry))
    555  xas_store(&xas, NULL);
    560  xas_unlock(&xas);
    566  XA_STATE(xas, xa, start_index);
    [all …]

/openbmc/linux/arch/x86/kernel/cpu/sgx/

encl.c  [sgx_encl_may_map(), sgx_encl_release()]

    530  XA_STATE(xas, &encl->page_array, PFN_DOWN(start));
    545  xas_lock(&xas);
    546  xas_for_each(&xas, page, PFN_DOWN(end - 1)) {
    554          xas_pause(&xas);
    555          xas_unlock(&xas);
    561          xas_lock(&xas);
    564  xas_unlock(&xas);
    708  XA_STATE(xas, &encl->page_array, PFN_DOWN(encl->base));
    710  xas_lock(&xas);
    711  xas_for_each(&xas, entry, max_page_index) {
    [all …]

/openbmc/linux/fs/smb/client/

file.c  [cifs_undirty_folios(), cifs_pages_written_back(), cifs_pages_write_failed()]

     49  XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);
     54  xas_for_each_marked(&xas, folio, end, PAGECACHE_TAG_DIRTY) {
     55          if (xas_retry(&xas, folio))
     57          xas_pause(&xas);
     77  XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);
     85  xas_for_each(&xas, folio, end) {
     86          if (xas_retry(&xas, folio))
    110  XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);
    118  xas_for_each(&xas, folio, end) {
    119          if (xas_retry(&xas, folio))
    [all …]

/openbmc/linux/fs/afs/

write.c  [afs_pages_written_back(), afs_extend_writeback()]

    296  XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);
    304  xas_for_each(&xas, folio, end) {
    480  XA_STATE(xas, &mapping->i_pages, index);
    490  xas_for_each(&xas, folio, ULONG_MAX) {
    492          if (xas_retry(&xas, folio))
    500          xas_reset(&xas);
    505          if (unlikely(folio != xas_reload(&xas))) {
    547          xas_pause(&xas);

/openbmc/linux/fs/netfs/

buffered_read.c  [netfs_rreq_unlock_folios()]

     25  XA_STATE(xas, &rreq->mapping->i_pages, start_page);
     47  xas_for_each(&xas, folio, last_page) {
     52          if (xas_retry(&xas, folio))