Lines Matching +full:- +full:grp (drivers/infiniband/hw/hfi1/user_exp_rcv.c)

1 // SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
4 * Copyright(c) 2015-2018 Intel Corporation.
19 u32 rcventry, struct tid_group *grp,
30 struct tid_group *grp, u16 count,
55 fd->entry_to_rb = kcalloc(uctxt->expected_count, in hfi1_user_exp_rcv_init()
58 if (!fd->entry_to_rb) in hfi1_user_exp_rcv_init()
59 return -ENOMEM; in hfi1_user_exp_rcv_init()
61 if (!HFI1_CAP_UGET_MASK(uctxt->flags, TID_UNMAP)) { in hfi1_user_exp_rcv_init()
62 fd->invalid_tid_idx = 0; in hfi1_user_exp_rcv_init()
63 fd->invalid_tids = kcalloc(uctxt->expected_count, in hfi1_user_exp_rcv_init()
64 sizeof(*fd->invalid_tids), in hfi1_user_exp_rcv_init()
66 if (!fd->invalid_tids) { in hfi1_user_exp_rcv_init()
67 kfree(fd->entry_to_rb); in hfi1_user_exp_rcv_init()
68 fd->entry_to_rb = NULL; in hfi1_user_exp_rcv_init()
69 return -ENOMEM; in hfi1_user_exp_rcv_init()
71 fd->use_mn = true; in hfi1_user_exp_rcv_init()
78 * is enabled. To help with that, we calculate a per-process in hfi1_user_exp_rcv_init()
87 spin_lock(&fd->tid_lock); in hfi1_user_exp_rcv_init()
88 if (uctxt->subctxt_cnt && fd->use_mn) { in hfi1_user_exp_rcv_init()
91 fd->tid_limit = uctxt->expected_count / uctxt->subctxt_cnt; in hfi1_user_exp_rcv_init()
92 remainder = uctxt->expected_count % uctxt->subctxt_cnt; in hfi1_user_exp_rcv_init()
93 if (remainder && fd->subctxt < remainder) in hfi1_user_exp_rcv_init()
94 fd->tid_limit++; in hfi1_user_exp_rcv_init()
96 fd->tid_limit = uctxt->expected_count; in hfi1_user_exp_rcv_init()
98 spin_unlock(&fd->tid_lock); in hfi1_user_exp_rcv_init()
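
The hfi1_user_exp_rcv_init() lines above split the context's expected-receive entry count among its subcontexts: each subcontext gets expected_count / subctxt_cnt entries and the first expected_count % subctxt_cnt subcontexts take one extra, but only when subcontexts exist and the MMU-notifier path (fd->use_mn) is in use; otherwise the whole expected_count is the limit. A minimal, userspace-compilable sketch of that arithmetic follows; the function name tid_limit_for() and the standalone framing are illustrative, not part of the driver.

    #include <stdio.h>

    /* Illustrative sketch, not driver code: reproduce the per-subcontext
     * TID limit computed in hfi1_user_exp_rcv_init(). */
    static unsigned int tid_limit_for(unsigned int expected_count,
                                      unsigned int subctxt_cnt,
                                      unsigned int subctxt,
                                      int use_mn)
    {
        unsigned int limit, remainder;

        if (!subctxt_cnt || !use_mn)
            return expected_count;   /* whole context gets everything */

        limit = expected_count / subctxt_cnt;
        remainder = expected_count % subctxt_cnt;
        if (remainder && subctxt < remainder)
            limit++;                 /* early subcontexts absorb the remainder */
        return limit;
    }

    int main(void)
    {
        /* e.g. 2048 expected entries, 3 subcontexts -> 683, 683, 682 */
        for (unsigned int s = 0; s < 3; s++)
            printf("subctxt %u: tid_limit %u\n", s, tid_limit_for(2048, 3, s, 1));
        return 0;
    }
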
105 struct hfi1_ctxtdata *uctxt = fd->uctxt; in hfi1_user_exp_rcv_free()
107 mutex_lock(&uctxt->exp_mutex); in hfi1_user_exp_rcv_free()
108 if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list)) in hfi1_user_exp_rcv_free()
109 unlock_exp_tids(uctxt, &uctxt->tid_full_list, fd); in hfi1_user_exp_rcv_free()
110 if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list)) in hfi1_user_exp_rcv_free()
111 unlock_exp_tids(uctxt, &uctxt->tid_used_list, fd); in hfi1_user_exp_rcv_free()
112 mutex_unlock(&uctxt->exp_mutex); in hfi1_user_exp_rcv_free()
114 kfree(fd->invalid_tids); in hfi1_user_exp_rcv_free()
115 fd->invalid_tids = NULL; in hfi1_user_exp_rcv_free()
117 kfree(fd->entry_to_rb); in hfi1_user_exp_rcv_free()
118 fd->entry_to_rb = NULL; in hfi1_user_exp_rcv_free()
140 struct hfi1_devdata *dd = fd->uctxt->dd; in unpin_rcv_pages()
144 dma_unmap_single(&dd->pcidev->dev, node->dma_addr, in unpin_rcv_pages()
145 node->npages * PAGE_SIZE, DMA_FROM_DEVICE); in unpin_rcv_pages()
146 pages = &node->pages[idx]; in unpin_rcv_pages()
149 pages = &tidbuf->pages[idx]; in unpin_rcv_pages()
150 mm = current->mm; in unpin_rcv_pages()
153 fd->tid_n_pinned -= npages; in unpin_rcv_pages()
162 unsigned int npages = tidbuf->npages; in pin_rcv_pages()
163 unsigned long vaddr = tidbuf->vaddr; in pin_rcv_pages()
165 struct hfi1_devdata *dd = fd->uctxt->dd; in pin_rcv_pages()
167 if (npages > fd->uctxt->expected_count) { in pin_rcv_pages()
169 return -EINVAL; in pin_rcv_pages()
175 return -ENOMEM; in pin_rcv_pages()
182 if (!hfi1_can_pin_pages(dd, current->mm, fd->tid_n_pinned, npages)) { in pin_rcv_pages()
184 return -ENOMEM; in pin_rcv_pages()
187 pinned = hfi1_acquire_user_pages(current->mm, vaddr, npages, true, pages); in pin_rcv_pages()
192 tidbuf->pages = pages; in pin_rcv_pages()
193 fd->tid_n_pinned += pinned; in pin_rcv_pages()
202 * 1. List of empty groups - tid_group_list
206 * 2. List of partially used groups - tid_used_list
210 * 3. List of full groups - tid_full_list
221 * * .count - number of pages in this set
222 * * .idx - starting index into struct page ** array
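
The comment fragments above describe the three lists a TID group cycles through (tid_group_list for free groups, tid_used_list for partially used ones, tid_full_list for fully programmed ones) and the two fields of a page set. The sketch below restates those structures using only the fields visible elsewhere in this listing (base/size/used/map for a group, idx/count for a page set); it is an illustration, not the driver's definitions, and the field widths are chosen arbitrarily.

    #include <stdio.h>

    /* Simplified illustration only; fields inferred from the code in this
     * listing, not copied from the driver's headers. */
    struct tid_group_sketch {
        unsigned int   base;  /* first RcvArray entry owned by the group     */
        unsigned char  size;  /* entries in the group                        */
        unsigned char  used;  /* entries currently programmed                */
        unsigned char  map;   /* bit i set => entry (base + i) is programmed */
    };

    struct tid_pageset_sketch {
        unsigned short idx;   /* start index into the pinned struct page **  */
        unsigned short count; /* physically contiguous pages in this set     */
    };

    /* Group lifecycle, as described in the comment above:
     *   tid_group_list (used == 0)  ->  tid_used_list (0 < used < size)
     *   tid_used_list               ->  tid_full_list (used == size)
     * and back again as clear_tid_node() releases entries. */

    int main(void)
    {
        struct tid_group_sketch grp = { .base = 128, .size = 8 };
        unsigned int idx = 3;

        grp.map |= 1u << idx;    /* mirrors grp->map |= 1 << useidx++      */
        grp.used++;
        grp.map &= ~(1u << idx); /* mirrors the clear in clear_tid_node() */
        grp.used--;
        printf("base %u, used %u, map 0x%x\n", grp.base, grp.used, grp.map);
        return 0;
    }
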
250 struct hfi1_ctxtdata *uctxt = fd->uctxt; in hfi1_user_exp_rcv_setup()
251 struct hfi1_devdata *dd = uctxt->dd; in hfi1_user_exp_rcv_setup()
258 if (!PAGE_ALIGNED(tinfo->vaddr)) in hfi1_user_exp_rcv_setup()
259 return -EINVAL; in hfi1_user_exp_rcv_setup()
260 if (tinfo->length == 0) in hfi1_user_exp_rcv_setup()
261 return -EINVAL; in hfi1_user_exp_rcv_setup()
265 return -ENOMEM; in hfi1_user_exp_rcv_setup()
267 mutex_init(&tidbuf->cover_mutex); in hfi1_user_exp_rcv_setup()
268 tidbuf->vaddr = tinfo->vaddr; in hfi1_user_exp_rcv_setup()
269 tidbuf->length = tinfo->length; in hfi1_user_exp_rcv_setup()
270 tidbuf->npages = num_user_pages(tidbuf->vaddr, tidbuf->length); in hfi1_user_exp_rcv_setup()
271 tidbuf->psets = kcalloc(uctxt->expected_count, sizeof(*tidbuf->psets), in hfi1_user_exp_rcv_setup()
273 if (!tidbuf->psets) { in hfi1_user_exp_rcv_setup()
274 ret = -ENOMEM; in hfi1_user_exp_rcv_setup()
278 if (fd->use_mn) { in hfi1_user_exp_rcv_setup()
280 &tidbuf->notifier, current->mm, in hfi1_user_exp_rcv_setup()
281 tidbuf->vaddr, tidbuf->npages * PAGE_SIZE, in hfi1_user_exp_rcv_setup()
285 mmu_seq = mmu_interval_read_begin(&tidbuf->notifier); in hfi1_user_exp_rcv_setup()
290 ret = (pinned < 0) ? pinned : -ENOSPC; in hfi1_user_exp_rcv_setup()
295 tidbuf->n_psets = find_phys_blocks(tidbuf, pinned); in hfi1_user_exp_rcv_setup()
298 spin_lock(&fd->tid_lock); in hfi1_user_exp_rcv_setup()
299 if (fd->tid_used + tidbuf->n_psets > fd->tid_limit) in hfi1_user_exp_rcv_setup()
300 pageset_count = fd->tid_limit - fd->tid_used; in hfi1_user_exp_rcv_setup()
302 pageset_count = tidbuf->n_psets; in hfi1_user_exp_rcv_setup()
303 fd->tid_used += pageset_count; in hfi1_user_exp_rcv_setup()
304 spin_unlock(&fd->tid_lock); in hfi1_user_exp_rcv_setup()
307 ret = -ENOSPC; in hfi1_user_exp_rcv_setup()
311 ngroups = pageset_count / dd->rcv_entries.group_size; in hfi1_user_exp_rcv_setup()
314 ret = -ENOMEM; in hfi1_user_exp_rcv_setup()
324 mutex_lock(&uctxt->exp_mutex); in hfi1_user_exp_rcv_setup()
329 while (ngroups && uctxt->tid_group_list.count) { in hfi1_user_exp_rcv_setup()
330 struct tid_group *grp = in hfi1_user_exp_rcv_setup() local
331 tid_group_pop(&uctxt->tid_group_list); in hfi1_user_exp_rcv_setup()
333 ret = program_rcvarray(fd, tidbuf, grp, in hfi1_user_exp_rcv_setup()
334 dd->rcv_entries.group_size, in hfi1_user_exp_rcv_setup()
338 * entries for the entire group, reset the grp fields in hfi1_user_exp_rcv_setup()
339 * and add the grp back to the free group list. in hfi1_user_exp_rcv_setup()
342 tid_group_add_tail(grp, &uctxt->tid_group_list); in hfi1_user_exp_rcv_setup()
348 tid_group_add_tail(grp, &uctxt->tid_full_list); in hfi1_user_exp_rcv_setup()
349 ngroups--; in hfi1_user_exp_rcv_setup()
354 struct tid_group *grp, *ptr; in hfi1_user_exp_rcv_setup() local
360 if (!uctxt->tid_used_list.count || need_group) { in hfi1_user_exp_rcv_setup()
361 if (!uctxt->tid_group_list.count) in hfi1_user_exp_rcv_setup()
364 grp = tid_group_pop(&uctxt->tid_group_list); in hfi1_user_exp_rcv_setup()
365 tid_group_add_tail(grp, &uctxt->tid_used_list); in hfi1_user_exp_rcv_setup()
369 * There is an optimization opportunity here - instead of in hfi1_user_exp_rcv_setup()
373 list_for_each_entry_safe(grp, ptr, &uctxt->tid_used_list.list, in hfi1_user_exp_rcv_setup()
375 unsigned use = min_t(unsigned, pageset_count - tididx, in hfi1_user_exp_rcv_setup()
376 grp->size - grp->used); in hfi1_user_exp_rcv_setup()
378 ret = program_rcvarray(fd, tidbuf, grp, in hfi1_user_exp_rcv_setup()
387 if (grp->used == grp->size) in hfi1_user_exp_rcv_setup()
388 tid_group_move(grp, in hfi1_user_exp_rcv_setup()
389 &uctxt->tid_used_list, in hfi1_user_exp_rcv_setup()
390 &uctxt->tid_full_list); in hfi1_user_exp_rcv_setup()
408 mutex_unlock(&uctxt->exp_mutex); in hfi1_user_exp_rcv_setup()
415 ret = -ENOSPC; in hfi1_user_exp_rcv_setup()
420 spin_lock(&fd->tid_lock); in hfi1_user_exp_rcv_setup()
421 fd->tid_used -= pageset_count - tididx; in hfi1_user_exp_rcv_setup()
422 spin_unlock(&fd->tid_lock); in hfi1_user_exp_rcv_setup()
425 unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages, pinned - mapped_pages, in hfi1_user_exp_rcv_setup()
428 if (fd->use_mn) { in hfi1_user_exp_rcv_setup()
432 mutex_lock(&tidbuf->cover_mutex); in hfi1_user_exp_rcv_setup()
433 fail = mmu_interval_read_retry(&tidbuf->notifier, mmu_seq); in hfi1_user_exp_rcv_setup()
434 mutex_unlock(&tidbuf->cover_mutex); in hfi1_user_exp_rcv_setup()
437 ret = -EBUSY; in hfi1_user_exp_rcv_setup()
442 tinfo->tidcnt = tididx; in hfi1_user_exp_rcv_setup()
443 tinfo->length = mapped_pages * PAGE_SIZE; in hfi1_user_exp_rcv_setup()
445 if (copy_to_user(u64_to_user_ptr(tinfo->tidlist), in hfi1_user_exp_rcv_setup()
447 ret = -EFAULT; in hfi1_user_exp_rcv_setup()
451 if (fd->use_mn) in hfi1_user_exp_rcv_setup()
452 mmu_interval_notifier_remove(&tidbuf->notifier); in hfi1_user_exp_rcv_setup()
453 kfree(tidbuf->pages); in hfi1_user_exp_rcv_setup()
454 kfree(tidbuf->psets); in hfi1_user_exp_rcv_setup()
461 tinfo->tidlist = (unsigned long)tidlist; in hfi1_user_exp_rcv_setup()
463 tinfo->tidlist = 0; in hfi1_user_exp_rcv_setup()
467 spin_lock(&fd->tid_lock); in hfi1_user_exp_rcv_setup()
468 fd->tid_used -= pageset_count; in hfi1_user_exp_rcv_setup()
469 spin_unlock(&fd->tid_lock); in hfi1_user_exp_rcv_setup()
471 if (fd->use_mn) in hfi1_user_exp_rcv_setup()
472 mmu_interval_notifier_remove(&tidbuf->notifier); in hfi1_user_exp_rcv_setup()
476 kfree(tidbuf->pages); in hfi1_user_exp_rcv_setup()
477 kfree(tidbuf->psets); in hfi1_user_exp_rcv_setup()
487 struct hfi1_ctxtdata *uctxt = fd->uctxt; in hfi1_user_exp_rcv_clear()
491 if (unlikely(tinfo->tidcnt > fd->tid_used)) in hfi1_user_exp_rcv_clear()
492 return -EINVAL; in hfi1_user_exp_rcv_clear()
494 tidinfo = memdup_user(u64_to_user_ptr(tinfo->tidlist), in hfi1_user_exp_rcv_clear()
495 sizeof(tidinfo[0]) * tinfo->tidcnt); in hfi1_user_exp_rcv_clear()
499 mutex_lock(&uctxt->exp_mutex); in hfi1_user_exp_rcv_clear()
500 for (tididx = 0; tididx < tinfo->tidcnt; tididx++) { in hfi1_user_exp_rcv_clear()
508 spin_lock(&fd->tid_lock); in hfi1_user_exp_rcv_clear()
509 fd->tid_used -= tididx; in hfi1_user_exp_rcv_clear()
510 spin_unlock(&fd->tid_lock); in hfi1_user_exp_rcv_clear()
511 tinfo->tidcnt = tididx; in hfi1_user_exp_rcv_clear()
512 mutex_unlock(&uctxt->exp_mutex); in hfi1_user_exp_rcv_clear()
521 struct hfi1_ctxtdata *uctxt = fd->uctxt; in hfi1_user_exp_rcv_invalid()
522 unsigned long *ev = uctxt->dd->events + in hfi1_user_exp_rcv_invalid()
523 (uctxt_offset(uctxt) + fd->subctxt); in hfi1_user_exp_rcv_invalid()
533 array = kcalloc(uctxt->expected_count, sizeof(*array), GFP_KERNEL); in hfi1_user_exp_rcv_invalid()
535 return -EFAULT; in hfi1_user_exp_rcv_invalid()
537 spin_lock(&fd->invalid_lock); in hfi1_user_exp_rcv_invalid()
538 if (fd->invalid_tid_idx) { in hfi1_user_exp_rcv_invalid()
539 memcpy(array, fd->invalid_tids, sizeof(*array) * in hfi1_user_exp_rcv_invalid()
540 fd->invalid_tid_idx); in hfi1_user_exp_rcv_invalid()
541 memset(fd->invalid_tids, 0, sizeof(*fd->invalid_tids) * in hfi1_user_exp_rcv_invalid()
542 fd->invalid_tid_idx); in hfi1_user_exp_rcv_invalid()
543 tinfo->tidcnt = fd->invalid_tid_idx; in hfi1_user_exp_rcv_invalid()
544 fd->invalid_tid_idx = 0; in hfi1_user_exp_rcv_invalid()
551 tinfo->tidcnt = 0; in hfi1_user_exp_rcv_invalid()
553 spin_unlock(&fd->invalid_lock); in hfi1_user_exp_rcv_invalid()
555 if (tinfo->tidcnt) { in hfi1_user_exp_rcv_invalid()
556 if (copy_to_user((void __user *)tinfo->tidlist, in hfi1_user_exp_rcv_invalid()
557 array, sizeof(*array) * tinfo->tidcnt)) in hfi1_user_exp_rcv_invalid()
558 ret = -EFAULT; in hfi1_user_exp_rcv_invalid()
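
A note on the shape of hfi1_user_exp_rcv_invalid() above: the pending invalidations are first copied into a kcalloc'd scratch array while fd->invalid_lock is held, and only handed to userspace with copy_to_user() after the lock is dropped, because copy_to_user() may fault and sleep and would otherwise stall the MMU-notifier path that takes the same lock. A generic, userspace-compilable sketch of that snapshot-then-copy pattern follows; the pthread mutex and printf stand in for the spinlock and copy_to_user(), and all names are illustrative.

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_TIDS 8

    static pthread_mutex_t invalid_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int invalid_tids[MAX_TIDS];
    static unsigned int invalid_tid_idx;

    /* Snapshot the pending list under the lock, reset it, then do the slow
     * hand-off outside the lock (copy_to_user() in the driver). */
    static void read_invalid_tids(void)
    {
        unsigned int snapshot[MAX_TIDS];
        unsigned int count;

        pthread_mutex_lock(&invalid_lock);
        count = invalid_tid_idx;
        memcpy(snapshot, invalid_tids, count * sizeof(snapshot[0]));
        memset(invalid_tids, 0, count * sizeof(invalid_tids[0]));
        invalid_tid_idx = 0;
        pthread_mutex_unlock(&invalid_lock);

        for (unsigned int i = 0; i < count; i++)
            printf("invalidated tid 0x%x\n", snapshot[i]);
    }

    int main(void)
    {
        invalid_tids[invalid_tid_idx++] = 0x1001;  /* as if queued by the notifier */
        invalid_tids[invalid_tid_idx++] = 0x1002;
        read_invalid_tids();
        return 0;
    }
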
569 struct page **pages = tidbuf->pages; in find_phys_blocks()
570 struct tid_pageset *list = tidbuf->psets; in find_phys_blocks()
616 pagecount -= maxpages; in find_phys_blocks()
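
find_phys_blocks(), whose inner loop the "pagecount -= maxpages;" line above belongs to, walks the pinned pages, detects physically contiguous runs, and splits each run into sets whose byte size is a power of two and no larger than the per-entry maximum the hardware accepts. Below is a standalone sketch of that splitting step; the 4 KiB page size and the 2 MiB cap (MAX_EXPECTED_BUFFER in the driver) are assumed values, and the helper names are illustrative.

    #include <stdio.h>

    #define PAGE_SZ          4096UL             /* assumed page size */
    /* Per-entry cap: MAX_EXPECTED_BUFFER in the driver, assumed 2 MiB here. */
    #define MAX_ENTRY_BYTES  (2048UL * 1024UL)

    /* Largest power of two <= x (x > 0). */
    static unsigned long pow2_floor(unsigned long x)
    {
        unsigned long p = 1;

        while (p * 2 <= x)
            p *= 2;
        return p;
    }

    /* Illustrative only: split one physically contiguous run of pages into
     * sets, mirroring the loop that contains "pagecount -= maxpages;". */
    static void split_run(unsigned long pagecount)
    {
        while (pagecount) {
            unsigned long bufsize = pagecount * PAGE_SZ;
            unsigned long maxpages = pagecount;

            if (bufsize > MAX_ENTRY_BYTES)
                maxpages = MAX_ENTRY_BYTES / PAGE_SZ;
            else if (bufsize & (bufsize - 1))       /* not a power of two */
                maxpages = pow2_floor(bufsize) / PAGE_SZ;

            printf("pageset of %lu pages\n", maxpages);
            pagecount -= maxpages;
        }
    }

    int main(void)
    {
        split_run(700);  /* 700 contiguous pages -> 512 + 128 + 32 + 16 + 8 + 4 */
        return 0;
    }
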
631 * program_rcvarray() - program an RcvArray group with receive buffers
637 * @grp: RcvArray group
646 * group 'grp'. To make best use of write-combining writes, the function will
652 * -EINVAL if the requested count is larger than the size of the group,
653 * -ENOMEM or -EFAULT on error from set_rcvarray_entry(), or
657 struct tid_group *grp, u16 count, in program_rcvarray() argument
661 struct hfi1_ctxtdata *uctxt = fd->uctxt; in program_rcvarray()
662 struct hfi1_devdata *dd = uctxt->dd; in program_rcvarray()
669 if (count > grp->size) in program_rcvarray()
670 return -EINVAL; in program_rcvarray()
673 for (idx = 0; idx < grp->size; idx++) { in program_rcvarray()
674 if (!(grp->map & (1 << idx))) { in program_rcvarray()
678 rcv_array_wc_fill(dd, grp->base + idx); in program_rcvarray()
690 if (useidx >= grp->size) { in program_rcvarray()
692 } else if (grp->map & (1 << useidx)) { in program_rcvarray()
693 rcv_array_wc_fill(dd, grp->base + useidx); in program_rcvarray()
698 rcventry = grp->base + useidx; in program_rcvarray()
699 npages = tbuf->psets[setidx].count; in program_rcvarray()
700 pageidx = tbuf->psets[setidx].idx; in program_rcvarray()
703 rcventry, grp, pageidx, in program_rcvarray()
709 tidinfo = create_tid(rcventry - uctxt->expected_base, npages); in program_rcvarray()
711 grp->used++; in program_rcvarray()
712 grp->map |= 1 << useidx++; in program_rcvarray()
717 for (; useidx < grp->size; useidx++) in program_rcvarray()
718 rcv_array_wc_fill(dd, grp->base + useidx); in program_rcvarray()
725 u32 rcventry, struct tid_group *grp, in set_rcvarray_entry() argument
729 struct hfi1_ctxtdata *uctxt = fd->uctxt; in set_rcvarray_entry()
731 struct hfi1_devdata *dd = uctxt->dd; in set_rcvarray_entry()
733 struct page **pages = tbuf->pages + pageidx; in set_rcvarray_entry()
741 return -ENOMEM; in set_rcvarray_entry()
743 phys = dma_map_single(&dd->pcidev->dev, __va(page_to_phys(pages[0])), in set_rcvarray_entry()
745 if (dma_mapping_error(&dd->pcidev->dev, phys)) { in set_rcvarray_entry()
749 return -EFAULT; in set_rcvarray_entry()
752 node->fdata = fd; in set_rcvarray_entry()
753 mutex_init(&node->invalidate_mutex); in set_rcvarray_entry()
754 node->phys = page_to_phys(pages[0]); in set_rcvarray_entry()
755 node->npages = npages; in set_rcvarray_entry()
756 node->rcventry = rcventry; in set_rcvarray_entry()
757 node->dma_addr = phys; in set_rcvarray_entry()
758 node->grp = grp; in set_rcvarray_entry()
759 node->freed = false; in set_rcvarray_entry()
760 memcpy(node->pages, pages, flex_array_size(node, pages, npages)); in set_rcvarray_entry()
762 if (fd->use_mn) { in set_rcvarray_entry()
764 &node->notifier, current->mm, in set_rcvarray_entry()
765 tbuf->vaddr + (pageidx * PAGE_SIZE), npages * PAGE_SIZE, in set_rcvarray_entry()
770 fd->entry_to_rb[node->rcventry - uctxt->expected_base] = node; in set_rcvarray_entry()
773 trace_hfi1_exp_tid_reg(uctxt->ctxt, fd->subctxt, rcventry, npages, in set_rcvarray_entry()
774 node->notifier.interval_tree.start, node->phys, in set_rcvarray_entry()
780 node->rcventry, node->notifier.interval_tree.start, in set_rcvarray_entry()
781 node->phys, ret); in set_rcvarray_entry()
782 dma_unmap_single(&dd->pcidev->dev, phys, npages * PAGE_SIZE, in set_rcvarray_entry()
785 return -EFAULT; in set_rcvarray_entry()
790 struct hfi1_ctxtdata *uctxt = fd->uctxt; in unprogram_rcvarray()
791 struct hfi1_devdata *dd = uctxt->dd; in unprogram_rcvarray()
797 return -EINVAL; in unprogram_rcvarray()
799 rcventry = tididx + (tidctrl - 1); in unprogram_rcvarray()
801 if (rcventry >= uctxt->expected_count) { in unprogram_rcvarray()
803 rcventry, uctxt->ctxt); in unprogram_rcvarray()
804 return -EINVAL; in unprogram_rcvarray()
807 node = fd->entry_to_rb[rcventry]; in unprogram_rcvarray()
808 if (!node || node->rcventry != (uctxt->expected_base + rcventry)) in unprogram_rcvarray()
809 return -EBADF; in unprogram_rcvarray()
811 if (fd->use_mn) in unprogram_rcvarray()
812 mmu_interval_notifier_remove(&node->notifier); in unprogram_rcvarray()
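
In unprogram_rcvarray() above, the RcvArray entry is recovered from the user-supplied TID word: the IDX field names a pair of entries and the CTRL field (1 or 2) selects one of them, which is what "rcventry = tididx + (tidctrl - 1)" computes once tididx has been derived from IDX. The packed bit layout of the TID word is not shown in this listing, so the sketch below keeps IDX and CTRL as plain integers purely to illustrate the pairing; names and framing are illustrative.

    #include <assert.h>
    #include <stdio.h>

    /* Each IDX addresses a pair of RcvArray entries; CTRL picks the half:
     * 1 for the even entry, 2 for the odd one. The driver packs these into
     * bitfields of a 32-bit TID; here they stay separate for clarity. */
    struct tid_parts { unsigned int idx; unsigned int ctrl; };

    static struct tid_parts encode_entry(unsigned int entry)
    {
        struct tid_parts t = { .idx = entry >> 1, .ctrl = (entry & 1) + 1 };
        return t;
    }

    static unsigned int decode_entry(struct tid_parts t)
    {
        /* Matches "rcventry = tididx + (tidctrl - 1)" with tididx = idx * 2. */
        return (t.idx << 1) + (t.ctrl - 1);
    }

    int main(void)
    {
        for (unsigned int e = 0; e < 8; e++) {
            struct tid_parts t = encode_entry(e);
            assert(decode_entry(t) == e);
            printf("entry %u -> idx %u, ctrl %u\n", e, t.idx, t.ctrl);
        }
        return 0;
    }
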
820 struct hfi1_ctxtdata *uctxt = fd->uctxt; in __clear_tid_node()
821 struct hfi1_devdata *dd = uctxt->dd; in __clear_tid_node()
823 mutex_lock(&node->invalidate_mutex); in __clear_tid_node()
824 if (node->freed) in __clear_tid_node()
826 node->freed = true; in __clear_tid_node()
828 trace_hfi1_exp_tid_unreg(uctxt->ctxt, fd->subctxt, node->rcventry, in __clear_tid_node()
829 node->npages, in __clear_tid_node()
830 node->notifier.interval_tree.start, node->phys, in __clear_tid_node()
831 node->dma_addr); in __clear_tid_node()
834 hfi1_put_tid(dd, node->rcventry, PT_INVALID_FLUSH, 0, 0); in __clear_tid_node()
836 unpin_rcv_pages(fd, NULL, node, 0, node->npages, true); in __clear_tid_node()
838 mutex_unlock(&node->invalidate_mutex); in __clear_tid_node()
843 struct hfi1_ctxtdata *uctxt = fd->uctxt; in clear_tid_node()
847 node->grp->used--; in clear_tid_node()
848 node->grp->map &= ~(1 << (node->rcventry - node->grp->base)); in clear_tid_node()
850 if (node->grp->used == node->grp->size - 1) in clear_tid_node()
851 tid_group_move(node->grp, &uctxt->tid_full_list, in clear_tid_node()
852 &uctxt->tid_used_list); in clear_tid_node()
853 else if (!node->grp->used) in clear_tid_node()
854 tid_group_move(node->grp, &uctxt->tid_used_list, in clear_tid_node()
855 &uctxt->tid_group_list); in clear_tid_node()
861 * clearing nodes in the non-cached case.
867 struct tid_group *grp, *ptr; in unlock_exp_tids() local
870 list_for_each_entry_safe(grp, ptr, &set->list, list) { in unlock_exp_tids()
871 list_del_init(&grp->list); in unlock_exp_tids()
873 for (i = 0; i < grp->size; i++) { in unlock_exp_tids()
874 if (grp->map & (1 << i)) { in unlock_exp_tids()
875 u16 rcventry = grp->base + i; in unlock_exp_tids()
878 node = fd->entry_to_rb[rcventry - in unlock_exp_tids()
879 uctxt->expected_base]; in unlock_exp_tids()
880 if (!node || node->rcventry != rcventry) in unlock_exp_tids()
883 if (fd->use_mn) in unlock_exp_tids()
885 &node->notifier); in unlock_exp_tids()
898 struct hfi1_filedata *fdata = node->fdata; in tid_rb_invalidate()
899 struct hfi1_ctxtdata *uctxt = fdata->uctxt; in tid_rb_invalidate()
901 if (node->freed) in tid_rb_invalidate()
905 if (range->event != MMU_NOTIFY_UNMAP) in tid_rb_invalidate()
908 trace_hfi1_exp_tid_inval(uctxt->ctxt, fdata->subctxt, in tid_rb_invalidate()
909 node->notifier.interval_tree.start, in tid_rb_invalidate()
910 node->rcventry, node->npages, node->dma_addr); in tid_rb_invalidate()
915 spin_lock(&fdata->invalid_lock); in tid_rb_invalidate()
916 if (fdata->invalid_tid_idx < uctxt->expected_count) { in tid_rb_invalidate()
917 fdata->invalid_tids[fdata->invalid_tid_idx] = in tid_rb_invalidate()
918 create_tid(node->rcventry - uctxt->expected_base, in tid_rb_invalidate()
919 node->npages); in tid_rb_invalidate()
920 if (!fdata->invalid_tid_idx) { in tid_rb_invalidate()
928 * handled on a per-process basis, we can in tid_rb_invalidate()
932 ev = uctxt->dd->events + in tid_rb_invalidate()
933 (uctxt_offset(uctxt) + fdata->subctxt); in tid_rb_invalidate()
936 fdata->invalid_tid_idx++; in tid_rb_invalidate()
938 spin_unlock(&fdata->invalid_lock); in tid_rb_invalidate()
950 if (range->event == MMU_NOTIFY_UNMAP) { in tid_cover_invalidate()
951 mutex_lock(&tidbuf->cover_mutex); in tid_cover_invalidate()
953 mutex_unlock(&tidbuf->cover_mutex); in tid_cover_invalidate()
962 u32 base = fdata->uctxt->expected_base; in cacheless_tid_rb_remove()
964 fdata->entry_to_rb[tnode->rcventry - base] = NULL; in cacheless_tid_rb_remove()