// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * IOMMU helpers in MMU context.
 *
 * Copyright (C) 2015 IBM Corp. <aik@ozlabs.ru>
 */

#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/sizes.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/pte-walk.h>
#include <linux/mm_inline.h>

static DEFINE_MUTEX(mem_list_mutex);

#define MM_IOMMU_TABLE_GROUP_PAGE_DIRTY	0x1
#define MM_IOMMU_TABLE_GROUP_PAGE_MASK	~(SZ_4K - 1)

struct mm_iommu_table_group_mem_t {
	struct list_head next;
	struct rcu_head rcu;
	unsigned long used;
	atomic64_t mapped;
	unsigned int pageshift;
	u64 ua;			/* userspace address */
	u64 entries;		/* number of entries in hpas/hpages[] */
	/*
	 * In mm_iommu_do_alloc() we temporarily use this to store
	 * struct page addresses.
	 *
	 * We need to convert ua to hpa in real mode. Make it
	 * simpler by storing the physical address.
	 */
	union {
		struct page **hpages;	/* vmalloc'ed */
		phys_addr_t *hpas;
	};
#define MM_IOMMU_TABLE_INVALID_HPA	((uint64_t)-1)
	u64 dev_hpa;		/* Device memory base address */
};
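
/*
 * Typical usage (an illustrative sketch, e.g. by the VFIO SPAPR TCE
 * driver; not a prescribed sequence): preregister a chunk of userspace
 * memory once, translate addresses while holding a mapping reference,
 * then drop the region:
 *
 *	struct mm_iommu_table_group_mem_t *mem;
 *	unsigned long hpa;
 *
 *	if (!mm_iommu_new(mm, ua, entries, &mem)) {
 *		if (!mm_iommu_mapped_inc(mem)) {
 *			mm_iommu_ua_to_hpa(mem, ua, PAGE_SHIFT, &hpa);
 *			mm_iommu_mapped_dec(mem);
 *		}
 *		mm_iommu_put(mm, mem);
 *	}
 */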

bool mm_iommu_preregistered(struct mm_struct *mm)
{
	return !list_empty(&mm->context.iommu_group_mem_list);
}
EXPORT_SYMBOL_GPL(mm_iommu_preregistered);

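/*
 * Pin @entries pages of userspace memory starting at @ua (or, when
 * @dev_hpa is not MM_IOMMU_TABLE_INVALID_HPA, register a device memory
 * region that needs no pinning), work out the largest IOMMU page shift
 * the region can back, reject overlaps with already preregistered
 * regions and publish the result on the mm's RCU list.
 */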
static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
		unsigned long entries, unsigned long dev_hpa,
		struct mm_iommu_table_group_mem_t **pmem)
{
	struct mm_iommu_table_group_mem_t *mem, *mem2;
	long i, ret, locked_entries = 0, pinned = 0;
	unsigned int pageshift;
	unsigned long entry, chunk;

	if (dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) {
		ret = account_locked_vm(mm, entries, true);
		if (ret)
			return ret;

		locked_entries = entries;
	}

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem) {
		ret = -ENOMEM;
		goto unlock_exit;
	}

	if (dev_hpa != MM_IOMMU_TABLE_INVALID_HPA) {
		mem->pageshift = __ffs(dev_hpa | (entries << PAGE_SHIFT));
		mem->dev_hpa = dev_hpa;
		goto good_exit;
	}
	mem->dev_hpa = MM_IOMMU_TABLE_INVALID_HPA;

	/*
	 * As a starting point for the maximum page size calculation,
	 * use the natural alignment of @ua and @entries. This allows
	 * IOMMU pages smaller than huge pages but still bigger than
	 * PAGE_SIZE.
	 */
	mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT));
	mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0])));
	if (!mem->hpas) {
		kfree(mem);
		ret = -ENOMEM;
		goto unlock_exit;
	}

	mmap_read_lock(mm);
	chunk = (1UL << (PAGE_SHIFT + MAX_ORDER)) /
			sizeof(struct vm_area_struct *);
	chunk = min(chunk, entries);
	/*
	 * Pin in bounded chunks: pin_user_pages() may pin fewer pages than
	 * requested, in which case we stop and unpin what we already have.
	 */
	for (entry = 0; entry < entries; entry += chunk) {
		unsigned long n = min(entries - entry, chunk);

		ret = pin_user_pages(ua + (entry << PAGE_SHIFT), n,
				FOLL_WRITE | FOLL_LONGTERM,
				mem->hpages + entry);
		if (ret == n) {
			pinned += n;
			continue;
		}
		if (ret > 0)
			pinned += ret;
		break;
	}
	mmap_read_unlock(mm);
	if (pinned != entries) {
		if (!ret)
			ret = -EFAULT;
		goto free_exit;
	}

good_exit:
	atomic64_set(&mem->mapped, 1);
	mem->used = 1;
	mem->ua = ua;
	mem->entries = entries;

	mutex_lock(&mem_list_mutex);

	list_for_each_entry_rcu(mem2, &mm->context.iommu_group_mem_list, next,
				lockdep_is_held(&mem_list_mutex)) {
		/* Overlap? */
		if ((mem2->ua < (ua + (entries << PAGE_SHIFT))) &&
				(ua < (mem2->ua +
				       (mem2->entries << PAGE_SHIFT)))) {
			ret = -EINVAL;
			mutex_unlock(&mem_list_mutex);
			goto free_exit;
		}
	}

	if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) {
		/*
		 * Allow using IOMMU pages larger than 64k. Only do that
		 * if we are backed by hugetlb. Skip device memory as it is
		 * not backed by page structs.
		 */
		pageshift = PAGE_SHIFT;
		for (i = 0; i < entries; ++i) {
			struct page *page = mem->hpages[i];

			if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page))
				pageshift = page_shift(compound_head(page));
			mem->pageshift = min(mem->pageshift, pageshift);
			/*
			 * We don't need the struct page reference any more,
			 * switch to the physical address.
			 */
			mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
		}
	}

	list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);

	mutex_unlock(&mem_list_mutex);

	*pmem = mem;

	return 0;

free_exit:
	/* free the references taken */
	unpin_user_pages(mem->hpages, pinned);

	vfree(mem->hpas);
	kfree(mem);

unlock_exit:
	account_locked_vm(mm, locked_entries, false);

	return ret;
}

long mm_iommu_new(struct mm_struct *mm, unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem)
{
	return mm_iommu_do_alloc(mm, ua, entries, MM_IOMMU_TABLE_INVALID_HPA,
			pmem);
}
EXPORT_SYMBOL_GPL(mm_iommu_new);

long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua,
		unsigned long entries, unsigned long dev_hpa,
		struct mm_iommu_table_group_mem_t **pmem)
{
	return mm_iommu_do_alloc(mm, ua, entries, dev_hpa, pmem);
}
EXPORT_SYMBOL_GPL(mm_iommu_newdev);

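/*
 * Unpin every page backing @mem. Pages whose entries were flagged
 * MM_IOMMU_TABLE_GROUP_PAGE_DIRTY while mapped are marked dirty before
 * the pin is dropped.
 */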
static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem)
{
	long i;
	struct page *page = NULL;

	if (!mem->hpas)
		return;

	for (i = 0; i < mem->entries; ++i) {
		if (!mem->hpas[i])
			continue;

		page = pfn_to_page(mem->hpas[i] >> PAGE_SHIFT);
		if (!page)
			continue;

		if (mem->hpas[i] & MM_IOMMU_TABLE_GROUP_PAGE_DIRTY)
			SetPageDirty(page);

		unpin_user_page(page);

		mem->hpas[i] = 0;
	}
}

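/*
 * Tear-down is split in three: mm_iommu_release() unlinks the region
 * from the mm's list under mem_list_mutex, mm_iommu_free() is the RCU
 * callback, and mm_iommu_do_free() unpins the pages and frees the
 * memory once all RCU readers are done with the list entry.
 */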
static void mm_iommu_do_free(struct mm_iommu_table_group_mem_t *mem)
{
	mm_iommu_unpin(mem);
	vfree(mem->hpas);
	kfree(mem);
}

static void mm_iommu_free(struct rcu_head *head)
{
	struct mm_iommu_table_group_mem_t *mem = container_of(head,
			struct mm_iommu_table_group_mem_t, rcu);

	mm_iommu_do_free(mem);
}

static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
{
	list_del_rcu(&mem->next);
	call_rcu(&mem->rcu, mm_iommu_free);
}

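/*
 * Drop one usage reference on @mem. The region is only torn down once
 * the last user is gone and there are no outstanding mappings, i.e.
 * when @mapped can be flipped from its idle value of 1 back to 0.
 */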
long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
{
	long ret = 0;
	unsigned long unlock_entries = 0;

	mutex_lock(&mem_list_mutex);

	if (mem->used == 0) {
		ret = -ENOENT;
		goto unlock_exit;
	}

	--mem->used;
	/* There are still users, exit */
	if (mem->used)
		goto unlock_exit;

	/* Are there still mappings? */
	if (atomic64_cmpxchg(&mem->mapped, 1, 0) != 1) {
		++mem->used;
		ret = -EBUSY;
		goto unlock_exit;
	}

	if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
		unlock_entries = mem->entries;

	/* @mapped became 0 so now mappings are disabled, release the region */
	mm_iommu_release(mem);

unlock_exit:
	mutex_unlock(&mem_list_mutex);

	account_locked_vm(mm, unlock_entries, false);

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_put);

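/*
 * Find the preregistered region fully covering [@ua, @ua + @size).
 * The list is walked under rcu_read_lock(), so this is safe against
 * concurrent registration and removal.
 */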
struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
		unsigned long ua, unsigned long size)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
		if ((mem->ua <= ua) &&
		    (ua + size <= mem->ua +
		     (mem->entries << PAGE_SHIFT))) {
			ret = mem;
			break;
		}
	}
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_lookup);

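/*
 * Look up a region by exact @ua/@entries match and take a usage
 * reference on it; callers pair this with mm_iommu_put().
 */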
struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
		unsigned long ua, unsigned long entries)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	mutex_lock(&mem_list_mutex);

	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next,
				lockdep_is_held(&mem_list_mutex)) {
		if ((mem->ua == ua) && (mem->entries == entries)) {
			ret = mem;
			++mem->used;
			break;
		}
	}

	mutex_unlock(&mem_list_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_get);

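/*
 * Translate userspace address @ua within @mem into a host physical
 * address, failing if @ua falls outside the region or if the region
 * cannot back an IOMMU page as large as 1 << @pageshift.
 */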
long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned int pageshift, unsigned long *hpa)
{
	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
	u64 *va;

	if (entry >= mem->entries)
		return -EFAULT;

	if (pageshift > mem->pageshift)
		return -EFAULT;

	if (!mem->hpas) {
		*hpa = mem->dev_hpa + (ua - mem->ua);
		return 0;
	}

	va = &mem->hpas[entry];
	*hpa = (*va & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK);

	return 0;
}
EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);

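/*
 * Check whether host physical address @hpa falls into a preregistered
 * device memory region (one created by mm_iommu_newdev(), not backed by
 * page structs) and, if so, report how much of that region is covered
 * starting at @hpa.
 */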
bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
		unsigned int pageshift, unsigned long *size)
{
	struct mm_iommu_table_group_mem_t *mem;
	unsigned long end;

	rcu_read_lock();
	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
		if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
			continue;

		end = mem->dev_hpa + (mem->entries << PAGE_SHIFT);
		if ((mem->dev_hpa <= hpa) && (hpa < end)) {
			/*
			 * Since the IOMMU page size might be bigger than
			 * PAGE_SIZE, the amount of preregistered memory
			 * starting from @hpa might be smaller than
			 * 1 << pageshift, and the caller needs to distinguish
			 * this situation.
			 */
			*size = min(1UL << pageshift, end - hpa);
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}
EXPORT_SYMBOL_GPL(mm_iommu_is_devmem);

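/*
 * Take a mapping reference unless the region is already being torn down,
 * i.e. the final mm_iommu_put() has dropped @mapped to zero.
 */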
long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem)
{
	if (atomic64_inc_not_zero(&mem->mapped))
		return 0;

	/* Last mm_iommu_put() has been called, no more mappings allowed */
	return -ENXIO;
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_inc);

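/*
 * Drop a mapping reference. The "unless it is 1" guard keeps @mapped
 * from reaching zero here: only the final mm_iommu_put() may flip it
 * from 1 to 0, via atomic64_cmpxchg().
 */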
void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem)
{
	atomic64_add_unless(&mem->mapped, -1, 1);
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_dec);

void mm_iommu_init(struct mm_struct *mm)
{
	INIT_LIST_HEAD_RCU(&mm->context.iommu_group_mem_list);
}