Lines matching full:mem (occurrences of the identifier mem, grouped by function):

In mm_iommu_do_alloc():

     60      struct mm_iommu_table_group_mem_t *mem, *mem2;    (local)
     73      mem = kzalloc(sizeof(*mem), GFP_KERNEL);
     74      if (!mem) {
     80          mem->pageshift = __ffs(dev_hpa | (entries << PAGE_SHIFT));
     81          mem->dev_hpa = dev_hpa;
     84      mem->dev_hpa = MM_IOMMU_TABLE_INVALID_HPA;
     91      mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT));
     92      mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0])));
     93      if (!mem->hpas) {
     94          kfree(mem);
    108              mem->hpages + entry);
    125      atomic64_set(&mem->mapped, 1);
    126      mem->used = 1;
    127      mem->ua = ua;
    128      mem->entries = entries;
    144      if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) {
    152          struct page *page = mem->hpages[i];
    154          if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page))
    156          mem->pageshift = min(mem->pageshift, pageshift);
    161          mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
    165      list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);
    169      *pmem = mem;
    175      unpin_user_pages(mem->hpages, pinned);
    177      vfree(mem->hpas);
    178      kfree(mem);
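The two __ffs() computations above (lines 80 and 91) pick the initial page shift from natural alignment: the index of the lowest set bit of (base | size-in-bytes) is the largest page shift that can evenly tile the region. A stand-alone sketch of the same arithmetic, assuming 4K base pages; the helper name and test values are illustrative, not from the kernel source:

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumption: 4K base pages */

    /*
     * The index of the lowest set bit of (base | bytes) bounds the page
     * shift that can evenly tile the region; __builtin_ctzll() is the
     * GCC/Clang counterpart of the kernel's __ffs() here.
     */
    static unsigned int largest_pageshift(unsigned long long base,
                                          unsigned long long entries)
    {
        return (unsigned int)__builtin_ctzll(base | (entries << PAGE_SHIFT));
    }

    int main(void)
    {
        /* 16M-aligned, 16M-sized region: IOMMU pages up to 2^24 fit. */
        printf("%u\n", largest_pageshift(0x1000000ULL, 4096));  /* 24 */
        /* Same size, but only 64K alignment caps the shift at 16. */
        printf("%u\n", largest_pageshift(0x0010000ULL, 4096));  /* 16 */
        return 0;
    }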
In mm_iommu_unpin():

    202  static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem)    (argument)
    207      if (!mem->hpas)
    210      for (i = 0; i < mem->entries; ++i) {
    211          if (!mem->hpas[i])
    214          page = pfn_to_page(mem->hpas[i] >> PAGE_SHIFT);
    218          if (mem->hpas[i] & MM_IOMMU_TABLE_GROUP_PAGE_DIRTY)
    223          mem->hpas[i] = 0;
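Each hpas[] slot stores a page-aligned host physical address, so its low PAGE_SHIFT bits are free for flags; MM_IOMMU_TABLE_GROUP_PAGE_DIRTY (line 218) borrows one so the unpin loop knows to mark the page dirty before dropping the pin, while the >> PAGE_SHIFT at line 214 discards the flags when recovering the pfn. A minimal user-space sketch of the same low-bit tagging; the flag value and helper names are assumed for illustration:

    #include <assert.h>

    #define PAGE_SHIFT  12
    #define HPA_DIRTY   (1UL << 0)  /* stand-in for MM_IOMMU_TABLE_GROUP_PAGE_DIRTY */

    /* A page-aligned hpa has its low PAGE_SHIFT bits clear, so bit 0 can carry state. */
    static unsigned long hpa_mark_dirty(unsigned long hpa) { return hpa | HPA_DIRTY; }
    static unsigned long hpa_is_dirty(unsigned long hpa)   { return hpa & HPA_DIRTY; }
    static unsigned long hpa_to_pfn(unsigned long hpa)     { return hpa >> PAGE_SHIFT; }

    int main(void)
    {
        unsigned long hpa = 0x12345000UL;   /* pfn 0x12345, no flags */

        hpa = hpa_mark_dirty(hpa);
        assert(hpa_is_dirty(hpa));
        assert(hpa_to_pfn(hpa) == 0x12345); /* the shift discards flag bits */
        return 0;
    }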
In mm_iommu_do_free():

    227  static void mm_iommu_do_free(struct mm_iommu_table_group_mem_t *mem)    (argument)
    230      mm_iommu_unpin(mem);
    231      vfree(mem->hpas);
    232      kfree(mem);
In mm_iommu_free():

    237      struct mm_iommu_table_group_mem_t *mem = container_of(head,    (local)
    240      mm_iommu_do_free(mem);
In mm_iommu_release():

    243  static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)    (argument)
    245      list_del_rcu(&mem->next);
    246      call_rcu(&mem->rcu, mm_iommu_free);
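mm_iommu_release() is the standard RCU removal pair: list_del_rcu() unlinks the region while readers may still be traversing the list, and call_rcu() defers the actual free until they are done. The callback receives only the embedded rcu_head and recovers the full structure via container_of() (line 237). A runnable sketch of that recovery step, using a generic stand-in struct rather than the real mm_iommu_table_group_mem_t:

    #include <assert.h>
    #include <stddef.h>

    /* Plain-C equivalent of the kernel's container_of() */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rcu_head { void (*func)(struct rcu_head *); };

    /* Stand-in for mm_iommu_table_group_mem_t with its embedded rcu head. */
    struct region {
        unsigned long ua;
        struct rcu_head rcu;
    };

    /* A call_rcu() callback receives only the embedded rcu_head... */
    static void region_free(struct rcu_head *head)
    {
        /* ...and walks back to the enclosing object before freeing it. */
        struct region *r = container_of(head, struct region, rcu);

        assert(r->ua == 0x10000);
    }

    int main(void)
    {
        struct region r = { .ua = 0x10000 };

        region_free(&r.rcu);    /* simulate the deferred callback firing */
        return 0;
    }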
In mm_iommu_put():

    249  long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)    (argument)
    256      if (mem->used == 0) {
    261      --mem->used;
    263      if (mem->used)
    267      if (atomic64_cmpxchg(&mem->mapped, 1, 0) != 1) {
    268          ++mem->used;
    273      if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
    274          unlock_entries = mem->entries;
    277      mm_iommu_release(mem);
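This is the teardown handshake: used counts registrations, while the atomic mapped starts at 1 and counts live hardware mappings above that baseline. mm_iommu_put() may release the region only if the cmpxchg moves mapped from 1 to 0 (line 267); if a mapping still holds a reference, used is restored (line 268) and the put fails. A C11 sketch of just that claim-for-teardown step, simplified to the one atomic with no list mutex:

    #include <stdatomic.h>
    #include <stdio.h>

    /* mapped == 1: registered, no live mappings; > 1: mapped by hardware. */
    static atomic_llong mapped = 1;

    static int try_teardown(void)
    {
        long long expected = 1;

        /* Succeeds only when no mapping holds an extra reference. */
        if (!atomic_compare_exchange_strong(&mapped, &expected, 0))
            return -1;  /* the kernel re-increments used and fails the put */
        return 0;
    }

    int main(void)
    {
        atomic_fetch_add(&mapped, 1);               /* a mapping appears */
        printf("teardown: %d\n", try_teardown());   /* -1: still mapped */
        atomic_fetch_sub(&mapped, 1);               /* mapping goes away */
        printf("teardown: %d\n", try_teardown());   /* 0: mapped is now 0 */
        return 0;
    }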
In mm_iommu_lookup():

    291      struct mm_iommu_table_group_mem_t *mem, *ret = NULL;    (local)
    294      list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
    295          if ((mem->ua <= ua) &&
    296              (ua + size <= mem->ua +
    297                  (mem->entries << PAGE_SHIFT))) {
    298              ret = mem;
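The match condition at lines 295-297 is whole-range containment: [ua, ua + size) must fall entirely inside [mem->ua, mem->ua + entries * PAGE_SIZE); a partial overlap is not a hit. The predicate in isolation, with an illustrative helper name:

    #include <assert.h>

    #define PAGE_SHIFT 12

    /* True iff [ua, ua + size) lies entirely inside the registered window. */
    static int range_contained(unsigned long reg_ua, unsigned long entries,
                               unsigned long ua, unsigned long size)
    {
        return reg_ua <= ua &&
               ua + size <= reg_ua + (entries << PAGE_SHIFT);
    }

    int main(void)
    {
        /* 16-page window at 0x10000 covers 0x10000..0x20000. */
        assert(range_contained(0x10000, 16, 0x12000, 0x2000));  /* inside */
        assert(!range_contained(0x10000, 16, 0x1f000, 0x2000)); /* straddles end */
        return 0;
    }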
In mm_iommu_get():

    311      struct mm_iommu_table_group_mem_t *mem, *ret = NULL;    (local)
    315      list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next,
    317          if ((mem->ua == ua) && (mem->entries == entries)) {
    318              ret = mem;
    319              ++mem->used;
In mm_iommu_ua_to_hpa():

    330  long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,    (argument)
    333      const long entry = (ua - mem->ua) >> PAGE_SHIFT;
    336      if (entry >= mem->entries)
    339      if (pageshift > mem->pageshift)
    342      if (!mem->hpas) {
    343          *hpa = mem->dev_hpa + (ua - mem->ua);
    347      va = &mem->hpas[entry];
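Translation is index-plus-offset arithmetic: the page index is (ua - mem->ua) >> PAGE_SHIFT (line 333), the device-memory case with no hpas table (line 342) applies the same byte offset to dev_hpa, and the table case reads the page-aligned hpa from hpas[entry], with the in-page offset from ua combined back in by the full source. A sketch of both paths under those assumptions; the function signature and mask name here are illustrative:

    #include <assert.h>
    #include <stddef.h>

    #define PAGE_SHIFT  12
    #define PAGE_MASK   (~((1UL << PAGE_SHIFT) - 1))

    /*
     * Illustrative translator: hpas, when present, holds one page-aligned
     * hpa per page of the window; dev_hpa covers the table-less device case.
     */
    static int ua_to_hpa(unsigned long reg_ua, unsigned long entries,
                         const unsigned long *hpas, unsigned long dev_hpa,
                         unsigned long ua, unsigned long *hpa)
    {
        unsigned long entry = (ua - reg_ua) >> PAGE_SHIFT;

        if (entry >= entries)
            return -1;                  /* -EFAULT in the kernel */

        if (!hpas) {                    /* device memory: flat offset */
            *hpa = dev_hpa + (ua - reg_ua);
            return 0;
        }
        /* Table entry gives the page; ua supplies the offset within it. */
        *hpa = (hpas[entry] & PAGE_MASK) | (ua & ~PAGE_MASK);
        return 0;
    }

    int main(void)
    {
        unsigned long hpas[2] = { 0x200000, 0x345000 };
        unsigned long hpa;

        assert(ua_to_hpa(0x10000, 2, hpas, 0, 0x11234, &hpa) == 0);
        assert(hpa == 0x345234);        /* page 1, offset 0x234 */
        return 0;
    }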
In mm_iommu_is_devmem():

    357      struct mm_iommu_table_group_mem_t *mem;    (local)
    361      list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
    362          if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
    365          end = mem->dev_hpa + (mem->entries << PAGE_SHIFT);
    366          if ((mem->dev_hpa <= hpa) && (hpa < end)) {
In mm_iommu_mapped_inc():

    383  long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem)    (argument)
    385      if (atomic64_inc_not_zero(&mem->mapped))

In mm_iommu_mapped_dec():

    393  void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem)    (argument)
    395      atomic64_add_unless(&mem->mapped, -1, 1);
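These two close the mapped protocol: mm_iommu_mapped_inc() takes a mapping reference only while mapped is nonzero (after a successful teardown cmpxchg it is 0, so new mappings are refused), and mm_iommu_mapped_dec() never decrements past the baseline of 1. A user-space sketch of the two primitives as C11 CAS loops; atomic64_inc_not_zero()/atomic64_add_unless() are the real kernel APIs, but these bodies are illustrative:

    #include <stdatomic.h>
    #include <assert.h>

    /* atomic64_inc_not_zero(): increment unless the value is 0. */
    static int inc_not_zero(atomic_llong *v)
    {
        long long old = atomic_load(v);

        while (old != 0)
            if (atomic_compare_exchange_weak(v, &old, old + 1))
                return 1;   /* took a mapping reference */
        return 0;           /* region condemned: the kernel fails the map */
    }

    /* atomic64_add_unless(v, a, u): add a unless the value equals u. */
    static int add_unless(atomic_llong *v, long long a, long long u)
    {
        long long old = atomic_load(v);

        while (old != u)
            if (atomic_compare_exchange_weak(v, &old, old + a))
                return 1;
        return 0;
    }

    int main(void)
    {
        atomic_llong mapped = 1;    /* baseline: registered, unmapped */

        assert(inc_not_zero(&mapped));          /* mapped -> 2 */
        assert(add_unless(&mapped, -1, 1));     /* mapped -> 1 */
        assert(!add_unless(&mapped, -1, 1));    /* at baseline: no-op */
        atomic_store(&mapped, 0);               /* as after teardown */
        assert(!inc_not_zero(&mapped));         /* new mappings refused */
        return 0;
    }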