Lines Matching refs:mmu

42 nvkm_mmu_ptp_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt *pt)  in nvkm_mmu_ptp_put()  argument
51 list_add(&ptp->head, &mmu->ptp.list); in nvkm_mmu_ptp_put()
56 nvkm_mmu_ptc_put(mmu, force, &ptp->pt); in nvkm_mmu_ptp_put()
65 nvkm_mmu_ptp_get(struct nvkm_mmu *mmu, u32 size, bool zero) in nvkm_mmu_ptp_get() argument
74 ptp = list_first_entry_or_null(&mmu->ptp.list, typeof(*ptp), head); in nvkm_mmu_ptp_get()
82 ptp->pt = nvkm_mmu_ptc_get(mmu, 0x1000, 0x1000, false); in nvkm_mmu_ptp_get()
93 list_add(&ptp->head, &mmu->ptp.list); in nvkm_mmu_ptp_get()
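
The nvkm_mmu_ptp_get()/nvkm_mmu_ptp_put() matches above revolve around sub-page page tables: a full 0x1000-byte backing allocation is carved into equal slots, a bitmask tracks which slots are free, and parents that still have room stay on mmu->ptp.list for reuse. Below is a minimal userspace sketch of that idea only; the names, the fixed slot size and the bookkeeping are assumptions for illustration, not the nvkm API, and the put/free path is omitted.

#include <stdlib.h>
#include <stdint.h>

#define PARENT_SIZE 0x1000u                    /* one full page-table page */
#define SLOT_SIZE   0x100u                     /* assumed fixed sub-table size */
#define SLOT_COUNT  (PARENT_SIZE / SLOT_SIZE)  /* 16 slots => 16-bit mask */

struct ptp {
	struct ptp *next;   /* parents that still have at least one free slot */
	uint8_t    *base;   /* PARENT_SIZE bytes of backing storage */
	uint16_t    free;   /* bitmask: bit N set means slot N is free */
};

static struct ptp *ptp_free_list;

static void *ptp_get(void)
{
	struct ptp *ptp = ptp_free_list;
	int slot;

	if (!ptp) {                               /* no partially-free parent */
		ptp = calloc(1, sizeof(*ptp));
		if (!ptp)
			return NULL;
		ptp->base = aligned_alloc(PARENT_SIZE, PARENT_SIZE);
		if (!ptp->base) {
			free(ptp);
			return NULL;
		}
		ptp->free = (1u << SLOT_COUNT) - 1;   /* every slot starts free */
		ptp->next = ptp_free_list;
		ptp_free_list = ptp;
	}

	for (slot = 0; !(ptp->free & (1u << slot)); slot++)
		;                                 /* find the lowest free slot */
	ptp->free &= ~(1u << slot);
	if (!ptp->free)                           /* parent is now full */
		ptp_free_list = ptp->next;
	return ptp->base + slot * SLOT_SIZE;
}
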
120 nvkm_mmu_ptc_find(struct nvkm_mmu *mmu, u32 size) in nvkm_mmu_ptc_find() argument
124 list_for_each_entry(ptc, &mmu->ptc.list, head) { in nvkm_mmu_ptc_find()
134 list_add(&ptc->head, &mmu->ptc.list); in nvkm_mmu_ptc_find()
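
nvkm_mmu_ptc_find() above is a find-or-create lookup of a per-size bucket on mmu->ptc.list. A hedged sketch of the same pattern, using plain pointers instead of the kernel's struct list_head; the struct and function names are hypothetical:

#include <stdlib.h>
#include <stdint.h>

struct ptc {
	struct ptc *next;
	uint32_t    size;    /* page-table size this bucket caches */
	void       *cached;  /* freed tables of that size would hang here */
};

static struct ptc *ptc_list;

static struct ptc *ptc_find(uint32_t size)
{
	struct ptc *ptc;

	for (ptc = ptc_list; ptc; ptc = ptc->next)
		if (ptc->size == size)       /* existing bucket for this size */
			return ptc;

	ptc = calloc(1, sizeof(*ptc));       /* none yet: create and link it */
	if (ptc) {
		ptc->size = size;
		ptc->next = ptc_list;
		ptc_list = ptc;
	}
	return ptc;
}
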
141 nvkm_mmu_ptc_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt **ppt) in nvkm_mmu_ptc_put() argument
147 mutex_lock(&mmu->ptp.mutex); in nvkm_mmu_ptc_put()
148 nvkm_mmu_ptp_put(mmu, force, pt); in nvkm_mmu_ptc_put()
149 mutex_unlock(&mmu->ptp.mutex); in nvkm_mmu_ptc_put()
154 mutex_lock(&mmu->ptc.mutex); in nvkm_mmu_ptc_put()
162 mutex_unlock(&mmu->ptc.mutex); in nvkm_mmu_ptc_put()
167 nvkm_mmu_ptc_get(struct nvkm_mmu *mmu, u32 size, u32 align, bool zero) in nvkm_mmu_ptc_get() argument
175 mutex_lock(&mmu->ptp.mutex); in nvkm_mmu_ptc_get()
176 pt = nvkm_mmu_ptp_get(mmu, align, zero); in nvkm_mmu_ptc_get()
177 mutex_unlock(&mmu->ptp.mutex); in nvkm_mmu_ptc_get()
182 mutex_lock(&mmu->ptc.mutex); in nvkm_mmu_ptc_get()
183 ptc = nvkm_mmu_ptc_find(mmu, size); in nvkm_mmu_ptc_get()
185 mutex_unlock(&mmu->ptc.mutex); in nvkm_mmu_ptc_get()
196 mutex_unlock(&mmu->ptc.mutex); in nvkm_mmu_ptc_get()
199 mutex_unlock(&mmu->ptc.mutex); in nvkm_mmu_ptc_get()
207 ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST, in nvkm_mmu_ptc_get()
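
Read together, the nvkm_mmu_ptc_get() matches show an allocation ladder: sub-page-sized requests (align below 0x1000) go through the ptp allocator under ptp.mutex, otherwise the per-size bucket is consulted under ptc.mutex, and only when nothing cached is available is fresh backing memory allocated with nvkm_memory_new(). A compact sketch of that ordering; pt_get() and the three stubbed helpers are hypothetical and simply use the heap here:

#include <stdlib.h>
#include <stdint.h>
#include <string.h>

/* stand-ins for the real mechanisms; here they just malloc */
static void *subpage_get(uint32_t size)   { return malloc(size); }
static void *cache_pop(uint32_t size)     { (void)size; return NULL; }
static void *backing_alloc(uint32_t size) { return malloc(size); }

static void *pt_get(uint32_t size, uint32_t align, int zero)
{
	void *pt;

	if (align < 0x1000) {
		pt = subpage_get(size);      /* small table: share a page */
	} else {
		pt = cache_pop(size);        /* try a previously freed table */
		if (!pt)
			pt = backing_alloc(size);
	}

	if (pt && zero)                      /* callers may ask for zeroing */
		memset(pt, 0, size);
	return pt;
}
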
220 nvkm_mmu_ptc_dump(struct nvkm_mmu *mmu) in nvkm_mmu_ptc_dump() argument
223 list_for_each_entry(ptc, &mmu->ptc.list, head) { in nvkm_mmu_ptc_dump()
234 nvkm_mmu_ptc_fini(struct nvkm_mmu *mmu) in nvkm_mmu_ptc_fini() argument
238 list_for_each_entry_safe(ptc, ptct, &mmu->ptc.list, head) { in nvkm_mmu_ptc_fini()
246 nvkm_mmu_ptc_init(struct nvkm_mmu *mmu) in nvkm_mmu_ptc_init() argument
248 mutex_init(&mmu->ptc.mutex); in nvkm_mmu_ptc_init()
249 INIT_LIST_HEAD(&mmu->ptc.list); in nvkm_mmu_ptc_init()
250 mutex_init(&mmu->ptp.mutex); in nvkm_mmu_ptc_init()
251 INIT_LIST_HEAD(&mmu->ptp.list); in nvkm_mmu_ptc_init()
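
nvkm_mmu_ptc_init() above only prepares the two lists and their mutexes; the interesting part is the teardown in nvkm_mmu_ptc_fini(), which must free buckets while walking the list. A sketch of that, reusing the hypothetical struct ptc and ptc_list from the nvkm_mmu_ptc_find() sketch (the kernel gets the same safety from list_for_each_entry_safe):

static void ptc_fini(void)
{
	struct ptc *ptc = ptc_list, *next;

	while (ptc) {
		next = ptc->next;       /* grab the successor before freeing */
		/* a fuller version would also free any tables still cached
		 * in this bucket */
		free(ptc);
		ptc = next;
	}
	ptc_list = NULL;
}
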
255 nvkm_mmu_type(struct nvkm_mmu *mmu, int heap, u8 type) in nvkm_mmu_type() argument
257 if (heap >= 0 && !WARN_ON(mmu->type_nr == ARRAY_SIZE(mmu->type))) { in nvkm_mmu_type()
258 mmu->type[mmu->type_nr].type = type | mmu->heap[heap].type; in nvkm_mmu_type()
259 mmu->type[mmu->type_nr].heap = heap; in nvkm_mmu_type()
260 mmu->type_nr++; in nvkm_mmu_type()
265 nvkm_mmu_heap(struct nvkm_mmu *mmu, u8 type, u64 size) in nvkm_mmu_heap() argument
268 if (!WARN_ON(mmu->heap_nr == ARRAY_SIZE(mmu->heap))) { in nvkm_mmu_heap()
269 mmu->heap[mmu->heap_nr].type = type; in nvkm_mmu_heap()
270 mmu->heap[mmu->heap_nr].size = size; in nvkm_mmu_heap()
271 return mmu->heap_nr++; in nvkm_mmu_heap()
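
nvkm_mmu_heap() and nvkm_mmu_type() above append entries to small fixed arrays, guarded by WARN_ON overflow checks, with each type inheriting its heap's attribute bits and heap registration returning the index that later type registrations refer to. A self-contained sketch of that registration pattern; the array sizes, field names and the assert() in place of WARN_ON are illustrative assumptions:

#include <stdint.h>
#include <assert.h>

#define MAX_HEAP 4
#define MAX_TYPE 16

static struct { uint8_t attr; uint64_t size; } heaps[MAX_HEAP];
static struct { uint8_t attr; int heap; }      types[MAX_TYPE];
static int heap_nr, type_nr;

static int heap_add(uint8_t attr, uint64_t size)
{
	assert(heap_nr < MAX_HEAP);     /* the kernel WARNs instead of aborting */
	heaps[heap_nr].attr = attr;
	heaps[heap_nr].size = size;
	return heap_nr++;               /* callers pass this index to type_add() */
}

static void type_add(int heap, uint8_t attr)
{
	if (heap < 0 || type_nr >= MAX_TYPE)
		return;
	types[type_nr].attr = attr | heaps[heap].attr;   /* inherit heap bits */
	types[type_nr].heap = heap;
	type_nr++;
}

Returning the heap index from heap_add() is what lets the later type registrations in nvkm_mmu_host()/nvkm_mmu_vram() tie each memory type back to the heap it draws from.
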
278 nvkm_mmu_host(struct nvkm_mmu *mmu) in nvkm_mmu_host() argument
280 struct nvkm_device *device = mmu->subdev.device; in nvkm_mmu_host()
281 u8 type = NVKM_MEM_KIND * !!mmu->func->kind_sys; in nvkm_mmu_host()
285 heap = nvkm_mmu_heap(mmu, NVKM_MEM_HOST, ~0ULL); in nvkm_mmu_host()
286 nvkm_mmu_type(mmu, heap, type); in nvkm_mmu_host()
296 nvkm_mmu_type(mmu, heap, type & ~NVKM_MEM_KIND); in nvkm_mmu_host()
298 nvkm_mmu_type(mmu, heap, type); in nvkm_mmu_host()
308 nvkm_mmu_type(mmu, heap, type & ~NVKM_MEM_KIND); in nvkm_mmu_host()
311 nvkm_mmu_type(mmu, heap, type |= NVKM_MEM_UNCACHED); in nvkm_mmu_host()
315 nvkm_mmu_vram(struct nvkm_mmu *mmu) in nvkm_mmu_vram() argument
317 struct nvkm_device *device = mmu->subdev.device; in nvkm_mmu_vram()
322 u8 type = NVKM_MEM_KIND * !!mmu->func->kind; in nvkm_mmu_vram()
327 heapM = nvkm_mmu_heap(mmu, heap, sizeM << NVKM_RAM_MM_SHIFT); in nvkm_mmu_vram()
331 heapN = nvkm_mmu_heap(mmu, heap, sizeN << NVKM_RAM_MM_SHIFT); in nvkm_mmu_vram()
332 heapU = nvkm_mmu_heap(mmu, heap, sizeU << NVKM_RAM_MM_SHIFT); in nvkm_mmu_vram()
338 nvkm_mmu_type(mmu, heapU, type); in nvkm_mmu_vram()
339 nvkm_mmu_type(mmu, heapN, type); in nvkm_mmu_vram()
340 nvkm_mmu_type(mmu, heapM, type); in nvkm_mmu_vram()
346 nvkm_mmu_host(mmu); in nvkm_mmu_vram()
355 nvkm_mmu_type(mmu, heapN, type); in nvkm_mmu_vram()
356 nvkm_mmu_type(mmu, heapM, type); in nvkm_mmu_vram()
362 nvkm_mmu_type(mmu, heapN, type); in nvkm_mmu_vram()
363 nvkm_mmu_type(mmu, heapM, type); in nvkm_mmu_vram()
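
The nvkm_mmu_host() and nvkm_mmu_vram() matches above enumerate the memory types userspace can request: VRAM is split into heaps whose sizes come from the RAM allocator's regions (page counts shifted by NVKM_RAM_MM_SHIFT into bytes), a host heap of unbounded size is added, and each heap gets one type per attribute combination (kind'd or plain, mappable or not, cached or uncached). A rough sketch of that enumeration built on the hypothetical heap_add()/type_add() helpers from the previous sketch; the MEM_* bits, region names, shift value and ordering are assumptions, not the nvkm constants:

#include <stdint.h>

#define MEM_VRAM     0x01u
#define MEM_HOST     0x02u
#define MEM_KIND     0x04u
#define MEM_COMP     0x08u
#define MEM_MAPPABLE 0x10u
#define MEM_UNCACHED 0x20u

#define MM_SHIFT 12     /* RAM regions are counted in pages, heaps in bytes */

static void vram_types(uint64_t mixed, uint64_t normal, uint64_t reserved,
		       int has_kind)
{
	uint8_t type = has_kind ? MEM_KIND : 0;
	int heapM, heapN, heapU;

	/* one heap per VRAM region; only non-mixed regions get compression */
	heapM = heap_add(MEM_VRAM, mixed << MM_SHIFT);
	heapN = heap_add(MEM_VRAM | MEM_COMP, normal << MM_SHIFT);
	heapU = heap_add(MEM_VRAM | MEM_COMP, reserved << MM_SHIFT);

	/* non-mappable VRAM types for every heap */
	type_add(heapU, type);
	type_add(heapN, type);
	type_add(heapM, type);

	/* host memory (no fixed size), then the BAR-mappable VRAM variants */
	type_add(heap_add(MEM_HOST, ~0ULL), type | MEM_UNCACHED);
	type |= MEM_MAPPABLE;
	type_add(heapN, type);
	type_add(heapM, type);
	type_add(heapN, type | MEM_UNCACHED);
	type_add(heapM, type | MEM_UNCACHED);
}
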
370 struct nvkm_mmu *mmu = nvkm_mmu(subdev); in nvkm_mmu_oneinit() local
373 if (mmu->subdev.device->fb && mmu->subdev.device->fb->ram) in nvkm_mmu_oneinit()
374 nvkm_mmu_vram(mmu); in nvkm_mmu_oneinit()
376 nvkm_mmu_host(mmu); in nvkm_mmu_oneinit()
378 if (mmu->func->vmm.global) { in nvkm_mmu_oneinit()
380 "gart", &mmu->vmm); in nvkm_mmu_oneinit()
391 struct nvkm_mmu *mmu = nvkm_mmu(subdev); in nvkm_mmu_init() local
392 if (mmu->func->init) in nvkm_mmu_init()
393 mmu->func->init(mmu); in nvkm_mmu_init()
400 struct nvkm_mmu *mmu = nvkm_mmu(subdev); in nvkm_mmu_dtor() local
402 nvkm_vmm_unref(&mmu->vmm); in nvkm_mmu_dtor()
404 nvkm_mmu_ptc_fini(mmu); in nvkm_mmu_dtor()
405 mutex_destroy(&mmu->mutex); in nvkm_mmu_dtor()
406 return mmu; in nvkm_mmu_dtor()
418 enum nvkm_subdev_type type, int inst, struct nvkm_mmu *mmu) in nvkm_mmu_ctor() argument
420 nvkm_subdev_ctor(&nvkm_mmu, device, type, inst, &mmu->subdev); in nvkm_mmu_ctor()
421 mmu->func = func; in nvkm_mmu_ctor()
422 mmu->dma_bits = func->dma_bits; in nvkm_mmu_ctor()
423 nvkm_mmu_ptc_init(mmu); in nvkm_mmu_ctor()
424 mutex_init(&mmu->mutex); in nvkm_mmu_ctor()
425 mmu->user.ctor = nvkm_ummu_new; in nvkm_mmu_ctor()
426 mmu->user.base = func->mmu.user; in nvkm_mmu_ctor()
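
Finally, nvkm_mmu_ctor(), nvkm_mmu_init() and nvkm_mmu_dtor() above show the usual subdev lifecycle: the constructor wires up the chip-specific function table, caches dma_bits, sets up the PT cache and mutex and registers the userspace object constructor; init only runs the optional hardware hook; the destructor unrefs the global VMM, tears the PT cache down and hands the object back for freeing. A bare-bones sketch of that ops-table pattern with hypothetical names, not the nvkm types:

#include <stdint.h>

struct mmu;

struct mmu_func {
	uint8_t dma_bits;               /* DMA address width for this chip */
	void  (*init)(struct mmu *);    /* optional hardware init hook */
};

struct mmu {
	const struct mmu_func *func;    /* per-chip implementation */
	uint8_t dma_bits;               /* cached copy for quick access */
};

static void mmu_ctor(const struct mmu_func *func, struct mmu *mmu)
{
	mmu->func = func;
	mmu->dma_bits = func->dma_bits;
}

static void mmu_init(struct mmu *mmu)
{
	if (mmu->func->init)            /* hook is optional, as in the listing */
		mmu->func->init(mmu);
}

static void *mmu_dtor(struct mmu *mmu)
{
	/* the real code unrefs the global VMM and finalizes the PT cache here */
	return mmu;                     /* caller frees the containing object */
}
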