xref: /openbmc/linux/drivers/gpu/drm/msm/msm_gem.c (revision e7cd5ee9)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2013 Red Hat
4  * Author: Rob Clark <robdclark@gmail.com>
5  */
6 
7 #include <linux/dma-map-ops.h>
8 #include <linux/vmalloc.h>
9 #include <linux/spinlock.h>
10 #include <linux/shmem_fs.h>
11 #include <linux/dma-buf.h>
12 #include <linux/pfn_t.h>
13 
14 #include <drm/drm_prime.h>
15 
16 #include "msm_drv.h"
17 #include "msm_fence.h"
18 #include "msm_gem.h"
19 #include "msm_gpu.h"
20 #include "msm_mmu.h"
21 
22 static void update_lru(struct drm_gem_object *obj);
23 
24 static dma_addr_t physaddr(struct drm_gem_object *obj)
25 {
26 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
27 	struct msm_drm_private *priv = obj->dev->dev_private;
28 	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
29 			priv->vram.paddr;
30 }
31 
32 static bool use_pages(struct drm_gem_object *obj)
33 {
34 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
35 	return !msm_obj->vram_node;
36 }
37 
38 /*
39  * Cache sync: this is a bit over-complicated in order to fit the
40  * dma-mapping API.  Really the GPU cache is out of scope here (it is
41  * handled on the cmdstream); all we need to do is invalidate newly
42  * allocated pages before mapping them to the CPU as uncached/writecombine.
43  *
44  * On top of this, we have the added headache that, depending on the
45  * display generation, the display's iommu may be wired up to either
46  * the toplevel drm device (mdss) or to the mdp sub-node, meaning
47  * that here we either have dma-direct or iommu ops.
48  *
49  * Let this be a cautionary tale of abstraction gone wrong.
50  */
51 
52 static void sync_for_device(struct msm_gem_object *msm_obj)
53 {
54 	struct device *dev = msm_obj->base.dev->dev;
55 
56 	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
57 }
58 
59 static void sync_for_cpu(struct msm_gem_object *msm_obj)
60 {
61 	struct device *dev = msm_obj->base.dev->dev;
62 
63 	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
64 }
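
/*
 * These two helpers bracket the lifetime of the shmem backing pages for
 * write-combined buffers: get_pages() calls sync_for_device() right after
 * building the sg_table, and put_pages() calls sync_for_cpu() just before
 * the sg_table is freed.  In dma-mapping terms it is simply a map/unmap
 * pair; an illustrative (not literal) sequence looks like:
 *
 *	pages = get_pages(obj);   // dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0)
 *	... device (GPU/display) accesses the buffer ...
 *	put_pages(obj);           // dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0)
 */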
65 
66 /* allocate pages from VRAM carveout, used when no IOMMU: */
67 static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
68 {
69 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
70 	struct msm_drm_private *priv = obj->dev->dev_private;
71 	dma_addr_t paddr;
72 	struct page **p;
73 	int ret, i;
74 
75 	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
76 	if (!p)
77 		return ERR_PTR(-ENOMEM);
78 
79 	spin_lock(&priv->vram.lock);
80 	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
81 	spin_unlock(&priv->vram.lock);
82 	if (ret) {
83 		kvfree(p);
84 		return ERR_PTR(ret);
85 	}
86 
87 	paddr = physaddr(obj);
88 	for (i = 0; i < npages; i++) {
89 		p[i] = pfn_to_page(__phys_to_pfn(paddr));
90 		paddr += PAGE_SIZE;
91 	}
92 
93 	return p;
94 }
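
/*
 * With the VRAM carveout there is no IOMMU, so the "iova" and the physical
 * address are the same thing: physaddr() above turns the drm_mm node
 * (allocated in units of pages) into a bus/physical address, and
 * msm_gem_new() later stores that address directly as vma->iova for the
 * no-IOMMU case.
 */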
95 
96 static struct page **get_pages(struct drm_gem_object *obj)
97 {
98 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
99 
100 	GEM_WARN_ON(!msm_gem_is_locked(obj));
101 
102 	if (!msm_obj->pages) {
103 		struct drm_device *dev = obj->dev;
104 		struct page **p;
105 		int npages = obj->size >> PAGE_SHIFT;
106 
107 		if (use_pages(obj))
108 			p = drm_gem_get_pages(obj);
109 		else
110 			p = get_pages_vram(obj, npages);
111 
112 		if (IS_ERR(p)) {
113 			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
114 					PTR_ERR(p));
115 			return p;
116 		}
117 
118 		msm_obj->pages = p;
119 
120 		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
121 		if (IS_ERR(msm_obj->sgt)) {
122 			void *ptr = ERR_CAST(msm_obj->sgt);
123 
124 			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
125 			msm_obj->sgt = NULL;
126 			return ptr;
127 		}
128 
129 		/* For non-cached buffers, ensure the new pages are clean
130 		 * because the display controller, GPU, etc. are not coherent:
131 		 */
132 		if (msm_obj->flags & MSM_BO_WC)
133 			sync_for_device(msm_obj);
134 
135 		update_lru(obj);
136 	}
137 
138 	return msm_obj->pages;
139 }
140 
141 static void put_pages_vram(struct drm_gem_object *obj)
142 {
143 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
144 	struct msm_drm_private *priv = obj->dev->dev_private;
145 
146 	spin_lock(&priv->vram.lock);
147 	drm_mm_remove_node(msm_obj->vram_node);
148 	spin_unlock(&priv->vram.lock);
149 
150 	kvfree(msm_obj->pages);
151 }
152 
153 static void put_pages(struct drm_gem_object *obj)
154 {
155 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
156 
157 	if (msm_obj->pages) {
158 		if (msm_obj->sgt) {
159 			/* For non-cached buffers, ensure the new
160 			 * pages are clean because the display controller,
161 			 * GPU, etc. are not coherent:
162 			 */
163 			if (msm_obj->flags & MSM_BO_WC)
164 				sync_for_cpu(msm_obj);
165 
166 			sg_free_table(msm_obj->sgt);
167 			kfree(msm_obj->sgt);
168 			msm_obj->sgt = NULL;
169 		}
170 
171 		if (use_pages(obj))
172 			drm_gem_put_pages(obj, msm_obj->pages, true, false);
173 		else
174 			put_pages_vram(obj);
175 
176 		msm_obj->pages = NULL;
177 	}
178 }
179 
180 static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
181 {
182 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
183 	struct page **p;
184 
185 	GEM_WARN_ON(!msm_gem_is_locked(obj));
186 
187 	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
188 		return ERR_PTR(-EBUSY);
189 	}
190 
191 	p = get_pages(obj);
192 	if (!IS_ERR(p)) {
193 		msm_obj->pin_count++;
194 		update_lru(obj);
195 	}
196 
197 	return p;
198 }
199 
200 struct page **msm_gem_pin_pages(struct drm_gem_object *obj)
201 {
202 	struct page **p;
203 
204 	msm_gem_lock(obj);
205 	p = msm_gem_pin_pages_locked(obj);
206 	msm_gem_unlock(obj);
207 
208 	return p;
209 }
210 
211 void msm_gem_unpin_pages(struct drm_gem_object *obj)
212 {
213 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
214 
215 	msm_gem_lock(obj);
216 	msm_obj->pin_count--;
217 	GEM_WARN_ON(msm_obj->pin_count < 0);
218 	update_lru(obj);
219 	msm_gem_unlock(obj);
220 }
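
/*
 * Every successful msm_gem_pin_pages() must be balanced by a matching
 * msm_gem_unpin_pages().  A minimal, purely illustrative caller (not
 * taken from this file) would look like:
 *
 *	struct page **p = msm_gem_pin_pages(obj);
 *	if (IS_ERR(p))
 *		return PTR_ERR(p);
 *	... access the backing pages ...
 *	msm_gem_unpin_pages(obj);
 */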
221 
222 static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
223 {
224 	if (msm_obj->flags & MSM_BO_WC)
225 		return pgprot_writecombine(prot);
226 	return prot;
227 }
228 
229 static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
230 {
231 	struct vm_area_struct *vma = vmf->vma;
232 	struct drm_gem_object *obj = vma->vm_private_data;
233 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
234 	struct page **pages;
235 	unsigned long pfn;
236 	pgoff_t pgoff;
237 	int err;
238 	vm_fault_t ret;
239 
240 	/*
241 	 * vm_ops.open/drm_gem_mmap_obj and close get and put
242 	 * a reference on obj. So, we don't need to hold one here.
243 	 */
244 	err = msm_gem_lock_interruptible(obj);
245 	if (err) {
246 		ret = VM_FAULT_NOPAGE;
247 		goto out;
248 	}
249 
250 	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
251 		msm_gem_unlock(obj);
252 		return VM_FAULT_SIGBUS;
253 	}
254 
255 	/* make sure we have pages attached now */
256 	pages = get_pages(obj);
257 	if (IS_ERR(pages)) {
258 		ret = vmf_error(PTR_ERR(pages));
259 		goto out_unlock;
260 	}
261 
262 	/* We don't use vmf->pgoff since that has the fake offset: */
263 	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
264 
265 	pfn = page_to_pfn(pages[pgoff]);
266 
267 	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
268 			pfn, pfn << PAGE_SHIFT);
269 
270 	ret = vmf_insert_pfn(vma, vmf->address, pfn);
271 
272 out_unlock:
273 	msm_gem_unlock(obj);
274 out:
275 	return ret;
276 }
277 
278 /** get mmap offset */
279 static uint64_t mmap_offset(struct drm_gem_object *obj)
280 {
281 	struct drm_device *dev = obj->dev;
282 	int ret;
283 
284 	GEM_WARN_ON(!msm_gem_is_locked(obj));
285 
286 	/* Make it mmapable */
287 	ret = drm_gem_create_mmap_offset(obj);
288 
289 	if (ret) {
290 		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
291 		return 0;
292 	}
293 
294 	return drm_vma_node_offset_addr(&obj->vma_node);
295 }
296 
297 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
298 {
299 	uint64_t offset;
300 
301 	msm_gem_lock(obj);
302 	offset = mmap_offset(obj);
303 	msm_gem_unlock(obj);
304 	return offset;
305 }
306 
307 static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
308 		struct msm_gem_address_space *aspace)
309 {
310 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
311 	struct msm_gem_vma *vma;
312 
313 	GEM_WARN_ON(!msm_gem_is_locked(obj));
314 
315 	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
316 	if (!vma)
317 		return ERR_PTR(-ENOMEM);
318 
319 	vma->aspace = aspace;
320 
321 	list_add_tail(&vma->list, &msm_obj->vmas);
322 
323 	return vma;
324 }
325 
326 static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
327 		struct msm_gem_address_space *aspace)
328 {
329 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
330 	struct msm_gem_vma *vma;
331 
332 	GEM_WARN_ON(!msm_gem_is_locked(obj));
333 
334 	list_for_each_entry(vma, &msm_obj->vmas, list) {
335 		if (vma->aspace == aspace)
336 			return vma;
337 	}
338 
339 	return NULL;
340 }
341 
342 static void del_vma(struct msm_gem_vma *vma)
343 {
344 	if (!vma)
345 		return;
346 
347 	list_del(&vma->list);
348 	kfree(vma);
349 }
350 
351 /*
352  * If close is true, this also closes the VMA (releasing the allocated
353  * iova range) in addition to removing the iommu mapping.  In the eviction
354  * case (!close), we keep the iova allocated, but only remove the iommu
355  * mapping.
356  */
357 static void
358 put_iova_spaces(struct drm_gem_object *obj, bool close)
359 {
360 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
361 	struct msm_gem_vma *vma;
362 
363 	GEM_WARN_ON(!msm_gem_is_locked(obj));
364 
365 	list_for_each_entry(vma, &msm_obj->vmas, list) {
366 		if (vma->aspace) {
367 			msm_gem_purge_vma(vma->aspace, vma);
368 			if (close)
369 				msm_gem_close_vma(vma->aspace, vma);
370 		}
371 	}
372 }
373 
374 /* Called with msm_obj locked */
375 static void
376 put_iova_vmas(struct drm_gem_object *obj)
377 {
378 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
379 	struct msm_gem_vma *vma, *tmp;
380 
381 	GEM_WARN_ON(!msm_gem_is_locked(obj));
382 
383 	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
384 		del_vma(vma);
385 	}
386 }
387 
388 static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
389 		struct msm_gem_address_space *aspace,
390 		u64 range_start, u64 range_end)
391 {
392 	struct msm_gem_vma *vma;
393 
394 	GEM_WARN_ON(!msm_gem_is_locked(obj));
395 
396 	vma = lookup_vma(obj, aspace);
397 
398 	if (!vma) {
399 		int ret;
400 
401 		vma = add_vma(obj, aspace);
402 		if (IS_ERR(vma))
403 			return vma;
404 
405 		ret = msm_gem_init_vma(aspace, vma, obj->size,
406 			range_start, range_end);
407 		if (ret) {
408 			del_vma(vma);
409 			return ERR_PTR(ret);
410 		}
411 	} else {
412 		GEM_WARN_ON(vma->iova < range_start);
413 		GEM_WARN_ON((vma->iova + obj->size) > range_end);
414 	}
415 
416 	return vma;
417 }
418 
419 int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
420 {
421 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
422 	struct page **pages;
423 	int ret, prot = IOMMU_READ;
424 
425 	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
426 		prot |= IOMMU_WRITE;
427 
428 	if (msm_obj->flags & MSM_BO_MAP_PRIV)
429 		prot |= IOMMU_PRIV;
430 
431 	if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
432 		prot |= IOMMU_CACHE;
433 
434 	GEM_WARN_ON(!msm_gem_is_locked(obj));
435 
436 	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
437 		return -EBUSY;
438 
439 	pages = get_pages(obj);
440 	if (IS_ERR(pages))
441 		return PTR_ERR(pages);
442 
443 	ret = msm_gem_map_vma(vma->aspace, vma, prot, msm_obj->sgt, obj->size);
444 
445 	if (!ret)
446 		msm_obj->pin_count++;
447 
448 	return ret;
449 }
450 
451 void msm_gem_unpin_locked(struct drm_gem_object *obj)
452 {
453 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
454 
455 	GEM_WARN_ON(!msm_gem_is_locked(obj));
456 
457 	msm_obj->pin_count--;
458 	GEM_WARN_ON(msm_obj->pin_count < 0);
459 
460 	update_lru(obj);
461 }
462 
463 struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
464 					   struct msm_gem_address_space *aspace)
465 {
466 	return get_vma_locked(obj, aspace, 0, U64_MAX);
467 }
468 
469 static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
470 		struct msm_gem_address_space *aspace, uint64_t *iova,
471 		u64 range_start, u64 range_end)
472 {
473 	struct msm_gem_vma *vma;
474 	int ret;
475 
476 	GEM_WARN_ON(!msm_gem_is_locked(obj));
477 
478 	vma = get_vma_locked(obj, aspace, range_start, range_end);
479 	if (IS_ERR(vma))
480 		return PTR_ERR(vma);
481 
482 	ret = msm_gem_pin_vma_locked(obj, vma);
483 	if (!ret)
484 		*iova = vma->iova;
485 
486 	return ret;
487 }
488 
489 /*
490  * Get the iova and pin it.  Should have a matching put.
491  * Limits the iova to the specified range (in pages).
492  */
493 int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
494 		struct msm_gem_address_space *aspace, uint64_t *iova,
495 		u64 range_start, u64 range_end)
496 {
497 	int ret;
498 
499 	msm_gem_lock(obj);
500 	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
501 	msm_gem_unlock(obj);
502 
503 	return ret;
504 }
505 
506 /* get iova and pin it. Should have a matching put */
507 int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
508 		struct msm_gem_address_space *aspace, uint64_t *iova)
509 {
510 	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
511 }
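
/*
 * Illustrative pairing for the pinned-iova helpers (msm_gem_kernel_new()
 * and msm_gem_kernel_put() further down in this file are a real user of
 * this pattern):
 *
 *	uint64_t iova;
 *	ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *	if (ret)
 *		return ret;
 *	... program iova into the GPU/display ...
 *	msm_gem_unpin_iova(obj, aspace);
 */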
512 
513 /*
514  * Get an iova but don't pin it. Doesn't need a put because iovas are currently
515  * valid for the life of the object.
516  */
517 int msm_gem_get_iova(struct drm_gem_object *obj,
518 		struct msm_gem_address_space *aspace, uint64_t *iova)
519 {
520 	struct msm_gem_vma *vma;
521 	int ret = 0;
522 
523 	msm_gem_lock(obj);
524 	vma = get_vma_locked(obj, aspace, 0, U64_MAX);
525 	if (IS_ERR(vma)) {
526 		ret = PTR_ERR(vma);
527 	} else {
528 		*iova = vma->iova;
529 	}
530 	msm_gem_unlock(obj);
531 
532 	return ret;
533 }
534 
535 static int clear_iova(struct drm_gem_object *obj,
536 		      struct msm_gem_address_space *aspace)
537 {
538 	struct msm_gem_vma *vma = lookup_vma(obj, aspace);
539 
540 	if (!vma)
541 		return 0;
542 
543 	if (msm_gem_vma_inuse(vma))
544 		return -EBUSY;
545 
546 	msm_gem_purge_vma(vma->aspace, vma);
547 	msm_gem_close_vma(vma->aspace, vma);
548 	del_vma(vma);
549 
550 	return 0;
551 }
552 
553 /*
554  * Get the requested iova but don't pin it.  Fails if the requested iova is
555  * not available.  Doesn't need a put because iovas are currently valid for
556  * the life of the object.
557  *
558  * Setting an iova of zero will clear the vma.
559  */
560 int msm_gem_set_iova(struct drm_gem_object *obj,
561 		     struct msm_gem_address_space *aspace, uint64_t iova)
562 {
563 	int ret = 0;
564 
565 	msm_gem_lock(obj);
566 	if (!iova) {
567 		ret = clear_iova(obj, aspace);
568 	} else {
569 		struct msm_gem_vma *vma;
570 		vma = get_vma_locked(obj, aspace, iova, iova + obj->size);
571 		if (IS_ERR(vma)) {
572 			ret = PTR_ERR(vma);
573 		} else if (GEM_WARN_ON(vma->iova != iova)) {
574 			clear_iova(obj, aspace);
575 			ret = -EBUSY;
576 		}
577 	}
578 	msm_gem_unlock(obj);
579 
580 	return ret;
581 }
582 
583 /*
584  * Unpin an iova by updating the reference counts. The memory isn't actually
585  * purged until something else (shrinker, mm_notifier, destroy, etc.) decides
586  * to get rid of it.
587  */
588 void msm_gem_unpin_iova(struct drm_gem_object *obj,
589 		struct msm_gem_address_space *aspace)
590 {
591 	struct msm_gem_vma *vma;
592 
593 	msm_gem_lock(obj);
594 	vma = lookup_vma(obj, aspace);
595 	if (!GEM_WARN_ON(!vma)) {
596 		msm_gem_unpin_vma(vma);
597 		msm_gem_unpin_locked(obj);
598 	}
599 	msm_gem_unlock(obj);
600 }
601 
602 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
603 		struct drm_mode_create_dumb *args)
604 {
605 	args->pitch = align_pitch(args->width, args->bpp);
606 	args->size  = PAGE_ALIGN(args->pitch * args->height);
607 	return msm_gem_new_handle(dev, file, args->size,
608 			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
609 }
610 
611 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
612 		uint32_t handle, uint64_t *offset)
613 {
614 	struct drm_gem_object *obj;
615 	int ret = 0;
616 
617 	/* GEM does all our handle to object mapping */
618 	obj = drm_gem_object_lookup(file, handle);
619 	if (obj == NULL) {
620 		ret = -ENOENT;
621 		goto fail;
622 	}
623 
624 	*offset = msm_gem_mmap_offset(obj);
625 
626 	drm_gem_object_put(obj);
627 
628 fail:
629 	return ret;
630 }
631 
632 static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
633 {
634 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
635 	int ret = 0;
636 
637 	GEM_WARN_ON(!msm_gem_is_locked(obj));
638 
639 	if (obj->import_attach)
640 		return ERR_PTR(-ENODEV);
641 
642 	if (GEM_WARN_ON(msm_obj->madv > madv)) {
643 		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
644 			msm_obj->madv, madv);
645 		return ERR_PTR(-EBUSY);
646 	}
647 
648 	/* Increment vmap_count *before* the vmap() call, so the shrinker can
649 	 * check vmap_count (is_vunmapable()) outside of the msm_obj lock.
650 	 * This guarantees that we won't try to msm_gem_vunmap() this
651 	 * same object from within the vmap() call (while we already
652 	 * hold the msm_obj lock).
653 	 */
654 	msm_obj->vmap_count++;
655 
656 	if (!msm_obj->vaddr) {
657 		struct page **pages = get_pages(obj);
658 		if (IS_ERR(pages)) {
659 			ret = PTR_ERR(pages);
660 			goto fail;
661 		}
662 		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
663 				VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
664 		if (msm_obj->vaddr == NULL) {
665 			ret = -ENOMEM;
666 			goto fail;
667 		}
668 
669 		update_lru(obj);
670 	}
671 
672 	return msm_obj->vaddr;
673 
674 fail:
675 	msm_obj->vmap_count--;
676 	return ERR_PTR(ret);
677 }
678 
679 void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
680 {
681 	return get_vaddr(obj, MSM_MADV_WILLNEED);
682 }
683 
684 void *msm_gem_get_vaddr(struct drm_gem_object *obj)
685 {
686 	void *ret;
687 
688 	msm_gem_lock(obj);
689 	ret = msm_gem_get_vaddr_locked(obj);
690 	msm_gem_unlock(obj);
691 
692 	return ret;
693 }
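
/*
 * Kernel vmap access is also refcounted: each msm_gem_get_vaddr() must be
 * balanced by msm_gem_put_vaddr().  A minimal, hypothetical CPU-access
 * sequence:
 *
 *	void *ptr = msm_gem_get_vaddr(obj);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	memcpy(ptr, data, len);
 *	msm_gem_put_vaddr(obj);
 */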
694 
695 /*
696  * Don't use this!  It is for the very special case of dumping
697  * submits from GPU hangs or faults, where the bo may already
698  * be MSM_MADV_DONTNEED, but we know the buffer is still on the
699  * active list.
700  */
701 void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
702 {
703 	return get_vaddr(obj, __MSM_MADV_PURGED);
704 }
705 
706 void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
707 {
708 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
709 
710 	GEM_WARN_ON(!msm_gem_is_locked(obj));
711 	GEM_WARN_ON(msm_obj->vmap_count < 1);
712 
713 	msm_obj->vmap_count--;
714 }
715 
716 void msm_gem_put_vaddr(struct drm_gem_object *obj)
717 {
718 	msm_gem_lock(obj);
719 	msm_gem_put_vaddr_locked(obj);
720 	msm_gem_unlock(obj);
721 }
722 
723 /* Update madvise status.  Returns true if the object has not been
724  * purged, else false or -errno.
725  */
726 int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
727 {
728 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
729 
730 	msm_gem_lock(obj);
731 
732 	if (msm_obj->madv != __MSM_MADV_PURGED)
733 		msm_obj->madv = madv;
734 
735 	madv = msm_obj->madv;
736 
737 	/* If the obj is inactive, we might need to move it
738 	 * between inactive lists
739 	 */
740 	if (msm_obj->active_count == 0)
741 		update_lru(obj);
742 
743 	msm_gem_unlock(obj);
744 
745 	return (madv != __MSM_MADV_PURGED);
746 }
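
/*
 * The non-zero return value is what userspace sees as "retained", i.e.
 * whether the backing pages still exist after the madvise call.  The
 * ioctl plumbing lives outside this file (the MSM_GEM_MADVISE handler in
 * msm_drv.c is the expected caller; noted here as an assumption).
 */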
747 
748 void msm_gem_purge(struct drm_gem_object *obj)
749 {
750 	struct drm_device *dev = obj->dev;
751 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
752 
753 	GEM_WARN_ON(!msm_gem_is_locked(obj));
754 	GEM_WARN_ON(!is_purgeable(msm_obj));
755 
756 	/* Get rid of any iommu mapping(s): */
757 	put_iova_spaces(obj, true);
758 
759 	msm_gem_vunmap(obj);
760 
761 	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
762 
763 	put_pages(obj);
764 
765 	put_iova_vmas(obj);
766 
767 	msm_obj->madv = __MSM_MADV_PURGED;
768 	update_lru(obj);
769 
770 	drm_gem_free_mmap_offset(obj);
771 
772 	/* Our goal here is to return as much of the memory as possible
773 	 * back to the system, as we are called from OOM.
774 	 * To do this we must instruct the shmfs to drop all of its
775 	 * backing pages, *now*.
776 	 */
777 	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
778 
779 	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
780 			0, (loff_t)-1);
781 }
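
/*
 * Purge vs. evict: msm_gem_purge() above is the point of no return; the
 * shmem backing store is truncated, the contents are gone, and the object
 * is marked __MSM_MADV_PURGED.  msm_gem_evict() below is the gentler
 * variant: the pages are handed back to shmem (marked dirty, so they may
 * be swapped out) while the iova allocation is kept, and the contents can
 * be brought back in by a later get_pages().
 */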
782 
783 /*
784  * Unpin the backing pages and make them available to be swapped out.
785  */
786 void msm_gem_evict(struct drm_gem_object *obj)
787 {
788 	struct drm_device *dev = obj->dev;
789 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
790 
791 	GEM_WARN_ON(!msm_gem_is_locked(obj));
792 	GEM_WARN_ON(is_unevictable(msm_obj));
793 	GEM_WARN_ON(!msm_obj->evictable);
794 	GEM_WARN_ON(msm_obj->active_count);
795 
796 	/* Get rid of any iommu mapping(s): */
797 	put_iova_spaces(obj, false);
798 
799 	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
800 
801 	put_pages(obj);
802 
803 	update_lru(obj);
804 }
805 
806 void msm_gem_vunmap(struct drm_gem_object *obj)
807 {
808 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
809 
810 	GEM_WARN_ON(!msm_gem_is_locked(obj));
811 
812 	if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
813 		return;
814 
815 	vunmap(msm_obj->vaddr);
816 	msm_obj->vaddr = NULL;
817 }
818 
819 void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
820 {
821 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
822 	struct msm_drm_private *priv = obj->dev->dev_private;
823 
824 	might_sleep();
825 	GEM_WARN_ON(!msm_gem_is_locked(obj));
826 	GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
827 	GEM_WARN_ON(msm_obj->dontneed);
828 
829 	if (msm_obj->active_count++ == 0) {
830 		mutex_lock(&priv->mm_lock);
831 		if (msm_obj->evictable)
832 			mark_unevictable(msm_obj);
833 		list_move_tail(&msm_obj->mm_list, &gpu->active_list);
834 		mutex_unlock(&priv->mm_lock);
835 	}
836 }
837 
838 void msm_gem_active_put(struct drm_gem_object *obj)
839 {
840 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
841 
842 	might_sleep();
843 	GEM_WARN_ON(!msm_gem_is_locked(obj));
844 
845 	if (--msm_obj->active_count == 0) {
846 		update_lru(obj);
847 	}
848 }
849 
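/*
 * Re-bucket an idle object onto one of the three inactive LRU lists based
 * on its madvise state:
 *
 *	inactive_willneed - has backing pages, not purgeable (evictable)
 *	inactive_dontneed - has backing pages, purgeable by the shrinker
 *	inactive_unpinned - no backing pages (evicted or purged)
 *
 * Objects with a non-zero active_count are left alone; they stay on the
 * GPU's active_list until msm_gem_active_put() drops the last reference.
 */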
850 static void update_lru(struct drm_gem_object *obj)
851 {
852 	struct msm_drm_private *priv = obj->dev->dev_private;
853 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
854 
855 	GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
856 
857 	if (msm_obj->active_count != 0)
858 		return;
859 
860 	mutex_lock(&priv->mm_lock);
861 
862 	if (msm_obj->dontneed)
863 		mark_unpurgeable(msm_obj);
864 	if (msm_obj->evictable)
865 		mark_unevictable(msm_obj);
866 
867 	list_del(&msm_obj->mm_list);
868 	if ((msm_obj->madv == MSM_MADV_WILLNEED) && msm_obj->sgt) {
869 		list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
870 		mark_evictable(msm_obj);
871 	} else if (msm_obj->madv == MSM_MADV_DONTNEED) {
872 		list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
873 		mark_purgeable(msm_obj);
874 	} else {
875 		GEM_WARN_ON((msm_obj->madv != __MSM_MADV_PURGED) && msm_obj->sgt);
876 		list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
877 	}
878 
879 	mutex_unlock(&priv->mm_lock);
880 }
881 
882 bool msm_gem_active(struct drm_gem_object *obj)
883 {
884 	GEM_WARN_ON(!msm_gem_is_locked(obj));
885 
886 	if (to_msm_bo(obj)->pin_count)
887 		return true;
888 
889 	return !dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true));
890 }
891 
892 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
893 {
894 	bool write = !!(op & MSM_PREP_WRITE);
895 	unsigned long remain =
896 		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
897 	long ret;
898 
899 	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
900 				    true,  remain);
901 	if (ret == 0)
902 		return remain == 0 ? -EBUSY : -ETIMEDOUT;
903 	else if (ret < 0)
904 		return ret;
905 
906 	/* TODO cache maintenance */
907 
908 	return 0;
909 }
910 
911 int msm_gem_cpu_fini(struct drm_gem_object *obj)
912 {
913 	/* TODO cache maintenance */
914 	return 0;
915 }
916 
917 #ifdef CONFIG_DEBUG_FS
918 void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
919 		struct msm_gem_stats *stats)
920 {
921 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
922 	struct dma_resv *robj = obj->resv;
923 	struct msm_gem_vma *vma;
924 	uint64_t off = drm_vma_node_start(&obj->vma_node);
925 	const char *madv;
926 
927 	msm_gem_lock(obj);
928 
929 	stats->all.count++;
930 	stats->all.size += obj->size;
931 
932 	if (is_active(msm_obj)) {
933 		stats->active.count++;
934 		stats->active.size += obj->size;
935 	}
936 
937 	if (msm_obj->pages) {
938 		stats->resident.count++;
939 		stats->resident.size += obj->size;
940 	}
941 
942 	switch (msm_obj->madv) {
943 	case __MSM_MADV_PURGED:
944 		stats->purged.count++;
945 		stats->purged.size += obj->size;
946 		madv = " purged";
947 		break;
948 	case MSM_MADV_DONTNEED:
949 		stats->purgeable.count++;
950 		stats->purgeable.size += obj->size;
951 		madv = " purgeable";
952 		break;
953 	case MSM_MADV_WILLNEED:
954 	default:
955 		madv = "";
956 		break;
957 	}
958 
959 	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
960 			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
961 			obj->name, kref_read(&obj->refcount),
962 			off, msm_obj->vaddr);
963 
964 	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
965 
966 	if (!list_empty(&msm_obj->vmas)) {
967 
968 		seq_puts(m, "      vmas:");
969 
970 		list_for_each_entry(vma, &msm_obj->vmas, list) {
971 			const char *name, *comm;
972 			if (vma->aspace) {
973 				struct msm_gem_address_space *aspace = vma->aspace;
974 				struct task_struct *task =
975 					get_pid_task(aspace->pid, PIDTYPE_PID);
976 				if (task) {
977 					comm = kstrdup(task->comm, GFP_KERNEL);
978 					put_task_struct(task);
979 				} else {
980 					comm = NULL;
981 				}
982 				name = aspace->name;
983 			} else {
984 				name = comm = NULL;
985 			}
986 			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
987 				name, comm ? ":" : "", comm ? comm : "",
988 				vma->aspace, vma->iova,
989 				vma->mapped ? "mapped" : "unmapped",
990 				msm_gem_vma_inuse(vma));
991 			kfree(comm);
992 		}
993 
994 		seq_puts(m, "\n");
995 	}
996 
997 	dma_resv_describe(robj, m);
998 	msm_gem_unlock(obj);
999 }
1000 
1001 void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
1002 {
1003 	struct msm_gem_stats stats = {};
1004 	struct msm_gem_object *msm_obj;
1005 
1006 	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
1007 	list_for_each_entry(msm_obj, list, node) {
1008 		struct drm_gem_object *obj = &msm_obj->base;
1009 		seq_puts(m, "   ");
1010 		msm_gem_describe(obj, m, &stats);
1011 	}
1012 
1013 	seq_printf(m, "Total:     %4d objects, %9zu bytes\n",
1014 			stats.all.count, stats.all.size);
1015 	seq_printf(m, "Active:    %4d objects, %9zu bytes\n",
1016 			stats.active.count, stats.active.size);
1017 	seq_printf(m, "Resident:  %4d objects, %9zu bytes\n",
1018 			stats.resident.count, stats.resident.size);
1019 	seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
1020 			stats.purgeable.count, stats.purgeable.size);
1021 	seq_printf(m, "Purged:    %4d objects, %9zu bytes\n",
1022 			stats.purged.count, stats.purged.size);
1023 }
1024 #endif
1025 
1026 /* don't call directly!  Use drm_gem_object_put() */
1027 static void msm_gem_free_object(struct drm_gem_object *obj)
1028 {
1029 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
1030 	struct drm_device *dev = obj->dev;
1031 	struct msm_drm_private *priv = dev->dev_private;
1032 
1033 	mutex_lock(&priv->obj_lock);
1034 	list_del(&msm_obj->node);
1035 	mutex_unlock(&priv->obj_lock);
1036 
1037 	mutex_lock(&priv->mm_lock);
1038 	if (msm_obj->dontneed)
1039 		mark_unpurgeable(msm_obj);
1040 	list_del(&msm_obj->mm_list);
1041 	mutex_unlock(&priv->mm_lock);
1042 
1043 	/* object should not be on active list: */
1044 	GEM_WARN_ON(is_active(msm_obj));
1045 
1046 	put_iova_spaces(obj, true);
1047 
1048 	if (obj->import_attach) {
1049 		GEM_WARN_ON(msm_obj->vaddr);
1050 
1051 		/* Don't drop the pages for an imported dmabuf, as they are not
1052 		 * ours; just free the array we allocated:
1053 		 */
1054 		kvfree(msm_obj->pages);
1055 
1056 		put_iova_vmas(obj);
1057 
1058 		drm_prime_gem_destroy(obj, msm_obj->sgt);
1059 	} else {
1060 		msm_gem_vunmap(obj);
1061 		put_pages(obj);
1062 		put_iova_vmas(obj);
1063 	}
1064 
1065 	drm_gem_object_release(obj);
1066 
1067 	kfree(msm_obj);
1068 }
1069 
1070 static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
1071 {
1072 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
1073 
1074 	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
1075 	vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
1076 
1077 	return 0;
1078 }
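
/*
 * Note the VM_PFNMAP above: userspace mappings are populated lazily by
 * msm_gem_fault(), which inserts raw pfns with vmf_insert_pfn() rather
 * than refcounted struct pages, and the page protection is made
 * write-combined for MSM_BO_WC objects via msm_gem_pgprot().
 */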
1079 
1080 /* convenience method to construct a GEM buffer object, and userspace handle */
1081 int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
1082 		uint32_t size, uint32_t flags, uint32_t *handle,
1083 		char *name)
1084 {
1085 	struct drm_gem_object *obj;
1086 	int ret;
1087 
1088 	obj = msm_gem_new(dev, size, flags);
1089 
1090 	if (IS_ERR(obj))
1091 		return PTR_ERR(obj);
1092 
1093 	if (name)
1094 		msm_gem_object_set_name(obj, "%s", name);
1095 
1096 	ret = drm_gem_handle_create(file, obj, handle);
1097 
1098 	/* drop reference from allocate - handle holds it now */
1099 	drm_gem_object_put(obj);
1100 
1101 	return ret;
1102 }
1103 
1104 static const struct vm_operations_struct vm_ops = {
1105 	.fault = msm_gem_fault,
1106 	.open = drm_gem_vm_open,
1107 	.close = drm_gem_vm_close,
1108 };
1109 
1110 static const struct drm_gem_object_funcs msm_gem_object_funcs = {
1111 	.free = msm_gem_free_object,
1112 	.pin = msm_gem_prime_pin,
1113 	.unpin = msm_gem_prime_unpin,
1114 	.get_sg_table = msm_gem_prime_get_sg_table,
1115 	.vmap = msm_gem_prime_vmap,
1116 	.vunmap = msm_gem_prime_vunmap,
1117 	.mmap = msm_gem_object_mmap,
1118 	.vm_ops = &vm_ops,
1119 };
1120 
1121 static int msm_gem_new_impl(struct drm_device *dev,
1122 		uint32_t size, uint32_t flags,
1123 		struct drm_gem_object **obj)
1124 {
1125 	struct msm_drm_private *priv = dev->dev_private;
1126 	struct msm_gem_object *msm_obj;
1127 
1128 	switch (flags & MSM_BO_CACHE_MASK) {
1129 	case MSM_BO_CACHED:
1130 	case MSM_BO_WC:
1131 		break;
1132 	case MSM_BO_CACHED_COHERENT:
1133 		if (priv->has_cached_coherent)
1134 			break;
1135 		fallthrough;
1136 	default:
1137 		DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
1138 				(flags & MSM_BO_CACHE_MASK));
1139 		return -EINVAL;
1140 	}
1141 
1142 	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
1143 	if (!msm_obj)
1144 		return -ENOMEM;
1145 
1146 	msm_obj->flags = flags;
1147 	msm_obj->madv = MSM_MADV_WILLNEED;
1148 
1149 	INIT_LIST_HEAD(&msm_obj->node);
1150 	INIT_LIST_HEAD(&msm_obj->vmas);
1151 
1152 	*obj = &msm_obj->base;
1153 	(*obj)->funcs = &msm_gem_object_funcs;
1154 
1155 	return 0;
1156 }
1157 
1158 struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
1159 {
1160 	struct msm_drm_private *priv = dev->dev_private;
1161 	struct msm_gem_object *msm_obj;
1162 	struct drm_gem_object *obj = NULL;
1163 	bool use_vram = false;
1164 	int ret;
1165 
1166 	size = PAGE_ALIGN(size);
1167 
1168 	if (!msm_use_mmu(dev))
1169 		use_vram = true;
1170 	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
1171 		use_vram = true;
1172 
1173 	if (GEM_WARN_ON(use_vram && !priv->vram.size))
1174 		return ERR_PTR(-EINVAL);
1175 
1176 	/* Disallow zero-sized objects as they make the underlying
1177 	 * infrastructure grumpy.
1178 	 */
1179 	if (size == 0)
1180 		return ERR_PTR(-EINVAL);
1181 
1182 	ret = msm_gem_new_impl(dev, size, flags, &obj);
1183 	if (ret)
1184 		return ERR_PTR(ret);
1185 
1186 	msm_obj = to_msm_bo(obj);
1187 
1188 	if (use_vram) {
1189 		struct msm_gem_vma *vma;
1190 		struct page **pages;
1191 
1192 		drm_gem_private_object_init(dev, obj, size);
1193 
1194 		msm_gem_lock(obj);
1195 
1196 		vma = add_vma(obj, NULL);
1197 		msm_gem_unlock(obj);
1198 		if (IS_ERR(vma)) {
1199 			ret = PTR_ERR(vma);
1200 			goto fail;
1201 		}
1202 
1203 		to_msm_bo(obj)->vram_node = &vma->node;
1204 
1205 		/* The call chain get_pages() -> update_lru() tries to
1206 		 * access msm_obj->mm_list, but it is not initialized yet.
1207 		 * To avoid a NULL pointer dereference, initialize
1208 		 * mm_list to be empty.
1209 		 */
1210 		INIT_LIST_HEAD(&msm_obj->mm_list);
1211 
1212 		msm_gem_lock(obj);
1213 		pages = get_pages(obj);
1214 		msm_gem_unlock(obj);
1215 		if (IS_ERR(pages)) {
1216 			ret = PTR_ERR(pages);
1217 			goto fail;
1218 		}
1219 
1220 		vma->iova = physaddr(obj);
1221 	} else {
1222 		ret = drm_gem_object_init(dev, obj, size);
1223 		if (ret)
1224 			goto fail;
1225 		/*
1226 		 * Our buffers are kept pinned, so allocating them from the
1227 		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
1228 		 * See the comments above new_inode() for why this is required _and_
1229 		 * expected if you're going to pin these pages.
1230 		 */
1231 		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
1232 	}
1233 
1234 	mutex_lock(&priv->mm_lock);
1235 	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
1236 	mutex_unlock(&priv->mm_lock);
1237 
1238 	mutex_lock(&priv->obj_lock);
1239 	list_add_tail(&msm_obj->node, &priv->objects);
1240 	mutex_unlock(&priv->obj_lock);
1241 
1242 	return obj;
1243 
1244 fail:
1245 	drm_gem_object_put(obj);
1246 	return ERR_PTR(ret);
1247 }
1248 
1249 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
1250 		struct dma_buf *dmabuf, struct sg_table *sgt)
1251 {
1252 	struct msm_drm_private *priv = dev->dev_private;
1253 	struct msm_gem_object *msm_obj;
1254 	struct drm_gem_object *obj;
1255 	uint32_t size;
1256 	int ret, npages;
1257 
1258 	/* if we don't have an IOMMU, don't bother pretending we can import: */
1259 	if (!msm_use_mmu(dev)) {
1260 		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
1261 		return ERR_PTR(-EINVAL);
1262 	}
1263 
1264 	size = PAGE_ALIGN(dmabuf->size);
1265 
1266 	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
1267 	if (ret)
1268 		return ERR_PTR(ret);
1269 
1270 	drm_gem_private_object_init(dev, obj, size);
1271 
1272 	npages = size / PAGE_SIZE;
1273 
1274 	msm_obj = to_msm_bo(obj);
1275 	msm_gem_lock(obj);
1276 	msm_obj->sgt = sgt;
1277 	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
1278 	if (!msm_obj->pages) {
1279 		msm_gem_unlock(obj);
1280 		ret = -ENOMEM;
1281 		goto fail;
1282 	}
1283 
1284 	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
1285 	if (ret) {
1286 		msm_gem_unlock(obj);
1287 		goto fail;
1288 	}
1289 
1290 	msm_gem_unlock(obj);
1291 
1292 	mutex_lock(&priv->mm_lock);
1293 	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
1294 	mutex_unlock(&priv->mm_lock);
1295 
1296 	mutex_lock(&priv->obj_lock);
1297 	list_add_tail(&msm_obj->node, &priv->objects);
1298 	mutex_unlock(&priv->obj_lock);
1299 
1300 	return obj;
1301 
1302 fail:
1303 	drm_gem_object_put(obj);
1304 	return ERR_PTR(ret);
1305 }
1306 
1307 void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
1308 		uint32_t flags, struct msm_gem_address_space *aspace,
1309 		struct drm_gem_object **bo, uint64_t *iova)
1310 {
1311 	void *vaddr;
1312 	struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
1313 	int ret;
1314 
1315 	if (IS_ERR(obj))
1316 		return ERR_CAST(obj);
1317 
1318 	if (iova) {
1319 		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
1320 		if (ret)
1321 			goto err;
1322 	}
1323 
1324 	vaddr = msm_gem_get_vaddr(obj);
1325 	if (IS_ERR(vaddr)) {
1326 		msm_gem_unpin_iova(obj, aspace);
1327 		ret = PTR_ERR(vaddr);
1328 		goto err;
1329 	}
1330 
1331 	if (bo)
1332 		*bo = obj;
1333 
1334 	return vaddr;
1335 err:
1336 	drm_gem_object_put(obj);
1337 
1338 	return ERR_PTR(ret);
1339 
1340 }
1341 
1342 void msm_gem_kernel_put(struct drm_gem_object *bo,
1343 		struct msm_gem_address_space *aspace)
1344 {
1345 	if (IS_ERR_OR_NULL(bo))
1346 		return;
1347 
1348 	msm_gem_put_vaddr(bo);
1349 	msm_gem_unpin_iova(bo, aspace);
1350 	drm_gem_object_put(bo);
1351 }
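
/*
 * Typical kernel-internal usage of the two helpers above (illustrative
 * only; error handling beyond the sketch is up to the caller):
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *vaddr = msm_gem_kernel_new(dev, size, MSM_BO_WC,
 *					 aspace, &bo, &iova);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	... fill the buffer, hand iova to the hardware ...
 *	msm_gem_kernel_put(bo, aspace);
 */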
1352 
1353 void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
1354 {
1355 	struct msm_gem_object *msm_obj = to_msm_bo(bo);
1356 	va_list ap;
1357 
1358 	if (!fmt)
1359 		return;
1360 
1361 	va_start(ap, fmt);
1362 	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
1363 	va_end(ap);
1364 }
1365