
Searched full:unmapped (Results 1 – 25 of 520) sorted by relevance


/openbmc/linux/drivers/gpu/drm/msm/
msm_gem_shrinker.c:189 unsigned idx, unmapped = 0; in msm_gem_shrinker_vmap() local
192 for (idx = 0; lrus[idx] && unmapped < vmap_shrink_limit; idx++) { in msm_gem_shrinker_vmap()
193 unmapped += drm_gem_lru_scan(lrus[idx], in msm_gem_shrinker_vmap()
194 vmap_shrink_limit - unmapped, in msm_gem_shrinker_vmap()
199 *(unsigned long *)ptr += unmapped; in msm_gem_shrinker_vmap()
201 if (unmapped > 0) in msm_gem_shrinker_vmap()
202 trace_msm_gem_purge_vmaps(unmapped); in msm_gem_shrinker_vmap()
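The msm_gem_shrinker.c hits above all belong to one loop shape: walk a NULL-terminated array of LRUs, give each one only the budget that remains, and accumulate how much was actually purged. A minimal sketch of that shape, assuming made-up names (scan_until_limit and scan_fn are illustrative, not the kernel API):

/* Sketch of the budget-accumulation loop seen in msm_gem_shrinker_vmap(). */
struct lru;					/* opaque in this sketch */

/* hypothetical callback: purge at most 'budget' entries, return how many */
typedef unsigned (*scan_fn)(struct lru *lru, unsigned budget);

static unsigned scan_until_limit(struct lru **lrus, scan_fn scan, unsigned limit)
{
	unsigned idx, unmapped = 0;

	/* stop at the NULL terminator or once the limit is reached */
	for (idx = 0; lrus[idx] && unmapped < limit; idx++)
		unmapped += scan(lrus[idx], limit - unmapped);

	return unmapped;
}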
msm_gpu_trace.h:143 TP_PROTO(u32 unmapped),
144 TP_ARGS(unmapped),
146 __field(u32, unmapped)
149 __entry->unmapped = unmapped;
151 TP_printk("Purging %u vmaps", __entry->unmapped)
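Read together, the msm_gpu_trace.h fragments above are the pieces of a single tracepoint definition. Reassembled (with the usual trace-header boilerplate such as the TRACE_SYSTEM define, include guards, and the define_trace include omitted), it looks roughly like this:

#include <linux/tracepoint.h>

TRACE_EVENT(msm_gem_purge_vmaps,
	TP_PROTO(u32 unmapped),
	TP_ARGS(unmapped),
	TP_STRUCT__entry(
		__field(u32, unmapped)
	),
	TP_fast_assign(
		__entry->unmapped = unmapped;
	),
	TP_printk("Purging %u vmaps", __entry->unmapped)
);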
msm_iommu.c:98 size_t unmapped, pgsize, count; in msm_iommu_pagetable_unmap() local
102 unmapped = ops->unmap_pages(ops, iova, pgsize, count, NULL); in msm_iommu_pagetable_unmap()
103 if (!unmapped) in msm_iommu_pagetable_unmap()
106 iova += unmapped; in msm_iommu_pagetable_unmap()
107 size -= unmapped; in msm_iommu_pagetable_unmap()
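The msm_iommu.c hit, like the AMD, iommufd, ipu3 and virtio-iommu hits further down, is an instance of the same unmap loop: tear down a chunk, advance the IOVA by however many bytes were actually unmapped, and stop early if no progress was made. A self-contained sketch of that shape, where unmap_chunk() is a stand-in and not the io-pgtable API:

#include <stddef.h>
#include <stdint.h>

typedef uint64_t iova_t;

/* hypothetical: returns bytes unmapped at iova, 0 if nothing could be done */
extern size_t unmap_chunk(iova_t iova, size_t size);

static size_t unmap_range(iova_t iova, size_t size)
{
	size_t total = 0;

	while (size) {
		size_t unmapped = unmap_chunk(iova, size);

		if (!unmapped)		/* no progress: report the partial total */
			break;

		iova  += unmapped;
		size  -= unmapped;
		total += unmapped;
	}

	return total;
}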
/openbmc/linux/include/trace/events/
huge_memory.h:59 int referenced, int none_or_zero, int status, int unmapped),
61 TP_ARGS(mm, page, writable, referenced, none_or_zero, status, unmapped),
70 __field(int, unmapped)
80 __entry->unmapped = unmapped;
83 …TP_printk("mm=%p, scan_pfn=0x%lx, writable=%d, referenced=%d, none_or_zero=%d, status=%s, unmapped
90 __entry->unmapped)
/openbmc/linux/mm/damon/
vaddr.c:104 * Find three regions separated by two biggest unmapped regions
110 * separated by the two biggest unmapped regions in the space. Please refer to
199 * is actually mapped to the memory and accessed, monitoring the unmapped
204 * with the noise by simply identifying the unmapped areas as a region that
206 * unmapped areas inside will make the adaptive mechanism quite complex. That
207 * said, too huge unmapped areas inside the monitoring target should be removed
212 * between the three regions are the two biggest unmapped areas in the given
214 * end of the mappings and the two biggest unmapped areas of the address space.
223 * region and the stack will be two biggest unmapped regions. Because these
225 * two biggest unmapped regions will be sufficient to make a trade-off.
[all …]
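The vaddr.c comments describe DAMON's heuristic: cover the mappings with just three regions, split at the two biggest unmapped gaps, so that huge never-accessed holes do not dilute the monitoring while keeping the region bookkeeping simple. A toy user-space sketch of that gap selection, assuming the mapped ranges arrive sorted and non-overlapping (illustrative only, not DAMON's code):

#include <stddef.h>

struct range { unsigned long start, end; };

/* Split sorted mappings into three regions separated by the two largest gaps.
 * Returns 0 on success, -1 if there are not two non-empty gaps to split at. */
static int three_regions(const struct range *maps, size_t n, struct range out[3])
{
	size_t i, g1 = 0, g2 = 0;	/* gap i is the hole after maps[i] */
	unsigned long s1 = 0, s2 = 0;	/* sizes of the two biggest gaps */

	if (n < 3)
		return -1;

	for (i = 0; i + 1 < n; i++) {
		unsigned long gap = maps[i + 1].start - maps[i].end;

		if (gap > s1) {
			s2 = s1; g2 = g1;
			s1 = gap; g1 = i;
		} else if (gap > s2) {
			s2 = gap; g2 = i;
		}
	}

	if (!s2)			/* fewer than two real gaps */
		return -1;

	if (g1 > g2) {			/* keep the split points in address order */
		size_t t = g1; g1 = g2; g2 = t;
	}

	out[0] = (struct range){ maps[0].start,      maps[g1].end };
	out[1] = (struct range){ maps[g1 + 1].start, maps[g2].end };
	out[2] = (struct range){ maps[g2 + 1].start, maps[n - 1].end };
	return 0;
}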
vaddr-test.h:45 * regions should not include the two biggest unmapped areas in the original
48 * Because these two unmapped areas are very huge but obviously never accessed,
53 * unmapped areas. After that, based on the information, it constructs the
61 * and end with 305. The process also has three unmapped areas, 25-200,
63 * unmapped areas, and thus it should be converted to three regions of 10-25,
/openbmc/linux/drivers/scsi/lpfc/
lpfc_disc.h:229 * nodes transition from the unmapped to the mapped list.
248 * and put on the unmapped list. For ADISC processing, the node is taken off
249 * the ADISC list and placed on either the mapped or unmapped list (depending
250 * on its previous state). Once on the unmapped list, a PRLI is issued and the
253 * node, the node is taken off the unmapped list. The binding list is checked
255 * assignment is unsuccessful, the node is left on the unmapped list. If
260 * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
265 * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
282 * unmapped lists.
/openbmc/linux/drivers/iommu/amd/
io_pgtable_v2.c:296 unsigned long unmapped = 0; in iommu_v2_unmap_pages() local
303 while (unmapped < size) { in iommu_v2_unmap_pages()
306 return unmapped; in iommu_v2_unmap_pages()
311 unmapped += unmap_size; in iommu_v2_unmap_pages()
314 return unmapped; in iommu_v2_unmap_pages()
io_pgtable.c:444 unsigned long long unmapped; in iommu_v1_unmap_pages() local
451 unmapped = 0; in iommu_v1_unmap_pages()
453 while (unmapped < size) { in iommu_v1_unmap_pages()
462 return unmapped; in iommu_v1_unmap_pages()
466 unmapped += unmap_size; in iommu_v1_unmap_pages()
469 return unmapped; in iommu_v1_unmap_pages()
/openbmc/linux/Documentation/devicetree/bindings/interrupt-controller/
ti,sci-inta.yaml:37 Unmapped events ---->| | umapidx |-------------------------> Global events
81 ti,unmapped-event-sources:
86 Array of phandles to DMA controllers where the unmapped events originate.
/openbmc/linux/drivers/vfio/
vfio_iommu_type1.c:986 size_t unmapped = 0; in unmap_unpin_fast() local
990 unmapped = iommu_unmap_fast(domain->domain, *iova, len, in unmap_unpin_fast()
993 if (!unmapped) { in unmap_unpin_fast()
998 entry->len = unmapped; in unmap_unpin_fast()
1001 *iova += unmapped; in unmap_unpin_fast()
1010 if (*unmapped_cnt >= VFIO_IOMMU_TLB_SYNC_MAX || !unmapped) { in unmap_unpin_fast()
1016 return unmapped; in unmap_unpin_fast()
1024 size_t unmapped = iommu_unmap(domain->domain, *iova, len); in unmap_unpin_slow() local
1026 if (unmapped) { in unmap_unpin_slow()
1029 unmapped >> PAGE_SHIFT, in unmap_unpin_slow()
[all …]
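The vfio_iommu_type1.c hits show a batching policy around the fast path: unmap with iommu_unmap_fast(), queue the result, and only pay for the expensive TLB sync once a batch fills up or the fast path fails. A simplified sketch of that policy, assuming stand-in helpers (unmap_fast(), flush_queued() and SYNC_BATCH_MAX are illustrative, not the VFIO or IOMMU API):

#include <stddef.h>

#define SYNC_BATCH_MAX 512

extern size_t unmap_fast(unsigned long iova, size_t len);	/* hypothetical */
extern void flush_queued(void);					/* hypothetical */

static size_t unmap_batched(unsigned long iova, size_t len, unsigned *batched)
{
	size_t unmapped = unmap_fast(iova, len);

	if (unmapped)
		(*batched)++;

	/* Sync when the batch is full, or immediately when the fast path made
	 * no progress so the caller can fall back to the slow path cleanly. */
	if (*batched >= SYNC_BATCH_MAX || !unmapped) {
		flush_queued();
		*batched = 0;
	}

	return unmapped;
}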
/openbmc/linux/mm/
migrate_device.c:65 unsigned long addr = start, unmapped = 0; in migrate_vma_collect_pmd() local
258 unmapped++; in migrate_vma_collect_pmd()
270 if (unmapped) in migrate_vma_collect_pmd()
363 unsigned long unmapped = 0; in migrate_device_unmap() local
373 unmapped++; in migrate_device_unmap()
411 unmapped++; in migrate_device_unmap()
430 return unmapped; in migrate_device_unmap()
460 * and unmapped, check whether each page is pinned or not. Pages that aren't
545 * At this point pages are locked and unmapped, and thus they have in migrate_vma_setup()
709 * called if the page could not be unmapped. in __migrate_device_pages()
/openbmc/linux/include/linux/dma/
k3-psil.h:56 * @flow_start: PKDMA flow range start of mapped channel. Unmapped
58 * @flow_num: PKDMA flow count of mapped channel. Unmapped channels
/openbmc/linux/Documentation/translations/zh_CN/mm/damon/
design.rst:57 <BIG UNMAPPED REGION 1>
61 <BIG UNMAPPED REGION 2>
/openbmc/linux/drivers/gpu/drm/nouveau/
nouveau_exec.c:35 * mapping. If such memory backed mappings are unmapped the kernel will make
38 * will result in those memory backed mappings being unmapped first.
48 * backed mappings being mapped and unmapped, either within a single or multiple
54 * of the previously unmapped sparse mapping within the same VM_BIND ioctl
56 * range of the previously unmapped sparse mapping within the same VM_BIND
/openbmc/linux/Documentation/networking/device_drivers/ethernet/marvell/
octeontx2.rst:172 - Error due to operation of unmapped PF.
186 - Error due to unmapped slot.
236 - Receive packet on an unmapped PF.
248 - Error due to unmapped slot.
290 Rx on unmapped PF_FUNC
/openbmc/hiomapd/vpnor/
backend.cpp:326 MSG_INFO("Host requested unmapped region of %" PRId32 in vpnor_copy()
386 " bytes to unmapped offset 0x%" PRIx32 "\n", in vpnor_write()
433 MSG_ERR("Try to write unmapped area (offset=0x%lx)\n", e.base); in vpnor_validate()
436 * Writes to unmapped areas are not meaningful, so deny the request. in vpnor_validate()
505 MSG_ERR("Aligned offset is unmapped area (offset=0x%lx)\n", e.base); in vpnor_align_offset()
508 * Writes to unmapped areas are not meaningful, so deny the request. in vpnor_align_offset()
/openbmc/linux/lib/
devres.c:67 * Managed ioremap(). Map is automatically unmapped on driver detach.
82 * Managed ioremap_uc(). Map is automatically unmapped on driver detach.
97 * Managed ioremap_wc(). Map is automatically unmapped on driver detach.
271 * Managed ioport_map(). Map is automatically unmapped on driver
341 * recorded in the iomap table are automatically unmapped on driver
371 * Managed pci_iomap(). Map is automatically unmapped on driver
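The devres.c hits are the documentation blurbs for the managed I/O mapping helpers: whatever devm_ioremap() and friends map is unmapped automatically when the driver detaches, so the driver never calls iounmap() itself. A minimal probe sketch of that pattern (the device itself is made up for illustration):

#include <linux/platform_device.h>
#include <linux/io.h>

static int demo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/* mapping is torn down by devres on driver detach; no iounmap() */
	base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	/* ... use base ... */
	return 0;
}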
/openbmc/linux/Documentation/mm/damon/
design.rst:74 mapped to the physical memory and accessed. Thus, tracking the unmapped
78 cases. That said, too huge unmapped areas inside the monitoring target should
83 gaps between the three regions are the two biggest unmapped areas in the given
84 address space. The two biggest unmapped areas would be the gap between the
91 <BIG UNMAPPED REGION 1>
95 <BIG UNMAPPED REGION 2>
211 virtual memory could be dynamically mapped and unmapped. Physical memory could
/openbmc/linux/arch/hexagon/include/asm/
processor.h:47 * Apparently the convention is that ld.so will ask for "unmapped" private
52 * you have to kick the "unmapped" base requests higher up.
/openbmc/linux/Documentation/arch/x86/x86_64/
5level-paging.rst:49 to look for unmapped area by specified address. If it's already
50 occupied, we look for unmapped area in *full* address space, rather than
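The 5level-paging.rst hit refers to the x86-64 policy that, on 5-level paging kernels, mmap() keeps returning addresses below the 47-bit boundary unless the caller explicitly hints above it; only if the hinted area is occupied does the search widen to the full address space. A small user-space illustration of passing such a hint (what comes back above 47 bits of course depends on the running kernel's configuration):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 1UL << 20;

	/* no hint: stays within the traditional 47-bit user address space */
	void *low  = mmap(NULL, len, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	/* hint above the 47-bit boundary: opts in to the wider address space */
	void *high = mmap((void *)(1UL << 48), len, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	printf("no hint:   %p\n", low);
	printf("high hint: %p\n", high);
	return 0;
}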
/openbmc/linux/drivers/iommu/iommufd/
ioas.c:300 unsigned long unmapped = 0; in iommufd_ioas_unmap() local
308 rc = iopt_unmap_all(&ioas->iopt, &unmapped); in iommufd_ioas_unmap()
317 &unmapped); in iommufd_ioas_unmap()
322 cmd->length = unmapped; in iommufd_ioas_unmap()
io_pagetable.c:477 unsigned long last, unsigned long *unmapped) in iopt_unmap_iova_range() argument
550 if (unmapped) in iopt_unmap_iova_range()
551 *unmapped = unmapped_bytes; in iopt_unmap_iova_range()
560 * @unmapped: Return number of bytes unmapped
566 unsigned long length, unsigned long *unmapped) in iopt_unmap_iova() argument
576 return iopt_unmap_iova_range(iopt, iova, iova_last, unmapped); in iopt_unmap_iova()
579 int iopt_unmap_all(struct io_pagetable *iopt, unsigned long *unmapped) in iopt_unmap_all() argument
583 rc = iopt_unmap_iova_range(iopt, 0, ULONG_MAX, unmapped); in iopt_unmap_all()
701 * will be unmapped from the domain. The domain must already be removed from the
/openbmc/linux/drivers/staging/media/ipu3/
ipu3-mmu.c:383 size_t unmapped_page, unmapped = 0; in imgu_mmu_unmap() local
402 while (unmapped < size) { in imgu_mmu_unmap()
407 dev_dbg(mmu->dev, "unmapped: iova 0x%lx size 0x%zx\n", in imgu_mmu_unmap()
411 unmapped += unmapped_page; in imgu_mmu_unmap()
416 return unmapped; in imgu_mmu_unmap()
/openbmc/linux/drivers/iommu/
virtio-iommu.c:342 * On success, returns the number of unmapped bytes
347 size_t unmapped = 0; in viommu_del_mappings() local
367 unmapped += mapping->iova.last - mapping->iova.start + 1; in viommu_del_mappings()
374 return unmapped; in viommu_del_mappings()
863 size_t unmapped; in viommu_unmap_pages() local
868 unmapped = viommu_del_mappings(vdomain, iova, iova + size - 1); in viommu_unmap_pages()
869 if (unmapped < size) in viommu_unmap_pages()
874 return unmapped; in viommu_unmap_pages()
880 .virt_end = cpu_to_le64(iova + unmapped - 1), in viommu_unmap_pages()
884 return ret ? 0 : unmapped; in viommu_unmap_pages()
