/openbmc/linux/mm/
cma.c
      39  struct cma cma_areas[MAX_CMA_AREAS];
      43  phys_addr_t cma_get_base(const struct cma *cma)    in cma_get_base() argument
      45  return PFN_PHYS(cma->base_pfn);    in cma_get_base()
      48  unsigned long cma_get_size(const struct cma *cma)    in cma_get_size() argument
      50  return cma->count << PAGE_SHIFT;    in cma_get_size()
      53  const char *cma_get_name(const struct cma *cma)    in cma_get_name() argument
      55  return cma->name;    in cma_get_name()
      58  static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,    in cma_bitmap_aligned_mask() argument
      61  if (align_order <= cma->order_per_bit)    in cma_bitmap_aligned_mask()
      63  return (1UL << (align_order - cma->order_per_bit)) - 1;    in cma_bitmap_aligned_mask()
    [all …]

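The cma_bitmap_aligned_mask() lines above turn an allocation's alignment order into a mask over the CMA bitmap, where one bit covers 2^order_per_bit pages. A minimal userspace sketch of that arithmetic; the helper name and the example values are invented for illustration:

    #include <stdio.h>

    /*
     * Sketch: each bitmap bit covers (1 << order_per_bit) pages, so an
     * alignment of (1 << align_order) pages only needs a non-zero mask at
     * bitmap granularity when align_order exceeds order_per_bit.
     */
    static unsigned long bitmap_aligned_mask(unsigned int order_per_bit,
                                             unsigned int align_order)
    {
            if (align_order <= order_per_bit)
                    return 0;
            return (1UL << (align_order - order_per_bit)) - 1;
    }

    int main(void)
    {
            /* Hypothetical area with one page per bit, 2 MiB-aligned request. */
            printf("mask = %#lx\n", bitmap_aligned_mask(0, 9));  /* 0x1ff */
            printf("mask = %#lx\n", bitmap_aligned_mask(9, 9));  /* 0 */
            return 0;
    }
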
cma_debug.c
      36  struct cma *cma = data;    in cma_used_get() local
      39  spin_lock_irq(&cma->lock);    in cma_used_get()
      41  used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma));    in cma_used_get()
      42  spin_unlock_irq(&cma->lock);    in cma_used_get()
      43  *val = (u64)used << cma->order_per_bit;    in cma_used_get()
      51  struct cma *cma = data;    in cma_maxchunk_get() local
      54  unsigned long bitmap_maxno = cma_bitmap_maxno(cma);    in cma_maxchunk_get()
      56  spin_lock_irq(&cma->lock);    in cma_maxchunk_get()
      58  start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end);    in cma_maxchunk_get()
      61  end = find_next_bit(cma->bitmap, bitmap_maxno, start);    in cma_maxchunk_get()
    [all …]

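cma_used_get() above counts set bits with bitmap_weight(), while cma_maxchunk_get() scans the allocation bitmap under cma->lock for the longest run of clear bits, i.e. the largest free chunk at bitmap granularity. A standalone re-implementation of that scan over a plain int array, with find_next_zero_bit()/find_next_bit() open-coded as loops; the bitmap contents and size are made up:

    #include <stdio.h>

    #define MAXNO 16    /* hypothetical number of bitmap bits */

    /*
     * Walk alternating runs of clear bits (free) and set bits (allocated),
     * remembering the longest free run -- the same shape as the
     * find_next_zero_bit()/find_next_bit() loop excerpted above.
     */
    static unsigned long max_free_chunk(const int *bitmap, unsigned long maxno)
    {
            unsigned long start, end = 0, maxchunk = 0;

            for (;;) {
                    start = end;
                    while (start < maxno && bitmap[start])    /* find_next_zero_bit() */
                            start++;
                    if (start >= maxno)
                            break;
                    end = start;
                    while (end < maxno && !bitmap[end])       /* find_next_bit() */
                            end++;
                    if (end - start > maxchunk)
                            maxchunk = end - start;
            }
            return maxchunk;
    }

    int main(void)
    {
            int bitmap[MAXNO] = { 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1 };

            /* Longest free run here is 5 bits; each bit is 2^order_per_bit pages. */
            printf("max free chunk = %lu bits\n", max_free_chunk(bitmap, MAXNO));
            return 0;
    }
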
cma_sysfs.c
      17  void cma_sysfs_account_success_pages(struct cma *cma, unsigned long nr_pages)    in cma_sysfs_account_success_pages() argument
      19  atomic64_add(nr_pages, &cma->nr_pages_succeeded);    in cma_sysfs_account_success_pages()
      22  void cma_sysfs_account_fail_pages(struct cma *cma, unsigned long nr_pages)    in cma_sysfs_account_fail_pages() argument
      24  atomic64_add(nr_pages, &cma->nr_pages_failed);    in cma_sysfs_account_fail_pages()
      27  static inline struct cma *cma_from_kobj(struct kobject *kobj)    in cma_from_kobj()
      29  return container_of(kobj, struct cma_kobject, kobj)->cma;    in cma_from_kobj()
      35  struct cma *cma = cma_from_kobj(kobj);    in alloc_pages_success_show() local
      38  atomic64_read(&cma->nr_pages_succeeded));    in alloc_pages_success_show()
      45  struct cma *cma = cma_from_kobj(kobj);    in alloc_pages_fail_show() local
      47  return sysfs_emit(buf, "%llu\n", atomic64_read(&cma->nr_pages_failed));    in alloc_pages_fail_show()
    [all …]

cma.h
      10  struct cma *cma;    member
      13  struct cma {    struct
      36  extern struct cma cma_areas[MAX_CMA_AREAS];    argument
      39  static inline unsigned long cma_bitmap_maxno(struct cma *cma)    in cma_bitmap_maxno() argument
      41  return cma->count >> cma->order_per_bit;    in cma_bitmap_maxno()
      45  void cma_sysfs_account_success_pages(struct cma *cma, unsigned long nr_pages);
      46  void cma_sysfs_account_fail_pages(struct cma *cma, unsigned long nr_pages);
      48  static inline void cma_sysfs_account_success_pages(struct cma *cma,    in cma_sysfs_account_success_pages() argument
      50  static inline void cma_sysfs_account_fail_pages(struct cma *cma,    in cma_sysfs_account_fail_pages() argument

/openbmc/linux/include/linux/
cma.h
      27  struct cma;
      30  extern phys_addr_t cma_get_base(const struct cma *cma);
      31  extern unsigned long cma_get_size(const struct cma *cma);
      32  extern const char *cma_get_name(const struct cma *cma);
      37  bool fixed, const char *name, struct cma **res_cma,
      42  bool fixed, const char *name, struct cma **res_cma)    in cma_declare_contiguous()
      50  struct cma **res_cma);
      51  extern struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int align,
      53  extern bool cma_pages_valid(struct cma *cma, const struct page *pages, unsigned long count);
      54  extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count);
    [all …]

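Taken together, these declarations are the in-kernel CMA interface: a boot-time cma_declare_contiguous() call creates a struct cma, and cma_alloc()/cma_release() later hand out and return runs of contiguous pages. A hedged sketch of how a kernel-internal user might drive it; the area name, sizes, and surrounding functions are invented, the reservation normally happens from early arch setup code, and this only builds inside a kernel tree:

    #include <linux/cma.h>
    #include <linux/init.h>
    #include <linux/mm.h>
    #include <linux/sizes.h>

    static struct cma *demo_cma;    /* hypothetical area, filled in at boot */

    /*
     * Boot-time reservation: a 16 MiB area, no fixed base, no limit,
     * default alignment, one page per bitmap bit ("demo" is a made-up name).
     */
    static int __init demo_cma_reserve(void)
    {
            return cma_declare_contiguous(0, SZ_16M, 0, 0, 0, false,
                                          "demo", &demo_cma);
    }

    /*
     * Runtime use: take 16 contiguous pages aligned to 2^4 pages (64 KiB
     * with 4 KiB pages), then hand them back with cma_release().
     */
    static int demo_cma_use(void)
    {
            struct page *pages;

            pages = cma_alloc(demo_cma, 16, 4, false);
            if (!pages)
                    return -ENOMEM;

            /* ... use the pages ... */

            cma_release(demo_cma, pages, 16);
            return 0;
    }

The count/align/no-warn argument order follows the cma_alloc() prototype excerpted above and the cma_alloc(cma_heap->cma, pagecount, align, false) call visible in cma_heap.c further down.
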
dma-map-ops.h
      13  struct cma;
     114  extern struct cma *dma_contiguous_default_area;
     116  static inline struct cma *dev_get_cma_area(struct device *dev)    in dev_get_cma_area()
     125  phys_addr_t limit, struct cma **res_cma, bool fixed);
     136  static inline struct cma *dev_get_cma_area(struct device *dev)    in dev_get_cma_area()
     144  phys_addr_t base, phys_addr_t limit, struct cma **res_cma,    in dma_contiguous_reserve_area()

/openbmc/linux/kernel/dma/
contiguous.c
      61  struct cma *dma_contiguous_default_area;
     102  static struct cma *dma_contiguous_numa_area[MAX_NUMNODES];
     104  static struct cma *dma_contiguous_pernuma_area[MAX_NUMNODES];
     172  struct cma **cma;    in dma_numa_cma_reserve() local
     182  cma = &dma_contiguous_pernuma_area[nid];    in dma_numa_cma_reserve()
     185  0, false, name, cma, nid);    in dma_numa_cma_reserve()
     193  cma = &dma_contiguous_numa_area[nid];    in dma_numa_cma_reserve()
     196  name, cma, nid);    in dma_numa_cma_reserve()
     281  phys_addr_t limit, struct cma **res_cma,    in dma_contiguous_reserve_area()
     335  static struct page *cma_alloc_aligned(struct cma *cma, size_t size, gfp_t gfp)    in cma_alloc_aligned() argument
    [all …]

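The contiguous.c hits show the DMA layer keeping a global default CMA area plus optional per-NUMA-node areas, and a cma_alloc_aligned() helper that converts a byte size into the page count and alignment order that cma_alloc() expects. A small standalone sketch of that size-to-(count, order) conversion; PAGE_SHIFT, the alignment cap, and the request size are assumed values (the kernel caps the order at CONFIG_CMA_ALIGNMENT):

    #include <stdio.h>

    #define PAGE_SHIFT      12                 /* assume 4 KiB pages */
    #define PAGE_SIZE       (1UL << PAGE_SHIFT)
    #define MAX_ALIGN_ORDER 8                  /* stand-in for CONFIG_CMA_ALIGNMENT */

    /* Smallest order such that (PAGE_SIZE << order) covers `size` bytes,
     * roughly what the kernel's get_order() computes. */
    static unsigned int order_for_size(unsigned long size)
    {
            unsigned int order = 0;

            while ((PAGE_SIZE << order) < size)
                    order++;
            return order;
    }

    int main(void)
    {
            unsigned long size = 300UL * 1024;    /* hypothetical DMA request */
            unsigned long count = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
            unsigned int align = order_for_size(size);

            if (align > MAX_ALIGN_ORDER)          /* cap the alignment order */
                    align = MAX_ALIGN_ORDER;

            /* These are the (count, align) values a cma_alloc() call would see. */
            printf("size=%lu -> count=%lu pages, align order=%u\n",
                   size, count, align);
            return 0;
    }
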
pool.c
      60  struct cma *cma;    in cma_in_zone() local
      62  cma = dev_get_cma_area(NULL);    in cma_in_zone()
      63  if (!cma)    in cma_in_zone()
      66  size = cma_get_size(cma);    in cma_in_zone()
      71  end = cma_get_base(cma) + size - 1;    in cma_in_zone()

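cma_in_zone() above decides whether the default CMA area can back an atomic DMA pool for a given zone: it fetches the area with dev_get_cma_area(NULL), computes its last byte as base + size - 1, and compares that against the zone's limit. A minimal standalone version of that containment test, with the base, size, and limit values invented:

    #include <stdbool.h>
    #include <stdio.h>

    /* Does [base, base + size) lie entirely at or below `limit`?  Mirrors
     * the end = cma_get_base(cma) + size - 1 computation excerpted above. */
    static bool range_in_zone(unsigned long long base, unsigned long long size,
                              unsigned long long limit)
    {
            return size != 0 && base + size - 1 <= limit;
    }

    int main(void)
    {
            /* Hypothetical 64 MiB area at 0x30000000 against a 32-bit zone limit. */
            printf("%s\n", range_in_zone(0x30000000ULL, 64ULL << 20, 0xffffffffULL)
                           ? "in zone" : "out of zone");
            return 0;
    }
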
Kconfig
     153  You can disable CMA by specifying "cma=0" on the kernel's command
     182  default, but it can be enabled by passing cma=size[MG] to the kernel.
     194  enabled by passing cma=size[MG] to the kernel.

/openbmc/linux/Documentation/ABI/testing/
sysfs-kernel-mm-cma
       1  What:  /sys/kernel/mm/cma/
       5  /sys/kernel/mm/cma/ contains a subdirectory for each CMA
       9  /sys/kernel/mm/cma/<cma-heap-name> directory) contains the
      15  What:  /sys/kernel/mm/cma/<cma-heap-name>/alloc_pages_success
      21  What:  /sys/kernel/mm/cma/<cma-heap-name>/alloc_pages_fail

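This ABI file documents the per-area counters exported under /sys/kernel/mm/cma/<cma-heap-name>/: alloc_pages_success and alloc_pages_fail, which correspond to the atomic64 counters updated in mm/cma_sysfs.c above. A small userspace reader; the area name "reserved" is only a placeholder, so list /sys/kernel/mm/cma/ on the running system for the real directory names:

    #include <stdio.h>

    /* Read one of the per-CMA-area counters documented above. */
    static long long read_counter(const char *area, const char *file)
    {
            char path[256];
            long long val = -1;
            FILE *fp;

            snprintf(path, sizeof(path), "/sys/kernel/mm/cma/%s/%s", area, file);
            fp = fopen(path, "r");
            if (!fp)
                    return -1;
            if (fscanf(fp, "%lld", &val) != 1)
                    val = -1;
            fclose(fp);
            return val;
    }

    int main(void)
    {
            /* "reserved" is a placeholder CMA area name. */
            printf("success: %lld\n", read_counter("reserved", "alloc_pages_success"));
            printf("fail:    %lld\n", read_counter("reserved", "alloc_pages_fail"));
            return 0;
    }
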
/openbmc/linux/drivers/dma-buf/heaps/
cma_heap.c
      28  struct cma *cma;    member
     259  cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);    in cma_heap_dma_buf_release()
     303  cma_pages = cma_alloc(cma_heap->cma, pagecount, align, false);    in cma_heap_allocate()
     359  cma_release(cma_heap->cma, cma_pages, pagecount);    in cma_heap_allocate()
     370  static int __add_cma_heap(struct cma *cma, void *data)    in __add_cma_heap() argument
     378  cma_heap->cma = cma;    in __add_cma_heap()
     380  exp_info.name = cma_get_name(cma);    in __add_cma_heap()
     397  struct cma *default_cma = dev_get_cma_area(NULL);    in add_default_cma_heap()

/openbmc/linux/Documentation/admin-guide/mm/
cma_debugfs.rst
       8  Each CMA area represents a directory under <debugfs>/cma/, represented by
      11  <debugfs>/cma/<cma_name>
      21  echo 5 > <debugfs>/cma/<cma_name>/alloc

/openbmc/linux/arch/s390/mm/
init.c
     239  static int s390_cma_check_range(struct cma *cma, void *data)    in s390_cma_check_range() argument
     245  start = cma_get_base(cma);    in s390_cma_check_range()
     246  end = start + cma_get_size(cma);    in s390_cma_check_range()

/openbmc/linux/arch/xtensa/boot/dts/
kc705.dts
      22  linux,cma {
      28  linux,cma-default;

/openbmc/linux/arch/arm64/boot/dts/freescale/
imx93-tqma9352.dtsi
      19  linux,cma {
      24  linux,cma-default;

imx8ulp-evk.dts
      28  linux,cma {
      32  linux,cma-default;

imx93-11x11-evk.dts
      23  linux,cma {
      28  linux,cma-default;

imx8mn-tqma8mqnl.dtsi
      39  linux,cma {
      46  linux,cma-default;

/openbmc/linux/arch/arm64/boot/dts/amlogic/
meson-a1.dtsi
      54  linux,cma {
      59  linux,cma-default;

/openbmc/linux/arch/arm/boot/dts/nxp/imx/
imx6ul-ccimx6ulsom.dtsi
      20  linux,cma {
      24  linux,cma-default;

/openbmc/linux/drivers/gpu/drm/nouveau/include/nvkm/subdev/
pci.h
      23  bool cma;    member

/openbmc/linux/drivers/infiniband/core/
Makefile
      25  rdma_cm-y := cma.o cma_trace.o

/openbmc/linux/arch/arm/boot/dts/broadcom/
bcm283x.dtsi
      38  cma: linux,cma {    label
      42  linux,cma-default;

/openbmc/linux/include/trace/events/
cma.h
       3  #define TRACE_SYSTEM cma

/openbmc/linux/drivers/gpu/drm/nouveau/
nouveau_ttm.c
     299  drm->agp.cma = pci->agp.cma;    in nouveau_ttm_init()