/openbmc/linux/include/linux/

mmap_lock.h:
    20  void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write);
    21  void __mmap_lock_do_trace_acquire_returned(struct mm_struct *mm, bool write,
    23  void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write);
    25  static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
    29          __mmap_lock_do_trace_start_locking(mm, write);
    32  static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
    36          __mmap_lock_do_trace_acquire_returned(mm, write, success);
    39  static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
    42          __mmap_lock_do_trace_released(mm, write);
    47  static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
    [all …]
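These trace wrappers compile to no-ops unless the mmap_lock tracepoints are enabled; the header's public lock helpers bracket the underlying rwsem call with them. A minimal sketch of that arrangement, modeled on this header but trimmed rather than verbatim:

    static inline void mmap_read_lock(struct mm_struct *mm)
    {
            __mmap_lock_trace_start_locking(mm, false);          /* write = false */
            down_read(&mm->mmap_lock);
            __mmap_lock_trace_acquire_returned(mm, false, true); /* success = true */
    }

The write-side helpers follow the same shape with write = true, and the trylock variants report the actual success value to __mmap_lock_trace_acquire_returned().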
page_table_check.h:
    17  void __page_table_check_pte_clear(struct mm_struct *mm, pte_t pte);
    18  void __page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd);
    19  void __page_table_check_pud_clear(struct mm_struct *mm, pud_t pud);
    20  void __page_table_check_ptes_set(struct mm_struct *mm, pte_t *ptep, pte_t pte,
    22  void __page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd);
    23  void __page_table_check_pud_set(struct mm_struct *mm, pud_t *pudp, pud_t pud);
    24  void __page_table_check_pte_clear_range(struct mm_struct *mm,
    44  static inline void page_table_check_pte_clear(struct mm_struct *mm, pte_t pte)
    49          __page_table_check_pte_clear(mm, pte);
    52  static inline void page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd)
    [all …]
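Each public page_table_check_* helper guards its __ counterpart behind a static key, so the checks cost a single patched branch when page table checking is disabled at runtime. A sketch of the pattern, assuming the header's page_table_check_disabled key:

    static inline void page_table_check_pte_clear(struct mm_struct *mm, pte_t pte)
    {
            /* patched to a plain fallthrough when checking is off */
            if (static_branch_likely(&page_table_check_disabled))
                    return;
            __page_table_check_pte_clear(mm, pte);
    }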
mmu_notifier.h:
    40   * that the mm refcount is zero and the range is no longer accessible.
    66   * Called either by mmu_notifier_unregister or when the mm is
    69   * methods (the ones invoked outside the mm context) and it
    74   * tsk->mm == mm exits.
    81   * last thread of this mm quits, you've also to be sure that
    89                          struct mm_struct *mm);
   101                          struct mm_struct *mm,
   111                          struct mm_struct *mm,
   122                          struct mm_struct *mm,
   130                          struct mm_struct *mm,
    [all …]
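The struct mm_struct *mm fragments above are parameters of the mmu_notifier_ops callbacks that a subscriber fills in before registering against an mm. A minimal subscriber sketch using only the documented API (my_release, my_ops and my_notifier are hypothetical names; error handling trimmed):

    #include <linux/mmu_notifier.h>

    static void my_release(struct mmu_notifier *subscription, struct mm_struct *mm)
    {
            /* the mm is being torn down: stop all secondary-MMU use of it */
    }

    static const struct mmu_notifier_ops my_ops = {
            .release = my_release,
    };

    static struct mmu_notifier my_notifier = { .ops = &my_ops };

    static int my_subscribe(void)
    {
            /* takes mmap_lock itself; __mmu_notifier_register() is for
             * callers that already hold it for write */
            return mmu_notifier_register(&my_notifier, current->mm);
    }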
/openbmc/linux/arch/x86/include/asm/

mmu_context.h:
    55  static inline void init_new_context_ldt(struct mm_struct *mm)
    57          mm->context.ldt = NULL;
    58          init_rwsem(&mm->context.ldt_usr_sem);
    60  int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
    61  void destroy_context_ldt(struct mm_struct *mm);
    62  void ldt_arch_exit_mmap(struct mm_struct *mm);
    64  static inline void init_new_context_ldt(struct mm_struct *mm) { }
    66                                    struct mm_struct *mm)
    70  static inline void destroy_context_ldt(struct mm_struct *mm) { }
    71  static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { }
    [all …]
pgalloc.h:
     6  #include <linux/mm.h>           /* for struct page */
    13  static inline int  __paravirt_pgd_alloc(struct mm_struct *mm) { return 0; }
    18  #define paravirt_pgd_alloc(mm)  __paravirt_pgd_alloc(mm)
    19  static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd) {}
    20  static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn) {}
    21  static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn) {}
    24  static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn) {}
    25  static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn) {}
    52  extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
    64  static inline void pmd_populate_kernel(struct mm_struct *mm,
    [all …]
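The paravirt_alloc_* no-ops above are the hooks the populate helpers invoke before wiring a new page-table page into the hierarchy. The pmd_populate_kernel() hit is truncated; it continues roughly as follows (a reconstruction of the x86 header, not guaranteed verbatim):

    static inline void pmd_populate_kernel(struct mm_struct *mm,
                                           pmd_t *pmd, pte_t *pte)
    {
            /* tell the hypervisor layer (if any) about the new PTE page */
            paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
            set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
    }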
/openbmc/linux/drivers/gpu/drm/

drm_buddy.c:
    14  static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm,
    35  static void drm_block_free(struct drm_buddy *mm,
    41  static void list_insert_sorted(struct drm_buddy *mm,
    47          head = &mm->free_list[drm_buddy_block_order(block)];
    68  static void mark_free(struct drm_buddy *mm,
    74          list_insert_sorted(mm, block);
    88   * @mm: DRM buddy manager to initialize
    97  int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
   113          mm->size = size;
   114          mm->avail = size;
    [all …]
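A consumer initializes a buddy manager once, then allocates and frees lists of blocks from it. A hedged usage sketch; the sizes, range, flags and error handling are example choices, with signatures as in this kernel's drm_buddy API:

    #include <drm/drm_buddy.h>
    #include <linux/sizes.h>

    static int buddy_demo(void)
    {
            struct drm_buddy mm;
            LIST_HEAD(blocks);
            int err;

            /* manage a 4 GiB space in 4 KiB minimum chunks */
            err = drm_buddy_init(&mm, SZ_4G, SZ_4K);
            if (err)
                    return err;

            /* carve 8 MiB out of anywhere in [0, 4 GiB) */
            err = drm_buddy_alloc_blocks(&mm, 0, SZ_4G, SZ_8M, SZ_4K,
                                         &blocks, 0);
            if (!err)
                    drm_buddy_free_list(&mm, &blocks);

            drm_buddy_fini(&mm);
            return err;
    }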
/openbmc/linux/drivers/gpu/drm/tests/

drm_buddy_test.c:
    46  static void __dump_block(struct kunit *test, struct drm_buddy *mm,
    52                     drm_buddy_block_size(mm, block), !block->parent, buddy);
    55  static void dump_block(struct kunit *test, struct drm_buddy *mm,
    60          __dump_block(test, mm, block, false);
    64                  __dump_block(test, mm, buddy, true);
    67  static int check_block(struct kunit *test, struct drm_buddy *mm,
    84          block_size = drm_buddy_block_size(mm, block);
    87          if (block_size < mm->chunk_size) {
    98          if (!IS_ALIGNED(block_size, mm->chunk_size)) {
   103          if (!IS_ALIGNED(offset, mm->chunk_size)) {
    [all …]
/openbmc/linux/arch/s390/include/asm/

pgalloc.h:
    18  #include <linux/mm.h>
    26  struct page *page_table_alloc_pgste(struct mm_struct *mm);
    37  int crst_table_upgrade(struct mm_struct *mm, unsigned long limit);
    39  static inline unsigned long check_asce_limit(struct mm_struct *mm, unsigned long addr,
    44          if (addr + len > mm->context.asce_limit &&
    46                  rc = crst_table_upgrade(mm, addr + len);
    53  static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address)
    55          unsigned long *table = crst_table_alloc(mm);
    62  static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
    64          if (!mm_p4d_folded(mm))
    [all …]
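The truncated check_asce_limit() decides whether a new mapping would cross the current address-space-control-element limit and, if so, upgrades the region table. A sketch of the full logic, reconstructed from the visible hit lines rather than copied verbatim:

    static inline unsigned long check_asce_limit(struct mm_struct *mm, unsigned long addr,
                                                 unsigned long len)
    {
            int rc;

            /* upgrade only when the mapping fits below TASK_SIZE */
            if (addr + len > mm->context.asce_limit &&
                addr + len <= TASK_SIZE) {
                    rc = crst_table_upgrade(mm, addr + len);
                    if (rc)
                            return (unsigned long) rc;
            }
            return addr;
    }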
mmu_context.h:
    20                                     struct mm_struct *mm)
    24          spin_lock_init(&mm->context.lock);
    25          INIT_LIST_HEAD(&mm->context.pgtable_list);
    26          INIT_LIST_HEAD(&mm->context.gmap_list);
    27          cpumask_clear(&mm->context.cpu_attach_mask);
    28          atomic_set(&mm->context.flush_count, 0);
    29          atomic_set(&mm->context.protected_count, 0);
    30          mm->context.gmap_asce = 0;
    31          mm->context.flush_mm = 0;
    33          mm->context.alloc_pgste = page_table_allocate_pgste ||
    [all …]
tlbflush.h:
     5  #include <linux/mm.h>
    27          /* Global TLB flush for the mm */
    42   * Flush TLB entries for a specific mm on all CPUs (in case gmap is used
    45  static inline void __tlb_flush_mm(struct mm_struct *mm)
    50           * If the machine has IDTE we prefer to do a per mm flush
    51           * on all cpus instead of doing a local flush if the mm
    55          atomic_inc(&mm->context.flush_count);
    57          cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
    59          gmap_asce = READ_ONCE(mm->context.gmap_asce);
    63                  __tlb_flush_idte(mm->context.asce);
    [all …]
/openbmc/linux/arch/powerpc/include/asm/

mmu_context.h:
     7  #include <linux/mm.h>
    18  extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
    20  extern void destroy_context(struct mm_struct *mm);
    24  extern bool mm_iommu_preregistered(struct mm_struct *mm);
    25  extern long mm_iommu_new(struct mm_struct *mm,
    28  extern long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua,
    31  extern long mm_iommu_put(struct mm_struct *mm,
    33  extern void mm_iommu_init(struct mm_struct *mm);
    34  extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
    36  extern struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
    [all …]
/openbmc/linux/include/trace/events/

ksm.h:
    71   * @mm: address of the mm object of the process
    77          TP_PROTO(void *mm),
    79          TP_ARGS(mm),
    82                  __field(void *, mm)
    86                  __entry->mm = mm;
    89          TP_printk("mm %p", __entry->mm)
    95   * @mm: address of the mm object of the process
   101          TP_PROTO(void *mm),
   103          TP_ARGS(mm)
   109   * @mm: address of the mm object of the process
    [all …]
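These hits are fragments of a shared tracepoint class plus the per-event definitions that reuse it: the first group carries TP_STRUCT__entry/TP_fast_assign/TP_printk, while later events need only TP_PROTO/TP_ARGS. A sketch of the shape such definitions take (the class and event names are illustrative; the exact bodies in this header are a reconstruction):

    DECLARE_EVENT_CLASS(ksm_scan_template,

            TP_PROTO(void *mm),

            TP_ARGS(mm),

            TP_STRUCT__entry(
                    __field(void *, mm)
            ),

            TP_fast_assign(
                    __entry->mm = mm;
            ),

            TP_printk("mm %p", __entry->mm)
    );

    /* each concrete event reuses the class, supplying only proto/args */
    DEFINE_EVENT(ksm_scan_template, ksm_enter,

            TP_PROTO(void *mm),

            TP_ARGS(mm)
    );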
huge_memory.h:
    58          TP_PROTO(struct mm_struct *mm, struct page *page, bool writable,
    61          TP_ARGS(mm, page, writable, referenced, none_or_zero, status, unmapped),
    64                  __field(struct mm_struct *, mm)
    74                  __entry->mm = mm;
    83          TP_printk("mm=%p, scan_pfn=0x%lx, writable=%d, referenced=%d, none_or_zero=%d, status=%s, unmapped…
    84                  __entry->mm,
    95          TP_PROTO(struct mm_struct *mm, int isolated, int status),
    97          TP_ARGS(mm, isolated, status),
   100                  __field(struct mm_struct *, mm)
   106                  __entry->mm = mm;
    [all …]
/openbmc/linux/Documentation/core-api/

mm-api.rst:
    14  .. kernel-doc:: mm/gup.c
    40  .. kernel-doc:: mm/slab.c
    43  .. kernel-doc:: mm/slab_common.c
    46  .. kernel-doc:: mm/util.c
    52  .. kernel-doc:: mm/vmalloc.c
    61  .. kernel-doc:: mm/filemap.c
    67  .. kernel-doc:: mm/readahead.c
    70  .. kernel-doc:: mm/readahead.c
    76  .. kernel-doc:: mm/page-writeback.c
    82  .. kernel-doc:: mm/truncate.c
    [all …]
/openbmc/linux/arch/s390/mm/

pgtable.c:
    11  #include <linux/mm.h>
    47  static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr,
    54          asce = READ_ONCE(mm->context.gmap_asce);
    58          asce = asce ? : mm->context.asce;
    67  static inline void ptep_ipte_global(struct mm_struct *mm, unsigned long addr,
    74          asce = READ_ONCE(mm->context.gmap_asce);
    78          asce = asce ? : mm->context.asce;
    87  static inline pte_t ptep_flush_direct(struct mm_struct *mm,
    96          atomic_inc(&mm->context.flush_count);
    98              cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
    [all …]
/openbmc/linux/arch/m68k/include/asm/

mmu_context.h:
    28  static inline void get_mmu_context(struct mm_struct *mm)
    32          if (mm->context != NO_CONTEXT)
    45          mm->context = ctx;
    46          context_mm[ctx] = mm;
    52  #define init_new_context(tsk, mm)       (((mm)->context = NO_CONTEXT), 0)
    58  static inline void destroy_context(struct mm_struct *mm)
    60          if (mm->context != NO_CONTEXT) {
    61                  clear_bit(mm->context, context_map);
    62                  mm->context = NO_CONTEXT;
    75          get_mmu_context(tsk->mm);
    [all …]
/openbmc/linux/mm/

debug.c:
     3   * mm/debug.c
     5   * mm/ specific debug routines.
    10  #include <linux/mm.h>
   150          pr_emerg("vma %px start %px end %px mm %px\n"
   162  void dump_mm(const struct mm_struct *mm)
   164          pr_emerg("mm %px task_size %lu\n"
   192                  mm, mm->task_size,
   194                  mm->get_unmapped_area,
   196                  mm->mmap_base, mm->mmap_legacy_base,
   197                  mm->pgd, atomic_read(&mm->mm_users),
    [all …]
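dump_mm() is rarely called directly; it is normally reached through the CONFIG_DEBUG_VM assertion macros in <linux/mmdebug.h>, which dump the whole mm_struct before BUG()ing. A small usage sketch; the condition checked here is an arbitrary example:

    #include <linux/mmdebug.h>

    static void sanity_check_mm(struct mm_struct *mm)
    {
            /* with CONFIG_DEBUG_VM, a failing check calls dump_mm(mm)
             * before BUG(); without it, this compiles away */
            VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm);
    }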
mmu_notifier.c:
     3   * linux/mm/mmu_notifier.c
    13  #include <linux/mm.h>
    19  #include <linux/sched/mm.h>
    33   * mm->notifier_subscriptions inside the mm_take_all_locks() protected
    38          /* all mmu notifiers registered in this mm are queued in this list */
    54   * this mm, such that PTES cannot be read into SPTEs (shadow PTEs) while any
    57   * Note that the core mm creates nested invalidate_range_start()/end() regions
    60   * progress on the mm side.
    67   *   - mm->active_invalidate_ranges != 0
    73   *   - mm->active_invalidate_ranges != 0
    [all …]
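Core mm emits the invalidate_range_start()/end() pairs described above around every operation that may change PTEs, so secondary MMUs cannot cache stale translations in between. A sketch of the pairing as a caller issues it; MMU_NOTIFY_CLEAR and the helper name zap_range are example choices, and the mmu_notifier_range_init() signature follows recent kernels:

    #include <linux/mmu_notifier.h>

    static void zap_range(struct mm_struct *mm, unsigned long start,
                          unsigned long end)
    {
            struct mmu_notifier_range range;

            mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, start, end);
            mmu_notifier_invalidate_range_start(&range);
            /* ... clear the PTEs in [start, end) ... */
            mmu_notifier_invalidate_range_end(&range);
    }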
/openbmc/linux/arch/arm/include/asm/

mmu_context.h:
    24  void __check_vmalloc_seq(struct mm_struct *mm);
    27  static inline void check_vmalloc_seq(struct mm_struct *mm)
    30              unlikely(atomic_read(&mm->context.vmalloc_seq) !=
    32                  __check_vmalloc_seq(mm);
    38  void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
    42  init_new_context(struct task_struct *tsk, struct mm_struct *mm)
    44          atomic64_set(&mm->context.id, 0);
    49  void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
    52  static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
    62  static inline void check_and_switch_context(struct mm_struct *mm,
    [all …]
/openbmc/linux/arch/sparc/mm/

tlb.c:
     2  /* arch/sparc64/mm/tlb.c
     9  #include <linux/mm.h>
    26          struct mm_struct *mm = tb->mm;
    33          if (CTX_VALID(mm->context)) {
    35                          global_flush_tlb_page(mm, tb->vaddrs[0]);
    38                          smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
    41                          __flush_tlb_pending(CTX_HWBITS(tb->mm->context),
    69  static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
    81          if (unlikely(nr != 0 && mm != tb->mm)) {
    87                  flush_tsb_user_page(mm, vaddr, hugepage_shift);
    [all …]
/openbmc/linux/fs/proc/

task_nommu.c:
     3  #include <linux/mm.h>
    11  #include <linux/sched/mm.h>
    21  void task_mem(struct seq_file *m, struct mm_struct *mm)
    23          VMA_ITERATOR(vmi, mm, 0);
    28          mmap_read_lock(mm);
    40                  if (atomic_read(&mm->mm_count) > 1 ||
    50          if (atomic_read(&mm->mm_count) > 1)
    51                  sbytes += kobjsize(mm);
    53                  bytes += kobjsize(mm);
    72          mmap_read_unlock(mm);
    [all …]
/openbmc/linux/drivers/net/ethernet/mscc/

ocelot_mm.c:
    55          struct ocelot_mm_state *mm = &ocelot->mm[port];
    66               ocelot_port->speed == SPEED_1000) && mm->tx_active)
    67                  val = mm->preemptible_tcs;
    75          mm->active_preemptible_tcs = val;
    79                  "port %d %s/%s, MM TX %s, preemptible TCs 0x%x, active 0x%x\n",
    82                  mm->tx_active ? "active" : "inactive", mm->preemptible_tcs,
    83                  mm->active_preemptible_tcs);
    93          struct ocelot_mm_state *mm = &ocelot->mm[port];
    97          if (mm->preemptible_tcs == preemptible_tcs)
   100          mm->preemptible_tcs = preemptible_tcs;
    [all …]
/openbmc/linux/include/asm-generic/

mmu_context.h:
    15   * @mm: the currently active mm context which is becoming lazy
    18   * tsk->mm will be NULL
    21  static inline void enter_lazy_tlb(struct mm_struct *mm,
    29   * @tsk: task struct for the mm
    30   * @mm: the new mm struct
    35                                    struct mm_struct *mm)
    42   * destroy_context - Undo init_new_context when the mm is going away
    43   * @mm: old mm struct
    46  static inline void destroy_context(struct mm_struct *mm)
    52   * activate_mm - called after exec switches the current task to a new mm, to switch to it
    [all …]
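Every hook in this header sits behind an #ifndef guard, so an architecture overrides a hook by defining a macro of the same name before including the generic file and falls back to these no-ops for the rest. A sketch of that pattern for a hypothetical arch (the context.id field is invented for illustration):

    /* arch/foo/include/asm/mmu_context.h -- hypothetical architecture */
    #define init_new_context init_new_context
    static inline int init_new_context(struct task_struct *tsk,
                                       struct mm_struct *mm)
    {
            mm->context.id = 0;     /* arch-specific context setup */
            return 0;
    }

    /* supplies default no-op versions of every hook not defined above */
    #include <asm-generic/mmu_context.h>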
/openbmc/linux/arch/x86/kernel/

ldt.c:
    19  #include <linux/mm.h>
    42  void load_mm_ldt(struct mm_struct *mm)
    47          ldt = READ_ONCE(mm->context.ldt);
    50           * Any change to mm->context.ldt is followed by an IPI to all
    51           * CPUs with the mm active. The LDT will not be freed until
    93           * Load the LDT if either the old or new mm had an LDT.
    95           * An mm will never go from having an LDT to not having an LDT. Two
   138          struct mm_struct *mm = __mm;
   140          if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm)
   143          load_mm_ldt(mm);
    [all …]
/openbmc/linux/drivers/iommu/

iommu-sva.c:
     7  #include <linux/sched/mm.h>
    14  /* Allocate a PASID for the mm within range (inclusive) */
    15  static int iommu_sva_alloc_pasid(struct mm_struct *mm, struct device *dev)
    20          if (!arch_pgtable_dma_compat(mm))
    24          /* Is a PASID already associated with this mm? */
    25          if (mm_valid_pasid(mm)) {
    26                  if (mm->pasid >= dev->iommu->max_pasids)
    36          mm->pasid = pasid;
    46   * @mm: the mm to bind, caller must hold a reference to mm_users
    49   * access the mm using the PASID returned by iommu_sva_get_pasid(). If a
    [all …]
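The kernel-doc fragment at the end describes the bind side of shared virtual addressing: a driver binds an mm to a device, programs the returned PASID into the hardware, and unbinds when DMA is done. A hedged sketch of that flow built on iommu_sva_bind_device(), iommu_sva_get_pasid() and iommu_sva_unbind_device(); error handling is trimmed:

    #include <linux/iommu.h>

    static int bind_current_mm(struct device *dev)
    {
            struct iommu_sva *handle;
            u32 pasid;

            handle = iommu_sva_bind_device(dev, current->mm);
            if (IS_ERR(handle))
                    return PTR_ERR(handle);

            pasid = iommu_sva_get_pasid(handle);
            /* ... program pasid into the device; its DMA is now translated
             * through current->mm's page tables ... */

            iommu_sva_unbind_device(handle);
            return 0;
    }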