/openbmc/linux/arch/x86/include/asm/

mmu_context.h
     1  /* SPDX-License-Identifier: GPL-2.0 */
    32   * Xen requires page-aligned LDTs with special permissions. This is
    45   * of an older, still-in-use LDT.
    47   * slot will be -1 if this LDT doesn't have an alias mapping.
    55  static inline void init_new_context_ldt(struct mm_struct *mm)
    57          mm->context.ldt = NULL;
    58          init_rwsem(&mm->context.ldt_usr_sem);
    60  int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
    61  void destroy_context_ldt(struct mm_struct *mm);
    62  void ldt_arch_exit_mmap(struct mm_struct *mm);
        [all …]

/openbmc/linux/drivers/gpu/drm/tests/

drm_buddy_test.c
     1  // SPDX-License-Identifier: MIT
    46  static void __dump_block(struct kunit *test, struct drm_buddy *mm,
    50                  block->header, drm_buddy_block_state(block),
    52                  drm_buddy_block_size(mm, block), !block->parent, buddy);
    55  static void dump_block(struct kunit *test, struct drm_buddy *mm,
    60          __dump_block(test, mm, block, false);
    64          __dump_block(test, mm, buddy, true);
    67  static int check_block(struct kunit *test, struct drm_buddy *mm,
    74          int err = 0;
    81                  err = -EINVAL;
        [all …]

drm_mm_test.c
     1  // SPDX-License-Identifier: GPL-2.0-only
    36          [BOTTOMUP] = { "bottom-up", DRM_MM_INSERT_LOW },
    37          [TOPDOWN] = { "top-down", DRM_MM_INSERT_HIGH },
    41          { "bottom-up", DRM_MM_INSERT_LOW },
    42          { "top-down", DRM_MM_INSERT_HIGH },
    46  static bool assert_no_holes(struct kunit *test, const struct drm_mm *mm)
    52          count = 0;
    53          drm_mm_for_each_hole(hole, mm, hole_start, hole_end)
    61          drm_mm_for_each_node(hole, mm) {
    71  static bool assert_one_hole(struct kunit *test, const struct drm_mm *mm, u64 start, u64 end)
        [all …]

/openbmc/linux/arch/s390/mm/

pgtable.c
     1  // SPDX-License-Identifier: GPL-2.0
    11  #include <linux/mm.h>
    25  #include <asm/page-states.h>
    47  static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr,
    53                  opt = 0;
    54                  asce = READ_ONCE(mm->context.gmap_asce);
    55                  if (asce == 0UL || nodat)
    57                  if (asce != -1UL) {
    58                          asce = asce ? : mm->context.asce;
    63                  __ptep_ipte(addr, ptep, 0, 0, IPTE_LOCAL);
        [all …]

/openbmc/linux/arch/m68k/include/asm/

mmu_context.h
     1  /* SPDX-License-Identifier: GPL-2.0 */
     5  #include <asm-generic/mm_hooks.h>
    28  static inline void get_mmu_context(struct mm_struct *mm)
    32          if (mm->context != NO_CONTEXT)
    42                  ctx = 0;
    45          mm->context = ctx;
    46          context_mm[ctx] = mm;
    52  #define init_new_context(tsk, mm)       (((mm)->context = NO_CONTEXT), 0)
    58  static inline void destroy_context(struct mm_struct *mm)
    60          if (mm->context != NO_CONTEXT) {
        [all …]

/openbmc/linux/drivers/gpu/drm/

drm_buddy.c
     1  // SPDX-License-Identifier: MIT
    14  static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm,
    27          block->header = offset;
    28          block->header |= order;
    29          block->parent = parent;
    31          BUG_ON(block->header & DRM_BUDDY_HEADER_UNUSED);
    35  static void drm_block_free(struct drm_buddy *mm,
    41  static void list_insert_sorted(struct drm_buddy *mm,
    47          head = &mm->free_list[drm_buddy_block_order(block)];
    49                  list_add(&block->link, head);
        [all …]

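The drm_buddy.c lines above are internals of the DRM buddy allocator (block headers, free-list insertion). As a hedged sketch of how a driver drives that allocator through its public header, assuming the drm_buddy_init()/drm_buddy_alloc_blocks()/drm_buddy_free_list() argument lists match drm/drm_buddy.h in this tree (they have shifted slightly across kernel releases); buddy_demo() is a made-up function name:

#include <drm/drm_buddy.h>

static int buddy_demo(void)
{
        struct drm_buddy mm;
        LIST_HEAD(blocks);
        int err;

        /* manage a 64 MiB space with a 4 KiB minimum chunk */
        err = drm_buddy_init(&mm, 64 << 20, 4 << 10);
        if (err)
                return err;

        /* ask for 1 MiB anywhere in [0, 64 MiB), 4 KiB minimum block size */
        err = drm_buddy_alloc_blocks(&mm, 0, 64 << 20, 1 << 20, 4 << 10,
                                     &blocks, 0);
        if (!err)
                drm_buddy_free_list(&mm, &blocks);

        drm_buddy_fini(&mm);
        return err;
}
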
/openbmc/linux/arch/s390/include/asm/

mmu_context.h
     1  /* SPDX-License-Identifier: GPL-2.0 */
     5   *  Derived from "include/asm-i386/mmu_context.h"
    16  #include <asm-generic/mm_hooks.h>
    20                                         struct mm_struct *mm)
    24          spin_lock_init(&mm->context.lock);
    25          INIT_LIST_HEAD(&mm->context.pgtable_list);
    26          INIT_LIST_HEAD(&mm->context.gmap_list);
    27          cpumask_clear(&mm->context.cpu_attach_mask);
    28          atomic_set(&mm->context.flush_count, 0);
    29          atomic_set(&mm->context.protected_count, 0);
        [all …]

tlbflush.h
     1  /* SPDX-License-Identifier: GPL-2.0 */
     5  #include <linux/mm.h>
    27          /* Global TLB flush for the mm */
    28          asm volatile("idte 0,%1,%0" : : "a" (opt), "a" (asce) : "cc");
    36          unsigned int dummy = 0;
    38          csp(&dummy, 0, 0);
    42   * Flush TLB entries for a specific mm on all CPUs (in case gmap is used
    45  static inline void __tlb_flush_mm(struct mm_struct *mm)
    50           * If the machine has IDTE we prefer to do a per mm flush
    51           * on all cpus instead of doing a local flush if the mm
        [all …]

/openbmc/linux/drivers/net/ethernet/mscc/

ocelot_mm.c
     1  // SPDX-License-Identifier: (GPL-2.0 OR MIT)
     3   * Hardware library for MAC Merge Layer and Frame Preemption on TSN-capable
     6   * Copyright 2022-2023 NXP
    37          case 0:
    54          struct ocelot_port *ocelot_port = ocelot->ports[port];
    55          struct ocelot_mm_state *mm = &ocelot->mm[port];
    56          u32 val = 0;
    58          lockdep_assert_held(&ocelot->fwd_domain_lock);
    65          if ((ocelot_port->phy_mode != PHY_INTERFACE_MODE_QSGMII ||
    66               ocelot_port->speed == SPEED_1000) && mm->tx_active)
        [all …]

/openbmc/linux/arch/sparc/mm/

tlb.c
     1  // SPDX-License-Identifier: GPL-2.0
     2  /* arch/sparc64/mm/tlb.c
     9  #include <linux/mm.h>
    26          struct mm_struct *mm = tb->mm;
    28          if (!tb->tlb_nr)
    33          if (CTX_VALID(mm->context)) {
    34                  if (tb->tlb_nr == 1) {
    35                          global_flush_tlb_page(mm, tb->vaddrs[0]);
    38                          smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
    39                                                &tb->vaddrs[0]);
        [all …]

tsb.c
     1  // SPDX-License-Identifier: GPL-2.0
     2  /* arch/sparc64/mm/tsb.c
    25          return vaddr & (nentries - 1);
    37          for (idx = 0; idx < KERNEL_TSB_NENTRIES; idx++) {
    41                  match |= (ent->tag << 22);
    43                          ent->tag = (1UL << TSB_TAG_INVALID_BIT);
    56          if ((end - start) >> PAGE_SHIFT >= 2 * KERNEL_TSB_NENTRIES)
    64                  if (tag_compare(ent->tag, v))
    65                          ent->tag = (1UL << TSB_TAG_INVALID_BIT);
    75          v &= ~0x1UL;
        [all …]

/openbmc/linux/arch/powerpc/mm/book3s64/

mmu_context.c
     1  // SPDX-License-Identifier: GPL-2.0-or-later
     3   *  MMU context allocation for 64-bit kernels.
    13  #include <linux/mm.h>
    62           * id 0 (aka. ctx->id) is special, we always allocate a new one, even if
    67           * the array, so that we can test if they're non-zero to decide if we
    73          for (i = 0; i < ARRAY_SIZE(ctx->extended_id); i++) {
    74                  if (i == 0 || ctx->extended_id[i]) {
    76                          if (id < 0)
    79                          ctx->extended_id[i] = id;
    84          return ctx->id;
        [all …]

slice.c
     1  // SPDX-License-Identifier: GPL-2.0-or-later
     3   * address space "slices" (meta-segments) support
    15  #include <linux/mm.h>
    21  #include <linux/sched/mm.h>
    39                   (int)SLICE_NUM_LOW, &mask->low_slices);
    41                   (int)SLICE_NUM_HIGH, mask->high_slices);
    44  #define slice_dbg(fmt...) do { if (_slice_debug) pr_devel(fmt); } while (0)
    63          unsigned long end = start + len - 1;
    65          ret->low_slices = 0;
    67          bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
        [all …]

radix_tlb.c
     1  // SPDX-License-Identifier: GPL-2.0-or-later
     5   * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
     8  #include <linux/mm.h>
    12  #include <linux/sched/mm.h>
    15  #include <asm/ppc-opcode.h>
    38          asm volatile(PPC_TLBIEL(%0, %1, %2, %3, 1)
    57          tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 0);
    61                  tlbiel_radix_set_isa300(set, is, 0,
    62                                          RIC_FLUSH_TLB, 0);
    67          tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 1);
        [all …]

/openbmc/linux/fs/proc/

task_nommu.c
     1  // SPDX-License-Identifier: GPL-2.0
     3  #include <linux/mm.h>
    11  #include <linux/sched/mm.h>
    17   * "non-shared". Shared memory may get counted more than once, for
    18   * each process that owns it. Non-shared memory is counted
    21  void task_mem(struct seq_file *m, struct mm_struct *mm)
    23          VMA_ITERATOR(vmi, mm, 0);
    26          unsigned long bytes = 0, sbytes = 0, slack = 0, size;
    28          mmap_read_lock(mm);
    32                  region = vma->vm_region;
        [all …]

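task_mem() above walks every region of a no-MMU task with a VMA_ITERATOR under mmap_read_lock(). A minimal sketch of the same iteration pattern; total_vma_bytes() is a hypothetical helper, not part of task_nommu.c:

#include <linux/mm.h>

static unsigned long total_vma_bytes(struct mm_struct *mm)
{
        VMA_ITERATOR(vmi, mm, 0);       /* start the walk at address 0 */
        struct vm_area_struct *vma;
        unsigned long bytes = 0;

        mmap_read_lock(mm);
        for_each_vma(vmi, vma)          /* visits every VMA in the mm */
                bytes += vma->vm_end - vma->vm_start;
        mmap_read_unlock(mm);

        return bytes;
}
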
/openbmc/linux/arch/arm/mm/

pgd.c
     1  // SPDX-License-Identifier: GPL-2.0-only
     3   *  linux/arch/arm/mm/pgd.c
     5   *  Copyright (C) 1998-2005 Russell King
     7  #include <linux/mm.h>
    17  #include "mm.h"
    30  pgd_t *pgd_alloc(struct mm_struct *mm)
    42          memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
    47          init_pgd = pgd_offset_k(0);
    49                 (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
    57          new_p4d = p4d_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
        [all …]

/openbmc/linux/kernel/

fork.c
     1  // SPDX-License-Identifier: GPL-2.0-only
     9   *  'fork.c' contains the help-routines for the 'fork' system call
    12   * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
    18  #include <linux/sched/mm.h>
    45  #include <linux/mm.h>
    82  #include <linux/posix-timers.h>
    83  #include <linux/user-return-notifier.h>
   142  DEFINE_PER_CPU(unsigned long, process_counts) = 0;
   157          int total = 0;
   208          for (i = 0; i < NR_CACHED_STACKS; i++) {
        [all …]

/openbmc/linux/arch/powerpc/include/asm/

mmu_context.h
     1  /* SPDX-License-Identifier: GPL-2.0 */
     7  #include <linux/mm.h>
    18  extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
    20  extern void destroy_context(struct mm_struct *mm);
    24  extern bool mm_iommu_preregistered(struct mm_struct *mm);
    25  extern long mm_iommu_new(struct mm_struct *mm,
    28  extern long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua,
    31  extern long mm_iommu_put(struct mm_struct *mm,
    33  extern void mm_iommu_init(struct mm_struct *mm);
    34  extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
        [all …]

/openbmc/linux/arch/x86/kernel/

ldt.c
     1  // SPDX-License-Identifier: GPL-2.0
    19  #include <linux/mm.h>
    42  void load_mm_ldt(struct mm_struct *mm)
    47          ldt = READ_ONCE(mm->context.ldt);
    50           * Any change to mm->context.ldt is followed by an IPI to all
    51           * CPUs with the mm active. The LDT will not be freed until
    65          if (WARN_ON_ONCE((unsigned long)ldt->slot > 1)) {
    67                   * Whoops -- either the new LDT isn't mapped
    68                   * (if slot == -1) or is mapped into a bogus
    76           * If page table isolation is enabled, ldt->entries
        [all …]

/openbmc/linux/include/linux/

mmap_lock.h
     8  #include <linux/tracepoint-defs.h>
    20  void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write);
    21  void __mmap_lock_do_trace_acquire_returned(struct mm_struct *mm, bool write,
    23  void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write);
    25  static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
    29                  __mmap_lock_do_trace_start_locking(mm, write);
    32  static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
    36                  __mmap_lock_do_trace_acquire_returned(mm, write, success);
    39  static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
    42                  __mmap_lock_do_trace_released(mm, write);
        [all …]

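The mmap_lock.h hits above are the tracepoint wrappers that fire around every acquisition and release of an mm's mmap lock; the public mmap_read_lock()/mmap_read_unlock() helpers call them. A minimal sketch of a caller that exercises those paths; walk_one_vma() is a hypothetical function, not part of this header:

#include <linux/mm.h>
#include <linux/mmap_lock.h>

static unsigned long walk_one_vma(struct mm_struct *mm, unsigned long addr)
{
        struct vm_area_struct *vma;
        unsigned long start = 0;

        mmap_read_lock(mm);             /* start_locking + acquire_returned trace events */
        vma = find_vma(mm, addr);       /* VMA lookups require the mmap lock */
        if (vma)
                start = vma->vm_start;
        mmap_read_unlock(mm);           /* released trace event */

        return start;
}
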
/openbmc/linux/drivers/gpu/drm/amd/amdkfd/

kfd_mqd_manager.c
     1  // SPDX-License-Identifier: GPL-2.0 OR MIT
     3   * Copyright 2014-2022 Advanced Micro Devices, Inc.
    57          mqd_mem_obj->gtt_mem = dev->dqm->hiq_sdma_mqd.gtt_mem;
    58          mqd_mem_obj->gpu_addr = dev->dqm->hiq_sdma_mqd.gpu_addr;
    59          mqd_mem_obj->cpu_ptr = dev->dqm->hiq_sdma_mqd.cpu_ptr;
    74          offset = (q->sdma_engine_id *
    75                  dev->kfd->device_info.num_sdma_queues_per_engine +
    76                  q->sdma_queue_id) *
    77                  dev->dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size;
    79          offset += dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size *
        [all …]

/openbmc/linux/drivers/misc/cxl/

fault.c
     1  // SPDX-License-Identifier: GPL-2.0-or-later
     8  #include <linux/sched/mm.h>
    10  #include <linux/mm.h>
    24          return ((sste->vsid_data == cpu_to_be64(slb->vsid)) &&
    25                  (sste->esid_data == cpu_to_be64(slb->esid)));
    36          unsigned int mask = (ctx->sst_size >> 7) - 1; /* SSTP0[SegTableSize] */
    40          if (slb->vsid & SLB_VSID_B_1T)
    41                  hash = (slb->esid >> SID_SHIFT_1T) & mask;
    43                  hash = (slb->esid >> SID_SHIFT) & mask;
    45          primary = ctx->sstp + (hash << 3);
        [all …]

/openbmc/linux/mm/

mmu_notifier.c
     1  // SPDX-License-Identifier: GPL-2.0-only
     3   *  linux/mm/mmu_notifier.c
    13  #include <linux/mm.h>
    19  #include <linux/sched/mm.h>
    33   * mm->notifier_subscriptions inside the mm_take_all_locks() protected
    38          /* all mmu notifiers registered in this mm are queued in this list */
    51           * This is a collision-retry read-side/write-side 'lock', a lot like a
    52           * seqcount, however this allows multiple write-sides to hold it at
    54           * this mm, such that PTES cannot be read into SPTEs (shadow PTEs) while any
    57           * Note that the core mm creates nested invalidate_range_start()/end() regions
        [all …]

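mmu_notifier.c above is the core that invokes subscriber callbacks inside the invalidate_range_start()/end() regions mentioned in its comments. A minimal sketch of one subscriber; my_ops, my_invalidate_start() and my_attach() are hypothetical names:

#include <linux/mmu_notifier.h>

static int my_invalidate_start(struct mmu_notifier *mn,
                               const struct mmu_notifier_range *range)
{
        /* tear down or write-protect shadow PTEs covering
         * [range->start, range->end) before the primary PTEs change */
        return 0;
}

static const struct mmu_notifier_ops my_ops = {
        .invalidate_range_start = my_invalidate_start,
};

static struct mmu_notifier my_subscription = { .ops = &my_ops };

static int my_attach(struct mm_struct *mm)
{
        /* ties the notifier to this mm; callbacks fire until unregistered */
        return mmu_notifier_register(&my_subscription, mm);
}
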
/openbmc/linux/arch/sparc/include/asm/

mmu_context_64.h
     1  /* SPDX-License-Identifier: GPL-2.0 */
    16  #include <asm-generic/mm_hooks.h>
    24  void get_new_mmu_context(struct mm_struct *mm);
    27  int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
    29  void destroy_context(struct mm_struct *mm);
    37  static inline void tsb_context_switch_ctx(struct mm_struct *mm,
    40          __tsb_context_switch(__pa(mm->pgd),
    41                               &mm->context.tsb_block[MM_TSB_BASE],
    43                               (mm->context.tsb_block[MM_TSB_HUGE].tsb ?
    44                                &mm->context.tsb_block[MM_TSB_HUGE] :
        [all …]

/openbmc/linux/kernel/sched/

membarrier.c
     1  // SPDX-License-Identifier: GPL-2.0-or-later
     3   * Copyright (C) 2010-2017 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
    17   *                               int x = 0, y = 0;
    22   *                               CPU1 after the IPI-induced memory barrier:
    29   *   b: send IPI                IPI-induced mb
    36   *                    BUG_ON(r1 == 0 && r2 == 0)
    41   * can be reordered after (a) (although not after (c)), so we get r1 == 0
    42   * and r2 == 0. This violates the guarantee that membarrier() is
    46   * before the IPI-induced memory barrier on CPU1.
    53   *                               int x = 0, y = 0;
        [all …]