Lines Matching full:asid

27  * |      process ID         |   ASID    |
32 * The ASID is used to tag entries in the CPU caches and TLBs.
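
The matches at lines 27 and 32 come from the block comment describing the Context ID layout: the low bits hold the ASID that tags cache and TLB entries, the upper bits hold the process ID used by debug and trace logic. Below is a minimal sketch of the same split as the allocator uses it; the 8-bit field width is an assumption, since ASID_BITS itself is not among these matches:

    #include <stdint.h>
    #include <stdio.h>

    #define ASID_BITS   8                       /* assumed field width */
    #define ASID_MASK   (~0ULL << ASID_BITS)    /* everything above the ASID */

    int main(void)
    {
        uint64_t context_id = 0x00000345;       /* hypothetical mm->context.id value */

        /* The low ASID_BITS tag TLB/cache entries; the bits above them carry
         * the allocator's generation (and, in CONTEXTIDR, the process ID). */
        printf("asid       = %#llx\n", (unsigned long long)(context_id & ~ASID_MASK));
        printf("generation = %#llx\n", (unsigned long long)(context_id & ASID_MASK));
        return 0;
    }

The remaining matches manipulate exactly these two halves: asid & ~ASID_MASK is the hardware index, and the bits above it are the allocator's generation.
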
56 u64 context_id, asid; in a15_erratum_get_cpumask() local
65 * running the same ASID as the one being invalidated. in a15_erratum_get_cpumask()
67 asid = per_cpu(active_asids, cpu).counter; in a15_erratum_get_cpumask()
68 if (asid == 0) in a15_erratum_get_cpumask()
69 asid = per_cpu(reserved_asids, cpu); in a15_erratum_get_cpumask()
70 if (context_id == asid) in a15_erratum_get_cpumask()
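
The a15_erratum_get_cpumask() matches (lines 56-70) scan every CPU's active ASID, fall back to the reserved slot when the active counter reads 0 (meaning a rollover has already harvested that CPU), and compare the result against the context ID being invalidated. A simplified, single-threaded userspace model of that scan; array sizes and contents are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define NR_CPUS 4

    static uint64_t active_asids[NR_CPUS]   = { 0x102, 0, 0x103, 0x101 };
    static uint64_t reserved_asids[NR_CPUS] = { 0, 0x101, 0, 0 };

    int main(void)
    {
        uint64_t context_id = 0x101;    /* ASID being invalidated (hypothetical) */

        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
            uint64_t asid = active_asids[cpu];

            /* 0 means a rollover cleared this CPU's active slot, so the only
             * trace of its ASID is the reserved slot. */
            if (asid == 0)
                asid = reserved_asids[cpu];
            if (context_id == asid)
                printf("cpu%d may be running ASID %#llx\n",
                       cpu, (unsigned long long)asid);
        }
        return 0;
    }
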
79 * With LPAE, the ASID and page tables are updated atomically, so there is
80 * no need for a reserved set of tables (the active ASID tracking prevents
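
The comment at lines 79-80 is the LPAE special case: the 64-bit TTBR0 carries both the page-table base and the ASID, so a single register write switches them together and no reserved set of tables is needed. A small, hypothetical illustration of composing such a value; the ASID field position is an assumption based on the LPAE TTBR layout and is not shown in these matches:

    #include <stdint.h>
    #include <stdio.h>

    #define TTBR_ASID_SHIFT 48      /* assumed ASID field position in the 64-bit TTBR */

    static uint64_t make_ttbr(uint64_t pgd_phys, uint64_t asid)
    {
        /* Table base and ASID land in one value: writing it to TTBR0 is the
         * single atomic update the comment above refers to. */
        return pgd_phys | (asid << TTBR_ASID_SHIFT);
    }

    int main(void)
    {
        /* Hypothetical physical pgd address and ASID. */
        printf("ttbr0 = %#llx\n",
               (unsigned long long)make_ttbr(0x80004000ULL, 0x42));
        return 0;
    }
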
139 u64 asid; in flush_context() local
141 /* Update the list of reserved ASIDs and the ASID bitmap. */ in flush_context()
144 asid = atomic64_xchg(&per_cpu(active_asids, i), 0); in flush_context()
149 * ASID, as this is the only trace we have of in flush_context()
152 if (asid == 0) in flush_context()
153 asid = per_cpu(reserved_asids, i); in flush_context()
154 __set_bit(asid & ~ASID_MASK, asid_map); in flush_context()
155 per_cpu(reserved_asids, i) = asid; in flush_context()
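
The flush_context() matches (lines 139-155) are the rollover path: each CPU's current ASID is grabbed and zeroed, the reserved slot is used as a fallback if a previous rollover already cleared the active counter, the ASID's index is re-marked in the bitmap, and the value is remembered as reserved so that CPU can keep running without an immediate reallocation. A simplified single-threaded model, with plain arrays standing in for the per-CPU atomics and the bitmap:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define NR_CPUS         4
    #define ASID_BITS       8                   /* assumed width */
    #define NUM_USER_ASIDS  (1 << ASID_BITS)
    #define ASID_MASK       (~0ULL << ASID_BITS)

    static uint64_t active_asids[NR_CPUS]   = { 0x102, 0, 0x105, 0x103 };
    static uint64_t reserved_asids[NR_CPUS] = { 0, 0x104, 0, 0 };
    static unsigned char asid_map[NUM_USER_ASIDS];  /* stand-in for the bitmap */

    static void flush_context_model(void)
    {
        memset(asid_map, 0, sizeof(asid_map));

        for (int i = 0; i < NR_CPUS; i++) {
            /* atomic64_xchg(&active_asids, 0): claim the CPU's ASID and force
             * it down the slow path on its next context switch. */
            uint64_t asid = active_asids[i];
            active_asids[i] = 0;

            /* Already 0: a previous rollover got here first, so the only
             * trace of that CPU's ASID is its reserved slot. */
            if (asid == 0)
                asid = reserved_asids[i];

            asid_map[asid & ~ASID_MASK] = 1;    /* __set_bit() */
            reserved_asids[i] = asid;
        }
    }

    int main(void)
    {
        flush_context_model();
        for (int i = 0; i < NR_CPUS; i++)
            printf("cpu%d reserved %#llx\n",
                   i, (unsigned long long)reserved_asids[i]);
        return 0;
    }
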
165 static bool check_update_reserved_asid(u64 asid, u64 newasid) in check_update_reserved_asid() argument
173 * (i.e. the same ASID in the current generation) but we can't in check_update_reserved_asid()
175 * of the old ASID are updated to reflect the mm. Failure to do in check_update_reserved_asid()
176 * so could result in us missing the reserved ASID in a future in check_update_reserved_asid()
180 if (per_cpu(reserved_asids, cpu) == asid) { in check_update_reserved_asid()
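
check_update_reserved_asid() (lines 165-180) handles the case where the old ASID survived a rollover in one or more reserved slots: the same index can be reused under the new generation, but every reserved copy has to be rewritten to the new value, or a later rollover would re-reserve the stale one. A compact model of that scan:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NR_CPUS 4

    static uint64_t reserved_asids[NR_CPUS] = { 0x103, 0x101, 0x101, 0x107 };

    static bool check_update_reserved(uint64_t asid, uint64_t newasid)
    {
        bool hit = false;

        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
            if (reserved_asids[cpu] == asid) {
                /* Keep the same index into the new generation, but rewrite
                 * every reserved copy; keep scanning, as several CPUs may
                 * hold the same value. */
                hit = true;
                reserved_asids[cpu] = newasid;
            }
        }
        return hit;
    }

    int main(void)
    {
        /* Old ASID 0x101 from generation 0x100, new generation 0x200. */
        printf("hit = %d\n", check_update_reserved(0x101, 0x201));
        return 0;
    }
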
192 u64 asid = atomic64_read(&mm->context.id); in new_context() local
195 if (asid != 0) { in new_context()
196 u64 newasid = generation | (asid & ~ASID_MASK); in new_context()
199 * If our current ASID was active during a rollover, we in new_context()
202 if (check_update_reserved_asid(asid, newasid)) in new_context()
206 * We had a valid ASID in a previous life, so try to re-use in new_context()
209 asid &= ~ASID_MASK; in new_context()
210 if (!__test_and_set_bit(asid, asid_map)) in new_context()
215 * Allocate a free ASID. If we can't find one, take a note of the in new_context()
217 * We always count from ASID #1, as we reserve ASID #0 to switch in new_context()
223 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx); in new_context()
224 if (asid == NUM_USER_ASIDS) { in new_context()
228 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1); in new_context()
231 __set_bit(asid, asid_map); in new_context()
232 cur_idx = asid; in new_context()
234 return asid | generation; in new_context()
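
new_context() (lines 192-234) ties the pieces together: if the mm already had an ASID, reuse its index, either via the reserved-slot path above or by re-claiming the bit in asid_map under the current generation; otherwise scan the bitmap from cur_idx, and on exhaustion bump the generation, flush, and rescan from index 1 (index 0 is kept for the reserved "switching" state). A simplified userspace model with stubbed-out rollover helpers; ASID_BITS and the flat char array are assumptions standing in for the kernel's definitions:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ASID_BITS           8               /* assumed width */
    #define NUM_USER_ASIDS      (1 << ASID_BITS)
    #define ASID_MASK           (~0ULL << ASID_BITS)
    #define ASID_FIRST_VERSION  (1ULL << ASID_BITS)

    static unsigned char asid_map[NUM_USER_ASIDS];
    static uint64_t generation = ASID_FIRST_VERSION;
    static unsigned int cur_idx = 1;

    /* Stand-ins for the rollover helpers modelled in the earlier sketches. */
    static void flush_context_model(void)
    {
        memset(asid_map, 0, sizeof(asid_map));  /* the real path also refills
                                                 * the reserved ASIDs */
    }

    static bool check_update_reserved(uint64_t asid, uint64_t newasid)
    {
        (void)asid; (void)newasid;
        return false;                           /* see the sketch above */
    }

    static uint64_t new_context_model(uint64_t old_id)
    {
        uint64_t asid = old_id;
        unsigned int idx;

        if (asid != 0) {
            uint64_t newasid = generation | (asid & ~ASID_MASK);

            /* Our old ASID sits in a reserved slot: keep its index. */
            if (check_update_reserved(asid, newasid))
                return newasid;

            /* Otherwise try to re-claim the old index in the bitmap. */
            asid &= ~ASID_MASK;
            if (!asid_map[asid]) {
                asid_map[asid] = 1;
                return newasid;
            }
        }

        /* Allocate a fresh index, scanning from cur_idx; index 0 stays
         * reserved for the "no user ASID" value used while switching. */
        for (idx = cur_idx; idx < NUM_USER_ASIDS && asid_map[idx]; idx++)
            ;
        if (idx == NUM_USER_ASIDS) {
            /* Exhausted: start a new generation, flush, retry from 1. */
            generation += ASID_FIRST_VERSION;
            flush_context_model();
            for (idx = 1; asid_map[idx]; idx++)
                ;                               /* guaranteed to succeed */
        }

        asid_map[idx] = 1;
        cur_idx = idx;
        return idx | generation;
    }

    int main(void)
    {
        printf("first  = %#llx\n", (unsigned long long)new_context_model(0));
        printf("second = %#llx\n", (unsigned long long)new_context_model(0));
        return 0;
    }
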
241 u64 asid; in check_and_switch_context() local
246 * We cannot update the pgd and the ASID atomically with classic in check_and_switch_context()
252 asid = atomic64_read(&mm->context.id); in check_and_switch_context()
253 if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) in check_and_switch_context()
254 && atomic64_xchg(&per_cpu(active_asids, cpu), asid)) in check_and_switch_context()
258 /* Check that our ASID belongs to the current generation. */ in check_and_switch_context()
259 asid = atomic64_read(&mm->context.id); in check_and_switch_context()
260 if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) { in check_and_switch_context()
261 asid = new_context(mm, cpu); in check_and_switch_context()
262 atomic64_set(&mm->context.id, asid); in check_and_switch_context()
270 atomic64_set(&per_cpu(active_asids, cpu), asid); in check_and_switch_context()
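
check_and_switch_context() (lines 241-270) wraps it all in a fast/slow path: lock-free when the mm's ASID belongs to the current generation and the exchange into the per-CPU active slot returns non-zero (no rollover raced in), otherwise retry under cpu_asid_lock and allocate a new ASID if the generation really is stale. A simplified userspace model using C11 atomics and a pthread mutex; the TLB-flush and cpumask bookkeeping of the real function are omitted, and the allocation helper is only a placeholder for the new_context sketch above:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ASID_BITS 8                             /* assumed width */

    static atomic_uint_fast64_t asid_generation = (1ULL << ASID_BITS);
    static atomic_uint_fast64_t active_asid;        /* one slot per CPU in the kernel */
    static pthread_mutex_t cpu_asid_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Placeholder for the allocation sketch above: pretend index 1 is free. */
    static uint64_t new_context_model(uint64_t old_id)
    {
        (void)old_id;
        return atomic_load(&asid_generation) | 1;
    }

    static void switch_context_model(atomic_uint_fast64_t *mm_id)
    {
        uint64_t asid = atomic_load(mm_id);

        /* Fast path, lockless: the ASID is from the current generation and
         * the exchange returned non-zero, i.e. no rollover zeroed this CPU's
         * active slot between the check and the exchange. */
        if (!((asid ^ atomic_load(&asid_generation)) >> ASID_BITS) &&
            atomic_exchange(&active_asid, asid))
            return;

        /* Slow path: re-check under the lock, allocate if the generation is
         * stale, then publish the result for the next rollover to harvest. */
        pthread_mutex_lock(&cpu_asid_lock);
        asid = atomic_load(mm_id);
        if ((asid ^ atomic_load(&asid_generation)) >> ASID_BITS) {
            asid = new_context_model(asid);
            atomic_store(mm_id, asid);
        }
        atomic_store(&active_asid, asid);
        pthread_mutex_unlock(&cpu_asid_lock);
    }

    int main(void)
    {
        static atomic_uint_fast64_t mm_id;          /* models mm->context.id */

        switch_context_model(&mm_id);               /* stale id 0 -> slow path */
        switch_context_model(&mm_id);               /* now takes the fast path */
        printf("active = %#llx\n", (unsigned long long)atomic_load(&active_asid));
        return 0;
    }
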