Lines Matching full:reorder
259 * serialization, if present in one of the percpu reorder queues.
262 * the cpu's reorder queue.
268 struct padata_list *reorder; in padata_find_next() local
271 reorder = per_cpu_ptr(pd->reorder_list, cpu); in padata_find_next()
273 spin_lock(&reorder->lock); in padata_find_next()
274 if (list_empty(&reorder->list)) { in padata_find_next()
275 spin_unlock(&reorder->lock); in padata_find_next()
279 padata = list_entry(reorder->list.next, struct padata_priv, list); in padata_find_next()
286 spin_unlock(&reorder->lock); in padata_find_next()
296 spin_unlock(&reorder->lock); in padata_find_next()
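The matches above are from padata_find_next() in kernel/padata.c: each CPU owns a reorder list, and the head of the current CPU's list is dequeued only when its sequence number is the one currently due for serialization; otherwise NULL is returned because the next object is still in flight on another CPU. Below is a minimal, single-threaded userspace sketch of that idea. The names (model_pd, find_next, enqueue) are invented for the sketch, plain pointers stand in for the kernel's locked per-CPU lists, and the round-robin step models cpumask_next_wrap().

/* find_next.c - toy model of the padata "find next" step: an object is
 * handed to serialization only when it carries the next expected
 * sequence number and sits on the expected cpu's reorder list. */
#include <stdio.h>

#define NCPUS 2

struct obj {
    unsigned int seq_nr;
    struct obj *next;
};

struct model_pd {
    struct obj *reorder[NCPUS]; /* per-cpu reorder lists, sorted by seq_nr */
    unsigned int processed;     /* next sequence number due for serialization */
    int cpu;                    /* cpu whose list must hold that object */
};

/* Return the head of the current cpu's list iff it is next in sequence;
 * otherwise NULL, meaning the next object is still being processed. */
static struct obj *find_next(struct model_pd *pd, int remove)
{
    struct obj *o = pd->reorder[pd->cpu];

    if (!o || o->seq_nr != pd->processed)
        return NULL;
    if (remove) {
        pd->reorder[pd->cpu] = o->next;
        pd->processed++;
        pd->cpu = (pd->cpu + 1) % NCPUS; /* round-robin, like cpumask_next_wrap() */
    }
    return o;
}

/* Insert into one cpu's list, keeping ascending seq_nr order. */
static void enqueue(struct model_pd *pd, int cpu, struct obj *o)
{
    struct obj **p = &pd->reorder[cpu];

    while (*p && (*p)->seq_nr < o->seq_nr)
        p = &(*p)->next;
    o->next = *p;
    *p = o;
}

int main(void)
{
    struct model_pd pd = { .processed = 0, .cpu = 0 };
    struct obj objs[4] = { { 0 }, { 1 }, { 2 }, { 3 } };
    struct obj *o;

    /* seq_nr hashes to cpus round-robin, but objects finish out of order */
    enqueue(&pd, 1, &objs[3]);
    enqueue(&pd, 0, &objs[0]);
    enqueue(&pd, 1, &objs[1]);
    enqueue(&pd, 0, &objs[2]);

    while ((o = find_next(&pd, 1)))
        printf("serialize seq %u\n", o->seq_nr); /* prints 0, 1, 2, 3 */
    return 0;
}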
306 struct padata_list *reorder; in padata_reorder() local
310 * the reorder queue at a time. Calculating in which percpu reorder in padata_reorder()
313 * the objects arrive at the reorder queues. So a cpu could wait to in padata_reorder()
327 * cpu's reorder queue, nothing to do for now. in padata_reorder()
346 * the reorder queues in the meantime. in padata_reorder()
348 * Ensure reorder queue is read after pd->lock is dropped so we see in padata_reorder()
354 reorder = per_cpu_ptr(pd->reorder_list, pd->cpu); in padata_reorder()
355 if (!list_empty(&reorder->list) && padata_find_next(pd, false)) { in padata_reorder()
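The padata_reorder() matches show the dequeue side: a trylock on pd->lock elects a single dequeuer, the winner drains every object that is ready, and after dropping the lock it re-checks the reorder queue (the barrier noted above pairs with one in padata_do_serial) so an object that raced with the unlock is not stranded; in the kernel that re-check kicks a separate reorder work item. The userspace sketch below models the trylock-and-recheck pattern with pthreads. pd_lock, q_lock, drain() and reorder() are invented names for the model, a flat array replaces the per-CPU lists, and a retry loop stands in for the kernel's fallback work item.

/* reorder_trylock.c - whoever wins the trylock drains for everyone;
 * losers just return, and the winner re-checks after unlocking. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NOBJS 1000

static pthread_mutex_t pd_lock = PTHREAD_MUTEX_INITIALIZER; /* stands in for pd->lock */
static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;  /* stands in for reorder->lock */
static int queued[NOBJS];        /* queued[seq] set once object seq has completed */
static unsigned int processed;   /* next seq to serialize; advanced under q_lock */
static atomic_uint serialized;

/* Drain everything that is ready, strictly in sequence order. */
static void drain(void)
{
    for (;;) {
        int ready;

        pthread_mutex_lock(&q_lock);
        ready = processed < NOBJS && queued[processed];
        if (ready)
            processed++;
        pthread_mutex_unlock(&q_lock);
        if (!ready)
            break;
        atomic_fetch_add(&serialized, 1); /* the serial callback would run here */
    }
}

/* The padata_reorder() pattern: trylock so only one thread dequeues,
 * then re-check after unlocking so an object queued during the unlock
 * window is not left behind. */
static void reorder(void)
{
    for (;;) {
        int more;

        if (pthread_mutex_trylock(&pd_lock))
            return; /* the current holder will drain our object too */
        drain();
        pthread_mutex_unlock(&pd_lock);

        pthread_mutex_lock(&q_lock);
        more = processed < NOBJS && queued[processed];
        pthread_mutex_unlock(&q_lock);
        if (!more)
            return;
    }
}

/* Each producer completes every other object, racing with the other
 * producer, then calls reorder() like padata_do_serial() does. */
static void *producer(void *arg)
{
    for (unsigned int seq = (long)arg; seq < NOBJS; seq += 2) {
        pthread_mutex_lock(&q_lock);
        queued[seq] = 1;
        pthread_mutex_unlock(&q_lock);
        reorder();
    }
    return NULL;
}

int main(void)
{
    pthread_t t[2];

    for (long i = 0; i < 2; i++)
        pthread_create(&t[i], NULL, producer, (void *)i);
    for (int i = 0; i < 2; i++)
        pthread_join(t[i], NULL);
    printf("serialized %u of %d objects in order\n",
           (unsigned int)atomic_load(&serialized), NOBJS);
    return 0;
}

Built with cc -pthread, the sketch should report all 1000 objects serialized, which is the property the kernel's re-check plus reorder work item exists to guarantee.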
422 struct padata_list *reorder = per_cpu_ptr(pd->reorder_list, hashed_cpu); in padata_do_serial() local
426 spin_lock(&reorder->lock); in padata_do_serial()
428 list_for_each_prev(pos, &reorder->list) { in padata_do_serial()
435 spin_unlock(&reorder->lock); in padata_do_serial()
438 * Ensure the addition to the reorder list is ordered correctly in padata_do_serial()
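The padata_do_serial() matches show the enqueue side: a finished object is inserted into the reorder list of the CPU its sequence number hashes to, walking the list from the tail with list_for_each_prev() so it lands in ascending seq_nr order; in the kernel the comparison is done on a signed difference so the ordering survives sequence-number wraparound. A small standalone sketch of that sorted insert follows; node, insert_sorted and the other names are made up for the sketch, and a dummy-headed circular list mimics struct list_head.

/* sorted_insert.c - insert into an already sorted list by walking
 * backwards from the tail, as padata_do_serial() does. */
#include <stdio.h>

struct node {
    unsigned int seq_nr;
    struct node *prev, *next;
};

static void list_init(struct node *head)
{
    head->prev = head->next = head;
}

static void insert_after(struct node *pos, struct node *n)
{
    n->prev = pos;
    n->next = pos->next;
    pos->next->prev = n;
    pos->next = n;
}

/* Most objects finish roughly in order, so scanning from the tail
 * usually stops after one or two steps. */
static void insert_sorted(struct node *head, struct node *n)
{
    struct node *pos;

    for (pos = head->prev; pos != head; pos = pos->prev) {
        /* signed difference copes with seq_nr wrapping around */
        if ((int)(pos->seq_nr - n->seq_nr) < 0)
            break;
    }
    insert_after(pos, n); /* pos == head means insert at the front */
}

int main(void)
{
    struct node head, a = { .seq_nr = 5 }, b = { .seq_nr = 2 }, c = { .seq_nr = 9 };
    struct node *p;

    list_init(&head);
    insert_sorted(&head, &a);
    insert_sorted(&head, &b);
    insert_sorted(&head, &c);

    for (p = head.next; p != &head; p = p->next)
        printf("%u ", p->seq_nr); /* prints: 2 5 9 */
    printf("\n");
    return 0;
}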
587 /* Initialize per-CPU reorder lists */
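The last match (source line 587) is from the setup path, where every possible CPU gets its own reorder list and lock so that a completing job only contends on the queue of the CPU its sequence number hashes to. A rough userspace analogue is sketched below, assuming a flat array and pthread mutexes in place of the kernel's per-CPU allocation and spinlocks; all names are invented for the sketch.

/* init_reorder_lists.c - one independent (lock, list) pair per cpu. */
#include <pthread.h>
#include <stdlib.h>

struct list_head {
    struct list_head *prev, *next;
};

struct reorder_list {
    struct list_head list;
    pthread_mutex_t lock;
};

static struct reorder_list *alloc_reorder_lists(int ncpus)
{
    struct reorder_list *lists = calloc(ncpus, sizeof(*lists));

    if (!lists)
        return NULL;
    for (int cpu = 0; cpu < ncpus; cpu++) {
        /* empty circular list, like INIT_LIST_HEAD() */
        lists[cpu].list.prev = lists[cpu].list.next = &lists[cpu].list;
        pthread_mutex_init(&lists[cpu].lock, NULL);
    }
    return lists;
}

int main(void)
{
    struct reorder_list *lists = alloc_reorder_lists(4);

    if (!lists)
        return 1;
    /* parallel workers would now enqueue into lists[cpu] */
    free(lists); /* a real teardown would also destroy the mutexes */
    return 0;
}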