Lines Matching +full:foo +full:- +full:queue

1 // SPDX-License-Identifier: GPL-2.0-or-later
13 * - object w/ a bit
14 * - free list
18 * - explicit stack instead of recursion
19 * - tail recurse on first born instead of immediate push/pop
20 * - we gather the stuff that should not be killed into a tree
25 * - don't just push entire root set; process in place
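
The properties above describe the collector's design; "explicit stack instead of recursion" means the traversal keeps its own worklist, so depth is bounded by available memory rather than by kernel stack size. A minimal userspace sketch of that idea, using a hypothetical struct node (illustration only, not kernel code):

#include <stdlib.h>

struct node {
	int visited;
	size_t nchild;
	struct node **child;
};

/* Iterative DFS: an explicit stack replaces recursion, so a deep or
 * cyclic graph cannot overflow the call stack. Returns 0 on success,
 * -1 if the stack could not be grown.
 */
static int dfs_mark(struct node *root)
{
	size_t top = 0, cap = 16;
	struct node **stack = malloc(cap * sizeof(*stack));

	if (!stack)
		return -1;

	stack[top++] = root;
	while (top) {
		struct node *n = stack[--top];

		if (n->visited)
			continue;
		n->visited = 1;

		for (size_t i = 0; i < n->nchild; i++) {
			if (top == cap) {
				struct node **tmp = realloc(stack, 2 * cap * sizeof(*stack));

				if (!tmp) {
					free(stack);
					return -1;
				}
				stack = tmp;
				cap *= 2;
			}
			stack[top++] = n->child[i];
		}
	}

	free(stack);
	return 0;
}
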
32 * of foo to bar and vice versa. Current code chokes on that.
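
Al Viro's 1998 note is about exactly this shape of graph: once each socket's fd sits in the other's receive queue, neither can be freed by ordinary refcounting. A userspace demonstration of the cycle (error handling elided; the program leaks the cycle on purpose):

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <unistd.h>

/* Put the fd of one AF_UNIX socket in flight on another via SCM_RIGHTS. */
static void send_fd(int via, int fd)
{
	char data = 'x';
	struct iovec iov = { .iov_base = &data, .iov_len = 1 };
	union {
		char buf[CMSG_SPACE(sizeof(int))];
		struct cmsghdr align;
	} u;
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = u.buf,
		.msg_controllen = sizeof(u.buf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));

	sendmsg(via, &msg, 0);
}

int main(void)
{
	int foo[2], bar[2];

	socketpair(AF_UNIX, SOCK_DGRAM, 0, foo);
	socketpair(AF_UNIX, SOCK_DGRAM, 0, bar);

	send_fd(foo[0], bar[1]);	/* bar's fd now sits in foo's queue */
	send_fd(bar[0], foo[1]);	/* foo's fd now sits in bar's queue */

	/* After these close() calls no fd refers to either socket, yet
	 * each still holds a reference to the other through its receive
	 * queue: only the garbage collector can reclaim them.
	 */
	close(foo[0]); close(foo[1]);
	close(bar[0]); close(bar[1]);
	return 0;
}
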
38 * upon the beginning and unmark non-junk ones.
56 * parents (->gc_tree).
58 * Damn. Added missing check for ->dead in listen queues scanning.
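
The remaining matches are from the current edge/vertex collector, which models each socket with fds in flight as a graph vertex and each in-flight fd as a directed edge. Assembled from the fields these matches touch, the two core types look roughly like this (a sketch; the comments are mine, so verify against include/net/af_unix.h):

struct unix_vertex {
	struct list_head edges;		/* this vertex's outgoing unix_edges */
	struct list_head entry;		/* on unix_unvisited_vertices et al. */
	struct list_head scc_entry;	/* Tarjan stack / SCC grouping */
	unsigned long out_degree;	/* number of this socket's fds in flight */
	unsigned long index;		/* Tarjan visit order */
	unsigned long scc_index;	/* lowest index reachable (lowlink) */
};

struct unix_edge {
	struct unix_sock *predecessor;	/* socket whose fd is in flight */
	struct unix_sock *successor;	/* socket the fd was sent to */
	struct list_head vertex_entry;	/* on predecessor->vertex->edges */
	struct list_head stack_entry;	/* DFS edge stack in __unix_walk_scc() */
};
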
89 if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) { in unix_get_socket()
92 struct sock *sk = sock->sk; in unix_get_socket()
94 ops = READ_ONCE(sock->ops); in unix_get_socket()
97 if (sk && ops && ops->family == PF_UNIX) in unix_get_socket()
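
These three matches cover the heart of unix_get_socket(), which decides whether a file passed via SCM_RIGHTS is an AF_UNIX socket the GC must track; the FMODE_PATH test excludes O_PATH fds, which cannot be used to send further fds. Filled out from the fragments plus surrounding context, the function reads approximately (not verbatim):

struct unix_sock *unix_get_socket(struct file *filp)
{
	struct inode *inode = file_inode(filp);

	/* A socket, and not a mere O_PATH handle? */
	if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
		struct socket *sock = SOCKET_I(inode);
		const struct proto_ops *ops;
		struct sock *sk = sock->sk;

		ops = READ_ONCE(sock->ops);

		/* PF_UNIX? */
		if (sk && ops && ops->family == PF_UNIX)
			return unix_sk(sk);
	}

	return NULL;
}
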
109 if (edge->successor->listener) in unix_edge_successor()
110 return unix_sk(edge->successor->listener)->vertex; in unix_edge_successor()
112 return edge->successor->vertex; in unix_edge_successor()
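
The listener indirection in unix_edge_successor() is deliberate: an embryo socket (created by a connect() but not yet accept()ed) is reachable only through its listener, so an edge whose successor is an embryo is resolved to the listener's vertex. Once accept() hands the socket to user space, unix_update_edges() clears ->listener and the edge resolves to the socket's own vertex.
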
142 struct unix_vertex *vertex = edge->predecessor->vertex; in unix_add_edge()
145 vertex = list_first_entry(&fpl->vertices, typeof(*vertex), entry); in unix_add_edge()
146 vertex->index = unix_vertex_unvisited_index; in unix_add_edge()
147 vertex->out_degree = 0; in unix_add_edge()
148 INIT_LIST_HEAD(&vertex->edges); in unix_add_edge()
149 INIT_LIST_HEAD(&vertex->scc_entry); in unix_add_edge()
151 list_move_tail(&vertex->entry, &unix_unvisited_vertices); in unix_add_edge()
152 edge->predecessor->vertex = vertex; in unix_add_edge()
155 vertex->out_degree++; in unix_add_edge()
156 list_add_tail(&edge->vertex_entry, &vertex->edges); in unix_add_edge()
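
Read together, the unix_add_edge() matches show lazy vertex attachment: a socket only gets a vertex when its fd is first put in flight, taken from the list preallocated in unix_prepare_fpl(). The surrounding function is approximately this (the if (!vertex) guard produced no match and is supplied from context):

static void unix_add_edge(struct scm_fp_list *fpl, struct unix_edge *edge)
{
	struct unix_vertex *vertex = edge->predecessor->vertex;

	if (!vertex) {
		/* First in-flight fd of this socket: adopt a vertex
		 * preallocated by unix_prepare_fpl().
		 */
		vertex = list_first_entry(&fpl->vertices, typeof(*vertex), entry);
		vertex->index = unix_vertex_unvisited_index;
		vertex->out_degree = 0;
		INIT_LIST_HEAD(&vertex->edges);
		INIT_LIST_HEAD(&vertex->scc_entry);

		list_move_tail(&vertex->entry, &unix_unvisited_vertices);
		edge->predecessor->vertex = vertex;
	}

	vertex->out_degree++;
	list_add_tail(&edge->vertex_entry, &vertex->edges);
}
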
163 struct unix_vertex *vertex = edge->predecessor->vertex; in unix_del_edge()
165 if (!fpl->dead) in unix_del_edge()
168 list_del(&edge->vertex_entry); in unix_del_edge()
169 vertex->out_degree--; in unix_del_edge()
171 if (!vertex->out_degree) { in unix_del_edge()
172 edge->predecessor->vertex = NULL; in unix_del_edge()
173 list_move_tail(&vertex->entry, &fpl->vertices); in unix_del_edge()
181 list_for_each_entry_safe(vertex, next_vertex, &fpl->vertices, entry) { in unix_free_vertices()
182 list_del(&vertex->entry); in unix_free_vertices()
196 if (!fpl->count_unix) in unix_add_edges()
200 struct unix_sock *inflight = unix_get_socket(fpl->fp[j++]); in unix_add_edges()
206 edge = fpl->edges + i++; in unix_add_edges()
207 edge->predecessor = inflight; in unix_add_edges()
208 edge->successor = receiver; in unix_add_edges()
211 } while (i < fpl->count_unix); in unix_add_edges()
213 receiver->scm_stat.nr_unix_fds += fpl->count_unix; in unix_add_edges()
214 WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + fpl->count_unix); in unix_add_edges()
216 WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight + fpl->count); in unix_add_edges()
220 fpl->inflight = true; in unix_add_edges()
232 if (!fpl->count_unix) in unix_del_edges()
236 struct unix_edge *edge = fpl->edges + i++; in unix_del_edges()
239 } while (i < fpl->count_unix); in unix_del_edges()
241 if (!fpl->dead) { in unix_del_edges()
242 receiver = fpl->edges[0].successor; in unix_del_edges()
243 receiver->scm_stat.nr_unix_fds -= fpl->count_unix; in unix_del_edges()
245 WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - fpl->count_unix); in unix_del_edges()
247 WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight - fpl->count); in unix_del_edges()
251 fpl->inflight = false; in unix_del_edges()
260 if (!receiver->scm_stat.nr_unix_fds) { in unix_update_edges()
261 receiver->listener = NULL; in unix_update_edges()
264 unix_update_graph(unix_sk(receiver->listener)->vertex); in unix_update_edges()
265 receiver->listener = NULL; in unix_update_edges()
275 if (!fpl->count_unix) in unix_prepare_fpl()
278 for (i = 0; i < fpl->count_unix; i++) { in unix_prepare_fpl()
283 list_add(&vertex->entry, &fpl->vertices); in unix_prepare_fpl()
286 fpl->edges = kvmalloc_array(fpl->count_unix, sizeof(*fpl->edges), in unix_prepare_fpl()
288 if (!fpl->edges) in unix_prepare_fpl()
295 return -ENOMEM; in unix_prepare_fpl()
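
unix_prepare_fpl() front-loads every allocation the send path may need, one vertex per AF_UNIX fd plus a flat edge array, so that unix_add_edges() itself can never fail; on any allocation failure the partial vertex list is unwound and -ENOMEM returned. Reconstructed from the matches (labels and exact GFP flags approximate):

int unix_prepare_fpl(struct scm_fp_list *fpl)
{
	struct unix_vertex *vertex;
	int i;

	if (!fpl->count_unix)
		return 0;

	for (i = 0; i < fpl->count_unix; i++) {
		vertex = kmalloc(sizeof(*vertex), GFP_KERNEL);
		if (!vertex)
			goto err;

		list_add(&vertex->entry, &fpl->vertices);
	}

	fpl->edges = kvmalloc_array(fpl->count_unix, sizeof(*fpl->edges),
				    GFP_KERNEL_ACCOUNT);
	if (!fpl->edges)
		goto err;

	return 0;

err:
	unix_free_vertices(fpl);
	return -ENOMEM;
}
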
300 if (fpl->inflight) in unix_destroy_fpl()
303 kvfree(fpl->edges); in unix_destroy_fpl()
313 list_for_each_entry(edge, &vertex->edges, vertex_entry) { in unix_vertex_dead()
316 /* The vertex's fd can be received by a non-inflight socket. */ in unix_vertex_dead()
323 if (next_vertex->scc_index != vertex->scc_index) in unix_vertex_dead()
329 edge = list_first_entry(&vertex->edges, typeof(*edge), vertex_entry); in unix_vertex_dead()
330 u = edge->predecessor; in unix_vertex_dead()
331 total_ref = file_count(u->sk.sk_socket->file); in unix_vertex_dead()
334 if (total_ref != vertex->out_degree) in unix_vertex_dead()
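
The total_ref != vertex->out_degree test is a counting argument: file_count() is the number of references to the socket's file, out_degree the number of its fds currently in flight. Equality means every reference lives inside some receive queue, and combined with the per-edge check above (every successor still in flight and in the same SCC) it proves user space can never reach the SCC again. For example, a socket whose fd was sent twice and then close()d by its owner has file_count() == 2 == out_degree and is dead; had the owner kept one fd, file_count() would be 3 and the SCC would be spared.
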
347 skb_queue_splice_init(&u->sk.sk_receive_queue, hitlist); in unix_collect_queue()
350 if (u->oob_skb) { in unix_collect_queue()
351 WARN_ON_ONCE(skb_unref(u->oob_skb)); in unix_collect_queue()
352 u->oob_skb = NULL; in unix_collect_queue()
362 struct sk_buff_head *queue; in unix_collect_skb() local
366 edge = list_first_entry(&vertex->edges, typeof(*edge), vertex_entry); in unix_collect_skb()
367 u = edge->predecessor; in unix_collect_skb()
368 queue = &u->sk.sk_receive_queue; in unix_collect_skb()
370 spin_lock(&queue->lock); in unix_collect_skb()
372 if (u->sk.sk_state == TCP_LISTEN) { in unix_collect_skb()
375 skb_queue_walk(queue, skb) { in unix_collect_skb()
376 struct sk_buff_head *embryo_queue = &skb->sk->sk_receive_queue; in unix_collect_skb()
378 /* listener -> embryo order; the inversion never happens. */ in unix_collect_skb()
379 spin_lock_nested(&embryo_queue->lock, U_RECVQ_LOCK_EMBRYO); in unix_collect_skb()
380 unix_collect_queue(unix_sk(skb->sk), hitlist); in unix_collect_skb()
381 spin_unlock(&embryo_queue->lock); in unix_collect_skb()
387 spin_unlock(&queue->lock); in unix_collect_skb()
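
U_RECVQ_LOCK_EMBRYO in the spin_lock_nested() call is a lockdep subclass annotation, not a different lock: the embryo's queue lock belongs to the same lock class as the listener's queue lock already held, and the annotation records that the listener-then-embryo order is the only one that can occur, so lockdep does not flag the nesting as a self-deadlock.
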
402 /* Self-reference or an embryo-listener circle? */ in unix_scc_cyclic()
403 list_for_each_entry(edge, &vertex->edges, vertex_entry) { in unix_scc_cyclic()
422 /* Push vertex to vertex_stack and mark it as on-stack in __unix_walk_scc()
426 list_add(&vertex->scc_entry, &vertex_stack); in __unix_walk_scc()
428 vertex->index = *last_index; in __unix_walk_scc()
429 vertex->scc_index = *last_index; in __unix_walk_scc()
433 list_for_each_entry(edge, &vertex->edges, vertex_entry) { in __unix_walk_scc()
439 if (next_vertex->index == unix_vertex_unvisited_index) { in __unix_walk_scc()
445 list_add(&edge->stack_entry, &edge_stack); in __unix_walk_scc()
455 list_del_init(&edge->stack_entry); in __unix_walk_scc()
458 vertex = edge->predecessor->vertex; in __unix_walk_scc()
464 vertex->scc_index = min(vertex->scc_index, next_vertex->scc_index); in __unix_walk_scc()
465 } else if (next_vertex->index != unix_vertex_grouped_index) { in __unix_walk_scc()
472 vertex->scc_index = min(vertex->scc_index, next_vertex->scc_index); in __unix_walk_scc()
478 if (vertex->index == vertex->scc_index) { in __unix_walk_scc()
488 __list_cut_position(&scc, &vertex_stack, &vertex->scc_entry); in __unix_walk_scc()
492 list_move_tail(&v->entry, &unix_visited_vertices); in __unix_walk_scc()
494 /* Mark vertex as off-stack. */ in __unix_walk_scc()
495 v->index = unix_vertex_grouped_index; in __unix_walk_scc()
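
The __unix_walk_scc() fragments are Tarjan's strongly connected components algorithm in iterative form: assign an index on first visit, propagate the lowest reachable index through scc_index (the classic "lowlink"), and pop one complete SCC whenever index == scc_index. A compact userspace rendering of the same algorithm, recursive for clarity where the kernel uses edge_stack to avoid recursion (hypothetical adjacency-matrix graph, illustration only):

#include <stdio.h>

enum { NV = 5 };

static int adj[NV][NV];			/* adj[v][w]: edge v -> w */
static int idx[NV], low[NV], on_stack[NV];
static int stack[NV], top;
static int counter = 1;			/* idx 0 means "unvisited" */

static void strongconnect(int v)
{
	int w;

	idx[v] = low[v] = counter++;
	stack[top++] = v;
	on_stack[v] = 1;

	for (w = 0; w < NV; w++) {
		if (!adj[v][w])
			continue;
		if (!idx[w]) {			/* tree edge: visit w first */
			strongconnect(w);
			if (low[w] < low[v])
				low[v] = low[w];
		} else if (on_stack[w]) {	/* back edge into the stack */
			if (idx[w] < low[v])
				low[v] = idx[w];
		}
	}

	if (idx[v] == low[v]) {			/* v is the root of an SCC */
		printf("SCC:");
		do {
			w = stack[--top];
			on_stack[w] = 0;	/* kernel: unix_vertex_grouped_index */
			printf(" %d", w);
		} while (w != v);
		printf("\n");
	}
}

int main(void)
{
	int v;

	adj[0][1] = adj[1][2] = adj[2][0] = 1;	/* a 3-cycle */
	adj[3][4] = 1;				/* acyclic tail */

	for (v = 0; v < NV; v++)
		if (!idx[v])
			strongconnect(v);

	return 0;
}

The kernel gets the same effect without recursion by pushing the current edge on edge_stack and resuming it after the child vertex is finished, and instead of an on_stack flag it marks grouped vertices with unix_vertex_grouped_index.
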
546 list_add(&scc, &vertex->scc_entry); in unix_walk_scc_fast()
549 list_move_tail(&vertex->entry, &unix_visited_vertices); in unix_walk_scc_fast()
591 UNIXCB(skb).fp->dead = true; in __unix_gc()
625 if (!fpl || !fpl->count_unix || in wait_for_unix_gc()
626 READ_ONCE(fpl->user->unix_inflight) < UNIX_INFLIGHT_SANE_USER) in wait_for_unix_gc()
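
This is the sender-side throttle: a task whose user has fewer than UNIX_INFLIGHT_SANE_USER fds in flight (a small multiple of SCM_MAX_FD in the trees I have checked) skips the wait entirely, while anyone past that threshold must let a GC pass run before queueing more, so the in-flight graph cannot grow faster than it is collected.
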