/*
 * net/tipc/bcast.c: TIPC broadcast code
 *
 * Copyright (c) 2004-2006, 2014-2015, Ericsson AB
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "socket.h"
#include "msg.h"
#include "bcast.h"
#include "name_distr.h"
#include "core.h"

#define	MAX_PKT_DEFAULT_MCAST	1500	/* bcast link max packet size (fixed) */
#define	BCLINK_WIN_DEFAULT	20	/* bcast link window size (default) */

const char tipc_bclink_name[] = "broadcast-link";

static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff);
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);

static void tipc_bclink_lock(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	spin_lock_bh(&tn->bclink->lock);
}

static void tipc_bclink_unlock(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	spin_unlock_bh(&tn->bclink->lock);
}

void tipc_bclink_input(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_sk_mcast_rcv(net, &tn->bclink->arrvq, &tn->bclink->inputq);
}

uint tipc_bclink_get_mtu(void)
{
	return MAX_PKT_DEFAULT_MCAST;
}

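/* Each buffer on the broadcast transmit queue tracks the number of nodes
 * that still have to acknowledge it. The count is stashed in the otherwise
 * unused TIPC_SKB_CB(buf)->handle field, so no extra state is allocated;
 * tipc_bclink_acknowledge() frees the buffer once the count reaches zero.
 */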
static u32 bcbuf_acks(struct sk_buff *buf)
{
	return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
}

static void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
{
	TIPC_SKB_CB(buf)->handle = (void *)(unsigned long)acks;
}

static void bcbuf_decr_acks(struct sk_buff *buf)
{
	bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
}

void tipc_bclink_add_node(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_nmap_add(&tn->bclink->bcast_nodes, addr);
	tipc_bclink_unlock(net);
}

void tipc_bclink_remove_node(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_nmap_remove(&tn->bclink->bcast_nodes, addr);
	tipc_bclink_unlock(net);
}

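/* The broadcast link reuses silent_intv_cnt to record the sequence number
 * of the last packet it has sent (snd_nxt - 1); tipc_bclink_get_last_sent()
 * simply reads that value back.
 */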
static void bclink_set_last_sent(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	bcl->silent_intv_cnt = mod(bcl->snd_nxt - 1);
}

u32 tipc_bclink_get_last_sent(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return tn->bcl->silent_intv_cnt;
}

static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
{
	node->bclink.last_sent = less_eq(node->bclink.last_sent, seqno) ?
						seqno : node->bclink.last_sent;
}

/**
 * tipc_bclink_retransmit_to - get most recent node to request retransmission
 *
 * Called with bclink_lock locked
 */
struct tipc_node *tipc_bclink_retransmit_to(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return tn->bclink->retransmit_to;
}

/**
 * bclink_retransmit_pkt - retransmit broadcast packets
 * @after: sequence number of last packet to *not* retransmit
 * @to: sequence number of last packet to retransmit
 *
 * Called with bclink_lock locked
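 *
 * E.g. with after == 10 and to == 13, the walk below finds the first buffer
 * whose sequence number exceeds 10 and retransmits mod(to - after) == 3
 * buffers, i.e. packets 11 through 13.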
 */
static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
{
	struct sk_buff *skb;
	struct tipc_link *bcl = tn->bcl;

	skb_queue_walk(&bcl->transmq, skb) {
		if (more(buf_seqno(skb), after)) {
			tipc_link_retransmit(bcl, skb, mod(to - after));
			break;
		}
	}
}

/**
 * tipc_bclink_wakeup_users - wake up pending users
 *
 * Called with no locks taken
 */
void tipc_bclink_wakeup_users(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_sk_rcv(net, &tn->bclink->link.wakeupq);
}

/**
 * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
 * @n_ptr: node that sent acknowledgement info
 * @acked: broadcast sequence # that has been acknowledged
 *
 * Node is locked, bclink_lock unlocked.
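 *
 * An explicit @acked value is accepted only if it lies within the window
 * of messages that are sent but not yet fully acknowledged, i.e. from the
 * first buffer still on the transmit queue up to the last packet sent,
 * and only if it advances beyond the node's previously recorded ack.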
 */
void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
{
	struct sk_buff *skb, *tmp;
	unsigned int released = 0;
	struct net *net = n_ptr->net;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	if (unlikely(!n_ptr->bclink.recv_permitted))
		return;

	tipc_bclink_lock(net);

	/* Bail out if tx queue is empty (no cleanup is required) */
	skb = skb_peek(&tn->bcl->transmq);
	if (!skb)
		goto exit;

	/* Determine which messages need to be acknowledged */
	if (acked == INVALID_LINK_SEQ) {
		/*
		 * Contact with specified node has been lost, so need to
		 * acknowledge sent messages only (if other nodes still exist)
		 * or both sent and unsent messages (otherwise)
		 */
		if (tn->bclink->bcast_nodes.count)
			acked = tn->bcl->silent_intv_cnt;
		else
			acked = tn->bcl->snd_nxt;
	} else {
		/*
		 * Bail out if specified sequence number does not correspond
		 * to a message that has been sent and not yet acknowledged
		 */
		if (less(acked, buf_seqno(skb)) ||
		    less(tn->bcl->silent_intv_cnt, acked) ||
		    less_eq(acked, n_ptr->bclink.acked))
			goto exit;
	}

	/* Skip over packets that node has previously acknowledged */
	skb_queue_walk(&tn->bcl->transmq, skb) {
		if (more(buf_seqno(skb), n_ptr->bclink.acked))
			break;
	}

	/* Update packets that node is now acknowledging */
	skb_queue_walk_from_safe(&tn->bcl->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		bcbuf_decr_acks(skb);
		bclink_set_last_sent(net);
		if (bcbuf_acks(skb) == 0) {
			__skb_unlink(skb, &tn->bcl->transmq);
			kfree_skb(skb);
			released = 1;
		}
	}
	n_ptr->bclink.acked = acked;

	/* Try resolving broadcast link congestion, if necessary */
	if (unlikely(skb_peek(&tn->bcl->backlogq))) {
		tipc_link_push_packets(tn->bcl);
		bclink_set_last_sent(net);
	}
	if (unlikely(released && !skb_queue_empty(&tn->bcl->wakeupq)))
		n_ptr->action_flags |= TIPC_WAKEUP_BCAST_USERS;
exit:
	tipc_bclink_unlock(net);
}

/**
 * tipc_bclink_update_link_state - update broadcast link state
 *
 * RCU and node lock set
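 *
 * bclink.oos_state tracks how long this node has been out of sync with
 * the sender; a NACK is sent only when the counter is even, so the
 * increments below double as a rate limiter on NACK transmission.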
 */
void tipc_bclink_update_link_state(struct tipc_node *n_ptr,
				   u32 last_sent)
{
	struct sk_buff *buf;
	struct net *net = n_ptr->net;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	/* Ignore "stale" link state info */
	if (less_eq(last_sent, n_ptr->bclink.last_in))
		return;

	/* Update link synchronization state; quit if in sync */
	bclink_update_last_sent(n_ptr, last_sent);

	if (n_ptr->bclink.last_sent == n_ptr->bclink.last_in)
		return;

	/* Update out-of-sync state; quit if loss is still unconfirmed */
	if ((++n_ptr->bclink.oos_state) == 1) {
		if (n_ptr->bclink.deferred_size < (TIPC_MIN_LINK_WIN / 2))
			return;
		n_ptr->bclink.oos_state++;
	}

	/* Don't NACK if one has been recently sent (or seen) */
	if (n_ptr->bclink.oos_state & 0x1)
		return;

	/* Send NACK */
	buf = tipc_buf_acquire(INT_H_SIZE);
	if (buf) {
		struct tipc_msg *msg = buf_msg(buf);
		struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferdq);
		u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent;

		tipc_msg_init(tn->own_addr, msg, BCAST_PROTOCOL, STATE_MSG,
			      INT_H_SIZE, n_ptr->addr);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tn->net_id);
		msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_to(msg, to);

		tipc_bclink_lock(net);
		tipc_bearer_send(net, MAX_BEARERS, buf, NULL);
		tn->bcl->stats.sent_nacks++;
		tipc_bclink_unlock(net);
		kfree_skb(buf);

		n_ptr->bclink.oos_state++;
	}
}

/**
 * bclink_peek_nack - monitor retransmission requests sent by other nodes
 *
 * Delay any upcoming NACK by this node if another node has already
 * requested the first message this node is going to ask for.
 */
static void bclink_peek_nack(struct net *net, struct tipc_msg *msg)
{
	struct tipc_node *n_ptr = tipc_node_find(net, msg_destnode(msg));

	if (unlikely(!n_ptr))
		return;

	tipc_node_lock(n_ptr);
	if (n_ptr->bclink.recv_permitted &&
	    (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) &&
	    (n_ptr->bclink.last_in == msg_bcgap_after(msg)))
		n_ptr->bclink.oos_state = 2;
	tipc_node_unlock(n_ptr);
	tipc_node_put(n_ptr);
}

/* tipc_bclink_xmit - deliver buffer chain to all nodes in cluster
 *                    and to identified node local sockets
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 * Consumes the buffer chain, except when returning -ELINKCONG
 * Returns 0 on success; otherwise an errno: -ELINKCONG, -EHOSTUNREACH or
 * -EMSGSIZE
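 *
 * A clone of the chain is reassembled up front so that, once the original
 * chain has been handed to the broadcast link, the message can also be
 * delivered to local sockets via tipc_sk_mcast_rcv().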
 */
int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
	struct tipc_bclink *bclink = tn->bclink;
	int rc = 0;
	int bc = 0;
	struct sk_buff *skb;
	struct sk_buff_head arrvq;
	struct sk_buff_head inputq;

	/* Prepare clone of message for local node */
	skb = tipc_msg_reassemble(list);
	if (unlikely(!skb)) {
		__skb_queue_purge(list);
		return -EHOSTUNREACH;
	}
	/* Broadcast to all nodes */
	if (likely(bclink)) {
		tipc_bclink_lock(net);
		if (likely(bclink->bcast_nodes.count)) {
			rc = __tipc_link_xmit(net, bcl, list);
			if (likely(!rc)) {
				u32 len = skb_queue_len(&bcl->transmq);

				bclink_set_last_sent(net);
				bcl->stats.queue_sz_counts++;
				bcl->stats.accu_queue_sz += len;
			}
			bc = 1;
		}
		tipc_bclink_unlock(net);
	}

	if (unlikely(!bc))
		__skb_queue_purge(list);

	if (unlikely(rc)) {
		kfree_skb(skb);
		return rc;
	}
	/* Deliver message clone */
	__skb_queue_head_init(&arrvq);
	skb_queue_head_init(&inputq);
	__skb_queue_tail(&arrvq, skb);
	tipc_sk_mcast_rcv(net, &arrvq, &inputq);
	return rc;
}

/**
 * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
 *
 * Called with both sending node's lock and bclink_lock taken.
 */
static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
{
	struct tipc_net *tn = net_generic(node->net, tipc_net_id);

	bclink_update_last_sent(node, seqno);
	node->bclink.last_in = seqno;
	node->bclink.oos_state = 0;
	tn->bcl->stats.recv_info++;

	/*
	 * Unicast an ACK periodically, ensuring that
	 * all nodes in the cluster don't ACK at the same time
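	 * (each node ACKs every TIPC_MIN_LINK_WIN packets, at an offset
	 * derived from its own node address, so the ACKs from different
	 * nodes are spread over different sequence numbers)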
	 */
	if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) {
		tipc_link_proto_xmit(node->active_links[node->addr & 1],
				     STATE_MSG, 0, 0, 0, 0);
		tn->bcl->stats.sent_acks++;
	}
}

/**
 * tipc_bclink_rcv - receive a broadcast packet, and deliver upwards
 *
 * RCU is locked, no other locks set
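 *
 * In-sequence data messages are queued for socket delivery; bundles are
 * unpacked and fragments reassembled first. Out-of-sequence packets are
 * parked in the sending node's deferdq, from which the next expected
 * packet is pulled as soon as the sequence gap closes.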
 */
void tipc_bclink_rcv(struct net *net, struct sk_buff *buf)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_node *node;
	u32 next_in;
	u32 seqno;
	int deferred = 0;
	int pos = 0;
	struct sk_buff *iskb;
	struct sk_buff_head *arrvq, *inputq;

	/* Screen out unwanted broadcast messages */
	if (msg_mc_netid(msg) != tn->net_id)
		goto exit;

	node = tipc_node_find(net, msg_prevnode(msg));
	if (unlikely(!node))
		goto exit;

	tipc_node_lock(node);
	if (unlikely(!node->bclink.recv_permitted))
		goto unlock;

	/* Handle broadcast protocol message */
	if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
		if (msg_type(msg) != STATE_MSG)
			goto unlock;
		if (msg_destnode(msg) == tn->own_addr) {
			tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
			tipc_bclink_lock(net);
			bcl->stats.recv_nacks++;
			tn->bclink->retransmit_to = node;
			bclink_retransmit_pkt(tn, msg_bcgap_after(msg),
					      msg_bcgap_to(msg));
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else {
			tipc_node_unlock(node);
			bclink_peek_nack(net, msg);
		}
		tipc_node_put(node);
		goto exit;
	}

	/* Handle in-sequence broadcast message */
	seqno = msg_seqno(msg);
	next_in = mod(node->bclink.last_in + 1);
	arrvq = &tn->bclink->arrvq;
	inputq = &tn->bclink->inputq;

	if (likely(seqno == next_in)) {
receive:
		/* Deliver message to destination */
		if (likely(msg_isdata(msg))) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			spin_lock_bh(&inputq->lock);
			__skb_queue_tail(arrvq, buf);
			spin_unlock_bh(&inputq->lock);
			node->action_flags |= TIPC_BCAST_MSG_EVT;
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else if (msg_user(msg) == MSG_BUNDLER) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			bcl->stats.recv_bundles++;
			bcl->stats.recv_bundled += msg_msgcnt(msg);
			pos = 0;
			while (tipc_msg_extract(buf, &iskb, &pos)) {
				spin_lock_bh(&inputq->lock);
				__skb_queue_tail(arrvq, iskb);
				spin_unlock_bh(&inputq->lock);
			}
			node->action_flags |= TIPC_BCAST_MSG_EVT;
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else if (msg_user(msg) == MSG_FRAGMENTER) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			tipc_buf_append(&node->bclink.reasm_buf, &buf);
			if (unlikely(!buf && !node->bclink.reasm_buf)) {
				tipc_bclink_unlock(net);
				goto unlock;
			}
			bcl->stats.recv_fragments++;
			if (buf) {
				bcl->stats.recv_fragmented++;
				msg = buf_msg(buf);
				tipc_bclink_unlock(net);
				goto receive;
			}
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
			kfree_skb(buf);
		}
		buf = NULL;

		/* Determine new synchronization state */
		tipc_node_lock(node);
		if (unlikely(!tipc_node_is_up(node)))
			goto unlock;

		if (node->bclink.last_in == node->bclink.last_sent)
			goto unlock;

		if (skb_queue_empty(&node->bclink.deferdq)) {
			node->bclink.oos_state = 1;
			goto unlock;
		}

		msg = buf_msg(skb_peek(&node->bclink.deferdq));
		seqno = msg_seqno(msg);
		next_in = mod(next_in + 1);
		if (seqno != next_in)
			goto unlock;

		/* Take in-sequence message from deferred queue & deliver it */
		buf = __skb_dequeue(&node->bclink.deferdq);
		goto receive;
	}

	/* Handle out-of-sequence broadcast message */
	if (less(next_in, seqno)) {
		deferred = tipc_link_defer_pkt(&node->bclink.deferdq,
					       buf);
		bclink_update_last_sent(node, seqno);
		buf = NULL;
	}

	tipc_bclink_lock(net);

	if (deferred)
		bcl->stats.deferred_recv++;
	else
		bcl->stats.duplicates++;

	tipc_bclink_unlock(net);

unlock:
	tipc_node_unlock(node);
	tipc_node_put(node);
exit:
	kfree_skb(buf);
}

u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
{
	return (n_ptr->bclink.recv_permitted &&
		(tipc_bclink_get_last_sent(n_ptr->net) != n_ptr->bclink.acked));
}


/**
 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
 *
 * Send packet over as many bearers as necessary to reach all nodes
 * that have joined the broadcast link.
 *
 * Returns 0 (packet sent successfully) under all circumstances,
 * since the broadcast link's pseudo-bearer never blocks
 */
static int tipc_bcbearer_send(struct net *net, struct sk_buff *buf,
			      struct tipc_bearer *unused1,
			      struct tipc_media_addr *unused2)
{
	int bp_index;
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer = tn->bcbearer;
	struct tipc_bclink *bclink = tn->bclink;

	/* Prepare broadcast link message for reliable transmission the
	 * first time it is sent; preparation is skipped for broadcast
	 * link protocol messages, since they are sent unreliably and
	 * don't need it.
	 */
	if (likely(!msg_non_seq(msg))) {
		bcbuf_set_acks(buf, bclink->bcast_nodes.count);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tn->net_id);
		tn->bcl->stats.sent_info++;
		if (WARN_ON(!bclink->bcast_nodes.count)) {
			dump_stack();
			return 0;
		}
	}

	/* Send buffer over bearers until all targets reached */
	bcbearer->remains = bclink->bcast_nodes;

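	/* Greedy cover: walk the bearer pairs in decreasing priority order,
	 * subtract the nodes each chosen bearer reaches from 'remains', and
	 * stop as soon as every destination has been covered.
	 */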
	for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
		struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
		struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;
		struct tipc_bearer *bp[2] = {p, s};
		struct tipc_bearer *b = bp[msg_link_selector(msg)];
		struct sk_buff *tbuf;

		if (!p)
			break; /* No more bearers to try */
		if (!b)
			b = p;
		tipc_nmap_diff(&bcbearer->remains, &b->nodes,
			       &bcbearer->remains_new);
		if (bcbearer->remains_new.count == bcbearer->remains.count)
			continue; /* Nothing added by bearer pair */

		if (bp_index == 0) {
			/* Use original buffer for first bearer */
			tipc_bearer_send(net, b->identity, buf, &b->bcast_addr);
		} else {
			/* Avoid concurrent buffer access */
			tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC);
			if (!tbuf)
				break;
			tipc_bearer_send(net, b->identity, tbuf,
					 &b->bcast_addr);
			kfree_skb(tbuf); /* Bearer keeps a clone */
		}
		if (bcbearer->remains_new.count == 0)
			break; /* All targets reached */

		bcbearer->remains = bcbearer->remains_new;
	}

	return 0;
}

/**
 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
 */
void tipc_bcbearer_sort(struct net *net, struct tipc_node_map *nm_ptr,
			u32 node, bool action)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer = tn->bcbearer;
	struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
	struct tipc_bcbearer_pair *bp_curr;
	struct tipc_bearer *b;
	int b_index;
	int pri;

	tipc_bclink_lock(net);

	if (action)
		tipc_nmap_add(nm_ptr, node);
	else
		tipc_nmap_remove(nm_ptr, node);

	/* Group bearers by priority (can assume max of two per priority) */
	memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));

	rcu_read_lock();
	for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
		b = rcu_dereference_rtnl(tn->bearer_list[b_index]);
		if (!b || !b->nodes.count)
			continue;

		if (!bp_temp[b->priority].primary)
			bp_temp[b->priority].primary = b;
		else
			bp_temp[b->priority].secondary = b;
	}
	rcu_read_unlock();

	/* Create array of bearer pairs for broadcasting */
	bp_curr = bcbearer->bpairs;
	memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));

	for (pri = TIPC_MAX_LINK_PRI; pri >= 0; pri--) {

		if (!bp_temp[pri].primary)
			continue;

		bp_curr->primary = bp_temp[pri].primary;

		if (bp_temp[pri].secondary) {
			if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,
					    &bp_temp[pri].secondary->nodes)) {
				bp_curr->secondary = bp_temp[pri].secondary;
			} else {
				bp_curr++;
				bp_curr->primary = bp_temp[pri].secondary;
			}
		}

		bp_curr++;
	}

	tipc_bclink_unlock(net);
}

static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
				      struct tipc_stats *stats)
{
	int i;
	struct nlattr *nest;

	struct nla_map {
		__u32 key;
		__u32 val;
	};

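	/* Table-driven dump: each entry pairs a netlink attribute type with
	 * the corresponding link statistic, all emitted in one loop below.
	 */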
	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, stats->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, stats->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
	};

	nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!nest)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, nest);

	return 0;
msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}

770 
771 int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
772 {
773 	int err;
774 	void *hdr;
775 	struct nlattr *attrs;
776 	struct nlattr *prop;
777 	struct tipc_net *tn = net_generic(net, tipc_net_id);
778 	struct tipc_link *bcl = tn->bcl;
779 
780 	if (!bcl)
781 		return 0;
782 
783 	tipc_bclink_lock(net);
784 
	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr) {
		tipc_bclink_unlock(net);
		return -EMSGSIZE;
	}

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	/* The broadcast link is always up */
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
		goto attr_msg_full;

	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
		goto attr_msg_full;
	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->rcv_nxt))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->snd_nxt))
		goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
	if (err)
		goto attr_msg_full;

	tipc_bclink_unlock(net);
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	tipc_bclink_unlock(net);
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

834 
835 int tipc_bclink_reset_stats(struct net *net)
836 {
837 	struct tipc_net *tn = net_generic(net, tipc_net_id);
838 	struct tipc_link *bcl = tn->bcl;
839 
840 	if (!bcl)
841 		return -ENOPROTOOPT;
842 
843 	tipc_bclink_lock(net);
844 	memset(&bcl->stats, 0, sizeof(bcl->stats));
845 	tipc_bclink_unlock(net);
846 	return 0;
847 }
848 
849 int tipc_bclink_set_queue_limits(struct net *net, u32 limit)
850 {
851 	struct tipc_net *tn = net_generic(net, tipc_net_id);
852 	struct tipc_link *bcl = tn->bcl;
853 
854 	if (!bcl)
855 		return -ENOPROTOOPT;
856 	if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
857 		return -EINVAL;
858 
859 	tipc_bclink_lock(net);
860 	tipc_link_set_queue_limits(bcl, limit);
861 	tipc_bclink_unlock(net);
862 	return 0;
863 }
864 
865 int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[])
866 {
867 	int err;
868 	u32 win;
869 	struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
870 
871 	if (!attrs[TIPC_NLA_LINK_PROP])
872 		return -EINVAL;
873 
874 	err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
875 	if (err)
876 		return err;
877 
878 	if (!props[TIPC_NLA_PROP_WIN])
879 		return -EOPNOTSUPP;
880 
881 	win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
882 
883 	return tipc_bclink_set_queue_limits(net, win);
884 }
885 
886 int tipc_bclink_init(struct net *net)
887 {
888 	struct tipc_net *tn = net_generic(net, tipc_net_id);
889 	struct tipc_bcbearer *bcbearer;
890 	struct tipc_bclink *bclink;
891 	struct tipc_link *bcl;
892 
893 	bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
894 	if (!bcbearer)
895 		return -ENOMEM;
896 
897 	bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
898 	if (!bclink) {
899 		kfree(bcbearer);
900 		return -ENOMEM;
901 	}
902 
903 	bcl = &bclink->link;
904 	bcbearer->bearer.media = &bcbearer->media;
905 	bcbearer->media.send_msg = tipc_bcbearer_send;
906 	sprintf(bcbearer->media.name, "tipc-broadcast");
907 
908 	spin_lock_init(&bclink->lock);
909 	__skb_queue_head_init(&bcl->transmq);
910 	__skb_queue_head_init(&bcl->backlogq);
911 	__skb_queue_head_init(&bcl->deferdq);
912 	skb_queue_head_init(&bcl->wakeupq);
913 	bcl->snd_nxt = 1;
914 	spin_lock_init(&bclink->node.lock);
915 	__skb_queue_head_init(&bclink->arrvq);
916 	skb_queue_head_init(&bclink->inputq);
917 	bcl->owner = &bclink->node;
918 	bcl->owner->net = net;
919 	bcl->mtu = MAX_PKT_DEFAULT_MCAST;
920 	tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
921 	bcl->bearer_id = MAX_BEARERS;
922 	rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer);
923 	bcl->state = WORKING_WORKING;
924 	bcl->pmsg = (struct tipc_msg *)&bcl->proto_msg;
925 	msg_set_prevnode(bcl->pmsg, tn->own_addr);
926 	strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
927 	tn->bcbearer = bcbearer;
928 	tn->bclink = bclink;
929 	tn->bcl = bcl;
930 	return 0;
931 }
932 
933 void tipc_bclink_stop(struct net *net)
934 {
935 	struct tipc_net *tn = net_generic(net, tipc_net_id);
936 
937 	tipc_bclink_lock(net);
938 	tipc_link_purge_queues(tn->bcl);
939 	tipc_bclink_unlock(net);
940 
941 	RCU_INIT_POINTER(tn->bearer_list[BCBEARER], NULL);
942 	synchronize_net();
943 	kfree(tn->bcbearer);
944 	kfree(tn->bclink);
945 }
946 
947 /**
948  * tipc_nmap_add - add a node to a node map
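 *
 * The map is a bitmap keyed by tipc_node(node): node n occupies bit
 * (n % WSIZE) of word (n / WSIZE), while count tracks the total number
 * of bits set.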
 */
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) == 0) {
		nm_ptr->count++;
		nm_ptr->map[w] |= mask;
	}
}

/**
 * tipc_nmap_remove - remove a node from a node map
 */
static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) != 0) {
		nm_ptr->map[w] &= ~mask;
		nm_ptr->count--;
	}
}

/**
 * tipc_nmap_diff - find differences between node maps
 * @nm_a: input node map A
 * @nm_b: input node map B
 * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
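 *
 * Each output word is computed as A ^ (A & B), i.e. A & ~B; the set bits
 * of the result are then counted to fill in the output map's count field.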
 */
static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff)
{
	int stop = ARRAY_SIZE(nm_a->map);
	int w;
	int b;
	u32 map;

	memset(nm_diff, 0, sizeof(*nm_diff));
	for (w = 0; w < stop; w++) {
		map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
		nm_diff->map[w] = map;
		if (map != 0) {
			for (b = 0; b < WSIZE; b++) {
				if (map & (1 << b))
					nm_diff->count++;
			}
		}
	}
}