xref: /openbmc/linux/net/tipc/bcast.c (revision 4bce6fce)
1 /*
2  * net/tipc/bcast.c: TIPC broadcast code
3  *
4  * Copyright (c) 2004-2006, 2014-2015, Ericsson AB
5  * Copyright (c) 2004, Intel Corporation.
6  * Copyright (c) 2005, 2010-2011, Wind River Systems
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions are met:
11  *
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. Neither the names of the copyright holders nor the names of its
18  *    contributors may be used to endorse or promote products derived from
19  *    this software without specific prior written permission.
20  *
21  * Alternatively, this software may be distributed under the terms of the
22  * GNU General Public License ("GPL") version 2 as published by the Free
23  * Software Foundation.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
26  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
29  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 #include "socket.h"
39 #include "msg.h"
40 #include "bcast.h"
41 #include "name_distr.h"
42 #include "core.h"
43 
44 #define	MAX_PKT_DEFAULT_MCAST	1500	/* bcast link max packet size (fixed) */
45 #define	BCLINK_WIN_DEFAULT	20	/* bcast link window size (default) */
46 
47 const char tipc_bclink_name[] = "broadcast-link";
48 
49 static void tipc_nmap_diff(struct tipc_node_map *nm_a,
50 			   struct tipc_node_map *nm_b,
51 			   struct tipc_node_map *nm_diff);
52 static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
53 static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);
54 
55 static void tipc_bclink_lock(struct net *net)
56 {
57 	struct tipc_net *tn = net_generic(net, tipc_net_id);
58 
59 	spin_lock_bh(&tn->bclink->lock);
60 }
61 
62 static void tipc_bclink_unlock(struct net *net)
63 {
64 	struct tipc_net *tn = net_generic(net, tipc_net_id);
65 
66 	spin_unlock_bh(&tn->bclink->lock);
67 }
68 
69 void tipc_bclink_input(struct net *net)
70 {
71 	struct tipc_net *tn = net_generic(net, tipc_net_id);
72 
73 	tipc_sk_mcast_rcv(net, &tn->bclink->arrvq, &tn->bclink->inputq);
74 }
75 
76 uint tipc_bclink_get_mtu(void)
77 {
78 	return MAX_PKT_DEFAULT_MCAST;
79 }
80 
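/* The broadcast link reuses the TIPC skb control block's 'handle' pointer
 * as a reference count: for each queued broadcast buffer it holds the
 * number of nodes that still have to acknowledge the packet before the
 * buffer can be released (see tipc_bclink_acknowledge()).
 */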
81 static u32 bcbuf_acks(struct sk_buff *buf)
82 {
83 	return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
84 }
85 
86 static void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
87 {
88 	TIPC_SKB_CB(buf)->handle = (void *)(unsigned long)acks;
89 }
90 
91 static void bcbuf_decr_acks(struct sk_buff *buf)
92 {
93 	bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
94 }
95 
96 void tipc_bclink_add_node(struct net *net, u32 addr)
97 {
98 	struct tipc_net *tn = net_generic(net, tipc_net_id);
99 
100 	tipc_bclink_lock(net);
101 	tipc_nmap_add(&tn->bclink->bcast_nodes, addr);
102 	tipc_bclink_unlock(net);
103 }
104 
105 void tipc_bclink_remove_node(struct net *net, u32 addr)
106 {
107 	struct tipc_net *tn = net_generic(net, tipc_net_id);
108 
109 	tipc_bclink_lock(net);
110 	tipc_nmap_remove(&tn->bclink->bcast_nodes, addr);
111 	tipc_bclink_unlock(net);
112 }
113 
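/* bclink_set_last_sent() caches the sequence number of the last packet
 * actually put on the wire. The broadcast link repurposes the link's
 * fsm_msg_cnt field for this: one less than the sequence number of the
 * first backlogged packet if the backlog is non-empty, otherwise one less
 * than next_out_no.
 */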
114 static void bclink_set_last_sent(struct net *net)
115 {
116 	struct tipc_net *tn = net_generic(net, tipc_net_id);
117 	struct tipc_link *bcl = tn->bcl;
118 	struct sk_buff *skb = skb_peek(&bcl->backlogq);
119 
120 	if (skb)
121 		bcl->fsm_msg_cnt = mod(buf_seqno(skb) - 1);
122 	else
123 		bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1);
124 }
125 
126 u32 tipc_bclink_get_last_sent(struct net *net)
127 {
128 	struct tipc_net *tn = net_generic(net, tipc_net_id);
129 
130 	return tn->bcl->fsm_msg_cnt;
131 }
132 
133 static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
134 {
135 	node->bclink.last_sent = less_eq(node->bclink.last_sent, seqno) ?
136 						seqno : node->bclink.last_sent;
137 }
138 
139 /**
140  * tipc_bclink_retransmit_to - get most recent node to request retransmission
141  * @net: the applicable net namespace
142  * Called with bclink_lock locked
143  */
144 struct tipc_node *tipc_bclink_retransmit_to(struct net *net)
145 {
146 	struct tipc_net *tn = net_generic(net, tipc_net_id);
147 
148 	return tn->bclink->retransmit_to;
149 }
150 
151 /**
152  * bclink_retransmit_pkt - retransmit broadcast packets
153  * @after: sequence number of last packet to *not* retransmit
154  * @to: sequence number of last packet to retransmit
155  *
156  * Called with bclink_lock locked
157  */
158 static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
159 {
160 	struct sk_buff *skb;
161 	struct tipc_link *bcl = tn->bcl;
162 
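	/* Find the first packet past 'after' still in the transmit queue and
	 * retransmit (to - after) packets from there, i.e. everything up to
	 * and including 'to'
	 */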
163 	skb_queue_walk(&bcl->transmq, skb) {
164 		if (more(buf_seqno(skb), after)) {
165 			tipc_link_retransmit(bcl, skb, mod(to - after));
166 			break;
167 		}
168 	}
169 }
170 
171 /**
172  * tipc_bclink_wakeup_users - wake up pending users
173  *
174  * @net: the applicable net namespace
175  */
176 void tipc_bclink_wakeup_users(struct net *net)
177 {
178 	struct tipc_net *tn = net_generic(net, tipc_net_id);
179 
180 	tipc_sk_rcv(net, &tn->bclink->link.wakeupq);
181 }
182 
183 /**
184  * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
185  * @n_ptr: node that sent acknowledgement info
186  * @acked: broadcast sequence # that has been acknowledged
187  *
188  * Node is locked, bclink_lock unlocked.
189  */
190 void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
191 {
192 	struct sk_buff *skb, *tmp;
193 	unsigned int released = 0;
194 	struct net *net = n_ptr->net;
195 	struct tipc_net *tn = net_generic(net, tipc_net_id);
196 
197 	if (unlikely(!n_ptr->bclink.recv_permitted))
198 		return;
199 
200 	tipc_bclink_lock(net);
201 
202 	/* Bail out if tx queue is empty (no cleanup is required) */
203 	skb = skb_peek(&tn->bcl->transmq);
204 	if (!skb)
205 		goto exit;
206 
207 	/* Determine which messages need to be acknowledged */
208 	if (acked == INVALID_LINK_SEQ) {
209 		/*
210 		 * Contact with the specified node has been lost, so we need to
211 		 * acknowledge sent messages only (if other nodes still exist)
212 		 * or both sent and unsent messages (otherwise)
213 		 */
214 		if (tn->bclink->bcast_nodes.count)
215 			acked = tn->bcl->fsm_msg_cnt;
216 		else
217 			acked = tn->bcl->next_out_no;
218 	} else {
219 		/*
220 		 * Bail out if specified sequence number does not correspond
221 		 * to a message that has been sent and not yet acknowledged
222 		 */
223 		if (less(acked, buf_seqno(skb)) ||
224 		    less(tn->bcl->fsm_msg_cnt, acked) ||
225 		    less_eq(acked, n_ptr->bclink.acked))
226 			goto exit;
227 	}
228 
229 	/* Skip over packets that node has previously acknowledged */
230 	skb_queue_walk(&tn->bcl->transmq, skb) {
231 		if (more(buf_seqno(skb), n_ptr->bclink.acked))
232 			break;
233 	}
234 
235 	/* Update packets that node is now acknowledging */
236 	skb_queue_walk_from_safe(&tn->bcl->transmq, skb, tmp) {
237 		if (more(buf_seqno(skb), acked))
238 			break;
239 		bcbuf_decr_acks(skb);
240 		bclink_set_last_sent(net);
241 		if (bcbuf_acks(skb) == 0) {
242 			__skb_unlink(skb, &tn->bcl->transmq);
243 			kfree_skb(skb);
244 			released = 1;
245 		}
246 	}
247 	n_ptr->bclink.acked = acked;
248 
249 	/* Try resolving broadcast link congestion, if necessary */
250 	if (unlikely(skb_peek(&tn->bcl->backlogq))) {
251 		tipc_link_push_packets(tn->bcl);
252 		bclink_set_last_sent(net);
253 	}
254 	if (unlikely(released && !skb_queue_empty(&tn->bcl->wakeupq)))
255 		n_ptr->action_flags |= TIPC_WAKEUP_BCAST_USERS;
256 exit:
257 	tipc_bclink_unlock(net);
258 }
259 
260 /**
261  * tipc_bclink_update_link_state - update broadcast link state
262  *
263  * RCU and node lock set
264  */
265 void tipc_bclink_update_link_state(struct tipc_node *n_ptr,
266 				   u32 last_sent)
267 {
268 	struct sk_buff *buf;
269 	struct net *net = n_ptr->net;
270 	struct tipc_net *tn = net_generic(net, tipc_net_id);
271 
272 	/* Ignore "stale" link state info */
273 	if (less_eq(last_sent, n_ptr->bclink.last_in))
274 		return;
275 
276 	/* Update link synchronization state; quit if in sync */
277 	bclink_update_last_sent(n_ptr, last_sent);
278 
279 	if (n_ptr->bclink.last_sent == n_ptr->bclink.last_in)
280 		return;
281 
282 	/* Update out-of-sync state; quit if loss is still unconfirmed */
283 	if ((++n_ptr->bclink.oos_state) == 1) {
284 		if (n_ptr->bclink.deferred_size < (TIPC_MIN_LINK_WIN / 2))
285 			return;
286 		n_ptr->bclink.oos_state++;
287 	}
288 
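	/* oos_state counts consecutive out-of-sync observations; an odd value
	 * at this point means a NACK covering this gap was just sent by this
	 * node or was recently seen from a peer (bclink_peek_nack() arranges
	 * for the next increment to land on an odd value), so a new NACK is
	 * held back for now.
	 */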
289 	/* Don't NACK if one has been recently sent (or seen) */
290 	if (n_ptr->bclink.oos_state & 0x1)
291 		return;
292 
293 	/* Send NACK */
294 	buf = tipc_buf_acquire(INT_H_SIZE);
295 	if (buf) {
296 		struct tipc_msg *msg = buf_msg(buf);
297 		struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferdq);
298 		u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent;
299 
300 		tipc_msg_init(tn->own_addr, msg, BCAST_PROTOCOL, STATE_MSG,
301 			      INT_H_SIZE, n_ptr->addr);
302 		msg_set_non_seq(msg, 1);
303 		msg_set_mc_netid(msg, tn->net_id);
304 		msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
305 		msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
306 		msg_set_bcgap_to(msg, to);
307 
308 		tipc_bclink_lock(net);
309 		tipc_bearer_send(net, MAX_BEARERS, buf, NULL);
310 		tn->bcl->stats.sent_nacks++;
311 		tipc_bclink_unlock(net);
312 		kfree_skb(buf);
313 
314 		n_ptr->bclink.oos_state++;
315 	}
316 }
317 
318 /**
319  * bclink_peek_nack - monitor retransmission requests sent by other nodes
320  *
321  * Delay any upcoming NACK by this node if another node has already
322  * requested the first message this node is going to ask for.
323  */
324 static void bclink_peek_nack(struct net *net, struct tipc_msg *msg)
325 {
326 	struct tipc_node *n_ptr = tipc_node_find(net, msg_destnode(msg));
327 
328 	if (unlikely(!n_ptr))
329 		return;
330 
331 	tipc_node_lock(n_ptr);
332 	if (n_ptr->bclink.recv_permitted &&
333 	    (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) &&
334 	    (n_ptr->bclink.last_in == msg_bcgap_after(msg)))
335 		n_ptr->bclink.oos_state = 2;
336 	tipc_node_unlock(n_ptr);
337 	tipc_node_put(n_ptr);
338 }
339 
340 /* tipc_bclink_xmit - deliver buffer chain to all nodes in cluster
341  *                    and to the matching sockets on the local node
342  * @net: the applicable net namespace
343  * @list: chain of buffers containing message
344  * Consumes the buffer chain, except when returning -ELINKCONG
345  * Returns 0 on success, otherwise errno: -ELINKCONG, -EHOSTUNREACH or -EMSGSIZE
346  */
347 int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list)
348 {
349 	struct tipc_net *tn = net_generic(net, tipc_net_id);
350 	struct tipc_link *bcl = tn->bcl;
351 	struct tipc_bclink *bclink = tn->bclink;
352 	int rc = 0;
353 	int bc = 0;
354 	struct sk_buff *skb;
355 	struct sk_buff_head arrvq;
356 	struct sk_buff_head inputq;
357 
358 	/* Prepare clone of message for local node */
359 	skb = tipc_msg_reassemble(list);
360 	if (unlikely(!skb)) {
361 		__skb_queue_purge(list);
362 		return -EHOSTUNREACH;
363 	}
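	/* The reassembled skb is a self-contained copy of the whole message;
	 * the original chain in 'list' is handed to the broadcast link below,
	 * while this copy is delivered to local sockets at the end.
	 */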
364 	/* Broadcast to all nodes */
365 	if (likely(bclink)) {
366 		tipc_bclink_lock(net);
367 		if (likely(bclink->bcast_nodes.count)) {
368 			rc = __tipc_link_xmit(net, bcl, list);
369 			if (likely(!rc)) {
370 				u32 len = skb_queue_len(&bcl->transmq);
371 
372 				bclink_set_last_sent(net);
373 				bcl->stats.queue_sz_counts++;
374 				bcl->stats.accu_queue_sz += len;
375 			}
376 			bc = 1;
377 		}
378 		tipc_bclink_unlock(net);
379 	}
380 
381 	if (unlikely(!bc))
382 		__skb_queue_purge(list);
383 
384 	if (unlikely(rc)) {
385 		kfree_skb(skb);
386 		return rc;
387 	}
388 	/* Deliver message clone */
389 	__skb_queue_head_init(&arrvq);
390 	skb_queue_head_init(&inputq);
391 	__skb_queue_tail(&arrvq, skb);
392 	tipc_sk_mcast_rcv(net, &arrvq, &inputq);
393 	return rc;
394 }
395 
396 /**
397  * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
398  *
399  * Called with both sending node's lock and bclink_lock taken.
400  */
401 static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
402 {
403 	struct tipc_net *tn = net_generic(node->net, tipc_net_id);
404 
405 	bclink_update_last_sent(node, seqno);
406 	node->bclink.last_in = seqno;
407 	node->bclink.oos_state = 0;
408 	tn->bcl->stats.recv_info++;
409 
410 	/*
411 	 * Unicast an ACK periodically, ensuring that
412 	 * all nodes in the cluster don't ACK at the same time
413 	 */
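	/* Illustrative example (value assumed): if TIPC_MIN_LINK_WIN were 50,
	 * a node whose address is congruent to 7 modulo 50 would ACK sequence
	 * numbers 7, 57, 107, ..., while one congruent to 8 would ACK 8, 58,
	 * 108, ..., spreading ACK traffic across the cluster.
	 */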
414 	if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) {
415 		tipc_link_proto_xmit(node->active_links[node->addr & 1],
416 				     STATE_MSG, 0, 0, 0, 0);
417 		tn->bcl->stats.sent_acks++;
418 	}
419 }
420 
421 /**
422  * tipc_bclink_rcv - receive a broadcast packet, and deliver upwards
423  *
424  * RCU is locked, no other locks set
425  */
426 void tipc_bclink_rcv(struct net *net, struct sk_buff *buf)
427 {
428 	struct tipc_net *tn = net_generic(net, tipc_net_id);
429 	struct tipc_link *bcl = tn->bcl;
430 	struct tipc_msg *msg = buf_msg(buf);
431 	struct tipc_node *node;
432 	u32 next_in;
433 	u32 seqno;
434 	int deferred = 0;
435 	int pos = 0;
436 	struct sk_buff *iskb;
437 	struct sk_buff_head *arrvq, *inputq;
438 
439 	/* Screen out unwanted broadcast messages */
440 	if (msg_mc_netid(msg) != tn->net_id)
441 		goto exit;
442 
443 	node = tipc_node_find(net, msg_prevnode(msg));
444 	if (unlikely(!node))
445 		goto exit;
446 
447 	tipc_node_lock(node);
448 	if (unlikely(!node->bclink.recv_permitted))
449 		goto unlock;
450 
451 	/* Handle broadcast protocol message */
452 	if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
453 		if (msg_type(msg) != STATE_MSG)
454 			goto unlock;
455 		if (msg_destnode(msg) == tn->own_addr) {
456 			tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
457 			tipc_bclink_lock(net);
458 			bcl->stats.recv_nacks++;
459 			tn->bclink->retransmit_to = node;
460 			bclink_retransmit_pkt(tn, msg_bcgap_after(msg),
461 					      msg_bcgap_to(msg));
462 			tipc_bclink_unlock(net);
463 			tipc_node_unlock(node);
464 		} else {
465 			tipc_node_unlock(node);
466 			bclink_peek_nack(net, msg);
467 		}
468 		tipc_node_put(node);
469 		goto exit;
470 	}
471 
472 	/* Handle in-sequence broadcast message */
473 	seqno = msg_seqno(msg);
474 	next_in = mod(node->bclink.last_in + 1);
475 	arrvq = &tn->bclink->arrvq;
476 	inputq = &tn->bclink->inputq;
477 
478 	if (likely(seqno == next_in)) {
479 receive:
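		/* Execution returns to this label whenever the next in-sequence
		 * packet is found on the deferred queue further below, so a run
		 * of buffered packets is delivered in a single pass.
		 */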
480 		/* Deliver message to destination */
481 		if (likely(msg_isdata(msg))) {
482 			tipc_bclink_lock(net);
483 			bclink_accept_pkt(node, seqno);
484 			spin_lock_bh(&inputq->lock);
485 			__skb_queue_tail(arrvq, buf);
486 			spin_unlock_bh(&inputq->lock);
487 			node->action_flags |= TIPC_BCAST_MSG_EVT;
488 			tipc_bclink_unlock(net);
489 			tipc_node_unlock(node);
490 		} else if (msg_user(msg) == MSG_BUNDLER) {
491 			tipc_bclink_lock(net);
492 			bclink_accept_pkt(node, seqno);
493 			bcl->stats.recv_bundles++;
494 			bcl->stats.recv_bundled += msg_msgcnt(msg);
495 			pos = 0;
496 			while (tipc_msg_extract(buf, &iskb, &pos)) {
497 				spin_lock_bh(&inputq->lock);
498 				__skb_queue_tail(arrvq, iskb);
499 				spin_unlock_bh(&inputq->lock);
500 			}
501 			node->action_flags |= TIPC_BCAST_MSG_EVT;
502 			tipc_bclink_unlock(net);
503 			tipc_node_unlock(node);
504 		} else if (msg_user(msg) == MSG_FRAGMENTER) {
505 			tipc_bclink_lock(net);
506 			bclink_accept_pkt(node, seqno);
507 			tipc_buf_append(&node->bclink.reasm_buf, &buf);
508 			if (unlikely(!buf && !node->bclink.reasm_buf)) {
509 				tipc_bclink_unlock(net);
510 				goto unlock;
511 			}
512 			bcl->stats.recv_fragments++;
513 			if (buf) {
514 				bcl->stats.recv_fragmented++;
515 				msg = buf_msg(buf);
516 				tipc_bclink_unlock(net);
517 				goto receive;
518 			}
519 			tipc_bclink_unlock(net);
520 			tipc_node_unlock(node);
521 		} else {
522 			tipc_bclink_lock(net);
523 			bclink_accept_pkt(node, seqno);
524 			tipc_bclink_unlock(net);
525 			tipc_node_unlock(node);
526 			kfree_skb(buf);
527 		}
528 		buf = NULL;
529 
530 		/* Determine new synchronization state */
531 		tipc_node_lock(node);
532 		if (unlikely(!tipc_node_is_up(node)))
533 			goto unlock;
534 
535 		if (node->bclink.last_in == node->bclink.last_sent)
536 			goto unlock;
537 
538 		if (skb_queue_empty(&node->bclink.deferdq)) {
539 			node->bclink.oos_state = 1;
540 			goto unlock;
541 		}
542 
543 		msg = buf_msg(skb_peek(&node->bclink.deferdq));
544 		seqno = msg_seqno(msg);
545 		next_in = mod(next_in + 1);
546 		if (seqno != next_in)
547 			goto unlock;
548 
549 		/* Take in-sequence message from deferred queue & deliver it */
550 		buf = __skb_dequeue(&node->bclink.deferdq);
551 		goto receive;
552 	}
553 
554 	/* Handle out-of-sequence broadcast message */
555 	if (less(next_in, seqno)) {
556 		deferred = tipc_link_defer_pkt(&node->bclink.deferdq,
557 					       buf);
558 		bclink_update_last_sent(node, seqno);
559 		buf = NULL;
560 	}
561 
562 	tipc_bclink_lock(net);
563 
564 	if (deferred)
565 		bcl->stats.deferred_recv++;
566 	else
567 		bcl->stats.duplicates++;
568 
569 	tipc_bclink_unlock(net);
570 
571 unlock:
572 	tipc_node_unlock(node);
573 	tipc_node_put(node);
574 exit:
575 	kfree_skb(buf);
576 }
577 
578 u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
579 {
580 	return (n_ptr->bclink.recv_permitted &&
581 		(tipc_bclink_get_last_sent(n_ptr->net) != n_ptr->bclink.acked));
582 }
583 
584 
585 /**
586  * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
587  *
588  * Send packet over as many bearers as necessary to reach all nodes
589  * that have joined the broadcast link.
590  *
591  * Returns 0 (packet sent successfully) under all circumstances,
592  * since the broadcast link's pseudo-bearer never blocks
593  */
594 static int tipc_bcbearer_send(struct net *net, struct sk_buff *buf,
595 			      struct tipc_bearer *unused1,
596 			      struct tipc_media_addr *unused2)
597 {
598 	int bp_index;
599 	struct tipc_msg *msg = buf_msg(buf);
600 	struct tipc_net *tn = net_generic(net, tipc_net_id);
601 	struct tipc_bcbearer *bcbearer = tn->bcbearer;
602 	struct tipc_bclink *bclink = tn->bclink;
603 
604 	/* Prepare broadcast link message for reliable transmission,
605 	 * if first time trying to send it;
606 	 * preparation is skipped for broadcast link protocol messages
607 	 * since they are sent in an unreliable manner and don't need it
608 	 */
609 	if (likely(!msg_non_seq(buf_msg(buf)))) {
610 		bcbuf_set_acks(buf, bclink->bcast_nodes.count);
611 		msg_set_non_seq(msg, 1);
612 		msg_set_mc_netid(msg, tn->net_id);
613 		tn->bcl->stats.sent_info++;
614 		if (WARN_ON(!bclink->bcast_nodes.count)) {
615 			dump_stack();
616 			return 0;
617 		}
618 	}
619 
620 	/* Send buffer over bearers until all targets reached */
621 	bcbearer->remains = bclink->bcast_nodes;
622 
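	/* 'remains' starts as the full broadcast membership; after each bearer
	 * pair is tried, the nodes reachable through it are subtracted via
	 * tipc_nmap_diff(), and the loop stops once no destinations remain.
	 */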
623 	for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
624 		struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
625 		struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;
626 		struct tipc_bearer *bp[2] = {p, s};
627 		struct tipc_bearer *b = bp[msg_link_selector(msg)];
628 		struct sk_buff *tbuf;
629 
630 		if (!p)
631 			break; /* No more bearers to try */
632 		if (!b)
633 			b = p;
634 		tipc_nmap_diff(&bcbearer->remains, &b->nodes,
635 			       &bcbearer->remains_new);
636 		if (bcbearer->remains_new.count == bcbearer->remains.count)
637 			continue; /* Nothing added by bearer pair */
638 
639 		if (bp_index == 0) {
640 			/* Use original buffer for first bearer */
641 			tipc_bearer_send(net, b->identity, buf, &b->bcast_addr);
642 		} else {
643 			/* Avoid concurrent buffer access */
644 			tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC);
645 			if (!tbuf)
646 				break;
647 			tipc_bearer_send(net, b->identity, tbuf,
648 					 &b->bcast_addr);
649 			kfree_skb(tbuf); /* Bearer keeps a clone */
650 		}
651 		if (bcbearer->remains_new.count == 0)
652 			break; /* All targets reached */
653 
654 		bcbearer->remains = bcbearer->remains_new;
655 	}
656 
657 	return 0;
658 }
659 
660 /**
661  * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
662  */
663 void tipc_bcbearer_sort(struct net *net, struct tipc_node_map *nm_ptr,
664 			u32 node, bool action)
665 {
666 	struct tipc_net *tn = net_generic(net, tipc_net_id);
667 	struct tipc_bcbearer *bcbearer = tn->bcbearer;
668 	struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
669 	struct tipc_bcbearer_pair *bp_curr;
670 	struct tipc_bearer *b;
671 	int b_index;
672 	int pri;
673 
674 	tipc_bclink_lock(net);
675 
676 	if (action)
677 		tipc_nmap_add(nm_ptr, node);
678 	else
679 		tipc_nmap_remove(nm_ptr, node);
680 
681 	/* Group bearers by priority (can assume max of two per priority) */
682 	memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));
683 
684 	rcu_read_lock();
685 	for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
686 		b = rcu_dereference_rtnl(tn->bearer_list[b_index]);
687 		if (!b || !b->nodes.count)
688 			continue;
689 
690 		if (!bp_temp[b->priority].primary)
691 			bp_temp[b->priority].primary = b;
692 		else
693 			bp_temp[b->priority].secondary = b;
694 	}
695 	rcu_read_unlock();
696 
697 	/* Create array of bearer pairs for broadcasting */
698 	bp_curr = bcbearer->bpairs;
699 	memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));
700 
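	/* Walk priorities from highest to lowest; a secondary bearer shares an
	 * entry with the primary only if it reaches exactly the same set of
	 * nodes, otherwise it gets its own entry so no destination is lost.
	 */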
701 	for (pri = TIPC_MAX_LINK_PRI; pri >= 0; pri--) {
702 
703 		if (!bp_temp[pri].primary)
704 			continue;
705 
706 		bp_curr->primary = bp_temp[pri].primary;
707 
708 		if (bp_temp[pri].secondary) {
709 			if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,
710 					    &bp_temp[pri].secondary->nodes)) {
711 				bp_curr->secondary = bp_temp[pri].secondary;
712 			} else {
713 				bp_curr++;
714 				bp_curr->primary = bp_temp[pri].secondary;
715 			}
716 		}
717 
718 		bp_curr++;
719 	}
720 
721 	tipc_bclink_unlock(net);
722 }
723 
724 static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
725 				      struct tipc_stats *stats)
726 {
727 	int i;
728 	struct nlattr *nest;
729 
730 	struct nla_map {
731 		__u32 key;
732 		__u32 val;
733 	};
734 
735 	struct nla_map map[] = {
736 		{TIPC_NLA_STATS_RX_INFO, stats->recv_info},
737 		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
738 		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
739 		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
740 		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
741 		{TIPC_NLA_STATS_TX_INFO, stats->sent_info},
742 		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
743 		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
744 		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
745 		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
746 		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
747 		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
748 		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
749 		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
750 		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
751 		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
752 		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
753 		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
754 		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
755 			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
756 	};
757 
758 	nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
759 	if (!nest)
760 		return -EMSGSIZE;
761 
762 	for (i = 0; i <  ARRAY_SIZE(map); i++)
763 		if (nla_put_u32(skb, map[i].key, map[i].val))
764 			goto msg_full;
765 
766 	nla_nest_end(skb, nest);
767 
768 	return 0;
769 msg_full:
770 	nla_nest_cancel(skb, nest);
771 
772 	return -EMSGSIZE;
773 }
774 
775 int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
776 {
777 	int err;
778 	void *hdr;
779 	struct nlattr *attrs;
780 	struct nlattr *prop;
781 	struct tipc_net *tn = net_generic(net, tipc_net_id);
782 	struct tipc_link *bcl = tn->bcl;
783 
784 	if (!bcl)
785 		return 0;
786 
787 	tipc_bclink_lock(net);
788 
789 	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
790 			  NLM_F_MULTI, TIPC_NL_LINK_GET);
791 	if (!hdr)
792 		return -EMSGSIZE;
793 
794 	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
795 	if (!attrs)
796 		goto msg_full;
797 
798 	/* The broadcast link is always up */
799 	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
800 		goto attr_msg_full;
801 
802 	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
803 		goto attr_msg_full;
804 	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
805 		goto attr_msg_full;
806 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->next_in_no))
807 		goto attr_msg_full;
808 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->next_out_no))
809 		goto attr_msg_full;
810 
811 	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
812 	if (!prop)
813 		goto attr_msg_full;
814 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
815 		goto prop_msg_full;
816 	nla_nest_end(msg->skb, prop);
817 
818 	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
819 	if (err)
820 		goto attr_msg_full;
821 
822 	tipc_bclink_unlock(net);
823 	nla_nest_end(msg->skb, attrs);
824 	genlmsg_end(msg->skb, hdr);
825 
826 	return 0;
827 
828 prop_msg_full:
829 	nla_nest_cancel(msg->skb, prop);
830 attr_msg_full:
831 	nla_nest_cancel(msg->skb, attrs);
832 msg_full:
833 	tipc_bclink_unlock(net);
834 	genlmsg_cancel(msg->skb, hdr);
835 
836 	return -EMSGSIZE;
837 }
838 
839 int tipc_bclink_reset_stats(struct net *net)
840 {
841 	struct tipc_net *tn = net_generic(net, tipc_net_id);
842 	struct tipc_link *bcl = tn->bcl;
843 
844 	if (!bcl)
845 		return -ENOPROTOOPT;
846 
847 	tipc_bclink_lock(net);
848 	memset(&bcl->stats, 0, sizeof(bcl->stats));
849 	tipc_bclink_unlock(net);
850 	return 0;
851 }
852 
853 int tipc_bclink_set_queue_limits(struct net *net, u32 limit)
854 {
855 	struct tipc_net *tn = net_generic(net, tipc_net_id);
856 	struct tipc_link *bcl = tn->bcl;
857 
858 	if (!bcl)
859 		return -ENOPROTOOPT;
860 	if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
861 		return -EINVAL;
862 
863 	tipc_bclink_lock(net);
864 	tipc_link_set_queue_limits(bcl, limit);
865 	tipc_bclink_unlock(net);
866 	return 0;
867 }
868 
869 int tipc_bclink_init(struct net *net)
870 {
871 	struct tipc_net *tn = net_generic(net, tipc_net_id);
872 	struct tipc_bcbearer *bcbearer;
873 	struct tipc_bclink *bclink;
874 	struct tipc_link *bcl;
875 
876 	bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
877 	if (!bcbearer)
878 		return -ENOMEM;
879 
880 	bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
881 	if (!bclink) {
882 		kfree(bcbearer);
883 		return -ENOMEM;
884 	}
885 
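	/* The broadcast link is driven by the ordinary link code: it is owned
	 * by an internal pseudo-node and sends through a pseudo-bearer whose
	 * send routine, tipc_bcbearer_send(), replicates each packet over the
	 * real bearers registered in bearer_list[].
	 */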
886 	bcl = &bclink->link;
887 	bcbearer->bearer.media = &bcbearer->media;
888 	bcbearer->media.send_msg = tipc_bcbearer_send;
889 	sprintf(bcbearer->media.name, "tipc-broadcast");
890 
891 	spin_lock_init(&bclink->lock);
892 	__skb_queue_head_init(&bcl->transmq);
893 	__skb_queue_head_init(&bcl->backlogq);
894 	__skb_queue_head_init(&bcl->deferdq);
895 	skb_queue_head_init(&bcl->wakeupq);
896 	bcl->next_out_no = 1;
897 	spin_lock_init(&bclink->node.lock);
898 	__skb_queue_head_init(&bclink->arrvq);
899 	skb_queue_head_init(&bclink->inputq);
900 	bcl->owner = &bclink->node;
901 	bcl->owner->net = net;
902 	bcl->mtu = MAX_PKT_DEFAULT_MCAST;
903 	tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
904 	bcl->bearer_id = MAX_BEARERS;
905 	rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer);
906 	bcl->state = WORKING_WORKING;
907 	bcl->pmsg = (struct tipc_msg *)&bcl->proto_msg;
908 	msg_set_prevnode(bcl->pmsg, tn->own_addr);
909 	strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
910 	tn->bcbearer = bcbearer;
911 	tn->bclink = bclink;
912 	tn->bcl = bcl;
913 	return 0;
914 }
915 
916 void tipc_bclink_stop(struct net *net)
917 {
918 	struct tipc_net *tn = net_generic(net, tipc_net_id);
919 
920 	tipc_bclink_lock(net);
921 	tipc_link_purge_queues(tn->bcl);
922 	tipc_bclink_unlock(net);
923 
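	/* Detach the pseudo-bearer installed by tipc_bclink_init() (BCBEARER
	 * is the pseudo-bearer slot used there) and wait for RCU readers to
	 * finish before freeing the broadcast structures.
	 */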
924 	RCU_INIT_POINTER(tn->bearer_list[BCBEARER], NULL);
925 	synchronize_net();
926 	kfree(tn->bcbearer);
927 	kfree(tn->bclink);
928 }
929 
930 /**
931  * tipc_nmap_add - add a node to a node map
932  */
933 static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
934 {
935 	int n = tipc_node(node);
936 	int w = n / WSIZE;
937 	u32 mask = (1 << (n % WSIZE));
938 
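	/* Worked example (assuming WSIZE is 32): tipc_node() keeps only the
	 * node field of the address, so node 0x1001005 gives n = 5, word
	 * w = 0 and mask = 1 << 5, i.e. bit 5 of the first map word.
	 */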
939 	if ((nm_ptr->map[w] & mask) == 0) {
940 		nm_ptr->count++;
941 		nm_ptr->map[w] |= mask;
942 	}
943 }
944 
945 /**
946  * tipc_nmap_remove - remove a node from a node map
947  */
948 static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
949 {
950 	int n = tipc_node(node);
951 	int w = n / WSIZE;
952 	u32 mask = (1 << (n % WSIZE));
953 
954 	if ((nm_ptr->map[w] & mask) != 0) {
955 		nm_ptr->map[w] &= ~mask;
956 		nm_ptr->count--;
957 	}
958 }
959 
960 /**
961  * tipc_nmap_diff - find differences between node maps
962  * @nm_a: input node map A
963  * @nm_b: input node map B
964  * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
965  */
966 static void tipc_nmap_diff(struct tipc_node_map *nm_a,
967 			   struct tipc_node_map *nm_b,
968 			   struct tipc_node_map *nm_diff)
969 {
970 	int stop = ARRAY_SIZE(nm_a->map);
971 	int w;
972 	int b;
973 	u32 map;
974 
975 	memset(nm_diff, 0, sizeof(*nm_diff));
976 	for (w = 0; w < stop; w++) {
977 		map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
978 		nm_diff->map[w] = map;
979 		if (map != 0) {
980 			for (b = 0 ; b < WSIZE; b++) {
981 				if (map & (1 << b))
982 					nm_diff->count++;
983 			}
984 		}
985 	}
986 }
987