// SPDX-License-Identifier: GPL-2.0-only
/*
 *	IEEE 802.1Q Multiple Registration Protocol (MRP)
 *
 *	Copyright (c) 2012 Massachusetts Institute of Technology
 *
 *	Adapted from code in net/802/garp.c
 *	Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
 */
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/mrp.h>
#include <asm/unaligned.h>

static unsigned int mrp_join_time __read_mostly = 200;
module_param(mrp_join_time, uint, 0644);
MODULE_PARM_DESC(mrp_join_time, "Join time in ms (default 200ms)");

static unsigned int mrp_periodic_time __read_mostly = 1000;
module_param(mrp_periodic_time, uint, 0644);
MODULE_PARM_DESC(mrp_periodic_time, "Periodic time in ms (default 1s)");

MODULE_LICENSE("GPL");

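/* Applicant state machine, using the MRP Applicant state names from
 * IEEE 802.1Q (clause 10). Roughly, the first letter encodes urgency
 * (V = very anxious, A = anxious, Q = quiet, L = leaving) and the second
 * the role (O = observer, P = passive member, N = declaring new,
 * A = active member). mrp_applicant_state_table[state][event] yields the
 * next state; entries left at 0 are MRP_APPLICANT_INVALID and trigger a
 * WARN_ON() in mrp_attr_event().
 */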
static const u8
mrp_applicant_state_table[MRP_APPLICANT_MAX + 1][MRP_EVENT_MAX + 1] = {
	[MRP_APPLICANT_VO] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_VP,
		[MRP_EVENT_LV]		= MRP_APPLICANT_VO,
		[MRP_EVENT_TX]		= MRP_APPLICANT_VO,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_VO,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_AO,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_VO,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_VO,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_VO,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VO,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VO,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VO,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_VO,
	},
	[MRP_APPLICANT_VP] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_VP,
		[MRP_EVENT_LV]		= MRP_APPLICANT_VO,
		[MRP_EVENT_TX]		= MRP_APPLICANT_AA,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_AP,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_VP,
	},
	[MRP_APPLICANT_VN] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_VN,
		[MRP_EVENT_LV]		= MRP_APPLICANT_LA,
		[MRP_EVENT_TX]		= MRP_APPLICANT_AN,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_VN,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_VN,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_VN,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_VN,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_VN,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VN,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VN,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VN,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_VN,
	},
	[MRP_APPLICANT_AN] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_AN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_AN,
		[MRP_EVENT_LV]		= MRP_APPLICANT_LA,
		[MRP_EVENT_TX]		= MRP_APPLICANT_QA,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_AN,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_AN,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_AN,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AN,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AN,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VN,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VN,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VN,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_AN,
	},
	[MRP_APPLICANT_AA] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_AA,
		[MRP_EVENT_LV]		= MRP_APPLICANT_LA,
		[MRP_EVENT_TX]		= MRP_APPLICANT_QA,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_AA,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_QA,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_AA,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AA,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AA,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_AA,
	},
	[MRP_APPLICANT_QA] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_QA,
		[MRP_EVENT_LV]		= MRP_APPLICANT_LA,
		[MRP_EVENT_TX]		= MRP_APPLICANT_QA,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_QA,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_QA,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_QA,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AA,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AA,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_AA,
	},
	[MRP_APPLICANT_LA] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_AA,
		[MRP_EVENT_LV]		= MRP_APPLICANT_LA,
		[MRP_EVENT_TX]		= MRP_APPLICANT_VO,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_LA,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_LA,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_LA,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_LA,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_LA,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_LA,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_LA,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_LA,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_LA,
	},
	[MRP_APPLICANT_AO] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_AP,
		[MRP_EVENT_LV]		= MRP_APPLICANT_AO,
		[MRP_EVENT_TX]		= MRP_APPLICANT_AO,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_AO,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_QO,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_AO,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AO,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AO,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VO,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VO,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VO,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_AO,
	},
	[MRP_APPLICANT_QO] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_QP,
		[MRP_EVENT_LV]		= MRP_APPLICANT_QO,
		[MRP_EVENT_TX]		= MRP_APPLICANT_QO,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_QO,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_QO,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_QO,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AO,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AO,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VO,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VO,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VO,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_QO,
	},
	[MRP_APPLICANT_AP] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_AP,
		[MRP_EVENT_LV]		= MRP_APPLICANT_AO,
		[MRP_EVENT_TX]		= MRP_APPLICANT_QA,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_AP,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_QP,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_AP,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AP,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AP,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_AP,
	},
	[MRP_APPLICANT_QP] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_QP,
		[MRP_EVENT_LV]		= MRP_APPLICANT_QO,
		[MRP_EVENT_TX]		= MRP_APPLICANT_QP,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_QP,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_QP,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_QP,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AP,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AP,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_AP,
	},
};

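/* For each applicant state, the action to take at a transmit (TX)
 * opportunity. The *_OPTIONAL actions may be encoded when there is spare
 * room in the PDU, but this implementation simply sends nothing for them
 * (see mrp_attr_event()).
 */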
static const u8
mrp_tx_action_table[MRP_APPLICANT_MAX + 1] = {
	[MRP_APPLICANT_VO] = MRP_TX_ACTION_S_IN_OPTIONAL,
	[MRP_APPLICANT_VP] = MRP_TX_ACTION_S_JOIN_IN,
	[MRP_APPLICANT_VN] = MRP_TX_ACTION_S_NEW,
	[MRP_APPLICANT_AN] = MRP_TX_ACTION_S_NEW,
	[MRP_APPLICANT_AA] = MRP_TX_ACTION_S_JOIN_IN,
	[MRP_APPLICANT_QA] = MRP_TX_ACTION_S_JOIN_IN_OPTIONAL,
	[MRP_APPLICANT_LA] = MRP_TX_ACTION_S_LV,
	[MRP_APPLICANT_AO] = MRP_TX_ACTION_S_IN_OPTIONAL,
	[MRP_APPLICANT_QO] = MRP_TX_ACTION_S_IN_OPTIONAL,
	[MRP_APPLICANT_AP] = MRP_TX_ACTION_S_JOIN_IN,
	[MRP_APPLICANT_QP] = MRP_TX_ACTION_S_IN_OPTIONAL,
};

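/* Attribute values are treated as big-endian multi-precision integers so
 * that consecutive values can share one VectorAttribute. For example, with
 * MVRP's two-byte VLAN ID attribute, incrementing { 0x00, 0xff } (VID 255)
 * yields { 0x01, 0x00 } (VID 256).
 */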
static void mrp_attrvalue_inc(void *value, u8 len)
{
	u8 *v = (u8 *)value;

	/* Add 1 to the last byte. If it becomes zero,
	 * go to the previous byte and repeat.
	 */
	while (len > 0 && !++v[--len])
		;
}

static int mrp_attr_cmp(const struct mrp_attr *attr,
			 const void *value, u8 len, u8 type)
{
	if (attr->type != type)
		return attr->type - type;
	if (attr->len != len)
		return attr->len - len;
	return memcmp(attr->value, value, len);
}

static struct mrp_attr *mrp_attr_lookup(const struct mrp_applicant *app,
					const void *value, u8 len, u8 type)
{
	struct rb_node *parent = app->mad.rb_node;
	struct mrp_attr *attr;
	int d;

	while (parent) {
		attr = rb_entry(parent, struct mrp_attr, node);
		d = mrp_attr_cmp(attr, value, len, type);
		if (d > 0)
			parent = parent->rb_left;
		else if (d < 0)
			parent = parent->rb_right;
		else
			return attr;
	}
	return NULL;
}

static struct mrp_attr *mrp_attr_create(struct mrp_applicant *app,
					const void *value, u8 len, u8 type)
{
	struct rb_node *parent = NULL, **p = &app->mad.rb_node;
	struct mrp_attr *attr;
	int d;

	while (*p) {
		parent = *p;
		attr = rb_entry(parent, struct mrp_attr, node);
		d = mrp_attr_cmp(attr, value, len, type);
		if (d > 0)
			p = &parent->rb_left;
		else if (d < 0)
			p = &parent->rb_right;
		else {
			/* The attribute already exists; re-use it. */
			return attr;
		}
	}
	attr = kmalloc(sizeof(*attr) + len, GFP_ATOMIC);
	if (!attr)
		return attr;
	attr->state = MRP_APPLICANT_VO;
	attr->type  = type;
	attr->len   = len;
	memcpy(attr->value, value, len);

	rb_link_node(&attr->node, parent, p);
	rb_insert_color(&attr->node, &app->mad);
	return attr;
}

static void mrp_attr_destroy(struct mrp_applicant *app, struct mrp_attr *attr)
{
	rb_erase(&attr->node, &app->mad);
	kfree(attr);
}

static void mrp_attr_destroy_all(struct mrp_applicant *app)
{
	struct rb_node *node, *next;
	struct mrp_attr *attr;

	for (node = rb_first(&app->mad);
	     next = node ? rb_next(node) : NULL, node != NULL;
	     node = next) {
		attr = rb_entry(node, struct mrp_attr, node);
		mrp_attr_destroy(app, attr);
	}
}

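/* Transmit path: a PDU is built incrementally in app->pdu and moved to
 * app->queue when it fills up or a transmit opportunity ends. The resulting
 * layout mirrors the MRPDU format of IEEE 802.1Q:
 *
 *	struct mrp_pdu_hdr			protocol version
 *	for each Message:
 *		struct mrp_msg_hdr		attribute type and length
 *		one or more VectorAttributes:
 *			struct mrp_vecattr_hdr	LeaveAll flag + event count,
 *						followed by FirstValue
 *			packed event bytes	three events per byte
 *		EndMark (0x0000)		closes the VectorAttribute list
 *	EndMark (0x0000)			closes the PDU
 *
 * Per-PDU encoder/parser state (mh, vah, attrvalue) lives in the skb
 * control block, accessed via mrp_cb().
 */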
static int mrp_pdu_init(struct mrp_applicant *app)
{
	struct sk_buff *skb;
	struct mrp_pdu_hdr *ph;

	skb = alloc_skb(app->dev->mtu + LL_RESERVED_SPACE(app->dev),
			GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb->dev = app->dev;
	skb->protocol = app->app->pkttype.type;
	skb_reserve(skb, LL_RESERVED_SPACE(app->dev));
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	ph = __skb_put(skb, sizeof(*ph));
	ph->version = app->app->version;

	app->pdu = skb;
	return 0;
}

static int mrp_pdu_append_end_mark(struct mrp_applicant *app)
{
	__be16 *endmark;

	if (skb_tailroom(app->pdu) < sizeof(*endmark))
		return -1;
	endmark = __skb_put(app->pdu, sizeof(*endmark));
	put_unaligned(MRP_END_MARK, endmark);
	return 0;
}

static void mrp_pdu_queue(struct mrp_applicant *app)
{
	if (!app->pdu)
		return;

	if (mrp_cb(app->pdu)->mh)
		mrp_pdu_append_end_mark(app);
	mrp_pdu_append_end_mark(app);

	dev_hard_header(app->pdu, app->dev, ntohs(app->app->pkttype.type),
			app->app->group_address, app->dev->dev_addr,
			app->pdu->len);

	skb_queue_tail(&app->queue, app->pdu);
	app->pdu = NULL;
}

static void mrp_queue_xmit(struct mrp_applicant *app)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&app->queue)))
		dev_queue_xmit(skb);
}

static int mrp_pdu_append_msg_hdr(struct mrp_applicant *app,
				  u8 attrtype, u8 attrlen)
{
	struct mrp_msg_hdr *mh;

	if (mrp_cb(app->pdu)->mh) {
		if (mrp_pdu_append_end_mark(app) < 0)
			return -1;
		mrp_cb(app->pdu)->mh = NULL;
		mrp_cb(app->pdu)->vah = NULL;
	}

	if (skb_tailroom(app->pdu) < sizeof(*mh))
		return -1;
	mh = __skb_put(app->pdu, sizeof(*mh));
	mh->attrtype = attrtype;
	mh->attrlen = attrlen;
	mrp_cb(app->pdu)->mh = mh;
	return 0;
}

static int mrp_pdu_append_vecattr_hdr(struct mrp_applicant *app,
				      const void *firstattrvalue, u8 attrlen)
{
	struct mrp_vecattr_hdr *vah;

	if (skb_tailroom(app->pdu) < sizeof(*vah) + attrlen)
		return -1;
	vah = __skb_put(app->pdu, sizeof(*vah) + attrlen);
	put_unaligned(0, &vah->lenflags);
	memcpy(vah->firstattrvalue, firstattrvalue, attrlen);
	mrp_cb(app->pdu)->vah = vah;
	memcpy(mrp_cb(app->pdu)->attrvalue, firstattrvalue, attrlen);
	return 0;
}

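/* Vector events are packed three to a byte as base-__MRP_VECATTR_EVENT_MAX
 * (i.e. base 6) digits:
 *
 *	packed = e1 * 36 + e2 * 6 + e3
 *
 * so, for example, JoinIn (1), New (0), Mt (4) packs to 1*36 + 0*6 + 4 = 40.
 * The VectorAttribute length field counts events, not bytes; pos = len % 3
 * below selects which "digit" of the current byte the next event occupies.
 */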
static int mrp_pdu_append_vecattr_event(struct mrp_applicant *app,
					const struct mrp_attr *attr,
					enum mrp_vecattr_event vaevent)
{
	u16 len, pos;
	u8 *vaevents;
	int err;
again:
	if (!app->pdu) {
		err = mrp_pdu_init(app);
		if (err < 0)
			return err;
	}

	/* If there is no Message header in the PDU, or the Message header is
	 * for a different attribute type, add an EndMark (if necessary) and a
	 * new Message header to the PDU.
	 */
	if (!mrp_cb(app->pdu)->mh ||
	    mrp_cb(app->pdu)->mh->attrtype != attr->type ||
	    mrp_cb(app->pdu)->mh->attrlen != attr->len) {
		if (mrp_pdu_append_msg_hdr(app, attr->type, attr->len) < 0)
			goto queue;
	}

	/* If there is no VectorAttribute header for this Message in the PDU,
	 * or this attribute's value does not sequentially follow the previous
	 * attribute's value, add a new VectorAttribute header to the PDU.
	 */
	if (!mrp_cb(app->pdu)->vah ||
	    memcmp(mrp_cb(app->pdu)->attrvalue, attr->value, attr->len)) {
		if (mrp_pdu_append_vecattr_hdr(app, attr->value, attr->len) < 0)
			goto queue;
	}

	len = be16_to_cpu(get_unaligned(&mrp_cb(app->pdu)->vah->lenflags));
	pos = len % 3;

	/* Events are packed into Vectors in the PDU, three to a byte. Add a
	 * byte to the end of the Vector if necessary.
	 */
	if (!pos) {
		if (skb_tailroom(app->pdu) < sizeof(u8))
			goto queue;
		vaevents = __skb_put(app->pdu, sizeof(u8));
	} else {
		vaevents = (u8 *)(skb_tail_pointer(app->pdu) - sizeof(u8));
	}

	switch (pos) {
	case 0:
		*vaevents = vaevent * (__MRP_VECATTR_EVENT_MAX *
				       __MRP_VECATTR_EVENT_MAX);
		break;
	case 1:
		*vaevents += vaevent * __MRP_VECATTR_EVENT_MAX;
		break;
	case 2:
		*vaevents += vaevent;
		break;
	default:
		WARN_ON(1);
	}

	/* Increment the length of the VectorAttribute in the PDU, as well as
	 * the value of the next attribute that would continue its Vector.
	 */
	put_unaligned(cpu_to_be16(++len), &mrp_cb(app->pdu)->vah->lenflags);
	mrp_attrvalue_inc(mrp_cb(app->pdu)->attrvalue, attr->len);

	return 0;

queue:
	mrp_pdu_queue(app);
	goto again;
}

static void mrp_attr_event(struct mrp_applicant *app,
			   struct mrp_attr *attr, enum mrp_event event)
{
	enum mrp_applicant_state state;

	state = mrp_applicant_state_table[attr->state][event];
	if (state == MRP_APPLICANT_INVALID) {
		WARN_ON(1);
		return;
	}

	if (event == MRP_EVENT_TX) {
		/* When appending the attribute fails, don't update its state
		 * in order to retry at the next TX event.
		 */

		switch (mrp_tx_action_table[attr->state]) {
		case MRP_TX_ACTION_NONE:
		case MRP_TX_ACTION_S_JOIN_IN_OPTIONAL:
		case MRP_TX_ACTION_S_IN_OPTIONAL:
			break;
		case MRP_TX_ACTION_S_NEW:
			if (mrp_pdu_append_vecattr_event(
				    app, attr, MRP_VECATTR_EVENT_NEW) < 0)
				return;
			break;
		case MRP_TX_ACTION_S_JOIN_IN:
			if (mrp_pdu_append_vecattr_event(
				    app, attr, MRP_VECATTR_EVENT_JOIN_IN) < 0)
				return;
			break;
		case MRP_TX_ACTION_S_LV:
			if (mrp_pdu_append_vecattr_event(
				    app, attr, MRP_VECATTR_EVENT_LV) < 0)
				return;
			/* As a pure applicant, sending a leave message
			 * implies that the attribute was unregistered and
			 * can be destroyed.
			 */
			mrp_attr_destroy(app, attr);
			return;
		default:
			WARN_ON(1);
		}
	}

	attr->state = state;
}

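/* Request that an attribute be declared on behalf of the given application.
 * Callers hold the RTNL lock, which keeps the port and applicant pointers
 * stable. As a sketch (based on the MVRP user in net/8021q/vlan_mvrp.c;
 * identifiers below are illustrative), declaring a VLAN ID looks roughly
 * like:
 *
 *	__be16 vid = htons(vlan_id);
 *
 *	err = mrp_request_join(real_dev, &vlan_mrp_app,
 *			       &vid, sizeof(vid), MVRP_ATTR_VID);
 */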
int mrp_request_join(const struct net_device *dev,
		     const struct mrp_application *appl,
		     const void *value, u8 len, u8 type)
{
	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
	struct mrp_applicant *app = rtnl_dereference(
		port->applicants[appl->type]);
	struct mrp_attr *attr;

	if (sizeof(struct mrp_skb_cb) + len >
	    sizeof_field(struct sk_buff, cb))
		return -ENOMEM;

	spin_lock_bh(&app->lock);
	attr = mrp_attr_create(app, value, len, type);
	if (!attr) {
		spin_unlock_bh(&app->lock);
		return -ENOMEM;
	}
	mrp_attr_event(app, attr, MRP_EVENT_JOIN);
	spin_unlock_bh(&app->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(mrp_request_join);

void mrp_request_leave(const struct net_device *dev,
		       const struct mrp_application *appl,
		       const void *value, u8 len, u8 type)
{
	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
	struct mrp_applicant *app = rtnl_dereference(
		port->applicants[appl->type]);
	struct mrp_attr *attr;

	if (sizeof(struct mrp_skb_cb) + len >
	    sizeof_field(struct sk_buff, cb))
		return;

	spin_lock_bh(&app->lock);
	attr = mrp_attr_lookup(app, value, len, type);
	if (!attr) {
		spin_unlock_bh(&app->lock);
		return;
	}
	mrp_attr_event(app, attr, MRP_EVENT_LV);
	spin_unlock_bh(&app->lock);
}
EXPORT_SYMBOL_GPL(mrp_request_leave);

static void mrp_mad_event(struct mrp_applicant *app, enum mrp_event event)
{
	struct rb_node *node, *next;
	struct mrp_attr *attr;

	for (node = rb_first(&app->mad);
	     next = node ? rb_next(node) : NULL, node != NULL;
	     node = next) {
		attr = rb_entry(node, struct mrp_attr, node);
		mrp_attr_event(app, attr, event);
	}
}

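/* The join timer drives transmission: on expiry, every attribute gets a TX
 * event, the resulting PDU(s) are queued and sent, and the timer is re-armed
 * with a random delay in [0, mrp_join_time) ms so that stations sharing a
 * LAN do not synchronise their transmissions.
 */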
static void mrp_join_timer_arm(struct mrp_applicant *app)
{
	unsigned long delay;

	delay = get_random_u32_below(msecs_to_jiffies(mrp_join_time));
	mod_timer(&app->join_timer, jiffies + delay);
}

static void mrp_join_timer(struct timer_list *t)
{
	struct mrp_applicant *app = from_timer(app, t, join_timer);

	spin_lock(&app->lock);
	mrp_mad_event(app, MRP_EVENT_TX);
	mrp_pdu_queue(app);
	spin_unlock(&app->lock);

	mrp_queue_xmit(app);
	spin_lock(&app->lock);
	if (likely(app->active))
		mrp_join_timer_arm(app);
	spin_unlock(&app->lock);
}

static void mrp_periodic_timer_arm(struct mrp_applicant *app)
{
	mod_timer(&app->periodic_timer,
		  jiffies + msecs_to_jiffies(mrp_periodic_time));
}

static void mrp_periodic_timer(struct timer_list *t)
{
	struct mrp_applicant *app = from_timer(app, t, periodic_timer);

	spin_lock(&app->lock);
	if (likely(app->active)) {
		mrp_mad_event(app, MRP_EVENT_PERIODIC);
		mrp_pdu_queue(app);
		mrp_periodic_timer_arm(app);
	}
	spin_unlock(&app->lock);
}

static int mrp_pdu_parse_end_mark(struct sk_buff *skb, int *offset)
{
	__be16 endmark;

	if (skb_copy_bits(skb, *offset, &endmark, sizeof(endmark)) < 0)
		return -1;
	if (endmark == MRP_END_MARK) {
		*offset += sizeof(endmark);
		return -1;
	}
	return 0;
}

static void mrp_pdu_parse_vecattr_event(struct mrp_applicant *app,
					struct sk_buff *skb,
					enum mrp_vecattr_event vaevent)
{
	struct mrp_attr *attr;
	enum mrp_event event;

	attr = mrp_attr_lookup(app, mrp_cb(skb)->attrvalue,
			       mrp_cb(skb)->mh->attrlen,
			       mrp_cb(skb)->mh->attrtype);
	if (attr == NULL)
		return;

	switch (vaevent) {
	case MRP_VECATTR_EVENT_NEW:
		event = MRP_EVENT_R_NEW;
		break;
	case MRP_VECATTR_EVENT_JOIN_IN:
		event = MRP_EVENT_R_JOIN_IN;
		break;
	case MRP_VECATTR_EVENT_IN:
		event = MRP_EVENT_R_IN;
		break;
	case MRP_VECATTR_EVENT_JOIN_MT:
		event = MRP_EVENT_R_JOIN_MT;
		break;
	case MRP_VECATTR_EVENT_MT:
		event = MRP_EVENT_R_MT;
		break;
	case MRP_VECATTR_EVENT_LV:
		event = MRP_EVENT_R_LV;
		break;
	default:
		return;
	}

	mrp_attr_event(app, attr, event);
}

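/* Parse one VectorAttribute. Its 16-bit lenflags header carries a LeaveAll
 * indication (MRP_VECATTR_HDR_FLAG_LA) in its upper bits and the number of
 * events in the Vector in its lower bits (MRP_VECATTR_HDR_LEN_MASK); the
 * events follow FirstValue, packed three per byte as described above
 * mrp_pdu_append_vecattr_event().
 */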
static int mrp_pdu_parse_vecattr(struct mrp_applicant *app,
				 struct sk_buff *skb, int *offset)
{
	struct mrp_vecattr_hdr _vah;
	u16 valen;
	u8 vaevents, vaevent;

	mrp_cb(skb)->vah = skb_header_pointer(skb, *offset, sizeof(_vah),
					      &_vah);
	if (!mrp_cb(skb)->vah)
		return -1;
	*offset += sizeof(_vah);

	if (get_unaligned(&mrp_cb(skb)->vah->lenflags) &
	    MRP_VECATTR_HDR_FLAG_LA)
		mrp_mad_event(app, MRP_EVENT_R_LA);
	valen = be16_to_cpu(get_unaligned(&mrp_cb(skb)->vah->lenflags) &
			    MRP_VECATTR_HDR_LEN_MASK);

	/* The VectorAttribute structure in a PDU carries event information
	 * about one or more attributes having consecutive values. Only the
	 * value for the first attribute is contained in the structure. So
	 * we make a copy of that value, and then increment it each time we
	 * advance to the next event in its Vector.
	 */
	if (sizeof(struct mrp_skb_cb) + mrp_cb(skb)->mh->attrlen >
	    sizeof_field(struct sk_buff, cb))
		return -1;
	if (skb_copy_bits(skb, *offset, mrp_cb(skb)->attrvalue,
			  mrp_cb(skb)->mh->attrlen) < 0)
		return -1;
	*offset += mrp_cb(skb)->mh->attrlen;

	/* In a VectorAttribute, the Vector contains events which are packed
	 * three to a byte. We process one byte of the Vector at a time.
	 */
	while (valen > 0) {
		if (skb_copy_bits(skb, *offset, &vaevents,
				  sizeof(vaevents)) < 0)
			return -1;
		*offset += sizeof(vaevents);

		/* Extract and process the first event. */
		vaevent = vaevents / (__MRP_VECATTR_EVENT_MAX *
				      __MRP_VECATTR_EVENT_MAX);
		if (vaevent >= __MRP_VECATTR_EVENT_MAX) {
			/* The byte is malformed; stop processing. */
			return -1;
		}
		mrp_pdu_parse_vecattr_event(app, skb, vaevent);

		/* If present, extract and process the second event. */
		if (!--valen)
			break;
		mrp_attrvalue_inc(mrp_cb(skb)->attrvalue,
				  mrp_cb(skb)->mh->attrlen);
		vaevents %= (__MRP_VECATTR_EVENT_MAX *
			     __MRP_VECATTR_EVENT_MAX);
		vaevent = vaevents / __MRP_VECATTR_EVENT_MAX;
		mrp_pdu_parse_vecattr_event(app, skb, vaevent);

		/* If present, extract and process the third event. */
		if (!--valen)
			break;
		mrp_attrvalue_inc(mrp_cb(skb)->attrvalue,
				  mrp_cb(skb)->mh->attrlen);
		vaevents %= __MRP_VECATTR_EVENT_MAX;
		vaevent = vaevents;
		mrp_pdu_parse_vecattr_event(app, skb, vaevent);
	}
	return 0;
}

static int mrp_pdu_parse_msg(struct mrp_applicant *app, struct sk_buff *skb,
			     int *offset)
{
	struct mrp_msg_hdr _mh;

	mrp_cb(skb)->mh = skb_header_pointer(skb, *offset, sizeof(_mh), &_mh);
	if (!mrp_cb(skb)->mh)
		return -1;
	*offset += sizeof(_mh);

	if (mrp_cb(skb)->mh->attrtype == 0 ||
	    mrp_cb(skb)->mh->attrtype > app->app->maxattr ||
	    mrp_cb(skb)->mh->attrlen == 0)
		return -1;

	while (skb->len > *offset) {
		if (mrp_pdu_parse_end_mark(skb, offset) < 0)
			break;
		if (mrp_pdu_parse_vecattr(app, skb, offset) < 0)
			return -1;
	}
	return 0;
}

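/* Receive handler registered via dev_add_pack() for the application's
 * packet type. It normally runs from the softirq receive path under the
 * RCU read lock, so the port and applicant are looked up with
 * rcu_dereference() and per-applicant state is protected by app->lock.
 * Messages are parsed until a double EndMark or the end of the skb.
 */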
static int mrp_rcv(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pt, struct net_device *orig_dev)
{
	struct mrp_application *appl = container_of(pt, struct mrp_application,
						    pkttype);
	struct mrp_port *port;
	struct mrp_applicant *app;
	struct mrp_pdu_hdr _ph;
	const struct mrp_pdu_hdr *ph;
	int offset = skb_network_offset(skb);

	/* If the interface is in promiscuous mode, drop the packet if
	 * it was unicast to another host.
	 */
	if (unlikely(skb->pkt_type == PACKET_OTHERHOST))
		goto out;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		goto out;
	port = rcu_dereference(dev->mrp_port);
	if (unlikely(!port))
		goto out;
	app = rcu_dereference(port->applicants[appl->type]);
	if (unlikely(!app))
		goto out;

	ph = skb_header_pointer(skb, offset, sizeof(_ph), &_ph);
	if (!ph)
		goto out;
	offset += sizeof(_ph);

	if (ph->version != app->app->version)
		goto out;

	spin_lock(&app->lock);
	while (skb->len > offset) {
		if (mrp_pdu_parse_end_mark(skb, &offset) < 0)
			break;
		if (mrp_pdu_parse_msg(app, skb, &offset) < 0)
			break;
	}
	spin_unlock(&app->lock);
out:
	kfree_skb(skb);
	return 0;
}

static int mrp_init_port(struct net_device *dev)
{
	struct mrp_port *port;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;
	rcu_assign_pointer(dev->mrp_port, port);
	return 0;
}

static void mrp_release_port(struct net_device *dev)
{
	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
	unsigned int i;

	for (i = 0; i <= MRP_APPLICATION_MAX; i++) {
		if (rtnl_dereference(port->applicants[i]))
			return;
	}
	RCU_INIT_POINTER(dev->mrp_port, NULL);
	kfree_rcu(port, rcu);
}

int mrp_init_applicant(struct net_device *dev, struct mrp_application *appl)
{
	struct mrp_applicant *app;
	int err;

	ASSERT_RTNL();

	if (!rtnl_dereference(dev->mrp_port)) {
		err = mrp_init_port(dev);
		if (err < 0)
			goto err1;
	}

	err = -ENOMEM;
	app = kzalloc(sizeof(*app), GFP_KERNEL);
	if (!app)
		goto err2;

	err = dev_mc_add(dev, appl->group_address);
	if (err < 0)
		goto err3;

	app->dev = dev;
	app->app = appl;
	app->mad = RB_ROOT;
	app->active = true;
	spin_lock_init(&app->lock);
	skb_queue_head_init(&app->queue);
	rcu_assign_pointer(dev->mrp_port->applicants[appl->type], app);
	timer_setup(&app->join_timer, mrp_join_timer, 0);
	mrp_join_timer_arm(app);
	timer_setup(&app->periodic_timer, mrp_periodic_timer, 0);
	mrp_periodic_timer_arm(app);
	return 0;

err3:
	kfree(app);
err2:
	mrp_release_port(dev);
err1:
	return err;
}
EXPORT_SYMBOL_GPL(mrp_init_applicant);

void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
{
	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
	struct mrp_applicant *app = rtnl_dereference(
		port->applicants[appl->type]);

	ASSERT_RTNL();

	RCU_INIT_POINTER(port->applicants[appl->type], NULL);

	spin_lock_bh(&app->lock);
	app->active = false;
	spin_unlock_bh(&app->lock);
	/* Delete the timers and generate a final TX event to flush out
	 * all pending messages before the applicant is gone.
	 */
	timer_shutdown_sync(&app->join_timer);
	timer_shutdown_sync(&app->periodic_timer);

	spin_lock_bh(&app->lock);
	mrp_mad_event(app, MRP_EVENT_TX);
	mrp_attr_destroy_all(app);
	mrp_pdu_queue(app);
	spin_unlock_bh(&app->lock);

	mrp_queue_xmit(app);

	dev_mc_del(dev, appl->group_address);
	kfree_rcu(app, rcu);
	mrp_release_port(dev);
}
EXPORT_SYMBOL_GPL(mrp_uninit_applicant);

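/* Register an MRP application so that its PDUs are delivered to mrp_rcv().
 * As a sketch loosely modelled on the MVRP user (net/8021q/vlan_mvrp.c;
 * the identifiers below are illustrative), an application description and
 * its registration look roughly like:
 *
 *	static struct mrp_application vlan_mrp_app __read_mostly = {
 *		.type		= MRP_APPLICATION_MVRP,
 *		.maxattr	= MVRP_ATTR_VID,
 *		.pkttype.type	= htons(ETH_P_MVRP),
 *		.group_address	= mvrp_group_address,
 *		.version	= 0,
 *	};
 *
 *	err = mrp_register_application(&vlan_mrp_app);
 */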
int mrp_register_application(struct mrp_application *appl)
{
	appl->pkttype.func = mrp_rcv;
	dev_add_pack(&appl->pkttype);
	return 0;
}
EXPORT_SYMBOL_GPL(mrp_register_application);

void mrp_unregister_application(struct mrp_application *appl)
{
	dev_remove_pack(&appl->pkttype);
}
EXPORT_SYMBOL_GPL(mrp_unregister_application);
