/*
 *	IEEE 802.1Q Multiple Registration Protocol (MRP)
 *
 *	Copyright (c) 2012 Massachusetts Institute of Technology
 *
 *	Adapted from code in net/802/garp.c
 *	Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	version 2 as published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/mrp.h>
#include <asm/unaligned.h>

static unsigned int mrp_join_time __read_mostly = 200;
module_param(mrp_join_time, uint, 0644);
MODULE_PARM_DESC(mrp_join_time, "Join time in ms (default 200ms)");

static unsigned int mrp_periodic_time __read_mostly = 1000;
module_param(mrp_periodic_time, uint, 0644);
MODULE_PARM_DESC(mrp_periodic_time, "Periodic time in ms (default 1s)");

MODULE_LICENSE("GPL");
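
/* The MRP Applicant state machine, indexed as [current state][event] and
 * yielding the next state. The state names follow the IEEE 802.1Q
 * abbreviations: the first letter is the declaration urgency (V = Very
 * anxious, A = Anxious, Q = Quiet, L = Leaving) and the second the role
 * (O = Observer, P = Passive member, N = New declaration, A = Active
 * member). Entries left at zero (MRP_APPLICANT_INVALID) are combinations
 * that must not occur and are caught by a WARN_ON() in mrp_attr_event().
 */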
static const u8
mrp_applicant_state_table[MRP_APPLICANT_MAX + 1][MRP_EVENT_MAX + 1] = {
	[MRP_APPLICANT_VO] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_VP,
		[MRP_EVENT_LV] = MRP_APPLICANT_VO,
		[MRP_EVENT_TX] = MRP_APPLICANT_VO,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_VO,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_AO,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_VO,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_VO,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_VO,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_VO,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_VO,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VO,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_VO,
	},
	[MRP_APPLICANT_VP] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_VP,
		[MRP_EVENT_LV] = MRP_APPLICANT_VO,
		[MRP_EVENT_TX] = MRP_APPLICANT_AA,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_VP,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_AP,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_VP,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_VP,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_VP,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_VP,
	},
	[MRP_APPLICANT_VN] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_VN,
		[MRP_EVENT_LV] = MRP_APPLICANT_LA,
		[MRP_EVENT_TX] = MRP_APPLICANT_AN,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_VN,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_VN,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_VN,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_VN,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_VN,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_VN,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VN,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_VN,
	},
	[MRP_APPLICANT_AN] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_AN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_AN,
		[MRP_EVENT_LV] = MRP_APPLICANT_LA,
		[MRP_EVENT_TX] = MRP_APPLICANT_QA,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_AN,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_AN,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_AN,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AN,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_AN,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_VN,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_VN,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VN,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_AN,
	},
	[MRP_APPLICANT_AA] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_AA,
		[MRP_EVENT_LV] = MRP_APPLICANT_LA,
		[MRP_EVENT_TX] = MRP_APPLICANT_QA,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_AA,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QA,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_AA,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AA,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_AA,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_AA,
	},
	[MRP_APPLICANT_QA] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_QA,
		[MRP_EVENT_LV] = MRP_APPLICANT_LA,
		[MRP_EVENT_TX] = MRP_APPLICANT_QA,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_QA,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QA,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_QA,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AA,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_AA,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_AA,
	},
	[MRP_APPLICANT_LA] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_AA,
		[MRP_EVENT_LV] = MRP_APPLICANT_LA,
		[MRP_EVENT_TX] = MRP_APPLICANT_VO,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_LA,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_LA,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_LA,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_LA,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_LA,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_LA,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_LA,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_LA,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_LA,
	},
	[MRP_APPLICANT_AO] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_AP,
		[MRP_EVENT_LV] = MRP_APPLICANT_AO,
		[MRP_EVENT_TX] = MRP_APPLICANT_AO,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_AO,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QO,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_AO,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AO,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_AO,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_VO,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_VO,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VO,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_AO,
	},
	[MRP_APPLICANT_QO] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_QP,
		[MRP_EVENT_LV] = MRP_APPLICANT_QO,
		[MRP_EVENT_TX] = MRP_APPLICANT_QO,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_QO,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QO,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_QO,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AO,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_AO,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_VO,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_VO,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VO,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_QO,
	},
	[MRP_APPLICANT_AP] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_AP,
		[MRP_EVENT_LV] = MRP_APPLICANT_AO,
		[MRP_EVENT_TX] = MRP_APPLICANT_QA,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_AP,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QP,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_AP,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AP,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_AP,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_AP,
	},
	[MRP_APPLICANT_QP] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_QP,
		[MRP_EVENT_LV] = MRP_APPLICANT_QO,
		[MRP_EVENT_TX] = MRP_APPLICANT_QP,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_QP,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QP,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_QP,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AP,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_AP,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_AP,
	},
};
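
/* What to encode for an attribute when a transmit opportunity (TX event)
 * arrives, per applicant state. The *_OPTIONAL actions may be omitted when
 * there is nothing useful to say; this applicant-only implementation treats
 * them as "send nothing" (see the TX handling in mrp_attr_event()).
 */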
static const u8
mrp_tx_action_table[MRP_APPLICANT_MAX + 1] = {
	[MRP_APPLICANT_VO] = MRP_TX_ACTION_S_IN_OPTIONAL,
	[MRP_APPLICANT_VP] = MRP_TX_ACTION_S_JOIN_IN,
	[MRP_APPLICANT_VN] = MRP_TX_ACTION_S_NEW,
	[MRP_APPLICANT_AN] = MRP_TX_ACTION_S_NEW,
	[MRP_APPLICANT_AA] = MRP_TX_ACTION_S_JOIN_IN,
	[MRP_APPLICANT_QA] = MRP_TX_ACTION_S_JOIN_IN_OPTIONAL,
	[MRP_APPLICANT_LA] = MRP_TX_ACTION_S_LV,
	[MRP_APPLICANT_AO] = MRP_TX_ACTION_S_IN_OPTIONAL,
	[MRP_APPLICANT_QO] = MRP_TX_ACTION_S_IN_OPTIONAL,
	[MRP_APPLICANT_AP] = MRP_TX_ACTION_S_JOIN_IN,
	[MRP_APPLICANT_QP] = MRP_TX_ACTION_S_IN_OPTIONAL,
};
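
/* Attribute values are treated as fixed-width, big-endian integers when they
 * have to be "incremented" to continue a Vector. For example, with len == 2,
 * the value {0x00, 0xff} increments to {0x01, 0x00}.
 */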
static void mrp_attrvalue_inc(void *value, u8 len)
{
	u8 *v = (u8 *)value;

	/* Add 1 to the last byte. If it becomes zero,
	 * go to the previous byte and repeat.
	 */
	while (len > 0 && !++v[--len])
		;
}

static int mrp_attr_cmp(const struct mrp_attr *attr,
			const void *value, u8 len, u8 type)
{
	if (attr->type != type)
		return attr->type - type;
	if (attr->len != len)
		return attr->len - len;
	return memcmp(attr->value, value, len);
}

static struct mrp_attr *mrp_attr_lookup(const struct mrp_applicant *app,
					const void *value, u8 len, u8 type)
{
	struct rb_node *parent = app->mad.rb_node;
	struct mrp_attr *attr;
	int d;

	while (parent) {
		attr = rb_entry(parent, struct mrp_attr, node);
		d = mrp_attr_cmp(attr, value, len, type);
		if (d > 0)
			parent = parent->rb_left;
		else if (d < 0)
			parent = parent->rb_right;
		else
			return attr;
	}
	return NULL;
}

static struct mrp_attr *mrp_attr_create(struct mrp_applicant *app,
					const void *value, u8 len, u8 type)
{
	struct rb_node *parent = NULL, **p = &app->mad.rb_node;
	struct mrp_attr *attr;
	int d;

	while (*p) {
		parent = *p;
		attr = rb_entry(parent, struct mrp_attr, node);
		d = mrp_attr_cmp(attr, value, len, type);
		if (d > 0)
			p = &parent->rb_left;
		else if (d < 0)
			p = &parent->rb_right;
		else {
			/* The attribute already exists; re-use it. */
			return attr;
		}
	}
	attr = kmalloc(sizeof(*attr) + len, GFP_ATOMIC);
	if (!attr)
		return attr;
	attr->state = MRP_APPLICANT_VO;
	attr->type = type;
	attr->len = len;
	memcpy(attr->value, value, len);

	rb_link_node(&attr->node, parent, p);
	rb_insert_color(&attr->node, &app->mad);
	return attr;
}

static void mrp_attr_destroy(struct mrp_applicant *app, struct mrp_attr *attr)
{
	rb_erase(&attr->node, &app->mad);
	kfree(attr);
}
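
/* PDU construction. An MRPDU consists of a one-byte protocol version
 * followed by one or more Messages (attribute type and length, then a list
 * of VectorAttributes); each Message's VectorAttribute list and the PDU
 * itself are terminated by an EndMark (two zero bytes). A VectorAttribute
 * carries a LengthAndFlags field, the value of the first attribute it
 * describes, and a Vector of packed events. The helpers below append these
 * pieces to app->pdu, flushing the skb to the transmit queue whenever it
 * runs out of tailroom.
 */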
static int mrp_pdu_init(struct mrp_applicant *app)
{
	struct sk_buff *skb;
	struct mrp_pdu_hdr *ph;

	skb = alloc_skb(app->dev->mtu + LL_RESERVED_SPACE(app->dev),
			GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb->dev = app->dev;
	skb->protocol = app->app->pkttype.type;
	skb_reserve(skb, LL_RESERVED_SPACE(app->dev));
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	ph = __skb_put(skb, sizeof(*ph));
	ph->version = app->app->version;

	app->pdu = skb;
	return 0;
}

static int mrp_pdu_append_end_mark(struct mrp_applicant *app)
{
	__be16 *endmark;

	if (skb_tailroom(app->pdu) < sizeof(*endmark))
		return -1;
	endmark = __skb_put(app->pdu, sizeof(*endmark));
	put_unaligned(MRP_END_MARK, endmark);
	return 0;
}

static void mrp_pdu_queue(struct mrp_applicant *app)
{
	if (!app->pdu)
		return;

	if (mrp_cb(app->pdu)->mh)
		mrp_pdu_append_end_mark(app);
	mrp_pdu_append_end_mark(app);

	dev_hard_header(app->pdu, app->dev, ntohs(app->app->pkttype.type),
			app->app->group_address, app->dev->dev_addr,
			app->pdu->len);

	skb_queue_tail(&app->queue, app->pdu);
	app->pdu = NULL;
}

static void mrp_queue_xmit(struct mrp_applicant *app)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&app->queue)))
		dev_queue_xmit(skb);
}

static int mrp_pdu_append_msg_hdr(struct mrp_applicant *app,
				  u8 attrtype, u8 attrlen)
{
	struct mrp_msg_hdr *mh;

	if (mrp_cb(app->pdu)->mh) {
		if (mrp_pdu_append_end_mark(app) < 0)
			return -1;
		mrp_cb(app->pdu)->mh = NULL;
		mrp_cb(app->pdu)->vah = NULL;
	}

	if (skb_tailroom(app->pdu) < sizeof(*mh))
		return -1;
	mh = __skb_put(app->pdu, sizeof(*mh));
	mh->attrtype = attrtype;
	mh->attrlen = attrlen;
	mrp_cb(app->pdu)->mh = mh;
	return 0;
}

static int mrp_pdu_append_vecattr_hdr(struct mrp_applicant *app,
				      const void *firstattrvalue, u8 attrlen)
{
	struct mrp_vecattr_hdr *vah;

	if (skb_tailroom(app->pdu) < sizeof(*vah) + attrlen)
		return -1;
	vah = __skb_put(app->pdu, sizeof(*vah) + attrlen);
	put_unaligned(0, &vah->lenflags);
	memcpy(vah->firstattrvalue, firstattrvalue, attrlen);
	mrp_cb(app->pdu)->vah = vah;
	memcpy(mrp_cb(app->pdu)->attrvalue, firstattrvalue, attrlen);
	return 0;
}
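
/* Append one more event to the current VectorAttribute. Events are packed
 * three to a byte: a Vector byte holds e1 * N * N + e2 * N + e3, where N is
 * __MRP_VECATTR_EVENT_MAX (the six attribute events) and e1..e3 are the
 * events for three attributes with consecutive values.
 */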
static int mrp_pdu_append_vecattr_event(struct mrp_applicant *app,
					const struct mrp_attr *attr,
					enum mrp_vecattr_event vaevent)
{
	u16 len, pos;
	u8 *vaevents;
	int err;
again:
	if (!app->pdu) {
		err = mrp_pdu_init(app);
		if (err < 0)
			return err;
	}

	/* If there is no Message header in the PDU, or the Message header is
	 * for a different attribute type, add an EndMark (if necessary) and a
	 * new Message header to the PDU.
	 */
	if (!mrp_cb(app->pdu)->mh ||
	    mrp_cb(app->pdu)->mh->attrtype != attr->type ||
	    mrp_cb(app->pdu)->mh->attrlen != attr->len) {
		if (mrp_pdu_append_msg_hdr(app, attr->type, attr->len) < 0)
			goto queue;
	}

	/* If there is no VectorAttribute header for this Message in the PDU,
	 * or this attribute's value does not sequentially follow the previous
	 * attribute's value, add a new VectorAttribute header to the PDU.
	 */
	if (!mrp_cb(app->pdu)->vah ||
	    memcmp(mrp_cb(app->pdu)->attrvalue, attr->value, attr->len)) {
		if (mrp_pdu_append_vecattr_hdr(app, attr->value, attr->len) < 0)
			goto queue;
	}

	len = be16_to_cpu(get_unaligned(&mrp_cb(app->pdu)->vah->lenflags));
	pos = len % 3;

	/* Events are packed into Vectors in the PDU, three to a byte. Add a
	 * byte to the end of the Vector if necessary.
	 */
	if (!pos) {
		if (skb_tailroom(app->pdu) < sizeof(u8))
			goto queue;
		vaevents = __skb_put(app->pdu, sizeof(u8));
	} else {
		vaevents = (u8 *)(skb_tail_pointer(app->pdu) - sizeof(u8));
	}

	switch (pos) {
	case 0:
		*vaevents = vaevent * (__MRP_VECATTR_EVENT_MAX *
				       __MRP_VECATTR_EVENT_MAX);
		break;
	case 1:
		*vaevents += vaevent * __MRP_VECATTR_EVENT_MAX;
		break;
	case 2:
		*vaevents += vaevent;
		break;
	default:
		WARN_ON(1);
	}

	/* Increment the length of the VectorAttribute in the PDU, as well as
	 * the value of the next attribute that would continue its Vector.
	 */
	put_unaligned(cpu_to_be16(++len), &mrp_cb(app->pdu)->vah->lenflags);
	mrp_attrvalue_inc(mrp_cb(app->pdu)->attrvalue, attr->len);

	return 0;

queue:
	mrp_pdu_queue(app);
	goto again;
}

static void mrp_attr_event(struct mrp_applicant *app,
			   struct mrp_attr *attr, enum mrp_event event)
{
	enum mrp_applicant_state state;

	state = mrp_applicant_state_table[attr->state][event];
	if (state == MRP_APPLICANT_INVALID) {
		WARN_ON(1);
		return;
	}

	if (event == MRP_EVENT_TX) {
		/* When appending the attribute fails, don't update its state
		 * in order to retry at the next TX event.
		 */

		switch (mrp_tx_action_table[attr->state]) {
		case MRP_TX_ACTION_NONE:
		case MRP_TX_ACTION_S_JOIN_IN_OPTIONAL:
		case MRP_TX_ACTION_S_IN_OPTIONAL:
			break;
		case MRP_TX_ACTION_S_NEW:
			if (mrp_pdu_append_vecattr_event(
				    app, attr, MRP_VECATTR_EVENT_NEW) < 0)
				return;
			break;
		case MRP_TX_ACTION_S_JOIN_IN:
			if (mrp_pdu_append_vecattr_event(
				    app, attr, MRP_VECATTR_EVENT_JOIN_IN) < 0)
				return;
			break;
		case MRP_TX_ACTION_S_LV:
			if (mrp_pdu_append_vecattr_event(
				    app, attr, MRP_VECATTR_EVENT_LV) < 0)
				return;
			/* As a pure applicant, sending a leave message
			 * implies that the attribute was unregistered and
			 * can be destroyed.
			 */
			mrp_attr_destroy(app, attr);
			return;
		default:
			WARN_ON(1);
		}
	}

	attr->state = state;
}
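
/* Entry points for MRP applications (e.g. MVRP) to declare or withdraw an
 * attribute on a port. Callers must hold rtnl_lock, since the port and
 * applicant pointers are fetched with rtnl_dereference(); the attribute
 * tree itself is protected by app->lock.
 */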
int mrp_request_join(const struct net_device *dev,
		     const struct mrp_application *appl,
		     const void *value, u8 len, u8 type)
{
	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
	struct mrp_applicant *app = rtnl_dereference(
		port->applicants[appl->type]);
	struct mrp_attr *attr;

	if (sizeof(struct mrp_skb_cb) + len >
	    FIELD_SIZEOF(struct sk_buff, cb))
		return -ENOMEM;

	spin_lock_bh(&app->lock);
	attr = mrp_attr_create(app, value, len, type);
	if (!attr) {
		spin_unlock_bh(&app->lock);
		return -ENOMEM;
	}
	mrp_attr_event(app, attr, MRP_EVENT_JOIN);
	spin_unlock_bh(&app->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(mrp_request_join);

void mrp_request_leave(const struct net_device *dev,
		       const struct mrp_application *appl,
		       const void *value, u8 len, u8 type)
{
	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
	struct mrp_applicant *app = rtnl_dereference(
		port->applicants[appl->type]);
	struct mrp_attr *attr;

	if (sizeof(struct mrp_skb_cb) + len >
	    FIELD_SIZEOF(struct sk_buff, cb))
		return;

	spin_lock_bh(&app->lock);
	attr = mrp_attr_lookup(app, value, len, type);
	if (!attr) {
		spin_unlock_bh(&app->lock);
		return;
	}
	mrp_attr_event(app, attr, MRP_EVENT_LV);
	spin_unlock_bh(&app->lock);
}
EXPORT_SYMBOL_GPL(mrp_request_leave);

static void mrp_mad_event(struct mrp_applicant *app, enum mrp_event event)
{
	struct rb_node *node, *next;
	struct mrp_attr *attr;

	for (node = rb_first(&app->mad);
	     next = node ? rb_next(node) : NULL, node != NULL;
	     node = next) {
		attr = rb_entry(node, struct mrp_attr, node);
		mrp_attr_event(app, attr, event);
	}
}

static void mrp_join_timer_arm(struct mrp_applicant *app)
{
	unsigned long delay;

	delay = (u64)msecs_to_jiffies(mrp_join_time) * prandom_u32() >> 32;
	mod_timer(&app->join_timer, jiffies + delay);
}

static void mrp_join_timer(struct timer_list *t)
{
	struct mrp_applicant *app = from_timer(app, t, join_timer);

	spin_lock(&app->lock);
	mrp_mad_event(app, MRP_EVENT_TX);
	mrp_pdu_queue(app);
	spin_unlock(&app->lock);

	mrp_queue_xmit(app);
	mrp_join_timer_arm(app);
}

static void mrp_periodic_timer_arm(struct mrp_applicant *app)
{
	mod_timer(&app->periodic_timer,
		  jiffies + msecs_to_jiffies(mrp_periodic_time));
}

static void mrp_periodic_timer(struct timer_list *t)
{
	struct mrp_applicant *app = from_timer(app, t, periodic_timer);

	spin_lock(&app->lock);
	mrp_mad_event(app, MRP_EVENT_PERIODIC);
	mrp_pdu_queue(app);
	spin_unlock(&app->lock);

	mrp_periodic_timer_arm(app);
}
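
/* Receive path. Incoming PDUs are parsed under app->lock and only drive the
 * applicant state machine: as a pure applicant there is no registrar, so
 * events for attributes we have not declared ourselves are ignored
 * (mrp_pdu_parse_vecattr_event() acts only on attributes already in the
 * tree).
 */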
static int mrp_pdu_parse_end_mark(struct sk_buff *skb, int *offset)
{
	__be16 endmark;

	if (skb_copy_bits(skb, *offset, &endmark, sizeof(endmark)) < 0)
		return -1;
	if (endmark == MRP_END_MARK) {
		*offset += sizeof(endmark);
		return -1;
	}
	return 0;
}

static void mrp_pdu_parse_vecattr_event(struct mrp_applicant *app,
					struct sk_buff *skb,
					enum mrp_vecattr_event vaevent)
{
	struct mrp_attr *attr;
	enum mrp_event event;

	attr = mrp_attr_lookup(app, mrp_cb(skb)->attrvalue,
			       mrp_cb(skb)->mh->attrlen,
			       mrp_cb(skb)->mh->attrtype);
	if (attr == NULL)
		return;

	switch (vaevent) {
	case MRP_VECATTR_EVENT_NEW:
		event = MRP_EVENT_R_NEW;
		break;
	case MRP_VECATTR_EVENT_JOIN_IN:
		event = MRP_EVENT_R_JOIN_IN;
		break;
	case MRP_VECATTR_EVENT_IN:
		event = MRP_EVENT_R_IN;
		break;
	case MRP_VECATTR_EVENT_JOIN_MT:
		event = MRP_EVENT_R_JOIN_MT;
		break;
	case MRP_VECATTR_EVENT_MT:
		event = MRP_EVENT_R_MT;
		break;
	case MRP_VECATTR_EVENT_LV:
		event = MRP_EVENT_R_LV;
		break;
	default:
		return;
	}

	mrp_attr_event(app, attr, event);
}

static int mrp_pdu_parse_vecattr(struct mrp_applicant *app,
				 struct sk_buff *skb, int *offset)
{
	struct mrp_vecattr_hdr _vah;
	u16 valen;
	u8 vaevents, vaevent;

	mrp_cb(skb)->vah = skb_header_pointer(skb, *offset, sizeof(_vah),
					      &_vah);
	if (!mrp_cb(skb)->vah)
		return -1;
	*offset += sizeof(_vah);

	if (get_unaligned(&mrp_cb(skb)->vah->lenflags) &
	    MRP_VECATTR_HDR_FLAG_LA)
		mrp_mad_event(app, MRP_EVENT_R_LA);
	valen = be16_to_cpu(get_unaligned(&mrp_cb(skb)->vah->lenflags) &
			    MRP_VECATTR_HDR_LEN_MASK);

	/* The VectorAttribute structure in a PDU carries event information
	 * about one or more attributes having consecutive values. Only the
	 * value for the first attribute is contained in the structure. So
	 * we make a copy of that value, and then increment it each time we
	 * advance to the next event in its Vector.
	 */
	if (sizeof(struct mrp_skb_cb) + mrp_cb(skb)->mh->attrlen >
	    FIELD_SIZEOF(struct sk_buff, cb))
		return -1;
	if (skb_copy_bits(skb, *offset, mrp_cb(skb)->attrvalue,
			  mrp_cb(skb)->mh->attrlen) < 0)
		return -1;
	*offset += mrp_cb(skb)->mh->attrlen;

	/* In a VectorAttribute, the Vector contains events which are packed
	 * three to a byte. We process one byte of the Vector at a time.
	 */
	while (valen > 0) {
		if (skb_copy_bits(skb, *offset, &vaevents,
				  sizeof(vaevents)) < 0)
			return -1;
		*offset += sizeof(vaevents);

		/* Extract and process the first event. */
		vaevent = vaevents / (__MRP_VECATTR_EVENT_MAX *
				      __MRP_VECATTR_EVENT_MAX);
		if (vaevent >= __MRP_VECATTR_EVENT_MAX) {
			/* The byte is malformed; stop processing. */
			return -1;
		}
		mrp_pdu_parse_vecattr_event(app, skb, vaevent);

		/* If present, extract and process the second event. */
		if (!--valen)
			break;
		mrp_attrvalue_inc(mrp_cb(skb)->attrvalue,
				  mrp_cb(skb)->mh->attrlen);
		vaevents %= (__MRP_VECATTR_EVENT_MAX *
			     __MRP_VECATTR_EVENT_MAX);
		vaevent = vaevents / __MRP_VECATTR_EVENT_MAX;
		mrp_pdu_parse_vecattr_event(app, skb, vaevent);

		/* If present, extract and process the third event. */
		if (!--valen)
			break;
		mrp_attrvalue_inc(mrp_cb(skb)->attrvalue,
				  mrp_cb(skb)->mh->attrlen);
		vaevents %= __MRP_VECATTR_EVENT_MAX;
		vaevent = vaevents;
		mrp_pdu_parse_vecattr_event(app, skb, vaevent);
	}
	return 0;
}

static int mrp_pdu_parse_msg(struct mrp_applicant *app, struct sk_buff *skb,
			     int *offset)
{
	struct mrp_msg_hdr _mh;

	mrp_cb(skb)->mh = skb_header_pointer(skb, *offset, sizeof(_mh), &_mh);
	if (!mrp_cb(skb)->mh)
		return -1;
	*offset += sizeof(_mh);

	if (mrp_cb(skb)->mh->attrtype == 0 ||
	    mrp_cb(skb)->mh->attrtype > app->app->maxattr ||
	    mrp_cb(skb)->mh->attrlen == 0)
		return -1;

	while (skb->len > *offset) {
		if (mrp_pdu_parse_end_mark(skb, offset) < 0)
			break;
		if (mrp_pdu_parse_vecattr(app, skb, offset) < 0)
			return -1;
	}
	return 0;
}

static int mrp_rcv(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pt, struct net_device *orig_dev)
{
	struct mrp_application *appl = container_of(pt, struct mrp_application,
						    pkttype);
	struct mrp_port *port;
	struct mrp_applicant *app;
	struct mrp_pdu_hdr _ph;
	const struct mrp_pdu_hdr *ph;
	int offset = skb_network_offset(skb);

	/* If the interface is in promiscuous mode, drop the packet if
	 * it was unicast to another host.
	 */
	if (unlikely(skb->pkt_type == PACKET_OTHERHOST))
		goto out;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		goto out;
	port = rcu_dereference(dev->mrp_port);
	if (unlikely(!port))
		goto out;
	app = rcu_dereference(port->applicants[appl->type]);
	if (unlikely(!app))
		goto out;

	ph = skb_header_pointer(skb, offset, sizeof(_ph), &_ph);
	if (!ph)
		goto out;
	offset += sizeof(_ph);

	if (ph->version != app->app->version)
		goto out;

	spin_lock(&app->lock);
	while (skb->len > offset) {
		if (mrp_pdu_parse_end_mark(skb, &offset) < 0)
			break;
		if (mrp_pdu_parse_msg(app, skb, &offset) < 0)
			break;
	}
	spin_unlock(&app->lock);
out:
	kfree_skb(skb);
	return 0;
}

static int mrp_init_port(struct net_device *dev)
{
	struct mrp_port *port;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;
	rcu_assign_pointer(dev->mrp_port, port);
	return 0;
}

static void mrp_release_port(struct net_device *dev)
{
	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
	unsigned int i;

	for (i = 0; i <= MRP_APPLICATION_MAX; i++) {
		if (rtnl_dereference(port->applicants[i]))
			return;
	}
	RCU_INIT_POINTER(dev->mrp_port, NULL);
	kfree_rcu(port, rcu);
}
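
/* Per-device applicant lifecycle. mrp_init_applicant() is called under RTNL
 * by an application for each device it runs on: it allocates the shared
 * mrp_port structure on first use, subscribes the application's group MAC
 * address and starts the join and periodic timers. mrp_uninit_applicant()
 * reverses this, flushing any pending declarations with a final TX event.
 */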
int mrp_init_applicant(struct net_device *dev, struct mrp_application *appl)
{
	struct mrp_applicant *app;
	int err;

	ASSERT_RTNL();

	if (!rtnl_dereference(dev->mrp_port)) {
		err = mrp_init_port(dev);
		if (err < 0)
			goto err1;
	}

	err = -ENOMEM;
	app = kzalloc(sizeof(*app), GFP_KERNEL);
	if (!app)
		goto err2;

	err = dev_mc_add(dev, appl->group_address);
	if (err < 0)
		goto err3;

	app->dev = dev;
	app->app = appl;
	app->mad = RB_ROOT;
	spin_lock_init(&app->lock);
	skb_queue_head_init(&app->queue);
	rcu_assign_pointer(dev->mrp_port->applicants[appl->type], app);
	timer_setup(&app->join_timer, mrp_join_timer, 0);
	mrp_join_timer_arm(app);
	timer_setup(&app->periodic_timer, mrp_periodic_timer, 0);
	mrp_periodic_timer_arm(app);
	return 0;

err3:
	kfree(app);
err2:
	mrp_release_port(dev);
err1:
	return err;
}
EXPORT_SYMBOL_GPL(mrp_init_applicant);

void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
{
	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
	struct mrp_applicant *app = rtnl_dereference(
		port->applicants[appl->type]);

	ASSERT_RTNL();

	RCU_INIT_POINTER(port->applicants[appl->type], NULL);

	/* Delete timer and generate a final TX event to flush out
	 * all pending messages before the applicant is gone.
	 */
	del_timer_sync(&app->join_timer);
	del_timer_sync(&app->periodic_timer);

	spin_lock_bh(&app->lock);
	mrp_mad_event(app, MRP_EVENT_TX);
	mrp_pdu_queue(app);
	spin_unlock_bh(&app->lock);

	mrp_queue_xmit(app);

	dev_mc_del(dev, appl->group_address);
	kfree_rcu(app, rcu);
	mrp_release_port(dev);
}
EXPORT_SYMBOL_GPL(mrp_uninit_applicant);

int mrp_register_application(struct mrp_application *appl)
{
	appl->pkttype.func = mrp_rcv;
	dev_add_pack(&appl->pkttype);
	return 0;
}
EXPORT_SYMBOL_GPL(mrp_register_application);

void mrp_unregister_application(struct mrp_application *appl)
{
	dev_remove_pack(&appl->pkttype);
}
EXPORT_SYMBOL_GPL(mrp_unregister_application);
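
/*
 * Illustrative usage sketch (comment only): an MRP application such as MVRP
 * plugs into this core roughly as follows. The struct mrp_application fields
 * are the ones dereferenced above; the MVRP-specific constants and the
 * two-byte VID attribute are shown as an assumed example, not copied from
 * that code.
 *
 *	static struct mrp_application example_app __read_mostly = {
 *		.type		= MRP_APPLICATION_MVRP,
 *		.maxattr	= 1,	(highest attribute type accepted on rx)
 *		.pkttype.type	= htons(ETH_P_MVRP),
 *		.group_address	= { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x21 },
 *		.version	= 0,
 *	};
 *
 *	Module init/exit:
 *		mrp_register_application(&example_app);
 *		mrp_unregister_application(&example_app);
 *
 *	Per device, under rtnl_lock:
 *		mrp_init_applicant(dev, &example_app);
 *		mrp_uninit_applicant(dev, &example_app);
 *
 *	Declaring and withdrawing an attribute (here a 16-bit VLAN ID with
 *	attribute type 1):
 *		__be16 vid = htons(100);
 *		mrp_request_join(dev, &example_app, &vid, sizeof(vid), 1);
 *		mrp_request_leave(dev, &example_app, &vid, sizeof(vid), 1);
 */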