// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * Author(s):
 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
 *
 * Routines for handling Netlink messages for HSR.
 */

#include "hsr_netlink.h"
#include <linux/kernel.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
#include "hsr_main.h"
#include "hsr_device.h"
#include "hsr_framereg.h"

static const struct nla_policy hsr_policy[IFLA_HSR_MAX + 1] = {
	[IFLA_HSR_SLAVE1] = { .type = NLA_U32 },
	[IFLA_HSR_SLAVE2] = { .type = NLA_U32 },
	[IFLA_HSR_MULTICAST_SPEC] = { .type = NLA_U8 },
	[IFLA_HSR_VERSION] = { .type = NLA_U8 },
	[IFLA_HSR_SUPERVISION_ADDR] = { .len = ETH_ALEN },
	[IFLA_HSR_SEQ_NR] = { .type = NLA_U16 },
};

/* Here, it seems a netdevice has already been allocated for us, and the
 * hsr_dev_setup routine has been executed. Nice!
 */
static int hsr_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	struct net_device *link[2];
	unsigned char multicast_spec, hsr_version;

	if (!data) {
		netdev_info(dev, "HSR: No slave devices specified\n");
		return -EINVAL;
	}
	if (!data[IFLA_HSR_SLAVE1]) {
		netdev_info(dev, "HSR: Slave1 device not specified\n");
		return -EINVAL;
	}
	link[0] = __dev_get_by_index(src_net,
				     nla_get_u32(data[IFLA_HSR_SLAVE1]));
	if (!data[IFLA_HSR_SLAVE2]) {
		netdev_info(dev, "HSR: Slave2 device not specified\n");
		return -EINVAL;
	}
	link[1] = __dev_get_by_index(src_net,
				     nla_get_u32(data[IFLA_HSR_SLAVE2]));

	if (!link[0] || !link[1])
		return -ENODEV;
	if (link[0] == link[1])
		return -EINVAL;

	if (!data[IFLA_HSR_MULTICAST_SPEC])
		multicast_spec = 0;
	else
		multicast_spec = nla_get_u8(data[IFLA_HSR_MULTICAST_SPEC]);

	if (!data[IFLA_HSR_VERSION])
		hsr_version = 0;
	else
		hsr_version = nla_get_u8(data[IFLA_HSR_VERSION]);

	return hsr_dev_finalize(dev, link, multicast_spec, hsr_version);
}

static void hsr_dellink(struct net_device *hsr_dev, struct list_head *head)
{
	hsr_dev_destroy(hsr_dev);
	unregister_netdevice_queue(hsr_dev, head);
}

static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct hsr_priv *hsr;
	struct hsr_port *port;
	int res;

	hsr = netdev_priv(dev);

	res = 0;

	rcu_read_lock();
	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (port)
		res = nla_put_u32(skb, IFLA_HSR_SLAVE1, port->dev->ifindex);
	rcu_read_unlock();
	if (res)
		goto nla_put_failure;

	rcu_read_lock();
	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
	if (port)
		res = nla_put_u32(skb, IFLA_HSR_SLAVE2, port->dev->ifindex);
	rcu_read_unlock();
	if (res)
		goto nla_put_failure;

	if (nla_put(skb, IFLA_HSR_SUPERVISION_ADDR, ETH_ALEN,
		    hsr->sup_multicast_addr) ||
	    nla_put_u16(skb, IFLA_HSR_SEQ_NR, hsr->sequence_nr))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops hsr_link_ops __read_mostly = {
	.kind = "hsr",
	.maxtype = IFLA_HSR_MAX,
	.policy = hsr_policy,
	.priv_size = sizeof(struct hsr_priv),
	.setup = hsr_dev_setup,
	.newlink = hsr_newlink,
	.dellink = hsr_dellink,
	.fill_info = hsr_fill_info,
};
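
/* With the rtnl_link_ops above registered, an HSR interface is normally
 * created from userspace via RTM_NEWLINK. With a reasonably recent iproute2
 * that looks roughly like the following (device names are examples only):
 *
 *	ip link add name hsr0 type hsr slave1 eth0 slave2 eth1 \
 *		supervision 45 version 1
 *
 * where "supervision" is carried as IFLA_HSR_MULTICAST_SPEC and "version"
 * as IFLA_HSR_VERSION, both validated against hsr_policy above.
 */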
/* attribute policy */
static const struct nla_policy hsr_genl_policy[HSR_A_MAX + 1] = {
	[HSR_A_NODE_ADDR] = { .len = ETH_ALEN },
	[HSR_A_NODE_ADDR_B] = { .len = ETH_ALEN },
	[HSR_A_IFINDEX] = { .type = NLA_U32 },
	[HSR_A_IF1_AGE] = { .type = NLA_U32 },
	[HSR_A_IF2_AGE] = { .type = NLA_U32 },
	[HSR_A_IF1_SEQ] = { .type = NLA_U16 },
	[HSR_A_IF2_SEQ] = { .type = NLA_U16 },
};

static struct genl_family hsr_genl_family;

static const struct genl_multicast_group hsr_mcgrps[] = {
	{ .name = "hsr-network", },
};

/* This is called if for some node with MAC address addr, we only get frames
 * over one of the slave interfaces. This would indicate an open network ring
 * (i.e. a link has failed somewhere).
 */
void hsr_nl_ringerror(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN],
		      struct hsr_port *port)
{
	struct sk_buff *skb;
	void *msg_head;
	struct hsr_port *master;
	int res;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!skb)
		goto fail;

	msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0,
			       HSR_C_RING_ERROR);
	if (!msg_head)
		goto nla_put_failure;

	res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
	if (res < 0)
		goto nla_put_failure;

	res = nla_put_u32(skb, HSR_A_IFINDEX, port->dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);

	return;

nla_put_failure:
	kfree_skb(skb);

fail:
	rcu_read_lock();
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	netdev_warn(master->dev, "Could not send HSR ring error message\n");
	rcu_read_unlock();
}

/* This is called when we haven't heard from the node with MAC address addr for
 * some time (just before the node is removed from the node table/list).
 */
void hsr_nl_nodedown(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN])
{
	struct sk_buff *skb;
	void *msg_head;
	struct hsr_port *master;
	int res;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!skb)
		goto fail;

	msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0, HSR_C_NODE_DOWN);
	if (!msg_head)
		goto nla_put_failure;

	res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
	if (res < 0)
		goto nla_put_failure;

	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);

	return;

nla_put_failure:
	kfree_skb(skb);

fail:
	rcu_read_lock();
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	netdev_warn(master->dev, "Could not send HSR node down\n");
	rcu_read_unlock();
}
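
/* Both notification helpers above multicast their message to group index 0,
 * i.e. the "hsr-network" group declared in hsr_mcgrps[]. A userspace monitor
 * only needs to resolve that group (for example with libnl's
 * genl_ctrl_resolve_grp() followed by nl_socket_add_membership()) to receive
 * HSR_C_RING_ERROR and HSR_C_NODE_DOWN events.
 */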
/* HSR_C_GET_NODE_STATUS lets userspace query the internal HSR node table
 * about the status of a specific node in the network, defined by its MAC
 * address.
 *
 * Input: hsr ifindex, node mac address
 * Output: hsr ifindex, node mac address (copied from request),
 *	   age of latest frame from node over slave 1, slave 2 [ms]
 */
static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
{
	/* For receiving */
	struct nlattr *na;
	struct net_device *hsr_dev;

	/* For sending */
	struct sk_buff *skb_out;
	void *msg_head;
	struct hsr_priv *hsr;
	struct hsr_port *port;
	unsigned char hsr_node_addr_b[ETH_ALEN];
	int hsr_node_if1_age;
	u16 hsr_node_if1_seq;
	int hsr_node_if2_age;
	u16 hsr_node_if2_seq;
	int addr_b_ifindex;
	int res;

	if (!info)
		goto invalid;

	na = info->attrs[HSR_A_IFINDEX];
	if (!na)
		goto invalid;
	na = info->attrs[HSR_A_NODE_ADDR];
	if (!na)
		goto invalid;

	hsr_dev = __dev_get_by_index(genl_info_net(info),
				     nla_get_u32(info->attrs[HSR_A_IFINDEX]));
	if (!hsr_dev)
		goto invalid;
	if (!is_hsr_master(hsr_dev))
		goto invalid;

	/* Send reply */
	skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb_out) {
		res = -ENOMEM;
		goto fail;
	}

	msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
			       info->snd_seq, &hsr_genl_family, 0,
			       HSR_C_SET_NODE_STATUS);
	if (!msg_head) {
		res = -ENOMEM;
		goto nla_put_failure;
	}

	res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	hsr = netdev_priv(hsr_dev);
	res = hsr_get_node_data(hsr,
				(unsigned char *)
				nla_data(info->attrs[HSR_A_NODE_ADDR]),
				hsr_node_addr_b,
				&addr_b_ifindex,
				&hsr_node_if1_age,
				&hsr_node_if1_seq,
				&hsr_node_if2_age,
				&hsr_node_if2_seq);
	if (res < 0)
		goto nla_put_failure;

	res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN,
		      nla_data(info->attrs[HSR_A_NODE_ADDR]));
	if (res < 0)
		goto nla_put_failure;

	if (addr_b_ifindex > -1) {
		res = nla_put(skb_out, HSR_A_NODE_ADDR_B, ETH_ALEN,
			      hsr_node_addr_b);
		if (res < 0)
			goto nla_put_failure;

		res = nla_put_u32(skb_out, HSR_A_ADDR_B_IFINDEX,
				  addr_b_ifindex);
		if (res < 0)
			goto nla_put_failure;
	}

	res = nla_put_u32(skb_out, HSR_A_IF1_AGE, hsr_node_if1_age);
	if (res < 0)
		goto nla_put_failure;
	res = nla_put_u16(skb_out, HSR_A_IF1_SEQ, hsr_node_if1_seq);
	if (res < 0)
		goto nla_put_failure;
	rcu_read_lock();
	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (port)
		res = nla_put_u32(skb_out, HSR_A_IF1_IFINDEX,
				  port->dev->ifindex);
	rcu_read_unlock();
	if (res < 0)
		goto nla_put_failure;

	res = nla_put_u32(skb_out, HSR_A_IF2_AGE, hsr_node_if2_age);
	if (res < 0)
		goto nla_put_failure;
	res = nla_put_u16(skb_out, HSR_A_IF2_SEQ, hsr_node_if2_seq);
	if (res < 0)
		goto nla_put_failure;
	rcu_read_lock();
	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
	if (port)
		res = nla_put_u32(skb_out, HSR_A_IF2_IFINDEX,
				  port->dev->ifindex);
	rcu_read_unlock();
	if (res < 0)
		goto nla_put_failure;

	genlmsg_end(skb_out, msg_head);
	genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);

	return 0;

invalid:
	netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
	return 0;

nla_put_failure:
	kfree_skb(skb_out);
	/* Fall through */

fail:
	return res;
}
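
/* The node-list reply below is built in a single NLMSG_GOODSIZE skb; if the
 * addresses of every known node do not fit, the nla_put() in the loop fails
 * and the handler returns -EMSGSIZE rather than a truncated list.
 */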
/* Get a list of MacAddressA of all nodes known to this node (including self).
 */
static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
{
	/* For receiving */
	struct nlattr *na;
	struct net_device *hsr_dev;

	/* For sending */
	struct sk_buff *skb_out;
	void *msg_head;
	struct hsr_priv *hsr;
	void *pos;
	unsigned char addr[ETH_ALEN];
	int res;

	if (!info)
		goto invalid;

	na = info->attrs[HSR_A_IFINDEX];
	if (!na)
		goto invalid;

	hsr_dev = __dev_get_by_index(genl_info_net(info),
				     nla_get_u32(info->attrs[HSR_A_IFINDEX]));
	if (!hsr_dev)
		goto invalid;
	if (!is_hsr_master(hsr_dev))
		goto invalid;

	/* Send reply */
	skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb_out) {
		res = -ENOMEM;
		goto fail;
	}

	msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
			       info->snd_seq, &hsr_genl_family, 0,
			       HSR_C_SET_NODE_LIST);
	if (!msg_head) {
		res = -ENOMEM;
		goto nla_put_failure;
	}

	res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	hsr = netdev_priv(hsr_dev);

	rcu_read_lock();
	pos = hsr_get_next_node(hsr, NULL, addr);
	while (pos) {
		res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, addr);
		if (res < 0) {
			rcu_read_unlock();
			goto nla_put_failure;
		}
		pos = hsr_get_next_node(hsr, pos, addr);
	}
	rcu_read_unlock();

	genlmsg_end(skb_out, msg_head);
	genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);

	return 0;

invalid:
	netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
	return 0;

nla_put_failure:
	kfree_skb(skb_out);
	/* Fall through */

fail:
	return res;
}

static const struct genl_ops hsr_ops[] = {
	{
		.cmd = HSR_C_GET_NODE_STATUS,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = 0,
		.doit = hsr_get_node_status,
		.dumpit = NULL,
	},
	{
		.cmd = HSR_C_GET_NODE_LIST,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = 0,
		.doit = hsr_get_node_list,
		.dumpit = NULL,
	},
};

static struct genl_family hsr_genl_family __ro_after_init = {
	.hdrsize = 0,
	.name = "HSR",
	.version = 1,
	.maxattr = HSR_A_MAX,
	.policy = hsr_genl_policy,
	.module = THIS_MODULE,
	.ops = hsr_ops,
	.n_ops = ARRAY_SIZE(hsr_ops),
	.mcgrps = hsr_mcgrps,
	.n_mcgrps = ARRAY_SIZE(hsr_mcgrps),
};

int __init hsr_netlink_init(void)
{
	int rc;

	rc = rtnl_link_register(&hsr_link_ops);
	if (rc)
		goto fail_rtnl_link_register;

	rc = genl_register_family(&hsr_genl_family);
	if (rc)
		goto fail_genl_register_family;

	return 0;

fail_genl_register_family:
	rtnl_link_unregister(&hsr_link_ops);
fail_rtnl_link_register:

	return rc;
}

void __exit hsr_netlink_exit(void)
{
	genl_unregister_family(&hsr_genl_family);
	rtnl_link_unregister(&hsr_link_ops);
}

MODULE_ALIAS_RTNL_LINK("hsr");
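
/* Userspace usage sketch (not compiled here): querying the node table over
 * the "HSR" generic netlink family registered above. This is a rough
 * illustration only, assuming libnl-genl-3; the interface name "hsr0" and
 * the omitted error handling are placeholders.
 *
 *	#include <netlink/netlink.h>
 *	#include <netlink/genl/genl.h>
 *	#include <netlink/genl/ctrl.h>
 *	#include <net/if.h>
 *
 *	struct nl_sock *sk = nl_socket_alloc();
 *	struct nl_msg *msg;
 *	int family;
 *
 *	genl_connect(sk);
 *	family = genl_ctrl_resolve(sk, "HSR");
 *
 *	msg = nlmsg_alloc();
 *	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
 *		    HSR_C_GET_NODE_LIST, 1);
 *	nla_put_u32(msg, HSR_A_IFINDEX, if_nametoindex("hsr0"));
 *	nl_send_auto(sk, msg);
 *	nlmsg_free(msg);
 *
 * The kernel answers with an HSR_C_SET_NODE_LIST message carrying one
 * HSR_A_NODE_ADDR attribute per known node; a receive callback installed
 * with nl_socket_modify_cb() can then parse those attributes, and each
 * address can be fed back into an HSR_C_GET_NODE_STATUS request for
 * per-node details.
 */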