// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * Author(s):
 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
 *
 * Routines for handling Netlink messages for HSR.
 */

#include "hsr_netlink.h"
#include <linux/kernel.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
#include "hsr_main.h"
#include "hsr_device.h"
#include "hsr_framereg.h"

static const struct nla_policy hsr_policy[IFLA_HSR_MAX + 1] = {
	[IFLA_HSR_SLAVE1] = { .type = NLA_U32 },
	[IFLA_HSR_SLAVE2] = { .type = NLA_U32 },
	[IFLA_HSR_MULTICAST_SPEC] = { .type = NLA_U8 },
	[IFLA_HSR_VERSION] = { .type = NLA_U8 },
	[IFLA_HSR_SUPERVISION_ADDR] = { .len = ETH_ALEN },
	[IFLA_HSR_SEQ_NR] = { .type = NLA_U16 },
};

/* Here, it seems a netdevice has already been allocated for us, and the
 * hsr_dev_setup routine has been executed. Nice!
 */
static int hsr_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	struct net_device *link[2];
	unsigned char multicast_spec, hsr_version;

	if (!data) {
		NL_SET_ERR_MSG_MOD(extack, "No slave devices specified");
		return -EINVAL;
	}
	if (!data[IFLA_HSR_SLAVE1]) {
		NL_SET_ERR_MSG_MOD(extack, "Slave1 device not specified");
		return -EINVAL;
	}
	link[0] = __dev_get_by_index(src_net,
				     nla_get_u32(data[IFLA_HSR_SLAVE1]));
	if (!link[0]) {
		NL_SET_ERR_MSG_MOD(extack, "Slave1 does not exist");
		return -EINVAL;
	}
	if (!data[IFLA_HSR_SLAVE2]) {
		NL_SET_ERR_MSG_MOD(extack, "Slave2 device not specified");
		return -EINVAL;
	}
	link[1] = __dev_get_by_index(src_net,
				     nla_get_u32(data[IFLA_HSR_SLAVE2]));
	if (!link[1]) {
		NL_SET_ERR_MSG_MOD(extack, "Slave2 does not exist");
		return -EINVAL;
	}

	if (link[0] == link[1]) {
		NL_SET_ERR_MSG_MOD(extack, "Slave1 and Slave2 are same");
		return -EINVAL;
	}

	if (!data[IFLA_HSR_MULTICAST_SPEC])
		multicast_spec = 0;
	else
		multicast_spec = nla_get_u8(data[IFLA_HSR_MULTICAST_SPEC]);

	if (!data[IFLA_HSR_VERSION]) {
		hsr_version = 0;
	} else {
		hsr_version = nla_get_u8(data[IFLA_HSR_VERSION]);
		if (hsr_version > 1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only versions 0..1 are supported");
			return -EINVAL;
		}
	}

	return hsr_dev_finalize(dev, link, multicast_spec, hsr_version, extack);
}

static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct hsr_priv *hsr = netdev_priv(dev);
	struct hsr_port *port;

	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (port) {
		if (nla_put_u32(skb, IFLA_HSR_SLAVE1, port->dev->ifindex))
			goto nla_put_failure;
	}

	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
	if (port) {
		if (nla_put_u32(skb, IFLA_HSR_SLAVE2, port->dev->ifindex))
			goto nla_put_failure;
	}

	if (nla_put(skb, IFLA_HSR_SUPERVISION_ADDR, ETH_ALEN,
		    hsr->sup_multicast_addr) ||
	    nla_put_u16(skb, IFLA_HSR_SEQ_NR, hsr->sequence_nr))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops hsr_link_ops __read_mostly = {
	.kind		= "hsr",
	.maxtype	= IFLA_HSR_MAX,
	.policy		= hsr_policy,
	.priv_size	= sizeof(struct hsr_priv),
	.setup		= hsr_dev_setup,
	.newlink	= hsr_newlink,
	.fill_info	= hsr_fill_info,
};

/* attribute policy */
static const struct nla_policy hsr_genl_policy[HSR_A_MAX + 1] = {
	[HSR_A_NODE_ADDR] = { .len = ETH_ALEN },
	[HSR_A_NODE_ADDR_B] = { .len = ETH_ALEN },
	[HSR_A_IFINDEX] = { .type = NLA_U32 },
	[HSR_A_IF1_AGE] = { .type = NLA_U32 },
	[HSR_A_IF2_AGE] = { .type = NLA_U32 },
	[HSR_A_IF1_SEQ] = { .type = NLA_U16 },
	[HSR_A_IF2_SEQ] = { .type = NLA_U16 },
};

static struct genl_family hsr_genl_family;

static const struct genl_multicast_group hsr_mcgrps[] = {
	{ .name = "hsr-network", },
};

/* This is called if for some node with MAC address addr, we only get frames
 * over one of the slave interfaces. This would indicate an open network ring
 * (i.e. a link has failed somewhere).
 */
void hsr_nl_ringerror(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN],
		      struct hsr_port *port)
{
	struct sk_buff *skb;
	void *msg_head;
	struct hsr_port *master;
	int res;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!skb)
		goto fail;

	msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0,
			       HSR_C_RING_ERROR);
	if (!msg_head)
		goto nla_put_failure;

	res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
	if (res < 0)
		goto nla_put_failure;

	res = nla_put_u32(skb, HSR_A_IFINDEX, port->dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);

	return;

nla_put_failure:
	kfree_skb(skb);

fail:
	rcu_read_lock();
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	netdev_warn(master->dev, "Could not send HSR ring error message\n");
	rcu_read_unlock();
}

/* This is called when we haven't heard from the node with MAC address addr for
 * some time (just before the node is removed from the node table/list).
 */
void hsr_nl_nodedown(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN])
{
	struct sk_buff *skb;
	void *msg_head;
	struct hsr_port *master;
	int res;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!skb)
		goto fail;

	msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0, HSR_C_NODE_DOWN);
	if (!msg_head)
		goto nla_put_failure;

	res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
	if (res < 0)
		goto nla_put_failure;

	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);

	return;

nla_put_failure:
	kfree_skb(skb);

fail:
	rcu_read_lock();
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	netdev_warn(master->dev, "Could not send HSR node down\n");
	rcu_read_unlock();
}
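/* Userspace consumption note (illustrative sketch, not part of this module):
 * the two notification senders above publish HSR_C_RING_ERROR and
 * HSR_C_NODE_DOWN to the "hsr-network" multicast group of the "HSR" generic
 * netlink family. A minimal listener could look roughly like the code below.
 * The libnl-genl-3 calls and the <linux/hsr_netlink.h> uapi header (for the
 * HSR_C_ and HSR_A_ values) are assumptions about the userspace environment,
 * and error handling is omitted for brevity:
 *
 *	#include <stdio.h>
 *	#include <netlink/netlink.h>
 *	#include <netlink/genl/genl.h>
 *	#include <netlink/genl/ctrl.h>
 *	#include <linux/hsr_netlink.h>
 *
 *	static int on_event(struct nl_msg *msg, void *arg)
 *	{
 *		struct genlmsghdr *ghdr = nlmsg_data(nlmsg_hdr(msg));
 *
 *		if (ghdr->cmd == HSR_C_RING_ERROR)
 *			printf("HSR ring error reported\n");
 *		else if (ghdr->cmd == HSR_C_NODE_DOWN)
 *			printf("HSR node down reported\n");
 *		return NL_OK;
 *	}
 *
 *	int main(void)
 *	{
 *		struct nl_sock *sk = nl_socket_alloc();
 *		int grp;
 *
 *		genl_connect(sk);
 *		grp = genl_ctrl_resolve_grp(sk, "HSR", "hsr-network");
 *		nl_socket_add_membership(sk, grp);
 *		nl_socket_disable_seq_check(sk);
 *		nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, on_event, NULL);
 *		for (;;)
 *			nl_recvmsgs_default(sk);
 *	}
 */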
/* HSR_C_GET_NODE_STATUS lets userspace query the internal HSR node table
 * about the status of a specific node in the network, defined by its MAC
 * address.
 *
 * Input: hsr ifindex, node mac address
 * Output: hsr ifindex, node mac address (copied from request),
 *	   age of latest frame from node over slave 1, slave 2 [ms]
 */
static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
{
	/* For receiving */
	struct nlattr *na;
	struct net_device *hsr_dev;

	/* For sending */
	struct sk_buff *skb_out;
	void *msg_head;
	struct hsr_priv *hsr;
	struct hsr_port *port;
	unsigned char hsr_node_addr_b[ETH_ALEN];
	int hsr_node_if1_age;
	u16 hsr_node_if1_seq;
	int hsr_node_if2_age;
	u16 hsr_node_if2_seq;
	int addr_b_ifindex;
	int res;

	if (!info)
		goto invalid;

	na = info->attrs[HSR_A_IFINDEX];
	if (!na)
		goto invalid;
	na = info->attrs[HSR_A_NODE_ADDR];
	if (!na)
		goto invalid;

	rcu_read_lock();
	hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
				       nla_get_u32(info->attrs[HSR_A_IFINDEX]));
	if (!hsr_dev)
		goto rcu_unlock;
	if (!is_hsr_master(hsr_dev))
		goto rcu_unlock;

	/* Send reply */
	skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!skb_out) {
		res = -ENOMEM;
		goto fail;
	}

	msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
			       info->snd_seq, &hsr_genl_family, 0,
			       HSR_C_SET_NODE_STATUS);
	if (!msg_head) {
		res = -ENOMEM;
		goto nla_put_failure;
	}

	res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	hsr = netdev_priv(hsr_dev);
	res = hsr_get_node_data(hsr,
				(unsigned char *)
				nla_data(info->attrs[HSR_A_NODE_ADDR]),
				hsr_node_addr_b,
				&addr_b_ifindex,
				&hsr_node_if1_age,
				&hsr_node_if1_seq,
				&hsr_node_if2_age,
				&hsr_node_if2_seq);
	if (res < 0)
		goto nla_put_failure;

	res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN,
		      nla_data(info->attrs[HSR_A_NODE_ADDR]));
	if (res < 0)
		goto nla_put_failure;

	if (addr_b_ifindex > -1) {
		res = nla_put(skb_out, HSR_A_NODE_ADDR_B, ETH_ALEN,
			      hsr_node_addr_b);
		if (res < 0)
			goto nla_put_failure;

		res = nla_put_u32(skb_out, HSR_A_ADDR_B_IFINDEX,
				  addr_b_ifindex);
		if (res < 0)
			goto nla_put_failure;
	}

	res = nla_put_u32(skb_out, HSR_A_IF1_AGE, hsr_node_if1_age);
	if (res < 0)
		goto nla_put_failure;
	res = nla_put_u16(skb_out, HSR_A_IF1_SEQ, hsr_node_if1_seq);
	if (res < 0)
		goto nla_put_failure;
	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (port)
		res = nla_put_u32(skb_out, HSR_A_IF1_IFINDEX,
				  port->dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	res = nla_put_u32(skb_out, HSR_A_IF2_AGE, hsr_node_if2_age);
	if (res < 0)
		goto nla_put_failure;
	res = nla_put_u16(skb_out, HSR_A_IF2_SEQ, hsr_node_if2_seq);
	if (res < 0)
		goto nla_put_failure;
	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
	if (port)
		res = nla_put_u32(skb_out, HSR_A_IF2_IFINDEX,
				  port->dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	rcu_read_unlock();

	genlmsg_end(skb_out, msg_head);
	genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);

	return 0;

rcu_unlock:
	rcu_read_unlock();
invalid:
	netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
	return 0;

nla_put_failure:
	kfree_skb(skb_out);
	/* Fall through */

fail:
	rcu_read_unlock();
	return res;
}

/* Get a list of MacAddressA of all nodes known to this node (including self).
 */
static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
{
	unsigned char addr[ETH_ALEN];
	struct net_device *hsr_dev;
	struct sk_buff *skb_out;
	struct hsr_priv *hsr;
	bool restart = false;
	struct nlattr *na;
	void *pos = NULL;
	void *msg_head;
	int res;

	if (!info)
		goto invalid;

	na = info->attrs[HSR_A_IFINDEX];
	if (!na)
		goto invalid;

	rcu_read_lock();
	hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
				       nla_get_u32(info->attrs[HSR_A_IFINDEX]));
	if (!hsr_dev)
		goto rcu_unlock;
	if (!is_hsr_master(hsr_dev))
		goto rcu_unlock;

restart:
	/* Send reply */
	skb_out = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!skb_out) {
		res = -ENOMEM;
		goto fail;
	}

	msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
			       info->snd_seq, &hsr_genl_family, 0,
			       HSR_C_SET_NODE_LIST);
	if (!msg_head) {
		res = -ENOMEM;
		goto nla_put_failure;
	}

	if (!restart) {
		res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
		if (res < 0)
			goto nla_put_failure;
	}

	hsr = netdev_priv(hsr_dev);

	if (!pos)
		pos = hsr_get_next_node(hsr, NULL, addr);
	while (pos) {
		res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, addr);
		if (res < 0) {
			if (res == -EMSGSIZE) {
				genlmsg_end(skb_out, msg_head);
				genlmsg_unicast(genl_info_net(info), skb_out,
						info->snd_portid);
				restart = true;
				goto restart;
			}
			goto nla_put_failure;
		}
		pos = hsr_get_next_node(hsr, pos, addr);
	}
	rcu_read_unlock();

	genlmsg_end(skb_out, msg_head);
	genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);

	return 0;

rcu_unlock:
	rcu_read_unlock();
invalid:
	netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
	return 0;

nla_put_failure:
	nlmsg_free(skb_out);
	/* Fall through */

fail:
	rcu_read_unlock();
	return res;
}

static const struct genl_ops hsr_ops[] = {
	{
		.cmd = HSR_C_GET_NODE_STATUS,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = 0,
		.doit = hsr_get_node_status,
		.dumpit = NULL,
	},
	{
		.cmd = HSR_C_GET_NODE_LIST,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = 0,
		.doit = hsr_get_node_list,
		.dumpit = NULL,
	},
};

static struct genl_family hsr_genl_family __ro_after_init = {
	.hdrsize = 0,
	.name = "HSR",
	.version = 1,
	.maxattr = HSR_A_MAX,
	.policy = hsr_genl_policy,
	.netnsok = true,
	.module = THIS_MODULE,
	.ops = hsr_ops,
	.n_ops = ARRAY_SIZE(hsr_ops),
	.mcgrps = hsr_mcgrps,
	.n_mcgrps = ARRAY_SIZE(hsr_mcgrps),
};

int __init hsr_netlink_init(void)
{
	int rc;

	rc = rtnl_link_register(&hsr_link_ops);
	if (rc)
		goto fail_rtnl_link_register;

	rc = genl_register_family(&hsr_genl_family);
	if (rc)
		goto fail_genl_register_family;

	hsr_debugfs_create_root();
	return 0;

fail_genl_register_family:
	rtnl_link_unregister(&hsr_link_ops);
fail_rtnl_link_register:

	return rc;
}

void __exit hsr_netlink_exit(void)
{
	genl_unregister_family(&hsr_genl_family);
	rtnl_link_unregister(&hsr_link_ops);
}

MODULE_ALIAS_RTNL_LINK("hsr");
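/* Usage sketch (illustrative only, not compiled into this module): the
 * rtnetlink side registered above is what "ip link add ... type hsr
 * slave1 ... slave2 ..." talks to (exact keywords depend on the installed
 * iproute2 version). The generic netlink side can be exercised from
 * userspace roughly as below; the libnl-genl-3 calls and the
 * <linux/hsr_netlink.h> uapi header are assumptions about the userspace
 * environment, and error handling is omitted:
 *
 *	#include <net/if.h>
 *	#include <netlink/netlink.h>
 *	#include <netlink/genl/genl.h>
 *	#include <netlink/genl/ctrl.h>
 *	#include <linux/hsr_netlink.h>
 *
 *	int request_node_list(const char *hsr_ifname)
 *	{
 *		struct nl_sock *sk = nl_socket_alloc();
 *		struct nl_msg *msg = nlmsg_alloc();
 *		int family;
 *
 *		genl_connect(sk);
 *		family = genl_ctrl_resolve(sk, "HSR");
 *
 *		// Build HSR_C_GET_NODE_LIST with the hsr master's ifindex;
 *		// the kernel answers with HSR_C_SET_NODE_LIST carrying one
 *		// HSR_A_NODE_ADDR attribute per known node.
 *		genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
 *			    HSR_C_GET_NODE_LIST, 1);
 *		nla_put_u32(msg, HSR_A_IFINDEX, if_nametoindex(hsr_ifname));
 *		nl_send_auto(sk, msg);
 *		nlmsg_free(msg);
 *
 *		// A real consumer would install an NL_CB_VALID callback and
 *		// parse the HSR_A_NODE_ADDR attributes in the reply; here we
 *		// only wait for it to arrive.
 *		nl_recvmsgs_default(sk);
 *		nl_socket_free(sk);
 *		return 0;
 *	}
 */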