/*
 * Copyright (c) 2008-2011, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Author: Lucy Liu <lucy.liu@intel.com>
 */

#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/rtnetlink.h>
#include <linux/dcbnl.h>
#include <net/dcbevent.h>
#include <linux/rtnetlink.h>
#include <net/sock.h>

/**
 * Data Center Bridging (DCB) is a collection of Ethernet enhancements
 * intended to allow network traffic with differing requirements
 * (highly reliable, no drops vs. best effort vs. low latency) to operate
 * and co-exist on Ethernet. Current DCB features are:
 *
 * Enhanced Transmission Selection (aka Priority Grouping [PG]) - provides a
 * framework for assigning bandwidth guarantees to traffic classes.
 *
 * Priority-based Flow Control (PFC) - provides a flow control mechanism which
 * can work independently for each 802.1p priority.
 *
 * Congestion Notification - provides a mechanism for end-to-end congestion
 * control for protocols which do not have built-in congestion management.
 *
 * More information about the emerging standards for these Ethernet features
 * can be found at: http://www.ieee802.org/1/pages/dcbridges.html
 *
 * This file implements an rtnetlink interface to allow configuration of DCB
 * features for capable devices.
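 *
 * Illustrative sketch (not a normative description of the ABI; the attribute
 * and command definitions live in include/linux/dcbnl.h): a request to read
 * the DCB state of a device is an RTM_GETDCB message carrying a struct dcbmsg
 * followed by netlink attributes, roughly:
 *
 *	struct dcbmsg { .dcb_family = AF_UNSPEC, .cmd = DCB_CMD_GSTATE };
 *	DCB_ATTR_IFNAME = "eth0"   (NLA_NUL_STRING; "eth0" is just an example)
 *
 * dcb_doit() below looks the device up by DCB_ATTR_IFNAME and dispatches on
 * dcb->cmd; the handler replies with the requested value in the attribute it
 * owns (DCB_ATTR_STATE for DCB_CMD_GSTATE).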
 */

MODULE_AUTHOR("Lucy Liu, <lucy.liu@intel.com>");
MODULE_DESCRIPTION("Data Center Bridging netlink interface");
MODULE_LICENSE("GPL");

/**************** DCB attribute policies *************************************/

/* DCB netlink attributes policy */
static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
	[DCB_ATTR_IFNAME]      = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1},
	[DCB_ATTR_STATE]       = {.type = NLA_U8},
	[DCB_ATTR_PFC_CFG]     = {.type = NLA_NESTED},
	[DCB_ATTR_PG_CFG]      = {.type = NLA_NESTED},
	[DCB_ATTR_SET_ALL]     = {.type = NLA_U8},
	[DCB_ATTR_PERM_HWADDR] = {.type = NLA_FLAG},
	[DCB_ATTR_CAP]         = {.type = NLA_NESTED},
	[DCB_ATTR_PFC_STATE]   = {.type = NLA_U8},
	[DCB_ATTR_BCN]         = {.type = NLA_NESTED},
	[DCB_ATTR_APP]         = {.type = NLA_NESTED},
	[DCB_ATTR_IEEE]        = {.type = NLA_NESTED},
	[DCB_ATTR_DCBX]        = {.type = NLA_U8},
	[DCB_ATTR_FEATCFG]     = {.type = NLA_NESTED},
};

/* DCB priority flow control to User Priority nested attributes */
static const struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
	[DCB_PFC_UP_ATTR_0]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_1]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_2]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_3]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_4]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_5]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_6]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_7]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_ALL] = {.type = NLA_FLAG},
};

/* DCB priority grouping nested attributes */
static const struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
	[DCB_PG_ATTR_TC_0]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_1]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_2]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_3]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_4]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_5]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_6]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_7]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_ALL]    = {.type = NLA_NESTED},
	[DCB_PG_ATTR_BW_ID_0]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_1]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_2]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_3]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_4]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_5]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_6]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_7]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_ALL] = {.type = NLA_FLAG},
};

/* DCB traffic class nested attributes. */
static const struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
	[DCB_TC_ATTR_PARAM_PGID]        = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_UP_MAPPING]  = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_STRICT_PRIO] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_BW_PCT]      = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_ALL]         = {.type = NLA_FLAG},
};

/* DCB capabilities nested attributes. */
static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
	[DCB_CAP_ATTR_ALL]     = {.type = NLA_FLAG},
	[DCB_CAP_ATTR_PG]      = {.type = NLA_U8},
	[DCB_CAP_ATTR_PFC]     = {.type = NLA_U8},
	[DCB_CAP_ATTR_UP2TC]   = {.type = NLA_U8},
	[DCB_CAP_ATTR_PG_TCS]  = {.type = NLA_U8},
	[DCB_CAP_ATTR_PFC_TCS] = {.type = NLA_U8},
	[DCB_CAP_ATTR_GSP]     = {.type = NLA_U8},
	[DCB_CAP_ATTR_BCN]     = {.type = NLA_U8},
	[DCB_CAP_ATTR_DCBX]    = {.type = NLA_U8},
};

/* DCB number of traffic classes nested attributes. */
static const struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
	[DCB_NUMTCS_ATTR_ALL] = {.type = NLA_FLAG},
	[DCB_NUMTCS_ATTR_PG]  = {.type = NLA_U8},
	[DCB_NUMTCS_ATTR_PFC] = {.type = NLA_U8},
};

/* DCB BCN nested attributes. */
static const struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
	[DCB_BCN_ATTR_RP_0]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_1]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_2]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_3]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_4]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_5]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_6]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_7]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_ALL] = {.type = NLA_FLAG},
	[DCB_BCN_ATTR_BCNA_0] = {.type = NLA_U32},
	[DCB_BCN_ATTR_BCNA_1] = {.type = NLA_U32},
	[DCB_BCN_ATTR_ALPHA]  = {.type = NLA_U32},
	[DCB_BCN_ATTR_BETA]   = {.type = NLA_U32},
	[DCB_BCN_ATTR_GD]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_GI]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_TMAX]   = {.type = NLA_U32},
	[DCB_BCN_ATTR_TD]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_RMIN]   = {.type = NLA_U32},
	[DCB_BCN_ATTR_W]      = {.type = NLA_U32},
	[DCB_BCN_ATTR_RD]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_RU]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_WRTT]   = {.type = NLA_U32},
	[DCB_BCN_ATTR_RI]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_C]      = {.type = NLA_U32},
	[DCB_BCN_ATTR_ALL]    = {.type = NLA_FLAG},
};

/* DCB APP nested attributes. */
static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
	[DCB_APP_ATTR_IDTYPE]   = {.type = NLA_U8},
	[DCB_APP_ATTR_ID]       = {.type = NLA_U16},
	[DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8},
};

/* IEEE 802.1Qaz nested attributes. */
static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = {
	[DCB_ATTR_IEEE_ETS]       = {.len = sizeof(struct ieee_ets)},
	[DCB_ATTR_IEEE_PFC]       = {.len = sizeof(struct ieee_pfc)},
	[DCB_ATTR_IEEE_APP_TABLE] = {.type = NLA_NESTED},
};

static const struct nla_policy dcbnl_ieee_app[DCB_ATTR_IEEE_APP_MAX + 1] = {
	[DCB_ATTR_IEEE_APP] = {.len = sizeof(struct dcb_app)},
};

/* DCB feature configuration nested attributes. */
static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = {
	[DCB_FEATCFG_ATTR_ALL] = {.type = NLA_FLAG},
	[DCB_FEATCFG_ATTR_PG]  = {.type = NLA_U8},
	[DCB_FEATCFG_ATTR_PFC] = {.type = NLA_U8},
	[DCB_FEATCFG_ATTR_APP] = {.type = NLA_U8},
};

static LIST_HEAD(dcb_app_list);
static DEFINE_SPINLOCK(dcb_lock);

/* standard netlink reply call */
static int dcbnl_reply(u8 value, u8 event, u8 cmd, u8 attr, u32 pid,
		       u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct dcbmsg *dcb;
	struct nlmsghdr *nlh;
	int ret = -EINVAL;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		return ret;

	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, event, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = cmd;
	dcb->dcb_pad = 0;

	ret = nla_put_u8(dcbnl_skb, attr, value);
	if (ret)
		goto err;

	/* end the message, assign the nlmsg_len.
*/ 222 nlmsg_end(dcbnl_skb, nlh); 223 ret = rtnl_unicast(dcbnl_skb, &init_net, pid); 224 if (ret) 225 return -EINVAL; 226 227 return 0; 228 nlmsg_failure: 229 err: 230 kfree_skb(dcbnl_skb); 231 return ret; 232 } 233 234 static int dcbnl_getstate(struct net_device *netdev, struct nlattr **tb, 235 u32 pid, u32 seq, u16 flags) 236 { 237 int ret = -EINVAL; 238 239 /* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */ 240 if (!netdev->dcbnl_ops->getstate) 241 return ret; 242 243 ret = dcbnl_reply(netdev->dcbnl_ops->getstate(netdev), RTM_GETDCB, 244 DCB_CMD_GSTATE, DCB_ATTR_STATE, pid, seq, flags); 245 246 return ret; 247 } 248 249 static int dcbnl_getpfccfg(struct net_device *netdev, struct nlattr **tb, 250 u32 pid, u32 seq, u16 flags) 251 { 252 struct sk_buff *dcbnl_skb; 253 struct nlmsghdr *nlh; 254 struct dcbmsg *dcb; 255 struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest; 256 u8 value; 257 int ret = -EINVAL; 258 int i; 259 int getall = 0; 260 261 if (!tb[DCB_ATTR_PFC_CFG] || !netdev->dcbnl_ops->getpfccfg) 262 return ret; 263 264 ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX, 265 tb[DCB_ATTR_PFC_CFG], 266 dcbnl_pfc_up_nest); 267 if (ret) 268 goto err_out; 269 270 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 271 if (!dcbnl_skb) 272 goto err_out; 273 274 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags); 275 276 dcb = NLMSG_DATA(nlh); 277 dcb->dcb_family = AF_UNSPEC; 278 dcb->cmd = DCB_CMD_PFC_GCFG; 279 280 nest = nla_nest_start(dcbnl_skb, DCB_ATTR_PFC_CFG); 281 if (!nest) 282 goto err; 283 284 if (data[DCB_PFC_UP_ATTR_ALL]) 285 getall = 1; 286 287 for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) { 288 if (!getall && !data[i]) 289 continue; 290 291 netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, 292 &value); 293 ret = nla_put_u8(dcbnl_skb, i, value); 294 295 if (ret) { 296 nla_nest_cancel(dcbnl_skb, nest); 297 goto err; 298 } 299 } 300 nla_nest_end(dcbnl_skb, nest); 301 302 nlmsg_end(dcbnl_skb, nlh); 303 304 ret = rtnl_unicast(dcbnl_skb, &init_net, pid); 305 if (ret) 306 goto err_out; 307 308 return 0; 309 nlmsg_failure: 310 err: 311 kfree_skb(dcbnl_skb); 312 err_out: 313 return -EINVAL; 314 } 315 316 static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlattr **tb, 317 u32 pid, u32 seq, u16 flags) 318 { 319 struct sk_buff *dcbnl_skb; 320 struct nlmsghdr *nlh; 321 struct dcbmsg *dcb; 322 u8 perm_addr[MAX_ADDR_LEN]; 323 int ret = -EINVAL; 324 325 if (!netdev->dcbnl_ops->getpermhwaddr) 326 return ret; 327 328 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 329 if (!dcbnl_skb) 330 goto err_out; 331 332 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags); 333 334 dcb = NLMSG_DATA(nlh); 335 dcb->dcb_family = AF_UNSPEC; 336 dcb->cmd = DCB_CMD_GPERM_HWADDR; 337 338 netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr); 339 340 ret = nla_put(dcbnl_skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), 341 perm_addr); 342 343 nlmsg_end(dcbnl_skb, nlh); 344 345 ret = rtnl_unicast(dcbnl_skb, &init_net, pid); 346 if (ret) 347 goto err_out; 348 349 return 0; 350 351 nlmsg_failure: 352 kfree_skb(dcbnl_skb); 353 err_out: 354 return -EINVAL; 355 } 356 357 static int dcbnl_getcap(struct net_device *netdev, struct nlattr **tb, 358 u32 pid, u32 seq, u16 flags) 359 { 360 struct sk_buff *dcbnl_skb; 361 struct nlmsghdr *nlh; 362 struct dcbmsg *dcb; 363 struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest; 364 u8 value; 365 int ret = -EINVAL; 366 int i; 367 int getall = 0; 368 369 if (!tb[DCB_ATTR_CAP] || 
!netdev->dcbnl_ops->getcap) 370 return ret; 371 372 ret = nla_parse_nested(data, DCB_CAP_ATTR_MAX, tb[DCB_ATTR_CAP], 373 dcbnl_cap_nest); 374 if (ret) 375 goto err_out; 376 377 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 378 if (!dcbnl_skb) 379 goto err_out; 380 381 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags); 382 383 dcb = NLMSG_DATA(nlh); 384 dcb->dcb_family = AF_UNSPEC; 385 dcb->cmd = DCB_CMD_GCAP; 386 387 nest = nla_nest_start(dcbnl_skb, DCB_ATTR_CAP); 388 if (!nest) 389 goto err; 390 391 if (data[DCB_CAP_ATTR_ALL]) 392 getall = 1; 393 394 for (i = DCB_CAP_ATTR_ALL+1; i <= DCB_CAP_ATTR_MAX; i++) { 395 if (!getall && !data[i]) 396 continue; 397 398 if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) { 399 ret = nla_put_u8(dcbnl_skb, i, value); 400 401 if (ret) { 402 nla_nest_cancel(dcbnl_skb, nest); 403 goto err; 404 } 405 } 406 } 407 nla_nest_end(dcbnl_skb, nest); 408 409 nlmsg_end(dcbnl_skb, nlh); 410 411 ret = rtnl_unicast(dcbnl_skb, &init_net, pid); 412 if (ret) 413 goto err_out; 414 415 return 0; 416 nlmsg_failure: 417 err: 418 kfree_skb(dcbnl_skb); 419 err_out: 420 return -EINVAL; 421 } 422 423 static int dcbnl_getnumtcs(struct net_device *netdev, struct nlattr **tb, 424 u32 pid, u32 seq, u16 flags) 425 { 426 struct sk_buff *dcbnl_skb; 427 struct nlmsghdr *nlh; 428 struct dcbmsg *dcb; 429 struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest; 430 u8 value; 431 int ret = -EINVAL; 432 int i; 433 int getall = 0; 434 435 if (!tb[DCB_ATTR_NUMTCS] || !netdev->dcbnl_ops->getnumtcs) 436 return ret; 437 438 ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS], 439 dcbnl_numtcs_nest); 440 if (ret) { 441 ret = -EINVAL; 442 goto err_out; 443 } 444 445 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 446 if (!dcbnl_skb) { 447 ret = -EINVAL; 448 goto err_out; 449 } 450 451 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags); 452 453 dcb = NLMSG_DATA(nlh); 454 dcb->dcb_family = AF_UNSPEC; 455 dcb->cmd = DCB_CMD_GNUMTCS; 456 457 nest = nla_nest_start(dcbnl_skb, DCB_ATTR_NUMTCS); 458 if (!nest) { 459 ret = -EINVAL; 460 goto err; 461 } 462 463 if (data[DCB_NUMTCS_ATTR_ALL]) 464 getall = 1; 465 466 for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) { 467 if (!getall && !data[i]) 468 continue; 469 470 ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value); 471 if (!ret) { 472 ret = nla_put_u8(dcbnl_skb, i, value); 473 474 if (ret) { 475 nla_nest_cancel(dcbnl_skb, nest); 476 ret = -EINVAL; 477 goto err; 478 } 479 } else { 480 goto err; 481 } 482 } 483 nla_nest_end(dcbnl_skb, nest); 484 485 nlmsg_end(dcbnl_skb, nlh); 486 487 ret = rtnl_unicast(dcbnl_skb, &init_net, pid); 488 if (ret) { 489 ret = -EINVAL; 490 goto err_out; 491 } 492 493 return 0; 494 nlmsg_failure: 495 err: 496 kfree_skb(dcbnl_skb); 497 err_out: 498 return ret; 499 } 500 501 static int dcbnl_setnumtcs(struct net_device *netdev, struct nlattr **tb, 502 u32 pid, u32 seq, u16 flags) 503 { 504 struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1]; 505 int ret = -EINVAL; 506 u8 value; 507 int i; 508 509 if (!tb[DCB_ATTR_NUMTCS] || !netdev->dcbnl_ops->setnumtcs) 510 return ret; 511 512 ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS], 513 dcbnl_numtcs_nest); 514 515 if (ret) { 516 ret = -EINVAL; 517 goto err; 518 } 519 520 for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) { 521 if (data[i] == NULL) 522 continue; 523 524 value = nla_get_u8(data[i]); 525 526 ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value); 527 528 if (ret) 529 
goto operr; 530 } 531 532 operr: 533 ret = dcbnl_reply(!!ret, RTM_SETDCB, DCB_CMD_SNUMTCS, 534 DCB_ATTR_NUMTCS, pid, seq, flags); 535 536 err: 537 return ret; 538 } 539 540 static int dcbnl_getpfcstate(struct net_device *netdev, struct nlattr **tb, 541 u32 pid, u32 seq, u16 flags) 542 { 543 int ret = -EINVAL; 544 545 if (!netdev->dcbnl_ops->getpfcstate) 546 return ret; 547 548 ret = dcbnl_reply(netdev->dcbnl_ops->getpfcstate(netdev), RTM_GETDCB, 549 DCB_CMD_PFC_GSTATE, DCB_ATTR_PFC_STATE, 550 pid, seq, flags); 551 552 return ret; 553 } 554 555 static int dcbnl_setpfcstate(struct net_device *netdev, struct nlattr **tb, 556 u32 pid, u32 seq, u16 flags) 557 { 558 int ret = -EINVAL; 559 u8 value; 560 561 if (!tb[DCB_ATTR_PFC_STATE] || !netdev->dcbnl_ops->setpfcstate) 562 return ret; 563 564 value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]); 565 566 netdev->dcbnl_ops->setpfcstate(netdev, value); 567 568 ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_PFC_SSTATE, DCB_ATTR_PFC_STATE, 569 pid, seq, flags); 570 571 return ret; 572 } 573 574 static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb, 575 u32 pid, u32 seq, u16 flags) 576 { 577 struct sk_buff *dcbnl_skb; 578 struct nlmsghdr *nlh; 579 struct dcbmsg *dcb; 580 struct nlattr *app_nest; 581 struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1]; 582 u16 id; 583 u8 up, idtype; 584 int ret = -EINVAL; 585 586 if (!tb[DCB_ATTR_APP]) 587 goto out; 588 589 ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP], 590 dcbnl_app_nest); 591 if (ret) 592 goto out; 593 594 ret = -EINVAL; 595 /* all must be non-null */ 596 if ((!app_tb[DCB_APP_ATTR_IDTYPE]) || 597 (!app_tb[DCB_APP_ATTR_ID])) 598 goto out; 599 600 /* either by eth type or by socket number */ 601 idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]); 602 if ((idtype != DCB_APP_IDTYPE_ETHTYPE) && 603 (idtype != DCB_APP_IDTYPE_PORTNUM)) 604 goto out; 605 606 id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]); 607 608 if (netdev->dcbnl_ops->getapp) { 609 up = netdev->dcbnl_ops->getapp(netdev, idtype, id); 610 } else { 611 struct dcb_app app = { 612 .selector = idtype, 613 .protocol = id, 614 }; 615 up = dcb_getapp(netdev, &app); 616 } 617 618 /* send this back */ 619 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 620 if (!dcbnl_skb) 621 goto out; 622 623 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags); 624 dcb = NLMSG_DATA(nlh); 625 dcb->dcb_family = AF_UNSPEC; 626 dcb->cmd = DCB_CMD_GAPP; 627 628 app_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_APP); 629 if (!app_nest) 630 goto out_cancel; 631 632 ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_IDTYPE, idtype); 633 if (ret) 634 goto out_cancel; 635 636 ret = nla_put_u16(dcbnl_skb, DCB_APP_ATTR_ID, id); 637 if (ret) 638 goto out_cancel; 639 640 ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_PRIORITY, up); 641 if (ret) 642 goto out_cancel; 643 644 nla_nest_end(dcbnl_skb, app_nest); 645 nlmsg_end(dcbnl_skb, nlh); 646 647 ret = rtnl_unicast(dcbnl_skb, &init_net, pid); 648 if (ret) 649 goto nlmsg_failure; 650 651 goto out; 652 653 out_cancel: 654 nla_nest_cancel(dcbnl_skb, app_nest); 655 nlmsg_failure: 656 kfree_skb(dcbnl_skb); 657 out: 658 return ret; 659 } 660 661 static int dcbnl_setapp(struct net_device *netdev, struct nlattr **tb, 662 u32 pid, u32 seq, u16 flags) 663 { 664 int err, ret = -EINVAL; 665 u16 id; 666 u8 up, idtype; 667 struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1]; 668 669 if (!tb[DCB_ATTR_APP]) 670 goto out; 671 672 ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP], 673 dcbnl_app_nest); 674 if (ret) 675 goto out; 676 
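	/*
	 * For reference, the nested DCB_ATTR_APP payload parsed above carries
	 * three attributes (see dcbnl_app_nest); e.g. a hypothetical request
	 * steering an ethertype-identified protocol to priority 3 would set:
	 *
	 *	DCB_APP_ATTR_IDTYPE   = DCB_APP_IDTYPE_ETHTYPE	(u8)
	 *	DCB_APP_ATTR_ID       = 0x8906			(u16 ethertype)
	 *	DCB_APP_ATTR_PRIORITY = 3			(u8)
	 *
	 * The 0x8906/priority-3 pairing is only an example (the conventional
	 * FCoE mapping); nothing in this handler requires it.
	 */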
677 ret = -EINVAL; 678 /* all must be non-null */ 679 if ((!app_tb[DCB_APP_ATTR_IDTYPE]) || 680 (!app_tb[DCB_APP_ATTR_ID]) || 681 (!app_tb[DCB_APP_ATTR_PRIORITY])) 682 goto out; 683 684 /* either by eth type or by socket number */ 685 idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]); 686 if ((idtype != DCB_APP_IDTYPE_ETHTYPE) && 687 (idtype != DCB_APP_IDTYPE_PORTNUM)) 688 goto out; 689 690 id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]); 691 up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]); 692 693 if (netdev->dcbnl_ops->setapp) { 694 err = netdev->dcbnl_ops->setapp(netdev, idtype, id, up); 695 } else { 696 struct dcb_app app; 697 app.selector = idtype; 698 app.protocol = id; 699 app.priority = up; 700 err = dcb_setapp(netdev, &app); 701 } 702 703 ret = dcbnl_reply(err, RTM_SETDCB, DCB_CMD_SAPP, DCB_ATTR_APP, 704 pid, seq, flags); 705 out: 706 return ret; 707 } 708 709 static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlattr **tb, 710 u32 pid, u32 seq, u16 flags, int dir) 711 { 712 struct sk_buff *dcbnl_skb; 713 struct nlmsghdr *nlh; 714 struct dcbmsg *dcb; 715 struct nlattr *pg_nest, *param_nest, *data; 716 struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1]; 717 struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1]; 718 u8 prio, pgid, tc_pct, up_map; 719 int ret = -EINVAL; 720 int getall = 0; 721 int i; 722 723 if (!tb[DCB_ATTR_PG_CFG] || 724 !netdev->dcbnl_ops->getpgtccfgtx || 725 !netdev->dcbnl_ops->getpgtccfgrx || 726 !netdev->dcbnl_ops->getpgbwgcfgtx || 727 !netdev->dcbnl_ops->getpgbwgcfgrx) 728 return ret; 729 730 ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX, 731 tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest); 732 733 if (ret) 734 goto err_out; 735 736 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 737 if (!dcbnl_skb) 738 goto err_out; 739 740 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags); 741 742 dcb = NLMSG_DATA(nlh); 743 dcb->dcb_family = AF_UNSPEC; 744 dcb->cmd = (dir) ? 
DCB_CMD_PGRX_GCFG : DCB_CMD_PGTX_GCFG; 745 746 pg_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_PG_CFG); 747 if (!pg_nest) 748 goto err; 749 750 if (pg_tb[DCB_PG_ATTR_TC_ALL]) 751 getall = 1; 752 753 for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) { 754 if (!getall && !pg_tb[i]) 755 continue; 756 757 if (pg_tb[DCB_PG_ATTR_TC_ALL]) 758 data = pg_tb[DCB_PG_ATTR_TC_ALL]; 759 else 760 data = pg_tb[i]; 761 ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX, 762 data, dcbnl_tc_param_nest); 763 if (ret) 764 goto err_pg; 765 766 param_nest = nla_nest_start(dcbnl_skb, i); 767 if (!param_nest) 768 goto err_pg; 769 770 pgid = DCB_ATTR_VALUE_UNDEFINED; 771 prio = DCB_ATTR_VALUE_UNDEFINED; 772 tc_pct = DCB_ATTR_VALUE_UNDEFINED; 773 up_map = DCB_ATTR_VALUE_UNDEFINED; 774 775 if (dir) { 776 /* Rx */ 777 netdev->dcbnl_ops->getpgtccfgrx(netdev, 778 i - DCB_PG_ATTR_TC_0, &prio, 779 &pgid, &tc_pct, &up_map); 780 } else { 781 /* Tx */ 782 netdev->dcbnl_ops->getpgtccfgtx(netdev, 783 i - DCB_PG_ATTR_TC_0, &prio, 784 &pgid, &tc_pct, &up_map); 785 } 786 787 if (param_tb[DCB_TC_ATTR_PARAM_PGID] || 788 param_tb[DCB_TC_ATTR_PARAM_ALL]) { 789 ret = nla_put_u8(dcbnl_skb, 790 DCB_TC_ATTR_PARAM_PGID, pgid); 791 if (ret) 792 goto err_param; 793 } 794 if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] || 795 param_tb[DCB_TC_ATTR_PARAM_ALL]) { 796 ret = nla_put_u8(dcbnl_skb, 797 DCB_TC_ATTR_PARAM_UP_MAPPING, up_map); 798 if (ret) 799 goto err_param; 800 } 801 if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] || 802 param_tb[DCB_TC_ATTR_PARAM_ALL]) { 803 ret = nla_put_u8(dcbnl_skb, 804 DCB_TC_ATTR_PARAM_STRICT_PRIO, prio); 805 if (ret) 806 goto err_param; 807 } 808 if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT] || 809 param_tb[DCB_TC_ATTR_PARAM_ALL]) { 810 ret = nla_put_u8(dcbnl_skb, DCB_TC_ATTR_PARAM_BW_PCT, 811 tc_pct); 812 if (ret) 813 goto err_param; 814 } 815 nla_nest_end(dcbnl_skb, param_nest); 816 } 817 818 if (pg_tb[DCB_PG_ATTR_BW_ID_ALL]) 819 getall = 1; 820 else 821 getall = 0; 822 823 for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) { 824 if (!getall && !pg_tb[i]) 825 continue; 826 827 tc_pct = DCB_ATTR_VALUE_UNDEFINED; 828 829 if (dir) { 830 /* Rx */ 831 netdev->dcbnl_ops->getpgbwgcfgrx(netdev, 832 i - DCB_PG_ATTR_BW_ID_0, &tc_pct); 833 } else { 834 /* Tx */ 835 netdev->dcbnl_ops->getpgbwgcfgtx(netdev, 836 i - DCB_PG_ATTR_BW_ID_0, &tc_pct); 837 } 838 ret = nla_put_u8(dcbnl_skb, i, tc_pct); 839 840 if (ret) 841 goto err_pg; 842 } 843 844 nla_nest_end(dcbnl_skb, pg_nest); 845 846 nlmsg_end(dcbnl_skb, nlh); 847 848 ret = rtnl_unicast(dcbnl_skb, &init_net, pid); 849 if (ret) 850 goto err_out; 851 852 return 0; 853 854 err_param: 855 nla_nest_cancel(dcbnl_skb, param_nest); 856 err_pg: 857 nla_nest_cancel(dcbnl_skb, pg_nest); 858 nlmsg_failure: 859 err: 860 kfree_skb(dcbnl_skb); 861 err_out: 862 ret = -EINVAL; 863 return ret; 864 } 865 866 static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlattr **tb, 867 u32 pid, u32 seq, u16 flags) 868 { 869 return __dcbnl_pg_getcfg(netdev, tb, pid, seq, flags, 0); 870 } 871 872 static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlattr **tb, 873 u32 pid, u32 seq, u16 flags) 874 { 875 return __dcbnl_pg_getcfg(netdev, tb, pid, seq, flags, 1); 876 } 877 878 static int dcbnl_setstate(struct net_device *netdev, struct nlattr **tb, 879 u32 pid, u32 seq, u16 flags) 880 { 881 int ret = -EINVAL; 882 u8 value; 883 884 if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->setstate) 885 return ret; 886 887 value = nla_get_u8(tb[DCB_ATTR_STATE]); 888 889 ret = 
dcbnl_reply(netdev->dcbnl_ops->setstate(netdev, value), 890 RTM_SETDCB, DCB_CMD_SSTATE, DCB_ATTR_STATE, 891 pid, seq, flags); 892 893 return ret; 894 } 895 896 static int dcbnl_setpfccfg(struct net_device *netdev, struct nlattr **tb, 897 u32 pid, u32 seq, u16 flags) 898 { 899 struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1]; 900 int i; 901 int ret = -EINVAL; 902 u8 value; 903 904 if (!tb[DCB_ATTR_PFC_CFG] || !netdev->dcbnl_ops->setpfccfg) 905 return ret; 906 907 ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX, 908 tb[DCB_ATTR_PFC_CFG], 909 dcbnl_pfc_up_nest); 910 if (ret) 911 goto err; 912 913 for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) { 914 if (data[i] == NULL) 915 continue; 916 value = nla_get_u8(data[i]); 917 netdev->dcbnl_ops->setpfccfg(netdev, 918 data[i]->nla_type - DCB_PFC_UP_ATTR_0, value); 919 } 920 921 ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_PFC_SCFG, DCB_ATTR_PFC_CFG, 922 pid, seq, flags); 923 err: 924 return ret; 925 } 926 927 static int dcbnl_setall(struct net_device *netdev, struct nlattr **tb, 928 u32 pid, u32 seq, u16 flags) 929 { 930 int ret = -EINVAL; 931 932 if (!tb[DCB_ATTR_SET_ALL] || !netdev->dcbnl_ops->setall) 933 return ret; 934 935 ret = dcbnl_reply(netdev->dcbnl_ops->setall(netdev), RTM_SETDCB, 936 DCB_CMD_SET_ALL, DCB_ATTR_SET_ALL, pid, seq, flags); 937 938 return ret; 939 } 940 941 static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlattr **tb, 942 u32 pid, u32 seq, u16 flags, int dir) 943 { 944 struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1]; 945 struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1]; 946 int ret = -EINVAL; 947 int i; 948 u8 pgid; 949 u8 up_map; 950 u8 prio; 951 u8 tc_pct; 952 953 if (!tb[DCB_ATTR_PG_CFG] || 954 !netdev->dcbnl_ops->setpgtccfgtx || 955 !netdev->dcbnl_ops->setpgtccfgrx || 956 !netdev->dcbnl_ops->setpgbwgcfgtx || 957 !netdev->dcbnl_ops->setpgbwgcfgrx) 958 return ret; 959 960 ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX, 961 tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest); 962 if (ret) 963 goto err; 964 965 for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) { 966 if (!pg_tb[i]) 967 continue; 968 969 ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX, 970 pg_tb[i], dcbnl_tc_param_nest); 971 if (ret) 972 goto err; 973 974 pgid = DCB_ATTR_VALUE_UNDEFINED; 975 prio = DCB_ATTR_VALUE_UNDEFINED; 976 tc_pct = DCB_ATTR_VALUE_UNDEFINED; 977 up_map = DCB_ATTR_VALUE_UNDEFINED; 978 979 if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]) 980 prio = 981 nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]); 982 983 if (param_tb[DCB_TC_ATTR_PARAM_PGID]) 984 pgid = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_PGID]); 985 986 if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT]) 987 tc_pct = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_BW_PCT]); 988 989 if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]) 990 up_map = 991 nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]); 992 993 /* dir: Tx = 0, Rx = 1 */ 994 if (dir) { 995 /* Rx */ 996 netdev->dcbnl_ops->setpgtccfgrx(netdev, 997 i - DCB_PG_ATTR_TC_0, 998 prio, pgid, tc_pct, up_map); 999 } else { 1000 /* Tx */ 1001 netdev->dcbnl_ops->setpgtccfgtx(netdev, 1002 i - DCB_PG_ATTR_TC_0, 1003 prio, pgid, tc_pct, up_map); 1004 } 1005 } 1006 1007 for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) { 1008 if (!pg_tb[i]) 1009 continue; 1010 1011 tc_pct = nla_get_u8(pg_tb[i]); 1012 1013 /* dir: Tx = 0, Rx = 1 */ 1014 if (dir) { 1015 /* Rx */ 1016 netdev->dcbnl_ops->setpgbwgcfgrx(netdev, 1017 i - DCB_PG_ATTR_BW_ID_0, tc_pct); 1018 } else { 1019 /* Tx */ 1020 netdev->dcbnl_ops->setpgbwgcfgtx(netdev, 1021 i - DCB_PG_ATTR_BW_ID_0, tc_pct); 
		}
	}

	ret = dcbnl_reply(0, RTM_SETDCB,
			  (dir ? DCB_CMD_PGRX_SCFG : DCB_CMD_PGTX_SCFG),
			  DCB_ATTR_PG_CFG, pid, seq, flags);

err:
	return ret;
}

static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlattr **tb,
			     u32 pid, u32 seq, u16 flags)
{
	return __dcbnl_pg_setcfg(netdev, tb, pid, seq, flags, 0);
}

static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlattr **tb,
			     u32 pid, u32 seq, u16 flags)
{
	return __dcbnl_pg_setcfg(netdev, tb, pid, seq, flags, 1);
}

static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlattr **tb,
			    u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *bcn_nest;
	struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1];
	u8 value_byte;
	u32 value_integer;
	int ret = -EINVAL;
	bool getall = false;
	int i;

	if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->getbcnrp ||
	    !netdev->dcbnl_ops->getbcncfg)
		return ret;

	ret = nla_parse_nested(bcn_tb, DCB_BCN_ATTR_MAX,
			       tb[DCB_ATTR_BCN], dcbnl_bcn_nest);

	if (ret)
		goto err_out;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		goto err_out;

	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_BCN_GCFG;

	bcn_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_BCN);
	if (!bcn_nest)
		goto err;

	if (bcn_tb[DCB_BCN_ATTR_ALL])
		getall = true;

	for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
		if (!getall && !bcn_tb[i])
			continue;

		netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0,
					    &value_byte);
		ret = nla_put_u8(dcbnl_skb, i, value_byte);
		if (ret)
			goto err_bcn;
	}

	for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
		if (!getall && !bcn_tb[i])
			continue;

		netdev->dcbnl_ops->getbcncfg(netdev, i,
					     &value_integer);
		ret = nla_put_u32(dcbnl_skb, i, value_integer);
		if (ret)
			goto err_bcn;
	}

	nla_nest_end(dcbnl_skb, bcn_nest);

	nlmsg_end(dcbnl_skb, nlh);

	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret)
		goto err_out;

	return 0;

err_bcn:
	nla_nest_cancel(dcbnl_skb, bcn_nest);
nlmsg_failure:
err:
	kfree_skb(dcbnl_skb);
err_out:
	ret = -EINVAL;
	return ret;
}

static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlattr **tb,
			    u32 pid, u32 seq, u16 flags)
{
	struct nlattr *data[DCB_BCN_ATTR_MAX + 1];
	int i;
	int ret = -EINVAL;
	u8 value_byte;
	u32 value_int;

	if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->setbcncfg ||
	    !netdev->dcbnl_ops->setbcnrp)
		return ret;

	ret = nla_parse_nested(data, DCB_BCN_ATTR_MAX,
			       tb[DCB_ATTR_BCN],
			       dcbnl_bcn_nest);
	if (ret)
		goto err;

	for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
		if (data[i] == NULL)
			continue;
		value_byte = nla_get_u8(data[i]);
		netdev->dcbnl_ops->setbcnrp(netdev,
			data[i]->nla_type - DCB_BCN_ATTR_RP_0, value_byte);
	}

	for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
		if (data[i] == NULL)
			continue;
		value_int = nla_get_u32(data[i]);
netdev->dcbnl_ops->setbcncfg(netdev, 1160 i, value_int); 1161 } 1162 1163 ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_BCN_SCFG, DCB_ATTR_BCN, 1164 pid, seq, flags); 1165 err: 1166 return ret; 1167 } 1168 1169 static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff* skb, 1170 int app_nested_type, int app_info_type, 1171 int app_entry_type) 1172 { 1173 struct dcb_peer_app_info info; 1174 struct dcb_app *table = NULL; 1175 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; 1176 u16 app_count; 1177 int err; 1178 1179 1180 /** 1181 * retrieve the peer app configuration form the driver. If the driver 1182 * handlers fail exit without doing anything 1183 */ 1184 err = ops->peer_getappinfo(netdev, &info, &app_count); 1185 if (!err && app_count) { 1186 table = kmalloc(sizeof(struct dcb_app) * app_count, GFP_KERNEL); 1187 if (!table) 1188 return -ENOMEM; 1189 1190 err = ops->peer_getapptable(netdev, table); 1191 } 1192 1193 if (!err) { 1194 u16 i; 1195 struct nlattr *app; 1196 1197 /** 1198 * build the message, from here on the only possible failure 1199 * is due to the skb size 1200 */ 1201 err = -EMSGSIZE; 1202 1203 app = nla_nest_start(skb, app_nested_type); 1204 if (!app) 1205 goto nla_put_failure; 1206 1207 if (app_info_type) 1208 NLA_PUT(skb, app_info_type, sizeof(info), &info); 1209 1210 for (i = 0; i < app_count; i++) 1211 NLA_PUT(skb, app_entry_type, sizeof(struct dcb_app), 1212 &table[i]); 1213 1214 nla_nest_end(skb, app); 1215 } 1216 err = 0; 1217 1218 nla_put_failure: 1219 kfree(table); 1220 return err; 1221 } 1222 1223 /* Handle IEEE 802.1Qaz GET commands. */ 1224 static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) 1225 { 1226 struct nlattr *ieee, *app; 1227 struct dcb_app_type *itr; 1228 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; 1229 int dcbx; 1230 int err = -EMSGSIZE; 1231 1232 NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name); 1233 1234 ieee = nla_nest_start(skb, DCB_ATTR_IEEE); 1235 if (!ieee) 1236 goto nla_put_failure; 1237 1238 if (ops->ieee_getets) { 1239 struct ieee_ets ets; 1240 err = ops->ieee_getets(netdev, &ets); 1241 if (!err) 1242 NLA_PUT(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets); 1243 } 1244 1245 if (ops->ieee_getpfc) { 1246 struct ieee_pfc pfc; 1247 err = ops->ieee_getpfc(netdev, &pfc); 1248 if (!err) 1249 NLA_PUT(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc); 1250 } 1251 1252 app = nla_nest_start(skb, DCB_ATTR_IEEE_APP_TABLE); 1253 if (!app) 1254 goto nla_put_failure; 1255 1256 spin_lock(&dcb_lock); 1257 list_for_each_entry(itr, &dcb_app_list, list) { 1258 if (strncmp(itr->name, netdev->name, IFNAMSIZ) == 0) { 1259 err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app), 1260 &itr->app); 1261 if (err) { 1262 spin_unlock(&dcb_lock); 1263 goto nla_put_failure; 1264 } 1265 } 1266 } 1267 1268 if (netdev->dcbnl_ops->getdcbx) 1269 dcbx = netdev->dcbnl_ops->getdcbx(netdev); 1270 else 1271 dcbx = -EOPNOTSUPP; 1272 1273 spin_unlock(&dcb_lock); 1274 nla_nest_end(skb, app); 1275 1276 /* get peer info if available */ 1277 if (ops->ieee_peer_getets) { 1278 struct ieee_ets ets; 1279 err = ops->ieee_peer_getets(netdev, &ets); 1280 if (!err) 1281 NLA_PUT(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets); 1282 } 1283 1284 if (ops->ieee_peer_getpfc) { 1285 struct ieee_pfc pfc; 1286 err = ops->ieee_peer_getpfc(netdev, &pfc); 1287 if (!err) 1288 NLA_PUT(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc); 1289 } 1290 1291 if (ops->peer_getappinfo && ops->peer_getapptable) { 1292 err = dcbnl_build_peer_app(netdev, skb, 1293 
					   DCB_ATTR_IEEE_PEER_APP,
					   DCB_ATTR_IEEE_APP_UNSPEC,
					   DCB_ATTR_IEEE_APP);
		if (err)
			goto nla_put_failure;
	}

	nla_nest_end(skb, ieee);
	if (dcbx >= 0) {
		err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
		if (err)
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return err;
}

int dcbnl_notify(struct net_device *dev, int event, int cmd,
		 u32 seq, u32 pid)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
	int err;

	if (!ops)
		return -EOPNOTSUPP;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	nlh = nlmsg_put(skb, pid, 0, event, sizeof(*dcb), 0);
	if (nlh == NULL) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = cmd;

	err = dcbnl_ieee_fill(skb, dev);
	if (err < 0) {
		/* Report error to broadcast listeners */
		nlmsg_cancel(skb, nlh);
		kfree_skb(skb);
		rtnl_set_sk_err(net, RTNLGRP_DCB, err);
	} else {
		/* End nlmsg and notify broadcast listeners */
		nlmsg_end(skb, nlh);
		rtnl_notify(skb, net, 0, RTNLGRP_DCB, NULL, GFP_KERNEL);
	}

	return err;
}
EXPORT_SYMBOL(dcbnl_notify);

/* Handle IEEE 802.1Qaz SET commands. If any requested operation cannot
 * be completed, the entire msg is aborted and an error value is returned.
 * No attempt is made to reconcile the case where only part of the
 * cmd can be completed.
 */
static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb,
			  u32 pid, u32 seq, u16 flags)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
	int err = -EOPNOTSUPP;

	if (!ops)
		return err;

	if (!tb[DCB_ATTR_IEEE])
		return -EINVAL;

	err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX,
			       tb[DCB_ATTR_IEEE], dcbnl_ieee_policy);
	if (err)
		return err;

	if (ieee[DCB_ATTR_IEEE_ETS] && ops->ieee_setets) {
		struct ieee_ets *ets = nla_data(ieee[DCB_ATTR_IEEE_ETS]);
		err = ops->ieee_setets(netdev, ets);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
		struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
		err = ops->ieee_setpfc(netdev, pfc);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
		struct nlattr *attr;
		int rem;

		nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
			struct dcb_app *app_data;
			if (nla_type(attr) != DCB_ATTR_IEEE_APP)
				continue;
			app_data = nla_data(attr);
			if (ops->ieee_setapp)
				err = ops->ieee_setapp(netdev, app_data);
			else
				err = dcb_ieee_setapp(netdev, app_data);
			if (err)
				goto err;
		}
	}

err:
	dcbnl_reply(err, RTM_SETDCB, DCB_CMD_IEEE_SET, DCB_ATTR_IEEE,
		    pid, seq, flags);
	dcbnl_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, seq, 0);
	return err;
}

static int dcbnl_ieee_get(struct net_device *netdev, struct nlattr **tb,
			  u32 pid, u32 seq, u16 flags)
{
	struct net *net = dev_net(netdev);
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	int err;
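	/*
	 * Reply layout sketch, derived from dcbnl_ieee_fill() above (not a
	 * normative ABI description):
	 *
	 *	RTM_GETDCB, struct dcbmsg { .cmd = DCB_CMD_IEEE_GET }
	 *	  DCB_ATTR_IFNAME
	 *	  DCB_ATTR_IEEE (nested)
	 *	    DCB_ATTR_IEEE_ETS         (struct ieee_ets, if ieee_getets)
	 *	    DCB_ATTR_IEEE_PFC         (struct ieee_pfc, if ieee_getpfc)
	 *	    DCB_ATTR_IEEE_APP_TABLE   (nested DCB_ATTR_IEEE_APP entries)
	 *	    peer ETS/PFC/APP          (if the peer callbacks are set)
	 *	  DCB_ATTR_DCBX               (only if getdcbx is provided)
	 */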
1428 if (!ops) 1429 return -EOPNOTSUPP; 1430 1431 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 1432 if (!skb) 1433 return -ENOBUFS; 1434 1435 nlh = nlmsg_put(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags); 1436 if (nlh == NULL) { 1437 kfree(skb); 1438 return -EMSGSIZE; 1439 } 1440 1441 dcb = NLMSG_DATA(nlh); 1442 dcb->dcb_family = AF_UNSPEC; 1443 dcb->cmd = DCB_CMD_IEEE_GET; 1444 1445 err = dcbnl_ieee_fill(skb, netdev); 1446 1447 if (err < 0) { 1448 nlmsg_cancel(skb, nlh); 1449 kfree_skb(skb); 1450 } else { 1451 nlmsg_end(skb, nlh); 1452 err = rtnl_unicast(skb, net, pid); 1453 } 1454 1455 return err; 1456 } 1457 1458 static int dcbnl_ieee_del(struct net_device *netdev, struct nlattr **tb, 1459 u32 pid, u32 seq, u16 flags) 1460 { 1461 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; 1462 struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1]; 1463 int err = -EOPNOTSUPP; 1464 1465 if (!ops) 1466 return -EOPNOTSUPP; 1467 1468 if (!tb[DCB_ATTR_IEEE]) 1469 return -EINVAL; 1470 1471 err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX, 1472 tb[DCB_ATTR_IEEE], dcbnl_ieee_policy); 1473 if (err) 1474 return err; 1475 1476 if (ieee[DCB_ATTR_IEEE_APP_TABLE]) { 1477 struct nlattr *attr; 1478 int rem; 1479 1480 nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) { 1481 struct dcb_app *app_data; 1482 1483 if (nla_type(attr) != DCB_ATTR_IEEE_APP) 1484 continue; 1485 app_data = nla_data(attr); 1486 if (ops->ieee_delapp) 1487 err = ops->ieee_delapp(netdev, app_data); 1488 else 1489 err = dcb_ieee_delapp(netdev, app_data); 1490 if (err) 1491 goto err; 1492 } 1493 } 1494 1495 err: 1496 dcbnl_reply(err, RTM_SETDCB, DCB_CMD_IEEE_DEL, DCB_ATTR_IEEE, 1497 pid, seq, flags); 1498 dcbnl_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_DEL, seq, 0); 1499 return err; 1500 } 1501 1502 1503 /* DCBX configuration */ 1504 static int dcbnl_getdcbx(struct net_device *netdev, struct nlattr **tb, 1505 u32 pid, u32 seq, u16 flags) 1506 { 1507 int ret; 1508 1509 if (!netdev->dcbnl_ops->getdcbx) 1510 return -EOPNOTSUPP; 1511 1512 ret = dcbnl_reply(netdev->dcbnl_ops->getdcbx(netdev), RTM_GETDCB, 1513 DCB_CMD_GDCBX, DCB_ATTR_DCBX, pid, seq, flags); 1514 1515 return ret; 1516 } 1517 1518 static int dcbnl_setdcbx(struct net_device *netdev, struct nlattr **tb, 1519 u32 pid, u32 seq, u16 flags) 1520 { 1521 int ret; 1522 u8 value; 1523 1524 if (!netdev->dcbnl_ops->setdcbx) 1525 return -EOPNOTSUPP; 1526 1527 if (!tb[DCB_ATTR_DCBX]) 1528 return -EINVAL; 1529 1530 value = nla_get_u8(tb[DCB_ATTR_DCBX]); 1531 1532 ret = dcbnl_reply(netdev->dcbnl_ops->setdcbx(netdev, value), 1533 RTM_SETDCB, DCB_CMD_SDCBX, DCB_ATTR_DCBX, 1534 pid, seq, flags); 1535 1536 return ret; 1537 } 1538 1539 static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlattr **tb, 1540 u32 pid, u32 seq, u16 flags) 1541 { 1542 struct sk_buff *dcbnl_skb; 1543 struct nlmsghdr *nlh; 1544 struct dcbmsg *dcb; 1545 struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1], *nest; 1546 u8 value; 1547 int ret, i; 1548 int getall = 0; 1549 1550 if (!netdev->dcbnl_ops->getfeatcfg) 1551 return -EOPNOTSUPP; 1552 1553 if (!tb[DCB_ATTR_FEATCFG]) 1554 return -EINVAL; 1555 1556 ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG], 1557 dcbnl_featcfg_nest); 1558 if (ret) 1559 goto err_out; 1560 1561 dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 1562 if (!dcbnl_skb) { 1563 ret = -ENOBUFS; 1564 goto err_out; 1565 } 1566 1567 nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags); 1568 1569 dcb = NLMSG_DATA(nlh); 1570 dcb->dcb_family = AF_UNSPEC; 1571 dcb->cmd = 
DCB_CMD_GFEATCFG; 1572 1573 nest = nla_nest_start(dcbnl_skb, DCB_ATTR_FEATCFG); 1574 if (!nest) { 1575 ret = -EMSGSIZE; 1576 goto nla_put_failure; 1577 } 1578 1579 if (data[DCB_FEATCFG_ATTR_ALL]) 1580 getall = 1; 1581 1582 for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) { 1583 if (!getall && !data[i]) 1584 continue; 1585 1586 ret = netdev->dcbnl_ops->getfeatcfg(netdev, i, &value); 1587 if (!ret) 1588 ret = nla_put_u8(dcbnl_skb, i, value); 1589 1590 if (ret) { 1591 nla_nest_cancel(dcbnl_skb, nest); 1592 goto nla_put_failure; 1593 } 1594 } 1595 nla_nest_end(dcbnl_skb, nest); 1596 1597 nlmsg_end(dcbnl_skb, nlh); 1598 1599 return rtnl_unicast(dcbnl_skb, &init_net, pid); 1600 nla_put_failure: 1601 nlmsg_cancel(dcbnl_skb, nlh); 1602 nlmsg_failure: 1603 kfree_skb(dcbnl_skb); 1604 err_out: 1605 return ret; 1606 } 1607 1608 static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlattr **tb, 1609 u32 pid, u32 seq, u16 flags) 1610 { 1611 struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1]; 1612 int ret, i; 1613 u8 value; 1614 1615 if (!netdev->dcbnl_ops->setfeatcfg) 1616 return -ENOTSUPP; 1617 1618 if (!tb[DCB_ATTR_FEATCFG]) 1619 return -EINVAL; 1620 1621 ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG], 1622 dcbnl_featcfg_nest); 1623 1624 if (ret) 1625 goto err; 1626 1627 for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) { 1628 if (data[i] == NULL) 1629 continue; 1630 1631 value = nla_get_u8(data[i]); 1632 1633 ret = netdev->dcbnl_ops->setfeatcfg(netdev, i, value); 1634 1635 if (ret) 1636 goto err; 1637 } 1638 err: 1639 dcbnl_reply(ret, RTM_SETDCB, DCB_CMD_SFEATCFG, DCB_ATTR_FEATCFG, 1640 pid, seq, flags); 1641 1642 return ret; 1643 } 1644 1645 /* Handle CEE DCBX GET commands. */ 1646 static int dcbnl_cee_get(struct net_device *netdev, struct nlattr **tb, 1647 u32 pid, u32 seq, u16 flags) 1648 { 1649 struct sk_buff *skb; 1650 struct nlmsghdr *nlh; 1651 struct dcbmsg *dcb; 1652 struct nlattr *cee; 1653 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; 1654 int err; 1655 1656 if (!ops) 1657 return -EOPNOTSUPP; 1658 1659 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 1660 if (!skb) 1661 return -ENOBUFS; 1662 1663 nlh = NLMSG_NEW(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags); 1664 1665 dcb = NLMSG_DATA(nlh); 1666 dcb->dcb_family = AF_UNSPEC; 1667 dcb->cmd = DCB_CMD_CEE_GET; 1668 1669 NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name); 1670 1671 cee = nla_nest_start(skb, DCB_ATTR_CEE); 1672 if (!cee) 1673 goto nla_put_failure; 1674 1675 /* get peer info if available */ 1676 if (ops->cee_peer_getpg) { 1677 struct cee_pg pg; 1678 err = ops->cee_peer_getpg(netdev, &pg); 1679 if (!err) 1680 NLA_PUT(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg); 1681 } 1682 1683 if (ops->cee_peer_getpfc) { 1684 struct cee_pfc pfc; 1685 err = ops->cee_peer_getpfc(netdev, &pfc); 1686 if (!err) 1687 NLA_PUT(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc); 1688 } 1689 1690 if (ops->peer_getappinfo && ops->peer_getapptable) { 1691 err = dcbnl_build_peer_app(netdev, skb, 1692 DCB_ATTR_CEE_PEER_APP_TABLE, 1693 DCB_ATTR_CEE_PEER_APP_INFO, 1694 DCB_ATTR_CEE_PEER_APP); 1695 if (err) 1696 goto nla_put_failure; 1697 } 1698 1699 nla_nest_end(skb, cee); 1700 nlmsg_end(skb, nlh); 1701 1702 return rtnl_unicast(skb, &init_net, pid); 1703 nla_put_failure: 1704 nlmsg_cancel(skb, nlh); 1705 nlmsg_failure: 1706 kfree_skb(skb); 1707 return -1; 1708 } 1709 1710 static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 1711 { 1712 struct net *net = sock_net(skb->sk); 1713 
struct net_device *netdev; 1714 struct dcbmsg *dcb = (struct dcbmsg *)NLMSG_DATA(nlh); 1715 struct nlattr *tb[DCB_ATTR_MAX + 1]; 1716 u32 pid = skb ? NETLINK_CB(skb).pid : 0; 1717 int ret = -EINVAL; 1718 1719 if (!net_eq(net, &init_net)) 1720 return -EINVAL; 1721 1722 ret = nlmsg_parse(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX, 1723 dcbnl_rtnl_policy); 1724 if (ret < 0) 1725 return ret; 1726 1727 if (!tb[DCB_ATTR_IFNAME]) 1728 return -EINVAL; 1729 1730 netdev = dev_get_by_name(&init_net, nla_data(tb[DCB_ATTR_IFNAME])); 1731 if (!netdev) 1732 return -EINVAL; 1733 1734 if (!netdev->dcbnl_ops) 1735 goto errout; 1736 1737 switch (dcb->cmd) { 1738 case DCB_CMD_GSTATE: 1739 ret = dcbnl_getstate(netdev, tb, pid, nlh->nlmsg_seq, 1740 nlh->nlmsg_flags); 1741 goto out; 1742 case DCB_CMD_PFC_GCFG: 1743 ret = dcbnl_getpfccfg(netdev, tb, pid, nlh->nlmsg_seq, 1744 nlh->nlmsg_flags); 1745 goto out; 1746 case DCB_CMD_GPERM_HWADDR: 1747 ret = dcbnl_getperm_hwaddr(netdev, tb, pid, nlh->nlmsg_seq, 1748 nlh->nlmsg_flags); 1749 goto out; 1750 case DCB_CMD_PGTX_GCFG: 1751 ret = dcbnl_pgtx_getcfg(netdev, tb, pid, nlh->nlmsg_seq, 1752 nlh->nlmsg_flags); 1753 goto out; 1754 case DCB_CMD_PGRX_GCFG: 1755 ret = dcbnl_pgrx_getcfg(netdev, tb, pid, nlh->nlmsg_seq, 1756 nlh->nlmsg_flags); 1757 goto out; 1758 case DCB_CMD_BCN_GCFG: 1759 ret = dcbnl_bcn_getcfg(netdev, tb, pid, nlh->nlmsg_seq, 1760 nlh->nlmsg_flags); 1761 goto out; 1762 case DCB_CMD_SSTATE: 1763 ret = dcbnl_setstate(netdev, tb, pid, nlh->nlmsg_seq, 1764 nlh->nlmsg_flags); 1765 goto out; 1766 case DCB_CMD_PFC_SCFG: 1767 ret = dcbnl_setpfccfg(netdev, tb, pid, nlh->nlmsg_seq, 1768 nlh->nlmsg_flags); 1769 goto out; 1770 1771 case DCB_CMD_SET_ALL: 1772 ret = dcbnl_setall(netdev, tb, pid, nlh->nlmsg_seq, 1773 nlh->nlmsg_flags); 1774 goto out; 1775 case DCB_CMD_PGTX_SCFG: 1776 ret = dcbnl_pgtx_setcfg(netdev, tb, pid, nlh->nlmsg_seq, 1777 nlh->nlmsg_flags); 1778 goto out; 1779 case DCB_CMD_PGRX_SCFG: 1780 ret = dcbnl_pgrx_setcfg(netdev, tb, pid, nlh->nlmsg_seq, 1781 nlh->nlmsg_flags); 1782 goto out; 1783 case DCB_CMD_GCAP: 1784 ret = dcbnl_getcap(netdev, tb, pid, nlh->nlmsg_seq, 1785 nlh->nlmsg_flags); 1786 goto out; 1787 case DCB_CMD_GNUMTCS: 1788 ret = dcbnl_getnumtcs(netdev, tb, pid, nlh->nlmsg_seq, 1789 nlh->nlmsg_flags); 1790 goto out; 1791 case DCB_CMD_SNUMTCS: 1792 ret = dcbnl_setnumtcs(netdev, tb, pid, nlh->nlmsg_seq, 1793 nlh->nlmsg_flags); 1794 goto out; 1795 case DCB_CMD_PFC_GSTATE: 1796 ret = dcbnl_getpfcstate(netdev, tb, pid, nlh->nlmsg_seq, 1797 nlh->nlmsg_flags); 1798 goto out; 1799 case DCB_CMD_PFC_SSTATE: 1800 ret = dcbnl_setpfcstate(netdev, tb, pid, nlh->nlmsg_seq, 1801 nlh->nlmsg_flags); 1802 goto out; 1803 case DCB_CMD_BCN_SCFG: 1804 ret = dcbnl_bcn_setcfg(netdev, tb, pid, nlh->nlmsg_seq, 1805 nlh->nlmsg_flags); 1806 goto out; 1807 case DCB_CMD_GAPP: 1808 ret = dcbnl_getapp(netdev, tb, pid, nlh->nlmsg_seq, 1809 nlh->nlmsg_flags); 1810 goto out; 1811 case DCB_CMD_SAPP: 1812 ret = dcbnl_setapp(netdev, tb, pid, nlh->nlmsg_seq, 1813 nlh->nlmsg_flags); 1814 goto out; 1815 case DCB_CMD_IEEE_SET: 1816 ret = dcbnl_ieee_set(netdev, tb, pid, nlh->nlmsg_seq, 1817 nlh->nlmsg_flags); 1818 goto out; 1819 case DCB_CMD_IEEE_GET: 1820 ret = dcbnl_ieee_get(netdev, tb, pid, nlh->nlmsg_seq, 1821 nlh->nlmsg_flags); 1822 goto out; 1823 case DCB_CMD_IEEE_DEL: 1824 ret = dcbnl_ieee_del(netdev, tb, pid, nlh->nlmsg_seq, 1825 nlh->nlmsg_flags); 1826 goto out; 1827 case DCB_CMD_GDCBX: 1828 ret = dcbnl_getdcbx(netdev, tb, pid, nlh->nlmsg_seq, 1829 nlh->nlmsg_flags); 1830 goto 
out; 1831 case DCB_CMD_SDCBX: 1832 ret = dcbnl_setdcbx(netdev, tb, pid, nlh->nlmsg_seq, 1833 nlh->nlmsg_flags); 1834 goto out; 1835 case DCB_CMD_GFEATCFG: 1836 ret = dcbnl_getfeatcfg(netdev, tb, pid, nlh->nlmsg_seq, 1837 nlh->nlmsg_flags); 1838 goto out; 1839 case DCB_CMD_SFEATCFG: 1840 ret = dcbnl_setfeatcfg(netdev, tb, pid, nlh->nlmsg_seq, 1841 nlh->nlmsg_flags); 1842 goto out; 1843 case DCB_CMD_CEE_GET: 1844 ret = dcbnl_cee_get(netdev, tb, pid, nlh->nlmsg_seq, 1845 nlh->nlmsg_flags); 1846 goto out; 1847 default: 1848 goto errout; 1849 } 1850 errout: 1851 ret = -EINVAL; 1852 out: 1853 dev_put(netdev); 1854 return ret; 1855 } 1856 1857 /** 1858 * dcb_getapp - retrieve the DCBX application user priority 1859 * 1860 * On success returns a non-zero 802.1p user priority bitmap 1861 * otherwise returns 0 as the invalid user priority bitmap to 1862 * indicate an error. 1863 */ 1864 u8 dcb_getapp(struct net_device *dev, struct dcb_app *app) 1865 { 1866 struct dcb_app_type *itr; 1867 u8 prio = 0; 1868 1869 spin_lock(&dcb_lock); 1870 list_for_each_entry(itr, &dcb_app_list, list) { 1871 if (itr->app.selector == app->selector && 1872 itr->app.protocol == app->protocol && 1873 (strncmp(itr->name, dev->name, IFNAMSIZ) == 0)) { 1874 prio = itr->app.priority; 1875 break; 1876 } 1877 } 1878 spin_unlock(&dcb_lock); 1879 1880 return prio; 1881 } 1882 EXPORT_SYMBOL(dcb_getapp); 1883 1884 /** 1885 * dcb_setapp - add CEE dcb application data to app list 1886 * 1887 * Priority 0 is an invalid priority in CEE spec. This routine 1888 * removes applications from the app list if the priority is 1889 * set to zero. 1890 */ 1891 int dcb_setapp(struct net_device *dev, struct dcb_app *new) 1892 { 1893 struct dcb_app_type *itr; 1894 struct dcb_app_type event; 1895 1896 memcpy(&event.name, dev->name, sizeof(event.name)); 1897 memcpy(&event.app, new, sizeof(event.app)); 1898 1899 spin_lock(&dcb_lock); 1900 /* Search for existing match and replace */ 1901 list_for_each_entry(itr, &dcb_app_list, list) { 1902 if (itr->app.selector == new->selector && 1903 itr->app.protocol == new->protocol && 1904 (strncmp(itr->name, dev->name, IFNAMSIZ) == 0)) { 1905 if (new->priority) 1906 itr->app.priority = new->priority; 1907 else { 1908 list_del(&itr->list); 1909 kfree(itr); 1910 } 1911 goto out; 1912 } 1913 } 1914 /* App type does not exist add new application type */ 1915 if (new->priority) { 1916 struct dcb_app_type *entry; 1917 entry = kmalloc(sizeof(struct dcb_app_type), GFP_ATOMIC); 1918 if (!entry) { 1919 spin_unlock(&dcb_lock); 1920 return -ENOMEM; 1921 } 1922 1923 memcpy(&entry->app, new, sizeof(*new)); 1924 strncpy(entry->name, dev->name, IFNAMSIZ); 1925 list_add(&entry->list, &dcb_app_list); 1926 } 1927 out: 1928 spin_unlock(&dcb_lock); 1929 call_dcbevent_notifiers(DCB_APP_EVENT, &event); 1930 return 0; 1931 } 1932 EXPORT_SYMBOL(dcb_setapp); 1933 1934 /** 1935 * dcb_ieee_getapp_mask - retrieve the IEEE DCB application priority 1936 * 1937 * Helper routine which on success returns a non-zero 802.1Qaz user 1938 * priority bitmap otherwise returns 0 to indicate the dcb_app was 1939 * not found in APP list. 
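 *
 * For example (hypothetical entries): if the list holds two entries for the
 * same selector/protocol on this device, one at priority 3 and one at
 * priority 4, the returned mask is (1 << 3) | (1 << 4) = 0x18.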
 */
u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
{
	struct dcb_app_type *itr;
	u8 prio = 0;

	spin_lock(&dcb_lock);
	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->app.selector == app->selector &&
		    itr->app.protocol == app->protocol &&
		    (strncmp(itr->name, dev->name, IFNAMSIZ) == 0)) {
			prio |= 1 << itr->app.priority;
		}
	}
	spin_unlock(&dcb_lock);

	return prio;
}
EXPORT_SYMBOL(dcb_ieee_getapp_mask);

/**
 * dcb_ieee_setapp - add IEEE dcb application data to app list
 *
 * This adds Application data to the list. Multiple application
 * entries may exist for the same selector and protocol as long
 * as the priorities are different.
 */
int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
{
	struct dcb_app_type *itr, *entry;
	struct dcb_app_type event;
	int err = 0;

	memcpy(&event.name, dev->name, sizeof(event.name));
	memcpy(&event.app, new, sizeof(event.app));

	spin_lock(&dcb_lock);
	/* Search for existing match and abort if found */
	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->app.selector == new->selector &&
		    itr->app.protocol == new->protocol &&
		    itr->app.priority == new->priority &&
		    (strncmp(itr->name, dev->name, IFNAMSIZ) == 0)) {
			err = -EEXIST;
			goto out;
		}
	}

	/* App entry does not exist, add a new entry */
	entry = kmalloc(sizeof(struct dcb_app_type), GFP_ATOMIC);
	if (!entry) {
		err = -ENOMEM;
		goto out;
	}

	memcpy(&entry->app, new, sizeof(*new));
	strncpy(entry->name, dev->name, IFNAMSIZ);
	list_add(&entry->list, &dcb_app_list);
out:
	spin_unlock(&dcb_lock);
	if (!err)
		call_dcbevent_notifiers(DCB_APP_EVENT, &event);
	return err;
}
EXPORT_SYMBOL(dcb_ieee_setapp);

/**
 * dcb_ieee_delapp - delete IEEE dcb application data from list
 *
 * This removes matching APP data from the APP list.
 */
int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
{
	struct dcb_app_type *itr;
	struct dcb_app_type event;
	int err = -ENOENT;

	memcpy(&event.name, dev->name, sizeof(event.name));
	memcpy(&event.app, del, sizeof(event.app));

	spin_lock(&dcb_lock);
	/* Search for existing match and remove it.
 */
	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->app.selector == del->selector &&
		    itr->app.protocol == del->protocol &&
		    itr->app.priority == del->priority &&
		    (strncmp(itr->name, dev->name, IFNAMSIZ) == 0)) {
			list_del(&itr->list);
			kfree(itr);
			err = 0;
			goto out;
		}
	}

out:
	spin_unlock(&dcb_lock);
	if (!err)
		call_dcbevent_notifiers(DCB_APP_EVENT, &event);
	return err;
}
EXPORT_SYMBOL(dcb_ieee_delapp);

static void dcb_flushapp(void)
{
	struct dcb_app_type *app;
	struct dcb_app_type *tmp;

	spin_lock(&dcb_lock);
	list_for_each_entry_safe(app, tmp, &dcb_app_list, list) {
		list_del(&app->list);
		kfree(app);
	}
	spin_unlock(&dcb_lock);
}

static int __init dcbnl_init(void)
{
	INIT_LIST_HEAD(&dcb_app_list);

	rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL, NULL);

	return 0;
}
module_init(dcbnl_init);

static void __exit dcbnl_exit(void)
{
	rtnl_unregister(PF_UNSPEC, RTM_GETDCB);
	rtnl_unregister(PF_UNSPEC, RTM_SETDCB);
	dcb_flushapp();
}
module_exit(dcbnl_exit);
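/*
 * Illustrative sketch, not part of this file: a driver exposes itself to this
 * interface by pointing netdev->dcbnl_ops at a populated struct
 * dcbnl_rtnl_ops.  The callback names below are real members of that
 * structure; the foo_* identifiers are hypothetical placeholders.
 *
 *	static int foo_ieee_getets(struct net_device *dev, struct ieee_ets *ets)
 *	{
 *		// fill *ets from driver/hardware state
 *		return 0;
 *	}
 *
 *	static const struct dcbnl_rtnl_ops foo_dcbnl_ops = {
 *		.ieee_getets	= foo_ieee_getets,
 *		// .ieee_setets, .ieee_getpfc, .getstate, .setapp, ...
 *	};
 *
 *	// in the driver's netdev setup path:
 *	//	netdev->dcbnl_ops = &foo_dcbnl_ops;
 *
 * Callbacks left NULL are treated as unsupported: each dcbnl_* handler above
 * checks the individual function pointer before calling it.
 */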