1 /* 2 * Copyright (c) 2008-2011, Intel Corporation. 3 * 4 * This program is free software; you can redistribute it and/or modify it 5 * under the terms and conditions of the GNU General Public License, 6 * version 2, as published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope it will be useful, but WITHOUT 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 * more details. 12 * 13 * You should have received a copy of the GNU General Public License along with 14 * this program; if not, see <http://www.gnu.org/licenses/>. 15 * 16 * Description: Data Center Bridging netlink interface 17 * Author: Lucy Liu <lucy.liu@intel.com> 18 */ 19 20 #include <linux/netdevice.h> 21 #include <linux/netlink.h> 22 #include <linux/slab.h> 23 #include <net/netlink.h> 24 #include <net/rtnetlink.h> 25 #include <linux/dcbnl.h> 26 #include <net/dcbevent.h> 27 #include <linux/rtnetlink.h> 28 #include <linux/init.h> 29 #include <net/sock.h> 30 31 /* Data Center Bridging (DCB) is a collection of Ethernet enhancements 32 * intended to allow network traffic with differing requirements 33 * (highly reliable, no drops vs. best effort vs. low latency) to operate 34 * and co-exist on Ethernet. Current DCB features are: 35 * 36 * Enhanced Transmission Selection (aka Priority Grouping [PG]) - provides a 37 * framework for assigning bandwidth guarantees to traffic classes. 38 * 39 * Priority-based Flow Control (PFC) - provides a flow control mechanism which 40 * can work independently for each 802.1p priority. 41 * 42 * Congestion Notification - provides a mechanism for end-to-end congestion 43 * control for protocols which do not have built-in congestion management. 
 *
 * More information about the emerging standards for these Ethernet features
 * can be found at: http://www.ieee802.org/1/pages/dcbridges.html
 *
 * This file implements an rtnetlink interface to allow configuration of DCB
 * features for capable devices.
 */

/**************** DCB attribute policies *************************************/

/* DCB netlink attributes policy (top-level RTM_GETDCB/RTM_SETDCB attrs) */
static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
	[DCB_ATTR_IFNAME] = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1},
	[DCB_ATTR_STATE] = {.type = NLA_U8},
	[DCB_ATTR_PFC_CFG] = {.type = NLA_NESTED},
	[DCB_ATTR_PG_CFG] = {.type = NLA_NESTED},
	[DCB_ATTR_SET_ALL] = {.type = NLA_U8},
	[DCB_ATTR_PERM_HWADDR] = {.type = NLA_FLAG},
	[DCB_ATTR_CAP] = {.type = NLA_NESTED},
	[DCB_ATTR_PFC_STATE] = {.type = NLA_U8},
	[DCB_ATTR_BCN] = {.type = NLA_NESTED},
	[DCB_ATTR_APP] = {.type = NLA_NESTED},
	[DCB_ATTR_IEEE] = {.type = NLA_NESTED},
	[DCB_ATTR_DCBX] = {.type = NLA_U8},
	[DCB_ATTR_FEATCFG] = {.type = NLA_NESTED},
};

/* DCB priority flow control to User Priority nested attributes */
static const struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
	[DCB_PFC_UP_ATTR_0] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_1] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_2] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_3] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_4] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_5] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_6] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_7] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_ALL] = {.type = NLA_FLAG},
};

/* DCB priority grouping nested attributes */
static const struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
	[DCB_PG_ATTR_TC_0] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_1] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_2] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_3] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_4] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_5] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_6] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_7] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_ALL] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_BW_ID_0] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_1] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_2] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_3] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_4] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_5] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_6] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_7] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_ALL] = {.type = NLA_FLAG},
};

/* DCB traffic class nested attributes. */
static const struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
	[DCB_TC_ATTR_PARAM_PGID] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_UP_MAPPING] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_STRICT_PRIO] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_BW_PCT] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_ALL] = {.type = NLA_FLAG},
};

/* DCB capabilities nested attributes. */
static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
	[DCB_CAP_ATTR_ALL] = {.type = NLA_FLAG},
	[DCB_CAP_ATTR_PG] = {.type = NLA_U8},
	[DCB_CAP_ATTR_PFC] = {.type = NLA_U8},
	[DCB_CAP_ATTR_UP2TC] = {.type = NLA_U8},
	[DCB_CAP_ATTR_PG_TCS] = {.type = NLA_U8},
	[DCB_CAP_ATTR_PFC_TCS] = {.type = NLA_U8},
	[DCB_CAP_ATTR_GSP] = {.type = NLA_U8},
	[DCB_CAP_ATTR_BCN] = {.type = NLA_U8},
	[DCB_CAP_ATTR_DCBX] = {.type = NLA_U8},
};

/* DCB number of traffic classes nested attributes. */
static const struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
	[DCB_NUMTCS_ATTR_ALL] = {.type = NLA_FLAG},
	[DCB_NUMTCS_ATTR_PG] = {.type = NLA_U8},
	[DCB_NUMTCS_ATTR_PFC] = {.type = NLA_U8},
};

/* DCB BCN (Backward Congestion Notification) nested attributes. */
static const struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
	[DCB_BCN_ATTR_RP_0] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_1] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_2] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_3] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_4] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_5] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_6] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_7] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_ALL] = {.type = NLA_FLAG},
	[DCB_BCN_ATTR_BCNA_0] = {.type = NLA_U32},
	[DCB_BCN_ATTR_BCNA_1] = {.type = NLA_U32},
	[DCB_BCN_ATTR_ALPHA] = {.type = NLA_U32},
	[DCB_BCN_ATTR_BETA] = {.type = NLA_U32},
	[DCB_BCN_ATTR_GD] = {.type = NLA_U32},
	[DCB_BCN_ATTR_GI] = {.type = NLA_U32},
	[DCB_BCN_ATTR_TMAX] = {.type = NLA_U32},
	[DCB_BCN_ATTR_TD] = {.type = NLA_U32},
	[DCB_BCN_ATTR_RMIN] = {.type = NLA_U32},
	[DCB_BCN_ATTR_W] = {.type = NLA_U32},
	[DCB_BCN_ATTR_RD] = {.type = NLA_U32},
	[DCB_BCN_ATTR_RU] = {.type = NLA_U32},
	[DCB_BCN_ATTR_WRTT] = {.type = NLA_U32},
	[DCB_BCN_ATTR_RI] = {.type = NLA_U32},
	[DCB_BCN_ATTR_C] = {.type = NLA_U32},
	[DCB_BCN_ATTR_ALL] = {.type = NLA_FLAG},
};

/* DCB APP nested attributes. */
static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
	[DCB_APP_ATTR_IDTYPE] = {.type = NLA_U8},
	[DCB_APP_ATTR_ID] = {.type = NLA_U16},
	[DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8},
};

/* IEEE 802.1Qaz nested attributes.
 */
static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = {
	[DCB_ATTR_IEEE_ETS] = {.len = sizeof(struct ieee_ets)},
	[DCB_ATTR_IEEE_PFC] = {.len = sizeof(struct ieee_pfc)},
	[DCB_ATTR_IEEE_APP_TABLE] = {.type = NLA_NESTED},
	[DCB_ATTR_IEEE_MAXRATE] = {.len = sizeof(struct ieee_maxrate)},
	[DCB_ATTR_IEEE_QCN] = {.len = sizeof(struct ieee_qcn)},
	[DCB_ATTR_IEEE_QCN_STATS] = {.len = sizeof(struct ieee_qcn_stats)},
	[DCB_ATTR_DCB_BUFFER] = {.len = sizeof(struct dcbnl_buffer)},
};

/* DCB feature configuration nested attributes. */
static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = {
	[DCB_FEATCFG_ATTR_ALL] = {.type = NLA_FLAG},
	[DCB_FEATCFG_ATTR_PG] = {.type = NLA_U8},
	[DCB_FEATCFG_ATTR_PFC] = {.type = NLA_U8},
	[DCB_FEATCFG_ATTR_APP] = {.type = NLA_U8},
};

/* Per-ifindex dcb_app entries; both protected by dcb_lock. */
static LIST_HEAD(dcb_app_list);
static DEFINE_SPINLOCK(dcb_lock);

/* Allocate a netlink skb pre-loaded with a struct dcbmsg header for
 * message @type / DCB command @cmd; optionally return the nlmsghdr
 * through @nlhp.  Returns NULL on allocation failure.
 */
static struct sk_buff *dcbnl_newmsg(int type, u8 cmd, u32 port, u32 seq,
				    u32 flags, struct nlmsghdr **nlhp)
{
	struct sk_buff *skb;
	struct dcbmsg *dcb;
	struct nlmsghdr *nlh;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return NULL;

	/* Cannot fail: a freshly allocated NLMSG_DEFAULT_SIZE skb always
	 * has room for the fixed-size dcbmsg header.
	 */
	nlh = nlmsg_put(skb, port, seq, type, sizeof(*dcb), flags);
	BUG_ON(!nlh);

	dcb = nlmsg_data(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = cmd;
	dcb->dcb_pad = 0;

	if (nlhp)
		*nlhp = nlh;

	return skb;
}

/* DCB_CMD_GSTATE: reply with the device's DCB enabled/disabled state. */
static int dcbnl_getstate(struct net_device *netdev, struct nlmsghdr *nlh,
			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	/* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */
	if (!netdev->dcbnl_ops->getstate)
		return -EOPNOTSUPP;

	return nla_put_u8(skb, DCB_ATTR_STATE,
			  netdev->dcbnl_ops->getstate(netdev));
}

/* DCB_CMD_PFC_GCFG: dump the PFC on/off setting for each requested
 * user priority (or all of them if DCB_PFC_UP_ATTR_ALL is present).
 */
static int dcbnl_getpfccfg(struct net_device *netdev, struct nlmsghdr *nlh,
			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest;
	u8 value;
	int ret;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_PFC_CFG])
		return -EINVAL;

	if (!netdev->dcbnl_ops->getpfccfg)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
			       tb[DCB_ATTR_PFC_CFG], dcbnl_pfc_up_nest, NULL);
	if (ret)
		return ret;

	nest = nla_nest_start(skb, DCB_ATTR_PFC_CFG);
	if (!nest)
		return -EMSGSIZE;

	if (data[DCB_PFC_UP_ATTR_ALL])
		getall = 1;

	for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
		if (!getall && !data[i])
			continue;

		netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0,
					     &value);
		ret = nla_put_u8(skb, i, value);
		if (ret) {
			nla_nest_cancel(skb, nest);
			return ret;
		}
	}
	nla_nest_end(skb, nest);

	return 0;
}

/* DCB_CMD_GPERM_HWADDR: reply with the device's permanent MAC address. */
static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlmsghdr *nlh,
				u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	u8 perm_addr[MAX_ADDR_LEN];

	if (!netdev->dcbnl_ops->getpermhwaddr)
		return -EOPNOTSUPP;

	memset(perm_addr, 0, sizeof(perm_addr));
	netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);

	return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr);
}

/* DCB_CMD_GCAP: dump the requested capability flags; capabilities the
 * driver reports a failure for are silently skipped.
 */
static int dcbnl_getcap(struct net_device *netdev, struct nlmsghdr *nlh,
			u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest;
	u8 value;
	int ret;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_CAP])
		return -EINVAL;

	if (!netdev->dcbnl_ops->getcap)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(data, DCB_CAP_ATTR_MAX, tb[DCB_ATTR_CAP],
			       dcbnl_cap_nest, NULL);
	if (ret)
		return ret;

	nest = nla_nest_start(skb, DCB_ATTR_CAP);
	if (!nest)
		return -EMSGSIZE;

	if (data[DCB_CAP_ATTR_ALL])
		getall = 1;

	for (i = DCB_CAP_ATTR_ALL+1; i <= DCB_CAP_ATTR_MAX; i++) {
		if (!getall && !data[i])
			continue;

		if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) {
			ret = nla_put_u8(skb, i, value);
			if (ret) {
				nla_nest_cancel(skb, nest);
				return ret;
			}
		}
	}
	nla_nest_end(skb, nest);

	return 0;
}

/* DCB_CMD_GNUMTCS: dump the number of traffic classes supported for the
 * requested features (PG/PFC); a driver error aborts with -EINVAL.
 */
static int dcbnl_getnumtcs(struct net_device *netdev, struct nlmsghdr *nlh,
			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest;
	u8 value;
	int ret;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_NUMTCS])
		return -EINVAL;

	if (!netdev->dcbnl_ops->getnumtcs)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
			       dcbnl_numtcs_nest, NULL);
	if (ret)
		return ret;

	nest = nla_nest_start(skb, DCB_ATTR_NUMTCS);
	if (!nest)
		return -EMSGSIZE;

	if (data[DCB_NUMTCS_ATTR_ALL])
		getall = 1;

	for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
		if (!getall && !data[i])
			continue;

		ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value);
		if (!ret) {
			ret = nla_put_u8(skb, i, value);
			if (ret) {
				nla_nest_cancel(skb, nest);
				return ret;
			}
		} else
			return -EINVAL;
	}
	nla_nest_end(skb, nest);

	return 0;
}

/* DCB_CMD_SNUMTCS: push the supplied per-feature TC counts to the driver.
 * The reply carries 1 if any driver call failed, 0 on full success.
 */
static int dcbnl_setnumtcs(struct net_device *netdev, struct nlmsghdr *nlh,
			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1];
	int ret;
	u8 value;
	int i;

	if (!tb[DCB_ATTR_NUMTCS])
		return -EINVAL;

	if (!netdev->dcbnl_ops->setnumtcs)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
			       dcbnl_numtcs_nest, NULL);
	if (ret)
		return ret;

	for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
		if (data[i] == NULL)
			continue;

		value = nla_get_u8(data[i]);

		ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value);
		if (ret)
			break;
	}

	return nla_put_u8(skb, DCB_ATTR_NUMTCS, !!ret);
}

/* DCB_CMD_PFC_GSTATE: reply with the device's PFC enabled/disabled state. */
static int dcbnl_getpfcstate(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	if (!netdev->dcbnl_ops->getpfcstate)
		return -EOPNOTSUPP;

	return nla_put_u8(skb, DCB_ATTR_PFC_STATE,
			  netdev->dcbnl_ops->getpfcstate(netdev));
}

/* DCB_CMD_PFC_SSTATE: set the device's PFC state; always replies 0. */
static int dcbnl_setpfcstate(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	u8 value;

	if (!tb[DCB_ATTR_PFC_STATE])
		return -EINVAL;

	if (!netdev->dcbnl_ops->setpfcstate)
		return -EOPNOTSUPP;

	value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]);

	netdev->dcbnl_ops->setpfcstate(netdev, value);

	return nla_put_u8(skb, DCB_ATTR_PFC_STATE, 0);
}

/* DCB_CMD_GAPP: look up the priority for an (idtype, id) application
 * entry, preferring the driver's getapp op over the generic app table.
 */
static int dcbnl_getapp(struct net_device *netdev, struct nlmsghdr *nlh,
			u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *app_nest;
	struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
	u16 id;
	u8 up, idtype;
	int ret;

	if (!tb[DCB_ATTR_APP])
		return -EINVAL;

	ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
			       dcbnl_app_nest, NULL);
	if (ret)
		return ret;

	/* all must be non-null */
	if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
	    (!app_tb[DCB_APP_ATTR_ID]))
		return -EINVAL;

	/* either by eth type or by socket number */
	idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
	if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
	    (idtype != DCB_APP_IDTYPE_PORTNUM))
		return -EINVAL;

	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);

	if (netdev->dcbnl_ops->getapp) {
		ret = netdev->dcbnl_ops->getapp(netdev, idtype, id);
		if (ret < 0)
			return ret;
		else
			up = ret;
	} else {
		struct dcb_app app = {
					.selector = idtype,
					.protocol = id,
				     };
		up = dcb_getapp(netdev, &app);
	}

	app_nest = nla_nest_start(skb, DCB_ATTR_APP);
	if (!app_nest)
		return -EMSGSIZE;

	ret = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE, idtype);
	if (ret)
		goto out_cancel;

	ret = nla_put_u16(skb, DCB_APP_ATTR_ID, id);
	if (ret)
		goto out_cancel;

	ret = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY, up);
	if (ret)
		goto out_cancel;

	nla_nest_end(skb, app_nest);

	return 0;

out_cancel:
	nla_nest_cancel(skb, app_nest);
	return ret;
}

/* DCB_CMD_SAPP: store an (idtype, id) -> priority application entry via
 * the driver's setapp op or the generic app table, then notify listeners.
 */
static int dcbnl_setapp(struct net_device *netdev, struct nlmsghdr *nlh,
			u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	int ret;
	u16 id;
	u8 up, idtype;
	struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];

	if (!tb[DCB_ATTR_APP])
		return -EINVAL;

	ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
			       dcbnl_app_nest, NULL);
	if (ret)
		return ret;

	/* all must be non-null */
	if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
	    (!app_tb[DCB_APP_ATTR_ID]) ||
	    (!app_tb[DCB_APP_ATTR_PRIORITY]))
		return -EINVAL;

	/* either by eth type or by socket number */
	idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
	if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
	    (idtype != DCB_APP_IDTYPE_PORTNUM))
		return -EINVAL;

	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
	up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]);

	if (netdev->dcbnl_ops->setapp) {
		ret = netdev->dcbnl_ops->setapp(netdev, idtype, id, up);
		if (ret < 0)
			return ret;
	} else {
		struct dcb_app app;
		app.selector = idtype;
		app.protocol = id;
		app.priority = up;
		ret = dcb_setapp(netdev, &app);
	}

	ret = nla_put_u8(skb, DCB_ATTR_APP, ret);
	dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SAPP, seq, 0);

	return ret;
}

/* Shared GET handler for the PG config commands; dir selects the ring
 * direction (Tx = 0, Rx = 1).
 */
static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     struct
nlattr **tb, struct sk_buff *skb, int dir)
{
	struct nlattr *pg_nest, *param_nest, *data;
	struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
	struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
	u8 prio, pgid, tc_pct, up_map;
	int ret;
	int getall = 0;
	int i;

	if (!tb[DCB_ATTR_PG_CFG])
		return -EINVAL;

	/* Both directions' ops are required even though only one is used,
	 * matching the original all-or-nothing capability check.
	 */
	if (!netdev->dcbnl_ops->getpgtccfgtx ||
	    !netdev->dcbnl_ops->getpgtccfgrx ||
	    !netdev->dcbnl_ops->getpgbwgcfgtx ||
	    !netdev->dcbnl_ops->getpgbwgcfgrx)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX, tb[DCB_ATTR_PG_CFG],
			       dcbnl_pg_nest, NULL);
	if (ret)
		return ret;

	pg_nest = nla_nest_start(skb, DCB_ATTR_PG_CFG);
	if (!pg_nest)
		return -EMSGSIZE;

	if (pg_tb[DCB_PG_ATTR_TC_ALL])
		getall = 1;

	/* Per-TC parameters (PGID, UP mapping, strict prio, bandwidth %) */
	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		if (!getall && !pg_tb[i])
			continue;

		/* TC_ALL carries the parameter filter for every TC */
		if (pg_tb[DCB_PG_ATTR_TC_ALL])
			data = pg_tb[DCB_PG_ATTR_TC_ALL];
		else
			data = pg_tb[i];
		ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX, data,
				       dcbnl_tc_param_nest, NULL);
		if (ret)
			goto err_pg;

		param_nest = nla_nest_start(skb, i);
		if (!param_nest)
			goto err_pg;

		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->getpgtccfgrx(netdev,
					i - DCB_PG_ATTR_TC_0, &prio,
					&pgid, &tc_pct, &up_map);
		} else {
			/* Tx */
			netdev->dcbnl_ops->getpgtccfgtx(netdev,
					i - DCB_PG_ATTR_TC_0, &prio,
					&pgid, &tc_pct, &up_map);
		}

		if (param_tb[DCB_TC_ATTR_PARAM_PGID] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(skb,
					 DCB_TC_ATTR_PARAM_PGID, pgid);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(skb,
					 DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(skb,
					 DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT,
					 tc_pct);
			if (ret)
				goto err_param;
		}
		nla_nest_end(skb, param_nest);
	}

	if (pg_tb[DCB_PG_ATTR_BW_ID_ALL])
		getall = 1;
	else
		getall = 0;

	/* Per-bandwidth-group percentages */
	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		if (!getall && !pg_tb[i])
			continue;

		tc_pct = DCB_ATTR_VALUE_UNDEFINED;

		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->getpgbwgcfgrx(netdev,
					i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
		} else {
			/* Tx */
			netdev->dcbnl_ops->getpgbwgcfgtx(netdev,
					i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
		}
		ret = nla_put_u8(skb, i, tc_pct);
		if (ret)
			goto err_pg;
	}

	nla_nest_end(skb, pg_nest);

	return 0;

err_param:
	nla_nest_cancel(skb, param_nest);
err_pg:
	nla_nest_cancel(skb, pg_nest);

	return -EMSGSIZE;
}

/* DCB_CMD_PGTX_GCFG: Tx priority group configuration. */
static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 0);
}

/* DCB_CMD_PGRX_GCFG: Rx priority group configuration. */
static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 1);
}

/* DCB_CMD_SSTATE: enable/disable DCB; reply carries the driver's result. */
static int dcbnl_setstate(struct net_device *netdev, struct nlmsghdr *nlh,
			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	u8 value;

	if (!tb[DCB_ATTR_STATE])
		return -EINVAL;

	if (!netdev->dcbnl_ops->setstate)
		return -EOPNOTSUPP;

	value = nla_get_u8(tb[DCB_ATTR_STATE]);

	return
nla_put_u8(skb, DCB_ATTR_STATE,
		  netdev->dcbnl_ops->setstate(netdev, value));
}

/* DCB_CMD_PFC_SCFG: push per-user-priority PFC settings to the driver;
 * always replies 0 (driver ops return void).
 */
static int dcbnl_setpfccfg(struct net_device *netdev, struct nlmsghdr *nlh,
			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1];
	int i;
	int ret;
	u8 value;

	if (!tb[DCB_ATTR_PFC_CFG])
		return -EINVAL;

	if (!netdev->dcbnl_ops->setpfccfg)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
			       tb[DCB_ATTR_PFC_CFG], dcbnl_pfc_up_nest, NULL);
	if (ret)
		return ret;

	for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
		if (data[i] == NULL)
			continue;
		value = nla_get_u8(data[i]);
		netdev->dcbnl_ops->setpfccfg(netdev,
			data[i]->nla_type - DCB_PFC_UP_ATTR_0, value);
	}

	return nla_put_u8(skb, DCB_ATTR_PFC_CFG, 0);
}

/* DCB_CMD_SET_ALL: commit all pending config to hardware and notify. */
static int dcbnl_setall(struct net_device *netdev, struct nlmsghdr *nlh,
			u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	int ret;

	if (!tb[DCB_ATTR_SET_ALL])
		return -EINVAL;

	if (!netdev->dcbnl_ops->setall)
		return -EOPNOTSUPP;

	ret = nla_put_u8(skb, DCB_ATTR_SET_ALL,
			 netdev->dcbnl_ops->setall(netdev));
	dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SET_ALL, seq, 0);

	return ret;
}

/* Shared SET handler for the PG config commands; dir selects the ring
 * direction (Tx = 0, Rx = 1).  Always replies 0 (driver ops return void).
 */
static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb,
			     int dir)
{
	struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
	struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
	int ret;
	int i;
	u8 pgid;
	u8 up_map;
	u8 prio;
	u8 tc_pct;

	if (!tb[DCB_ATTR_PG_CFG])
		return -EINVAL;

	/* All four ops required, mirroring __dcbnl_pg_getcfg */
	if (!netdev->dcbnl_ops->setpgtccfgtx ||
	    !netdev->dcbnl_ops->setpgtccfgrx ||
	    !netdev->dcbnl_ops->setpgbwgcfgtx ||
	    !netdev->dcbnl_ops->setpgbwgcfgrx)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX, tb[DCB_ATTR_PG_CFG],
			       dcbnl_pg_nest, NULL);
	if (ret)
		return ret;

	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		if (!pg_tb[i])
			continue;

		ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
				       pg_tb[i], dcbnl_tc_param_nest, NULL);
		if (ret)
			return ret;

		/* Unspecified parameters are passed down as UNDEFINED */
		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO])
			prio =
			    nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]);

		if (param_tb[DCB_TC_ATTR_PARAM_PGID])
			pgid = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_PGID]);

		if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT])
			tc_pct = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_BW_PCT]);

		if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING])
			up_map =
			     nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]);

		/* dir: Tx = 0, Rx = 1 */
		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->setpgtccfgrx(netdev,
				i - DCB_PG_ATTR_TC_0,
				prio, pgid, tc_pct, up_map);
		} else {
			/* Tx */
			netdev->dcbnl_ops->setpgtccfgtx(netdev,
				i - DCB_PG_ATTR_TC_0,
				prio, pgid, tc_pct, up_map);
		}
	}

	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		if (!pg_tb[i])
			continue;

		tc_pct = nla_get_u8(pg_tb[i]);

		/* dir: Tx = 0, Rx = 1 */
		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->setpgbwgcfgrx(netdev,
					 i - DCB_PG_ATTR_BW_ID_0, tc_pct);
		} else {
			/* Tx */
			netdev->dcbnl_ops->setpgbwgcfgtx(netdev,
					 i - DCB_PG_ATTR_BW_ID_0, tc_pct);
		}
	}

	return nla_put_u8(skb, DCB_ATTR_PG_CFG, 0);
}

/* DCB_CMD_PGTX_SCFG: set Tx priority group configuration. */
static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 0);
}

/* DCB_CMD_PGRX_SCFG: set Rx priority group configuration. */
static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb,
struct sk_buff *skb) 865 { 866 return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 1); 867 } 868 869 static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlmsghdr *nlh, 870 u32 seq, struct nlattr **tb, struct sk_buff *skb) 871 { 872 struct nlattr *bcn_nest; 873 struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1]; 874 u8 value_byte; 875 u32 value_integer; 876 int ret; 877 bool getall = false; 878 int i; 879 880 if (!tb[DCB_ATTR_BCN]) 881 return -EINVAL; 882 883 if (!netdev->dcbnl_ops->getbcnrp || 884 !netdev->dcbnl_ops->getbcncfg) 885 return -EOPNOTSUPP; 886 887 ret = nla_parse_nested(bcn_tb, DCB_BCN_ATTR_MAX, tb[DCB_ATTR_BCN], 888 dcbnl_bcn_nest, NULL); 889 if (ret) 890 return ret; 891 892 bcn_nest = nla_nest_start(skb, DCB_ATTR_BCN); 893 if (!bcn_nest) 894 return -EMSGSIZE; 895 896 if (bcn_tb[DCB_BCN_ATTR_ALL]) 897 getall = true; 898 899 for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) { 900 if (!getall && !bcn_tb[i]) 901 continue; 902 903 netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0, 904 &value_byte); 905 ret = nla_put_u8(skb, i, value_byte); 906 if (ret) 907 goto err_bcn; 908 } 909 910 for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) { 911 if (!getall && !bcn_tb[i]) 912 continue; 913 914 netdev->dcbnl_ops->getbcncfg(netdev, i, 915 &value_integer); 916 ret = nla_put_u32(skb, i, value_integer); 917 if (ret) 918 goto err_bcn; 919 } 920 921 nla_nest_end(skb, bcn_nest); 922 923 return 0; 924 925 err_bcn: 926 nla_nest_cancel(skb, bcn_nest); 927 return ret; 928 } 929 930 static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlmsghdr *nlh, 931 u32 seq, struct nlattr **tb, struct sk_buff *skb) 932 { 933 struct nlattr *data[DCB_BCN_ATTR_MAX + 1]; 934 int i; 935 int ret; 936 u8 value_byte; 937 u32 value_int; 938 939 if (!tb[DCB_ATTR_BCN]) 940 return -EINVAL; 941 942 if (!netdev->dcbnl_ops->setbcncfg || 943 !netdev->dcbnl_ops->setbcnrp) 944 return -EOPNOTSUPP; 945 946 ret = nla_parse_nested(data, DCB_BCN_ATTR_MAX, tb[DCB_ATTR_BCN], 947 
dcbnl_pfc_up_nest, NULL); 948 if (ret) 949 return ret; 950 951 for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) { 952 if (data[i] == NULL) 953 continue; 954 value_byte = nla_get_u8(data[i]); 955 netdev->dcbnl_ops->setbcnrp(netdev, 956 data[i]->nla_type - DCB_BCN_ATTR_RP_0, value_byte); 957 } 958 959 for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) { 960 if (data[i] == NULL) 961 continue; 962 value_int = nla_get_u32(data[i]); 963 netdev->dcbnl_ops->setbcncfg(netdev, 964 i, value_int); 965 } 966 967 return nla_put_u8(skb, DCB_ATTR_BCN, 0); 968 } 969 970 static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff* skb, 971 int app_nested_type, int app_info_type, 972 int app_entry_type) 973 { 974 struct dcb_peer_app_info info; 975 struct dcb_app *table = NULL; 976 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; 977 u16 app_count; 978 int err; 979 980 981 /** 982 * retrieve the peer app configuration form the driver. If the driver 983 * handlers fail exit without doing anything 984 */ 985 err = ops->peer_getappinfo(netdev, &info, &app_count); 986 if (!err && app_count) { 987 table = kmalloc(sizeof(struct dcb_app) * app_count, GFP_KERNEL); 988 if (!table) 989 return -ENOMEM; 990 991 err = ops->peer_getapptable(netdev, table); 992 } 993 994 if (!err) { 995 u16 i; 996 struct nlattr *app; 997 998 /** 999 * build the message, from here on the only possible failure 1000 * is due to the skb size 1001 */ 1002 err = -EMSGSIZE; 1003 1004 app = nla_nest_start(skb, app_nested_type); 1005 if (!app) 1006 goto nla_put_failure; 1007 1008 if (app_info_type && 1009 nla_put(skb, app_info_type, sizeof(info), &info)) 1010 goto nla_put_failure; 1011 1012 for (i = 0; i < app_count; i++) { 1013 if (nla_put(skb, app_entry_type, sizeof(struct dcb_app), 1014 &table[i])) 1015 goto nla_put_failure; 1016 } 1017 nla_nest_end(skb, app); 1018 } 1019 err = 0; 1020 1021 nla_put_failure: 1022 kfree(table); 1023 return err; 1024 } 1025 1026 /* Handle IEEE 
802.1Qaz/802.1Qau/802.1Qbb GET commands. */
static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
{
	struct nlattr *ieee, *app;
	struct dcb_app_type *itr;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	int dcbx;
	int err;

	if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
		return -EMSGSIZE;

	ieee = nla_nest_start(skb, DCB_ATTR_IEEE);
	if (!ieee)
		return -EMSGSIZE;

	/* Each optional driver op contributes one attribute when present
	 * and successful; on skb overflow the caller discards the skb, so
	 * the open nests are not cancelled here.
	 */
	if (ops->ieee_getets) {
		struct ieee_ets ets;
		memset(&ets, 0, sizeof(ets));
		err = ops->ieee_getets(netdev, &ets);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets))
			return -EMSGSIZE;
	}

	if (ops->ieee_getmaxrate) {
		struct ieee_maxrate maxrate;
		memset(&maxrate, 0, sizeof(maxrate));
		err = ops->ieee_getmaxrate(netdev, &maxrate);
		if (!err) {
			err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE,
				      sizeof(maxrate), &maxrate);
			if (err)
				return -EMSGSIZE;
		}
	}

	if (ops->ieee_getqcn) {
		struct ieee_qcn qcn;

		memset(&qcn, 0, sizeof(qcn));
		err = ops->ieee_getqcn(netdev, &qcn);
		if (!err) {
			err = nla_put(skb, DCB_ATTR_IEEE_QCN,
				      sizeof(qcn), &qcn);
			if (err)
				return -EMSGSIZE;
		}
	}

	if (ops->ieee_getqcnstats) {
		struct ieee_qcn_stats qcn_stats;

		memset(&qcn_stats, 0, sizeof(qcn_stats));
		err = ops->ieee_getqcnstats(netdev, &qcn_stats);
		if (!err) {
			err = nla_put(skb, DCB_ATTR_IEEE_QCN_STATS,
				      sizeof(qcn_stats), &qcn_stats);
			if (err)
				return -EMSGSIZE;
		}
	}

	if (ops->ieee_getpfc) {
		struct ieee_pfc pfc;
		memset(&pfc, 0, sizeof(pfc));
		err = ops->ieee_getpfc(netdev, &pfc);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc))
			return -EMSGSIZE;
	}

	if (ops->dcbnl_getbuffer) {
		struct dcbnl_buffer buffer;

		memset(&buffer, 0, sizeof(buffer));
		err = ops->dcbnl_getbuffer(netdev, &buffer);
		if (!err &&
		    nla_put(skb, DCB_ATTR_DCB_BUFFER, sizeof(buffer), &buffer))
			return -EMSGSIZE;
	}

	app = nla_nest_start(skb, DCB_ATTR_IEEE_APP_TABLE);
	if (!app)
		return -EMSGSIZE;

	/* dcb_lock protects the shared dcb_app_list */
	spin_lock_bh(&dcb_lock);
	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->ifindex == netdev->ifindex) {
			err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app),
					 &itr->app);
			if (err) {
				spin_unlock_bh(&dcb_lock);
				return -EMSGSIZE;
			}
		}
	}

	if (netdev->dcbnl_ops->getdcbx)
		dcbx = netdev->dcbnl_ops->getdcbx(netdev);
	else
		dcbx = -EOPNOTSUPP;

	spin_unlock_bh(&dcb_lock);
	nla_nest_end(skb, app);

	/* get peer info if available */
	if (ops->ieee_peer_getets) {
		struct ieee_ets ets;
		memset(&ets, 0, sizeof(ets));
		err = ops->ieee_peer_getets(netdev, &ets);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets))
			return -EMSGSIZE;
	}

	if (ops->ieee_peer_getpfc) {
		struct ieee_pfc pfc;
		memset(&pfc, 0, sizeof(pfc));
		err = ops->ieee_peer_getpfc(netdev, &pfc);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc))
			return -EMSGSIZE;
	}

	if (ops->peer_getappinfo && ops->peer_getapptable) {
		err = dcbnl_build_peer_app(netdev, skb,
					   DCB_ATTR_IEEE_PEER_APP,
					   DCB_ATTR_IEEE_APP_UNSPEC,
					   DCB_ATTR_IEEE_APP);
		if (err)
			return -EMSGSIZE;
	}

	nla_nest_end(skb, ieee);
	if (dcbx >= 0) {
		err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
		if (err)
			return -EMSGSIZE;
	}

	return 0;
}

/* Fill one direction's CEE PG nest.  NOTE: here dir != 0 selects Tx
 * (see the callers in dcbnl_cee_fill) -- the opposite convention from
 * __dcbnl_pg_getcfg, where dir != 0 selects Rx.
 */
static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
			     int dir)
{
	u8 pgid, up_map, prio, tc_pct;
	const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
	int i = dir ? DCB_ATTR_CEE_TX_PG : DCB_ATTR_CEE_RX_PG;
	struct nlattr *pg = nla_nest_start(skb, i);

	if (!pg)
		return -EMSGSIZE;

	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		struct nlattr *tc_nest = nla_nest_start(skb, i);

		if (!tc_nest)
			return -EMSGSIZE;

		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (!dir)
			ops->getpgtccfgrx(dev, i - DCB_PG_ATTR_TC_0,
					  &prio, &pgid, &tc_pct, &up_map);
		else
			ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0,
					  &prio, &pgid, &tc_pct, &up_map);

		if (nla_put_u8(skb, DCB_TC_ATTR_PARAM_PGID, pgid) ||
		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map) ||
		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio) ||
		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct))
			return -EMSGSIZE;
		nla_nest_end(skb, tc_nest);
	}

	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;

		if (!dir)
			ops->getpgbwgcfgrx(dev, i - DCB_PG_ATTR_BW_ID_0,
					   &tc_pct);
		else
			ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0,
					   &tc_pct);
		if (nla_put_u8(skb, i, tc_pct))
			return -EMSGSIZE;
	}
	nla_nest_end(skb, pg);
	return 0;
}

/* Fill a complete CEE GET reply for netdev. */
static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
{
	struct nlattr *cee, *app;
	struct dcb_app_type *itr;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	int dcbx, i, err = -EMSGSIZE;
	u8 value;

	if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
		goto nla_put_failure;
	cee = nla_nest_start(skb, DCB_ATTR_CEE);
	if (!cee)
		goto nla_put_failure;

	/* local pg */
	if (ops->getpgtccfgtx && ops->getpgbwgcfgtx) {
		err = dcbnl_cee_pg_fill(skb, netdev, 1);
		if (err)
			goto nla_put_failure;
	}

	if
(ops->getpgtccfgrx && ops->getpgbwgcfgrx) { 1245 err = dcbnl_cee_pg_fill(skb, netdev, 0); 1246 if (err) 1247 goto nla_put_failure; 1248 } 1249 1250 /* local pfc */ 1251 if (ops->getpfccfg) { 1252 struct nlattr *pfc_nest = nla_nest_start(skb, DCB_ATTR_CEE_PFC); 1253 1254 if (!pfc_nest) 1255 goto nla_put_failure; 1256 1257 for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) { 1258 ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value); 1259 if (nla_put_u8(skb, i, value)) 1260 goto nla_put_failure; 1261 } 1262 nla_nest_end(skb, pfc_nest); 1263 } 1264 1265 /* local app */ 1266 spin_lock_bh(&dcb_lock); 1267 app = nla_nest_start(skb, DCB_ATTR_CEE_APP_TABLE); 1268 if (!app) 1269 goto dcb_unlock; 1270 1271 list_for_each_entry(itr, &dcb_app_list, list) { 1272 if (itr->ifindex == netdev->ifindex) { 1273 struct nlattr *app_nest = nla_nest_start(skb, 1274 DCB_ATTR_APP); 1275 if (!app_nest) 1276 goto dcb_unlock; 1277 1278 err = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE, 1279 itr->app.selector); 1280 if (err) 1281 goto dcb_unlock; 1282 1283 err = nla_put_u16(skb, DCB_APP_ATTR_ID, 1284 itr->app.protocol); 1285 if (err) 1286 goto dcb_unlock; 1287 1288 err = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY, 1289 itr->app.priority); 1290 if (err) 1291 goto dcb_unlock; 1292 1293 nla_nest_end(skb, app_nest); 1294 } 1295 } 1296 nla_nest_end(skb, app); 1297 1298 if (netdev->dcbnl_ops->getdcbx) 1299 dcbx = netdev->dcbnl_ops->getdcbx(netdev); 1300 else 1301 dcbx = -EOPNOTSUPP; 1302 1303 spin_unlock_bh(&dcb_lock); 1304 1305 /* features flags */ 1306 if (ops->getfeatcfg) { 1307 struct nlattr *feat = nla_nest_start(skb, DCB_ATTR_CEE_FEAT); 1308 if (!feat) 1309 goto nla_put_failure; 1310 1311 for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX; 1312 i++) 1313 if (!ops->getfeatcfg(netdev, i, &value) && 1314 nla_put_u8(skb, i, value)) 1315 goto nla_put_failure; 1316 1317 nla_nest_end(skb, feat); 1318 } 1319 1320 /* peer info if available */ 1321 if (ops->cee_peer_getpg) { 1322 struct cee_pg pg; 
1323 memset(&pg, 0, sizeof(pg)); 1324 err = ops->cee_peer_getpg(netdev, &pg); 1325 if (!err && 1326 nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg)) 1327 goto nla_put_failure; 1328 } 1329 1330 if (ops->cee_peer_getpfc) { 1331 struct cee_pfc pfc; 1332 memset(&pfc, 0, sizeof(pfc)); 1333 err = ops->cee_peer_getpfc(netdev, &pfc); 1334 if (!err && 1335 nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc)) 1336 goto nla_put_failure; 1337 } 1338 1339 if (ops->peer_getappinfo && ops->peer_getapptable) { 1340 err = dcbnl_build_peer_app(netdev, skb, 1341 DCB_ATTR_CEE_PEER_APP_TABLE, 1342 DCB_ATTR_CEE_PEER_APP_INFO, 1343 DCB_ATTR_CEE_PEER_APP); 1344 if (err) 1345 goto nla_put_failure; 1346 } 1347 nla_nest_end(skb, cee); 1348 1349 /* DCBX state */ 1350 if (dcbx >= 0) { 1351 err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx); 1352 if (err) 1353 goto nla_put_failure; 1354 } 1355 return 0; 1356 1357 dcb_unlock: 1358 spin_unlock_bh(&dcb_lock); 1359 nla_put_failure: 1360 err = -EMSGSIZE; 1361 return err; 1362 } 1363 1364 static int dcbnl_notify(struct net_device *dev, int event, int cmd, 1365 u32 seq, u32 portid, int dcbx_ver) 1366 { 1367 struct net *net = dev_net(dev); 1368 struct sk_buff *skb; 1369 struct nlmsghdr *nlh; 1370 const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops; 1371 int err; 1372 1373 if (!ops) 1374 return -EOPNOTSUPP; 1375 1376 skb = dcbnl_newmsg(event, cmd, portid, seq, 0, &nlh); 1377 if (!skb) 1378 return -ENOBUFS; 1379 1380 if (dcbx_ver == DCB_CAP_DCBX_VER_IEEE) 1381 err = dcbnl_ieee_fill(skb, dev); 1382 else 1383 err = dcbnl_cee_fill(skb, dev); 1384 1385 if (err < 0) { 1386 /* Report error to broadcast listeners */ 1387 nlmsg_free(skb); 1388 rtnl_set_sk_err(net, RTNLGRP_DCB, err); 1389 } else { 1390 /* End nlmsg and notify broadcast listeners */ 1391 nlmsg_end(skb, nlh); 1392 rtnl_notify(skb, net, 0, RTNLGRP_DCB, NULL, GFP_KERNEL); 1393 } 1394 1395 return err; 1396 } 1397 1398 int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd, 1399 u32 seq, u32 
portid) 1400 { 1401 return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_IEEE); 1402 } 1403 EXPORT_SYMBOL(dcbnl_ieee_notify); 1404 1405 int dcbnl_cee_notify(struct net_device *dev, int event, int cmd, 1406 u32 seq, u32 portid) 1407 { 1408 return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_CEE); 1409 } 1410 EXPORT_SYMBOL(dcbnl_cee_notify); 1411 1412 /* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb SET commands. 1413 * If any requested operation can not be completed 1414 * the entire msg is aborted and error value is returned. 1415 * No attempt is made to reconcile the case where only part of the 1416 * cmd can be completed. 1417 */ 1418 static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh, 1419 u32 seq, struct nlattr **tb, struct sk_buff *skb) 1420 { 1421 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; 1422 struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1]; 1423 int err; 1424 1425 if (!ops) 1426 return -EOPNOTSUPP; 1427 1428 if (!tb[DCB_ATTR_IEEE]) 1429 return -EINVAL; 1430 1431 err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX, tb[DCB_ATTR_IEEE], 1432 dcbnl_ieee_policy, NULL); 1433 if (err) 1434 return err; 1435 1436 if (ieee[DCB_ATTR_IEEE_ETS] && ops->ieee_setets) { 1437 struct ieee_ets *ets = nla_data(ieee[DCB_ATTR_IEEE_ETS]); 1438 err = ops->ieee_setets(netdev, ets); 1439 if (err) 1440 goto err; 1441 } 1442 1443 if (ieee[DCB_ATTR_IEEE_MAXRATE] && ops->ieee_setmaxrate) { 1444 struct ieee_maxrate *maxrate = 1445 nla_data(ieee[DCB_ATTR_IEEE_MAXRATE]); 1446 err = ops->ieee_setmaxrate(netdev, maxrate); 1447 if (err) 1448 goto err; 1449 } 1450 1451 if (ieee[DCB_ATTR_IEEE_QCN] && ops->ieee_setqcn) { 1452 struct ieee_qcn *qcn = 1453 nla_data(ieee[DCB_ATTR_IEEE_QCN]); 1454 1455 err = ops->ieee_setqcn(netdev, qcn); 1456 if (err) 1457 goto err; 1458 } 1459 1460 if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) { 1461 struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]); 1462 err = ops->ieee_setpfc(netdev, pfc); 1463 if (err) 1464 
goto err; 1465 } 1466 1467 if (ieee[DCB_ATTR_DCB_BUFFER] && ops->dcbnl_setbuffer) { 1468 struct dcbnl_buffer *buffer = 1469 nla_data(ieee[DCB_ATTR_DCB_BUFFER]); 1470 1471 err = ops->dcbnl_setbuffer(netdev, buffer); 1472 if (err) 1473 goto err; 1474 } 1475 1476 if (ieee[DCB_ATTR_IEEE_APP_TABLE]) { 1477 struct nlattr *attr; 1478 int rem; 1479 1480 nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) { 1481 struct dcb_app *app_data; 1482 1483 if (nla_type(attr) != DCB_ATTR_IEEE_APP) 1484 continue; 1485 1486 if (nla_len(attr) < sizeof(struct dcb_app)) { 1487 err = -ERANGE; 1488 goto err; 1489 } 1490 1491 app_data = nla_data(attr); 1492 if (ops->ieee_setapp) 1493 err = ops->ieee_setapp(netdev, app_data); 1494 else 1495 err = dcb_ieee_setapp(netdev, app_data); 1496 if (err) 1497 goto err; 1498 } 1499 } 1500 1501 err: 1502 err = nla_put_u8(skb, DCB_ATTR_IEEE, err); 1503 dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, seq, 0); 1504 return err; 1505 } 1506 1507 static int dcbnl_ieee_get(struct net_device *netdev, struct nlmsghdr *nlh, 1508 u32 seq, struct nlattr **tb, struct sk_buff *skb) 1509 { 1510 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; 1511 1512 if (!ops) 1513 return -EOPNOTSUPP; 1514 1515 return dcbnl_ieee_fill(skb, netdev); 1516 } 1517 1518 static int dcbnl_ieee_del(struct net_device *netdev, struct nlmsghdr *nlh, 1519 u32 seq, struct nlattr **tb, struct sk_buff *skb) 1520 { 1521 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; 1522 struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1]; 1523 int err; 1524 1525 if (!ops) 1526 return -EOPNOTSUPP; 1527 1528 if (!tb[DCB_ATTR_IEEE]) 1529 return -EINVAL; 1530 1531 err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX, tb[DCB_ATTR_IEEE], 1532 dcbnl_ieee_policy, NULL); 1533 if (err) 1534 return err; 1535 1536 if (ieee[DCB_ATTR_IEEE_APP_TABLE]) { 1537 struct nlattr *attr; 1538 int rem; 1539 1540 nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) { 1541 struct dcb_app *app_data; 1542 1543 if 
(nla_type(attr) != DCB_ATTR_IEEE_APP) 1544 continue; 1545 app_data = nla_data(attr); 1546 if (ops->ieee_delapp) 1547 err = ops->ieee_delapp(netdev, app_data); 1548 else 1549 err = dcb_ieee_delapp(netdev, app_data); 1550 if (err) 1551 goto err; 1552 } 1553 } 1554 1555 err: 1556 err = nla_put_u8(skb, DCB_ATTR_IEEE, err); 1557 dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_DEL, seq, 0); 1558 return err; 1559 } 1560 1561 1562 /* DCBX configuration */ 1563 static int dcbnl_getdcbx(struct net_device *netdev, struct nlmsghdr *nlh, 1564 u32 seq, struct nlattr **tb, struct sk_buff *skb) 1565 { 1566 if (!netdev->dcbnl_ops->getdcbx) 1567 return -EOPNOTSUPP; 1568 1569 return nla_put_u8(skb, DCB_ATTR_DCBX, 1570 netdev->dcbnl_ops->getdcbx(netdev)); 1571 } 1572 1573 static int dcbnl_setdcbx(struct net_device *netdev, struct nlmsghdr *nlh, 1574 u32 seq, struct nlattr **tb, struct sk_buff *skb) 1575 { 1576 u8 value; 1577 1578 if (!netdev->dcbnl_ops->setdcbx) 1579 return -EOPNOTSUPP; 1580 1581 if (!tb[DCB_ATTR_DCBX]) 1582 return -EINVAL; 1583 1584 value = nla_get_u8(tb[DCB_ATTR_DCBX]); 1585 1586 return nla_put_u8(skb, DCB_ATTR_DCBX, 1587 netdev->dcbnl_ops->setdcbx(netdev, value)); 1588 } 1589 1590 static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh, 1591 u32 seq, struct nlattr **tb, struct sk_buff *skb) 1592 { 1593 struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1], *nest; 1594 u8 value; 1595 int ret, i; 1596 int getall = 0; 1597 1598 if (!netdev->dcbnl_ops->getfeatcfg) 1599 return -EOPNOTSUPP; 1600 1601 if (!tb[DCB_ATTR_FEATCFG]) 1602 return -EINVAL; 1603 1604 ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, 1605 tb[DCB_ATTR_FEATCFG], dcbnl_featcfg_nest, NULL); 1606 if (ret) 1607 return ret; 1608 1609 nest = nla_nest_start(skb, DCB_ATTR_FEATCFG); 1610 if (!nest) 1611 return -EMSGSIZE; 1612 1613 if (data[DCB_FEATCFG_ATTR_ALL]) 1614 getall = 1; 1615 1616 for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) { 1617 if (!getall && !data[i]) 1618 
continue; 1619 1620 ret = netdev->dcbnl_ops->getfeatcfg(netdev, i, &value); 1621 if (!ret) 1622 ret = nla_put_u8(skb, i, value); 1623 1624 if (ret) { 1625 nla_nest_cancel(skb, nest); 1626 goto nla_put_failure; 1627 } 1628 } 1629 nla_nest_end(skb, nest); 1630 1631 nla_put_failure: 1632 return ret; 1633 } 1634 1635 static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh, 1636 u32 seq, struct nlattr **tb, struct sk_buff *skb) 1637 { 1638 struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1]; 1639 int ret, i; 1640 u8 value; 1641 1642 if (!netdev->dcbnl_ops->setfeatcfg) 1643 return -ENOTSUPP; 1644 1645 if (!tb[DCB_ATTR_FEATCFG]) 1646 return -EINVAL; 1647 1648 ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, 1649 tb[DCB_ATTR_FEATCFG], dcbnl_featcfg_nest, NULL); 1650 1651 if (ret) 1652 goto err; 1653 1654 for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) { 1655 if (data[i] == NULL) 1656 continue; 1657 1658 value = nla_get_u8(data[i]); 1659 1660 ret = netdev->dcbnl_ops->setfeatcfg(netdev, i, value); 1661 1662 if (ret) 1663 goto err; 1664 } 1665 err: 1666 ret = nla_put_u8(skb, DCB_ATTR_FEATCFG, ret); 1667 1668 return ret; 1669 } 1670 1671 /* Handle CEE DCBX GET commands. 
*/ 1672 static int dcbnl_cee_get(struct net_device *netdev, struct nlmsghdr *nlh, 1673 u32 seq, struct nlattr **tb, struct sk_buff *skb) 1674 { 1675 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; 1676 1677 if (!ops) 1678 return -EOPNOTSUPP; 1679 1680 return dcbnl_cee_fill(skb, netdev); 1681 } 1682 1683 struct reply_func { 1684 /* reply netlink message type */ 1685 int type; 1686 1687 /* function to fill message contents */ 1688 int (*cb)(struct net_device *, struct nlmsghdr *, u32, 1689 struct nlattr **, struct sk_buff *); 1690 }; 1691 1692 static const struct reply_func reply_funcs[DCB_CMD_MAX+1] = { 1693 [DCB_CMD_GSTATE] = { RTM_GETDCB, dcbnl_getstate }, 1694 [DCB_CMD_SSTATE] = { RTM_SETDCB, dcbnl_setstate }, 1695 [DCB_CMD_PFC_GCFG] = { RTM_GETDCB, dcbnl_getpfccfg }, 1696 [DCB_CMD_PFC_SCFG] = { RTM_SETDCB, dcbnl_setpfccfg }, 1697 [DCB_CMD_GPERM_HWADDR] = { RTM_GETDCB, dcbnl_getperm_hwaddr }, 1698 [DCB_CMD_GCAP] = { RTM_GETDCB, dcbnl_getcap }, 1699 [DCB_CMD_GNUMTCS] = { RTM_GETDCB, dcbnl_getnumtcs }, 1700 [DCB_CMD_SNUMTCS] = { RTM_SETDCB, dcbnl_setnumtcs }, 1701 [DCB_CMD_PFC_GSTATE] = { RTM_GETDCB, dcbnl_getpfcstate }, 1702 [DCB_CMD_PFC_SSTATE] = { RTM_SETDCB, dcbnl_setpfcstate }, 1703 [DCB_CMD_GAPP] = { RTM_GETDCB, dcbnl_getapp }, 1704 [DCB_CMD_SAPP] = { RTM_SETDCB, dcbnl_setapp }, 1705 [DCB_CMD_PGTX_GCFG] = { RTM_GETDCB, dcbnl_pgtx_getcfg }, 1706 [DCB_CMD_PGTX_SCFG] = { RTM_SETDCB, dcbnl_pgtx_setcfg }, 1707 [DCB_CMD_PGRX_GCFG] = { RTM_GETDCB, dcbnl_pgrx_getcfg }, 1708 [DCB_CMD_PGRX_SCFG] = { RTM_SETDCB, dcbnl_pgrx_setcfg }, 1709 [DCB_CMD_SET_ALL] = { RTM_SETDCB, dcbnl_setall }, 1710 [DCB_CMD_BCN_GCFG] = { RTM_GETDCB, dcbnl_bcn_getcfg }, 1711 [DCB_CMD_BCN_SCFG] = { RTM_SETDCB, dcbnl_bcn_setcfg }, 1712 [DCB_CMD_IEEE_GET] = { RTM_GETDCB, dcbnl_ieee_get }, 1713 [DCB_CMD_IEEE_SET] = { RTM_SETDCB, dcbnl_ieee_set }, 1714 [DCB_CMD_IEEE_DEL] = { RTM_SETDCB, dcbnl_ieee_del }, 1715 [DCB_CMD_GDCBX] = { RTM_GETDCB, dcbnl_getdcbx }, 1716 [DCB_CMD_SDCBX] = { RTM_SETDCB, 
dcbnl_setdcbx }, 1717 [DCB_CMD_GFEATCFG] = { RTM_GETDCB, dcbnl_getfeatcfg }, 1718 [DCB_CMD_SFEATCFG] = { RTM_SETDCB, dcbnl_setfeatcfg }, 1719 [DCB_CMD_CEE_GET] = { RTM_GETDCB, dcbnl_cee_get }, 1720 }; 1721 1722 static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, 1723 struct netlink_ext_ack *extack) 1724 { 1725 struct net *net = sock_net(skb->sk); 1726 struct net_device *netdev; 1727 struct dcbmsg *dcb = nlmsg_data(nlh); 1728 struct nlattr *tb[DCB_ATTR_MAX + 1]; 1729 u32 portid = skb ? NETLINK_CB(skb).portid : 0; 1730 int ret = -EINVAL; 1731 struct sk_buff *reply_skb; 1732 struct nlmsghdr *reply_nlh = NULL; 1733 const struct reply_func *fn; 1734 1735 if ((nlh->nlmsg_type == RTM_SETDCB) && !netlink_capable(skb, CAP_NET_ADMIN)) 1736 return -EPERM; 1737 1738 ret = nlmsg_parse(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX, 1739 dcbnl_rtnl_policy, extack); 1740 if (ret < 0) 1741 return ret; 1742 1743 if (dcb->cmd > DCB_CMD_MAX) 1744 return -EINVAL; 1745 1746 /* check if a reply function has been defined for the command */ 1747 fn = &reply_funcs[dcb->cmd]; 1748 if (!fn->cb) 1749 return -EOPNOTSUPP; 1750 1751 if (!tb[DCB_ATTR_IFNAME]) 1752 return -EINVAL; 1753 1754 netdev = __dev_get_by_name(net, nla_data(tb[DCB_ATTR_IFNAME])); 1755 if (!netdev) 1756 return -ENODEV; 1757 1758 if (!netdev->dcbnl_ops) 1759 return -EOPNOTSUPP; 1760 1761 reply_skb = dcbnl_newmsg(fn->type, dcb->cmd, portid, nlh->nlmsg_seq, 1762 nlh->nlmsg_flags, &reply_nlh); 1763 if (!reply_skb) 1764 return -ENOBUFS; 1765 1766 ret = fn->cb(netdev, nlh, nlh->nlmsg_seq, tb, reply_skb); 1767 if (ret < 0) { 1768 nlmsg_free(reply_skb); 1769 goto out; 1770 } 1771 1772 nlmsg_end(reply_skb, reply_nlh); 1773 1774 ret = rtnl_unicast(reply_skb, net, portid); 1775 out: 1776 return ret; 1777 } 1778 1779 static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app, 1780 int ifindex, int prio) 1781 { 1782 struct dcb_app_type *itr; 1783 1784 list_for_each_entry(itr, &dcb_app_list, list) { 1785 if (itr->app.selector == 
app->selector && 1786 itr->app.protocol == app->protocol && 1787 itr->ifindex == ifindex && 1788 (!prio || itr->app.priority == prio)) 1789 return itr; 1790 } 1791 1792 return NULL; 1793 } 1794 1795 static int dcb_app_add(const struct dcb_app *app, int ifindex) 1796 { 1797 struct dcb_app_type *entry; 1798 1799 entry = kmalloc(sizeof(*entry), GFP_ATOMIC); 1800 if (!entry) 1801 return -ENOMEM; 1802 1803 memcpy(&entry->app, app, sizeof(*app)); 1804 entry->ifindex = ifindex; 1805 list_add(&entry->list, &dcb_app_list); 1806 1807 return 0; 1808 } 1809 1810 /** 1811 * dcb_getapp - retrieve the DCBX application user priority 1812 * 1813 * On success returns a non-zero 802.1p user priority bitmap 1814 * otherwise returns 0 as the invalid user priority bitmap to 1815 * indicate an error. 1816 */ 1817 u8 dcb_getapp(struct net_device *dev, struct dcb_app *app) 1818 { 1819 struct dcb_app_type *itr; 1820 u8 prio = 0; 1821 1822 spin_lock_bh(&dcb_lock); 1823 if ((itr = dcb_app_lookup(app, dev->ifindex, 0))) 1824 prio = itr->app.priority; 1825 spin_unlock_bh(&dcb_lock); 1826 1827 return prio; 1828 } 1829 EXPORT_SYMBOL(dcb_getapp); 1830 1831 /** 1832 * dcb_setapp - add CEE dcb application data to app list 1833 * 1834 * Priority 0 is an invalid priority in CEE spec. This routine 1835 * removes applications from the app list if the priority is 1836 * set to zero. 
Priority is expected to be 8-bit 802.1p user priority bitmap 1837 */ 1838 int dcb_setapp(struct net_device *dev, struct dcb_app *new) 1839 { 1840 struct dcb_app_type *itr; 1841 struct dcb_app_type event; 1842 int err = 0; 1843 1844 event.ifindex = dev->ifindex; 1845 memcpy(&event.app, new, sizeof(event.app)); 1846 if (dev->dcbnl_ops->getdcbx) 1847 event.dcbx = dev->dcbnl_ops->getdcbx(dev); 1848 1849 spin_lock_bh(&dcb_lock); 1850 /* Search for existing match and replace */ 1851 if ((itr = dcb_app_lookup(new, dev->ifindex, 0))) { 1852 if (new->priority) 1853 itr->app.priority = new->priority; 1854 else { 1855 list_del(&itr->list); 1856 kfree(itr); 1857 } 1858 goto out; 1859 } 1860 /* App type does not exist add new application type */ 1861 if (new->priority) 1862 err = dcb_app_add(new, dev->ifindex); 1863 out: 1864 spin_unlock_bh(&dcb_lock); 1865 if (!err) 1866 call_dcbevent_notifiers(DCB_APP_EVENT, &event); 1867 return err; 1868 } 1869 EXPORT_SYMBOL(dcb_setapp); 1870 1871 /** 1872 * dcb_ieee_getapp_mask - retrieve the IEEE DCB application priority 1873 * 1874 * Helper routine which on success returns a non-zero 802.1Qaz user 1875 * priority bitmap otherwise returns 0 to indicate the dcb_app was 1876 * not found in APP list. 1877 */ 1878 u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app) 1879 { 1880 struct dcb_app_type *itr; 1881 u8 prio = 0; 1882 1883 spin_lock_bh(&dcb_lock); 1884 if ((itr = dcb_app_lookup(app, dev->ifindex, 0))) 1885 prio |= 1 << itr->app.priority; 1886 spin_unlock_bh(&dcb_lock); 1887 1888 return prio; 1889 } 1890 EXPORT_SYMBOL(dcb_ieee_getapp_mask); 1891 1892 /** 1893 * dcb_ieee_setapp - add IEEE dcb application data to app list 1894 * 1895 * This adds Application data to the list. Multiple application 1896 * entries may exists for the same selector and protocol as long 1897 * as the priorities are different. 
Priority is expected to be a 1898 * 3-bit unsigned integer 1899 */ 1900 int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new) 1901 { 1902 struct dcb_app_type event; 1903 int err = 0; 1904 1905 event.ifindex = dev->ifindex; 1906 memcpy(&event.app, new, sizeof(event.app)); 1907 if (dev->dcbnl_ops->getdcbx) 1908 event.dcbx = dev->dcbnl_ops->getdcbx(dev); 1909 1910 spin_lock_bh(&dcb_lock); 1911 /* Search for existing match and abort if found */ 1912 if (dcb_app_lookup(new, dev->ifindex, new->priority)) { 1913 err = -EEXIST; 1914 goto out; 1915 } 1916 1917 err = dcb_app_add(new, dev->ifindex); 1918 out: 1919 spin_unlock_bh(&dcb_lock); 1920 if (!err) 1921 call_dcbevent_notifiers(DCB_APP_EVENT, &event); 1922 return err; 1923 } 1924 EXPORT_SYMBOL(dcb_ieee_setapp); 1925 1926 /** 1927 * dcb_ieee_delapp - delete IEEE dcb application data from list 1928 * 1929 * This removes a matching APP data from the APP list 1930 */ 1931 int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del) 1932 { 1933 struct dcb_app_type *itr; 1934 struct dcb_app_type event; 1935 int err = -ENOENT; 1936 1937 event.ifindex = dev->ifindex; 1938 memcpy(&event.app, del, sizeof(event.app)); 1939 if (dev->dcbnl_ops->getdcbx) 1940 event.dcbx = dev->dcbnl_ops->getdcbx(dev); 1941 1942 spin_lock_bh(&dcb_lock); 1943 /* Search for existing match and remove it. */ 1944 if ((itr = dcb_app_lookup(del, dev->ifindex, del->priority))) { 1945 list_del(&itr->list); 1946 kfree(itr); 1947 err = 0; 1948 } 1949 1950 spin_unlock_bh(&dcb_lock); 1951 if (!err) 1952 call_dcbevent_notifiers(DCB_APP_EVENT, &event); 1953 return err; 1954 } 1955 EXPORT_SYMBOL(dcb_ieee_delapp); 1956 1957 static int __init dcbnl_init(void) 1958 { 1959 INIT_LIST_HEAD(&dcb_app_list); 1960 1961 rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL, 0); 1962 rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL, 0); 1963 1964 return 0; 1965 } 1966 device_initcall(dcbnl_init); 1967