/*
 * Copyright (c) 2008-2011, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Description: Data Center Bridging netlink interface
 * Author: Lucy Liu <lucy.liu@intel.com>
 */

#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/rtnetlink.h>
#include <linux/dcbnl.h>
#include <net/dcbevent.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <net/sock.h>

/* Data Center Bridging (DCB) is a collection of Ethernet enhancements
 * intended to allow network traffic with differing requirements
 * (highly reliable, no drops vs. best effort vs. low latency) to operate
 * and co-exist on Ethernet.  Current DCB features are:
 *
 * Enhanced Transmission Selection (aka Priority Grouping [PG]) - provides a
 * framework for assigning bandwidth guarantees to traffic classes.
 *
 * Priority-based Flow Control (PFC) - provides a flow control mechanism which
 * can work independently for each 802.1p priority.
 *
 * Congestion Notification - provides a mechanism for end-to-end congestion
 * control for protocols which do not have built-in congestion management.
 *
 * More information about the emerging standards for these Ethernet features
 * can be found at: http://www.ieee802.org/1/pages/dcbridges.html
 *
 * This file implements an rtnetlink interface to allow configuration of DCB
 * features for capable devices.
 */
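
/* A minimal userspace sketch of this interface, illustrative only (the
 * anonymous struct below is local to the example, not a UAPI type): a
 * DCB_CMD_GSTATE query is an RTM_GETDCB message whose payload is a
 * struct dcbmsg followed by netlink attributes, sent over a NETLINK_ROUTE
 * socket.
 *
 *	struct {
 *		struct nlmsghdr nlh;
 *		struct dcbmsg dcb;
 *		char attrbuf[64];	(room for DCB_ATTR_IFNAME)
 *	} req = {0};
 *
 *	req.nlh.nlmsg_type = RTM_GETDCB;
 *	req.nlh.nlmsg_flags = NLM_F_REQUEST;
 *	req.dcb.dcb_family = AF_UNSPEC;
 *	req.dcb.cmd = DCB_CMD_GSTATE;
 *
 * Userspace then appends DCB_ATTR_IFNAME ("eth0") as a NUL-terminated
 * string attribute, sets nlmsg_len, and sends the buffer; the kernel
 * unicasts a reply carrying DCB_ATTR_STATE as a u8 (see dcbnl_getstate()
 * and dcb_doit() below).
 */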

/**************** DCB attribute policies *************************************/

/* DCB netlink attributes policy */
static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
	[DCB_ATTR_IFNAME]      = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1},
	[DCB_ATTR_STATE]       = {.type = NLA_U8},
	[DCB_ATTR_PFC_CFG]     = {.type = NLA_NESTED},
	[DCB_ATTR_PG_CFG]      = {.type = NLA_NESTED},
	[DCB_ATTR_SET_ALL]     = {.type = NLA_U8},
	[DCB_ATTR_PERM_HWADDR] = {.type = NLA_FLAG},
	[DCB_ATTR_CAP]         = {.type = NLA_NESTED},
	[DCB_ATTR_PFC_STATE]   = {.type = NLA_U8},
	[DCB_ATTR_BCN]         = {.type = NLA_NESTED},
	[DCB_ATTR_APP]         = {.type = NLA_NESTED},
	[DCB_ATTR_IEEE]        = {.type = NLA_NESTED},
	[DCB_ATTR_DCBX]        = {.type = NLA_U8},
	[DCB_ATTR_FEATCFG]     = {.type = NLA_NESTED},
};

/* DCB priority flow control to User Priority nested attributes */
static const struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
	[DCB_PFC_UP_ATTR_0]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_1]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_2]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_3]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_4]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_5]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_6]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_7]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_ALL] = {.type = NLA_FLAG},
};

/* DCB priority grouping nested attributes */
static const struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
	[DCB_PG_ATTR_TC_0]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_1]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_2]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_3]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_4]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_5]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_6]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_7]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_ALL]    = {.type = NLA_NESTED},
	[DCB_PG_ATTR_BW_ID_0]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_1]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_2]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_3]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_4]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_5]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_6]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_7]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_ALL] = {.type = NLA_FLAG},
};

/* DCB traffic class nested attributes. */
static const struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
	[DCB_TC_ATTR_PARAM_PGID]        = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_UP_MAPPING]  = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_STRICT_PRIO] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_BW_PCT]      = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_ALL]         = {.type = NLA_FLAG},
};

/* DCB capabilities nested attributes. */
static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
	[DCB_CAP_ATTR_ALL]     = {.type = NLA_FLAG},
	[DCB_CAP_ATTR_PG]      = {.type = NLA_U8},
	[DCB_CAP_ATTR_PFC]     = {.type = NLA_U8},
	[DCB_CAP_ATTR_UP2TC]   = {.type = NLA_U8},
	[DCB_CAP_ATTR_PG_TCS]  = {.type = NLA_U8},
	[DCB_CAP_ATTR_PFC_TCS] = {.type = NLA_U8},
	[DCB_CAP_ATTR_GSP]     = {.type = NLA_U8},
	[DCB_CAP_ATTR_BCN]     = {.type = NLA_U8},
	[DCB_CAP_ATTR_DCBX]    = {.type = NLA_U8},
};

/* DCB number of traffic classes nested attributes. */
static const struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
	[DCB_NUMTCS_ATTR_ALL] = {.type = NLA_FLAG},
	[DCB_NUMTCS_ATTR_PG]  = {.type = NLA_U8},
	[DCB_NUMTCS_ATTR_PFC] = {.type = NLA_U8},
};

/* DCB BCN nested attributes. */
static const struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
	[DCB_BCN_ATTR_RP_0]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_1]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_2]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_3]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_4]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_5]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_6]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_7]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_ALL] = {.type = NLA_FLAG},
	[DCB_BCN_ATTR_BCNA_0] = {.type = NLA_U32},
	[DCB_BCN_ATTR_BCNA_1] = {.type = NLA_U32},
	[DCB_BCN_ATTR_ALPHA]  = {.type = NLA_U32},
	[DCB_BCN_ATTR_BETA]   = {.type = NLA_U32},
	[DCB_BCN_ATTR_GD]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_GI]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_TMAX]   = {.type = NLA_U32},
	[DCB_BCN_ATTR_TD]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_RMIN]   = {.type = NLA_U32},
	[DCB_BCN_ATTR_W]      = {.type = NLA_U32},
	[DCB_BCN_ATTR_RD]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_RU]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_WRTT]   = {.type = NLA_U32},
	[DCB_BCN_ATTR_RI]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_C]      = {.type = NLA_U32},
	[DCB_BCN_ATTR_ALL]    = {.type = NLA_FLAG},
};

/* DCB APP nested attributes. */
static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
	[DCB_APP_ATTR_IDTYPE]   = {.type = NLA_U8},
	[DCB_APP_ATTR_ID]       = {.type = NLA_U16},
	[DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8},
};

/* IEEE 802.1Qaz nested attributes. */
static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = {
	[DCB_ATTR_IEEE_ETS]       = {.len = sizeof(struct ieee_ets)},
	[DCB_ATTR_IEEE_PFC]       = {.len = sizeof(struct ieee_pfc)},
	[DCB_ATTR_IEEE_APP_TABLE] = {.type = NLA_NESTED},
	[DCB_ATTR_IEEE_MAXRATE]   = {.len = sizeof(struct ieee_maxrate)},
	[DCB_ATTR_IEEE_QCN]       = {.len = sizeof(struct ieee_qcn)},
	[DCB_ATTR_IEEE_QCN_STATS] = {.len = sizeof(struct ieee_qcn_stats)},
	[DCB_ATTR_DCB_BUFFER]     = {.len = sizeof(struct dcbnl_buffer)},
};
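
/* Note the split above: the CEE policies are built from individually typed
 * NLA_U8/NLA_U32/NLA_FLAG leaves, while the IEEE 802.1Qaz attributes carry
 * whole binary structs (struct ieee_ets, struct ieee_pfc, ...) validated
 * only by the .len given in dcbnl_ieee_policy; dcbnl_ieee_fill() and
 * dcbnl_ieee_set() below emit and consume them as flat memory.
 */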

/* DCB feature configuration nested attributes. */
static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = {
	[DCB_FEATCFG_ATTR_ALL] = {.type = NLA_FLAG},
	[DCB_FEATCFG_ATTR_PG]  = {.type = NLA_U8},
	[DCB_FEATCFG_ATTR_PFC] = {.type = NLA_U8},
	[DCB_FEATCFG_ATTR_APP] = {.type = NLA_U8},
};

static LIST_HEAD(dcb_app_list);
static DEFINE_SPINLOCK(dcb_lock);

static struct sk_buff *dcbnl_newmsg(int type, u8 cmd, u32 port, u32 seq,
				    u32 flags, struct nlmsghdr **nlhp)
{
	struct sk_buff *skb;
	struct dcbmsg *dcb;
	struct nlmsghdr *nlh;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return NULL;

	nlh = nlmsg_put(skb, port, seq, type, sizeof(*dcb), flags);
	BUG_ON(!nlh);

	dcb = nlmsg_data(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = cmd;
	dcb->dcb_pad = 0;

	if (nlhp)
		*nlhp = nlh;

	return skb;
}

static int dcbnl_getstate(struct net_device *netdev, struct nlmsghdr *nlh,
			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	/* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */
	if (!netdev->dcbnl_ops->getstate)
		return -EOPNOTSUPP;

	return nla_put_u8(skb, DCB_ATTR_STATE,
			  netdev->dcbnl_ops->getstate(netdev));
}

static int dcbnl_getpfccfg(struct net_device *netdev, struct nlmsghdr *nlh,
			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest;
	u8 value;
	int ret;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_PFC_CFG])
		return -EINVAL;

	if (!netdev->dcbnl_ops->getpfccfg)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
			       tb[DCB_ATTR_PFC_CFG], dcbnl_pfc_up_nest, NULL);
	if (ret)
		return ret;

	nest = nla_nest_start(skb, DCB_ATTR_PFC_CFG);
	if (!nest)
		return -EMSGSIZE;

	if (data[DCB_PFC_UP_ATTR_ALL])
		getall = 1;

	for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
		if (!getall && !data[i])
			continue;

		netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0,
					     &value);
		ret = nla_put_u8(skb, i, value);
		if (ret) {
			nla_nest_cancel(skb, nest);
			return ret;
		}
	}
	nla_nest_end(skb, nest);

	return 0;
}

static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlmsghdr *nlh,
				u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	u8 perm_addr[MAX_ADDR_LEN];

	if (!netdev->dcbnl_ops->getpermhwaddr)
		return -EOPNOTSUPP;

	memset(perm_addr, 0, sizeof(perm_addr));
	netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);

	return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr);
}
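
/* dcbnl_getcap() below, like dcbnl_getpfccfg() above and the other GET
 * handlers that follow, uses a common query pattern: the request nests
 * exactly the sub-attributes the caller wants, or a single *_ATTR_ALL flag
 * to select everything, and the reply mirrors that nesting with one value
 * attribute per selected item.
 */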

static int dcbnl_getcap(struct net_device *netdev, struct nlmsghdr *nlh,
			u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest;
	u8 value;
	int ret;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_CAP])
		return -EINVAL;

	if (!netdev->dcbnl_ops->getcap)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(data, DCB_CAP_ATTR_MAX, tb[DCB_ATTR_CAP],
			       dcbnl_cap_nest, NULL);
	if (ret)
		return ret;

	nest = nla_nest_start(skb, DCB_ATTR_CAP);
	if (!nest)
		return -EMSGSIZE;

	if (data[DCB_CAP_ATTR_ALL])
		getall = 1;

	for (i = DCB_CAP_ATTR_ALL + 1; i <= DCB_CAP_ATTR_MAX; i++) {
		if (!getall && !data[i])
			continue;

		if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) {
			ret = nla_put_u8(skb, i, value);
			if (ret) {
				nla_nest_cancel(skb, nest);
				return ret;
			}
		}
	}
	nla_nest_end(skb, nest);

	return 0;
}

static int dcbnl_getnumtcs(struct net_device *netdev, struct nlmsghdr *nlh,
			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest;
	u8 value;
	int ret;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_NUMTCS])
		return -EINVAL;

	if (!netdev->dcbnl_ops->getnumtcs)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
			       dcbnl_numtcs_nest, NULL);
	if (ret)
		return ret;

	nest = nla_nest_start(skb, DCB_ATTR_NUMTCS);
	if (!nest)
		return -EMSGSIZE;

	if (data[DCB_NUMTCS_ATTR_ALL])
		getall = 1;

	for (i = DCB_NUMTCS_ATTR_ALL + 1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
		if (!getall && !data[i])
			continue;

		ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value);
		if (!ret) {
			ret = nla_put_u8(skb, i, value);
			if (ret) {
				nla_nest_cancel(skb, nest);
				return ret;
			}
		} else {
			return -EINVAL;
		}
	}
	nla_nest_end(skb, nest);

	return 0;
}

static int dcbnl_setnumtcs(struct net_device *netdev, struct nlmsghdr *nlh,
			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1];
	int ret;
	u8 value;
	int i;

	if (!tb[DCB_ATTR_NUMTCS])
		return -EINVAL;

	if (!netdev->dcbnl_ops->setnumtcs)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
			       dcbnl_numtcs_nest, NULL);
	if (ret)
		return ret;

	for (i = DCB_NUMTCS_ATTR_ALL + 1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
		if (data[i] == NULL)
			continue;

		value = nla_get_u8(data[i]);

		ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value);
		if (ret)
			break;
	}

	return nla_put_u8(skb, DCB_ATTR_NUMTCS, !!ret);
}

static int dcbnl_getpfcstate(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	if (!netdev->dcbnl_ops->getpfcstate)
		return -EOPNOTSUPP;

	return nla_put_u8(skb, DCB_ATTR_PFC_STATE,
			  netdev->dcbnl_ops->getpfcstate(netdev));
}

static int dcbnl_setpfcstate(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	u8 value;

	if (!tb[DCB_ATTR_PFC_STATE])
		return -EINVAL;

	if (!netdev->dcbnl_ops->setpfcstate)
		return -EOPNOTSUPP;

	value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]);

	netdev->dcbnl_ops->setpfcstate(netdev, value);

	return nla_put_u8(skb, DCB_ATTR_PFC_STATE, 0);
}
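
/* CEE APP table: an application entry selects traffic by Ethertype or by
 * TCP/UDP port number and maps it to an 802.1p user priority bitmap.  As an
 * illustrative example, DCBX configurations commonly carry an entry for the
 * FCoE Ethertype 0x8906 so that FCoE frames ride a lossless priority.
 */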

static int dcbnl_getapp(struct net_device *netdev, struct nlmsghdr *nlh,
			u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *app_nest;
	struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
	u16 id;
	u8 up, idtype;
	int ret;

	if (!tb[DCB_ATTR_APP])
		return -EINVAL;

	ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
			       dcbnl_app_nest, NULL);
	if (ret)
		return ret;

	/* all must be non-null */
	if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
	    (!app_tb[DCB_APP_ATTR_ID]))
		return -EINVAL;

	/* either by eth type or by socket number */
	idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
	if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
	    (idtype != DCB_APP_IDTYPE_PORTNUM))
		return -EINVAL;

	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);

	if (netdev->dcbnl_ops->getapp) {
		ret = netdev->dcbnl_ops->getapp(netdev, idtype, id);
		if (ret < 0)
			return ret;
		else
			up = ret;
	} else {
		struct dcb_app app = {
			.selector = idtype,
			.protocol = id,
		};
		up = dcb_getapp(netdev, &app);
	}

	app_nest = nla_nest_start(skb, DCB_ATTR_APP);
	if (!app_nest)
		return -EMSGSIZE;

	ret = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE, idtype);
	if (ret)
		goto out_cancel;

	ret = nla_put_u16(skb, DCB_APP_ATTR_ID, id);
	if (ret)
		goto out_cancel;

	ret = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY, up);
	if (ret)
		goto out_cancel;

	nla_nest_end(skb, app_nest);

	return 0;

out_cancel:
	nla_nest_cancel(skb, app_nest);
	return ret;
}

static int dcbnl_setapp(struct net_device *netdev, struct nlmsghdr *nlh,
			u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	int ret;
	u16 id;
	u8 up, idtype;
	struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];

	if (!tb[DCB_ATTR_APP])
		return -EINVAL;

	ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
			       dcbnl_app_nest, NULL);
	if (ret)
		return ret;

	/* all must be non-null */
	if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
	    (!app_tb[DCB_APP_ATTR_ID]) ||
	    (!app_tb[DCB_APP_ATTR_PRIORITY]))
		return -EINVAL;

	/* either by eth type or by socket number */
	idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
	if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
	    (idtype != DCB_APP_IDTYPE_PORTNUM))
		return -EINVAL;

	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
	up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]);

	if (netdev->dcbnl_ops->setapp) {
		ret = netdev->dcbnl_ops->setapp(netdev, idtype, id, up);
		if (ret < 0)
			return ret;
	} else {
		struct dcb_app app;

		app.selector = idtype;
		app.protocol = id;
		app.priority = up;
		ret = dcb_setapp(netdev, &app);
	}

	ret = nla_put_u8(skb, DCB_ATTR_APP, ret);
	dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SAPP, seq, 0);

	return ret;
}
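
/* Priority group (PG/ETS) configuration is a two-part nest:
 * DCB_PG_ATTR_TC_0..7 each wrap the per-traffic-class parameters (PG ID,
 * strict priority, bandwidth percentage, user priority map), while
 * DCB_PG_ATTR_BW_ID_0..7 carry the per-group bandwidth percentages.  The
 * dir argument selects the Rx (1) or Tx (0) flavor of the driver callbacks.
 */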

static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     struct nlattr **tb, struct sk_buff *skb, int dir)
{
	struct nlattr *pg_nest, *param_nest, *data;
	struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
	struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
	u8 prio, pgid, tc_pct, up_map;
	int ret;
	int getall = 0;
	int i;

	if (!tb[DCB_ATTR_PG_CFG])
		return -EINVAL;

	if (!netdev->dcbnl_ops->getpgtccfgtx ||
	    !netdev->dcbnl_ops->getpgtccfgrx ||
	    !netdev->dcbnl_ops->getpgbwgcfgtx ||
	    !netdev->dcbnl_ops->getpgbwgcfgrx)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX, tb[DCB_ATTR_PG_CFG],
			       dcbnl_pg_nest, NULL);
	if (ret)
		return ret;

	pg_nest = nla_nest_start(skb, DCB_ATTR_PG_CFG);
	if (!pg_nest)
		return -EMSGSIZE;

	if (pg_tb[DCB_PG_ATTR_TC_ALL])
		getall = 1;

	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		if (!getall && !pg_tb[i])
			continue;

		if (pg_tb[DCB_PG_ATTR_TC_ALL])
			data = pg_tb[DCB_PG_ATTR_TC_ALL];
		else
			data = pg_tb[i];
		ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX, data,
				       dcbnl_tc_param_nest, NULL);
		if (ret)
			goto err_pg;

		param_nest = nla_nest_start(skb, i);
		if (!param_nest)
			goto err_pg;

		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->getpgtccfgrx(netdev,
					i - DCB_PG_ATTR_TC_0, &prio,
					&pgid, &tc_pct, &up_map);
		} else {
			/* Tx */
			netdev->dcbnl_ops->getpgtccfgtx(netdev,
					i - DCB_PG_ATTR_TC_0, &prio,
					&pgid, &tc_pct, &up_map);
		}

		if (param_tb[DCB_TC_ATTR_PARAM_PGID] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(skb,
					 DCB_TC_ATTR_PARAM_PGID, pgid);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(skb,
					 DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(skb,
					 DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT,
					 tc_pct);
			if (ret)
				goto err_param;
		}
		nla_nest_end(skb, param_nest);
	}

	if (pg_tb[DCB_PG_ATTR_BW_ID_ALL])
		getall = 1;
	else
		getall = 0;

	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		if (!getall && !pg_tb[i])
			continue;

		tc_pct = DCB_ATTR_VALUE_UNDEFINED;

		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->getpgbwgcfgrx(netdev,
					i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
		} else {
			/* Tx */
			netdev->dcbnl_ops->getpgbwgcfgtx(netdev,
					i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
		}
		ret = nla_put_u8(skb, i, tc_pct);
		if (ret)
			goto err_pg;
	}

	nla_nest_end(skb, pg_nest);

	return 0;

err_param:
	nla_nest_cancel(skb, param_nest);
err_pg:
	nla_nest_cancel(skb, pg_nest);

	return -EMSGSIZE;
}

static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 0);
}

static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 1);
}

static int dcbnl_setstate(struct net_device *netdev, struct nlmsghdr *nlh,
			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	u8 value;

	if (!tb[DCB_ATTR_STATE])
		return -EINVAL;

	if (!netdev->dcbnl_ops->setstate)
		return -EOPNOTSUPP;

	value = nla_get_u8(tb[DCB_ATTR_STATE]);

	return nla_put_u8(skb, DCB_ATTR_STATE,
			  netdev->dcbnl_ops->setstate(netdev, value));
}

static int dcbnl_setpfccfg(struct net_device *netdev, struct nlmsghdr *nlh,
			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1];
	int i;
	int ret;
	u8 value;

	if (!tb[DCB_ATTR_PFC_CFG])
		return -EINVAL;

	if (!netdev->dcbnl_ops->setpfccfg)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
			       tb[DCB_ATTR_PFC_CFG], dcbnl_pfc_up_nest, NULL);
	if (ret)
		return ret;

	for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
		if (data[i] == NULL)
			continue;
		value = nla_get_u8(data[i]);
		netdev->dcbnl_ops->setpfccfg(netdev,
			data[i]->nla_type - DCB_PFC_UP_ATTR_0, value);
	}

	return nla_put_u8(skb, DCB_ATTR_PFC_CFG, 0);
}
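
/* DCB_CMD_SET_ALL: CEE-style drivers typically only stage the values
 * written by the SET handlers above and program the hardware when userspace
 * issues this "commit" command; the reply carries the driver's status byte,
 * and a CEE notification is sent either way.
 */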

static int dcbnl_setall(struct net_device *netdev, struct nlmsghdr *nlh,
			u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	int ret;

	if (!tb[DCB_ATTR_SET_ALL])
		return -EINVAL;

	if (!netdev->dcbnl_ops->setall)
		return -EOPNOTSUPP;

	ret = nla_put_u8(skb, DCB_ATTR_SET_ALL,
			 netdev->dcbnl_ops->setall(netdev));
	dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SET_ALL, seq, 0);

	return ret;
}

static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb,
			     int dir)
{
	struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
	struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
	int ret;
	int i;
	u8 pgid;
	u8 up_map;
	u8 prio;
	u8 tc_pct;

	if (!tb[DCB_ATTR_PG_CFG])
		return -EINVAL;

	if (!netdev->dcbnl_ops->setpgtccfgtx ||
	    !netdev->dcbnl_ops->setpgtccfgrx ||
	    !netdev->dcbnl_ops->setpgbwgcfgtx ||
	    !netdev->dcbnl_ops->setpgbwgcfgrx)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX, tb[DCB_ATTR_PG_CFG],
			       dcbnl_pg_nest, NULL);
	if (ret)
		return ret;

	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		if (!pg_tb[i])
			continue;

		ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
				       pg_tb[i], dcbnl_tc_param_nest, NULL);
		if (ret)
			return ret;

		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO])
			prio =
			    nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]);

		if (param_tb[DCB_TC_ATTR_PARAM_PGID])
			pgid = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_PGID]);

		if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT])
			tc_pct = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_BW_PCT]);

		if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING])
			up_map =
			    nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]);

		/* dir: Tx = 0, Rx = 1 */
		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->setpgtccfgrx(netdev,
				i - DCB_PG_ATTR_TC_0,
				prio, pgid, tc_pct, up_map);
		} else {
			/* Tx */
			netdev->dcbnl_ops->setpgtccfgtx(netdev,
				i - DCB_PG_ATTR_TC_0,
				prio, pgid, tc_pct, up_map);
		}
	}

	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		if (!pg_tb[i])
			continue;

		tc_pct = nla_get_u8(pg_tb[i]);

		/* dir: Tx = 0, Rx = 1 */
		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->setpgbwgcfgrx(netdev,
				i - DCB_PG_ATTR_BW_ID_0, tc_pct);
		} else {
			/* Tx */
			netdev->dcbnl_ops->setpgbwgcfgtx(netdev,
				i - DCB_PG_ATTR_BW_ID_0, tc_pct);
		}
	}

	return nla_put_u8(skb, DCB_ATTR_PG_CFG, 0);
}

static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 0);
}

static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 1);
}
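
/* BCN (Backward Congestion Notification) was a pre-standard congestion
 * management scheme that preceded 802.1Qau QCN; the per-priority RP toggles
 * and the numeric parameters handled below are meaningful only to the few
 * drivers that implement the getbcnrp/getbcncfg and setbcnrp/setbcncfg
 * callbacks.
 */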

static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			    u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *bcn_nest;
	struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1];
	u8 value_byte;
	u32 value_integer;
	int ret;
	bool getall = false;
	int i;

	if (!tb[DCB_ATTR_BCN])
		return -EINVAL;

	if (!netdev->dcbnl_ops->getbcnrp ||
	    !netdev->dcbnl_ops->getbcncfg)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(bcn_tb, DCB_BCN_ATTR_MAX, tb[DCB_ATTR_BCN],
			       dcbnl_bcn_nest, NULL);
	if (ret)
		return ret;

	bcn_nest = nla_nest_start(skb, DCB_ATTR_BCN);
	if (!bcn_nest)
		return -EMSGSIZE;

	if (bcn_tb[DCB_BCN_ATTR_ALL])
		getall = true;

	for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
		if (!getall && !bcn_tb[i])
			continue;

		netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0,
					    &value_byte);
		ret = nla_put_u8(skb, i, value_byte);
		if (ret)
			goto err_bcn;
	}

	for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
		if (!getall && !bcn_tb[i])
			continue;

		netdev->dcbnl_ops->getbcncfg(netdev, i,
					     &value_integer);
		ret = nla_put_u32(skb, i, value_integer);
		if (ret)
			goto err_bcn;
	}

	nla_nest_end(skb, bcn_nest);

	return 0;

err_bcn:
	nla_nest_cancel(skb, bcn_nest);
	return ret;
}

static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			    u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_BCN_ATTR_MAX + 1];
	int i;
	int ret;
	u8 value_byte;
	u32 value_int;

	if (!tb[DCB_ATTR_BCN])
		return -EINVAL;

	if (!netdev->dcbnl_ops->setbcncfg ||
	    !netdev->dcbnl_ops->setbcnrp)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(data, DCB_BCN_ATTR_MAX, tb[DCB_ATTR_BCN],
			       dcbnl_bcn_nest, NULL);
	if (ret)
		return ret;

	for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
		if (data[i] == NULL)
			continue;
		value_byte = nla_get_u8(data[i]);
		netdev->dcbnl_ops->setbcnrp(netdev,
			data[i]->nla_type - DCB_BCN_ATTR_RP_0, value_byte);
	}

	for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
		if (data[i] == NULL)
			continue;
		value_int = nla_get_u32(data[i]);
		netdev->dcbnl_ops->setbcncfg(netdev,
					     i, value_int);
	}

	return nla_put_u8(skb, DCB_ATTR_BCN, 0);
}

static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff *skb,
				int app_nested_type, int app_info_type,
				int app_entry_type)
{
	struct dcb_peer_app_info info;
	struct dcb_app *table = NULL;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	u16 app_count;
	int err;

	/* retrieve the peer app configuration from the driver.  If the
	 * driver handlers fail, exit without doing anything.
	 */
	err = ops->peer_getappinfo(netdev, &info, &app_count);
	if (!err && app_count) {
		table = kmalloc_array(app_count, sizeof(struct dcb_app),
				      GFP_KERNEL);
		if (!table)
			return -ENOMEM;

		err = ops->peer_getapptable(netdev, table);
	}

	if (!err) {
		u16 i;
		struct nlattr *app;

		/* build the message; from here on the only possible failure
		 * is due to the skb size
		 */
		err = -EMSGSIZE;

		app = nla_nest_start(skb, app_nested_type);
		if (!app)
			goto nla_put_failure;

		if (app_info_type &&
		    nla_put(skb, app_info_type, sizeof(info), &info))
			goto nla_put_failure;

		for (i = 0; i < app_count; i++) {
			if (nla_put(skb, app_entry_type, sizeof(struct dcb_app),
				    &table[i]))
				goto nla_put_failure;
		}
		nla_nest_end(skb, app);
	}
	err = 0;

nla_put_failure:
	kfree(table);
	return err;
}
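
/* Reply layout produced by dcbnl_ieee_fill() below; each attribute is
 * present only when the corresponding driver callback exists and succeeds:
 *
 *	DCB_ATTR_IFNAME
 *	DCB_ATTR_IEEE (nested)
 *		DCB_ATTR_IEEE_ETS / _MAXRATE / _QCN / _QCN_STATS / _PFC
 *		DCB_ATTR_DCB_BUFFER
 *		DCB_ATTR_IEEE_APP_TABLE (nested DCB_ATTR_IEEE_APP entries)
 *		DCB_ATTR_IEEE_PEER_ETS / _PEER_PFC / peer APP entries
 *	DCB_ATTR_DCBX
 */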

/* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb GET commands. */
static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
{
	struct nlattr *ieee, *app;
	struct dcb_app_type *itr;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	int dcbx;
	int err;

	if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
		return -EMSGSIZE;

	ieee = nla_nest_start(skb, DCB_ATTR_IEEE);
	if (!ieee)
		return -EMSGSIZE;

	if (ops->ieee_getets) {
		struct ieee_ets ets;

		memset(&ets, 0, sizeof(ets));
		err = ops->ieee_getets(netdev, &ets);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets))
			return -EMSGSIZE;
	}

	if (ops->ieee_getmaxrate) {
		struct ieee_maxrate maxrate;

		memset(&maxrate, 0, sizeof(maxrate));
		err = ops->ieee_getmaxrate(netdev, &maxrate);
		if (!err) {
			err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE,
				      sizeof(maxrate), &maxrate);
			if (err)
				return -EMSGSIZE;
		}
	}

	if (ops->ieee_getqcn) {
		struct ieee_qcn qcn;

		memset(&qcn, 0, sizeof(qcn));
		err = ops->ieee_getqcn(netdev, &qcn);
		if (!err) {
			err = nla_put(skb, DCB_ATTR_IEEE_QCN,
				      sizeof(qcn), &qcn);
			if (err)
				return -EMSGSIZE;
		}
	}

	if (ops->ieee_getqcnstats) {
		struct ieee_qcn_stats qcn_stats;

		memset(&qcn_stats, 0, sizeof(qcn_stats));
		err = ops->ieee_getqcnstats(netdev, &qcn_stats);
		if (!err) {
			err = nla_put(skb, DCB_ATTR_IEEE_QCN_STATS,
				      sizeof(qcn_stats), &qcn_stats);
			if (err)
				return -EMSGSIZE;
		}
	}

	if (ops->ieee_getpfc) {
		struct ieee_pfc pfc;

		memset(&pfc, 0, sizeof(pfc));
		err = ops->ieee_getpfc(netdev, &pfc);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc))
			return -EMSGSIZE;
	}

	if (ops->dcbnl_getbuffer) {
		struct dcbnl_buffer buffer;

		memset(&buffer, 0, sizeof(buffer));
		err = ops->dcbnl_getbuffer(netdev, &buffer);
		if (!err &&
		    nla_put(skb, DCB_ATTR_DCB_BUFFER, sizeof(buffer), &buffer))
			return -EMSGSIZE;
	}

	app = nla_nest_start(skb, DCB_ATTR_IEEE_APP_TABLE);
	if (!app)
		return -EMSGSIZE;

	spin_lock_bh(&dcb_lock);
	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->ifindex == netdev->ifindex) {
			err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app),
				      &itr->app);
			if (err) {
				spin_unlock_bh(&dcb_lock);
				return -EMSGSIZE;
			}
		}
	}

	if (netdev->dcbnl_ops->getdcbx)
		dcbx = netdev->dcbnl_ops->getdcbx(netdev);
	else
		dcbx = -EOPNOTSUPP;

	spin_unlock_bh(&dcb_lock);
	nla_nest_end(skb, app);

	/* get peer info if available */
	if (ops->ieee_peer_getets) {
		struct ieee_ets ets;

		memset(&ets, 0, sizeof(ets));
		err = ops->ieee_peer_getets(netdev, &ets);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets))
			return -EMSGSIZE;
	}

	if (ops->ieee_peer_getpfc) {
		struct ieee_pfc pfc;

		memset(&pfc, 0, sizeof(pfc));
		err = ops->ieee_peer_getpfc(netdev, &pfc);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc))
			return -EMSGSIZE;
	}

	if (ops->peer_getappinfo && ops->peer_getapptable) {
		err = dcbnl_build_peer_app(netdev, skb,
					   DCB_ATTR_IEEE_PEER_APP,
					   DCB_ATTR_IEEE_APP_UNSPEC,
					   DCB_ATTR_IEEE_APP);
		if (err)
			return -EMSGSIZE;
	}

	nla_nest_end(skb, ieee);
	if (dcbx >= 0) {
		err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
		if (err)
			return -EMSGSIZE;
	}

	return 0;
}

static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
			     int dir)
{
	u8 pgid, up_map, prio, tc_pct;
	const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
	int i = dir ? DCB_ATTR_CEE_TX_PG : DCB_ATTR_CEE_RX_PG;
	struct nlattr *pg = nla_nest_start(skb, i);

	if (!pg)
		return -EMSGSIZE;

	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		struct nlattr *tc_nest = nla_nest_start(skb, i);

		if (!tc_nest)
			return -EMSGSIZE;

		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (!dir)
			ops->getpgtccfgrx(dev, i - DCB_PG_ATTR_TC_0,
					  &prio, &pgid, &tc_pct, &up_map);
		else
			ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0,
					  &prio, &pgid, &tc_pct, &up_map);

		if (nla_put_u8(skb, DCB_TC_ATTR_PARAM_PGID, pgid) ||
		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map) ||
		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio) ||
		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct))
			return -EMSGSIZE;
		nla_nest_end(skb, tc_nest);
	}

	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;

		if (!dir)
			ops->getpgbwgcfgrx(dev, i - DCB_PG_ATTR_BW_ID_0,
					   &tc_pct);
		else
			ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0,
					   &tc_pct);
		if (nla_put_u8(skb, i, tc_pct))
			return -EMSGSIZE;
	}
	nla_nest_end(skb, pg);
	return 0;
}
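
/* dcbnl_cee_fill() below aggregates the complete CEE state (Tx/Rx PG, PFC,
 * the APP table, feature flags and any peer info) into one DCB_ATTR_CEE
 * nest; it serves both DCB_CMD_CEE_GET replies and RTNLGRP_DCB
 * notifications.
 */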

static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
{
	struct nlattr *cee, *app;
	struct dcb_app_type *itr;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	int dcbx, i, err = -EMSGSIZE;
	u8 value;

	if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
		goto nla_put_failure;
	cee = nla_nest_start(skb, DCB_ATTR_CEE);
	if (!cee)
		goto nla_put_failure;

	/* local pg */
	if (ops->getpgtccfgtx && ops->getpgbwgcfgtx) {
		err = dcbnl_cee_pg_fill(skb, netdev, 1);
		if (err)
			goto nla_put_failure;
	}

	if (ops->getpgtccfgrx && ops->getpgbwgcfgrx) {
		err = dcbnl_cee_pg_fill(skb, netdev, 0);
		if (err)
			goto nla_put_failure;
	}

	/* local pfc */
	if (ops->getpfccfg) {
		struct nlattr *pfc_nest = nla_nest_start(skb, DCB_ATTR_CEE_PFC);

		if (!pfc_nest)
			goto nla_put_failure;

		for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
			ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value);
			if (nla_put_u8(skb, i, value))
				goto nla_put_failure;
		}
		nla_nest_end(skb, pfc_nest);
	}

	/* local app */
	spin_lock_bh(&dcb_lock);
	app = nla_nest_start(skb, DCB_ATTR_CEE_APP_TABLE);
	if (!app)
		goto dcb_unlock;

	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->ifindex == netdev->ifindex) {
			struct nlattr *app_nest = nla_nest_start(skb,
								 DCB_ATTR_APP);
			if (!app_nest)
				goto dcb_unlock;

			err = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE,
					 itr->app.selector);
			if (err)
				goto dcb_unlock;

			err = nla_put_u16(skb, DCB_APP_ATTR_ID,
					  itr->app.protocol);
			if (err)
				goto dcb_unlock;

			err = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY,
					 itr->app.priority);
			if (err)
				goto dcb_unlock;

			nla_nest_end(skb, app_nest);
		}
	}
	nla_nest_end(skb, app);

	if (netdev->dcbnl_ops->getdcbx)
		dcbx = netdev->dcbnl_ops->getdcbx(netdev);
	else
		dcbx = -EOPNOTSUPP;

	spin_unlock_bh(&dcb_lock);

	/* features flags */
	if (ops->getfeatcfg) {
		struct nlattr *feat = nla_nest_start(skb, DCB_ATTR_CEE_FEAT);

		if (!feat)
			goto nla_put_failure;

		for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX;
		     i++)
			if (!ops->getfeatcfg(netdev, i, &value) &&
			    nla_put_u8(skb, i, value))
				goto nla_put_failure;

		nla_nest_end(skb, feat);
	}

	/* peer info if available */
	if (ops->cee_peer_getpg) {
		struct cee_pg pg;

		memset(&pg, 0, sizeof(pg));
		err = ops->cee_peer_getpg(netdev, &pg);
		if (!err &&
		    nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg))
			goto nla_put_failure;
	}

	if (ops->cee_peer_getpfc) {
		struct cee_pfc pfc;

		memset(&pfc, 0, sizeof(pfc));
		err = ops->cee_peer_getpfc(netdev, &pfc);
		if (!err &&
		    nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc))
			goto nla_put_failure;
	}

	if (ops->peer_getappinfo && ops->peer_getapptable) {
		err = dcbnl_build_peer_app(netdev, skb,
					   DCB_ATTR_CEE_PEER_APP_TABLE,
					   DCB_ATTR_CEE_PEER_APP_INFO,
					   DCB_ATTR_CEE_PEER_APP);
		if (err)
			goto nla_put_failure;
	}
	nla_nest_end(skb, cee);

	/* DCBX state */
	if (dcbx >= 0) {
		err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
		if (err)
			goto nla_put_failure;
	}
	return 0;

dcb_unlock:
	spin_unlock_bh(&dcb_lock);
nla_put_failure:
	err = -EMSGSIZE;
	return err;
}
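
/* Event notification: rebuild the full IEEE or CEE dump and multicast it to
 * RTNLGRP_DCB listeners.  The same fill routines as the GET path are
 * reused, so a notification looks exactly like a GET reply.
 */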

static int dcbnl_notify(struct net_device *dev, int event, int cmd,
			u32 seq, u32 portid, int dcbx_ver)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
	int err;

	if (!ops)
		return -EOPNOTSUPP;

	skb = dcbnl_newmsg(event, cmd, portid, seq, 0, &nlh);
	if (!skb)
		return -ENOBUFS;

	if (dcbx_ver == DCB_CAP_DCBX_VER_IEEE)
		err = dcbnl_ieee_fill(skb, dev);
	else
		err = dcbnl_cee_fill(skb, dev);

	if (err < 0) {
		/* Report error to broadcast listeners */
		nlmsg_free(skb);
		rtnl_set_sk_err(net, RTNLGRP_DCB, err);
	} else {
		/* End nlmsg and notify broadcast listeners */
		nlmsg_end(skb, nlh);
		rtnl_notify(skb, net, 0, RTNLGRP_DCB, NULL, GFP_KERNEL);
	}

	return err;
}

int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd,
		      u32 seq, u32 portid)
{
	return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_IEEE);
}
EXPORT_SYMBOL(dcbnl_ieee_notify);

int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
		     u32 seq, u32 portid)
{
	return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_CEE);
}
EXPORT_SYMBOL(dcbnl_cee_notify);

/* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb SET commands.
 * If any requested operation cannot be completed, the entire
 * msg is aborted and an error value is returned.
 * No attempt is made to reconcile the case where only part of the
 * cmd can be completed.
 */
static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh,
			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
	int err;

	if (!ops)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_IEEE])
		return -EINVAL;

	err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX, tb[DCB_ATTR_IEEE],
			       dcbnl_ieee_policy, NULL);
	if (err)
		return err;

	if (ieee[DCB_ATTR_IEEE_ETS] && ops->ieee_setets) {
		struct ieee_ets *ets = nla_data(ieee[DCB_ATTR_IEEE_ETS]);

		err = ops->ieee_setets(netdev, ets);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_IEEE_MAXRATE] && ops->ieee_setmaxrate) {
		struct ieee_maxrate *maxrate =
			nla_data(ieee[DCB_ATTR_IEEE_MAXRATE]);

		err = ops->ieee_setmaxrate(netdev, maxrate);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_IEEE_QCN] && ops->ieee_setqcn) {
		struct ieee_qcn *qcn =
			nla_data(ieee[DCB_ATTR_IEEE_QCN]);

		err = ops->ieee_setqcn(netdev, qcn);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
		struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);

		err = ops->ieee_setpfc(netdev, pfc);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_DCB_BUFFER] && ops->dcbnl_setbuffer) {
		struct dcbnl_buffer *buffer =
			nla_data(ieee[DCB_ATTR_DCB_BUFFER]);

		err = ops->dcbnl_setbuffer(netdev, buffer);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
		struct nlattr *attr;
		int rem;

		nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
			struct dcb_app *app_data;

			if (nla_type(attr) != DCB_ATTR_IEEE_APP)
				continue;

			if (nla_len(attr) < sizeof(struct dcb_app)) {
				err = -ERANGE;
				goto err;
			}

			app_data = nla_data(attr);
			if (ops->ieee_setapp)
				err = ops->ieee_setapp(netdev, app_data);
			else
				err = dcb_ieee_setapp(netdev, app_data);
			if (err)
				goto err;
		}
	}

err:
	err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
	dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, seq, 0);
	return err;
}

static int dcbnl_ieee_get(struct net_device *netdev, struct nlmsghdr *nlh,
			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;

	if (!ops)
		return -EOPNOTSUPP;

	return dcbnl_ieee_fill(skb, netdev);
}

static int dcbnl_ieee_del(struct net_device *netdev, struct nlmsghdr *nlh,
			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
	int err;

	if (!ops)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_IEEE])
		return -EINVAL;

	err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX, tb[DCB_ATTR_IEEE],
			       dcbnl_ieee_policy, NULL);
	if (err)
		return err;

	if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
		struct nlattr *attr;
		int rem;

		nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
			struct dcb_app *app_data;

			if (nla_type(attr) != DCB_ATTR_IEEE_APP)
				continue;
			app_data = nla_data(attr);
			if (ops->ieee_delapp)
				err = ops->ieee_delapp(netdev, app_data);
			else
				err = dcb_ieee_delapp(netdev, app_data);
			if (err)
				goto err;
		}
	}

err:
	err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
	dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_DEL, seq, 0);
	return err;
}

/* DCBX configuration */
static int dcbnl_getdcbx(struct net_device *netdev, struct nlmsghdr *nlh,
			 u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	if (!netdev->dcbnl_ops->getdcbx)
		return -EOPNOTSUPP;

	return nla_put_u8(skb, DCB_ATTR_DCBX,
			  netdev->dcbnl_ops->getdcbx(netdev));
}

static int dcbnl_setdcbx(struct net_device *netdev, struct nlmsghdr *nlh,
			 u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	u8 value;

	if (!netdev->dcbnl_ops->setdcbx)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_DCBX])
		return -EINVAL;

	value = nla_get_u8(tb[DCB_ATTR_DCBX]);

	return nla_put_u8(skb, DCB_ATTR_DCBX,
			  netdev->dcbnl_ops->setdcbx(netdev, value));
}

static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			    u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1], *nest;
	u8 value;
	int ret, i;
	int getall = 0;

	if (!netdev->dcbnl_ops->getfeatcfg)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_FEATCFG])
		return -EINVAL;

	ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX,
			       tb[DCB_ATTR_FEATCFG], dcbnl_featcfg_nest, NULL);
	if (ret)
		return ret;

	nest = nla_nest_start(skb, DCB_ATTR_FEATCFG);
	if (!nest)
		return -EMSGSIZE;

	if (data[DCB_FEATCFG_ATTR_ALL])
		getall = 1;

	for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
		if (!getall && !data[i])
			continue;

		ret = netdev->dcbnl_ops->getfeatcfg(netdev, i, &value);
		if (!ret)
			ret = nla_put_u8(skb, i, value);

		if (ret) {
			nla_nest_cancel(skb, nest);
			goto nla_put_failure;
		}
	}
	nla_nest_end(skb, nest);

nla_put_failure:
	return ret;
}

static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			    u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1];
	int ret, i;
	u8 value;

	if (!netdev->dcbnl_ops->setfeatcfg)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_FEATCFG])
		return -EINVAL;

	ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX,
			       tb[DCB_ATTR_FEATCFG], dcbnl_featcfg_nest, NULL);

	if (ret)
		goto err;

	for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
		if (data[i] == NULL)
			continue;

		value = nla_get_u8(data[i]);

		ret = netdev->dcbnl_ops->setfeatcfg(netdev, i, value);

		if (ret)
			goto err;
	}
err:
	ret = nla_put_u8(skb, DCB_ATTR_FEATCFG, ret);

	return ret;
}

/* Handle CEE DCBX GET commands. */
static int dcbnl_cee_get(struct net_device *netdev, struct nlmsghdr *nlh,
			 u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;

	if (!ops)
		return -EOPNOTSUPP;

	return dcbnl_cee_fill(skb, netdev);
}

struct reply_func {
	/* reply netlink message type */
	int type;

	/* function to fill message contents */
	int (*cb)(struct net_device *, struct nlmsghdr *, u32,
		  struct nlattr **, struct sk_buff *);
};

static const struct reply_func reply_funcs[DCB_CMD_MAX + 1] = {
	[DCB_CMD_GSTATE]       = { RTM_GETDCB, dcbnl_getstate },
	[DCB_CMD_SSTATE]       = { RTM_SETDCB, dcbnl_setstate },
	[DCB_CMD_PFC_GCFG]     = { RTM_GETDCB, dcbnl_getpfccfg },
	[DCB_CMD_PFC_SCFG]     = { RTM_SETDCB, dcbnl_setpfccfg },
	[DCB_CMD_GPERM_HWADDR] = { RTM_GETDCB, dcbnl_getperm_hwaddr },
	[DCB_CMD_GCAP]         = { RTM_GETDCB, dcbnl_getcap },
	[DCB_CMD_GNUMTCS]      = { RTM_GETDCB, dcbnl_getnumtcs },
	[DCB_CMD_SNUMTCS]      = { RTM_SETDCB, dcbnl_setnumtcs },
	[DCB_CMD_PFC_GSTATE]   = { RTM_GETDCB, dcbnl_getpfcstate },
	[DCB_CMD_PFC_SSTATE]   = { RTM_SETDCB, dcbnl_setpfcstate },
	[DCB_CMD_GAPP]         = { RTM_GETDCB, dcbnl_getapp },
	[DCB_CMD_SAPP]         = { RTM_SETDCB, dcbnl_setapp },
	[DCB_CMD_PGTX_GCFG]    = { RTM_GETDCB, dcbnl_pgtx_getcfg },
	[DCB_CMD_PGTX_SCFG]    = { RTM_SETDCB, dcbnl_pgtx_setcfg },
	[DCB_CMD_PGRX_GCFG]    = { RTM_GETDCB, dcbnl_pgrx_getcfg },
	[DCB_CMD_PGRX_SCFG]    = { RTM_SETDCB, dcbnl_pgrx_setcfg },
	[DCB_CMD_SET_ALL]      = { RTM_SETDCB, dcbnl_setall },
	[DCB_CMD_BCN_GCFG]     = { RTM_GETDCB, dcbnl_bcn_getcfg },
	[DCB_CMD_BCN_SCFG]     = { RTM_SETDCB, dcbnl_bcn_setcfg },
	[DCB_CMD_IEEE_GET]     = { RTM_GETDCB, dcbnl_ieee_get },
	[DCB_CMD_IEEE_SET]     = { RTM_SETDCB, dcbnl_ieee_set },
	[DCB_CMD_IEEE_DEL]     = { RTM_SETDCB, dcbnl_ieee_del },
	[DCB_CMD_GDCBX]        = { RTM_GETDCB, dcbnl_getdcbx },
	[DCB_CMD_SDCBX]        = { RTM_SETDCB, dcbnl_setdcbx },
	[DCB_CMD_GFEATCFG]     = { RTM_GETDCB, dcbnl_getfeatcfg },
	[DCB_CMD_SFEATCFG]     = { RTM_SETDCB, dcbnl_setfeatcfg },
	[DCB_CMD_CEE_GET]      = { RTM_GETDCB, dcbnl_cee_get },
};
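
/* Single rtnetlink doit() for RTM_GETDCB and RTM_SETDCB: validate the
 * message, look up the command's reply_func, let it fill a freshly
 * allocated reply skb and unicast the result back to the requester.  A
 * negative handler return discards the reply; SET commands additionally
 * require CAP_NET_ADMIN.
 */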

static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
		    struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	struct dcbmsg *dcb = nlmsg_data(nlh);
	struct nlattr *tb[DCB_ATTR_MAX + 1];
	u32 portid = skb ? NETLINK_CB(skb).portid : 0;
	int ret = -EINVAL;
	struct sk_buff *reply_skb;
	struct nlmsghdr *reply_nlh = NULL;
	const struct reply_func *fn;

	if ((nlh->nlmsg_type == RTM_SETDCB) && !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	ret = nlmsg_parse(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX,
			  dcbnl_rtnl_policy, extack);
	if (ret < 0)
		return ret;

	if (dcb->cmd > DCB_CMD_MAX)
		return -EINVAL;

	/* check if a reply function has been defined for the command */
	fn = &reply_funcs[dcb->cmd];
	if (!fn->cb)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_IFNAME])
		return -EINVAL;

	netdev = __dev_get_by_name(net, nla_data(tb[DCB_ATTR_IFNAME]));
	if (!netdev)
		return -ENODEV;

	if (!netdev->dcbnl_ops)
		return -EOPNOTSUPP;

	reply_skb = dcbnl_newmsg(fn->type, dcb->cmd, portid, nlh->nlmsg_seq,
				 nlh->nlmsg_flags, &reply_nlh);
	if (!reply_skb)
		return -ENOBUFS;

	ret = fn->cb(netdev, nlh, nlh->nlmsg_seq, tb, reply_skb);
	if (ret < 0) {
		nlmsg_free(reply_skb);
		goto out;
	}

	nlmsg_end(reply_skb, reply_nlh);

	ret = rtnl_unicast(reply_skb, net, portid);
out:
	return ret;
}

static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app,
					   int ifindex, int prio)
{
	struct dcb_app_type *itr;

	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->app.selector == app->selector &&
		    itr->app.protocol == app->protocol &&
		    itr->ifindex == ifindex &&
		    ((prio == -1) || itr->app.priority == prio))
			return itr;
	}

	return NULL;
}

static int dcb_app_add(const struct dcb_app *app, int ifindex)
{
	struct dcb_app_type *entry;

	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	memcpy(&entry->app, app, sizeof(*app));
	entry->ifindex = ifindex;
	list_add(&entry->list, &dcb_app_list);

	return 0;
}

/**
 * dcb_getapp - retrieve the DCBX application user priority
 *
 * On success returns a non-zero 802.1p user priority bitmap,
 * otherwise returns 0 as the invalid user priority bitmap to
 * indicate an error.
 */
u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
{
	struct dcb_app_type *itr;
	u8 prio = 0;

	spin_lock_bh(&dcb_lock);
	itr = dcb_app_lookup(app, dev->ifindex, -1);
	if (itr)
		prio = itr->app.priority;
	spin_unlock_bh(&dcb_lock);

	return prio;
}
EXPORT_SYMBOL(dcb_getapp);

/**
 * dcb_setapp - add CEE dcb application data to app list
 *
 * Priority 0 is an invalid priority in CEE spec.  This routine
 * removes applications from the app list if the priority is
 * set to zero.  Priority is expected to be an 8-bit 802.1p user
 * priority bitmap.
 */
int dcb_setapp(struct net_device *dev, struct dcb_app *new)
{
	struct dcb_app_type *itr;
	struct dcb_app_type event;
	int err = 0;

	event.ifindex = dev->ifindex;
	memcpy(&event.app, new, sizeof(event.app));
	if (dev->dcbnl_ops->getdcbx)
		event.dcbx = dev->dcbnl_ops->getdcbx(dev);

	spin_lock_bh(&dcb_lock);
	/* Search for existing match and replace */
	itr = dcb_app_lookup(new, dev->ifindex, -1);
	if (itr) {
		if (new->priority) {
			itr->app.priority = new->priority;
		} else {
			list_del(&itr->list);
			kfree(itr);
		}
		goto out;
	}
	/* App type does not exist, add new application type */
	if (new->priority)
		err = dcb_app_add(new, dev->ifindex);
out:
	spin_unlock_bh(&dcb_lock);
	if (!err)
		call_dcbevent_notifiers(DCB_APP_EVENT, &event);
	return err;
}
EXPORT_SYMBOL(dcb_setapp);

/**
 * dcb_ieee_getapp_mask - retrieve the IEEE DCB application priority
 *
 * Helper routine which on success returns a non-zero 802.1Qaz user
 * priority bitmap, otherwise returns 0 to indicate the dcb_app was
 * not found in the APP list.
 */
u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
{
	struct dcb_app_type *itr;
	u8 prio = 0;

	spin_lock_bh(&dcb_lock);
	itr = dcb_app_lookup(app, dev->ifindex, -1);
	if (itr)
		prio |= 1 << itr->app.priority;
	spin_unlock_bh(&dcb_lock);

	return prio;
}
EXPORT_SYMBOL(dcb_ieee_getapp_mask);

/**
 * dcb_ieee_setapp - add IEEE dcb application data to app list
 *
 * This adds Application data to the list.  Multiple application
 * entries may exist for the same selector and protocol as long
 * as the priorities are different.  Priority is expected to be a
 * 3-bit unsigned integer.
 */
int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
{
	struct dcb_app_type event;
	int err = 0;

	event.ifindex = dev->ifindex;
	memcpy(&event.app, new, sizeof(event.app));
	if (dev->dcbnl_ops->getdcbx)
		event.dcbx = dev->dcbnl_ops->getdcbx(dev);

	spin_lock_bh(&dcb_lock);
	/* Search for existing match and abort if found */
	if (dcb_app_lookup(new, dev->ifindex, new->priority)) {
		err = -EEXIST;
		goto out;
	}

	err = dcb_app_add(new, dev->ifindex);
out:
	spin_unlock_bh(&dcb_lock);
	if (!err)
		call_dcbevent_notifiers(DCB_APP_EVENT, &event);
	return err;
}
EXPORT_SYMBOL(dcb_ieee_setapp);

/**
 * dcb_ieee_delapp - delete IEEE dcb application data from list
 *
 * This removes matching APP data from the APP list.
 */
int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
{
	struct dcb_app_type *itr;
	struct dcb_app_type event;
	int err = -ENOENT;

	event.ifindex = dev->ifindex;
	memcpy(&event.app, del, sizeof(event.app));
	if (dev->dcbnl_ops->getdcbx)
		event.dcbx = dev->dcbnl_ops->getdcbx(dev);

	spin_lock_bh(&dcb_lock);
	/* Search for existing match and remove it. */
	itr = dcb_app_lookup(del, dev->ifindex, del->priority);
	if (itr) {
		list_del(&itr->list);
		kfree(itr);
		err = 0;
	}

	spin_unlock_bh(&dcb_lock);
	if (!err)
		call_dcbevent_notifiers(DCB_APP_EVENT, &event);
	return err;
}
EXPORT_SYMBOL(dcb_ieee_delapp);

/**
 * dcb_ieee_getapp_prio_dscp_mask_map - For a given device, find mapping from
 * priorities to the DSCP values assigned to that priority. Initialize p_map
 * such that each map element holds a bit mask of DSCP values configured for
 * that priority by APP entries.
 */
void dcb_ieee_getapp_prio_dscp_mask_map(const struct net_device *dev,
					struct dcb_ieee_app_prio_map *p_map)
{
	int ifindex = dev->ifindex;
	struct dcb_app_type *itr;
	u8 prio;

	memset(p_map->map, 0, sizeof(p_map->map));

	spin_lock_bh(&dcb_lock);
	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->ifindex == ifindex &&
		    itr->app.selector == IEEE_8021QAZ_APP_SEL_DSCP &&
		    itr->app.protocol < 64 &&
		    itr->app.priority < IEEE_8021QAZ_MAX_TCS) {
			prio = itr->app.priority;
			p_map->map[prio] |= 1ULL << itr->app.protocol;
		}
	}
	spin_unlock_bh(&dcb_lock);
}
EXPORT_SYMBOL(dcb_ieee_getapp_prio_dscp_mask_map);

/**
 * dcb_ieee_getapp_dscp_prio_mask_map - For a given device, find mapping from
 * DSCP values to the priorities assigned to that DSCP value. Initialize p_map
 * such that each map element holds a bit mask of priorities configured for a
 * given DSCP value by APP entries.
 */
void
dcb_ieee_getapp_dscp_prio_mask_map(const struct net_device *dev,
				   struct dcb_ieee_app_dscp_map *p_map)
{
	int ifindex = dev->ifindex;
	struct dcb_app_type *itr;

	memset(p_map->map, 0, sizeof(p_map->map));

	spin_lock_bh(&dcb_lock);
	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->ifindex == ifindex &&
		    itr->app.selector == IEEE_8021QAZ_APP_SEL_DSCP &&
		    itr->app.protocol < 64 &&
		    itr->app.priority < IEEE_8021QAZ_MAX_TCS)
			p_map->map[itr->app.protocol] |= 1 << itr->app.priority;
	}
	spin_unlock_bh(&dcb_lock);
}
EXPORT_SYMBOL(dcb_ieee_getapp_dscp_prio_mask_map);
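
/* Worked example for the two mask-map helpers above, assuming a single
 * hypothetical APP entry {IEEE_8021QAZ_APP_SEL_DSCP, protocol = 46,
 * priority = 3}: it sets bit 46 in element 3 of the prio->dscp map and
 * bit 3 in element 46 of the dscp->prio map.
 */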

/**
 * dcb_ieee_getapp_default_prio_mask - For a given device, find all APP
 * entries of the form {$PRIO, ETHERTYPE, 0} and construct a bit mask of
 * all default priorities set by these entries.
 *
 * Per 802.1Q-2014, the selector value of 1 is used for matching on Ethernet
 * type, with valid PID values >= 1536. A special meaning is then assigned to
 * protocol value of 0: "default priority. For use when priority is not
 * otherwise specified".
 */
u8 dcb_ieee_getapp_default_prio_mask(const struct net_device *dev)
{
	int ifindex = dev->ifindex;
	struct dcb_app_type *itr;
	u8 mask = 0;

	spin_lock_bh(&dcb_lock);
	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->ifindex == ifindex &&
		    itr->app.selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
		    itr->app.protocol == 0 &&
		    itr->app.priority < IEEE_8021QAZ_MAX_TCS)
			mask |= 1 << itr->app.priority;
	}
	spin_unlock_bh(&dcb_lock);

	return mask;
}
EXPORT_SYMBOL(dcb_ieee_getapp_default_prio_mask);

static int __init dcbnl_init(void)
{
	INIT_LIST_HEAD(&dcb_app_list);

	rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL, 0);

	return 0;
}
device_initcall(dcbnl_init);