xref: /openbmc/linux/net/dcb/dcbnl.c (revision eb3fcf00)
1 /*
2  * Copyright (c) 2008-2011, Intel Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, see <http://www.gnu.org/licenses/>.
15  *
16  * Author: Lucy Liu <lucy.liu@intel.com>
17  */
18 
19 #include <linux/netdevice.h>
20 #include <linux/netlink.h>
21 #include <linux/slab.h>
22 #include <net/netlink.h>
23 #include <net/rtnetlink.h>
24 #include <linux/dcbnl.h>
25 #include <net/dcbevent.h>
26 #include <linux/rtnetlink.h>
27 #include <linux/module.h>
28 #include <net/sock.h>
29 
30 /* Data Center Bridging (DCB) is a collection of Ethernet enhancements
31  * intended to allow network traffic with differing requirements
32  * (highly reliable, no drops vs. best effort vs. low latency) to operate
33  * and co-exist on Ethernet.  Current DCB features are:
34  *
35  * Enhanced Transmission Selection (aka Priority Grouping [PG]) - provides a
36  *   framework for assigning bandwidth guarantees to traffic classes.
37  *
38  * Priority-based Flow Control (PFC) - provides a flow control mechanism which
39  *   can work independently for each 802.1p priority.
40  *
41  * Congestion Notification - provides a mechanism for end-to-end congestion
42  *   control for protocols which do not have built-in congestion management.
43  *
44  * More information about the emerging standards for these Ethernet features
45  * can be found at: http://www.ieee802.org/1/pages/dcbridges.html
46  *
47  * This file implements an rtnetlink interface to allow configuration of DCB
48  * features for capable devices.
49  */
50 
/* Module metadata reported by modinfo. */
MODULE_AUTHOR("Lucy Liu, <lucy.liu@intel.com>");
MODULE_DESCRIPTION("Data Center Bridging netlink interface");
MODULE_LICENSE("GPL");
54 
/**************** DCB attribute policies *************************************/

/* Validation policy for the top-level DCB netlink attributes (DCB_ATTR_*).
 * Nested attributes are validated again with the per-nest policies below.
 */
static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
	[DCB_ATTR_IFNAME]      = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1},
	[DCB_ATTR_STATE]       = {.type = NLA_U8},
	[DCB_ATTR_PFC_CFG]     = {.type = NLA_NESTED},
	[DCB_ATTR_PG_CFG]      = {.type = NLA_NESTED},
	[DCB_ATTR_SET_ALL]     = {.type = NLA_U8},
	[DCB_ATTR_PERM_HWADDR] = {.type = NLA_FLAG},
	[DCB_ATTR_CAP]         = {.type = NLA_NESTED},
	[DCB_ATTR_PFC_STATE]   = {.type = NLA_U8},
	[DCB_ATTR_BCN]         = {.type = NLA_NESTED},
	[DCB_ATTR_APP]         = {.type = NLA_NESTED},
	[DCB_ATTR_IEEE]	       = {.type = NLA_NESTED},
	[DCB_ATTR_DCBX]        = {.type = NLA_U8},
	[DCB_ATTR_FEATCFG]     = {.type = NLA_NESTED},
};
73 
/* DCB priority flow control to User Priority nested attributes:
 * one u8 enable flag per 802.1p user priority, plus an "all" selector.
 */
static const struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
	[DCB_PFC_UP_ATTR_0]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_1]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_2]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_3]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_4]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_5]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_6]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_7]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_ALL] = {.type = NLA_FLAG},
};
86 
/* DCB priority grouping nested attributes: a nested parameter set per
 * traffic class and a u8 bandwidth percentage per bandwidth group.
 */
static const struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
	[DCB_PG_ATTR_TC_0]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_1]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_2]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_3]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_4]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_5]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_6]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_7]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_ALL]    = {.type = NLA_NESTED},
	[DCB_PG_ATTR_BW_ID_0]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_1]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_2]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_3]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_4]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_5]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_6]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_7]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_ALL] = {.type = NLA_FLAG},
};
108 
/* DCB traffic class nested attributes (one set per DCB_PG_ATTR_TC_n). */
static const struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
	[DCB_TC_ATTR_PARAM_PGID]            = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_UP_MAPPING]      = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_STRICT_PRIO]     = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_BW_PCT]          = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_ALL]             = {.type = NLA_FLAG},
};
117 
/* DCB capabilities nested attributes. */
static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
	[DCB_CAP_ATTR_ALL]     = {.type = NLA_FLAG},
	[DCB_CAP_ATTR_PG]      = {.type = NLA_U8},
	[DCB_CAP_ATTR_PFC]     = {.type = NLA_U8},
	[DCB_CAP_ATTR_UP2TC]   = {.type = NLA_U8},
	[DCB_CAP_ATTR_PG_TCS]  = {.type = NLA_U8},
	[DCB_CAP_ATTR_PFC_TCS] = {.type = NLA_U8},
	[DCB_CAP_ATTR_GSP]     = {.type = NLA_U8},
	[DCB_CAP_ATTR_BCN]     = {.type = NLA_U8},
	[DCB_CAP_ATTR_DCBX]    = {.type = NLA_U8},
};
130 
/* DCB number of traffic classes nested attributes. */
static const struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
	[DCB_NUMTCS_ATTR_ALL]     = {.type = NLA_FLAG},
	[DCB_NUMTCS_ATTR_PG]      = {.type = NLA_U8},
	[DCB_NUMTCS_ATTR_PFC]     = {.type = NLA_U8},
};
137 
/* DCB BCN (Backward Congestion Notification) nested attributes:
 * u8 per-priority RP enables followed by u32 tuning parameters.
 */
static const struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
	[DCB_BCN_ATTR_RP_0]         = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_1]         = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_2]         = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_3]         = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_4]         = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_5]         = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_6]         = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_7]         = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_ALL]       = {.type = NLA_FLAG},
	[DCB_BCN_ATTR_BCNA_0]       = {.type = NLA_U32},
	[DCB_BCN_ATTR_BCNA_1]       = {.type = NLA_U32},
	[DCB_BCN_ATTR_ALPHA]        = {.type = NLA_U32},
	[DCB_BCN_ATTR_BETA]         = {.type = NLA_U32},
	[DCB_BCN_ATTR_GD]           = {.type = NLA_U32},
	[DCB_BCN_ATTR_GI]           = {.type = NLA_U32},
	[DCB_BCN_ATTR_TMAX]         = {.type = NLA_U32},
	[DCB_BCN_ATTR_TD]           = {.type = NLA_U32},
	[DCB_BCN_ATTR_RMIN]         = {.type = NLA_U32},
	[DCB_BCN_ATTR_W]            = {.type = NLA_U32},
	[DCB_BCN_ATTR_RD]           = {.type = NLA_U32},
	[DCB_BCN_ATTR_RU]           = {.type = NLA_U32},
	[DCB_BCN_ATTR_WRTT]         = {.type = NLA_U32},
	[DCB_BCN_ATTR_RI]           = {.type = NLA_U32},
	[DCB_BCN_ATTR_C]            = {.type = NLA_U32},
	[DCB_BCN_ATTR_ALL]          = {.type = NLA_FLAG},
};
166 
/* DCB APP nested attributes (CEE application priority table entry). */
static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
	[DCB_APP_ATTR_IDTYPE]       = {.type = NLA_U8},
	[DCB_APP_ATTR_ID]           = {.type = NLA_U16},
	[DCB_APP_ATTR_PRIORITY]     = {.type = NLA_U8},
};
173 
/* IEEE 802.1Qaz nested attributes: fixed-size binary structs plus the
 * nested application table.
 */
static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = {
	[DCB_ATTR_IEEE_ETS]	    = {.len = sizeof(struct ieee_ets)},
	[DCB_ATTR_IEEE_PFC]	    = {.len = sizeof(struct ieee_pfc)},
	[DCB_ATTR_IEEE_APP_TABLE]   = {.type = NLA_NESTED},
	[DCB_ATTR_IEEE_MAXRATE]   = {.len = sizeof(struct ieee_maxrate)},
	[DCB_ATTR_IEEE_QCN]         = {.len = sizeof(struct ieee_qcn)},
	[DCB_ATTR_IEEE_QCN_STATS]   = {.len = sizeof(struct ieee_qcn_stats)},
};
183 
/* IEEE application table entry inside DCB_ATTR_IEEE_APP_TABLE. */
static const struct nla_policy dcbnl_ieee_app[DCB_ATTR_IEEE_APP_MAX + 1] = {
	[DCB_ATTR_IEEE_APP]	    = {.len = sizeof(struct dcb_app)},
};
187 
/* DCB feature state nested attributes. */
static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = {
	[DCB_FEATCFG_ATTR_ALL]      = {.type = NLA_FLAG},
	[DCB_FEATCFG_ATTR_PG]       = {.type = NLA_U8},
	[DCB_FEATCFG_ATTR_PFC]      = {.type = NLA_U8},
	[DCB_FEATCFG_ATTR_APP]      = {.type = NLA_U8},
};
195 
/* Core-maintained list of dcb_app entries; protected by dcb_lock. */
static LIST_HEAD(dcb_app_list);
static DEFINE_SPINLOCK(dcb_lock);
198 
199 static struct sk_buff *dcbnl_newmsg(int type, u8 cmd, u32 port, u32 seq,
200 				    u32 flags, struct nlmsghdr **nlhp)
201 {
202 	struct sk_buff *skb;
203 	struct dcbmsg *dcb;
204 	struct nlmsghdr *nlh;
205 
206 	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
207 	if (!skb)
208 		return NULL;
209 
210 	nlh = nlmsg_put(skb, port, seq, type, sizeof(*dcb), flags);
211 	BUG_ON(!nlh);
212 
213 	dcb = nlmsg_data(nlh);
214 	dcb->dcb_family = AF_UNSPEC;
215 	dcb->cmd = cmd;
216 	dcb->dcb_pad = 0;
217 
218 	if (nlhp)
219 		*nlhp = nlh;
220 
221 	return skb;
222 }
223 
224 static int dcbnl_getstate(struct net_device *netdev, struct nlmsghdr *nlh,
225 			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
226 {
227 	/* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */
228 	if (!netdev->dcbnl_ops->getstate)
229 		return -EOPNOTSUPP;
230 
231 	return nla_put_u8(skb, DCB_ATTR_STATE,
232 			  netdev->dcbnl_ops->getstate(netdev));
233 }
234 
/* DCB_CMD_PFC_GCFG: report the PFC configuration for the user priorities
 * selected in the request's DCB_ATTR_PFC_CFG nest (DCB_PFC_UP_ATTR_ALL
 * selects all eight priorities).
 */
static int dcbnl_getpfccfg(struct net_device *netdev, struct nlmsghdr *nlh,
			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest;
	u8 value;
	int ret;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_PFC_CFG])
		return -EINVAL;

	if (!netdev->dcbnl_ops->getpfccfg)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
	                       tb[DCB_ATTR_PFC_CFG],
	                       dcbnl_pfc_up_nest);
	if (ret)
		return ret;

	nest = nla_nest_start(skb, DCB_ATTR_PFC_CFG);
	if (!nest)
		return -EMSGSIZE;

	if (data[DCB_PFC_UP_ATTR_ALL])
		getall = 1;

	/* Emit one u8 per requested priority; attribute type i maps to
	 * priority i - DCB_PFC_UP_ATTR_0.
	 */
	for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
		if (!getall && !data[i])
			continue;

		netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0,
		                             &value);
		ret = nla_put_u8(skb, i, value);
		if (ret) {
			/* Undo the partially built nest on failure. */
			nla_nest_cancel(skb, nest);
			return ret;
		}
	}
	nla_nest_end(skb, nest);

	return 0;
}
279 
280 static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlmsghdr *nlh,
281 				u32 seq, struct nlattr **tb, struct sk_buff *skb)
282 {
283 	u8 perm_addr[MAX_ADDR_LEN];
284 
285 	if (!netdev->dcbnl_ops->getpermhwaddr)
286 		return -EOPNOTSUPP;
287 
288 	memset(perm_addr, 0, sizeof(perm_addr));
289 	netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);
290 
291 	return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr);
292 }
293 
/* DCB_CMD_GCAP: report the device capabilities selected in the request's
 * DCB_ATTR_CAP nest (DCB_CAP_ATTR_ALL selects every capability).
 * Capabilities the driver declines to report are silently omitted.
 */
static int dcbnl_getcap(struct net_device *netdev, struct nlmsghdr *nlh,
			u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest;
	u8 value;
	int ret;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_CAP])
		return -EINVAL;

	if (!netdev->dcbnl_ops->getcap)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(data, DCB_CAP_ATTR_MAX, tb[DCB_ATTR_CAP],
	                       dcbnl_cap_nest);
	if (ret)
		return ret;

	nest = nla_nest_start(skb, DCB_ATTR_CAP);
	if (!nest)
		return -EMSGSIZE;

	if (data[DCB_CAP_ATTR_ALL])
		getall = 1;

	for (i = DCB_CAP_ATTR_ALL+1; i <= DCB_CAP_ATTR_MAX; i++) {
		if (!getall && !data[i])
			continue;

		/* Non-zero from getcap means "not reported" - skip it. */
		if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) {
			ret = nla_put_u8(skb, i, value);
			if (ret) {
				nla_nest_cancel(skb, nest);
				return ret;
			}
		}
	}
	nla_nest_end(skb, nest);

	return 0;
}
337 
338 static int dcbnl_getnumtcs(struct net_device *netdev, struct nlmsghdr *nlh,
339 			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
340 {
341 	struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest;
342 	u8 value;
343 	int ret;
344 	int i;
345 	int getall = 0;
346 
347 	if (!tb[DCB_ATTR_NUMTCS])
348 		return -EINVAL;
349 
350 	if (!netdev->dcbnl_ops->getnumtcs)
351 		return -EOPNOTSUPP;
352 
353 	ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
354 	                       dcbnl_numtcs_nest);
355 	if (ret)
356 		return ret;
357 
358 	nest = nla_nest_start(skb, DCB_ATTR_NUMTCS);
359 	if (!nest)
360 		return -EMSGSIZE;
361 
362 	if (data[DCB_NUMTCS_ATTR_ALL])
363 		getall = 1;
364 
365 	for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
366 		if (!getall && !data[i])
367 			continue;
368 
369 		ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value);
370 		if (!ret) {
371 			ret = nla_put_u8(skb, i, value);
372 			if (ret) {
373 				nla_nest_cancel(skb, nest);
374 				return ret;
375 			}
376 		} else
377 			return -EINVAL;
378 	}
379 	nla_nest_end(skb, nest);
380 
381 	return 0;
382 }
383 
/* DCB_CMD_SNUMTCS: set the number of traffic classes for each feature
 * present in the request's DCB_ATTR_NUMTCS nest.  The reply carries an
 * operational status byte: 0 on success, 1 if any driver call failed.
 */
static int dcbnl_setnumtcs(struct net_device *netdev, struct nlmsghdr *nlh,
			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1];
	int ret;
	u8 value;
	int i;

	if (!tb[DCB_ATTR_NUMTCS])
		return -EINVAL;

	if (!netdev->dcbnl_ops->setnumtcs)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
	                       dcbnl_numtcs_nest);
	if (ret)
		return ret;

	for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
		if (data[i] == NULL)
			continue;

		value = nla_get_u8(data[i]);

		ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value);
		if (ret)
			break;
	}

	/* !!ret collapses any driver error into the status byte 1. */
	return nla_put_u8(skb, DCB_ATTR_NUMTCS, !!ret);
}
416 
417 static int dcbnl_getpfcstate(struct net_device *netdev, struct nlmsghdr *nlh,
418 			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
419 {
420 	if (!netdev->dcbnl_ops->getpfcstate)
421 		return -EOPNOTSUPP;
422 
423 	return nla_put_u8(skb, DCB_ATTR_PFC_STATE,
424 			  netdev->dcbnl_ops->getpfcstate(netdev));
425 }
426 
427 static int dcbnl_setpfcstate(struct net_device *netdev, struct nlmsghdr *nlh,
428 			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
429 {
430 	u8 value;
431 
432 	if (!tb[DCB_ATTR_PFC_STATE])
433 		return -EINVAL;
434 
435 	if (!netdev->dcbnl_ops->setpfcstate)
436 		return -EOPNOTSUPP;
437 
438 	value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]);
439 
440 	netdev->dcbnl_ops->setpfcstate(netdev, value);
441 
442 	return nla_put_u8(skb, DCB_ATTR_PFC_STATE, 0);
443 }
444 
/* DCB_CMD_GAPP: look up the user priority mapped to an application,
 * identified by selector type (ethertype or port number) and protocol id.
 * Uses the driver's getapp hook when present, otherwise the core app table.
 */
static int dcbnl_getapp(struct net_device *netdev, struct nlmsghdr *nlh,
			u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *app_nest;
	struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
	u16 id;
	u8 up, idtype;
	int ret;

	if (!tb[DCB_ATTR_APP])
		return -EINVAL;

	ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
	                       dcbnl_app_nest);
	if (ret)
		return ret;

	/* all must be non-null */
	if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
	    (!app_tb[DCB_APP_ATTR_ID]))
		return -EINVAL;

	/* either by eth type or by socket number */
	idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
	if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
	    (idtype != DCB_APP_IDTYPE_PORTNUM))
		return -EINVAL;

	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);

	if (netdev->dcbnl_ops->getapp) {
		/* Driver hook: negative is an error, otherwise the priority. */
		ret = netdev->dcbnl_ops->getapp(netdev, idtype, id);
		if (ret < 0)
			return ret;
		else
			up = ret;
	} else {
		/* Fall back to the core-maintained application table. */
		struct dcb_app app = {
					.selector = idtype,
					.protocol = id,
				     };
		up = dcb_getapp(netdev, &app);
	}

	/* Echo the lookup key back along with the resolved priority. */
	app_nest = nla_nest_start(skb, DCB_ATTR_APP);
	if (!app_nest)
		return -EMSGSIZE;

	ret = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE, idtype);
	if (ret)
		goto out_cancel;

	ret = nla_put_u16(skb, DCB_APP_ATTR_ID, id);
	if (ret)
		goto out_cancel;

	ret = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY, up);
	if (ret)
		goto out_cancel;

	nla_nest_end(skb, app_nest);

	return 0;

out_cancel:
	nla_nest_cancel(skb, app_nest);
	return ret;
}
513 
/* DCB_CMD_SAPP: map an application (selector type + protocol id) to a user
 * priority, via the driver hook when present or the core app table
 * otherwise, then send a CEE notification.
 */
static int dcbnl_setapp(struct net_device *netdev, struct nlmsghdr *nlh,
			u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	int ret;
	u16 id;
	u8 up, idtype;
	struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];

	if (!tb[DCB_ATTR_APP])
		return -EINVAL;

	ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
	                       dcbnl_app_nest);
	if (ret)
		return ret;

	/* all must be non-null */
	if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
	    (!app_tb[DCB_APP_ATTR_ID]) ||
	    (!app_tb[DCB_APP_ATTR_PRIORITY]))
		return -EINVAL;

	/* either by eth type or by socket number */
	idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
	if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
	    (idtype != DCB_APP_IDTYPE_PORTNUM))
		return -EINVAL;

	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
	up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]);

	if (netdev->dcbnl_ops->setapp) {
		ret = netdev->dcbnl_ops->setapp(netdev, idtype, id, up);
		if (ret < 0)
			return ret;
	} else {
		struct dcb_app app;
		app.selector = idtype;
		app.protocol = id;
		app.priority = up;
		ret = dcb_setapp(netdev, &app);
	}

	/* The status attribute carries the (non-negative) driver result. */
	ret = nla_put_u8(skb, DCB_ATTR_APP, ret);
	dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SAPP, seq, 0);

	return ret;
}
562 
/* Shared worker for DCB_CMD_PGTX_GCFG / DCB_CMD_PGRX_GCFG: build a
 * DCB_ATTR_PG_CFG nest containing the per-traffic-class parameters and
 * per-bandwidth-group percentages requested by user space.
 * dir selects the direction: 0 = Tx, 1 = Rx.  nlh is unused.
 */
static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     struct nlattr **tb, struct sk_buff *skb, int dir)
{
	struct nlattr *pg_nest, *param_nest, *data;
	struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
	struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
	u8 prio, pgid, tc_pct, up_map;
	int ret;
	int getall = 0;
	int i;

	if (!tb[DCB_ATTR_PG_CFG])
		return -EINVAL;

	if (!netdev->dcbnl_ops->getpgtccfgtx ||
	    !netdev->dcbnl_ops->getpgtccfgrx ||
	    !netdev->dcbnl_ops->getpgbwgcfgtx ||
	    !netdev->dcbnl_ops->getpgbwgcfgrx)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
	                       tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
	if (ret)
		return ret;

	pg_nest = nla_nest_start(skb, DCB_ATTR_PG_CFG);
	if (!pg_nest)
		return -EMSGSIZE;

	if (pg_tb[DCB_PG_ATTR_TC_ALL])
		getall = 1;

	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		if (!getall && !pg_tb[i])
			continue;

		/* With TC_ALL, one parameter selection applies to every TC. */
		if (pg_tb[DCB_PG_ATTR_TC_ALL])
			data = pg_tb[DCB_PG_ATTR_TC_ALL];
		else
			data = pg_tb[i];
		ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
				       data, dcbnl_tc_param_nest);
		if (ret)
			goto err_pg;

		param_nest = nla_nest_start(skb, i);
		if (!param_nest)
			goto err_pg;

		/* Values the driver does not fill in stay "undefined". */
		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->getpgtccfgrx(netdev,
						i - DCB_PG_ATTR_TC_0, &prio,
						&pgid, &tc_pct, &up_map);
		} else {
			/* Tx */
			netdev->dcbnl_ops->getpgtccfgtx(netdev,
						i - DCB_PG_ATTR_TC_0, &prio,
						&pgid, &tc_pct, &up_map);
		}

		/* Emit only the parameters the request asked for. */
		if (param_tb[DCB_TC_ATTR_PARAM_PGID] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(skb,
			                 DCB_TC_ATTR_PARAM_PGID, pgid);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(skb,
			                 DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(skb,
			                 DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT,
			                 tc_pct);
			if (ret)
				goto err_param;
		}
		nla_nest_end(skb, param_nest);
	}

	if (pg_tb[DCB_PG_ATTR_BW_ID_ALL])
		getall = 1;
	else
		getall = 0;

	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		if (!getall && !pg_tb[i])
			continue;

		tc_pct = DCB_ATTR_VALUE_UNDEFINED;

		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->getpgbwgcfgrx(netdev,
					i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
		} else {
			/* Tx */
			netdev->dcbnl_ops->getpgbwgcfgtx(netdev,
					i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
		}
		ret = nla_put_u8(skb, i, tc_pct);
		if (ret)
			goto err_pg;
	}

	nla_nest_end(skb, pg_nest);

	return 0;

err_param:
	nla_nest_cancel(skb, param_nest);
err_pg:
	nla_nest_cancel(skb, pg_nest);

	/* NOTE(review): all failures, including nested-parse errors, are
	 * reported to user space as -EMSGSIZE here.
	 */
	return -EMSGSIZE;
}
696 
/* DCB_CMD_PGTX_GCFG: get the Tx priority-group configuration. */
static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 0);
}
702 
/* DCB_CMD_PGRX_GCFG: get the Rx priority-group configuration. */
static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 1);
}
708 
709 static int dcbnl_setstate(struct net_device *netdev, struct nlmsghdr *nlh,
710 			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
711 {
712 	u8 value;
713 
714 	if (!tb[DCB_ATTR_STATE])
715 		return -EINVAL;
716 
717 	if (!netdev->dcbnl_ops->setstate)
718 		return -EOPNOTSUPP;
719 
720 	value = nla_get_u8(tb[DCB_ATTR_STATE]);
721 
722 	return nla_put_u8(skb, DCB_ATTR_STATE,
723 			  netdev->dcbnl_ops->setstate(netdev, value));
724 }
725 
726 static int dcbnl_setpfccfg(struct net_device *netdev, struct nlmsghdr *nlh,
727 			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
728 {
729 	struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1];
730 	int i;
731 	int ret;
732 	u8 value;
733 
734 	if (!tb[DCB_ATTR_PFC_CFG])
735 		return -EINVAL;
736 
737 	if (!netdev->dcbnl_ops->setpfccfg)
738 		return -EOPNOTSUPP;
739 
740 	ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
741 	                       tb[DCB_ATTR_PFC_CFG],
742 	                       dcbnl_pfc_up_nest);
743 	if (ret)
744 		return ret;
745 
746 	for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
747 		if (data[i] == NULL)
748 			continue;
749 		value = nla_get_u8(data[i]);
750 		netdev->dcbnl_ops->setpfccfg(netdev,
751 			data[i]->nla_type - DCB_PFC_UP_ATTR_0, value);
752 	}
753 
754 	return nla_put_u8(skb, DCB_ATTR_PFC_CFG, 0);
755 }
756 
757 static int dcbnl_setall(struct net_device *netdev, struct nlmsghdr *nlh,
758 			u32 seq, struct nlattr **tb, struct sk_buff *skb)
759 {
760 	int ret;
761 
762 	if (!tb[DCB_ATTR_SET_ALL])
763 		return -EINVAL;
764 
765 	if (!netdev->dcbnl_ops->setall)
766 		return -EOPNOTSUPP;
767 
768 	ret = nla_put_u8(skb, DCB_ATTR_SET_ALL,
769 			 netdev->dcbnl_ops->setall(netdev));
770 	dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SET_ALL, seq, 0);
771 
772 	return ret;
773 }
774 
/* Shared worker for DCB_CMD_PGTX_SCFG / DCB_CMD_PGRX_SCFG: apply the
 * per-traffic-class parameters and per-bandwidth-group percentages from
 * the request's DCB_ATTR_PG_CFG nest.  dir selects the direction:
 * 0 = Tx, 1 = Rx.
 */
static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb,
			     int dir)
{
	struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
	struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
	int ret;
	int i;
	u8 pgid;
	u8 up_map;
	u8 prio;
	u8 tc_pct;

	if (!tb[DCB_ATTR_PG_CFG])
		return -EINVAL;

	if (!netdev->dcbnl_ops->setpgtccfgtx ||
	    !netdev->dcbnl_ops->setpgtccfgrx ||
	    !netdev->dcbnl_ops->setpgbwgcfgtx ||
	    !netdev->dcbnl_ops->setpgbwgcfgrx)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
	                       tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
	if (ret)
		return ret;

	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		if (!pg_tb[i])
			continue;

		ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
		                       pg_tb[i], dcbnl_tc_param_nest);
		if (ret)
			return ret;

		/* Parameters absent from the request stay "undefined" so
		 * the driver can leave them unchanged.
		 */
		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO])
			prio =
			    nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]);

		if (param_tb[DCB_TC_ATTR_PARAM_PGID])
			pgid = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_PGID]);

		if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT])
			tc_pct = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_BW_PCT]);

		if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING])
			up_map =
			     nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]);

		/* dir: Tx = 0, Rx = 1 */
		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->setpgtccfgrx(netdev,
				i - DCB_PG_ATTR_TC_0,
				prio, pgid, tc_pct, up_map);
		} else {
			/* Tx */
			netdev->dcbnl_ops->setpgtccfgtx(netdev,
				i - DCB_PG_ATTR_TC_0,
				prio, pgid, tc_pct, up_map);
		}
	}

	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		if (!pg_tb[i])
			continue;

		tc_pct = nla_get_u8(pg_tb[i]);

		/* dir: Tx = 0, Rx = 1 */
		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->setpgbwgcfgrx(netdev,
					 i - DCB_PG_ATTR_BW_ID_0, tc_pct);
		} else {
			/* Tx */
			netdev->dcbnl_ops->setpgbwgcfgtx(netdev,
					 i - DCB_PG_ATTR_BW_ID_0, tc_pct);
		}
	}

	return nla_put_u8(skb, DCB_ATTR_PG_CFG, 0);
}
864 
/* DCB_CMD_PGTX_SCFG: set the Tx priority-group configuration. */
static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 0);
}
870 
/* DCB_CMD_PGRX_SCFG: set the Rx priority-group configuration. */
static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 1);
}
876 
/* DCB_CMD_BCN_GCFG: report the BCN per-priority RP enables and the u32
 * tuning parameters selected in the request's DCB_ATTR_BCN nest
 * (DCB_BCN_ATTR_ALL selects everything).
 */
static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			    u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *bcn_nest;
	struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1];
	u8 value_byte;
	u32 value_integer;
	int ret;
	bool getall = false;
	int i;

	if (!tb[DCB_ATTR_BCN])
		return -EINVAL;

	if (!netdev->dcbnl_ops->getbcnrp ||
	    !netdev->dcbnl_ops->getbcncfg)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(bcn_tb, DCB_BCN_ATTR_MAX,
	                       tb[DCB_ATTR_BCN], dcbnl_bcn_nest);
	if (ret)
		return ret;

	bcn_nest = nla_nest_start(skb, DCB_ATTR_BCN);
	if (!bcn_nest)
		return -EMSGSIZE;

	if (bcn_tb[DCB_BCN_ATTR_ALL])
		getall = true;

	/* Per-priority rate-limiter enables (u8). */
	for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
		if (!getall && !bcn_tb[i])
			continue;

		netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0,
		                            &value_byte);
		ret = nla_put_u8(skb, i, value_byte);
		if (ret)
			goto err_bcn;
	}

	/* Remaining BCN parameters (u32), keyed by attribute type. */
	for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
		if (!getall && !bcn_tb[i])
			continue;

		netdev->dcbnl_ops->getbcncfg(netdev, i,
		                             &value_integer);
		ret = nla_put_u32(skb, i, value_integer);
		if (ret)
			goto err_bcn;
	}

	nla_nest_end(skb, bcn_nest);

	return 0;

err_bcn:
	nla_nest_cancel(skb, bcn_nest);
	return ret;
}
937 
938 static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
939 			    u32 seq, struct nlattr **tb, struct sk_buff *skb)
940 {
941 	struct nlattr *data[DCB_BCN_ATTR_MAX + 1];
942 	int i;
943 	int ret;
944 	u8 value_byte;
945 	u32 value_int;
946 
947 	if (!tb[DCB_ATTR_BCN])
948 		return -EINVAL;
949 
950 	if (!netdev->dcbnl_ops->setbcncfg ||
951 	    !netdev->dcbnl_ops->setbcnrp)
952 		return -EOPNOTSUPP;
953 
954 	ret = nla_parse_nested(data, DCB_BCN_ATTR_MAX,
955 	                       tb[DCB_ATTR_BCN],
956 	                       dcbnl_pfc_up_nest);
957 	if (ret)
958 		return ret;
959 
960 	for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
961 		if (data[i] == NULL)
962 			continue;
963 		value_byte = nla_get_u8(data[i]);
964 		netdev->dcbnl_ops->setbcnrp(netdev,
965 			data[i]->nla_type - DCB_BCN_ATTR_RP_0, value_byte);
966 	}
967 
968 	for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
969 		if (data[i] == NULL)
970 			continue;
971 		value_int = nla_get_u32(data[i]);
972 		netdev->dcbnl_ops->setbcncfg(netdev,
973 	                                     i, value_int);
974 	}
975 
976 	return nla_put_u8(skb, DCB_ATTR_BCN, 0);
977 }
978 
/* Append the peer's application table (and optional info struct) to skb
 * under the given nest/attribute types.  Assumes the caller has verified
 * that peer_getappinfo and peer_getapptable are implemented.  Driver
 * failures are deliberately ignored (nothing is added); only message-size
 * and allocation failures are reported.
 */
static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff* skb,
				int app_nested_type, int app_info_type,
				int app_entry_type)
{
	struct dcb_peer_app_info info;
	struct dcb_app *table = NULL;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	u16 app_count;
	int err;


	/*
	 * retrieve the peer app configuration from the driver. If the driver
	 * handlers fail exit without doing anything
	 */
	err = ops->peer_getappinfo(netdev, &info, &app_count);
	if (!err && app_count) {
		table = kmalloc(sizeof(struct dcb_app) * app_count, GFP_KERNEL);
		if (!table)
			return -ENOMEM;

		err = ops->peer_getapptable(netdev, table);
	}

	if (!err) {
		u16 i;
		struct nlattr *app;

		/*
		 * build the message, from here on the only possible failure
		 * is due to the skb size
		 */
		err = -EMSGSIZE;

		app = nla_nest_start(skb, app_nested_type);
		if (!app)
			goto nla_put_failure;

		if (app_info_type &&
		    nla_put(skb, app_info_type, sizeof(info), &info))
			goto nla_put_failure;

		for (i = 0; i < app_count; i++) {
			if (nla_put(skb, app_entry_type, sizeof(struct dcb_app),
				    &table[i]))
				goto nla_put_failure;
		}
		nla_nest_end(skb, app);
	}
	/* Driver errors also land here and are masked to success. */
	err = 0;

nla_put_failure:
	kfree(table);
	return err;
}
1034 
/* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb GET commands.
 *
 * Fills @skb with the interface name, a DCB_ATTR_IEEE nest containing
 * the local ETS/maxrate/QCN/PFC configuration and the app table, any
 * peer state the driver can report, and the DCBX mode when available.
 * Returns 0 on success or -EMSGSIZE when @skb runs out of tail room.
 */
static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
{
	struct nlattr *ieee, *app;
	struct dcb_app_type *itr;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	int dcbx;
	int err;

	if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
		return -EMSGSIZE;

	ieee = nla_nest_start(skb, DCB_ATTR_IEEE);
	if (!ieee)
		return -EMSGSIZE;

	/* Each sub-attribute below is optional: it is emitted only when
	 * the driver implements the callback and the callback succeeds.
	 */
	if (ops->ieee_getets) {
		struct ieee_ets ets;
		memset(&ets, 0, sizeof(ets));
		err = ops->ieee_getets(netdev, &ets);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets))
			return -EMSGSIZE;
	}

	if (ops->ieee_getmaxrate) {
		struct ieee_maxrate maxrate;
		memset(&maxrate, 0, sizeof(maxrate));
		err = ops->ieee_getmaxrate(netdev, &maxrate);
		if (!err) {
			err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE,
				      sizeof(maxrate), &maxrate);
			if (err)
				return -EMSGSIZE;
		}
	}

	if (ops->ieee_getqcn) {
		struct ieee_qcn qcn;

		memset(&qcn, 0, sizeof(qcn));
		err = ops->ieee_getqcn(netdev, &qcn);
		if (!err) {
			err = nla_put(skb, DCB_ATTR_IEEE_QCN,
				      sizeof(qcn), &qcn);
			if (err)
				return -EMSGSIZE;
		}
	}

	if (ops->ieee_getqcnstats) {
		struct ieee_qcn_stats qcn_stats;

		memset(&qcn_stats, 0, sizeof(qcn_stats));
		err = ops->ieee_getqcnstats(netdev, &qcn_stats);
		if (!err) {
			err = nla_put(skb, DCB_ATTR_IEEE_QCN_STATS,
				      sizeof(qcn_stats), &qcn_stats);
			if (err)
				return -EMSGSIZE;
		}
	}

	if (ops->ieee_getpfc) {
		struct ieee_pfc pfc;
		memset(&pfc, 0, sizeof(pfc));
		err = ops->ieee_getpfc(netdev, &pfc);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc))
			return -EMSGSIZE;
	}

	app = nla_nest_start(skb, DCB_ATTR_IEEE_APP_TABLE);
	if (!app)
		return -EMSGSIZE;

	/* dcb_app_list is shared across all interfaces and protected by
	 * dcb_lock; entries for other interfaces are skipped here.
	 */
	spin_lock_bh(&dcb_lock);
	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->ifindex == netdev->ifindex) {
			err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app),
					 &itr->app);
			if (err) {
				spin_unlock_bh(&dcb_lock);
				return -EMSGSIZE;
			}
		}
	}

	/* NOTE(review): getdcbx is invoked while dcb_lock is still held,
	 * so the driver callback must not sleep — confirm for new drivers.
	 */
	if (netdev->dcbnl_ops->getdcbx)
		dcbx = netdev->dcbnl_ops->getdcbx(netdev);
	else
		dcbx = -EOPNOTSUPP;

	spin_unlock_bh(&dcb_lock);
	nla_nest_end(skb, app);

	/* get peer info if available */
	if (ops->ieee_peer_getets) {
		struct ieee_ets ets;
		memset(&ets, 0, sizeof(ets));
		err = ops->ieee_peer_getets(netdev, &ets);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets))
			return -EMSGSIZE;
	}

	if (ops->ieee_peer_getpfc) {
		struct ieee_pfc pfc;
		memset(&pfc, 0, sizeof(pfc));
		err = ops->ieee_peer_getpfc(netdev, &pfc);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc))
			return -EMSGSIZE;
	}

	if (ops->peer_getappinfo && ops->peer_getapptable) {
		err = dcbnl_build_peer_app(netdev, skb,
					   DCB_ATTR_IEEE_PEER_APP,
					   DCB_ATTR_IEEE_APP_UNSPEC,
					   DCB_ATTR_IEEE_APP);
		if (err)
			return -EMSGSIZE;
	}

	nla_nest_end(skb, ieee);
	/* dcbx < 0 means the mode is unknown; the attribute is omitted. */
	if (dcbx >= 0) {
		err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
		if (err)
			return -EMSGSIZE;
	}

	return 0;
}
1168 
1169 static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
1170 			     int dir)
1171 {
1172 	u8 pgid, up_map, prio, tc_pct;
1173 	const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
1174 	int i = dir ? DCB_ATTR_CEE_TX_PG : DCB_ATTR_CEE_RX_PG;
1175 	struct nlattr *pg = nla_nest_start(skb, i);
1176 
1177 	if (!pg)
1178 		return -EMSGSIZE;
1179 
1180 	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
1181 		struct nlattr *tc_nest = nla_nest_start(skb, i);
1182 
1183 		if (!tc_nest)
1184 			return -EMSGSIZE;
1185 
1186 		pgid = DCB_ATTR_VALUE_UNDEFINED;
1187 		prio = DCB_ATTR_VALUE_UNDEFINED;
1188 		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
1189 		up_map = DCB_ATTR_VALUE_UNDEFINED;
1190 
1191 		if (!dir)
1192 			ops->getpgtccfgrx(dev, i - DCB_PG_ATTR_TC_0,
1193 					  &prio, &pgid, &tc_pct, &up_map);
1194 		else
1195 			ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0,
1196 					  &prio, &pgid, &tc_pct, &up_map);
1197 
1198 		if (nla_put_u8(skb, DCB_TC_ATTR_PARAM_PGID, pgid) ||
1199 		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map) ||
1200 		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio) ||
1201 		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct))
1202 			return -EMSGSIZE;
1203 		nla_nest_end(skb, tc_nest);
1204 	}
1205 
1206 	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
1207 		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
1208 
1209 		if (!dir)
1210 			ops->getpgbwgcfgrx(dev, i - DCB_PG_ATTR_BW_ID_0,
1211 					   &tc_pct);
1212 		else
1213 			ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0,
1214 					   &tc_pct);
1215 		if (nla_put_u8(skb, i, tc_pct))
1216 			return -EMSGSIZE;
1217 	}
1218 	nla_nest_end(skb, pg);
1219 	return 0;
1220 }
1221 
/* Handle CEE DCBX GET commands.
 *
 * Fills @skb with the interface name and a DCB_ATTR_CEE nest holding
 * the local PG/PFC/app configuration, the feature flags, any peer state
 * the driver can report, and the DCBX mode when available.  On failure
 * a negative error is returned and the caller frees the half-built skb.
 */
static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
{
	struct nlattr *cee, *app;
	struct dcb_app_type *itr;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	int dcbx, i, err = -EMSGSIZE;
	u8 value;

	if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
		goto nla_put_failure;
	cee = nla_nest_start(skb, DCB_ATTR_CEE);
	if (!cee)
		goto nla_put_failure;

	/* local pg */
	if (ops->getpgtccfgtx && ops->getpgbwgcfgtx) {
		err = dcbnl_cee_pg_fill(skb, netdev, 1);
		if (err)
			goto nla_put_failure;
	}

	if (ops->getpgtccfgrx && ops->getpgbwgcfgrx) {
		err = dcbnl_cee_pg_fill(skb, netdev, 0);
		if (err)
			goto nla_put_failure;
	}

	/* local pfc */
	if (ops->getpfccfg) {
		struct nlattr *pfc_nest = nla_nest_start(skb, DCB_ATTR_CEE_PFC);

		if (!pfc_nest)
			goto nla_put_failure;

		/* One u8 attribute per 802.1p user priority. */
		for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
			ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value);
			if (nla_put_u8(skb, i, value))
				goto nla_put_failure;
		}
		nla_nest_end(skb, pfc_nest);
	}

	/* local app; dcb_app_list is protected by dcb_lock.
	 * NOTE(review): err may still be 0 here from a successful PG fill,
	 * so a failed nest start below would report success with a
	 * truncated message — verify whether that is intended.
	 */
	spin_lock_bh(&dcb_lock);
	app = nla_nest_start(skb, DCB_ATTR_CEE_APP_TABLE);
	if (!app)
		goto dcb_unlock;

	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->ifindex == netdev->ifindex) {
			struct nlattr *app_nest = nla_nest_start(skb,
								 DCB_ATTR_APP);
			if (!app_nest)
				goto dcb_unlock;

			err = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE,
					 itr->app.selector);
			if (err)
				goto dcb_unlock;

			err = nla_put_u16(skb, DCB_APP_ATTR_ID,
					  itr->app.protocol);
			if (err)
				goto dcb_unlock;

			err = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY,
					 itr->app.priority);
			if (err)
				goto dcb_unlock;

			nla_nest_end(skb, app_nest);
		}
	}
	nla_nest_end(skb, app);

	/* NOTE(review): getdcbx is invoked while dcb_lock is still held,
	 * so the driver callback must not sleep — confirm for new drivers.
	 */
	if (netdev->dcbnl_ops->getdcbx)
		dcbx = netdev->dcbnl_ops->getdcbx(netdev);
	else
		dcbx = -EOPNOTSUPP;

	spin_unlock_bh(&dcb_lock);

	/* features flags */
	if (ops->getfeatcfg) {
		struct nlattr *feat = nla_nest_start(skb, DCB_ATTR_CEE_FEAT);
		if (!feat)
			goto nla_put_failure;

		/* Only flags the driver reports successfully are emitted. */
		for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX;
		     i++)
			if (!ops->getfeatcfg(netdev, i, &value) &&
			    nla_put_u8(skb, i, value))
				goto nla_put_failure;

		nla_nest_end(skb, feat);
	}

	/* peer info if available */
	if (ops->cee_peer_getpg) {
		struct cee_pg pg;
		memset(&pg, 0, sizeof(pg));
		err = ops->cee_peer_getpg(netdev, &pg);
		if (!err &&
		    nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg))
			goto nla_put_failure;
	}

	if (ops->cee_peer_getpfc) {
		struct cee_pfc pfc;
		memset(&pfc, 0, sizeof(pfc));
		err = ops->cee_peer_getpfc(netdev, &pfc);
		if (!err &&
		    nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc))
			goto nla_put_failure;
	}

	if (ops->peer_getappinfo && ops->peer_getapptable) {
		err = dcbnl_build_peer_app(netdev, skb,
					   DCB_ATTR_CEE_PEER_APP_TABLE,
					   DCB_ATTR_CEE_PEER_APP_INFO,
					   DCB_ATTR_CEE_PEER_APP);
		if (err)
			goto nla_put_failure;
	}
	nla_nest_end(skb, cee);

	/* DCBX state; dcbx < 0 means unknown and the attribute is omitted */
	if (dcbx >= 0) {
		err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
		if (err)
			goto nla_put_failure;
	}
	return 0;

dcb_unlock:
	spin_unlock_bh(&dcb_lock);
nla_put_failure:
	return err;
}
1361 
1362 static int dcbnl_notify(struct net_device *dev, int event, int cmd,
1363 			u32 seq, u32 portid, int dcbx_ver)
1364 {
1365 	struct net *net = dev_net(dev);
1366 	struct sk_buff *skb;
1367 	struct nlmsghdr *nlh;
1368 	const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
1369 	int err;
1370 
1371 	if (!ops)
1372 		return -EOPNOTSUPP;
1373 
1374 	skb = dcbnl_newmsg(event, cmd, portid, seq, 0, &nlh);
1375 	if (!skb)
1376 		return -ENOBUFS;
1377 
1378 	if (dcbx_ver == DCB_CAP_DCBX_VER_IEEE)
1379 		err = dcbnl_ieee_fill(skb, dev);
1380 	else
1381 		err = dcbnl_cee_fill(skb, dev);
1382 
1383 	if (err < 0) {
1384 		/* Report error to broadcast listeners */
1385 		nlmsg_free(skb);
1386 		rtnl_set_sk_err(net, RTNLGRP_DCB, err);
1387 	} else {
1388 		/* End nlmsg and notify broadcast listeners */
1389 		nlmsg_end(skb, nlh);
1390 		rtnl_notify(skb, net, 0, RTNLGRP_DCB, NULL, GFP_KERNEL);
1391 	}
1392 
1393 	return err;
1394 }
1395 
/* Broadcast an IEEE-format DCB state notification for @dev on
 * RTNLGRP_DCB; thin wrapper around dcbnl_notify().
 */
int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd,
		      u32 seq, u32 portid)
{
	return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_IEEE);
}
EXPORT_SYMBOL(dcbnl_ieee_notify);
1402 
/* Broadcast a CEE-format DCB state notification for @dev on
 * RTNLGRP_DCB; thin wrapper around dcbnl_notify().
 */
int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
		     u32 seq, u32 portid)
{
	return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_CEE);
}
EXPORT_SYMBOL(dcbnl_cee_notify);
1409 
/* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb SET commands.
 * If any requested operation can not be completed
 * the entire msg is aborted and error value is returned.
 * No attempt is made to reconcile the case where only part of the
 * cmd can be completed.
 */
static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh,
			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
	int err;

	if (!ops)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_IEEE])
		return -EINVAL;

	err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX,
			       tb[DCB_ATTR_IEEE], dcbnl_ieee_policy);
	if (err)
		return err;

	/* Each sub-attribute is applied only when it is present and the
	 * driver implements the matching callback.
	 */
	if (ieee[DCB_ATTR_IEEE_ETS] && ops->ieee_setets) {
		struct ieee_ets *ets = nla_data(ieee[DCB_ATTR_IEEE_ETS]);
		err = ops->ieee_setets(netdev, ets);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_IEEE_MAXRATE] && ops->ieee_setmaxrate) {
		struct ieee_maxrate *maxrate =
			nla_data(ieee[DCB_ATTR_IEEE_MAXRATE]);
		err = ops->ieee_setmaxrate(netdev, maxrate);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_IEEE_QCN] && ops->ieee_setqcn) {
		struct ieee_qcn *qcn =
			nla_data(ieee[DCB_ATTR_IEEE_QCN]);

		err = ops->ieee_setqcn(netdev, qcn);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
		struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
		err = ops->ieee_setpfc(netdev, pfc);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
		struct nlattr *attr;
		int rem;

		/* Falls back to the in-kernel app table when the driver
		 * does not manage its own.
		 */
		nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
			struct dcb_app *app_data;
			if (nla_type(attr) != DCB_ATTR_IEEE_APP)
				continue;
			app_data = nla_data(attr);
			if (ops->ieee_setapp)
				err = ops->ieee_setapp(netdev, app_data);
			else
				err = dcb_ieee_setapp(netdev, app_data);
			if (err)
				goto err;
		}
	}

err:
	/* The operation status is echoed to user space via DCB_ATTR_IEEE,
	 * and group listeners are notified regardless of success.
	 */
	err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
	dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, seq, 0);
	return err;
}
1488 
1489 static int dcbnl_ieee_get(struct net_device *netdev, struct nlmsghdr *nlh,
1490 			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
1491 {
1492 	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1493 
1494 	if (!ops)
1495 		return -EOPNOTSUPP;
1496 
1497 	return dcbnl_ieee_fill(skb, netdev);
1498 }
1499 
/* Handle DCB_CMD_IEEE_DEL: remove app-table entries listed in the
 * request, aborting on the first failure.  The status is echoed back in
 * DCB_ATTR_IEEE and listeners are notified regardless of success.
 */
static int dcbnl_ieee_del(struct net_device *netdev, struct nlmsghdr *nlh,
			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
	int err;

	if (!ops)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_IEEE])
		return -EINVAL;

	err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX,
			       tb[DCB_ATTR_IEEE], dcbnl_ieee_policy);
	if (err)
		return err;

	if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
		struct nlattr *attr;
		int rem;

		/* Falls back to the in-kernel app table when the driver
		 * does not manage its own.
		 */
		nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
			struct dcb_app *app_data;

			if (nla_type(attr) != DCB_ATTR_IEEE_APP)
				continue;
			app_data = nla_data(attr);
			if (ops->ieee_delapp)
				err = ops->ieee_delapp(netdev, app_data);
			else
				err = dcb_ieee_delapp(netdev, app_data);
			if (err)
				goto err;
		}
	}

err:
	err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
	dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_DEL, seq, 0);
	return err;
}
1542 
1543 
1544 /* DCBX configuration */
1545 static int dcbnl_getdcbx(struct net_device *netdev, struct nlmsghdr *nlh,
1546 			 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1547 {
1548 	if (!netdev->dcbnl_ops->getdcbx)
1549 		return -EOPNOTSUPP;
1550 
1551 	return nla_put_u8(skb, DCB_ATTR_DCBX,
1552 			  netdev->dcbnl_ops->getdcbx(netdev));
1553 }
1554 
1555 static int dcbnl_setdcbx(struct net_device *netdev, struct nlmsghdr *nlh,
1556 			 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1557 {
1558 	u8 value;
1559 
1560 	if (!netdev->dcbnl_ops->setdcbx)
1561 		return -EOPNOTSUPP;
1562 
1563 	if (!tb[DCB_ATTR_DCBX])
1564 		return -EINVAL;
1565 
1566 	value = nla_get_u8(tb[DCB_ATTR_DCBX]);
1567 
1568 	return nla_put_u8(skb, DCB_ATTR_DCBX,
1569 			  netdev->dcbnl_ops->setdcbx(netdev, value));
1570 }
1571 
1572 static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh,
1573 			    u32 seq, struct nlattr **tb, struct sk_buff *skb)
1574 {
1575 	struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1], *nest;
1576 	u8 value;
1577 	int ret, i;
1578 	int getall = 0;
1579 
1580 	if (!netdev->dcbnl_ops->getfeatcfg)
1581 		return -EOPNOTSUPP;
1582 
1583 	if (!tb[DCB_ATTR_FEATCFG])
1584 		return -EINVAL;
1585 
1586 	ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG],
1587 			       dcbnl_featcfg_nest);
1588 	if (ret)
1589 		return ret;
1590 
1591 	nest = nla_nest_start(skb, DCB_ATTR_FEATCFG);
1592 	if (!nest)
1593 		return -EMSGSIZE;
1594 
1595 	if (data[DCB_FEATCFG_ATTR_ALL])
1596 		getall = 1;
1597 
1598 	for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
1599 		if (!getall && !data[i])
1600 			continue;
1601 
1602 		ret = netdev->dcbnl_ops->getfeatcfg(netdev, i, &value);
1603 		if (!ret)
1604 			ret = nla_put_u8(skb, i, value);
1605 
1606 		if (ret) {
1607 			nla_nest_cancel(skb, nest);
1608 			goto nla_put_failure;
1609 		}
1610 	}
1611 	nla_nest_end(skb, nest);
1612 
1613 nla_put_failure:
1614 	return ret;
1615 }
1616 
1617 static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh,
1618 			    u32 seq, struct nlattr **tb, struct sk_buff *skb)
1619 {
1620 	struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1];
1621 	int ret, i;
1622 	u8 value;
1623 
1624 	if (!netdev->dcbnl_ops->setfeatcfg)
1625 		return -ENOTSUPP;
1626 
1627 	if (!tb[DCB_ATTR_FEATCFG])
1628 		return -EINVAL;
1629 
1630 	ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG],
1631 			       dcbnl_featcfg_nest);
1632 
1633 	if (ret)
1634 		goto err;
1635 
1636 	for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
1637 		if (data[i] == NULL)
1638 			continue;
1639 
1640 		value = nla_get_u8(data[i]);
1641 
1642 		ret = netdev->dcbnl_ops->setfeatcfg(netdev, i, value);
1643 
1644 		if (ret)
1645 			goto err;
1646 	}
1647 err:
1648 	ret = nla_put_u8(skb, DCB_ATTR_FEATCFG, ret);
1649 
1650 	return ret;
1651 }
1652 
1653 /* Handle CEE DCBX GET commands. */
1654 static int dcbnl_cee_get(struct net_device *netdev, struct nlmsghdr *nlh,
1655 			 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1656 {
1657 	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1658 
1659 	if (!ops)
1660 		return -EOPNOTSUPP;
1661 
1662 	return dcbnl_cee_fill(skb, netdev);
1663 }
1664 
/* Maps a DCB_CMD_* value to the netlink type of its reply message and
 * the handler that fills the reply (see reply_funcs[] and dcb_doit()).
 */
struct reply_func {
	/* reply netlink message type */
	int	type;

	/* function to fill message contents */
	int   (*cb)(struct net_device *, struct nlmsghdr *, u32,
		    struct nlattr **, struct sk_buff *);
};
1673 
/* Dispatch table indexed by DCB_CMD_*.  Commands without an entry have
 * a NULL cb and are rejected with -EOPNOTSUPP by dcb_doit().
 */
static const struct reply_func reply_funcs[DCB_CMD_MAX+1] = {
	[DCB_CMD_GSTATE]	= { RTM_GETDCB, dcbnl_getstate },
	[DCB_CMD_SSTATE]	= { RTM_SETDCB, dcbnl_setstate },
	[DCB_CMD_PFC_GCFG]	= { RTM_GETDCB, dcbnl_getpfccfg },
	[DCB_CMD_PFC_SCFG]	= { RTM_SETDCB, dcbnl_setpfccfg },
	[DCB_CMD_GPERM_HWADDR]	= { RTM_GETDCB, dcbnl_getperm_hwaddr },
	[DCB_CMD_GCAP]		= { RTM_GETDCB, dcbnl_getcap },
	[DCB_CMD_GNUMTCS]	= { RTM_GETDCB, dcbnl_getnumtcs },
	[DCB_CMD_SNUMTCS]	= { RTM_SETDCB, dcbnl_setnumtcs },
	[DCB_CMD_PFC_GSTATE]	= { RTM_GETDCB, dcbnl_getpfcstate },
	[DCB_CMD_PFC_SSTATE]	= { RTM_SETDCB, dcbnl_setpfcstate },
	[DCB_CMD_GAPP]		= { RTM_GETDCB, dcbnl_getapp },
	[DCB_CMD_SAPP]		= { RTM_SETDCB, dcbnl_setapp },
	[DCB_CMD_PGTX_GCFG]	= { RTM_GETDCB, dcbnl_pgtx_getcfg },
	[DCB_CMD_PGTX_SCFG]	= { RTM_SETDCB, dcbnl_pgtx_setcfg },
	[DCB_CMD_PGRX_GCFG]	= { RTM_GETDCB, dcbnl_pgrx_getcfg },
	[DCB_CMD_PGRX_SCFG]	= { RTM_SETDCB, dcbnl_pgrx_setcfg },
	[DCB_CMD_SET_ALL]	= { RTM_SETDCB, dcbnl_setall },
	[DCB_CMD_BCN_GCFG]	= { RTM_GETDCB, dcbnl_bcn_getcfg },
	[DCB_CMD_BCN_SCFG]	= { RTM_SETDCB, dcbnl_bcn_setcfg },
	[DCB_CMD_IEEE_GET]	= { RTM_GETDCB, dcbnl_ieee_get },
	[DCB_CMD_IEEE_SET]	= { RTM_SETDCB, dcbnl_ieee_set },
	[DCB_CMD_IEEE_DEL]	= { RTM_SETDCB, dcbnl_ieee_del },
	[DCB_CMD_GDCBX]		= { RTM_GETDCB, dcbnl_getdcbx },
	[DCB_CMD_SDCBX]		= { RTM_SETDCB, dcbnl_setdcbx },
	[DCB_CMD_GFEATCFG]	= { RTM_GETDCB, dcbnl_getfeatcfg },
	[DCB_CMD_SFEATCFG]	= { RTM_SETDCB, dcbnl_setfeatcfg },
	[DCB_CMD_CEE_GET]	= { RTM_GETDCB, dcbnl_cee_get },
};
1703 
/* rtnetlink doit handler for RTM_GETDCB and RTM_SETDCB.
 *
 * Parses the dcbmsg header, dispatches to the per-command handler from
 * reply_funcs[], and unicasts the handler-built reply to the sender.
 */
static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	struct dcbmsg *dcb = nlmsg_data(nlh);
	struct nlattr *tb[DCB_ATTR_MAX + 1];
	u32 portid = skb ? NETLINK_CB(skb).portid : 0;
	int ret = -EINVAL;
	struct sk_buff *reply_skb;
	struct nlmsghdr *reply_nlh = NULL;
	const struct reply_func *fn;

	/* Configuration changes require CAP_NET_ADMIN. */
	if ((nlh->nlmsg_type == RTM_SETDCB) && !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	ret = nlmsg_parse(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX,
			  dcbnl_rtnl_policy);
	if (ret < 0)
		return ret;

	if (dcb->cmd > DCB_CMD_MAX)
		return -EINVAL;

	/* check if a reply function has been defined for the command */
	fn = &reply_funcs[dcb->cmd];
	if (!fn->cb)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_IFNAME])
		return -EINVAL;

	/* NOTE(review): __dev_get_by_name() requires RTNL; assumes the
	 * rtnetlink core holds it around this handler — confirm.
	 */
	netdev = __dev_get_by_name(net, nla_data(tb[DCB_ATTR_IFNAME]));
	if (!netdev)
		return -ENODEV;

	if (!netdev->dcbnl_ops)
		return -EOPNOTSUPP;

	reply_skb = dcbnl_newmsg(fn->type, dcb->cmd, portid, nlh->nlmsg_seq,
				 nlh->nlmsg_flags, &reply_nlh);
	if (!reply_skb)
		return -ENOBUFS;

	ret = fn->cb(netdev, nlh, nlh->nlmsg_seq, tb, reply_skb);
	if (ret < 0) {
		/* Handler failed: drop the half-built reply. */
		nlmsg_free(reply_skb);
		goto out;
	}

	nlmsg_end(reply_skb, reply_nlh);

	ret = rtnl_unicast(reply_skb, net, portid);
out:
	return ret;
}
1759 
1760 static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app,
1761 					   int ifindex, int prio)
1762 {
1763 	struct dcb_app_type *itr;
1764 
1765 	list_for_each_entry(itr, &dcb_app_list, list) {
1766 		if (itr->app.selector == app->selector &&
1767 		    itr->app.protocol == app->protocol &&
1768 		    itr->ifindex == ifindex &&
1769 		    (!prio || itr->app.priority == prio))
1770 			return itr;
1771 	}
1772 
1773 	return NULL;
1774 }
1775 
1776 static int dcb_app_add(const struct dcb_app *app, int ifindex)
1777 {
1778 	struct dcb_app_type *entry;
1779 
1780 	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
1781 	if (!entry)
1782 		return -ENOMEM;
1783 
1784 	memcpy(&entry->app, app, sizeof(*app));
1785 	entry->ifindex = ifindex;
1786 	list_add(&entry->list, &dcb_app_list);
1787 
1788 	return 0;
1789 }
1790 
1791 /**
1792  * dcb_getapp - retrieve the DCBX application user priority
1793  *
1794  * On success returns a non-zero 802.1p user priority bitmap
1795  * otherwise returns 0 as the invalid user priority bitmap to
1796  * indicate an error.
1797  */
1798 u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
1799 {
1800 	struct dcb_app_type *itr;
1801 	u8 prio = 0;
1802 
1803 	spin_lock_bh(&dcb_lock);
1804 	if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
1805 		prio = itr->app.priority;
1806 	spin_unlock_bh(&dcb_lock);
1807 
1808 	return prio;
1809 }
1810 EXPORT_SYMBOL(dcb_getapp);
1811 
1812 /**
1813  * dcb_setapp - add CEE dcb application data to app list
1814  *
1815  * Priority 0 is an invalid priority in CEE spec. This routine
1816  * removes applications from the app list if the priority is
1817  * set to zero. Priority is expected to be 8-bit 802.1p user priority bitmap
1818  */
1819 int dcb_setapp(struct net_device *dev, struct dcb_app *new)
1820 {
1821 	struct dcb_app_type *itr;
1822 	struct dcb_app_type event;
1823 	int err = 0;
1824 
1825 	event.ifindex = dev->ifindex;
1826 	memcpy(&event.app, new, sizeof(event.app));
1827 	if (dev->dcbnl_ops->getdcbx)
1828 		event.dcbx = dev->dcbnl_ops->getdcbx(dev);
1829 
1830 	spin_lock_bh(&dcb_lock);
1831 	/* Search for existing match and replace */
1832 	if ((itr = dcb_app_lookup(new, dev->ifindex, 0))) {
1833 		if (new->priority)
1834 			itr->app.priority = new->priority;
1835 		else {
1836 			list_del(&itr->list);
1837 			kfree(itr);
1838 		}
1839 		goto out;
1840 	}
1841 	/* App type does not exist add new application type */
1842 	if (new->priority)
1843 		err = dcb_app_add(new, dev->ifindex);
1844 out:
1845 	spin_unlock_bh(&dcb_lock);
1846 	if (!err)
1847 		call_dcbevent_notifiers(DCB_APP_EVENT, &event);
1848 	return err;
1849 }
1850 EXPORT_SYMBOL(dcb_setapp);
1851 
1852 /**
1853  * dcb_ieee_getapp_mask - retrieve the IEEE DCB application priority
1854  *
1855  * Helper routine which on success returns a non-zero 802.1Qaz user
1856  * priority bitmap otherwise returns 0 to indicate the dcb_app was
1857  * not found in APP list.
1858  */
1859 u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
1860 {
1861 	struct dcb_app_type *itr;
1862 	u8 prio = 0;
1863 
1864 	spin_lock_bh(&dcb_lock);
1865 	if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
1866 		prio |= 1 << itr->app.priority;
1867 	spin_unlock_bh(&dcb_lock);
1868 
1869 	return prio;
1870 }
1871 EXPORT_SYMBOL(dcb_ieee_getapp_mask);
1872 
1873 /**
1874  * dcb_ieee_setapp - add IEEE dcb application data to app list
1875  *
1876  * This adds Application data to the list. Multiple application
1877  * entries may exists for the same selector and protocol as long
1878  * as the priorities are different. Priority is expected to be a
1879  * 3-bit unsigned integer
1880  */
1881 int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
1882 {
1883 	struct dcb_app_type event;
1884 	int err = 0;
1885 
1886 	event.ifindex = dev->ifindex;
1887 	memcpy(&event.app, new, sizeof(event.app));
1888 	if (dev->dcbnl_ops->getdcbx)
1889 		event.dcbx = dev->dcbnl_ops->getdcbx(dev);
1890 
1891 	spin_lock_bh(&dcb_lock);
1892 	/* Search for existing match and abort if found */
1893 	if (dcb_app_lookup(new, dev->ifindex, new->priority)) {
1894 		err = -EEXIST;
1895 		goto out;
1896 	}
1897 
1898 	err = dcb_app_add(new, dev->ifindex);
1899 out:
1900 	spin_unlock_bh(&dcb_lock);
1901 	if (!err)
1902 		call_dcbevent_notifiers(DCB_APP_EVENT, &event);
1903 	return err;
1904 }
1905 EXPORT_SYMBOL(dcb_ieee_setapp);
1906 
1907 /**
1908  * dcb_ieee_delapp - delete IEEE dcb application data from list
1909  *
1910  * This removes a matching APP data from the APP list
1911  */
1912 int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
1913 {
1914 	struct dcb_app_type *itr;
1915 	struct dcb_app_type event;
1916 	int err = -ENOENT;
1917 
1918 	event.ifindex = dev->ifindex;
1919 	memcpy(&event.app, del, sizeof(event.app));
1920 	if (dev->dcbnl_ops->getdcbx)
1921 		event.dcbx = dev->dcbnl_ops->getdcbx(dev);
1922 
1923 	spin_lock_bh(&dcb_lock);
1924 	/* Search for existing match and remove it. */
1925 	if ((itr = dcb_app_lookup(del, dev->ifindex, del->priority))) {
1926 		list_del(&itr->list);
1927 		kfree(itr);
1928 		err = 0;
1929 	}
1930 
1931 	spin_unlock_bh(&dcb_lock);
1932 	if (!err)
1933 		call_dcbevent_notifiers(DCB_APP_EVENT, &event);
1934 	return err;
1935 }
1936 EXPORT_SYMBOL(dcb_ieee_delapp);
1937 
1938 static void dcb_flushapp(void)
1939 {
1940 	struct dcb_app_type *app;
1941 	struct dcb_app_type *tmp;
1942 
1943 	spin_lock_bh(&dcb_lock);
1944 	list_for_each_entry_safe(app, tmp, &dcb_app_list, list) {
1945 		list_del(&app->list);
1946 		kfree(app);
1947 	}
1948 	spin_unlock_bh(&dcb_lock);
1949 }
1950 
/* Module init: register RTM_GETDCB/RTM_SETDCB rtnetlink handlers; both
 * message types are dispatched through dcb_doit().
 */
static int __init dcbnl_init(void)
{
	INIT_LIST_HEAD(&dcb_app_list);

	rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL, NULL);

	return 0;
}
module_init(dcbnl_init);
1961 
/* Module exit: unregister the rtnetlink handlers and free any app
 * entries still on the global list.
 */
static void __exit dcbnl_exit(void)
{
	rtnl_unregister(PF_UNSPEC, RTM_GETDCB);
	rtnl_unregister(PF_UNSPEC, RTM_SETDCB);
	dcb_flushapp();
}
module_exit(dcbnl_exit);
1969