xref: /openbmc/linux/net/dcb/dcbnl.c (revision 33a03aad)
1 /*
2  * Copyright (c) 2008-2011, Intel Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15  * Place - Suite 330, Boston, MA 02111-1307 USA.
16  *
17  * Author: Lucy Liu <lucy.liu@intel.com>
18  */
19 
20 #include <linux/netdevice.h>
21 #include <linux/netlink.h>
22 #include <linux/slab.h>
23 #include <net/netlink.h>
24 #include <net/rtnetlink.h>
25 #include <linux/dcbnl.h>
26 #include <net/dcbevent.h>
27 #include <linux/rtnetlink.h>
28 #include <linux/module.h>
29 #include <net/sock.h>
30 
/*
32  * Data Center Bridging (DCB) is a collection of Ethernet enhancements
33  * intended to allow network traffic with differing requirements
34  * (highly reliable, no drops vs. best effort vs. low latency) to operate
35  * and co-exist on Ethernet.  Current DCB features are:
36  *
37  * Enhanced Transmission Selection (aka Priority Grouping [PG]) - provides a
38  *   framework for assigning bandwidth guarantees to traffic classes.
39  *
40  * Priority-based Flow Control (PFC) - provides a flow control mechanism which
41  *   can work independently for each 802.1p priority.
42  *
43  * Congestion Notification - provides a mechanism for end-to-end congestion
44  *   control for protocols which do not have built-in congestion management.
45  *
46  * More information about the emerging standards for these Ethernet features
47  * can be found at: http://www.ieee802.org/1/pages/dcbridges.html
48  *
49  * This file implements an rtnetlink interface to allow configuration of DCB
50  * features for capable devices.
51  */
52 
/* Module metadata for the DCB netlink interface. */
MODULE_AUTHOR("Lucy Liu, <lucy.liu@intel.com>");
MODULE_DESCRIPTION("Data Center Bridging netlink interface");
MODULE_LICENSE("GPL");
56 
57 /**************** DCB attribute policies *************************************/
58 
/* Policy for the top-level DCB netlink attributes (DCB_ATTR_*).
 * Validates type/length of each attribute before command dispatch. */
static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
	[DCB_ATTR_IFNAME]      = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1},
	[DCB_ATTR_STATE]       = {.type = NLA_U8},
	[DCB_ATTR_PFC_CFG]     = {.type = NLA_NESTED},
	[DCB_ATTR_PG_CFG]      = {.type = NLA_NESTED},
	[DCB_ATTR_SET_ALL]     = {.type = NLA_U8},
	[DCB_ATTR_PERM_HWADDR] = {.type = NLA_FLAG},
	[DCB_ATTR_CAP]         = {.type = NLA_NESTED},
	[DCB_ATTR_PFC_STATE]   = {.type = NLA_U8},
	[DCB_ATTR_BCN]         = {.type = NLA_NESTED},
	[DCB_ATTR_APP]         = {.type = NLA_NESTED},
	[DCB_ATTR_IEEE]	       = {.type = NLA_NESTED},
	[DCB_ATTR_DCBX]        = {.type = NLA_U8},
	[DCB_ATTR_FEATCFG]     = {.type = NLA_NESTED},
};
75 
/* DCB priority flow control to User Priority nested attributes:
 * one u8 per 802.1p user priority, plus an "all" request flag. */
static const struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
	[DCB_PFC_UP_ATTR_0]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_1]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_2]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_3]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_4]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_5]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_6]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_7]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_ALL] = {.type = NLA_FLAG},
};
88 
/* DCB priority grouping nested attributes: per-TC nested parameter
 * blocks and per-bandwidth-group u8 percentages, with "all" variants. */
static const struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
	[DCB_PG_ATTR_TC_0]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_1]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_2]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_3]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_4]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_5]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_6]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_7]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_ALL]    = {.type = NLA_NESTED},
	[DCB_PG_ATTR_BW_ID_0]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_1]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_2]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_3]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_4]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_5]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_6]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_7]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_ALL] = {.type = NLA_FLAG},
};
110 
/* DCB traffic class nested attributes: per-TC PG id, UP mapping,
 * strict priority flag and bandwidth percentage. */
static const struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
	[DCB_TC_ATTR_PARAM_PGID]            = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_UP_MAPPING]      = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_STRICT_PRIO]     = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_BW_PCT]          = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_ALL]             = {.type = NLA_FLAG},
};
119 
/* DCB capabilities nested attributes (DCB_CMD_GCAP queries). */
static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
	[DCB_CAP_ATTR_ALL]     = {.type = NLA_FLAG},
	[DCB_CAP_ATTR_PG]      = {.type = NLA_U8},
	[DCB_CAP_ATTR_PFC]     = {.type = NLA_U8},
	[DCB_CAP_ATTR_UP2TC]   = {.type = NLA_U8},
	[DCB_CAP_ATTR_PG_TCS]  = {.type = NLA_U8},
	[DCB_CAP_ATTR_PFC_TCS] = {.type = NLA_U8},
	[DCB_CAP_ATTR_GSP]     = {.type = NLA_U8},
	[DCB_CAP_ATTR_BCN]     = {.type = NLA_U8},
	[DCB_CAP_ATTR_DCBX]    = {.type = NLA_U8},
};
132 
/* DCB number of traffic classes nested attributes (per PG / PFC).
 * (Previous comment was a copy-paste of the capabilities table's.) */
static const struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
	[DCB_NUMTCS_ATTR_ALL]     = {.type = NLA_FLAG},
	[DCB_NUMTCS_ATTR_PG]      = {.type = NLA_U8},
	[DCB_NUMTCS_ATTR_PFC]     = {.type = NLA_U8},
};
139 
/* DCB BCN (Backward Congestion Notification) nested attributes:
 * per-priority rate-limiter enables (u8) plus u32 tuning parameters. */
static const struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
	[DCB_BCN_ATTR_RP_0]         = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_1]         = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_2]         = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_3]         = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_4]         = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_5]         = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_6]         = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_7]         = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_ALL]       = {.type = NLA_FLAG},
	[DCB_BCN_ATTR_BCNA_0]       = {.type = NLA_U32},
	[DCB_BCN_ATTR_BCNA_1]       = {.type = NLA_U32},
	[DCB_BCN_ATTR_ALPHA]        = {.type = NLA_U32},
	[DCB_BCN_ATTR_BETA]         = {.type = NLA_U32},
	[DCB_BCN_ATTR_GD]           = {.type = NLA_U32},
	[DCB_BCN_ATTR_GI]           = {.type = NLA_U32},
	[DCB_BCN_ATTR_TMAX]         = {.type = NLA_U32},
	[DCB_BCN_ATTR_TD]           = {.type = NLA_U32},
	[DCB_BCN_ATTR_RMIN]         = {.type = NLA_U32},
	[DCB_BCN_ATTR_W]            = {.type = NLA_U32},
	[DCB_BCN_ATTR_RD]           = {.type = NLA_U32},
	[DCB_BCN_ATTR_RU]           = {.type = NLA_U32},
	[DCB_BCN_ATTR_WRTT]         = {.type = NLA_U32},
	[DCB_BCN_ATTR_RI]           = {.type = NLA_U32},
	[DCB_BCN_ATTR_C]            = {.type = NLA_U32},
	[DCB_BCN_ATTR_ALL]          = {.type = NLA_FLAG},
};
168 
/* DCB APP nested attributes: selector type, protocol id, priority. */
static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
	[DCB_APP_ATTR_IDTYPE]       = {.type = NLA_U8},
	[DCB_APP_ATTR_ID]           = {.type = NLA_U16},
	[DCB_APP_ATTR_PRIORITY]     = {.type = NLA_U8},
};
175 
/* IEEE 802.1Qaz nested attributes.  ETS/PFC/maxrate are fixed-size
 * binary structs validated by minimum length only. */
static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = {
	[DCB_ATTR_IEEE_ETS]	    = {.len = sizeof(struct ieee_ets)},
	[DCB_ATTR_IEEE_PFC]	    = {.len = sizeof(struct ieee_pfc)},
	[DCB_ATTR_IEEE_APP_TABLE]   = {.type = NLA_NESTED},
	[DCB_ATTR_IEEE_MAXRATE]   = {.len = sizeof(struct ieee_maxrate)},
};
183 
/* Policy for entries inside DCB_ATTR_IEEE_APP_TABLE. */
static const struct nla_policy dcbnl_ieee_app[DCB_ATTR_IEEE_APP_MAX + 1] = {
	[DCB_ATTR_IEEE_APP]	    = {.len = sizeof(struct dcb_app)},
};
187 
/* DCB feature configuration nested attributes (GFEATCFG/SFEATCFG).
 * (Previous comment incorrectly described the numtcs table.) */
static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = {
	[DCB_FEATCFG_ATTR_ALL]      = {.type = NLA_FLAG},
	[DCB_FEATCFG_ATTR_PG]       = {.type = NLA_U8},
	[DCB_FEATCFG_ATTR_PFC]      = {.type = NLA_U8},
	[DCB_FEATCFG_ATTR_APP]      = {.type = NLA_U8},
};
195 
/* NOTE(review): appears to hold the per-device APP table entries used
 * by dcb_getapp()/dcb_setapp() (defined later in this file); dcb_lock
 * guards the list — confirm against the rest of the file. */
static LIST_HEAD(dcb_app_list);
static DEFINE_SPINLOCK(dcb_lock);
198 
199 static struct sk_buff *dcbnl_newmsg(int type, u8 cmd, u32 port, u32 seq,
200 				    u32 flags, struct nlmsghdr **nlhp)
201 {
202 	struct sk_buff *skb;
203 	struct dcbmsg *dcb;
204 	struct nlmsghdr *nlh;
205 
206 	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
207 	if (!skb)
208 		return NULL;
209 
210 	nlh = nlmsg_put(skb, port, seq, type, sizeof(*dcb), flags);
211 	if (!nlh) {
212 		/* header should always fit, allocation must be buggy */
213 		BUG();
214 	}
215 
216 	dcb = nlmsg_data(nlh);
217 	dcb->dcb_family = AF_UNSPEC;
218 	dcb->cmd = cmd;
219 	dcb->dcb_pad = 0;
220 
221 	if (nlhp)
222 		*nlhp = nlh;
223 
224 	return skb;
225 }
226 
/* Standard netlink reply call: build and unicast a minimal DCB message
 * carrying a single u8 attribute.
 * @value: payload stored under attribute id @attr
 * @event: rtnetlink message type (RTM_GETDCB / RTM_SETDCB)
 * @cmd:   DCB command echoed back in the dcbmsg header
 * Returns 0 on success, -EINVAL on any failure.
 */
static int dcbnl_reply(u8 value, u8 event, u8 cmd, u8 attr, u32 pid,
                       u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct dcbmsg *dcb;
	struct nlmsghdr *nlh;
	int ret = -EINVAL;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		return ret;

	/* NLMSG_NEW() hides a "goto nlmsg_failure" on overrun, hence the
	 * label below even though no explicit jump is visible. */
	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, event, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = cmd;
	dcb->dcb_pad = 0;

	ret = nla_put_u8(dcbnl_skb, attr, value);
	if (ret)
		goto err;

	/* end the message, assign the nlmsg_len. */
	nlmsg_end(dcbnl_skb, nlh);
	/* rtnl_unicast() consumes the skb even on error: no free here. */
	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret)
		return -EINVAL;

	return 0;
nlmsg_failure:
err:
	kfree_skb(dcbnl_skb);
	return ret;
}
263 
264 static int dcbnl_getstate(struct net_device *netdev, struct nlattr **tb,
265                           u32 pid, u32 seq, u16 flags)
266 {
267 	int ret = -EINVAL;
268 
269 	/* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */
270 	if (!netdev->dcbnl_ops->getstate)
271 		return ret;
272 
273 	ret = dcbnl_reply(netdev->dcbnl_ops->getstate(netdev), RTM_GETDCB,
274 	                  DCB_CMD_GSTATE, DCB_ATTR_STATE, pid, seq, flags);
275 
276 	return ret;
277 }
278 
/* PFC_GCFG: report per-user-priority PFC configuration.
 * Builds a DCB_ATTR_PFC_CFG nest with one u8 per requested priority
 * (all eight when DCB_PFC_UP_ATTR_ALL is present) and unicasts the
 * result to the requester.  Returns 0 on success, -EINVAL otherwise.
 */
static int dcbnl_getpfccfg(struct net_device *netdev, struct nlattr **tb,
                           u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest;
	u8 value;
	int ret = -EINVAL;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_PFC_CFG] || !netdev->dcbnl_ops->getpfccfg)
		return ret;

	ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
	                       tb[DCB_ATTR_PFC_CFG],
	                       dcbnl_pfc_up_nest);
	if (ret)
		goto err_out;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		goto err_out;

	/* NLMSG_NEW() jumps to the nlmsg_failure label on overrun */
	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_PFC_GCFG;

	nest = nla_nest_start(dcbnl_skb, DCB_ATTR_PFC_CFG);
	if (!nest)
		goto err;

	/* the ALL flag requests every user priority */
	if (data[DCB_PFC_UP_ATTR_ALL])
		getall = 1;

	for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
		if (!getall && !data[i])
			continue;

		netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0,
		                             &value);
		ret = nla_put_u8(dcbnl_skb, i, value);

		if (ret) {
			nla_nest_cancel(dcbnl_skb, nest);
			goto err;
		}
	}
	nla_nest_end(dcbnl_skb, nest);

	nlmsg_end(dcbnl_skb, nlh);

	/* rtnl_unicast() consumes the skb even on failure */
	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret)
		goto err_out;

	return 0;
nlmsg_failure:
err:
	kfree_skb(dcbnl_skb);
err_out:
	return -EINVAL;
}
345 
346 static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlattr **tb,
347                                 u32 pid, u32 seq, u16 flags)
348 {
349 	struct sk_buff *dcbnl_skb;
350 	struct nlmsghdr *nlh;
351 	struct dcbmsg *dcb;
352 	u8 perm_addr[MAX_ADDR_LEN];
353 	int ret = -EINVAL;
354 
355 	if (!netdev->dcbnl_ops->getpermhwaddr)
356 		return ret;
357 
358 	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
359 	if (!dcbnl_skb)
360 		goto err_out;
361 
362 	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
363 
364 	dcb = NLMSG_DATA(nlh);
365 	dcb->dcb_family = AF_UNSPEC;
366 	dcb->cmd = DCB_CMD_GPERM_HWADDR;
367 
368 	netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);
369 
370 	ret = nla_put(dcbnl_skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr),
371 	              perm_addr);
372 
373 	nlmsg_end(dcbnl_skb, nlh);
374 
375 	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
376 	if (ret)
377 		goto err_out;
378 
379 	return 0;
380 
381 nlmsg_failure:
382 	kfree_skb(dcbnl_skb);
383 err_out:
384 	return -EINVAL;
385 }
386 
/* GCAP: report device DCB capabilities.
 * Builds a DCB_ATTR_CAP nest containing one u8 per requested capability
 * (every capability when DCB_CAP_ATTR_ALL is present).  Capabilities
 * the driver's getcap() rejects (non-zero return) are silently omitted
 * from the reply.  Returns 0 on success, -EINVAL otherwise.
 */
static int dcbnl_getcap(struct net_device *netdev, struct nlattr **tb,
                        u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest;
	u8 value;
	int ret = -EINVAL;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_CAP] || !netdev->dcbnl_ops->getcap)
		return ret;

	ret = nla_parse_nested(data, DCB_CAP_ATTR_MAX, tb[DCB_ATTR_CAP],
	                       dcbnl_cap_nest);
	if (ret)
		goto err_out;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		goto err_out;

	/* NLMSG_NEW() jumps to the nlmsg_failure label on overrun */
	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_GCAP;

	nest = nla_nest_start(dcbnl_skb, DCB_ATTR_CAP);
	if (!nest)
		goto err;

	if (data[DCB_CAP_ATTR_ALL])
		getall = 1;

	/* iterate every capability id after the ALL flag */
	for (i = DCB_CAP_ATTR_ALL+1; i <= DCB_CAP_ATTR_MAX; i++) {
		if (!getall && !data[i])
			continue;

		if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) {
			ret = nla_put_u8(dcbnl_skb, i, value);

			if (ret) {
				nla_nest_cancel(dcbnl_skb, nest);
				goto err;
			}
		}
	}
	nla_nest_end(dcbnl_skb, nest);

	nlmsg_end(dcbnl_skb, nlh);

	/* rtnl_unicast() consumes the skb even on failure */
	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret)
		goto err_out;

	return 0;
nlmsg_failure:
err:
	kfree_skb(dcbnl_skb);
err_out:
	return -EINVAL;
}
452 
/* GNUMTCS: report the number of traffic classes supported for PG
 * and/or PFC.  Unlike most handlers here, a driver getnumtcs() error
 * is propagated to the caller (via the err path) rather than being
 * mapped to -EINVAL.  Returns 0 on success.
 */
static int dcbnl_getnumtcs(struct net_device *netdev, struct nlattr **tb,
                           u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest;
	u8 value;
	int ret = -EINVAL;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_NUMTCS] || !netdev->dcbnl_ops->getnumtcs)
		return ret;

	ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
	                       dcbnl_numtcs_nest);
	if (ret) {
		ret = -EINVAL;
		goto err_out;
	}

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb) {
		ret = -EINVAL;
		goto err_out;
	}

	/* NLMSG_NEW() jumps to the nlmsg_failure label on overrun */
	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_GNUMTCS;

	nest = nla_nest_start(dcbnl_skb, DCB_ATTR_NUMTCS);
	if (!nest) {
		ret = -EINVAL;
		goto err;
	}

	if (data[DCB_NUMTCS_ATTR_ALL])
		getall = 1;

	for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
		if (!getall && !data[i])
			continue;

		ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value);
		if (!ret) {
			ret = nla_put_u8(dcbnl_skb, i, value);

			if (ret) {
				nla_nest_cancel(dcbnl_skb, nest);
				ret = -EINVAL;
				goto err;
			}
		} else {
			/* propagate the driver's error code */
			goto err;
		}
	}
	nla_nest_end(dcbnl_skb, nest);

	nlmsg_end(dcbnl_skb, nlh);

	/* rtnl_unicast() consumes the skb even on failure */
	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret) {
		ret = -EINVAL;
		goto err_out;
	}

	return 0;
nlmsg_failure:
err:
	kfree_skb(dcbnl_skb);
err_out:
	return ret;
}
530 
/* SNUMTCS: set the supported number of traffic classes for PG and/or
 * PFC.  The reply attribute carries !!ret: 0 on success, 1 when the
 * driver rejected a value.  Note the loop deliberately falls through
 * into the operr label so a reply is sent in either case; remaining
 * attributes after a driver error are skipped.
 */
static int dcbnl_setnumtcs(struct net_device *netdev, struct nlattr **tb,
                           u32 pid, u32 seq, u16 flags)
{
	struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1];
	int ret = -EINVAL;
	u8 value;
	int i;

	if (!tb[DCB_ATTR_NUMTCS] || !netdev->dcbnl_ops->setnumtcs)
		return ret;

	ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
	                       dcbnl_numtcs_nest);

	if (ret) {
		ret = -EINVAL;
		goto err;
	}

	for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
		if (data[i] == NULL)
			continue;

		value = nla_get_u8(data[i]);

		ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value);

		if (ret)
			goto operr;
	}

operr:	/* intentional fallthrough: always acknowledge with !!ret */
	ret = dcbnl_reply(!!ret, RTM_SETDCB, DCB_CMD_SNUMTCS,
	                  DCB_ATTR_NUMTCS, pid, seq, flags);

err:
	return ret;
}
569 
570 static int dcbnl_getpfcstate(struct net_device *netdev, struct nlattr **tb,
571                              u32 pid, u32 seq, u16 flags)
572 {
573 	int ret = -EINVAL;
574 
575 	if (!netdev->dcbnl_ops->getpfcstate)
576 		return ret;
577 
578 	ret = dcbnl_reply(netdev->dcbnl_ops->getpfcstate(netdev), RTM_GETDCB,
579 	                  DCB_CMD_PFC_GSTATE, DCB_ATTR_PFC_STATE,
580 	                  pid, seq, flags);
581 
582 	return ret;
583 }
584 
585 static int dcbnl_setpfcstate(struct net_device *netdev, struct nlattr **tb,
586                              u32 pid, u32 seq, u16 flags)
587 {
588 	int ret = -EINVAL;
589 	u8 value;
590 
591 	if (!tb[DCB_ATTR_PFC_STATE] || !netdev->dcbnl_ops->setpfcstate)
592 		return ret;
593 
594 	value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]);
595 
596 	netdev->dcbnl_ops->setpfcstate(netdev, value);
597 
598 	ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_PFC_SSTATE, DCB_ATTR_PFC_STATE,
599 	                  pid, seq, flags);
600 
601 	return ret;
602 }
603 
/* GAPP: look up the priority for an (idtype, protocol id) APP entry,
 * preferring the driver's getapp() callback and falling back to the
 * core dcb_getapp() table, and unicast the triple back in a
 * DCB_ATTR_APP nest.  Returns 0 on success, negative error otherwise.
 */
static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb,
                        u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *app_nest;
	struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
	u16 id;
	u8 up, idtype;
	int ret = -EINVAL;

	if (!tb[DCB_ATTR_APP])
		goto out;

	ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
	                       dcbnl_app_nest);
	if (ret)
		goto out;

	ret = -EINVAL;
	/* all must be non-null */
	if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
	    (!app_tb[DCB_APP_ATTR_ID]))
		goto out;

	/* either by eth type or by socket number */
	idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
	if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
	    (idtype != DCB_APP_IDTYPE_PORTNUM))
		goto out;

	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);

	/* driver callback wins; otherwise consult the core APP table */
	if (netdev->dcbnl_ops->getapp) {
		up = netdev->dcbnl_ops->getapp(netdev, idtype, id);
	} else {
		struct dcb_app app = {
					.selector = idtype,
					.protocol = id,
				     };
		up = dcb_getapp(netdev, &app);
	}

	/* send this back */
	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		goto out;

	/* NLMSG_NEW() jumps to the nlmsg_failure label on overrun */
	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_GAPP;

	app_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_APP);
	if (!app_nest)
		goto out_cancel;

	ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_IDTYPE, idtype);
	if (ret)
		goto out_cancel;

	ret = nla_put_u16(dcbnl_skb, DCB_APP_ATTR_ID, id);
	if (ret)
		goto out_cancel;

	ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_PRIORITY, up);
	if (ret)
		goto out_cancel;

	nla_nest_end(dcbnl_skb, app_nest);
	nlmsg_end(dcbnl_skb, nlh);

	/* rtnl_unicast() consumes the skb even on failure */
	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret)
		goto nlmsg_failure;

	goto out;

out_cancel:
	nla_nest_cancel(dcbnl_skb, app_nest);
nlmsg_failure:
	kfree_skb(dcbnl_skb);
out:
	return ret;
}
690 
691 static int dcbnl_setapp(struct net_device *netdev, struct nlattr **tb,
692                         u32 pid, u32 seq, u16 flags)
693 {
694 	int err, ret = -EINVAL;
695 	u16 id;
696 	u8 up, idtype;
697 	struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
698 
699 	if (!tb[DCB_ATTR_APP])
700 		goto out;
701 
702 	ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
703 	                       dcbnl_app_nest);
704 	if (ret)
705 		goto out;
706 
707 	ret = -EINVAL;
708 	/* all must be non-null */
709 	if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
710 	    (!app_tb[DCB_APP_ATTR_ID]) ||
711 	    (!app_tb[DCB_APP_ATTR_PRIORITY]))
712 		goto out;
713 
714 	/* either by eth type or by socket number */
715 	idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
716 	if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
717 	    (idtype != DCB_APP_IDTYPE_PORTNUM))
718 		goto out;
719 
720 	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
721 	up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]);
722 
723 	if (netdev->dcbnl_ops->setapp) {
724 		err = netdev->dcbnl_ops->setapp(netdev, idtype, id, up);
725 	} else {
726 		struct dcb_app app;
727 		app.selector = idtype;
728 		app.protocol = id;
729 		app.priority = up;
730 		err = dcb_setapp(netdev, &app);
731 	}
732 
733 	ret = dcbnl_reply(err, RTM_SETDCB, DCB_CMD_SAPP, DCB_ATTR_APP,
734 			  pid, seq, flags);
735 	dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SAPP, seq, 0);
736 out:
737 	return ret;
738 }
739 
/* Common worker for PGTX_GCFG / PGRX_GCFG (dir: 0 = Tx, 1 = Rx).
 * Builds a DCB_ATTR_PG_CFG nest with one nested parameter block per
 * requested traffic class (pgid, up-mapping, strict prio, bw pct) and
 * one u8 per requested bandwidth-group id, querying the driver's
 * per-direction callbacks, then unicasts the result.  Returns 0 on
 * success, -EINVAL on any failure.
 */
static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlattr **tb,
                             u32 pid, u32 seq, u16 flags, int dir)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *pg_nest, *param_nest, *data;
	struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
	struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
	u8 prio, pgid, tc_pct, up_map;
	int ret  = -EINVAL;
	int getall = 0;
	int i;

	/* require the full set of per-direction callbacks up front */
	if (!tb[DCB_ATTR_PG_CFG] ||
	    !netdev->dcbnl_ops->getpgtccfgtx ||
	    !netdev->dcbnl_ops->getpgtccfgrx ||
	    !netdev->dcbnl_ops->getpgbwgcfgtx ||
	    !netdev->dcbnl_ops->getpgbwgcfgrx)
		return ret;

	ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
	                       tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);

	if (ret)
		goto err_out;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		goto err_out;

	/* NLMSG_NEW() jumps to the nlmsg_failure label on overrun */
	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = (dir) ? DCB_CMD_PGRX_GCFG : DCB_CMD_PGTX_GCFG;

	pg_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_PG_CFG);
	if (!pg_nest)
		goto err;

	if (pg_tb[DCB_PG_ATTR_TC_ALL])
		getall = 1;

	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		if (!getall && !pg_tb[i])
			continue;

		/* when ALL was given, its parameter nest selects which
		 * fields to report for every TC */
		if (pg_tb[DCB_PG_ATTR_TC_ALL])
			data = pg_tb[DCB_PG_ATTR_TC_ALL];
		else
			data = pg_tb[i];
		ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
				       data, dcbnl_tc_param_nest);
		if (ret)
			goto err_pg;

		param_nest = nla_nest_start(dcbnl_skb, i);
		if (!param_nest)
			goto err_pg;

		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->getpgtccfgrx(netdev,
						i - DCB_PG_ATTR_TC_0, &prio,
						&pgid, &tc_pct, &up_map);
		} else {
			/* Tx */
			netdev->dcbnl_ops->getpgtccfgtx(netdev,
						i - DCB_PG_ATTR_TC_0, &prio,
						&pgid, &tc_pct, &up_map);
		}

		if (param_tb[DCB_TC_ATTR_PARAM_PGID] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(dcbnl_skb,
			                 DCB_TC_ATTR_PARAM_PGID, pgid);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(dcbnl_skb,
			                 DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(dcbnl_skb,
			                 DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(dcbnl_skb, DCB_TC_ATTR_PARAM_BW_PCT,
			                 tc_pct);
			if (ret)
				goto err_param;
		}
		nla_nest_end(dcbnl_skb, param_nest);
	}

	if (pg_tb[DCB_PG_ATTR_BW_ID_ALL])
		getall = 1;
	else
		getall = 0;

	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		if (!getall && !pg_tb[i])
			continue;

		tc_pct = DCB_ATTR_VALUE_UNDEFINED;

		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->getpgbwgcfgrx(netdev,
					i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
		} else {
			/* Tx */
			netdev->dcbnl_ops->getpgbwgcfgtx(netdev,
					i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
		}
		ret = nla_put_u8(dcbnl_skb, i, tc_pct);

		if (ret)
			goto err_pg;
	}

	nla_nest_end(dcbnl_skb, pg_nest);

	nlmsg_end(dcbnl_skb, nlh);

	/* rtnl_unicast() consumes the skb even on failure */
	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret)
		goto err_out;

	return 0;

err_param:
	nla_nest_cancel(dcbnl_skb, param_nest);
err_pg:
	nla_nest_cancel(dcbnl_skb, pg_nest);
nlmsg_failure:
err:
	kfree_skb(dcbnl_skb);
err_out:
	ret  = -EINVAL;
	return ret;
}
896 
/* PGTX_GCFG: get priority-group Tx configuration (dir = 0 selects Tx). */
static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlattr **tb,
                             u32 pid, u32 seq, u16 flags)
{
	return __dcbnl_pg_getcfg(netdev, tb, pid, seq, flags, 0);
}
902 
/* PGRX_GCFG: get priority-group Rx configuration (dir = 1 selects Rx). */
static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlattr **tb,
                             u32 pid, u32 seq, u16 flags)
{
	return __dcbnl_pg_getcfg(netdev, tb, pid, seq, flags, 1);
}
908 
909 static int dcbnl_setstate(struct net_device *netdev, struct nlattr **tb,
910                           u32 pid, u32 seq, u16 flags)
911 {
912 	int ret = -EINVAL;
913 	u8 value;
914 
915 	if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->setstate)
916 		return ret;
917 
918 	value = nla_get_u8(tb[DCB_ATTR_STATE]);
919 
920 	ret = dcbnl_reply(netdev->dcbnl_ops->setstate(netdev, value),
921 	                  RTM_SETDCB, DCB_CMD_SSTATE, DCB_ATTR_STATE,
922 	                  pid, seq, flags);
923 
924 	return ret;
925 }
926 
927 static int dcbnl_setpfccfg(struct net_device *netdev, struct nlattr **tb,
928                            u32 pid, u32 seq, u16 flags)
929 {
930 	struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1];
931 	int i;
932 	int ret = -EINVAL;
933 	u8 value;
934 
935 	if (!tb[DCB_ATTR_PFC_CFG] || !netdev->dcbnl_ops->setpfccfg)
936 		return ret;
937 
938 	ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
939 	                       tb[DCB_ATTR_PFC_CFG],
940 	                       dcbnl_pfc_up_nest);
941 	if (ret)
942 		goto err;
943 
944 	for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
945 		if (data[i] == NULL)
946 			continue;
947 		value = nla_get_u8(data[i]);
948 		netdev->dcbnl_ops->setpfccfg(netdev,
949 			data[i]->nla_type - DCB_PFC_UP_ATTR_0, value);
950 	}
951 
952 	ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_PFC_SCFG, DCB_ATTR_PFC_CFG,
953 	                  pid, seq, flags);
954 err:
955 	return ret;
956 }
957 
958 static int dcbnl_setall(struct net_device *netdev, struct nlattr **tb,
959                         u32 pid, u32 seq, u16 flags)
960 {
961 	int ret = -EINVAL;
962 
963 	if (!tb[DCB_ATTR_SET_ALL] || !netdev->dcbnl_ops->setall)
964 		return ret;
965 
966 	ret = dcbnl_reply(netdev->dcbnl_ops->setall(netdev), RTM_SETDCB,
967 	                  DCB_CMD_SET_ALL, DCB_ATTR_SET_ALL, pid, seq, flags);
968 	dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SET_ALL, seq, 0);
969 
970 	return ret;
971 }
972 
/* Common worker for PGTX_SCFG / PGRX_SCFG (dir: 0 = Tx, 1 = Rx).
 * Walks the DCB_ATTR_PG_CFG nest: for each traffic-class parameter
 * block present, programs prio/pgid/bw-pct/up-map (absent fields are
 * passed as DCB_ATTR_VALUE_UNDEFINED), and for each bandwidth-group
 * attribute programs its percentage, using the driver's per-direction
 * callbacks.  Always acknowledges with status 0 on a parsed request.
 */
static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlattr **tb,
                             u32 pid, u32 seq, u16 flags, int dir)
{
	struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
	struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
	int ret = -EINVAL;
	int i;
	u8 pgid;
	u8 up_map;
	u8 prio;
	u8 tc_pct;

	/* require the full set of per-direction callbacks up front */
	if (!tb[DCB_ATTR_PG_CFG] ||
	    !netdev->dcbnl_ops->setpgtccfgtx ||
	    !netdev->dcbnl_ops->setpgtccfgrx ||
	    !netdev->dcbnl_ops->setpgbwgcfgtx ||
	    !netdev->dcbnl_ops->setpgbwgcfgrx)
		return ret;

	ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
	                       tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
	if (ret)
		goto err;

	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		if (!pg_tb[i])
			continue;

		ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
		                       pg_tb[i], dcbnl_tc_param_nest);
		if (ret)
			goto err;

		/* fields not supplied stay "undefined" for the driver */
		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO])
			prio =
			    nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]);

		if (param_tb[DCB_TC_ATTR_PARAM_PGID])
			pgid = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_PGID]);

		if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT])
			tc_pct = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_BW_PCT]);

		if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING])
			up_map =
			     nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]);

		/* dir: Tx = 0, Rx = 1 */
		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->setpgtccfgrx(netdev,
				i - DCB_PG_ATTR_TC_0,
				prio, pgid, tc_pct, up_map);
		} else {
			/* Tx */
			netdev->dcbnl_ops->setpgtccfgtx(netdev,
				i - DCB_PG_ATTR_TC_0,
				prio, pgid, tc_pct, up_map);
		}
	}

	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		if (!pg_tb[i])
			continue;

		tc_pct = nla_get_u8(pg_tb[i]);

		/* dir: Tx = 0, Rx = 1 */
		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->setpgbwgcfgrx(netdev,
					 i - DCB_PG_ATTR_BW_ID_0, tc_pct);
		} else {
			/* Tx */
			netdev->dcbnl_ops->setpgbwgcfgtx(netdev,
					 i - DCB_PG_ATTR_BW_ID_0, tc_pct);
		}
	}

	ret = dcbnl_reply(0, RTM_SETDCB,
			  (dir ? DCB_CMD_PGRX_SCFG : DCB_CMD_PGTX_SCFG),
			  DCB_ATTR_PG_CFG, pid, seq, flags);

err:
	return ret;
}
1064 
1065 static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlattr **tb,
1066                              u32 pid, u32 seq, u16 flags)
1067 {
1068 	return __dcbnl_pg_setcfg(netdev, tb, pid, seq, flags, 0);
1069 }
1070 
1071 static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlattr **tb,
1072                              u32 pid, u32 seq, u16 flags)
1073 {
1074 	return __dcbnl_pg_setcfg(netdev, tb, pid, seq, flags, 1);
1075 }
1076 
/*
 * dcbnl_bcn_getcfg - handle DCB_CMD_BCN_GCFG (Backward Congestion
 * Notification configuration query).
 *
 * Parses the nested DCB_ATTR_BCN request and unicasts a reply containing,
 * for each requested attribute (or all of them when DCB_BCN_ATTR_ALL is
 * set):
 *  - DCB_BCN_ATTR_RP_0..RP_7: one u8 each from ->getbcnrp()
 *  - DCB_BCN_ATTR_BCNA_0..DCB_BCN_ATTR_RI: one u32 each from ->getbcncfg()
 *
 * Returns 0 on success.  NOTE(review): every failure path funnels through
 * err_out, which overwrites the specific error code with -EINVAL.
 */
static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlattr **tb,
                            u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *bcn_nest;
	struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1];
	u8 value_byte;
	u32 value_integer;
	int ret  = -EINVAL;
	bool getall = false;
	int i;

	/* both driver callbacks are required to answer any part of the query */
	if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->getbcnrp ||
	    !netdev->dcbnl_ops->getbcncfg)
		return ret;

	ret = nla_parse_nested(bcn_tb, DCB_BCN_ATTR_MAX,
	                       tb[DCB_ATTR_BCN], dcbnl_bcn_nest);

	if (ret)
		goto err_out;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		goto err_out;

	/* NLMSG_NEW jumps to the nlmsg_failure label on failure */
	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_BCN_GCFG;

	bcn_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_BCN);
	if (!bcn_nest)
		goto err;

	/* DCB_BCN_ATTR_ALL requests every attribute in one shot */
	if (bcn_tb[DCB_BCN_ATTR_ALL])
		getall = true;

	/* per-priority RP values, one byte each */
	for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
		if (!getall && !bcn_tb[i])
			continue;

		netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0,
		                            &value_byte);
		ret = nla_put_u8(dcbnl_skb, i, value_byte);
		if (ret)
			goto err_bcn;
	}

	/* remaining BCN parameters are 32-bit values */
	for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
		if (!getall && !bcn_tb[i])
			continue;

		netdev->dcbnl_ops->getbcncfg(netdev, i,
		                             &value_integer);
		ret = nla_put_u32(dcbnl_skb, i, value_integer);
		if (ret)
			goto err_bcn;
	}

	nla_nest_end(dcbnl_skb, bcn_nest);

	nlmsg_end(dcbnl_skb, nlh);

	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret)
		goto err_out;

	return 0;

err_bcn:
	nla_nest_cancel(dcbnl_skb, bcn_nest);
nlmsg_failure:
err:
	kfree_skb(dcbnl_skb);
err_out:
	ret  = -EINVAL;
	return ret;
}
1159 
1160 static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlattr **tb,
1161                             u32 pid, u32 seq, u16 flags)
1162 {
1163 	struct nlattr *data[DCB_BCN_ATTR_MAX + 1];
1164 	int i;
1165 	int ret = -EINVAL;
1166 	u8 value_byte;
1167 	u32 value_int;
1168 
1169 	if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->setbcncfg ||
1170 	    !netdev->dcbnl_ops->setbcnrp)
1171 		return ret;
1172 
1173 	ret = nla_parse_nested(data, DCB_BCN_ATTR_MAX,
1174 	                       tb[DCB_ATTR_BCN],
1175 	                       dcbnl_pfc_up_nest);
1176 	if (ret)
1177 		goto err;
1178 
1179 	for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
1180 		if (data[i] == NULL)
1181 			continue;
1182 		value_byte = nla_get_u8(data[i]);
1183 		netdev->dcbnl_ops->setbcnrp(netdev,
1184 			data[i]->nla_type - DCB_BCN_ATTR_RP_0, value_byte);
1185 	}
1186 
1187 	for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
1188 		if (data[i] == NULL)
1189 			continue;
1190 		value_int = nla_get_u32(data[i]);
1191 		netdev->dcbnl_ops->setbcncfg(netdev,
1192 	                                     i, value_int);
1193 	}
1194 
1195 	ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_BCN_SCFG, DCB_ATTR_BCN,
1196 	                  pid, seq, flags);
1197 err:
1198 	return ret;
1199 }
1200 
/*
 * dcbnl_build_peer_app - append the peer's APP table to a message.
 * @netdev: device whose driver supplies the peer data
 * @skb: message under construction
 * @app_nested_type: attribute id for the enclosing nest
 * @app_info_type: attribute id for the dcb_peer_app_info header (0 = omit)
 * @app_entry_type: attribute id used for each dcb_app entry
 *
 * Returns 0 on success AND when the driver callbacks fail (the table is
 * then simply omitted), -ENOMEM on allocation failure, or -EMSGSIZE if
 * the skb runs out of room.
 */
static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff* skb,
				int app_nested_type, int app_info_type,
				int app_entry_type)
{
	struct dcb_peer_app_info info;
	struct dcb_app *table = NULL;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	u16 app_count;
	int err;


	/**
	 * retrieve the peer app configuration from the driver. If the driver
	 * handlers fail exit without doing anything
	 */
	err = ops->peer_getappinfo(netdev, &info, &app_count);
	if (!err && app_count) {
		table = kmalloc(sizeof(struct dcb_app) * app_count, GFP_KERNEL);
		if (!table)
			return -ENOMEM;

		err = ops->peer_getapptable(netdev, table);
	}

	if (!err) {
		u16 i;
		struct nlattr *app;

		/**
		 * build the message, from here on the only possible failure
		 * is due to the skb size
		 */
		err = -EMSGSIZE;

		app = nla_nest_start(skb, app_nested_type);
		if (!app)
			goto nla_put_failure;

		/* the info header is optional (app_info_type may be 0) */
		if (app_info_type &&
		    nla_put(skb, app_info_type, sizeof(info), &info))
			goto nla_put_failure;

		for (i = 0; i < app_count; i++) {
			if (nla_put(skb, app_entry_type, sizeof(struct dcb_app),
				    &table[i]))
				goto nla_put_failure;
		}
		nla_nest_end(skb, app);
	}
	/* driver-callback failures are deliberately reported as success */
	err = 0;

nla_put_failure:
	kfree(table);
	return err;
}
1256 
/* Handle IEEE 802.1Qaz GET commands.
 *
 * Fills @skb with the device's IEEE DCB state: ETS, maxrate and PFC
 * (when the driver provides the getters), the locally stored APP table,
 * peer ETS/PFC/APP information if available, and finally the DCBX mode.
 * Returns 0 on success, or -EMSGSIZE / a driver error on failure.
 */
static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
{
	struct nlattr *ieee, *app;
	struct dcb_app_type *itr;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	int dcbx;
	int err = -EMSGSIZE;

	if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
		goto nla_put_failure;
	ieee = nla_nest_start(skb, DCB_ATTR_IEEE);
	if (!ieee)
		goto nla_put_failure;

	if (ops->ieee_getets) {
		struct ieee_ets ets;
		err = ops->ieee_getets(netdev, &ets);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets))
			goto nla_put_failure;
	}

	if (ops->ieee_getmaxrate) {
		struct ieee_maxrate maxrate;
		err = ops->ieee_getmaxrate(netdev, &maxrate);
		if (!err) {
			err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE,
				      sizeof(maxrate), &maxrate);
			if (err)
				goto nla_put_failure;
		}
	}

	if (ops->ieee_getpfc) {
		struct ieee_pfc pfc;
		err = ops->ieee_getpfc(netdev, &pfc);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc))
			goto nla_put_failure;
	}

	app = nla_nest_start(skb, DCB_ATTR_IEEE_APP_TABLE);
	if (!app)
		goto nla_put_failure;

	/* dcb_app_list is shared module state: walk it under dcb_lock and
	 * emit only the entries belonging to this device
	 */
	spin_lock(&dcb_lock);
	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->ifindex == netdev->ifindex) {
			err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app),
					 &itr->app);
			if (err) {
				/* drop the lock before bailing out */
				spin_unlock(&dcb_lock);
				goto nla_put_failure;
			}
		}
	}

	/* read the DCBX mode while still holding dcb_lock */
	if (netdev->dcbnl_ops->getdcbx)
		dcbx = netdev->dcbnl_ops->getdcbx(netdev);
	else
		dcbx = -EOPNOTSUPP;

	spin_unlock(&dcb_lock);
	nla_nest_end(skb, app);

	/* get peer info if available */
	if (ops->ieee_peer_getets) {
		struct ieee_ets ets;
		err = ops->ieee_peer_getets(netdev, &ets);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets))
			goto nla_put_failure;
	}

	if (ops->ieee_peer_getpfc) {
		struct ieee_pfc pfc;
		err = ops->ieee_peer_getpfc(netdev, &pfc);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc))
			goto nla_put_failure;
	}

	if (ops->peer_getappinfo && ops->peer_getapptable) {
		err = dcbnl_build_peer_app(netdev, skb,
					   DCB_ATTR_IEEE_PEER_APP,
					   DCB_ATTR_IEEE_APP_UNSPEC,
					   DCB_ATTR_IEEE_APP);
		if (err)
			goto nla_put_failure;
	}

	nla_nest_end(skb, ieee);
	/* dcbx < 0 means the driver has no getdcbx; omit the attribute */
	if (dcbx >= 0) {
		err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
		if (err)
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return err;
}
1361 
/*
 * dcbnl_cee_pg_fill - emit one direction of CEE priority-group state.
 * @dir: nonzero = Tx (DCB_ATTR_CEE_TX_PG), 0 = Rx (DCB_ATTR_CEE_RX_PG)
 *
 * Nests the eight per-traffic-class parameter sets (via getpgtccfgtx/rx)
 * followed by the eight bandwidth-group percentages (via getpgbwgcfgtx/rx).
 * Values a driver does not overwrite are reported as
 * DCB_ATTR_VALUE_UNDEFINED.  Returns 0 or -EMSGSIZE.
 */
static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
			     int dir)
{
	u8 pgid, up_map, prio, tc_pct;
	const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
	int i = dir ? DCB_ATTR_CEE_TX_PG : DCB_ATTR_CEE_RX_PG;
	struct nlattr *pg = nla_nest_start(skb, i);

	if (!pg)
		goto nla_put_failure;

	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		struct nlattr *tc_nest = nla_nest_start(skb, i);

		if (!tc_nest)
			goto nla_put_failure;

		/* defaults reported when the driver leaves them untouched */
		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (!dir)
			ops->getpgtccfgrx(dev, i - DCB_PG_ATTR_TC_0,
					  &prio, &pgid, &tc_pct, &up_map);
		else
			ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0,
					  &prio, &pgid, &tc_pct, &up_map);

		if (nla_put_u8(skb, DCB_TC_ATTR_PARAM_PGID, pgid) ||
		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map) ||
		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio) ||
		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct))
			goto nla_put_failure;
		nla_nest_end(skb, tc_nest);
	}

	/* bandwidth-group percentages, one u8 per group */
	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;

		if (!dir)
			ops->getpgbwgcfgrx(dev, i - DCB_PG_ATTR_BW_ID_0,
					   &tc_pct);
		else
			ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0,
					   &tc_pct);
		if (nla_put_u8(skb, i, tc_pct))
			goto nla_put_failure;
	}
	nla_nest_end(skb, pg);
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
1417 
/*
 * dcbnl_cee_fill - fill @skb with the device's CEE DCBX state.
 *
 * Emits local PG (Tx then Rx), local PFC, the locally stored APP table,
 * feature flags, and peer PG/PFC/APP when the driver provides the
 * corresponding callbacks, followed by the DCBX mode.
 * Returns 0 on success or a negative errno.
 */
static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
{
	struct nlattr *cee, *app;
	struct dcb_app_type *itr;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	int dcbx, i, err = -EMSGSIZE;
	u8 value;

	if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
		goto nla_put_failure;
	cee = nla_nest_start(skb, DCB_ATTR_CEE);
	if (!cee)
		goto nla_put_failure;

	/* local pg */
	if (ops->getpgtccfgtx && ops->getpgbwgcfgtx) {
		err = dcbnl_cee_pg_fill(skb, netdev, 1);
		if (err)
			goto nla_put_failure;
	}

	if (ops->getpgtccfgrx && ops->getpgbwgcfgrx) {
		err = dcbnl_cee_pg_fill(skb, netdev, 0);
		if (err)
			goto nla_put_failure;
	}

	/* local pfc */
	if (ops->getpfccfg) {
		struct nlattr *pfc_nest = nla_nest_start(skb, DCB_ATTR_CEE_PFC);

		if (!pfc_nest)
			goto nla_put_failure;

		for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
			ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value);
			if (nla_put_u8(skb, i, value))
				goto nla_put_failure;
		}
		nla_nest_end(skb, pfc_nest);
	}

	/* local app: walk the shared list under dcb_lock and emit only
	 * this device's entries; failures must leave via dcb_unlock
	 */
	spin_lock(&dcb_lock);
	app = nla_nest_start(skb, DCB_ATTR_CEE_APP_TABLE);
	if (!app)
		goto dcb_unlock;

	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->ifindex == netdev->ifindex) {
			struct nlattr *app_nest = nla_nest_start(skb,
								 DCB_ATTR_APP);
			if (!app_nest)
				goto dcb_unlock;

			err = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE,
					 itr->app.selector);
			if (err)
				goto dcb_unlock;

			err = nla_put_u16(skb, DCB_APP_ATTR_ID,
					  itr->app.protocol);
			if (err)
				goto dcb_unlock;

			err = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY,
					 itr->app.priority);
			if (err)
				goto dcb_unlock;

			nla_nest_end(skb, app_nest);
		}
	}
	nla_nest_end(skb, app);

	/* read the DCBX mode while still holding dcb_lock */
	if (netdev->dcbnl_ops->getdcbx)
		dcbx = netdev->dcbnl_ops->getdcbx(netdev);
	else
		dcbx = -EOPNOTSUPP;

	spin_unlock(&dcb_lock);

	/* features flags */
	if (ops->getfeatcfg) {
		struct nlattr *feat = nla_nest_start(skb, DCB_ATTR_CEE_FEAT);
		if (!feat)
			goto nla_put_failure;

		/* a feature whose getter fails is silently skipped */
		for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX;
		     i++)
			if (!ops->getfeatcfg(netdev, i, &value) &&
			    nla_put_u8(skb, i, value))
				goto nla_put_failure;

		nla_nest_end(skb, feat);
	}

	/* peer info if available */
	if (ops->cee_peer_getpg) {
		struct cee_pg pg;
		err = ops->cee_peer_getpg(netdev, &pg);
		if (!err &&
		    nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg))
			goto nla_put_failure;
	}

	if (ops->cee_peer_getpfc) {
		struct cee_pfc pfc;
		err = ops->cee_peer_getpfc(netdev, &pfc);
		if (!err &&
		    nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc))
			goto nla_put_failure;
	}

	if (ops->peer_getappinfo && ops->peer_getapptable) {
		err = dcbnl_build_peer_app(netdev, skb,
					   DCB_ATTR_CEE_PEER_APP_TABLE,
					   DCB_ATTR_CEE_PEER_APP_INFO,
					   DCB_ATTR_CEE_PEER_APP);
		if (err)
			goto nla_put_failure;
	}
	nla_nest_end(skb, cee);

	/* DCBX state */
	if (dcbx >= 0) {
		err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
		if (err)
			goto nla_put_failure;
	}
	return 0;

dcb_unlock:
	spin_unlock(&dcb_lock);
nla_put_failure:
	return err;
}
1555 
/*
 * dcbnl_notify - broadcast the device's current DCB state to RTNLGRP_DCB.
 * @event: netlink message type for the notification (e.g. RTM_SETDCB)
 * @cmd: DCB command recorded in the dcbmsg header
 * @seq: unused; the notification is always sent with sequence number 0
 * @dcbx_ver: DCB_CAP_DCBX_VER_IEEE selects the IEEE fill, anything else
 *            the CEE fill
 *
 * Returns the fill result: 0 on success, or a negative errno which is
 * also reported to group listeners via rtnl_set_sk_err().
 */
static int dcbnl_notify(struct net_device *dev, int event, int cmd,
			u32 seq, u32 pid, int dcbx_ver)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
	int err;

	if (!ops)
		return -EOPNOTSUPP;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	nlh = nlmsg_put(skb, pid, 0, event, sizeof(*dcb), 0);
	if (nlh == NULL) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = cmd;

	if (dcbx_ver == DCB_CAP_DCBX_VER_IEEE)
		err = dcbnl_ieee_fill(skb, dev);
	else
		err = dcbnl_cee_fill(skb, dev);

	if (err < 0) {
		/* Report error to broadcast listeners */
		nlmsg_cancel(skb, nlh);
		kfree_skb(skb);
		rtnl_set_sk_err(net, RTNLGRP_DCB, err);
	} else {
		/* End nlmsg and notify broadcast listeners */
		nlmsg_end(skb, nlh);
		rtnl_notify(skb, net, 0, RTNLGRP_DCB, NULL, GFP_KERNEL);
	}

	return err;
}
1601 
1602 int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd,
1603 		      u32 seq, u32 pid)
1604 {
1605 	return dcbnl_notify(dev, event, cmd, seq, pid, DCB_CAP_DCBX_VER_IEEE);
1606 }
1607 EXPORT_SYMBOL(dcbnl_ieee_notify);
1608 
1609 int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
1610 		     u32 seq, u32 pid)
1611 {
1612 	return dcbnl_notify(dev, event, cmd, seq, pid, DCB_CAP_DCBX_VER_CEE);
1613 }
1614 EXPORT_SYMBOL(dcbnl_cee_notify);
1615 
/* Handle IEEE 802.1Qaz SET commands. If any requested operation cannot
 * be completed, the entire message is aborted and an error value is
 * returned. No attempt is made to reconcile the case where only part of
 * the command can be completed.
 */
static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb,
			  u32 pid, u32 seq, u16 flags)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
	int err = -EOPNOTSUPP;

	if (!ops)
		return err;

	if (!tb[DCB_ATTR_IEEE])
		return -EINVAL;

	err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX,
			       tb[DCB_ATTR_IEEE], dcbnl_ieee_policy);
	if (err)
		return err;

	/* apply ETS, maxrate and PFC in that order; abort on first failure */
	if (ieee[DCB_ATTR_IEEE_ETS] && ops->ieee_setets) {
		struct ieee_ets *ets = nla_data(ieee[DCB_ATTR_IEEE_ETS]);
		err = ops->ieee_setets(netdev, ets);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_IEEE_MAXRATE] && ops->ieee_setmaxrate) {
		struct ieee_maxrate *maxrate =
			nla_data(ieee[DCB_ATTR_IEEE_MAXRATE]);
		err = ops->ieee_setmaxrate(netdev, maxrate);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
		struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
		err = ops->ieee_setpfc(netdev, pfc);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
		struct nlattr *attr;
		int rem;

		/* each DCB_ATTR_IEEE_APP entry goes to the driver when it
		 * implements ieee_setapp, otherwise to the shared app list
		 */
		nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
			struct dcb_app *app_data;
			if (nla_type(attr) != DCB_ATTR_IEEE_APP)
				continue;
			app_data = nla_data(attr);
			if (ops->ieee_setapp)
				err = ops->ieee_setapp(netdev, app_data);
			else
				err = dcb_ieee_setapp(netdev, app_data);
			if (err)
				goto err;
		}
	}

err:
	/* acknowledge (possibly with the error) and notify listeners */
	dcbnl_reply(err, RTM_SETDCB, DCB_CMD_IEEE_SET, DCB_ATTR_IEEE,
		    pid, seq, flags);
	dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, seq, 0);
	return err;
}
1685 
/*
 * dcbnl_ieee_get - handle DCB_CMD_IEEE_GET: unicast the device's IEEE
 * 802.1Qaz state (as built by dcbnl_ieee_fill()) to the requester.
 * Returns 0 on success or a negative errno.
 */
static int dcbnl_ieee_get(struct net_device *netdev, struct nlattr **tb,
			  u32 pid, u32 seq, u16 flags)
{
	struct net *net = dev_net(netdev);
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	int err;

	if (!ops)
		return -EOPNOTSUPP;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	nlh = nlmsg_put(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
	if (nlh == NULL) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_IEEE_GET;

	err = dcbnl_ieee_fill(skb, netdev);

	if (err < 0) {
		/* fill failed: unwind the header and drop the skb */
		nlmsg_cancel(skb, nlh);
		kfree_skb(skb);
	} else {
		nlmsg_end(skb, nlh);
		err = rtnl_unicast(skb, net, pid);
	}

	return err;
}
1725 
/*
 * dcbnl_ieee_del - handle DCB_CMD_IEEE_DEL: delete IEEE APP entries.
 *
 * Only DCB_ATTR_IEEE_APP_TABLE entries are processed; each one is removed
 * via the driver's ieee_delapp when present, otherwise from the shared
 * app list.  A reply and a notification are emitted even on error.
 */
static int dcbnl_ieee_del(struct net_device *netdev, struct nlattr **tb,
			  u32 pid, u32 seq, u16 flags)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
	int err = -EOPNOTSUPP;

	if (!ops)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_IEEE])
		return -EINVAL;

	err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX,
			       tb[DCB_ATTR_IEEE], dcbnl_ieee_policy);
	if (err)
		return err;

	if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
		struct nlattr *attr;
		int rem;

		nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
			struct dcb_app *app_data;

			if (nla_type(attr) != DCB_ATTR_IEEE_APP)
				continue;
			app_data = nla_data(attr);
			if (ops->ieee_delapp)
				err = ops->ieee_delapp(netdev, app_data);
			else
				err = dcb_ieee_delapp(netdev, app_data);
			if (err)
				goto err;
		}
	}

err:
	/* acknowledge (possibly with the error) and notify listeners */
	dcbnl_reply(err, RTM_SETDCB, DCB_CMD_IEEE_DEL, DCB_ATTR_IEEE,
		    pid, seq, flags);
	dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_DEL, seq, 0);
	return err;
}
1769 
1770 
1771 /* DCBX configuration */
1772 static int dcbnl_getdcbx(struct net_device *netdev, struct nlattr **tb,
1773 			 u32 pid, u32 seq, u16 flags)
1774 {
1775 	int ret;
1776 
1777 	if (!netdev->dcbnl_ops->getdcbx)
1778 		return -EOPNOTSUPP;
1779 
1780 	ret = dcbnl_reply(netdev->dcbnl_ops->getdcbx(netdev), RTM_GETDCB,
1781 			  DCB_CMD_GDCBX, DCB_ATTR_DCBX, pid, seq, flags);
1782 
1783 	return ret;
1784 }
1785 
1786 static int dcbnl_setdcbx(struct net_device *netdev, struct nlattr **tb,
1787 			 u32 pid, u32 seq, u16 flags)
1788 {
1789 	int ret;
1790 	u8 value;
1791 
1792 	if (!netdev->dcbnl_ops->setdcbx)
1793 		return -EOPNOTSUPP;
1794 
1795 	if (!tb[DCB_ATTR_DCBX])
1796 		return -EINVAL;
1797 
1798 	value = nla_get_u8(tb[DCB_ATTR_DCBX]);
1799 
1800 	ret = dcbnl_reply(netdev->dcbnl_ops->setdcbx(netdev, value),
1801 			  RTM_SETDCB, DCB_CMD_SDCBX, DCB_ATTR_DCBX,
1802 			  pid, seq, flags);
1803 
1804 	return ret;
1805 }
1806 
1807 static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlattr **tb,
1808 			    u32 pid, u32 seq, u16 flags)
1809 {
1810 	struct sk_buff *dcbnl_skb;
1811 	struct nlmsghdr *nlh;
1812 	struct dcbmsg *dcb;
1813 	struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1], *nest;
1814 	u8 value;
1815 	int ret, i;
1816 	int getall = 0;
1817 
1818 	if (!netdev->dcbnl_ops->getfeatcfg)
1819 		return -EOPNOTSUPP;
1820 
1821 	if (!tb[DCB_ATTR_FEATCFG])
1822 		return -EINVAL;
1823 
1824 	ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG],
1825 			       dcbnl_featcfg_nest);
1826 	if (ret)
1827 		goto err_out;
1828 
1829 	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1830 	if (!dcbnl_skb) {
1831 		ret = -ENOBUFS;
1832 		goto err_out;
1833 	}
1834 
1835 	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
1836 
1837 	dcb = NLMSG_DATA(nlh);
1838 	dcb->dcb_family = AF_UNSPEC;
1839 	dcb->cmd = DCB_CMD_GFEATCFG;
1840 
1841 	nest = nla_nest_start(dcbnl_skb, DCB_ATTR_FEATCFG);
1842 	if (!nest) {
1843 		ret = -EMSGSIZE;
1844 		goto nla_put_failure;
1845 	}
1846 
1847 	if (data[DCB_FEATCFG_ATTR_ALL])
1848 		getall = 1;
1849 
1850 	for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
1851 		if (!getall && !data[i])
1852 			continue;
1853 
1854 		ret = netdev->dcbnl_ops->getfeatcfg(netdev, i, &value);
1855 		if (!ret)
1856 			ret = nla_put_u8(dcbnl_skb, i, value);
1857 
1858 		if (ret) {
1859 			nla_nest_cancel(dcbnl_skb, nest);
1860 			goto nla_put_failure;
1861 		}
1862 	}
1863 	nla_nest_end(dcbnl_skb, nest);
1864 
1865 	nlmsg_end(dcbnl_skb, nlh);
1866 
1867 	return rtnl_unicast(dcbnl_skb, &init_net, pid);
1868 nla_put_failure:
1869 	nlmsg_cancel(dcbnl_skb, nlh);
1870 nlmsg_failure:
1871 	kfree_skb(dcbnl_skb);
1872 err_out:
1873 	return ret;
1874 }
1875 
1876 static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlattr **tb,
1877 			    u32 pid, u32 seq, u16 flags)
1878 {
1879 	struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1];
1880 	int ret, i;
1881 	u8 value;
1882 
1883 	if (!netdev->dcbnl_ops->setfeatcfg)
1884 		return -ENOTSUPP;
1885 
1886 	if (!tb[DCB_ATTR_FEATCFG])
1887 		return -EINVAL;
1888 
1889 	ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG],
1890 			       dcbnl_featcfg_nest);
1891 
1892 	if (ret)
1893 		goto err;
1894 
1895 	for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
1896 		if (data[i] == NULL)
1897 			continue;
1898 
1899 		value = nla_get_u8(data[i]);
1900 
1901 		ret = netdev->dcbnl_ops->setfeatcfg(netdev, i, value);
1902 
1903 		if (ret)
1904 			goto err;
1905 	}
1906 err:
1907 	dcbnl_reply(ret, RTM_SETDCB, DCB_CMD_SFEATCFG, DCB_ATTR_FEATCFG,
1908 		    pid, seq, flags);
1909 
1910 	return ret;
1911 }
1912 
/* Handle CEE DCBX GET commands.
 *
 * Builds the full CEE state via dcbnl_cee_fill() and unicasts it back to
 * the requesting socket.  Returns 0 on success or a negative errno.
 */
static int dcbnl_cee_get(struct net_device *netdev, struct nlattr **tb,
			 u32 pid, u32 seq, u16 flags)
{
	struct net *net = dev_net(netdev);
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	int err;

	if (!ops)
		return -EOPNOTSUPP;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	nlh = nlmsg_put(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
	if (nlh == NULL) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_CEE_GET;

	err = dcbnl_cee_fill(skb, netdev);

	if (err < 0) {
		/* fill failed: unwind the header and drop the skb */
		nlmsg_cancel(skb, nlh);
		nlmsg_free(skb);
	} else {
		nlmsg_end(skb, nlh);
		err = rtnl_unicast(skb, net, pid);
	}
	return err;
}
1952 
/*
 * Descriptor for answering one DCB netlink command: the netlink message
 * type used for the reply, plus the callback that fills its payload.
 */
struct reply_func {
	/* reply netlink message type */
	int	type;

	/* function to fill message contents */
	int   (*cb)(struct net_device *, struct nlmsghdr *, u32,
		    struct nlattr **, struct sk_buff *);
};
1961 
/* Per-command reply table indexed by DCB_CMD_*; a NULL ->cb makes
 * dcb_doit() reject the command with -EOPNOTSUPP.
 */
static const struct reply_func reply_funcs[DCB_CMD_MAX+1] = {
	/* FIXME: add reply defs */
};
1965 
/*
 * dcb_doit - rtnetlink doit handler for RTM_GETDCB / RTM_SETDCB.
 *
 * Validates the request (init_net only, known command, DCB_ATTR_IFNAME
 * present, device has dcbnl_ops), sends a reply built by the command's
 * reply_func, then falls through to the legacy per-command handlers in
 * the switch below.
 *
 * NOTE(review): with reply_funcs still empty (see FIXME above), fn->cb is
 * always NULL and every command returns -EOPNOTSUPP before reaching the
 * legacy switch -- this looks like a snapshot taken mid-refactor; confirm
 * against the surrounding patch series.
 */
static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	struct dcbmsg  *dcb = (struct dcbmsg *)NLMSG_DATA(nlh);
	struct nlattr *tb[DCB_ATTR_MAX + 1];
	u32 pid = skb ? NETLINK_CB(skb).pid : 0;
	int ret = -EINVAL;
	struct sk_buff *reply_skb;
	struct nlmsghdr *reply_nlh;
	const struct reply_func *fn;

	if (!net_eq(net, &init_net))
		return -EINVAL;

	ret = nlmsg_parse(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX,
			  dcbnl_rtnl_policy);
	if (ret < 0)
		return ret;

	if (dcb->cmd > DCB_CMD_MAX)
		return -EINVAL;

	/* check if a reply function has been defined for the command */
	fn = &reply_funcs[dcb->cmd];
	if (!fn->cb)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_IFNAME])
		return -EINVAL;

	/* takes a reference; released via dev_put() at out/errout */
	netdev = dev_get_by_name(&init_net, nla_data(tb[DCB_ATTR_IFNAME]));
	if (!netdev)
		return -EINVAL;

	if (!netdev->dcbnl_ops)
		goto errout;

	reply_skb = dcbnl_newmsg(fn->type, dcb->cmd, pid, nlh->nlmsg_seq,
				 nlh->nlmsg_flags, &reply_nlh);
	if (!reply_skb) {
		ret = -ENOBUFS;
		goto out;
	}

	ret = fn->cb(netdev, nlh, nlh->nlmsg_seq, tb, reply_skb);
	if (ret < 0) {
		nlmsg_free(reply_skb);
		goto out;
	}

	nlmsg_end(reply_skb, reply_nlh);

	ret = rtnl_unicast(reply_skb, &init_net, pid);
	if (ret)
		goto out;

	/* legacy per-command dispatch (see NOTE above) */
	switch (dcb->cmd) {
	case DCB_CMD_GSTATE:
		ret = dcbnl_getstate(netdev, tb, pid, nlh->nlmsg_seq,
		                     nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PFC_GCFG:
		ret = dcbnl_getpfccfg(netdev, tb, pid, nlh->nlmsg_seq,
		                      nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_GPERM_HWADDR:
		ret = dcbnl_getperm_hwaddr(netdev, tb, pid, nlh->nlmsg_seq,
		                           nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PGTX_GCFG:
		ret = dcbnl_pgtx_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
		                        nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PGRX_GCFG:
		ret = dcbnl_pgrx_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
		                        nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_BCN_GCFG:
		ret = dcbnl_bcn_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
		                       nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_SSTATE:
		ret = dcbnl_setstate(netdev, tb, pid, nlh->nlmsg_seq,
		                     nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PFC_SCFG:
		ret = dcbnl_setpfccfg(netdev, tb, pid, nlh->nlmsg_seq,
		                      nlh->nlmsg_flags);
		goto out;

	case DCB_CMD_SET_ALL:
		ret = dcbnl_setall(netdev, tb, pid, nlh->nlmsg_seq,
		                   nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PGTX_SCFG:
		ret = dcbnl_pgtx_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
		                        nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PGRX_SCFG:
		ret = dcbnl_pgrx_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
		                        nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_GCAP:
		ret = dcbnl_getcap(netdev, tb, pid, nlh->nlmsg_seq,
		                   nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_GNUMTCS:
		ret = dcbnl_getnumtcs(netdev, tb, pid, nlh->nlmsg_seq,
		                      nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_SNUMTCS:
		ret = dcbnl_setnumtcs(netdev, tb, pid, nlh->nlmsg_seq,
		                      nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PFC_GSTATE:
		ret = dcbnl_getpfcstate(netdev, tb, pid, nlh->nlmsg_seq,
		                        nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PFC_SSTATE:
		ret = dcbnl_setpfcstate(netdev, tb, pid, nlh->nlmsg_seq,
		                        nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_BCN_SCFG:
		ret = dcbnl_bcn_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
		                       nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_GAPP:
		ret = dcbnl_getapp(netdev, tb, pid, nlh->nlmsg_seq,
		                   nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_SAPP:
		ret = dcbnl_setapp(netdev, tb, pid, nlh->nlmsg_seq,
		                   nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_IEEE_SET:
		ret = dcbnl_ieee_set(netdev, tb, pid, nlh->nlmsg_seq,
				     nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_IEEE_GET:
		ret = dcbnl_ieee_get(netdev, tb, pid, nlh->nlmsg_seq,
				     nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_IEEE_DEL:
		ret = dcbnl_ieee_del(netdev, tb, pid, nlh->nlmsg_seq,
				     nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_GDCBX:
		ret = dcbnl_getdcbx(netdev, tb, pid, nlh->nlmsg_seq,
				    nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_SDCBX:
		ret = dcbnl_setdcbx(netdev, tb, pid, nlh->nlmsg_seq,
				    nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_GFEATCFG:
		ret = dcbnl_getfeatcfg(netdev, tb, pid, nlh->nlmsg_seq,
				       nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_SFEATCFG:
		ret = dcbnl_setfeatcfg(netdev, tb, pid, nlh->nlmsg_seq,
				       nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_CEE_GET:
		ret = dcbnl_cee_get(netdev, tb, pid, nlh->nlmsg_seq,
				    nlh->nlmsg_flags);
		goto out;
	default:
		goto errout;
	}
errout:
	ret = -EINVAL;
out:
	dev_put(netdev);
	return ret;
}
2142 
2143 /**
2144  * dcb_getapp - retrieve the DCBX application user priority
2145  *
2146  * On success returns a non-zero 802.1p user priority bitmap
2147  * otherwise returns 0 as the invalid user priority bitmap to
2148  * indicate an error.
2149  */
2150 u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
2151 {
2152 	struct dcb_app_type *itr;
2153 	u8 prio = 0;
2154 
2155 	spin_lock(&dcb_lock);
2156 	list_for_each_entry(itr, &dcb_app_list, list) {
2157 		if (itr->app.selector == app->selector &&
2158 		    itr->app.protocol == app->protocol &&
2159 		    itr->ifindex == dev->ifindex) {
2160 			prio = itr->app.priority;
2161 			break;
2162 		}
2163 	}
2164 	spin_unlock(&dcb_lock);
2165 
2166 	return prio;
2167 }
2168 EXPORT_SYMBOL(dcb_getapp);
2169 
2170 /**
2171  * dcb_setapp - add CEE dcb application data to app list
2172  *
2173  * Priority 0 is an invalid priority in CEE spec. This routine
2174  * removes applications from the app list if the priority is
2175  * set to zero.
2176  */
2177 int dcb_setapp(struct net_device *dev, struct dcb_app *new)
2178 {
2179 	struct dcb_app_type *itr;
2180 	struct dcb_app_type event;
2181 
2182 	event.ifindex = dev->ifindex;
2183 	memcpy(&event.app, new, sizeof(event.app));
2184 	if (dev->dcbnl_ops->getdcbx)
2185 		event.dcbx = dev->dcbnl_ops->getdcbx(dev);
2186 
2187 	spin_lock(&dcb_lock);
2188 	/* Search for existing match and replace */
2189 	list_for_each_entry(itr, &dcb_app_list, list) {
2190 		if (itr->app.selector == new->selector &&
2191 		    itr->app.protocol == new->protocol &&
2192 		    itr->ifindex == dev->ifindex) {
2193 			if (new->priority)
2194 				itr->app.priority = new->priority;
2195 			else {
2196 				list_del(&itr->list);
2197 				kfree(itr);
2198 			}
2199 			goto out;
2200 		}
2201 	}
2202 	/* App type does not exist add new application type */
2203 	if (new->priority) {
2204 		struct dcb_app_type *entry;
2205 		entry = kmalloc(sizeof(struct dcb_app_type), GFP_ATOMIC);
2206 		if (!entry) {
2207 			spin_unlock(&dcb_lock);
2208 			return -ENOMEM;
2209 		}
2210 
2211 		memcpy(&entry->app, new, sizeof(*new));
2212 		entry->ifindex = dev->ifindex;
2213 		list_add(&entry->list, &dcb_app_list);
2214 	}
2215 out:
2216 	spin_unlock(&dcb_lock);
2217 	call_dcbevent_notifiers(DCB_APP_EVENT, &event);
2218 	return 0;
2219 }
2220 EXPORT_SYMBOL(dcb_setapp);
2221 
2222 /**
2223  * dcb_ieee_getapp_mask - retrieve the IEEE DCB application priority
2224  *
2225  * Helper routine which on success returns a non-zero 802.1Qaz user
2226  * priority bitmap otherwise returns 0 to indicate the dcb_app was
2227  * not found in APP list.
2228  */
2229 u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
2230 {
2231 	struct dcb_app_type *itr;
2232 	u8 prio = 0;
2233 
2234 	spin_lock(&dcb_lock);
2235 	list_for_each_entry(itr, &dcb_app_list, list) {
2236 		if (itr->app.selector == app->selector &&
2237 		    itr->app.protocol == app->protocol &&
2238 		    itr->ifindex == dev->ifindex) {
2239 			prio |= 1 << itr->app.priority;
2240 		}
2241 	}
2242 	spin_unlock(&dcb_lock);
2243 
2244 	return prio;
2245 }
2246 EXPORT_SYMBOL(dcb_ieee_getapp_mask);
2247 
2248 /**
2249  * dcb_ieee_setapp - add IEEE dcb application data to app list
2250  *
2251  * This adds Application data to the list. Multiple application
2252  * entries may exists for the same selector and protocol as long
2253  * as the priorities are different.
2254  */
2255 int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
2256 {
2257 	struct dcb_app_type *itr, *entry;
2258 	struct dcb_app_type event;
2259 	int err = 0;
2260 
2261 	event.ifindex = dev->ifindex;
2262 	memcpy(&event.app, new, sizeof(event.app));
2263 	if (dev->dcbnl_ops->getdcbx)
2264 		event.dcbx = dev->dcbnl_ops->getdcbx(dev);
2265 
2266 	spin_lock(&dcb_lock);
2267 	/* Search for existing match and abort if found */
2268 	list_for_each_entry(itr, &dcb_app_list, list) {
2269 		if (itr->app.selector == new->selector &&
2270 		    itr->app.protocol == new->protocol &&
2271 		    itr->app.priority == new->priority &&
2272 		    itr->ifindex == dev->ifindex) {
2273 			err = -EEXIST;
2274 			goto out;
2275 		}
2276 	}
2277 
2278 	/* App entry does not exist add new entry */
2279 	entry = kmalloc(sizeof(struct dcb_app_type), GFP_ATOMIC);
2280 	if (!entry) {
2281 		err = -ENOMEM;
2282 		goto out;
2283 	}
2284 
2285 	memcpy(&entry->app, new, sizeof(*new));
2286 	entry->ifindex = dev->ifindex;
2287 	list_add(&entry->list, &dcb_app_list);
2288 out:
2289 	spin_unlock(&dcb_lock);
2290 	if (!err)
2291 		call_dcbevent_notifiers(DCB_APP_EVENT, &event);
2292 	return err;
2293 }
2294 EXPORT_SYMBOL(dcb_ieee_setapp);
2295 
2296 /**
2297  * dcb_ieee_delapp - delete IEEE dcb application data from list
2298  *
2299  * This removes a matching APP data from the APP list
2300  */
2301 int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
2302 {
2303 	struct dcb_app_type *itr;
2304 	struct dcb_app_type event;
2305 	int err = -ENOENT;
2306 
2307 	event.ifindex = dev->ifindex;
2308 	memcpy(&event.app, del, sizeof(event.app));
2309 	if (dev->dcbnl_ops->getdcbx)
2310 		event.dcbx = dev->dcbnl_ops->getdcbx(dev);
2311 
2312 	spin_lock(&dcb_lock);
2313 	/* Search for existing match and remove it. */
2314 	list_for_each_entry(itr, &dcb_app_list, list) {
2315 		if (itr->app.selector == del->selector &&
2316 		    itr->app.protocol == del->protocol &&
2317 		    itr->app.priority == del->priority &&
2318 		    itr->ifindex == dev->ifindex) {
2319 			list_del(&itr->list);
2320 			kfree(itr);
2321 			err = 0;
2322 			goto out;
2323 		}
2324 	}
2325 
2326 out:
2327 	spin_unlock(&dcb_lock);
2328 	if (!err)
2329 		call_dcbevent_notifiers(DCB_APP_EVENT, &event);
2330 	return err;
2331 }
2332 EXPORT_SYMBOL(dcb_ieee_delapp);
2333 
2334 static void dcb_flushapp(void)
2335 {
2336 	struct dcb_app_type *app;
2337 	struct dcb_app_type *tmp;
2338 
2339 	spin_lock(&dcb_lock);
2340 	list_for_each_entry_safe(app, tmp, &dcb_app_list, list) {
2341 		list_del(&app->list);
2342 		kfree(app);
2343 	}
2344 	spin_unlock(&dcb_lock);
2345 }
2346 
/* Module init: set up the app list and hook DCB get/set messages into
 * rtnetlink. Both message types are handled by the same doit routine.
 */
static int __init dcbnl_init(void)
{
	INIT_LIST_HEAD(&dcb_app_list);

	/* PF_UNSPEC: DCB messages are not tied to a protocol family */
	rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL, NULL);

	return 0;
}
module_init(dcbnl_init);
2357 
/* Module exit: unhook from rtnetlink first so no new app entries can be
 * added, then release everything left on the app list.
 */
static void __exit dcbnl_exit(void)
{
	rtnl_unregister(PF_UNSPEC, RTM_GETDCB);
	rtnl_unregister(PF_UNSPEC, RTM_SETDCB);
	dcb_flushapp();
}
module_exit(dcbnl_exit);
2365