/*
 * net-sysfs.c - network device class and attributes
 *
 * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <net/switchdev.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/nsproxy.h>
#include <net/sock.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include "net-sysfs.h"

#ifdef CONFIG_SYSFS
static const char fmt_hex[] = "%#x\n";
static const char fmt_long_hex[] = "%#lx\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_udec[] = "%u\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";

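/* A netdev counts as "alive" until unregistration begins, i.e. while
 * reg_state is NETREG_UNINITIALIZED or NETREG_REGISTERED; the show/store
 * helpers below refuse to touch a device past that point.
 */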
static inline int dev_isalive(const struct net_device *dev)
{
	return dev->reg_state <= NETREG_REGISTERED;
}

/* use same locking rules as GIF* ioctl's */
static ssize_t netdev_show(const struct device *dev,
			   struct device_attribute *attr, char *buf,
			   ssize_t (*format)(const struct net_device *, char *))
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(ndev))
		ret = (*format)(ndev, buf);
	read_unlock(&dev_base_lock);

	return ret;
}

/* generate a show function for simple field */
#define NETDEVICE_SHOW(field, format_string)				\
static ssize_t format_##field(const struct net_device *dev, char *buf)	\
{									\
	return sprintf(buf, format_string, dev->field);			\
}									\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)	\
{									\
	return netdev_show(dev, attr, buf, format_##field);		\
}									\

#define NETDEVICE_SHOW_RO(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RO(field)

#define NETDEVICE_SHOW_RW(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RW(field)

/* use same locking and permission rules as SIF* ioctl's */
static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len,
			    int (*set)(struct net_device *, unsigned long))
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	unsigned long new;
	int ret = -EINVAL;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtoul(buf, 0, &new);
	if (ret)
		goto err;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		ret = (*set)(netdev, new);
		if (ret == 0)
			ret = len;
	}
	rtnl_unlock();
 err:
	return ret;
}

NETDEVICE_SHOW_RO(dev_id, fmt_hex);
NETDEVICE_SHOW_RO(dev_port, fmt_dec);
NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec);
NETDEVICE_SHOW_RO(addr_len, fmt_dec);
NETDEVICE_SHOW_RO(iflink, fmt_dec);
NETDEVICE_SHOW_RO(ifindex, fmt_dec);
NETDEVICE_SHOW_RO(type, fmt_dec);
NETDEVICE_SHOW_RO(link_mode, fmt_dec);

static ssize_t format_name_assign_type(const struct net_device *dev, char *buf)
{
	return sprintf(buf, fmt_dec, dev->name_assign_type);
}

static ssize_t name_assign_type_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (ndev->name_assign_type != NET_NAME_UNKNOWN)
		ret = netdev_show(dev, attr, buf, format_name_assign_type);

	return ret;
}
static DEVICE_ATTR_RO(name_assign_type);

/* use same locking rules as GIFHWADDR ioctl's */
static ssize_t address_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(ndev))
		ret = sysfs_format_mac(buf, ndev->dev_addr, ndev->addr_len);
	read_unlock(&dev_base_lock);
	return ret;
}
static DEVICE_ATTR_RO(address);

static ssize_t broadcast_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	if (dev_isalive(ndev))
		return sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len);
	return -EINVAL;
}
static DEVICE_ATTR_RO(broadcast);

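/* Called via netdev_store() with the RTNL held: writing zero/non-zero
 * forces the carrier state of a running device off or on through
 * dev_change_carrier(), which invokes the driver's ndo_change_carrier hook.
 */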
static int change_carrier(struct net_device *dev, unsigned long new_carrier)
{
	if (!netif_running(dev))
		return -EINVAL;
	return dev_change_carrier(dev, (bool) new_carrier);
}

static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_carrier);
}

static ssize_t carrier_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	if (netif_running(netdev)) {
		return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));
	}
	return -EINVAL;
}
static DEVICE_ATTR_RW(carrier);

static ssize_t speed_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_cmd cmd;
		if (!__ethtool_get_settings(netdev, &cmd))
			ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd));
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(speed);

static ssize_t duplex_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_cmd cmd;
		if (!__ethtool_get_settings(netdev, &cmd)) {
			const char *duplex;
			switch (cmd.duplex) {
			case DUPLEX_HALF:
				duplex = "half";
				break;
			case DUPLEX_FULL:
				duplex = "full";
				break;
			default:
				duplex = "unknown";
				break;
			}
			ret = sprintf(buf, "%s\n", duplex);
		}
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(duplex);

static ssize_t dormant_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_dormant(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RO(dormant);

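/* Indices must line up with the IF_OPER_* values (the RFC 2863 operational
 * states) from <uapi/linux/if.h>: operstate_show() indexes this array
 * directly with netdev->operstate.
 */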
static const char *const operstates[] = {
	"unknown",
	"notpresent", /* currently unused */
	"down",
	"lowerlayerdown",
	"testing", /* currently unused */
	"dormant",
	"up"
};

static ssize_t operstate_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	unsigned char operstate;

	read_lock(&dev_base_lock);
	operstate = netdev->operstate;
	if (!netif_running(netdev))
		operstate = IF_OPER_DOWN;
	read_unlock(&dev_base_lock);

	if (operstate >= ARRAY_SIZE(operstates))
		return -EINVAL; /* should not happen */

	return sprintf(buf, "%s\n", operstates[operstate]);
}
static DEVICE_ATTR_RO(operstate);

static ssize_t carrier_changes_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	return sprintf(buf, fmt_dec,
		       atomic_read(&netdev->carrier_changes));
}
static DEVICE_ATTR_RO(carrier_changes);

/* read-write attributes */

static int change_mtu(struct net_device *dev, unsigned long new_mtu)
{
	return dev_set_mtu(dev, (int) new_mtu);
}

static ssize_t mtu_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_mtu);
}
NETDEVICE_SHOW_RW(mtu, fmt_dec);

static int change_flags(struct net_device *dev, unsigned long new_flags)
{
	return dev_change_flags(dev, (unsigned int) new_flags);
}

static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_flags);
}
NETDEVICE_SHOW_RW(flags, fmt_hex);

static int change_tx_queue_len(struct net_device *dev, unsigned long new_len)
{
	dev->tx_queue_len = new_len;
	return 0;
}

static ssize_t tx_queue_len_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, change_tx_queue_len);
}
NETDEVICE_SHOW_RW(tx_queue_len, fmt_ulong);

static int change_gro_flush_timeout(struct net_device *dev, unsigned long val)
{
	dev->gro_flush_timeout = val;
	return 0;
}

static ssize_t gro_flush_timeout_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, change_gro_flush_timeout);
}
NETDEVICE_SHOW_RW(gro_flush_timeout, fmt_ulong);

static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	size_t count = len;
	ssize_t ret;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	/* ignore trailing newline */
	if (len > 0 && buf[len - 1] == '\n')
		--count;

	if (!rtnl_trylock())
		return restart_syscall();
	ret = dev_set_alias(netdev, buf, count);
	rtnl_unlock();

	return ret < 0 ? ret : len;
}

static ssize_t ifalias_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = 0;

	if (!rtnl_trylock())
		return restart_syscall();
	if (netdev->ifalias)
		ret = sprintf(buf, "%s\n", netdev->ifalias);
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RW(ifalias);

static int change_group(struct net_device *dev, unsigned long new_group)
{
	dev_set_group(dev, (int) new_group);
	return 0;
}

static ssize_t group_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_group);
}
NETDEVICE_SHOW(group, fmt_dec);
static DEVICE_ATTR(netdev_group, S_IRUGO | S_IWUSR, group_show, group_store);

static ssize_t phys_port_id_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		struct netdev_phys_item_id ppid;

		ret = dev_get_phys_port_id(netdev, &ppid);
		if (!ret)
			ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_port_id);

static ssize_t phys_switch_id_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		struct netdev_phys_item_id ppid;

		ret = netdev_switch_parent_id_get(netdev, &ppid);
		if (!ret)
			ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_switch_id);

static struct attribute *net_class_attrs[] = {
	&dev_attr_netdev_group.attr,
	&dev_attr_type.attr,
	&dev_attr_dev_id.attr,
	&dev_attr_dev_port.attr,
	&dev_attr_iflink.attr,
	&dev_attr_ifindex.attr,
	&dev_attr_name_assign_type.attr,
	&dev_attr_addr_assign_type.attr,
	&dev_attr_addr_len.attr,
	&dev_attr_link_mode.attr,
	&dev_attr_address.attr,
	&dev_attr_broadcast.attr,
	&dev_attr_speed.attr,
	&dev_attr_duplex.attr,
	&dev_attr_dormant.attr,
	&dev_attr_operstate.attr,
	&dev_attr_carrier_changes.attr,
	&dev_attr_ifalias.attr,
	&dev_attr_carrier.attr,
	&dev_attr_mtu.attr,
	&dev_attr_flags.attr,
	&dev_attr_tx_queue_len.attr,
	&dev_attr_gro_flush_timeout.attr,
	&dev_attr_phys_port_id.attr,
	&dev_attr_phys_switch_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(net_class);

/* Show a given attribute in the statistics group */
static ssize_t netstat_show(const struct device *d,
			    struct device_attribute *attr, char *buf,
			    unsigned long offset)
{
	struct net_device *dev = to_net_dev(d);
	ssize_t ret = -EINVAL;

	WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
			offset % sizeof(u64) != 0);

	read_lock(&dev_base_lock);
	if (dev_isalive(dev)) {
		struct rtnl_link_stats64 temp;
		const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

		ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *) stats) + offset));
	}
	read_unlock(&dev_base_lock);
	return ret;
}

/* generate a read-only statistics attribute */
#define NETSTAT_ENTRY(name)						\
static ssize_t name##_show(struct device *d,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	return netstat_show(d, attr, buf,				\
			    offsetof(struct rtnl_link_stats64, name));	\
}									\
static DEVICE_ATTR_RO(name)

NETSTAT_ENTRY(rx_packets);
NETSTAT_ENTRY(tx_packets);
NETSTAT_ENTRY(rx_bytes);
NETSTAT_ENTRY(tx_bytes);
NETSTAT_ENTRY(rx_errors);
NETSTAT_ENTRY(tx_errors);
NETSTAT_ENTRY(rx_dropped);
NETSTAT_ENTRY(tx_dropped);
NETSTAT_ENTRY(multicast);
NETSTAT_ENTRY(collisions);
NETSTAT_ENTRY(rx_length_errors);
NETSTAT_ENTRY(rx_over_errors);
NETSTAT_ENTRY(rx_crc_errors);
NETSTAT_ENTRY(rx_frame_errors);
NETSTAT_ENTRY(rx_fifo_errors);
NETSTAT_ENTRY(rx_missed_errors);
NETSTAT_ENTRY(tx_aborted_errors);
NETSTAT_ENTRY(tx_carrier_errors);
NETSTAT_ENTRY(tx_fifo_errors);
NETSTAT_ENTRY(tx_heartbeat_errors);
NETSTAT_ENTRY(tx_window_errors);
NETSTAT_ENTRY(rx_compressed);
NETSTAT_ENTRY(tx_compressed);

static struct attribute *netstat_attrs[] = {
	&dev_attr_rx_packets.attr,
	&dev_attr_tx_packets.attr,
	&dev_attr_rx_bytes.attr,
	&dev_attr_tx_bytes.attr,
	&dev_attr_rx_errors.attr,
	&dev_attr_tx_errors.attr,
	&dev_attr_rx_dropped.attr,
	&dev_attr_tx_dropped.attr,
	&dev_attr_multicast.attr,
	&dev_attr_collisions.attr,
	&dev_attr_rx_length_errors.attr,
	&dev_attr_rx_over_errors.attr,
	&dev_attr_rx_crc_errors.attr,
	&dev_attr_rx_frame_errors.attr,
	&dev_attr_rx_fifo_errors.attr,
	&dev_attr_rx_missed_errors.attr,
	&dev_attr_tx_aborted_errors.attr,
	&dev_attr_tx_carrier_errors.attr,
	&dev_attr_tx_fifo_errors.attr,
	&dev_attr_tx_heartbeat_errors.attr,
	&dev_attr_tx_window_errors.attr,
	&dev_attr_rx_compressed.attr,
	&dev_attr_tx_compressed.attr,
	NULL
};

static struct attribute_group netstat_group = {
	.name  = "statistics",
	.attrs  = netstat_attrs,
};

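/* Empty attribute group: it only creates the "wireless" directory under
 * /sys/class/net/<iface> for devices with wireless extensions or a
 * cfg80211 registration (see netdev_register_kobject() below).
 */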
#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
static struct attribute *wireless_attrs[] = {
	NULL
};

static struct attribute_group wireless_group = {
	.name = "wireless",
	.attrs = wireless_attrs,
};
#endif

#else /* CONFIG_SYSFS */
#define net_class_groups	NULL
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_SYSFS
#define to_rx_queue_attr(_attr) container_of(_attr,		\
    struct rx_queue_attribute, attr)

#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)

static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
				  char *buf)
{
	struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, attribute, buf);
}

static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
				   const char *buf, size_t count)
{
	struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, attribute, buf, count);
}

static const struct sysfs_ops rx_queue_sysfs_ops = {
	.show = rx_queue_attr_show,
	.store = rx_queue_attr_store,
};

#ifdef CONFIG_RPS
static ssize_t show_rps_map(struct netdev_rx_queue *queue,
			    struct rx_queue_attribute *attribute, char *buf)
{
	struct rps_map *map;
	cpumask_var_t mask;
	int i, len;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	rcu_read_lock();
	map = rcu_dereference(queue->rps_map);
	if (map)
		for (i = 0; i < map->len; i++)
			cpumask_set_cpu(map->cpus[i], mask);

	len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
	rcu_read_unlock();
	free_cpumask_var(mask);

	return len < PAGE_SIZE ? len : -EINVAL;
}

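/* Writing rps_cpus installs a new RPS CPU map for this queue: parse the
 * cpumask, build an rps_map covering the requested online CPUs, then
 * publish it with rcu_assign_pointer() under rps_map_lock so readers in
 * the packet receive path never see a half-built map.
 */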
static ssize_t store_rps_map(struct netdev_rx_queue *queue,
		      struct rx_queue_attribute *attribute,
		      const char *buf, size_t len)
{
	struct rps_map *old_map, *map;
	cpumask_var_t mask;
	int err, cpu, i;
	static DEFINE_SPINLOCK(rps_map_lock);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	map = kzalloc(max_t(unsigned int,
	    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
	    GFP_KERNEL);
	if (!map) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}

	i = 0;
	for_each_cpu_and(cpu, mask, cpu_online_mask)
		map->cpus[i++] = cpu;

	if (i) {
		map->len = i;
	} else {
		kfree(map);
		map = NULL;
	}

	spin_lock(&rps_map_lock);
	old_map = rcu_dereference_protected(queue->rps_map,
					    lockdep_is_held(&rps_map_lock));
	rcu_assign_pointer(queue->rps_map, map);
	spin_unlock(&rps_map_lock);

	if (map)
		static_key_slow_inc(&rps_needed);
	if (old_map) {
		kfree_rcu(old_map, rcu);
		static_key_slow_dec(&rps_needed);
	}
	free_cpumask_var(mask);
	return len;
}

static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					   struct rx_queue_attribute *attr,
					   char *buf)
{
	struct rps_dev_flow_table *flow_table;
	unsigned long val = 0;

	rcu_read_lock();
	flow_table = rcu_dereference(queue->rps_flow_table);
	if (flow_table)
		val = (unsigned long)flow_table->mask + 1;
	rcu_read_unlock();

	return sprintf(buf, "%lu\n", val);
}

static void rps_dev_flow_table_release(struct rcu_head *rcu)
{
	struct rps_dev_flow_table *table = container_of(rcu,
	    struct rps_dev_flow_table, rcu);
	vfree(table);
}

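/* Writing rps_flow_cnt resizes the per-queue flow table.  The requested
 * count is rounded up to a power of two (so table->mask can be used for
 * cheap hash masking) and the old table is released via call_rcu() once
 * all readers are done with it.
 */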
static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
				     struct rx_queue_attribute *attr,
				     const char *buf, size_t len)
{
	unsigned long mask, count;
	struct rps_dev_flow_table *table, *old_table;
	static DEFINE_SPINLOCK(rps_dev_flow_lock);
	int rc;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	rc = kstrtoul(buf, 0, &count);
	if (rc < 0)
		return rc;

	if (count) {
		mask = count - 1;
		/* mask = roundup_pow_of_two(count) - 1;
		 * without overflows...
		 */
		while ((mask | (mask >> 1)) != mask)
			mask |= (mask >> 1);
		/* On 64 bit arches, must check mask fits in table->mask (u32),
		 * and on 32bit arches, must check
		 * RPS_DEV_FLOW_TABLE_SIZE(mask + 1) doesn't overflow.
		 */
#if BITS_PER_LONG > 32
		if (mask > (unsigned long)(u32)mask)
			return -EINVAL;
#else
		if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
				/ sizeof(struct rps_dev_flow)) {
			/* Enforce a limit to prevent overflow */
			return -EINVAL;
		}
#endif
		table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
		if (!table)
			return -ENOMEM;

		table->mask = mask;
		for (count = 0; count <= mask; count++)
			table->flows[count].cpu = RPS_NO_CPU;
	} else {
		table = NULL;
	}

	spin_lock(&rps_dev_flow_lock);
	old_table = rcu_dereference_protected(queue->rps_flow_table,
					      lockdep_is_held(&rps_dev_flow_lock));
	rcu_assign_pointer(queue->rps_flow_table, table);
	spin_unlock(&rps_dev_flow_lock);

	if (old_table)
		call_rcu(&old_table->rcu, rps_dev_flow_table_release);

	return len;
}

static struct rx_queue_attribute rps_cpus_attribute =
	__ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map);

static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute =
	__ATTR(rps_flow_cnt, S_IRUGO | S_IWUSR,
	    show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);
#endif /* CONFIG_RPS */

static struct attribute *rx_queue_default_attrs[] = {
#ifdef CONFIG_RPS
	&rps_cpus_attribute.attr,
	&rps_dev_flow_table_cnt_attribute.attr,
#endif
	NULL
};

static void rx_queue_release(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
#ifdef CONFIG_RPS
	struct rps_map *map;
	struct rps_dev_flow_table *flow_table;

	map = rcu_dereference_protected(queue->rps_map, 1);
	if (map) {
		RCU_INIT_POINTER(queue->rps_map, NULL);
		kfree_rcu(map, rcu);
	}

	flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
	if (flow_table) {
		RCU_INIT_POINTER(queue->rps_flow_table, NULL);
		call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
	}
#endif

	memset(kobj, 0, sizeof(*kobj));
	dev_put(queue->dev);
}

static const void *rx_queue_namespace(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
	struct device *dev = &queue->dev->dev;
	const void *ns = NULL;

	if (dev->class && dev->class->ns_type)
		ns = dev->class->namespace(dev);

	return ns;
}

static struct kobj_type rx_queue_ktype = {
	.sysfs_ops = &rx_queue_sysfs_ops,
	.release = rx_queue_release,
	.default_attrs = rx_queue_default_attrs,
	.namespace = rx_queue_namespace
};

static int rx_queue_add_kobject(struct net_device *dev, int index)
{
	struct netdev_rx_queue *queue = dev->_rx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	kobj->kset = dev->queues_kset;
	error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
	    "rx-%u", index);
	if (error)
		goto exit;

	if (dev->sysfs_rx_queue_group) {
		error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
		if (error)
			goto exit;
	}

	kobject_uevent(kobj, KOBJ_ADD);
	dev_hold(queue->dev);

	return error;
exit:
	kobject_put(kobj);
	return error;
}
#endif /* CONFIG_SYSFS */

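/* Grow or shrink the set of rx-<n> kobjects to match a change in
 * real_num_rx_queues.  On a failure while growing, queues added beyond
 * old_num are torn down again, so the caller sees either full success or
 * the original state plus an error code.
 */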
int
net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
	int i;
	int error = 0;

#ifndef CONFIG_RPS
	if (!dev->sysfs_rx_queue_group)
		return 0;
#endif
	for (i = old_num; i < new_num; i++) {
		error = rx_queue_add_kobject(dev, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num) {
		if (dev->sysfs_rx_queue_group)
			sysfs_remove_group(&dev->_rx[i].kobj,
					   dev->sysfs_rx_queue_group);
		kobject_put(&dev->_rx[i].kobj);
	}

	return error;
#else
	return 0;
#endif
}

#ifdef CONFIG_SYSFS
/*
 * netdev_queue sysfs structures and functions.
 */
struct netdev_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_queue *queue,
	    struct netdev_queue_attribute *attr, char *buf);
	ssize_t (*store)(struct netdev_queue *queue,
	    struct netdev_queue_attribute *attr, const char *buf, size_t len);
};
#define to_netdev_queue_attr(_attr) container_of(_attr,		\
    struct netdev_queue_attribute, attr)

#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)

static ssize_t netdev_queue_attr_show(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, attribute, buf);
}

static ssize_t netdev_queue_attr_store(struct kobject *kobj,
				       struct attribute *attr,
				       const char *buf, size_t count)
{
	struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, attribute, buf, count);
}

static const struct sysfs_ops netdev_queue_sysfs_ops = {
	.show = netdev_queue_attr_show,
	.store = netdev_queue_attr_store,
};

static ssize_t show_trans_timeout(struct netdev_queue *queue,
				  struct netdev_queue_attribute *attribute,
				  char *buf)
{
	unsigned long trans_timeout;

	spin_lock_irq(&queue->_xmit_lock);
	trans_timeout = queue->trans_timeout;
	spin_unlock_irq(&queue->_xmit_lock);

	return sprintf(buf, fmt_ulong, trans_timeout);
}

static struct netdev_queue_attribute queue_trans_timeout =
	__ATTR(tx_timeout, S_IRUGO, show_trans_timeout, NULL);

#ifdef CONFIG_BQL
/*
 * Byte queue limits sysfs structures and functions.
 */
static ssize_t bql_show(char *buf, unsigned int value)
{
	return sprintf(buf, "%u\n", value);
}

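/* Parse a BQL limit written through sysfs: the keyword "max" selects
 * DQL_MAX_LIMIT, anything else must be a decimal value no larger than
 * DQL_MAX_LIMIT.
 */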
static ssize_t bql_set(const char *buf, const size_t count,
		       unsigned int *pvalue)
{
	unsigned int value;
	int err;

	if (!strcmp(buf, "max") || !strcmp(buf, "max\n"))
		value = DQL_MAX_LIMIT;
	else {
		err = kstrtouint(buf, 10, &value);
		if (err < 0)
			return err;
		if (value > DQL_MAX_LIMIT)
			return -EINVAL;
	}

	*pvalue = value;

	return count;
}

static ssize_t bql_show_hold_time(struct netdev_queue *queue,
				  struct netdev_queue_attribute *attr,
				  char *buf)
{
	struct dql *dql = &queue->dql;

	return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time));
}

static ssize_t bql_set_hold_time(struct netdev_queue *queue,
				 struct netdev_queue_attribute *attribute,
				 const char *buf, size_t len)
{
	struct dql *dql = &queue->dql;
	unsigned int value;
	int err;

	err = kstrtouint(buf, 10, &value);
	if (err < 0)
		return err;

	dql->slack_hold_time = msecs_to_jiffies(value);

	return len;
}

static struct netdev_queue_attribute bql_hold_time_attribute =
	__ATTR(hold_time, S_IRUGO | S_IWUSR, bql_show_hold_time,
	    bql_set_hold_time);

static ssize_t bql_show_inflight(struct netdev_queue *queue,
				 struct netdev_queue_attribute *attr,
				 char *buf)
{
	struct dql *dql = &queue->dql;

	return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed);
}

static struct netdev_queue_attribute bql_inflight_attribute =
	__ATTR(inflight, S_IRUGO, bql_show_inflight, NULL);

#define BQL_ATTR(NAME, FIELD)						\
static ssize_t bql_show_ ## NAME(struct netdev_queue *queue,		\
				 struct netdev_queue_attribute *attr,	\
				 char *buf)				\
{									\
	return bql_show(buf, queue->dql.FIELD);				\
}									\
									\
static ssize_t bql_set_ ## NAME(struct netdev_queue *queue,		\
				struct netdev_queue_attribute *attr,	\
				const char *buf, size_t len)		\
{									\
	return bql_set(buf, len, &queue->dql.FIELD);			\
}									\
									\
static struct netdev_queue_attribute bql_ ## NAME ## _attribute =	\
	__ATTR(NAME, S_IRUGO | S_IWUSR, bql_show_ ## NAME,		\
	    bql_set_ ## NAME);

BQL_ATTR(limit, limit)
BQL_ATTR(limit_max, max_limit)
BQL_ATTR(limit_min, min_limit)

static struct attribute *dql_attrs[] = {
	&bql_limit_attribute.attr,
	&bql_limit_max_attribute.attr,
	&bql_limit_min_attribute.attr,
	&bql_hold_time_attribute.attr,
	&bql_inflight_attribute.attr,
	NULL
};

static struct attribute_group dql_group = {
	.name  = "byte_queue_limits",
	.attrs  = dql_attrs,
};
#endif /* CONFIG_BQL */

#ifdef CONFIG_XPS
static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
{
	struct net_device *dev = queue->dev;
	unsigned int i;

	i = queue - dev->_tx;
	BUG_ON(i >= dev->num_tx_queues);

	return i;
}

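/* Report xps_cpus by inverting the per-CPU XPS maps: a CPU is set in the
 * output mask if any entry of its xps_map points at this tx queue.
 */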
static ssize_t show_xps_map(struct netdev_queue *queue,
			    struct netdev_queue_attribute *attribute, char *buf)
{
	struct net_device *dev = queue->dev;
	struct xps_dev_maps *dev_maps;
	cpumask_var_t mask;
	unsigned long index;
	int i, len;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	index = get_netdev_queue_index(queue);

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		for_each_possible_cpu(i) {
			struct xps_map *map =
			    rcu_dereference(dev_maps->cpu_map[i]);
			if (map) {
				int j;
				for (j = 0; j < map->len; j++) {
					if (map->queues[j] == index) {
						cpumask_set_cpu(i, mask);
						break;
					}
				}
			}
		}
	}
	rcu_read_unlock();

	len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
	free_cpumask_var(mask);
	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_xps_map(struct netdev_queue *queue,
		      struct netdev_queue_attribute *attribute,
		      const char *buf, size_t len)
{
	struct net_device *dev = queue->dev;
	unsigned long index;
	cpumask_var_t mask;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	index = get_netdev_queue_index(queue);

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	err = netif_set_xps_queue(dev, mask, index);

	free_cpumask_var(mask);

	return err ? : len;
}

static struct netdev_queue_attribute xps_cpus_attribute =
    __ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map);
#endif /* CONFIG_XPS */

static struct attribute *netdev_queue_default_attrs[] = {
	&queue_trans_timeout.attr,
#ifdef CONFIG_XPS
	&xps_cpus_attribute.attr,
#endif
	NULL
};

static void netdev_queue_release(struct kobject *kobj)
{
	struct netdev_queue *queue = to_netdev_queue(kobj);

	memset(kobj, 0, sizeof(*kobj));
	dev_put(queue->dev);
}

static const void *netdev_queue_namespace(struct kobject *kobj)
{
	struct netdev_queue *queue = to_netdev_queue(kobj);
	struct device *dev = &queue->dev->dev;
	const void *ns = NULL;

	if (dev->class && dev->class->ns_type)
		ns = dev->class->namespace(dev);

	return ns;
}

static struct kobj_type netdev_queue_ktype = {
	.sysfs_ops = &netdev_queue_sysfs_ops,
	.release = netdev_queue_release,
	.default_attrs = netdev_queue_default_attrs,
	.namespace = netdev_queue_namespace,
};

static int netdev_queue_add_kobject(struct net_device *dev, int index)
{
	struct netdev_queue *queue = dev->_tx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	kobj->kset = dev->queues_kset;
	error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
	    "tx-%u", index);
	if (error)
		goto exit;

#ifdef CONFIG_BQL
	error = sysfs_create_group(kobj, &dql_group);
	if (error)
		goto exit;
#endif

	kobject_uevent(kobj, KOBJ_ADD);
	dev_hold(queue->dev);

	return 0;
exit:
	kobject_put(kobj);
	return error;
}
#endif /* CONFIG_SYSFS */

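/* Tx-side counterpart of net_rx_queue_update_kobjects(): create or destroy
 * tx-<n> kobjects (and their byte_queue_limits groups when CONFIG_BQL is
 * set) as real_num_tx_queues changes.
 */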
int
netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
	int i;
	int error = 0;

	for (i = old_num; i < new_num; i++) {
		error = netdev_queue_add_kobject(dev, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num) {
		struct netdev_queue *queue = dev->_tx + i;

#ifdef CONFIG_BQL
		sysfs_remove_group(&queue->kobj, &dql_group);
#endif
		kobject_put(&queue->kobj);
	}

	return error;
#else
	return 0;
#endif /* CONFIG_SYSFS */
}

static int register_queue_kobjects(struct net_device *dev)
{
	int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	dev->queues_kset = kset_create_and_add("queues",
	    NULL, &dev->dev.kobj);
	if (!dev->queues_kset)
		return -ENOMEM;
	real_rx = dev->real_num_rx_queues;
#endif
	real_tx = dev->real_num_tx_queues;

	error = net_rx_queue_update_kobjects(dev, 0, real_rx);
	if (error)
		goto error;
	rxq = real_rx;

	error = netdev_queue_update_kobjects(dev, 0, real_tx);
	if (error)
		goto error;
	txq = real_tx;

	return 0;

error:
	netdev_queue_update_kobjects(dev, txq, 0);
	net_rx_queue_update_kobjects(dev, rxq, 0);
	return error;
}

static void remove_queue_kobjects(struct net_device *dev)
{
	int real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	real_rx = dev->real_num_rx_queues;
#endif
	real_tx = dev->real_num_tx_queues;

	net_rx_queue_update_kobjects(dev, real_rx, 0);
	netdev_queue_update_kobjects(dev, real_tx, 0);
#ifdef CONFIG_SYSFS
	kset_unregister(dev->queues_kset);
#endif
}

static bool net_current_may_mount(void)
{
	struct net *net = current->nsproxy->net_ns;

	return ns_capable(net->user_ns, CAP_SYS_ADMIN);
}

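/* Take a passive reference on the current net namespace; it is dropped
 * again through the ->drop_ns hook (net_drop_ns) once sysfs is done
 * with it.
 */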
static void *net_grab_current_ns(void)
{
	struct net *ns = current->nsproxy->net_ns;
#ifdef CONFIG_NET_NS
	if (ns)
		atomic_inc(&ns->passive);
#endif
	return ns;
}

static const void *net_initial_ns(void)
{
	return &init_net;
}

static const void *net_netlink_ns(struct sock *sk)
{
	return sock_net(sk);
}

struct kobj_ns_type_operations net_ns_type_operations = {
	.type = KOBJ_NS_TYPE_NET,
	.current_may_mount = net_current_may_mount,
	.grab_current_ns = net_grab_current_ns,
	.netlink_ns = net_netlink_ns,
	.initial_ns = net_initial_ns,
	.drop_ns = net_drop_ns,
};
EXPORT_SYMBOL_GPL(net_ns_type_operations);

static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
{
	struct net_device *dev = to_net_dev(d);
	int retval;

	/* pass interface to uevent. */
	retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
	if (retval)
		goto exit;

	/* pass ifindex to uevent.
	 * ifindex is useful as it won't change (interface name may change)
	 * and is what RtNetlink uses natively. */
	retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);

exit:
	return retval;
}

/*
 *	netdev_release -- destroy and free a dead device.
 *	Called when last reference to device kobject is gone.
 */
static void netdev_release(struct device *d)
{
	struct net_device *dev = to_net_dev(d);

	BUG_ON(dev->reg_state != NETREG_RELEASED);

	kfree(dev->ifalias);
	netdev_freemem(dev);
}

static const void *net_namespace(struct device *d)
{
	struct net_device *dev;
	dev = container_of(d, struct net_device, dev);
	return dev_net(dev);
}

static struct class net_class = {
	.name = "net",
	.dev_release = netdev_release,
	.dev_groups = net_class_groups,
	.dev_uevent = netdev_uevent,
	.ns_type = &net_ns_type_operations,
	.namespace = net_namespace,
};

#ifdef CONFIG_OF_NET
static int of_dev_node_match(struct device *dev, const void *data)
{
	int ret = 0;

	if (dev->parent)
		ret = dev->parent->of_node == data;

	return ret == 0 ? dev->of_node == data : ret;
}

struct net_device *of_find_net_device_by_node(struct device_node *np)
{
	struct device *dev;

	dev = class_find_device(&net_class, NULL, np, of_dev_node_match);
	if (!dev)
		return NULL;

	return to_net_dev(dev);
}
EXPORT_SYMBOL(of_find_net_device_by_node);
#endif

/* Delete sysfs entries but hold kobject reference until after all
 * netdev references are gone.
 */
void netdev_unregister_kobject(struct net_device *ndev)
{
	struct device *dev = &(ndev->dev);

	kobject_get(&dev->kobj);

	remove_queue_kobjects(ndev);

	pm_runtime_set_memalloc_noio(dev, false);

	device_del(dev);
}

/* Create sysfs entries for network device. */
int netdev_register_kobject(struct net_device *ndev)
{
	struct device *dev = &(ndev->dev);
	const struct attribute_group **groups = ndev->sysfs_groups;
	int error = 0;

	device_initialize(dev);
	dev->class = &net_class;
	dev->platform_data = ndev;
	dev->groups = groups;

	dev_set_name(dev, "%s", ndev->name);

#ifdef CONFIG_SYSFS
	/* Allow for a device specific group */
	if (*groups)
		groups++;

	*groups++ = &netstat_group;

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
	if (ndev->ieee80211_ptr)
		*groups++ = &wireless_group;
#if IS_ENABLED(CONFIG_WIRELESS_EXT)
	else if (ndev->wireless_handlers)
		*groups++ = &wireless_group;
#endif
#endif
#endif /* CONFIG_SYSFS */

	error = device_add(dev);
	if (error)
		return error;

	error = register_queue_kobjects(ndev);
	if (error) {
		device_del(dev);
		return error;
	}

	pm_runtime_set_memalloc_noio(dev, true);

	return error;
}

int netdev_class_create_file_ns(struct class_attribute *class_attr,
				const void *ns)
{
	return class_create_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_create_file_ns);

void netdev_class_remove_file_ns(struct class_attribute *class_attr,
				 const void *ns)
{
	class_remove_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_remove_file_ns);

int __init netdev_kobject_init(void)
{
	kobj_ns_type_register(&net_ns_type_operations);
	return class_register(&net_class);
}