#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table {
	struct tc_ratespec rate;
	u32		data[256];
	struct qdisc_rate_table *next;
	int		refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
	__QDISC_STATE_THROTTLED,
};

/*
 * The following bits are only changed while the qdisc lock is held.
 */
enum qdisc___state_t {
	__QDISC___STATE_RUNNING = 1,
};

struct qdisc_size_table {
	struct rcu_head		rcu;
	struct list_head	list;
	struct tc_sizespec	szopts;
	int			refcnt;
	u16			data[];
};

struct Qdisc {
	int			(*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
	struct sk_buff *	(*dequeue)(struct Qdisc *dev);
	unsigned int		flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_INGRESS		2
#define TCQ_F_CAN_BYPASS	4
#define TCQ_F_MQROOT		8
#define TCQ_F_ONETXQUEUE	0x10 /* dequeue_skb() can assume all skbs are for
				      * q->dev_queue: it can test
				      * netif_xmit_frozen_or_stopped() before
				      * dequeuing the next packet.
				      * It is true for MQ/MQPRIO slaves or a
				      * non-multiqueue device.
				      */
#define TCQ_F_WARN_NONWC	(1 << 16)
	u32			limit;
	const struct Qdisc_ops	*ops;
	struct qdisc_size_table	__rcu *stab;
	struct list_head	list;
	u32			handle;
	u32			parent;
	int			(*reshape_fail)(struct sk_buff *skb,
					struct Qdisc *q);

	void			*u32_node;

	/* This field is deprecated, but it is still used by CBQ
	 * and it will live until a better solution is invented.
	 */
	struct Qdisc		*__parent;
	struct netdev_queue	*dev_queue;

	struct gnet_stats_rate_est64	rate_est;
	struct Qdisc		*next_sched;
	struct sk_buff		*gso_skb;
	/*
	 * For performance's sake on SMP, we put frequently modified fields at the end.
	 */
	unsigned long		state;
	struct sk_buff_head	q;
	struct gnet_stats_basic_packed bstats;
	unsigned int		__state;
	struct gnet_stats_queue	qstats;
	struct rcu_head		rcu_head;
	int			padded;
	atomic_t		refcnt;

	spinlock_t		busylock ____cacheline_aligned_in_smp;
};

static inline bool qdisc_is_running(const struct Qdisc *qdisc)
{
	return (qdisc->__state & __QDISC___STATE_RUNNING) ? true : false;
}

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	if (qdisc_is_running(qdisc))
		return false;
	qdisc->__state |= __QDISC___STATE_RUNNING;
	return true;
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	qdisc->__state &= ~__QDISC___STATE_RUNNING;
}
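
/* Usage sketch (hedged; compare qdisc_run()/__qdisc_run() in
 * include/net/pkt_sched.h and net/sched/sch_generic.c): the
 * __QDISC___STATE_RUNNING flag serializes dequeue processing so only one
 * CPU drains a given qdisc at a time, without spinning on a lock:
 *
 *	if (qdisc_run_begin(q)) {
 *		... dequeue and transmit packets ...
 *		qdisc_run_end(q);
 *	}
 *
 * A false return from qdisc_run_begin() means another CPU already owns
 * the run state and will drain the queue; that caller must not dequeue.
 */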

static inline bool qdisc_is_throttled(const struct Qdisc *qdisc)
{
	return test_bit(__QDISC_STATE_THROTTLED, &qdisc->state) ? true : false;
}

static inline void qdisc_throttled(struct Qdisc *qdisc)
{
	set_bit(__QDISC_STATE_THROTTLED, &qdisc->state);
}

static inline void qdisc_unthrottled(struct Qdisc *qdisc)
{
	clear_bit(__QDISC_STATE_THROTTLED, &qdisc->state);
}
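
/* Illustrative note (an assumption about typical usage, not a contract
 * of this header): a non-work-conserving qdisc marks itself throttled
 * when packets are queued but none may be sent yet, commonly via the
 * qdisc watchdog helpers declared in include/net/pkt_sched.h:
 *
 *	if (now < time_next_packet) {
 *		qdisc_watchdog_schedule_ns(&q->watchdog, time_next_packet);
 *		return NULL;
 *	}
 *
 * qdisc_is_throttled() then lets callers distinguish "empty" from
 * "waiting on a timer", and the watchdog clears the bit when it fires.
 */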

struct Qdisc_class_ops {
	/* Child qdisc manipulation */
	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
	int			(*graft)(struct Qdisc *, unsigned long cl,
					struct Qdisc *, struct Qdisc **);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*get)(struct Qdisc *, u32 classid);
	void			(*put)(struct Qdisc *, unsigned long);
	int			(*change)(struct Qdisc *, u32, u32,
					struct nlattr **, unsigned long *);
	int			(*delete)(struct Qdisc *, unsigned long);
	void			(*walk)(struct Qdisc *, struct qdisc_walker *arg);

	/* Filter manipulation */
	struct tcf_proto **	(*tcf_chain)(struct Qdisc *, unsigned long);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg*);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					struct gnet_dump *);
};

struct Qdisc_ops {
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;

	int			(*enqueue)(struct sk_buff *, struct Qdisc *);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	struct sk_buff *	(*peek)(struct Qdisc *);
	unsigned int		(*drop)(struct Qdisc *);

	int			(*init)(struct Qdisc *, struct nlattr *arg);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *, struct nlattr *arg);
	void			(*attach)(struct Qdisc *);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	struct module		*owner;
};
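
/* Minimal sketch of a classless qdisc built on these ops (hedged: the
 * "example_" names are hypothetical; registration via register_qdisc()
 * is declared in include/net/pkt_sched.h).  A trivial FIFO could be:
 *
 *	static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 *	{
 *		if (likely(skb_queue_len(&sch->q) < sch->limit))
 *			return qdisc_enqueue_tail(skb, sch);
 *		return qdisc_drop(skb, sch);
 *	}
 *
 *	static struct Qdisc_ops example_qdisc_ops __read_mostly = {
 *		.id		= "example",
 *		.enqueue	= example_enqueue,
 *		.dequeue	= qdisc_dequeue_head,
 *		.peek		= qdisc_peek_head,
 *		.owner		= THIS_MODULE,
 *	};
 *
 * followed by register_qdisc(&example_qdisc_ops) from module init.
 */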


struct tcf_result {
	unsigned long	class;
	u32		classid;
};

struct tcf_proto_ops {
	struct list_head	head;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	int			(*init)(struct tcf_proto*);
	void			(*destroy)(struct tcf_proto*);

	unsigned long		(*get)(struct tcf_proto*, u32 handle);
	void			(*put)(struct tcf_proto*, unsigned long);
	int			(*change)(struct net *net, struct sk_buff *,
					struct tcf_proto*, unsigned long,
					u32 handle, struct nlattr **,
					unsigned long *, bool);
	int			(*delete)(struct tcf_proto*, unsigned long);
	void			(*walk)(struct tcf_proto*, struct tcf_walker *arg);

	/* rtnetlink specific */
	int			(*dump)(struct net*, struct tcf_proto*, unsigned long,
					struct sk_buff *skb, struct tcmsg*);

	struct module		*owner;
};

struct tcf_proto {
	/* Fast access part */
	struct tcf_proto	*next;
	void			*root;
	int			(*classify)(struct sk_buff *,
					    const struct tcf_proto *,
					    struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	u32			classid;
	struct Qdisc		*q;
	void			*data;
	const struct tcf_proto_ops	*ops;
};

struct qdisc_skb_cb {
	unsigned int		pkt_len;
	u16			slave_dev_queue_mapping;
	u16			_pad;
#define QDISC_CB_PRIV_LEN 20
	unsigned char		data[QDISC_CB_PRIV_LEN];
};

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
	struct qdisc_skb_cb *qcb;

	BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
	BUILD_BUG_ON(sizeof(qcb->data) < sz);
}
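
/* Hedged example: a qdisc that stashes private per-packet state in
 * skb->cb layers its own struct over qdisc_skb_cb's data[] and
 * validates the size in its accessor (the "example_skb_cb" name is
 * hypothetical; netem and sfq use this same pattern):
 *
 *	struct example_skb_cb {
 *		u32	cookie;
 *	};
 *
 *	static inline struct example_skb_cb *example_skb_cb(struct sk_buff *skb)
 *	{
 *		qdisc_cb_private_validate(skb, sizeof(struct example_skb_cb));
 *		return (struct example_skb_cb *)qdisc_skb_cb(skb)->data;
 *	}
 *
 * Both checks are BUILD_BUG_ON()s, so a too-large private struct fails
 * at compile time rather than corrupting skb->cb at run time.
 */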

static inline int qdisc_qlen(const struct Qdisc *q)
{
	return q->q.qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc;
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest.  This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us.  Otherwise we'll potentially lock the wrong qdisc
 * root.  This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(const struct Qdisc *q)
{
	spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(const struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_sleeping_lock(q));
}
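
/* Typical use (a sketch of the common pattern in qdisc ->change()
 * handlers, not a requirement imposed by this header): configuration
 * updates take the tree lock so the fast path never sees a
 * half-applied change, e.g.:
 *
 *	sch_tree_lock(sch);
 *	q->limit = new_limit;
 *	... drop packets beyond the new limit ...
 *	sch_tree_unlock(sch);
 *
 * RTNL must already be held, which is what makes sampling
 * qdisc_root_sleeping() in the accessors above safe.
 */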

#define tcf_tree_lock(tp)	sch_tree_lock((tp)->q)
#define tcf_tree_unlock(tp)	sch_tree_unlock((tp)->q)

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;

struct Qdisc_class_common {
	u32			classid;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash {
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	unsigned int h;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}

int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
			     struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);
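
/* Hedged usage sketch for the class hash, modeled on classful qdiscs
 * such as htb (the "example"/"clhash" names are illustrative): a class
 * embeds Qdisc_class_common, the table is set up in ->init(), and
 * lookups go through qdisc_class_find():
 *
 *	struct example_class {
 *		struct Qdisc_class_common common;
 *		...
 *	};
 *
 *	err = qdisc_class_hash_init(&q->clhash);
 *	...
 *	cl->common.classid = classid;
 *	qdisc_class_hash_insert(&q->clhash, &cl->common);
 *	qdisc_class_hash_grow(sch, &q->clhash);
 *	...
 *	clc = qdisc_class_find(&q->clhash, classid);
 *	cl = clc ? container_of(clc, struct example_class, common) : NULL;
 */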

void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_destroy(struct Qdisc *qdisc);
void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops, u32 parentid);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab);
void tcf_destroy(struct tcf_proto *tp);
void tcf_destroy_chain(struct tcf_proto **fl);

/* Reset all TX qdiscs of a device, from queue index i onwards. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
	struct Qdisc *qdisc;

	for (; i < dev->num_tx_queues; i++) {
		qdisc = netdev_get_tx_queue(dev, i)->qdisc;
		if (qdisc) {
			spin_lock_bh(qdisc_lock(qdisc));
			qdisc_reset(qdisc);
			spin_unlock_bh(qdisc_lock(qdisc));
		}
	}
}

static inline void qdisc_reset_all_tx(struct net_device *dev)
{
	qdisc_reset_all_tx_gt(dev, 0);
}

/* Are all TX queues of the device empty?  */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = txq->qdisc;

		if (q->q.qlen)
			return false;
	}
	return true;
}

/* Are any of the TX qdiscs changing?  */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
	unsigned int i;
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (txq->qdisc != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues?  */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (txq->qdisc != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif

static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
					   const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

	if (stab)
		__qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	qdisc_calculate_pkt_len(skb, sch);
	return sch->enqueue(skb, sch);
}

static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
{
	qdisc_skb_cb(skb)->pkt_len = skb->len;
	return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
}
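
/* Hedged sketch of how a parent qdisc typically interprets the enqueue
 * return value together with net_xmit_drop_count() (the pattern used by
 * e.g. tbf and netem when enqueuing to a child qdisc):
 *
 *	ret = qdisc_enqueue(skb, q->qdisc);
 *	if (ret != NET_XMIT_SUCCESS) {
 *		if (net_xmit_drop_count(ret))
 *			sch->qstats.drops++;
 *		return ret;
 *	}
 *
 * __NET_XMIT_STOLEN means an action consumed the packet, so it is not
 * counted as a drop; the flag bits above NET_XMIT_MASK are masked off
 * before the verdict is returned to the stack.
 */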


static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
				 const struct sk_buff *skb)
{
	bstats->bytes += qdisc_pkt_len(skb);
	bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
				       const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	__skb_queue_tail(list, skb);
	sch->qstats.backlog += qdisc_pkt_len(skb);

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue(list);

	if (likely(skb != NULL)) {
		sch->qstats.backlog -= qdisc_pkt_len(skb);
		qdisc_bstats_update(sch, skb);
	}

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	return __qdisc_dequeue_head(sch, &sch->q);
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
					      struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue(list);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);
		sch->qstats.backlog -= len;
		kfree_skb(skb);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch)
{
	return __qdisc_queue_drop_head(sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue_tail(list);

	if (likely(skb != NULL))
		sch->qstats.backlog -= qdisc_pkt_len(skb);

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
{
	return __qdisc_dequeue_tail(sch, &sch->q);
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	return skb_peek(&sch->q);
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!sch->gso_skb) {
		sch->gso_skb = sch->dequeue(sch);
		if (sch->gso_skb)
			/* it's still part of the queue */
			sch->q.qlen++;
	}

	return sch->gso_skb;
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = sch->gso_skb;

	if (skb) {
		sch->gso_skb = NULL;
		sch->q.qlen--;
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}
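
/* Hedged sketch of the peek/dequeue pairing for a shaping qdisc (the
 * rough shape of a tbf-style dequeue, with details elided): peek
 * without consuming, decide whether transmission is allowed now, then
 * consume with qdisc_dequeue_peeked() so the qlen accounting in the
 * helpers above stays consistent:
 *
 *	skb = q->qdisc->ops->peek(q->qdisc);
 *	if (skb) {
 *		if (enough tokens for qdisc_pkt_len(skb)) {
 *			skb = qdisc_dequeue_peeked(q->qdisc);
 *			...
 *			return skb;
 *		}
 *		qdisc_watchdog_schedule_ns(&q->watchdog, next_send_time);
 *	}
 *	return NULL;
 */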

static inline void __qdisc_reset_queue(struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	/*
	 * We do not know the backlog in bytes of this list; it is
	 * up to the caller to correct it.
	 */
	__skb_queue_purge(list);
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(sch, &sch->q);
	sch->qstats.backlog = 0;
}

static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
					      struct sk_buff_head *list)
{
	struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);
		kfree_skb(skb);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
{
	return __qdisc_queue_drop(sch, &sch->q);
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	kfree_skb(skb);
	sch->qstats.drops++;

	return NET_XMIT_DROP;
}

static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
{
	sch->qstats.drops++;

#ifdef CONFIG_NET_CLS_ACT
	if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
		goto drop;

	return NET_XMIT_SUCCESS;

drop:
#endif
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;
	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return rtab->data[255]*(slot >> 8) + rtab->data[slot & 0xFF];
	return rtab->data[slot];
}
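
/* Worked example (illustrative numbers): with cell_log = 3 and both
 * cell_align and overhead zero, a 1000-byte packet selects slot
 * 1000 >> 3 = 125, so the transmission time is rtab->data[125], a value
 * precomputed by userspace tc for the configured rate.  Slots above 255
 * are approximated from data[255] scaled by the high bits plus the
 * entry for the low bits, since the table only covers 256 cells.
 */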

#ifdef CONFIG_NET_CLS_ACT
static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask,
					    int action)
{
	struct sk_buff *n;

	n = skb_clone(skb, gfp_mask);

	if (n) {
		n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
		n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
		n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
	}
	return n;
}
#endif

struct psched_ratecfg {
	u64	rate_bytes_ps; /* bytes per second */
	u32	mult;
	u16	overhead;
	u8	linklayer;
	u8	shift;
};

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
				unsigned int len)
{
	len += r->overhead;

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
		return ((u64)(DIV_ROUND_UP(len, 48) * 53) * r->mult) >> r->shift;

	return ((u64)len * r->mult) >> r->shift;
}
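
/* Worked example for the ATM branch (illustrative): a 100-byte packet
 * with no overhead occupies DIV_ROUND_UP(100, 48) = 3 ATM cells, i.e.
 * 3 * 53 = 159 bytes on the wire (48 payload bytes per 53-byte cell),
 * and that padded length is what gets scaled by the precomputed
 * mult/shift pair to yield nanoseconds.
 */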

void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64);

static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
					  const struct psched_ratecfg *r)
{
	memset(res, 0, sizeof(*res));

	/* The legacy struct tc_ratespec has a 32bit @rate field;
	 * qdiscs using a 64bit rate should add new attributes
	 * in order to maintain compatibility.
	 */
	res->rate = min_t(u64, r->rate_bytes_ps, ~0U);

	res->overhead = r->overhead;
	res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}
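
/* Hedged usage sketch: a rate-limiting qdisc fills a psched_ratecfg
 * once at configuration time and then converts packet lengths to
 * transmit times on the fast path (the "q->rate" member is
 * hypothetical):
 *
 *	psched_ratecfg_precompute(&q->rate, &qopt->rate, rate64);
 *	...
 *	u64 ns = psched_l2t_ns(&q->rate, qdisc_pkt_len(skb));
 *
 * psched_ratecfg_getrate() does the reverse translation when dumping
 * the configuration back to userspace over netlink.
 */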

#endif /* __NET_SCHED_GENERIC_H */
725