Lines matching refs: q

77 struct etf_sched_data *q = qdisc_priv(sch); in is_packet_valid() local
82 if (q->skip_sock_check) in is_packet_valid()
94 if (sk->sk_clockid != q->clockid) in is_packet_valid()
97 if (sk->sk_txtime_deadline_mode != q->deadline_mode) in is_packet_valid()
101 now = q->get_time(); in is_packet_valid()
102 if (ktime_before(txtime, now) || ktime_before(txtime, q->last)) in is_packet_valid()
110 struct etf_sched_data *q = qdisc_priv(sch); in etf_peek_timesortedlist() local
113 p = rb_first_cached(&q->head); in etf_peek_timesortedlist()
122 struct etf_sched_data *q = qdisc_priv(sch); in reset_watchdog() local
127 qdisc_watchdog_cancel(&q->watchdog); in reset_watchdog()
131 next = ktime_sub_ns(skb->tstamp, q->delta); in reset_watchdog()
132 qdisc_watchdog_schedule_ns(&q->watchdog, ktime_to_ns(next)); in reset_watchdog()
165 struct etf_sched_data *q = qdisc_priv(sch); in etf_enqueue_timesortedlist() local
166 struct rb_node **p = &q->head.rb_root.rb_node, *parent = NULL; in etf_enqueue_timesortedlist()
189 rb_insert_color_cached(&nskb->rbnode, &q->head, leftmost); in etf_enqueue_timesortedlist()
192 sch->q.qlen++; in etf_enqueue_timesortedlist()
203 struct etf_sched_data *q = qdisc_priv(sch); in timesortedlist_drop() local
211 rb_erase_cached(&skb->rbnode, &q->head); in timesortedlist_drop()
225 sch->q.qlen--; in timesortedlist_drop()
233 struct etf_sched_data *q = qdisc_priv(sch); in timesortedlist_remove() local
235 rb_erase_cached(&skb->rbnode, &q->head); in timesortedlist_remove()
248 q->last = skb->tstamp; in timesortedlist_remove()
250 sch->q.qlen--; in timesortedlist_remove()
255 struct etf_sched_data *q = qdisc_priv(sch); in etf_dequeue_timesortedlist() local
263 now = q->get_time(); in etf_dequeue_timesortedlist()
275 if (q->deadline_mode) { in etf_dequeue_timesortedlist()
281 next = ktime_sub_ns(skb->tstamp, q->delta); in etf_dequeue_timesortedlist()
297 struct etf_sched_data *q) in etf_disable_offload() argument
303 if (!q->offload) in etf_disable_offload()
310 etf.queue = q->queue; in etf_disable_offload()
319 static int etf_enable_offload(struct net_device *dev, struct etf_sched_data *q, in etf_enable_offload() argument
331 etf.queue = q->queue; in etf_enable_offload()
346 struct etf_sched_data *q = qdisc_priv(sch); in etf_init() local
379 q->queue = sch->dev_queue - netdev_get_tx_queue(dev, 0); in etf_init()
382 err = etf_enable_offload(dev, q, extack); in etf_init()
388 q->delta = qopt->delta; in etf_init()
389 q->clockid = qopt->clockid; in etf_init()
390 q->offload = OFFLOAD_IS_ON(qopt); in etf_init()
391 q->deadline_mode = DEADLINE_MODE_IS_ON(qopt); in etf_init()
392 q->skip_sock_check = SKIP_SOCK_CHECK_IS_SET(qopt); in etf_init()
394 switch (q->clockid) { in etf_init()
396 q->get_time = ktime_get_real; in etf_init()
399 q->get_time = ktime_get; in etf_init()
402 q->get_time = ktime_get_boottime; in etf_init()
405 q->get_time = ktime_get_clocktai; in etf_init()
412 qdisc_watchdog_init_clockid(&q->watchdog, sch, q->clockid); in etf_init()
419 struct etf_sched_data *q = qdisc_priv(sch); in timesortedlist_clear() local
420 struct rb_node *p = rb_first_cached(&q->head); in timesortedlist_clear()
427 rb_erase_cached(&skb->rbnode, &q->head); in timesortedlist_clear()
429 sch->q.qlen--; in timesortedlist_clear()
435 struct etf_sched_data *q = qdisc_priv(sch); in etf_reset() local
438 if (q->watchdog.qdisc == sch) in etf_reset()
439 qdisc_watchdog_cancel(&q->watchdog); in etf_reset()
443 __qdisc_reset_queue(&sch->q); in etf_reset()
445 q->last = 0; in etf_reset()
450 struct etf_sched_data *q = qdisc_priv(sch); in etf_destroy() local
454 if (q->watchdog.qdisc == sch) in etf_destroy()
455 qdisc_watchdog_cancel(&q->watchdog); in etf_destroy()
457 etf_disable_offload(dev, q); in etf_destroy()
462 struct etf_sched_data *q = qdisc_priv(sch); in etf_dump() local
470 opt.delta = q->delta; in etf_dump()
471 opt.clockid = q->clockid; in etf_dump()
472 if (q->offload) in etf_dump()
475 if (q->deadline_mode) in etf_dump()
478 if (q->skip_sock_check) in etf_dump()
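
Taken together, the references above outline the qdisc's private state. The following is a sketch of the etf_sched_data layout implied by those lines; the field types, ordering, and comments are inferred from the references rather than read from the source, so treat them as assumptions:

	struct etf_sched_data {
		bool offload;			/* OFFLOAD_IS_ON(qopt), line 390 */
		bool deadline_mode;		/* DEADLINE_MODE_IS_ON(qopt), line 391 */
		bool skip_sock_check;		/* SKIP_SOCK_CHECK_IS_SET(qopt), line 392 */
		int clockid;			/* selects the get_time() helper, lines 394-405 */
		int queue;			/* tx queue index used by the offload ops, lines 310/331/379 */
		s32 delta;			/* fudge value (ns) subtracted from skb->tstamp, lines 131/281 */
		ktime_t last;			/* txtime of the most recently dequeued skb, lines 102/248 */
		struct rb_root_cached head;	/* time-sorted list of pending skbs, lines 113/166/189 */
		struct qdisc_watchdog watchdog;	/* wakes the qdisc at the next txtime, lines 127/132/412 */
		ktime_t (*get_time)(void);	/* ktime_get{_real,,_boottime,_clocktai}, lines 396-405 */
	};

The rb_root_cached head and the per-clockid get_time() pointer are the two pieces most of the listed references revolve around: enqueue inserts into the cached rbtree keyed by skb->tstamp, while peek/dequeue compare the leftmost entry against q->get_time() and re-arm q->watchdog at skb->tstamp minus q->delta.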