Lines matching refs:q — every line in the CAKE scheduler (net/sched/sch_cake.c) that references a variable named q, usually the struct cake_sched_data private area obtained via qdisc_priv(). Each entry shows the source line number, the matching code, and the enclosing function; "argument" and "local" mark how q is bound in that function.

647 static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,  in cake_hash()  argument
743 if (likely(q->tags[reduced_hash] == flow_hash && in cake_hash()
744 q->flows[reduced_hash].set)) { in cake_hash()
745 q->way_directs++; in cake_hash()
758 if (q->tags[outer_hash + k] == flow_hash) { in cake_hash()
760 q->way_hits++; in cake_hash()
762 if (!q->flows[outer_hash + k].set) { in cake_hash()
777 if (!q->flows[outer_hash + k].set) { in cake_hash()
778 q->way_misses++; in cake_hash()
788 q->way_collisions++; in cake_hash()
792 if (q->flows[outer_hash + k].set == CAKE_SET_BULK) { in cake_hash()
794 q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--; in cake_hash()
796 q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--; in cake_hash()
801 q->tags[reduced_hash] = flow_hash; in cake_hash()
809 if (q->hosts[outer_hash + k].srchost_tag == in cake_hash()
815 if (!q->hosts[outer_hash + k].srchost_bulk_flow_count) in cake_hash()
818 q->hosts[outer_hash + k].srchost_tag = srchost_hash; in cake_hash()
821 if (q->flows[reduced_hash].set == CAKE_SET_BULK) in cake_hash()
822 q->hosts[srchost_idx].srchost_bulk_flow_count++; in cake_hash()
823 q->flows[reduced_hash].srchost = srchost_idx; in cake_hash()
832 if (q->hosts[outer_hash + k].dsthost_tag == in cake_hash()
838 if (!q->hosts[outer_hash + k].dsthost_bulk_flow_count) in cake_hash()
841 q->hosts[outer_hash + k].dsthost_tag = dsthost_hash; in cake_hash()
844 if (q->flows[reduced_hash].set == CAKE_SET_BULK) in cake_hash()
845 q->hosts[dsthost_idx].dsthost_bulk_flow_count++; in cake_hash()
846 q->flows[reduced_hash].dsthost = dsthost_idx; in cake_hash()
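
The cake_hash() hits above are the flow-table lookup: a direct check of the reduced-hash slot first (way_directs), then a scan of the other ways in the same bucket (way_hits), then an empty way (way_misses), and finally eviction of the direct slot (way_collisions), with the per-host bulk-flow counters adjusted around the eviction. A minimal userspace sketch of that set-associative lookup, with the host accounting omitted and all sizes and names (NUM_FLOWS, WAYS, struct flow_tab) chosen purely for illustration:

    /* Set-associative flow lookup in the spirit of cake_hash();
     * names and sizes are hypothetical. */
    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define NUM_FLOWS 64          /* must be a multiple of WAYS */
    #define WAYS       4

    struct flow_tab {
        uint32_t tags[NUM_FLOWS]; /* full flow hash stored per slot */
        bool     set[NUM_FLOWS];  /* slot currently holds a live flow */
    };

    /* Return the slot for flow_hash, preferring (1) the direct slot,
     * (2) any way in the same bucket with the same tag, (3) an empty way,
     * and finally (4) evicting the direct slot on a collision. */
    static unsigned flow_lookup(struct flow_tab *t, uint32_t flow_hash)
    {
        unsigned reduced = flow_hash % NUM_FLOWS;
        unsigned outer = reduced - (reduced % WAYS);  /* start of the bucket */
        unsigned k;

        if (t->set[reduced] && t->tags[reduced] == flow_hash)
            return reduced;                           /* "way_direct" */

        for (k = 0; k < WAYS; k++)
            if (t->set[outer + k] && t->tags[outer + k] == flow_hash)
                return outer + k;                     /* "way_hit" */

        for (k = 0; k < WAYS; k++)
            if (!t->set[outer + k]) {                 /* "way_miss": claim it */
                t->tags[outer + k] = flow_hash;
                t->set[outer + k] = true;
                return outer + k;
            }

        /* "way_collision": all ways busy, evict the direct slot */
        t->tags[reduced] = flow_hash;
        return reduced;
    }

    int main(void)
    {
        struct flow_tab t = { 0 };

        printf("slot %u\n", flow_lookup(&t, 0xdeadbeef));
        return 0;
    }
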
1155 static struct sk_buff *cake_ack_filter(struct cake_sched_data *q, in cake_ack_filter() argument
1158 bool aggressive = q->ack_filter == CAKE_ACK_AGGRESSIVE; in cake_ack_filter()
1318 static u32 cake_calc_overhead(struct cake_sched_data *q, u32 len, u32 off) in cake_calc_overhead() argument
1320 if (q->rate_flags & CAKE_FLAG_OVERHEAD) in cake_calc_overhead()
1323 if (q->max_netlen < len) in cake_calc_overhead()
1324 q->max_netlen = len; in cake_calc_overhead()
1325 if (q->min_netlen > len) in cake_calc_overhead()
1326 q->min_netlen = len; in cake_calc_overhead()
1328 len += q->rate_overhead; in cake_calc_overhead()
1330 if (len < q->rate_mpu) in cake_calc_overhead()
1331 len = q->rate_mpu; in cake_calc_overhead()
1333 if (q->atm_mode == CAKE_ATM_ATM) { in cake_calc_overhead()
1337 } else if (q->atm_mode == CAKE_ATM_PTM) { in cake_calc_overhead()
1345 if (q->max_adjlen < len) in cake_calc_overhead()
1346 q->max_adjlen = len; in cake_calc_overhead()
1347 if (q->min_adjlen > len) in cake_calc_overhead()
1348 q->min_adjlen = len; in cake_calc_overhead()
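
Lines 1333 and 1337 select the ATM or PTM framing compensation applied after the fixed per-packet overhead and the MPU. The usual adjustments for those framings are sketched below: ATM carries payload in 53-byte cells holding 48 bytes each, so the length is rounded up to whole cells; PTM's 64b/65b encoding is approximated as one extra byte per started 64-byte block. The exact rounding in the kernel is not visible in this listing, so treat the arithmetic as an assumption.

    /* Sketch of ATM / PTM length adjustments (userspace, illustrative). */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t adjust_atm(uint32_t len)
    {
        return (len + 47) / 48 * 53;      /* round up to whole 53-byte cells */
    }

    static uint32_t adjust_ptm(uint32_t len)
    {
        return len + (len + 63) / 64;     /* conservative 64b/65b estimate */
    }

    int main(void)
    {
        printf("ATM(1500) = %u, PTM(1500) = %u\n",
               adjust_atm(1500), adjust_ptm(1500));
        return 0;
    }
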
1353 static u32 cake_overhead(struct cake_sched_data *q, const struct sk_buff *skb) in cake_overhead() argument
1361 q->avg_netoff = cake_ewma(q->avg_netoff, off << 16, 8); in cake_overhead()
1364 return cake_calc_overhead(q, len, off); in cake_overhead()
1396 return (cake_calc_overhead(q, len, off) * (segs - 1) + in cake_overhead()
1397 cake_calc_overhead(q, last_len, off)); in cake_overhead()
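
Line 1361 folds the network-layer offset into q->avg_netoff as a Q16.16 fixed-point EWMA (the sample is off << 16), and lines 1396-1397 apply the overhead to every GSO segment separately. A shift-based EWMA of the shape cake uses for its running averages is sketched here; the cake_ewma() definition itself is assumed, not quoted from the listing:

    /* Shift-based EWMA: new = old - old/2^shift + sample/2^shift. */
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t ewma_shift(uint64_t avg, uint64_t sample, unsigned shift)
    {
        avg -= avg >> shift;      /* decay the old average */
        avg += sample >> shift;   /* mix in 1/2^shift of the new sample */
        return avg;
    }

    int main(void)
    {
        uint64_t avg = 0;

        for (int i = 0; i < 2048; i++)
            avg = ewma_shift(avg, 100 << 16, 8);   /* Q16.16 sample, gain 1/256 */

        printf("average ~ %llu\n", (unsigned long long)(avg >> 16));  /* ~99 */
        return 0;
    }
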
1400 static void cake_heap_swap(struct cake_sched_data *q, u16 i, u16 j) in cake_heap_swap() argument
1402 struct cake_heap_entry ii = q->overflow_heap[i]; in cake_heap_swap()
1403 struct cake_heap_entry jj = q->overflow_heap[j]; in cake_heap_swap()
1405 q->overflow_heap[i] = jj; in cake_heap_swap()
1406 q->overflow_heap[j] = ii; in cake_heap_swap()
1408 q->tins[ii.t].overflow_idx[ii.b] = j; in cake_heap_swap()
1409 q->tins[jj.t].overflow_idx[jj.b] = i; in cake_heap_swap()
1412 static u32 cake_heap_get_backlog(const struct cake_sched_data *q, u16 i) in cake_heap_get_backlog() argument
1414 struct cake_heap_entry ii = q->overflow_heap[i]; in cake_heap_get_backlog()
1416 return q->tins[ii.t].backlogs[ii.b]; in cake_heap_get_backlog()
1419 static void cake_heapify(struct cake_sched_data *q, u16 i) in cake_heapify() argument
1422 u32 mb = cake_heap_get_backlog(q, i); in cake_heapify()
1430 u32 lb = cake_heap_get_backlog(q, l); in cake_heapify()
1439 u32 rb = cake_heap_get_backlog(q, r); in cake_heapify()
1448 cake_heap_swap(q, i, m); in cake_heapify()
1456 static void cake_heapify_up(struct cake_sched_data *q, u16 i) in cake_heapify_up() argument
1460 u32 ib = cake_heap_get_backlog(q, i); in cake_heapify_up()
1461 u32 pb = cake_heap_get_backlog(q, p); in cake_heapify_up()
1464 cake_heap_swap(q, i, p); in cake_heapify_up()
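
cake_heap_swap(), cake_heap_get_backlog(), cake_heapify() and cake_heapify_up() maintain the overflow heap: a binary max-heap of (tin, flow) entries keyed by backlog, with overflow_idx[] as a back-pointer so an entry can be re-heapified in place when its backlog changes. A compact array-heap sketch with the same 2i+1 / 2i+2 indexing and back-pointer bookkeeping (all names and sizes here are illustrative, not the kernel's):

    /* Minimal max-heap keyed by backlog, with back-pointers. */
    #include <stdint.h>
    #include <stdio.h>

    #define HEAP_SZ 8

    static uint32_t backlog[HEAP_SZ];   /* keyed value for each queue id */
    static uint16_t heap[HEAP_SZ];      /* heap of queue ids, largest on top */
    static uint16_t pos[HEAP_SZ];       /* queue id -> current heap slot */

    static void heap_swap(uint16_t i, uint16_t j)
    {
        uint16_t a = heap[i], b = heap[j];

        heap[i] = b;
        heap[j] = a;
        pos[a] = j;                     /* keep the back-pointers in sync */
        pos[b] = i;
    }

    static void heapify_down(uint16_t i, uint16_t n)
    {
        for (;;) {
            uint16_t l = 2 * i + 1, r = 2 * i + 2, m = i;

            if (l < n && backlog[heap[l]] > backlog[heap[m]])
                m = l;
            if (r < n && backlog[heap[r]] > backlog[heap[m]])
                m = r;
            if (m == i)
                break;
            heap_swap(i, m);
            i = m;
        }
    }

    int main(void)
    {
        for (uint16_t i = 0; i < HEAP_SZ; i++) {
            heap[i] = pos[i] = i;
            backlog[i] = (i * 37) % 100;     /* arbitrary test backlogs */
        }
        for (int i = HEAP_SZ / 2 - 1; i >= 0; i--)
            heapify_down((uint16_t)i, HEAP_SZ);
        printf("largest backlog is on queue %u\n", (unsigned)heap[0]);
        return 0;
    }
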
1472 static int cake_advance_shaper(struct cake_sched_data *q, in cake_advance_shaper() argument
1482 if (q->rate_ns) { in cake_advance_shaper()
1484 u64 global_dur = (len * q->rate_ns) >> q->rate_shft; in cake_advance_shaper()
1495 q->time_next_packet = ktime_add_ns(q->time_next_packet, in cake_advance_shaper()
1498 q->failsafe_next_packet = \ in cake_advance_shaper()
1499 ktime_add_ns(q->failsafe_next_packet, in cake_advance_shaper()
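
Line 1484 computes each packet's serialization time as (len * q->rate_ns) >> q->rate_shft: the configured rate is pre-converted into a fixed-point nanoseconds-per-byte factor so the per-packet cost is one multiply and one shift before time_next_packet and failsafe_next_packet are advanced. A worked example of that arithmetic; the precomputation of rate_ns and rate_shft shown here is an illustrative assumption, not the kernel's exact code:

    /* Fixed-point "ns per byte" shaper arithmetic:
     * duration = (len * rate_ns) >> rate_shft. */
    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    int main(void)
    {
        uint64_t rate_bps = 100ULL * 1000 * 1000;    /* 100 Mbit/s */
        unsigned rate_shft = 16;

        /* ns per byte, scaled by 2^rate_shft to keep precision */
        uint64_t rate_ns = (NSEC_PER_SEC << rate_shft) * 8 / rate_bps;

        uint32_t len = 1514;                          /* one Ethernet frame */
        uint64_t dur = (len * rate_ns) >> rate_shft;  /* serialization time, ns */

        printf("rate_ns=%llu dur=%llu ns (~%.1f us)\n",
               (unsigned long long)rate_ns, (unsigned long long)dur,
               dur / 1000.0);
        return 0;
    }
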
1507 struct cake_sched_data *q = qdisc_priv(sch); in cake_drop() local
1515 if (!q->overflow_timeout) { in cake_drop()
1519 cake_heapify(q, i); in cake_drop()
1521 q->overflow_timeout = 65535; in cake_drop()
1524 qq = q->overflow_heap[0]; in cake_drop()
1528 b = &q->tins[tin]; in cake_drop()
1533 q->overflow_timeout = 0; in cake_drop()
1541 q->buffer_used -= skb->truesize; in cake_drop()
1551 if (q->rate_flags & CAKE_FLAG_INGRESS) in cake_drop()
1552 cake_advance_shaper(q, b, skb, now, true); in cake_drop()
1555 sch->q.qlen--; in cake_drop()
1557 cake_heapify(q, 0); in cake_drop()
1621 struct cake_sched_data *q = qdisc_priv(sch); in cake_select_tin() local
1630 mark = (skb->mark & q->fwmark_mask) >> q->fwmark_shft; in cake_select_tin()
1631 wash = !!(q->rate_flags & CAKE_FLAG_WASH); in cake_select_tin()
1635 if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT) in cake_select_tin()
1638 else if (mark && mark <= q->tin_cnt) in cake_select_tin()
1639 tin = q->tin_order[mark - 1]; in cake_select_tin()
1643 TC_H_MIN(skb->priority) <= q->tin_cnt) in cake_select_tin()
1644 tin = q->tin_order[TC_H_MIN(skb->priority) - 1]; in cake_select_tin()
1649 tin = q->tin_index[dscp]; in cake_select_tin()
1651 if (unlikely(tin >= q->tin_cnt)) in cake_select_tin()
1655 return &q->tins[tin]; in cake_select_tin()
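
Tin selection falls through from an explicit firewall-mark field (line 1630) to the skb->priority minor number, then to the DSCP-to-tin table, with out-of-range results clamped back to the first tin (line 1651). The mark field is extracted with a user-supplied mask whose shift is the index of the mask's lowest set bit (line 2683 in cake_change()). A small sketch of that extraction, with __builtin_ctz standing in for the kernel's __ffs():

    /* Extracting a tin selector from a firewall mark via mask and shift. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t fwmark_mask = 0x0000ff00;                   /* example mask */
        unsigned fwmark_shft = fwmark_mask ? __builtin_ctz(fwmark_mask) : 0;

        uint32_t skb_mark = 0x00002a00;                      /* example mark */
        uint32_t tin_sel = (skb_mark & fwmark_mask) >> fwmark_shft;

        printf("shift=%u tin_selector=%u\n", fwmark_shft, tin_sel);
        return 0;
    }
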
1661 struct cake_sched_data *q = qdisc_priv(sch); in cake_classify() local
1667 filter = rcu_dereference_bh(q->filter_list); in cake_classify()
1701 struct cake_sched_data *q = qdisc_priv(sch); in cake_enqueue() local
1711 idx = cake_classify(sch, &b, skb, q->flow_mode, &ret); in cake_enqueue()
1726 if (!sch->q.qlen) { in cake_enqueue()
1727 if (ktime_before(q->time_next_packet, now)) { in cake_enqueue()
1728 q->failsafe_next_packet = now; in cake_enqueue()
1729 q->time_next_packet = now; in cake_enqueue()
1730 } else if (ktime_after(q->time_next_packet, now) && in cake_enqueue()
1731 ktime_after(q->failsafe_next_packet, now)) { in cake_enqueue()
1733 min(ktime_to_ns(q->time_next_packet), in cake_enqueue()
1735 q->failsafe_next_packet)); in cake_enqueue()
1737 qdisc_watchdog_schedule_ns(&q->watchdog, next); in cake_enqueue()
1745 if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) { in cake_enqueue()
1758 get_cobalt_cb(segs)->adjusted_len = cake_overhead(q, in cake_enqueue()
1762 sch->q.qlen++; in cake_enqueue()
1765 q->buffer_used += segs->truesize; in cake_enqueue()
1774 q->avg_window_bytes += slen; in cake_enqueue()
1781 get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb); in cake_enqueue()
1784 if (q->ack_filter) in cake_enqueue()
1785 ack = cake_ack_filter(q, flow); in cake_enqueue()
1792 q->buffer_used += skb->truesize - ack->truesize; in cake_enqueue()
1793 if (q->rate_flags & CAKE_FLAG_INGRESS) in cake_enqueue()
1794 cake_advance_shaper(q, b, ack, now, true); in cake_enqueue()
1799 sch->q.qlen++; in cake_enqueue()
1800 q->buffer_used += skb->truesize; in cake_enqueue()
1809 q->avg_window_bytes += len; in cake_enqueue()
1812 if (q->overflow_timeout) in cake_enqueue()
1813 cake_heapify_up(q, b->overflow_idx[idx]); in cake_enqueue()
1816 if (q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS) { in cake_enqueue()
1818 ktime_to_ns(ktime_sub(now, q->last_packet_time)); in cake_enqueue()
1824 q->avg_packet_interval = \ in cake_enqueue()
1825 cake_ewma(q->avg_packet_interval, in cake_enqueue()
1827 (packet_interval > q->avg_packet_interval ? in cake_enqueue()
1830 q->last_packet_time = now; in cake_enqueue()
1832 if (packet_interval > q->avg_packet_interval) { in cake_enqueue()
1835 q->avg_window_begin)); in cake_enqueue()
1836 u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC; in cake_enqueue()
1839 q->avg_peak_bandwidth = in cake_enqueue()
1840 cake_ewma(q->avg_peak_bandwidth, b, in cake_enqueue()
1841 b > q->avg_peak_bandwidth ? 2 : 8); in cake_enqueue()
1842 q->avg_window_bytes = 0; in cake_enqueue()
1843 q->avg_window_begin = now; in cake_enqueue()
1846 ktime_add_ms(q->last_reconfig_time, in cake_enqueue()
1848 q->rate_bps = (q->avg_peak_bandwidth * 15) >> 4; in cake_enqueue()
1853 q->avg_window_bytes = 0; in cake_enqueue()
1854 q->last_packet_time = now; in cake_enqueue()
1872 if (cake_dsrc(q->flow_mode)) in cake_enqueue()
1875 if (cake_ddst(q->flow_mode)) in cake_enqueue()
1891 if (cake_dsrc(q->flow_mode)) in cake_enqueue()
1894 if (cake_ddst(q->flow_mode)) in cake_enqueue()
1899 if (q->buffer_used > q->buffer_max_used) in cake_enqueue()
1900 q->buffer_max_used = q->buffer_used; in cake_enqueue()
1902 if (q->buffer_used > q->buffer_limit) { in cake_enqueue()
1905 while (q->buffer_used > q->buffer_limit) { in cake_enqueue()
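
Lines 1816-1848 are the autorate-ingress estimator: packet interarrival times are smoothed with the usual shift EWMA, the bytes received in each measurement window are converted to a rate, that rate is folded into avg_peak_bandwidth (adapting faster upward than downward, line 1841), and the shaper is set to 15/16 of the estimate (line 1848). A simplified, assumption-labeled sketch of the window-to-rate step and the 15/16 scaling:

    /* Window bytes -> rate -> EWMA -> 15/16 shaper setting.  Units and the
     * window bookkeeping are simplified; only the shape of the arithmetic
     * is meant to match the listing above. */
    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    static uint64_t ewma(uint64_t avg, uint64_t sample, unsigned shift)
    {
        return avg - (avg >> shift) + (sample >> shift);
    }

    int main(void)
    {
        uint64_t avg_peak = 0;
        uint64_t window_bytes = 125000;               /* bytes this window */
        uint64_t window_ns    = 10ULL * 1000 * 1000;  /* 10 ms window */

        /* throughput over the window (bytes per second in this sketch) */
        uint64_t b = window_bytes * NSEC_PER_SEC / window_ns;

        /* adapt quickly upwards (shift 2), slowly downwards (shift 8) */
        avg_peak = ewma(avg_peak, b, b > avg_peak ? 2 : 8);

        /* shaper gets 15/16 of the estimated peak */
        uint64_t rate = (avg_peak * 15) >> 4;

        printf("estimate=%llu, shaper=%llu\n",
               (unsigned long long)avg_peak, (unsigned long long)rate);
        return 0;
    }
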
1916 struct cake_sched_data *q = qdisc_priv(sch); in cake_dequeue_one() local
1917 struct cake_tin_data *b = &q->tins[q->cur_tin]; in cake_dequeue_one()
1918 struct cake_flow *flow = &b->flows[q->cur_flow]; in cake_dequeue_one()
1925 b->backlogs[q->cur_flow] -= len; in cake_dequeue_one()
1928 q->buffer_used -= skb->truesize; in cake_dequeue_one()
1929 sch->q.qlen--; in cake_dequeue_one()
1931 if (q->overflow_timeout) in cake_dequeue_one()
1932 cake_heapify(q, b->overflow_idx[q->cur_flow]); in cake_dequeue_one()
1940 struct cake_sched_data *q = qdisc_priv(sch); in cake_clear_tin() local
1943 q->cur_tin = tin; in cake_clear_tin()
1944 for (q->cur_flow = 0; q->cur_flow < CAKE_QUEUES; q->cur_flow++) in cake_clear_tin()
1951 struct cake_sched_data *q = qdisc_priv(sch); in cake_dequeue() local
1952 struct cake_tin_data *b = &q->tins[q->cur_tin]; in cake_dequeue()
1964 if (!sch->q.qlen) in cake_dequeue()
1968 if (ktime_after(q->time_next_packet, now) && in cake_dequeue()
1969 ktime_after(q->failsafe_next_packet, now)) { in cake_dequeue()
1970 u64 next = min(ktime_to_ns(q->time_next_packet), in cake_dequeue()
1971 ktime_to_ns(q->failsafe_next_packet)); in cake_dequeue()
1974 qdisc_watchdog_schedule_ns(&q->watchdog, next); in cake_dequeue()
1979 if (!q->rate_ns) { in cake_dequeue()
1992 q->cur_tin++; in cake_dequeue()
1994 if (q->cur_tin >= q->tin_cnt) { in cake_dequeue()
1995 q->cur_tin = 0; in cake_dequeue()
1996 b = q->tins; in cake_dequeue()
2018 for (tin = 0; tin < q->tin_cnt; tin++) { in cake_dequeue()
2019 b = q->tins + tin; in cake_dequeue()
2033 q->cur_tin = best_tin; in cake_dequeue()
2034 b = q->tins + best_tin; in cake_dequeue()
2056 q->cur_flow = flow - b->flows; in cake_dequeue()
2075 if (cake_dsrc(q->flow_mode)) in cake_dequeue()
2078 if (cake_ddst(q->flow_mode)) in cake_dequeue()
2091 if (cake_dsrc(q->flow_mode)) in cake_dequeue()
2094 if (cake_ddst(q->flow_mode)) in cake_dequeue()
2127 if (cake_dsrc(q->flow_mode)) in cake_dequeue()
2130 if (cake_ddst(q->flow_mode)) in cake_dequeue()
2149 if (cake_dsrc(q->flow_mode)) in cake_dequeue()
2152 if (cake_ddst(q->flow_mode)) in cake_dequeue()
2166 !!(q->rate_flags & in cake_dequeue()
2172 if (q->rate_flags & CAKE_FLAG_INGRESS) { in cake_dequeue()
2173 len = cake_advance_shaper(q, b, skb, in cake_dequeue()
2183 if (q->rate_flags & CAKE_FLAG_INGRESS) in cake_dequeue()
2198 len = cake_advance_shaper(q, b, skb, now, false); in cake_dequeue()
2202 if (ktime_after(q->time_next_packet, now) && sch->q.qlen) { in cake_dequeue()
2203 u64 next = min(ktime_to_ns(q->time_next_packet), in cake_dequeue()
2204 ktime_to_ns(q->failsafe_next_packet)); in cake_dequeue()
2206 qdisc_watchdog_schedule_ns(&q->watchdog, next); in cake_dequeue()
2207 } else if (!sch->q.qlen) { in cake_dequeue()
2210 for (i = 0; i < q->tin_cnt; i++) { in cake_dequeue()
2211 if (q->tins[i].decaying_flow_count) { in cake_dequeue()
2214 q->tins[i].cparams.target); in cake_dequeue()
2216 qdisc_watchdog_schedule_ns(&q->watchdog, in cake_dequeue()
2223 if (q->overflow_timeout) in cake_dequeue()
2224 q->overflow_timeout--; in cake_dequeue()
2231 struct cake_sched_data *q = qdisc_priv(sch); in cake_reset() local
2234 if (!q->tins) in cake_reset()
2302 struct cake_sched_data *q = qdisc_priv(sch); in cake_config_besteffort() local
2303 struct cake_tin_data *b = &q->tins[0]; in cake_config_besteffort()
2305 u64 rate = q->rate_bps; in cake_config_besteffort()
2307 q->tin_cnt = 1; in cake_config_besteffort()
2309 q->tin_index = besteffort; in cake_config_besteffort()
2310 q->tin_order = normal_order; in cake_config_besteffort()
2313 us_to_ns(q->target), us_to_ns(q->interval)); in cake_config_besteffort()
2322 struct cake_sched_data *q = qdisc_priv(sch); in cake_config_precedence() local
2324 u64 rate = q->rate_bps; in cake_config_precedence()
2328 q->tin_cnt = 8; in cake_config_precedence()
2329 q->tin_index = precedence; in cake_config_precedence()
2330 q->tin_order = normal_order; in cake_config_precedence()
2332 for (i = 0; i < q->tin_cnt; i++) { in cake_config_precedence()
2333 struct cake_tin_data *b = &q->tins[i]; in cake_config_precedence()
2335 cake_set_rate(b, rate, mtu, us_to_ns(q->target), in cake_config_precedence()
2336 us_to_ns(q->interval)); in cake_config_precedence()
2411 struct cake_sched_data *q = qdisc_priv(sch); in cake_config_diffserv8() local
2413 u64 rate = q->rate_bps; in cake_config_diffserv8()
2417 q->tin_cnt = 8; in cake_config_diffserv8()
2420 q->tin_index = diffserv8; in cake_config_diffserv8()
2421 q->tin_order = normal_order; in cake_config_diffserv8()
2424 for (i = 0; i < q->tin_cnt; i++) { in cake_config_diffserv8()
2425 struct cake_tin_data *b = &q->tins[i]; in cake_config_diffserv8()
2427 cake_set_rate(b, rate, mtu, us_to_ns(q->target), in cake_config_diffserv8()
2428 us_to_ns(q->interval)); in cake_config_diffserv8()
2455 struct cake_sched_data *q = qdisc_priv(sch); in cake_config_diffserv4() local
2457 u64 rate = q->rate_bps; in cake_config_diffserv4()
2460 q->tin_cnt = 4; in cake_config_diffserv4()
2463 q->tin_index = diffserv4; in cake_config_diffserv4()
2464 q->tin_order = bulk_order; in cake_config_diffserv4()
2467 cake_set_rate(&q->tins[0], rate, mtu, in cake_config_diffserv4()
2468 us_to_ns(q->target), us_to_ns(q->interval)); in cake_config_diffserv4()
2469 cake_set_rate(&q->tins[1], rate >> 4, mtu, in cake_config_diffserv4()
2470 us_to_ns(q->target), us_to_ns(q->interval)); in cake_config_diffserv4()
2471 cake_set_rate(&q->tins[2], rate >> 1, mtu, in cake_config_diffserv4()
2472 us_to_ns(q->target), us_to_ns(q->interval)); in cake_config_diffserv4()
2473 cake_set_rate(&q->tins[3], rate >> 2, mtu, in cake_config_diffserv4()
2474 us_to_ns(q->target), us_to_ns(q->interval)); in cake_config_diffserv4()
2477 q->tins[0].tin_quantum = quantum; in cake_config_diffserv4()
2478 q->tins[1].tin_quantum = quantum >> 4; in cake_config_diffserv4()
2479 q->tins[2].tin_quantum = quantum >> 1; in cake_config_diffserv4()
2480 q->tins[3].tin_quantum = quantum >> 2; in cake_config_diffserv4()
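
The diffserv4 configuration (lines 2467-2480) gives its four tins shaped rates and scheduling quanta of 1, 1/16, 1/2 and 1/4 of the base values, implemented as plain right shifts:

    /* Diffserv4 per-tin rate and quantum fractions as right shifts. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t rate = 100ULL * 1000 * 1000;   /* 100 Mbit/s base rate */
        uint32_t quantum = 1514;                /* illustrative base quantum */
        unsigned shift[4] = { 0, 4, 1, 2 };     /* per-tin right shifts */

        for (int i = 0; i < 4; i++)
            printf("tin%d: rate=%llu quantum=%u\n", i,
                   (unsigned long long)(rate >> shift[i]),
                   quantum >> shift[i]);
        return 0;
    }
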
2492 struct cake_sched_data *q = qdisc_priv(sch); in cake_config_diffserv3() local
2494 u64 rate = q->rate_bps; in cake_config_diffserv3()
2497 q->tin_cnt = 3; in cake_config_diffserv3()
2500 q->tin_index = diffserv3; in cake_config_diffserv3()
2501 q->tin_order = bulk_order; in cake_config_diffserv3()
2504 cake_set_rate(&q->tins[0], rate, mtu, in cake_config_diffserv3()
2505 us_to_ns(q->target), us_to_ns(q->interval)); in cake_config_diffserv3()
2506 cake_set_rate(&q->tins[1], rate >> 4, mtu, in cake_config_diffserv3()
2507 us_to_ns(q->target), us_to_ns(q->interval)); in cake_config_diffserv3()
2508 cake_set_rate(&q->tins[2], rate >> 2, mtu, in cake_config_diffserv3()
2509 us_to_ns(q->target), us_to_ns(q->interval)); in cake_config_diffserv3()
2512 q->tins[0].tin_quantum = quantum; in cake_config_diffserv3()
2513 q->tins[1].tin_quantum = quantum >> 4; in cake_config_diffserv3()
2514 q->tins[2].tin_quantum = quantum >> 2; in cake_config_diffserv3()
2521 struct cake_sched_data *q = qdisc_priv(sch); in cake_reconfigure() local
2524 switch (q->tin_mode) { in cake_reconfigure()
2547 for (c = q->tin_cnt; c < CAKE_MAX_TINS; c++) { in cake_reconfigure()
2549 q->tins[c].cparams.mtu_time = q->tins[ft].cparams.mtu_time; in cake_reconfigure()
2552 q->rate_ns = q->tins[ft].tin_rate_ns; in cake_reconfigure()
2553 q->rate_shft = q->tins[ft].tin_rate_shft; in cake_reconfigure()
2555 if (q->buffer_config_limit) { in cake_reconfigure()
2556 q->buffer_limit = q->buffer_config_limit; in cake_reconfigure()
2557 } else if (q->rate_bps) { in cake_reconfigure()
2558 u64 t = q->rate_bps * q->interval; in cake_reconfigure()
2561 q->buffer_limit = max_t(u32, t, 4U << 20); in cake_reconfigure()
2563 q->buffer_limit = ~0; in cake_reconfigure()
2568 q->buffer_limit = min(q->buffer_limit, in cake_reconfigure()
2570 q->buffer_config_limit)); in cake_reconfigure()
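
When no memory limit is configured, lines 2557-2561 derive the buffer limit from rate_bps * interval, a bandwidth-delay-product style sizing with a 4 MB floor (max_t(u32, t, 4U << 20)). The divisor applied to that product is not visible in this listing, so the sketch below is an assumption-labeled BDP calculation rather than the kernel's exact scaling:

    /* BDP-style queue limit with a 4 MB floor (illustrative only). */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t rate_bps    = 50ULL * 1000 * 1000;  /* 50 Mbit/s */
        uint64_t interval_us = 100000;               /* 100 ms RTT estimate */

        /* bytes in flight for one interval: bits/s * s / 8 */
        uint64_t bdp_bytes = rate_bps * interval_us / 1000000 / 8;

        uint32_t buffer_limit = bdp_bytes > (4U << 20) ? (uint32_t)bdp_bytes
                                                       : (4U << 20);

        printf("BDP = %llu bytes, limit = %u bytes\n",
               (unsigned long long)bdp_bytes, buffer_limit);
        return 0;
    }
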
2576 struct cake_sched_data *q = qdisc_priv(sch); in cake_change() local
2587 q->flow_mode &= ~CAKE_FLOW_NAT_FLAG; in cake_change()
2588 q->flow_mode |= CAKE_FLOW_NAT_FLAG * in cake_change()
2598 q->rate_bps = nla_get_u64(tb[TCA_CAKE_BASE_RATE64]); in cake_change()
2601 q->tin_mode = nla_get_u32(tb[TCA_CAKE_DIFFSERV_MODE]); in cake_change()
2605 q->rate_flags |= CAKE_FLAG_WASH; in cake_change()
2607 q->rate_flags &= ~CAKE_FLAG_WASH; in cake_change()
2611 q->flow_mode = ((q->flow_mode & CAKE_FLOW_NAT_FLAG) | in cake_change()
2616 q->atm_mode = nla_get_u32(tb[TCA_CAKE_ATM]); in cake_change()
2619 q->rate_overhead = nla_get_s32(tb[TCA_CAKE_OVERHEAD]); in cake_change()
2620 q->rate_flags |= CAKE_FLAG_OVERHEAD; in cake_change()
2622 q->max_netlen = 0; in cake_change()
2623 q->max_adjlen = 0; in cake_change()
2624 q->min_netlen = ~0; in cake_change()
2625 q->min_adjlen = ~0; in cake_change()
2629 q->rate_flags &= ~CAKE_FLAG_OVERHEAD; in cake_change()
2631 q->max_netlen = 0; in cake_change()
2632 q->max_adjlen = 0; in cake_change()
2633 q->min_netlen = ~0; in cake_change()
2634 q->min_adjlen = ~0; in cake_change()
2638 q->rate_mpu = nla_get_u32(tb[TCA_CAKE_MPU]); in cake_change()
2641 q->interval = nla_get_u32(tb[TCA_CAKE_RTT]); in cake_change()
2643 if (!q->interval) in cake_change()
2644 q->interval = 1; in cake_change()
2648 q->target = nla_get_u32(tb[TCA_CAKE_TARGET]); in cake_change()
2650 if (!q->target) in cake_change()
2651 q->target = 1; in cake_change()
2656 q->rate_flags |= CAKE_FLAG_AUTORATE_INGRESS; in cake_change()
2658 q->rate_flags &= ~CAKE_FLAG_AUTORATE_INGRESS; in cake_change()
2663 q->rate_flags |= CAKE_FLAG_INGRESS; in cake_change()
2665 q->rate_flags &= ~CAKE_FLAG_INGRESS; in cake_change()
2669 q->ack_filter = nla_get_u32(tb[TCA_CAKE_ACK_FILTER]); in cake_change()
2672 q->buffer_config_limit = nla_get_u32(tb[TCA_CAKE_MEMORY]); in cake_change()
2676 q->rate_flags |= CAKE_FLAG_SPLIT_GSO; in cake_change()
2678 q->rate_flags &= ~CAKE_FLAG_SPLIT_GSO; in cake_change()
2682 q->fwmark_mask = nla_get_u32(tb[TCA_CAKE_FWMARK]); in cake_change()
2683 q->fwmark_shft = q->fwmark_mask ? __ffs(q->fwmark_mask) : 0; in cake_change()
2686 if (q->tins) { in cake_change()
2697 struct cake_sched_data *q = qdisc_priv(sch); in cake_destroy() local
2699 qdisc_watchdog_cancel(&q->watchdog); in cake_destroy()
2700 tcf_block_put(q->block); in cake_destroy()
2701 kvfree(q->tins); in cake_destroy()
2707 struct cake_sched_data *q = qdisc_priv(sch); in cake_init() local
2711 q->tin_mode = CAKE_DIFFSERV_DIFFSERV3; in cake_init()
2712 q->flow_mode = CAKE_FLOW_TRIPLE; in cake_init()
2714 q->rate_bps = 0; /* unlimited by default */ in cake_init()
2716 q->interval = 100000; /* 100ms default */ in cake_init()
2717 q->target = 5000; /* 5ms: codel RFC argues in cake_init()
2720 q->rate_flags |= CAKE_FLAG_SPLIT_GSO; in cake_init()
2721 q->cur_tin = 0; in cake_init()
2722 q->cur_flow = 0; in cake_init()
2724 qdisc_watchdog_init(&q->watchdog, sch); in cake_init()
2733 err = tcf_block_get(&q->block, &q->filter_list, sch, extack); in cake_init()
2741 q->tins = kvcalloc(CAKE_MAX_TINS, sizeof(struct cake_tin_data), in cake_init()
2743 if (!q->tins) in cake_init()
2747 struct cake_tin_data *b = q->tins + i; in cake_init()
2763 q->overflow_heap[k].t = i; in cake_init()
2764 q->overflow_heap[k].b = j; in cake_init()
2770 q->avg_peak_bandwidth = q->rate_bps; in cake_init()
2771 q->min_netlen = ~0; in cake_init()
2772 q->min_adjlen = ~0; in cake_init()
2778 struct cake_sched_data *q = qdisc_priv(sch); in cake_dump() local
2785 if (nla_put_u64_64bit(skb, TCA_CAKE_BASE_RATE64, q->rate_bps, in cake_dump()
2790 q->flow_mode & CAKE_FLOW_MASK)) in cake_dump()
2793 if (nla_put_u32(skb, TCA_CAKE_RTT, q->interval)) in cake_dump()
2796 if (nla_put_u32(skb, TCA_CAKE_TARGET, q->target)) in cake_dump()
2799 if (nla_put_u32(skb, TCA_CAKE_MEMORY, q->buffer_config_limit)) in cake_dump()
2803 !!(q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS))) in cake_dump()
2807 !!(q->rate_flags & CAKE_FLAG_INGRESS))) in cake_dump()
2810 if (nla_put_u32(skb, TCA_CAKE_ACK_FILTER, q->ack_filter)) in cake_dump()
2814 !!(q->flow_mode & CAKE_FLOW_NAT_FLAG))) in cake_dump()
2817 if (nla_put_u32(skb, TCA_CAKE_DIFFSERV_MODE, q->tin_mode)) in cake_dump()
2821 !!(q->rate_flags & CAKE_FLAG_WASH))) in cake_dump()
2824 if (nla_put_u32(skb, TCA_CAKE_OVERHEAD, q->rate_overhead)) in cake_dump()
2827 if (!(q->rate_flags & CAKE_FLAG_OVERHEAD)) in cake_dump()
2831 if (nla_put_u32(skb, TCA_CAKE_ATM, q->atm_mode)) in cake_dump()
2834 if (nla_put_u32(skb, TCA_CAKE_MPU, q->rate_mpu)) in cake_dump()
2838 !!(q->rate_flags & CAKE_FLAG_SPLIT_GSO))) in cake_dump()
2841 if (nla_put_u32(skb, TCA_CAKE_FWMARK, q->fwmark_mask)) in cake_dump()
2853 struct cake_sched_data *q = qdisc_priv(sch); in cake_dump_stats() local
2870 PUT_STAT_U64(CAPACITY_ESTIMATE64, q->avg_peak_bandwidth); in cake_dump_stats()
2871 PUT_STAT_U32(MEMORY_LIMIT, q->buffer_limit); in cake_dump_stats()
2872 PUT_STAT_U32(MEMORY_USED, q->buffer_max_used); in cake_dump_stats()
2873 PUT_STAT_U32(AVG_NETOFF, ((q->avg_netoff + 0x8000) >> 16)); in cake_dump_stats()
2874 PUT_STAT_U32(MAX_NETLEN, q->max_netlen); in cake_dump_stats()
2875 PUT_STAT_U32(MAX_ADJLEN, q->max_adjlen); in cake_dump_stats()
2876 PUT_STAT_U32(MIN_NETLEN, q->min_netlen); in cake_dump_stats()
2877 PUT_STAT_U32(MIN_ADJLEN, q->min_adjlen); in cake_dump_stats()
2896 for (i = 0; i < q->tin_cnt; i++) { in cake_dump_stats()
2897 struct cake_tin_data *b = &q->tins[q->tin_order[i]]; in cake_dump_stats()
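
avg_netoff is maintained in Q16.16 fixed point (the sample at line 1361 is off << 16), so line 2873 rounds it back to whole bytes for the AVG_NETOFF stat by adding half of one unit before shifting:

    /* Rounding a Q16.16 fixed-point value to an integer: add 0x8000
     * (half a unit) before the shift, as the AVG_NETOFF stat does. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t q16 = (14 << 16) + 0x9000;          /* 14.5625 in Q16.16 */
        uint32_t rounded = (q16 + 0x8000) >> 16;     /* -> 15 */

        printf("%u\n", rounded);
        return 0;
    }
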
2965 static void cake_unbind(struct Qdisc *q, unsigned long cl) in cake_unbind() argument
2972 struct cake_sched_data *q = qdisc_priv(sch); in cake_tcf_block() local
2976 return q->block; in cake_tcf_block()
2989 struct cake_sched_data *q = qdisc_priv(sch); in cake_dump_class_stats() local
2995 if (idx < CAKE_QUEUES * q->tin_cnt) { in cake_dump_class_stats()
2997 &q->tins[q->tin_order[idx / CAKE_QUEUES]]; in cake_dump_class_stats()
3062 struct cake_sched_data *q = qdisc_priv(sch); in cake_walk() local
3068 for (i = 0; i < q->tin_cnt; i++) { in cake_walk()
3069 struct cake_tin_data *b = &q->tins[q->tin_order[i]]; in cake_walk()