Lines matching refs: q in net/sched/sch_hhf.c (the Linux heavy-hitter filter qdisc). Most hits are the qdisc's private data, struct hhf_sched_data *q; the sch->q.qlen hits are the generic Qdisc queue.

182 struct hhf_sched_data *q) in seek_list() argument
191 u32 prev = flow->hit_timestamp + q->hhf_evict_timeout; in seek_list()
201 q->hh_flows_current_cnt--; in seek_list()
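
The seek_list() matches above show lazy eviction: while looking up the flow for `hash`, any entry whose `hit_timestamp + hhf_evict_timeout` has passed is deleted and hh_flows_current_cnt is decremented. Below is a minimal userspace sketch of that pattern with illustrative names (a singly linked list stands in for the kernel's list_head chain; the kernel additionally keeps one expired entry per bucket for reuse, which this sketch omits):

#include <stdlib.h>

struct hh_entry {
	struct hh_entry *next;
	unsigned int hash_id;
	unsigned long hit_timestamp;     /* last time this flow was seen */
};

/* Look up `hash` in one hash bucket, evicting entries that have not
 * been hit within evict_timeout (cf. q->hhf_evict_timeout). */
struct hh_entry *seek_bucket(struct hh_entry **head, unsigned int hash,
			     unsigned long now, unsigned long evict_timeout,
			     unsigned int *current_cnt)
{
	struct hh_entry **pp = head, *e;

	while ((e = *pp) != NULL) {
		if (e->hash_id == hash)
			return e;                /* live heavy-hitter */
		if (now - e->hit_timestamp > evict_timeout) {
			*pp = e->next;           /* unlink stale entry */
			free(e);
			(*current_cnt)--;        /* hh_flows_current_cnt-- */
		} else {
			pp = &e->next;
		}
	}
	return NULL;
}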
213 struct hhf_sched_data *q) in alloc_new_hh() argument
221 u32 prev = flow->hit_timestamp + q->hhf_evict_timeout; in alloc_new_hh()
228 if (q->hh_flows_current_cnt >= q->hh_flows_limit) { in alloc_new_hh()
229 q->hh_flows_overlimit++; in alloc_new_hh()
237 q->hh_flows_current_cnt++; in alloc_new_hh()
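
alloc_new_hh() enforces the table cap seen on lines 228-237: at hh_flows_limit the allocation is refused and hh_flows_overlimit counts the rejection; otherwise a zeroed entry is allocated and hh_flows_current_cnt grows. A sketch of that accounting, with invented names:

#include <stdlib.h>

struct hh_entry;	/* as in the seek_bucket() sketch above */

/* Allocate state for a newly detected heavy-hitter, refusing once the
 * table is full (cf. q->hh_flows_limit / q->hh_flows_overlimit). */
struct hh_entry *alloc_entry(size_t entry_size, unsigned int *current_cnt,
			     unsigned int limit, unsigned long *overlimit)
{
	struct hh_entry *e;

	if (*current_cnt >= limit) {
		(*overlimit)++;		/* rejection is only counted */
		return NULL;
	}
	e = calloc(1, entry_size);	/* kzalloc() analog */
	if (e)
		(*current_cnt)++;
	return e;
}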
249 struct hhf_sched_data *q = qdisc_priv(sch); in hhf_classify() local
259 prev = q->hhf_arrays_reset_timestamp + q->hhf_reset_timeout; in hhf_classify()
262 bitmap_zero(q->hhf_valid_bits[i], HHF_ARRAYS_LEN); in hhf_classify()
263 q->hhf_arrays_reset_timestamp = now; in hhf_classify()
267 hash = skb_get_hash_perturb(skb, &q->perturbation); in hhf_classify()
271 flow = seek_list(hash, &q->hh_flows[flow_pos], q); in hhf_classify()
294 if (!test_bit(filter_pos[i], q->hhf_valid_bits[i])) { in hhf_classify()
295 q->hhf_arrays[i][filter_pos[i]] = 0; in hhf_classify()
296 __set_bit(filter_pos[i], q->hhf_valid_bits[i]); in hhf_classify()
299 val = q->hhf_arrays[i][filter_pos[i]] + pkt_len; in hhf_classify()
305 if (min_hhf_val > q->hhf_admit_bytes) { in hhf_classify()
307 flow = alloc_new_hh(&q->hh_flows[flow_pos], q); in hhf_classify()
312 q->hh_flows_total_cnt++; in hhf_classify()
322 if (q->hhf_arrays[i][filter_pos[i]] < min_hhf_val) in hhf_classify()
323 q->hhf_arrays[i][filter_pos[i]] = min_hhf_val; in hhf_classify()
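
hhf_classify() is the core of the scheduler. Every hhf_reset_timeout (line 259) the counter arrays are invalidated cheaply by zeroing the per-array valid bitmaps rather than the arrays themselves; a slot left stale is re-zeroed on first touch (lines 294-296). The packet's bytes are then added to one counter per array, the minimum across arrays serves as the (over)estimate of the flow's recent byte count, and a flow whose minimum exceeds hhf_admit_bytes is promoted into the heavy-hitter table (lines 305-312). Lines 322-323 are the conservative update: only counters still below the new minimum are raised to it, which keeps the overestimate tight. A self-contained sketch of such a multi-stage filter with conservative update (stage count, array length, and the per-stage hash are placeholders, not the kernel's values):

#include <stdbool.h>
#include <stdint.h>

#define STAGES 4
#define SLOTS  1024

static uint32_t counters[STAGES][SLOTS];

/* Toy per-stage hash; the kernel derives all positions from a single
 * perturbed skb hash instead. */
static uint32_t stage_hash(uint32_t flow_hash, int stage)
{
	return (flow_hash ^ (0x9e3779b9u * (uint32_t)(stage + 1))) % SLOTS;
}

/* Add pkt_len bytes for this flow and report whether its estimated
 * byte count crosses the admission threshold (cf. q->hhf_admit_bytes). */
static bool filter_admit(uint32_t flow_hash, uint32_t pkt_len,
			 uint32_t admit_bytes)
{
	uint32_t pos[STAGES], min_val = UINT32_MAX;

	for (int i = 0; i < STAGES; i++) {
		pos[i] = stage_hash(flow_hash, i);
		uint32_t val = counters[i][pos[i]] + pkt_len;
		if (val < min_val)
			min_val = val;
	}
	/* Conservative update: raise only counters below the new
	 * minimum, limiting overestimation of small flows. */
	for (int i = 0; i < STAGES; i++)
		if (counters[i][pos[i]] < min_val)
			counters[i][pos[i]] = min_val;

	return min_val > admit_bytes;
}

In the kernel, a flow that crosses the threshold also gets an entry via alloc_new_hh() (line 307) so that later packets bypass the filter through seek_list().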
351 struct hhf_sched_data *q = qdisc_priv(sch); in hhf_drop() local
355 bucket = &q->buckets[WDRR_BUCKET_FOR_HH]; in hhf_drop()
357 bucket = &q->buckets[WDRR_BUCKET_FOR_NON_HH]; in hhf_drop()
362 sch->q.qlen--; in hhf_drop()
368 return bucket - q->buckets; in hhf_drop()
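
hhf_drop() always tries the heavy-hitter bucket first and only falls back to the non-heavy-hitter one, so overlimit drops land on the flows causing the backlog; its return value is the bucket's array index, recovered by pointer arithmetic (line 368). A small sketch of that selection with simplified types:

enum { BUCKET_HH = 0, BUCKET_NON_HH = 1, BUCKET_CNT = 2 };

struct bucket {
	unsigned int backlog;	/* stand-in for a non-empty packet list */
};

static unsigned int pick_drop_bucket(struct bucket buckets[BUCKET_CNT])
{
	struct bucket *b = &buckets[BUCKET_HH];

	if (!b->backlog)		/* prefer heavy-hitters */
		b = &buckets[BUCKET_NON_HH];
	if (b->backlog)
		b->backlog--;		/* mirrors sch->q.qlen-- */
	/* Element index from the pointer difference, the same
	 * "bucket - q->buckets" idiom as above. */
	return (unsigned int)(b - buckets);
}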
374 struct hhf_sched_data *q = qdisc_priv(sch); in hhf_enqueue() local
381 bucket = &q->buckets[idx]; in hhf_enqueue()
395 list_add_tail(&bucket->bucketchain, &q->old_buckets); in hhf_enqueue()
397 weight = q->hhf_non_hh_weight; in hhf_enqueue()
398 list_add_tail(&bucket->bucketchain, &q->new_buckets); in hhf_enqueue()
400 bucket->deficit = weight * q->quantum; in hhf_enqueue()
402 if (++sch->q.qlen <= sch->limit) in hhf_enqueue()
406 q->drop_overlimit++; in hhf_enqueue()
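
On enqueue, a bucket that was idle is re-armed for the WDRR rounds (lines 395-400): the heavy-hitter bucket joins the old list with weight 1, while the non-heavy-hitter bucket joins the new list with hhf_non_hh_weight, so short bursts of ordinary traffic get priority, much like fq_codel's new/old flow lists; either way the bucket's deficit is reset to weight * quantum. If the enqueue pushes sch->q.qlen past sch->limit, a packet is dropped and drop_overlimit is bumped (line 406). A sketch of the activation step, with list membership reduced to a flag:

#include <stdbool.h>

struct wdrr_bucket_s {
	int deficit;
	bool on_round_list;
	bool is_heavy;
};

static void activate_bucket(struct wdrr_bucket_s *b, unsigned int quantum,
			    unsigned int non_hh_weight)
{
	if (b->on_round_list)
		return;
	/* Heavy-hitters go straight to the "old" list with weight 1;
	 * non-heavy-hitters start on the "new" list with the larger
	 * configured weight. */
	unsigned int weight = b->is_heavy ? 1 : non_hh_weight;
	b->deficit = weight * quantum;
	b->on_round_list = true;
}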
420 struct hhf_sched_data *q = qdisc_priv(sch); in hhf_dequeue() local
426 head = &q->new_buckets; in hhf_dequeue()
428 head = &q->old_buckets; in hhf_dequeue()
435 int weight = (bucket - q->buckets == WDRR_BUCKET_FOR_HH) ? in hhf_dequeue()
436 1 : q->hhf_non_hh_weight; in hhf_dequeue()
438 bucket->deficit += weight * q->quantum; in hhf_dequeue()
439 list_move_tail(&bucket->bucketchain, &q->old_buckets); in hhf_dequeue()
445 sch->q.qlen--; in hhf_dequeue()
451 if ((head == &q->new_buckets) && !list_empty(&q->old_buckets)) in hhf_dequeue()
452 list_move_tail(&bucket->bucketchain, &q->old_buckets); in hhf_dequeue()
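
The dequeue path is classic deficit round robin over the two buckets: serve the head of new_buckets, else old_buckets; a bucket whose deficit has run out gets weight * quantum added (lines 435-438) and is rotated to the tail of old_buckets before the loop retries; a new bucket that empties while old buckets still wait is likewise rotated there (lines 451-452) so the old buckets are not starved. A userspace sketch of one service decision (queue internals stubbed out; in the kernel the dequeued packet's length is then subtracted from the deficit):

struct drr_bucket {
	int deficit;
	unsigned int weight;
	unsigned int head_pkt_len;	/* 0 means the bucket is empty */
};

/* Returns the bytes sent from b, or 0 when the caller should rotate
 * this bucket and try the next one (deficit refilled, or empty). */
static unsigned int drr_service(struct drr_bucket *b, unsigned int quantum)
{
	if (b->deficit <= 0) {
		b->deficit += b->weight * quantum;	/* refill credit */
		return 0;
	}
	unsigned int len = b->head_pkt_len;
	if (len)
		b->deficit -= (int)len;			/* pay for the packet */
	return len;
}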
474 struct hhf_sched_data *q = qdisc_priv(sch); in hhf_destroy() local
477 kvfree(q->hhf_arrays[i]); in hhf_destroy()
478 kvfree(q->hhf_valid_bits[i]); in hhf_destroy()
481 if (!q->hh_flows) in hhf_destroy()
486 struct list_head *head = &q->hh_flows[i]; in hhf_destroy()
495 kvfree(q->hh_flows); in hhf_destroy()
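
hhf_destroy() can run against a partially initialized qdisc: kvfree() ignores NULL, so the arrays and bitmaps are freed unconditionally, and only the per-bucket flow-list walk needs the hh_flows NULL guard (line 481). A userspace analog of that NULL-tolerant teardown (free() is likewise a no-op on NULL):

#include <stdlib.h>

#define ARRAYS_CNT 4

struct sched_data {
	unsigned int *arrays[ARRAYS_CNT];
	unsigned long *valid_bits[ARRAYS_CNT];
	void *flows;		/* hash table; NULL if init bailed early */
};

static void destroy(struct sched_data *q)
{
	for (int i = 0; i < ARRAYS_CNT; i++) {
		free(q->arrays[i]);	/* safe on NULL */
		free(q->valid_bits[i]);
	}
	if (!q->flows)
		return;			/* nothing else was allocated */
	/* ...walk and free each per-bucket flow list here... */
	free(q->flows);
}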
511 struct hhf_sched_data *q = qdisc_priv(sch); in hhf_change() local
516 u32 new_quantum = q->quantum; in hhf_change()
517 u32 new_hhf_non_hh_weight = q->hhf_non_hh_weight; in hhf_change()
539 q->quantum = new_quantum; in hhf_change()
540 q->hhf_non_hh_weight = new_hhf_non_hh_weight; in hhf_change()
543 q->hh_flows_limit = nla_get_u32(tb[TCA_HHF_HH_FLOWS_LIMIT]); in hhf_change()
548 q->hhf_reset_timeout = usecs_to_jiffies(us); in hhf_change()
552 q->hhf_admit_bytes = nla_get_u32(tb[TCA_HHF_ADMIT_BYTES]); in hhf_change()
557 q->hhf_evict_timeout = usecs_to_jiffies(us); in hhf_change()
560 qlen = sch->q.qlen; in hhf_change()
562 while (sch->q.qlen > sch->limit) { in hhf_change()
567 qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, in hhf_change()
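
After the new parameters are applied (note the usecs_to_jiffies() conversions for both timeouts), hhf_change() shrinks the queue to the possibly lower limit and reports the shed packets and bytes upward through qdisc_tree_reduce_backlog() in one call, keeping ancestor qdiscs' counters consistent. The shape of that trim-and-report step, with toy types:

struct toy_sch {
	unsigned int qlen;
	unsigned int limit;
};

static void drop_one(struct toy_sch *sch)
{
	sch->qlen--;		/* stand-in for dequeue-and-free */
}

/* Trim the queue after a limit change and return how many packets
 * vanished; the kernel feeds this delta (plus the byte delta) to
 * qdisc_tree_reduce_backlog(). */
static unsigned int trim_to_limit(struct toy_sch *sch)
{
	unsigned int prev_qlen = sch->qlen;

	while (sch->qlen > sch->limit)
		drop_one(sch);
	return prev_qlen - sch->qlen;
}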
577 struct hhf_sched_data *q = qdisc_priv(sch); in hhf_init() local
581 q->quantum = psched_mtu(qdisc_dev(sch)); in hhf_init()
582 get_random_bytes(&q->perturbation, sizeof(q->perturbation)); in hhf_init()
583 INIT_LIST_HEAD(&q->new_buckets); in hhf_init()
584 INIT_LIST_HEAD(&q->old_buckets); in hhf_init()
587 q->hhf_reset_timeout = HZ / 25; /* 40 ms */ in hhf_init()
588 q->hhf_admit_bytes = 131072; /* 128 KB */ in hhf_init()
589 q->hhf_evict_timeout = HZ; /* 1 sec */ in hhf_init()
590 q->hhf_non_hh_weight = 2; in hhf_init()
599 if (!q->hh_flows) { in hhf_init()
601 q->hh_flows = kvcalloc(HH_FLOWS_CNT, sizeof(struct list_head), in hhf_init()
603 if (!q->hh_flows) in hhf_init()
606 INIT_LIST_HEAD(&q->hh_flows[i]); in hhf_init()
609 q->hh_flows_limit = 2 * HH_FLOWS_CNT; in hhf_init()
610 q->hh_flows_overlimit = 0; in hhf_init()
611 q->hh_flows_total_cnt = 0; in hhf_init()
612 q->hh_flows_current_cnt = 0; in hhf_init()
616 q->hhf_arrays[i] = kvcalloc(HHF_ARRAYS_LEN, in hhf_init()
619 if (!q->hhf_arrays[i]) { in hhf_init()
626 q->hhf_arrays_reset_timestamp = hhf_time_stamp(); in hhf_init()
630 q->hhf_valid_bits[i] = kvzalloc(HHF_ARRAYS_LEN / in hhf_init()
632 if (!q->hhf_valid_bits[i]) { in hhf_init()
642 struct wdrr_bucket *bucket = q->buckets + i; in hhf_init()
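
hhf_init() first fills in the defaults visible above: quantum is the device MTU, the filter resets every 40 ms, admission requires 128 KB within that window, heavy-hitter state is evicted after 1 s idle, and non-heavy-hitters get twice the weight. It then allocates the flow table, the counter arrays, and the valid-bit bitmaps with kvcalloc()/kvzalloc(); on failure it returns an error and relies on the destroy path tolerating the partial state. A sketch of the allocate-or-unwind pattern for one such array group (sizes are placeholders):

#include <stdlib.h>

#define ARRAYS_CNT 4
#define ARRAYS_LEN 1024

static int init_arrays(unsigned int *arrays[ARRAYS_CNT])
{
	int i;

	for (i = 0; i < ARRAYS_CNT; i++) {
		arrays[i] = calloc(ARRAYS_LEN, sizeof(**arrays));
		if (!arrays[i])
			goto err;
	}
	return 0;
err:
	while (--i >= 0)	/* unwind partial allocations */
		free(arrays[i]);
	return -1;
}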
653 struct hhf_sched_data *q = qdisc_priv(sch); in hhf_dump() local
661 nla_put_u32(skb, TCA_HHF_QUANTUM, q->quantum) || in hhf_dump()
662 nla_put_u32(skb, TCA_HHF_HH_FLOWS_LIMIT, q->hh_flows_limit) || in hhf_dump()
664 jiffies_to_usecs(q->hhf_reset_timeout)) || in hhf_dump()
665 nla_put_u32(skb, TCA_HHF_ADMIT_BYTES, q->hhf_admit_bytes) || in hhf_dump()
667 jiffies_to_usecs(q->hhf_evict_timeout)) || in hhf_dump()
668 nla_put_u32(skb, TCA_HHF_NON_HH_WEIGHT, q->hhf_non_hh_weight)) in hhf_dump()
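
hhf_dump() emits the parameters as a single || chain of nla_put_u32() calls, so the first put that fails (typically for lack of message space) short-circuits straight to the failure path; jiffies-based timeouts are converted back to microseconds on the way out. A sketch of the short-circuit pattern with a stand-in emitter (emit_u32() only mimics nla_put_u32()'s nonzero-on-failure convention):

#include <stddef.h>

struct msg {
	unsigned char *pos, *end;
};

/* Stand-in for nla_put_u32(): nonzero return means "no room, abort". */
static int emit_u32(struct msg *m, int attr, unsigned int val)
{
	(void)attr;
	(void)val;
	if ((size_t)(m->end - m->pos) < 8)
		return -1;
	/* ...a real emitter would write a type/length header and val... */
	m->pos += 8;
	return 0;
}

static int dump_params(struct msg *m, unsigned int quantum,
		       unsigned int flows_limit)
{
	/* First failing put short-circuits the rest of the chain. */
	if (emit_u32(m, 1 /* QUANTUM */, quantum) ||
	    emit_u32(m, 2 /* FLOWS_LIMIT */, flows_limit))
		return -1;
	return 0;
}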
679 struct hhf_sched_data *q = qdisc_priv(sch); in hhf_dump_stats() local
681 .drop_overlimit = q->drop_overlimit, in hhf_dump_stats()
682 .hh_overlimit = q->hh_flows_overlimit, in hhf_dump_stats()
683 .hh_tot_count = q->hh_flows_total_cnt, in hhf_dump_stats()
684 .hh_cur_count = q->hh_flows_current_cnt, in hhf_dump_stats()
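
Finally, hhf_dump_stats() snapshots the four counters into one designated initializer, so userspace (tc -s qdisc) sees a consistent set: packets dropped over the queue limit, heavy-hitter allocations refused at the cap, and the total versus currently tracked heavy-hitter counts. A sketch of the same snapshot idiom with stand-in types (field names only loosely follow tc_hhf_xstats):

struct hh_counters {
	unsigned int drop_overlimit;
	unsigned int hh_flows_overlimit;
	unsigned int hh_flows_total_cnt;
	unsigned int hh_flows_current_cnt;
};

struct hh_xstats {
	unsigned int drop_overlimit;	/* enqueues beyond sch->limit */
	unsigned int hh_overlimit;	/* entries refused at hh_flows_limit */
	unsigned int hh_tot_count;	/* heavy-hitters ever detected */
	unsigned int hh_cur_count;	/* heavy-hitters tracked right now */
};

static struct hh_xstats snapshot(const struct hh_counters *q)
{
	return (struct hh_xstats) {
		.drop_overlimit	= q->drop_overlimit,
		.hh_overlimit	= q->hh_flows_overlimit,
		.hh_tot_count	= q->hh_flows_total_cnt,
		.hh_cur_count	= q->hh_flows_current_cnt,
	};
}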