/*
 * Copyright (c) 2016 Qualcomm Atheros, Inc
 *
 * GPL v2
 *
 * Based on net/sched/sch_fq_codel.c
 */
#ifndef __NET_SCHED_FQ_IMPL_H
#define __NET_SCHED_FQ_IMPL_H

#include <net/fq.h>

/* functions that are embedded into includer */

/* update accounting after @skb has been removed from @flow */
static void fq_adjust_removal(struct fq *fq,
			      struct fq_flow *flow,
			      struct sk_buff *skb)
{
	struct fq_tin *tin = flow->tin;

	tin->backlog_bytes -= skb->len;
	tin->backlog_packets--;
	flow->backlog -= skb->len;
	fq->backlog--;
	fq->memory_usage -= skb->truesize;
}

/* re-sort @flow within fq->backlogs, which is kept in descending backlog
 * order, or drop it from the list once its backlog reaches zero
 */
static void fq_rejigger_backlog(struct fq *fq, struct fq_flow *flow)
{
	struct fq_flow *i;

	if (flow->backlog == 0) {
		list_del_init(&flow->backlogchain);
	} else {
		i = flow;

		list_for_each_entry_continue(i, &fq->backlogs, backlogchain)
			if (i->backlog < flow->backlog)
				break;

		list_move_tail(&flow->backlogchain,
			       &i->backlogchain);
	}
}

static struct sk_buff *fq_flow_dequeue(struct fq *fq,
				       struct fq_flow *flow)
{
	struct sk_buff *skb;

	lockdep_assert_held(&fq->lock);

	skb = __skb_dequeue(&flow->queue);
	if (!skb)
		return NULL;

	fq_adjust_removal(fq, flow, skb);
	fq_rejigger_backlog(fq, flow);

	return skb;
}

/* deficit round-robin dequeue across the tin's new/old flow lists, as in
 * net/sched/sch_fq_codel.c
 */
static struct sk_buff *fq_tin_dequeue(struct fq *fq,
				      struct fq_tin *tin,
				      fq_tin_dequeue_t dequeue_func)
{
	struct fq_flow *flow;
	struct list_head *head;
	struct sk_buff *skb;

	lockdep_assert_held(&fq->lock);

begin:
	head = &tin->new_flows;
	if (list_empty(head)) {
		head = &tin->old_flows;
		if (list_empty(head))
			return NULL;
	}

	flow = list_first_entry(head, struct fq_flow, flowchain);

	if (flow->deficit <= 0) {
		flow->deficit += fq->quantum;
		list_move_tail(&flow->flowchain,
			       &tin->old_flows);
		goto begin;
	}

	skb = dequeue_func(fq, tin, flow);
	if (!skb) {
		/* force a pass through old_flows to prevent starvation */
		if ((head == &tin->new_flows) &&
		    !list_empty(&tin->old_flows)) {
			list_move_tail(&flow->flowchain, &tin->old_flows);
		} else {
			list_del_init(&flow->flowchain);
			flow->tin = NULL;
		}
		goto begin;
	}

	flow->deficit -= skb->len;
	tin->tx_bytes += skb->len;
	tin->tx_packets++;

	return skb;
}

/* hash the skb into one of fq->flows_cnt buckets */
static u32 fq_flow_idx(struct fq *fq, struct sk_buff *skb)
{
	u32 hash = skb_get_hash_perturb(skb, fq->perturbation);

	return reciprocal_scale(hash, fq->flows_cnt);
}

static struct fq_flow *fq_flow_classify(struct fq *fq,
					struct fq_tin *tin, u32 idx,
					struct sk_buff *skb,
					fq_flow_get_default_t get_default_func)
{
	struct fq_flow *flow;

	lockdep_assert_held(&fq->lock);

	flow = &fq->flows[idx];
	if (flow->tin && flow->tin != tin) {
		/* hash bucket owned by another tin - fall back to a
		 * per-tin default flow
		 */
		flow = get_default_func(fq, tin, idx, skb);
		tin->collisions++;
		fq->collisions++;
	}

	if (!flow->tin)
		tin->flows++;

	return flow;
}

/* move @flow towards the head of fq->backlogs after its backlog has grown */
static void fq_recalc_backlog(struct fq *fq,
			      struct fq_tin *tin,
			      struct fq_flow *flow)
{
	struct fq_flow *i;

	if (list_empty(&flow->backlogchain))
		list_add_tail(&flow->backlogchain, &fq->backlogs);

	i = flow;
	list_for_each_entry_continue_reverse(i, &fq->backlogs,
					     backlogchain)
		if (i->backlog > flow->backlog)
			break;

	list_move(&flow->backlogchain, &i->backlogchain);
}

static void fq_tin_enqueue(struct fq *fq,
			   struct fq_tin *tin, u32 idx,
			   struct sk_buff *skb,
			   fq_skb_free_t free_func,
			   fq_flow_get_default_t get_default_func)
{
	struct fq_flow *flow;
	bool oom;

	lockdep_assert_held(&fq->lock);

	flow = fq_flow_classify(fq, tin, idx, skb, get_default_func);

	flow->tin = tin;
	flow->backlog += skb->len;
	tin->backlog_bytes += skb->len;
	tin->backlog_packets++;
	fq->memory_usage += skb->truesize;
	fq->backlog++;

	fq_recalc_backlog(fq, tin, flow);

	if (list_empty(&flow->flowchain)) {
		flow->deficit = fq->quantum;
		list_add_tail(&flow->flowchain,
			      &tin->new_flows);
	}

	__skb_queue_tail(&flow->queue, skb);
	oom = (fq->memory_usage > fq->memory_limit);
	/* while over the packet or memory limit, drop from the flow with
	 * the largest backlog (the head of fq->backlogs)
	 */
	while (fq->backlog > fq->limit || oom) {
		flow = list_first_entry_or_null(&fq->backlogs,
						struct fq_flow,
						backlogchain);
		if (!flow)
			return;

		skb = fq_flow_dequeue(fq, flow);
		if (!skb)
			return;

		free_func(fq, flow->tin, flow, skb);

		flow->tin->overlimit++;
		fq->overlimit++;
		if (oom) {
			fq->overmemory++;
			oom = (fq->memory_usage > fq->memory_limit);
		}
	}
}

static void fq_flow_filter(struct fq *fq,
			   struct fq_flow *flow,
			   fq_skb_filter_t filter_func,
			   void *filter_data,
			   fq_skb_free_t free_func)
{
	struct fq_tin *tin = flow->tin;
	struct sk_buff *skb, *tmp;

	lockdep_assert_held(&fq->lock);

	skb_queue_walk_safe(&flow->queue, skb, tmp) {
		if (!filter_func(fq, tin, flow, skb, filter_data))
			continue;

		__skb_unlink(skb, &flow->queue);
		fq_adjust_removal(fq, flow, skb);
		free_func(fq, tin, flow, skb);
	}

	fq_rejigger_backlog(fq, flow);
}

static void fq_tin_filter(struct fq *fq,
			  struct fq_tin *tin,
			  fq_skb_filter_t filter_func,
			  void *filter_data,
			  fq_skb_free_t free_func)
{
	struct fq_flow *flow;

	lockdep_assert_held(&fq->lock);

	list_for_each_entry(flow, &tin->new_flows, flowchain)
		fq_flow_filter(fq, flow, filter_func, filter_data, free_func);
	list_for_each_entry(flow, &tin->old_flows, flowchain)
		fq_flow_filter(fq, flow, filter_func, filter_data, free_func);
}

static void fq_flow_reset(struct fq *fq,
			  struct fq_flow *flow,
			  fq_skb_free_t free_func)
{
	struct sk_buff *skb;

	while ((skb = fq_flow_dequeue(fq, flow)))
		free_func(fq, flow->tin, flow, skb);

	if (!list_empty(&flow->flowchain))
		list_del_init(&flow->flowchain);

	if (!list_empty(&flow->backlogchain))
		list_del_init(&flow->backlogchain);

	flow->tin = NULL;

	WARN_ON_ONCE(flow->backlog);
}

static void fq_tin_reset(struct fq *fq,
			 struct fq_tin *tin,
			 fq_skb_free_t free_func)
{
	struct list_head *head;
	struct fq_flow *flow;

	for (;;) {
		head = &tin->new_flows;
		if (list_empty(head)) {
			head = &tin->old_flows;
			if (list_empty(head))
				break;
		}

		flow = list_first_entry(head, struct fq_flow, flowchain);
		fq_flow_reset(fq, flow, free_func);
	}

	WARN_ON_ONCE(tin->backlog_bytes);
	WARN_ON_ONCE(tin->backlog_packets);
}

static void fq_flow_init(struct fq_flow *flow)
{
	INIT_LIST_HEAD(&flow->flowchain);
	INIT_LIST_HEAD(&flow->backlogchain);
	__skb_queue_head_init(&flow->queue);
}

static void fq_tin_init(struct fq_tin *tin)
{
	INIT_LIST_HEAD(&tin->new_flows);
	INIT_LIST_HEAD(&tin->old_flows);
}

static int fq_init(struct fq *fq, int flows_cnt)
{
	int i;

	memset(fq, 0, sizeof(fq[0]));
	INIT_LIST_HEAD(&fq->backlogs);
	spin_lock_init(&fq->lock);
	fq->flows_cnt = max_t(u32, flows_cnt, 1);
	fq->perturbation = prandom_u32();
	fq->quantum = 300;
	fq->limit = 8192;
	fq->memory_limit = 16 << 20; /* 16 MBytes */

	fq->flows = kcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL);
	if (!fq->flows)
		return -ENOMEM;

	for (i = 0; i < fq->flows_cnt; i++)
		fq_flow_init(&fq->flows[i]);

	return 0;
}

static void fq_reset(struct fq *fq,
		     fq_skb_free_t free_func)
{
	int i;

	for (i = 0; i < fq->flows_cnt; i++)
		fq_flow_reset(fq, &fq->flows[i], free_func);

	kfree(fq->flows);
	fq->flows = NULL;
}

#endif
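/*
 * Illustrative sketch, not part of the original header: one way an includer
 * might embed these functions.  The callback signatures below are inferred
 * from the call sites above; the real typedefs (fq_tin_dequeue_t,
 * fq_skb_free_t, fq_flow_get_default_t) live in net/fq.h and may differ
 * slightly.  All example_* names, the single static tin and the fallback
 * flow are hypothetical, chosen only to show the expected init, enqueue and
 * dequeue flow under fq->lock.
 */
#if 0	/* usage sketch only */
#include <net/fq.h>
#include <net/fq_impl.h>

static struct fq example_fq;
static struct fq_tin example_tin;
static struct fq_flow example_default_flow;

/* called for every skb dropped on overlimit/oom or on reset */
static void example_free(struct fq *fq, struct fq_tin *tin,
			 struct fq_flow *flow, struct sk_buff *skb)
{
	dev_kfree_skb(skb);
}

/* used when the hashed bucket is already owned by another tin */
static struct fq_flow *example_get_default(struct fq *fq, struct fq_tin *tin,
					   int idx, struct sk_buff *skb)
{
	return &example_default_flow;
}

/* pull one packet out of the flow chosen by the DRR scheduler */
static struct sk_buff *example_dequeue(struct fq *fq, struct fq_tin *tin,
				       struct fq_flow *flow)
{
	return fq_flow_dequeue(fq, flow);
}

static int example_setup(void)
{
	int ret = fq_init(&example_fq, 1024);

	if (ret)
		return ret;

	fq_tin_init(&example_tin);
	fq_flow_init(&example_default_flow);
	return 0;
}

static void example_tx(struct sk_buff *skb)
{
	u32 idx = fq_flow_idx(&example_fq, skb);

	spin_lock_bh(&example_fq.lock);
	fq_tin_enqueue(&example_fq, &example_tin, idx, skb,
		       example_free, example_get_default);
	spin_unlock_bh(&example_fq.lock);
}

static struct sk_buff *example_next_packet(void)
{
	struct sk_buff *skb;

	spin_lock_bh(&example_fq.lock);
	skb = fq_tin_dequeue(&example_fq, &example_tin, example_dequeue);
	spin_unlock_bh(&example_fq.lock);

	return skb;
}
#endif	/* usage sketch only */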