/*
 * Copyright (c) 2016 Qualcomm Atheros, Inc
 *
 * GPL v2
 *
 * Based on net/sched/sch_fq_codel.c
 */
#ifndef __NET_SCHED_FQ_IMPL_H
#define __NET_SCHED_FQ_IMPL_H

#include <net/fq.h>

/* functions that are embedded into includer */

/*
 * Dequeue the head skb of @flow and update the byte/packet accounting
 * on the tin, the flow and the global fq.
 *
 * fq->backlogs is kept sorted by descending flow backlog: a flow that
 * becomes empty is unlinked from it; otherwise the flow is moved
 * towards the tail, in front of the first flow with a smaller backlog.
 *
 * Returns the dequeued skb, or NULL if the flow queue is empty.
 * Caller must hold fq->lock.
 */
static struct sk_buff *fq_flow_dequeue(struct fq *fq,
				       struct fq_flow *flow)
{
	struct fq_tin *tin = flow->tin;
	struct fq_flow *i;
	struct sk_buff *skb;

	lockdep_assert_held(&fq->lock);

	skb = __skb_dequeue(&flow->queue);
	if (!skb)
		return NULL;

	tin->backlog_bytes -= skb->len;
	tin->backlog_packets--;
	flow->backlog -= skb->len;
	fq->backlog--;

	if (flow->backlog == 0) {
		list_del_init(&flow->backlogchain);
	} else {
		i = flow;

		/* Find the first flow with a smaller backlog and re-insert
		 * @flow just before it, preserving the sorted order.
		 */
		list_for_each_entry_continue(i, &fq->backlogs, backlogchain)
			if (i->backlog < flow->backlog)
				break;

		list_move_tail(&flow->backlogchain,
			       &i->backlogchain);
	}

	return skb;
}

/*
 * Deficit round-robin dequeue for one tin, fq_codel style.
 *
 * Flows on new_flows are serviced ahead of old_flows.  A flow whose
 * deficit is exhausted is granted another quantum and rotated onto
 * old_flows.  A flow that yields no packet is either rotated from
 * new_flows to old_flows (forcing a pass through old_flows so they
 * are not starved) or fully detached if already old/alone.
 *
 * @dequeue_func is supplied by the includer and performs the actual
 * per-flow dequeue (typically wrapping fq_flow_dequeue()).
 *
 * Returns the next skb to transmit, or NULL if both flow lists are
 * empty.  Caller must hold fq->lock.
 */
static struct sk_buff *fq_tin_dequeue(struct fq *fq,
				      struct fq_tin *tin,
				      fq_tin_dequeue_t dequeue_func)
{
	struct fq_flow *flow;
	struct list_head *head;
	struct sk_buff *skb;

	lockdep_assert_held(&fq->lock);

begin:
	head = &tin->new_flows;
	if (list_empty(head)) {
		head = &tin->old_flows;
		if (list_empty(head))
			return NULL;
	}

	flow = list_first_entry(head, struct fq_flow, flowchain);

	if (flow->deficit <= 0) {
		flow->deficit += fq->quantum;
		list_move_tail(&flow->flowchain,
			       &tin->old_flows);
		goto begin;
	}

	skb = dequeue_func(fq, tin, flow);
	if (!skb) {
		/* force a pass through old_flows to prevent starvation */
		if ((head == &tin->new_flows) &&
		    !list_empty(&tin->old_flows)) {
			list_move_tail(&flow->flowchain, &tin->old_flows);
		} else {
			list_del_init(&flow->flowchain);
			flow->tin = NULL;
		}
		goto begin;
	}

	flow->deficit -= skb->len;
	tin->tx_bytes += skb->len;
	tin->tx_packets++;

	return skb;
}

/*
 * Hash @skb onto one of fq->flows.
 *
 * If the hashed slot is already owned by a different tin (cross-tin
 * collision), fall back to the includer-provided default flow for
 * this tin rather than mixing tins in one queue; collision counters
 * are bumped on both the tin and the fq.  A flow with no owner yet
 * counts as a newly activated flow for the tin.
 *
 * Caller must hold fq->lock.
 */
static struct fq_flow *fq_flow_classify(struct fq *fq,
					struct fq_tin *tin,
					struct sk_buff *skb,
					fq_flow_get_default_t get_default_func)
{
	struct fq_flow *flow;
	u32 hash;
	u32 idx;

	lockdep_assert_held(&fq->lock);

	hash = skb_get_hash_perturb(skb, fq->perturbation);
	idx = reciprocal_scale(hash, fq->flows_cnt);
	flow = &fq->flows[idx];

	if (flow->tin && flow->tin != tin) {
		flow = get_default_func(fq, tin, idx, skb);
		tin->collisions++;
		fq->collisions++;
	}

	if (!flow->tin)
		tin->flows++;

	return flow;
}

/*
 * Enqueue @skb on @tin: classify it to a flow, charge the backlog
 * accounting, (re)position the flow in the global backlog-sorted
 * list, and attach the flow to the tin's new_flows list if it was
 * idle.  If the global packet limit is then exceeded, a single
 * packet is dropped (handed to @free_func) from the flow with the
 * largest backlog.
 *
 * Caller must hold fq->lock.
 */
static void fq_tin_enqueue(struct fq *fq,
			   struct fq_tin *tin,
			   struct sk_buff *skb,
			   fq_skb_free_t free_func,
			   fq_flow_get_default_t get_default_func)
{
	struct fq_flow *flow;
	struct fq_flow *i;

	lockdep_assert_held(&fq->lock);

	flow = fq_flow_classify(fq, tin, skb, get_default_func);

	flow->tin = tin;
	flow->backlog += skb->len;
	tin->backlog_bytes += skb->len;
	tin->backlog_packets++;
	fq->backlog++;

	if (list_empty(&flow->backlogchain))
		list_add_tail(&flow->backlogchain, &fq->backlogs);

	/* The flow's backlog just grew; walk towards the head past
	 * flows with a smaller backlog so fq->backlogs stays sorted
	 * by descending backlog.
	 */
	i = flow;
	list_for_each_entry_continue_reverse(i, &fq->backlogs,
					     backlogchain)
		if (i->backlog > flow->backlog)
			break;

	list_move(&flow->backlogchain, &i->backlogchain);

	if (list_empty(&flow->flowchain)) {
		flow->deficit = fq->quantum;
		list_add_tail(&flow->flowchain,
			      &tin->new_flows);
	}

	__skb_queue_tail(&flow->queue, skb);

	if (fq->backlog > fq->limit) {
		/* Head of fq->backlogs is the fattest flow. */
		flow = list_first_entry_or_null(&fq->backlogs,
						struct fq_flow,
						backlogchain);
		if (!flow)
			return;

		skb = fq_flow_dequeue(fq, flow);
		if (!skb)
			return;

		free_func(fq, flow->tin, flow, skb);

		flow->tin->overlimit++;
		fq->overlimit++;
	}
}

/*
 * Drop every packet still queued on @flow (each handed to
 * @free_func) and detach the flow from the scheduling and backlog
 * lists.  The flow's backlog must be zero afterwards (WARNs if not).
 */
static void fq_flow_reset(struct fq *fq,
			  struct fq_flow *flow,
			  fq_skb_free_t free_func)
{
	struct sk_buff *skb;

	while ((skb = fq_flow_dequeue(fq, flow)))
		free_func(fq, flow->tin, flow, skb);

	if (!list_empty(&flow->flowchain))
		list_del_init(&flow->flowchain);

	if (!list_empty(&flow->backlogchain))
		list_del_init(&flow->backlogchain);

	flow->tin = NULL;

	WARN_ON_ONCE(flow->backlog);
}

/*
 * Flush every flow currently linked to @tin (on either flow list).
 * The tin's backlog accounting must be zero afterwards (WARNs if
 * not).
 */
static void fq_tin_reset(struct fq *fq,
			 struct fq_tin *tin,
			 fq_skb_free_t free_func)
{
	struct list_head *head;
	struct fq_flow *flow;

	for (;;) {
		head = &tin->new_flows;
		if (list_empty(head)) {
			head = &tin->old_flows;
			if (list_empty(head))
				break;
		}

		flow = list_first_entry(head, struct fq_flow, flowchain);
		fq_flow_reset(fq, flow, free_func);
	}

	WARN_ON_ONCE(tin->backlog_bytes);
	WARN_ON_ONCE(tin->backlog_packets);
}

/* Initialize a flow's list links and packet queue to empty. */
static void fq_flow_init(struct fq_flow *flow)
{
	INIT_LIST_HEAD(&flow->flowchain);
	INIT_LIST_HEAD(&flow->backlogchain);
	__skb_queue_head_init(&flow->queue);
}

/* Initialize a tin's new/old flow lists to empty. */
static void fq_tin_init(struct fq_tin *tin)
{
	INIT_LIST_HEAD(&tin->new_flows);
	INIT_LIST_HEAD(&tin->old_flows);
}

/*
 * Initialize @fq and allocate its flow table (at least one flow,
 * regardless of @flows_cnt).  The hash perturbation is randomized so
 * flow placement is not externally predictable.
 *
 * Returns 0 on success or -ENOMEM if the flow table allocation
 * fails.
 */
static int fq_init(struct fq *fq, int flows_cnt)
{
	int i;

	memset(fq, 0, sizeof(fq[0]));
	INIT_LIST_HEAD(&fq->backlogs);
	spin_lock_init(&fq->lock);
	fq->flows_cnt = max_t(u32, flows_cnt, 1);
	fq->perturbation = prandom_u32();
	fq->quantum = 300;
	fq->limit = 8192;

	fq->flows = kcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL);
	if (!fq->flows)
		return -ENOMEM;

	for (i = 0; i < fq->flows_cnt; i++)
		fq_flow_init(&fq->flows[i]);

	return 0;
}

/*
 * Tear down @fq: flush every flow (packets handed to @free_func) and
 * free the flow table.
 */
static void fq_reset(struct fq *fq,
		     fq_skb_free_t free_func)
{
	int i;

	for (i = 0; i < fq->flows_cnt; i++)
		fq_flow_reset(fq, &fq->flows[i], free_func);

	kfree(fq->flows);
	fq->flows = NULL;
}

#endif