/* Copyright (c) 2014 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */


#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <brcmu_utils.h>

#include "core.h"
#include "debug.h"
#include "bus.h"
#include "proto.h"
#include "flowring.h"
#include "msgbuf.h"
#include "common.h"


#define BRCMF_FLOWRING_HIGH		1024
#define BRCMF_FLOWRING_LOW		(BRCMF_FLOWRING_HIGH - 256)
#define BRCMF_FLOWRING_INVALID_IFIDX	0xff

#define BRCMF_FLOWRING_HASH_AP(da, fifo, ifidx) (da[5] * 2 + fifo + ifidx * 16)
#define BRCMF_FLOWRING_HASH_STA(fifo, ifidx) (fifo + ifidx * 16)

static const u8 brcmf_flowring_prio2fifo[] = {
	1,
	0,
	0,
	1,
	2,
	2,
	3,
	3
};

static const u8 ALLFFMAC[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };


static bool
brcmf_flowring_is_tdls_mac(struct brcmf_flowring *flow, u8 mac[ETH_ALEN])
{
	struct brcmf_flowring_tdls_entry *search;

	search = flow->tdls_entry;

	while (search) {
		if (memcmp(search->mac, mac, ETH_ALEN) == 0)
			return true;
		search = search->next;
	}

	return false;
}


u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
			  u8 prio, u8 ifidx)
{
	struct brcmf_flowring_hash *hash;
	u16 hash_idx;
	u32 i;
	bool found;
	bool sta;
	u8 fifo;
	u8 *mac;

	fifo = brcmf_flowring_prio2fifo[prio];
	sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT);
	mac = da;
	if ((!sta) && (is_multicast_ether_addr(da))) {
		mac = (u8 *)ALLFFMAC;
		fifo = 0;
	}
	if ((sta) && (flow->tdls_active) &&
	    (brcmf_flowring_is_tdls_mac(flow, da))) {
		sta = false;
	}
	hash_idx = sta ? BRCMF_FLOWRING_HASH_STA(fifo, ifidx) :
			 BRCMF_FLOWRING_HASH_AP(mac, fifo, ifidx);
	hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
	found = false;
	hash = flow->hash;
	for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
		if ((sta || (memcmp(hash[hash_idx].mac, mac, ETH_ALEN) == 0)) &&
		    (hash[hash_idx].fifo == fifo) &&
		    (hash[hash_idx].ifidx == ifidx)) {
			found = true;
			break;
		}
		hash_idx++;
		hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
	}
	if (found)
		return hash[hash_idx].flowid;

	return BRCMF_FLOWRING_INVALID_ID;
}


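/* Allocate a flowring for the given destination, priority and interface and
 * claim a hash entry for it. Returns the new flow id, -ENOMEM when no ring
 * slot or memory is available, or BRCMF_FLOWRING_INVALID_ID when no free
 * hash entry exists.
 */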
u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
			  u8 prio, u8 ifidx)
{
	struct brcmf_flowring_ring *ring;
	struct brcmf_flowring_hash *hash;
	u16 hash_idx;
	u32 i;
	bool found;
	u8 fifo;
	bool sta;
	u8 *mac;

	fifo = brcmf_flowring_prio2fifo[prio];
	sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT);
	mac = da;
	if ((!sta) && (is_multicast_ether_addr(da))) {
		mac = (u8 *)ALLFFMAC;
		fifo = 0;
	}
	if ((sta) && (flow->tdls_active) &&
	    (brcmf_flowring_is_tdls_mac(flow, da))) {
		sta = false;
	}
	hash_idx = sta ? BRCMF_FLOWRING_HASH_STA(fifo, ifidx) :
			 BRCMF_FLOWRING_HASH_AP(mac, fifo, ifidx);
	hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
	found = false;
	hash = flow->hash;
	for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
		if ((hash[hash_idx].ifidx == BRCMF_FLOWRING_INVALID_IFIDX) &&
		    (is_zero_ether_addr(hash[hash_idx].mac))) {
			found = true;
			break;
		}
		hash_idx++;
		hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
	}
	if (found) {
		for (i = 0; i < flow->nrofrings; i++) {
			if (flow->rings[i] == NULL)
				break;
		}
		if (i == flow->nrofrings)
			return -ENOMEM;

		ring = kzalloc(sizeof(*ring), GFP_ATOMIC);
		if (!ring)
			return -ENOMEM;

		memcpy(hash[hash_idx].mac, mac, ETH_ALEN);
		hash[hash_idx].fifo = fifo;
		hash[hash_idx].ifidx = ifidx;
		hash[hash_idx].flowid = i;

		ring->hash_id = hash_idx;
		ring->status = RING_CLOSED;
		skb_queue_head_init(&ring->skblist);
		flow->rings[i] = ring;

		return i;
	}
	return BRCMF_FLOWRING_INVALID_ID;
}


u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u16 flowid)
{
	struct brcmf_flowring_ring *ring;

	ring = flow->rings[flowid];

	return flow->hash[ring->hash_id].fifo;
}


static void brcmf_flowring_block(struct brcmf_flowring *flow, u16 flowid,
				 bool blocked)
{
	struct brcmf_flowring_ring *ring;
	struct brcmf_bus *bus_if;
	struct brcmf_pub *drvr;
	struct brcmf_if *ifp;
	bool currently_blocked;
	int i;
	u8 ifidx;
	unsigned long flags;

	spin_lock_irqsave(&flow->block_lock, flags);

	ring = flow->rings[flowid];
	if (ring->blocked == blocked) {
		spin_unlock_irqrestore(&flow->block_lock, flags);
		return;
	}
	ifidx = brcmf_flowring_ifidx_get(flow, flowid);

	currently_blocked = false;
	for (i = 0; i < flow->nrofrings; i++) {
		if ((flow->rings[i]) && (i != flowid)) {
			ring = flow->rings[i];
			if ((ring->status == RING_OPEN) &&
			    (brcmf_flowring_ifidx_get(flow, i) == ifidx)) {
				if (ring->blocked) {
					currently_blocked = true;
					break;
				}
			}
		}
	}
	flow->rings[flowid]->blocked = blocked;
	if (currently_blocked) {
		spin_unlock_irqrestore(&flow->block_lock, flags);
		return;
	}

	bus_if = dev_get_drvdata(flow->dev);
	drvr = bus_if->drvr;
	ifp = brcmf_get_ifp(drvr, ifidx);
	brcmf_txflowblock_if(ifp, BRCMF_NETIF_STOP_REASON_FLOW, blocked);

	spin_unlock_irqrestore(&flow->block_lock, flags);
}


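/* Remove the flowring from the hash table, complete all queued skbs as
 * failed transmissions and free the ring.
 */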
void brcmf_flowring_delete(struct brcmf_flowring *flow, u16 flowid)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
	struct brcmf_flowring_ring *ring;
	struct brcmf_if *ifp;
	u16 hash_idx;
	u8 ifidx;
	struct sk_buff *skb;

	ring = flow->rings[flowid];
	if (!ring)
		return;

	ifidx = brcmf_flowring_ifidx_get(flow, flowid);
	ifp = brcmf_get_ifp(bus_if->drvr, ifidx);

	brcmf_flowring_block(flow, flowid, false);
	hash_idx = ring->hash_id;
	flow->hash[hash_idx].ifidx = BRCMF_FLOWRING_INVALID_IFIDX;
	eth_zero_addr(flow->hash[hash_idx].mac);
	flow->rings[flowid] = NULL;

	skb = skb_dequeue(&ring->skblist);
	while (skb) {
		brcmf_txfinalize(ifp, skb, false);
		skb = skb_dequeue(&ring->skblist);
	}

	kfree(ring);
}


u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u16 flowid,
			   struct sk_buff *skb)
{
	struct brcmf_flowring_ring *ring;

	ring = flow->rings[flowid];

	skb_queue_tail(&ring->skblist, skb);

	if (!ring->blocked &&
	    (skb_queue_len(&ring->skblist) > BRCMF_FLOWRING_HIGH)) {
		brcmf_flowring_block(flow, flowid, true);
		brcmf_dbg(MSGBUF, "Flowcontrol: BLOCK for ring %d\n", flowid);
		/* To prevent (work around) possible race condition, check
		 * queue len again. It is also possible to use locking to
		 * protect, but that is undesirable for every enqueue and
		 * dequeue. This simple check will solve a possible race
		 * condition if it occurs.
		 */
		if (skb_queue_len(&ring->skblist) < BRCMF_FLOWRING_LOW)
			brcmf_flowring_block(flow, flowid, false);
	}
	return skb_queue_len(&ring->skblist);
}


struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u16 flowid)
{
	struct brcmf_flowring_ring *ring;
	struct sk_buff *skb;

	ring = flow->rings[flowid];
	if (ring->status != RING_OPEN)
		return NULL;

	skb = skb_dequeue(&ring->skblist);

	if (ring->blocked &&
	    (skb_queue_len(&ring->skblist) < BRCMF_FLOWRING_LOW)) {
		brcmf_flowring_block(flow, flowid, false);
		brcmf_dbg(MSGBUF, "Flowcontrol: OPEN for ring %d\n", flowid);
	}

	return skb;
}


void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u16 flowid,
			     struct sk_buff *skb)
{
	struct brcmf_flowring_ring *ring;

	ring = flow->rings[flowid];

	skb_queue_head(&ring->skblist, skb);
}


u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u16 flowid)
{
	struct brcmf_flowring_ring *ring;

	ring = flow->rings[flowid];
	if (!ring)
		return 0;

	if (ring->status != RING_OPEN)
		return 0;

	return skb_queue_len(&ring->skblist);
}


void brcmf_flowring_open(struct brcmf_flowring *flow, u16 flowid)
{
	struct brcmf_flowring_ring *ring;

	ring = flow->rings[flowid];
	if (!ring) {
		brcmf_err("Ring NULL, for flowid %d\n", flowid);
		return;
	}

	ring->status = RING_OPEN;
}


u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u16 flowid)
{
	struct brcmf_flowring_ring *ring;
	u16 hash_idx;

	ring = flow->rings[flowid];
	hash_idx = ring->hash_id;

	return flow->hash[hash_idx].ifidx;
}


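/* Allocate and initialise the flowring state for a bus device that supports
 * nrofrings flowrings. Returns NULL on allocation failure.
 */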
struct brcmf_flowring *brcmf_flowring_attach(struct device *dev, u16 nrofrings)
{
	struct brcmf_flowring *flow;
	u32 i;

	flow = kzalloc(sizeof(*flow), GFP_KERNEL);
	if (flow) {
		flow->dev = dev;
		flow->nrofrings = nrofrings;
		spin_lock_init(&flow->block_lock);
		for (i = 0; i < ARRAY_SIZE(flow->addr_mode); i++)
			flow->addr_mode[i] = ADDR_INDIRECT;
		for (i = 0; i < ARRAY_SIZE(flow->hash); i++)
			flow->hash[i].ifidx = BRCMF_FLOWRING_INVALID_IFIDX;
		flow->rings = kcalloc(nrofrings, sizeof(*flow->rings),
				      GFP_KERNEL);
		if (!flow->rings) {
			kfree(flow);
			flow = NULL;
		}
	}

	return flow;
}


void brcmf_flowring_detach(struct brcmf_flowring *flow)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
	struct brcmf_pub *drvr = bus_if->drvr;
	struct brcmf_flowring_tdls_entry *search;
	struct brcmf_flowring_tdls_entry *remove;
	u16 flowid;

	for (flowid = 0; flowid < flow->nrofrings; flowid++) {
		if (flow->rings[flowid])
			brcmf_msgbuf_delete_flowring(drvr, flowid);
	}

	search = flow->tdls_entry;
	while (search) {
		remove = search;
		search = search->next;
		kfree(remove);
	}
	kfree(flow->rings);
	kfree(flow);
}


void brcmf_flowring_configure_addr_mode(struct brcmf_flowring *flow, int ifidx,
					enum proto_addr_mode addr_mode)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
	struct brcmf_pub *drvr = bus_if->drvr;
	u32 i;
	u16 flowid;

	if (flow->addr_mode[ifidx] != addr_mode) {
		for (i = 0; i < ARRAY_SIZE(flow->hash); i++) {
			if (flow->hash[i].ifidx == ifidx) {
				flowid = flow->hash[i].flowid;
				if (flow->rings[flowid]->status != RING_OPEN)
					continue;
				flow->rings[flowid]->status = RING_CLOSING;
				brcmf_msgbuf_delete_flowring(drvr, flowid);
			}
		}
		flow->addr_mode[ifidx] = addr_mode;
	}
}


void brcmf_flowring_delete_peer(struct brcmf_flowring *flow, int ifidx,
				u8 peer[ETH_ALEN])
{
	struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
	struct brcmf_pub *drvr = bus_if->drvr;
	struct brcmf_flowring_hash *hash;
	struct brcmf_flowring_tdls_entry *prev;
	struct brcmf_flowring_tdls_entry *search;
	u32 i;
	u16 flowid;
	bool sta;

	sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT);

	search = flow->tdls_entry;
	prev = NULL;
	while (search) {
		if (memcmp(search->mac, peer, ETH_ALEN) == 0) {
			sta = false;
			break;
		}
		prev = search;
		search = search->next;
	}

	hash = flow->hash;
	for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
		if ((sta || (memcmp(hash[i].mac, peer, ETH_ALEN) == 0)) &&
		    (hash[i].ifidx == ifidx)) {
			flowid = flow->hash[i].flowid;
			if (flow->rings[flowid]->status == RING_OPEN) {
				flow->rings[flowid]->status = RING_CLOSING;
				brcmf_msgbuf_delete_flowring(drvr, flowid);
			}
		}
	}

	if (search) {
		if (prev)
			prev->next = search->next;
		else
			flow->tdls_entry = search->next;
		kfree(search);
		if (flow->tdls_entry == NULL)
			flow->tdls_active = false;
	}
}


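/* Register a TDLS peer so lookups hash traffic for this MAC address per peer
 * (AP-style) instead of using the per-interface STA hash. A duplicate peer
 * entry is freed again.
 */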
void brcmf_flowring_add_tdls_peer(struct brcmf_flowring *flow, int ifidx,
				  u8 peer[ETH_ALEN])
{
	struct brcmf_flowring_tdls_entry *tdls_entry;
	struct brcmf_flowring_tdls_entry *search;

	tdls_entry = kzalloc(sizeof(*tdls_entry), GFP_ATOMIC);
	if (tdls_entry == NULL)
		return;

	memcpy(tdls_entry->mac, peer, ETH_ALEN);
	tdls_entry->next = NULL;
	if (flow->tdls_entry == NULL) {
		flow->tdls_entry = tdls_entry;
	} else {
		search = flow->tdls_entry;
		if (memcmp(search->mac, peer, ETH_ALEN) == 0)
			goto free_entry;
		while (search->next) {
			search = search->next;
			if (memcmp(search->mac, peer, ETH_ALEN) == 0)
				goto free_entry;
		}
		search->next = tdls_entry;
	}

	flow->tdls_active = true;
	return;

free_entry:
	kfree(tdls_entry);
}