// SPDX-License-Identifier: GPL-2.0
/*
 * This file is based on code from OCTEON SDK by Cavium Networks.
 *
 * Copyright (c) 2003-2010 Cavium Networks
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/ratelimit.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <net/dst.h>
#ifdef CONFIG_XFRM
#include <linux/xfrm.h>
#include <net/xfrm.h>
#endif /* CONFIG_XFRM */

#include <linux/atomic.h>
#include <net/sch_generic.h>

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "octeon-ethernet.h"
#include "ethernet-tx.h"
#include "ethernet-util.h"

#include <asm/octeon/cvmx-wqe.h>
#include <asm/octeon/cvmx-fau.h>
#include <asm/octeon/cvmx-pip.h>
#include <asm/octeon/cvmx-pko.h>
#include <asm/octeon/cvmx-helper.h>

#include <asm/octeon/cvmx-gmxx-defs.h>

#define CVM_OCT_SKB_CB(skb)	((u64 *)((skb)->cb))

/*
 * You can define GET_SKBUFF_QOS() to override how the skbuff output
 * function determines which output queue is used. The default
 * implementation always uses the base queue for the port. If, for
 * example, you wanted to use the skb->priority field, define
 * GET_SKBUFF_QOS as: #define GET_SKBUFF_QOS(skb) ((skb)->priority)
 */
#ifndef GET_SKBUFF_QOS
#define GET_SKBUFF_QOS(skb) 0
#endif

static void cvm_oct_tx_do_cleanup(unsigned long arg);
static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup, 0);

/* Maximum number of SKBs to try to free per xmit packet. */
#define MAX_SKB_TO_FREE (MAX_OUT_QUEUE_DEPTH * 2)

static inline int cvm_oct_adjust_skb_to_free(int skb_to_free, int fau)
{
	int undo;

	/*
	 * The caller just added MAX_SKB_TO_FREE to the FAU counter and got
	 * back the old value in skb_to_free. Give back the portion of that
	 * credit we don't need and return how many skbs may actually be
	 * freed, clamped to MAX_SKB_TO_FREE.
	 */
	undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free +
						   MAX_SKB_TO_FREE;
	if (undo > 0)
		cvmx_fau_atomic_add32(fau, -undo);
	skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE :
						       -skb_to_free;
	return skb_to_free;
}

static void cvm_oct_kick_tx_poll_watchdog(void)
{
	union cvmx_ciu_timx ciu_timx;

	ciu_timx.u64 = 0;
	ciu_timx.s.one_shot = 1;
	ciu_timx.s.len = cvm_oct_tx_poll_interval;
	cvmx_write_csr(CVMX_CIU_TIMX(1), ciu_timx.u64);
}

static void cvm_oct_free_tx_skbs(struct net_device *dev)
{
	int skb_to_free;
	int qos, queues_per_port;
	int total_freed = 0;
	int total_remaining = 0;
	unsigned long flags;
	struct octeon_ethernet *priv = netdev_priv(dev);

	queues_per_port = cvmx_pko_get_num_queues(priv->port);
	/* Drain any pending packets in the free list */
	for (qos = 0; qos < queues_per_port; qos++) {
		if (skb_queue_len(&priv->tx_free_list[qos]) == 0)
			continue;
		skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
						       MAX_SKB_TO_FREE);
		skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
							 priv->fau + qos * 4);
		total_freed += skb_to_free;
		if (skb_to_free > 0) {
			struct sk_buff *to_free_list = NULL;

			spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
			while (skb_to_free > 0) {
				struct sk_buff *t;

				t = __skb_dequeue(&priv->tx_free_list[qos]);
				t->next = to_free_list;
				to_free_list = t;
				skb_to_free--;
			}
			spin_unlock_irqrestore(&priv->tx_free_list[qos].lock,
					       flags);
			/* Do the actual freeing outside of the lock. */
			while (to_free_list) {
				struct sk_buff *t = to_free_list;

				to_free_list = to_free_list->next;
				dev_kfree_skb_any(t);
			}
		}
		total_remaining += skb_queue_len(&priv->tx_free_list[qos]);
	}
	if (total_remaining < MAX_OUT_QUEUE_DEPTH && netif_queue_stopped(dev))
		netif_wake_queue(dev);
	if (total_remaining)
		cvm_oct_kick_tx_poll_watchdog();
}

/**
 * cvm_oct_xmit - transmit a packet
 * @skb: Packet to send
 * @dev: Device info structure
 *
 * Returns: Always returns NETDEV_TX_OK.
 */
int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
{
	cvmx_pko_command_word0_t pko_command;
	union cvmx_buf_ptr hw_buffer;
	u64 old_scratch;
	u64 old_scratch2;
	int qos;
	int i;
	enum {QUEUE_CORE, QUEUE_HW, QUEUE_DROP} queue_type;
	struct octeon_ethernet *priv = netdev_priv(dev);
	struct sk_buff *to_free_list;
	int skb_to_free;
	int buffers_to_free;
	u32 total_to_clean;
	unsigned long flags;
#if REUSE_SKBUFFS_WITHOUT_FREE
	unsigned char *fpa_head;
#endif

	/*
	 * Prefetch the private data structure. It is larger than
	 * one cache line.
	 */
	prefetch(priv);

	/*
	 * The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to
	 * completely remove "qos" in the event neither interface
	 * supports multiple queues per port.
	 */
	if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
	    (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
		qos = GET_SKBUFF_QOS(skb);
		if (qos <= 0)
			qos = 0;
		else if (qos >= cvmx_pko_get_num_queues(priv->port))
			qos = 0;
	} else {
		qos = 0;
	}

	if (USE_ASYNC_IOBDMA) {
		/* Save scratch in case userspace is using it */
		CVMX_SYNCIOBDMA;
		old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
		old_scratch2 = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);

		/*
		 * Fetch and increment the number of packets to be
		 * freed.
		 */
		cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH + 8,
					       FAU_NUM_PACKET_BUFFERS_TO_FREE,
					       0);
		cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
					       priv->fau + qos * 4,
					       MAX_SKB_TO_FREE);
	}

	/*
	 * We have space for 6 segment pointers. If there will be more
	 * than that, we must linearize.
	 */
	if (unlikely(skb_shinfo(skb)->nr_frags > 5)) {
		if (unlikely(__skb_linearize(skb))) {
			queue_type = QUEUE_DROP;
			if (USE_ASYNC_IOBDMA) {
				/*
				 * Get the number of skbuffs in use
				 * by the hardware
				 */
				CVMX_SYNCIOBDMA;
				skb_to_free =
					cvmx_scratch_read64(CVMX_SCR_SCRATCH);
			} else {
				/*
				 * Get the number of skbuffs in use
				 * by the hardware
				 */
				skb_to_free = cvmx_fau_fetch_and_add32(
					priv->fau + qos * 4, MAX_SKB_TO_FREE);
			}
			skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
								 priv->fau +
								 qos * 4);
			spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
			goto skip_xmit;
		}
	}

	/*
	 * The CN3XXX series of parts has an errata (GMX-401) which
	 * causes the GMX block to hang if a collision occurs towards
	 * the end of a <68 byte packet. As a workaround for this, we
	 * pad packets to be 68 bytes whenever we are in half duplex
	 * mode. We don't handle the case of having a small packet but
	 * no room to add the padding.
	 * The kernel should always give us at least a cache line.
	 */
	if ((skb->len < 64) && OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
		union cvmx_gmxx_prtx_cfg gmx_prt_cfg;
		int interface = INTERFACE(priv->port);
		int index = INDEX(priv->port);

		if (interface < 2) {
			/* We only need to pad packet in half duplex mode */
			gmx_prt_cfg.u64 =
				cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
			if (gmx_prt_cfg.s.duplex == 0) {
				int add_bytes = 64 - skb->len;

				if ((skb_tail_pointer(skb) + add_bytes) <=
				    skb_end_pointer(skb))
					__skb_put_zero(skb, add_bytes);
			}
		}
	}

	/* Build the PKO command */
	pko_command.u64 = 0;
#ifdef __LITTLE_ENDIAN
	pko_command.s.le = 1;
#endif
	pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet */
	pko_command.s.segs = 1;
	pko_command.s.total_bytes = skb->len;
	pko_command.s.size0 = CVMX_FAU_OP_SIZE_32;
	pko_command.s.subone0 = 1;

	pko_command.s.dontfree = 1;

	/* Build the PKO buffer pointer */
	hw_buffer.u64 = 0;
	if (skb_shinfo(skb)->nr_frags == 0) {
		hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data);
		hw_buffer.s.pool = 0;
		hw_buffer.s.size = skb->len;
	} else {
		hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data);
		hw_buffer.s.pool = 0;
		hw_buffer.s.size = skb_headlen(skb);
		CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i;

			hw_buffer.s.addr =
				XKPHYS_TO_PHYS((u64)(page_address(fs->page.p) +
						     fs->page_offset));
			hw_buffer.s.size = fs->size;
			CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
		}
		hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)CVM_OCT_SKB_CB(skb));
		hw_buffer.s.size = skb_shinfo(skb)->nr_frags + 1;
		pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1;
		pko_command.s.gather = 1;
		goto dont_put_skbuff_in_hw;
	}

	/*
	 * See if we can put this skb in the FPA pool. Any strange
	 * behavior from the Linux networking stack will most likely
	 * be caused by a bug in the following code. If some field is
	 * in use by the network stack and gets carried over when a
	 * buffer is reused, bad things may happen. If in doubt and
	 * you don't need the absolute best performance, disable the
	 * define REUSE_SKBUFFS_WITHOUT_FREE. The reuse of buffers has
	 * shown a 25% increase in performance under some loads.
	 */
#if REUSE_SKBUFFS_WITHOUT_FREE
	fpa_head = skb->head + 256 - ((unsigned long)skb->head & 0x7f);
	if (unlikely(skb->data < fpa_head)) {
		/* TX buffer beginning can't meet FPA alignment constraints */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely((skb_end_pointer(skb) - fpa_head) <
		     CVMX_FPA_PACKET_POOL_SIZE)) {
		/* TX buffer isn't large enough for the FPA */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_shared(skb))) {
		/* TX buffer sharing data with someone else */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_cloned(skb))) {
		/* TX buffer has been cloned */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_header_cloned(skb))) {
		/* TX buffer header has been cloned */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb->destructor)) {
		/* TX buffer has a destructor */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_shinfo(skb)->nr_frags)) {
		/* TX buffer has fragments */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb->truesize != sizeof(*skb) + skb_end_offset(skb))) {
		/* TX buffer truesize has been changed */
		goto dont_put_skbuff_in_hw;
	}

	/*
	 * We can use this buffer in the FPA. We don't need the FAU
	 * update anymore.
	 */
	pko_command.s.dontfree = 0;

	hw_buffer.s.back = ((unsigned long)skb->data >> 7) -
			   ((unsigned long)fpa_head >> 7);

	*(struct sk_buff **)(fpa_head - sizeof(void *)) = skb;

	/*
	 * The skbuff will be reused without ever being freed. We must
	 * clean up a bunch of core things.
	 */
	dst_release(skb_dst(skb));
	skb_dst_set(skb, NULL);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
	skb->sp = NULL;
#endif
	nf_reset(skb);

#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
	skb_reset_tc(skb);
#endif /* CONFIG_NET_SCHED */
#endif /* REUSE_SKBUFFS_WITHOUT_FREE */

dont_put_skbuff_in_hw:

	/* Check if we can use the hardware checksumming */
	if ((skb->protocol == htons(ETH_P_IP)) &&
	    (ip_hdr(skb)->version == 4) &&
	    (ip_hdr(skb)->ihl == 5) &&
	    ((ip_hdr(skb)->frag_off == 0) ||
	     (ip_hdr(skb)->frag_off == htons(1 << 14))) &&
	    ((ip_hdr(skb)->protocol == IPPROTO_TCP) ||
	     (ip_hdr(skb)->protocol == IPPROTO_UDP))) {
		/* Use hardware checksum calc */
		pko_command.s.ipoffp1 = skb_network_offset(skb) + 1;
	}

	if (USE_ASYNC_IOBDMA) {
		/* Get the number of skbuffs in use by the hardware */
		CVMX_SYNCIOBDMA;
		skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
		buffers_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
	} else {
		/* Get the number of skbuffs in use by the hardware */
		skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
						       MAX_SKB_TO_FREE);
		buffers_to_free =
			cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
	}

	skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
						 priv->fau + qos * 4);

	/*
	 * If we're sending faster than the receive side can free them,
	 * then don't do the HW free.
	 */
	if ((buffers_to_free < -100) && !pko_command.s.dontfree)
		pko_command.s.dontfree = 1;

	if (pko_command.s.dontfree) {
		queue_type = QUEUE_CORE;
		pko_command.s.reg0 = priv->fau + qos * 4;
	} else {
		queue_type = QUEUE_HW;
	}
	if (USE_ASYNC_IOBDMA)
		cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
					       FAU_TOTAL_TX_TO_CLEAN, 1);

	spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);

	/* Drop this packet if we have too many already queued to the HW */
	if (unlikely(skb_queue_len(&priv->tx_free_list[qos]) >=
		     MAX_OUT_QUEUE_DEPTH)) {
		if (dev->tx_queue_len != 0) {
			/* Drop the lock when notifying the core. */
			spin_unlock_irqrestore(&priv->tx_free_list[qos].lock,
					       flags);
			netif_stop_queue(dev);
			spin_lock_irqsave(&priv->tx_free_list[qos].lock,
					  flags);
		} else {
			/* If not using normal queueing. */
			queue_type = QUEUE_DROP;
			goto skip_xmit;
		}
	}

	cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos,
				     CVMX_PKO_LOCK_NONE);

	/* Send the packet to the output queue */
	if (unlikely(cvmx_pko_send_packet_finish(priv->port,
						 priv->queue + qos,
						 pko_command, hw_buffer,
						 CVMX_PKO_LOCK_NONE))) {
		printk_ratelimited("%s: Failed to send the packet\n",
				   dev->name);
		queue_type = QUEUE_DROP;
	}
skip_xmit:
	to_free_list = NULL;

	switch (queue_type) {
	case QUEUE_DROP:
		skb->next = to_free_list;
		to_free_list = skb;
		dev->stats.tx_dropped++;
		break;
	case QUEUE_HW:
		cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, -1);
		break;
	case QUEUE_CORE:
		__skb_queue_tail(&priv->tx_free_list[qos], skb);
		break;
	default:
		BUG();
	}

	while (skb_to_free > 0) {
		struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]);

		t->next = to_free_list;
		to_free_list = t;
		skb_to_free--;
	}

	spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);

	/* Do the actual freeing outside of the lock. */
	while (to_free_list) {
		struct sk_buff *t = to_free_list;

		to_free_list = to_free_list->next;
		dev_kfree_skb_any(t);
	}

	if (USE_ASYNC_IOBDMA) {
		CVMX_SYNCIOBDMA;
		total_to_clean = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
		/* Restore the scratch area */
		cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
		cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
	} else {
		total_to_clean =
			cvmx_fau_fetch_and_add32(FAU_TOTAL_TX_TO_CLEAN, 1);
	}

	if (total_to_clean & 0x3ff) {
		/*
		 * Schedule the cleanup tasklet every 1024 packets for
		 * the pathological case of high traffic on one port
		 * delaying clean up of packets on a different port
		 * that is blocked waiting for the cleanup.
		 */
		tasklet_schedule(&cvm_oct_tx_cleanup_tasklet);
	}

	cvm_oct_kick_tx_poll_watchdog();

	return NETDEV_TX_OK;
}

/**
 * cvm_oct_xmit_pow - transmit a packet to the POW
 * @skb: Packet to send
 * @dev: Device info structure
 *
 * Returns: Always returns zero.
 */
int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	void *packet_buffer;
	void *copy_location;

	/* Get a work queue entry */
	cvmx_wqe_t *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);

	if (unlikely(!work)) {
		printk_ratelimited("%s: Failed to allocate a work queue entry\n",
				   dev->name);
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return 0;
	}

	/* Get a packet buffer */
	packet_buffer = cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL);
	if (unlikely(!packet_buffer)) {
		printk_ratelimited("%s: Failed to allocate a packet buffer\n",
				   dev->name);
		cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return 0;
	}

	/*
	 * Calculate where we need to copy the data to. We need to
	 * leave 8 bytes for a next pointer (unused). We also need to
	 * include any configured skip. Then we need to align the IP
	 * packet src and dest into the same 64bit word. The below
	 * calculation may add a little extra, but that doesn't
	 * hurt.
	 */
	copy_location = packet_buffer + sizeof(u64);
	copy_location += ((CVMX_HELPER_FIRST_MBUFF_SKIP + 7) & 0xfff8) + 6;

	/*
	 * We have to copy the packet since whoever processes this
	 * packet will free it to a hardware pool. We can't use the
	 * trick of counting outstanding packets like in
	 * cvm_oct_xmit.
	 */
	memcpy(copy_location, skb->data, skb->len);

	/*
	 * Fill in some of the work queue fields. We may need to add
	 * more if the software at the other end needs them.
	 */
	if (!OCTEON_IS_MODEL(OCTEON_CN68XX))
		work->word0.pip.cn38xx.hw_chksum = skb->csum;
	work->word1.len = skb->len;
	cvmx_wqe_set_port(work, priv->port);
	cvmx_wqe_set_qos(work, priv->port & 0x7);
	cvmx_wqe_set_grp(work, pow_send_group);
	work->word1.tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
	work->word1.tag = pow_send_group; /* FIXME */
	/* Default to zero. Explicit sets of zero below are commented out. */
	work->word2.u64 = 0;
	work->word2.s.bufs = 1;
	work->packet_ptr.u64 = 0;
	work->packet_ptr.s.addr = cvmx_ptr_to_phys(copy_location);
	work->packet_ptr.s.pool = CVMX_FPA_PACKET_POOL;
	work->packet_ptr.s.size = CVMX_FPA_PACKET_POOL_SIZE;
	work->packet_ptr.s.back = (copy_location - packet_buffer) >> 7;

	if (skb->protocol == htons(ETH_P_IP)) {
		work->word2.s.ip_offset = 14;
#if 0
		work->word2.s.vlan_valid = 0;	/* FIXME */
		work->word2.s.vlan_cfi = 0;	/* FIXME */
		work->word2.s.vlan_id = 0;	/* FIXME */
		work->word2.s.dec_ipcomp = 0;	/* FIXME */
#endif
		work->word2.s.tcp_or_udp =
			(ip_hdr(skb)->protocol == IPPROTO_TCP) ||
			(ip_hdr(skb)->protocol == IPPROTO_UDP);
#if 0
		/* FIXME */
		work->word2.s.dec_ipsec = 0;
		/* We only support IPv4 right now */
		work->word2.s.is_v6 = 0;
		/* Hardware would set to zero */
		work->word2.s.software = 0;
		/* No error, packet is internal */
		work->word2.s.L4_error = 0;
#endif
		work->word2.s.is_frag = !((ip_hdr(skb)->frag_off == 0) ||
					  (ip_hdr(skb)->frag_off ==
					   1 << 14));
#if 0
		/* Assume Linux is sending a good packet */
		work->word2.s.IP_exc = 0;
#endif
		work->word2.s.is_bcast = (skb->pkt_type == PACKET_BROADCAST);
		work->word2.s.is_mcast = (skb->pkt_type == PACKET_MULTICAST);
#if 0
		/* This is an IP packet */
		work->word2.s.not_IP = 0;
		/* No error, packet is internal */
		work->word2.s.rcv_error = 0;
		/* No error, packet is internal */
		work->word2.s.err_code = 0;
#endif

		/*
		 * When copying the data, include 4 bytes of the
		 * ethernet header to align the same way hardware
		 * does.
		 */
		memcpy(work->packet_data, skb->data + 10,
		       sizeof(work->packet_data));
	} else {
#if 0
		work->word2.snoip.vlan_valid = 0;	/* FIXME */
		work->word2.snoip.vlan_cfi = 0;		/* FIXME */
		work->word2.snoip.vlan_id = 0;		/* FIXME */
		work->word2.snoip.software = 0;		/* Hardware would set to zero */
#endif
		work->word2.snoip.is_rarp = skb->protocol == htons(ETH_P_RARP);
		work->word2.snoip.is_arp = skb->protocol == htons(ETH_P_ARP);
		work->word2.snoip.is_bcast =
			(skb->pkt_type == PACKET_BROADCAST);
		work->word2.snoip.is_mcast =
			(skb->pkt_type == PACKET_MULTICAST);
		work->word2.snoip.not_IP = 1;	/* IP was done up above */
#if 0
		/* No error, packet is internal */
		work->word2.snoip.rcv_error = 0;
		/* No error, packet is internal */
		work->word2.snoip.err_code = 0;
#endif
		memcpy(work->packet_data, skb->data, sizeof(work->packet_data));
	}

	/* Submit the packet to the POW */
	cvmx_pow_work_submit(work, work->word1.tag, work->word1.tag_type,
			     cvmx_wqe_get_qos(work), cvmx_wqe_get_grp(work));
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	dev_consume_skb_any(skb);
	return 0;
}

/**
 * cvm_oct_tx_shutdown_dev - free all SKBs that are currently queued for TX.
 * @dev: Device being shutdown
 */
void cvm_oct_tx_shutdown_dev(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	unsigned long flags;
	int qos;

	for (qos = 0; qos < 16; qos++) {
		spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
		while (skb_queue_len(&priv->tx_free_list[qos]))
			dev_kfree_skb_any(__skb_dequeue(&priv->tx_free_list[qos]));
		spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
	}
}

static void cvm_oct_tx_do_cleanup(unsigned long arg)
{
	int port;

	for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
		if (cvm_oct_device[port]) {
			struct net_device *dev = cvm_oct_device[port];

			cvm_oct_free_tx_skbs(dev);
		}
	}
}

static irqreturn_t cvm_oct_tx_cleanup_watchdog(int cpl, void *dev_id)
{
	/* Disable the interrupt. */
	cvmx_write_csr(CVMX_CIU_TIMX(1), 0);
	/* Do the work in the tasklet. */
	tasklet_schedule(&cvm_oct_tx_cleanup_tasklet);
	return IRQ_HANDLED;
}

void cvm_oct_tx_initialize(void)
{
	int i;

	/* Disable the interrupt. */
	cvmx_write_csr(CVMX_CIU_TIMX(1), 0);
	/* Register an IRQ handler to receive CIU_TIMX(1) interrupts */
	i = request_irq(OCTEON_IRQ_TIMER1,
			cvm_oct_tx_cleanup_watchdog, 0,
			"Ethernet", cvm_oct_device);

	if (i)
		panic("Could not acquire Ethernet IRQ %d\n", OCTEON_IRQ_TIMER1);
}

void cvm_oct_tx_shutdown(void)
{
	/* Free the interrupt handler */
	free_irq(OCTEON_IRQ_TIMER1, cvm_oct_device);
}