#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <linux/ipv6.h>

#include "qlcnic.h"

#define QLCNIC_MAC_HASH(MAC)\
	((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25))

#define TX_ETHER_PKT	0x01
#define TX_TCP_PKT	0x02
#define TX_UDP_PKT	0x03
#define TX_IP_PKT	0x04
#define TX_TCP_LSO	0x05
#define TX_TCP_LSO6	0x06
#define TX_TCPV6_PKT	0x0b
#define TX_UDPV6_PKT	0x0c
#define FLAGS_VLAN_TAGGED	0x10
#define FLAGS_VLAN_OOB		0x40

#define qlcnic_set_tx_vlan_tci(cmd_desc, v)	\
	((cmd_desc)->vlan_TCI = cpu_to_le16(v))
#define qlcnic_set_cmd_desc_port(cmd_desc, var)	\
	((cmd_desc)->port_ctxid |= ((var) & 0x0F))
#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var)	\
	((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))

#define qlcnic_set_tx_port(_desc, _port) \
	((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))

#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
	((_desc)->flags_opcode |= \
	cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))

#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
	((_desc)->nfrags__length = \
	cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))

/* owner bits of status_desc */
#define STATUS_OWNER_HOST	(0x1ULL << 56)
#define STATUS_OWNER_PHANTOM	(0x2ULL << 56)

/* Status descriptor:
 * 0-3 port, 4-7 status, 8-11 type, 12-27 total_length
 * 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
 * 53-55 desc_cnt, 56-57 owner, 58-63 opcode
 */
#define qlcnic_get_sts_port(sts_data)	\
	((sts_data) & 0x0F)
#define qlcnic_get_sts_status(sts_data)	\
	(((sts_data) >> 4) & 0x0F)
#define qlcnic_get_sts_type(sts_data)	\
	(((sts_data) >> 8) & 0x0F)
#define qlcnic_get_sts_totallength(sts_data)	\
	(((sts_data) >> 12) & 0xFFFF)
#define qlcnic_get_sts_refhandle(sts_data)	\
	(((sts_data) >> 28) & 0xFFFF)
#define qlcnic_get_sts_prot(sts_data)	\
	(((sts_data) >> 44) & 0x0F)
#define qlcnic_get_sts_pkt_offset(sts_data)	\
	(((sts_data) >> 48) & 0x1F)
#define qlcnic_get_sts_desc_cnt(sts_data)	\
	(((sts_data) >> 53) & 0x7)
#define qlcnic_get_sts_opcode(sts_data)	\
	(((sts_data) >> 58) & 0x03F)

#define qlcnic_get_lro_sts_refhandle(sts_data)	\
	((sts_data) & 0x07FFF)
#define qlcnic_get_lro_sts_length(sts_data)	\
	(((sts_data) >> 16) & 0x0FFFF)
#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data)	\
	(((sts_data) >> 32) & 0x0FF)
#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data)	\
	(((sts_data) >> 40) & 0x0FF)
#define qlcnic_get_lro_sts_timestamp(sts_data)	\
	(((sts_data) >> 48) & 0x1)
#define qlcnic_get_lro_sts_type(sts_data)	\
	(((sts_data) >> 49) & 0x7)
#define qlcnic_get_lro_sts_push_flag(sts_data)	\
	(((sts_data) >> 52) & 0x1)
#define qlcnic_get_lro_sts_seq_number(sts_data)	\
	((sts_data) & 0x0FFFFFFFF)
#define qlcnic_get_lro_sts_mss(sts_data1)	\
	((sts_data1 >> 32) & 0x0FFFF)

/* opcode field in status_desc */
#define QLCNIC_SYN_OFFLOAD	0x03
#define QLCNIC_RXPKT_DESC	0x04
#define QLCNIC_OLD_RXPKT_DESC	0x3f
#define QLCNIC_RESPONSE_DESC	0x05
#define QLCNIC_LRO_DESC		0x12

/* for status field in status_desc */
#define STATUS_CKSUM_LOOP	0
#define STATUS_CKSUM_OK		2
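/*
 * Worked example of the extractor macros above, using an illustrative
 * (made-up) status word rather than one captured from hardware:
 *
 *   sts_data0 = 0x1120400120040021ULL decodes as
 *     port         = 0x01
 *     status       = 0x02  (STATUS_CKSUM_OK)
 *     type         = 0x00  (RDS ring 0)
 *     total_length = 0x40  (64 bytes)
 *     refhandle    = 0x12
 *     protocol     = 0x04
 *     pkt_offset   = 0x00
 *     desc_cnt     = 0x01
 *     owner        = 0x01  (STATUS_OWNER_HOST is set)
 *     opcode       = 0x04  (QLCNIC_RXPKT_DESC)
 */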
static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
				 u64 uaddr, __le16 vlan_id,
				 struct qlcnic_host_tx_ring *tx_ring)
{
	struct cmd_desc_type0 *hwdesc;
	struct qlcnic_nic_req *req;
	struct qlcnic_mac_req *mac_req;
	struct qlcnic_vlan_req *vlan_req;
	u32 producer;
	u64 word;

	producer = tx_ring->producer;
	hwdesc = &tx_ring->desc_head[tx_ring->producer];

	req = (struct qlcnic_nic_req *)hwdesc;
	memset(req, 0, sizeof(struct qlcnic_nic_req));
	req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);

	word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
	req->req_hdr = cpu_to_le64(word);

	mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
	mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
	memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);

	vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
	vlan_req->vlan_id = vlan_id;

	tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
	smp_mb();
}

static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
			       struct qlcnic_host_tx_ring *tx_ring,
			       struct cmd_desc_type0 *first_desc,
			       struct sk_buff *skb)
{
	struct ethhdr *phdr = (struct ethhdr *)(skb->data);
	struct qlcnic_filter *fil, *tmp_fil;
	struct hlist_node *tmp_hnode, *n;
	struct hlist_head *head;
	u64 src_addr = 0;
	__le16 vlan_id = 0;
	u8 hindex;

	if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
		return;

	if (adapter->fhash.fnum >= adapter->fhash.fmax)
		return;

	/* Only NPAR capable devices support vlan based learning */
	if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
		vlan_id = first_desc->vlan_TCI;
	memcpy(&src_addr, phdr->h_source, ETH_ALEN);
	hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1);
	head = &(adapter->fhash.fhead[hindex]);

	hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
		if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
		    tmp_fil->vlan_id == vlan_id) {
			/* Re-add an aged-out entry; use time_after() so the
			 * comparison is safe across jiffies wraparound.
			 */
			if (time_after(jiffies,
				       tmp_fil->ftime + QLCNIC_READD_AGE * HZ))
				qlcnic_change_filter(adapter, src_addr, vlan_id,
						     tx_ring);
			tmp_fil->ftime = jiffies;
			return;
		}
	}

	fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
	if (!fil)
		return;

	qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring);

	fil->ftime = jiffies;
	fil->vlan_id = vlan_id;
	memcpy(fil->faddr, &src_addr, ETH_ALEN);

	spin_lock(&adapter->mac_learn_lock);

	hlist_add_head(&(fil->fnode), head);
	adapter->fhash.fnum++;

	spin_unlock(&adapter->mac_learn_lock);
}
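/*
 * Illustrative hash computation for the filter table above (values are
 * made up; assumes a little-endian host, as the memcpy() into a u64 in
 * qlcnic_send_filter() implies):
 *
 *   h_source = 00:11:22:33:44:55  ->  src_addr = 0x0000554433221100ULL
 *   QLCNIC_MAC_HASH(src_addr) = (0x22 & 7) | ((0x55 & 7) << 3) = 0x2a
 *
 * i.e. the hash mixes the low three bits of MAC bytes 2 and 5 into a
 * 6-bit bucket index, which is then masked with QLCNIC_LB_MAX_FILTERS - 1.
 */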
static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
			 struct cmd_desc_type0 *first_desc, struct sk_buff *skb)
{
	u8 l4proto, opcode = 0, hdr_len = 0;
	u16 flags = 0, vlan_tci = 0;
	int copied, offset, copy_len, size;
	struct cmd_desc_type0 *hwdesc;
	struct vlan_ethhdr *vh;
	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
	u16 protocol = ntohs(skb->protocol);
	u32 producer = tx_ring->producer;

	if (protocol == ETH_P_8021Q) {
		vh = (struct vlan_ethhdr *)skb->data;
		flags = FLAGS_VLAN_TAGGED;
		vlan_tci = ntohs(vh->h_vlan_TCI);
		protocol = ntohs(vh->h_vlan_encapsulated_proto);
	} else if (vlan_tx_tag_present(skb)) {
		flags = FLAGS_VLAN_OOB;
		vlan_tci = vlan_tx_tag_get(skb);
	}
	if (unlikely(adapter->pvid)) {
		if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
			return -EIO;
		if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
			goto set_flags;

		flags = FLAGS_VLAN_OOB;
		vlan_tci = adapter->pvid;
	}
set_flags:
	qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);

	if (*(skb->data) & BIT_0) {
		flags |= BIT_0;
		memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
	}
	opcode = TX_ETHER_PKT;
	if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
	    skb_shinfo(skb)->gso_size > 0) {
		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		first_desc->total_hdr_length = hdr_len;
		opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;

		/* For LSO, we need to copy the MAC/IP/TCP headers into
		 * the descriptor ring
		 */
		copied = 0;
		offset = 2;

		if (flags & FLAGS_VLAN_OOB) {
			first_desc->total_hdr_length += VLAN_HLEN;
			first_desc->tcp_hdr_offset = VLAN_HLEN;
			first_desc->ip_hdr_offset = VLAN_HLEN;

			/* Only in case of TSO on vlan device */
			flags |= FLAGS_VLAN_TAGGED;

			/* Create a TSO vlan header template for firmware */
			hwdesc = &tx_ring->desc_head[producer];
			tx_ring->cmd_buf_arr[producer].skb = NULL;

			copy_len = min((int)sizeof(struct cmd_desc_type0) -
				       offset, hdr_len + VLAN_HLEN);

			/* Copy the dest/src MACs (12 bytes), insert the
			 * 4-byte 802.1Q tag, then copy the rest of the
			 * header after the tag (hence the 16-byte offset).
			 */
			vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
			skb_copy_from_linear_data(skb, vh, 12);
			vh->h_vlan_proto = htons(ETH_P_8021Q);
			vh->h_vlan_TCI = htons(vlan_tci);

			skb_copy_from_linear_data_offset(skb, 12,
							 (char *)vh + 16,
							 copy_len - 16);
			copied = copy_len - VLAN_HLEN;
			offset = 0;
			producer = get_next_index(producer, tx_ring->num_desc);
		}

		while (copied < hdr_len) {
			size = (int)sizeof(struct cmd_desc_type0) - offset;
			copy_len = min(size, (hdr_len - copied));
			hwdesc = &tx_ring->desc_head[producer];
			tx_ring->cmd_buf_arr[producer].skb = NULL;
			skb_copy_from_linear_data_offset(skb, copied,
							 (char *)hwdesc +
							 offset, copy_len);
			copied += copy_len;
			offset = 0;
			producer = get_next_index(producer, tx_ring->num_desc);
		}

		tx_ring->producer = producer;
		smp_mb();
		adapter->stats.lso_frames++;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (protocol == ETH_P_IP) {
			l4proto = ip_hdr(skb)->protocol;

			if (l4proto == IPPROTO_TCP)
				opcode = TX_TCP_PKT;
			else if (l4proto == IPPROTO_UDP)
				opcode = TX_UDP_PKT;
		} else if (protocol == ETH_P_IPV6) {
			l4proto = ipv6_hdr(skb)->nexthdr;

			if (l4proto == IPPROTO_TCP)
				opcode = TX_TCPV6_PKT;
			else if (l4proto == IPPROTO_UDP)
				opcode = TX_UDPV6_PKT;
		}
	}
	first_desc->tcp_hdr_offset += skb_transport_offset(skb);
	first_desc->ip_hdr_offset += skb_network_offset(skb);
	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);

	return 0;
}
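/*
 * Header-copy bookkeeping above, worked through for a plain TCPv4 TSO
 * frame (assuming the usual 64-byte struct cmd_desc_type0):
 *
 *   hdr_len = 14 (Ethernet) + 20 (IPv4) + 20 (TCP) = 54 bytes
 *   the first copy descriptor holds sizeof(desc) - 2 = 62 bytes >= 54,
 *   so a single extra descriptor carries the whole header template.
 *
 * With an out-of-band VLAN tag the template grows by VLAN_HLEN (4):
 *   copy_len = min(62, 54 + 4) = 58, copied = 58 - 4 = 54 = hdr_len,
 *   so the tagged template still fits in that one descriptor.
 */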
static int qlcnic_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
			     struct qlcnic_cmd_buffer *pbuf)
{
	struct qlcnic_skb_frag *nf;
	struct skb_frag_struct *frag;
	int i, nr_frags;
	dma_addr_t map;

	nr_frags = skb_shinfo(skb)->nr_frags;
	nf = &pbuf->frag_array[0];

	map = pci_map_single(pdev, skb->data, skb_headlen(skb),
			     PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, map))
		goto out_err;

	nf->dma = map;
	nf->length = skb_headlen(skb);

	for (i = 0; i < nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		nf = &pbuf->frag_array[i+1];
		map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
				       DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, map))
			goto unwind;

		nf->dma = map;
		nf->length = skb_frag_size(frag);
	}

	return 0;

unwind:
	while (--i >= 0) {
		nf = &pbuf->frag_array[i+1];
		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
	}

	nf = &pbuf->frag_array[0];
	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);

out_err:
	return -ENOMEM;
}

static void qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
				 struct qlcnic_cmd_buffer *pbuf)
{
	struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
	int i, nr_frags = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < nr_frags; i++) {
		nf = &pbuf->frag_array[i+1];
		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
	}

	nf = &pbuf->frag_array[0];
	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
	pbuf->skb = NULL;
}

static inline void qlcnic_clear_cmddesc(u64 *desc)
{
	desc[0] = 0ULL;
	desc[2] = 0ULL;
	desc[7] = 0ULL;
}
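/*
 * Note on qlcnic_clear_cmddesc(): only 64-bit words 0, 2 and 7 of the
 * descriptor are zeroed. Presumably the remaining words are rewritten
 * unconditionally before the descriptor is handed to hardware (they
 * carry the per-buffer DMA addresses filled in below), so only the
 * words that might otherwise keep stale flags, lengths or VLAN state
 * need clearing up front.
 */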
netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
	struct qlcnic_cmd_buffer *pbuf;
	struct qlcnic_skb_frag *buffrag;
	struct cmd_desc_type0 *hwdesc, *first_desc;
	struct pci_dev *pdev;
	struct ethhdr *phdr;
	int i, k, frag_count, delta = 0;
	u32 producer, num_txd;

	num_txd = tx_ring->num_desc;

	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
		netif_stop_queue(netdev);
		return NETDEV_TX_BUSY;
	}

	if (adapter->flags & QLCNIC_MACSPOOF) {
		phdr = (struct ethhdr *)skb->data;
		if (!ether_addr_equal(phdr->h_source, adapter->mac_addr))
			goto drop_packet;
	}

	frag_count = skb_shinfo(skb)->nr_frags + 1;
	/* 14 frags supported for normal packet and
	 * 32 frags supported for TSO packet
	 */
	if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
		for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
			delta += skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (!__pskb_pull_tail(skb, delta))
			goto drop_packet;

		frag_count = 1 + skb_shinfo(skb)->nr_frags;
	}

	if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
		netif_stop_queue(netdev);
		if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
			netif_start_queue(netdev);
		} else {
			adapter->stats.xmit_off++;
			return NETDEV_TX_BUSY;
		}
	}

	producer = tx_ring->producer;
	pbuf = &tx_ring->cmd_buf_arr[producer];
	pdev = adapter->pdev;
	first_desc = &tx_ring->desc_head[producer];
	hwdesc = &tx_ring->desc_head[producer];
	qlcnic_clear_cmddesc((u64 *)hwdesc);

	if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
		adapter->stats.tx_dma_map_error++;
		goto drop_packet;
	}

	pbuf->skb = skb;
	pbuf->frag_count = frag_count;

	qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
	qlcnic_set_tx_port(first_desc, adapter->portnum);

	for (i = 0; i < frag_count; i++) {
		k = i % 4;

		if ((k == 0) && (i > 0)) {
			/* move to next desc. */
			producer = get_next_index(producer, num_txd);
			hwdesc = &tx_ring->desc_head[producer];
			qlcnic_clear_cmddesc((u64 *)hwdesc);
			tx_ring->cmd_buf_arr[producer].skb = NULL;
		}

		buffrag = &pbuf->frag_array[i];
		hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
		switch (k) {
		case 0:
			hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
			break;
		case 1:
			hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
			break;
		case 2:
			hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
			break;
		case 3:
			hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
			break;
		}
	}

	tx_ring->producer = get_next_index(producer, num_txd);
	smp_mb();

	if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
		goto unwind_buff;

	if (adapter->mac_learn)
		qlcnic_send_filter(adapter, tx_ring, first_desc, skb);

	adapter->stats.txbytes += skb->len;
	adapter->stats.xmitcalled++;

	qlcnic_update_cmd_producer(tx_ring);

	return NETDEV_TX_OK;

unwind_buff:
	qlcnic_unmap_buffers(pdev, skb, pbuf);
drop_packet:
	adapter->stats.txdropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->ahw->linkup && !linkup) {
		netdev_info(netdev, "NIC Link is down\n");
		adapter->ahw->linkup = 0;
		if (netif_running(netdev)) {
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
	} else if (!adapter->ahw->linkup && linkup) {
		netdev_info(netdev, "NIC Link is up\n");
		adapter->ahw->linkup = 1;
		if (netif_running(netdev)) {
			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		}
	}
}

static int qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
			       struct qlcnic_host_rds_ring *rds_ring,
			       struct qlcnic_rx_buffer *buffer)
{
	struct sk_buff *skb;
	dma_addr_t dma;
	struct pci_dev *pdev = adapter->pdev;

	skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
	if (!skb) {
		adapter->stats.skb_alloc_failure++;
		return -ENOMEM;
	}

	skb_reserve(skb, NET_IP_ALIGN);
	dma = pci_map_single(pdev, skb->data, rds_ring->dma_size,
			     PCI_DMA_FROMDEVICE);

	if (pci_dma_mapping_error(pdev, dma)) {
		adapter->stats.rx_dma_map_error++;
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}

	buffer->skb = skb;
	buffer->dma = dma;

	return 0;
}

static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
					struct qlcnic_host_rds_ring *rds_ring)
{
	struct rcv_desc *pdesc;
	struct qlcnic_rx_buffer *buffer;
	int count = 0;
	uint32_t producer;
	struct list_head *head;

	if (!spin_trylock(&rds_ring->lock))
		return;

	producer = rds_ring->producer;
	head = &rds_ring->free_list;

	while (!list_empty(head)) {
		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);

		if (!buffer->skb) {
			if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
				break;
		}

		count++;
		list_del(&buffer->list);

		/* make a rcv descriptor */
		pdesc = &rds_ring->desc_head[producer];
		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
		producer = get_next_index(producer, rds_ring->num_desc);
	}

	if (count) {
		rds_ring->producer = producer;
		writel((producer - 1) & (rds_ring->num_desc - 1),
		       rds_ring->crb_rcv_producer);
	}

	spin_unlock(&rds_ring->lock);
}
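/*
 * Doorbell arithmetic above, with example values: after posting,
 * 'producer' already points at the next free slot, so the hardware is
 * told the index of the last descriptor actually filled. With a
 * hypothetical 1024-entry ring, producer == 0 after wrapping means the
 * write is (0 - 1) & 1023 = 1023, i.e. the final slot just posted.
 */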
static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
{
	u32 sw_consumer, hw_consumer;
	int i, done, count = 0;
	struct qlcnic_cmd_buffer *buffer;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_skb_frag *frag;
	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;

	if (!spin_trylock(&adapter->tx_clean_lock))
		return 1;

	sw_consumer = tx_ring->sw_consumer;
	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));

	while (sw_consumer != hw_consumer) {
		buffer = &tx_ring->cmd_buf_arr[sw_consumer];
		if (buffer->skb) {
			frag = &buffer->frag_array[0];
			pci_unmap_single(pdev, frag->dma, frag->length,
					 PCI_DMA_TODEVICE);
			frag->dma = 0ULL;
			for (i = 1; i < buffer->frag_count; i++) {
				frag++;
				pci_unmap_page(pdev, frag->dma, frag->length,
					       PCI_DMA_TODEVICE);
				frag->dma = 0ULL;
			}

			adapter->stats.xmitfinished++;
			dev_kfree_skb_any(buffer->skb);
			buffer->skb = NULL;
		}

		sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
		if (++count >= MAX_STATUS_HANDLE)
			break;
	}

	if (count && netif_running(netdev)) {
		tx_ring->sw_consumer = sw_consumer;

		smp_mb();

		if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
			if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
				netif_wake_queue(netdev);
				adapter->stats.xmit_on++;
			}
		}
		adapter->tx_timeo_cnt = 0;
	}
	/*
	 * If everything is freed up to consumer, then check whether the ring
	 * is full. If the ring is full, check whether more needs to be freed
	 * and schedule the callback again.
	 *
	 * This happens when there are 2 CPUs: one could be freeing and the
	 * other filling it. If the ring is full when we get out of here and
	 * the card has already interrupted the host, then the host can miss
	 * the interrupt.
	 *
	 * There is still a possible race condition and the host could miss an
	 * interrupt. The card has to take care of this.
	 */
	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
	done = (sw_consumer == hw_consumer);

	spin_unlock(&adapter->tx_clean_lock);

	return done;
}
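/*
 * Reclaim walk above, traced with example indices: with sw_consumer == 2
 * and a firmware-reported hw_consumer == 5, buffers 2, 3 and 4 are
 * unmapped and freed, leaving sw_consumer at 5. The queue is woken only
 * once qlcnic_tx_avail() rises back above TX_STOP_THRESH, the same
 * threshold the transmit path used to stop it.
 */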
static int qlcnic_poll(struct napi_struct *napi, int budget)
{
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;
	int tx_complete, work_done;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;

	tx_complete = qlcnic_process_cmd_ring(adapter);
	work_done = qlcnic_process_rcv_ring(sds_ring, budget);

	if ((work_done < budget) && tx_complete) {
		napi_complete(&sds_ring->napi);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_enable_int(sds_ring);
	}

	return work_done;
}

static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
{
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;
	int work_done;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;

	work_done = qlcnic_process_rcv_ring(sds_ring, budget);

	if (work_done < budget) {
		napi_complete(&sds_ring->napi);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_enable_int(sds_ring);
	}

	return work_done;
}

static void qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
				    struct qlcnic_fw_msg *msg)
{
	u32 cable_OUI;
	u16 cable_len, link_speed;
	u8 link_status, module, duplex, autoneg, lb_status = 0;
	struct net_device *netdev = adapter->netdev;

	adapter->ahw->has_link_events = 1;

	cable_OUI = msg->body[1] & 0xffffffff;
	cable_len = (msg->body[1] >> 32) & 0xffff;
	link_speed = (msg->body[1] >> 48) & 0xffff;

	link_status = msg->body[2] & 0xff;
	duplex = (msg->body[2] >> 16) & 0xff;
	autoneg = (msg->body[2] >> 24) & 0xff;
	lb_status = (msg->body[2] >> 32) & 0x3;

	module = (msg->body[2] >> 8) & 0xff;
	if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE)
		dev_info(&netdev->dev,
			 "unsupported cable: OUI 0x%x, length %d\n",
			 cable_OUI, cable_len);
	else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN)
		dev_info(&netdev->dev, "unsupported cable length %d\n",
			 cable_len);

	if (!link_status && (lb_status == QLCNIC_ILB_MODE ||
			     lb_status == QLCNIC_ELB_MODE))
		adapter->ahw->loopback_state |= QLCNIC_LINKEVENT;

	qlcnic_advert_link_change(adapter, link_status);

	if (duplex == LINKEVENT_FULL_DUPLEX)
		adapter->ahw->link_duplex = DUPLEX_FULL;
	else
		adapter->ahw->link_duplex = DUPLEX_HALF;

	adapter->ahw->module_type = module;
	adapter->ahw->link_autoneg = autoneg;

	if (link_status) {
		adapter->ahw->link_speed = link_speed;
	} else {
		adapter->ahw->link_speed = SPEED_UNKNOWN;
		adapter->ahw->link_duplex = DUPLEX_UNKNOWN;
	}
}
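/*
 * Field layout of the link event above, traced with made-up payloads:
 *
 *   msg->body[1] = 0x2710000300001234ULL
 *     cable_OUI  = 0x1234 (low 32 bits)
 *     cable_len  = 3      (bits 32-47)
 *     link_speed = 0x2710 (bits 48-63) = 10000, i.e. 10 Gb/s
 *
 *   msg->body[2] = 0x0000000001010601ULL
 *     link_status = 0x01 (up),  module = 0x06,
 *     duplex = 0x01,  autoneg = 0x01,  lb_status = 0x0
 */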
static void qlcnic_handle_fw_message(int desc_cnt, int index,
				     struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_fw_msg msg;
	struct status_desc *desc;
	struct qlcnic_adapter *adapter;
	struct device *dev;
	int i = 0, opcode, ret;

	while (desc_cnt > 0 && i < 8) {
		desc = &sds_ring->desc_head[index];
		msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
		msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);

		index = get_next_index(index, sds_ring->num_desc);
		desc_cnt--;
	}

	adapter = sds_ring->adapter;
	dev = &adapter->pdev->dev;
	opcode = qlcnic_get_nic_msg_opcode(msg.body[0]);

	switch (opcode) {
	case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
		qlcnic_handle_linkevent(adapter, &msg);
		break;
	case QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK:
		ret = (u32)(msg.body[1]);
		switch (ret) {
		case 0:
			adapter->ahw->loopback_state |= QLCNIC_LB_RESPONSE;
			break;
		case 1:
			dev_info(dev, "loopback already in progress\n");
			adapter->ahw->diag_cnt = -QLCNIC_TEST_IN_PROGRESS;
			break;
		case 2:
			dev_info(dev, "loopback cable is not connected\n");
			adapter->ahw->diag_cnt = -QLCNIC_LB_CABLE_NOT_CONN;
			break;
		default:
			dev_info(dev,
				 "loopback configure request failed, err %x\n",
				 ret);
			adapter->ahw->diag_cnt = -QLCNIC_UNDEFINED_ERROR;
			break;
		}
		break;
	default:
		break;
	}
}

static struct sk_buff *
qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
		     struct qlcnic_host_rds_ring *rds_ring, u16 index,
		     u16 cksum)
{
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;

	buffer = &rds_ring->rx_buf_arr[index];

	if (unlikely(buffer->skb == NULL)) {
		WARN_ON(1);
		return NULL;
	}

	pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
			 PCI_DMA_FROMDEVICE);

	skb = buffer->skb;

	if (likely((adapter->netdev->features & NETIF_F_RXCSUM) &&
		   (cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) {
		adapter->stats.csummed++;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		skb_checksum_none_assert(skb);
	}

	buffer->skb = NULL;

	return skb;
}
static inline int qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter,
					  struct sk_buff *skb, u16 *vlan_tag)
{
	struct ethhdr *eth_hdr;

	if (!__vlan_get_tag(skb, vlan_tag)) {
		/* In-band tag found: strip the 802.1Q header from the frame */
		eth_hdr = (struct ethhdr *)skb->data;
		memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
		skb_pull(skb, VLAN_HLEN);
	}
	if (!adapter->pvid)
		return 0;

	if (*vlan_tag == adapter->pvid) {
		/* Outer vlan tag. Packet should follow non-vlan path */
		*vlan_tag = 0xffff;
		return 0;
	}
	if (adapter->flags & QLCNIC_TAGGING_ENABLED)
		return 0;

	return -EINVAL;
}

static struct qlcnic_rx_buffer *
qlcnic_process_rcv(struct qlcnic_adapter *adapter,
		   struct qlcnic_host_sds_ring *sds_ring, int ring,
		   u64 sts_data0)
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length, cksum, pkt_offset;
	u16 vid = 0xffff;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_get_sts_refhandle(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];
	length = qlcnic_get_sts_totallength(sts_data0);
	cksum = qlcnic_get_sts_status(sts_data0);
	pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return buffer;

	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	if (pkt_offset)
		skb_pull(skb, pkt_offset);

	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
		adapter->stats.rxdropped++;
		dev_kfree_skb(skb);
		return buffer;
	}

	skb->protocol = eth_type_trans(skb, netdev);

	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, vid);

	napi_gro_receive(&sds_ring->napi, skb);

	adapter->stats.rx_pkts++;
	adapter->stats.rxbytes += length;

	return buffer;
}

#define QLC_TCP_HDR_SIZE	20
#define QLC_TCP_TS_OPTION_SIZE	12
#define QLC_TCP_TS_HDR_SIZE	(QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE)
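/*
 * Offset arithmetic used by the LRO path below, for an illustrative
 * aggregated IPv4 flow without IP options:
 *
 *   l4_hdr_offset = 14 (Ethernet) + 20 (IPv4) = 34
 *   data_offset   = 34 + QLC_TCP_HDR_SIZE    = 54   (no timestamps)
 *   data_offset   = 34 + QLC_TCP_TS_HDR_SIZE = 66   (with timestamps)
 *
 * so skb_put() sizes the skb to headers plus lro_length of payload, and
 * skb_pull(l2_hdr_offset) then aligns the skb to the Ethernet header.
 */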
static struct qlcnic_rx_buffer *
qlcnic_process_lro(struct qlcnic_adapter *adapter,
		   int ring, u64 sts_data0, u64 sts_data1)
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	struct iphdr *iph;
	struct tcphdr *th;
	bool push, timestamp;
	int index, l2_hdr_offset, l4_hdr_offset;
	u16 lro_length, length, data_offset, vid = 0xffff;
	u32 seq_number;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_get_lro_sts_refhandle(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];

	timestamp = qlcnic_get_lro_sts_timestamp(sts_data0);
	lro_length = qlcnic_get_lro_sts_length(sts_data0);
	l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0);
	l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0);
	push = qlcnic_get_lro_sts_push_flag(sts_data0);
	seq_number = qlcnic_get_lro_sts_seq_number(sts_data1);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
	if (!skb)
		return buffer;

	if (timestamp)
		data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
	else
		data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE;

	skb_put(skb, lro_length + data_offset);
	skb_pull(skb, l2_hdr_offset);

	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
		adapter->stats.rxdropped++;
		dev_kfree_skb(skb);
		return buffer;
	}

	skb->protocol = eth_type_trans(skb, netdev);
	iph = (struct iphdr *)skb->data;
	th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
	length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
	iph->tot_len = htons(length);
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
	th->psh = push;
	th->seq = htonl(seq_number);
	length = skb->len;

	if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
		skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);
		if (skb->protocol == htons(ETH_P_IPV6))
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
		else
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, vid);
	netif_receive_skb(skb);

	adapter->stats.lro_pkts++;
	adapter->stats.lrobytes += length;

	return buffer;
}

int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
{
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct list_head *cur;
	struct status_desc *desc;
	struct qlcnic_rx_buffer *rxbuf;
	u64 sts_data0, sts_data1;
	__le64 owner_phantom = cpu_to_le64(STATUS_OWNER_PHANTOM);
	int opcode, ring, desc_cnt, count = 0;
	u32 consumer = sds_ring->consumer;

	while (count < max) {
		desc = &sds_ring->desc_head[consumer];
		sts_data0 = le64_to_cpu(desc->status_desc_data[0]);

		if (!(sts_data0 & STATUS_OWNER_HOST))
			break;

		desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
		opcode = qlcnic_get_sts_opcode(sts_data0);

		switch (opcode) {
		case QLCNIC_RXPKT_DESC:
		case QLCNIC_OLD_RXPKT_DESC:
		case QLCNIC_SYN_OFFLOAD:
			ring = qlcnic_get_sts_type(sts_data0);
			rxbuf = qlcnic_process_rcv(adapter, sds_ring, ring,
						   sts_data0);
			break;
		case QLCNIC_LRO_DESC:
			ring = qlcnic_get_lro_sts_type(sts_data0);
			sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
			rxbuf = qlcnic_process_lro(adapter, ring, sts_data0,
						   sts_data1);
			break;
		case QLCNIC_RESPONSE_DESC:
			qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
			/* fall through: no rx buffer to recycle */
		default:
			goto skip;
		}

		WARN_ON(desc_cnt > 1);

		if (likely(rxbuf))
			list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
		else
			adapter->stats.null_rxbuf++;

skip:
		for (; desc_cnt > 0; desc_cnt--) {
			desc = &sds_ring->desc_head[consumer];
			desc->status_desc_data[0] = owner_phantom;
			consumer = get_next_index(consumer, sds_ring->num_desc);
		}
		count++;
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &adapter->recv_ctx->rds_rings[ring];

		if (!list_empty(&sds_ring->free_list[ring])) {
			list_for_each(cur, &sds_ring->free_list[ring]) {
				rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
						   list);
				qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
			}
			spin_lock(&rds_ring->lock);
			list_splice_tail_init(&sds_ring->free_list[ring],
					      &rds_ring->free_list);
			spin_unlock(&rds_ring->lock);
		}

		qlcnic_post_rx_buffers_nodb(adapter, rds_ring);
	}

	if (count) {
		sds_ring->consumer = consumer;
		writel(consumer, sds_ring->crb_sts_consumer);
	}

	return count;
}
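/*
 * Descriptor ownership handshake used above: firmware hands a status
 * entry to the host by setting the owner field to STATUS_OWNER_HOST
 * (bit 56); once the host has consumed it, the entry is stamped
 * STATUS_OWNER_PHANTOM and the consumer index is written back, so the
 * poll loop naturally stops at the first entry firmware still owns.
 */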
void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
			    struct qlcnic_host_rds_ring *rds_ring)
{
	struct rcv_desc *pdesc;
	struct qlcnic_rx_buffer *buffer;
	int count = 0;
	u32 producer;
	struct list_head *head;

	producer = rds_ring->producer;
	head = &rds_ring->free_list;

	while (!list_empty(head)) {
		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);

		if (!buffer->skb) {
			if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
				break;
		}

		count++;
		list_del(&buffer->list);

		/* make a rcv descriptor */
		pdesc = &rds_ring->desc_head[producer];
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
		producer = get_next_index(producer, rds_ring->num_desc);
	}

	if (count) {
		rds_ring->producer = producer;
		writel((producer - 1) & (rds_ring->num_desc - 1),
		       rds_ring->crb_rcv_producer);
	}
}

static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter)
{
	int i;
	unsigned char *data = skb->data;

	pr_info("\n");
	for (i = 0; i < skb->len; i++) {
		QLCDB(adapter, DRV, "%02x ", data[i]);
		if ((i & 0x0f) == 8)
			pr_info("\n");
	}
}

static void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, int ring,
				    u64 sts_data0)
{
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length, cksum, pkt_offset;

	if (unlikely(ring >= adapter->max_rds_rings))
		return;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_get_sts_refhandle(sts_data0);
	length = qlcnic_get_sts_totallength(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
		return;

	cksum = qlcnic_get_sts_status(sts_data0);
	pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return;

	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	if (pkt_offset)
		skb_pull(skb, pkt_offset);

	if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
		adapter->ahw->diag_cnt++;
	else
		dump_skb(skb, adapter);

	dev_kfree_skb_any(skb);
	adapter->stats.rx_pkts++;
	adapter->stats.rxbytes += length;
}
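/*
 * Unlike qlcnic_process_rcv_ring(), the diagnostic variant below handles
 * a single status entry per call: it is driven synchronously by the
 * loopback self-test rather than by NAPI, and it counts matching test
 * frames in ahw->diag_cnt instead of delivering them up the stack.
 */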
void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct status_desc *desc;
	u64 sts_data0;
	int ring, opcode, desc_cnt;
	u32 consumer = sds_ring->consumer;

	desc = &sds_ring->desc_head[consumer];
	sts_data0 = le64_to_cpu(desc->status_desc_data[0]);

	if (!(sts_data0 & STATUS_OWNER_HOST))
		return;

	desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
	opcode = qlcnic_get_sts_opcode(sts_data0);
	switch (opcode) {
	case QLCNIC_RESPONSE_DESC:
		qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
		break;
	default:
		ring = qlcnic_get_sts_type(sts_data0);
		qlcnic_process_rcv_diag(adapter, ring, sts_data0);
		break;
	}

	for (; desc_cnt > 0; desc_cnt--) {
		desc = &sds_ring->desc_head[consumer];
		desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
		consumer = get_next_index(consumer, sds_ring->num_desc);
	}

	sds_ring->consumer = consumer;
	writel(consumer, sds_ring->crb_sts_consumer);
}

/* Reassemble a MAC address from two 32-bit registers: off2 carries the
 * top two bytes and off1 the low four, e.g. off2 = 0x1122 and
 * off1 = 0x33445566 yield 11:22:33:44:55:66. When alt_mac is set, the
 * address sits shifted within the register pair and is realigned first.
 */
void qlcnic_fetch_mac(u32 off1, u32 off2, u8 alt_mac, u8 *mac)
{
	u32 mac_low, mac_high;
	int i;

	mac_low = off1;
	mac_high = off2;

	if (alt_mac) {
		mac_low |= (mac_low >> 16) | (mac_high << 16);
		mac_high >>= 16;
	}

	for (i = 0; i < 2; i++)
		mac[i] = (u8)(mac_high >> ((1 - i) * 8));
	for (i = 2; i < 6; i++)
		mac[i] = (u8)(mac_low >> ((5 - i) * 8));
}

int qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
	int ring, max_sds_rings;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
		return -ENOMEM;

	max_sds_rings = adapter->max_sds_rings;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		/* Only the last ring's poll routine also reclaims TX */
		if (ring == max_sds_rings - 1)
			netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
				       QLCNIC_NETDEV_WEIGHT / max_sds_rings);
		else
			netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
				       QLCNIC_NETDEV_WEIGHT * 2);
	}

	return 0;
}

void qlcnic_napi_del(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		netif_napi_del(&sds_ring->napi);
	}

	qlcnic_free_sds_rings(adapter->recv_ctx);
}

void qlcnic_napi_enable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		napi_enable(&sds_ring->napi);
		qlcnic_enable_int(sds_ring);
	}
}

void qlcnic_napi_disable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		qlcnic_disable_int(sds_ring);
		napi_synchronize(&sds_ring->napi);
		napi_disable(&sds_ring->napi);
	}
}