Lines Matching refs:q  (Chelsio cxgb3 SGE driver, sge.c)
Each entry shows the source line number, the matching source line, the enclosing function, and whether q is a function argument or a local variable at that reference.

169 static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)  in rspq_to_qset()  argument
171 return container_of(q, struct sge_qset, rspq); in rspq_to_qset()
174 static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx) in txq_to_qset() argument
176 return container_of(q, struct sge_qset, txq[qidx]); in txq_to_qset()
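
The two helpers above recover the enclosing queue set from a pointer to one of its embedded member queues via container_of(). A minimal userspace sketch of the same back-pointer pattern follows; the struct names and layout are illustrative stand-ins, not the driver's sge_qset definition.

    #include <stddef.h>
    #include <stdio.h>

    /* Userspace stand-in for the kernel's container_of(). */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Hypothetical queue types; only the embedding matters here. */
    struct rspq { int cidx; };
    struct txq  { int pidx; };

    struct qset {
        struct rspq rspq;
        struct txq  txq[3];
    };

    static struct qset *rspq_to_qset(const struct rspq *q)
    {
        return container_of(q, struct qset, rspq);
    }

    static struct qset *txq_to_qset(const struct txq *q, int qidx)
    {
        /* The driver indexes offsetof() with qidx directly (a GCC
         * extension); stepping back over qidx elements first keeps
         * this sketch strictly portable. */
        return container_of(q - qidx, struct qset, txq[0]);
    }

    int main(void)
    {
        struct qset qs;

        /* Both helpers recover the address of the enclosing qset. */
        printf("%d %d\n",
               rspq_to_qset(&qs.rspq) == &qs,
               txq_to_qset(&qs.txq[1], 1) == &qs);
        return 0;
    }

The only requirement is that the member queue be embedded by value in the containing structure, so subtracting the member's offset lands on the start of the container.
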
189 const struct sge_rspq *q, unsigned int credits) in refill_rspq() argument
193 V_RSPQ(q->cntxt_id) | V_CREDITS(credits)); in refill_rspq()
233 static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q, in unmap_skb() argument
237 struct tx_sw_desc *d = &q->sdesc[cidx]; in unmap_skb()
240 sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit]; in unmap_skb()
266 d = cidx + 1 == q->size ? q->sdesc : d + 1; in unmap_skb()
282 static void free_tx_desc(struct adapter *adapter, struct sge_txq *q, in free_tx_desc() argument
287 unsigned int cidx = q->cidx; in free_tx_desc()
290 q->cntxt_id >= FW_TUNNEL_SGEEC_START; in free_tx_desc()
292 d = &q->sdesc[cidx]; in free_tx_desc()
296 unmap_skb(d->skb, q, cidx, pdev); in free_tx_desc()
303 if (++cidx == q->size) { in free_tx_desc()
305 d = q->sdesc; in free_tx_desc()
308 q->cidx = cidx; in free_tx_desc()
322 struct sge_txq *q, in reclaim_completed_tx() argument
325 unsigned int reclaim = q->processed - q->cleaned; in reclaim_completed_tx()
329 free_tx_desc(adapter, q, reclaim); in reclaim_completed_tx()
330 q->cleaned += reclaim; in reclaim_completed_tx()
331 q->in_use -= reclaim; in reclaim_completed_tx()
333 return q->processed - q->cleaned; in reclaim_completed_tx()
342 static inline int should_restart_tx(const struct sge_txq *q) in should_restart_tx() argument
344 unsigned int r = q->processed - q->cleaned; in should_restart_tx()
346 return q->in_use - r < (q->size >> 1); in should_restart_tx()
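
free_tx_desc(), reclaim_completed_tx() and should_restart_tx() above all work off two free-running unsigned counters, processed and cleaned, whose difference is the number of descriptors the hardware has completed but whose buffers have not yet been freed. A small self-contained sketch of that accounting, with invented field and function names:

    #include <stdio.h>

    /* Simplified tx queue accounting: 'processed' and 'cleaned' are
     * free-running unsigned counters, so 'processed - cleaned' stays
     * correct across wraparound. */
    struct txq_acct {
        unsigned int size;       /* ring size in descriptors */
        unsigned int in_use;     /* descriptors handed to hardware */
        unsigned int processed;  /* completions reported by hardware */
        unsigned int cleaned;    /* completions whose buffers were freed */
    };

    /* Free everything the hardware has finished with, update in_use. */
    static unsigned int reclaim_completed(struct txq_acct *q)
    {
        unsigned int reclaim = q->processed - q->cleaned;

        /* real driver: free_tx_desc() unmaps and frees 'reclaim' skbs */
        q->cleaned += reclaim;
        q->in_use  -= reclaim;
        return q->processed - q->cleaned;   /* still-outstanding work */
    }

    /* Restart transmission once less than half the ring is really used. */
    static int should_restart_tx(const struct txq_acct *q)
    {
        unsigned int r = q->processed - q->cleaned;

        return q->in_use - r < (q->size >> 1);
    }

    int main(void)
    {
        struct txq_acct q = { .size = 1024, .in_use = 900,
                              .processed = 600, .cleaned = 100 };

        printf("restart? %d\n", should_restart_tx(&q));  /* 400 < 512 -> 1 */
        reclaim_completed(&q);
        printf("in_use after reclaim: %u\n", q.in_use);  /* 400 */
        return 0;
    }

Because the counters are unsigned, the subtraction stays correct even after they wrap.
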
349 static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q, in clear_rx_desc() argument
352 if (q->use_pages && d->pg_chunk.page) { in clear_rx_desc()
356 q->alloc_size, DMA_FROM_DEVICE); in clear_rx_desc()
362 q->buf_size, DMA_FROM_DEVICE); in clear_rx_desc()
376 static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q) in free_rx_bufs() argument
378 unsigned int cidx = q->cidx; in free_rx_bufs()
380 while (q->credits--) { in free_rx_bufs()
381 struct rx_sw_desc *d = &q->sdesc[cidx]; in free_rx_bufs()
384 clear_rx_desc(pdev, q, d); in free_rx_bufs()
385 if (++cidx == q->size) in free_rx_bufs()
389 if (q->pg_chunk.page) { in free_rx_bufs()
390 __free_pages(q->pg_chunk.page, q->order); in free_rx_bufs()
391 q->pg_chunk.page = NULL; in free_rx_bufs()
438 static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q, in alloc_pg_chunk() argument
442 if (!q->pg_chunk.page) { in alloc_pg_chunk()
445 q->pg_chunk.page = alloc_pages(gfp, order); in alloc_pg_chunk()
446 if (unlikely(!q->pg_chunk.page)) in alloc_pg_chunk()
448 q->pg_chunk.va = page_address(q->pg_chunk.page); in alloc_pg_chunk()
449 q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) - in alloc_pg_chunk()
451 q->pg_chunk.offset = 0; in alloc_pg_chunk()
452 mapping = dma_map_page(&adapter->pdev->dev, q->pg_chunk.page, in alloc_pg_chunk()
453 0, q->alloc_size, DMA_FROM_DEVICE); in alloc_pg_chunk()
455 __free_pages(q->pg_chunk.page, order); in alloc_pg_chunk()
456 q->pg_chunk.page = NULL; in alloc_pg_chunk()
459 q->pg_chunk.mapping = mapping; in alloc_pg_chunk()
461 sd->pg_chunk = q->pg_chunk; in alloc_pg_chunk()
465 q->pg_chunk.offset += q->buf_size; in alloc_pg_chunk()
466 if (q->pg_chunk.offset == (PAGE_SIZE << order)) in alloc_pg_chunk()
467 q->pg_chunk.page = NULL; in alloc_pg_chunk()
469 q->pg_chunk.va += q->buf_size; in alloc_pg_chunk()
470 get_page(q->pg_chunk.page); in alloc_pg_chunk()
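
alloc_pg_chunk() above carves fixed-size RX buffers out of one higher-order page allocation (PAGE_SIZE << order), handing out successive offsets and dropping its pointer to the page once the last chunk is carved. The sketch below shows only the offset-carving idea in plain userspace C; DMA mapping and the per-chunk page refcounting (get_page()) are deliberately left out, and all names and sizes are made up.

    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative only: carve equal-size RX buffers out of one large
     * allocation, the way alloc_pg_chunk() slices a high-order page.
     * Without the driver's refcounting, exhausted blocks simply leak
     * in this sketch. */
    #define ALLOC_SIZE  16384u   /* stands in for PAGE_SIZE << order */
    #define BUF_SIZE     2048u   /* stands in for q->buf_size */

    struct chunk_src {
        char        *block;      /* current large allocation, or NULL */
        unsigned int offset;     /* next unused offset inside it */
    };

    /* Hand out the next buffer, allocating a fresh block on first use
     * or after the previous block has been fully carved up. */
    static void *alloc_chunk(struct chunk_src *src)
    {
        void *buf;

        if (!src->block) {
            src->block = malloc(ALLOC_SIZE);
            if (!src->block)
                return NULL;
            src->offset = 0;
        }

        buf = src->block + src->offset;
        src->offset += BUF_SIZE;
        if (src->offset == ALLOC_SIZE)
            src->block = NULL;   /* last chunk: stop using this block */

        return buf;
    }

    int main(void)
    {
        struct chunk_src src = { 0 };

        for (int i = 0; i < 10; i++)
            printf("chunk %d -> %p\n", i, alloc_chunk(&src));
        return 0;
    }
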
481 static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) in ring_fl_db() argument
483 if (q->pend_cred >= q->credits / 4) { in ring_fl_db()
484 q->pend_cred = 0; in ring_fl_db()
486 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id)); in ring_fl_db()
501 static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp) in refill_fl() argument
503 struct rx_sw_desc *sd = &q->sdesc[q->pidx]; in refill_fl()
504 struct rx_desc *d = &q->desc[q->pidx]; in refill_fl()
511 if (q->use_pages) { in refill_fl()
512 if (unlikely(alloc_pg_chunk(adap, q, sd, gfp, in refill_fl()
513 q->order))) { in refill_fl()
514 nomem: q->alloc_failed++; in refill_fl()
520 add_one_rx_chunk(mapping, d, q->gen); in refill_fl()
522 q->buf_size - SGE_PG_RSVD, in refill_fl()
527 struct sk_buff *skb = alloc_skb(q->buf_size, gfp); in refill_fl()
533 err = add_one_rx_buf(buf_start, q->buf_size, d, sd, in refill_fl()
534 q->gen, adap->pdev); in refill_fl()
536 clear_rx_desc(adap->pdev, q, sd); in refill_fl()
543 if (++q->pidx == q->size) { in refill_fl()
544 q->pidx = 0; in refill_fl()
545 q->gen ^= 1; in refill_fl()
546 sd = q->sdesc; in refill_fl()
547 d = q->desc; in refill_fl()
552 q->credits += count; in refill_fl()
553 q->pend_cred += count; in refill_fl()
554 ring_fl_db(adap, q); in refill_fl()
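
refill_fl() and ring_fl_db() above show two recurring SGE patterns: the producer index wraps around the ring and flips a generation bit on every lap, and new buffer credits are reported to the hardware only once the pending count reaches a quarter of the posted credits (line 483). A condensed sketch of that producer-side bookkeeping, with the register write replaced by a printf and the buffer allocation omitted; the types are illustrative:

    #include <stdio.h>

    /* Producer-side bookkeeping of a free list: each wrap of pidx flips
     * 'gen', and the doorbell is rung only once pend_cred reaches a
     * quarter of the posted credits. */
    struct freelist {
        unsigned int size;        /* ring entries */
        unsigned int pidx;        /* producer index */
        unsigned int gen;         /* generation bit written into descriptors */
        unsigned int credits;     /* buffers currently posted */
        unsigned int pend_cred;   /* credits not yet reported to hardware */
    };

    static void ring_fl_db(struct freelist *q)
    {
        if (q->pend_cred >= q->credits / 4) {
            q->pend_cred = 0;
            printf("doorbell: %u credits now posted\n", q->credits);
        }
    }

    static void refill_fl(struct freelist *q, unsigned int n)
    {
        unsigned int count = 0;

        while (n--) {
            /* real driver: allocate a buffer, DMA-map it and write a
             * descriptor at q->pidx tagged with q->gen */
            if (++q->pidx == q->size) {
                q->pidx = 0;
                q->gen ^= 1;      /* new lap, new generation */
            }
            count++;
        }
        q->credits   += count;
        q->pend_cred += count;
        ring_fl_db(q);
    }

    int main(void)
    {
        struct freelist fl = { .size = 8, .gen = 1 };

        refill_fl(&fl, 8);        /* fill the ring: pidx wraps, gen flips */
        printf("pidx=%u gen=%u credits=%u\n", fl.pidx, fl.gen, fl.credits);
        return 0;
    }
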
574 static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q, in recycle_rx_buf() argument
577 struct rx_desc *from = &q->desc[idx]; in recycle_rx_buf()
578 struct rx_desc *to = &q->desc[q->pidx]; in recycle_rx_buf()
580 q->sdesc[q->pidx] = q->sdesc[idx]; in recycle_rx_buf()
584 to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen)); in recycle_rx_buf()
585 to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen)); in recycle_rx_buf()
587 if (++q->pidx == q->size) { in recycle_rx_buf()
588 q->pidx = 0; in recycle_rx_buf()
589 q->gen ^= 1; in recycle_rx_buf()
592 q->credits++; in recycle_rx_buf()
593 q->pend_cred++; in recycle_rx_buf()
594 ring_fl_db(adap, q); in recycle_rx_buf()
643 static void t3_reset_qset(struct sge_qset *q) in t3_reset_qset() argument
645 if (q->adap && in t3_reset_qset()
646 !(q->adap->flags & NAPI_INIT)) { in t3_reset_qset()
647 memset(q, 0, sizeof(*q)); in t3_reset_qset()
651 q->adap = NULL; in t3_reset_qset()
652 memset(&q->rspq, 0, sizeof(q->rspq)); in t3_reset_qset()
653 memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET); in t3_reset_qset()
654 memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET); in t3_reset_qset()
655 q->txq_stopped = 0; in t3_reset_qset()
656 q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */ in t3_reset_qset()
657 q->rx_reclaim_timer.function = NULL; in t3_reset_qset()
658 q->nomem = 0; in t3_reset_qset()
659 napi_free_frags(&q->napi); in t3_reset_qset()
672 static void t3_free_qset(struct adapter *adapter, struct sge_qset *q) in t3_free_qset() argument
678 if (q->fl[i].desc) { in t3_free_qset()
680 t3_sge_disable_fl(adapter, q->fl[i].cntxt_id); in t3_free_qset()
682 free_rx_bufs(pdev, &q->fl[i]); in t3_free_qset()
683 kfree(q->fl[i].sdesc); in t3_free_qset()
685 q->fl[i].size * in t3_free_qset()
686 sizeof(struct rx_desc), q->fl[i].desc, in t3_free_qset()
687 q->fl[i].phys_addr); in t3_free_qset()
691 if (q->txq[i].desc) { in t3_free_qset()
693 t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0); in t3_free_qset()
695 if (q->txq[i].sdesc) { in t3_free_qset()
696 free_tx_desc(adapter, &q->txq[i], in t3_free_qset()
697 q->txq[i].in_use); in t3_free_qset()
698 kfree(q->txq[i].sdesc); in t3_free_qset()
701 q->txq[i].size * in t3_free_qset()
703 q->txq[i].desc, q->txq[i].phys_addr); in t3_free_qset()
704 __skb_queue_purge(&q->txq[i].sendq); in t3_free_qset()
707 if (q->rspq.desc) { in t3_free_qset()
709 t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id); in t3_free_qset()
712 q->rspq.size * sizeof(struct rsp_desc), in t3_free_qset()
713 q->rspq.desc, q->rspq.phys_addr); in t3_free_qset()
716 t3_reset_qset(q); in t3_free_qset()
840 struct sge_rspq *q, unsigned int len, in get_packet_pg() argument
848 newskb = skb = q->pg_skb; in get_packet_pg()
863 q->rx_recycle_buf++; in get_packet_pg()
867 if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres))) in get_packet_pg()
1045 static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q) in check_ring_tx_db() argument
1048 clear_bit(TXQ_LAST_PKT_DB, &q->flags); in check_ring_tx_db()
1049 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) { in check_ring_tx_db()
1050 set_bit(TXQ_LAST_PKT_DB, &q->flags); in check_ring_tx_db()
1052 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); in check_ring_tx_db()
1057 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); in check_ring_tx_db()
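
check_ring_tx_db() coalesces tx doorbells: on the conditional path (lines 1048-1052) the doorbell is written only if the queue was not already marked TXQ_RUNNING, while the other path (line 1057) writes it unconditionally. A rough userspace sketch of the coalescing branch using C11 atomics in place of the kernel bit operations; the flag names follow the driver, everything else is illustrative:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Sketch of the doorbell coalescing above: the doorbell is written
     * only when the queue was not already marked running, so a burst of
     * back-to-back packets generates a single doorbell.  The register
     * write is replaced by printf. */
    #define TXQ_RUNNING      0
    #define TXQ_LAST_PKT_DB  1

    static int test_and_set_bit(int nr, atomic_ulong *flags)
    {
        unsigned long mask = 1UL << nr;

        return (atomic_fetch_or(flags, mask) & mask) != 0;
    }

    static void clear_bit(int nr, atomic_ulong *flags)
    {
        atomic_fetch_and(flags, ~(1UL << nr));
    }

    static void set_bit(int nr, atomic_ulong *flags)
    {
        atomic_fetch_or(flags, 1UL << nr);
    }

    static void check_ring_tx_db(atomic_ulong *flags, unsigned int cntxt_id)
    {
        clear_bit(TXQ_LAST_PKT_DB, flags);
        if (test_and_set_bit(TXQ_RUNNING, flags) == 0) {
            set_bit(TXQ_LAST_PKT_DB, flags);
            printf("doorbell for egress context %u\n", cntxt_id);
        }
        /* else: queue already running, nothing to tell the hardware */
    }

    int main(void)
    {
        atomic_ulong flags = 0;

        check_ring_tx_db(&flags, 3);   /* rings the doorbell */
        check_ring_tx_db(&flags, 3);   /* coalesced: no second doorbell */
        return 0;
    }

TXQ_RUNNING is cleared elsewhere in the driver when the hardware reports the queue has gone idle, so the next burst rings the doorbell again.
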
1089 const struct sge_txq *q, in write_wr_hdr_sgl() argument
1096 struct tx_sw_desc *sd = &q->sdesc[pidx]; in write_wr_hdr_sgl()
1136 if (++pidx == q->size) { in write_wr_hdr_sgl()
1139 d = q->desc; in write_wr_hdr_sgl()
1140 sd = q->sdesc; in write_wr_hdr_sgl()
1179 struct sge_txq *q, unsigned int ndesc, in write_tx_pkt_wr() argument
1184 struct tx_desc *d = &q->desc[pidx]; in write_tx_pkt_wr()
1215 q->sdesc[pidx].skb = NULL; in write_tx_pkt_wr()
1228 V_WR_TID(q->token)); in write_tx_pkt_wr()
1240 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen, in write_tx_pkt_wr()
1242 htonl(V_WR_TID(q->token))); in write_tx_pkt_wr()
1246 struct sge_qset *qs, struct sge_txq *q) in t3_stop_tx_queue() argument
1250 q->stops++; in t3_stop_tx_queue()
1268 struct sge_txq *q; in t3_eth_xmit() local
1282 q = &qs->txq[TXQ_ETH]; in t3_eth_xmit()
1285 reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); in t3_eth_xmit()
1287 credits = q->size - q->in_use; in t3_eth_xmit()
1291 t3_stop_tx_queue(txq, qs, q); in t3_eth_xmit()
1294 dev->name, q->cntxt_id & 7); in t3_eth_xmit()
1306 q->in_use += ndesc; in t3_eth_xmit()
1307 if (unlikely(credits - ndesc < q->stop_thres)) { in t3_eth_xmit()
1308 t3_stop_tx_queue(txq, qs, q); in t3_eth_xmit()
1310 if (should_restart_tx(q) && in t3_eth_xmit()
1312 q->restarts++; in t3_eth_xmit()
1317 gen = q->gen; in t3_eth_xmit()
1318 q->unacked += ndesc; in t3_eth_xmit()
1319 compl = (q->unacked & 8) << (S_WR_COMPL - 3); in t3_eth_xmit()
1320 q->unacked &= 7; in t3_eth_xmit()
1321 pidx = q->pidx; in t3_eth_xmit()
1322 q->pidx += ndesc; in t3_eth_xmit()
1323 if (q->pidx >= q->size) { in t3_eth_xmit()
1324 q->pidx -= q->size; in t3_eth_xmit()
1325 q->gen ^= 1; in t3_eth_xmit()
1363 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr); in t3_eth_xmit()
1364 check_ring_tx_db(adap, q); in t3_eth_xmit()
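
t3_eth_xmit() above also throttles completion requests: q->unacked accumulates descriptors and only when the running count crosses a multiple of 8 is the completion bit set in the work request (lines 1318-1320), so the hardware generates roughly one tx completion per eight descriptors. A tiny sketch of that counter trick; in the driver the result is shifted into the work-request header rather than returned as a boolean:

    #include <stdio.h>

    /* A 3-bit 'unacked' counter accumulates descriptors; whenever the
     * sum overflows into bit 3, the caller asks for a completion. */
    static unsigned int unacked;

    static int want_completion(unsigned int ndesc)
    {
        int compl;

        unacked += ndesc;
        compl = (unacked & 8) != 0;   /* crossed a multiple of 8? */
        unacked &= 7;                 /* keep only the low 3 bits */
        return compl;
    }

    int main(void)
    {
        /* Send a stream of 3-descriptor packets and see which ones
         * request a completion. */
        for (int i = 0; i < 8; i++)
            printf("packet %d: compl=%d\n", i, want_completion(3));
        return 0;
    }
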
1418 static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q, in check_desc_avail() argument
1422 if (unlikely(!skb_queue_empty(&q->sendq))) { in check_desc_avail()
1423 addq_exit:__skb_queue_tail(&q->sendq, skb); in check_desc_avail()
1426 if (unlikely(q->size - q->in_use < ndesc)) { in check_desc_avail()
1427 struct sge_qset *qs = txq_to_qset(q, qid); in check_desc_avail()
1432 if (should_restart_tx(q) && in check_desc_avail()
1436 q->stops++; in check_desc_avail()
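
check_desc_avail() above is shared by the control and offload transmit paths: a packet goes straight to the ring only when the software backlog (q->sendq) is empty and enough descriptors are free; otherwise it is appended to the backlog, the queue is marked stopped, and the matching resume work (restart_ctrlq()/restart_offloadq(), below) drains the backlog later. A simplified decision sketch, with a plain counter standing in for the skb queue:

    #include <stdio.h>

    /* A packet bypasses the backlog only when the backlog is empty and
     * the ring has room; otherwise it is queued for the resume worker.
     * RETRY (the driver's return value 2, taken when the queue can be
     * restarted immediately) is not exercised here. */
    enum { SEND_NOW = 0, QUEUED = 1, RETRY = 2 };

    struct txq_state {
        unsigned int size;
        unsigned int in_use;
        unsigned int backlog;     /* stand-in for skb_queue_len(&q->sendq) */
        int          stopped;
    };

    static int check_desc_avail(struct txq_state *q, unsigned int ndesc)
    {
        if (q->backlog) {                 /* keep packets ordered */
            q->backlog++;
            return QUEUED;
        }
        if (q->size - q->in_use < ndesc) {
            q->stopped = 1;               /* resume worker will restart */
            q->backlog++;
            return QUEUED;
        }
        return SEND_NOW;
    }

    int main(void)
    {
        struct txq_state q = { .size = 4, .in_use = 3 };

        printf("%d\n", check_desc_avail(&q, 1));   /* 0: send now */
        q.in_use = 4;
        printf("%d\n", check_desc_avail(&q, 1));   /* 1: queued, stopped */
        printf("%d\n", check_desc_avail(&q, 1));   /* 1: queued behind backlog */
        return 0;
    }
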
1450 static inline void reclaim_completed_tx_imm(struct sge_txq *q) in reclaim_completed_tx_imm() argument
1452 unsigned int reclaim = q->processed - q->cleaned; in reclaim_completed_tx_imm()
1454 q->in_use -= reclaim; in reclaim_completed_tx_imm()
1455 q->cleaned += reclaim; in reclaim_completed_tx_imm()
1473 static int ctrl_xmit(struct adapter *adap, struct sge_txq *q, in ctrl_xmit() argument
1486 wrp->wr_lo = htonl(V_WR_TID(q->token)); in ctrl_xmit()
1488 spin_lock(&q->lock); in ctrl_xmit()
1489 again:reclaim_completed_tx_imm(q); in ctrl_xmit()
1491 ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL); in ctrl_xmit()
1494 spin_unlock(&q->lock); in ctrl_xmit()
1500 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen); in ctrl_xmit()
1502 q->in_use++; in ctrl_xmit()
1503 if (++q->pidx >= q->size) { in ctrl_xmit()
1504 q->pidx = 0; in ctrl_xmit()
1505 q->gen ^= 1; in ctrl_xmit()
1507 spin_unlock(&q->lock); in ctrl_xmit()
1510 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); in ctrl_xmit()
1525 struct sge_txq *q = &qs->txq[TXQ_CTRL]; in restart_ctrlq() local
1527 spin_lock(&q->lock); in restart_ctrlq()
1528 again:reclaim_completed_tx_imm(q); in restart_ctrlq()
1530 while (q->in_use < q->size && in restart_ctrlq()
1531 (skb = __skb_dequeue(&q->sendq)) != NULL) { in restart_ctrlq()
1533 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen); in restart_ctrlq()
1535 if (++q->pidx >= q->size) { in restart_ctrlq()
1536 q->pidx = 0; in restart_ctrlq()
1537 q->gen ^= 1; in restart_ctrlq()
1539 q->in_use++; in restart_ctrlq()
1542 if (!skb_queue_empty(&q->sendq)) { in restart_ctrlq()
1546 if (should_restart_tx(q) && in restart_ctrlq()
1549 q->stops++; in restart_ctrlq()
1552 spin_unlock(&q->lock); in restart_ctrlq()
1555 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); in restart_ctrlq()
1630 struct sge_txq *q, unsigned int pidx, in write_ofld_wr() argument
1637 struct tx_desc *d = &q->desc[pidx]; in write_ofld_wr()
1640 q->sdesc[pidx].skb = NULL; in write_ofld_wr()
1661 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, in write_ofld_wr()
1694 static int ofld_xmit(struct adapter *adap, struct sge_txq *q, in ofld_xmit() argument
1700 spin_lock(&q->lock); in ofld_xmit()
1701 again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); in ofld_xmit()
1703 ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD); in ofld_xmit()
1707 spin_unlock(&q->lock); in ofld_xmit()
1715 spin_unlock(&q->lock); in ofld_xmit()
1719 gen = q->gen; in ofld_xmit()
1720 q->in_use += ndesc; in ofld_xmit()
1721 pidx = q->pidx; in ofld_xmit()
1722 q->pidx += ndesc; in ofld_xmit()
1723 if (q->pidx >= q->size) { in ofld_xmit()
1724 q->pidx -= q->size; in ofld_xmit()
1725 q->gen ^= 1; in ofld_xmit()
1727 spin_unlock(&q->lock); in ofld_xmit()
1729 write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head); in ofld_xmit()
1730 check_ring_tx_db(adap, q); in ofld_xmit()
1745 struct sge_txq *q = &qs->txq[TXQ_OFLD]; in restart_offloadq() local
1750 spin_lock(&q->lock); in restart_offloadq()
1751 again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); in restart_offloadq()
1753 while ((skb = skb_peek(&q->sendq)) != NULL) { in restart_offloadq()
1757 if (unlikely(q->size - q->in_use < ndesc)) { in restart_offloadq()
1761 if (should_restart_tx(q) && in restart_offloadq()
1764 q->stops++; in restart_offloadq()
1772 gen = q->gen; in restart_offloadq()
1773 q->in_use += ndesc; in restart_offloadq()
1774 pidx = q->pidx; in restart_offloadq()
1775 q->pidx += ndesc; in restart_offloadq()
1777 if (q->pidx >= q->size) { in restart_offloadq()
1778 q->pidx -= q->size; in restart_offloadq()
1779 q->gen ^= 1; in restart_offloadq()
1781 __skb_unlink(skb, &q->sendq); in restart_offloadq()
1782 spin_unlock(&q->lock); in restart_offloadq()
1784 write_ofld_wr(adap, skb, q, pidx, gen, ndesc, in restart_offloadq()
1786 spin_lock(&q->lock); in restart_offloadq()
1788 spin_unlock(&q->lock); in restart_offloadq()
1791 set_bit(TXQ_RUNNING, &q->flags); in restart_offloadq()
1792 set_bit(TXQ_LAST_PKT_DB, &q->flags); in restart_offloadq()
1797 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); in restart_offloadq()
1853 static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb) in offload_enqueue() argument
1855 int was_empty = skb_queue_empty(&q->rx_queue); in offload_enqueue()
1857 __skb_queue_tail(&q->rx_queue, skb); in offload_enqueue()
1860 struct sge_qset *qs = rspq_to_qset(q); in offload_enqueue()
1876 struct sge_rspq *q, in deliver_partial_bundle() argument
1880 q->offload_bundles++; in deliver_partial_bundle()
1899 struct sge_rspq *q = &qs->rspq; in ofld_poll() local
1908 spin_lock_irq(&q->lock); in ofld_poll()
1910 skb_queue_splice_init(&q->rx_queue, &queue); in ofld_poll()
1913 spin_unlock_irq(&q->lock); in ofld_poll()
1916 spin_unlock_irq(&q->lock); in ofld_poll()
1928 q->offload_bundles++; in ofld_poll()
1936 spin_lock_irq(&q->lock); in ofld_poll()
1937 skb_queue_splice(&queue, &q->rx_queue); in ofld_poll()
1938 spin_unlock_irq(&q->lock); in ofld_poll()
1940 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered); in ofld_poll()
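
ofld_poll() above drains the offload backlog by splicing the whole rx_queue onto a private list under the lock (skb_queue_splice_init, line 1910) and then processing it with the lock released, so the interrupt-side producer is blocked only for the duration of the splice. A generic userspace sketch of that splice-then-process pattern, using a pthread mutex and a trivial singly linked list in place of the spinlock and skb queue:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Splice-then-process: detach the whole backlog under the lock,
     * then walk it with the lock dropped. */
    struct node { struct node *next; int val; };

    struct rx_queue {
        pthread_mutex_t lock;
        struct node    *head;
    };

    static void process_backlog(struct rx_queue *q)
    {
        struct node *list, *n;

        pthread_mutex_lock(&q->lock);
        list = q->head;              /* splice: steal the whole list */
        q->head = NULL;
        pthread_mutex_unlock(&q->lock);

        while ((n = list) != NULL) { /* lock not held while processing */
            list = n->next;
            printf("processing %d\n", n->val);
            free(n);
        }
    }

    int main(void)
    {
        struct rx_queue q = { .lock = PTHREAD_MUTEX_INITIALIZER };

        for (int i = 0; i < 3; i++) {
            struct node *n = malloc(sizeof(*n));

            if (!n)
                break;
            n->val = i;
            n->next = q.head;        /* head insertion, so LIFO order */
            q.head = n;
        }
        process_backlog(&q);         /* prints 2, 1, 0 */
        return 0;
    }
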
2287 const struct sge_rspq *q) in is_new_response() argument
2289 return (r->intr_gen & F_RSPD_GEN2) == q->gen; in is_new_response()
2292 static inline void clear_rspq_bufstate(struct sge_rspq * const q) in clear_rspq_bufstate() argument
2294 q->pg_skb = NULL; in clear_rspq_bufstate()
2295 q->rx_recycle_buf = 0; in clear_rspq_bufstate()
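
is_new_response() is the consumer side of the generation-bit protocol used throughout the SGE rings: each response carries the generation current when it was written, and the driver flips its own copy of the bit whenever cidx wraps (lines 2412-2415 below), so an entry whose generation does not match belongs to a previous lap and must not be consumed. A self-contained sketch pairing with the producer-side sketch after refill_fl() above; the types are illustrative:

    #include <stdio.h>

    /* An entry is new only if the generation bit the producer wrote
     * matches the lap the consumer believes it is on. */
    struct resp {
        unsigned int gen;        /* written by the producer */
        int          data;
    };

    struct rspq {
        struct resp  *desc;
        unsigned int size;
        unsigned int cidx;
        unsigned int gen;        /* consumer's idea of the current lap */
    };

    static int is_new_response(const struct resp *r, const struct rspq *q)
    {
        return r->gen == q->gen;
    }

    static void consume(struct rspq *q)
    {
        struct resp *r = &q->desc[q->cidx];

        while (is_new_response(r, q)) {
            printf("resp %d\n", r->data);
            if (++q->cidx == q->size) {
                q->cidx = 0;
                q->gen ^= 1;     /* next lap uses the flipped bit */
            }
            r = &q->desc[q->cidx];
        }
    }

    int main(void)
    {
        struct resp ring[4] = {
            { .gen = 1, .data = 10 },
            { .gen = 1, .data = 11 },
            { .gen = 0, .data = 99 },   /* stale: written on a previous lap */
        };
        struct rspq q = { .desc = ring, .size = 4, .gen = 1 };

        consume(&q);                    /* prints 10 and 11, stops at 99 */
        return 0;
    }

The same cidx/gen wrap appears again in process_responses() and process_pure_responses() below.
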
2325 struct sge_rspq *q = &qs->rspq; in process_responses() local
2326 struct rsp_desc *r = &q->desc[q->cidx]; in process_responses()
2332 q->next_holdoff = q->holdoff_tmr; in process_responses()
2334 while (likely(budget_left && is_new_response(r, q))) { in process_responses()
2355 q->async_notif++; in process_responses()
2360 q->next_holdoff = NOMEM_INTR_DELAY; in process_responses()
2361 q->nomem++; in process_responses()
2366 q->imm_data++; in process_responses()
2386 skb = get_packet_pg(adap, fl, q, in process_responses()
2390 q->pg_skb = skb; in process_responses()
2397 q->rx_drops++; in process_responses()
2404 q->pure_rsps++; in process_responses()
2412 if (unlikely(++q->cidx == q->size)) { in process_responses()
2413 q->cidx = 0; in process_responses()
2414 q->gen ^= 1; in process_responses()
2415 r = q->desc; in process_responses()
2419 if (++q->credits >= (q->size / 4)) { in process_responses()
2420 refill_rspq(adap, q, q->credits); in process_responses()
2421 q->credits = 0; in process_responses()
2430 rx_eth(adap, q, skb, ethpad, lro); in process_responses()
2432 q->offload_pkts++; in process_responses()
2436 ngathered = rx_offload(&adap->tdev, q, skb, in process_responses()
2442 clear_rspq_bufstate(q); in process_responses()
2447 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered); in process_responses()
2529 struct sge_rspq *q = &qs->rspq; in process_pure_responses() local
2536 if (unlikely(++q->cidx == q->size)) { in process_pure_responses()
2537 q->cidx = 0; in process_pure_responses()
2538 q->gen ^= 1; in process_pure_responses()
2539 r = q->desc; in process_pure_responses()
2548 q->pure_rsps++; in process_pure_responses()
2549 if (++q->credits >= (q->size / 4)) { in process_pure_responses()
2550 refill_rspq(adap, q, q->credits); in process_pure_responses()
2551 q->credits = 0; in process_pure_responses()
2553 if (!is_new_response(r, q)) in process_pure_responses()
2565 return is_new_response(r, q); in process_pure_responses()
2583 static inline int handle_responses(struct adapter *adap, struct sge_rspq *q) in handle_responses() argument
2585 struct sge_qset *qs = rspq_to_qset(q); in handle_responses()
2586 struct rsp_desc *r = &q->desc[q->cidx]; in handle_responses()
2588 if (!is_new_response(r, q)) in handle_responses()
2592 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | in handle_responses()
2593 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx)); in handle_responses()
2608 struct sge_rspq *q = &qs->rspq; in t3_sge_intr_msix() local
2610 spin_lock(&q->lock); in t3_sge_intr_msix()
2612 q->unhandled_irqs++; in t3_sge_intr_msix()
2613 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | in t3_sge_intr_msix()
2614 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx)); in t3_sge_intr_msix()
2615 spin_unlock(&q->lock); in t3_sge_intr_msix()
2626 struct sge_rspq *q = &qs->rspq; in t3_sge_intr_msix_napi() local
2628 spin_lock(&q->lock); in t3_sge_intr_msix_napi()
2630 if (handle_responses(qs->adap, q) < 0) in t3_sge_intr_msix_napi()
2631 q->unhandled_irqs++; in t3_sge_intr_msix_napi()
2632 spin_unlock(&q->lock); in t3_sge_intr_msix_napi()
2646 struct sge_rspq *q = &adap->sge.qs[0].rspq; in t3_intr_msi() local
2648 spin_lock(&q->lock); in t3_intr_msi()
2651 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | in t3_intr_msi()
2652 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx)); in t3_intr_msi()
2667 q->unhandled_irqs++; in t3_intr_msi()
2669 spin_unlock(&q->lock); in t3_intr_msi()
2675 struct sge_rspq *q = &qs->rspq; in rspq_check_napi() local
2678 is_new_response(&q->desc[q->cidx], q)) { in rspq_check_napi()
2696 struct sge_rspq *q = &adap->sge.qs[0].rspq; in t3_intr_msi_napi() local
2698 spin_lock(&q->lock); in t3_intr_msi_napi()
2704 q->unhandled_irqs++; in t3_intr_msi_napi()
2706 spin_unlock(&q->lock); in t3_intr_msi_napi()
3040 struct sge_qset *q = &adapter->sge.qs[id]; in t3_sge_alloc_qset() local
3042 init_qset_cntxt(q, id); in t3_sge_alloc_qset()
3043 timer_setup(&q->tx_reclaim_timer, sge_timer_tx, 0); in t3_sge_alloc_qset()
3044 timer_setup(&q->rx_reclaim_timer, sge_timer_rx, 0); in t3_sge_alloc_qset()
3046 q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size, in t3_sge_alloc_qset()
3049 &q->fl[0].phys_addr, &q->fl[0].sdesc); in t3_sge_alloc_qset()
3050 if (!q->fl[0].desc) in t3_sge_alloc_qset()
3053 q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size, in t3_sge_alloc_qset()
3056 &q->fl[1].phys_addr, &q->fl[1].sdesc); in t3_sge_alloc_qset()
3057 if (!q->fl[1].desc) in t3_sge_alloc_qset()
3060 q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size, in t3_sge_alloc_qset()
3062 &q->rspq.phys_addr, NULL); in t3_sge_alloc_qset()
3063 if (!q->rspq.desc) in t3_sge_alloc_qset()
3073 q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i], in t3_sge_alloc_qset()
3075 &q->txq[i].phys_addr, in t3_sge_alloc_qset()
3076 &q->txq[i].sdesc); in t3_sge_alloc_qset()
3077 if (!q->txq[i].desc) in t3_sge_alloc_qset()
3080 q->txq[i].gen = 1; in t3_sge_alloc_qset()
3081 q->txq[i].size = p->txq_size[i]; in t3_sge_alloc_qset()
3082 spin_lock_init(&q->txq[i].lock); in t3_sge_alloc_qset()
3083 skb_queue_head_init(&q->txq[i].sendq); in t3_sge_alloc_qset()
3086 INIT_WORK(&q->txq[TXQ_OFLD].qresume_task, restart_offloadq); in t3_sge_alloc_qset()
3087 INIT_WORK(&q->txq[TXQ_CTRL].qresume_task, restart_ctrlq); in t3_sge_alloc_qset()
3089 q->fl[0].gen = q->fl[1].gen = 1; in t3_sge_alloc_qset()
3090 q->fl[0].size = p->fl_size; in t3_sge_alloc_qset()
3091 q->fl[1].size = p->jumbo_size; in t3_sge_alloc_qset()
3093 q->rspq.gen = 1; in t3_sge_alloc_qset()
3094 q->rspq.size = p->rspq_size; in t3_sge_alloc_qset()
3095 spin_lock_init(&q->rspq.lock); in t3_sge_alloc_qset()
3096 skb_queue_head_init(&q->rspq.rx_queue); in t3_sge_alloc_qset()
3098 q->txq[TXQ_ETH].stop_thres = nports * in t3_sge_alloc_qset()
3102 q->fl[0].buf_size = FL0_PG_CHUNK_SIZE; in t3_sge_alloc_qset()
3104 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data); in t3_sge_alloc_qset()
3107 q->fl[1].buf_size = FL1_PG_CHUNK_SIZE; in t3_sge_alloc_qset()
3109 q->fl[1].buf_size = is_offload(adapter) ? in t3_sge_alloc_qset()
3114 q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0; in t3_sge_alloc_qset()
3115 q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0; in t3_sge_alloc_qset()
3116 q->fl[0].order = FL0_PG_ORDER; in t3_sge_alloc_qset()
3117 q->fl[1].order = FL1_PG_ORDER; in t3_sge_alloc_qset()
3118 q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE; in t3_sge_alloc_qset()
3119 q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE; in t3_sge_alloc_qset()
3124 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx, in t3_sge_alloc_qset()
3125 q->rspq.phys_addr, q->rspq.size, in t3_sge_alloc_qset()
3126 q->fl[0].buf_size - SGE_PG_RSVD, 1, 0); in t3_sge_alloc_qset()
3131 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0, in t3_sge_alloc_qset()
3132 q->fl[i].phys_addr, q->fl[i].size, in t3_sge_alloc_qset()
3133 q->fl[i].buf_size - SGE_PG_RSVD, in t3_sge_alloc_qset()
3139 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS, in t3_sge_alloc_qset()
3140 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr, in t3_sge_alloc_qset()
3141 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token, in t3_sge_alloc_qset()
3147 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id, in t3_sge_alloc_qset()
3149 q->txq[TXQ_OFLD].phys_addr, in t3_sge_alloc_qset()
3150 q->txq[TXQ_OFLD].size, 0, 1, 0); in t3_sge_alloc_qset()
3156 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0, in t3_sge_alloc_qset()
3158 q->txq[TXQ_CTRL].phys_addr, in t3_sge_alloc_qset()
3159 q->txq[TXQ_CTRL].size, in t3_sge_alloc_qset()
3160 q->txq[TXQ_CTRL].token, 1, 0); in t3_sge_alloc_qset()
3167 q->adap = adapter; in t3_sge_alloc_qset()
3168 q->netdev = dev; in t3_sge_alloc_qset()
3169 q->tx_q = netdevq; in t3_sge_alloc_qset()
3170 t3_update_qset_coalesce(q, p); in t3_sge_alloc_qset()
3172 avail = refill_fl(adapter, &q->fl[0], q->fl[0].size, in t3_sge_alloc_qset()
3179 if (avail < q->fl[0].size) in t3_sge_alloc_qset()
3183 avail = refill_fl(adapter, &q->fl[1], q->fl[1].size, in t3_sge_alloc_qset()
3185 if (avail < q->fl[1].size) in t3_sge_alloc_qset()
3188 refill_rspq(adapter, &q->rspq, q->rspq.size - 1); in t3_sge_alloc_qset()
3190 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) | in t3_sge_alloc_qset()
3191 V_NEWTIMER(q->rspq.holdoff_tmr)); in t3_sge_alloc_qset()
3198 t3_free_qset(adapter, q); in t3_sge_alloc_qset()
3213 struct sge_qset *q = &adap->sge.qs[i]; in t3_start_sge_timers() local
3215 if (q->tx_reclaim_timer.function) in t3_start_sge_timers()
3216 mod_timer(&q->tx_reclaim_timer, in t3_start_sge_timers()
3219 if (q->rx_reclaim_timer.function) in t3_start_sge_timers()
3220 mod_timer(&q->rx_reclaim_timer, in t3_start_sge_timers()
3236 struct sge_qset *q = &adap->sge.qs[i]; in t3_stop_sge_timers() local
3238 if (q->tx_reclaim_timer.function) in t3_stop_sge_timers()
3239 del_timer_sync(&q->tx_reclaim_timer); in t3_stop_sge_timers()
3240 if (q->rx_reclaim_timer.function) in t3_stop_sge_timers()
3241 del_timer_sync(&q->rx_reclaim_timer); in t3_stop_sge_timers()
3369 struct qset_params *q = p->qset + i; in t3_sge_prep() local
3371 q->polling = adap->params.rev > 0; in t3_sge_prep()
3372 q->coalesce_usecs = 5; in t3_sge_prep()
3373 q->rspq_size = 1024; in t3_sge_prep()
3374 q->fl_size = 1024; in t3_sge_prep()
3375 q->jumbo_size = 512; in t3_sge_prep()
3376 q->txq_size[TXQ_ETH] = 1024; in t3_sge_prep()
3377 q->txq_size[TXQ_OFLD] = 1024; in t3_sge_prep()
3378 q->txq_size[TXQ_CTRL] = 256; in t3_sge_prep()
3379 q->cong_thres = 0; in t3_sge_prep()