Lines Matching refs:q

474 struct cmdQ *q = &sge->cmdQ[0]; in sched_skb() local
475 clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); in sched_skb()
476 if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) { in sched_skb()
477 set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); in sched_skb()
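
The sched_skb() lines above are the driver's doorbell-coalescing idiom, repeated later in restart_sched() and t1_sge_tx(): the sender clears CMDQ_STAT_LAST_PKT_DB, and only the context that wins the test_and_set_bit() on CMDQ_STAT_RUNNING sets CMDQ_STAT_LAST_PKT_DB again and notifies the hardware. Below is a minimal userspace sketch of the same idea, using C11 atomics in place of the kernel's clear_bit()/test_and_set_bit()/set_bit(); ring_doorbell() is a hypothetical stand-in for the doorbell register write, and cmdq_model/kick_cmdq are names of my own.

#include <stdatomic.h>
#include <stdio.h>

#define CMDQ_STAT_RUNNING     0x1u
#define CMDQ_STAT_LAST_PKT_DB 0x2u

struct cmdq_model {
	atomic_uint status;
};

/* Hypothetical stand-in for the doorbell write to the adapter. */
static void ring_doorbell(void)
{
	puts("doorbell");
}

/*
 * After queueing a packet: note that the newest packet has no doorbell of
 * its own yet, then notify the hardware only if this caller is the one
 * that moved the queue from idle to running.  The completion path (not
 * shown) would clear CMDQ_STAT_RUNNING again.
 */
static void kick_cmdq(struct cmdq_model *q)
{
	atomic_fetch_and(&q->status, ~CMDQ_STAT_LAST_PKT_DB);
	if (!(atomic_fetch_or(&q->status, CMDQ_STAT_RUNNING) & CMDQ_STAT_RUNNING)) {
		atomic_fetch_or(&q->status, CMDQ_STAT_LAST_PKT_DB);
		ring_doorbell();
	}
}

int main(void)
{
	struct cmdq_model q = { .status = 0 };

	kick_cmdq(&q);	/* queue was idle: rings the doorbell */
	kick_cmdq(&q);	/* CMDQ_STAT_RUNNING still set: doorbell is coalesced */
	return 0;
}
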
499 static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q) in free_freelQ_buffers() argument
501 unsigned int cidx = q->cidx; in free_freelQ_buffers()
503 while (q->credits--) { in free_freelQ_buffers()
504 struct freelQ_ce *ce = &q->centries[cidx]; in free_freelQ_buffers()
510 if (++cidx == q->size) in free_freelQ_buffers()
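
free_freelQ_buffers() walks the free-list ring from the consumer index for as many entries as the queue still holds (q->credits), wrapping cidx back to zero at q->size. A compilable sketch of that consumer-side walk follows; release_buffer() is a hypothetical stand-in for the per-entry DMA unmap and skb free, and the struct names are mine.

struct fl_ce { void *buf; };                /* shadow state for one ring entry */

struct flq_consumer {
	struct fl_ce *centries;
	unsigned int  size;                 /* number of ring entries */
	unsigned int  cidx;                 /* consumer index */
	unsigned int  credits;              /* populated entries not yet drained */
};

/* Hypothetical stand-in for unmapping and freeing one receive buffer. */
static void release_buffer(struct fl_ce *ce)
{
	ce->buf = 0;
}

static void drain_freelq(struct flq_consumer *q)
{
	unsigned int cidx = q->cidx;

	while (q->credits) {
		release_buffer(&q->centries[cidx]);
		if (++cidx == q->size)      /* wrap the consumer index */
			cidx = 0;
		q->credits--;
	}
	q->cidx = cidx;
}

The producer side of the same ring appears further down in refill_free_list().
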
530 struct freelQ *q = &sge->freelQ[i]; in free_rx_resources() local
532 if (q->centries) { in free_rx_resources()
533 free_freelQ_buffers(pdev, q); in free_rx_resources()
534 kfree(q->centries); in free_rx_resources()
536 if (q->entries) { in free_rx_resources()
537 size = sizeof(struct freelQ_e) * q->size; in free_rx_resources()
538 dma_free_coherent(&pdev->dev, size, q->entries, in free_rx_resources()
539 q->dma_addr); in free_rx_resources()
554 struct freelQ *q = &sge->freelQ[i]; in alloc_rx_resources() local
556 q->genbit = 1; in alloc_rx_resources()
557 q->size = p->freelQ_size[i]; in alloc_rx_resources()
558 q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN; in alloc_rx_resources()
559 size = sizeof(struct freelQ_e) * q->size; in alloc_rx_resources()
560 q->entries = dma_alloc_coherent(&pdev->dev, size, in alloc_rx_resources()
561 &q->dma_addr, GFP_KERNEL); in alloc_rx_resources()
562 if (!q->entries) in alloc_rx_resources()
565 size = sizeof(struct freelQ_ce) * q->size; in alloc_rx_resources()
566 q->centries = kzalloc(size, GFP_KERNEL); in alloc_rx_resources()
567 if (!q->centries) in alloc_rx_resources()
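
free_rx_resources() and alloc_rx_resources() together show the two-part queue layout used throughout the file: a device-visible descriptor ring obtained with dma_alloc_coherent() plus a kzalloc()'d host-only shadow array (centries), torn down in the opposite order. The kernel-style sketch below (it only builds inside a kernel tree) captures that pairing; the struct names, helpers, and the hw_desc field layout are mine and purely illustrative, not the real freelQ_e/freelQ_ce definitions.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

struct hw_desc { u64 addr; u32 len_gen; u32 gen2; };  /* illustrative layout */
struct sw_desc { struct sk_buff *skb; dma_addr_t dma_addr; };

struct ring {
	struct hw_desc *entries;   /* coherent ring shared with the SGE */
	struct sw_desc *centries;  /* host-only shadow, one per descriptor */
	dma_addr_t	dma_addr;
	unsigned int	size;
};

/* Allocate the device-visible ring and its host shadow; undo both on error. */
static int ring_alloc(struct device *dev, struct ring *q, unsigned int nentries)
{
	q->size = nentries;
	q->entries = dma_alloc_coherent(dev, nentries * sizeof(*q->entries),
					&q->dma_addr, GFP_KERNEL);
	if (!q->entries)
		return -ENOMEM;

	q->centries = kcalloc(nentries, sizeof(*q->centries), GFP_KERNEL);
	if (!q->centries) {
		dma_free_coherent(dev, nentries * sizeof(*q->entries),
				  q->entries, q->dma_addr);
		q->entries = NULL;
		return -ENOMEM;
	}
	return 0;
}

static void ring_free(struct device *dev, struct ring *q)
{
	kfree(q->centries);
	if (q->entries)
		dma_free_coherent(dev, q->size * sizeof(*q->entries),
				  q->entries, q->dma_addr);
	q->entries = NULL;
	q->centries = NULL;
}

The command-queue functions below (free_cmdQ_buffers(), free_tx_resources(), alloc_tx_resources()) follow the same pairing, only with cmdQ_e/cmdQ_ce and with the per-queue spinlock and counters initialised before the allocations.
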
612 static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n) in free_cmdQ_buffers() argument
616 unsigned int cidx = q->cidx; in free_cmdQ_buffers()
618 q->in_use -= n; in free_cmdQ_buffers()
619 ce = &q->centries[cidx]; in free_cmdQ_buffers()
626 if (q->sop) in free_cmdQ_buffers()
627 q->sop = 0; in free_cmdQ_buffers()
631 q->sop = 1; in free_cmdQ_buffers()
634 if (++cidx == q->size) { in free_cmdQ_buffers()
636 ce = q->centries; in free_cmdQ_buffers()
639 q->cidx = cidx; in free_cmdQ_buffers()
653 struct cmdQ *q = &sge->cmdQ[i]; in free_tx_resources() local
655 if (q->centries) { in free_tx_resources()
656 if (q->in_use) in free_tx_resources()
657 free_cmdQ_buffers(sge, q, q->in_use); in free_tx_resources()
658 kfree(q->centries); in free_tx_resources()
660 if (q->entries) { in free_tx_resources()
661 size = sizeof(struct cmdQ_e) * q->size; in free_tx_resources()
662 dma_free_coherent(&pdev->dev, size, q->entries, in free_tx_resources()
663 q->dma_addr); in free_tx_resources()
677 struct cmdQ *q = &sge->cmdQ[i]; in alloc_tx_resources() local
679 q->genbit = 1; in alloc_tx_resources()
680 q->sop = 1; in alloc_tx_resources()
681 q->size = p->cmdQ_size[i]; in alloc_tx_resources()
682 q->in_use = 0; in alloc_tx_resources()
683 q->status = 0; in alloc_tx_resources()
684 q->processed = q->cleaned = 0; in alloc_tx_resources()
685 q->stop_thres = 0; in alloc_tx_resources()
686 spin_lock_init(&q->lock); in alloc_tx_resources()
687 size = sizeof(struct cmdQ_e) * q->size; in alloc_tx_resources()
688 q->entries = dma_alloc_coherent(&pdev->dev, size, in alloc_tx_resources()
689 &q->dma_addr, GFP_KERNEL); in alloc_tx_resources()
690 if (!q->entries) in alloc_tx_resources()
693 size = sizeof(struct cmdQ_ce) * q->size; in alloc_tx_resources()
694 q->centries = kzalloc(size, GFP_KERNEL); in alloc_tx_resources()
695 if (!q->centries) in alloc_tx_resources()
821 static void refill_free_list(struct sge *sge, struct freelQ *q) in refill_free_list() argument
824 struct freelQ_ce *ce = &q->centries[q->pidx]; in refill_free_list()
825 struct freelQ_e *e = &q->entries[q->pidx]; in refill_free_list()
826 unsigned int dma_len = q->rx_buffer_size - q->dma_offset; in refill_free_list()
828 while (q->credits < q->size) { in refill_free_list()
832 skb = dev_alloc_skb(q->rx_buffer_size); in refill_free_list()
836 skb_reserve(skb, q->dma_offset); in refill_free_list()
846 e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit); in refill_free_list()
848 e->gen2 = V_CMD_GEN2(q->genbit); in refill_free_list()
852 if (++q->pidx == q->size) { in refill_free_list()
853 q->pidx = 0; in refill_free_list()
854 q->genbit ^= 1; in refill_free_list()
855 ce = q->centries; in refill_free_list()
856 e = q->entries; in refill_free_list()
858 q->credits++; in refill_free_list()
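
refill_free_list() is the producer side: each new buffer is described at q->pidx with the current generation bit encoded into the descriptor, and when pidx wraps back to zero the generation bit is toggled so the hardware can tell this lap's descriptors from last lap's. The following compilable sketch covers only the index/genbit bookkeeping; write_desc() and its encoding are hypothetical stand-ins for the real V_CMD_LEN()/V_CMD_GEN1()/V_CMD_GEN2() fields.

struct fl_desc { unsigned int len_gen; };   /* device-visible (illustrative) */

struct flq_producer {
	struct fl_desc *entries;
	unsigned int	size;
	unsigned int	pidx;               /* producer index */
	unsigned int	credits;            /* entries currently owned by hw */
	unsigned int	genbit;
};

/* Hypothetical stand-in for filling in one free-list descriptor. */
static void write_desc(struct fl_desc *e, unsigned int len, unsigned int gen)
{
	e->len_gen = (len << 1) | gen;      /* illustrative encoding only */
}

static void refill(struct flq_producer *q, unsigned int buf_len)
{
	while (q->credits < q->size) {
		write_desc(&q->entries[q->pidx], buf_len, q->genbit);
		if (++q->pidx == q->size) { /* producer wrapped ... */
			q->pidx = 0;
			q->genbit ^= 1;     /* ... so flip the generation bit */
		}
		q->credits++;
	}
}

The real loop also allocates the skb with dev_alloc_skb() and stops early on allocation failure; that part is omitted here.
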
1167 struct cmdQ *q) in write_large_page_tx_descs() argument
1183 if (++pidx == q->size) { in write_large_page_tx_descs()
1186 ce1 = q->centries; in write_large_page_tx_descs()
1187 e1 = q->entries; in write_large_page_tx_descs()
1203 struct cmdQ *q) in write_tx_descs() argument
1211 e = e1 = &q->entries[pidx]; in write_tx_descs()
1212 ce = &q->centries[pidx]; in write_tx_descs()
1237 if (++pidx == q->size) { in write_tx_descs()
1240 e1 = q->entries; in write_tx_descs()
1241 ce = q->centries; in write_tx_descs()
1245 nfrags, q); in write_tx_descs()
1260 if (++pidx == q->size) { in write_tx_descs()
1263 e1 = q->entries; in write_tx_descs()
1264 ce = q->centries; in write_tx_descs()
1274 nfrags, q); in write_tx_descs()
1290 static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q) in reclaim_completed_tx() argument
1292 unsigned int reclaim = q->processed - q->cleaned; in reclaim_completed_tx()
1296 q->processed, q->cleaned); in reclaim_completed_tx()
1297 free_cmdQ_buffers(sge, q, reclaim); in reclaim_completed_tx()
1298 q->cleaned += reclaim; in reclaim_completed_tx()
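
reclaim_completed_tx() works off two monotonically increasing counters: q->processed (descriptors the hardware has finished with) and q->cleaned (descriptors already returned). Their unsigned difference is the reclaimable count and stays correct even when the counters wrap. A compilable sketch, with free_buffers() as a hypothetical stand-in for free_cmdQ_buffers():

struct txq_counters {
	unsigned int processed;  /* completed by hw (monotonic) */
	unsigned int cleaned;    /* already reclaimed (monotonic) */
	unsigned int in_use;     /* descriptors currently occupied */
};

/* Hypothetical stand-in for free_cmdQ_buffers(). */
static void free_buffers(struct txq_counters *q, unsigned int n)
{
	q->in_use -= n;
}

static void reclaim_completed(struct txq_counters *q)
{
	unsigned int reclaim = q->processed - q->cleaned;  /* safe across wrap */

	if (reclaim) {
		free_buffers(q, reclaim);
		q->cleaned += reclaim;
	}
}
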
1311 struct cmdQ *q = &sge->cmdQ[0]; in restart_sched() local
1315 spin_lock(&q->lock); in restart_sched()
1316 reclaim_completed_tx(sge, q); in restart_sched()
1318 credits = q->size - q->in_use; in restart_sched()
1324 q->in_use += count; in restart_sched()
1325 genbit = q->genbit; in restart_sched()
1326 pidx = q->pidx; in restart_sched()
1327 q->pidx += count; in restart_sched()
1328 if (q->pidx >= q->size) { in restart_sched()
1329 q->pidx -= q->size; in restart_sched()
1330 q->genbit ^= 1; in restart_sched()
1332 write_tx_descs(adapter, skb, pidx, genbit, q); in restart_sched()
1333 credits = q->size - q->in_use; in restart_sched()
1338 clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); in restart_sched()
1339 if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) { in restart_sched()
1340 set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); in restart_sched()
1344 spin_unlock(&q->lock); in restart_sched()
1399 static inline int enough_free_Tx_descs(const struct cmdQ *q) in enough_free_Tx_descs() argument
1401 unsigned int r = q->processed - q->cleaned; in enough_free_Tx_descs()
1403 return q->in_use - r < (q->size >> 1); in enough_free_Tx_descs()
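
enough_free_Tx_descs() counts completed-but-unreclaimed descriptors (processed - cleaned) as effectively free, and reports room only while true occupancy stays under half the ring. Restated as a standalone helper of my own naming:

/* Room test matching the lines above: descriptors that are completed but
 * not yet reclaimed no longer count as occupied, and the queue is deemed
 * writable while fewer than half of its entries are genuinely in use. */
static int has_room(unsigned int in_use, unsigned int processed,
		    unsigned int cleaned, unsigned int size)
{
	unsigned int pending_reclaim = processed - cleaned;

	return in_use - pending_reclaim < (size >> 1);
}
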
1472 struct respQ *q = &sge->respQ; in process_responses() local
1473 struct respQ_e *e = &q->entries[q->cidx]; in process_responses()
1478 while (done < budget && e->GenerationBit == q->genbit) { in process_responses()
1524 if (unlikely(++q->cidx == q->size)) { in process_responses()
1525 q->cidx = 0; in process_responses()
1526 q->genbit ^= 1; in process_responses()
1527 e = q->entries; in process_responses()
1531 if (++q->credits > SGE_RESPQ_REPLENISH_THRES) { in process_responses()
1532 writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT); in process_responses()
1533 q->credits = 0; in process_responses()
1562 struct respQ *q = &sge->respQ; in process_pure_responses() local
1563 struct respQ_e *e = &q->entries[q->cidx]; in process_pure_responses()
1579 if (unlikely(++q->cidx == q->size)) { in process_pure_responses()
1580 q->cidx = 0; in process_pure_responses()
1581 q->genbit ^= 1; in process_pure_responses()
1582 e = q->entries; in process_pure_responses()
1586 if (++q->credits > SGE_RESPQ_REPLENISH_THRES) { in process_pure_responses()
1587 writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT); in process_pure_responses()
1588 q->credits = 0; in process_pure_responses()
1591 } while (e->GenerationBit == q->genbit && !e->DataValid); in process_pure_responses()
1596 return e->GenerationBit == q->genbit; in process_pure_responses()
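
process_responses() and process_pure_responses() share one consumption shape: keep handling entries while the entry's GenerationBit matches the queue's expected genbit, wrap cidx and flip the expected genbit at the end of the ring, and hand accumulated credits back to the hardware once they pass a replenish threshold. A compilable sketch follows; handle_entry() and hw_return_credits() are hypothetical stand-ins (the latter for the writel() to A_SG_RSPQUEUECREDIT), and REPLENISH_THRES is an illustrative value, not the driver's SGE_RESPQ_REPLENISH_THRES.

#define REPLENISH_THRES 64

struct resp_entry {
	unsigned int gen;                   /* generation bit written by hw */
	/* ... payload fields ... */
};

struct respq_model {
	struct resp_entry *entries;
	unsigned int	   size, cidx, credits, genbit;
};

/* Hypothetical stand-ins. */
static void handle_entry(struct resp_entry *e) { (void)e; }
static void hw_return_credits(unsigned int n) { (void)n; }

static void poll_respq(struct respq_model *q, int budget)
{
	struct resp_entry *e = &q->entries[q->cidx];

	while (budget-- > 0 && e->gen == q->genbit) { /* written for this lap? */
		handle_entry(e);

		if (++q->cidx == q->size) {           /* wrapped: expect flipped genbit */
			q->cidx = 0;
			q->genbit ^= 1;
			e = q->entries;
		} else {
			e++;
		}

		if (++q->credits > REPLENISH_THRES) { /* lazily return credits */
			hw_return_credits(q->credits);
			q->credits = 0;
		}
	}
}
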
1701 struct cmdQ *q = &sge->cmdQ[qid]; in t1_sge_tx() local
1704 spin_lock(&q->lock); in t1_sge_tx()
1706 reclaim_completed_tx(sge, q); in t1_sge_tx()
1708 pidx = q->pidx; in t1_sge_tx()
1709 credits = q->size - q->in_use; in t1_sge_tx()
1722 spin_unlock(&q->lock); in t1_sge_tx()
1726 if (unlikely(credits - count < q->stop_thres)) { in t1_sge_tx()
1743 spin_unlock(&q->lock); in t1_sge_tx()
1746 pidx = q->pidx; in t1_sge_tx()
1751 q->in_use += count; in t1_sge_tx()
1752 genbit = q->genbit; in t1_sge_tx()
1753 pidx = q->pidx; in t1_sge_tx()
1754 q->pidx += count; in t1_sge_tx()
1755 if (q->pidx >= q->size) { in t1_sge_tx()
1756 q->pidx -= q->size; in t1_sge_tx()
1757 q->genbit ^= 1; in t1_sge_tx()
1759 spin_unlock(&q->lock); in t1_sge_tx()
1761 write_tx_descs(adapter, skb, pidx, genbit, q); in t1_sge_tx()
1773 clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); in t1_sge_tx()
1774 if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) { in t1_sge_tx()
1775 set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); in t1_sge_tx()
1781 if (spin_trylock(&q->lock)) { in t1_sge_tx()
1782 credits = q->size - q->in_use; in t1_sge_tx()
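
t1_sge_tx() separates reservation from descriptor writing: under q->lock it reclaims completed work, checks credits, advances in_use and pidx (toggling genbit on wrap), then drops the lock before write_tx_descs() fills the slots, which is safe because the reserved index range now belongs exclusively to this sender. A compilable userspace sketch of that reserve-then-write shape, with a pthread mutex standing in for the spinlock and write_descs() as a hypothetical stand-in for write_tx_descs():

#include <pthread.h>

struct txq_model {
	pthread_mutex_t lock;          /* e.g. PTHREAD_MUTEX_INITIALIZER */
	unsigned int	size, pidx, in_use, genbit;
};

/* Hypothetical stand-in for write_tx_descs(). */
static void write_descs(unsigned int pidx, unsigned int genbit, unsigned int count)
{
	(void)pidx; (void)genbit; (void)count;
}

/* Returns 0 on success, -1 when the ring lacks room for 'count' descriptors. */
static int tx_reserve_and_write(struct txq_model *q, unsigned int count)
{
	unsigned int pidx, genbit;

	pthread_mutex_lock(&q->lock);
	if (q->size - q->in_use < count) {   /* not enough credits */
		pthread_mutex_unlock(&q->lock);
		return -1;
	}
	q->in_use += count;                  /* reserve the slots ... */
	genbit = q->genbit;
	pidx = q->pidx;
	q->pidx += count;
	if (q->pidx >= q->size) {            /* ... accounting for ring wrap */
		q->pidx -= q->size;
		q->genbit ^= 1;
	}
	pthread_mutex_unlock(&q->lock);

	write_descs(pidx, genbit, count);    /* fill descriptors outside the lock */
	return 0;
}

restart_sched() above performs the same reservation dance for cmdQ[0] when the scheduler feeds it deferred packets.
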
1928 struct cmdQ *q = &sge->cmdQ[i]; in sge_tx_reclaim_cb() local
1930 if (!spin_trylock(&q->lock)) in sge_tx_reclaim_cb()
1933 reclaim_completed_tx(sge, q); in sge_tx_reclaim_cb()
1934 if (i == 0 && q->in_use) { /* flush pending credits */ in sge_tx_reclaim_cb()
1937 spin_unlock(&q->lock); in sge_tx_reclaim_cb()
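
Finally, the periodic reclaim callback uses spin_trylock() so a timer tick never spins against the hot transmit path; if a queue's lock is busy, that queue is simply skipped until the next tick. A short sketch, reusing the txq_model type from the previous example:

/* Timer-driven reclaim: don't contend with the transmit path; if the lock
 * is busy, skip this queue until the next tick. */
static void periodic_reclaim(struct txq_model *q)
{
	if (pthread_mutex_trylock(&q->lock) != 0)
		return;
	/* ... reclaim completed descriptors, flush pending credits ... */
	pthread_mutex_unlock(&q->lock);
}
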