Lines matching refs: comm

Each entry below gives the in-file line number, the matching source line,
and the enclosing function; "local" tags the lines where the comm local
variable is declared. The matches fall into three functions: the RX NAPI
poll spl2sw_rx_poll(), the TX completion poll spl2sw_tx_poll(), and the
hard-IRQ handler spl2sw_ethernet_interrupt().
19 struct spl2sw_common *comm = container_of(napi, struct spl2sw_common, rx_napi); in spl2sw_rx_poll() local
36 rx_pos = comm->rx_pos[queue]; in spl2sw_rx_poll()
37 rx_count = comm->rx_desc_num[queue]; in spl2sw_rx_poll()
40 sinfo = comm->rx_skb_info[queue] + rx_pos; in spl2sw_rx_poll()
41 desc = comm->rx_desc[queue] + rx_pos; in spl2sw_rx_poll()
48 if (port < MAX_NETDEV_NUM && comm->ndev[port]) in spl2sw_rx_poll()
49 stats = &comm->ndev[port]->stats; in spl2sw_rx_poll()
60 dma_unmap_single(&comm->pdev->dev, sinfo->mapping, in spl2sw_rx_poll()
61 comm->rx_desc_buff_size, DMA_FROM_DEVICE); in spl2sw_rx_poll()
66 skb->protocol = eth_type_trans(skb, comm->ndev[port]); in spl2sw_rx_poll()
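
Lines 60-66 are the deliver step of the RX poll: the buffer the hardware
filled is unmapped, then eth_type_trans() sets skb->dev and skb->protocol
before the frame goes up the stack. A minimal sketch of that step, assuming
the driver-local types from spl2sw_define.h (spl2sw_common, spl2sw_skb_info);
the helper name and the pkg_len handling are illustrative, not quoted above.
Later sketches in this listing assume the same includes.

	#include <linux/dma-mapping.h>
	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>
	#include "spl2sw_define.h"

	static void spl2sw_rx_deliver_sketch(struct spl2sw_common *comm,
					     struct spl2sw_skb_info *sinfo,
					     unsigned int pkg_len, int port)
	{
		struct sk_buff *skb = sinfo->skb;

		/* The buffer was mapped DMA_FROM_DEVICE at refill time;
		 * unmap it before the CPU touches the payload.
		 */
		dma_unmap_single(&comm->pdev->dev, sinfo->mapping,
				 comm->rx_desc_buff_size, DMA_FROM_DEVICE);

		skb_put(skb, pkg_len);		/* length from the descriptor */
		skb->ip_summed = CHECKSUM_NONE;	/* no checksum offload */
		skb->protocol = eth_type_trans(skb, comm->ndev[port]);
		netif_receive_skb(skb);
	}
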
74 new_skb = netdev_alloc_skb(NULL, comm->rx_desc_buff_size); in spl2sw_rx_poll()
76 desc->cmd2 = (rx_pos == comm->rx_desc_num[queue] - 1) ? in spl2sw_rx_poll()
84 sinfo->mapping = dma_map_single(&comm->pdev->dev, new_skb->data, in spl2sw_rx_poll()
85 comm->rx_desc_buff_size, in spl2sw_rx_poll()
87 if (dma_mapping_error(&comm->pdev->dev, sinfo->mapping)) { in spl2sw_rx_poll()
89 desc->cmd2 = (rx_pos == comm->rx_desc_num[queue] - 1) ? in spl2sw_rx_poll()
101 desc->cmd2 = (rx_pos == comm->rx_desc_num[queue] - 1) ? in spl2sw_rx_poll()
102 RXD_EOR | comm->rx_desc_buff_size : in spl2sw_rx_poll()
103 comm->rx_desc_buff_size; in spl2sw_rx_poll()
110 rx_pos = ((rx_pos + 1) == comm->rx_desc_num[queue]) ? 0 : rx_pos + 1; in spl2sw_rx_poll()
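
Lines 74-110 refill the slot just consumed: allocate a replacement skb, map
it, and rewrite cmd2, OR-ing in RXD_EOR on the ring's last descriptor so the
DMA engine wraps to the head. The same EOR ternary appears in the error
branches (lines 76 and 89); their continuation lines contain no "comm" and so
are absent from this listing, but cmd2 must stay valid even when allocation
or mapping fails. A compressed sketch of the pattern: field and macro names
come from the matches above and spl2sw_define.h, the helper name is
illustrative, and the error paths are condensed.

	static void spl2sw_rx_refill_sketch(struct spl2sw_common *comm,
					    int queue, u32 rx_pos)
	{
		struct spl2sw_skb_info *sinfo = comm->rx_skb_info[queue] + rx_pos;
		struct spl2sw_mac_desc *desc = comm->rx_desc[queue] + rx_pos;
		bool last = (rx_pos == comm->rx_desc_num[queue] - 1);
		struct sk_buff *new_skb;

		new_skb = netdev_alloc_skb(NULL, comm->rx_desc_buff_size);
		if (!new_skb)
			goto park;

		sinfo->mapping = dma_map_single(&comm->pdev->dev, new_skb->data,
						comm->rx_desc_buff_size,
						DMA_FROM_DEVICE);
		if (dma_mapping_error(&comm->pdev->dev, sinfo->mapping)) {
			dev_kfree_skb(new_skb);
			goto park;
		}

		sinfo->skb = new_skb;
		desc->addr1 = sinfo->mapping;
		/* RXD_EOR on the last slot tells the DMA engine to wrap. */
		desc->cmd2 = last ? RXD_EOR | comm->rx_desc_buff_size :
				    comm->rx_desc_buff_size;
		return;

	park:
		/* Allocation or mapping failed: park the slot empty, but
		 * keep the end-of-ring flag valid so the ring stays walkable.
		 */
		sinfo->skb = NULL;
		sinfo->mapping = 0;
		desc->addr1 = 0;
		desc->cmd2 = last ? RXD_EOR : 0;
	}

Line 110 then advances rx_pos with an explicit compare-and-wrap rather than a
modulo, which stays cheap for ring sizes that are not powers of two.
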
121 comm->rx_pos[queue] = rx_pos; in spl2sw_rx_poll()
125 h_desc = comm->rx_desc[queue] + rx_pos; in spl2sw_rx_poll()
128 spin_lock_irqsave(&comm->int_mask_lock, flags); in spl2sw_rx_poll()
129 mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0); in spl2sw_rx_poll()
131 writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0); in spl2sw_rx_poll()
132 spin_unlock_irqrestore(&comm->int_mask_lock, flags); in spl2sw_rx_poll()
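
Lines 121-132 close the poll: the consumer index is published, the head
descriptor of the next queue is peeked (line 125), and the RX bits in
L2SW_SW_INT_MASK_0 are cleared again under int_mask_lock so the interrupt
can fire. A sketch of the unmask helper; MAC_INT_RX is assumed to be the
driver's RX cause mask from spl2sw_define.h, and the helper name is
illustrative.

	static void spl2sw_rx_unmask_sketch(struct spl2sw_common *comm)
	{
		unsigned long flags;
		u32 mask;

		/* int_mask_lock serializes this read-modify-write against
		 * the hard-IRQ path, which sets the same bits to throttle
		 * itself.
		 */
		spin_lock_irqsave(&comm->int_mask_lock, flags);
		mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
		mask &= ~MAC_INT_RX;	/* cleared bit == interrupt enabled */
		writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
		spin_unlock_irqrestore(&comm->int_mask_lock, flags);
	}

Per the standard NAPI contract, this unmask runs only after the poll has
completed NAPI with less work done than the budget; if the budget was
exhausted, NAPI re-polls and the interrupt stays masked.
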
140 struct spl2sw_common *comm = container_of(napi, struct spl2sw_common, tx_napi); in spl2sw_tx_poll() local
150 spin_lock(&comm->tx_lock); in spl2sw_tx_poll()
152 tx_done_pos = comm->tx_done_pos; in spl2sw_tx_poll()
153 while (((tx_done_pos != comm->tx_pos) || (comm->tx_desc_full == 1)) && budget_left) { in spl2sw_tx_poll()
154 cmd = comm->tx_desc[tx_done_pos].cmd1; in spl2sw_tx_poll()
158 skbinfo = &comm->tx_temp_skb_info[tx_done_pos]; in spl2sw_tx_poll()
163 if (i < MAX_NETDEV_NUM && comm->ndev[i]) in spl2sw_tx_poll()
164 stats = &comm->ndev[i]->stats; in spl2sw_tx_poll()
176 dma_unmap_single(&comm->pdev->dev, skbinfo->mapping, skbinfo->len, in spl2sw_tx_poll()
186 if (comm->tx_desc_full == 1) in spl2sw_tx_poll()
187 comm->tx_desc_full = 0; in spl2sw_tx_poll()
192 comm->tx_done_pos = tx_done_pos; in spl2sw_tx_poll()
193 if (!comm->tx_desc_full) in spl2sw_tx_poll()
195 if (comm->ndev[i]) in spl2sw_tx_poll()
196 if (netif_queue_stopped(comm->ndev[i])) in spl2sw_tx_poll()
197 netif_wake_queue(comm->ndev[i]); in spl2sw_tx_poll()
199 spin_unlock(&comm->tx_lock); in spl2sw_tx_poll()
201 spin_lock_irqsave(&comm->int_mask_lock, flags); in spl2sw_tx_poll()
202 mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0); in spl2sw_tx_poll()
204 writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0); in spl2sw_tx_poll()
205 spin_unlock_irqrestore(&comm->int_mask_lock, flags); in spl2sw_tx_poll()
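
The TX poll mirrors the RX one: under tx_lock it walks from tx_done_pos
toward the producer index tx_pos (the extra tx_desc_full test at line 153
distinguishes a completely full ring, where the two indices are equal, from
an empty one), unmaps and frees each completed buffer, and wakes any netdev
queue the transmit path stopped on a full ring. It then re-enables the TX
interrupt under int_mask_lock exactly as the RX poll does (lines 201-205).
A compressed sketch, with TXD_OWN, TX_DESC_NUM and the skb bookkeeping
fields assumed from spl2sw_define.h, and the stats/budget handling omitted.

	static void spl2sw_tx_reclaim_sketch(struct spl2sw_common *comm)
	{
		u32 tx_done_pos;
		int i;

		spin_lock(&comm->tx_lock);

		tx_done_pos = comm->tx_done_pos;
		while (tx_done_pos != comm->tx_pos || comm->tx_desc_full) {
			u32 cmd = comm->tx_desc[tx_done_pos].cmd1;
			struct spl2sw_skb_info *skbinfo =
					&comm->tx_temp_skb_info[tx_done_pos];

			if (cmd & TXD_OWN)	/* hardware still owns it */
				break;

			dma_unmap_single(&comm->pdev->dev, skbinfo->mapping,
					 skbinfo->len, DMA_TO_DEVICE);
			dev_kfree_skb_irq(skbinfo->skb);
			skbinfo->skb = NULL;

			/* Reclaiming a slot means the ring is not full. */
			comm->tx_desc_full = 0;

			tx_done_pos = (tx_done_pos + 1 == TX_DESC_NUM) ?
				      0 : tx_done_pos + 1;
		}
		comm->tx_done_pos = tx_done_pos;

		/* Wake every queue that was stopped on a full ring. */
		if (!comm->tx_desc_full)
			for (i = 0; i < MAX_NETDEV_NUM; i++)
				if (comm->ndev[i] &&
				    netif_queue_stopped(comm->ndev[i]))
					netif_wake_queue(comm->ndev[i]);

		spin_unlock(&comm->tx_lock);
	}
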
213 struct spl2sw_common *comm = (struct spl2sw_common *)dev_id; in spl2sw_ethernet_interrupt() local
218 status = readl(comm->l2sw_reg_base + L2SW_SW_INT_STATUS_0); in spl2sw_ethernet_interrupt()
220 dev_dbg(&comm->pdev->dev, "Interrupt status is null!\n"); in spl2sw_ethernet_interrupt()
223 writel(status, comm->l2sw_reg_base + L2SW_SW_INT_STATUS_0); in spl2sw_ethernet_interrupt()
227 spin_lock(&comm->int_mask_lock); in spl2sw_ethernet_interrupt()
228 mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0); in spl2sw_ethernet_interrupt()
230 writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0); in spl2sw_ethernet_interrupt()
231 spin_unlock(&comm->int_mask_lock); in spl2sw_ethernet_interrupt()
235 if (comm->ndev[i]) { in spl2sw_ethernet_interrupt()
236 comm->ndev[i]->stats.rx_fifo_errors++; in spl2sw_ethernet_interrupt()
239 dev_dbg(&comm->pdev->dev, "Illegal RX Descriptor!\n"); in spl2sw_ethernet_interrupt()
242 napi_schedule(&comm->rx_napi); in spl2sw_ethernet_interrupt()
247 spin_lock(&comm->int_mask_lock); in spl2sw_ethernet_interrupt()
248 mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0); in spl2sw_ethernet_interrupt()
250 writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0); in spl2sw_ethernet_interrupt()
251 spin_unlock(&comm->int_mask_lock); in spl2sw_ethernet_interrupt()
255 if (comm->ndev[i]) { in spl2sw_ethernet_interrupt()
256 comm->ndev[i]->stats.tx_fifo_errors++; in spl2sw_ethernet_interrupt()
259 dev_dbg(&comm->pdev->dev, "Illegal TX Descriptor Error\n"); in spl2sw_ethernet_interrupt()
261 spin_lock(&comm->int_mask_lock); in spl2sw_ethernet_interrupt()
262 mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0); in spl2sw_ethernet_interrupt()
264 writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0); in spl2sw_ethernet_interrupt()
265 spin_unlock(&comm->int_mask_lock); in spl2sw_ethernet_interrupt()
267 napi_schedule(&comm->tx_napi); in spl2sw_ethernet_interrupt()
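
The interrupt handler ties the two polls together: it reads
L2SW_SW_INT_STATUS_0, acknowledges by writing the status back (presumably
write-1-to-clear), and for each pending cause first sets that cause's bits
in L2SW_SW_INT_MASK_0 under int_mask_lock, then schedules the matching NAPI
context; the polls clear those mask bits again when they finish. A sketch of
that shape, assuming cause masks MAC_INT_RX/MAC_INT_TX from spl2sw_define.h
and folding the error-statistics branches (rx_fifo_errors, tx_fifo_errors,
the dev_dbg() lines) into comments; the return-value handling is idiomatic
rather than quoted.

	#include <linux/interrupt.h>

	static irqreturn_t spl2sw_isr_sketch(int irq, void *dev_id)
	{
		struct spl2sw_common *comm = dev_id;
		u32 status, mask;

		status = readl(comm->l2sw_reg_base + L2SW_SW_INT_STATUS_0);
		if (!status)
			return IRQ_NONE;	/* "Interrupt status is null!" */

		/* Acknowledge every cause we are about to handle. */
		writel(status, comm->l2sw_reg_base + L2SW_SW_INT_STATUS_0);

		if (status & MAC_INT_RX) {
			/* Throttle RX interrupts until the poll unmasks. */
			spin_lock(&comm->int_mask_lock);
			mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
			mask |= MAC_INT_RX;
			writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
			spin_unlock(&comm->int_mask_lock);

			/* FIFO-overrun and bad-descriptor causes would bump
			 * rx_fifo_errors on every registered ndev here.
			 */
			napi_schedule(&comm->rx_napi);
		}

		if (status & MAC_INT_TX) {
			spin_lock(&comm->int_mask_lock);
			mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
			mask |= MAC_INT_TX;
			writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
			spin_unlock(&comm->int_mask_lock);

			napi_schedule(&comm->tx_napi);
		}

		return IRQ_HANDLED;
	}
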