Lines matching refs:pktdma

Cross-reference listing: every line that uses the pktdma argument in the KeyStone Navigator packet-DMA driver (the ksnav_* functions below appear to be from U-Boot's drivers/dma/keystone_nav.c). Only the matching lines are shown; elided code is marked /* ... */.

static int ksnav_rx_disable(struct pktdma_cfg *pktdma)
{
	/* Tear down each RX channel and wait for the hardware to ack */
	for (j = 0; j < pktdma->rx_ch_num; j++) {
		v = readl(&pktdma->rx_ch[j].cfg_a);
		/* ... skip channels that are not enabled ... */
		writel(v | CPDMA_CHAN_A_TDOWN, &pktdma->rx_ch[j].cfg_a);
		/* ... poll until CPDMA_CHAN_A_ENABLE clears ... */
		v = readl(&pktdma->rx_ch[j].cfg_a);
	}

	/* Return all RX flow registers to their reset state */
	for (j = 0; j < pktdma->rx_flow_num; j++) {
		writel(0, &pktdma->rx_flows[j].control);
		writel(0, &pktdma->rx_flows[j].tags);
		writel(0, &pktdma->rx_flows[j].tag_sel);
		writel(0, &pktdma->rx_flows[j].fdq_sel[0]);
		writel(0, &pktdma->rx_flows[j].fdq_sel[1]);
		writel(0, &pktdma->rx_flows[j].thresh[0]);
		writel(0, &pktdma->rx_flows[j].thresh[1]);
		writel(0, &pktdma->rx_flows[j].thresh[2]);
	}
}

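The elided lines inside the teardown loop are a poll-until-disabled handshake. A minimal sketch of that pattern, assuming the usual U-Boot readl()/writel()/udelay() accessors; the helper name, poll budget, and delay are illustrative, not the driver's actual values:

/* Hypothetical helper: request teardown of one channel, then poll
 * cfg_a until the hardware clears the enable bit or we give up. */
static int chan_teardown_wait(u32 *cfg_a)
{
	u32 v = readl(cfg_a);
	int i;

	if (!(v & CPDMA_CHAN_A_ENABLE))
		return 0;				/* already disabled */

	writel(v | CPDMA_CHAN_A_TDOWN, cfg_a);		/* request teardown */
	for (i = 0; i < 100; i++) {			/* arbitrary budget */
		udelay(100);
		if (!(readl(cfg_a) & CPDMA_CHAN_A_ENABLE))
			return 0;			/* teardown done */
	}
	return -1;					/* timed out */
}
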
static int ksnav_tx_disable(struct pktdma_cfg *pktdma)
{
	/* Same teardown handshake as ksnav_rx_disable(), minus flow clearing */
	for (j = 0; j < pktdma->tx_ch_num; j++) {
		v = readl(&pktdma->tx_ch[j].cfg_a);
		/* ... skip channels that are not enabled ... */
		writel(v | CPDMA_CHAN_A_TDOWN, &pktdma->tx_ch[j].cfg_a);
		/* ... poll until CPDMA_CHAN_A_ENABLE clears ... */
		v = readl(&pktdma->tx_ch[j].cfg_a);
	}
}

int ksnav_init(struct pktdma_cfg *pktdma, struct rx_buff_desc *rx_buffers)
{
	/* ... */
	if (pktdma == NULL || rx_buffers == NULL /* ... */)
		return QM_ERR;

	pktdma->rx_flow = rx_buffers->rx_flow;

	/* Seed the RX free queue: one host descriptor per receive buffer */
	/* ... */
	qm_buff_push(hd, pktdma->rx_free_q, /* buffer, length: ... */);
	/* ... */

	ksnav_rx_disable(pktdma);

	/* Route the configured RX flow to the receive queue */
	v = CPDMA_REG_VAL_MAKE_RX_FLOW_A(1, 1, 0, 0, 0, 0, 0, pktdma->rx_rcv_q);
	writel(v, &pktdma->rx_flows[pktdma->rx_flow].control);
	writel(0, &pktdma->rx_flows[pktdma->rx_flow].tags);
	writel(0, &pktdma->rx_flows[pktdma->rx_flow].tag_sel);

	/* Both free-descriptor queue selectors point at the RX free queue */
	v = CPDMA_REG_VAL_MAKE_RX_FLOW_D(0, pktdma->rx_free_q, 0,
					 pktdma->rx_free_q);
	writel(v, &pktdma->rx_flows[pktdma->rx_flow].fdq_sel[0]);
	writel(v, &pktdma->rx_flows[pktdma->rx_flow].fdq_sel[1]);
	writel(0, &pktdma->rx_flows[pktdma->rx_flow].thresh[0]);
	writel(0, &pktdma->rx_flows[pktdma->rx_flow].thresh[1]);
	writel(0, &pktdma->rx_flows[pktdma->rx_flow].thresh[2]);

	/* Enable every RX channel */
	for (j = 0; j < pktdma->rx_ch_num; j++)
		writel(CPDMA_CHAN_A_ENABLE, &pktdma->rx_ch[j].cfg_a);

	writel(0, &pktdma->global->emulation_control);

	/* Tell the DMA where the Queue Manager lives */
	writel(CONFIG_KSNAV_QM_BASE_ADDRESS, &pktdma->global->qm_base_addr[0]);

	/* Enable every TX channel */
	for (j = 0; j < pktdma->tx_ch_num; j++) {
		writel(0, &pktdma->tx_ch[j].cfg_b);
		writel(CPDMA_CHAN_A_ENABLE, &pktdma->tx_ch[j].cfg_a);
	}
	/* ... */
}

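The seeding loop elided near the top of ksnav_init() pops empty host descriptors and pushes one receive buffer each onto the RX free queue. A sketch under stated assumptions: qm_pop()/qm_buff_push() behave as elsewhere in this listing, qm_cfg->qpool_num names the descriptor pool, and the rx_buff_desc fields num_buffs/buff_ptr/buff_len are inferred, not confirmed here:

/* Sketch of the elided RX free-queue seeding loop */
u8 *buf = rx_buffers->buff_ptr;		/* assumed field names */
struct qm_host_desc *hd;
u32 j;

for (j = 0; j < rx_buffers->num_buffs; j++) {
	hd = qm_pop(qm_cfg->qpool_num);	/* take an empty descriptor */
	if (hd == NULL)
		return QM_ERR;

	/* attach the buffer and hand the descriptor to the RX DMA */
	qm_buff_push(hd, pktdma->rx_free_q, buf, rx_buffers->buff_len);
	buf += rx_buffers->buff_len;
}
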
int ksnav_close(struct pktdma_cfg *pktdma)
{
	if (!pktdma)
		return QM_ERR;

	/* Quiesce both directions before returning the queues */
	ksnav_tx_disable(pktdma);
	ksnav_rx_disable(pktdma);

	queue_close(pktdma->rx_free_q);
	queue_close(pktdma->rx_rcv_q);
	queue_close(pktdma->tx_snd_q);
	/* ... */
}

int ksnav_send(struct pktdma_cfg *pktdma, u32 *pkt, int num_bytes, u32 swinfo2)
{
	/* ... pop and fill a host descriptor hd, then queue it for TX ... */
	qm_buff_push(hd, pktdma->tx_snd_q, pkt, num_bytes);
	/* ... */
}

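Only the final push is visible in ksnav_send(); the elided lines obtain a descriptor and stamp it with the payload length and the caller's software info word. A sketch of the likely shape; the qm_host_desc field names (desc_info, swinfo, packet_info) are assumptions based on how the queue-manager descriptors are used elsewhere, not confirmed by this listing:

/* Sketch of the elided descriptor setup in the send path */
struct qm_host_desc *hd = qm_pop(qm_cfg->qpool_num);

if (hd == NULL)
	return QM_ERR;

hd->desc_info	= num_bytes;		/* payload length for the DMA */
hd->swinfo[2]	= swinfo2;		/* opaque per-packet word */
hd->packet_info	= qm_cfg->qpool_num;	/* recycle queue after TX */

qm_buff_push(hd, pktdma->tx_snd_q, pkt, num_bytes);
return QM_OK;
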
void *ksnav_recv(struct pktdma_cfg *pktdma, u32 **pkt, int *num_bytes)
{
	hd = qm_pop(pktdma->rx_rcv_q);
	/* ... NULL if the queue is empty; else report buffer and length ... */
}

void ksnav_release_rxhd(struct pktdma_cfg *pktdma, void *hd)
{
	/* ... restore the descriptor's original buffer pointer/length ... */
	qm_push(_hd, pktdma->rx_free_q);
}

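Taken together, these functions form a small packet-DMA API: ksnav_init() seeds and enables the engine, ksnav_send()/ksnav_recv() move packets, ksnav_release_rxhd() recycles a received descriptor, and ksnav_close() tears everything down. A hypothetical usage sketch; the buffer counts, sizes, flow index, and frame appear nowhere in this listing and are purely illustrative:

/* Hypothetical round trip through the ksnav API */
static u8 rx_bufs[32 * 1520];	/* 32 receive buffers, 1520 B each */
static u8 tx_frame[64];		/* some frame to transmit */

int ksnav_example(struct pktdma_cfg *pktdma)
{
	struct rx_buff_desc rxb = {
		.buff_ptr  = rx_bufs,	/* assumed field names, see above */
		.num_buffs = 32,
		.buff_len  = 1520,
		.rx_flow   = 0,		/* board-specific flow index */
	};
	u32 *pkt;
	int len;
	void *hd;

	if (ksnav_init(pktdma, &rxb) != QM_OK)
		return -1;

	ksnav_send(pktdma, (u32 *)tx_frame, sizeof(tx_frame), 0);

	/* poll for one received packet and recycle its descriptor */
	hd = ksnav_recv(pktdma, &pkt, &len);
	if (hd)
		ksnav_release_rxhd(pktdma, hd);

	ksnav_close(pktdma);
	return 0;
}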