Lines matching references to "p" (the struct octeon_mgmt private-data pointer) in octeon_mgmt.c, the OCTEON management-port Ethernet driver. Each entry gives the source line number, the matching line, and the enclosing function; "argument" or "local" records how p is bound in that function.
157 static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable) in octeon_mgmt_set_rx_irq() argument
162 spin_lock_irqsave(&p->lock, flags); in octeon_mgmt_set_rx_irq()
163 mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA); in octeon_mgmt_set_rx_irq()
165 cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64); in octeon_mgmt_set_rx_irq()
166 spin_unlock_irqrestore(&p->lock, flags); in octeon_mgmt_set_rx_irq()
169 static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable) in octeon_mgmt_set_tx_irq() argument
174 spin_lock_irqsave(&p->lock, flags); in octeon_mgmt_set_tx_irq()
175 mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA); in octeon_mgmt_set_tx_irq()
177 cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64); in octeon_mgmt_set_tx_irq()
178 spin_unlock_irqrestore(&p->lock, flags); in octeon_mgmt_set_tx_irq()
181 static void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p) in octeon_mgmt_enable_rx_irq() argument
183 octeon_mgmt_set_rx_irq(p, 1); in octeon_mgmt_enable_rx_irq()
186 static void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p) in octeon_mgmt_disable_rx_irq() argument
188 octeon_mgmt_set_rx_irq(p, 0); in octeon_mgmt_disable_rx_irq()
191 static void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p) in octeon_mgmt_enable_tx_irq() argument
193 octeon_mgmt_set_tx_irq(p, 1); in octeon_mgmt_enable_tx_irq()
196 static void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p) in octeon_mgmt_disable_tx_irq() argument
198 octeon_mgmt_set_tx_irq(p, 0); in octeon_mgmt_disable_tx_irq()
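All six helpers above reduce to one pattern: a locked read-modify-write of the MIX interrupt-enable CSR, with the enable/disable wrappers passing 1 or 0. A minimal sketch of the RX variant, assuming the struct octeon_mgmt fields visible in the listing; the .s.ithena bit name comes from the Octeon register headers (TX uses .s.othena) and is not itself shown here:

    static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
    {
        union cvmx_mixx_intena mix_intena;
        unsigned long flags;

        spin_lock_irqsave(&p->lock, flags);
        mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
        mix_intena.s.ithena = enable ? 1 : 0;  /* RX threshold interrupt enable */
        cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
        spin_unlock_irqrestore(&p->lock, flags);
    }

The lock matters because the same CSR holds both the RX and TX enable bits, so concurrent set_rx_irq/set_tx_irq calls would otherwise lose updates.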
213 struct octeon_mgmt *p = netdev_priv(netdev); in octeon_mgmt_rx_fill_ring() local
215 while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) { in octeon_mgmt_rx_fill_ring()
227 __skb_queue_tail(&p->rx_list, skb); in octeon_mgmt_rx_fill_ring()
231 re.s.addr = dma_map_single(p->dev, skb->data, in octeon_mgmt_rx_fill_ring()
236 p->rx_ring[p->rx_next_fill] = re.d64; in octeon_mgmt_rx_fill_ring()
242 dma_sync_single_for_device(p->dev, p->rx_ring_handle, in octeon_mgmt_rx_fill_ring()
245 p->rx_next_fill = in octeon_mgmt_rx_fill_ring()
246 (p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE; in octeon_mgmt_rx_fill_ring()
247 p->rx_current_fill++; in octeon_mgmt_rx_fill_ring()
249 cvmx_write_csr(p->mix + MIX_IRING2, 1); in octeon_mgmt_rx_fill_ring()
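octeon_mgmt_rx_fill_ring() keeps the RX descriptor ring topped up. Its loop shape, reconstructed from the lines above (locals as declared in the function; the skb size computation and the union mgmt_port_ring_entry field layout are elided in the listing, so treat those details as assumptions):

    while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
        /* size ~ MTU plus headroom; the exact expression is elided above */
        struct sk_buff *skb = netdev_alloc_skb(netdev, size);
        if (!skb)
            break;
        __skb_queue_tail(&p->rx_list, skb);  /* FIFO keeps skbs paired with slots */
        re.s.len = size;
        re.s.addr = dma_map_single(p->dev, skb->data, size, DMA_FROM_DEVICE);
        p->rx_ring[p->rx_next_fill] = re.d64;
        dma_sync_single_for_device(p->dev, p->rx_ring_handle,
                                   ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
                                   DMA_BIDIRECTIONAL);
        p->rx_next_fill = (p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
        p->rx_current_fill++;
        cvmx_write_csr(p->mix + MIX_IRING2, 1);  /* hand one descriptor to hardware */
    }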
253 static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p) in octeon_mgmt_clean_tx_buffers() argument
261 mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT); in octeon_mgmt_clean_tx_buffers()
263 spin_lock_irqsave(&p->tx_list.lock, flags); in octeon_mgmt_clean_tx_buffers()
265 mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT); in octeon_mgmt_clean_tx_buffers()
268 spin_unlock_irqrestore(&p->tx_list.lock, flags); in octeon_mgmt_clean_tx_buffers()
272 dma_sync_single_for_cpu(p->dev, p->tx_ring_handle, in octeon_mgmt_clean_tx_buffers()
276 re.d64 = p->tx_ring[p->tx_next_clean]; in octeon_mgmt_clean_tx_buffers()
277 p->tx_next_clean = in octeon_mgmt_clean_tx_buffers()
278 (p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE; in octeon_mgmt_clean_tx_buffers()
279 skb = __skb_dequeue(&p->tx_list); in octeon_mgmt_clean_tx_buffers()
285 cvmx_write_csr(p->mix + MIX_ORCNT, mix_orcnt.u64); in octeon_mgmt_clean_tx_buffers()
286 p->tx_current_fill--; in octeon_mgmt_clean_tx_buffers()
288 spin_unlock_irqrestore(&p->tx_list.lock, flags); in octeon_mgmt_clean_tx_buffers()
290 dma_unmap_single(p->dev, re.s.addr, re.s.len, in octeon_mgmt_clean_tx_buffers()
300 ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port)); in octeon_mgmt_clean_tx_buffers()
302 cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0); in octeon_mgmt_clean_tx_buffers()
311 mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT); in octeon_mgmt_clean_tx_buffers()
314 if (cleaned && netif_queue_stopped(p->netdev)) in octeon_mgmt_clean_tx_buffers()
315 netif_wake_queue(p->netdev); in octeon_mgmt_clean_tx_buffers()
320 struct octeon_mgmt *p = from_tasklet(p, t, tx_clean_tasklet); in octeon_mgmt_clean_tx_tasklet() local
321 octeon_mgmt_clean_tx_buffers(p); in octeon_mgmt_clean_tx_tasklet()
322 octeon_mgmt_enable_tx_irq(p); in octeon_mgmt_clean_tx_tasklet()
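octeon_mgmt_clean_tx_buffers() drains completed TX work: MIX_ORCNT reports how many packets the hardware has finished, and writing a count back acknowledges them. Sketch of the loop implied by the lines above (locals as in the driver; the CVMX_MIXX_TSTAMP/TSCTL lines show a PTP TX-timestamp branch that is omitted here):

    mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
    while (mix_orcnt.s.orcnt) {
        spin_lock_irqsave(&p->tx_list.lock, flags);
        /* Re-read under the lock in case another path already drained it. */
        mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
        if (mix_orcnt.s.orcnt == 0) {
            spin_unlock_irqrestore(&p->tx_list.lock, flags);
            break;
        }
        dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
                                ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
                                DMA_BIDIRECTIONAL);
        re.d64 = p->tx_ring[p->tx_next_clean];
        p->tx_next_clean = (p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
        skb = __skb_dequeue(&p->tx_list);
        /* Acknowledge exactly one completed packet back to the hardware. */
        mix_orcnt.u64 = 0;
        mix_orcnt.s.orcnt = 1;
        cvmx_write_csr(p->mix + MIX_ORCNT, mix_orcnt.u64);
        p->tx_current_fill--;
        spin_unlock_irqrestore(&p->tx_list.lock, flags);
        dma_unmap_single(p->dev, re.s.addr, re.s.len, DMA_TO_DEVICE);
        dev_kfree_skb_any(skb);
        cleaned++;
        mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
    }
    if (cleaned && netif_queue_stopped(p->netdev))
        netif_wake_queue(p->netdev);

The tasklet variant just runs this and then re-enables the TX interrupt, closing the mask-in-ISR / unmask-in-bottom-half loop.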
327 struct octeon_mgmt *p = netdev_priv(netdev); in octeon_mgmt_update_rx_stats() local
332 drop = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP); in octeon_mgmt_update_rx_stats()
333 bad = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD); in octeon_mgmt_update_rx_stats()
337 spin_lock_irqsave(&p->lock, flags); in octeon_mgmt_update_rx_stats()
340 spin_unlock_irqrestore(&p->lock, flags); in octeon_mgmt_update_rx_stats()
346 struct octeon_mgmt *p = netdev_priv(netdev); in octeon_mgmt_update_tx_stats() local
353 s0.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT0); in octeon_mgmt_update_tx_stats()
354 s1.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT1); in octeon_mgmt_update_tx_stats()
358 spin_lock_irqsave(&p->lock, flags); in octeon_mgmt_update_tx_stats()
361 spin_unlock_irqrestore(&p->lock, flags); in octeon_mgmt_update_tx_stats()
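Both stats helpers fold hardware counters into netdev->stats under p->lock. RX side, sketched from the lines above; the counters are presumed clear-on-read (open() writes 1 to the AGL_GMX_*_STATS_CTL registers, which appears to select read-to-clear), so each read would yield a delta rather than a running total. That presumption is not verifiable from this listing alone:

    u64 drop = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP);
    u64 bad  = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD);

    if (drop || bad) {
        spin_lock_irqsave(&p->lock, flags);
        netdev->stats.rx_errors  += bad;
        netdev->stats.rx_dropped += drop;
        spin_unlock_irqrestore(&p->lock, flags);
    }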
369 static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p, in octeon_mgmt_dequeue_rx_buffer() argument
374 dma_sync_single_for_cpu(p->dev, p->rx_ring_handle, in octeon_mgmt_dequeue_rx_buffer()
378 re.d64 = p->rx_ring[p->rx_next]; in octeon_mgmt_dequeue_rx_buffer()
379 p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE; in octeon_mgmt_dequeue_rx_buffer()
380 p->rx_current_fill--; in octeon_mgmt_dequeue_rx_buffer()
381 *pskb = __skb_dequeue(&p->rx_list); in octeon_mgmt_dequeue_rx_buffer()
383 dma_unmap_single(p->dev, re.s.addr, in octeon_mgmt_dequeue_rx_buffer()
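octeon_mgmt_dequeue_rx_buffer() is the consumer-side mirror of the refill loop: sync the ring for the CPU, pop the next completed entry, and pull the matching skb off rx_list (FIFO order guarantees they correspond). Sketch; RX_BUF_MAP_SIZE is a hypothetical stand-in, because the real size argument of dma_unmap_single() is elided in the listing:

    static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p,
                                             struct sk_buff **pskb)
    {
        union mgmt_port_ring_entry re;

        dma_sync_single_for_cpu(p->dev, p->rx_ring_handle,
                                ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
                                DMA_BIDIRECTIONAL);

        re.d64 = p->rx_ring[p->rx_next];
        p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE;
        p->rx_current_fill--;

        *pskb = __skb_dequeue(&p->rx_list);
        dma_unmap_single(p->dev, re.s.addr,
                         RX_BUF_MAP_SIZE, /* hypothetical: real argument elided */
                         DMA_FROM_DEVICE);
        return re.d64;
    }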
391 static int octeon_mgmt_receive_one(struct octeon_mgmt *p) in octeon_mgmt_receive_one() argument
393 struct net_device *netdev = p->netdev; in octeon_mgmt_receive_one()
403 re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb); in octeon_mgmt_receive_one()
409 if (p->has_rx_tstamp) { in octeon_mgmt_receive_one()
432 re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2); in octeon_mgmt_receive_one()
463 re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2); in octeon_mgmt_receive_one()
472 cvmx_write_csr(p->mix + MIX_IRCNT, mix_ircnt.u64); in octeon_mgmt_receive_one()
476 static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget) in octeon_mgmt_receive_packets() argument
482 mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT); in octeon_mgmt_receive_packets()
485 rc = octeon_mgmt_receive_one(p); in octeon_mgmt_receive_packets()
490 mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT); in octeon_mgmt_receive_packets()
493 octeon_mgmt_rx_fill_ring(p->netdev); in octeon_mgmt_receive_packets()
500 struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi); in octeon_mgmt_napi_poll() local
501 struct net_device *netdev = p->netdev; in octeon_mgmt_napi_poll()
504 work_done = octeon_mgmt_receive_packets(p, budget); in octeon_mgmt_napi_poll()
509 octeon_mgmt_enable_rx_irq(p); in octeon_mgmt_napi_poll()
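The poll function is the stock NAPI contract: handle at most budget packets; if the device ran dry first, complete NAPI and re-arm the RX interrupt that the ISR masked. Sketched from the lines above (napi_complete_done() is the modern completion call; the listing does not show which completion variant the driver actually uses):

    static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
    {
        struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi);
        struct net_device *netdev = p->netdev;
        int work_done = octeon_mgmt_receive_packets(p, budget);

        if (work_done < budget) {
            /* Ran out of packets before the budget: back to interrupts. */
            napi_complete_done(napi, work_done);
            octeon_mgmt_enable_rx_irq(p);
        }
        octeon_mgmt_update_rx_stats(netdev);
        return work_done;
    }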
517 static void octeon_mgmt_reset_hw(struct octeon_mgmt *p) in octeon_mgmt_reset_hw() argument
524 cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); in octeon_mgmt_reset_hw()
526 mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL); in octeon_mgmt_reset_hw()
529 cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); in octeon_mgmt_reset_hw()
530 cvmx_read_csr(p->mix + MIX_CTL); in octeon_mgmt_reset_hw()
533 mix_bist.u64 = cvmx_read_csr(p->mix + MIX_BIST); in octeon_mgmt_reset_hw()
535 dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n", in octeon_mgmt_reset_hw()
540 dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n", in octeon_mgmt_reset_hw()
563 struct octeon_mgmt *p = netdev_priv(netdev); in octeon_mgmt_set_rx_filtering() local
605 spin_lock_irqsave(&p->lock, flags); in octeon_mgmt_set_rx_filtering()
608 agl_gmx_prtx.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); in octeon_mgmt_set_rx_filtering()
611 cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64); in octeon_mgmt_set_rx_filtering()
618 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CTL, adr_ctl.u64); in octeon_mgmt_set_rx_filtering()
620 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM0, cam_state.cam[0]); in octeon_mgmt_set_rx_filtering()
621 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM1, cam_state.cam[1]); in octeon_mgmt_set_rx_filtering()
622 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM2, cam_state.cam[2]); in octeon_mgmt_set_rx_filtering()
623 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM3, cam_state.cam[3]); in octeon_mgmt_set_rx_filtering()
624 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM4, cam_state.cam[4]); in octeon_mgmt_set_rx_filtering()
625 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM5, cam_state.cam[5]); in octeon_mgmt_set_rx_filtering()
626 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM_EN, cam_state.cam_mask); in octeon_mgmt_set_rx_filtering()
630 cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64); in octeon_mgmt_set_rx_filtering()
632 spin_unlock_irqrestore(&p->lock, flags); in octeon_mgmt_set_rx_filtering()
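The filtering update quiesces RX while the address CAM is rewritten: clear the port-enable bit, program ADR_CTL plus the six CAM word registers and the entry mask, then restore the previous enable state, all inside p->lock so the sequence is atomic against the other register users. The bracketing pattern, sketched from the lines above:

    union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
    int prev_packet_enable;

    spin_lock_irqsave(&p->lock, flags);

    /* Disable the RX port while the CAM contents are inconsistent. */
    agl_gmx_prtx.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
    prev_packet_enable = agl_gmx_prtx.s.en;
    agl_gmx_prtx.s.en = 0;
    cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);

    /* ... write AGL_GMX_RX_ADR_CTL, then CAM0..CAM5 and CAM_EN ... */

    /* Restore whatever enable state the port had before. */
    agl_gmx_prtx.s.en = prev_packet_enable;
    cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);

    spin_unlock_irqrestore(&p->lock, flags);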
649 struct octeon_mgmt *p = netdev_priv(netdev); in octeon_mgmt_change_mtu() local
657 cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, max_packet); in octeon_mgmt_change_mtu()
663 cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER, in octeon_mgmt_change_mtu()
672 struct octeon_mgmt *p = netdev_priv(netdev); in octeon_mgmt_interrupt() local
675 mixx_isr.u64 = cvmx_read_csr(p->mix + MIX_ISR); in octeon_mgmt_interrupt()
678 cvmx_write_csr(p->mix + MIX_ISR, mixx_isr.u64); in octeon_mgmt_interrupt()
679 cvmx_read_csr(p->mix + MIX_ISR); in octeon_mgmt_interrupt()
682 octeon_mgmt_disable_rx_irq(p); in octeon_mgmt_interrupt()
683 napi_schedule(&p->napi); in octeon_mgmt_interrupt()
686 octeon_mgmt_disable_tx_irq(p); in octeon_mgmt_interrupt()
687 tasklet_schedule(&p->tx_clean_tasklet); in octeon_mgmt_interrupt()
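The ISR acknowledges first, dispatches second: MIX_ISR appears to be write-one-to-clear (the handler writes back the bits it read), and the follow-up read flushes the write to the device before any rescheduling happens. Sketch; the .s.irthresh/.s.orthresh bit names are taken from the Octeon headers, not from the listing:

    static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
    {
        struct net_device *netdev = dev_id;
        struct octeon_mgmt *p = netdev_priv(netdev);
        union cvmx_mixx_isr mixx_isr;

        mixx_isr.u64 = cvmx_read_csr(p->mix + MIX_ISR);

        /* Clear any pending interrupts (W1C) and flush the write. */
        cvmx_write_csr(p->mix + MIX_ISR, mixx_isr.u64);
        cvmx_read_csr(p->mix + MIX_ISR);

        if (mixx_isr.s.irthresh) {          /* RX threshold: NAPI takes over */
            octeon_mgmt_disable_rx_irq(p);
            napi_schedule(&p->napi);
        }
        if (mixx_isr.s.orthresh) {          /* TX threshold: tasklet cleans */
            octeon_mgmt_disable_tx_irq(p);
            tasklet_schedule(&p->tx_clean_tasklet);
        }
        return IRQ_HANDLED;
    }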
696 struct octeon_mgmt *p = netdev_priv(netdev); in octeon_mgmt_ioctl_hwtstamp() local
749 p->has_rx_tstamp = false; in octeon_mgmt_ioctl_hwtstamp()
750 rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL); in octeon_mgmt_ioctl_hwtstamp()
752 cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64); in octeon_mgmt_ioctl_hwtstamp()
769 p->has_rx_tstamp = have_hw_timestamps; in octeon_mgmt_ioctl_hwtstamp()
771 if (p->has_rx_tstamp) { in octeon_mgmt_ioctl_hwtstamp()
772 rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL); in octeon_mgmt_ioctl_hwtstamp()
774 cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64); in octeon_mgmt_ioctl_hwtstamp()
798 static void octeon_mgmt_disable_link(struct octeon_mgmt *p) in octeon_mgmt_disable_link() argument
803 prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); in octeon_mgmt_disable_link()
807 cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64); in octeon_mgmt_disable_link()
812 prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); in octeon_mgmt_disable_link()
821 static void octeon_mgmt_enable_link(struct octeon_mgmt *p) in octeon_mgmt_enable_link() argument
826 prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); in octeon_mgmt_enable_link()
830 cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64); in octeon_mgmt_enable_link()
833 static void octeon_mgmt_update_link(struct octeon_mgmt *p) in octeon_mgmt_update_link() argument
835 struct net_device *ndev = p->netdev; in octeon_mgmt_update_link()
839 prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); in octeon_mgmt_update_link()
881 cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64); in octeon_mgmt_update_link()
884 prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); in octeon_mgmt_update_link()
890 prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); in octeon_mgmt_update_link()
891 agl_clk.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_CLK); in octeon_mgmt_update_link()
900 cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64); in octeon_mgmt_update_link()
906 struct octeon_mgmt *p = netdev_priv(netdev); in octeon_mgmt_adjust_link() local
914 spin_lock_irqsave(&p->lock, flags); in octeon_mgmt_adjust_link()
917 if (!phydev->link && p->last_link) in octeon_mgmt_adjust_link()
921 (p->last_duplex != phydev->duplex || in octeon_mgmt_adjust_link()
922 p->last_link != phydev->link || in octeon_mgmt_adjust_link()
923 p->last_speed != phydev->speed)) { in octeon_mgmt_adjust_link()
924 octeon_mgmt_disable_link(p); in octeon_mgmt_adjust_link()
926 octeon_mgmt_update_link(p); in octeon_mgmt_adjust_link()
927 octeon_mgmt_enable_link(p); in octeon_mgmt_adjust_link()
930 p->last_link = phydev->link; in octeon_mgmt_adjust_link()
931 p->last_speed = phydev->speed; in octeon_mgmt_adjust_link()
932 p->last_duplex = phydev->duplex; in octeon_mgmt_adjust_link()
934 spin_unlock_irqrestore(&p->lock, flags); in octeon_mgmt_adjust_link()
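octeon_mgmt_adjust_link() is the phylib callback: under p->lock it reprograms the MAC only when link, speed, or duplex actually changed, bouncing the link around the update, then caches the new state for the next comparison. The decision logic from the lines above (link_changed presumably feeds a link up/down report after the lock is dropped; that use is elided in the listing):

    spin_lock_irqsave(&p->lock, flags);

    if (!phydev->link && p->last_link)
        link_changed = -1;                 /* link went away */

    if (phydev->link &&
        (p->last_duplex != phydev->duplex ||
         p->last_link   != phydev->link ||
         p->last_speed  != phydev->speed)) {
        octeon_mgmt_disable_link(p);
        link_changed = 1;
        octeon_mgmt_update_link(p);
        octeon_mgmt_enable_link(p);
    }

    p->last_link   = phydev->link;
    p->last_speed  = phydev->speed;
    p->last_duplex = phydev->duplex;

    spin_unlock_irqrestore(&p->lock, flags);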
947 struct octeon_mgmt *p = netdev_priv(netdev); in octeon_mgmt_init_phy() local
950 if (octeon_is_simulation() || p->phy_np == NULL) { in octeon_mgmt_init_phy()
956 phydev = of_phy_connect(netdev, p->phy_np, in octeon_mgmt_init_phy()
968 struct octeon_mgmt *p = netdev_priv(netdev); in octeon_mgmt_open() local
980 p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), in octeon_mgmt_open()
982 if (!p->tx_ring) in octeon_mgmt_open()
984 p->tx_ring_handle = in octeon_mgmt_open()
985 dma_map_single(p->dev, p->tx_ring, in octeon_mgmt_open()
988 p->tx_next = 0; in octeon_mgmt_open()
989 p->tx_next_clean = 0; in octeon_mgmt_open()
990 p->tx_current_fill = 0; in octeon_mgmt_open()
993 p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), in octeon_mgmt_open()
995 if (!p->rx_ring) in octeon_mgmt_open()
997 p->rx_ring_handle = in octeon_mgmt_open()
998 dma_map_single(p->dev, p->rx_ring, in octeon_mgmt_open()
1002 p->rx_next = 0; in octeon_mgmt_open()
1003 p->rx_next_fill = 0; in octeon_mgmt_open()
1004 p->rx_current_fill = 0; in octeon_mgmt_open()
1006 octeon_mgmt_reset_hw(p); in octeon_mgmt_open()
1008 mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL); in octeon_mgmt_open()
1013 cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); in octeon_mgmt_open()
1015 mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL); in octeon_mgmt_open()
1032 if (p->port) { in octeon_mgmt_open()
1045 oring1.s.obase = p->tx_ring_handle >> 3; in octeon_mgmt_open()
1047 cvmx_write_csr(p->mix + MIX_ORING1, oring1.u64); in octeon_mgmt_open()
1050 iring1.s.ibase = p->rx_ring_handle >> 3; in octeon_mgmt_open()
1052 cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64); in octeon_mgmt_open()
1071 cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); in octeon_mgmt_open()
1075 dev_err(p->dev, "Cannot initialize PHY on MIX%d.\n", p->port); in octeon_mgmt_open()
1088 agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); in octeon_mgmt_open()
1090 cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64); in octeon_mgmt_open()
1099 agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); in octeon_mgmt_open()
1105 cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64); in octeon_mgmt_open()
1106 cvmx_read_csr(p->agl_prt_ctl); /* Force write out before wait */ in octeon_mgmt_open()
1114 agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); in octeon_mgmt_open()
1116 cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64); in octeon_mgmt_open()
1119 agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); in octeon_mgmt_open()
1124 cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64); in octeon_mgmt_open()
1126 cvmx_read_csr(p->agl_prt_ctl); in octeon_mgmt_open()
1144 cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_CTL, 1); in octeon_mgmt_open()
1145 cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP, 0); in octeon_mgmt_open()
1146 cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD, 0); in octeon_mgmt_open()
1148 cvmx_write_csr(p->agl + AGL_GMX_TX_STATS_CTL, 1); in octeon_mgmt_open()
1149 cvmx_write_csr(p->agl + AGL_GMX_TX_STAT0, 0); in octeon_mgmt_open()
1150 cvmx_write_csr(p->agl + AGL_GMX_TX_STAT1, 0); in octeon_mgmt_open()
1153 cvmx_write_csr(p->mix + MIX_ISR, cvmx_read_csr(p->mix + MIX_ISR)); in octeon_mgmt_open()
1155 if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name, in octeon_mgmt_open()
1157 dev_err(p->dev, "request_irq(%d) failed.\n", p->irq); in octeon_mgmt_open()
1164 cvmx_write_csr(p->mix + MIX_IRHWM, mix_irhwm.u64); in octeon_mgmt_open()
1169 cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64); in octeon_mgmt_open()
1175 cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64); in octeon_mgmt_open()
1180 rxx_frm_ctl.s.ptp_mode = p->has_rx_tstamp ? 1 : 0; in octeon_mgmt_open()
1204 cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64); in octeon_mgmt_open()
1207 octeon_mgmt_disable_link(p); in octeon_mgmt_open()
1209 octeon_mgmt_update_link(p); in octeon_mgmt_open()
1210 octeon_mgmt_enable_link(p); in octeon_mgmt_open()
1212 p->last_link = 0; in octeon_mgmt_open()
1213 p->last_speed = 0; in octeon_mgmt_open()
1223 napi_enable(&p->napi); in octeon_mgmt_open()
1227 octeon_mgmt_reset_hw(p); in octeon_mgmt_open()
1228 dma_unmap_single(p->dev, p->rx_ring_handle, in octeon_mgmt_open()
1231 kfree(p->rx_ring); in octeon_mgmt_open()
1233 dma_unmap_single(p->dev, p->tx_ring_handle, in octeon_mgmt_open()
1236 kfree(p->tx_ring); in octeon_mgmt_open()
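open() allocates both descriptor rings with plain kzalloc() and maps them with streaming DMA; the >> 3 when programming MIX_ORING1/MIX_IRING1 suggests the CSRs take the ring base as a 64-bit-word address. TX-side sketch (the .s.osize field name is an assumption from the Octeon headers; the RX ring and the error unwind at the end of the function follow the same shape):

    p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
                         GFP_KERNEL);
    if (!p->tx_ring)
        return -ENOMEM;
    p->tx_ring_handle =
        dma_map_single(p->dev, p->tx_ring,
                       ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
                       DMA_BIDIRECTIONAL);
    p->tx_next = 0;
    p->tx_next_clean = 0;
    p->tx_current_fill = 0;

    oring1.u64 = 0;
    oring1.s.obase = p->tx_ring_handle >> 3;    /* 64-bit-word address */
    oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;  /* field name assumed */
    cvmx_write_csr(p->mix + MIX_ORING1, oring1.u64);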
1242 struct octeon_mgmt *p = netdev_priv(netdev); in octeon_mgmt_stop() local
1244 napi_disable(&p->napi); in octeon_mgmt_stop()
1254 octeon_mgmt_reset_hw(p); in octeon_mgmt_stop()
1256 free_irq(p->irq, netdev); in octeon_mgmt_stop()
1259 skb_queue_purge(&p->tx_list); in octeon_mgmt_stop()
1260 skb_queue_purge(&p->rx_list); in octeon_mgmt_stop()
1262 dma_unmap_single(p->dev, p->rx_ring_handle, in octeon_mgmt_stop()
1265 kfree(p->rx_ring); in octeon_mgmt_stop()
1267 dma_unmap_single(p->dev, p->tx_ring_handle, in octeon_mgmt_stop()
1270 kfree(p->tx_ring); in octeon_mgmt_stop()
1278 struct octeon_mgmt *p = netdev_priv(netdev); in octeon_mgmt_xmit() local
1286 re.s.addr = dma_map_single(p->dev, skb->data, in octeon_mgmt_xmit()
1290 spin_lock_irqsave(&p->tx_list.lock, flags); in octeon_mgmt_xmit()
1292 if (unlikely(p->tx_current_fill >= ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) { in octeon_mgmt_xmit()
1293 spin_unlock_irqrestore(&p->tx_list.lock, flags); in octeon_mgmt_xmit()
1295 spin_lock_irqsave(&p->tx_list.lock, flags); in octeon_mgmt_xmit()
1298 if (unlikely(p->tx_current_fill >= in octeon_mgmt_xmit()
1300 spin_unlock_irqrestore(&p->tx_list.lock, flags); in octeon_mgmt_xmit()
1301 dma_unmap_single(p->dev, re.s.addr, re.s.len, in octeon_mgmt_xmit()
1306 __skb_queue_tail(&p->tx_list, skb); in octeon_mgmt_xmit()
1309 p->tx_ring[p->tx_next] = re.d64; in octeon_mgmt_xmit()
1310 p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE; in octeon_mgmt_xmit()
1311 p->tx_current_fill++; in octeon_mgmt_xmit()
1313 spin_unlock_irqrestore(&p->tx_list.lock, flags); in octeon_mgmt_xmit()
1315 dma_sync_single_for_device(p->dev, p->tx_ring_handle, in octeon_mgmt_xmit()
1323 cvmx_write_csr(p->mix + MIX_ORING2, 1); in octeon_mgmt_xmit()
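xmit maps the skb before taking the list lock, then uses a stop-and-recheck sequence to close the race with the cleanup path: if the ring looks full, drop the lock, stop the queue, retake the lock, and test again before giving up with NETDEV_TX_BUSY. Sketch of that sequence from the lines above (the netif_stop_queue() calls sit in lines elided from the listing and are an inference from the wake logic in clean_tx_buffers):

    spin_lock_irqsave(&p->tx_list.lock, flags);

    if (unlikely(p->tx_current_fill >=
                 ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) {
        spin_unlock_irqrestore(&p->tx_list.lock, flags);
        netif_stop_queue(netdev);           /* inferred: elided in listing */
        spin_lock_irqsave(&p->tx_list.lock, flags);
    }

    if (unlikely(p->tx_current_fill >=
                 ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
        /* Still full after the re-test: undo the mapping and punt. */
        spin_unlock_irqrestore(&p->tx_list.lock, flags);
        dma_unmap_single(p->dev, re.s.addr, re.s.len, DMA_TO_DEVICE);
        return NETDEV_TX_BUSY;
    }

    __skb_queue_tail(&p->tx_list, skb);
    p->tx_ring[p->tx_next] = re.d64;
    p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE;
    p->tx_current_fill++;

    spin_unlock_irqrestore(&p->tx_list.lock, flags);

    dma_sync_single_for_device(p->dev, p->tx_ring_handle,
                               ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
                               DMA_BIDIRECTIONAL);
    cvmx_write_csr(p->mix + MIX_ORING2, 1); /* hand one descriptor to hardware */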
1335 struct octeon_mgmt *p = netdev_priv(netdev); in octeon_mgmt_poll_controller() local
1337 octeon_mgmt_receive_packets(p, 16); in octeon_mgmt_poll_controller()
1383 struct octeon_mgmt *p; in octeon_mgmt_probe() local
1398 p = netdev_priv(netdev); in octeon_mgmt_probe()
1399 netif_napi_add_weight(netdev, &p->napi, octeon_mgmt_napi_poll, in octeon_mgmt_probe()
1402 p->netdev = netdev; in octeon_mgmt_probe()
1403 p->dev = &pdev->dev; in octeon_mgmt_probe()
1404 p->has_rx_tstamp = false; in octeon_mgmt_probe()
1408 p->port = be32_to_cpup(data); in octeon_mgmt_probe()
1415 snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port); in octeon_mgmt_probe()
1421 p->irq = result; in octeon_mgmt_probe()
1444 p->mix_phys = res_mix->start; in octeon_mgmt_probe()
1445 p->mix_size = resource_size(res_mix); in octeon_mgmt_probe()
1446 p->agl_phys = res_agl->start; in octeon_mgmt_probe()
1447 p->agl_size = resource_size(res_agl); in octeon_mgmt_probe()
1448 p->agl_prt_ctl_phys = res_agl_prt_ctl->start; in octeon_mgmt_probe()
1449 p->agl_prt_ctl_size = resource_size(res_agl_prt_ctl); in octeon_mgmt_probe()
1452 if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size, in octeon_mgmt_probe()
1460 if (!devm_request_mem_region(&pdev->dev, p->agl_phys, p->agl_size, in octeon_mgmt_probe()
1468 if (!devm_request_mem_region(&pdev->dev, p->agl_prt_ctl_phys, in octeon_mgmt_probe()
1469 p->agl_prt_ctl_size, res_agl_prt_ctl->name)) { in octeon_mgmt_probe()
1476 p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size); in octeon_mgmt_probe()
1477 p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size); in octeon_mgmt_probe()
1478 p->agl_prt_ctl = (u64)devm_ioremap(&pdev->dev, p->agl_prt_ctl_phys, in octeon_mgmt_probe()
1479 p->agl_prt_ctl_size); in octeon_mgmt_probe()
1480 if (!p->mix || !p->agl || !p->agl_prt_ctl) { in octeon_mgmt_probe()
1486 spin_lock_init(&p->lock); in octeon_mgmt_probe()
1488 skb_queue_head_init(&p->tx_list); in octeon_mgmt_probe()
1489 skb_queue_head_init(&p->rx_list); in octeon_mgmt_probe()
1490 tasklet_setup(&p->tx_clean_tasklet, in octeon_mgmt_probe()
1505 p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); in octeon_mgmt_probe()
1519 of_node_put(p->phy_np); in octeon_mgmt_probe()
1527 struct octeon_mgmt *p = netdev_priv(netdev); in octeon_mgmt_remove() local
1530 of_node_put(p->phy_np); in octeon_mgmt_remove()
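probe() and remove() bracket a single OF reference: of_parse_phandle(pdev->dev.of_node, "phy-handle", 0) takes a hold on the PHY node, and both the probe error path and remove() drop it with of_node_put(). The MMIO windows, by contrast, are devm-managed (devm_request_mem_region() plus devm_ioremap()), so they need no explicit teardown in remove().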