Lines Matching refs:oct
38 * @oct: Octeon device private data structure.
48 static int octep_alloc_ioq_vectors(struct octep_device *oct)
53 for (i = 0; i < oct->num_oqs; i++) {
54 oct->ioq_vector[i] = vzalloc(sizeof(*oct->ioq_vector[i]));
55 if (!oct->ioq_vector[i])
58 ioq_vector = oct->ioq_vector[i];
59 ioq_vector->iq = oct->iq[i];
60 ioq_vector->oq = oct->oq[i];
61 ioq_vector->octep_dev = oct;
64 dev_info(&oct->pdev->dev, "Allocated %d IOQ vectors\n", oct->num_oqs);
70 vfree(oct->ioq_vector[i]);
71 oct->ioq_vector[i] = NULL;
79 * @oct: Octeon device private data structure.
81 static void octep_free_ioq_vectors(struct octep_device *oct)
85 for (i = 0; i < oct->num_oqs; i++) {
86 if (oct->ioq_vector[i]) {
87 vfree(oct->ioq_vector[i]);
88 oct->ioq_vector[i] = NULL;
91 netdev_info(oct->netdev, "Freed IOQ Vectors\n");
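The two fragments above pair per-queue allocation with its exact mirror: octep_alloc_ioq_vectors() vzalloc()s one vector structure per output queue and unwinds any partial allocation on failure, while octep_free_ioq_vectors() releases and NULLs every slot. A minimal sketch of that allocate-then-unwind pattern, using hypothetical my_dev/my_vec types in place of the driver's structures:

#include <linux/errno.h>
#include <linux/vmalloc.h>

struct my_vec { int q_no; };
struct my_dev { int num_oqs; struct my_vec *vec[64]; };

static int alloc_vectors(struct my_dev *d)
{
        int i;

        for (i = 0; i < d->num_oqs; i++) {
                d->vec[i] = vzalloc(sizeof(*d->vec[i]));
                if (!d->vec[i])
                        goto free_partial;      /* undo earlier allocations */
                d->vec[i]->q_no = i;
        }
        return 0;

free_partial:
        while (--i >= 0) {
                vfree(d->vec[i]);
                d->vec[i] = NULL;
        }
        return -ENOMEM;
}

vzalloc() fits here presumably because the per-queue structure is comparatively large and does not need to be physically contiguous.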
97 * @oct: Octeon device private data structure.
105 static int octep_enable_msix_range(struct octep_device *oct)
111 num_msix = oct->num_oqs + CFG_GET_NON_IOQ_MSIX(oct->conf);
112 oct->msix_entries = kcalloc(num_msix,
114 if (!oct->msix_entries)
118 oct->msix_entries[i].entry = i;
120 msix_allocated = pci_enable_msix_range(oct->pdev, oct->msix_entries,
123 dev_err(&oct->pdev->dev,
128 oct->num_irqs = msix_allocated;
129 dev_info(&oct->pdev->dev, "MSI-X enabled successfully\n");
135 pci_disable_msix(oct->pdev);
136 kfree(oct->msix_entries);
137 oct->msix_entries = NULL;
145 * @oct: Octeon device private data structure.
149 static void octep_disable_msix(struct octep_device *oct)
151 pci_disable_msix(oct->pdev);
152 kfree(oct->msix_entries);
153 oct->msix_entries = NULL;
154 dev_info(&oct->pdev->dev, "Disabled MSI-X\n");
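octep_enable_msix_range() builds a struct msix_entry table covering the non-IOQ vectors plus one vector per queue, asks the PCI core for the whole range, and frees the table if the range cannot be granted; octep_disable_msix() is its mirror at teardown. A hedged sketch of the same call sequence, with the vector count supplied by the caller:

#include <linux/pci.h>
#include <linux/slab.h>

static struct msix_entry *enable_msix(struct pci_dev *pdev, int num_msix)
{
        struct msix_entry *entries;
        int i, ret;

        entries = kcalloc(num_msix, sizeof(*entries), GFP_KERNEL);
        if (!entries)
                return NULL;

        for (i = 0; i < num_msix; i++)
                entries[i].entry = i;           /* request vectors 0..num_msix-1 */

        /* min == max: take the full range or fail */
        ret = pci_enable_msix_range(pdev, entries, num_msix, num_msix);
        if (ret < 0) {
                kfree(entries);
                return NULL;
        }
        return entries;
}

With minvec equal to maxvec, pci_enable_msix_range() either allocates exactly num_msix vectors or returns a negative errno without enabling MSI-X, so the only cleanup needed on failure is freeing the table.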
167 struct octep_device *oct = data;
169 return oct->hw_ops.non_ioq_intr_handler(oct);
184 struct octep_device *oct = ioq_vector->octep_dev;
186 return oct->hw_ops.ioq_intr_handler(ioq_vector);
192 * @oct: Octeon device private data structure.
199 static int octep_request_irqs(struct octep_device *oct)
201 struct net_device *netdev = oct->netdev;
208 num_non_ioq_msix = CFG_GET_NON_IOQ_MSIX(oct->conf);
209 non_ioq_msix_names = CFG_GET_NON_IOQ_MSIX_NAMES(oct->conf);
211 oct->non_ioq_irq_names = kcalloc(num_non_ioq_msix,
213 if (!oct->non_ioq_irq_names)
220 irq_name = &oct->non_ioq_irq_names[i * OCTEP_MSIX_NAME_SIZE];
221 msix_entry = &oct->msix_entries[i];
227 irq_name, oct);
237 for (j = 0; j < oct->num_oqs; j++) {
238 ioq_vector = oct->ioq_vector[j];
239 msix_entry = &oct->msix_entries[j + num_non_ioq_msix];
263 ioq_vector = oct->ioq_vector[j];
264 msix_entry = &oct->msix_entries[j + num_non_ioq_msix];
272 free_irq(oct->msix_entries[i].vector, oct);
274 kfree(oct->non_ioq_irq_names);
275 oct->non_ioq_irq_names = NULL;
283 * @oct: Octeon device private data structure.
287 static void octep_free_irqs(struct octep_device *oct)
292 for (i = 0; i < CFG_GET_NON_IOQ_MSIX(oct->conf); i++)
293 free_irq(oct->msix_entries[i].vector, oct);
294 kfree(oct->non_ioq_irq_names);
297 for (i = CFG_GET_NON_IOQ_MSIX(oct->conf); i < oct->num_irqs; i++) {
298 irq_set_affinity_hint(oct->msix_entries[i].vector, NULL);
299 free_irq(oct->msix_entries[i].vector,
300 oct->ioq_vector[i - CFG_GET_NON_IOQ_MSIX(oct->conf)]);
302 netdev_info(oct->netdev, "IRQs freed\n");
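octep_request_irqs() registers the non-IOQ vectors first and then one vector per queue pair, and octep_free_irqs() releases them in the same grouping, clearing the affinity hint before free_irq(). A sketch of registering and releasing a single IOQ vector; the handler name and the per-vector name buffer are assumptions, not taken from the matched lines:

static int request_one_ioq_irq(struct octep_device *oct, int q,
                               int num_non_ioq_msix)
{
        struct octep_ioq_vector *ioq_vector = oct->ioq_vector[q];
        unsigned int vector = oct->msix_entries[q + num_non_ioq_msix].vector;

        /* "octep_ioq_intr_handler" and the name buffer are assumed here */
        return request_irq(vector, octep_ioq_intr_handler, 0,
                           ioq_vector->name, ioq_vector);
}

static void free_one_ioq_irq(struct octep_device *oct, int q,
                             int num_non_ioq_msix)
{
        unsigned int vector = oct->msix_entries[q + num_non_ioq_msix].vector;

        irq_set_affinity_hint(vector, NULL);    /* drop the hint before freeing */
        free_irq(vector, oct->ioq_vector[q]);
}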
308 * @oct: Octeon device private data structure.
316 static int octep_setup_irqs(struct octep_device *oct)
318 if (octep_alloc_ioq_vectors(oct))
321 if (octep_enable_msix_range(oct))
324 if (octep_request_irqs(oct))
330 octep_disable_msix(oct);
332 octep_free_ioq_vectors(oct);
340 * @oct: Octeon device private data structure.
342 static void octep_clean_irqs(struct octep_device *oct)
344 octep_free_irqs(oct);
345 octep_disable_msix(oct);
346 octep_free_ioq_vectors(oct);
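octep_setup_irqs() chains the three helpers above and unwinds in reverse when a later step fails, which is also why octep_clean_irqs() frees the IRQs before disabling MSI-X and only then releases the vectors. A sketch of that goto-based unwind; the call order follows the fragments, while the label names and return value are guesses:

static int setup_irqs(struct octep_device *oct)
{
        if (octep_alloc_ioq_vectors(oct))
                goto ioq_vector_err;
        if (octep_enable_msix_range(oct))
                goto enable_msix_err;
        if (octep_request_irqs(oct))
                goto request_irq_err;
        return 0;

request_irq_err:
        octep_disable_msix(oct);
enable_msix_err:
        octep_free_ioq_vectors(oct);
ioq_vector_err:
        return -1;
}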
405 * @oct: Octeon device private data structure.
407 static void octep_napi_add(struct octep_device *oct)
411 for (i = 0; i < oct->num_oqs; i++) {
412 netdev_dbg(oct->netdev, "Adding NAPI on Q-%d\n", i);
413 netif_napi_add(oct->netdev, &oct->ioq_vector[i]->napi,
415 oct->oq[i]->napi = &oct->ioq_vector[i]->napi;
422 * @oct: Octeon device private data structure.
424 static void octep_napi_delete(struct octep_device *oct)
428 for (i = 0; i < oct->num_oqs; i++) {
429 netdev_dbg(oct->netdev, "Deleting NAPI on Q-%d\n", i);
430 netif_napi_del(&oct->ioq_vector[i]->napi);
431 oct->oq[i]->napi = NULL;
438 * @oct: Octeon device private data structure.
440 static void octep_napi_enable(struct octep_device *oct)
444 for (i = 0; i < oct->num_oqs; i++) {
445 netdev_dbg(oct->netdev, "Enabling NAPI on Q-%d\n", i);
446 napi_enable(&oct->ioq_vector[i]->napi);
453 * @oct: Octeon device private data structure.
455 static void octep_napi_disable(struct octep_device *oct)
459 for (i = 0; i < oct->num_oqs; i++) {
460 netdev_dbg(oct->netdev, "Disabling NAPI on Q-%d\n", i);
461 napi_disable(&oct->ioq_vector[i]->napi);
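Each IOQ vector carries one NAPI context: octep_napi_add()/octep_napi_enable() register and arm it when the interface opens, and octep_napi_disable()/octep_napi_delete() quiesce and unregister it on stop. A sketch of that lifecycle; octep_napi_poll stands for the poll callback, which does not appear in the matched lines, and the three-argument netif_napi_add() assumes a v6.1+ kernel:

static void napi_start(struct octep_device *oct)
{
        int i;

        for (i = 0; i < oct->num_oqs; i++) {
                netif_napi_add(oct->netdev, &oct->ioq_vector[i]->napi,
                               octep_napi_poll);        /* register */
                napi_enable(&oct->ioq_vector[i]->napi); /* arm */
        }
}

static void napi_stop(struct octep_device *oct)
{
        int i;

        for (i = 0; i < oct->num_oqs; i++) {
                napi_disable(&oct->ioq_vector[i]->napi); /* quiesce */
                netif_napi_del(&oct->ioq_vector[i]->napi);
        }
}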
484 struct octep_device *oct = netdev_priv(netdev);
490 oct->hw_ops.reset_io_queues(oct);
492 if (octep_setup_iqs(oct))
494 if (octep_setup_oqs(oct))
496 if (octep_setup_irqs(oct))
499 err = netif_set_real_num_tx_queues(netdev, oct->num_oqs);
502 err = netif_set_real_num_rx_queues(netdev, oct->num_iqs);
506 octep_napi_add(oct);
507 octep_napi_enable(oct);
509 oct->link_info.admin_up = 1;
510 octep_ctrl_net_set_rx_state(oct, OCTEP_CTRL_NET_INVALID_VFID, true,
512 octep_ctrl_net_set_link_status(oct, OCTEP_CTRL_NET_INVALID_VFID, true,
514 oct->poll_non_ioq_intr = false;
517 oct->hw_ops.enable_io_queues(oct);
520 oct->hw_ops.enable_interrupts(oct);
522 octep_oq_dbell_init(oct);
524 ret = octep_ctrl_net_get_link_status(oct, OCTEP_CTRL_NET_INVALID_VFID);
531 octep_clean_irqs(oct);
533 octep_free_oqs(oct);
535 octep_free_iqs(oct);
550 struct octep_device *oct = netdev_priv(netdev);
554 octep_ctrl_net_set_link_status(oct, OCTEP_CTRL_NET_INVALID_VFID, false,
556 octep_ctrl_net_set_rx_state(oct, OCTEP_CTRL_NET_INVALID_VFID, false,
564 oct->link_info.admin_up = 0;
565 oct->link_info.oper_up = 0;
567 oct->hw_ops.disable_interrupts(oct);
568 octep_napi_disable(oct);
569 octep_napi_delete(oct);
571 octep_clean_irqs(oct);
572 octep_clean_iqs(oct);
574 oct->hw_ops.disable_io_queues(oct);
575 oct->hw_ops.reset_io_queues(oct);
576 octep_free_oqs(oct);
577 octep_free_iqs(oct);
579 oct->poll_non_ioq_intr = true;
580 queue_delayed_work(octep_wq, &oct->intr_poll_task,
629 struct octep_device *oct = netdev_priv(netdev);
641 if (q_no >= oct->num_iqs) {
643 q_no = q_no % oct->num_iqs;
646 iq = oct->iq[q_no];
664 ih->pkind = oct->pkind;
761 struct octep_device *oct = netdev_priv(netdev);
768 for (q = 0; q < oct->num_oqs; q++) {
769 struct octep_iq *iq = oct->iq[q];
770 struct octep_oq *oq = oct->oq[q];
794 struct octep_device *oct = container_of(work, struct octep_device,
796 struct net_device *netdev = oct->netdev;
816 struct octep_device *oct = netdev_priv(netdev);
818 queue_work(octep_wq, &oct->tx_timeout_task);
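The Tx timeout callback does no recovery itself; it only queues work, and octep_tx_timeout_task() performs the actual reset later in process context. A generic sketch of that defer-to-workqueue pattern with placeholder names (my_priv, my_wq):

#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct my_priv {
        struct work_struct tx_timeout_task;
};

static struct workqueue_struct *my_wq;          /* created at module init */

static void my_tx_timeout_task(struct work_struct *work)
{
        struct my_priv *priv = container_of(work, struct my_priv,
                                            tx_timeout_task);

        /* stop, reset and restart the queues here, in process context */
        (void)priv;
}

static void my_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
        struct my_priv *priv = netdev_priv(netdev);

        queue_work(my_wq, &priv->tx_timeout_task);      /* defer the reset */
}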
823 struct octep_device *oct = netdev_priv(netdev);
830 err = octep_ctrl_net_set_mac_addr(oct, OCTEP_CTRL_NET_INVALID_VFID,
835 memcpy(oct->mac_addr, addr->sa_data, ETH_ALEN);
843 struct octep_device *oct = netdev_priv(netdev);
847 link_info = &oct->link_info;
851 err = octep_ctrl_net_set_mtu(oct, OCTEP_CTRL_NET_INVALID_VFID, new_mtu,
854 oct->link_info.mtu = new_mtu;
880 struct octep_device *oct = container_of(work, struct octep_device,
883 if (!oct->poll_non_ioq_intr) {
884 dev_info(&oct->pdev->dev, "Interrupt poll task stopped.\n");
888 oct->hw_ops.poll_non_ioq_interrupts(oct);
889 queue_delayed_work(octep_wq, &oct->intr_poll_task,
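While the interface is down, non-IOQ interrupt sources are serviced by a self-rearming delayed work item: it polls through hw_ops, then requeues itself unless octep_open() has cleared poll_non_ioq_intr. A sketch of that shape; the 100 ms interval is a placeholder and the .work member path in container_of() is inferred:

static void intr_poll_task(struct work_struct *work)
{
        struct octep_device *oct = container_of(work, struct octep_device,
                                                intr_poll_task.work);

        if (!oct->poll_non_ioq_intr)
                return;                         /* stopped by octep_open() */

        oct->hw_ops.poll_non_ioq_interrupts(oct);
        queue_delayed_work(octep_wq, &oct->intr_poll_task,
                           msecs_to_jiffies(100));      /* interval assumed */
}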
898 * Check for heartbeat miss count. Uninitialize oct device if miss count
904 struct octep_device *oct = container_of(work, struct octep_device,
909 miss_cnt = atomic_inc_return(&oct->hb_miss_cnt);
910 if (miss_cnt < oct->conf->max_hb_miss_cnt) {
911 queue_delayed_work(octep_wq, &oct->hb_task,
912 msecs_to_jiffies(oct->conf->hb_interval * 1000));
916 dev_err(&oct->pdev->dev, "Missed %u heartbeats. Uninitializing\n",
919 if (netif_running(oct->netdev))
920 dev_close(oct->netdev);
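The heartbeat task counts consecutive misses with an atomic counter and rearms itself only while the count stays below the configured limit; once the limit is hit the port is taken down. A sketch of that tail end; dev_close() must run under the RTNL lock, so the rtnl_lock()/rtnl_unlock() pair shown is an assumption about the unmatched surrounding lines:

miss_cnt = atomic_inc_return(&oct->hb_miss_cnt);
if (miss_cnt < oct->conf->max_hb_miss_cnt) {
        queue_delayed_work(octep_wq, &oct->hb_task,
                           msecs_to_jiffies(oct->conf->hb_interval * 1000));
        return;
}

rtnl_lock();                    /* dev_close() requires RTNL */
if (netif_running(oct->netdev))
        dev_close(oct->netdev);
rtnl_unlock();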
933 struct octep_device *oct = container_of(work, struct octep_device,
936 octep_ctrl_net_recv_fw_messages(oct);
939 static const char *octep_devid_to_str(struct octep_device *oct)
941 switch (oct->chip_id) {
954 * @oct: Octeon device private data structure.
958 int octep_device_setup(struct octep_device *oct)
960 struct pci_dev *pdev = oct->pdev;
963 /* allocate memory for oct->conf */
964 oct->conf = kzalloc(sizeof(*oct->conf), GFP_KERNEL);
965 if (!oct->conf)
970 oct->mmio[i].hw_addr =
971 ioremap(pci_resource_start(oct->pdev, i * 2),
972 pci_resource_len(oct->pdev, i * 2));
973 if (!oct->mmio[i].hw_addr)
976 oct->mmio[i].mapped = 1;
979 oct->chip_id = pdev->device;
980 oct->rev_id = pdev->revision;
983 switch (oct->chip_id) {
987 octep_devid_to_str(oct), OCTEP_MAJOR_REV(oct),
988 OCTEP_MINOR_REV(oct));
989 octep_device_setup_cn93_pf(oct);
997 oct->pkind = CFG_GET_IQ_PKIND(oct->conf);
999 ret = octep_ctrl_net_init(oct);
1003 atomic_set(&oct->hb_miss_cnt, 0);
1004 INIT_DELAYED_WORK(&oct->hb_task, octep_hb_timeout_task);
1005 queue_delayed_work(octep_wq, &oct->hb_task,
1006 msecs_to_jiffies(oct->conf->hb_interval * 1000));
1013 iounmap(oct->mmio[i].hw_addr);
1015 kfree(oct->conf);
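octep_device_setup() maps one MMIO region per even-numbered PCI BAR (index i * 2) and, on any later failure, unmaps whatever was already mapped before freeing the configuration. A sketch of the mapping loop and its unwind as a standalone helper; the region-count macro name is assumed:

static int map_mmio_regions(struct octep_device *oct)
{
        int i;

        for (i = 0; i < OCTEP_MMIO_REGIONS; i++) {      /* macro name assumed */
                oct->mmio[i].hw_addr =
                        ioremap(pci_resource_start(oct->pdev, i * 2),
                                pci_resource_len(oct->pdev, i * 2));
                if (!oct->mmio[i].hw_addr)
                        goto unmap_prev;
                oct->mmio[i].mapped = 1;
        }
        return 0;

unmap_prev:
        while (--i >= 0) {
                iounmap(oct->mmio[i].hw_addr);
                oct->mmio[i].mapped = 0;
        }
        return -ENOMEM;
}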
1022 * @oct: Octeon device private data structure.
1026 static void octep_device_cleanup(struct octep_device *oct)
1030 oct->poll_non_ioq_intr = false;
1031 cancel_delayed_work_sync(&oct->intr_poll_task);
1032 cancel_work_sync(&oct->ctrl_mbox_task);
1034 dev_info(&oct->pdev->dev, "Cleaning up Octeon Device ...\n");
1037 vfree(oct->mbox[i]);
1038 oct->mbox[i] = NULL;
1041 octep_ctrl_net_uninit(oct);
1042 cancel_delayed_work_sync(&oct->hb_task);
1044 oct->hw_ops.soft_reset(oct);
1046 if (oct->mmio[i].mapped)
1047 iounmap(oct->mmio[i].hw_addr);
1050 kfree(oct->conf);
1051 oct->conf = NULL;
1191 struct octep_device *oct = pci_get_drvdata(pdev);
1194 if (!oct)
1197 netdev = oct->netdev;
1201 cancel_work_sync(&oct->tx_timeout_task);
1202 octep_device_cleanup(oct);