/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 **********************************************************************/

/*! \file  octeon_network.h
 *  \brief Host NIC Driver: Structure and Macro definitions used by NIC Module.
 */

#ifndef __OCTEON_NETWORK_H__
#define __OCTEON_NETWORK_H__
#include <linux/ptp_clock_kernel.h>

#define LIO_MAX_MTU_SIZE (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE)
#define LIO_MIN_MTU_SIZE ETH_MIN_MTU
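
/* Illustrative sketch (an assumption, not something defined in this header):
 * the probe path is expected to publish these bounds to the stack roughly as
 *
 *	netdev->min_mtu = LIO_MIN_MTU_SIZE;
 *	netdev->max_mtu = LIO_MAX_MTU_SIZE;
 *
 * so that out-of-range MTU requests are rejected by the core before
 * liquidio_change_mtu() (declared below) is ever invoked.
 */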

/* Bit mask values for lio->ifstate */
#define   LIO_IFSTATE_DROQ_OPS             0x01
#define   LIO_IFSTATE_REGISTERED           0x02
#define   LIO_IFSTATE_RUNNING              0x04
#define   LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08
#define   LIO_IFSTATE_RESETTING            0x10

struct liquidio_if_cfg_resp {
        u64 rh;
        struct liquidio_if_cfg_info cfg_info;
        u64 status;
};

#define LIO_IFCFG_WAIT_TIME    3000 /* In milliseconds */
#define LIQUIDIO_NDEV_STATS_POLL_TIME_MS 200

/* Structure of a node in list of gather components maintained by
 * NIC driver for each network device.
 */
struct octnic_gather {
        /* List manipulation. Next and prev pointers. */
        struct list_head list;

        /* Size of the gather component at sg in bytes. */
        int sg_size;

        /* Number of bytes that sg was adjusted to make it 8B-aligned. */
        int adjust;

        /* Gather component that can accommodate max sized fragment list
         * received from the IP layer.
         */
        struct octeon_sg_entry *sg;

        dma_addr_t sg_dma_ptr;
};
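
/* Illustrative sketch (an assumption about the callers, not code from this
 * header): a transmit path that needs a gather component is expected to pop
 * a free node for its queue under the matching glist_lock, roughly
 *
 *	spin_lock(&lio->glist_lock[iq]);
 *	node = lio_list_delete_head(&lio->glist[iq]);
 *	spin_unlock(&lio->glist_lock[iq]);
 *	g = node ? list_entry(node, struct octnic_gather, list) : NULL;
 *
 * where "iq", "node" and "g" are hypothetical locals; see struct lio,
 * lio_setup_glists() and lio_list_delete_head() below.
 */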

struct oct_nic_stats_resp {
        u64 rh;
        struct oct_link_stats stats;
        u64 status;
};

struct oct_nic_vf_stats_resp {
        u64 rh;
        u64 spoofmac_cnt;
        u64 status;
};

struct oct_nic_stats_ctrl {
        struct completion complete;
        struct net_device *netdev;
};

struct oct_nic_seapi_resp {
        u64 rh;
        union {
                u32 fec_setting;
                u32 speed;
        };
        u64 status;
};

/** LiquidIO per-interface network private data */
struct lio {
        /** State of the interface. Rx/Tx happens only in the RUNNING state. */
        atomic_t ifstate;

        /** Octeon Interface index number. This device will be represented as
         * oct<ifidx> in the system.
         */
        int ifidx;

        /** Octeon Input queue to use to transmit for this network interface. */
        int txq;

        /** Octeon Output queue from which pkts arrive
         * for this network interface.
         */
        int rxq;

        /** Guards each glist */
        spinlock_t *glist_lock;

        /** Array of gather component linked lists */
        struct list_head *glist;
        void **glists_virt_base;
        dma_addr_t *glists_dma_base;
        u32 glist_entry_size;

        /** Pointer to the NIC properties for the Octeon device this network
         * interface is associated with.
         */
        struct octdev_props *octprops;

        /** Pointer to the octeon device structure. */
        struct octeon_device *oct_dev;

        struct net_device *netdev;

        /** Link information sent by the core application for this interface. */
        struct oct_link_info linfo;

        /** counter of link changes */
        u64 link_changes;

        /** Size of Tx queue for this octeon device. */
        u32 tx_qsize;

        /** Size of Rx queue for this octeon device. */
        u32 rx_qsize;

        /** MTU size of this octeon device. */
        u32 mtu;

        /** msg level flag per interface. */
        u32 msg_enable;

        /** Copy of Interface capabilities: TSO, TSO6, LRO, Checksums. */
        u64 dev_capability;

        /* Copy of transmit encapsulation capabilities:
         * TSO, TSO6, Checksums for this device for Kernel
         * 3.10.0 onwards
         */
        u64 enc_dev_capability;

        /** Copy of beacon reg in phy */
        u32 phy_beacon_val;

        /** Copy of ctrl reg in phy */
        u32 led_ctrl_val;

        /* PTP clock information */
        struct ptp_clock_info ptp_info;
        struct ptp_clock *ptp_clock;
        s64 ptp_adjust;

        /* for atomic access to Octeon PTP reg and data struct */
        spinlock_t ptp_lock;

        /* Interface info */
        u32 intf_open;

        /* work queue for txq status */
        struct cavium_wq txq_status_wq;

        /* work queue for rxq oom status */
        struct cavium_wq rxq_status_wq[MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES];

        /* work queue for link status */
        struct cavium_wq link_status_wq;

        /* work queue to regularly send local time to octeon firmware */
        struct cavium_wq sync_octeon_time_wq;

        int netdev_uc_count;
        struct cavium_wk stats_wk;
};

#define LIO_SIZE         (sizeof(struct lio))
#define GET_LIO(netdev)  ((struct lio *)netdev_priv(netdev))
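
/* Usage example (illustrative): every ndo/ethtool callback that receives a
 * struct net_device can recover the per-interface state with GET_LIO(),
 * since struct lio simply lives in the netdev_priv() area, e.g.
 *
 *	struct lio *lio = GET_LIO(netdev);
 *	struct octeon_device *oct = lio->oct_dev;
 */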

#define LIO_MAX_CORES                16

/**
 * \brief Enable or disable feature
 * @param netdev  pointer to network device
 * @param cmd     Command that just requires acknowledgment
 * @param param1  Parameter to command
 */
int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1);

int setup_rx_oom_poll_fn(struct net_device *netdev);

void cleanup_rx_oom_poll_fn(struct net_device *netdev);

/**
 * \brief Link control command completion callback
 * @param nctrl_ptr pointer to control packet structure
 *
 * This routine is called by the callback function when a ctrl pkt sent to
 * core app completes. The nctrl_ptr contains a copy of the command type
 * and data sent to the core app. This routine is only called if the ctrl
 * pkt was sent successfully to the core app.
 */
void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr);

int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
                             u32 num_iqs, u32 num_oqs);

irqreturn_t liquidio_msix_intr_handler(int irq __attribute__((unused)),
                                       void *dev);

int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs);

void lio_fetch_stats(struct work_struct *work);

int lio_wait_for_clean_oq(struct octeon_device *oct);
/**
 * \brief Register ethtool operations
 * @param netdev  pointer to network device
 */
void liquidio_set_ethtool_ops(struct net_device *netdev);

void lio_delete_glists(struct lio *lio);

int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_qs);

int liquidio_get_speed(struct lio *lio);
int liquidio_set_speed(struct lio *lio, int speed);
int liquidio_get_fec(struct lio *lio);
int liquidio_set_fec(struct lio *lio, int on_off);

/**
 * \brief Net device change_mtu
 * @param netdev network device
 */
int liquidio_change_mtu(struct net_device *netdev, int new_mtu);
#define LIO_CHANGE_MTU_SUCCESS 1
#define LIO_CHANGE_MTU_FAIL    2

#define SKB_ADJ_MASK  0x3F
#define SKB_ADJ       (SKB_ADJ_MASK + 1)

#define MIN_SKB_SIZE       256 /* 8 bytes and more - 8 bytes for PTP */
#define LIO_RXBUFFER_SZ    2048

static inline void
*recv_buffer_alloc(struct octeon_device *oct,
                   struct octeon_skb_page_info *pg_info)
{
        struct page *page;
        struct sk_buff *skb;
        struct octeon_skb_page_info *skb_pg_info;

        page = alloc_page(GFP_ATOMIC);
        if (unlikely(!page))
                return NULL;

        skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ);
        if (unlikely(!skb)) {
                __free_page(page);
                pg_info->page = NULL;
                return NULL;
        }

        if ((unsigned long)skb->data & SKB_ADJ_MASK) {
                u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);

                skb_reserve(skb, r);
        }

        skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
        /* Get DMA info */
        pg_info->dma = dma_map_page(&oct->pci_dev->dev, page, 0,
                                    PAGE_SIZE, DMA_FROM_DEVICE);

        /* Mapping failed!! */
        if (dma_mapping_error(&oct->pci_dev->dev, pg_info->dma)) {
                __free_page(page);
                dev_kfree_skb_any((struct sk_buff *)skb);
                pg_info->page = NULL;
                return NULL;
        }

        pg_info->page = page;
        pg_info->page_offset = 0;
        skb_pg_info->page = page;
        skb_pg_info->page_offset = 0;
        skb_pg_info->dma = pg_info->dma;

        return (void *)skb;
}

static inline void
*recv_buffer_fast_alloc(u32 size)
{
        struct sk_buff *skb;
        struct octeon_skb_page_info *skb_pg_info;

        skb = dev_alloc_skb(size + SKB_ADJ);
        if (unlikely(!skb))
                return NULL;

        if ((unsigned long)skb->data & SKB_ADJ_MASK) {
                u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);

                skb_reserve(skb, r);
        }

        skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
        skb_pg_info->page = NULL;
        skb_pg_info->page_offset = 0;
        skb_pg_info->dma = 0;

        return skb;
}

static inline int
recv_buffer_recycle(struct octeon_device *oct, void *buf)
{
        struct octeon_skb_page_info *pg_info = buf;

        if (!pg_info->page) {
                dev_err(&oct->pci_dev->dev, "%s: pg_info->page NULL\n",
                        __func__);
                return -ENOMEM;
        }

        if (unlikely(page_count(pg_info->page) != 1) ||
            unlikely(page_to_nid(pg_info->page) != numa_node_id())) {
                dma_unmap_page(&oct->pci_dev->dev,
                               pg_info->dma, (PAGE_SIZE << 0),
                               DMA_FROM_DEVICE);
                pg_info->dma = 0;
                pg_info->page = NULL;
                pg_info->page_offset = 0;
                return -ENOMEM;
        }

        /* Flip to other half of the buffer */
        if (pg_info->page_offset == 0)
                pg_info->page_offset = LIO_RXBUFFER_SZ;
        else
                pg_info->page_offset = 0;
        page_ref_inc(pg_info->page);

        return 0;
}

static inline void
*recv_buffer_reuse(struct octeon_device *oct, void *buf)
{
        struct octeon_skb_page_info *pg_info = buf, *skb_pg_info;
        struct sk_buff *skb;

        skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ);
        if (unlikely(!skb)) {
                dma_unmap_page(&oct->pci_dev->dev,
                               pg_info->dma, (PAGE_SIZE << 0),
                               DMA_FROM_DEVICE);
                return NULL;
        }

        if ((unsigned long)skb->data & SKB_ADJ_MASK) {
                u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);

                skb_reserve(skb, r);
        }

        skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
        skb_pg_info->page = pg_info->page;
        skb_pg_info->page_offset = pg_info->page_offset;
        skb_pg_info->dma = pg_info->dma;

        return skb;
}

static inline void
recv_buffer_destroy(void *buffer, struct octeon_skb_page_info *pg_info)
{
        struct sk_buff *skb = (struct sk_buff *)buffer;

        put_page(pg_info->page);
        pg_info->dma = 0;
        pg_info->page = NULL;
        pg_info->page_offset = 0;

        if (skb)
                dev_kfree_skb_any(skb);
}

static inline void recv_buffer_free(void *buffer)
{
        struct sk_buff *skb = (struct sk_buff *)buffer;
        struct octeon_skb_page_info *pg_info;

        pg_info = ((struct octeon_skb_page_info *)(skb->cb));

        if (pg_info->page) {
                put_page(pg_info->page);
                pg_info->dma = 0;
                pg_info->page = NULL;
                pg_info->page_offset = 0;
        }

        dev_kfree_skb_any((struct sk_buff *)buffer);
}

static inline void
recv_buffer_fast_free(void *buffer)
{
        dev_kfree_skb_any((struct sk_buff *)buffer);
}

static inline void tx_buffer_free(void *buffer)
{
        dev_kfree_skb_any((struct sk_buff *)buffer);
}

#define lio_dma_alloc(oct, size, dma_addr) \
        dma_alloc_coherent(&(oct)->pci_dev->dev, size, dma_addr, GFP_KERNEL)
#define lio_dma_free(oct, size, virt_addr, dma_addr) \
        dma_free_coherent(&(oct)->pci_dev->dev, size, virt_addr, dma_addr)
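
/* Usage example (illustrative): the wrappers above hide the pci_dev
 * dereference; "oct" is assumed to be a valid struct octeon_device whose
 * pci_dev has already been set up.
 *
 *	dma_addr_t dma;
 *	void *virt = lio_dma_alloc(oct, PAGE_SIZE, &dma);
 *
 *	if (virt)
 *		lio_dma_free(oct, PAGE_SIZE, virt, dma);
 */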

static inline
void *get_rbd(struct sk_buff *skb)
{
        struct octeon_skb_page_info *pg_info;
        unsigned char *va;

        pg_info = ((struct octeon_skb_page_info *)(skb->cb));
        va = page_address(pg_info->page) + pg_info->page_offset;

        return va;
}

static inline u64
lio_map_ring(void *buf)
{
        dma_addr_t dma_addr;

        struct sk_buff *skb = (struct sk_buff *)buf;
        struct octeon_skb_page_info *pg_info;

        pg_info = ((struct octeon_skb_page_info *)(skb->cb));
        if (!pg_info->page) {
                pr_err("%s: pg_info->page NULL\n", __func__);
                WARN_ON(1);
        }

        /* Get DMA info */
        dma_addr = pg_info->dma;
        if (!pg_info->dma) {
                pr_err("%s: ERROR it should be already available\n",
                       __func__);
                WARN_ON(1);
        }
        dma_addr += pg_info->page_offset;

        return (u64)dma_addr;
}

static inline void
lio_unmap_ring(struct pci_dev *pci_dev,
               u64 buf_ptr)

{
        dma_unmap_page(&pci_dev->dev,
                       buf_ptr, (PAGE_SIZE << 0),
                       DMA_FROM_DEVICE);
}

static inline void *octeon_fast_packet_alloc(u32 size)
{
        return recv_buffer_fast_alloc(size);
}

static inline void octeon_fast_packet_next(struct octeon_droq *droq,
                                           struct sk_buff *nicbuf,
                                           int copy_len,
                                           int idx)
{
        skb_put_data(nicbuf, get_rbd(droq->recv_buf_list[idx].buffer),
                     copy_len);
}

/**
 * \brief check interface state
 * @param lio per-network private data
 * @param state_flag flag state to check
 */
static inline int ifstate_check(struct lio *lio, int state_flag)
{
        return atomic_read(&lio->ifstate) & state_flag;
}

/**
 * \brief set interface state
 * @param lio per-network private data
 * @param state_flag flag state to set
 */
static inline void ifstate_set(struct lio *lio, int state_flag)
{
        atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag));
}

/**
 * \brief clear interface state
 * @param lio per-network private data
 * @param state_flag flag state to clear
 */
static inline void ifstate_reset(struct lio *lio, int state_flag)
{
        atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag)));
}
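
/* Usage sketch (illustrative, not lifted from the driver): the open/stop
 * paths are expected to bracket the datapath with these helpers, e.g.
 *
 *	ifstate_set(lio, LIO_IFSTATE_RUNNING);		(ndo_open)
 *	...
 *	if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
 *		return;					(skip work while down)
 *	...
 *	ifstate_reset(lio, LIO_IFSTATE_RUNNING);	(ndo_stop)
 *
 * Note that ifstate_set()/ifstate_reset() are read-modify-write on an
 * atomic_t and are not themselves atomic updates; callers are assumed to
 * serialize state changes (e.g. under rtnl).
 */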

/**
 * \brief wait for all pending requests to complete
 * @param oct Pointer to Octeon device
 *
 * Called during shutdown sequence
 */
static inline int wait_for_pending_requests(struct octeon_device *oct)
{
        int i, pcount = 0;

        for (i = 0; i < MAX_IO_PENDING_PKT_COUNT; i++) {
                pcount = atomic_read(
                    &oct->response_list[OCTEON_ORDERED_SC_LIST]
                         .pending_req_count);
                if (pcount)
                        schedule_timeout_uninterruptible(HZ / 10);
                else
                        break;
        }

        if (pcount)
                return 1;

        return 0;
}

/**
 * \brief Stop Tx queues
 * @param netdev network device
 */
static inline void stop_txqs(struct net_device *netdev)
{
        int i;

        for (i = 0; i < netdev->real_num_tx_queues; i++)
                netif_stop_subqueue(netdev, i);
}

/**
 * \brief Wake Tx queues
 * @param netdev network device
 */
static inline void wake_txqs(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);
        int i, qno;

        for (i = 0; i < netdev->real_num_tx_queues; i++) {
                qno = lio->linfo.txpciq[i % lio->oct_dev->num_iqs].s.q_no;

                if (__netif_subqueue_stopped(netdev, i)) {
                        INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
                                                  tx_restart, 1);
                        netif_wake_subqueue(netdev, i);
                }
        }
}

/**
 * \brief Start Tx queues
 * @param netdev network device
 */
static inline void start_txqs(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);
        int i;

        if (lio->linfo.link.s.link_up) {
                for (i = 0; i < netdev->real_num_tx_queues; i++)
                        netif_start_subqueue(netdev, i);
        }
}

static inline int skb_iq(struct octeon_device *oct, struct sk_buff *skb)
{
        return skb->queue_mapping % oct->num_iqs;
}

/**
 * Remove the node at the head of the list. The list would be empty at
 * the end of this call if there are no more nodes in the list.
 */
static inline struct list_head *lio_list_delete_head(struct list_head *root)
{
        struct list_head *node;

        if (list_empty_careful(root))
                node = NULL;
        else
                node = root->next;

        if (node)
                list_del(node);

        return node;
}

#endif