xref: /openbmc/linux/include/linux/qed/qed_if.h (revision a8fe58ce)
1 /* QLogic qed NIC Driver
2  *
3  * Copyright (c) 2015 QLogic Corporation
4  *
5  * This software is available under the terms of the GNU General Public License
6  * (GPL) Version 2, available from the file COPYING in the main directory of
7  * this source tree.
8  */
9 
10 #ifndef _QED_IF_H
11 #define _QED_IF_H
12 
13 #include <linux/types.h>
14 #include <linux/interrupt.h>
15 #include <linux/netdevice.h>
16 #include <linux/pci.h>
17 #include <linux/skbuff.h>
18 #include <linux/types.h>
19 #include <asm/byteorder.h>
20 #include <linux/io.h>
21 #include <linux/compiler.h>
22 #include <linux/kernel.h>
23 #include <linux/list.h>
24 #include <linux/slab.h>
25 #include <linux/qed/common_hsi.h>
26 #include <linux/qed/qed_chain.h>
27 
/* LED modes that can be requested via qed_common_ops.set_led(). */
enum qed_led_mode {
	QED_LED_MODE_OFF,
	QED_LED_MODE_ON,
	QED_LED_MODE_RESTORE	/* presumably returns LED control to FW default -- confirm */
};
33 
/* Raw register access helpers; reg_addr is an ioremap'd (CPU-mapped)
 * register address.  Arguments are fully parenthesized so composite
 * expressions (e.g. `x ? a : b` or `a | b`) expand correctly.
 */
#define DIRECT_REG_WR(reg_addr, val) writel((u32)(val), \
					    (void __iomem *)(reg_addr))

#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))
38 
#define QED_COALESCE_MAX 0xFF	/* largest accepted coalescing value; fits in a u8 */

/* Forward declaration - the full qed_dev definition is private to the qed core. */
struct qed_dev;
43 
/* Ethernet-protocol resource requirements for a physical function. */
struct qed_eth_pf_params {
	/* The following parameters are used during HW-init
	 * and these parameters need to be passed as arguments
	 * to update_pf_params routine invoked before slowpath start
	 */
	u16 num_cons;	/* number of connections to provision for this PF */
};
51 
/* Aggregate of all per-protocol PF parameters passed to update_pf_params(). */
struct qed_pf_params {
	struct qed_eth_pf_params eth_pf_params;
};
55 
/* Interrupt modes the device may be configured to use. */
enum qed_int_mode {
	QED_INT_MODE_INTA,	/* legacy INTx */
	QED_INT_MODE_MSIX,
	QED_INT_MODE_MSI,
	QED_INT_MODE_POLL,	/* no interrupts; polled operation */
};
62 
/* Per-status-block state shared between the qed core and protocol drivers. */
struct qed_sb_info {
	struct status_block	*sb_virt;	/* CPU-visible status block */
	dma_addr_t		sb_phys;	/* DMA address of sb_virt */
	u32			sb_ack; /* Last given ack */
	u16			igu_sb_id;	/* status block ID within the IGU */
	void __iomem		*igu_addr;	/* register written by qed_sb_ack() */
	u8			flags;
#define QED_SB_INFO_INIT        0x1	/* NOTE(review): presumably set by sb_init -- confirm */
#define QED_SB_INFO_SETUP       0x2	/* NOTE(review): presumably set once IGU setup done -- confirm */

	struct qed_dev		*cdev;	/* owning device */
};
75 
/* Static device identity/capabilities reported to protocol drivers. */
struct qed_dev_info {
	unsigned long	pci_mem_start;	/* PCI BAR range mapped for the device */
	unsigned long	pci_mem_end;
	unsigned int	pci_irq;
	u8		num_hwfns;	/* number of HW functions on this device */

	u8		hw_mac[ETH_ALEN];	/* permanent MAC address */
	bool		is_mf;		/* device is in multi-function mode */

	/* FW version */
	u16		fw_major;
	u16		fw_minor;
	u16		fw_rev;
	u16		fw_eng;

	/* MFW version */
	u32		mfw_rev;

	u32		flash_size;	/* NVM flash size, presumably in bytes -- confirm */
	u8		mf_mode;	/* multi-function mode variant */
};
97 
/* Kind of status block being initialized via qed_common_ops.sb_init(). */
enum qed_sb_type {
	QED_SB_TYPE_L2_QUEUE,
};
101 
/* Protocol personality requested at probe time. */
enum qed_protocol {
	QED_PROTOCOL_ETH,
};
105 
/* Link configuration request for qed_common_ops.set_link().  Values are
 * used to override the default link configuration; override_flags selects
 * which of the fields below should be applied (QED_LINK_OVERRIDE_* bits).
 */
struct qed_link_params {
	bool	link_up;	/* request link up (true) or down (false) */

#define QED_LINK_OVERRIDE_SPEED_AUTONEG         BIT(0)
#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS      BIT(1)
#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED    BIT(2)
#define QED_LINK_OVERRIDE_PAUSE_CONFIG          BIT(3)
	u32	override_flags;
	bool	autoneg;
	u32	adv_speeds;	/* speeds to advertise */
	u32	forced_speed;	/* fixed speed when autoneg is overridden off */
#define QED_LINK_PAUSE_AUTONEG_ENABLE           BIT(0)
#define QED_LINK_PAUSE_RX_ENABLE                BIT(1)
#define QED_LINK_PAUSE_TX_ENABLE                BIT(2)
	u32	pause_config;	/* QED_LINK_PAUSE_* bits */
};
122 
/* Current link state, filled by qed_common_ops.get_link() and passed to
 * the link_update callback.  Capability fields reuse ethtool encodings.
 */
struct qed_link_output {
	bool	link_up;

	u32	supported_caps;         /* In SUPPORTED defs */
	u32	advertised_caps;        /* In ADVERTISED defs */
	u32	lp_caps;                /* In ADVERTISED defs */
	u32	speed;                  /* In Mb/s */
	u8	duplex;                 /* In DUPLEX defs */
	u8	port;                   /* In PORT defs */
	bool	autoneg;
	u32	pause_config;
};
135 
#define QED_DRV_VER_STR_SIZE 12
/* Parameters for qed_common_ops.slowpath_start(). */
struct qed_slowpath_params {
	u32	int_mode;	/* requested qed_int_mode */
	u8	drv_major;	/* protocol-driver version components */
	u8	drv_minor;
	u8	drv_rev;
	u8	drv_eng;
	u8	name[QED_DRV_VER_STR_SIZE];	/* driver name string */
};
145 
#define ILT_PAGE_SIZE_TCFC 0x8000 /* 32KB */

/* Fastpath interrupt resources, filled by qed_common_ops.get_fp_int(). */
struct qed_int_info {
	struct msix_entry	*msix;	/* MSI-X vector table (MSI-X mode) */
	u8			msix_cnt;	/* number of entries in msix */

	/* This should be updated by the protocol driver */
	u8			used_cnt;
};
155 
/* Callbacks a protocol driver provides to the qed core; presumably
 * invoked on asynchronous events -- confirm against qed core usage.
 */
struct qed_common_cb_ops {
	void	(*link_update)(void			*dev,
			       struct qed_link_output	*link);	/* link state changed */
};
160 
/* Protocol-agnostic operations exported by the qed core to protocol
 * drivers.  Typical lifecycle: probe -> update_pf_params ->
 * slowpath_start -> ... -> slowpath_stop -> remove.
 */
struct qed_common_ops {
	/* Probe the PCI device for the given protocol; returns the qed
	 * device handle used by all other ops (dp_* set debug output).
	 */
	struct qed_dev*	(*probe)(struct pci_dev *dev,
				 enum qed_protocol protocol,
				 u32 dp_module, u8 dp_level);

	/* Tear down a device previously returned by probe. */
	void		(*remove)(struct qed_dev *cdev);

	int		(*set_power_state)(struct qed_dev *cdev,
					   pci_power_t state);

	/* Record the driver's name and version strings. */
	void		(*set_id)(struct qed_dev *cdev,
				  char name[],
				  char ver_str[]);

	/* Client drivers need to make this call before slowpath_start.
	 * PF params required for the call before slowpath_start is
	 * documented within the qed_pf_params structure definition.
	 */
	void		(*update_pf_params)(struct qed_dev *cdev,
					    struct qed_pf_params *params);
	int		(*slowpath_start)(struct qed_dev *cdev,
					  struct qed_slowpath_params *params);

	int		(*slowpath_stop)(struct qed_dev *cdev);

	/* Requests to use `cnt' interrupts for fastpath.
	 * upon success, returns number of interrupts allocated for fastpath.
	 */
	int		(*set_fp_int)(struct qed_dev *cdev,
				      u16 cnt);

	/* Fills `info' with pointers required for utilizing interrupts */
	int		(*get_fp_int)(struct qed_dev *cdev,
				      struct qed_int_info *info);

	/* Initialize the status block `sb_info' (virtual/DMA addresses
	 * supplied by the caller) for status-block id `sb_id'.
	 */
	u32		(*sb_init)(struct qed_dev *cdev,
				   struct qed_sb_info *sb_info,
				   void *sb_virt_addr,
				   dma_addr_t sb_phy_addr,
				   u16 sb_id,
				   enum qed_sb_type type);

	/* Release a status block previously set up with sb_init. */
	u32		(*sb_release)(struct qed_dev *cdev,
				      struct qed_sb_info *sb_info,
				      u16 sb_id);

	/* Register/unregister a handler invoked in simd (single-ISR)
	 * interrupt mode for the given index.
	 */
	void		(*simd_handler_config)(struct qed_dev *cdev,
					       void *token,
					       int index,
					       void (*handler)(void *));

	void		(*simd_handler_clean)(struct qed_dev *cdev,
					      int index);
/**
 * @brief set_link - set links according to params
 *
 * @param cdev
 * @param params - values used to override the default link configuration
 *
 * @return 0 on success, error otherwise.
 */
	int		(*set_link)(struct qed_dev *cdev,
				    struct qed_link_params *params);

/**
 * @brief get_link - returns the current link state.
 *
 * @param cdev
 * @param if_link - structure to be filled with current link configuration.
 */
	void		(*get_link)(struct qed_dev *cdev,
				    struct qed_link_output *if_link);

/**
 * @brief - drains chip in case Tx completions fail to arrive due to pause.
 *
 * @param cdev
 */
	int		(*drain)(struct qed_dev *cdev);

/**
 * @brief update_msglvl - update module debug level
 *
 * @param cdev
 * @param dp_module
 * @param dp_level
 */
	void		(*update_msglvl)(struct qed_dev *cdev,
					 u32 dp_module,
					 u8 dp_level);

	/* Allocate/free a HW chain of `num_elems' elements of
	 * `elem_size' bytes each.
	 */
	int		(*chain_alloc)(struct qed_dev *cdev,
				       enum qed_chain_use_mode intended_use,
				       enum qed_chain_mode mode,
				       u16 num_elems,
				       size_t elem_size,
				       struct qed_chain *p_chain);

	void		(*chain_free)(struct qed_dev *cdev,
				      struct qed_chain *p_chain);

/**
 * @brief set_led - Configure LED mode
 *
 * @param cdev
 * @param mode - LED mode
 *
 * @return 0 on success, error otherwise.
 */
	int (*set_led)(struct qed_dev *cdev,
		       enum qed_led_mode mode);
};
273 
274 /**
275  * @brief qed_get_protocol_version
276  *
277  * @param protocol
278  *
279  * @return version supported by qed for given protocol driver
280  */
281 u32 qed_get_protocol_version(enum qed_protocol protocol);
282 
/* Bit-field helpers.  `_name'/`name' must have matching _MASK and _SHIFT
 * definitions.  All macro arguments are parenthesized so that composite
 * expressions (e.g. `a | b') expand correctly.
 */

/* Mask `_value' in place to the field's width. */
#define MASK_FIELD(_name, _value) \
	((_value) &= (_name ## _MASK))

/* Return `_value' masked and positioned at the field's location. */
#define FIELD_VALUE(_name, _value) \
	(((_value) & _name ## _MASK) << _name ## _SHIFT)

/* Replace the named field within `value' with `flag'. */
#define SET_FIELD(value, name, flag)			       \
	do {						       \
		(value) &= ~(name ## _MASK << name ## _SHIFT); \
		(value) |= (((u64)(flag)) << (name ## _SHIFT)); \
	} while (0)

/* Extract the named field from `value'. */
#define GET_FIELD(value, name) \
	(((value) >> (name ## _SHIFT)) & name ## _MASK)
297 
/* Debug print definitions.
 * DP_ERR prints unconditionally; DP_NOTICE/DP_INFO/DP_VERBOSE are gated
 * on the device's configured dp_level (and, for DP_VERBOSE, dp_module
 * mask).  All prefix messages with "[function:line(device-name)]".
 */
#define DP_ERR(cdev, fmt, ...)						     \
		pr_err("[%s:%d(%s)]" fmt,				     \
		       __func__, __LINE__,				     \
		       DP_NAME(cdev) ? DP_NAME(cdev) : "",		     \
		       ## __VA_ARGS__)

#define DP_NOTICE(cdev, fmt, ...)				      \
	do {							      \
		if (unlikely((cdev)->dp_level <= QED_LEVEL_NOTICE)) { \
			pr_notice("[%s:%d(%s)]" fmt,		      \
				  __func__, __LINE__,		      \
				  DP_NAME(cdev) ? DP_NAME(cdev) : "", \
				  ## __VA_ARGS__);		      \
								      \
		}						      \
	} while (0)

#define DP_INFO(cdev, fmt, ...)					      \
	do {							      \
		if (unlikely((cdev)->dp_level <= QED_LEVEL_INFO)) {   \
			pr_notice("[%s:%d(%s)]" fmt,		      \
				  __func__, __LINE__,		      \
				  DP_NAME(cdev) ? DP_NAME(cdev) : "", \
				  ## __VA_ARGS__);		      \
		}						      \
	} while (0)

#define DP_VERBOSE(cdev, module, fmt, ...)				\
	do {								\
		if (unlikely(((cdev)->dp_level <= QED_LEVEL_VERBOSE) &&	\
			     ((cdev)->dp_module & module))) {		\
			pr_notice("[%s:%d(%s)]" fmt,			\
				  __func__, __LINE__,			\
				  DP_NAME(cdev) ? DP_NAME(cdev) : "",	\
				  ## __VA_ARGS__);			\
		}							\
	} while (0)
336 
/* Debug verbosity levels; lower value = more verbose.  A message is
 * emitted when its level is >= the device's configured dp_level
 * (see the DP_* macros above).
 */
enum DP_LEVEL {
	QED_LEVEL_VERBOSE	= 0x0,
	QED_LEVEL_INFO		= 0x1,
	QED_LEVEL_NOTICE	= 0x2,
	QED_LEVEL_ERR		= 0x3,
};
343 
/* Encoding of a combined debug word: presumably the two top bits select
 * info/notice level and the low 30 bits carry a verbose module mask --
 * confirm against the module-parameter parsing in the qed core.
 */
#define QED_LOG_LEVEL_SHIFT     (30)
#define QED_LOG_VERBOSE_MASK    (0x3fffffff)
#define QED_LOG_INFO_MASK       (0x40000000)
#define QED_LOG_NOTICE_MASK     (0x80000000)
348 
/* Per-module bits tested against cdev->dp_module by DP_VERBOSE(). */
enum DP_MODULE {
	QED_MSG_SPQ	= 0x10000,
	QED_MSG_STATS	= 0x20000,
	QED_MSG_DCB	= 0x40000,
	QED_MSG_IOV	= 0x80000,
	QED_MSG_SP	= 0x100000,
	QED_MSG_STORAGE = 0x200000,
	QED_MSG_CXT	= 0x800000,
	QED_MSG_ILT	= 0x2000000,
	QED_MSG_ROCE	= 0x4000000,
	QED_MSG_DEBUG	= 0x8000000,
	/* to be added... unused bits up to QED_LOG_VERBOSE_MASK are free */
};
362 
/* Ethernet statistics exposed to protocol drivers.  All counters are
 * cumulative u64 values; the section after the "port" marker holds
 * MAC/port-level counters.
 */
struct qed_eth_stats {
	u64	no_buff_discards;
	u64	packet_too_big_discard;
	u64	ttl0_discard;
	u64	rx_ucast_bytes;
	u64	rx_mcast_bytes;
	u64	rx_bcast_bytes;
	u64	rx_ucast_pkts;
	u64	rx_mcast_pkts;
	u64	rx_bcast_pkts;
	u64	mftag_filter_discards;
	u64	mac_filter_discards;
	u64	tx_ucast_bytes;
	u64	tx_mcast_bytes;
	u64	tx_bcast_bytes;
	u64	tx_ucast_pkts;
	u64	tx_mcast_pkts;
	u64	tx_bcast_pkts;
	u64	tx_err_drop_pkts;
	/* TPA (HW transparent packet aggregation / LRO) counters */
	u64	tpa_coalesced_pkts;
	u64	tpa_coalesced_events;
	u64	tpa_aborts_num;
	u64	tpa_not_coalesced_pkts;
	u64	tpa_coalesced_bytes;

	/* port */
	u64	rx_64_byte_packets;
	u64	rx_127_byte_packets;
	u64	rx_255_byte_packets;
	u64	rx_511_byte_packets;
	u64	rx_1023_byte_packets;
	u64	rx_1518_byte_packets;
	u64	rx_1522_byte_packets;
	u64	rx_2047_byte_packets;
	u64	rx_4095_byte_packets;
	u64	rx_9216_byte_packets;
	u64	rx_16383_byte_packets;
	u64	rx_crc_errors;
	u64	rx_mac_crtl_frames;
	u64	rx_pause_frames;
	u64	rx_pfc_frames;
	u64	rx_align_errors;
	u64	rx_carrier_errors;
	u64	rx_oversize_packets;
	u64	rx_jabbers;
	u64	rx_undersize_packets;
	u64	rx_fragments;
	u64	tx_64_byte_packets;
	u64	tx_65_to_127_byte_packets;
	u64	tx_128_to_255_byte_packets;
	u64	tx_256_to_511_byte_packets;
	u64	tx_512_to_1023_byte_packets;
	u64	tx_1024_to_1518_byte_packets;
	u64	tx_1519_to_2047_byte_packets;
	u64	tx_2048_to_4095_byte_packets;
	u64	tx_4096_to_9216_byte_packets;
	u64	tx_9217_to_16383_byte_packets;
	u64	tx_pause_frames;
	u64	tx_pfc_frames;
	u64	tx_lpi_entry_count;
	u64	tx_total_collisions;
	u64	brb_truncates;
	u64	brb_discards;
	u64	rx_mac_bytes;
	u64	rx_mac_uc_packets;
	u64	rx_mac_mc_packets;
	u64	rx_mac_bc_packets;
	u64	rx_mac_frames_ok;
	u64	tx_mac_bytes;
	u64	tx_mac_uc_packets;
	u64	tx_mac_mc_packets;
	u64	tx_mac_bc_packets;
	u64	tx_mac_ctrl_frames;
};
437 
#define QED_SB_IDX              0x0002	/* returned by qed_sb_update_sb_idx() on change */

/* Protocol-index numbers within a status block: Rx uses PI 0, Tx uses
 * one PI per traffic class starting at 1.  `tc' is parenthesized so
 * composite expressions expand correctly.
 */
#define RX_PI           0
#define TX_PI(tc)       (RX_PI + 1 + (tc))
442 
/* Sample the status block's producer index and compare it against the
 * last value we acked.  Returns QED_SB_IDX if the producer advanced
 * (recording the new value in sb_ack), 0 otherwise.
 */
static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
{
	u32 prod = 0;
	u16 rc = 0;

	/* prod_index is DMA-written by HW in little-endian */
	prod = le32_to_cpu(sb_info->sb_virt->prod_index) &
	       STATUS_BLOCK_PROD_INDEX_MASK;
	if (sb_info->sb_ack != prod) {
		sb_info->sb_ack = prod;
		rc |= QED_SB_IDX;
	}

	/* Let SB update */
	mmiowb();
	return rc;
}
459 
460 /**
461  *
462  * @brief This function creates an update command for interrupts that is
463  *        written to the IGU.
464  *
465  * @param sb_info       - This is the structure allocated and
466  *                 initialized per status block. Assumption is
467  *                 that it was initialized using qed_sb_init
468  * @param int_cmd       - Enable/Disable/Nop
469  * @param upd_flg       - whether igu consumer should be
470  *                 updated.
471  *
472  * @return inline void
473  */
474 static inline void qed_sb_ack(struct qed_sb_info *sb_info,
475 			      enum igu_int_cmd int_cmd,
476 			      u8 upd_flg)
477 {
478 	struct igu_prod_cons_update igu_ack = { 0 };
479 
480 	igu_ack.sb_id_and_flags =
481 		((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
482 		 (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
483 		 (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
484 		 (IGU_SEG_ACCESS_REG <<
485 		  IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
486 
487 	DIRECT_REG_WR(sb_info->igu_addr, igu_ack.sb_id_and_flags);
488 
489 	/* Both segments (interrupts & acks) are written to same place address;
490 	 * Need to guarantee all commands will be received (in-order) by HW.
491 	 */
492 	mmiowb();
493 	barrier();
494 }
495 
496 static inline void __internal_ram_wr(void *p_hwfn,
497 				     void __iomem *addr,
498 				     int size,
499 				     u32 *data)
500 
501 {
502 	unsigned int i;
503 
504 	for (i = 0; i < size / sizeof(*data); i++)
505 		DIRECT_REG_WR(&((u32 __iomem *)addr)[i], data[i]);
506 }
507 
/* Convenience wrapper around __internal_ram_wr() with no HW-function
 * context (NULL p_hwfn).
 */
static inline void internal_ram_wr(void __iomem *addr,
				   int size,
				   u32 *data)
{
	__internal_ram_wr(NULL, addr, size, data);
}
514 
515 #endif
516