xref: /openbmc/linux/drivers/scsi/elx/efct/efct_hw.h (revision 762f99f4f3cb41a775b5157dd761217beba65873)
1  /* SPDX-License-Identifier: GPL-2.0 */
2  /*
3   * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
4   * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
5   */
6  
7  #ifndef _EFCT_HW_H
8  #define _EFCT_HW_H
9  
10  #include "../libefc_sli/sli4.h"
11  
12  /*
13   * EFCT PCI IDs
14   */
15  #define EFCT_VENDOR_ID			0x10df
16  /* LightPulse 16Gb x 4 FC (lancer-g6) */
17  #define EFCT_DEVICE_LANCER_G6		0xe307
18  /* LightPulse 32Gb x 4 FC (lancer-g7) */
19  #define EFCT_DEVICE_LANCER_G7		0xf407
20  
21  /*Default RQ entries len used by driver*/
22  #define EFCT_HW_RQ_ENTRIES_MIN		512
23  #define EFCT_HW_RQ_ENTRIES_DEF		1024
24  #define EFCT_HW_RQ_ENTRIES_MAX		4096
25  
26  /*Defines the size of the RQ buffers used for each RQ*/
27  #define EFCT_HW_RQ_SIZE_HDR             128
28  #define EFCT_HW_RQ_SIZE_PAYLOAD         1024
29  
30  /*Define the maximum number of multi-receive queues*/
31  #define EFCT_HW_MAX_MRQS		8
32  
33  /*
34   * Define count of when to set the WQEC bit in a submitted
 * WQE, causing a consumed/released completion to be posted.
36   */
37  #define EFCT_HW_WQEC_SET_COUNT		32
38  
39  /*Send frame timeout in seconds*/
40  #define EFCT_HW_SEND_FRAME_TIMEOUT	10
41  
42  /*
43   * FDT Transfer Hint value, reads greater than this value
44   * will be segmented to implement fairness. A value of zero disables
45   * the feature.
46   */
47  #define EFCT_HW_FDT_XFER_HINT		8192
48  
49  #define EFCT_HW_TIMECHECK_ITERATIONS	100
50  #define EFCT_HW_MAX_NUM_MQ		1
51  #define EFCT_HW_MAX_NUM_RQ		32
52  #define EFCT_HW_MAX_NUM_EQ		16
53  #define EFCT_HW_MAX_NUM_WQ		32
54  #define EFCT_HW_DEF_NUM_EQ		1
55  
56  #define OCE_HW_MAX_NUM_MRQ_PAIRS	16
57  
58  #define EFCT_HW_MQ_DEPTH		128
59  #define EFCT_HW_EQ_DEPTH		1024
60  
61  /*
 * A CQ will be assigned to each WQ
63   * (CQ must have 2X entries of the WQ for abort
64   * processing), plus a separate one for each RQ PAIR and one for MQ
65   */
66  #define EFCT_HW_MAX_NUM_CQ \
67  	((EFCT_HW_MAX_NUM_WQ * 2) + 1 + (OCE_HW_MAX_NUM_MRQ_PAIRS * 2))
68  
69  #define EFCT_HW_Q_HASH_SIZE		128
70  #define EFCT_HW_RQ_HEADER_SIZE		128
71  #define EFCT_HW_RQ_HEADER_INDEX		0
72  
73  #define EFCT_HW_REQUE_XRI_REGTAG	65534
74  
/* Options for efct_hw_command(): selects synchronous vs asynchronous mode */
enum efct_cmd_opts {
	/* command executes synchronously and busy-waits for completion */
	EFCT_CMD_POLL,
	/* command executes asynchronously. Uses callback */
	EFCT_CMD_NOWAIT,
};
82  
/* Reset scope passed to efct_hw_reset() */
enum efct_hw_reset {
	EFCT_HW_RESET_FUNCTION,
	EFCT_HW_RESET_FIRMWARE,
	EFCT_HW_RESET_MAX
};
88  
/* FC link topology selection/reporting */
enum efct_hw_topo {
	EFCT_HW_TOPOLOGY_AUTO,
	EFCT_HW_TOPOLOGY_NPORT,
	EFCT_HW_TOPOLOGY_LOOP,
	EFCT_HW_TOPOLOGY_NONE,
	EFCT_HW_TOPOLOGY_MAX
};
96  
97  /* pack fw revision values into a single uint64_t */
98  #define HW_FWREV(a, b, c, d) (((uint64_t)(a) << 48) | ((uint64_t)(b) << 32) \
99  			| ((uint64_t)(c) << 16) | ((uint64_t)(d)))
100  
101  #define EFCT_FW_VER_STR(a, b, c, d) (#a "." #b "." #c "." #d)
102  
/* IO types accepted by efct_hw_io_send() and efct_hw_io_init_sges() */
enum efct_hw_io_type {
	EFCT_HW_ELS_REQ,
	EFCT_HW_ELS_RSP,
	EFCT_HW_FC_CT,
	EFCT_HW_FC_CT_RSP,
	EFCT_HW_BLS_ACC,
	EFCT_HW_BLS_RJT,
	EFCT_HW_IO_TARGET_READ,
	EFCT_HW_IO_TARGET_WRITE,
	EFCT_HW_IO_TARGET_RSP,
	EFCT_HW_IO_DNRX_REQUEUE,
	EFCT_HW_IO_MAX,
};
116  
/* Lifecycle state of a struct efct_hw_io (tracked in hw->io_* lists) */
enum efct_hw_io_state {
	EFCT_HW_IO_STATE_FREE,
	EFCT_HW_IO_STATE_INUSE,
	EFCT_HW_IO_STATE_WAIT_FREE,
	EFCT_HW_IO_STATE_WAIT_SEC_HIO,
};
123  
124  #define EFCT_TARGET_WRITE_SKIPS	1
125  #define EFCT_TARGET_READ_SKIPS	2
126  
127  struct efct_hw;
128  struct efct_io;
129  
#define EFCT_CMD_CTX_POOL_SZ	32
/**
 * HW command context.
 * Stores the state for the asynchronous commands sent to the hardware.
 */
struct efct_command_ctx {
	struct list_head	list_entry;	/* link on hw->cmd_head / cmd_pending */
	/* completion callback, invoked with mailbox status and MQE buffer */
	int (*cb)(struct efct_hw *hw, int status, u8 *mqe, void *arg);
	void			*arg;	/* Argument for callback */
	/* buffer holding command / results */
	u8			buf[SLI4_BMBX_SIZE];
	void			*ctx;	/* upper layer context */
};
143  
/* One scatter/gather element: DMA-able address and length */
struct efct_hw_sgl {
	uintptr_t		addr;
	size_t			len;
};
148  
/* Per-IO-type parameters for efct_hw_io_send(); active member is
 * selected by the enum efct_hw_io_type argument.
 */
union efct_hw_io_param_u {
	struct sli_bls_params bls;
	struct sli_els_params els;
	struct sli_ct_params fc_ct;
	struct sli_fcp_tgt_params fcp_tgt;
};
155  
/* WQ steering mode: how a WQ is chosen for an IO submission */
enum efct_hw_wq_steering {
	EFCT_HW_WQ_STEERING_CLASS,
	EFCT_HW_WQ_STEERING_REQUEST,
	EFCT_HW_WQ_STEERING_CPU,
};
162  
/* HW wqe object: one work queue entry plus its pending/abort bookkeeping */
struct efct_hw_wqe {
	struct list_head	list_entry;	/* link on wq->pending_list */
	bool			abort_wqe_submit_needed;
	bool			send_abts;
	u32			id;
	u32			abort_reqtag;
	u8			*wqebuf;	/* backing WQE buffer (see hw->wqe_buffs) */
};
172  
struct efct_hw_io;
/*
 * Typedef for HW "done" callback: invoked on IO completion with the
 * transferred length, completion status, extended status, and the
 * upper-layer argument supplied with the IO.
 */
typedef int (*efct_hw_done_t)(struct efct_hw_io *, u32 len, int status,
			      u32 ext, void *ul_arg);
177  
/**
 * struct efct_hw_io - HW IO object.
 *
 * Stores the per-IO information necessary
 * for both SLI and efct.
 * @ref:		reference counter for hw io object
 * @state:		state of IO: free, busy, wait_free
 * @release:		kref release handler for this IO
 * @list_entry:		used for busy, wait_free, free lists
 * @wqe:		Work queue object, with link for pending
 * @hw:			pointer back to hardware context
 * @xfer_rdy:		transfer ready data
 * @type:		IO type
 * @xbusy:		Exchange is active in FW
 * @abort_in_progress:	if TRUE, abort is in progress
 * @status_saved:	if TRUE, latched status should be returned
 * @wq_class:		WQ class if steering mode is Class
 * @reqtag:		request tag for this HW IO
 * @wq:			WQ assigned to the exchange
 * @done:		Function called on IO completion
 * @arg:		argument passed to IO done callback
 * @abort_done:		Function called on abort completion
 * @abort_arg:		argument passed to abort done callback
 * @wq_steering:	WQ steering mode request
 * @saved_status:	Saved status
 * @saved_len:		Status length
 * @saved_ext:		Saved extended status
 * @eq:			EQ on which this HIO came up
 * @sge_offset:		SGE data offset
 * @def_sgl_count:	Count of SGEs in default SGL
 * @abort_reqtag:	request tag for an abort of this HW IO
 * @indicator:		Exchange indicator
 * @def_sgl:		default SGL
 * @sgl:		pointer to current active SGL
 * @sgl_count:		count of SGEs in io->sgl
 * @first_data_sge:	index of first data SGE
 * @n_sge:		number of active SGEs
 */
struct efct_hw_io {
	struct kref		ref;
	enum efct_hw_io_state	state;
	void			(*release)(struct kref *arg);
	struct list_head	list_entry;
	struct efct_hw_wqe	wqe;

	struct efct_hw		*hw;
	struct efc_dma		xfer_rdy;
	u16			type;
	bool			xbusy;
	int			abort_in_progress;
	bool			status_saved;
	u8			wq_class;
	u16			reqtag;

	struct hw_wq		*wq;
	efct_hw_done_t		done;
	void			*arg;
	efct_hw_done_t		abort_done;
	void			*abort_arg;

	enum efct_hw_wq_steering wq_steering;

	u32			saved_status;
	u32			saved_len;
	u32			saved_ext;

	struct hw_eq		*eq;
	u32			sge_offset;
	u32			def_sgl_count;
	u32			abort_reqtag;
	u32			indicator;
	struct efc_dma		def_sgl;
	struct efc_dma		*sgl;
	u32			sgl_count;
	u32			first_data_sge;
	u32			n_sge;
};
254  
/* Port control operations accepted by efct_hw_port_control() */
enum efct_hw_port {
	EFCT_HW_PORT_INIT,
	EFCT_HW_PORT_SHUTDOWN,
};
259  
/* Node group rpi reference: usage/attach counters for an RPI */
struct efct_hw_rpi_ref {
	atomic_t rpi_count;
	atomic_t rpi_attached;
};
265  
/* Indices into the counters array reported by efct_hw_get_link_stats() */
enum efct_hw_link_stat {
	EFCT_HW_LINK_STAT_LINK_FAILURE_COUNT,
	EFCT_HW_LINK_STAT_LOSS_OF_SYNC_COUNT,
	EFCT_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT,
	EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT,
	EFCT_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT,
	EFCT_HW_LINK_STAT_CRC_COUNT,
	EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT,
	EFCT_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT,
	EFCT_HW_LINK_STAT_ARB_TIMEOUT_COUNT,
	EFCT_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT,
	EFCT_HW_LINK_STAT_CURR_RCV_B2B_CREDIT,
	EFCT_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT,
	EFCT_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT,
	EFCT_HW_LINK_STAT_RCV_EOFA_COUNT,
	EFCT_HW_LINK_STAT_RCV_EOFDTI_COUNT,
	EFCT_HW_LINK_STAT_RCV_EOFNI_COUNT,
	EFCT_HW_LINK_STAT_RCV_SOFF_COUNT,
	EFCT_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT,
	EFCT_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT,
	EFCT_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT,
	EFCT_HW_LINK_STAT_MAX,
};
289  
/* Indices into the counters array reported by efct_hw_get_host_stats() */
enum efct_hw_host_stat {
	EFCT_HW_HOST_STAT_TX_KBYTE_COUNT,
	EFCT_HW_HOST_STAT_RX_KBYTE_COUNT,
	EFCT_HW_HOST_STAT_TX_FRAME_COUNT,
	EFCT_HW_HOST_STAT_RX_FRAME_COUNT,
	EFCT_HW_HOST_STAT_TX_SEQ_COUNT,
	EFCT_HW_HOST_STAT_RX_SEQ_COUNT,
	EFCT_HW_HOST_STAT_TOTAL_EXCH_ORIG,
	EFCT_HW_HOST_STAT_TOTAL_EXCH_RESP,
	/* NOTE(review): "HOSY" looks like a typo of "HOST", but renaming the
	 * identifier would break existing users, so it is kept as-is.
	 */
	EFCT_HW_HOSY_STAT_RX_P_BSY_COUNT,
	EFCT_HW_HOST_STAT_RX_F_BSY_COUNT,
	EFCT_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_RQ_BUF_COUNT,
	EFCT_HW_HOST_STAT_EMPTY_RQ_TIMEOUT_COUNT,
	EFCT_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_XRI_COUNT,
	EFCT_HW_HOST_STAT_EMPTY_XRI_POOL_COUNT,
	EFCT_HW_HOST_STAT_MAX,
};
307  
/* Overall HW bring-up/teardown state (stored in hw->state) */
enum efct_hw_state {
	EFCT_HW_STATE_UNINITIALIZED,
	EFCT_HW_STATE_QUEUES_ALLOCATED,
	EFCT_HW_STATE_ACTIVE,
	EFCT_HW_STATE_RESET_IN_PROGRESS,
	EFCT_HW_STATE_TEARDOWN_IN_PROGRESS,
};
315  
/* One link-statistic counter plus its overflow flag, as passed to the
 * efct_hw_get_link_stats() callback.
 */
struct efct_hw_link_stat_counts {
	u8		overflow;
	u32		counter;
};
320  
/* One host-statistic counter, as passed to the efct_hw_get_host_stats()
 * callback.
 */
struct efct_hw_host_stat_counts {
	u32		counter;
};
324  
/* Structure used for the hash lookup of queue IDs */
struct efct_queue_hash {
	bool		in_use;
	u16		id;	/* hardware queue ID (hash key) */
	u16		index;	/* index into the corresponding queue array */
};
331  
/* WQ callback object: completion callback registered per request tag */
struct hw_wq_callback {
	u16		instance_index;	/* use for request tag */
	void (*callback)(void *arg, u8 *cqe, int status);
	void		*arg;
	struct list_head list_entry;	/* link on reqtag_pool freelist */
};
339  
/* Pool of WQ request-tag callback objects, indexed by tag value */
struct reqtag_pool {
	spinlock_t lock;	/* pool lock */
	struct hw_wq_callback *tags[U16_MAX];
	struct list_head freelist;
};
345  
/* User/driver-tunable HW configuration, consumed during efct_hw_init() */
struct efct_hw_config {
	u32		n_eq;	/* number of event queues */
	u32		n_cq;	/* number of completion queues */
	u32		n_mq;	/* number of mailbox queues */
	u32		n_rq;	/* number of receive queues */
	u32		n_wq;	/* number of work queues */
	u32		n_io;	/* number of HW IO objects */
	u32		n_sgl;	/* SGEs per default SGL */
	u32		speed;
	u32		topology;
	/* size of the buffers for first burst */
	u32		rq_default_buffer_size;
	u8		esoc;
	/* MRQ RQ selection policy */
	u8		rq_selection_policy;
	/* RQ quanta if rq_selection_policy == 2 */
	u8		rr_quanta;
	u32		filter_def[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];
};
365  
/*
 * Top-level HW context: owns the SLI-4 handle, SLI/HW queue storage,
 * the mailbox command lists, and the HW IO object pools.
 */
struct efct_hw {
	struct efct		*os;
	struct sli4		sli;
	u16			ulp_start;
	u16			ulp_max;
	u32			dump_size;
	enum efct_hw_state	state;
	bool			hw_setup_called;
	u8			sliport_healthcheck;
	u16			fcf_indicator;

	/* HW configuration */
	struct efct_hw_config	config;

	/* calculated queue sizes for each type */
	u32			num_qentries[SLI4_QTYPE_MAX];

	/* Storage for SLI queue objects */
	struct sli4_queue	wq[EFCT_HW_MAX_NUM_WQ];
	struct sli4_queue	rq[EFCT_HW_MAX_NUM_RQ];
	u16			hw_rq_lookup[EFCT_HW_MAX_NUM_RQ];
	struct sli4_queue	mq[EFCT_HW_MAX_NUM_MQ];
	struct sli4_queue	cq[EFCT_HW_MAX_NUM_CQ];
	struct sli4_queue	eq[EFCT_HW_MAX_NUM_EQ];

	/* HW queue counts (entries actually created in the arrays above) */
	u32			eq_count;
	u32			cq_count;
	u32			mq_count;
	u32			wq_count;
	u32			rq_count;
	u32			cmd_head_count;
	struct list_head	eq_list;

	/* queue-ID -> array-index hashes (see efct_hw_queue_hash_find()) */
	struct efct_queue_hash	cq_hash[EFCT_HW_Q_HASH_SIZE];
	struct efct_queue_hash	rq_hash[EFCT_HW_Q_HASH_SIZE];
	struct efct_queue_hash	wq_hash[EFCT_HW_Q_HASH_SIZE];

	/* Storage for HW queue objects */
	struct hw_wq		*hw_wq[EFCT_HW_MAX_NUM_WQ];
	struct hw_rq		*hw_rq[EFCT_HW_MAX_NUM_RQ];
	struct hw_mq		*hw_mq[EFCT_HW_MAX_NUM_MQ];
	struct hw_cq		*hw_cq[EFCT_HW_MAX_NUM_CQ];
	struct hw_eq		*hw_eq[EFCT_HW_MAX_NUM_EQ];
	/* count of hw_rq[] entries */
	u32			hw_rq_count;
	/* count of multirq RQs */
	u32			hw_mrq_count;

	/* per-CPU WQ lookup (built by efct_hw_map_wq_cpu()) */
	struct hw_wq		**wq_cpu_array;

	/* Sequence objects used in incoming frame processing */
	struct efc_hw_sequence	*seq_pool;

	/* Maintain an ordered, linked list of outstanding HW commands. */
	struct mutex            bmbx_lock;
	spinlock_t		cmd_lock;
	struct list_head	cmd_head;
	struct list_head	cmd_pending;
	mempool_t		*cmd_ctx_pool;
	mempool_t		*mbox_rqst_pool;

	struct sli4_link_event	link;

	/* pointer array of IO objects */
	struct efct_hw_io	**io;
	/* array of WQE buffs mapped to IO objects */
	u8			*wqe_buffs;

	/* IO lock to synchronize list access */
	spinlock_t		io_lock;
	/* List of IO objects in use */
	struct list_head	io_inuse;
	/* List of IO objects waiting to be freed */
	struct list_head	io_wait_free;
	/* List of IO objects available for allocation */
	struct list_head	io_free;

	struct efc_dma		loop_map;

	struct efc_dma		xfer_rdy;

	struct efc_dma		rnode_mem;

	atomic_t		io_alloc_failed_count;

	/* stat: wq submit count */
	u32			tcmd_wq_submit[EFCT_HW_MAX_NUM_WQ];
	/* stat: wq complete count */
	u32			tcmd_wq_complete[EFCT_HW_MAX_NUM_WQ];

	atomic_t		send_frame_seq_id;
	struct reqtag_pool	*wq_reqtag_pool;
};
460  
/* Selector for efct_hw_io_get_count(): which IO list/total to count */
enum efct_hw_io_count_type {
	EFCT_HW_IO_INUSE_COUNT,
	EFCT_HW_IO_FREE_COUNT,
	EFCT_HW_IO_WAIT_FREE_COUNT,
	EFCT_HW_IO_N_TOTAL_IO_COUNT,
};
467  
/* HW queue data structures */
/* Event queue object: wraps a SLI EQ and the list of CQs bound to it */
struct hw_eq {
	struct list_head	list_entry;	/* link on hw->eq_list */
	enum sli4_qtype		type;
	u32			instance;
	u32			entry_count;
	u32			entry_size;
	struct efct_hw		*hw;
	struct sli4_queue	*queue;
	struct list_head	cq_list;	/* CQs attached to this EQ */
	u32			use_count;
};
480  
/* Completion queue object: wraps a SLI CQ, bound to one EQ */
struct hw_cq {
	struct list_head	list_entry;	/* link on eq->cq_list */
	enum sli4_qtype		type;
	u32			instance;
	u32			entry_count;
	u32			entry_size;
	struct hw_eq		*eq;
	struct sli4_queue	*queue;
	struct list_head	q_list;	/* WQ/RQ/MQ objects on this CQ */
	u32			use_count;
};
492  
/* Common header shared by queue objects linked on a CQ's q_list */
struct hw_q {
	struct list_head	list_entry;
	enum sli4_qtype		type;
};
497  
/* Mailbox queue object: wraps a SLI MQ, bound to one CQ */
struct hw_mq {
	struct list_head	list_entry;	/* link on cq->q_list */
	enum sli4_qtype		type;
	u32			instance;

	u32			entry_count;
	u32			entry_size;
	struct hw_cq		*cq;
	struct sli4_queue	*queue;

	u32			use_count;
};
510  
/* Work queue object: wraps a SLI WQ plus submission bookkeeping */
struct hw_wq {
	struct list_head	list_entry;	/* link on cq->q_list */
	enum sli4_qtype		type;
	u32			instance;
	struct efct_hw		*hw;

	u32			entry_count;
	u32			entry_size;
	struct hw_cq		*cq;
	struct sli4_queue	*queue;
	u32			class;

	/* WQ consumed */
	u32			wqec_set_count;	/* WQEC bit interval (see EFCT_HW_WQEC_SET_COUNT) */
	u32			wqec_count;	/* countdown until WQEC is set again */
	u32			free_count;	/* free WQ entries available */
	u32			total_submit_count;
	struct list_head	pending_list;	/* WQEs waiting for free entries */

	/* HW IO allocated for use with Send Frame */
	struct efct_hw_io	*send_frame_io;

	/* Stats */
	u32			use_count;
	u32			wq_pending_count;
};
537  
/* Receive queue pair object: header + payload (and optional first-burst)
 * SLI RQs sharing one CQ.
 */
struct hw_rq {
	struct list_head	list_entry;
	enum sli4_qtype		type;
	u32			instance;

	u32			entry_count;
	u32			use_count;
	u32			hdr_entry_size;
	u32			first_burst_entry_size;
	u32			data_entry_size;
	bool			is_mrq;
	u32			base_mrq_id;

	struct hw_cq		*cq;

	u8			filter_mask;
	struct sli4_queue	*hdr;
	struct sli4_queue	*first_burst;
	struct sli4_queue	*data;

	struct efc_hw_rq_buffer	*hdr_buf;
	struct efc_hw_rq_buffer	*fb_buf;
	struct efc_hw_rq_buffer	*payload_buf;
	/* RQ tracker for this RQ */
	struct efc_hw_sequence	**rq_tracker;
};
564  
/* Per-request state for efct_hw_send_frame(); caller provides storage and
 * receives completion via @callback.
 */
struct efct_hw_send_frame_context {
	struct efct_hw		*hw;
	struct hw_wq_callback	*wqcb;
	struct efct_hw_wqe	wqe;
	void (*callback)(int status, void *arg);
	void			*arg;

	/* General purpose elements */
	struct efc_hw_sequence	*seq;
	struct efc_dma		payload;
};
576  
/* Firmware image group header layout, used by efct_hw_firmware_write()
 * consumers; magic_number is stored big-endian.
 */
struct efct_hw_grp_hdr {
	u32			size;
	__be32			magic_number;
	u32			word2;
	u8			rev_name[128];
	u8			date[12];
	u8			revision[32];
};
585  
586  static inline int
efct_hw_get_link_speed(struct efct_hw * hw)587  efct_hw_get_link_speed(struct efct_hw *hw) {
588  	return hw->link.speed;
589  }
590  
/* HW setup, queue initialization, and port attribute API */
int
efct_hw_setup(struct efct_hw *hw, void *os, struct pci_dev *pdev);
int efct_hw_init(struct efct_hw *hw);
int
efct_hw_parse_filter(struct efct_hw *hw, void *value);
int
efct_hw_init_queues(struct efct_hw *hw);
int
efct_hw_map_wq_cpu(struct efct_hw *hw);
uint64_t
efct_get_wwnn(struct efct_hw *hw);
uint64_t
efct_get_wwpn(struct efct_hw *hw);

/* RQ buffer allocation/posting */
int efct_hw_rx_allocate(struct efct_hw *hw);
int efct_hw_rx_post(struct efct_hw *hw);
void efct_hw_rx_free(struct efct_hw *hw);
/* Mailbox command submission (opts: enum efct_cmd_opts) */
int
efct_hw_command(struct efct_hw *hw, u8 *cmd, u32 opts, void *cb,
		void *arg);
int
efct_issue_mbox_rqst(void *base, void *cmd, void *cb, void *arg);

/* HW IO allocation, SGL management, submission, and abort API */
struct efct_hw_io *efct_hw_io_alloc(struct efct_hw *hw);
int efct_hw_io_free(struct efct_hw *hw, struct efct_hw_io *io);
u8 efct_hw_io_inuse(struct efct_hw *hw, struct efct_hw_io *io);
int
efct_hw_io_send(struct efct_hw *hw, enum efct_hw_io_type type,
		struct efct_hw_io *io, union efct_hw_io_param_u *iparam,
		void *cb, void *arg);
int
efct_hw_io_register_sgl(struct efct_hw *hw, struct efct_hw_io *io,
			struct efc_dma *sgl,
			u32 sgl_count);
int
efct_hw_io_init_sges(struct efct_hw *hw,
		     struct efct_hw_io *io, enum efct_hw_io_type type);

int
efct_hw_io_add_sge(struct efct_hw *hw, struct efct_hw_io *io,
		   uintptr_t addr, u32 length);
int
efct_hw_io_abort(struct efct_hw *hw, struct efct_hw_io *io_to_abort,
		 bool send_abts, void *cb, void *arg);
u32
efct_hw_io_get_count(struct efct_hw *hw,
		     enum efct_hw_io_count_type io_count_type);
struct efct_hw_io
*efct_hw_io_lookup(struct efct_hw *hw, u32 indicator);
void efct_hw_io_abort_all(struct efct_hw *hw);
void efct_hw_io_free_internal(struct kref *arg);

/* HW WQ request tag API */
struct reqtag_pool *efct_hw_reqtag_pool_alloc(struct efct_hw *hw);
void efct_hw_reqtag_pool_free(struct efct_hw *hw);
struct hw_wq_callback
*efct_hw_reqtag_alloc(struct efct_hw *hw,
			void (*callback)(void *arg, u8 *cqe,
					 int status), void *arg);
void
efct_hw_reqtag_free(struct efct_hw *hw, struct hw_wq_callback *wqcb);
struct hw_wq_callback
*efct_hw_reqtag_get_instance(struct efct_hw *hw, u32 instance_index);

/* RQ completion handlers for RQ pair mode */
int
efct_hw_rqpair_process_rq(struct efct_hw *hw,
			  struct hw_cq *cq, u8 *cqe);
int
efct_hw_rqpair_sequence_free(struct efct_hw *hw, struct efc_hw_sequence *seq);
static inline void
efct_hw_sequence_copy(struct efc_hw_sequence *dst,
		      struct efc_hw_sequence *src)
{
	/*
	 * Shallow structure copy of src to dst. Note: nothing is zeroed
	 * afterwards, so any pointer members are shared with src.
	 */
	*dst = *src;
}
668  
/* libefc-facing wrapper for releasing a received HW sequence */
int
efct_efc_hw_sequence_free(struct efc *efc, struct efc_hw_sequence *seq);
671  
/* Release a received sequence back to its RQ pair. */
static inline int
efct_hw_sequence_free(struct efct_hw *hw, struct efc_hw_sequence *seq)
{
	/* Only RQ pair mode is supported */
	return efct_hw_rqpair_sequence_free(hw, seq);
}
678  
/* Queue processing entry points (interrupt/poll path) */
int
efct_hw_eq_process(struct efct_hw *hw, struct hw_eq *eq,
		   u32 max_isr_time_msec);
void efct_hw_cq_process(struct efct_hw *hw, struct hw_cq *cq);
void
efct_hw_wq_process(struct efct_hw *hw, struct hw_cq *cq,
		   u8 *cqe, int status, u16 rid);
void
efct_hw_xabt_process(struct efct_hw *hw, struct hw_cq *cq,
		     u8 *cqe, u16 rid);
int
efct_hw_process(struct efct_hw *hw, u32 vector, u32 max_isr_time_msec);
int
efct_hw_queue_hash_find(struct efct_queue_hash *hash, u16 id);
int efct_hw_wq_write(struct hw_wq *wq, struct efct_hw_wqe *wqe);
/* Raw frame / ELS / BLS transmit paths */
int
efct_hw_send_frame(struct efct_hw *hw, struct fc_frame_header *hdr,
		   u8 sof, u8 eof, struct efc_dma *payload,
		struct efct_hw_send_frame_context *ctx,
		void (*callback)(void *arg, u8 *cqe, int status),
		void *arg);
int
efct_els_hw_srrs_send(struct efc *efc, struct efc_disc_io *io);
int
efct_efc_bls_send(struct efc *efc, u32 type, struct sli_bls_params *bls);
int
efct_hw_bls_send(struct efct *efct, u32 type, struct sli_bls_params *bls_params,
		 void *cb, void *arg);

/* Function for retrieving link statistics */
int
efct_hw_get_link_stats(struct efct_hw *hw,
		       u8 req_ext_counters,
		       u8 clear_overflow_flags,
		       u8 clear_all_counters,
		       void (*efct_hw_link_stat_cb_t)(int status,
						      u32 num_counters,
		       struct efct_hw_link_stat_counts *counters, void *arg),
		       void *arg);
/* Function for retrieving host statistics */
int
efct_hw_get_host_stats(struct efct_hw *hw,
		       u8 cc,
		       void (*efct_hw_host_stat_cb_t)(int status,
						      u32 num_counters,
		       struct efct_hw_host_stat_counts *counters, void *arg),
		       void *arg);
/* Firmware download (chunked; 'last' marks the final chunk) */
int
efct_hw_firmware_write(struct efct_hw *hw, struct efc_dma *dma,
		       u32 size, u32 offset, int last,
		       void (*cb)(int status, u32 bytes_written,
				  u32 change_status, void *arg),
		       void *arg);
/* Generic async mailbox call: callback receives status and MQE */
typedef void (*efct_hw_async_cb_t)(struct efct_hw *hw, int status,
				  u8 *mqe, void *arg);
int
efct_hw_async_call(struct efct_hw *hw, efct_hw_async_cb_t callback, void *arg);

/* HW queue object constructors and destructors */
struct hw_eq *efct_hw_new_eq(struct efct_hw *hw, u32 entry_count);
struct hw_cq *efct_hw_new_cq(struct hw_eq *eq, u32 entry_count);
u32
efct_hw_new_cq_set(struct hw_eq *eqs[], struct hw_cq *cqs[],
		   u32 num_cqs, u32 entry_count);
struct hw_mq *efct_hw_new_mq(struct hw_cq *cq, u32 entry_count);
struct hw_wq
*efct_hw_new_wq(struct hw_cq *cq, u32 entry_count);
u32
efct_hw_new_rq_set(struct hw_cq *cqs[], struct hw_rq *rqs[],
		   u32 num_rq_pairs, u32 entry_count);
void efct_hw_del_eq(struct hw_eq *eq);
void efct_hw_del_cq(struct hw_cq *cq);
void efct_hw_del_mq(struct hw_mq *mq);
void efct_hw_del_wq(struct hw_wq *wq);
void efct_hw_del_rq(struct hw_rq *rq);
void efct_hw_queue_teardown(struct efct_hw *hw);
void efct_hw_teardown(struct efct_hw *hw);
int
efct_hw_reset(struct efct_hw *hw, enum efct_hw_reset reset);

/* Port bring-up/shutdown control (ctrl: enum efct_hw_port) */
int
efct_hw_port_control(struct efct_hw *hw, enum efct_hw_port ctrl,
		     uintptr_t value,
		void (*cb)(int status, uintptr_t value, void *arg),
		void *arg);
763  
#endif /* _EFCT_HW_H */
765