/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * QLogic FCoE Offload Driver
 * Copyright (c) 2016-2018 Cavium Inc.
 */
#ifndef _QEDFC_H_
#define _QEDFC_H_

#include <scsi/libfcoe.h>
#include <scsi/libfc.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/fc/fc_fc2.h>
#include <scsi/scsi_tcq.h>

/* qedf_hsi.h needs to be included before any qed includes */
#include "qedf_hsi.h"

#include <linux/qed/qed_if.h>
#include <linux/qed/qed_fcoe_if.h>
#include <linux/qed/qed_ll2_if.h>
#include "qedf_version.h"
#include "qedf_dbg.h"
#include "drv_fcoe_fw_funcs.h"

/* Helpers to extract upper and lower 32-bits of pointer */
#define U64_HI(val) ((u32)(((u64)(val)) >> 32))
#define U64_LO(val) ((u32)(((u64)(val)) & 0xffffffff))
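/*
 * Illustrative use (a sketch, not lifted from the driver): firmware and
 * queue descriptors carry 64-bit DMA addresses as two 32-bit words, so
 * callers typically split an address like this (field names below are
 * hypothetical):
 *
 *	desc->addr_hi = U64_HI(buf_dma);
 *	desc->addr_lo = U64_LO(buf_dma);
 */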

#define QEDF_DESCR "QLogic FCoE Offload Driver"
#define QEDF_MODULE_NAME "qedf"

#define QEDF_FLOGI_RETRY_CNT 3
#define QEDF_RPORT_RETRY_CNT 255
#define QEDF_MAX_SESSIONS 1024
#define QEDF_MAX_PAYLOAD 2048
#define QEDF_MAX_BDS_PER_CMD 256
#define QEDF_MAX_BD_LEN 0xffff
#define QEDF_BD_SPLIT_SZ 0x1000
#define QEDF_PAGE_SIZE 4096
#define QED_HW_DMA_BOUNDARY 0xfff
#define QEDF_MAX_SGLEN_FOR_CACHESGL ((1U << 16) - 1)
#define QEDF_MFS (QEDF_MAX_PAYLOAD + \
	sizeof(struct fc_frame_header))
#define QEDF_MAX_NPIV 64
#define QEDF_TM_TIMEOUT 10
#define QEDF_ABORT_TIMEOUT (10 * 1000)
#define QEDF_CLEANUP_TIMEOUT 1
#define QEDF_MAX_CDB_LEN 16
#define QEDF_LL2_BUF_SIZE 2500 /* Buffer size required for LL2 Rx */

#define UPSTREAM_REMOVE 1
#define UPSTREAM_KEEP 1

struct qedf_mp_req {
	uint32_t req_len;
	void *req_buf;
	dma_addr_t req_buf_dma;
	struct scsi_sge *mp_req_bd;
	dma_addr_t mp_req_bd_dma;
	struct fc_frame_header req_fc_hdr;

	uint32_t resp_len;
	void *resp_buf;
	dma_addr_t resp_buf_dma;
	struct scsi_sge *mp_resp_bd;
	dma_addr_t mp_resp_bd_dma;
	struct fc_frame_header resp_fc_hdr;
};

struct qedf_els_cb_arg {
	struct qedf_ioreq *aborted_io_req;
	struct qedf_ioreq *io_req;
	u8 op; /* Used to keep track of ELS op */
	uint16_t l2_oxid;
	u32 offset; /* Used for sequence cleanup */
	u8 r_ctl; /* Used for sequence cleanup */
};

enum qedf_ioreq_event {
	QEDF_IOREQ_EV_NONE,
	QEDF_IOREQ_EV_ABORT_SUCCESS,
	QEDF_IOREQ_EV_ABORT_FAILED,
	QEDF_IOREQ_EV_SEND_RRQ,
	QEDF_IOREQ_EV_ELS_TMO,
	QEDF_IOREQ_EV_ELS_ERR_DETECT,
	QEDF_IOREQ_EV_ELS_FLUSH,
	QEDF_IOREQ_EV_CLEANUP_SUCCESS,
	QEDF_IOREQ_EV_CLEANUP_FAILED,
};

#define FC_GOOD 0
#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER (0x1<<2)
#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER (0x1<<3)
#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID (0x1<<0)
#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID (0x1<<1)
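/*
 * Illustrative sketch: completion handling typically tests these bits
 * against the flags byte returned in the FCP_RSP payload before trusting
 * the residual or the response/sense lengths, e.g. (rsp_flags is a
 * hypothetical local here):
 *
 *	if (rsp_flags & FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER)
 *		scsi_set_resid(sc_cmd, io_req->fcp_resid);
 */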
struct qedf_ioreq {
	struct list_head link;
	uint16_t xid;
	struct scsi_cmnd *sc_cmd;
#define QEDF_SCSI_CMD 1
#define QEDF_TASK_MGMT_CMD 2
#define QEDF_ABTS 3
#define QEDF_ELS 4
#define QEDF_CLEANUP 5
#define QEDF_SEQ_CLEANUP 6
	u8 cmd_type;
#define QEDF_CMD_OUTSTANDING 0x0
#define QEDF_CMD_IN_ABORT 0x1
#define QEDF_CMD_IN_CLEANUP 0x2
#define QEDF_CMD_SRR_SENT 0x3
#define QEDF_CMD_DIRTY 0x4
#define QEDF_CMD_ERR_SCSI_DONE 0x5
	u8 io_req_flags;
	uint8_t tm_flags;
	struct qedf_rport *fcport;
#define QEDF_CMD_ST_INACTIVE 0
#define QEDFC_CMD_ST_IO_ACTIVE 1
#define QEDFC_CMD_ST_ABORT_ACTIVE 2
#define QEDFC_CMD_ST_ABORT_ACTIVE_EH 3
#define QEDFC_CMD_ST_CLEANUP_ACTIVE 4
#define QEDFC_CMD_ST_CLEANUP_ACTIVE_EH 5
#define QEDFC_CMD_ST_RRQ_ACTIVE 6
#define QEDFC_CMD_ST_RRQ_WAIT 7
#define QEDFC_CMD_ST_OXID_RETIRE_WAIT 8
#define QEDFC_CMD_ST_TMF_ACTIVE 9
#define QEDFC_CMD_ST_DRAIN_ACTIVE 10
#define QEDFC_CMD_ST_CLEANED 11
#define QEDFC_CMD_ST_ELS_ACTIVE 12
	atomic_t state;
	unsigned long flags;
	enum qedf_ioreq_event event;
	size_t data_xfer_len;
	/* ID: 001: Alloc cmd (qedf_alloc_cmd) */
	/* ID: 002: Initiate ABTS (qedf_initiate_abts) */
	/* ID: 003: For RRQ (qedf_process_abts_compl) */
	struct kref refcount;
	struct qedf_cmd_mgr *cmd_mgr;
	struct io_bdt *bd_tbl;
	struct delayed_work timeout_work;
	struct completion tm_done;
	struct completion abts_done;
	struct completion cleanup_done;
	struct fcoe_task_context *task;
	struct fcoe_task_params *task_params;
	struct scsi_sgl_task_params *sgl_task_params;
	int idx;
	int lun;
	/*
	 * Need to allocate enough room for both sense data and FCP response
	 * data which has a max length of 8 bytes according to spec.
	 */
#define QEDF_SCSI_SENSE_BUFFERSIZE (SCSI_SENSE_BUFFERSIZE + 8)
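	/*
	 * Worked sizing (derived, assuming the kernel's default
	 * SCSI_SENSE_BUFFERSIZE of 96 bytes): 96 + 8 = 104 bytes per command
	 * for the combined sense/FCP_RSP buffer below.
	 */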
	uint8_t *sense_buffer;
	dma_addr_t sense_buffer_dma;
	u32 fcp_resid;
	u32 fcp_rsp_len;
	u32 fcp_sns_len;
	u8 cdb_status;
	u8 fcp_status;
	u8 fcp_rsp_code;
	u8 scsi_comp_flags;
#define QEDF_MAX_REUSE 0xfff
	u16 reuse_count;
	struct qedf_mp_req mp_req;
	void (*cb_func)(struct qedf_els_cb_arg *cb_arg);
	struct qedf_els_cb_arg *cb_arg;
	int fp_idx;
	unsigned int cpu;
	unsigned int int_cpu;
#define QEDF_IOREQ_UNKNOWN_SGE 1
#define QEDF_IOREQ_SLOW_SGE 2
#define QEDF_IOREQ_FAST_SGE 3
	u8 sge_type;
	struct delayed_work rrq_work;

	/* Used for sequence level recovery; i.e. REC/SRR */
	uint32_t rx_buf_off;
	uint32_t tx_buf_off;
	uint32_t rx_id;
	uint32_t task_retry_identifier;

	/*
	 * Used to tell if we need to return a SCSI command
	 * during some form of error processing.
	 */
	bool return_scsi_cmd_on_abts;

	unsigned int alloc;
};

struct qedf_cmd_priv {
	struct qedf_ioreq *io_req;
};

static inline struct qedf_cmd_priv *qedf_priv(struct scsi_cmnd *cmd)
{
	return scsi_cmd_priv(cmd);
}
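/*
 * Illustrative use (a sketch, not new API): paths that only have the
 * midlayer's scsi_cmnd can recover the driver's per-command state with
 * something like:
 *
 *	struct qedf_ioreq *io_req = qedf_priv(sc_cmd)->io_req;
 */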

extern struct workqueue_struct *qedf_io_wq;

struct qedf_rport {
	spinlock_t rport_lock;
#define QEDF_RPORT_SESSION_READY 1
#define QEDF_RPORT_UPLOADING_CONNECTION 2
#define QEDF_RPORT_IN_RESET 3
#define QEDF_RPORT_IN_LUN_RESET 4
#define QEDF_RPORT_IN_TARGET_RESET 5
	unsigned long flags;
	int lun_reset_lun;
	unsigned long retry_delay_timestamp;
	struct fc_rport *rport;
	struct fc_rport_priv *rdata;
	struct qedf_ctx *qedf;
	u32 handle; /* Handle from qed */
	u32 fw_cid; /* fw_cid from qed */
	void __iomem *p_doorbell;
	/* Send queue management */
	atomic_t free_sqes;
	atomic_t ios_to_queue;
	atomic_t num_active_ios;
	struct fcoe_wqe *sq;
	dma_addr_t sq_dma;
	u16 sq_prod_idx;
	u16 fw_sq_prod_idx;
	u16 sq_con_idx;
	u32 sq_mem_size;
	void *sq_pbl;
	dma_addr_t sq_pbl_dma;
	u32 sq_pbl_size;
	u32 sid;
#define QEDF_RPORT_TYPE_DISK 0
#define QEDF_RPORT_TYPE_TAPE 1
	uint dev_type; /* Disk or tape */
	struct list_head peers;
};

/* Used to contain LL2 skb's in ll2_skb_list */
struct qedf_skb_work {
	struct work_struct work;
	struct sk_buff *skb;
	struct qedf_ctx *qedf;
};

struct qedf_fastpath {
#define QEDF_SB_ID_NULL 0xffff
	u16 sb_id;
	struct qed_sb_info *sb_info;
	struct qedf_ctx *qedf;
	/* Keep track of number of completions on this fastpath */
	unsigned long completions;
	uint32_t cq_num_entries;
};

/* Used to pass fastpath information needed to process CQEs */
struct qedf_io_work {
	struct work_struct work;
	struct fcoe_cqe cqe;
	struct qedf_ctx *qedf;
	struct fc_frame *fp;
};

struct qedf_glbl_q_params {
	u64 hw_p_cq; /* Completion queue PBL */
	u64 hw_p_rq; /* Request queue PBL */
	u64 hw_p_cmdq; /* Command queue PBL */
};

struct global_queue {
	struct fcoe_cqe *cq;
	dma_addr_t cq_dma;
	u32 cq_mem_size;
	u32 cq_cons_idx; /* Completion queue consumer index */
	u32 cq_prod_idx;

	void *cq_pbl;
	dma_addr_t cq_pbl_dma;
	u32 cq_pbl_size;
};

/* I/O tracing entry */
#define QEDF_IO_TRACE_SIZE 2048
struct qedf_io_log {
#define QEDF_IO_TRACE_REQ 0
#define QEDF_IO_TRACE_RSP 1
	uint8_t direction;
	uint16_t task_id;
	uint32_t port_id; /* Remote port fabric ID */
	int lun;
	unsigned char op; /* SCSI CDB */
	uint8_t lba[4];
	unsigned int bufflen; /* SCSI buffer length */
	unsigned int sg_count; /* Number of SG elements */
	int result; /* Result passed back to mid-layer */
	unsigned long jiffies; /* Time stamp when I/O logged */
	int refcount; /* Reference count for task id */
	unsigned int req_cpu; /* CPU that the task is queued on */
	unsigned int int_cpu; /* Interrupt CPU that the task is received on */
	unsigned int rsp_cpu; /* CPU that task is returned on */
	u8 sge_type; /* Did we take the slow, single or fast SGE path */
};

/* Number of entries in BDQ */
#define QEDF_BDQ_SIZE 256
#define QEDF_BDQ_BUF_SIZE 2072
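/*
 * Note (derived, not from the original comments): 2072 bytes is one full
 * QEDF_MFS frame, i.e. QEDF_MAX_PAYLOAD (2048) plus the 24-byte
 * struct fc_frame_header, so each BDQ buffer should be able to hold a
 * complete unsolicited frame.
 */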

/* DMA coherent buffers for BDQ */
struct qedf_bdq_buf {
	void *buf_addr;
	dma_addr_t buf_dma;
};

/* Main adapter struct */
struct qedf_ctx {
	struct qedf_dbg_ctx dbg_ctx;
	struct fcoe_ctlr ctlr;
	struct fc_lport *lport;
	u8 data_src_addr[ETH_ALEN];
#define QEDF_LINK_DOWN 0
#define QEDF_LINK_UP 1
	atomic_t link_state;
#define QEDF_DCBX_PENDING 0
#define QEDF_DCBX_DONE 1
	atomic_t dcbx;
#define QEDF_NULL_VLAN_ID -1
#define QEDF_FALLBACK_VLAN 1002
#define QEDF_DEFAULT_PRIO 3
	int vlan_id;
	u8 prio;
	struct qed_dev *cdev;
	struct qed_dev_fcoe_info dev_info;
	struct qed_int_info int_info;
	uint16_t last_command;
	spinlock_t hba_lock;
	struct pci_dev *pdev;
	u64 wwnn;
	u64 wwpn;
	u8 __aligned(16) mac[ETH_ALEN];
	struct list_head fcports;
	atomic_t num_offloads;
	unsigned int curr_conn_id;
	struct workqueue_struct *ll2_recv_wq;
	struct workqueue_struct *link_update_wq;
	struct devlink *devlink;
	struct delayed_work link_update;
	struct delayed_work link_recovery;
	struct completion flogi_compl;
	struct completion fipvlan_compl;

	/*
	 * Used to tell if we're in the window where we are waiting for
	 * the link to come back up before informing fcoe that the link is
	 * down.
	 */
	atomic_t link_down_tmo_valid;
#define QEDF_TIMER_INTERVAL (1 * HZ)
	struct timer_list timer; /* One second book keeping timer */
#define QEDF_DRAIN_ACTIVE 1
#define QEDF_LL2_STARTED 2
#define QEDF_UNLOADING 3
#define QEDF_GRCDUMP_CAPTURE 4
#define QEDF_IN_RECOVERY 5
#define QEDF_DBG_STOP_IO 6
#define QEDF_PROBING 8
#define QEDF_STAG_IN_PROGRESS 9
	unsigned long flags; /* Miscellaneous state flags */
	int fipvlan_retries;
	u8 num_queues;
	struct global_queue **global_queues;
	/* Pointer to array of queue structures */
	struct qedf_glbl_q_params *p_cpuq;
	/* Physical address of array of queue structures */
	dma_addr_t hw_p_cpuq;

	struct qedf_bdq_buf bdq[QEDF_BDQ_SIZE];
	void *bdq_pbl;
	dma_addr_t bdq_pbl_dma;
	size_t bdq_pbl_mem_size;
	void *bdq_pbl_list;
	dma_addr_t bdq_pbl_list_dma;
	u8 bdq_pbl_list_num_entries;
	void __iomem *bdq_primary_prod;
	void __iomem *bdq_secondary_prod;
	uint16_t bdq_prod_idx;

	/* Structure for holding all the fastpath for this qedf_ctx */
	struct qedf_fastpath *fp_array;
	struct qed_fcoe_tid tasks;
	struct qedf_cmd_mgr *cmd_mgr;
	/* Holds the PF parameters we pass to qed to start the FCoE function */
	struct qed_pf_params pf_params;
	/* Used to time middle path ELS and TM commands */
	struct workqueue_struct *timer_work_queue;

#define QEDF_IO_WORK_MIN 64
	mempool_t *io_mempool;
	struct workqueue_struct *dpc_wq;
	struct delayed_work recovery_work;
	struct delayed_work board_disable_work;
	struct delayed_work grcdump_work;
	struct delayed_work stag_work;

	u32 slow_sge_ios;
	u32 fast_sge_ios;

	uint8_t *grcdump;
	uint32_t grcdump_size;

	struct qedf_io_log io_trace_buf[QEDF_IO_TRACE_SIZE];
	spinlock_t io_trace_lock;
	uint16_t io_trace_idx;

	bool stop_io_on_error;

	u32 flogi_cnt;
	u32 flogi_failed;
	u32 flogi_pending;

	/* Used for fc statistics */
	struct mutex stats_mutex;
	u64 input_requests;
	u64 output_requests;
	u64 control_requests;
	u64 packet_aborts;
	u64 alloc_failures;
	u8 lun_resets;
	u8 target_resets;
	u8 task_set_fulls;
	u8 busy;
	/* Used for flush routine */
	struct mutex flush_mutex;
};

struct io_bdt {
	struct qedf_ioreq *io_req;
	struct scsi_sge *bd_tbl;
	dma_addr_t bd_tbl_dma;
	u16 bd_valid;
};

struct qedf_cmd_mgr {
	struct qedf_ctx *qedf;
	u16 idx;
	struct io_bdt **io_bdt_pool;
#define FCOE_PARAMS_NUM_TASKS 2048
	struct qedf_ioreq cmds[FCOE_PARAMS_NUM_TASKS];
	spinlock_t lock;
	atomic_t free_list_cnt;
};

/* Stolen from qed_cxt_api.h and adapted for qed_fcoe_info
 * Usage:
 *
 * void *ptr;
 * ptr = qedf_get_task_mem(&qedf->tasks, 128);
 */
static inline void *qedf_get_task_mem(struct qed_fcoe_tid *info, u32 tid)
{
	return (void *)(info->blocks[tid / info->num_tids_per_block] +
			(tid % info->num_tids_per_block) * info->size);
}

static inline void qedf_stop_all_io(struct qedf_ctx *qedf)
{
	set_bit(QEDF_DBG_STOP_IO, &qedf->flags);
}

/*
 * Externs
 */

/*
 * (QEDF_LOG_NPIV | QEDF_LOG_SESS | QEDF_LOG_LPORT | QEDF_LOG_ELS | QEDF_LOG_MQ
 * | QEDF_LOG_IO | QEDF_LOG_UNSOL | QEDF_LOG_SCSI_TM | QEDF_LOG_MP_REQ |
 * QEDF_LOG_EVT | QEDF_LOG_CONN | QEDF_LOG_DISC | QEDF_LOG_INFO)
 */
#define QEDF_DEFAULT_LOG_MASK 0x3CFB6
extern const struct qed_fcoe_ops *qed_ops;
extern uint qedf_dump_frames;
extern uint qedf_io_tracing;
extern uint qedf_stop_io_on_error;
extern uint qedf_link_down_tmo;
#define QEDF_RETRY_DELAY_MAX 600 /* 60 seconds */
extern bool qedf_retry_delay;
extern uint qedf_debug;

extern struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf);
extern void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr);
extern int qedf_queuecommand(struct Scsi_Host *host,
	struct scsi_cmnd *sc_cmd);
extern void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb);
extern u8 *qedf_get_src_mac(struct fc_lport *lport);
extern void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb);
extern void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf);
extern void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req);
extern void qedf_process_warning_compl(struct qedf_ctx *qedf,
	struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
extern void qedf_process_error_detect(struct qedf_ctx *qedf,
	struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
extern void qedf_flush_active_ios(struct qedf_rport *fcport, int lun);
extern void qedf_release_cmd(struct kref *ref);
extern int qedf_initiate_abts(struct qedf_ioreq *io_req,
	bool return_scsi_cmd_on_abts);
extern void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req);
extern struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport,
	u8 cmd_type);

extern const struct attribute_group *qedf_host_groups[];
extern void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
	unsigned int timer_msec);
extern int qedf_init_mp_req(struct qedf_ioreq *io_req);
extern void qedf_init_mp_task(struct qedf_ioreq *io_req,
	struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe);
extern u16 qedf_get_sqe_idx(struct qedf_rport *fcport);
extern void qedf_ring_doorbell(struct qedf_rport *fcport);
extern void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *els_req);
extern int qedf_send_rrq(struct qedf_ioreq *aborted_io_req);
extern int qedf_send_adisc(struct qedf_rport *fcport, struct fc_frame *fp);
extern int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
	bool return_scsi_cmd_on_abts);
extern void qedf_process_cleanup_compl(struct qedf_ctx *qedf,
	struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
extern int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags);
extern void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req);
extern void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe);
extern void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
	int result);
extern void qedf_set_vlan_id(struct qedf_ctx *qedf, int vlan_id);
extern void qedf_create_sysfs_ctx_attr(struct qedf_ctx *qedf);
extern void qedf_remove_sysfs_ctx_attr(struct qedf_ctx *qedf);
extern void qedf_capture_grc_dump(struct qedf_ctx *qedf);
bool qedf_wait_for_upload(struct qedf_ctx *qedf);
extern void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
	struct fcoe_cqe *cqe);
extern void qedf_restart_rport(struct qedf_rport *fcport);
extern int qedf_send_rec(struct qedf_ioreq *orig_io_req);
extern int qedf_post_io_req(struct qedf_rport *fcport,
	struct qedf_ioreq *io_req);
extern void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf,
	struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
extern int qedf_send_flogi(struct qedf_ctx *qedf);
extern void qedf_get_protocol_tlv_data(void *dev, void *data);
extern void qedf_fp_io_handler(struct work_struct *work);
extern void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data);
extern void qedf_wq_grcdump(struct work_struct *work);
void qedf_stag_change_work(struct work_struct *work);
void qedf_ctx_soft_reset(struct fc_lport *lport);
extern void qedf_schedule_hw_err_handler(void *dev,
	enum qed_hw_err_type err_type);

#define FCOE_WORD_TO_BYTE 4
#define QEDF_MAX_TASK_NUM 0xFFFF
#define QL45xxx 0x165C
#define QL41xxx 0x8080
#define MAX_CT_PAYLOAD 2048
#define DISCOVERED_PORTS 4
#define NUMBER_OF_PORTS 1

struct fip_vlan {
	struct ethhdr eth;
	struct fip_header fip;
	struct {
		struct fip_mac_desc mac;
		struct fip_wwn_desc wwnn;
	} desc;
};

/* SQ/CQ Sizes */
#define GBL_RSVD_TASKS 16
#define NUM_TASKS_PER_CONNECTION 1024
#define NUM_RW_TASKS_PER_CONNECTION 512
#define FCOE_PARAMS_CQ_NUM_ENTRIES FCOE_PARAMS_NUM_TASKS

#define FCOE_PARAMS_CMDQ_NUM_ENTRIES FCOE_PARAMS_NUM_TASKS
#define SQ_NUM_ENTRIES NUM_TASKS_PER_CONNECTION

#define QEDF_FCOE_PARAMS_GL_RQ_PI 0
#define QEDF_FCOE_PARAMS_GL_CMD_PI 1

#define QEDF_READ (1 << 1)
#define QEDF_WRITE (1 << 0)
#define MAX_FIBRE_LUNS 0xffffffff

#define MIN_NUM_CPUS_MSIX(x) min_t(u32, x->dev_info.num_cqs, \
	num_online_cpus())
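/*
 * Illustrative sketch: the probe path would typically cap the number of
 * fastpath queues at the smaller of the device's CQ count and the online
 * CPU count, e.g. (the assignment below is an assumption, not lifted from
 * this header):
 *
 *	qedf->num_queues = MIN_NUM_CPUS_MSIX(qedf);
 */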

/*
 * PCI function probe defines
 */
/* Probe/remove called during normal PCI probe */
#define QEDF_MODE_NORMAL 0
/* Probe/remove called from qed error recovery */
#define QEDF_MODE_RECOVERY 1

#define SUPPORTED_25000baseKR_Full (1<<27)
#define SUPPORTED_50000baseKR2_Full (1<<28)
#define SUPPORTED_100000baseKR4_Full (1<<29)
#define SUPPORTED_100000baseCR4_Full (1<<30)

#endif