/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _HNS_ROCE_DEVICE_H
#define _HNS_ROCE_DEVICE_H

#include <rdma/ib_verbs.h>
#include <rdma/hns-abi.h>

#define PCI_REVISION_ID_HIP08			0x21
#define PCI_REVISION_ID_HIP09			0x30

#define HNS_ROCE_MAX_MSG_LEN			0x80000000

#define HNS_ROCE_IB_MIN_SQ_STRIDE		6

#define BA_BYTE_LEN				8

#define HNS_ROCE_MIN_CQE_NUM			0x40
#define HNS_ROCE_MIN_SRQ_WQE_NUM		1

#define HNS_ROCE_MAX_IRQ_NUM			128

#define HNS_ROCE_SGE_IN_WQE			2
#define HNS_ROCE_SGE_SHIFT			4

#define EQ_ENABLE				1
#define EQ_DISABLE				0

#define HNS_ROCE_CEQ				0
#define HNS_ROCE_AEQ				1

#define HNS_ROCE_CEQE_SIZE 0x4
#define HNS_ROCE_AEQE_SIZE 0x10

#define HNS_ROCE_V3_EQE_SIZE 0x40

#define HNS_ROCE_V2_CQE_SIZE 32
#define HNS_ROCE_V3_CQE_SIZE 64

#define HNS_ROCE_V2_QPC_SZ 256
#define HNS_ROCE_V3_QPC_SZ 512

#define HNS_ROCE_MAX_PORTS			6
#define HNS_ROCE_GID_SIZE			16
#define HNS_ROCE_SGE_SIZE			16
#define HNS_ROCE_DWQE_SIZE			65536

#define HNS_ROCE_HOP_NUM_0			0xff

#define MR_TYPE_MR				0x00
#define MR_TYPE_FRMR				0x01
#define MR_TYPE_DMA				0x03

#define HNS_ROCE_FRMR_MAX_PA			512

#define PKEY_ID					0xffff
#define NODE_DESC_SIZE				64
#define DB_REG_OFFSET				0x1000

/* Configured to HW for PAGE_SIZE larger than 4KB */
#define PG_SHIFT_OFFSET				(PAGE_SHIFT - 12)

#define HNS_ROCE_IDX_QUE_ENTRY_SZ		4
#define SRQ_DB_REG				0x230

#define HNS_ROCE_QP_BANK_NUM 8
#define HNS_ROCE_CQ_BANK_NUM 4

#define CQ_BANKID_SHIFT 2
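
/*
 * A sketch of the assumed relationship between a CQN and its bank (see
 * struct hns_roce_cq_table below): the low CQ_BANKID_SHIFT bits of the CQN
 * select one of the HNS_ROCE_CQ_BANK_NUM (1 << CQ_BANKID_SHIFT) banks,
 * e.g.:
 *
 *	bankid = cqn & (HNS_ROCE_CQ_BANK_NUM - 1);
 */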

enum {
	SERV_TYPE_RC,
	SERV_TYPE_UC,
	SERV_TYPE_RD,
	SERV_TYPE_UD,
	SERV_TYPE_XRC = 5,
};

enum hns_roce_qp_state {
	HNS_ROCE_QP_STATE_RST,
	HNS_ROCE_QP_STATE_INIT,
	HNS_ROCE_QP_STATE_RTR,
	HNS_ROCE_QP_STATE_RTS,
	HNS_ROCE_QP_STATE_SQD,
	HNS_ROCE_QP_STATE_ERR,
	HNS_ROCE_QP_NUM_STATE,
};

enum hns_roce_event {
	HNS_ROCE_EVENT_TYPE_PATH_MIG                  = 0x01,
	HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED           = 0x02,
	HNS_ROCE_EVENT_TYPE_COMM_EST                  = 0x03,
	HNS_ROCE_EVENT_TYPE_SQ_DRAINED                = 0x04,
	HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR            = 0x05,
	HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR    = 0x06,
	HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR     = 0x07,
	HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH           = 0x08,
	HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH        = 0x09,
	HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR           = 0x0a,
	HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR           = 0x0b,
	HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW               = 0x0c,
	HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID             = 0x0d,
	HNS_ROCE_EVENT_TYPE_PORT_CHANGE               = 0x0f,
	/* 0x10 and 0x11 are unused in the current application case */
	HNS_ROCE_EVENT_TYPE_DB_OVERFLOW               = 0x12,
	HNS_ROCE_EVENT_TYPE_MB                        = 0x13,
	HNS_ROCE_EVENT_TYPE_FLR			      = 0x15,
	HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION	      = 0x16,
	HNS_ROCE_EVENT_TYPE_INVALID_XRCETH	      = 0x17,
};

#define HNS_ROCE_CAP_FLAGS_EX_SHIFT 12

enum {
	HNS_ROCE_CAP_FLAG_REREG_MR		= BIT(0),
	HNS_ROCE_CAP_FLAG_ROCE_V1_V2		= BIT(1),
	HNS_ROCE_CAP_FLAG_RQ_INLINE		= BIT(2),
	HNS_ROCE_CAP_FLAG_CQ_RECORD_DB		= BIT(3),
	HNS_ROCE_CAP_FLAG_QP_RECORD_DB		= BIT(4),
	HNS_ROCE_CAP_FLAG_SRQ			= BIT(5),
	HNS_ROCE_CAP_FLAG_XRC			= BIT(6),
	HNS_ROCE_CAP_FLAG_MW			= BIT(7),
	HNS_ROCE_CAP_FLAG_FRMR                  = BIT(8),
	HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL		= BIT(9),
	HNS_ROCE_CAP_FLAG_ATOMIC		= BIT(10),
	HNS_ROCE_CAP_FLAG_DIRECT_WQE		= BIT(12),
	HNS_ROCE_CAP_FLAG_SDI_MODE		= BIT(14),
	HNS_ROCE_CAP_FLAG_STASH			= BIT(17),
};

#define HNS_ROCE_DB_TYPE_COUNT			2
#define HNS_ROCE_DB_UNIT_SIZE			4

enum {
	HNS_ROCE_DB_PER_PAGE = PAGE_SIZE / 4
};

enum hns_roce_reset_stage {
	HNS_ROCE_STATE_NON_RST,
	HNS_ROCE_STATE_RST_BEF_DOWN,
	HNS_ROCE_STATE_RST_DOWN,
	HNS_ROCE_STATE_RST_UNINIT,
	HNS_ROCE_STATE_RST_INIT,
	HNS_ROCE_STATE_RST_INITED,
};

enum hns_roce_instance_state {
	HNS_ROCE_STATE_NON_INIT,
	HNS_ROCE_STATE_INIT,
	HNS_ROCE_STATE_INITED,
	HNS_ROCE_STATE_UNINIT,
};

enum {
	HNS_ROCE_RST_DIRECT_RETURN		= 0,
};

#define HNS_ROCE_CMD_SUCCESS			1

/* The minimum page size is 4K for hardware */
#define HNS_HW_PAGE_SHIFT			12
#define HNS_HW_PAGE_SIZE			(1 << HNS_HW_PAGE_SHIFT)
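
/*
 * Worked example for the page-size macros above: with 64KB kernel pages
 * (PAGE_SHIFT = 16), PG_SHIFT_OFFSET is 16 - 12 = 4, so one kernel page
 * spans 1 << PG_SHIFT_OFFSET = 16 hardware pages of HNS_HW_PAGE_SIZE (4KB).
 */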

struct hns_roce_uar {
	u64		pfn;
	unsigned long	index;
	unsigned long	logic_idx;
};

enum hns_roce_mmap_type {
	HNS_ROCE_MMAP_TYPE_DB = 1,
	HNS_ROCE_MMAP_TYPE_DWQE,
};

struct hns_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	enum hns_roce_mmap_type mmap_type;
	u64 address;
};

struct hns_roce_ucontext {
	struct ib_ucontext	ibucontext;
	struct hns_roce_uar	uar;
	struct list_head	page_list;
	struct mutex		page_mutex;
	struct hns_user_mmap_entry *db_mmap_entry;
};

struct hns_roce_pd {
	struct ib_pd		ibpd;
	unsigned long		pdn;
};

struct hns_roce_xrcd {
	struct ib_xrcd ibxrcd;
	u32 xrcdn;
};

struct hns_roce_bitmap {
	/* position of the last bit set to 1 found by bitmap traversal */
	unsigned long		last;
	unsigned long		top;
	unsigned long		max;
	unsigned long		reserved_top;
	unsigned long		mask;
	spinlock_t		lock;
	unsigned long		*table;
};

struct hns_roce_ida {
	struct ida ida;
	u32 min; /* Lowest ID to allocate.  */
	u32 max; /* Highest ID to allocate. */
};

/* For Hardware Entry Memory */
struct hns_roce_hem_table {
	/* HEM type: 0 = qpc, 1 = mtt, 2 = cqc, 3 = srq, 4 = other */
	u32		type;
	/* HEM array element num */
	unsigned long	num_hem;
	/* Single obj size */
	unsigned long	obj_size;
	unsigned long	table_chunk_size;
	int		lowmem;
	struct mutex	mutex;
	struct hns_roce_hem **hem;
	u64		**bt_l1;
	dma_addr_t	*bt_l1_dma_addr;
	u64		**bt_l0;
	dma_addr_t	*bt_l0_dma_addr;
};

struct hns_roce_buf_region {
	u32 offset; /* page offset */
	u32 count; /* page count */
	int hopnum; /* addressing hop num */
};

#define HNS_ROCE_MAX_BT_REGION	3
#define HNS_ROCE_MAX_BT_LEVEL	3
struct hns_roce_hem_list {
	struct list_head root_bt;
	/* link all bt dma mem by hop config */
	struct list_head mid_bt[HNS_ROCE_MAX_BT_REGION][HNS_ROCE_MAX_BT_LEVEL];
	struct list_head btm_bt; /* link all bottom bt in @mid_bt */
	dma_addr_t root_ba; /* pointer to the root ba table */
};

struct hns_roce_buf_attr {
	struct {
		size_t	size;  /* region size */
		int	hopnum; /* multi-hop addressing hop num */
	} region[HNS_ROCE_MAX_BT_REGION];
	unsigned int region_count; /* valid region count */
	unsigned int page_shift;  /* buffer page shift */
	unsigned int user_access; /* umem access flag */
	bool mtt_only; /* only alloc buffer-required MTT memory */
};

struct hns_roce_hem_cfg {
	dma_addr_t	root_ba; /* root BA table's address */
	bool		is_direct; /* addressing without BA table */
	unsigned int	ba_pg_shift; /* BA table page shift */
	unsigned int	buf_pg_shift; /* buffer page shift */
	unsigned int	buf_pg_count;  /* buffer page count */
	struct hns_roce_buf_region region[HNS_ROCE_MAX_BT_REGION];
	unsigned int	region_count;
};

/* memory translation region */
struct hns_roce_mtr {
	struct hns_roce_hem_list hem_list; /* multi-hop addressing resource */
	struct ib_umem		*umem; /* user space buffer */
	struct hns_roce_buf	*kmem; /* kernel space buffer */
	struct hns_roce_hem_cfg  hem_cfg; /* config for hardware addressing */
};

struct hns_roce_mw {
	struct ib_mw		ibmw;
	u32			pdn;
	u32			rkey;
	int			enabled; /* MW's active status */
	u32			pbl_hop_num;
	u32			pbl_ba_pg_sz;
	u32			pbl_buf_pg_sz;
};

struct hns_roce_mr {
	struct ib_mr		ibmr;
	u64			iova; /* MR's virtual original addr */
	u64			size; /* Address range of MR */
	u32			key; /* Key of MR */
	u32			pd;   /* PD num of MR */
	u32			access; /* Access permission of MR */
	int			enabled; /* MR's active status */
	int			type; /* MR's register type */
	u32			pbl_hop_num; /* multi-hop number */
	struct hns_roce_mtr	pbl_mtr;
	u32			npages;
	dma_addr_t		*page_list;
};

struct hns_roce_mr_table {
	struct hns_roce_ida mtpt_ida;
	struct hns_roce_hem_table	mtpt_table;
};

struct hns_roce_wq {
	u64		*wrid;     /* Work request ID */
	spinlock_t	lock;
	u32		wqe_cnt;  /* WQE num */
	u32		max_gs;
	u32		rsv_sge;
	u32		offset;
	u32		wqe_shift; /* WQE size */
	u32		head;
	u32		tail;
	void __iomem	*db_reg;
};

struct hns_roce_sge {
	unsigned int	sge_cnt; /* SGE num */
	u32		offset;
	u32		sge_shift; /* SGE size */
};

struct hns_roce_buf_list {
	void		*buf;
	dma_addr_t	map;
};

/*
 * %HNS_ROCE_BUF_DIRECT indicates that all memory must be in a contiguous
 * DMA address range.
 *
 * %HNS_ROCE_BUF_NOSLEEP indicates that the caller cannot sleep.
 *
 * %HNS_ROCE_BUF_NOFAIL indicates that the allocation fails only when the
 * allocated size is zero; an allocation smaller than the required size
 * still succeeds.
 */
enum {
	HNS_ROCE_BUF_DIRECT = BIT(0),
	HNS_ROCE_BUF_NOSLEEP = BIT(1),
	HNS_ROCE_BUF_NOFAIL = BIT(2),
};

struct hns_roce_buf {
	struct hns_roce_buf_list	*trunk_list;
	u32				ntrunks;
	u32				npages;
	unsigned int			trunk_shift;
	unsigned int			page_shift;
};

struct hns_roce_db_pgdir {
	struct list_head	list;
	DECLARE_BITMAP(order0, HNS_ROCE_DB_PER_PAGE);
	DECLARE_BITMAP(order1, HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT);
	unsigned long		*bits[HNS_ROCE_DB_TYPE_COUNT];
	u32			*page;
	dma_addr_t		db_dma;
};
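
/*
 * An illustrative note on the pgdir layout above, assuming 4KB pages: one
 * page holds HNS_ROCE_DB_PER_PAGE (1024) doorbell units of
 * HNS_ROCE_DB_UNIT_SIZE (4) bytes each. The order0 bitmap tracks single
 * units and the order1 bitmap tracks aligned pairs, like a tiny buddy
 * allocator with HNS_ROCE_DB_TYPE_COUNT (2) allocation orders.
 */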

struct hns_roce_user_db_page {
	struct list_head	list;
	struct ib_umem		*umem;
	unsigned long		user_virt;
	refcount_t		refcount;
};

struct hns_roce_db {
	u32		*db_record;
	union {
		struct hns_roce_db_pgdir *pgdir;
		struct hns_roce_user_db_page *user_page;
	} u;
	dma_addr_t	dma;
	void		*virt_addr;
	unsigned long	index;
	unsigned long	order;
};

struct hns_roce_cq {
	struct ib_cq			ib_cq;
	struct hns_roce_mtr		mtr;
	struct hns_roce_db		db;
	u32				flags;
	spinlock_t			lock;
	u32				cq_depth;
	u32				cons_index;
	u32				*set_ci_db;
	void __iomem			*db_reg;
	int				arm_sn;
	int				cqe_size;
	unsigned long			cqn;
	u32				vector;
	refcount_t			refcount;
	struct completion		free;
	struct list_head		sq_list; /* all qps on this send cq */
	struct list_head		rq_list; /* all qps on this recv cq */
	int				is_armed; /* cq is armed */
	struct list_head		node; /* all armed cqs are on a list */
};

struct hns_roce_idx_que {
	struct hns_roce_mtr		mtr;
	u32				entry_shift;
	unsigned long			*bitmap;
	u32				head;
	u32				tail;
};

struct hns_roce_srq {
	struct ib_srq		ibsrq;
	unsigned long		srqn;
	u32			wqe_cnt;
	int			max_gs;
	u32			rsv_sge;
	u32			wqe_shift;
	u32			cqn;
	u32			xrcdn;
	void __iomem		*db_reg;

	refcount_t		refcount;
	struct completion	free;

	struct hns_roce_mtr	buf_mtr;

	u64		       *wrid;
	struct hns_roce_idx_que idx_que;
	spinlock_t		lock;
	struct mutex		mutex;
	void (*event)(struct hns_roce_srq *srq, enum hns_roce_event event);
};

struct hns_roce_uar_table {
	struct hns_roce_bitmap bitmap;
};

struct hns_roce_bank {
	struct ida ida;
	u32 inuse; /* Number of IDs allocated */
	u32 min; /* Lowest ID to allocate.  */
	u32 max; /* Highest ID to allocate. */
	u32 next; /* Next ID to allocate. */
};

struct hns_roce_idx_table {
	u32 *spare_idx;
	u32 head;
	u32 tail;
};

struct hns_roce_qp_table {
	struct hns_roce_hem_table	qp_table;
	struct hns_roce_hem_table	irrl_table;
	struct hns_roce_hem_table	trrl_table;
	struct hns_roce_hem_table	sccc_table;
	struct mutex			scc_mutex;
	struct hns_roce_bank bank[HNS_ROCE_QP_BANK_NUM];
	struct mutex bank_mutex;
	struct hns_roce_idx_table	idx_table;
};

struct hns_roce_cq_table {
	struct xarray			array;
	struct hns_roce_hem_table	table;
	struct hns_roce_bank bank[HNS_ROCE_CQ_BANK_NUM];
	struct mutex			bank_mutex;
};

struct hns_roce_srq_table {
	struct hns_roce_ida		srq_ida;
	struct xarray			xa;
	struct hns_roce_hem_table	table;
};

struct hns_roce_av {
	u8 port;
	u8 gid_index;
	u8 stat_rate;
	u8 hop_limit;
	u32 flowlabel;
	u16 udp_sport;
	u8 sl;
	u8 tclass;
	u8 dgid[HNS_ROCE_GID_SIZE];
	u8 mac[ETH_ALEN];
	u16 vlan_id;
	u8 vlan_en;
};

struct hns_roce_ah {
	struct ib_ah		ibah;
	struct hns_roce_av	av;
};

struct hns_roce_cmd_context {
	struct completion	done;
	int			result;
	int			next;
	u64			out_param;
	u16			token;
	u16			busy;
};

struct hns_roce_cmdq {
	struct dma_pool		*pool;
	struct semaphore	poll_sem;
	/*
	 * Event mode: serializes access to the cmd registers, ensuring
	 * that max_cmds is not exceeded and that users stay within the
	 * limit region.
	 */
	struct semaphore	event_sem;
	int			max_cmds;
	spinlock_t		context_lock;
	int			free_head;
	struct hns_roce_cmd_context *context;
	/*
	 * Whether the process uses event mode; initialized to a non-zero
	 * default. Once the event queue for cmd events is ready, the
	 * driver can switch into event mode; on device close it switches
	 * back into poll mode (non-event mode).
	 */
	u8			use_events;
};

struct hns_roce_cmd_mailbox {
	void		       *buf;
	dma_addr_t		dma;
};

struct hns_roce_dev;

struct hns_roce_rinl_sge {
	void			*addr;
	u32			len;
};

struct hns_roce_rinl_wqe {
	struct hns_roce_rinl_sge *sg_list;
	u32			 sge_cnt;
};

struct hns_roce_rinl_buf {
	struct hns_roce_rinl_wqe *wqe_list;
	u32			 wqe_cnt;
};

enum {
	HNS_ROCE_FLUSH_FLAG = 0,
};

struct hns_roce_work {
	struct hns_roce_dev *hr_dev;
	struct work_struct work;
	int event_type;
	int sub_type;
	u32 queue_num;
};

struct hns_roce_qp {
	struct ib_qp		ibqp;
	struct hns_roce_wq	rq;
	struct hns_roce_db	rdb;
	struct hns_roce_db	sdb;
	unsigned long		en_flags;
	u32			doorbell_qpn;
	enum ib_sig_type	sq_signal_bits;
	struct hns_roce_wq	sq;

	struct hns_roce_mtr	mtr;

	u32			buff_size;
	struct mutex		mutex;
	u8			port;
	u8			phy_port;
	u8			sl;
	u8			resp_depth;
	u8			state;
	u32                     atomic_rd_en;
	u32			qkey;
	void			(*event)(struct hns_roce_qp *qp,
					 enum hns_roce_event event_type);
	unsigned long		qpn;

	u32			xrcdn;

	refcount_t		refcount;
	struct completion	free;

	struct hns_roce_sge	sge;
	u32			next_sge;
	enum ib_mtu		path_mtu;
	u32			max_inline_data;

	/* 0: flush needed, 1: unneeded */
	unsigned long		flush_flag;
	struct hns_roce_work	flush_work;
	struct hns_roce_rinl_buf rq_inl_buf;
	struct list_head	node; /* all qps are on a list */
	struct list_head	rq_node; /* all recv qps are on a list */
	struct list_head	sq_node; /* all send qps are on a list */
	struct hns_user_mmap_entry *dwqe_mmap_entry;
};

struct hns_roce_ib_iboe {
	spinlock_t		lock;
	struct net_device      *netdevs[HNS_ROCE_MAX_PORTS];
	struct notifier_block	nb;
	u8			phy_port[HNS_ROCE_MAX_PORTS];
};

struct hns_roce_ceqe {
	__le32	comp;
	__le32	rsv[15];
};

struct hns_roce_aeqe {
	__le32 asyn;
	union {
		struct {
			__le32 num;
			u32 rsv0;
			u32 rsv1;
		} queue_event;

		struct {
			__le64  out_param;
			__le16  token;
			u8	status;
			u8	rsv0;
		} __packed cmd;
	} event;
	__le32 rsv[12];
};

struct hns_roce_eq {
	struct hns_roce_dev		*hr_dev;
	void __iomem			*db_reg;

	int				type_flag; /* Aeq:1 ceq:0 */
	int				eqn;
	u32				entries;
	int				eqe_size;
	int				irq;
	u32				cons_index;
	int				over_ignore;
	int				coalesce;
	int				arm_st;
	int				hop_num;
	struct hns_roce_mtr		mtr;
	u16				eq_max_cnt;
	u32				eq_period;
	int				shift;
	int				event_type;
	int				sub_type;
};

struct hns_roce_eq_table {
	struct hns_roce_eq	*eq;
};

enum cong_type {
	CONG_TYPE_DCQCN,
	CONG_TYPE_LDCP,
	CONG_TYPE_HC3,
	CONG_TYPE_DIP,
};

struct hns_roce_caps {
	u64		fw_ver;
	u8		num_ports;
	int		gid_table_len[HNS_ROCE_MAX_PORTS];
	int		pkey_table_len[HNS_ROCE_MAX_PORTS];
	int		local_ca_ack_delay;
	int		num_uars;
	u32		phy_num_uars;
	u32		max_sq_sg;
	u32		max_sq_inline;
	u32		max_rq_sg;
	u32		max_extend_sg;
	u32		num_qps;
	u32		num_pi_qps;
	u32		reserved_qps;
	int		num_qpc_timer;
	int		num_cqc_timer;
	u32		num_srqs;
	u32		max_wqes;
	u32		max_srq_wrs;
	u32		max_srq_sges;
	u32		max_sq_desc_sz;
	u32		max_rq_desc_sz;
	u32		max_srq_desc_sz;
	int		max_qp_init_rdma;
	int		max_qp_dest_rdma;
	u32		num_cqs;
	u32		max_cqes;
	u32		min_cqes;
	u32		min_wqes;
	u32		reserved_cqs;
	u32		reserved_srqs;
	int		num_aeq_vectors;
	int		num_comp_vectors;
	int		num_other_vectors;
	u32		num_mtpts;
	u32		num_mtt_segs;
	u32		num_srqwqe_segs;
	u32		num_idx_segs;
	int		reserved_mrws;
	int		reserved_uars;
	int		num_pds;
	int		reserved_pds;
	u32		num_xrcds;
	u32		reserved_xrcds;
	u32		mtt_entry_sz;
	u32		cqe_sz;
	u32		page_size_cap;
	u32		reserved_lkey;
	int		mtpt_entry_sz;
	int		qpc_sz;
	int		irrl_entry_sz;
	int		trrl_entry_sz;
	int		cqc_entry_sz;
	int		sccc_sz;
	int		qpc_timer_entry_sz;
	int		cqc_timer_entry_sz;
	int		srqc_entry_sz;
	int		idx_entry_sz;
	u32		pbl_ba_pg_sz;
	u32		pbl_buf_pg_sz;
	u32		pbl_hop_num;
	int		aeqe_depth;
	int		ceqe_depth;
	u32		aeqe_size;
	u32		ceqe_size;
	enum ib_mtu	max_mtu;
	u32		qpc_bt_num;
	u32		qpc_timer_bt_num;
	u32		srqc_bt_num;
	u32		cqc_bt_num;
	u32		cqc_timer_bt_num;
	u32		mpt_bt_num;
	u32		eqc_bt_num;
	u32		smac_bt_num;
	u32		sgid_bt_num;
	u32		sccc_bt_num;
	u32		gmv_bt_num;
	u32		qpc_ba_pg_sz;
	u32		qpc_buf_pg_sz;
	u32		qpc_hop_num;
	u32		srqc_ba_pg_sz;
	u32		srqc_buf_pg_sz;
	u32		srqc_hop_num;
	u32		cqc_ba_pg_sz;
	u32		cqc_buf_pg_sz;
	u32		cqc_hop_num;
	u32		mpt_ba_pg_sz;
	u32		mpt_buf_pg_sz;
	u32		mpt_hop_num;
	u32		mtt_ba_pg_sz;
	u32		mtt_buf_pg_sz;
	u32		mtt_hop_num;
	u32		wqe_sq_hop_num;
	u32		wqe_sge_hop_num;
	u32		wqe_rq_hop_num;
	u32		sccc_ba_pg_sz;
	u32		sccc_buf_pg_sz;
	u32		sccc_hop_num;
	u32		qpc_timer_ba_pg_sz;
	u32		qpc_timer_buf_pg_sz;
	u32		qpc_timer_hop_num;
	u32		cqc_timer_ba_pg_sz;
	u32		cqc_timer_buf_pg_sz;
	u32		cqc_timer_hop_num;
	u32		cqe_ba_pg_sz; /* page_size = 4K*(2^cqe_ba_pg_sz) */
	u32		cqe_buf_pg_sz;
	u32		cqe_hop_num;
	u32		srqwqe_ba_pg_sz;
	u32		srqwqe_buf_pg_sz;
	u32		srqwqe_hop_num;
	u32		idx_ba_pg_sz;
	u32		idx_buf_pg_sz;
	u32		idx_hop_num;
	u32		eqe_ba_pg_sz;
	u32		eqe_buf_pg_sz;
	u32		eqe_hop_num;
	u32		gmv_entry_num;
	u32		gmv_entry_sz;
	u32		gmv_ba_pg_sz;
	u32		gmv_buf_pg_sz;
	u32		gmv_hop_num;
	u32		sl_num;
	u32		llm_buf_pg_sz;
	u32		chunk_sz; /* chunk size in non-multihop mode */
	u64		flags;
	u16		default_ceq_max_cnt;
	u16		default_ceq_period;
	u16		default_aeq_max_cnt;
	u16		default_aeq_period;
	u16		default_aeq_arm_st;
	u16		default_ceq_arm_st;
	enum cong_type	cong_type;
};
struct hns_roce_dfx_hw {
	int (*query_cqc_info)(struct hns_roce_dev *hr_dev, u32 cqn,
			      int *buffer);
};

enum hns_roce_device_state {
	HNS_ROCE_DEVICE_STATE_INITED,
	HNS_ROCE_DEVICE_STATE_RST_DOWN,
	HNS_ROCE_DEVICE_STATE_UNINIT,
};

struct hns_roce_hw {
	int (*cmq_init)(struct hns_roce_dev *hr_dev);
	void (*cmq_exit)(struct hns_roce_dev *hr_dev);
	int (*hw_profile)(struct hns_roce_dev *hr_dev);
	int (*hw_init)(struct hns_roce_dev *hr_dev);
	void (*hw_exit)(struct hns_roce_dev *hr_dev);
	int (*post_mbox)(struct hns_roce_dev *hr_dev, u64 in_param,
			 u64 out_param, u32 in_modifier, u8 op_modifier, u16 op,
			 u16 token, int event);
	int (*poll_mbox_done)(struct hns_roce_dev *hr_dev,
			      unsigned int timeout);
	bool (*chk_mbox_avail)(struct hns_roce_dev *hr_dev, bool *is_busy);
	int (*set_gid)(struct hns_roce_dev *hr_dev, int gid_index,
		       const union ib_gid *gid, const struct ib_gid_attr *attr);
	int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port,
		       const u8 *addr);
	int (*write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
			  struct hns_roce_mr *mr);
	int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev,
				struct hns_roce_mr *mr, int flags,
				void *mb_buf);
	int (*frmr_write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
			       struct hns_roce_mr *mr);
	int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw);
	void (*write_cqc)(struct hns_roce_dev *hr_dev,
			  struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
			  dma_addr_t dma_handle);
	int (*set_hem)(struct hns_roce_dev *hr_dev,
		       struct hns_roce_hem_table *table, int obj, int step_idx);
	int (*clear_hem)(struct hns_roce_dev *hr_dev,
			 struct hns_roce_hem_table *table, int obj,
			 int step_idx);
	int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
			 int attr_mask, enum ib_qp_state cur_state,
			 enum ib_qp_state new_state);
	int (*qp_flow_control_init)(struct hns_roce_dev *hr_dev,
			 struct hns_roce_qp *hr_qp);
	int (*init_eq)(struct hns_roce_dev *hr_dev);
	void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
	int (*write_srqc)(struct hns_roce_srq *srq, void *mb_buf);
	const struct ib_device_ops *hns_roce_dev_ops;
	const struct ib_device_ops *hns_roce_dev_srq_ops;
};

struct hns_roce_dev {
	struct ib_device	ib_dev;
	struct pci_dev		*pci_dev;
	struct device		*dev;
	struct hns_roce_uar     priv_uar;
	const char		*irq_names[HNS_ROCE_MAX_IRQ_NUM];
	spinlock_t		sm_lock;
	bool			active;
	bool			is_reset;
	bool			dis_db;
	unsigned long		reset_cnt;
	struct hns_roce_ib_iboe iboe;
	enum hns_roce_device_state state;
	struct list_head	qp_list; /* list of all qps on this dev */
	spinlock_t		qp_list_lock; /* protect qp_list */
	struct list_head	dip_list; /* list of all dest ips on this dev */
	spinlock_t		dip_list_lock; /* protect dip_list */

	struct list_head        pgdir_list;
	struct mutex            pgdir_mutex;
	int			irq[HNS_ROCE_MAX_IRQ_NUM];
	u8 __iomem		*reg_base;
	void __iomem		*mem_base;
	struct hns_roce_caps	caps;
	struct xarray		qp_table_xa;

	unsigned char	dev_addr[HNS_ROCE_MAX_PORTS][ETH_ALEN];
	u64			sys_image_guid;
	u32                     vendor_id;
	u32                     vendor_part_id;
	u32                     hw_rev;
	void __iomem            *priv_addr;

	struct hns_roce_cmdq	cmd;
	struct hns_roce_ida pd_ida;
	struct hns_roce_ida xrcd_ida;
	struct hns_roce_ida uar_ida;
	struct hns_roce_mr_table  mr_table;
	struct hns_roce_cq_table  cq_table;
	struct hns_roce_srq_table srq_table;
	struct hns_roce_qp_table  qp_table;
	struct hns_roce_eq_table  eq_table;
	struct hns_roce_hem_table  qpc_timer_table;
	struct hns_roce_hem_table  cqc_timer_table;
	/* GMV is the memory area that the driver allocates for the hardware
	 * to store SGID, SMAC and VLAN information.
	 */
	struct hns_roce_hem_table  gmv_table;

	int			cmd_mod;
	int			loop_idc;
	u32			sdb_offset;
	u32			odb_offset;
	const struct hns_roce_hw *hw;
	void			*priv;
	struct workqueue_struct *irq_workq;
	const struct hns_roce_dfx_hw *dfx;
	u32 func_num;
	u32 is_vf;
	u32 cong_algo_tmpl_id;
	u64 dwqe_page;
};

static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
{
	return container_of(ib_dev, struct hns_roce_dev, ib_dev);
}

static inline struct hns_roce_ucontext
			*to_hr_ucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct hns_roce_ucontext, ibucontext);
}

static inline struct hns_roce_pd *to_hr_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct hns_roce_pd, ibpd);
}

static inline struct hns_roce_xrcd *to_hr_xrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct hns_roce_xrcd, ibxrcd);
}

static inline struct hns_roce_ah *to_hr_ah(struct ib_ah *ibah)
{
	return container_of(ibah, struct hns_roce_ah, ibah);
}

static inline struct hns_roce_mr *to_hr_mr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct hns_roce_mr, ibmr);
}

static inline struct hns_roce_mw *to_hr_mw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct hns_roce_mw, ibmw);
}

static inline struct hns_roce_qp *to_hr_qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct hns_roce_qp, ibqp);
}

static inline struct hns_roce_cq *to_hr_cq(struct ib_cq *ib_cq)
{
	return container_of(ib_cq, struct hns_roce_cq, ib_cq);
}

static inline struct hns_roce_srq *to_hr_srq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct hns_roce_srq, ibsrq);
}

static inline struct hns_user_mmap_entry *
to_hns_mmap(struct rdma_user_mmap_entry *rdma_entry)
{
	return container_of(rdma_entry, struct hns_user_mmap_entry, rdma_entry);
}

static inline void hns_roce_write64_k(__le32 val[2], void __iomem *dest)
{
	writeq(*(u64 *)val, dest);
}
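
/*
 * hns_roce_write64_k() posts the two little-endian doorbell words as a
 * single 64-bit MMIO write so the device observes both halves at once.
 * A minimal usage sketch (the doorbell word layout shown here is
 * hypothetical; the real layout is hardware-specific):
 *
 *	__le32 doorbell[2] = { cpu_to_le32(low), cpu_to_le32(high) };
 *
 *	hns_roce_write64_k(doorbell, hr_dev->reg_base + DB_REG_OFFSET);
 */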

static inline struct hns_roce_qp
	*__hns_roce_qp_lookup(struct hns_roce_dev *hr_dev, u32 qpn)
{
	return xa_load(&hr_dev->qp_table_xa, qpn);
}
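
/*
 * Note: xa_load() is safe under RCU, so a QP can be looked up without
 * holding qp_list_lock; callers that keep using the QP afterwards are
 * assumed to take a reference via its refcount first.
 */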

static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf,
					unsigned int offset)
{
	return (char *)(buf->trunk_list[offset >> buf->trunk_shift].buf) +
			(offset & ((1 << buf->trunk_shift) - 1));
}
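
/*
 * Example (illustrative only): fetching the n-th SQ WQE from a kernel
 * buffer, assuming the SQ WQE area starts at sq.offset within the buffer
 * and wqe_shift encodes the per-WQE size as a power of two:
 *
 *	void *wqe = hns_roce_buf_offset(hr_qp->mtr.kmem,
 *			hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
 */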

static inline dma_addr_t hns_roce_buf_dma_addr(struct hns_roce_buf *buf,
					       unsigned int offset)
{
	return buf->trunk_list[offset >> buf->trunk_shift].map +
			(offset & ((1 << buf->trunk_shift) - 1));
}

static inline dma_addr_t hns_roce_buf_page(struct hns_roce_buf *buf, u32 idx)
{
	return hns_roce_buf_dma_addr(buf, idx << buf->page_shift);
}

#define hr_hw_page_align(x)		ALIGN(x, 1 << HNS_HW_PAGE_SHIFT)

static inline u64 to_hr_hw_page_addr(u64 addr)
{
	return addr >> HNS_HW_PAGE_SHIFT;
}

static inline u32 to_hr_hw_page_shift(u32 page_shift)
{
	return page_shift - HNS_HW_PAGE_SHIFT;
}

static inline u32 to_hr_hem_hopnum(u32 hopnum, u32 count)
{
	if (count > 0)
		return hopnum == HNS_ROCE_HOP_NUM_0 ? 0 : hopnum;

	return 0;
}
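
/*
 * to_hr_hem_hopnum() folds the HNS_ROCE_HOP_NUM_0 sentinel (0xff) back to
 * a literal hop count of 0, i.e. a region addressed without a BA table;
 * an empty region (count == 0) also reports 0 hops.
 */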

static inline u32 to_hr_hem_entries_size(u32 count, u32 buf_shift)
{
	return hr_hw_page_align(count << buf_shift);
}

static inline u32 to_hr_hem_entries_count(u32 count, u32 buf_shift)
{
	return hr_hw_page_align(count << buf_shift) >> buf_shift;
}

static inline u32 to_hr_hem_entries_shift(u32 count, u32 buf_shift)
{
	if (!count)
		return 0;

	return ilog2(to_hr_hem_entries_count(count, buf_shift));
}
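
/*
 * Worked example for the three helpers above: count = 100 entries of 64
 * bytes (buf_shift = 6) occupy 6400 bytes, which hr_hw_page_align() rounds
 * up to 8192. That gives 8192 bytes of HEM space (entries_size), 128
 * entries (entries_count), and a shift of ilog2(128) = 7 (entries_shift).
 */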

#define DSCP_SHIFT 2

static inline u8 get_tclass(const struct ib_global_route *grh)
{
	return grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP ?
	       grh->traffic_class >> DSCP_SHIFT : grh->traffic_class;
}
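
/*
 * Rationale: for RoCEv2 (IB_GID_TYPE_ROCE_UDP_ENCAP) the traffic class
 * field carries a DSCP value in its upper six bits, so it is shifted
 * right by DSCP_SHIFT; for other GID types the raw traffic class is used.
 */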

void hns_roce_init_uar_table(struct hns_roce_dev *dev);
int hns_roce_uar_alloc(struct hns_roce_dev *dev, struct hns_roce_uar *uar);

int hns_roce_cmd_init(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_cleanup(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
			u64 out_param);
int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev);

/* The hns roce hw needs the current block and the next block addr from mtt */
#define MTT_MIN_COUNT	 2
int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
		      u32 offset, u64 *mtt_buf, int mtt_max, u64 *base_addr);
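
/*
 * A minimal sketch of querying translated addresses through the mtr
 * (hypothetical caller context; error handling of "ret" omitted):
 *
 *	u64 mtts[MTT_MIN_COUNT];
 *	u64 dma_handle;
 *
 *	ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts,
 *				ARRAY_SIZE(mtts), &dma_handle);
 */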
int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			struct hns_roce_buf_attr *buf_attr,
			unsigned int page_shift, struct ib_udata *udata,
			unsigned long user_addr);
void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev,
			  struct hns_roce_mtr *mtr);
int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
		     dma_addr_t *pages, unsigned int page_cnt);

void hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_srq_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev);

void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);

void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev);

int hns_roce_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
		       struct ib_udata *udata);
int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
static inline int hns_roce_destroy_ah(struct ib_ah *ah, u32 flags)
{
	return 0;
}
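
/*
 * hns_roce_destroy_ah() is assumed to be a no-op because an AH in this
 * driver only caches address attributes in hns_roce_av; no hardware
 * resource appears to be held that would need releasing.
 */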

int hns_roce_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);

struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				   u64 virt_addr, int access_flags,
				   struct ib_udata *udata);
struct ib_mr *hns_roce_rereg_user_mr(struct ib_mr *mr, int flags, u64 start,
				     u64 length, u64 virt_addr,
				     int mr_access_flags, struct ib_pd *pd,
				     struct ib_udata *udata);
struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
				u32 max_num_sg);
int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		       unsigned int *sg_offset);
int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
int hns_roce_hw_destroy_mpt(struct hns_roce_dev *hr_dev,
			    struct hns_roce_cmd_mailbox *mailbox,
			    unsigned long mpt_index);
unsigned long key_to_hw_index(u32 key);

int hns_roce_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
int hns_roce_dealloc_mw(struct ib_mw *ibmw);

void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf);
struct hns_roce_buf *hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size,
					u32 page_shift, u32 flags);

int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
			   int buf_cnt, struct hns_roce_buf *buf,
			   unsigned int page_shift);
int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
			   int buf_cnt, struct ib_umem *umem,
			   unsigned int page_shift);

int hns_roce_create_srq(struct ib_srq *srq,
			struct ib_srq_init_attr *srq_init_attr,
			struct ib_udata *udata);
int hns_roce_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr,
			enum ib_srq_attr_mask srq_attr_mask,
			struct ib_udata *udata);
int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);

int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata);
int hns_roce_dealloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata);

int hns_roce_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *init_attr,
		       struct ib_udata *udata);
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		       int attr_mask, struct ib_udata *udata);
void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, unsigned int n);
void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, unsigned int n);
void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, unsigned int n);
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, u32 nreq,
			  struct ib_cq *ib_cq);
enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state);
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq,
		       struct hns_roce_cq *recv_cq);
void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
			 struct hns_roce_cq *recv_cq);
void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			 struct ib_udata *udata);
__be32 send_ieth(const struct ib_send_wr *wr);
int to_hr_qp_type(int qp_type);

int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
		       struct ib_udata *udata);

int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt,
			 struct hns_roce_db *db);
void hns_roce_db_unmap_user(struct hns_roce_ucontext *context,
			    struct hns_roce_db *db);
int hns_roce_alloc_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db,
		      int order);
void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db);

void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
void flush_cqe(struct hns_roce_dev *dev, struct hns_roce_qp *qp);
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type);
u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u32 port, int gid_index);
void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev);
int hns_roce_init(struct hns_roce_dev *hr_dev);
void hns_roce_exit(struct hns_roce_dev *hr_dev);
int hns_roce_fill_res_cq_entry(struct sk_buff *msg,
			       struct ib_cq *ib_cq);
struct hns_user_mmap_entry *
hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
				size_t length,
				enum hns_roce_mmap_type mmap_type);
#endif /* _HNS_ROCE_DEVICE_H */