/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _HNS_ROCE_HEM_H
#define _HNS_ROCE_HEM_H

#define HW_SYNC_SLEEP_TIME_INTERVAL	20
#define HW_SYNC_TIMEOUT_MSECS		(25 * HW_SYNC_SLEEP_TIME_INTERVAL)
#define BT_CMD_SYNC_SHIFT		31
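
/*
 * HW_SYNC_SLEEP_TIME_INTERVAL and HW_SYNC_TIMEOUT_MSECS are in
 * milliseconds and bound the wait for a base address table command to
 * complete; BT_CMD_SYNC_SHIFT selects the "sync" bit that is polled.
 * A rough sketch of the intended pattern (illustrative only; the real
 * loop lives in the hw-specific set_hem code, and bt_cmd_reg below is a
 * placeholder name):
 *
 *	long timeout = HW_SYNC_TIMEOUT_MSECS;
 *
 *	while (readl(bt_cmd_reg) >> BT_CMD_SYNC_SHIFT) {
 *		if (timeout <= 0)
 *			return -EBUSY;
 *		mdelay(HW_SYNC_SLEEP_TIME_INTERVAL);
 *		timeout -= HW_SYNC_SLEEP_TIME_INTERVAL;
 *	}
 */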

enum {
	/* MAP HEM (Hardware Entry Memory) */
	HEM_TYPE_QPC = 0,
	HEM_TYPE_MTPT,
	HEM_TYPE_CQC,
	HEM_TYPE_SRQC,
	HEM_TYPE_SCCC,
	HEM_TYPE_QPC_TIMER,
	HEM_TYPE_CQC_TIMER,
	HEM_TYPE_GMV,

	/* UNMAP HEM */
	HEM_TYPE_MTT,
	HEM_TYPE_CQE,
	HEM_TYPE_SRQWQE,
	HEM_TYPE_IDX,
	HEM_TYPE_IRRL,
	HEM_TYPE_TRRL,
};

#define HNS_ROCE_HEM_CHUNK_LEN	\
	((256 - sizeof(struct list_head) - 2 * sizeof(int)) /	 \
	(sizeof(struct scatterlist) + sizeof(void *)))
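
/*
 * HNS_ROCE_HEM_CHUNK_LEN is the number of scatterlist/buffer slots per
 * chunk, sized so that struct hns_roce_hem_chunk (list head, two ints and
 * the two parallel arrays below) stays within roughly 256 bytes. Purely
 * as an illustration, assuming sizeof(struct list_head) == 16,
 * sizeof(int) == 4 and sizeof(struct scatterlist) == 32 on a 64-bit
 * build, this evaluates to (256 - 16 - 8) / (32 + 8) = 5 slots; the real
 * value depends on the kernel configuration.
 */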

#define check_whether_bt_num_3(type, hop_num) \
	(type < HEM_TYPE_MTT && hop_num == 2)

#define check_whether_bt_num_2(type, hop_num) \
	((type < HEM_TYPE_MTT && hop_num == 1) || \
	(type >= HEM_TYPE_MTT && hop_num == 2))

#define check_whether_bt_num_1(type, hop_num) \
	((type < HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0) || \
	(type >= HEM_TYPE_MTT && hop_num == 1) || \
	(type >= HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0))
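
/*
 * The three checks above classify how many base address tables back a
 * (type, hop_num) pair. As the conditions themselves show, context-style
 * tables (types before HEM_TYPE_MTT) and MTT-style tables (HEM_TYPE_MTT
 * and later) reach the same table depth at hop_num values that differ by
 * one, and HNS_ROCE_HOP_NUM_0 marks the "no extra hop" case for either
 * range.
 */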

enum {
	HNS_ROCE_HEM_PAGE_SHIFT = 12,
	HNS_ROCE_HEM_PAGE_SIZE  = 1 << HNS_ROCE_HEM_PAGE_SHIFT,
};

struct hns_roce_hem_chunk {
	struct list_head	 list;
	int			 npages;
	int			 nsg;
	struct scatterlist	 mem[HNS_ROCE_HEM_CHUNK_LEN];
	void			 *buf[HNS_ROCE_HEM_CHUNK_LEN];
};

struct hns_roce_hem {
	struct list_head	 chunk_list;
	int			 refcount;
};

struct hns_roce_hem_iter {
	struct hns_roce_hem		 *hem;
	struct hns_roce_hem_chunk	 *chunk;
	int				 page_idx;
};

struct hns_roce_hem_mhop {
	u32	hop_num;
	u32	buf_chunk_size;
	u32	bt_chunk_size;
	u32	ba_l0_num;
	u32	l0_idx; /* level 0 base address table index */
	u32	l1_idx; /* level 1 base address table index */
	u32	l2_idx; /* level 2 base address table index */
};

void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem);
int hns_roce_table_get(struct hns_roce_dev *hr_dev,
		       struct hns_roce_hem_table *table, unsigned long obj);
void hns_roce_table_put(struct hns_roce_dev *hr_dev,
			struct hns_roce_hem_table *table, unsigned long obj);
void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
			  struct hns_roce_hem_table *table, unsigned long obj,
			  dma_addr_t *dma_handle);
int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
			    struct hns_roce_hem_table *table, u32 type,
			    unsigned long obj_size, unsigned long nobj,
			    int use_lowmem);
void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
				struct hns_roce_hem_table *table);
void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev);
int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
			   struct hns_roce_hem_table *table, unsigned long *obj,
			   struct hns_roce_hem_mhop *mhop);
bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type);
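
/*
 * Hypothetical usage sketch (not lifted from the driver) of how the
 * helpers above compose: pin the HEM backing an object, look up its
 * kernel/DMA address, then drop the reference. fill_object() is a
 * placeholder for whatever writes the context entry. For multi-hop
 * tables, hns_roce_check_whether_mhop() and hns_roce_calc_hem_mhop()
 * break the object index down into l0/l1/l2 table indexes first.
 *
 *	dma_addr_t dma;
 *	void *vaddr;
 *	int ret;
 *
 *	ret = hns_roce_table_get(hr_dev, table, obj);
 *	if (ret)
 *		return ret;
 *
 *	vaddr = hns_roce_table_find(hr_dev, table, obj, &dma);
 *	if (vaddr)
 *		fill_object(vaddr, dma);
 *
 *	hns_roce_table_put(hr_dev, table, obj);
 */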

void hns_roce_hem_list_init(struct hns_roce_hem_list *hem_list);
int hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions,
				   int region_cnt, int unit);
int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
			      struct hns_roce_hem_list *hem_list,
			      const struct hns_roce_buf_region *regions,
			      int region_cnt, unsigned int bt_pg_shift);
void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev,
			       struct hns_roce_hem_list *hem_list);
void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
				 struct hns_roce_hem_list *hem_list,
				 int offset, int *mtt_cnt, u64 *phy_addr);
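
/*
 * The hem_list interface above manages the multi-level base address
 * tables behind MTT-like buffers: hns_roce_hem_list_init() prepares the
 * list, hns_roce_hem_list_calc_root_ba() sizes the root table for a set
 * of buffer regions, hns_roce_hem_list_request() allocates and links the
 * tables, hns_roce_hem_list_find_mtt() returns the table slot (and its
 * physical address) covering a given offset, and
 * hns_roce_hem_list_release() frees everything.
 */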

static inline void hns_roce_hem_first(struct hns_roce_hem *hem,
				      struct hns_roce_hem_iter *iter)
{
	iter->hem = hem;
	iter->chunk = list_empty(&hem->chunk_list) ? NULL :
				 list_entry(hem->chunk_list.next,
					    struct hns_roce_hem_chunk, list);
	iter->page_idx = 0;
}

static inline int hns_roce_hem_last(struct hns_roce_hem_iter *iter)
{
	return !iter->chunk;
}

static inline void hns_roce_hem_next(struct hns_roce_hem_iter *iter)
{
	if (++iter->page_idx >= iter->chunk->nsg) {
		if (iter->chunk->list.next == &iter->hem->chunk_list) {
			iter->chunk = NULL;
			return;
		}

		iter->chunk = list_entry(iter->chunk->list.next,
					 struct hns_roce_hem_chunk, list);
		iter->page_idx = 0;
	}
}

static inline dma_addr_t hns_roce_hem_addr(struct hns_roce_hem_iter *iter)
{
	return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
}
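
/*
 * Minimal iteration sketch (illustrative only): walk every DMA-mapped
 * page of a HEM object with the helpers above, e.g. to program each page
 * address into a hardware base address table. write_hw_ba() is a
 * placeholder for that hardware-specific step.
 *
 *	struct hns_roce_hem_iter iter;
 *
 *	for (hns_roce_hem_first(hem, &iter);
 *	     !hns_roce_hem_last(&iter);
 *	     hns_roce_hem_next(&iter))
 *		write_hw_ba(hns_roce_hem_addr(&iter));
 */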

#endif /* _HNS_ROCE_HEM_H */