/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _HNS_ROCE_HEM_H
#define _HNS_ROCE_HEM_H

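/*
 * Polling parameters used while a base address table (BT) mapping command
 * completes: sleep HW_SYNC_SLEEP_TIME_INTERVAL milliseconds between polls
 * and give up after HW_SYNC_TIMEOUT_MSECS.  BT_CMD_SYNC_SHIFT is presumably
 * the bit position of the sync flag in the hardware's BT command register.
 */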
#define HW_SYNC_SLEEP_TIME_INTERVAL	20
#define HW_SYNC_TIMEOUT_MSECS		(25 * HW_SYNC_SLEEP_TIME_INTERVAL)
#define BT_CMD_SYNC_SHIFT		31

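/*
 * HEM (Hardware Entry Memory) table types.  The ordering matters: the
 * check_whether_bt_num_*() macros below treat every type before
 * HEM_TYPE_MTT as "MAP HEM" (context memory registered with the hardware)
 * and every type from HEM_TYPE_MTT onwards as "UNMAP HEM".
 */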
enum {
	/* MAP HEM (Hardware Entry Memory) */
	HEM_TYPE_QPC = 0,
	HEM_TYPE_MTPT,
	HEM_TYPE_CQC,
	HEM_TYPE_SRQC,
	HEM_TYPE_SCCC,
	HEM_TYPE_QPC_TIMER,
	HEM_TYPE_CQC_TIMER,

	/* UNMAP HEM */
	HEM_TYPE_MTT,
	HEM_TYPE_CQE,
	HEM_TYPE_SRQWQE,
	HEM_TYPE_IDX,
	HEM_TYPE_IRRL,
	HEM_TYPE_TRRL,
};

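/*
 * Number of scatterlist entries per chunk: sized so that the list head,
 * the two counters and the scatterlist array of struct hns_roce_hem_chunk
 * fit within 256 bytes (the trailing buf[] array is not counted here).
 */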
#define HNS_ROCE_HEM_CHUNK_LEN	\
	((256 - sizeof(struct list_head) - 2 * sizeof(int)) /	\
	 (sizeof(struct scatterlist)))

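/*
 * check_whether_bt_num_N(type, hop_num) is true when the HEM type and hop
 * number imply N levels of base address table: context HEM
 * (type < HEM_TYPE_MTT) appears to use hop_num + 1 levels, other HEM uses
 * hop_num levels, and HNS_ROCE_HOP_NUM_0 always means a single level.
 */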
#define check_whether_bt_num_3(type, hop_num) \
	(type < HEM_TYPE_MTT && hop_num == 2)

#define check_whether_bt_num_2(type, hop_num) \
	((type < HEM_TYPE_MTT && hop_num == 1) || \
	(type >= HEM_TYPE_MTT && hop_num == 2))

#define check_whether_bt_num_1(type, hop_num) \
	((type < HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0) || \
	(type >= HEM_TYPE_MTT && hop_num == 1) || \
	(type >= HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0))

enum {
	HNS_ROCE_HEM_PAGE_SHIFT = 12,
	HNS_ROCE_HEM_PAGE_SIZE  = 1 << HNS_ROCE_HEM_PAGE_SHIFT,
};

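/*
 * A HEM object is a list of chunks.  Each chunk holds up to
 * HNS_ROCE_HEM_CHUNK_LEN scatterlist entries for the DMA-mapped pages and,
 * in buf[], the corresponding kernel virtual addresses; refcount counts the
 * table entries currently pinning this HEM.
 */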
struct hns_roce_hem_chunk {
	struct list_head	 list;
	int			 npages;
	int			 nsg;
	struct scatterlist	 mem[HNS_ROCE_HEM_CHUNK_LEN];
	void			 *buf[HNS_ROCE_HEM_CHUNK_LEN];
};

struct hns_roce_hem {
	struct list_head	 chunk_list;
	int			 refcount;
};

struct hns_roce_hem_iter {
	struct hns_roce_hem		 *hem;
	struct hns_roce_hem_chunk	 *chunk;
	int				 page_idx;
};

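/*
 * Multi-hop addressing description for one table object, filled in by
 * hns_roce_calc_hem_mhop(): the chunk sizes of the buffer and of the base
 * address tables, plus the object's index at each BT level.
 */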
struct hns_roce_hem_mhop {
	u32	hop_num;
	u32	buf_chunk_size;
	u32	bt_chunk_size;
	u32	ba_l0_num;
	u32	l0_idx; /* level 0 base address table index */
	u32	l1_idx; /* level 1 base address table index */
	u32	l2_idx; /* level 2 base address table index */
};

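/*
 * Table management: hns_roce_free_hem() releases a HEM allocation,
 * hns_roce_table_get()/hns_roce_table_put() pin and release the HEM backing
 * a single object, hns_roce_table_find() looks up its kernel address (and
 * DMA address via dma_handle), and the *_range() variants cover a span of
 * objects.
 */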
void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem);
int hns_roce_table_get(struct hns_roce_dev *hr_dev,
		       struct hns_roce_hem_table *table, unsigned long obj);
void hns_roce_table_put(struct hns_roce_dev *hr_dev,
			struct hns_roce_hem_table *table, unsigned long obj);
void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
			  struct hns_roce_hem_table *table, unsigned long obj,
			  dma_addr_t *dma_handle);
int hns_roce_table_get_range(struct hns_roce_dev *hr_dev,
			     struct hns_roce_hem_table *table,
			     unsigned long start, unsigned long end);
void hns_roce_table_put_range(struct hns_roce_dev *hr_dev,
			      struct hns_roce_hem_table *table,
			      unsigned long start, unsigned long end);
int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
			    struct hns_roce_hem_table *table, u32 type,
			    unsigned long obj_size, unsigned long nobj,
			    int use_lowmem);
void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
				struct hns_roce_hem_table *table);
void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev);
int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
			   struct hns_roce_hem_table *table, unsigned long *obj,
			   struct hns_roce_hem_mhop *mhop);
bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type);

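/*
 * hem_list interface: presumably builds the multi-hop base address tables
 * covering a set of buffer regions and resolves MTT entries inside them.
 */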
void hns_roce_hem_list_init(struct hns_roce_hem_list *hem_list,
			    int bt_page_order);
int hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions,
				   int region_cnt, int unit);
int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
			      struct hns_roce_hem_list *hem_list,
			      const struct hns_roce_buf_region *regions,
			      int region_cnt);
void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev,
			       struct hns_roce_hem_list *hem_list);
void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
				 struct hns_roce_hem_list *hem_list,
				 int offset, int *mtt_cnt, u64 *phy_addr);

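/*
 * Iterator over the DMA addresses of the pages backing a HEM.  A typical
 * walk (an illustrative sketch, not a prescribed usage) looks like:
 *
 *	struct hns_roce_hem_iter iter;
 *	dma_addr_t page_addr;
 *
 *	for (hns_roce_hem_first(hem, &iter);
 *	     !hns_roce_hem_last(&iter);
 *	     hns_roce_hem_next(&iter)) {
 *		page_addr = hns_roce_hem_addr(&iter);
 *		... program page_addr into the hardware ...
 *	}
 */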
static inline void hns_roce_hem_first(struct hns_roce_hem *hem,
				      struct hns_roce_hem_iter *iter)
{
	iter->hem = hem;
	iter->chunk = list_empty(&hem->chunk_list) ? NULL :
				 list_entry(hem->chunk_list.next,
					    struct hns_roce_hem_chunk, list);
	iter->page_idx = 0;
}

static inline int hns_roce_hem_last(struct hns_roce_hem_iter *iter)
{
	return !iter->chunk;
}

static inline void hns_roce_hem_next(struct hns_roce_hem_iter *iter)
{
	if (++iter->page_idx >= iter->chunk->nsg) {
		if (iter->chunk->list.next == &iter->hem->chunk_list) {
			iter->chunk = NULL;
			return;
		}

		iter->chunk = list_entry(iter->chunk->list.next,
					 struct hns_roce_hem_chunk, list);
		iter->page_idx = 0;
	}
}

static inline dma_addr_t hns_roce_hem_addr(struct hns_roce_hem_iter *iter)
{
	return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
}

#endif /* _HNS_ROCE_HEM_H */