/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _HNS_ROCE_HEM_H
#define _HNS_ROCE_HEM_H

#define HW_SYNC_TIMEOUT_MSECS		500
#define HW_SYNC_SLEEP_TIME_INTERVAL	20
#define BT_CMD_SYNC_SHIFT		31

enum {
	/* MAP HEM (Hardware Entry Memory) */
	HEM_TYPE_QPC = 0,
	HEM_TYPE_MTPT,
	HEM_TYPE_CQC,
	HEM_TYPE_SRQC,

	/* UNMAP HEM */
	HEM_TYPE_MTT,
	HEM_TYPE_CQE,
	HEM_TYPE_SRQWQE,
	HEM_TYPE_IDX,
	HEM_TYPE_IRRL,
	HEM_TYPE_TRRL,
};

#define HNS_ROCE_HEM_CHUNK_LEN \
	((256 - sizeof(struct list_head) - 2 * sizeof(int)) / \
	 (sizeof(struct scatterlist)))

#define check_whether_bt_num_3(type, hop_num) \
	(type < HEM_TYPE_MTT && hop_num == 2)

#define check_whether_bt_num_2(type, hop_num) \
	((type < HEM_TYPE_MTT && hop_num == 1) || \
	 (type >= HEM_TYPE_MTT && hop_num == 2))

#define check_whether_bt_num_1(type, hop_num) \
	((type < HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0) || \
	 (type >= HEM_TYPE_MTT && hop_num == 1) || \
	 (type >= HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0))

enum {
	HNS_ROCE_HEM_PAGE_SHIFT = 12,
	HNS_ROCE_HEM_PAGE_SIZE  = 1 << HNS_ROCE_HEM_PAGE_SHIFT,
};

struct hns_roce_hem_chunk {
	struct list_head	 list;
	int			 npages;
	int			 nsg;
	struct scatterlist	 mem[HNS_ROCE_HEM_CHUNK_LEN];
	void			*buf[HNS_ROCE_HEM_CHUNK_LEN];
};

struct hns_roce_hem {
	struct list_head	 chunk_list;
	int			 refcount;
};

struct hns_roce_hem_iter {
	struct hns_roce_hem		*hem;
	struct hns_roce_hem_chunk	*chunk;
	int				 page_idx;
};

struct hns_roce_hem_mhop {
	u32	hop_num;
	u32	buf_chunk_size;
	u32	bt_chunk_size;
	u32	ba_l0_num;
	u32	l0_idx;	/* level 0 base address table index */
	u32	l1_idx;	/* level 1 base address table index */
	u32	l2_idx;	/* level 2 base address table index */
};

void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem);
int hns_roce_table_get(struct hns_roce_dev *hr_dev,
		       struct hns_roce_hem_table *table, unsigned long obj);
void hns_roce_table_put(struct hns_roce_dev *hr_dev,
			struct hns_roce_hem_table *table, unsigned long obj);
void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
			  struct hns_roce_hem_table *table, unsigned long obj,
			  dma_addr_t *dma_handle);
int hns_roce_table_get_range(struct hns_roce_dev *hr_dev,
			     struct hns_roce_hem_table *table,
			     unsigned long start, unsigned long end);
void hns_roce_table_put_range(struct hns_roce_dev *hr_dev,
			      struct hns_roce_hem_table *table,
			      unsigned long start, unsigned long end);
int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
			    struct hns_roce_hem_table *table, u32 type,
			    unsigned long obj_size, unsigned long nobj,
			    int use_lowmem);
void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
				struct hns_roce_hem_table *table);
void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev);
int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
			   struct hns_roce_hem_table *table, unsigned long *obj,
			   struct hns_roce_hem_mhop *mhop);
bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type);

static inline void hns_roce_hem_first(struct hns_roce_hem *hem,
				      struct hns_roce_hem_iter *iter)
{
	iter->hem = hem;
	iter->chunk = list_empty(&hem->chunk_list) ? NULL :
				 list_entry(hem->chunk_list.next,
					    struct hns_roce_hem_chunk, list);
	iter->page_idx = 0;
}

static inline int hns_roce_hem_last(struct hns_roce_hem_iter *iter)
{
	return !iter->chunk;
}

static inline void hns_roce_hem_next(struct hns_roce_hem_iter *iter)
{
	if (++iter->page_idx >= iter->chunk->nsg) {
		if (iter->chunk->list.next == &iter->hem->chunk_list) {
			iter->chunk = NULL;
			return;
		}

		iter->chunk = list_entry(iter->chunk->list.next,
					 struct hns_roce_hem_chunk, list);
		iter->page_idx = 0;
	}
}

static inline dma_addr_t hns_roce_hem_addr(struct hns_roce_hem_iter *iter)
{
	return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
}
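
/*
 * Illustrative sketch only, not part of the driver API: the helper below
 * shows how hns_roce_hem_first()/hns_roce_hem_last()/hns_roce_hem_next()
 * are typically combined to walk a HEM object chunk by chunk, visiting each
 * DMA-mapped scatterlist entry once; hns_roce_hem_addr() then returns the
 * bus address of the current entry. The function name is hypothetical.
 */
static inline int hns_roce_hem_example_count(struct hns_roce_hem *hem)
{
	struct hns_roce_hem_iter iter;
	int cnt = 0;

	for (hns_roce_hem_first(hem, &iter); !hns_roce_hem_last(&iter);
	     hns_roce_hem_next(&iter)) {
		/* hns_roce_hem_addr(&iter) yields the current DMA address */
		cnt++;
	}

	return cnt;
}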

#endif /* _HNS_ROCE_HEM_H */