// SPDX-License-Identifier: GPL-2.0+
/*
 * Multicore Navigator driver for TI Keystone 2 devices.
 *
 * (C) Copyright 2012-2014
 *     Texas Instruments Incorporated, <www.ti.com>
 */
#include <common.h>
#include <asm/io.h>
#include <asm/ti-common/keystone_nav.h>

struct qm_config qm_memmap = {
	.stat_cfg	= CONFIG_KSNAV_QM_QUEUE_STATUS_BASE,
	.queue		= (void *)CONFIG_KSNAV_QM_MANAGER_QUEUES_BASE,
	.mngr_vbusm	= CONFIG_KSNAV_QM_BASE_ADDRESS,
	.i_lram		= CONFIG_KSNAV_QM_LINK_RAM_BASE,
	.proxy		= (void *)CONFIG_KSNAV_QM_MANAGER_Q_PROXY_BASE,
	.status_ram	= CONFIG_KSNAV_QM_STATUS_RAM_BASE,
	.mngr_cfg	= (void *)CONFIG_KSNAV_QM_CONF_BASE,
	.intd_cfg	= CONFIG_KSNAV_QM_INTD_CONF_BASE,
	.desc_mem	= (void *)CONFIG_KSNAV_QM_DESC_SETUP_BASE,
	.region_num	= CONFIG_KSNAV_QM_REGION_NUM,
	.pdsp_cmd	= CONFIG_KSNAV_QM_PDSP1_CMD_BASE,
	.pdsp_ctl	= CONFIG_KSNAV_QM_PDSP1_CTRL_BASE,
	.pdsp_iram	= CONFIG_KSNAV_QM_PDSP1_IRAM_BASE,
	.qpool_num	= CONFIG_KSNAV_QM_QPOOL_NUM,
};

/*
 * We use only one type of descriptor - the host packet descriptor.
 * Memory for the descriptors is statically allocated here.
 */
struct qm_host_desc desc_pool[HDESC_NUM] __aligned(sizeof(struct qm_host_desc));

static struct qm_config *qm_cfg;

inline int num_of_desc_to_reg(int num_descr)
{
	int j, num;

	for (j = 0, num = 32; j < 15; j++, num *= 2) {
		if (num_descr <= num)
			return j;
	}

	return 15;
}

int _qm_init(struct qm_config *cfg)
{
	u32 j;

	qm_cfg = cfg;

	/* Use only the internal link RAM; the secondary link RAM is unused */
	qm_cfg->mngr_cfg->link_ram_base0 = qm_cfg->i_lram;
	qm_cfg->mngr_cfg->link_ram_size0 = HDESC_NUM * 8 - 1;
	qm_cfg->mngr_cfg->link_ram_base1 = 0;
	qm_cfg->mngr_cfg->link_ram_size1 = 0;
	qm_cfg->mngr_cfg->link_ram_base2 = 0;

	/* Describe descriptor memory region 0: base, descriptor size and count */
	qm_cfg->desc_mem[0].base_addr = (u32)desc_pool;
	qm_cfg->desc_mem[0].start_idx = 0;
	qm_cfg->desc_mem[0].desc_reg_size =
		(((sizeof(struct qm_host_desc) >> 4) - 1) << 16) |
		num_of_desc_to_reg(HDESC_NUM);

	/* Push every descriptor onto the free pool queue */
	memset(desc_pool, 0, sizeof(desc_pool));
	for (j = 0; j < HDESC_NUM; j++)
		qm_push(&desc_pool[j], qm_cfg->qpool_num);

	return QM_OK;
}

int qm_init(void)
{
	return _qm_init(&qm_memmap);
}

void qm_close(void)
{
	u32 j;

	queue_close(qm_cfg->qpool_num);

	qm_cfg->mngr_cfg->link_ram_base0 = 0;
	qm_cfg->mngr_cfg->link_ram_size0 = 0;
	qm_cfg->mngr_cfg->link_ram_base1 = 0;
	qm_cfg->mngr_cfg->link_ram_size1 = 0;
	qm_cfg->mngr_cfg->link_ram_base2 = 0;

	for (j = 0; j < qm_cfg->region_num; j++) {
		qm_cfg->desc_mem[j].base_addr = 0;
		qm_cfg->desc_mem[j].start_idx = 0;
		qm_cfg->desc_mem[j].desc_reg_size = 0;
	}

	qm_cfg = NULL;
}

void qm_push(struct qm_host_desc *hd, u32 qnum)
{
	u32 regd;

	cpu_to_bus((u32 *)hd, sizeof(struct qm_host_desc) / 4);
	/* The low 4 bits of the pushed value encode the descriptor size hint */
	regd = (u32)hd | ((sizeof(struct qm_host_desc) >> 4) - 1);
	writel(regd, &qm_cfg->queue[qnum].ptr_size_thresh);
}

void qm_buff_push(struct qm_host_desc *hd, u32 qnum,
		  void *buff_ptr, u32 buff_len)
{
	hd->orig_buff_len = buff_len;
	hd->buff_len = buff_len;
	hd->orig_buff_ptr = (u32)buff_ptr;
	hd->buff_ptr = (u32)buff_ptr;
	qm_push(hd, qnum);
}

struct qm_host_desc *qm_pop(u32 qnum)
{
	u32 uhd;

	/* Mask off the size hint in the low 4 bits to recover the address */
	uhd = readl(&qm_cfg->queue[qnum].ptr_size_thresh) & ~0xf;
	if (uhd)
		cpu_to_bus((u32 *)uhd, sizeof(struct qm_host_desc) / 4);

	return (struct qm_host_desc *)uhd;
}

struct qm_host_desc *qm_pop_from_free_pool(void)
{
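	/* Unused host descriptors are parked in the qpool_num queue by _qm_init() */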
	return qm_pop(qm_cfg->qpool_num);
}

void queue_close(u32 qnum)
{
	struct qm_host_desc *hd;

	while ((hd = qm_pop(qnum)))
		;
}

/**
 * DMA API
 */

static int ksnav_rx_disable(struct pktdma_cfg *pktdma)
{
	u32 j, v, k;

	for (j = 0; j < pktdma->rx_ch_num; j++) {
		v = readl(&pktdma->rx_ch[j].cfg_a);
		if (!(v & CPDMA_CHAN_A_ENABLE))
			continue;

		writel(v | CPDMA_CHAN_A_TDOWN, &pktdma->rx_ch[j].cfg_a);
		for (k = 0; k < TDOWN_TIMEOUT_COUNT; k++) {
			udelay(100);
			v = readl(&pktdma->rx_ch[j].cfg_a);
			if (!(v & CPDMA_CHAN_A_ENABLE))
				break;	/* teardown complete */
		}
		/* TODO: report a teardown error if TDOWN_TIMEOUT_COUNT is reached */
	}

	/* Clear all of the flow registers */
	for (j = 0; j < pktdma->rx_flow_num; j++) {
		writel(0, &pktdma->rx_flows[j].control);
		writel(0, &pktdma->rx_flows[j].tags);
		writel(0, &pktdma->rx_flows[j].tag_sel);
		writel(0, &pktdma->rx_flows[j].fdq_sel[0]);
		writel(0, &pktdma->rx_flows[j].fdq_sel[1]);
		writel(0, &pktdma->rx_flows[j].thresh[0]);
		writel(0, &pktdma->rx_flows[j].thresh[1]);
		writel(0, &pktdma->rx_flows[j].thresh[2]);
	}

	return QM_OK;
}

static int ksnav_tx_disable(struct pktdma_cfg *pktdma)
{
	u32 j, v, k;

	for (j = 0; j < pktdma->tx_ch_num; j++) {
		v = readl(&pktdma->tx_ch[j].cfg_a);
		if (!(v & CPDMA_CHAN_A_ENABLE))
			continue;

		writel(v | CPDMA_CHAN_A_TDOWN, &pktdma->tx_ch[j].cfg_a);
		for (k = 0; k < TDOWN_TIMEOUT_COUNT; k++) {
			udelay(100);
			v = readl(&pktdma->tx_ch[j].cfg_a);
			if (!(v & CPDMA_CHAN_A_ENABLE))
				break;	/* teardown complete */
		}
		/* TODO: report a teardown error if TDOWN_TIMEOUT_COUNT is reached */
	}

	return QM_OK;
}

int ksnav_init(struct pktdma_cfg *pktdma, struct rx_buff_desc *rx_buffers)
{
	u32 j, v;
	struct qm_host_desc *hd;
	u8 *rx_ptr;

	if (pktdma == NULL || rx_buffers == NULL ||
	    rx_buffers->buff_ptr == NULL || qm_cfg == NULL)
		return QM_ERR;

	pktdma->rx_flow = rx_buffers->rx_flow;

	/* init rx queue */
	rx_ptr = rx_buffers->buff_ptr;

	for (j = 0; j < rx_buffers->num_buffs; j++) {
		hd = qm_pop(qm_cfg->qpool_num);
		if (hd == NULL)
			return QM_ERR;

		qm_buff_push(hd, pktdma->rx_free_q,
			     rx_ptr, rx_buffers->buff_len);

		rx_ptr += rx_buffers->buff_len;
	}

	ksnav_rx_disable(pktdma);

	/* configure rx channels */
	v = CPDMA_REG_VAL_MAKE_RX_FLOW_A(1, 1, 0, 0, 0, 0, 0, pktdma->rx_rcv_q);
	writel(v, &pktdma->rx_flows[pktdma->rx_flow].control);
	writel(0, &pktdma->rx_flows[pktdma->rx_flow].tags);
	writel(0, &pktdma->rx_flows[pktdma->rx_flow].tag_sel);

	v = CPDMA_REG_VAL_MAKE_RX_FLOW_D(0, pktdma->rx_free_q, 0,
					 pktdma->rx_free_q);

	writel(v, &pktdma->rx_flows[pktdma->rx_flow].fdq_sel[0]);
	writel(v, &pktdma->rx_flows[pktdma->rx_flow].fdq_sel[1]);
	writel(0, &pktdma->rx_flows[pktdma->rx_flow].thresh[0]);
	writel(0, &pktdma->rx_flows[pktdma->rx_flow].thresh[1]);
	writel(0, &pktdma->rx_flows[pktdma->rx_flow].thresh[2]);

	for (j = 0; j < pktdma->rx_ch_num; j++)
		writel(CPDMA_CHAN_A_ENABLE, &pktdma->rx_ch[j].cfg_a);

	/* configure tx channels */
	/* Disable loopback in the tx direction */
	writel(0, &pktdma->global->emulation_control);

	/* Set QM base address, only for K2x devices */
	writel(CONFIG_KSNAV_QM_BASE_ADDRESS, &pktdma->global->qm_base_addr[0]);

	/* Enable all channels. The current state isn't important */
	for (j = 0; j < pktdma->tx_ch_num; j++) {
		writel(0, &pktdma->tx_ch[j].cfg_b);
		writel(CPDMA_CHAN_A_ENABLE, &pktdma->tx_ch[j].cfg_a);
	}

	return QM_OK;
}

int ksnav_close(struct pktdma_cfg *pktdma)
{
	if (!pktdma)
		return QM_ERR;

	ksnav_tx_disable(pktdma);
	ksnav_rx_disable(pktdma);

	queue_close(pktdma->rx_free_q);
	queue_close(pktdma->rx_rcv_q);
	queue_close(pktdma->tx_snd_q);

	return QM_OK;
}

int ksnav_send(struct pktdma_cfg *pktdma, u32 *pkt, int num_bytes, u32 swinfo2)
{
	struct qm_host_desc *hd;

	hd = qm_pop(qm_cfg->qpool_num);
	if (hd == NULL)
		return QM_ERR;

	hd->desc_info = num_bytes;
	hd->swinfo[2] = swinfo2;
	hd->packet_info = qm_cfg->qpool_num;

	qm_buff_push(hd, pktdma->tx_snd_q, pkt, num_bytes);

	return QM_OK;
}

void *ksnav_recv(struct pktdma_cfg *pktdma, u32 **pkt, int *num_bytes)
{
	struct qm_host_desc *hd;

	hd = qm_pop(pktdma->rx_rcv_q);
	if (!hd)
		return NULL;

	*pkt = (u32 *)hd->buff_ptr;
	*num_bytes = hd->desc_info & 0x3fffff;

	return hd;
}

void ksnav_release_rxhd(struct pktdma_cfg *pktdma, void *hd)
{
	struct qm_host_desc *_hd = (struct qm_host_desc *)hd;

	_hd->buff_len = _hd->orig_buff_len;
	_hd->buff_ptr = _hd->orig_buff_ptr;

	qm_push(_hd, pktdma->rx_free_q);
}
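
/*
 * Illustrative usage sketch (compiled out, not part of the driver). It
 * assumes a board-provided struct pktdma_cfg, here called "netcp_pktdma",
 * and uses example values for the rx flow and buffer geometry; adjust
 * these to the actual SoC/board configuration.
 */
#if 0
static u8 example_rx_bufs[32 * 1520] __aligned(16);

static int ksnav_example(struct pktdma_cfg *netcp_pktdma)
{
	struct rx_buff_desc rx_buffs = {
		.buff_ptr	= example_rx_bufs,
		.num_buffs	= 32,		/* example value */
		.buff_len	= 1520,		/* example value */
		.rx_flow	= 22,		/* board specific */
	};
	u32 tx_pkt[16] = { 0 };			/* dummy frame payload */
	u32 *rx_pkt;
	int rx_len;
	void *hd;

	/* Bring up the queue manager, then the packet DMA */
	if (qm_init() != QM_OK)
		return QM_ERR;
	if (ksnav_init(netcp_pktdma, &rx_buffs) != QM_OK) {
		qm_close();
		return QM_ERR;
	}

	/* Queue a frame for transmission; swinfo2 is passed through as-is */
	ksnav_send(netcp_pktdma, tx_pkt, sizeof(tx_pkt), 0);

	/* Poll for a received frame, then return its descriptor to the free queue */
	hd = ksnav_recv(netcp_pktdma, &rx_pkt, &rx_len);
	if (hd)
		ksnav_release_rxhd(netcp_pktdma, hd);

	ksnav_close(netcp_pktdma);
	qm_close();

	return QM_OK;
}
#endif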