/*
 * Multicore Navigator driver for TI Keystone 2 devices.
 *
 * (C) Copyright 2012-2014
 *     Texas Instruments Incorporated, <www.ti.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */
#include <common.h>
#include <asm/io.h>
#include <asm/ti-common/keystone_nav.h>

struct qm_config qm_memmap = {
	.stat_cfg	= CONFIG_KSNAV_QM_QUEUE_STATUS_BASE,
	.queue		= (void *)CONFIG_KSNAV_QM_MANAGER_QUEUES_BASE,
	.mngr_vbusm	= CONFIG_KSNAV_QM_BASE_ADDRESS,
	.i_lram		= CONFIG_KSNAV_QM_LINK_RAM_BASE,
	.proxy		= (void *)CONFIG_KSNAV_QM_MANAGER_Q_PROXY_BASE,
	.status_ram	= CONFIG_KSNAV_QM_STATUS_RAM_BASE,
	.mngr_cfg	= (void *)CONFIG_KSNAV_QM_CONF_BASE,
	.intd_cfg	= CONFIG_KSNAV_QM_INTD_CONF_BASE,
	.desc_mem	= (void *)CONFIG_KSNAV_QM_DESC_SETUP_BASE,
	.region_num	= CONFIG_KSNAV_QM_REGION_NUM,
	.pdsp_cmd	= CONFIG_KSNAV_QM_PDSP1_CMD_BASE,
	.pdsp_ctl	= CONFIG_KSNAV_QM_PDSP1_CTRL_BASE,
	.pdsp_iram	= CONFIG_KSNAV_QM_PDSP1_IRAM_BASE,
	.qpool_num	= CONFIG_KSNAV_QM_QPOOL_NUM,
};

/*
 * We use only one type of descriptor: the host packet descriptor.
 * Memory for the descriptors is allocated statically here.
 */
struct qm_host_desc desc_pool[HDESC_NUM] __aligned(sizeof(struct qm_host_desc));

static struct qm_config *qm_cfg;

/*
 * Convert a descriptor count to the region size code used in the
 * descriptor memory setup register: 32 descriptors map to 0 and each
 * doubling adds 1, capped at 15.
 */
inline int num_of_desc_to_reg(int num_descr)
{
	int j, num;

	for (j = 0, num = 32; j < 15; j++, num *= 2) {
		if (num_descr <= num)
			return j;
	}

	return 15;
}

int _qm_init(struct qm_config *cfg)
{
	u32 j;

	qm_cfg = cfg;

	qm_cfg->mngr_cfg->link_ram_base0 = qm_cfg->i_lram;
	qm_cfg->mngr_cfg->link_ram_size0 = HDESC_NUM * 8 - 1;
	qm_cfg->mngr_cfg->link_ram_base1 = 0;
	qm_cfg->mngr_cfg->link_ram_size1 = 0;
	qm_cfg->mngr_cfg->link_ram_base2 = 0;

	qm_cfg->desc_mem[0].base_addr = (u32)desc_pool;
	qm_cfg->desc_mem[0].start_idx = 0;
	qm_cfg->desc_mem[0].desc_reg_size =
		(((sizeof(struct qm_host_desc) >> 4) - 1) << 16) |
		num_of_desc_to_reg(HDESC_NUM);

	memset(desc_pool, 0, sizeof(desc_pool));
	for (j = 0; j < HDESC_NUM; j++)
		qm_push(&desc_pool[j], qm_cfg->qpool_num);

	return QM_OK;
}

int qm_init(void)
{
	return _qm_init(&qm_memmap);
}

void qm_close(void)
{
	u32 j;

	queue_close(qm_cfg->qpool_num);

	qm_cfg->mngr_cfg->link_ram_base0 = 0;
	qm_cfg->mngr_cfg->link_ram_size0 = 0;
	qm_cfg->mngr_cfg->link_ram_base1 = 0;
	qm_cfg->mngr_cfg->link_ram_size1 = 0;
	qm_cfg->mngr_cfg->link_ram_base2 = 0;

	for (j = 0; j < qm_cfg->region_num; j++) {
		qm_cfg->desc_mem[j].base_addr = 0;
		qm_cfg->desc_mem[j].start_idx = 0;
		qm_cfg->desc_mem[j].desc_reg_size = 0;
	}

	qm_cfg = NULL;
}

void qm_push(struct qm_host_desc *hd, u32 qnum)
{
	u32 regd;

	cpu_to_bus((u32 *)hd, sizeof(struct qm_host_desc) / 4);
	/* pack the descriptor address with its size in 16-byte units minus one */
	regd = (u32)hd | ((sizeof(struct qm_host_desc) >> 4) - 1);
	writel(regd, &qm_cfg->queue[qnum].ptr_size_thresh);
}

void qm_buff_push(struct qm_host_desc *hd, u32 qnum,
		  void *buff_ptr, u32 buff_len)
{
	hd->orig_buff_len = buff_len;
	hd->buff_len = buff_len;
	hd->orig_buff_ptr = (u32)buff_ptr;
	hd->buff_ptr = (u32)buff_ptr;
	qm_push(hd, qnum);
}

struct qm_host_desc *qm_pop(u32 qnum)
{
	u32 uhd;

	/* mask off the low size bits to recover the descriptor address */
	uhd = readl(&qm_cfg->queue[qnum].ptr_size_thresh) & ~0xf;
	if (uhd)
		cpu_to_bus((u32 *)uhd, sizeof(struct qm_host_desc) / 4);

	return (struct qm_host_desc *)uhd;
}
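
/*
 * Worked example of the push/pop encoding above (illustrative only; the
 * actual descriptor size comes from struct qm_host_desc in keystone_nav.h,
 * here assumed to be 64 bytes, and the address is made up): qm_push() of a
 * descriptor at 0x0c100000 writes 0x0c100000 | (64 / 16 - 1) = 0x0c100003
 * to ptr_size_thresh, and qm_pop() masks the low nibble off the value it
 * reads back, recovering the 0x0c100000 descriptor address.
 */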
struct qm_host_desc *qm_pop_from_free_pool(void)
{
	return qm_pop(qm_cfg->qpool_num);
}

void queue_close(u32 qnum)
{
	struct qm_host_desc *hd;

	while ((hd = qm_pop(qnum)))
		;
}

/**
 * DMA API
 */

static int ksnav_rx_disable(struct pktdma_cfg *pktdma)
{
	u32 j, v, k;

	for (j = 0; j < pktdma->rx_ch_num; j++) {
		v = readl(&pktdma->rx_ch[j].cfg_a);
		if (!(v & CPDMA_CHAN_A_ENABLE))
			continue;

		writel(v | CPDMA_CHAN_A_TDOWN, &pktdma->rx_ch[j].cfg_a);
		for (k = 0; k < TDOWN_TIMEOUT_COUNT; k++) {
			udelay(100);
			v = readl(&pktdma->rx_ch[j].cfg_a);
			/* stop polling once the channel reports disabled */
			if (!(v & CPDMA_CHAN_A_ENABLE))
				break;
		}
		/* TODO: report a teardown error if TDOWN_TIMEOUT_COUNT is reached */
	}

	/* Clear all of the flow registers */
	for (j = 0; j < pktdma->rx_flow_num; j++) {
		writel(0, &pktdma->rx_flows[j].control);
		writel(0, &pktdma->rx_flows[j].tags);
		writel(0, &pktdma->rx_flows[j].tag_sel);
		writel(0, &pktdma->rx_flows[j].fdq_sel[0]);
		writel(0, &pktdma->rx_flows[j].fdq_sel[1]);
		writel(0, &pktdma->rx_flows[j].thresh[0]);
		writel(0, &pktdma->rx_flows[j].thresh[1]);
		writel(0, &pktdma->rx_flows[j].thresh[2]);
	}

	return QM_OK;
}

static int ksnav_tx_disable(struct pktdma_cfg *pktdma)
{
	u32 j, v, k;

	for (j = 0; j < pktdma->tx_ch_num; j++) {
		v = readl(&pktdma->tx_ch[j].cfg_a);
		if (!(v & CPDMA_CHAN_A_ENABLE))
			continue;

		writel(v | CPDMA_CHAN_A_TDOWN, &pktdma->tx_ch[j].cfg_a);
		for (k = 0; k < TDOWN_TIMEOUT_COUNT; k++) {
			udelay(100);
			v = readl(&pktdma->tx_ch[j].cfg_a);
			/* stop polling once the channel reports disabled */
			if (!(v & CPDMA_CHAN_A_ENABLE))
				break;
		}
		/* TODO: report a teardown error if TDOWN_TIMEOUT_COUNT is reached */
	}

	return QM_OK;
}
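
/*
 * Receive path overview (a summary of the configuration done in
 * ksnav_init() below): free descriptors pre-loaded with buffers sit on
 * rx_free_q; the rx flow's fdq_sel registers point at that queue, so the
 * PKTDMA pulls a descriptor from it for each incoming packet and pushes
 * the filled descriptor onto rx_rcv_q, where ksnav_recv() pops it.
 * ksnav_release_rxhd() later returns the descriptor to rx_free_q.
 */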
int ksnav_init(struct pktdma_cfg *pktdma, struct rx_buff_desc *rx_buffers)
{
	u32 j, v;
	struct qm_host_desc *hd;
	u8 *rx_ptr;

	if (pktdma == NULL || rx_buffers == NULL ||
	    rx_buffers->buff_ptr == NULL || qm_cfg == NULL)
		return QM_ERR;

	pktdma->rx_flow = rx_buffers->rx_flow;

	/* init rx queue */
	rx_ptr = rx_buffers->buff_ptr;

	for (j = 0; j < rx_buffers->num_buffs; j++) {
		hd = qm_pop(qm_cfg->qpool_num);
		if (hd == NULL)
			return QM_ERR;

		qm_buff_push(hd, pktdma->rx_free_q,
			     rx_ptr, rx_buffers->buff_len);

		rx_ptr += rx_buffers->buff_len;
	}

	ksnav_rx_disable(pktdma);

	/* configure the rx flow */
	v = CPDMA_REG_VAL_MAKE_RX_FLOW_A(1, 1, 0, 0, 0, 0, 0, pktdma->rx_rcv_q);
	writel(v, &pktdma->rx_flows[pktdma->rx_flow].control);
	writel(0, &pktdma->rx_flows[pktdma->rx_flow].tags);
	writel(0, &pktdma->rx_flows[pktdma->rx_flow].tag_sel);

	v = CPDMA_REG_VAL_MAKE_RX_FLOW_D(0, pktdma->rx_free_q, 0,
					 pktdma->rx_free_q);

	writel(v, &pktdma->rx_flows[pktdma->rx_flow].fdq_sel[0]);
	writel(v, &pktdma->rx_flows[pktdma->rx_flow].fdq_sel[1]);
	writel(0, &pktdma->rx_flows[pktdma->rx_flow].thresh[0]);
	writel(0, &pktdma->rx_flows[pktdma->rx_flow].thresh[1]);
	writel(0, &pktdma->rx_flows[pktdma->rx_flow].thresh[2]);

	/* enable rx channels */
	for (j = 0; j < pktdma->rx_ch_num; j++)
		writel(CPDMA_CHAN_A_ENABLE, &pktdma->rx_ch[j].cfg_a);

	/* configure tx channels */
	/* Disable loopback in the tx direction */
	writel(0, &pktdma->global->emulation_control);

	/* Set QM base address, only for K2x devices */
	writel(CONFIG_KSNAV_QM_BASE_ADDRESS, &pktdma->global->qm_base_addr[0]);

	/* Enable all tx channels; the current state isn't important */
	for (j = 0; j < pktdma->tx_ch_num; j++) {
		writel(0, &pktdma->tx_ch[j].cfg_b);
		writel(CPDMA_CHAN_A_ENABLE, &pktdma->tx_ch[j].cfg_a);
	}

	return QM_OK;
}

int ksnav_close(struct pktdma_cfg *pktdma)
{
	if (!pktdma)
		return QM_ERR;

	ksnav_tx_disable(pktdma);
	ksnav_rx_disable(pktdma);

	queue_close(pktdma->rx_free_q);
	queue_close(pktdma->rx_rcv_q);
	queue_close(pktdma->tx_snd_q);

	return QM_OK;
}

int ksnav_send(struct pktdma_cfg *pktdma, u32 *pkt, int num_bytes, u32 swinfo2)
{
	struct qm_host_desc *hd;

	hd = qm_pop(qm_cfg->qpool_num);
	if (hd == NULL)
		return QM_ERR;

	hd->desc_info = num_bytes;
	hd->swinfo[2] = swinfo2;
	/* return the descriptor to the free pool queue once tx completes */
	hd->packet_info = qm_cfg->qpool_num;

	qm_buff_push(hd, pktdma->tx_snd_q, pkt, num_bytes);

	return QM_OK;
}

void *ksnav_recv(struct pktdma_cfg *pktdma, u32 **pkt, int *num_bytes)
{
	struct qm_host_desc *hd;

	hd = qm_pop(pktdma->rx_rcv_q);
	if (!hd)
		return NULL;

	*pkt = (u32 *)hd->buff_ptr;
	*num_bytes = hd->desc_info & 0x3fffff;

	return hd;
}

void ksnav_release_rxhd(struct pktdma_cfg *pktdma, void *hd)
{
	struct qm_host_desc *_hd = (struct qm_host_desc *)hd;

	_hd->buff_len = _hd->orig_buff_len;
	_hd->buff_ptr = _hd->orig_buff_ptr;

	qm_push(_hd, pktdma->rx_free_q);
}
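
/*
 * Usage sketch (illustrative only, compiled out): a minimal example of how
 * a board or network driver might drive the API above, assuming it already
 * has a struct pktdma_cfg describing its PKTDMA instance. The buffer count
 * and size, the rx flow number and the example_* names below are arbitrary
 * assumptions made for this sketch, not values required by the driver.
 */
#ifdef KSNAV_USAGE_SKETCH
static u8 example_rx_bufs[32 * 1536] __aligned(16);

static int example_ksnav_xfer(struct pktdma_cfg *pktdma,
			      u32 *tx_pkt, int tx_len)
{
	struct rx_buff_desc rx_bufs = {
		.buff_ptr	= example_rx_bufs,
		.num_buffs	= 32,
		.buff_len	= 1536,
		.rx_flow	= 0,
	};
	void *rx_hd;
	u32 *rx_pkt;
	int rx_len;

	/* bring up the queue manager, then the PKTDMA rx/tx paths */
	if (qm_init() != QM_OK)
		return QM_ERR;
	if (ksnav_init(pktdma, &rx_bufs) != QM_OK)
		return QM_ERR;

	/* queue one packet for transmission; swinfo2 is unused here */
	if (ksnav_send(pktdma, tx_pkt, tx_len, 0) != QM_OK)
		return QM_ERR;

	/* poll (forever, in this sketch) for a received packet,
	 * then hand its descriptor back to the free queue */
	while (!(rx_hd = ksnav_recv(pktdma, &rx_pkt, &rx_len)))
		;
	ksnav_release_rxhd(pktdma, rx_hd);

	/* tear everything down again */
	ksnav_close(pktdma);
	qm_close();

	return QM_OK;
}
#endif /* KSNAV_USAGE_SKETCH */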