/*
 * Multicore Navigator driver for TI Keystone 2 devices.
 *
 * (C) Copyright 2012-2014
 *     Texas Instruments Incorporated, <www.ti.com>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */
#include <common.h>
#include <asm/io.h>
#include <asm/ti-common/keystone_nav.h>

struct qm_config qm_memmap = {
	.stat_cfg = CONFIG_KSNAV_QM_QUEUE_STATUS_BASE,
	.queue = (void *)CONFIG_KSNAV_QM_MANAGER_QUEUES_BASE,
	.mngr_vbusm = CONFIG_KSNAV_QM_BASE_ADDRESS,
	.i_lram = CONFIG_KSNAV_QM_LINK_RAM_BASE,
	.proxy = (void *)CONFIG_KSNAV_QM_MANAGER_Q_PROXY_BASE,
	.status_ram = CONFIG_KSNAV_QM_STATUS_RAM_BASE,
	.mngr_cfg = (void *)CONFIG_KSNAV_QM_CONF_BASE,
	.intd_cfg = CONFIG_KSNAV_QM_INTD_CONF_BASE,
	.desc_mem = (void *)CONFIG_KSNAV_QM_DESC_SETUP_BASE,
	.region_num = CONFIG_KSNAV_QM_REGION_NUM,
	.pdsp_cmd = CONFIG_KSNAV_QM_PDSP1_CMD_BASE,
	.pdsp_ctl = CONFIG_KSNAV_QM_PDSP1_CTRL_BASE,
	.pdsp_iram = CONFIG_KSNAV_QM_PDSP1_IRAM_BASE,
	.qpool_num = CONFIG_KSNAV_QM_QPOOL_NUM,
};

/*
 * We use only one type of descriptor - host packet descriptors.
 * Memory for them is statically allocated here.
 */
struct qm_host_desc desc_pool[HDESC_NUM]
		__aligned(sizeof(struct qm_host_desc));

static struct qm_config *qm_cfg;

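/*
 * Convert a descriptor count into the 4-bit size code programmed into
 * desc_mem[].desc_reg_size: code j covers up to 32 * 2^j descriptors
 * (0 -> 32, 1 -> 64, ...), capped at 15.
 */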
inline int num_of_desc_to_reg(int num_descr)
{
	int j, num;

	for (j = 0, num = 32; j < 15; j++, num *= 2) {
		if (num_descr <= num)
			return j;
	}

	return 15;
}

int _qm_init(struct qm_config *cfg)
{
	u32 j;

	qm_cfg = cfg;

	qm_cfg->mngr_cfg->link_ram_base0 = qm_cfg->i_lram;
	qm_cfg->mngr_cfg->link_ram_size0 = HDESC_NUM * 8;
	qm_cfg->mngr_cfg->link_ram_base1 = 0;
	qm_cfg->mngr_cfg->link_ram_size1 = 0;
	qm_cfg->mngr_cfg->link_ram_base2 = 0;

	qm_cfg->desc_mem[0].base_addr = (u32)desc_pool;
	qm_cfg->desc_mem[0].start_idx = 0;
	qm_cfg->desc_mem[0].desc_reg_size =
		(((sizeof(struct qm_host_desc) >> 4) - 1) << 16) |
		num_of_desc_to_reg(HDESC_NUM);

	memset(desc_pool, 0, sizeof(desc_pool));
	for (j = 0; j < HDESC_NUM; j++)
		qm_push(&desc_pool[j], qm_cfg->qpool_num);

	return QM_OK;
}

int qm_init(void)
{
	return _qm_init(&qm_memmap);
}

void qm_close(void)
{
	u32 j;

	if (qm_cfg == NULL)
		return;

	queue_close(qm_cfg->qpool_num);

	qm_cfg->mngr_cfg->link_ram_base0 = 0;
	qm_cfg->mngr_cfg->link_ram_size0 = 0;
	qm_cfg->mngr_cfg->link_ram_base1 = 0;
	qm_cfg->mngr_cfg->link_ram_size1 = 0;
	qm_cfg->mngr_cfg->link_ram_base2 = 0;

	for (j = 0; j < qm_cfg->region_num; j++) {
		qm_cfg->desc_mem[j].base_addr = 0;
		qm_cfg->desc_mem[j].start_idx = 0;
		qm_cfg->desc_mem[j].desc_reg_size = 0;
	}

	qm_cfg = NULL;
}

void qm_push(struct qm_host_desc *hd, u32 qnum)
{
	u32 regd;

	if (!qm_cfg)
		return;

	cpu_to_bus((u32 *)hd, sizeof(struct qm_host_desc)/4);

	/* Queue register D: descriptor pointer ORed with size hint (size/16 - 1) */
	regd = (u32)hd | ((sizeof(struct qm_host_desc) >> 4) - 1);
	writel(regd, &qm_cfg->queue[qnum].ptr_size_thresh);
}

void qm_buff_push(struct qm_host_desc *hd, u32 qnum,
		  void *buff_ptr, u32 buff_len)
{
	hd->orig_buff_len = buff_len;
	hd->buff_len = buff_len;
	hd->orig_buff_ptr = (u32)buff_ptr;
	hd->buff_ptr = (u32)buff_ptr;
	qm_push(hd, qnum);
}

struct qm_host_desc *qm_pop(u32 qnum)
{
	u32 uhd;

	if (!qm_cfg)
		return NULL;

	/* Mask off the descriptor size hint carried in the low four bits */
	uhd = readl(&qm_cfg->queue[qnum].ptr_size_thresh) & ~0xf;
	if (uhd)
		cpu_to_bus((u32 *)uhd, sizeof(struct qm_host_desc)/4);

	return (struct qm_host_desc *)uhd;
}

struct qm_host_desc *qm_pop_from_free_pool(void)
{
	if (!qm_cfg)
		return NULL;

	return qm_pop(qm_cfg->qpool_num);
}

void queue_close(u32 qnum)
{
	struct qm_host_desc *hd;

	while ((hd = qm_pop(qnum)))
		;
}

/**
 * DMA API
 */
struct pktdma_cfg netcp_pktdma = {
	.global = (void *)CONFIG_KSNAV_NETCP_PDMA_CTRL_BASE,
	.tx_ch = (void *)CONFIG_KSNAV_NETCP_PDMA_TX_BASE,
	.tx_ch_num = CONFIG_KSNAV_NETCP_PDMA_TX_CH_NUM,
	.rx_ch = (void *)CONFIG_KSNAV_NETCP_PDMA_RX_BASE,
	.rx_ch_num = CONFIG_KSNAV_NETCP_PDMA_RX_CH_NUM,
	.tx_sched = (u32 *)CONFIG_KSNAV_NETCP_PDMA_SCHED_BASE,
	.rx_flows = (void *)CONFIG_KSNAV_NETCP_PDMA_RX_FLOW_BASE,
	.rx_flow_num = CONFIG_KSNAV_NETCP_PDMA_RX_FLOW_NUM,
	.rx_free_q = CONFIG_KSNAV_NETCP_PDMA_RX_FREE_QUEUE,
	.rx_rcv_q = CONFIG_KSNAV_NETCP_PDMA_RX_RCV_QUEUE,
	.tx_snd_q = CONFIG_KSNAV_NETCP_PDMA_TX_SND_QUEUE,
};

struct pktdma_cfg *netcp;

static int netcp_rx_disable(void)
{
	u32 j, v, k;

	for (j = 0; j < netcp->rx_ch_num; j++) {
		v = readl(&netcp->rx_ch[j].cfg_a);
		if (!(v & CPDMA_CHAN_A_ENABLE))
			continue;

		/* Request teardown and wait for the channel to go idle */
		writel(v | CPDMA_CHAN_A_TDOWN, &netcp->rx_ch[j].cfg_a);
		for (k = 0; k < TDOWN_TIMEOUT_COUNT; k++) {
			udelay(100);
			v = readl(&netcp->rx_ch[j].cfg_a);
			if (!(v & CPDMA_CHAN_A_ENABLE))
				break;
		}
		/* TODO: report a teardown error if TDOWN_TIMEOUT_COUNT is reached */
	}

	/* Clear all of the flow registers */
	for (j = 0; j < netcp->rx_flow_num; j++) {
		writel(0, &netcp->rx_flows[j].control);
		writel(0, &netcp->rx_flows[j].tags);
		writel(0, &netcp->rx_flows[j].tag_sel);
		writel(0, &netcp->rx_flows[j].fdq_sel[0]);
		writel(0, &netcp->rx_flows[j].fdq_sel[1]);
		writel(0, &netcp->rx_flows[j].thresh[0]);
		writel(0, &netcp->rx_flows[j].thresh[1]);
		writel(0, &netcp->rx_flows[j].thresh[2]);
	}

	return QM_OK;
}

static int netcp_tx_disable(void)
{
	u32 j, v, k;

	for (j = 0; j < netcp->tx_ch_num; j++) {
		v = readl(&netcp->tx_ch[j].cfg_a);
		if (!(v & CPDMA_CHAN_A_ENABLE))
			continue;

		/* Request teardown and wait for the channel to go idle */
		writel(v | CPDMA_CHAN_A_TDOWN, &netcp->tx_ch[j].cfg_a);
		for (k = 0; k < TDOWN_TIMEOUT_COUNT; k++) {
			udelay(100);
			v = readl(&netcp->tx_ch[j].cfg_a);
			if (!(v & CPDMA_CHAN_A_ENABLE))
				break;
		}
		/* TODO: report a teardown error if TDOWN_TIMEOUT_COUNT is reached */
	}

	return QM_OK;
}

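/*
 * Attach the packet DMA to the queue manager: pre-fill the rx free queue
 * with descriptors pointing into the caller's rx buffer area, program the
 * rx flow to take free descriptors from the rx free queue and deliver
 * completed packets to the rx receive queue, then enable the rx and tx
 * channels.
 */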
static int _netcp_init(struct pktdma_cfg *netcp_cfg,
		       struct rx_buff_desc *rx_buffers)
{
	u32 j, v;
	struct qm_host_desc *hd;
	u8 *rx_ptr;

	if (netcp_cfg == NULL || rx_buffers == NULL ||
	    rx_buffers->buff_ptr == NULL || qm_cfg == NULL)
		return QM_ERR;

	netcp = netcp_cfg;
	netcp->rx_flow = rx_buffers->rx_flow;

	/* init rx queue */
	rx_ptr = rx_buffers->buff_ptr;

	for (j = 0; j < rx_buffers->num_buffs; j++) {
		hd = qm_pop(qm_cfg->qpool_num);
		if (hd == NULL)
			return QM_ERR;

		qm_buff_push(hd, netcp->rx_free_q,
			     rx_ptr, rx_buffers->buff_len);

		rx_ptr += rx_buffers->buff_len;
	}

	netcp_rx_disable();

	/* configure rx channels */
	v = CPDMA_REG_VAL_MAKE_RX_FLOW_A(1, 1, 0, 0, 0, 0, 0, netcp->rx_rcv_q);
	writel(v, &netcp->rx_flows[netcp->rx_flow].control);
	writel(0, &netcp->rx_flows[netcp->rx_flow].tags);
	writel(0, &netcp->rx_flows[netcp->rx_flow].tag_sel);

	v = CPDMA_REG_VAL_MAKE_RX_FLOW_D(0, netcp->rx_free_q, 0,
					 netcp->rx_free_q);

	writel(v, &netcp->rx_flows[netcp->rx_flow].fdq_sel[0]);
	writel(v, &netcp->rx_flows[netcp->rx_flow].fdq_sel[1]);
	writel(0, &netcp->rx_flows[netcp->rx_flow].thresh[0]);
	writel(0, &netcp->rx_flows[netcp->rx_flow].thresh[1]);
	writel(0, &netcp->rx_flows[netcp->rx_flow].thresh[2]);

	for (j = 0; j < netcp->rx_ch_num; j++)
		writel(CPDMA_CHAN_A_ENABLE, &netcp->rx_ch[j].cfg_a);

	/* configure tx channels */
	/* Disable loopback in the tx direction */
	writel(0, &netcp->global->emulation_control);

	/* Set QM base address, only for K2x devices */
	writel(CONFIG_KSNAV_QM_BASE_ADDRESS, &netcp->global->qm_base_addr[0]);

	/* Enable all channels. The current state isn't important */
	for (j = 0; j < netcp->tx_ch_num; j++) {
		writel(0, &netcp->tx_ch[j].cfg_b);
		writel(CPDMA_CHAN_A_ENABLE, &netcp->tx_ch[j].cfg_a);
	}

	return QM_OK;
}

int netcp_init(struct rx_buff_desc *rx_buffers)
{
	return _netcp_init(&netcp_pktdma, rx_buffers);
}

int netcp_close(void)
{
	if (!netcp)
		return QM_ERR;

	netcp_tx_disable();
	netcp_rx_disable();

	queue_close(netcp->rx_free_q);
	queue_close(netcp->rx_rcv_q);
	queue_close(netcp->tx_snd_q);

	return QM_OK;
}

int netcp_send(u32 *pkt, int num_bytes, u32 swinfo2)
{
	struct qm_host_desc *hd;

	hd = qm_pop(qm_cfg->qpool_num);
	if (hd == NULL)
		return QM_ERR;

	hd->desc_info = num_bytes;
	hd->swinfo[2] = swinfo2;
	hd->packet_info = qm_cfg->qpool_num;

	qm_buff_push(hd, netcp->tx_snd_q, pkt, num_bytes);

	return QM_OK;
}

void *netcp_recv(u32 **pkt, int *num_bytes)
{
	struct qm_host_desc *hd;

	hd = qm_pop(netcp->rx_rcv_q);
	if (!hd)
		return NULL;

	*pkt = (u32 *)hd->buff_ptr;
	*num_bytes = hd->desc_info & 0x3fffff;

	return hd;
}

void netcp_release_rxhd(void *hd)
{
	struct qm_host_desc *_hd = (struct qm_host_desc *)hd;

	_hd->buff_len = _hd->orig_buff_len;
	_hd->buff_ptr = _hd->orig_buff_ptr;

	qm_push(_hd, netcp->rx_free_q);
}
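
/*
 * Illustrative call sequence (not part of the driver): a sketch of how a
 * board-side caller might drive this API.  RX_BUFS_NUM, RX_BUF_SIZE and the
 * rx_flow/swinfo2 values below are placeholders, not taken from any real
 * board configuration.
 *
 *	static u8 rx_bufs[RX_BUFS_NUM * RX_BUF_SIZE];
 *	struct rx_buff_desc rx_cfg = {
 *		.buff_ptr  = rx_bufs,
 *		.num_buffs = RX_BUFS_NUM,
 *		.buff_len  = RX_BUF_SIZE,
 *		.rx_flow   = 0,
 *	};
 *	u32 *pkt;
 *	int len;
 *	void *hd;
 *
 *	qm_init();
 *	netcp_init(&rx_cfg);
 *
 *	netcp_send(tx_pkt, tx_len, swinfo2);
 *
 *	hd = netcp_recv(&pkt, &len);
 *	if (hd) {
 *		... process the packet at pkt, length len, then ...
 *		netcp_release_rxhd(hd);
 *	}
 *
 *	netcp_close();
 *	qm_close();
 */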