/* Altera TSE SGDMA and MSGDMA Linux driver
 * Copyright (C) 2014 Altera Corporation. All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/list.h>
#include "altera_utils.h"
#include "altera_tse.h"
#include "altera_sgdmahw.h"
#include "altera_sgdma.h"

static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
				struct sgdma_descrip __iomem *ndesc,
				dma_addr_t ndesc_phys,
				dma_addr_t raddr,
				dma_addr_t waddr,
				u16 length,
				int generate_eop,
				int rfixed,
				int wfixed);

static int sgdma_async_write(struct altera_tse_private *priv,
			     struct sgdma_descrip __iomem *desc);

static int sgdma_async_read(struct altera_tse_private *priv);

static dma_addr_t
sgdma_txphysaddr(struct altera_tse_private *priv,
		 struct sgdma_descrip __iomem *desc);

static dma_addr_t
sgdma_rxphysaddr(struct altera_tse_private *priv,
		 struct sgdma_descrip __iomem *desc);

static int sgdma_txbusy(struct altera_tse_private *priv);

static int sgdma_rxbusy(struct altera_tse_private *priv);

static void
queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer);

static void
queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer);

static struct tse_buffer *
dequeue_tx(struct altera_tse_private *priv);

static struct tse_buffer *
dequeue_rx(struct altera_tse_private *priv);

static struct tse_buffer *
queue_rx_peekhead(struct altera_tse_private *priv);

int sgdma_initialize(struct altera_tse_private *priv)
{
	priv->txctrlreg = SGDMA_CTRLREG_ILASTD |
			  SGDMA_CTRLREG_INTEN;

	priv->rxctrlreg = SGDMA_CTRLREG_IDESCRIP |
			  SGDMA_CTRLREG_INTEN |
			  SGDMA_CTRLREG_ILASTD;

	INIT_LIST_HEAD(&priv->txlisthd);
	INIT_LIST_HEAD(&priv->rxlisthd);

	priv->rxdescphys = (dma_addr_t) 0;
	priv->txdescphys = (dma_addr_t) 0;

	priv->rxdescphys = dma_map_single(priv->device,
					  (void __force *)priv->rx_dma_desc,
					  priv->rxdescmem, DMA_BIDIRECTIONAL);

	if (dma_mapping_error(priv->device, priv->rxdescphys)) {
		sgdma_uninitialize(priv);
		netdev_err(priv->dev, "error mapping rx descriptor memory\n");
		return -EINVAL;
	}

	priv->txdescphys = dma_map_single(priv->device,
					  (void __force *)priv->tx_dma_desc,
					  priv->txdescmem, DMA_TO_DEVICE);

	if (dma_mapping_error(priv->device, priv->txdescphys)) {
		sgdma_uninitialize(priv);
		netdev_err(priv->dev, "error mapping tx descriptor memory\n");
		return -EINVAL;
	}

	/* Initialize descriptor memory to all 0's, sync memory to cache */
	memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
	memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);

	dma_sync_single_for_device(priv->device, priv->txdescphys,
				   priv->txdescmem, DMA_TO_DEVICE);

	dma_sync_single_for_device(priv->device, priv->rxdescphys,
				   priv->rxdescmem, DMA_TO_DEVICE);

	return 0;
}
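/* For reference, a sketch of the descriptor layout these routines assume.
 * The authoritative definition lives in altera_sgdmahw.h (not shown here);
 * the fields below are inferred from the sgdma_descroffs() accesses in this
 * file, and the exact ordering may not match the header.
 *
 *	struct sgdma_descrip {
 *		u32 raddr;		// read address (packet source for tx)
 *		u32 pad1;
 *		u32 next;		// bus address of the next descriptor
 *		u32 pad2;
 *		u32 waddr;		// write address (packet sink for rx)
 *		u32 pad3;
 *		u16 bytes;		// requested transfer length
 *		u8  rburst;
 *		u8  wburst;
 *		u16 bytes_xferred;	// actual transfer length
 *		u8  status;		// per-packet error bits
 *		u8  control;		// EOP / fixed-address / HW_OWNED bits
 *	};
 *
 * with sgdma_descroffs(a) presumably expanding to
 * offsetof(struct sgdma_descrip, a).
 */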
void sgdma_uninitialize(struct altera_tse_private *priv)
{
	if (priv->rxdescphys)
		dma_unmap_single(priv->device, priv->rxdescphys,
				 priv->rxdescmem, DMA_BIDIRECTIONAL);

	if (priv->txdescphys)
		dma_unmap_single(priv->device, priv->txdescphys,
				 priv->txdescmem, DMA_TO_DEVICE);
}

/* This function resets the SGDMA controller and clears the
 * descriptor memory used for transmits and receives.
 */
void sgdma_reset(struct altera_tse_private *priv)
{
	/* Initialize descriptor memory to 0 */
	memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
	memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);

	csrwr32(SGDMA_CTRLREG_RESET, priv->tx_dma_csr, sgdma_csroffs(control));
	csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));

	csrwr32(SGDMA_CTRLREG_RESET, priv->rx_dma_csr, sgdma_csroffs(control));
	csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
}

/* For SGDMA, interrupts remain enabled after initially enabling,
 * so there is no need to provide implementations for the abstract
 * enable and disable hooks.
 */
void sgdma_enable_rxirq(struct altera_tse_private *priv)
{
}

void sgdma_enable_txirq(struct altera_tse_private *priv)
{
}

void sgdma_disable_rxirq(struct altera_tse_private *priv)
{
}

void sgdma_disable_txirq(struct altera_tse_private *priv)
{
}

void sgdma_clear_rxirq(struct altera_tse_private *priv)
{
	tse_set_bit(priv->rx_dma_csr, sgdma_csroffs(control),
		    SGDMA_CTRLREG_CLRINT);
}

void sgdma_clear_txirq(struct altera_tse_private *priv)
{
	tse_set_bit(priv->tx_dma_csr, sgdma_csroffs(control),
		    SGDMA_CTRLREG_CLRINT);
}
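/* The exported routines in this file are not called directly by the core
 * driver; they are assumed to be plugged into the driver's abstract DMA-ops
 * table (struct altera_dmaops in altera_tse.h). A rough sketch of the
 * expected wiring - the field names here may differ from the actual header:
 *
 *	static const struct altera_dmaops altera_dtype_sgdma = {
 *		.reset_dma	= sgdma_reset,
 *		.clear_txirq	= sgdma_clear_txirq,
 *		.clear_rxirq	= sgdma_clear_rxirq,
 *		.tx_buffer	= sgdma_tx_buffer,
 *		.tx_completions	= sgdma_tx_completions,
 *		.add_rx_desc	= sgdma_add_rx_desc,
 *		.get_rx_status	= sgdma_rx_status,
 *		.init_dma	= sgdma_initialize,
 *		.uninit_dma	= sgdma_uninitialize,
 *		.start_rxdma	= sgdma_start_rxdma,
 *		...
 *	};
 */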
/* Transmits a buffer through the SGDMA. Returns the number of buffers
 * transmitted, 0 if not possible.
 *
 * tx_lock is held by the caller
 */
int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
	struct sgdma_descrip __iomem *descbase =
		(struct sgdma_descrip __iomem *)priv->tx_dma_desc;

	struct sgdma_descrip __iomem *cdesc = &descbase[0];
	struct sgdma_descrip __iomem *ndesc = &descbase[1];

	/* wait 'til the tx sgdma is ready for the next transmit request */
	if (sgdma_txbusy(priv))
		return 0;

	sgdma_setup_descrip(cdesc,			/* current descriptor */
			    ndesc,			/* next descriptor */
			    sgdma_txphysaddr(priv, ndesc),
			    buffer->dma_addr,		/* address of packet to xmit */
			    0,				/* write addr 0 for tx dma */
			    buffer->len,		/* length of packet */
			    SGDMA_CONTROL_EOP,		/* Generate EOP */
			    0,				/* read fixed */
			    SGDMA_CONTROL_WR_FIXED);	/* write fixed */

	sgdma_async_write(priv, cdesc);

	/* enqueue the request to the pending transmit queue */
	queue_tx(priv, buffer);

	return 1;
}

/* tx_lock held to protect access to queued tx list
 */
u32 sgdma_tx_completions(struct altera_tse_private *priv)
{
	u32 ready = 0;

	if (!sgdma_txbusy(priv) &&
	    ((csrrd8(priv->tx_dma_desc, sgdma_descroffs(control))
	     & SGDMA_CONTROL_HW_OWNED) == 0) &&
	    (dequeue_tx(priv))) {
		ready = 1;
	}

	return ready;
}

void sgdma_start_rxdma(struct altera_tse_private *priv)
{
	sgdma_async_read(priv);
}

void sgdma_add_rx_desc(struct altera_tse_private *priv,
		       struct tse_buffer *rxbuffer)
{
	queue_rx(priv, rxbuffer);
}
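/* Illustrative only: given the packed status/length convention used by
 * sgdma_rx_status() below, a caller (e.g. the driver's rx NAPI path) would
 * unpack the return value along these lines; the variable names here are
 * hypothetical.
 *
 *	u32 rxstatus = sgdma_rx_status(priv);
 *	u16 pktstatus = rxstatus >> 16;		// descriptor error bits
 *	u16 pktlength = rxstatus & 0xffff;	// bytes actually transferred
 *	if (pktstatus & 0xff)
 *		... count and drop the errored packet ...
 *	else
 *		... hand pktlength bytes up the stack ...
 */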
/* status is returned on upper 16 bits,
 * length is returned in lower 16 bits
 */
u32 sgdma_rx_status(struct altera_tse_private *priv)
{
	struct sgdma_descrip __iomem *base =
		(struct sgdma_descrip __iomem *)priv->rx_dma_desc;
	struct sgdma_descrip __iomem *desc = NULL;
	struct tse_buffer *rxbuffer = NULL;
	unsigned int rxstatus = 0;

	u32 sts = csrrd32(priv->rx_dma_csr, sgdma_csroffs(status));

	desc = &base[0];
	if (sts & SGDMA_STSREG_EOP) {
		unsigned int pktlength = 0;
		unsigned int pktstatus = 0;
		dma_sync_single_for_cpu(priv->device,
					priv->rxdescphys,
					SGDMA_DESC_LEN,
					DMA_FROM_DEVICE);

		pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred));
		pktstatus = csrrd8(desc, sgdma_descroffs(status));
		rxstatus = pktstatus & ~SGDMA_STATUS_EOP;
		rxstatus = rxstatus << 16;
		rxstatus |= (pktlength & 0xffff);

		if (rxstatus) {
			csrwr8(0, desc, sgdma_descroffs(status));

			rxbuffer = dequeue_rx(priv);
			if (rxbuffer == NULL)
				netdev_info(priv->dev,
					    "sgdma rx and rx queue empty!\n");

			/* Clear control */
			csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
			/* clear status */
			csrwr32(0xf, priv->rx_dma_csr, sgdma_csroffs(status));

			/* kick the rx sgdma after reaping this descriptor */
			sgdma_async_read(priv);

		} else {
			/* If the SGDMA indicated an end of packet on recv,
			 * then it's expected that the rxstatus from the
			 * descriptor is non-zero - meaning a valid packet
			 * with a nonzero length, or an error has been
			 * indicated. If not, then all we can do is signal
			 * an error and return no packet received. Most likely
			 * there is a system design error, or an error in the
			 * underlying kernel (cache or cache management
			 * problem).
			 */
			netdev_err(priv->dev,
				   "SGDMA RX Error Info: %x, %x, %x\n",
				   sts, csrrd8(desc, sgdma_descroffs(status)),
				   rxstatus);
		}
	} else if (sts == 0) {
		sgdma_async_read(priv);
	}

	return rxstatus;
}


/* Private functions */
static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
				struct sgdma_descrip __iomem *ndesc,
				dma_addr_t ndesc_phys,
				dma_addr_t raddr,
				dma_addr_t waddr,
				u16 length,
				int generate_eop,
				int rfixed,
				int wfixed)
{
	/* Clear the next descriptor as not owned by hardware */
	u32 ctrl = csrrd8(ndesc, sgdma_descroffs(control));

	ctrl &= ~SGDMA_CONTROL_HW_OWNED;
	csrwr8(ctrl, ndesc, sgdma_descroffs(control));

	ctrl = SGDMA_CONTROL_HW_OWNED;
	ctrl |= generate_eop;
	ctrl |= rfixed;
	ctrl |= wfixed;

	/* Channel is implicitly zero, initialized to 0 by default */
	csrwr32(lower_32_bits(raddr), desc, sgdma_descroffs(raddr));
	csrwr32(lower_32_bits(waddr), desc, sgdma_descroffs(waddr));

	csrwr32(0, desc, sgdma_descroffs(pad1));
	csrwr32(0, desc, sgdma_descroffs(pad2));
	csrwr32(lower_32_bits(ndesc_phys), desc, sgdma_descroffs(next));

	csrwr8(ctrl, desc, sgdma_descroffs(control));
	csrwr8(0, desc, sgdma_descroffs(status));
	csrwr8(0, desc, sgdma_descroffs(wburst));
	csrwr8(0, desc, sgdma_descroffs(rburst));
	csrwr16(length, desc, sgdma_descroffs(bytes));
	csrwr16(0, desc, sgdma_descroffs(bytes_xferred));
}
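/* A note on the ownership handshake (inferred from this file, not from
 * hardware documentation): the driver hands a descriptor to the engine by
 * setting SGDMA_CONTROL_HW_OWNED in the control byte, and the next
 * descriptor in the chain is explicitly marked not-hardware-owned so the
 * engine stops after a single buffer. Completion is then detected when
 * hardware clears the bit, as sgdma_tx_completions() does:
 *
 *	if (!(csrrd8(desc, sgdma_descroffs(control)) &
 *	      SGDMA_CONTROL_HW_OWNED))
 *		... descriptor has been consumed by the engine ...
 */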
/* If hardware is busy, don't restart async read.
 * If the status register is 0 - meaning the initial state - restart the
 * async read, probably for the first time when populating a receive buffer.
 * If the read status indicates not busy and a status is present, restart
 * the async DMA read.
 */
static int sgdma_async_read(struct altera_tse_private *priv)
{
	struct sgdma_descrip __iomem *descbase =
		(struct sgdma_descrip __iomem *)priv->rx_dma_desc;

	struct sgdma_descrip __iomem *cdesc = &descbase[0];
	struct sgdma_descrip __iomem *ndesc = &descbase[1];
	struct tse_buffer *rxbuffer = NULL;

	if (!sgdma_rxbusy(priv)) {
		rxbuffer = queue_rx_peekhead(priv);
		if (rxbuffer == NULL) {
			netdev_err(priv->dev, "no rx buffers available\n");
			return 0;
		}

		sgdma_setup_descrip(cdesc,		/* current descriptor */
				    ndesc,		/* next descriptor */
				    sgdma_rxphysaddr(priv, ndesc),
				    0,			/* read addr 0 for rx dma */
				    rxbuffer->dma_addr, /* write addr for rx dma */
				    0,			/* read 'til EOP */
				    0,			/* EOP: NA for rx dma */
				    0,			/* read fixed: NA for rx dma */
				    0);			/* write fixed: NA for rx dma */

		dma_sync_single_for_device(priv->device,
					   priv->rxdescphys,
					   SGDMA_DESC_LEN,
					   DMA_TO_DEVICE);

		csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
			priv->rx_dma_csr,
			sgdma_csroffs(next_descrip));

		csrwr32((priv->rxctrlreg | SGDMA_CTRLREG_START),
			priv->rx_dma_csr,
			sgdma_csroffs(control));

		return 1;
	}

	return 0;
}

static int sgdma_async_write(struct altera_tse_private *priv,
			     struct sgdma_descrip __iomem *desc)
{
	if (sgdma_txbusy(priv))
		return 0;

	/* clear control and status */
	csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
	csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status));

	dma_sync_single_for_device(priv->device, priv->txdescphys,
				   SGDMA_DESC_LEN, DMA_TO_DEVICE);

	csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
		priv->tx_dma_csr,
		sgdma_csroffs(next_descrip));

	csrwr32((priv->txctrlreg | SGDMA_CTRLREG_START),
		priv->tx_dma_csr,
		sgdma_csroffs(control));

	return 1;
}

static dma_addr_t
sgdma_txphysaddr(struct altera_tse_private *priv,
		 struct sgdma_descrip __iomem *desc)
{
	dma_addr_t paddr = priv->txdescmem_busaddr;
	uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc;
	return (dma_addr_t)((uintptr_t)paddr + offs);
}

static dma_addr_t
sgdma_rxphysaddr(struct altera_tse_private *priv,
		 struct sgdma_descrip __iomem *desc)
{
	dma_addr_t paddr = priv->rxdescmem_busaddr;
	uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc;
	return (dma_addr_t)((uintptr_t)paddr + offs);
}

#define list_remove_head(list, entry, type, member)			\
	do {								\
		entry = NULL;						\
		if (!list_empty(list)) {				\
			entry = list_entry((list)->next, type, member);	\
			list_del_init(&entry->member);			\
		}							\
	} while (0)

#define list_peek_head(list, entry, type, member)			\
	do {								\
		entry = NULL;						\
		if (!list_empty(list)) {				\
			entry = list_entry((list)->next, type, member);	\
		}							\
	} while (0)

/* adds a tse_buffer to the tail of a tx buffer list.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static void
queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
	list_add_tail(&buffer->lh, &priv->txlisthd);
}
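/* Illustrative expansion of the helper macros above, using the standard
 * <linux/list.h> semantics: for the tx list,
 *
 *	list_remove_head(&priv->txlisthd, buffer, struct tse_buffer, lh);
 *
 * is equivalent to
 *
 *	buffer = NULL;
 *	if (!list_empty(&priv->txlisthd)) {
 *		buffer = list_entry(priv->txlisthd.next,
 *				    struct tse_buffer, lh);
 *		list_del_init(&buffer->lh);
 *	}
 *
 * list_peek_head() is identical except that the entry stays on the list.
 */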
/* adds a tse_buffer to the tail of an rx buffer list
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static void
queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
	list_add_tail(&buffer->lh, &priv->rxlisthd);
}

/* dequeues a tse_buffer from the transmit buffer list, otherwise
 * returns NULL if empty.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static struct tse_buffer *
dequeue_tx(struct altera_tse_private *priv)
{
	struct tse_buffer *buffer = NULL;
	list_remove_head(&priv->txlisthd, buffer, struct tse_buffer, lh);
	return buffer;
}

/* dequeues a tse_buffer from the receive buffer list, otherwise
 * returns NULL if empty
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static struct tse_buffer *
dequeue_rx(struct altera_tse_private *priv)
{
	struct tse_buffer *buffer = NULL;
	list_remove_head(&priv->rxlisthd, buffer, struct tse_buffer, lh);
	return buffer;
}

/* returns, without removing, the head of the receive buffer list,
 * or NULL if the list is empty.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list while the
 * head is being examined.
 */
static struct tse_buffer *
queue_rx_peekhead(struct altera_tse_private *priv)
{
	struct tse_buffer *buffer = NULL;
	list_peek_head(&priv->rxlisthd, buffer, struct tse_buffer, lh);
	return buffer;
}

/* checks and returns the rx sgdma busy status without polling
 */
static int sgdma_rxbusy(struct altera_tse_private *priv)
{
	return csrrd32(priv->rx_dma_csr, sgdma_csroffs(status))
	       & SGDMA_STSREG_BUSY;
}

/* waits for the tx sgdma to finish its current operation; returns 0
 * when it transitions to nonbusy, 1 if the operation times out
 */
static int sgdma_txbusy(struct altera_tse_private *priv)
{
	int delay = 0;

	/* if DMA is busy, wait for the current transaction to finish */
	while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
		& SGDMA_STSREG_BUSY) && (delay++ < 100))
		udelay(1);

	if (csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
	    & SGDMA_STSREG_BUSY) {
		netdev_err(priv->dev, "timeout waiting for tx dma\n");
		return 1;
	}
	return 0;
}
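/* The bounded busy-wait above polls for at most ~100us. A sketch of an
 * equivalent formulation using readx_poll_timeout_atomic() from
 * <linux/iopoll.h> (illustrative only; csrrd32() takes two arguments, so a
 * hypothetical one-argument wrapper such as sgdma_rd_txstatus() would be
 * needed):
 *
 *	static u32 sgdma_rd_txstatus(void __iomem *csr)
 *	{
 *		return csrrd32(csr, sgdma_csroffs(status));
 *	}
 *	...
 *	u32 sts;
 *	if (readx_poll_timeout_atomic(sgdma_rd_txstatus, priv->tx_dma_csr,
 *				      sts, !(sts & SGDMA_STSREG_BUSY),
 *				      1, 100))
 *		netdev_err(priv->dev, "timeout waiting for tx dma\n");
 */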