1 /* 2 * Driver for Marvell NETA network controller Buffer Manager. 3 * 4 * Copyright (C) 2015 Marvell 5 * 6 * Marcin Wojtas <mw@semihalf.com> 7 * 8 * This file is licensed under the terms of the GNU General Public 9 * License version 2. This program is licensed "as is" without any 10 * warranty of any kind, whether express or implied. 11 */ 12 13 #include <linux/clk.h> 14 #include <linux/genalloc.h> 15 #include <linux/io.h> 16 #include <linux/kernel.h> 17 #include <linux/mbus.h> 18 #include <linux/module.h> 19 #include <linux/netdevice.h> 20 #include <linux/of.h> 21 #include <linux/platform_device.h> 22 #include <linux/skbuff.h> 23 #include <net/hwbm.h> 24 #include "mvneta_bm.h" 25 26 #define MVNETA_BM_DRIVER_NAME "mvneta_bm" 27 #define MVNETA_BM_DRIVER_VERSION "1.0" 28 29 static void mvneta_bm_write(struct mvneta_bm *priv, u32 offset, u32 data) 30 { 31 writel(data, priv->reg_base + offset); 32 } 33 34 static u32 mvneta_bm_read(struct mvneta_bm *priv, u32 offset) 35 { 36 return readl(priv->reg_base + offset); 37 } 38 39 static void mvneta_bm_pool_enable(struct mvneta_bm *priv, int pool_id) 40 { 41 u32 val; 42 43 val = mvneta_bm_read(priv, MVNETA_BM_POOL_BASE_REG(pool_id)); 44 val |= MVNETA_BM_POOL_ENABLE_MASK; 45 mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(pool_id), val); 46 47 /* Clear BM cause register */ 48 mvneta_bm_write(priv, MVNETA_BM_INTR_CAUSE_REG, 0); 49 } 50 51 static void mvneta_bm_pool_disable(struct mvneta_bm *priv, int pool_id) 52 { 53 u32 val; 54 55 val = mvneta_bm_read(priv, MVNETA_BM_POOL_BASE_REG(pool_id)); 56 val &= ~MVNETA_BM_POOL_ENABLE_MASK; 57 mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(pool_id), val); 58 } 59 60 static inline void mvneta_bm_config_set(struct mvneta_bm *priv, u32 mask) 61 { 62 u32 val; 63 64 val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG); 65 val |= mask; 66 mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val); 67 } 68 69 static inline void mvneta_bm_config_clear(struct mvneta_bm *priv, u32 mask) 70 { 71 u32 val; 72 73 val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG); 74 val &= ~mask; 75 mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val); 76 } 77 78 static void mvneta_bm_pool_target_set(struct mvneta_bm *priv, int pool_id, 79 u8 target_id, u8 attr) 80 { 81 u32 val; 82 83 val = mvneta_bm_read(priv, MVNETA_BM_XBAR_POOL_REG(pool_id)); 84 val &= ~MVNETA_BM_TARGET_ID_MASK(pool_id); 85 val &= ~MVNETA_BM_XBAR_ATTR_MASK(pool_id); 86 val |= MVNETA_BM_TARGET_ID_VAL(pool_id, target_id); 87 val |= MVNETA_BM_XBAR_ATTR_VAL(pool_id, attr); 88 89 mvneta_bm_write(priv, MVNETA_BM_XBAR_POOL_REG(pool_id), val); 90 } 91 92 int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf) 93 { 94 struct mvneta_bm_pool *bm_pool = 95 (struct mvneta_bm_pool *)hwbm_pool->priv; 96 struct mvneta_bm *priv = bm_pool->priv; 97 dma_addr_t phys_addr; 98 99 /* In order to update buf_cookie field of RX descriptor properly, 100 * BM hardware expects buf virtual address to be placed in the 101 * first four bytes of mapped buffer. 102 */ 103 *(u32 *)buf = (u32)buf; 104 phys_addr = dma_map_single(&priv->pdev->dev, buf, bm_pool->buf_size, 105 DMA_FROM_DEVICE); 106 if (unlikely(dma_mapping_error(&priv->pdev->dev, phys_addr))) 107 return -ENOMEM; 108 109 mvneta_bm_pool_put_bp(priv, bm_pool, phys_addr); 110 return 0; 111 } 112 EXPORT_SYMBOL_GPL(mvneta_bm_construct); 113 114 /* Create pool */ 115 static int mvneta_bm_pool_create(struct mvneta_bm *priv, 116 struct mvneta_bm_pool *bm_pool) 117 { 118 struct platform_device *pdev = priv->pdev; 119 u8 target_id, attr; 120 int size_bytes, err; 121 size_bytes = sizeof(u32) * bm_pool->hwbm_pool.size; 122 bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes, 123 &bm_pool->phys_addr, 124 GFP_KERNEL); 125 if (!bm_pool->virt_addr) 126 return -ENOMEM; 127 128 if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVNETA_BM_POOL_PTR_ALIGN)) { 129 dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr, 130 bm_pool->phys_addr); 131 dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n", 132 bm_pool->id, MVNETA_BM_POOL_PTR_ALIGN); 133 return -ENOMEM; 134 } 135 136 err = mvebu_mbus_get_dram_win_info(bm_pool->phys_addr, &target_id, 137 &attr); 138 if (err < 0) { 139 dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr, 140 bm_pool->phys_addr); 141 return err; 142 } 143 144 /* Set pool address */ 145 mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(bm_pool->id), 146 bm_pool->phys_addr); 147 148 mvneta_bm_pool_target_set(priv, bm_pool->id, target_id, attr); 149 mvneta_bm_pool_enable(priv, bm_pool->id); 150 151 return 0; 152 } 153 154 /* Notify the driver that BM pool is being used as specific type and return the 155 * pool pointer on success 156 */ 157 struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id, 158 enum mvneta_bm_type type, u8 port_id, 159 int pkt_size) 160 { 161 struct mvneta_bm_pool *new_pool = &priv->bm_pools[pool_id]; 162 int num, err; 163 164 if (new_pool->type == MVNETA_BM_LONG && 165 new_pool->port_map != 1 << port_id) { 166 dev_err(&priv->pdev->dev, 167 "long pool cannot be shared by the ports\n"); 168 return NULL; 169 } 170 171 if (new_pool->type == MVNETA_BM_SHORT && new_pool->type != type) { 172 dev_err(&priv->pdev->dev, 173 "mixing pools' types between the ports is forbidden\n"); 174 return NULL; 175 } 176 177 if (new_pool->pkt_size == 0 || type != MVNETA_BM_SHORT) 178 new_pool->pkt_size = pkt_size; 179 180 /* Allocate buffers in case BM pool hasn't been used yet */ 181 if (new_pool->type == MVNETA_BM_FREE) { 182 struct hwbm_pool *hwbm_pool = &new_pool->hwbm_pool; 183 184 new_pool->priv = priv; 185 new_pool->type = type; 186 new_pool->buf_size = MVNETA_RX_BUF_SIZE(new_pool->pkt_size); 187 hwbm_pool->frag_size = 188 SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(new_pool->pkt_size)) + 189 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 190 hwbm_pool->construct = mvneta_bm_construct; 191 hwbm_pool->priv = new_pool; 192 spin_lock_init(&hwbm_pool->lock); 193 194 /* Create new pool */ 195 err = mvneta_bm_pool_create(priv, new_pool); 196 if (err) { 197 dev_err(&priv->pdev->dev, "fail to create pool %d\n", 198 new_pool->id); 199 return NULL; 200 } 201 202 /* Allocate buffers for this pool */ 203 num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC); 204 if (num != hwbm_pool->size) { 205 WARN(1, "pool %d: %d of %d allocated\n", 206 new_pool->id, num, hwbm_pool->size); 207 return NULL; 208 } 209 } 210 211 return new_pool; 212 } 213 EXPORT_SYMBOL_GPL(mvneta_bm_pool_use); 214 215 /* Free all buffers from the pool */ 216 void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, 217 u8 port_map) 218 { 219 int i; 220 221 bm_pool->port_map &= ~port_map; 222 if (bm_pool->port_map) 223 return; 224 225 mvneta_bm_config_set(priv, MVNETA_BM_EMPTY_LIMIT_MASK); 226 227 for (i = 0; i < bm_pool->hwbm_pool.buf_num; i++) { 228 dma_addr_t buf_phys_addr; 229 u32 *vaddr; 230 231 /* Get buffer physical address (indirect access) */ 232 buf_phys_addr = mvneta_bm_pool_get_bp(priv, bm_pool); 233 234 /* Work-around to the problems when destroying the pool, 235 * when it occurs that a read access to BPPI returns 0. 236 */ 237 if (buf_phys_addr == 0) 238 continue; 239 240 vaddr = phys_to_virt(buf_phys_addr); 241 if (!vaddr) 242 break; 243 244 dma_unmap_single(&priv->pdev->dev, buf_phys_addr, 245 bm_pool->buf_size, DMA_FROM_DEVICE); 246 hwbm_buf_free(&bm_pool->hwbm_pool, vaddr); 247 } 248 249 mvneta_bm_config_clear(priv, MVNETA_BM_EMPTY_LIMIT_MASK); 250 251 /* Update BM driver with number of buffers removed from pool */ 252 bm_pool->hwbm_pool.buf_num -= i; 253 } 254 EXPORT_SYMBOL_GPL(mvneta_bm_bufs_free); 255 256 /* Cleanup pool */ 257 void mvneta_bm_pool_destroy(struct mvneta_bm *priv, 258 struct mvneta_bm_pool *bm_pool, u8 port_map) 259 { 260 struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool; 261 bm_pool->port_map &= ~port_map; 262 if (bm_pool->port_map) 263 return; 264 265 bm_pool->type = MVNETA_BM_FREE; 266 267 mvneta_bm_bufs_free(priv, bm_pool, port_map); 268 if (hwbm_pool->buf_num) 269 WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id); 270 271 if (bm_pool->virt_addr) { 272 dma_free_coherent(&priv->pdev->dev, 273 sizeof(u32) * hwbm_pool->size, 274 bm_pool->virt_addr, bm_pool->phys_addr); 275 bm_pool->virt_addr = NULL; 276 } 277 278 mvneta_bm_pool_disable(priv, bm_pool->id); 279 } 280 EXPORT_SYMBOL_GPL(mvneta_bm_pool_destroy); 281 282 static void mvneta_bm_pools_init(struct mvneta_bm *priv) 283 { 284 struct device_node *dn = priv->pdev->dev.of_node; 285 struct mvneta_bm_pool *bm_pool; 286 char prop[15]; 287 u32 size; 288 int i; 289 290 /* Activate BM unit */ 291 mvneta_bm_write(priv, MVNETA_BM_COMMAND_REG, MVNETA_BM_START_MASK); 292 293 /* Create all pools with maximum size */ 294 for (i = 0; i < MVNETA_BM_POOLS_NUM; i++) { 295 bm_pool = &priv->bm_pools[i]; 296 bm_pool->id = i; 297 bm_pool->type = MVNETA_BM_FREE; 298 299 /* Reset read pointer */ 300 mvneta_bm_write(priv, MVNETA_BM_POOL_READ_PTR_REG(i), 0); 301 302 /* Reset write pointer */ 303 mvneta_bm_write(priv, MVNETA_BM_POOL_WRITE_PTR_REG(i), 0); 304 305 /* Configure pool size according to DT or use default value */ 306 sprintf(prop, "pool%d,capacity", i); 307 if (of_property_read_u32(dn, prop, &size)) { 308 size = MVNETA_BM_POOL_CAP_DEF; 309 } else if (size > MVNETA_BM_POOL_CAP_MAX) { 310 dev_warn(&priv->pdev->dev, 311 "Illegal pool %d capacity %d, set to %d\n", 312 i, size, MVNETA_BM_POOL_CAP_MAX); 313 size = MVNETA_BM_POOL_CAP_MAX; 314 } else if (size < MVNETA_BM_POOL_CAP_MIN) { 315 dev_warn(&priv->pdev->dev, 316 "Illegal pool %d capacity %d, set to %d\n", 317 i, size, MVNETA_BM_POOL_CAP_MIN); 318 size = MVNETA_BM_POOL_CAP_MIN; 319 } else if (!IS_ALIGNED(size, MVNETA_BM_POOL_CAP_ALIGN)) { 320 dev_warn(&priv->pdev->dev, 321 "Illegal pool %d capacity %d, round to %d\n", 322 i, size, ALIGN(size, 323 MVNETA_BM_POOL_CAP_ALIGN)); 324 size = ALIGN(size, MVNETA_BM_POOL_CAP_ALIGN); 325 } 326 bm_pool->hwbm_pool.size = size; 327 328 mvneta_bm_write(priv, MVNETA_BM_POOL_SIZE_REG(i), 329 bm_pool->hwbm_pool.size); 330 331 /* Obtain custom pkt_size from DT */ 332 sprintf(prop, "pool%d,pkt-size", i); 333 if (of_property_read_u32(dn, prop, &bm_pool->pkt_size)) 334 bm_pool->pkt_size = 0; 335 } 336 } 337 338 static void mvneta_bm_default_set(struct mvneta_bm *priv) 339 { 340 u32 val; 341 342 /* Mask BM all interrupts */ 343 mvneta_bm_write(priv, MVNETA_BM_INTR_MASK_REG, 0); 344 345 /* Clear BM cause register */ 346 mvneta_bm_write(priv, MVNETA_BM_INTR_CAUSE_REG, 0); 347 348 /* Set BM configuration register */ 349 val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG); 350 351 /* Reduce MaxInBurstSize from 32 BPs to 16 BPs */ 352 val &= ~MVNETA_BM_MAX_IN_BURST_SIZE_MASK; 353 val |= MVNETA_BM_MAX_IN_BURST_SIZE_16BP; 354 mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val); 355 } 356 357 static int mvneta_bm_init(struct mvneta_bm *priv) 358 { 359 mvneta_bm_default_set(priv); 360 361 /* Allocate and initialize BM pools structures */ 362 priv->bm_pools = devm_kcalloc(&priv->pdev->dev, MVNETA_BM_POOLS_NUM, 363 sizeof(struct mvneta_bm_pool), 364 GFP_KERNEL); 365 if (!priv->bm_pools) 366 return -ENOMEM; 367 368 mvneta_bm_pools_init(priv); 369 370 return 0; 371 } 372 373 static int mvneta_bm_get_sram(struct device_node *dn, 374 struct mvneta_bm *priv) 375 { 376 priv->bppi_pool = of_gen_pool_get(dn, "internal-mem", 0); 377 if (!priv->bppi_pool) 378 return -ENOMEM; 379 380 priv->bppi_virt_addr = gen_pool_dma_alloc(priv->bppi_pool, 381 MVNETA_BM_BPPI_SIZE, 382 &priv->bppi_phys_addr); 383 if (!priv->bppi_virt_addr) 384 return -ENOMEM; 385 386 return 0; 387 } 388 389 static void mvneta_bm_put_sram(struct mvneta_bm *priv) 390 { 391 gen_pool_free(priv->bppi_pool, priv->bppi_phys_addr, 392 MVNETA_BM_BPPI_SIZE); 393 } 394 395 static int mvneta_bm_probe(struct platform_device *pdev) 396 { 397 struct device_node *dn = pdev->dev.of_node; 398 struct mvneta_bm *priv; 399 struct resource *res; 400 int err; 401 402 priv = devm_kzalloc(&pdev->dev, sizeof(struct mvneta_bm), GFP_KERNEL); 403 if (!priv) 404 return -ENOMEM; 405 406 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 407 priv->reg_base = devm_ioremap_resource(&pdev->dev, res); 408 if (IS_ERR(priv->reg_base)) 409 return PTR_ERR(priv->reg_base); 410 411 priv->clk = devm_clk_get(&pdev->dev, NULL); 412 if (IS_ERR(priv->clk)) 413 return PTR_ERR(priv->clk); 414 err = clk_prepare_enable(priv->clk); 415 if (err < 0) 416 return err; 417 418 err = mvneta_bm_get_sram(dn, priv); 419 if (err < 0) { 420 dev_err(&pdev->dev, "failed to allocate internal memory\n"); 421 goto err_clk; 422 } 423 424 priv->pdev = pdev; 425 426 /* Initialize buffer manager internals */ 427 err = mvneta_bm_init(priv); 428 if (err < 0) { 429 dev_err(&pdev->dev, "failed to initialize controller\n"); 430 goto err_sram; 431 } 432 433 dn->data = priv; 434 platform_set_drvdata(pdev, priv); 435 436 dev_info(&pdev->dev, "Buffer Manager for network controller enabled\n"); 437 438 return 0; 439 440 err_sram: 441 mvneta_bm_put_sram(priv); 442 err_clk: 443 clk_disable_unprepare(priv->clk); 444 return err; 445 } 446 447 static int mvneta_bm_remove(struct platform_device *pdev) 448 { 449 struct mvneta_bm *priv = platform_get_drvdata(pdev); 450 u8 all_ports_map = 0xff; 451 int i = 0; 452 453 for (i = 0; i < MVNETA_BM_POOLS_NUM; i++) { 454 struct mvneta_bm_pool *bm_pool = &priv->bm_pools[i]; 455 456 mvneta_bm_pool_destroy(priv, bm_pool, all_ports_map); 457 } 458 459 mvneta_bm_put_sram(priv); 460 461 /* Dectivate BM unit */ 462 mvneta_bm_write(priv, MVNETA_BM_COMMAND_REG, MVNETA_BM_STOP_MASK); 463 464 clk_disable_unprepare(priv->clk); 465 466 return 0; 467 } 468 469 static const struct of_device_id mvneta_bm_match[] = { 470 { .compatible = "marvell,armada-380-neta-bm" }, 471 { } 472 }; 473 MODULE_DEVICE_TABLE(of, mvneta_bm_match); 474 475 static struct platform_driver mvneta_bm_driver = { 476 .probe = mvneta_bm_probe, 477 .remove = mvneta_bm_remove, 478 .driver = { 479 .name = MVNETA_BM_DRIVER_NAME, 480 .of_match_table = mvneta_bm_match, 481 }, 482 }; 483 484 module_platform_driver(mvneta_bm_driver); 485 486 MODULE_DESCRIPTION("Marvell NETA Buffer Manager Driver - www.marvell.com"); 487 MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>"); 488 MODULE_LICENSE("GPL v2"); 489