/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#include "hnae.h"

#define cls_to_ae_dev(dev) container_of(dev, struct hnae_ae_dev, cls_dev)

static struct class *hnae_class;

static void
hnae_list_add(spinlock_t *lock, struct list_head *node, struct list_head *head)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add_tail_rcu(node, head);
	spin_unlock_irqrestore(lock, flags);
}

static void hnae_list_del(spinlock_t *lock, struct list_head *node)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_del_rcu(node);
	spin_unlock_irqrestore(lock, flags);
}

static int hnae_alloc_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	unsigned int order = hnae_page_order(ring);
	struct page *p = dev_alloc_pages(order);

	if (!p)
		return -ENOMEM;

	cb->priv = p;
	cb->page_offset = 0;
	cb->reuse_flag = 0;
	cb->buf = page_address(p);
	cb->length = hnae_page_size(ring);
	cb->type = DESC_TYPE_PAGE;

	return 0;
}

static void hnae_free_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	if (cb->type == DESC_TYPE_SKB)
		dev_kfree_skb_any((struct sk_buff *)cb->priv);
	else if (unlikely(is_rx_ring(ring)))
		put_page((struct page *)cb->priv);
	memset(cb, 0, sizeof(*cb));
}

static int hnae_map_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
			       cb->length, ring_to_dma_dir(ring));

	if (dma_mapping_error(ring_to_dev(ring), cb->dma))
		return -EIO;

	return 0;
}

static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	if (cb->type == DESC_TYPE_SKB)
		dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
				 ring_to_dma_dir(ring));
	else
		dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
			       ring_to_dma_dir(ring));
}

static struct hnae_buf_ops hnae_bops = {
	.alloc_buffer = hnae_alloc_buffer,
	.free_buffer = hnae_free_buffer,
	.map_buffer = hnae_map_buffer,
	.unmap_buffer = hnae_unmap_buffer,
};

static int __ae_match(struct device *dev, const void *data)
{
	struct hnae_ae_dev *hdev = cls_to_ae_dev(dev);
	const char *ae_id = data;

	if (!strncmp(ae_id, hdev->name, AE_NAME_SIZE))
		return 1;

	return 0;
}

static struct hnae_ae_dev *find_ae(const char *ae_id)
{
	struct device *dev;

	WARN_ON(!ae_id);

	dev = class_find_device(hnae_class, NULL, ae_id, __ae_match);

	return dev ? cls_to_ae_dev(dev) : NULL;
}
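/*
 * The helpers below manage a ring's backing memory: hnae_alloc_desc()/
 * hnae_free_desc() own the DMA-mapped descriptor array, while
 * hnae_alloc_buffers()/hnae_free_buffers() attach and detach the
 * per-descriptor data pages. Only rx rings get buffers attached here;
 * tx buffers are skb data mapped per-packet by the ENET driver, which
 * is why hnae_unmap_buffer() above dispatches on cb->type.
 */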
static void hnae_free_buffers(struct hnae_ring *ring)
{
	int i;

	for (i = 0; i < ring->desc_num; i++)
		hnae_free_buffer_detach(ring, i);
}

/* Allocate memory for the raw packet buffers and map them for DMA */
static int hnae_alloc_buffers(struct hnae_ring *ring)
{
	int i, j, ret;

	for (i = 0; i < ring->desc_num; i++) {
		ret = hnae_alloc_buffer_attach(ring, i);
		if (ret)
			goto out_buffer_fail;
	}

	return 0;

out_buffer_fail:
	for (j = i - 1; j >= 0; j--)
		hnae_free_buffer_detach(ring, j);
	return ret;
}

/* free the descriptors along with their attached buffers */
static void hnae_free_desc(struct hnae_ring *ring)
{
	hnae_free_buffers(ring);
	dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
			 ring->desc_num * sizeof(ring->desc[0]),
			 ring_to_dma_dir(ring));
	ring->desc_dma_addr = 0;
	kfree(ring->desc);
	ring->desc = NULL;
}

/* allocate the descriptors, without buffers attached */
static int hnae_alloc_desc(struct hnae_ring *ring)
{
	int size = ring->desc_num * sizeof(ring->desc[0]);

	ring->desc = kzalloc(size, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	ring->desc_dma_addr = dma_map_single(ring_to_dev(ring),
					     ring->desc, size,
					     ring_to_dma_dir(ring));
	if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
		ring->desc_dma_addr = 0;
		kfree(ring->desc);
		ring->desc = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* fini the ring, also free the buffers attached to it */
static void hnae_fini_ring(struct hnae_ring *ring)
{
	hnae_free_desc(ring);
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
}

/* init the ring; rx rings also get their buffers attached */
static int
hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags)
{
	int ret;

	if (ring->desc_num <= 0 || ring->buf_size <= 0)
		return -EINVAL;

	ring->q = q;
	ring->flags = flags;
	assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr);

	/* no matter tx or rx ring, ntu (next_to_use) and
	 * ntc (next_to_clean) start from 0
	 */
	assert(ring->next_to_use == 0);
	assert(ring->next_to_clean == 0);

	ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
				GFP_KERNEL);
	if (!ring->desc_cb) {
		ret = -ENOMEM;
		goto out;
	}

	ret = hnae_alloc_desc(ring);
	if (ret)
		goto out_with_desc_cb;

	if (is_rx_ring(ring)) {
		ret = hnae_alloc_buffers(ring);
		if (ret)
			goto out_with_desc;
	}

	return 0;

out_with_desc:
	hnae_free_desc(ring);
out_with_desc_cb:
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
out:
	return ret;
}

static int hnae_init_queue(struct hnae_handle *h, struct hnae_queue *q,
			   struct hnae_ae_dev *dev)
{
	int ret;

	q->dev = dev;
	q->handle = h;

	ret = hnae_init_ring(q, &q->tx_ring, q->tx_ring.flags | RINGF_DIR);
	if (ret)
		goto out;

	ret = hnae_init_ring(q, &q->rx_ring, q->rx_ring.flags & ~RINGF_DIR);
	if (ret)
		goto out_with_tx_ring;

	if (dev->ops->init_queue)
		dev->ops->init_queue(q);

	return 0;

out_with_tx_ring:
	hnae_fini_ring(&q->tx_ring);
out:
	return ret;
}

static void hnae_fini_queue(struct hnae_queue *q)
{
	if (q->dev->ops->fini_queue)
		q->dev->ops->fini_queue(q);

	hnae_fini_ring(&q->tx_ring);
	hnae_fini_ring(&q->rx_ring);
}
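/*
 * Note on the queue helpers above: a queue pairs one tx ring with one
 * rx ring. RINGF_DIR in ring->flags marks the tx direction, so
 * hnae_init_queue() sets it on the tx ring and clears it on the rx
 * ring before handing the queue to the AE's optional init_queue() hook.
 */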
/**
 * ae_chain - define ae chain head
 */
static RAW_NOTIFIER_HEAD(ae_chain);

int hnae_register_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&ae_chain, nb);
}
EXPORT_SYMBOL(hnae_register_notifier);

void hnae_unregister_notifier(struct notifier_block *nb)
{
	if (raw_notifier_chain_unregister(&ae_chain, nb))
		pr_err("notifier chain unregister failed\n");
}
EXPORT_SYMBOL(hnae_unregister_notifier);

int hnae_reinit_handle(struct hnae_handle *handle)
{
	int i, j;
	int ret;

	for (i = 0; i < handle->q_num; i++) /* free rings */
		hnae_fini_queue(handle->qs[i]);

	if (handle->dev->ops->reset)
		handle->dev->ops->reset(handle);

	for (i = 0; i < handle->q_num; i++) { /* reinit rings */
		ret = hnae_init_queue(handle, handle->qs[i], handle->dev);
		if (ret)
			goto out_when_init_queue;
	}
	return 0;

out_when_init_queue:
	for (j = i - 1; j >= 0; j--)
		hnae_fini_queue(handle->qs[j]);
	return ret;
}
EXPORT_SYMBOL(hnae_reinit_handle);
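/*
 * Usage sketch for the handle API below (illustrative only; the AE
 * name and port id are hypothetical, real callers live in the ENET
 * driver): acquire a handle at probe time, release it on remove:
 *
 *	struct hnae_handle *h;
 *
 *	h = hnae_get_handle(&pdev->dev, "hnae-example", 0, NULL);
 *	if (IS_ERR(h))
 *		return PTR_ERR(h);
 *	...
 *	hnae_put_handle(h);
 *
 * A NULL bops selects the default hnae_bops defined above.
 */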
/**
 * hnae_get_handle - get a handle from the AE
 * @owner_dev: the dev that uses this handle
 * @ae_id: the id of the ae to be used
 * @port_id: the id of the ae port the handle is bound to
 * @bops: the callbacks for buffer management, NULL for the defaults
 *
 * return handle ptr or ERR_PTR
 */
struct hnae_handle *hnae_get_handle(struct device *owner_dev,
				    const char *ae_id, u32 port_id,
				    struct hnae_buf_ops *bops)
{
	struct hnae_ae_dev *dev;
	struct hnae_handle *handle;
	int i, j;
	int ret;

	dev = find_ae(ae_id);
	if (!dev)
		return ERR_PTR(-ENODEV);

	handle = dev->ops->get_handle(dev, port_id);
	if (IS_ERR(handle))
		return handle;

	handle->dev = dev;
	handle->owner_dev = owner_dev;
	handle->bops = bops ? bops : &hnae_bops;
	handle->eport_id = port_id;

	for (i = 0; i < handle->q_num; i++) {
		ret = hnae_init_queue(handle, handle->qs[i], dev);
		if (ret)
			goto out_when_init_queue;
	}

	__module_get(dev->owner);

	hnae_list_add(&dev->lock, &handle->node, &dev->handle_list);

	return handle;

out_when_init_queue:
	for (j = i - 1; j >= 0; j--)
		hnae_fini_queue(handle->qs[j]);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL(hnae_get_handle);

void hnae_put_handle(struct hnae_handle *h)
{
	struct hnae_ae_dev *dev = h->dev;
	int i;

	for (i = 0; i < h->q_num; i++)
		hnae_fini_queue(h->qs[i]);

	if (h->dev->ops->reset)
		h->dev->ops->reset(h);

	hnae_list_del(&dev->lock, &h->node);

	if (dev->ops->put_handle)
		dev->ops->put_handle(h);

	module_put(dev->owner);
}
EXPORT_SYMBOL(hnae_put_handle);

static void hnae_release(struct device *dev)
{
}

/**
 * hnae_ae_register - register an AE engine to the hnae framework
 * @hdev: the hnae ae engine device
 * @owner: the module that provides this device
 * NOTE: duplicated names are not checked
 */
int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner)
{
	static atomic_t id = ATOMIC_INIT(-1);
	int ret;

	if (!hdev->dev)
		return -ENODEV;

	if (!hdev->ops || !hdev->ops->get_handle ||
	    !hdev->ops->toggle_ring_irq ||
	    !hdev->ops->toggle_queue_status ||
	    !hdev->ops->get_status || !hdev->ops->adjust_link)
		return -EINVAL;

	hdev->owner = owner;
	hdev->id = (int)atomic_inc_return(&id);
	hdev->cls_dev.parent = hdev->dev;
	hdev->cls_dev.class = hnae_class;
	hdev->cls_dev.release = hnae_release;
	(void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id);
	ret = device_register(&hdev->cls_dev);
	if (ret)
		return ret;

	__module_get(THIS_MODULE);

	INIT_LIST_HEAD(&hdev->handle_list);
	spin_lock_init(&hdev->lock);

	ret = raw_notifier_call_chain(&ae_chain, HNAE_AE_REGISTER, NULL);
	if (ret)
		dev_dbg(hdev->dev,
			"no notifier registered for AE: %s\n", hdev->name);

	return 0;
}
EXPORT_SYMBOL(hnae_ae_register);

/**
 * hnae_ae_unregister - unregister an HNAE AE engine
 * @hdev: the hnae ae engine device to unregister
 */
void hnae_ae_unregister(struct hnae_ae_dev *hdev)
{
	device_unregister(&hdev->cls_dev);
	module_put(THIS_MODULE);
}
EXPORT_SYMBOL(hnae_ae_unregister);
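/*
 * sysfs support: each registered AE shows up as /sys/class/hnae/hnae<N>
 * (named in hnae_ae_register() above). The read-only "handles"
 * attribute below dumps every live handle together with per-queue
 * tx/rx ring geometry and error counters.
 */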
static ssize_t handles_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	ssize_t s = 0;
	struct hnae_ae_dev *hdev = cls_to_ae_dev(dev);
	struct hnae_handle *h;
	int i = 0, j;

	list_for_each_entry_rcu(h, &hdev->handle_list, node) {
		s += sprintf(buf + s, "handle %d (eport_id=%u from %s):\n",
			     i++, h->eport_id, h->dev->name);
		for (j = 0; j < h->q_num; j++) {
			s += sprintf(buf + s, "\tqueue[%d] on 0x%llx\n",
				     j, (u64)h->qs[j]->io_base);
#define HANDLE_TX_MSG "\t\ttx_ring on 0x%llx:%u,%u,%u,%u,%u,%llu,%llu\n"
			s += sprintf(buf + s,
				     HANDLE_TX_MSG,
				     (u64)h->qs[j]->tx_ring.io_base,
				     h->qs[j]->tx_ring.buf_size,
				     h->qs[j]->tx_ring.desc_num,
				     h->qs[j]->tx_ring.max_desc_num_per_pkt,
				     h->qs[j]->tx_ring.max_raw_data_sz_per_desc,
				     h->qs[j]->tx_ring.max_pkt_size,
				     h->qs[j]->tx_ring.stats.sw_err_cnt,
				     h->qs[j]->tx_ring.stats.io_err_cnt);
			s += sprintf(buf + s,
				     "\t\trx_ring on 0x%llx:%u,%u,%llu,%llu,%llu\n",
				     (u64)h->qs[j]->rx_ring.io_base,
				     h->qs[j]->rx_ring.buf_size,
				     h->qs[j]->rx_ring.desc_num,
				     h->qs[j]->rx_ring.stats.sw_err_cnt,
				     h->qs[j]->rx_ring.stats.io_err_cnt,
				     h->qs[j]->rx_ring.stats.seg_pkt_cnt);
		}
	}

	return s;
}

static DEVICE_ATTR_RO(handles);

static struct attribute *hnae_class_attrs[] = {
	&dev_attr_handles.attr,
	NULL,
};
ATTRIBUTE_GROUPS(hnae_class);

static int __init hnae_init(void)
{
	hnae_class = class_create(THIS_MODULE, "hnae");
	if (IS_ERR(hnae_class))
		return PTR_ERR(hnae_class);

	hnae_class->dev_groups = hnae_class_groups;
	return 0;
}

static void __exit hnae_exit(void)
{
	class_destroy(hnae_class);
}

subsys_initcall(hnae_init);
module_exit(hnae_exit);

MODULE_AUTHOR("Hisilicon, Inc.");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Hisilicon Network Acceleration Engine Framework");

/* vi: set tw=78 noet: */