/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include "hnae.h"

#define cls_to_ae_dev(dev) container_of(dev, struct hnae_ae_dev, cls_dev)

static struct class *hnae_class;

static void
hnae_list_add(spinlock_t *lock, struct list_head *node, struct list_head *head)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add_tail_rcu(node, head);
	spin_unlock_irqrestore(lock, flags);
}

static void hnae_list_del(spinlock_t *lock, struct list_head *node)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_del_rcu(node);
	spin_unlock_irqrestore(lock, flags);
}

static int hnae_alloc_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	unsigned int order = hnae_page_order(ring);
	struct page *p = dev_alloc_pages(order);

	if (!p)
		return -ENOMEM;

	cb->priv = p;
	cb->page_offset = 0;
	cb->reuse_flag = 0;
	cb->buf = page_address(p);
	cb->length = hnae_page_size(ring);
	cb->type = DESC_TYPE_PAGE;

	return 0;
}

static void hnae_free_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	if (unlikely(!cb->priv))
		return;

	if (cb->type == DESC_TYPE_SKB)
		dev_kfree_skb_any((struct sk_buff *)cb->priv);
	else if (unlikely(is_rx_ring(ring)))
		put_page((struct page *)cb->priv);

	cb->priv = NULL;
}

static int hnae_map_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
			       cb->length, ring_to_dma_dir(ring));

	if (dma_mapping_error(ring_to_dev(ring), cb->dma))
		return -EIO;

	return 0;
}

static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	if (cb->type == DESC_TYPE_SKB)
		dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
				 ring_to_dma_dir(ring));
	else if (cb->length)
		dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
			       ring_to_dma_dir(ring));
}

/* Default buffer management callbacks, used when the caller of
 * hnae_get_handle() does not supply its own.
 */
static struct hnae_buf_ops hnae_bops = {
	.alloc_buffer = hnae_alloc_buffer,
	.free_buffer = hnae_free_buffer,
	.map_buffer = hnae_map_buffer,
	.unmap_buffer = hnae_unmap_buffer,
};

static int __ae_match(struct device *dev, const void *data)
{
	struct hnae_ae_dev *hdev = cls_to_ae_dev(dev);

	if (dev_of_node(hdev->dev))
		return (data == &hdev->dev->of_node->fwnode);
	else if (is_acpi_node(hdev->dev->fwnode))
		return (data == hdev->dev->fwnode);

	dev_err(dev, "__ae_match cannot read config data from OF or ACPI\n");
	return 0;
}

static struct hnae_ae_dev *find_ae(const struct fwnode_handle *fwnode)
{
	struct device *dev;

	WARN_ON(!fwnode);

	dev = class_find_device(hnae_class, NULL, fwnode, __ae_match);

	return dev ? cls_to_ae_dev(dev) : NULL;
}
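
/* Lookup sketch (illustrative only, not compiled as part of this file):
 * a client such as an enet driver typically resolves the AE device's
 * fwnode from its own DT node before asking for a handle.  Assuming an
 * "ae-handle" phandle property, as used by the HNS enet binding:
 *
 *	ae_node = of_parse_phandle(dev->of_node, "ae-handle", 0);
 *	if (ae_node)
 *		handle = hnae_get_handle(dev, &ae_node->fwnode,
 *					 port_id, NULL);
 */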

static void hnae_free_buffers(struct hnae_ring *ring)
{
	int i;

	for (i = 0; i < ring->desc_num; i++)
		hnae_free_buffer_detach(ring, i);
}

/* Allocate memory for raw packets and map it for DMA */
static int hnae_alloc_buffers(struct hnae_ring *ring)
{
	int i, j, ret;

	for (i = 0; i < ring->desc_num; i++) {
		ret = hnae_alloc_buffer_attach(ring, i);
		if (ret)
			goto out_buffer_fail;
	}

	return 0;

out_buffer_fail:
	for (j = i - 1; j >= 0; j--)
		hnae_free_buffer_detach(ring, j);
	return ret;
}

/* free desc along with its attached buffer */
static void hnae_free_desc(struct hnae_ring *ring)
{
	dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
			 ring->desc_num * sizeof(ring->desc[0]),
			 ring_to_dma_dir(ring));
	ring->desc_dma_addr = 0;
	kfree(ring->desc);
	ring->desc = NULL;
}

/* alloc desc, without buffer attached */
static int hnae_alloc_desc(struct hnae_ring *ring)
{
	int size = ring->desc_num * sizeof(ring->desc[0]);

	ring->desc = kzalloc(size, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	ring->desc_dma_addr = dma_map_single(ring_to_dev(ring),
					     ring->desc, size,
					     ring_to_dma_dir(ring));
	if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
		ring->desc_dma_addr = 0;
		kfree(ring->desc);
		ring->desc = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* fini ring, also free the buffer for the ring */
static void hnae_fini_ring(struct hnae_ring *ring)
{
	if (is_rx_ring(ring))
		hnae_free_buffers(ring);

	hnae_free_desc(ring);
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
}

/* init ring, and with buffer for rx ring */
static int
hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags)
{
	int ret;

	if (ring->desc_num <= 0 || ring->buf_size <= 0)
		return -EINVAL;

	ring->q = q;
	ring->flags = flags;
	spin_lock_init(&ring->lock);
	ring->coal_param = q->handle->coal_param;
	assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr);

	/* no matter whether it is a TX or RX ring, ntu and ntc start from 0 */
	assert(ring->next_to_use == 0);
	assert(ring->next_to_clean == 0);

	ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
				GFP_KERNEL);
	if (!ring->desc_cb) {
		ret = -ENOMEM;
		goto out;
	}

	ret = hnae_alloc_desc(ring);
	if (ret)
		goto out_with_desc_cb;

	if (is_rx_ring(ring)) {
		ret = hnae_alloc_buffers(ring);
		if (ret)
			goto out_with_desc;
	}

	return 0;

out_with_desc:
	hnae_free_desc(ring);
out_with_desc_cb:
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
out:
	return ret;
}

static int hnae_init_queue(struct hnae_handle *h, struct hnae_queue *q,
			   struct hnae_ae_dev *dev)
{
	int ret;

	q->dev = dev;
	q->handle = h;

	ret = hnae_init_ring(q, &q->tx_ring, q->tx_ring.flags | RINGF_DIR);
	if (ret)
		goto out;

	ret = hnae_init_ring(q, &q->rx_ring, q->rx_ring.flags & ~RINGF_DIR);
	if (ret)
		goto out_with_tx_ring;

	if (dev->ops->init_queue)
		dev->ops->init_queue(q);

	return 0;

out_with_tx_ring:
	hnae_fini_ring(&q->tx_ring);
out:
	return ret;
}
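
/* Ring memory layout note: ring->desc is a contiguous, streaming-DMA-mapped
 * array of hardware descriptors, while ring->desc_cb is a parallel, CPU-only
 * array of control blocks; entry i of one always describes entry i of the
 * other, so both are sized by ring->desc_num and torn down together.
 */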

static void hnae_fini_queue(struct hnae_queue *q)
{
	if (q->dev->ops->fini_queue)
		q->dev->ops->fini_queue(q);

	hnae_fini_ring(&q->tx_ring);
	hnae_fini_ring(&q->rx_ring);
}

/* ae_chain - notifier chain head, called when an AE engine registers */
static RAW_NOTIFIER_HEAD(ae_chain);

int hnae_register_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&ae_chain, nb);
}
EXPORT_SYMBOL(hnae_register_notifier);

void hnae_unregister_notifier(struct notifier_block *nb)
{
	if (raw_notifier_chain_unregister(&ae_chain, nb))
		pr_err("hnae: notifier chain unregister failed\n");
}
EXPORT_SYMBOL(hnae_unregister_notifier);

int hnae_reinit_handle(struct hnae_handle *handle)
{
	int i, j;
	int ret;

	for (i = 0; i < handle->q_num; i++) /* free ring */
		hnae_fini_queue(handle->qs[i]);

	if (handle->dev->ops->reset)
		handle->dev->ops->reset(handle);

	for (i = 0; i < handle->q_num; i++) { /* reinit ring */
		ret = hnae_init_queue(handle, handle->qs[i], handle->dev);
		if (ret)
			goto out_when_init_queue;
	}
	return 0;

out_when_init_queue:
	for (j = i - 1; j >= 0; j--)
		hnae_fini_queue(handle->qs[j]);
	return ret;
}
EXPORT_SYMBOL(hnae_reinit_handle);
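
/* Notifier sketch (illustrative only): a consumer that must wait for an AE
 * engine to appear can watch the chain; the names below are hypothetical.
 *
 *	static int my_ae_event(struct notifier_block *nb,
 *			       unsigned long action, void *data)
 *	{
 *		if (action == HNAE_AE_REGISTER)
 *			;	(e.g. retry a deferred probe here)
 *		return NOTIFY_DONE;
 *	}
 *	static struct notifier_block my_nb = { .notifier_call = my_ae_event };
 *
 *	hnae_register_notifier(&my_nb);
 *	...
 *	hnae_unregister_notifier(&my_nb);
 */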

/**
 * hnae_get_handle - get a handle from the AE
 * @owner_dev: the device that will use this handle
 * @fwnode: fwnode of the AE device providing the handle
 * @port_id: id of the external port the handle is bound to
 * @bops: callbacks for buffer management, or NULL to use the defaults
 *
 * Return: handle pointer on success, ERR_PTR on failure
 */
struct hnae_handle *hnae_get_handle(struct device *owner_dev,
				    const struct fwnode_handle *fwnode,
				    u32 port_id,
				    struct hnae_buf_ops *bops)
{
	struct hnae_ae_dev *dev;
	struct hnae_handle *handle;
	int i, j;
	int ret;

	dev = find_ae(fwnode);
	if (!dev)
		return ERR_PTR(-ENODEV);

	handle = dev->ops->get_handle(dev, port_id);
	if (IS_ERR(handle)) {
		put_device(&dev->cls_dev);
		return handle;
	}

	handle->dev = dev;
	handle->owner_dev = owner_dev;
	handle->bops = bops ? bops : &hnae_bops;
	handle->eport_id = port_id;

	for (i = 0; i < handle->q_num; i++) {
		ret = hnae_init_queue(handle, handle->qs[i], dev);
		if (ret)
			goto out_when_init_queue;
	}

	__module_get(dev->owner);

	hnae_list_add(&dev->lock, &handle->node, &dev->handle_list);

	return handle;

out_when_init_queue:
	for (j = i - 1; j >= 0; j--)
		hnae_fini_queue(handle->qs[j]);

	put_device(&dev->cls_dev);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL(hnae_get_handle);

void hnae_put_handle(struct hnae_handle *h)
{
	struct hnae_ae_dev *dev = h->dev;
	int i;

	for (i = 0; i < h->q_num; i++)
		hnae_fini_queue(h->qs[i]);

	if (h->dev->ops->reset)
		h->dev->ops->reset(h);

	hnae_list_del(&dev->lock, &h->node);

	if (dev->ops->put_handle)
		dev->ops->put_handle(h);

	module_put(dev->owner);

	put_device(&dev->cls_dev);
}
EXPORT_SYMBOL(hnae_put_handle);

static void hnae_release(struct device *dev)
{
}

/**
 * hnae_ae_register - register an AE engine with the hnae framework
 * @hdev: the hnae AE engine device
 * @owner: the module that provides this device
 *
 * NOTE: duplicated names are not checked
 */
int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner)
{
	static atomic_t id = ATOMIC_INIT(-1);
	int ret;

	if (!hdev->dev)
		return -ENODEV;

	if (!hdev->ops || !hdev->ops->get_handle ||
	    !hdev->ops->toggle_ring_irq ||
	    !hdev->ops->get_status || !hdev->ops->adjust_link)
		return -EINVAL;

	hdev->owner = owner;
	hdev->id = (int)atomic_inc_return(&id);
	hdev->cls_dev.parent = hdev->dev;
	hdev->cls_dev.class = hnae_class;
	hdev->cls_dev.release = hnae_release;
	(void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id);
	ret = device_register(&hdev->cls_dev);
	if (ret)
		return ret;

	__module_get(THIS_MODULE);

	INIT_LIST_HEAD(&hdev->handle_list);
	spin_lock_init(&hdev->lock);

	ret = raw_notifier_call_chain(&ae_chain, HNAE_AE_REGISTER, NULL);
	if (ret)
		dev_dbg(hdev->dev,
			"no notifier registered for AE: %s\n", hdev->name);

	return 0;
}
EXPORT_SYMBOL(hnae_ae_register);

/**
 * hnae_ae_unregister - unregister an AE engine from the hnae framework
 * @hdev: the device to unregister
 */
void hnae_ae_unregister(struct hnae_ae_dev *hdev)
{
	device_unregister(&hdev->cls_dev);
	module_put(THIS_MODULE);
}
EXPORT_SYMBOL(hnae_ae_unregister);

static int __init hnae_init(void)
{
	hnae_class = class_create(THIS_MODULE, "hnae");
	return PTR_ERR_OR_ZERO(hnae_class);
}

static void __exit hnae_exit(void)
{
	class_destroy(hnae_class);
}

subsys_initcall(hnae_init);
module_exit(hnae_exit);

MODULE_AUTHOR("Hisilicon, Inc.");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Hisilicon Network Acceleration Engine Framework");

/* vi: set tw=78 noet: */
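
/* Registration sketch (illustrative only): an AE engine driver fills in a
 * struct hnae_ae_dev and registers it from its probe routine; the my_* names
 * below are hypothetical, but the ops listed are the ones hnae_ae_register()
 * requires.
 *
 *	static struct hnae_ae_ops my_ops = {
 *		.get_handle	 = my_get_handle,
 *		.toggle_ring_irq = my_toggle_ring_irq,
 *		.get_status	 = my_get_status,
 *		.adjust_link	 = my_adjust_link,
 *	};
 *
 *	ae_dev->dev = &pdev->dev;
 *	ae_dev->ops = &my_ops;
 *	ret = hnae_ae_register(ae_dev, THIS_MODULE);
 */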