/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#include "hnae.h"

#define cls_to_ae_dev(dev) container_of(dev, struct hnae_ae_dev, cls_dev)

static struct class *hnae_class;

static void
hnae_list_add(spinlock_t *lock, struct list_head *node, struct list_head *head)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add_tail_rcu(node, head);
	spin_unlock_irqrestore(lock, flags);
}

static void hnae_list_del(spinlock_t *lock, struct list_head *node)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_del_rcu(node);
	spin_unlock_irqrestore(lock, flags);
}

static int hnae_alloc_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	unsigned int order = hnae_page_order(ring);
	struct page *p = dev_alloc_pages(order);

	if (!p)
		return -ENOMEM;

	cb->priv = p;
	cb->page_offset = 0;
	cb->reuse_flag = 0;
	cb->buf = page_address(p);
	cb->length = hnae_page_size(ring);
	cb->type = DESC_TYPE_PAGE;

	return 0;
}

static void hnae_free_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	if (cb->type == DESC_TYPE_SKB)
		dev_kfree_skb_any((struct sk_buff *)cb->priv);
	else if (unlikely(is_rx_ring(ring)))
		put_page((struct page *)cb->priv);
	memset(cb, 0, sizeof(*cb));
}

static int hnae_map_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
			       cb->length, ring_to_dma_dir(ring));

	if (dma_mapping_error(ring_to_dev(ring), cb->dma))
		return -EIO;

	return 0;
}

static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
	if (cb->type == DESC_TYPE_SKB)
		dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
				 ring_to_dma_dir(ring));
	else
		dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
			       ring_to_dma_dir(ring));
}

static struct hnae_buf_ops hnae_bops = {
	.alloc_buffer = hnae_alloc_buffer,
	.free_buffer = hnae_free_buffer,
	.map_buffer = hnae_map_buffer,
	.unmap_buffer = hnae_unmap_buffer,
};
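/*
 * Usage sketch (illustrative only, not part of this file's API): the
 * default buf ops above are reached through the handle, so a consumer
 * wanting the alloc+map pair for one descriptor slot would do roughly
 * the following; error handling is elided and "i" is a free slot index:
 *
 *	struct hnae_buf_ops *bops = ring->q->handle->bops;
 *	struct hnae_desc_cb *cb = &ring->desc_cb[i];
 *
 *	if (!bops->alloc_buffer(ring, cb) && !bops->map_buffer(ring, cb))
 *		ring->desc[i].addr = (__le64)cb->dma;
 *
 * hnae_alloc_buffer_attach()/hnae_free_buffer_detach() in hnae.h wrap
 * this pattern and are what the ring setup code below actually uses.
 */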
static int __ae_match(struct device *dev, const void *data)
{
	struct hnae_ae_dev *hdev = cls_to_ae_dev(dev);

	if (dev_of_node(hdev->dev))
		return (data == &hdev->dev->of_node->fwnode);
	else if (is_acpi_node(hdev->dev->fwnode))
		return (data == hdev->dev->fwnode);

	dev_err(dev, "__ae_match cannot read cfg data from OF or ACPI\n");
	return 0;
}

static struct hnae_ae_dev *find_ae(const struct fwnode_handle *fwnode)
{
	struct device *dev;

	WARN_ON(!fwnode);

	dev = class_find_device(hnae_class, NULL, fwnode, __ae_match);

	return dev ? cls_to_ae_dev(dev) : NULL;
}

static void hnae_free_buffers(struct hnae_ring *ring)
{
	int i;

	for (i = 0; i < ring->desc_num; i++)
		hnae_free_buffer_detach(ring, i);
}

/* Allocate memory for raw packets and map it for DMA */
static int hnae_alloc_buffers(struct hnae_ring *ring)
{
	int i, j, ret;

	for (i = 0; i < ring->desc_num; i++) {
		ret = hnae_alloc_buffer_attach(ring, i);
		if (ret)
			goto out_buffer_fail;
	}

	return 0;

out_buffer_fail:
	for (j = i - 1; j >= 0; j--)
		hnae_free_buffer_detach(ring, j);
	return ret;
}

/* free desc along with its attached buffers */
static void hnae_free_desc(struct hnae_ring *ring)
{
	hnae_free_buffers(ring);
	dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
			 ring->desc_num * sizeof(ring->desc[0]),
			 ring_to_dma_dir(ring));
	ring->desc_dma_addr = 0;
	kfree(ring->desc);
	ring->desc = NULL;
}

/* alloc desc, without buffers attached */
static int hnae_alloc_desc(struct hnae_ring *ring)
{
	int size = ring->desc_num * sizeof(ring->desc[0]);

	ring->desc = kzalloc(size, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	ring->desc_dma_addr = dma_map_single(ring_to_dev(ring),
					     ring->desc, size,
					     ring_to_dma_dir(ring));
	if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
		ring->desc_dma_addr = 0;
		kfree(ring->desc);
		ring->desc = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* fini the ring, also free the buffers attached to the ring */
static void hnae_fini_ring(struct hnae_ring *ring)
{
	hnae_free_desc(ring);
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
}

/* init the ring, and attach buffers for an rx ring */
static int
hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags)
{
	int ret;

	if (ring->desc_num <= 0 || ring->buf_size <= 0)
		return -EINVAL;

	ring->q = q;
	ring->flags = flags;
	assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr);

	/* no matter whether it is a tx or rx ring, ntu and ntc start from 0 */
	assert(ring->next_to_use == 0);
	assert(ring->next_to_clean == 0);

	ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
				GFP_KERNEL);
	if (!ring->desc_cb) {
		ret = -ENOMEM;
		goto out;
	}

	ret = hnae_alloc_desc(ring);
	if (ret)
		goto out_with_desc_cb;

	if (is_rx_ring(ring)) {
		ret = hnae_alloc_buffers(ring);
		if (ret)
			goto out_with_desc;
	}

	return 0;

out_with_desc:
	hnae_free_desc(ring);
out_with_desc_cb:
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
out:
	return ret;
}

static int hnae_init_queue(struct hnae_handle *h, struct hnae_queue *q,
			   struct hnae_ae_dev *dev)
{
	int ret;

	q->dev = dev;
	q->handle = h;

	ret = hnae_init_ring(q, &q->tx_ring, q->tx_ring.flags | RINGF_DIR);
	if (ret)
		goto out;

	ret = hnae_init_ring(q, &q->rx_ring, q->rx_ring.flags & ~RINGF_DIR);
	if (ret)
		goto out_with_tx_ring;

	if (dev->ops->init_queue)
		dev->ops->init_queue(q);

	return 0;

out_with_tx_ring:
	hnae_fini_ring(&q->tx_ring);
out:
	return ret;
}

static void hnae_fini_queue(struct hnae_queue *q)
{
	if (q->dev->ops->fini_queue)
		q->dev->ops->fini_queue(q);

	hnae_fini_ring(&q->tx_ring);
	hnae_fini_ring(&q->rx_ring);
}
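/*
 * Note on RINGF_DIR: a single hnae_ring type backs both directions, and
 * hnae_init_queue() above sets the flag for the tx ring and clears it
 * for the rx ring. That is what makes helpers such as is_rx_ring() and
 * ring_to_dma_dir() resolve correctly; hnae.h defines them along these
 * lines:
 *
 *	#define is_tx_ring(ring) ((ring)->flags & RINGF_DIR)
 *	#define is_rx_ring(ring) (!is_tx_ring(ring))
 *	#define ring_to_dma_dir(ring) (is_tx_ring(ring) ? \
 *		DMA_TO_DEVICE : DMA_FROM_DEVICE)
 */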
/**
 * ae_chain - define ae chain head
 */
static RAW_NOTIFIER_HEAD(ae_chain);

int hnae_register_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&ae_chain, nb);
}
EXPORT_SYMBOL(hnae_register_notifier);

void hnae_unregister_notifier(struct notifier_block *nb)
{
	if (raw_notifier_chain_unregister(&ae_chain, nb))
		dev_err(NULL, "notifier chain unregister failed\n");
}
EXPORT_SYMBOL(hnae_unregister_notifier);

int hnae_reinit_handle(struct hnae_handle *handle)
{
	int i, j;
	int ret;

	for (i = 0; i < handle->q_num; i++) /* free rings */
		hnae_fini_queue(handle->qs[i]);

	if (handle->dev->ops->reset)
		handle->dev->ops->reset(handle);

	for (i = 0; i < handle->q_num; i++) { /* reinit rings */
		ret = hnae_init_queue(handle, handle->qs[i], handle->dev);
		if (ret)
			goto out_when_init_queue;
	}
	return 0;

out_when_init_queue:
	for (j = i - 1; j >= 0; j--)
		hnae_fini_queue(handle->qs[j]);
	return ret;
}
EXPORT_SYMBOL(hnae_reinit_handle);

/* hnae_get_handle - get a handle from the AE
 * @owner_dev: the dev which uses this handle
 * @fwnode: the fwnode of the AE to be used
 * @port_id: the port id of the handle
 * @bops: the callbacks for buffer management, or NULL for the defaults
 *
 * return handle ptr or ERR_PTR
 */
struct hnae_handle *hnae_get_handle(struct device *owner_dev,
				    const struct fwnode_handle *fwnode,
				    u32 port_id,
				    struct hnae_buf_ops *bops)
{
	struct hnae_ae_dev *dev;
	struct hnae_handle *handle;
	int i, j;
	int ret;

	dev = find_ae(fwnode);
	if (!dev)
		return ERR_PTR(-ENODEV);

	handle = dev->ops->get_handle(dev, port_id);
	if (IS_ERR(handle))
		return handle;

	handle->dev = dev;
	handle->owner_dev = owner_dev;
	handle->bops = bops ? bops : &hnae_bops;
	handle->eport_id = port_id;

	for (i = 0; i < handle->q_num; i++) {
		ret = hnae_init_queue(handle, handle->qs[i], dev);
		if (ret)
			goto out_when_init_queue;
	}

	__module_get(dev->owner);

	hnae_list_add(&dev->lock, &handle->node, &dev->handle_list);

	return handle;

out_when_init_queue:
	for (j = i - 1; j >= 0; j--)
		hnae_fini_queue(handle->qs[j]);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL(hnae_get_handle);

void hnae_put_handle(struct hnae_handle *h)
{
	struct hnae_ae_dev *dev = h->dev;
	int i;

	for (i = 0; i < h->q_num; i++)
		hnae_fini_queue(h->qs[i]);

	if (h->dev->ops->reset)
		h->dev->ops->reset(h);

	hnae_list_del(&dev->lock, &h->node);

	if (dev->ops->put_handle)
		dev->ops->put_handle(h);

	module_put(dev->owner);
}
EXPORT_SYMBOL(hnae_put_handle);
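/*
 * Usage sketch (hypothetical consumer, illustrative names): an ENET
 * driver typically resolves the AE's fwnode from its own firmware node
 * and then brackets the netdev lifetime with get/put:
 *
 *	handle = hnae_get_handle(&pdev->dev, ae_fwnode, port_id, NULL);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	hnae_put_handle(handle);
 *
 * Passing NULL for @bops selects the default page-based hnae_bops
 * defined at the top of this file.
 */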
static void hnae_release(struct device *dev)
{
}

/**
 * hnae_ae_register - register an AE engine to the hnae framework
 * @hdev: the hnae ae engine device
 * @owner: the module which provides this dev
 * NOTE: duplicated names are not checked
 */
int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner)
{
	static atomic_t id = ATOMIC_INIT(-1);
	int ret;

	if (!hdev->dev)
		return -ENODEV;

	if (!hdev->ops || !hdev->ops->get_handle ||
	    !hdev->ops->toggle_ring_irq ||
	    !hdev->ops->get_status || !hdev->ops->adjust_link)
		return -EINVAL;

	hdev->owner = owner;
	hdev->id = (int)atomic_inc_return(&id);
	hdev->cls_dev.parent = hdev->dev;
	hdev->cls_dev.class = hnae_class;
	hdev->cls_dev.release = hnae_release;
	(void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id);
	ret = device_register(&hdev->cls_dev);
	if (ret)
		return ret;

	__module_get(THIS_MODULE);

	INIT_LIST_HEAD(&hdev->handle_list);
	spin_lock_init(&hdev->lock);

	ret = raw_notifier_call_chain(&ae_chain, HNAE_AE_REGISTER, NULL);
	if (ret)
		dev_dbg(hdev->dev,
			"has no notifier for AE: %s\n", hdev->name);

	return 0;
}
EXPORT_SYMBOL(hnae_ae_register);

/**
 * hnae_ae_unregister - unregisters a HNAE AE engine
 * @hdev: the device to unregister
 */
void hnae_ae_unregister(struct hnae_ae_dev *hdev)
{
	device_unregister(&hdev->cls_dev);
	module_put(THIS_MODULE);
}
EXPORT_SYMBOL(hnae_ae_unregister);

static int __init hnae_init(void)
{
	hnae_class = class_create(THIS_MODULE, "hnae");
	return PTR_ERR_OR_ZERO(hnae_class);
}

static void __exit hnae_exit(void)
{
	class_destroy(hnae_class);
}

subsys_initcall(hnae_init);
module_exit(hnae_exit);

MODULE_AUTHOR("Hisilicon, Inc.");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Hisilicon Network Acceleration Engine Framework");

/* vi: set tw=78 noet: */
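/*
 * Provider-side usage sketch (illustrative; my_ae_dev and my_ae_ops are
 * hypothetical names, the fields are those used above): a backend fills
 * in struct hnae_ae_dev and registers it once its hardware is probed.
 * subsys_initcall() above guarantees the "hnae" class already exists by
 * the time ordinary drivers probe:
 *
 *	static struct hnae_ae_dev my_ae_dev = {
 *		.name = "my-ae",
 *		.ops = &my_ae_ops,	// hypothetical hnae_ae_ops instance
 *	};
 *
 *	my_ae_dev.dev = &pdev->dev;
 *	ret = hnae_ae_register(&my_ae_dev, THIS_MODULE);
 *	...
 *	hnae_ae_unregister(&my_ae_dev);
 */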