/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/mlx5/driver.h>

#include "mlx5_core.h"

/* Handling for queue buffers -- we allocate a bunch of memory and
 * register it in a memory region at HCA virtual address 0.
 */

/* Allocate zeroed DMA-coherent memory, steering the allocation to the
 * requested NUMA node.  There is no node argument to
 * dma_zalloc_coherent(), so we temporarily overwrite the PCI device's
 * node with set_dev_node() and restore it afterwards.  priv->alloc_mutex
 * serializes this sequence because the node override is visible to every
 * concurrent allocator using the same struct device.
 */
static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
					   size_t size, dma_addr_t *dma_handle,
					   int node)
{
	struct mlx5_priv *priv = &dev->priv;
	int original_node;
	void *cpu_handle;

	mutex_lock(&priv->alloc_mutex);
	original_node = dev_to_node(&dev->pdev->dev);
	set_dev_node(&dev->pdev->dev, node);
	cpu_handle = dma_zalloc_coherent(&dev->pdev->dev, size,
					 dma_handle, GFP_KERNEL);
	/* Restore the device's original node before releasing the lock */
	set_dev_node(&dev->pdev->dev, original_node);
	mutex_unlock(&priv->alloc_mutex);
	return cpu_handle;
}

/* Allocate a queue buffer of @size bytes on NUMA node @node as one
 * physically contiguous, DMA-coherent chunk.
 *
 * The buffer starts out described as a single "page" whose page_shift
 * covers the whole allocation order.  If the DMA address is not aligned
 * to that large page size, page_shift is reduced (and npages doubled)
 * until the address is aligned to the reported page size, so that
 * mlx5_fill_page_array() below can emit correctly aligned page entries.
 *
 * Returns 0 on success or -ENOMEM if the coherent allocation fails.
 */
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
			struct mlx5_buf *buf, int node)
{
	dma_addr_t t;

	buf->size = size;
	buf->npages = 1;
	buf->page_shift = (u8)get_order(size) + PAGE_SHIFT;
	buf->direct.buf = mlx5_dma_zalloc_coherent_node(dev, size,
							&t, node);
	if (!buf->direct.buf)
		return -ENOMEM;

	buf->direct.map = t;

	/* Shrink the advertised page size until the DMA address is
	 * aligned to it; each halving doubles the page count.
	 */
	while (t & ((1 << buf->page_shift) - 1)) {
		--buf->page_shift;
		buf->npages *= 2;
	}

	return 0;
}

/* Convenience wrapper: allocate on the device's default NUMA node. */
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
{
	return mlx5_buf_alloc_node(dev, size, buf, dev->priv.numa_node);
}
EXPORT_SYMBOL_GPL(mlx5_buf_alloc);

/* Release a buffer obtained from mlx5_buf_alloc{,_node}(). */
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
{
	dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf,
			  buf->direct.map);
}
EXPORT_SYMBOL_GPL(mlx5_buf_free);

/* Allocate a fresh doorbell page directory: one DMA-coherent page of
 * doorbell records plus a bitmap with one bit per record slot (set bit
 * == slot free).  Returns NULL on allocation failure.
 */
static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
						 int node)
{
	struct mlx5_db_pgdir *pgdir;

	pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
	if (!pgdir)
		return NULL;

	/* All slots start out free */
	bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE);

	pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE,
						       &pgdir->db_dma, node);
	if (!pgdir->db_page) {
		kfree(pgdir);
		return NULL;
	}

	return pgdir;
}

/* Carve one doorbell record out of @pgdir, filling in @db with its CPU
 * pointer and DMA address.  Records are spaced L1_CACHE_BYTES apart
 * within the page.  Returns -ENOMEM if the page has no free slot.
 *
 * Caller must hold priv->pgdir_mutex (bitmap access is not atomic).
 */
static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
				    struct mlx5_db *db)
{
	int offset;
	int i;

	i = find_first_bit(pgdir->bitmap, MLX5_DB_PER_PAGE);
	if (i >= MLX5_DB_PER_PAGE)
		return -ENOMEM;

	__clear_bit(i, pgdir->bitmap);

	db->u.pgdir = pgdir;
	db->index   = i;
	offset = db->index * L1_CACHE_BYTES;
	/* db_page is typed, so convert the byte offset to elements */
	db->db      = pgdir->db_page + offset / sizeof(*pgdir->db_page);
	db->dma     = pgdir->db_dma  + offset;

	/* Clear both dwords of the doorbell record */
	db->db[0] = 0;
	db->db[1] = 0;

	return 0;
}

/* Allocate a doorbell record, preferring memory on NUMA node @node.
 *
 * First tries every existing page directory; if all are full, a new one
 * is allocated on @node and linked into priv->pgdir_list.  All list and
 * bitmap manipulation happens under priv->pgdir_mutex.
 *
 * Returns 0 on success or -ENOMEM.
 */
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, int node)
{
	struct mlx5_db_pgdir *pgdir;
	int ret = 0;

	mutex_lock(&dev->priv.pgdir_mutex);

	list_for_each_entry(pgdir, &dev->priv.pgdir_list, list)
		if (!mlx5_alloc_db_from_pgdir(pgdir, db))
			goto out;

	pgdir = mlx5_alloc_db_pgdir(dev, node);
	if (!pgdir) {
		ret = -ENOMEM;
		goto out;
	}

	list_add(&pgdir->list, &dev->priv.pgdir_list);

	/* This should never fail -- we just allocated an empty page: */
	WARN_ON(mlx5_alloc_db_from_pgdir(pgdir, db));

out:
	mutex_unlock(&dev->priv.pgdir_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mlx5_db_alloc_node);

/* Convenience wrapper: allocate on the device's default NUMA node. */
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
	return mlx5_db_alloc_node(dev, db, dev->priv.numa_node);
}
EXPORT_SYMBOL_GPL(mlx5_db_alloc);

/* Return a doorbell record to its page directory.  When the page's last
 * record is freed (bitmap is full again), the whole page directory is
 * unlinked and released.
 */
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
	mutex_lock(&dev->priv.pgdir_mutex);

	/* Mark the slot free again */
	__set_bit(db->index, db->u.pgdir->bitmap);

	if (bitmap_full(db->u.pgdir->bitmap, MLX5_DB_PER_PAGE)) {
		dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
				  db->u.pgdir->db_page, db->u.pgdir->db_dma);
		list_del(&db->u.pgdir->list);
		kfree(db->u.pgdir);
	}

	mutex_unlock(&dev->priv.pgdir_mutex);
}
EXPORT_SYMBOL_GPL(mlx5_db_free);

/* Write the big-endian DMA address of each page of @buf into @pas, one
 * entry per page of size (1 << buf->page_shift), for handing the buffer
 * to firmware.  @pas must have room for buf->npages entries.
 */
void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas)
{
	u64 addr;
	int i;

	for (i = 0; i < buf->npages; i++) {
		addr = buf->direct.map + (i << buf->page_shift);

		pas[i] = cpu_to_be64(addr);
	}
}
EXPORT_SYMBOL_GPL(mlx5_fill_page_array);