/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>

#include "mlx4_ib.h"

/*
 * Kernel doorbell records are carved out of DMA-coherent pages using a
 * two-order buddy scheme: the order0 bitmap tracks free single records,
 * the order1 bitmap tracks free aligned pairs.  A set bit means "free".
 */
struct mlx4_ib_db_pgdir {
	struct list_head	list;
	DECLARE_BITMAP(order0, MLX4_IB_DB_PER_PAGE);
	DECLARE_BITMAP(order1, MLX4_IB_DB_PER_PAGE / 2);
	unsigned long	       *bits[2];
	__be32		       *db_page;
	dma_addr_t		db_dma;
};

static struct mlx4_ib_db_pgdir *mlx4_ib_alloc_db_pgdir(struct mlx4_ib_dev *dev)
{
	struct mlx4_ib_db_pgdir *pgdir;

	pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL);
	if (!pgdir)
		return NULL;

	/* A fresh page is entirely free, so every order-1 pair is available. */
	bitmap_fill(pgdir->order1, MLX4_IB_DB_PER_PAGE / 2);
	pgdir->bits[0] = pgdir->order0;
	pgdir->bits[1] = pgdir->order1;
	pgdir->db_page = dma_alloc_coherent(dev->ib_dev.dma_device,
					    PAGE_SIZE, &pgdir->db_dma,
					    GFP_KERNEL);
	if (!pgdir->db_page) {
		kfree(pgdir);
		return NULL;
	}

	return pgdir;
}

static int mlx4_ib_alloc_db_from_pgdir(struct mlx4_ib_db_pgdir *pgdir,
				       struct mlx4_ib_db *db, int order)
{
	int o;
	int i;

	/* Search for a free slot at the requested order, then at higher orders. */
	for (o = order; o <= 1; ++o) {
		i = find_first_bit(pgdir->bits[o], MLX4_IB_DB_PER_PAGE >> o);
		if (i < MLX4_IB_DB_PER_PAGE >> o)
			goto found;
	}

	return -ENOMEM;

found:
	clear_bit(i, pgdir->bits[o]);

	i <<= o;

	/* If we split a larger block, mark our buddy free at the lower order. */
	if (o > order)
		set_bit(i ^ 1, pgdir->bits[order]);

	db->u.pgdir = pgdir;
	db->index   = i;
	db->db      = pgdir->db_page + db->index;
	db->dma     = pgdir->db_dma  + db->index * 4;
	db->order   = order;

	return 0;
}

int mlx4_ib_db_alloc(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db, int order)
{
	struct mlx4_ib_db_pgdir *pgdir;
	int ret = 0;

	mutex_lock(&dev->pgdir_mutex);

	list_for_each_entry(pgdir, &dev->pgdir_list, list)
		if (!mlx4_ib_alloc_db_from_pgdir(pgdir, db, order))
			goto out;

	pgdir = mlx4_ib_alloc_db_pgdir(dev);
	if (!pgdir) {
		ret = -ENOMEM;
		goto out;
	}

	list_add(&pgdir->list, &dev->pgdir_list);

	/* This should never fail -- we just allocated an empty page: */
	WARN_ON(mlx4_ib_alloc_db_from_pgdir(pgdir, db, order));

out:
	mutex_unlock(&dev->pgdir_mutex);

	return ret;
}

void mlx4_ib_db_free(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db)
{
	int o;
	int i;

	mutex_lock(&dev->pgdir_mutex);

	o = db->order;
	i = db->index;

	/* If our order-0 buddy is also free, coalesce back into an order-1 pair. */
	if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
		clear_bit(i ^ 1, db->u.pgdir->order0);
		++o;
	}

	i >>= o;
	set_bit(i, db->u.pgdir->bits[o]);

	/* Release the page once every order-1 pair is free again. */
	if (bitmap_full(db->u.pgdir->order1, MLX4_IB_DB_PER_PAGE / 2)) {
		dma_free_coherent(dev->ib_dev.dma_device, PAGE_SIZE,
				  db->u.pgdir->db_page, db->u.pgdir->db_dma);
		list_del(&db->u.pgdir->list);
		kfree(db->u.pgdir);
	}

	mutex_unlock(&dev->pgdir_mutex);
}

struct mlx4_ib_user_db_page {
	struct list_head	list;
	struct ib_umem	       *umem;
	unsigned long		user_virt;
	int			refcnt;
};

int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
			struct mlx4_ib_db *db)
{
	struct mlx4_ib_user_db_page *page;
	struct ib_umem_chunk *chunk;
	int err = 0;

	mutex_lock(&context->db_page_mutex);

	/* Reuse an existing pinning of this userspace page if we have one. */
	list_for_each_entry(page, &context->db_page_list, list)
		if (page->user_virt == (virt & PAGE_MASK))
			goto found;

	page = kmalloc(sizeof *page, GFP_KERNEL);
	if (!page) {
		err = -ENOMEM;
		goto out;
	}

	page->user_virt = (virt & PAGE_MASK);
	page->refcnt	= 0;
	page->umem	= ib_umem_get(&context->ibucontext, virt & PAGE_MASK,
				      PAGE_SIZE, 0);
	if (IS_ERR(page->umem)) {
		err = PTR_ERR(page->umem);
		kfree(page);
		goto out;
	}

	list_add(&page->list, &context->db_page_list);

found:
	chunk = list_entry(page->umem->chunk_list.next, struct ib_umem_chunk, list);
	db->dma		= sg_dma_address(chunk->page_list) + (virt & ~PAGE_MASK);
	db->u.user_page = page;
	++page->refcnt;

out:
	mutex_unlock(&context->db_page_mutex);

	return err;
}

void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_ib_db *db)
{
	mutex_lock(&context->db_page_mutex);

	/* Drop the reference; unpin and free the page on the last unmap. */
	if (!--db->u.user_page->refcnt) {
		list_del(&db->u.user_page->list);
		ib_umem_release(db->u.user_page->umem);
		kfree(db->u.user_page);
	}

	mutex_unlock(&context->db_page_mutex);
}
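
/*
 * Usage sketch (illustrative only, not part of the original file): how a
 * kernel consumer such as a CQ or QP creation path would pair the alloc
 * and free calls above.  Order 0 reserves a single 32-bit record, order 1
 * an aligned pair.  mlx4_ib_example_db_use() is a hypothetical helper,
 * kept under #if 0 so it is never built; the surrounding object setup and
 * the firmware command that consumes db.dma are elided.
 */
#if 0
static int mlx4_ib_example_db_use(struct mlx4_ib_dev *dev)
{
	struct mlx4_ib_db db;
	int err;

	/* Reserve a single doorbell record (order 0). */
	err = mlx4_ib_db_alloc(dev, &db, 0);
	if (err)
		return err;

	/* Zero the record before handing db.dma to the HCA. */
	*db.db = 0;

	/* ... program db.dma into the CQ/QP hardware context here ... */

	/* Return the record to its pgdir when the object is destroyed. */
	mlx4_ib_db_free(dev, &db);
	return 0;
}
#endif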