/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <asm/pgtable.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

void rxe_mmap_release(struct kref *ref)
{
	struct rxe_mmap_info *ip = container_of(ref,
					struct rxe_mmap_info, ref);
	struct rxe_dev *rxe = to_rdev(ip->context->device);

	spin_lock_bh(&rxe->pending_lock);

	if (!list_empty(&ip->pending_mmaps))
		list_del(&ip->pending_mmaps);

	spin_unlock_bh(&rxe->pending_lock);

	vfree(ip->obj);		/* buf */
	kfree(ip);
}

/*
 * open and close keep track of how many times the memory region is mapped,
 * to avoid releasing it.
 */
static void rxe_vma_open(struct vm_area_struct *vma)
{
	struct rxe_mmap_info *ip = vma->vm_private_data;

	kref_get(&ip->ref);
}

static void rxe_vma_close(struct vm_area_struct *vma)
{
	struct rxe_mmap_info *ip = vma->vm_private_data;

	kref_put(&ip->ref, rxe_mmap_release);
}

static const struct vm_operations_struct rxe_vm_ops = {
	.open = rxe_vma_open,
	.close = rxe_vma_close,
};

/**
 * rxe_mmap - create a new mmap region
 * @context: the IB user context of the process making the mmap() call
 * @vma: the VMA to be initialized
 * Return zero if the mmap is OK. Otherwise, return an errno.
 */
int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct rxe_dev *rxe = to_rdev(context->device);
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct rxe_mmap_info *ip, *pp;
	int ret;

	/*
	 * Search the device's list of objects waiting for a mmap call.
	 * Normally, this list is very short since a call to create a
	 * CQ, QP, or SRQ is soon followed by a call to mmap().
	 */
	spin_lock_bh(&rxe->pending_lock);
	list_for_each_entry_safe(ip, pp, &rxe->pending_mmaps, pending_mmaps) {
		if (context != ip->context || (__u64)offset != ip->info.offset)
			continue;

		/* Don't allow a mmap larger than the object. */
		if (size > ip->info.size) {
			pr_err("mmap region is larger than the object!\n");
			spin_unlock_bh(&rxe->pending_lock);
			ret = -EINVAL;
			goto done;
		}

		goto found_it;
	}
	pr_warn("unable to find pending mmap info\n");
	spin_unlock_bh(&rxe->pending_lock);
	ret = -EINVAL;
	goto done;

found_it:
	list_del_init(&ip->pending_mmaps);
	spin_unlock_bh(&rxe->pending_lock);

	ret = remap_vmalloc_range(vma, ip->obj, 0);
	if (ret) {
		pr_err("err %d from remap_vmalloc_range\n", ret);
		goto done;
	}

	vma->vm_ops = &rxe_vm_ops;
	vma->vm_private_data = ip;
	rxe_vma_open(vma);
done:
	return ret;
}

/*
 * Allocate information for rxe_mmap
 */
struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe,
					   u32 size,
					   struct ib_ucontext *context,
					   void *obj)
{
	struct rxe_mmap_info *ip;

	ip = kmalloc(sizeof(*ip), GFP_KERNEL);
	if (!ip)
		return NULL;

	size = PAGE_ALIGN(size);

	spin_lock_bh(&rxe->mmap_offset_lock);

	if (rxe->mmap_offset == 0)
		rxe->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);

	ip->info.offset = rxe->mmap_offset;
	rxe->mmap_offset += ALIGN(size, SHMLBA);

	spin_unlock_bh(&rxe->mmap_offset_lock);

	INIT_LIST_HEAD(&ip->pending_mmaps);
	ip->info.size = size;
	ip->context = context;
	ip->obj = obj;
	kref_init(&ip->ref);

	return ip;
}
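
/*
 * Example usage (illustrative sketch only, compiled out below): a typical
 * create path for a CQ, QP or SRQ allocates its queue buffer, wraps it
 * with rxe_create_mmap_info(), returns ip->info (offset and size) to
 * userspace through the uverbs udata so the library can pass that offset
 * to mmap() on the device fd, and only then queues the entry on
 * rxe->pending_mmaps so that rxe_mmap() above can find it.  The function
 * name example_export_queue() and its exact error handling are
 * assumptions made for illustration; the real caller lives in the queue
 * setup code (rxe_queue.c).
 */
#if 0
static int example_export_queue(struct rxe_dev *rxe,
				struct ib_ucontext *context,
				void *buf, size_t buf_size,
				struct rxe_mmap_info **ip_p)
{
	struct rxe_mmap_info *ip;

	/* reserve a unique mmap offset and remember the buffer */
	ip = rxe_create_mmap_info(rxe, buf_size, context, buf);
	if (!ip)
		return -ENOMEM;

	/*
	 * At this point ip->info.offset and ip->info.size would be copied
	 * back to userspace (e.g. through ib_udata), so that the user
	 * library can mmap() the buffer at that offset.
	 */

	/* make the entry visible to rxe_mmap() */
	spin_lock_bh(&rxe->pending_lock);
	list_add(&ip->pending_mmaps, &rxe->pending_mmaps);
	spin_unlock_bh(&rxe->pending_lock);

	*ip_p = ip;
	return 0;
}
#endif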