/*
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef IB_UMEM_ODP_H
#define IB_UMEM_ODP_H

#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>
#include <linux/interval_tree.h>

struct umem_odp_node {
	u64 __subtree_last;
	struct rb_node rb;
};

struct ib_umem_odp {
	struct ib_umem umem;
	struct ib_ucontext_per_mm *per_mm;

	/*
	 * An array of the pages included in the on-demand paging umem.
	 * Indices of pages that are currently not mapped into the device will
	 * contain NULL.
	 */
	struct page		**page_list;
	/*
	 * An array of the same size as page_list, with DMA addresses mapped
	 * for the pages in page_list. The lower two bits designate
	 * access permissions. See ODP_READ_ALLOWED_BIT and
	 * ODP_WRITE_ALLOWED_BIT.
	 */
	dma_addr_t		*dma_list;
	/*
	 * The umem_mutex protects the page_list and dma_list fields of an ODP
	 * umem, allowing only a single thread to map/unmap pages. The mutex
	 * also protects access to the mmu notifier counters.
	 */
	struct mutex		umem_mutex;
	void			*private; /* for the HW driver to use. */

	int notifiers_seq;
	int notifiers_count;

	/* Tree tracking */
	struct umem_odp_node	interval_tree;

	struct completion	notifier_completion;
	int			dying;
	struct work_struct	work;
};

static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem)
{
	return container_of(umem, struct ib_umem_odp, umem);
}
/*
 * The lower 2 bits of the DMA address signal the R/W permissions for
 * the entry. To upgrade the permissions, provide the appropriate
 * bitmask to ib_umem_odp_map_dma_pages().
 *
 * Be aware that upgrading a mapped address might result in a change of
 * the DMA address for the page.
 */
#define ODP_READ_ALLOWED_BIT  (1<<0ULL)
#define ODP_WRITE_ALLOWED_BIT (1<<1ULL)

#define ODP_DMA_ADDR_MASK (~(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT))
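/*
 * Illustrative sketch only (not part of the kernel API): how a driver
 * could decode a single dma_list entry while holding
 * umem_odp->umem_mutex. "idx", "dma" and "writable" are hypothetical
 * local names used just for this example:
 *
 *	if (umem_odp->page_list[idx]) {
 *		dma_addr_t dma = umem_odp->dma_list[idx] & ODP_DMA_ADDR_MASK;
 *		bool writable = umem_odp->dma_list[idx] &
 *				ODP_WRITE_ALLOWED_BIT;
 *
 *		(program "dma" into the device page tables, granting write
 *		 access only if "writable" is set)
 *	}
 */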
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING

struct ib_ucontext_per_mm {
	struct ib_ucontext *context;
	struct mm_struct *mm;
	struct pid *tgid;
	bool active;

	struct rb_root_cached umem_tree;
	/* Protects umem_tree */
	struct rw_semaphore umem_rwsem;

	struct mmu_notifier mn;
	unsigned int odp_mrs_count;

	struct list_head ucontext_list;
	struct rcu_head rcu;
};

int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access);
struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm,
				      unsigned long addr, size_t size);
void ib_umem_odp_release(struct ib_umem_odp *umem_odp);

int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
			      u64 bcnt, u64 access_mask,
			      unsigned long current_seq);

void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
				 u64 bound);

typedef int (*umem_call_back)(struct ib_umem_odp *item, u64 start, u64 end,
			      void *cookie);
/*
 * Call the callback on each ib_umem in the range. Returns the logical OR of
 * the return values of the functions called.
 */
int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
				  u64 start, u64 end,
				  umem_call_back cb,
				  bool blockable, void *cookie);

/*
 * Find the first region intersecting with the address range.
 * Return NULL if not found.
 */
struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root_cached *root,
				       u64 addr, u64 length);

static inline int ib_umem_mmu_notifier_retry(struct ib_umem_odp *umem_odp,
					     unsigned long mmu_seq)
{
	/*
	 * This code is strongly based on the KVM code from
	 * mmu_notifier_retry. It should be called with the relevant locks
	 * taken (umem_odp->umem_mutex, and the per_mm->umem_rwsem held for
	 * read).
	 */
	if (unlikely(umem_odp->notifiers_count))
		return 1;
	if (umem_odp->notifiers_seq != mmu_seq)
		return 1;
	return 0;
}
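/*
 * A minimal sketch of the fault-side pattern the helpers above are
 * designed for; this is an assumption drawn from the comments here, not
 * a verbatim driver flow. "va", "bcnt", "access_mask" and "npages" are
 * hypothetical locals:
 *
 *	unsigned long current_seq = READ_ONCE(umem_odp->notifiers_seq);
 *	int npages = ib_umem_odp_map_dma_pages(umem_odp, va, bcnt,
 *					       access_mask, current_seq);
 *	if (npages < 0)
 *		return npages;
 *
 *	mutex_lock(&umem_odp->umem_mutex);
 *	if (!ib_umem_mmu_notifier_retry(umem_odp, current_seq))
 *		(push the new translations to the device)
 *	mutex_unlock(&umem_odp->umem_mutex);
 *
 * If ib_umem_mmu_notifier_retry() indicates a racing invalidation, the
 * caller is expected to drop the result and retry the fault.
 */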
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

static inline int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access)
{
	return -EINVAL;
}

static inline struct ib_umem_odp *
ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm, unsigned long addr,
		  size_t size)
{
	return ERR_PTR(-EINVAL);
}

static inline void ib_umem_odp_release(struct ib_umem_odp *umem_odp) {}

#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

#endif /* IB_UMEM_ODP_H */