/*
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef IB_UMEM_ODP_H
#define IB_UMEM_ODP_H

#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>
#include <linux/interval_tree.h>

struct umem_odp_node {
        u64 __subtree_last;
        struct rb_node rb;
};

struct ib_umem_odp {
        struct ib_umem umem;
        /*
         * An array of the pages included in the on-demand paging umem.
         * Indices of pages that are currently not mapped into the device will
         * contain NULL.
         */
        struct page **page_list;
        /*
         * An array of the same size as page_list, with DMA addresses mapped
         * for the pages in page_list. The lower two bits designate
         * access permissions. See ODP_READ_ALLOWED_BIT and
         * ODP_WRITE_ALLOWED_BIT.
         */
        dma_addr_t *dma_list;
        /*
         * The umem_mutex protects the page_list and dma_list fields of an ODP
         * umem, allowing only a single thread to map/unmap pages. The mutex
         * also protects access to the mmu notifier counters.
         */
        struct mutex umem_mutex;
        void *private; /* for the HW driver to use. */

        /* When false, use the notifier counter in the ucontext struct. */
        bool mn_counters_active;
        int notifiers_seq;
        int notifiers_count;

        /* A linked list of umems that don't have private mmu notifier
         * counters yet. */
        struct list_head no_private_counters;

        /* Tree tracking */
        struct umem_odp_node interval_tree;

        struct completion notifier_completion;
        int dying;
        struct work_struct work;
};

static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem)
{
        return container_of(umem, struct ib_umem_odp, umem);
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING

int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access);
struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext *context,
                                      unsigned long addr, size_t size);
void ib_umem_odp_release(struct ib_umem_odp *umem_odp);

/*
 * The lower 2 bits of the DMA address signal the R/W permissions for
 * the entry. To upgrade the permissions, provide the appropriate
 * bitmask to the map_dma_pages function.
 *
 * Be aware that upgrading a mapped address might result in change of
 * the DMA address for the page.
 */
#define ODP_READ_ALLOWED_BIT  (1<<0ULL)
#define ODP_WRITE_ALLOWED_BIT (1<<1ULL)

#define ODP_DMA_ADDR_MASK (~(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT))
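
/*
 * Illustrative helpers, not part of the kernel API (the names are
 * hypothetical): a minimal sketch of how a driver might decode a single
 * dma_list entry using the masks above.
 */
static inline dma_addr_t ib_umem_odp_example_dma_addr(dma_addr_t entry)
{
        /* Clear the low permission bits to recover the bus address. */
        return entry & ODP_DMA_ADDR_MASK;
}

static inline bool ib_umem_odp_example_write_allowed(dma_addr_t entry)
{
        /* A page mapped for write has ODP_WRITE_ALLOWED_BIT set. */
        return entry & ODP_WRITE_ALLOWED_BIT;
}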
int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
                              u64 bcnt, u64 access_mask,
                              unsigned long current_seq);

void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
                                 u64 bound);

typedef int (*umem_call_back)(struct ib_umem_odp *item, u64 start, u64 end,
                              void *cookie);
/*
 * Call the callback on each ib_umem in the range. Returns the logical or of
 * the return values of the functions called.
 */
int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
                                  u64 start, u64 end,
                                  umem_call_back cb,
                                  bool blockable, void *cookie);

/*
 * Find the first region intersecting with the address range.
 * Returns NULL if not found.
 */
struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root_cached *root,
                                       u64 addr, u64 length);

static inline int ib_umem_mmu_notifier_retry(struct ib_umem_odp *umem_odp,
                                             unsigned long mmu_seq)
{
        /*
         * This code is strongly based on the KVM code from
         * mmu_notifier_retry. Should be called with
         * the relevant locks taken (umem_odp->umem_mutex
         * and the ucontext umem_rwsem semaphore locked for read).
         */

        /* Do not allow page faults while the new ib_umem hasn't seen a state
         * with zero notifiers yet, and doesn't have its own valid set of
         * private counters. */
        if (!umem_odp->mn_counters_active)
                return 1;

        if (unlikely(umem_odp->notifiers_count))
                return 1;
        if (umem_odp->notifiers_seq != mmu_seq)
                return 1;
        return 0;
}
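
/*
 * Hypothetical example of a umem_call_back (not kernel API): counts the
 * umems that rbt_ib_umem_for_each_in_range() visits in a range, e.g.
 *   int n = 0;
 *   rbt_ib_umem_for_each_in_range(root, start, end,
 *                                 ib_umem_odp_example_count_cb, true, &n);
 */
static inline int ib_umem_odp_example_count_cb(struct ib_umem_odp *item,
                                               u64 start, u64 end,
                                               void *cookie)
{
        (*(int *)cookie)++;
        return 0;
}

/*
 * A minimal sketch of the fault-handling pattern the notifier counters
 * support (an assumption based on the KVM-style sequence counts above,
 * not a kernel API; the function name is hypothetical): sample
 * notifiers_seq, map the pages, then re-check under umem_mutex. A
 * non-zero return from ib_umem_mmu_notifier_retry() means an
 * invalidation raced with the fault, so the mapping must be redone.
 */
static inline int ib_umem_odp_example_fault(struct ib_umem_odp *umem_odp,
                                            u64 offset, u64 bcnt,
                                            u64 access_mask)
{
        unsigned long seq;
        int npages;

retry:
        seq = umem_odp->notifiers_seq;
        /* Pair with the notifier side: read the seq before mapping. */
        smp_rmb();
        npages = ib_umem_odp_map_dma_pages(umem_odp, offset, bcnt,
                                           access_mask, seq);
        if (npages < 0)
                return npages;

        mutex_lock(&umem_odp->umem_mutex);
        if (ib_umem_mmu_notifier_retry(umem_odp, seq)) {
                /* An invalidation ran concurrently; redo the mapping. */
                mutex_unlock(&umem_odp->umem_mutex);
                goto retry;
        }
        /* ... update the device page tables here, under umem_mutex ... */
        mutex_unlock(&umem_odp->umem_mutex);
        return npages;
}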
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

static inline int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access)
{
        return -EINVAL;
}

static inline struct ib_umem_odp *
ib_alloc_odp_umem(struct ib_ucontext *context, unsigned long addr, size_t size)
{
        return ERR_PTR(-EINVAL);
}

static inline void ib_umem_odp_release(struct ib_umem_odp *umem_odp) {}

#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

#endif /* IB_UMEM_ODP_H */