/*
 * Copyright (c) 2012-2016 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __PVRDMA_H__
#define __PVRDMA_H__

#include <linux/compiler.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/semaphore.h>
#include <linux/workqueue.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>
#include <rdma/vmw_pvrdma-abi.h>

#include "pvrdma_ring.h"
#include "pvrdma_dev_api.h"
#include "pvrdma_verbs.h"

/* NOT the same as BIT_MASK(). */
#define PVRDMA_MASK(n) (((n) << 1) - 1)
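
/*
 * PVRDMA_MASK() assumes its argument is the highest single-bit flag an
 * enum defines and yields a mask covering that bit and every bit below
 * it. Illustrative arithmetic (the 0x100 value is hypothetical):
 * PVRDMA_MASK(0x100) == ((0x100 << 1) - 1) == 0x1ff, i.e. bits 0-8 set,
 * whereas BIT_MASK(0x100) would set only bit (0x100 % BITS_PER_LONG).
 */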

/*
 * VMware PVRDMA PCI device id.
 */
#define PCI_DEVICE_ID_VMWARE_PVRDMA	0x0820

#define PVRDMA_NUM_RING_PAGES		4
#define PVRDMA_QP_NUM_HEADER_PAGES	1

struct pvrdma_dev;

struct pvrdma_page_dir {
	dma_addr_t dir_dma;
	u64 *dir;
	int ntables;
	u64 **tables;
	u64 npages;
	void **pages;
};

struct pvrdma_cq {
	struct ib_cq ibcq;
	int offset;
	spinlock_t cq_lock; /* Poll lock. */
	struct pvrdma_uar_map *uar;
	struct ib_umem *umem;
	struct pvrdma_ring_state *ring_state;
	struct pvrdma_page_dir pdir;
	u32 cq_handle;
	bool is_kernel;
	atomic_t refcnt;
	wait_queue_head_t wait;
};

struct pvrdma_id_table {
	u32 last;
	u32 top;
	u32 max;
	u32 mask;
	spinlock_t lock; /* Table lock. */
	unsigned long *table;
};

struct pvrdma_uar_map {
	unsigned long pfn;
	void __iomem *map;
	int index;
};

struct pvrdma_uar_table {
	struct pvrdma_id_table tbl;
	int size;
};

struct pvrdma_ucontext {
	struct ib_ucontext ibucontext;
	struct pvrdma_dev *dev;
	struct pvrdma_uar_map uar;
	u64 ctx_handle;
};

struct pvrdma_pd {
	struct ib_pd ibpd;
	u32 pdn;
	u32 pd_handle;
	int privileged;
};

struct pvrdma_mr {
	u32 mr_handle;
	u64 iova;
	u64 size;
};

struct pvrdma_user_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;
	struct pvrdma_mr mmr;
	struct pvrdma_page_dir pdir;
	u64 *pages;
	u32 npages;
	u32 max_pages;
	u32 page_shift;
};

struct pvrdma_wq {
	struct pvrdma_ring *ring;
	spinlock_t lock; /* Work queue lock. */
	int wqe_cnt;
	int wqe_size;
	int max_sg;
	int offset;
};

struct pvrdma_ah {
	struct ib_ah ibah;
	struct pvrdma_av av;
};

struct pvrdma_qp {
	struct ib_qp ibqp;
	u32 qp_handle;
	u32 qkey;
	struct pvrdma_wq sq;
	struct pvrdma_wq rq;
	struct ib_umem *rumem;
	struct ib_umem *sumem;
	struct pvrdma_page_dir pdir;
	int npages;
	int npages_send;
	int npages_recv;
	u32 flags;
	u8 port;
	u8 state;
	bool is_kernel;
	struct mutex mutex; /* QP state mutex. */
	atomic_t refcnt;
	wait_queue_head_t wait;
};

struct pvrdma_dev {
	/* PCI device-related information. */
	struct ib_device ib_dev;
	struct pci_dev *pdev;
	void __iomem *regs;
	struct pvrdma_device_shared_region *dsr; /* Shared region pointer. */
	dma_addr_t dsrbase; /* Shared region base address. */
	void *cmd_slot;
	void *resp_slot;
	unsigned long flags;
	struct list_head device_link;
	unsigned int dsr_version;

	/* Locking and interrupt information. */
	spinlock_t cmd_lock; /* Command lock. */
	struct semaphore cmd_sema;
	struct completion cmd_done;
	unsigned int nr_vectors;

	/* RDMA-related device information. */
	union ib_gid *sgid_tbl;
	struct pvrdma_ring_state *async_ring_state;
	struct pvrdma_page_dir async_pdir;
	struct pvrdma_ring_state *cq_ring_state;
	struct pvrdma_page_dir cq_pdir;
	struct pvrdma_cq **cq_tbl;
	spinlock_t cq_tbl_lock;
	struct pvrdma_qp **qp_tbl;
	spinlock_t qp_tbl_lock;
	struct pvrdma_uar_table uar_table;
	struct pvrdma_uar_map driver_uar;
	__be64 sys_image_guid;
	spinlock_t desc_lock; /* Device modification lock. */
	u32 port_cap_mask;
	struct mutex port_mutex; /* Port modification mutex. */
	bool ib_active;
	atomic_t num_qps;
	atomic_t num_cqs;
	atomic_t num_pds;
	atomic_t num_ahs;

	/* Network device information. */
	struct net_device *netdev;
	struct notifier_block nb_netdev;
};

struct pvrdma_netdevice_work {
	struct work_struct work;
	struct net_device *event_netdev;
	unsigned long event;
};

static inline struct pvrdma_dev *to_vdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct pvrdma_dev, ib_dev);
}

static inline struct
pvrdma_ucontext *to_vucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct pvrdma_ucontext, ibucontext);
}

static inline struct pvrdma_pd *to_vpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct pvrdma_pd, ibpd);
}

static inline struct pvrdma_cq *to_vcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct pvrdma_cq, ibcq);
}

static inline struct pvrdma_user_mr *to_vmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct pvrdma_user_mr, ibmr);
}

static inline struct pvrdma_qp *to_vqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct pvrdma_qp, ibqp);
}

static inline struct pvrdma_ah *to_vah(struct ib_ah *ibah)
{
	return container_of(ibah, struct pvrdma_ah, ibah);
}

static inline void pvrdma_write_reg(struct pvrdma_dev *dev, u32 reg, u32 val)
{
	writel(cpu_to_le32(val), dev->regs + reg);
}

static inline u32 pvrdma_read_reg(struct pvrdma_dev *dev, u32 reg)
{
	return le32_to_cpu(readl(dev->regs + reg));
}

static inline void pvrdma_write_uar_cq(struct pvrdma_dev *dev, u32 val)
{
	writel(cpu_to_le32(val), dev->driver_uar.map + PVRDMA_UAR_CQ_OFFSET);
}

static inline void pvrdma_write_uar_qp(struct pvrdma_dev *dev, u32 val)
{
	writel(cpu_to_le32(val), dev->driver_uar.map + PVRDMA_UAR_QP_OFFSET);
}

static inline void *pvrdma_page_dir_get_ptr(struct pvrdma_page_dir *pdir,
					    u64 offset)
{
	return pdir->pages[offset / PAGE_SIZE] + (offset % PAGE_SIZE);
}
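
/*
 * Worked example for pvrdma_page_dir_get_ptr() above, assuming 4 KiB
 * pages: a byte offset of 9000 resolves to
 * pdir->pages[9000 / 4096] + (9000 % 4096), i.e. byte 808 of the third
 * mapped page (pdir->pages[2]).
 */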

static inline enum pvrdma_mtu ib_mtu_to_pvrdma(enum ib_mtu mtu)
{
	return (enum pvrdma_mtu)mtu;
}

static inline enum ib_mtu pvrdma_mtu_to_ib(enum pvrdma_mtu mtu)
{
	return (enum ib_mtu)mtu;
}

static inline enum pvrdma_port_state ib_port_state_to_pvrdma(
				enum ib_port_state state)
{
	return (enum pvrdma_port_state)state;
}

static inline enum ib_port_state pvrdma_port_state_to_ib(
				enum pvrdma_port_state state)
{
	return (enum ib_port_state)state;
}

static inline int ib_port_cap_flags_to_pvrdma(int flags)
{
	return flags & PVRDMA_MASK(PVRDMA_PORT_CAP_FLAGS_MAX);
}

static inline int pvrdma_port_cap_flags_to_ib(int flags)
{
	return flags;
}

static inline enum pvrdma_port_width ib_port_width_to_pvrdma(
				enum ib_port_width width)
{
	return (enum pvrdma_port_width)width;
}

static inline enum ib_port_width pvrdma_port_width_to_ib(
				enum pvrdma_port_width width)
{
	return (enum ib_port_width)width;
}

static inline enum pvrdma_port_speed ib_port_speed_to_pvrdma(
				enum ib_port_speed speed)
{
	return (enum pvrdma_port_speed)speed;
}

static inline enum ib_port_speed pvrdma_port_speed_to_ib(
				enum pvrdma_port_speed speed)
{
	return (enum ib_port_speed)speed;
}

static inline int pvrdma_qp_attr_mask_to_ib(int attr_mask)
{
	return attr_mask;
}

static inline int ib_qp_attr_mask_to_pvrdma(int attr_mask)
{
	return attr_mask & PVRDMA_MASK(PVRDMA_QP_ATTR_MASK_MAX);
}
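
/*
 * A note on the converters in this file: the PVRDMA enums mirror their
 * IB counterparts value-for-value, so single-value conversions reduce
 * to casts. For each flag-mask pair, one direction filters with
 * PVRDMA_MASK() to drop bits beyond the highest flag the shared ABI
 * defines, while the opposite direction passes through unchanged.
 * Illustrative call (the local variable is hypothetical):
 *
 *	int attrs = ib_qp_attr_mask_to_pvrdma(IB_QP_STATE | IB_QP_CUR_STATE);
 */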

static inline enum pvrdma_mig_state ib_mig_state_to_pvrdma(
				enum ib_mig_state state)
{
	return (enum pvrdma_mig_state)state;
}

static inline enum ib_mig_state pvrdma_mig_state_to_ib(
				enum pvrdma_mig_state state)
{
	return (enum ib_mig_state)state;
}

static inline int ib_access_flags_to_pvrdma(int flags)
{
	return flags;
}

static inline int pvrdma_access_flags_to_ib(int flags)
{
	return flags & PVRDMA_MASK(PVRDMA_ACCESS_FLAGS_MAX);
}

static inline enum pvrdma_qp_type ib_qp_type_to_pvrdma(enum ib_qp_type type)
{
	return (enum pvrdma_qp_type)type;
}

static inline enum ib_qp_type pvrdma_qp_type_to_ib(enum pvrdma_qp_type type)
{
	return (enum ib_qp_type)type;
}

static inline enum pvrdma_qp_state ib_qp_state_to_pvrdma(enum ib_qp_state state)
{
	return (enum pvrdma_qp_state)state;
}

static inline enum ib_qp_state pvrdma_qp_state_to_ib(enum pvrdma_qp_state state)
{
	return (enum ib_qp_state)state;
}

static inline enum pvrdma_wr_opcode ib_wr_opcode_to_pvrdma(enum ib_wr_opcode op)
{
	return (enum pvrdma_wr_opcode)op;
}

static inline enum ib_wc_status pvrdma_wc_status_to_ib(
				enum pvrdma_wc_status status)
{
	return (enum ib_wc_status)status;
}

static inline int pvrdma_wc_opcode_to_ib(unsigned int opcode)
{
	switch (opcode) {
	case PVRDMA_WC_SEND:
		return IB_WC_SEND;
	case PVRDMA_WC_RDMA_WRITE:
		return IB_WC_RDMA_WRITE;
	case PVRDMA_WC_RDMA_READ:
		return IB_WC_RDMA_READ;
	case PVRDMA_WC_COMP_SWAP:
		return IB_WC_COMP_SWAP;
	case PVRDMA_WC_FETCH_ADD:
		return IB_WC_FETCH_ADD;
	case PVRDMA_WC_LOCAL_INV:
		return IB_WC_LOCAL_INV;
	case PVRDMA_WC_FAST_REG_MR:
		return IB_WC_REG_MR;
	case PVRDMA_WC_MASKED_COMP_SWAP:
		return IB_WC_MASKED_COMP_SWAP;
	case PVRDMA_WC_MASKED_FETCH_ADD:
		return IB_WC_MASKED_FETCH_ADD;
	case PVRDMA_WC_RECV:
		return IB_WC_RECV;
	case PVRDMA_WC_RECV_RDMA_WITH_IMM:
		return IB_WC_RECV_RDMA_WITH_IMM;
	default:
		return IB_WC_SEND;
	}
}

static inline int pvrdma_wc_flags_to_ib(int flags)
{
	return flags;
}

static inline int ib_send_flags_to_pvrdma(int flags)
{
	return flags & PVRDMA_MASK(PVRDMA_SEND_FLAGS_MAX);
}

void pvrdma_qp_cap_to_ib(struct ib_qp_cap *dst,
			 const struct pvrdma_qp_cap *src);
void ib_qp_cap_to_pvrdma(struct pvrdma_qp_cap *dst,
			 const struct ib_qp_cap *src);
void pvrdma_gid_to_ib(union ib_gid *dst, const union pvrdma_gid *src);
void ib_gid_to_pvrdma(union pvrdma_gid *dst, const union ib_gid *src);
void pvrdma_global_route_to_ib(struct ib_global_route *dst,
			       const struct pvrdma_global_route *src);
void ib_global_route_to_pvrdma(struct pvrdma_global_route *dst,
			       const struct ib_global_route *src);
void pvrdma_ah_attr_to_rdma(struct rdma_ah_attr *dst,
			    const struct pvrdma_ah_attr *src);
void rdma_ah_attr_to_pvrdma(struct pvrdma_ah_attr *dst,
			    const struct rdma_ah_attr *src);
u8 ib_gid_type_to_pvrdma(enum ib_gid_type gid_type);

int pvrdma_uar_table_init(struct pvrdma_dev *dev);
void pvrdma_uar_table_cleanup(struct pvrdma_dev *dev);

int pvrdma_uar_alloc(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar);
void pvrdma_uar_free(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar);

void _pvrdma_flush_cqe(struct pvrdma_qp *qp, struct pvrdma_cq *cq);

int pvrdma_page_dir_init(struct pvrdma_dev *dev, struct pvrdma_page_dir *pdir,
			 u64 npages, bool alloc_pages);
void pvrdma_page_dir_cleanup(struct pvrdma_dev *dev,
			     struct pvrdma_page_dir *pdir);
int pvrdma_page_dir_insert_dma(struct pvrdma_page_dir *pdir, u64 idx,
			       dma_addr_t daddr);
int pvrdma_page_dir_insert_umem(struct pvrdma_page_dir *pdir,
				struct ib_umem *umem, u64 offset);
dma_addr_t pvrdma_page_dir_get_dma(struct pvrdma_page_dir *pdir, u64 idx);
int pvrdma_page_dir_insert_page_list(struct pvrdma_page_dir *pdir,
				     u64 *page_list, int num_pages);

int pvrdma_cmd_post(struct pvrdma_dev *dev, union pvrdma_cmd_req *req,
		    union pvrdma_cmd_resp *rsp, unsigned int resp_code);
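
/*
 * Minimal sketch of the pvrdma_cmd_post() call pattern. The request and
 * response union members and the command constants below come from
 * pvrdma_dev_api.h and are shown for illustration only, not as a spec:
 *
 *	union pvrdma_cmd_req req = {};
 *	union pvrdma_cmd_resp rsp;
 *	struct pvrdma_cmd_create_cq *cmd = &req.create_cq;
 *	int ret;
 *
 *	cmd->hdr.cmd = PVRDMA_CMD_CREATE_CQ;
 *	... fill in the remaining request fields ...
 *	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_CQ_RESP);
 *
 * A non-zero return indicates the command could not be posted or the
 * device's response did not carry the expected resp_code.
 */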

#endif /* __PVRDMA_H__ */