/*
 * Copyright 2015 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef _MISC_CXL_H
#define _MISC_CXL_H

#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/interrupt.h>
#include <uapi/misc/cxl.h>

/*
 * This documents the in-kernel API for drivers to use CXL. It allows kernel
 * drivers to bind to AFUs using an AFU configuration record exposed as a PCI
 * configuration record.
 *
 * This API enables control over AFUs and contexts which can't be part of the
 * generic PCI API. This API is agnostic to the actual AFU.
 */

#define CXL_SLOT_FLAG_DMA 0x1

/*
 * Checks if the given card is in a cxl capable slot. Pass CXL_SLOT_FLAG_DMA if
 * the card requires CAPP DMA mode to also check if the system supports it.
 * This is intended to be used by bi-modal devices to determine if they can use
 * cxl mode or if they should continue running in PCI mode.
 *
 * Note that this only checks if the slot is cxl capable - it does not check
 * whether the CAPP is currently available for chips where it can be assigned
 * to different PHBs on a first come first served basis (i.e. P8).
 */
bool cxl_slot_is_supported(struct pci_dev *dev, int flags);


#define CXL_BIMODE_CXL 1
#define CXL_BIMODE_PCI 2

/*
 * Check the mode that the given bi-modal CXL adapter is currently in and
 * change it if necessary. This does not apply to AFU drivers.
 *
 * If the mode matches the requested mode, this function will return 0. If the
 * driver was expecting the generic CXL driver to have bound to the adapter and
 * it gets this return value, it should fail its probe function to give the
 * CXL driver a chance to probe the adapter.
 *
 * If the mode does not match, this function will start a background task to
 * unplug the device from Linux and switch its mode, and will return -EBUSY.
 * At this point the calling driver should make sure it has released the
 * device and fail its probe function.
 *
 * The offset of the CXL VSEC can be provided to this function. If 0 is passed,
 * this function will search for a CXL VSEC with ID 0x1280 and return -ENODEV
 * if it is not found.
 */
#ifdef CONFIG_CXL_BIMODAL
int cxl_check_and_switch_mode(struct pci_dev *dev, int mode, int vsec);
#endif

/* Get the AFU associated with a pci_dev */
struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev);

/* Get the AFU conf record number associated with a pci_dev */
unsigned int cxl_pci_to_cfg_record(struct pci_dev *dev);


/*
 * Context lifetime overview:
 *
 * An AFU context may be inited and then started and stopped multiple times
 * before it's released, i.e.
 *	- cxl_dev_context_init()
 *	- cxl_start_context()
 *	- cxl_stop_context()
 *	- cxl_start_context()
 *	- cxl_stop_context()
 *	  ...repeat...
 *	- cxl_release_context()
 * Once released, a context can't be started again.
 *
 * One context is inited by the cxl driver for every pci_dev. This is to be
 * used as a default kernel context. cxl_get_context() will get this
 * context. This context will be released by PCI hot unplug, so doesn't need to
 * be released explicitly by drivers.
 *
 * Additional kernel contexts may be inited using cxl_dev_context_init().
 * These must be released using cxl_release_context().
 *
 * Once a context has been inited, IRQs may be configured. Firstly these IRQs
 * must be allocated (cxl_allocate_afu_irqs()), then individually mapped to
 * specific handlers (cxl_map_afu_irq()).
 *
 * These IRQs can be unmapped (cxl_unmap_afu_irq()) and finally released
 * (cxl_free_afu_irqs()).
 *
 * The AFU can be reset (cxl_afu_reset()). This will cause the PSL/AFU
 * hardware to lose track of all contexts. It's up to the caller of
 * cxl_afu_reset() to restart these contexts.
 */
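
/*
 * For illustration, a minimal sketch of the lifetime above as a driver might
 * use it for an extra kernel context (error handling abbreviated; MY_AFU_WED
 * is a hypothetical work element descriptor for the AFU, not part of this
 * API):
 *
 *	struct cxl_context *ctx;
 *	int rc;
 *
 *	ctx = cxl_dev_context_init(pdev);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *	// task == NULL makes this a kernel context
 *	rc = cxl_start_context(ctx, MY_AFU_WED, NULL);
 *	if (rc) {
 *		cxl_release_context(ctx);
 *		return rc;
 *	}
 *	... use the AFU ...
 *	cxl_stop_context(ctx);
 *	cxl_release_context(ctx);
 */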

/*
 * On pci_enable_device(), the cxl driver will init a single cxl context for
 * use by the driver. It doesn't start this context (as that will likely
 * generate DMA traffic for most AFUs).
 *
 * This gets the default context associated with this pci_dev. This context
 * doesn't need to be released as this will be done by the PCI subsystem on hot
 * unplug.
 */
struct cxl_context *cxl_get_context(struct pci_dev *dev);
/*
 * Allocate and initialise a context associated with an AFU PCI device. This
 * doesn't start the context in the AFU.
 */
struct cxl_context *cxl_dev_context_init(struct pci_dev *dev);
/*
 * Release and free a context. The context should be stopped before calling.
 */
int cxl_release_context(struct cxl_context *ctx);

/*
 * Set and get private data associated with a context. Allows drivers to have a
 * back pointer to some useful structure.
 */
int cxl_set_priv(struct cxl_context *ctx, void *priv);
void *cxl_get_priv(struct cxl_context *ctx);

/*
 * Allocate AFU interrupts for this context. num=0 will allocate the default
 * number for this AFU as given in the AFU descriptor. This number doesn't
 * include interrupt 0 (CAIA defines AFU IRQ 0 for page faults). Each interrupt
 * to be used must be mapped to a handler with cxl_map_afu_irq().
 */
int cxl_allocate_afu_irqs(struct cxl_context *cxl, int num);
/* Free allocated interrupts */
void cxl_free_afu_irqs(struct cxl_context *cxl);

/*
 * Map a handler for an AFU interrupt associated with a particular context. AFU
 * IRQ numbers start from 1 (CAIA defines AFU IRQ 0 for page faults). cookie
 * is private data that will be provided to the interrupt handler.
 */
int cxl_map_afu_irq(struct cxl_context *cxl, int num,
		    irq_handler_t handler, void *cookie, char *name);
/* Unmap mapped IRQ handlers */
void cxl_unmap_afu_irq(struct cxl_context *cxl, int num, void *cookie);
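
/*
 * For illustration, a sketch of allocating and wiring up a single AFU
 * interrupt (my_irq_handler, my_afu and the "my-afu-irq" name are
 * illustrative, not part of the API):
 *
 *	static irqreturn_t my_irq_handler(int irq, void *cookie)
 *	{
 *		struct my_afu *afu = cookie; // cookie from cxl_map_afu_irq()
 *		...
 *		return IRQ_HANDLED;
 *	}
 *	...
 *	rc = cxl_allocate_afu_irqs(ctx, 1);
 *	if (rc)
 *		return rc;
 *	// AFU IRQ numbers start at 1; 0 is reserved for page faults
 *	rc = cxl_map_afu_irq(ctx, 1, my_irq_handler, afu, "my-afu-irq");
 *	if (rc) {
 *		cxl_free_afu_irqs(ctx);
 *		return rc;
 *	}
 *	...
 *	cxl_unmap_afu_irq(ctx, 1, afu);
 *	cxl_free_afu_irqs(ctx);
 */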

/*
 * Start work on the AFU. This starts a cxl context and associates it with a
 * task. task == NULL will make it a kernel context.
 */
int cxl_start_context(struct cxl_context *ctx, u64 wed,
		      struct task_struct *task);
/*
 * Stop a context and remove it from the PSL
 */
int cxl_stop_context(struct cxl_context *ctx);

/* Reset the AFU */
int cxl_afu_reset(struct cxl_context *ctx);

/*
 * Set a context as a master context.
 * This sets the default problem space area mapped to the full space, rather
 * than just the per-context area (used by slaves).
 */
void cxl_set_master(struct cxl_context *ctx);

/*
 * Sets the context to use real mode memory accesses to operate with
 * translation disabled. Note that this only makes sense for kernel contexts
 * under bare metal, and will not work with virtualisation. May only be
 * performed on stopped contexts.
 */
int cxl_set_translation_mode(struct cxl_context *ctx, bool real_mode);

/*
 * Map and unmap the AFU Problem Space area. The amount and location mapped
 * depends on whether this context is a master or slave.
 */
void __iomem *cxl_psa_map(struct cxl_context *ctx);
void cxl_psa_unmap(void __iomem *addr);

/* Get the process element for this context */
int cxl_process_element(struct cxl_context *ctx);

/*
 * Limit the number of interrupts that a single context can allocate via
 * cxl_start_work. If using the API with a real PHB, this may be used to
 * request that additional default contexts be created when allocating
 * interrupts via pci_enable_msix_range. These will be set to the same running
 * state as the default context, and if that is running it will reuse the
 * parameters previously passed to cxl_start_context for the default context.
 */
int cxl_set_max_irqs_per_process(struct pci_dev *dev, int irqs);
int cxl_get_max_irqs_per_process(struct pci_dev *dev);

/*
 * Used to simultaneously iterate over the hardware interrupt numbers, contexts
 * and AFU interrupt numbers allocated for a device via pci_enable_msix_range.
 * This is a useful convenience function when working with hardware that has
 * limitations on the number of interrupts per process. *ctx and *afu_irq
 * should be NULL and 0 to start the iteration.
 */
int cxl_next_msi_hwirq(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq);
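
/*
 * For illustration, a sketch of the iteration protocol described above
 * (assuming the return value is the hardware IRQ number for each step, with a
 * value <= 0 ending the iteration - this return convention is an assumption,
 * not documented here):
 *
 *	struct cxl_context *ctx = NULL;
 *	int afu_irq = 0;
 *	int hwirq;
 *
 *	while ((hwirq = cxl_next_msi_hwirq(pdev, &ctx, &afu_irq)) > 0) {
 *		// hwirq corresponds to AFU interrupt afu_irq of context ctx
 *		...
 *	}
 */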

/*
 * These calls allow drivers to create their own file descriptors and make them
 * identical to the cxl file descriptor user API. An example use case:
 *
 *	struct file_operations cxl_my_fops = {};
 *	......
 *	// Init the context
 *	ctx = cxl_dev_context_init(dev);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *	// Create and attach a new file descriptor to my file ops
 *	file = cxl_get_fd(ctx, &cxl_my_fops, &fd);
 *	// Start context
 *	rc = cxl_start_work(ctx, &work.work);
 *	if (rc) {
 *		fput(file);
 *		put_unused_fd(fd);
 *		return -ENODEV;
 *	}
 *	// No error paths after installing the fd
 *	fd_install(fd, file);
 *	return fd;
 *
 * This inits a context, gets a file descriptor and associates some file ops
 * with that file descriptor. If the file ops are blank, the cxl driver will
 * fill them in with the default ones that mimic the standard user API. Once
 * completed, the file descriptor can be installed. Once the file descriptor is
 * installed, it's visible to the user, so no errors may occur past this point.
 *
 * If the cxl_fd_release() file op is installed, the context will be stopped
 * and released when the fd is released. Hence the driver won't need to manage
 * this itself.
 */

/*
 * Take a context and associate it with my file ops. Returns the associated
 * file and file descriptor. Any file ops which are blank are filled in by the
 * cxl driver with the default ops to mimic the standard API.
 */
struct file *cxl_get_fd(struct cxl_context *ctx, struct file_operations *fops,
			int *fd);
/* Get the context associated with this file */
struct cxl_context *cxl_fops_get_context(struct file *file);
/*
 * Start a context, associated with a struct cxl_ioctl_start_work as used by
 * the standard cxl user API.
 */
int cxl_start_work(struct cxl_context *ctx,
		   struct cxl_ioctl_start_work *work);
/*
 * Export all the existing fops so drivers can use them
 */
int cxl_fd_open(struct inode *inode, struct file *file);
int cxl_fd_release(struct inode *inode, struct file *file);
long cxl_fd_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int cxl_fd_mmap(struct file *file, struct vm_area_struct *vm);
unsigned int cxl_fd_poll(struct file *file, struct poll_table_struct *poll);
ssize_t cxl_fd_read(struct file *file, char __user *buf, size_t count,
		    loff_t *off);

/*
 * For EEH, a driver may want to assert that a PERST will reload the same image
 * from flash into the FPGA.
 *
 * This is a property of the entire adapter, not a single AFU, so drivers
 * should set this property with care!
 */
void cxl_perst_reloads_same_image(struct cxl_afu *afu,
				  bool perst_reloads_same_image);

/*
 * Read the VPD for the card where the AFU resides
 */
ssize_t cxl_read_adapter_vpd(struct pci_dev *dev, void *buf, size_t count);

/*
 * AFU driver ops allow an AFU driver to create its own events to pass to
 * userspace through the file descriptor, as a simpler alternative to
 * overriding the read() and poll() calls that work with the generic cxl
 * events. These events are given priority over the generic cxl events, so they
 * will be delivered first if multiple types of events are pending.
 *
 * The AFU driver must call cxl_context_events_pending() to notify the cxl
 * driver that new events are ready to be delivered for a specific context.
 * cxl_context_events_pending() will adjust the current count of AFU driver
 * events for this context, and wake up anyone waiting on the context wait
 * queue.
 *
 * The cxl driver will then call fetch_event() to get a structure defining
 * the size and address of the driver specific event data. The cxl driver
 * will build a cxl header with the type and process_element fields filled in,
 * and header.size set to sizeof(struct cxl_event_header) + data_size.
 * The total size of the event is limited to CXL_READ_MIN_SIZE (4K).
 *
 * fetch_event() is called with a spin lock held, so it must not sleep.
 *
 * The cxl driver will then deliver the event to userspace, and finally
 * call event_delivered() to return the status of the operation, identified
 * by the cxl context and AFU driver event data pointers:
 *	 0	 Success
 *	-EFAULT	 copy_to_user() has failed
 *	-EINVAL	 Event data pointer is NULL, or event size is greater than
 *		 CXL_READ_MIN_SIZE.
 */
struct cxl_afu_driver_ops {
	struct cxl_event_afu_driver_reserved *(*fetch_event) (
						struct cxl_context *ctx);
	void (*event_delivered) (struct cxl_context *ctx,
				 struct cxl_event_afu_driver_reserved *event,
				 int rc);
};

/*
 * Associate the above driver ops with a specific context.
 * Reset the current count of AFU driver events.
 */
void cxl_set_driver_ops(struct cxl_context *ctx,
			struct cxl_afu_driver_ops *ops);

/* Notify the cxl driver that new events are ready to be delivered for context */
void cxl_context_events_pending(struct cxl_context *ctx,
				unsigned int new_events);

#endif /* _MISC_CXL_H */