// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)

#include <linux/irq.h>
#include <linux/module.h>
#include <linux/ntb.h>
#include <linux/msi.h>
#include <linux/pci.h>

struct ntb_msi {
	u64 base_addr;
	u64 end_addr;

	void (*desc_changed)(void *ctx);

	u32 __iomem *peer_mws[];
};

/**
 * ntb_msi_init() - Initialize the MSI context
 * @ntb: NTB device context
 * @desc_changed: Callback invoked when an MSI descriptor changes,
 *	may be NULL
 *
 * This function must be called before any other ntb_msi function.
 * It initializes the context for MSI operations and maps
 * the peer memory windows.
 *
 * This function reserves the last N outbound memory windows (where N
 * is the number of peers).
 *
 * Return: Zero on success, otherwise a negative error number.
 */
int ntb_msi_init(struct ntb_dev *ntb,
		 void (*desc_changed)(void *ctx))
{
	phys_addr_t mw_phys_addr;
	resource_size_t mw_size;
	size_t struct_size;
	int peer_widx;
	int peers;
	int ret;
	int i;

	peers = ntb_peer_port_count(ntb);
	if (peers <= 0)
		return -EINVAL;

	struct_size = sizeof(*ntb->msi) + sizeof(*ntb->msi->peer_mws) * peers;

	ntb->msi = devm_kzalloc(&ntb->dev, struct_size, GFP_KERNEL);
	if (!ntb->msi)
		return -ENOMEM;

	ntb->msi->desc_changed = desc_changed;

	for (i = 0; i < peers; i++) {
		peer_widx = ntb_peer_mw_count(ntb) - 1 - i;

		ret = ntb_peer_mw_get_addr(ntb, peer_widx, &mw_phys_addr,
					   &mw_size);
		if (ret)
			goto unroll;

		ntb->msi->peer_mws[i] = devm_ioremap(&ntb->dev, mw_phys_addr,
						     mw_size);
		if (!ntb->msi->peer_mws[i]) {
			ret = -EFAULT;
			goto unroll;
		}
	}

	return 0;

unroll:
	/* peer_mws[] was zeroed by devm_kzalloc(), so unmapped slots are NULL */
	for (i = 0; i < peers; i++)
		if (ntb->msi->peer_mws[i])
			devm_iounmap(&ntb->dev, ntb->msi->peer_mws[i]);

	devm_kfree(&ntb->dev, ntb->msi);
	ntb->msi = NULL;
	return ret;
}
EXPORT_SYMBOL(ntb_msi_init);
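
/*
 * Example client setup (a minimal sketch; the client structure, callback
 * and work item names below are hypothetical, not part of this file).
 * A client registers a desc_changed callback so it can re-publish its
 * MSI descriptors to the peers whenever the kernel rewrites an MSI
 * message (e.g. when IRQ affinity changes):
 *
 *	static void my_client_desc_changed(void *ctx)
 *	{
 *		struct my_client *nc = ctx;
 *
 *		// Re-send each local ntb_msi_desc to the peers,
 *		// e.g. through scratchpad registers.
 *		schedule_work(&nc->publish_descs_work);
 *	}
 *
 *	static int my_client_probe(struct ntb_client *client,
 *				   struct ntb_dev *ntb)
 *	{
 *		int ret = ntb_msi_init(ntb, my_client_desc_changed);
 *
 *		if (ret)
 *			return ret;
 *		...
 *	}
 */
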
/**
 * ntb_msi_setup_mws() - Initialize the MSI inbound memory windows
 * @ntb: NTB device context
 *
 * This function sets up the required inbound memory windows. It should be
 * called from a work function after a link up event.
 *
 * Over the entire network, this function will reserve the last N
 * inbound memory windows for each peer (where N is the number of peers).
 *
 * ntb_msi_init() must be called before this function.
 *
 * Return: Zero on success, otherwise a negative error number.
 */
int ntb_msi_setup_mws(struct ntb_dev *ntb)
{
	struct msi_desc *desc;
	u64 addr;
	int peer, peer_widx;
	resource_size_t addr_align, size_align, size_max;
	resource_size_t mw_size = SZ_32K;
	resource_size_t mw_min_size = mw_size;
	int i;
	int ret;

	if (!ntb->msi)
		return -EINVAL;

	desc = first_msi_entry(&ntb->pdev->dev);
	addr = desc->msg.address_lo + ((uint64_t)desc->msg.address_hi << 32);

	for (peer = 0; peer < ntb_peer_port_count(ntb); peer++) {
		peer_widx = ntb_peer_highest_mw_idx(ntb, peer);
		if (peer_widx < 0)
			return peer_widx;

		ret = ntb_mw_get_align(ntb, peer, peer_widx, &addr_align,
				       NULL, NULL);
		if (ret)
			return ret;

		addr &= ~(addr_align - 1);
	}

	for (peer = 0; peer < ntb_peer_port_count(ntb); peer++) {
		peer_widx = ntb_peer_highest_mw_idx(ntb, peer);
		if (peer_widx < 0) {
			ret = peer_widx;
			goto error_out;
		}

		ret = ntb_mw_get_align(ntb, peer, peer_widx, NULL,
				       &size_align, &size_max);
		if (ret)
			goto error_out;

		mw_size = round_up(mw_size, size_align);
		mw_size = max(mw_size, size_max);
		if (mw_size < mw_min_size)
			mw_min_size = mw_size;

		ret = ntb_mw_set_trans(ntb, peer, peer_widx,
				       addr, mw_size);
		if (ret)
			goto error_out;
	}

	ntb->msi->base_addr = addr;
	ntb->msi->end_addr = addr + mw_min_size;

	return 0;

error_out:
	/* Unwind the translations already set up for the earlier peers */
	for (i = 0; i < peer; i++) {
		peer_widx = ntb_peer_highest_mw_idx(ntb, i);
		if (peer_widx < 0)
			continue;

		ntb_mw_clear_trans(ntb, i, peer_widx);
	}

	return ret;
}
EXPORT_SYMBOL(ntb_msi_setup_mws);
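
/*
 * Example link management (a minimal sketch; the work item and client
 * names are hypothetical, not part of this file).  Because
 * ntb_msi_setup_mws() programs window translations, it is intended to
 * run from a deferred work function rather than directly from the
 * link-event callback:
 *
 *	static void my_client_link_work(struct work_struct *work)
 *	{
 *		struct my_client *nc =
 *			container_of(work, struct my_client, link_work);
 *
 *		if (ntb_link_is_up(nc->ntb, NULL, NULL))
 *			WARN_ON(ntb_msi_setup_mws(nc->ntb));
 *		else
 *			ntb_msi_clear_mws(nc->ntb);
 *	}
 */
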
/**
 * ntb_msi_clear_mws() - Clear all inbound memory windows
 * @ntb: NTB device context
 *
 * This function tears down the resources used by ntb_msi_setup_mws().
 */
void ntb_msi_clear_mws(struct ntb_dev *ntb)
{
	int peer;
	int peer_widx;

	for (peer = 0; peer < ntb_peer_port_count(ntb); peer++) {
		peer_widx = ntb_peer_highest_mw_idx(ntb, peer);
		if (peer_widx < 0)
			continue;

		ntb_mw_clear_trans(ntb, peer, peer_widx);
	}
}
EXPORT_SYMBOL(ntb_msi_clear_mws);

struct ntb_msi_devres {
	struct ntb_dev *ntb;
	struct msi_desc *entry;
	struct ntb_msi_desc *msi_desc;
};

/*
 * Convert an MSI message into a client-visible descriptor: store the
 * message's offset from the MSI memory window base, plus its data word.
 */
static int ntb_msi_set_desc(struct ntb_dev *ntb, struct msi_desc *entry,
			    struct ntb_msi_desc *msi_desc)
{
	u64 addr;

	addr = entry->msg.address_lo +
		((uint64_t)entry->msg.address_hi << 32);

	if (addr < ntb->msi->base_addr || addr >= ntb->msi->end_addr) {
		dev_warn_once(&ntb->dev,
			      "IRQ %d: MSI Address not within the memory window (%llx, [%llx %llx])\n",
			      entry->irq, addr, ntb->msi->base_addr,
			      ntb->msi->end_addr);
		return -EFAULT;
	}

	msi_desc->addr_offset = addr - ntb->msi->base_addr;
	msi_desc->data = entry->msg.data;

	return 0;
}

/*
 * Called whenever the kernel rewrites an MSI message (e.g. on IRQ
 * affinity changes) so the cached descriptor stays current and the
 * client is notified.
 */
static void ntb_msi_write_msg(struct msi_desc *entry, void *data)
{
	struct ntb_msi_devres *dr = data;

	WARN_ON(ntb_msi_set_desc(dr->ntb, entry, dr->msi_desc));

	if (dr->ntb->msi->desc_changed)
		dr->ntb->msi->desc_changed(dr->ntb->ctx);
}

static void ntbm_msi_callback_release(struct device *dev, void *res)
{
	struct ntb_msi_devres *dr = res;

	dr->entry->write_msi_msg = NULL;
	dr->entry->write_msi_msg_data = NULL;
}

static int ntbm_msi_setup_callback(struct ntb_dev *ntb, struct msi_desc *entry,
				   struct ntb_msi_desc *msi_desc)
{
	struct ntb_msi_devres *dr;

	dr = devres_alloc(ntbm_msi_callback_release,
			  sizeof(struct ntb_msi_devres), GFP_KERNEL);
	if (!dr)
		return -ENOMEM;

	dr->ntb = ntb;
	dr->entry = entry;
	dr->msi_desc = msi_desc;

	devres_add(&ntb->dev, dr);

	dr->entry->write_msi_msg = ntb_msi_write_msg;
	dr->entry->write_msi_msg_data = dr;

	return 0;
}
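
/*
 * Example descriptor exchange (a minimal sketch; the scratchpad layout
 * and helper below are hypothetical, not part of this file).  This
 * library never transfers ntb_msi_desc structures itself, so a client
 * typically publishes them through scratchpad registers, both at setup
 * time and again from its desc_changed callback:
 *
 *	static void my_client_publish_desc(struct my_client *nc, int peer,
 *					   int idx, struct ntb_msi_desc *desc)
 *	{
 *		ntb_peer_spad_write(nc->ntb, peer, 2 * idx,
 *				    desc->addr_offset);
 *		ntb_peer_spad_write(nc->ntb, peer, 2 * idx + 1, desc->data);
 *	}
 */
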
/**
 * ntbm_msi_request_threaded_irq() - allocate an MSI interrupt
 * @ntb: NTB device context
 * @handler: Function to be called when the IRQ occurs
 * @thread_fn: Function to be called in a threaded interrupt context. NULL
 *	for clients which handle everything in @handler
 * @name: An ascii name for the claiming device, dev_name(dev) if NULL
 * @dev_id: A cookie passed back to the handler function
 * @msi_desc: MSI descriptor data which triggers the interrupt
 *
 * This function assigns an interrupt handler to an unused
 * MSI interrupt and returns the descriptor used to trigger
 * it. The descriptor can then be sent to a peer to trigger
 * the interrupt.
 *
 * The interrupt resource is managed with devres so it will
 * be automatically freed when the NTB device is torn down.
 *
 * If an IRQ allocated with this function needs to be freed
 * separately, ntbm_msi_free_irq() must be used.
 *
 * Return: IRQ number assigned on success, otherwise a negative error number.
 */
int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, irq_handler_t handler,
				  irq_handler_t thread_fn,
				  const char *name, void *dev_id,
				  struct ntb_msi_desc *msi_desc)
{
	struct msi_desc *entry;
	int ret;

	if (!ntb->msi)
		return -EINVAL;

	for_each_pci_msi_entry(entry, ntb->pdev) {
		if (irq_has_action(entry->irq))
			continue;

		ret = devm_request_threaded_irq(&ntb->dev, entry->irq, handler,
						thread_fn, 0, name, dev_id);
		if (ret)
			continue;

		if (ntb_msi_set_desc(ntb, entry, msi_desc)) {
			devm_free_irq(&ntb->dev, entry->irq, dev_id);
			continue;
		}

		ret = ntbm_msi_setup_callback(ntb, entry, msi_desc);
		if (ret) {
			devm_free_irq(&ntb->dev, entry->irq, dev_id);
			return ret;
		}

		return entry->irq;
	}

	return -ENODEV;
}
EXPORT_SYMBOL(ntbm_msi_request_threaded_irq);

static int ntbm_msi_callback_match(struct device *dev, void *res, void *data)
{
	struct ntb_dev *ntb = dev_ntb(dev);
	struct ntb_msi_devres *dr = res;

	return dr->ntb == ntb && dr->entry == data;
}

/**
 * ntbm_msi_free_irq() - free an interrupt
 * @ntb: NTB device context
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * This function should be used to manually free IRQs allocated with
 * ntbm_msi_request_threaded_irq().
 */
void ntbm_msi_free_irq(struct ntb_dev *ntb, unsigned int irq, void *dev_id)
{
	struct msi_desc *entry = irq_get_msi_desc(irq);

	entry->write_msi_msg = NULL;
	entry->write_msi_msg_data = NULL;

	WARN_ON(devres_destroy(&ntb->dev, ntbm_msi_callback_release,
			       ntbm_msi_callback_match, entry));

	devm_free_irq(&ntb->dev, irq, dev_id);
}
EXPORT_SYMBOL(ntbm_msi_free_irq);

/**
 * ntb_msi_peer_trigger() - Trigger an interrupt handler on a peer
 * @ntb: NTB device context
 * @peer: Peer index
 * @desc: MSI descriptor data which triggers the interrupt
 *
 * This function triggers an interrupt on a peer. It requires
 * the descriptor structure to have been passed from that peer
 * by some other means.
 *
 * Return: Zero on success, otherwise a negative error number.
 */
int ntb_msi_peer_trigger(struct ntb_dev *ntb, int peer,
			 struct ntb_msi_desc *desc)
{
	int idx;

	if (!ntb->msi)
		return -EINVAL;

	idx = desc->addr_offset / sizeof(*ntb->msi->peer_mws[peer]);

	iowrite32(desc->data, &ntb->msi->peer_mws[peer][idx]);

	return 0;
}
EXPORT_SYMBOL(ntb_msi_peer_trigger);
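
/*
 * Example interrupt allocation and triggering (a minimal sketch; the
 * handler and variable names are hypothetical, not part of this file).
 * One side allocates an IRQ and sends the resulting descriptor to a
 * peer; the peer then fires it:
 *
 *	static irqreturn_t my_client_isr(int irq, void *dev_id)
 *	{
 *		...
 *		return IRQ_HANDLED;
 *	}
 *
 *	struct ntb_msi_desc desc;
 *	int irq;
 *
 *	irq = ntbm_msi_request_threaded_irq(ntb, my_client_isr, NULL,
 *					    "my_client", nc, &desc);
 *	if (irq < 0)
 *		return irq;
 *	// ...send desc to the peer, e.g. via scratchpads...
 *
 *	// On the peer, with the received descriptor:
 *	ret = ntb_msi_peer_trigger(peer_ntb, peer_idx, &desc);
 */
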
/**
 * ntb_msi_peer_addr() - Get the DMA address to trigger a peer's MSI interrupt
 * @ntb: NTB device context
 * @peer: Peer index
 * @desc: MSI descriptor data which triggers the interrupt
 * @msi_addr: Physical address to trigger the interrupt
 *
 * This function allows using DMA engines to trigger an interrupt
 * (for example, trigger an interrupt to process the data after
 * sending it). To trigger the interrupt, write @desc.data to the address
 * returned in @msi_addr.
 *
 * Return: Zero on success, otherwise a negative error number.
 */
int ntb_msi_peer_addr(struct ntb_dev *ntb, int peer,
		      struct ntb_msi_desc *desc,
		      phys_addr_t *msi_addr)
{
	int peer_widx = ntb_peer_mw_count(ntb) - 1 - peer;
	phys_addr_t mw_phys_addr;
	int ret;

	ret = ntb_peer_mw_get_addr(ntb, peer_widx, &mw_phys_addr, NULL);
	if (ret)
		return ret;

	if (msi_addr)
		*msi_addr = mw_phys_addr + desc->addr_offset;

	return 0;
}
EXPORT_SYMBOL(ntb_msi_peer_addr);
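
/*
 * Example DMA-triggered interrupt (a minimal sketch; it assumes a
 * dmaengine channel "chan" already allocated for the peer, an
 * already-exchanged descriptor "desc", and that the returned physical
 * address is usable as a DMA destination on this platform; names are
 * hypothetical, not part of this file).  A client can have a DMA engine
 * raise the peer's interrupt by writing the descriptor's data word to
 * the address from ntb_msi_peer_addr():
 *
 *	phys_addr_t msi_addr;
 *	int ret;
 *
 *	ret = ntb_msi_peer_addr(ntb, peer_idx, &desc, &msi_addr);
 *	if (ret)
 *		return ret;
 *
 *	// Queue an immediate-data write of desc.data to msi_addr after
 *	// the payload transfer completes:
 *	tx = dmaengine_prep_dma_imm_data(chan, msi_addr, desc.data, 0);
 */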