// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)

#include <linux/irq.h>
#include <linux/module.h>
#include <linux/ntb.h>
#include <linux/msi.h>
#include <linux/pci.h>

struct ntb_msi {
	u64 base_addr;
	u64 end_addr;

	void (*desc_changed)(void *ctx);

	u32 __iomem *peer_mws[];
};

/**
 * ntb_msi_init() - Initialize the MSI context
 * @ntb: NTB device context
 * @desc_changed: Callback invoked when the cached MSI descriptors change
 *
 * This function must be called before any other ntb_msi function.
 * It initializes the context for MSI operations and maps
 * the peer memory windows.
 *
 * This function reserves the last N outbound memory windows (where N
 * is the number of peers).
 *
 * Return: Zero on success, otherwise a negative error number.
 */
int ntb_msi_init(struct ntb_dev *ntb,
		 void (*desc_changed)(void *ctx))
{
	phys_addr_t mw_phys_addr;
	resource_size_t mw_size;
	size_t struct_size;
	int peer_widx;
	int peers;
	int ret;
	int i;

	peers = ntb_peer_port_count(ntb);
	if (peers <= 0)
		return -EINVAL;

	struct_size = sizeof(*ntb->msi) + sizeof(*ntb->msi->peer_mws) * peers;

	ntb->msi = devm_kzalloc(&ntb->dev, struct_size, GFP_KERNEL);
	if (!ntb->msi)
		return -ENOMEM;

	ntb->msi->desc_changed = desc_changed;

	for (i = 0; i < peers; i++) {
		peer_widx = ntb_peer_mw_count(ntb) - 1 - i;

		ret = ntb_peer_mw_get_addr(ntb, peer_widx, &mw_phys_addr,
					   &mw_size);
		if (ret)
			goto unroll;

		ntb->msi->peer_mws[i] = devm_ioremap(&ntb->dev, mw_phys_addr,
						     mw_size);
		if (!ntb->msi->peer_mws[i]) {
			ret = -EFAULT;
			goto unroll;
		}
	}

	return 0;

unroll:
	for (i = 0; i < peers; i++)
		if (ntb->msi->peer_mws[i])
			devm_iounmap(&ntb->dev, ntb->msi->peer_mws[i]);

	devm_kfree(&ntb->dev, ntb->msi);
	ntb->msi = NULL;
	return ret;
}
EXPORT_SYMBOL(ntb_msi_init);
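/*
 * Example (illustrative sketch, not part of this driver): a client is
 * expected to call ntb_msi_init() from its probe path, before requesting
 * any MSI interrupts. The client structure, work item, and callback names
 * below are hypothetical. The callback fires when the local descriptors
 * change, so the sketch defers work to repost them to the peers.
 *
 *	static void my_client_desc_changed(void *ctx)
 *	{
 *		struct my_client *nt = ctx;
 *
 *		schedule_work(&nt->desc_work);
 *	}
 *
 *	static int my_client_probe(struct ntb_client *c, struct ntb_dev *ntb)
 *	{
 *		int ret;
 *
 *		ret = ntb_msi_init(ntb, my_client_desc_changed);
 *		if (ret)
 *			return ret;
 *		...
 *	}
 */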
/**
 * ntb_msi_setup_mws() - Initialize the MSI inbound memory windows
 * @ntb: NTB device context
 *
 * This function sets up the required inbound memory windows. It should be
 * called from a work function after a link up event.
 *
 * Over the entire network, this function reserves the last N
 * inbound memory windows for each peer (where N is the number of peers).
 *
 * ntb_msi_init() must be called before this function.
 *
 * Return: Zero on success, otherwise a negative error number.
 */
int ntb_msi_setup_mws(struct ntb_dev *ntb)
{
	struct msi_desc *desc;
	u64 addr;
	int peer, peer_widx;
	resource_size_t addr_align, size_align, size_max;
	resource_size_t mw_size = SZ_32K;
	resource_size_t mw_min_size = mw_size;
	int i;
	int ret;

	if (!ntb->msi)
		return -EINVAL;

	msi_lock_descs(&ntb->pdev->dev);
	desc = msi_first_desc(&ntb->pdev->dev, MSI_DESC_ASSOCIATED);
	addr = desc->msg.address_lo + ((uint64_t)desc->msg.address_hi << 32);
	msi_unlock_descs(&ntb->pdev->dev);

	for (peer = 0; peer < ntb_peer_port_count(ntb); peer++) {
		peer_widx = ntb_peer_highest_mw_idx(ntb, peer);
		if (peer_widx < 0)
			return peer_widx;

		ret = ntb_mw_get_align(ntb, peer, peer_widx, &addr_align,
				       NULL, NULL);
		if (ret)
			return ret;

		addr &= ~(addr_align - 1);
	}

	for (peer = 0; peer < ntb_peer_port_count(ntb); peer++) {
		peer_widx = ntb_peer_highest_mw_idx(ntb, peer);
		if (peer_widx < 0) {
			ret = peer_widx;
			goto error_out;
		}

		ret = ntb_mw_get_align(ntb, peer, peer_widx, NULL,
				       &size_align, &size_max);
		if (ret)
			goto error_out;

		mw_size = round_up(mw_size, size_align);
		mw_size = max(mw_size, size_max);
		if (mw_size < mw_min_size)
			mw_min_size = mw_size;

		ret = ntb_mw_set_trans(ntb, peer, peer_widx,
				       addr, mw_size);
		if (ret)
			goto error_out;
	}

	ntb->msi->base_addr = addr;
	ntb->msi->end_addr = addr + mw_min_size;

	return 0;

error_out:
	for (i = 0; i < peer; i++) {
		peer_widx = ntb_peer_highest_mw_idx(ntb, i);
		if (peer_widx < 0)
			continue;

		ntb_mw_clear_trans(ntb, i, peer_widx);
	}

	return ret;
}
EXPORT_SYMBOL(ntb_msi_setup_mws);
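/*
 * Example (illustrative sketch, hypothetical client): since
 * ntb_msi_setup_mws() programs the inbound memory windows, it belongs in
 * a work item kicked off by the link-up event callback rather than in
 * the event callback itself, as the kernel-doc above notes.
 *
 *	static void my_client_link_work(struct work_struct *work)
 *	{
 *		struct my_client *nt = container_of(work, struct my_client,
 *						    link_work);
 *
 *		if (ntb_link_is_up(nt->ntb, NULL, NULL))
 *			WARN_ON(ntb_msi_setup_mws(nt->ntb));
 *	}
 */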
/**
 * ntb_msi_clear_mws() - Clear all inbound memory windows
 * @ntb: NTB device context
 *
 * This function tears down the resources used by ntb_msi_setup_mws().
 */
void ntb_msi_clear_mws(struct ntb_dev *ntb)
{
	int peer;
	int peer_widx;

	for (peer = 0; peer < ntb_peer_port_count(ntb); peer++) {
		peer_widx = ntb_peer_highest_mw_idx(ntb, peer);
		if (peer_widx < 0)
			continue;

		ntb_mw_clear_trans(ntb, peer, peer_widx);
	}
}
EXPORT_SYMBOL(ntb_msi_clear_mws);

struct ntb_msi_devres {
	struct ntb_dev *ntb;
	struct msi_desc *entry;
	struct ntb_msi_desc *msi_desc;
};

static int ntb_msi_set_desc(struct ntb_dev *ntb, struct msi_desc *entry,
			    struct ntb_msi_desc *msi_desc)
{
	u64 addr;

	addr = entry->msg.address_lo +
		((uint64_t)entry->msg.address_hi << 32);

	if (addr < ntb->msi->base_addr || addr >= ntb->msi->end_addr) {
		dev_warn_once(&ntb->dev,
			      "IRQ %d: MSI Address not within the memory window (%llx, [%llx %llx])\n",
			      entry->irq, addr, ntb->msi->base_addr,
			      ntb->msi->end_addr);
		return -EFAULT;
	}

	msi_desc->addr_offset = addr - ntb->msi->base_addr;
	msi_desc->data = entry->msg.data;

	return 0;
}

static void ntb_msi_write_msg(struct msi_desc *entry, void *data)
{
	struct ntb_msi_devres *dr = data;

	WARN_ON(ntb_msi_set_desc(dr->ntb, entry, dr->msi_desc));

	if (dr->ntb->msi->desc_changed)
		dr->ntb->msi->desc_changed(dr->ntb->ctx);
}

static void ntbm_msi_callback_release(struct device *dev, void *res)
{
	struct ntb_msi_devres *dr = res;

	dr->entry->write_msi_msg = NULL;
	dr->entry->write_msi_msg_data = NULL;
}

static int ntbm_msi_setup_callback(struct ntb_dev *ntb, struct msi_desc *entry,
				   struct ntb_msi_desc *msi_desc)
{
	struct ntb_msi_devres *dr;

	dr = devres_alloc(ntbm_msi_callback_release,
			  sizeof(struct ntb_msi_devres), GFP_KERNEL);
	if (!dr)
		return -ENOMEM;

	dr->ntb = ntb;
	dr->entry = entry;
	dr->msi_desc = msi_desc;

	devres_add(&ntb->dev, dr);

	dr->entry->write_msi_msg = ntb_msi_write_msg;
	dr->entry->write_msi_msg_data = dr;

	return 0;
}
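/*
 * Example (illustrative sketch, hypothetical client): the struct
 * ntb_msi_desc filled in by ntbm_msi_request_threaded_irq() below must
 * reach the peer through some side channel before the peer can trigger
 * the interrupt. Scratchpad registers are one common option; the
 * scratchpad indices used here are made up.
 *
 *	static void my_client_send_desc(struct my_client *nt, int peer,
 *					const struct ntb_msi_desc *desc)
 *	{
 *		ntb_peer_spad_write(nt->ntb, peer, MY_SPAD_OFF,
 *				    desc->addr_offset);
 *		ntb_peer_spad_write(nt->ntb, peer, MY_SPAD_DATA, desc->data);
 *	}
 */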
/**
 * ntbm_msi_request_threaded_irq() - allocate an MSI interrupt
 * @ntb: NTB device context
 * @handler: Function to be called when the IRQ occurs
 * @thread_fn: Function to be called in a threaded interrupt context. NULL
 *             for clients which handle everything in @handler
 * @name: An ascii name for the claiming device, dev_name(dev) if NULL
 * @dev_id: A cookie passed back to the handler function
 * @msi_desc: MSI descriptor data which triggers the interrupt
 *
 * This function assigns an interrupt handler to an unused
 * MSI interrupt and returns the descriptor used to trigger
 * it. The descriptor can then be sent to a peer to trigger
 * the interrupt.
 *
 * The interrupt resource is managed with devres so it will
 * be automatically freed when the NTB device is torn down.
 *
 * If an IRQ allocated with this function needs to be freed
 * separately, ntbm_msi_free_irq() must be used.
 *
 * Return: IRQ number assigned on success, otherwise a negative error number.
 */
int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, irq_handler_t handler,
				  irq_handler_t thread_fn,
				  const char *name, void *dev_id,
				  struct ntb_msi_desc *msi_desc)
{
	struct device *dev = &ntb->pdev->dev;
	struct msi_desc *entry;
	int ret;

	if (!ntb->msi)
		return -EINVAL;

	msi_lock_descs(dev);
	msi_for_each_desc(entry, dev, MSI_DESC_ASSOCIATED) {
		if (irq_has_action(entry->irq))
			continue;

		ret = devm_request_threaded_irq(&ntb->dev, entry->irq, handler,
						thread_fn, 0, name, dev_id);
		if (ret)
			continue;

		if (ntb_msi_set_desc(ntb, entry, msi_desc)) {
			devm_free_irq(&ntb->dev, entry->irq, dev_id);
			continue;
		}

		ret = ntbm_msi_setup_callback(ntb, entry, msi_desc);
		if (ret) {
			devm_free_irq(&ntb->dev, entry->irq, dev_id);
			goto unlock;
		}

		ret = entry->irq;
		goto unlock;
	}
	ret = -ENODEV;

unlock:
	msi_unlock_descs(dev);
	return ret;
}
EXPORT_SYMBOL(ntbm_msi_request_threaded_irq);

static int ntbm_msi_callback_match(struct device *dev, void *res, void *data)
{
	struct ntb_dev *ntb = dev_ntb(dev);
	struct ntb_msi_devres *dr = res;

	return dr->ntb == ntb && dr->entry == data;
}

/**
 * ntbm_msi_free_irq() - free an interrupt
 * @ntb: NTB device context
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * This function should be used to manually free IRQs allocated with
 * ntbm_msi_request_threaded_irq().
 */
void ntbm_msi_free_irq(struct ntb_dev *ntb, unsigned int irq, void *dev_id)
{
	struct msi_desc *entry = irq_get_msi_desc(irq);

	entry->write_msi_msg = NULL;
	entry->write_msi_msg_data = NULL;

	WARN_ON(devres_destroy(&ntb->dev, ntbm_msi_callback_release,
			       ntbm_msi_callback_match, entry));

	devm_free_irq(&ntb->dev, irq, dev_id);
}
EXPORT_SYMBOL(ntbm_msi_free_irq);

/**
 * ntb_msi_peer_trigger() - Trigger an interrupt handler on a peer
 * @ntb: NTB device context
 * @peer: Peer index
 * @desc: MSI descriptor data which triggers the interrupt
 *
 * This function triggers an interrupt on a peer. It requires
 * the descriptor structure to have been passed from that peer
 * by some other means.
 *
 * Return: Zero on success, otherwise a negative error number.
 */
int ntb_msi_peer_trigger(struct ntb_dev *ntb, int peer,
			 struct ntb_msi_desc *desc)
{
	int idx;

	if (!ntb->msi)
		return -EINVAL;

	idx = desc->addr_offset / sizeof(*ntb->msi->peer_mws[peer]);

	iowrite32(desc->data, &ntb->msi->peer_mws[peer][idx]);

	return 0;
}
EXPORT_SYMBOL(ntb_msi_peer_trigger);
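/*
 * Example (illustrative sketch, hypothetical client): once a descriptor
 * from a peer has been received (e.g. read back out of scratchpads into
 * nt->peer_desc[peer]), raising that peer's handler is a single call.
 *
 *	static void my_client_kick_peer(struct my_client *nt, int peer)
 *	{
 *		WARN_ON(ntb_msi_peer_trigger(nt->ntb, peer,
 *					     &nt->peer_desc[peer]));
 *	}
 */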
/**
 * ntb_msi_peer_addr() - Get the DMA address to trigger a peer's MSI interrupt
 * @ntb: NTB device context
 * @peer: Peer index
 * @desc: MSI descriptor data which triggers the interrupt
 * @msi_addr: Physical address to trigger the interrupt
 *
 * This function allows using DMA engines to trigger an interrupt
 * (for example, trigger an interrupt to process the data after
 * sending it). To trigger the interrupt, write @desc.data to the address
 * returned in @msi_addr.
 *
 * Return: Zero on success, otherwise a negative error number.
 */
int ntb_msi_peer_addr(struct ntb_dev *ntb, int peer,
		      struct ntb_msi_desc *desc,
		      phys_addr_t *msi_addr)
{
	int peer_widx = ntb_peer_mw_count(ntb) - 1 - peer;
	phys_addr_t mw_phys_addr;
	int ret;

	ret = ntb_peer_mw_get_addr(ntb, peer_widx, &mw_phys_addr, NULL);
	if (ret)
		return ret;

	if (msi_addr)
		*msi_addr = mw_phys_addr + desc->addr_offset;

	return 0;
}
EXPORT_SYMBOL(ntb_msi_peer_addr);
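/*
 * Example (illustrative sketch, hypothetical client): with the address
 * from ntb_msi_peer_addr(), a DMA engine can raise the peer's interrupt
 * after a payload transfer completes by writing @desc.data to that
 * address, e.g. via an immediate-data descriptor.
 *
 *	static int my_client_dma_kick(struct my_client *nt, int peer)
 *	{
 *		struct dma_async_tx_descriptor *tx;
 *		phys_addr_t msi_addr;
 *		int ret;
 *
 *		ret = ntb_msi_peer_addr(nt->ntb, peer,
 *					&nt->peer_desc[peer], &msi_addr);
 *		if (ret)
 *			return ret;
 *
 *		tx = dmaengine_prep_dma_imm_data(nt->dma_chan, msi_addr,
 *						 nt->peer_desc[peer].data, 0);
 *		if (!tx)
 *			return -ENXIO;
 *
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(nt->dma_chan);
 *		return 0;
 *	}
 */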