// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)

#include <linux/irq.h>
#include <linux/module.h>
#include <linux/ntb.h>
#include <linux/msi.h>
#include <linux/pci.h>

struct ntb_msi {
	/* Inbound address range that all local MSI addresses must fall in */
	u64 base_addr;
	u64 end_addr;

	void (*desc_changed)(void *ctx);

	/* Outbound mappings of each peer's MSI memory window */
	u32 __iomem *peer_mws[];
};

/**
 * ntb_msi_init() - Initialize the MSI context
 * @ntb:	NTB device context
 * @desc_changed:	Callback invoked when an MSI descriptor changes
 *
 * This function must be called before any other ntb_msi function.
 * It initializes the context for MSI operations and maps
 * the peer memory windows.
 *
 * This function reserves the last N outbound memory windows (where N
 * is the number of peers).
 *
 * Return: Zero on success, otherwise a negative error number.
 */
int ntb_msi_init(struct ntb_dev *ntb,
		 void (*desc_changed)(void *ctx))
{
	phys_addr_t mw_phys_addr;
	resource_size_t mw_size;
	int peer_widx;
	int peers;
	int ret;
	int i;

	peers = ntb_peer_port_count(ntb);
	if (peers <= 0)
		return -EINVAL;

	ntb->msi = devm_kzalloc(&ntb->dev, struct_size(ntb->msi, peer_mws, peers),
				GFP_KERNEL);
	if (!ntb->msi)
		return -ENOMEM;

	ntb->msi->desc_changed = desc_changed;

	for (i = 0; i < peers; i++) {
		peer_widx = ntb_peer_mw_count(ntb) - 1 - i;

		ret = ntb_peer_mw_get_addr(ntb, peer_widx, &mw_phys_addr,
					   &mw_size);
		if (ret)
			goto unroll;

		ntb->msi->peer_mws[i] = devm_ioremap(&ntb->dev, mw_phys_addr,
						     mw_size);
		if (!ntb->msi->peer_mws[i]) {
			ret = -EFAULT;
			goto unroll;
		}
	}

	return 0;

unroll:
	for (i = 0; i < peers; i++)
		if (ntb->msi->peer_mws[i])
			devm_iounmap(&ntb->dev, ntb->msi->peer_mws[i]);

	devm_kfree(&ntb->dev, ntb->msi);
	ntb->msi = NULL;
	return ret;
}
EXPORT_SYMBOL(ntb_msi_init);
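/*
 * Example usage (illustrative sketch, compiled out): initializing the MSI
 * context from a client's probe path. The my_* names are hypothetical and
 * stand in for a real client; see ntb_msi_setup_mws() below for the
 * link-up half of the setup.
 */
#if 0
static void my_desc_changed(void *ctx)
{
	/* Re-send the local MSI descriptors to the peers (e.g. via spads). */
}

static int my_client_probe(struct ntb_client *client, struct ntb_dev *ntb)
{
	int ret;

	/* Must run before any other ntb_msi_* call. */
	ret = ntb_msi_init(ntb, my_desc_changed);
	if (ret)
		return ret;

	return 0;
}
#endif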
/**
 * ntb_msi_setup_mws() - Initialize the MSI inbound memory windows
 * @ntb:	NTB device context
 *
 * This function sets up the required inbound memory windows. It should be
 * called from a work function after a link up event.
 *
 * Over the entire network, this function will reserve the last N
 * inbound memory windows for each peer (where N is the number of peers).
 *
 * ntb_msi_init() must be called before this function.
 *
 * Return: Zero on success, otherwise a negative error number.
 */
int ntb_msi_setup_mws(struct ntb_dev *ntb)
{
	struct msi_desc *desc;
	u64 addr;
	int peer, peer_widx;
	resource_size_t addr_align, size_align, size_max;
	resource_size_t mw_size = SZ_32K;
	resource_size_t mw_min_size = mw_size;
	int i;
	int ret;

	if (!ntb->msi)
		return -EINVAL;

	msi_lock_descs(&ntb->pdev->dev);
	desc = msi_first_desc(&ntb->pdev->dev, MSI_DESC_ASSOCIATED);
	addr = desc->msg.address_lo + ((uint64_t)desc->msg.address_hi << 32);
	msi_unlock_descs(&ntb->pdev->dev);

	for (peer = 0; peer < ntb_peer_port_count(ntb); peer++) {
		peer_widx = ntb_peer_highest_mw_idx(ntb, peer);
		if (peer_widx < 0)
			return peer_widx;

		ret = ntb_mw_get_align(ntb, peer, peer_widx, &addr_align,
				       NULL, NULL);
		if (ret)
			return ret;

		addr &= ~(addr_align - 1);
	}

	for (peer = 0; peer < ntb_peer_port_count(ntb); peer++) {
		peer_widx = ntb_peer_highest_mw_idx(ntb, peer);
		if (peer_widx < 0) {
			ret = peer_widx;
			goto error_out;
		}

		ret = ntb_mw_get_align(ntb, peer, peer_widx, NULL,
				       &size_align, &size_max);
		if (ret)
			goto error_out;

		/* Clamp to the hardware limit and track the smallest window */
		mw_size = round_up(mw_size, size_align);
		mw_size = min(mw_size, size_max);
		if (mw_size < mw_min_size)
			mw_min_size = mw_size;

		ret = ntb_mw_set_trans(ntb, peer, peer_widx,
				       addr, mw_size);
		if (ret)
			goto error_out;
	}

	ntb->msi->base_addr = addr;
	ntb->msi->end_addr = addr + mw_min_size;

	return 0;

error_out:
	/* Unwind only the windows already set, using each peer's own index */
	for (i = 0; i < peer; i++) {
		peer_widx = ntb_peer_highest_mw_idx(ntb, i);
		if (peer_widx < 0)
			continue;

		ntb_mw_clear_trans(ntb, i, peer_widx);
	}

	return ret;
}
EXPORT_SYMBOL(ntb_msi_setup_mws);
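/*
 * Example usage (illustrative sketch, compiled out): deferring
 * ntb_msi_setup_mws() to a work item after a link-up event, as the
 * kernel-doc above recommends. my_client and my_link_work() are
 * hypothetical names used only for illustration.
 */
#if 0
struct my_client {
	struct ntb_dev *ntb;
	struct work_struct link_work;
};

static void my_link_work(struct work_struct *work)
{
	struct my_client *nc = container_of(work, struct my_client, link_work);

	if (ntb_link_is_up(nc->ntb, NULL, NULL))
		WARN_ON(ntb_msi_setup_mws(nc->ntb));
	else
		ntb_msi_clear_mws(nc->ntb);
}

static void my_link_event(void *ctx)
{
	struct my_client *nc = ctx;

	/* Do the memory window setup outside of interrupt context */
	schedule_work(&nc->link_work);
}
#endif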
/**
 * ntb_msi_clear_mws() - Clear all inbound memory windows
 * @ntb:	NTB device context
 *
 * This function tears down the resources used by ntb_msi_setup_mws().
 */
void ntb_msi_clear_mws(struct ntb_dev *ntb)
{
	int peer;
	int peer_widx;

	for (peer = 0; peer < ntb_peer_port_count(ntb); peer++) {
		peer_widx = ntb_peer_highest_mw_idx(ntb, peer);
		if (peer_widx < 0)
			continue;

		ntb_mw_clear_trans(ntb, peer, peer_widx);
	}
}
EXPORT_SYMBOL(ntb_msi_clear_mws);

struct ntb_msi_devres {
	struct ntb_dev *ntb;
	struct msi_desc *entry;
	struct ntb_msi_desc *msi_desc;
};

/* Derive a transferable descriptor (offset + data) from an MSI entry */
static int ntb_msi_set_desc(struct ntb_dev *ntb, struct msi_desc *entry,
			    struct ntb_msi_desc *msi_desc)
{
	u64 addr;

	addr = entry->msg.address_lo +
		((uint64_t)entry->msg.address_hi << 32);

	if (addr < ntb->msi->base_addr || addr >= ntb->msi->end_addr) {
		dev_warn_once(&ntb->dev,
			      "IRQ %d: MSI Address not within the memory window (%llx, [%llx %llx])\n",
			      entry->irq, addr, ntb->msi->base_addr,
			      ntb->msi->end_addr);
		return -EFAULT;
	}

	msi_desc->addr_offset = addr - ntb->msi->base_addr;
	msi_desc->data = entry->msg.data;

	return 0;
}

/* Called whenever the kernel rewrites an MSI message (e.g. on an IRQ
 * affinity change): refresh the descriptor and notify the client.
 */
static void ntb_msi_write_msg(struct msi_desc *entry, void *data)
{
	struct ntb_msi_devres *dr = data;

	WARN_ON(ntb_msi_set_desc(dr->ntb, entry, dr->msi_desc));

	if (dr->ntb->msi->desc_changed)
		dr->ntb->msi->desc_changed(dr->ntb->ctx);
}

static void ntbm_msi_callback_release(struct device *dev, void *res)
{
	struct ntb_msi_devres *dr = res;

	dr->entry->write_msi_msg = NULL;
	dr->entry->write_msi_msg_data = NULL;
}

static int ntbm_msi_setup_callback(struct ntb_dev *ntb, struct msi_desc *entry,
				   struct ntb_msi_desc *msi_desc)
{
	struct ntb_msi_devres *dr;

	dr = devres_alloc(ntbm_msi_callback_release,
			  sizeof(struct ntb_msi_devres), GFP_KERNEL);
	if (!dr)
		return -ENOMEM;

	dr->ntb = ntb;
	dr->entry = entry;
	dr->msi_desc = msi_desc;

	devres_add(&ntb->dev, dr);

	dr->entry->write_msi_msg = ntb_msi_write_msg;
	dr->entry->write_msi_msg_data = dr;

	return 0;
}
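/*
 * Example usage (illustrative sketch, compiled out): a client's
 * desc_changed callback, invoked via ntb_msi_write_msg() above whenever
 * the kernel rewrites an MSI message. It assumes the hypothetical
 * struct my_client additionally tracks msi_descs[] and num_irqs; the
 * scratchpad layout is likewise hypothetical.
 */
#if 0
static void my_desc_changed(void *ctx)
{
	struct my_client *nc = ctx;
	int i;

	/* Re-publish the (now stale on the peer) descriptors via spads */
	for (i = 0; i < nc->num_irqs; i++) {
		ntb_peer_spad_write(nc->ntb, 0, 2 * i,
				    nc->msi_descs[i].addr_offset);
		ntb_peer_spad_write(nc->ntb, 0, 2 * i + 1,
				    nc->msi_descs[i].data);
	}
}
#endif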
/**
 * ntbm_msi_request_threaded_irq() - allocate an MSI interrupt
 * @ntb:	NTB device context
 * @handler:	Function to be called when the IRQ occurs
 * @thread_fn:	Function to be called in a threaded interrupt context. NULL
 *		for clients which handle everything in @handler
 * @name:	An ascii name for the claiming device, dev_name(dev) if NULL
 * @dev_id:	A cookie passed back to the handler function
 * @msi_desc:	MSI descriptor data which triggers the interrupt
 *
 * This function assigns an interrupt handler to an unused
 * MSI interrupt and returns the descriptor used to trigger
 * it. The descriptor can then be sent to a peer to trigger
 * the interrupt.
 *
 * The interrupt resource is managed with devres so it will
 * be automatically freed when the NTB device is torn down.
 *
 * If an IRQ allocated with this function needs to be freed
 * separately, ntbm_msi_free_irq() must be used.
 *
 * Return: IRQ number assigned on success, otherwise a negative error number.
 */
int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, irq_handler_t handler,
				  irq_handler_t thread_fn,
				  const char *name, void *dev_id,
				  struct ntb_msi_desc *msi_desc)
{
	struct device *dev = &ntb->pdev->dev;
	struct msi_desc *entry;
	int ret;

	if (!ntb->msi)
		return -EINVAL;

	msi_lock_descs(dev);
	msi_for_each_desc(entry, dev, MSI_DESC_ASSOCIATED) {
		if (irq_has_action(entry->irq))
			continue;

		ret = devm_request_threaded_irq(&ntb->dev, entry->irq, handler,
						thread_fn, 0, name, dev_id);
		if (ret)
			continue;

		if (ntb_msi_set_desc(ntb, entry, msi_desc)) {
			devm_free_irq(&ntb->dev, entry->irq, dev_id);
			continue;
		}

		ret = ntbm_msi_setup_callback(ntb, entry, msi_desc);
		if (ret) {
			devm_free_irq(&ntb->dev, entry->irq, dev_id);
			goto unlock;
		}

		ret = entry->irq;
		goto unlock;
	}
	ret = -ENODEV;

unlock:
	msi_unlock_descs(dev);
	return ret;
}
EXPORT_SYMBOL(ntbm_msi_request_threaded_irq);

static int ntbm_msi_callback_match(struct device *dev, void *res, void *data)
{
	struct ntb_dev *ntb = dev_ntb(dev);
	struct ntb_msi_devres *dr = res;

	return dr->ntb == ntb && dr->entry == data;
}

/**
 * ntbm_msi_free_irq() - free an interrupt
 * @ntb:	NTB device context
 * @irq:	Interrupt line to free
 * @dev_id:	Device identity to free
 *
 * This function should be used to manually free IRQs allocated with
 * ntbm_msi_request_threaded_irq().
 */
void ntbm_msi_free_irq(struct ntb_dev *ntb, unsigned int irq, void *dev_id)
{
	struct msi_desc *entry = irq_get_msi_desc(irq);

	entry->write_msi_msg = NULL;
	entry->write_msi_msg_data = NULL;

	WARN_ON(devres_destroy(&ntb->dev, ntbm_msi_callback_release,
			       ntbm_msi_callback_match, entry));

	devm_free_irq(&ntb->dev, irq, dev_id);
}
EXPORT_SYMBOL(ntbm_msi_free_irq);

/**
 * ntb_msi_peer_trigger() - Trigger an interrupt handler on a peer
 * @ntb:	NTB device context
 * @peer:	Peer index
 * @desc:	MSI descriptor data which triggers the interrupt
 *
 * This function triggers an interrupt on a peer. It requires
 * the descriptor structure to have been passed from that peer
 * by some other means.
 *
 * Return: Zero on success, otherwise a negative error number.
 */
int ntb_msi_peer_trigger(struct ntb_dev *ntb, int peer,
			 struct ntb_msi_desc *desc)
{
	int idx;

	if (!ntb->msi)
		return -EINVAL;

	idx = desc->addr_offset / sizeof(*ntb->msi->peer_mws[peer]);

	iowrite32(desc->data, &ntb->msi->peer_mws[peer][idx]);

	return 0;
}
EXPORT_SYMBOL(ntb_msi_peer_trigger);
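/*
 * Example usage (illustrative sketch, compiled out): the two halves of
 * the MSI handshake. One side allocates an IRQ and obtains a descriptor;
 * after the descriptor has been exchanged (e.g. via spads, not shown),
 * the other side uses it to trigger the interrupt. my_isr() and the
 * other my_* names are hypothetical.
 */
#if 0
/* Side A: allocate an unused MSI vector and get its descriptor */
static irqreturn_t my_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int my_setup_irq(struct ntb_dev *ntb, struct ntb_msi_desc *desc)
{
	int irq;

	irq = ntbm_msi_request_threaded_irq(ntb, my_isr, NULL,
					    "my_client", ntb, desc);
	if (irq < 0)
		return irq;

	/* ... send *desc to the peer, e.g. through scratchpads ... */
	return irq;
}

/* Side B: fire side A's handler using the descriptor received from it */
static void my_kick_peer(struct ntb_dev *ntb, struct ntb_msi_desc *peer_desc)
{
	WARN_ON(ntb_msi_peer_trigger(ntb, 0, peer_desc));
}
#endif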
/**
 * ntb_msi_peer_addr() - Get the DMA address to trigger a peer's MSI interrupt
 * @ntb:	NTB device context
 * @peer:	Peer index
 * @desc:	MSI descriptor data which triggers the interrupt
 * @msi_addr:	Physical address to trigger the interrupt
 *
 * This function allows using DMA engines to trigger an interrupt
 * (for example, trigger an interrupt to process the data after
 * sending it). To trigger the interrupt, write @desc.data to the address
 * returned in @msi_addr.
 *
 * Return: Zero on success, otherwise a negative error number.
 */
int ntb_msi_peer_addr(struct ntb_dev *ntb, int peer,
		      struct ntb_msi_desc *desc,
		      phys_addr_t *msi_addr)
{
	int peer_widx = ntb_peer_mw_count(ntb) - 1 - peer;
	phys_addr_t mw_phys_addr;
	int ret;

	ret = ntb_peer_mw_get_addr(ntb, peer_widx, &mw_phys_addr, NULL);
	if (ret)
		return ret;

	if (msi_addr)
		*msi_addr = mw_phys_addr + desc->addr_offset;

	return 0;
}
EXPORT_SYMBOL(ntb_msi_peer_addr);
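/*
 * Example usage (illustrative sketch, compiled out): using
 * ntb_msi_peer_addr() so a DMA engine can raise the peer's interrupt by
 * writing desc->data to the returned address once a transfer completes.
 * Programming the DMA engine itself is elided; my_dma_trigger_setup()
 * is a hypothetical name.
 */
#if 0
static int my_dma_trigger_setup(struct ntb_dev *ntb, int peer,
				struct ntb_msi_desc *peer_desc,
				phys_addr_t *trigger_addr)
{
	int ret;

	ret = ntb_msi_peer_addr(ntb, peer, peer_desc, trigger_addr);
	if (ret)
		return ret;

	/*
	 * Append a DMA immediate-write of peer_desc->data to *trigger_addr
	 * after the payload transfer, so the peer is interrupted only once
	 * the data has landed.
	 */
	return 0;
}
#endif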