// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CXL Flash Device Driver
 *
 * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *             Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2018 IBM Corporation
 */

#include <linux/file.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>

#include <misc/ocxl.h>

#include <uapi/misc/cxl.h>

#include "backend.h"
#include "ocxl_hw.h"

/*
 * Pseudo-filesystem to allocate inodes.
 */

#define OCXLFLASH_FS_MAGIC	0x1697698f

static int ocxlflash_fs_cnt;
static struct vfsmount *ocxlflash_vfs_mount;

static int ocxlflash_fs_init_fs_context(struct fs_context *fc)
{
        return init_pseudo(fc, OCXLFLASH_FS_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type ocxlflash_fs_type = {
        .name           = "ocxlflash",
        .owner          = THIS_MODULE,
        .init_fs_context = ocxlflash_fs_init_fs_context,
        .kill_sb        = kill_anon_super,
};

/*
 * ocxlflash_release_mapping() - release the memory mapping
 * @ctx: Context whose mapping is to be released.
 */
static void ocxlflash_release_mapping(struct ocxlflash_context *ctx)
{
        if (ctx->mapping)
                simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt);
        ctx->mapping = NULL;
}

/*
 * ocxlflash_getfile() - allocate pseudo filesystem, inode, and the file
 * @dev: Generic device of the host.
 * @name: Name of the pseudo filesystem.
 * @fops: File operations.
 * @priv: Private data.
 * @flags: Flags for the file.
 *
 * Return: pointer to the file on success, ERR_PTR on failure
 */
static struct file *ocxlflash_getfile(struct device *dev, const char *name,
                                      const struct file_operations *fops,
                                      void *priv, int flags)
{
        struct file *file;
        struct inode *inode;
        int rc;

        if (fops->owner && !try_module_get(fops->owner)) {
                dev_err(dev, "%s: Owner does not exist\n", __func__);
                rc = -ENOENT;
                goto err1;
        }

        rc = simple_pin_fs(&ocxlflash_fs_type, &ocxlflash_vfs_mount,
                           &ocxlflash_fs_cnt);
        if (unlikely(rc < 0)) {
                dev_err(dev, "%s: Cannot mount ocxlflash pseudofs rc=%d\n",
                        __func__, rc);
                goto err2;
        }

        inode = alloc_anon_inode(ocxlflash_vfs_mount->mnt_sb);
        if (IS_ERR(inode)) {
                rc = PTR_ERR(inode);
                dev_err(dev, "%s: alloc_anon_inode failed rc=%d\n",
                        __func__, rc);
                goto err3;
        }

        file = alloc_file_pseudo(inode, ocxlflash_vfs_mount, name,
                                 flags & (O_ACCMODE | O_NONBLOCK), fops);
        if (IS_ERR(file)) {
                rc = PTR_ERR(file);
                dev_err(dev, "%s: alloc_file failed rc=%d\n",
                        __func__, rc);
                goto err4;
        }

        file->private_data = priv;
out:
        return file;
err4:
        iput(inode);
err3:
        simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt);
err2:
        module_put(fops->owner);
err1:
        file = ERR_PTR(rc);
        goto out;
}
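/*
 * Usage sketch (illustrative only, not part of the driver): a caller pairs
 * ocxlflash_getfile() with an unused fd, mirroring what ocxlflash_get_fd()
 * does further below. The names "my_fops" and "my_priv" are hypothetical.
 *
 *	int fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
 *	struct file *file = ocxlflash_getfile(dev, "ocxlflash:example",
 *					      &my_fops, my_priv,
 *					      O_RDWR | O_CLOEXEC);
 *	if (IS_ERR(file))
 *		put_unused_fd(fd);
 *	else
 *		fd_install(fd, file);
 */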
/**
 * ocxlflash_psa_map() - map the process specific MMIO space
 * @ctx_cookie: Adapter context for which the mapping needs to be done.
 *
 * Return: MMIO pointer of the mapped region
 */
static void __iomem *ocxlflash_psa_map(void *ctx_cookie)
{
        struct ocxlflash_context *ctx = ctx_cookie;
        struct device *dev = ctx->hw_afu->dev;

        mutex_lock(&ctx->state_mutex);
        if (ctx->state != STARTED) {
                dev_err(dev, "%s: Context not started, state=%d\n", __func__,
                        ctx->state);
                mutex_unlock(&ctx->state_mutex);
                return NULL;
        }
        mutex_unlock(&ctx->state_mutex);

        return ioremap(ctx->psn_phys, ctx->psn_size);
}

/**
 * ocxlflash_psa_unmap() - unmap the process specific MMIO space
 * @addr: MMIO pointer to unmap.
 */
static void ocxlflash_psa_unmap(void __iomem *addr)
{
        iounmap(addr);
}

/**
 * ocxlflash_process_element() - get process element of the adapter context
 * @ctx_cookie: Adapter context associated with the process element.
 *
 * Return: process element of the adapter context
 */
static int ocxlflash_process_element(void *ctx_cookie)
{
        struct ocxlflash_context *ctx = ctx_cookie;

        return ctx->pe;
}

/**
 * afu_map_irq() - map the interrupt of the adapter context
 * @flags: Flags.
 * @ctx: Adapter context.
 * @num: Per-context AFU interrupt number.
 * @handler: Interrupt handler to register.
 * @cookie: Interrupt handler private data.
 * @name: Name of the interrupt.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_map_irq(u64 flags, struct ocxlflash_context *ctx, int num,
                       irq_handler_t handler, void *cookie, char *name)
{
        struct ocxl_hw_afu *afu = ctx->hw_afu;
        struct device *dev = afu->dev;
        struct ocxlflash_irqs *irq;
        void __iomem *vtrig;
        u32 virq;
        int rc = 0;

        if (num < 0 || num >= ctx->num_irqs) {
                dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num);
                rc = -ENOENT;
                goto out;
        }

        irq = &ctx->irqs[num];
        virq = irq_create_mapping(NULL, irq->hwirq);
        if (unlikely(!virq)) {
                dev_err(dev, "%s: irq_create_mapping failed\n", __func__);
                rc = -ENOMEM;
                goto out;
        }

        rc = request_irq(virq, handler, 0, name, cookie);
        if (unlikely(rc)) {
                dev_err(dev, "%s: request_irq failed rc=%d\n", __func__, rc);
                goto err1;
        }

        vtrig = ioremap(irq->ptrig, PAGE_SIZE);
        if (unlikely(!vtrig)) {
                dev_err(dev, "%s: Trigger page mapping failed\n", __func__);
                rc = -ENOMEM;
                goto err2;
        }

        irq->virq = virq;
        irq->vtrig = vtrig;
out:
        return rc;
err2:
        free_irq(virq, cookie);
err1:
        irq_dispose_mapping(virq);
        goto out;
}

/**
 * ocxlflash_map_afu_irq() - map the interrupt of the adapter context
 * @ctx_cookie: Adapter context.
 * @num: Per-context AFU interrupt number.
 * @handler: Interrupt handler to register.
 * @cookie: Interrupt handler private data.
 * @name: Name of the interrupt.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_map_afu_irq(void *ctx_cookie, int num,
                                 irq_handler_t handler, void *cookie,
                                 char *name)
{
        return afu_map_irq(0, ctx_cookie, num, handler, cookie, name);
}
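/*
 * Mapping sketch (illustrative only): interrupts must be allocated via
 * alloc_afu_irqs() before they can be mapped, and every successful map is
 * paired with an unmap. The handler name "my_handler" is hypothetical;
 * ocxlflash_start_work() below shows the real in-driver usage.
 *
 *	rc = alloc_afu_irqs(ctx, 1);
 *	if (!rc)
 *		rc = afu_map_irq(0, ctx, 0, my_handler, ctx, "my-irq");
 *	...
 *	afu_unmap_irq(0, ctx, 0, ctx);
 *	free_afu_irqs(ctx);
 */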
/**
 * afu_unmap_irq() - unmap the interrupt
 * @flags: Flags.
 * @ctx: Adapter context.
 * @num: Per-context AFU interrupt number.
 * @cookie: Interrupt handler private data.
 */
static void afu_unmap_irq(u64 flags, struct ocxlflash_context *ctx, int num,
                          void *cookie)
{
        struct ocxl_hw_afu *afu = ctx->hw_afu;
        struct device *dev = afu->dev;
        struct ocxlflash_irqs *irq;

        if (num < 0 || num >= ctx->num_irqs) {
                dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num);
                return;
        }

        irq = &ctx->irqs[num];
        if (irq->vtrig)
                iounmap(irq->vtrig);

        if (irq_find_mapping(NULL, irq->hwirq)) {
                free_irq(irq->virq, cookie);
                irq_dispose_mapping(irq->virq);
        }

        memset(irq, 0, sizeof(*irq));
}

/**
 * ocxlflash_unmap_afu_irq() - unmap the interrupt
 * @ctx_cookie: Adapter context.
 * @num: Per-context AFU interrupt number.
 * @cookie: Interrupt handler private data.
 */
static void ocxlflash_unmap_afu_irq(void *ctx_cookie, int num, void *cookie)
{
        return afu_unmap_irq(0, ctx_cookie, num, cookie);
}

/**
 * ocxlflash_get_irq_objhndl() - get the object handle for an interrupt
 * @ctx_cookie: Context associated with the interrupt.
 * @irq: Interrupt number.
 *
 * Return: effective address of the mapped region
 */
static u64 ocxlflash_get_irq_objhndl(void *ctx_cookie, int irq)
{
        struct ocxlflash_context *ctx = ctx_cookie;

        if (irq < 0 || irq >= ctx->num_irqs)
                return 0;

        return (__force u64)ctx->irqs[irq].vtrig;
}

/**
 * ocxlflash_xsl_fault() - callback when translation error is triggered
 * @data: Private data provided at callback registration, the context.
 * @addr: Address that triggered the error.
 * @dsisr: Value of dsisr register.
 */
static void ocxlflash_xsl_fault(void *data, u64 addr, u64 dsisr)
{
        struct ocxlflash_context *ctx = data;

        spin_lock(&ctx->slock);
        ctx->fault_addr = addr;
        ctx->fault_dsisr = dsisr;
        ctx->pending_fault = true;
        spin_unlock(&ctx->slock);

        wake_up_all(&ctx->wq);
}
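/*
 * Event flow sketch (illustrative only, userspace): a fault recorded here is
 * drained by afu_read() as a CXL_EVENT_DATA_STORAGE event. A hypothetical
 * consumer of the context fd ("fd" is the descriptor returned by
 * ocxlflash_get_fd()) could handle it as follows:
 *
 *	struct cxl_event ev;
 *
 *	if (read(fd, &ev, sizeof(ev)) > 0 &&
 *	    ev.header.type == CXL_EVENT_DATA_STORAGE)
 *		printf("fault addr=%llx dsisr=%llx\n",
 *		       ev.fault.addr, ev.fault.dsisr);
 */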
/**
 * start_context() - local routine to start a context
 * @ctx: Adapter context to be started.
 *
 * Assign the context specific MMIO space, add and enable the PE.
 *
 * Return: 0 on success, -errno on failure
 */
static int start_context(struct ocxlflash_context *ctx)
{
        struct ocxl_hw_afu *afu = ctx->hw_afu;
        struct ocxl_afu_config *acfg = &afu->acfg;
        void *link_token = afu->link_token;
        struct device *dev = afu->dev;
        bool master = ctx->master;
        struct mm_struct *mm;
        int rc = 0;
        u32 pid;

        mutex_lock(&ctx->state_mutex);
        if (ctx->state != OPENED) {
                dev_err(dev, "%s: Context state invalid, state=%d\n",
                        __func__, ctx->state);
                rc = -EINVAL;
                goto out;
        }

        if (master) {
                ctx->psn_size = acfg->global_mmio_size;
                ctx->psn_phys = afu->gmmio_phys;
        } else {
                ctx->psn_size = acfg->pp_mmio_stride;
                ctx->psn_phys = afu->ppmmio_phys + (ctx->pe * ctx->psn_size);
        }

        /* pid and mm not set for master contexts */
        if (master) {
                pid = 0;
                mm = NULL;
        } else {
                pid = current->mm->context.id;
                mm = current->mm;
        }

        rc = ocxl_link_add_pe(link_token, ctx->pe, pid, 0, 0, mm,
                              ocxlflash_xsl_fault, ctx);
        if (unlikely(rc)) {
                dev_err(dev, "%s: ocxl_link_add_pe failed rc=%d\n",
                        __func__, rc);
                goto out;
        }

        ctx->state = STARTED;
out:
        mutex_unlock(&ctx->state_mutex);
        return rc;
}

/**
 * ocxlflash_start_context() - start a kernel context
 * @ctx_cookie: Adapter context to be started.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_start_context(void *ctx_cookie)
{
        struct ocxlflash_context *ctx = ctx_cookie;

        return start_context(ctx);
}

/**
 * ocxlflash_stop_context() - stop a context
 * @ctx_cookie: Adapter context to be stopped.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_stop_context(void *ctx_cookie)
{
        struct ocxlflash_context *ctx = ctx_cookie;
        struct ocxl_hw_afu *afu = ctx->hw_afu;
        struct ocxl_afu_config *acfg = &afu->acfg;
        struct pci_dev *pdev = afu->pdev;
        struct device *dev = afu->dev;
        enum ocxlflash_ctx_state state;
        int rc = 0;

        mutex_lock(&ctx->state_mutex);
        state = ctx->state;
        ctx->state = CLOSED;
        mutex_unlock(&ctx->state_mutex);
        if (state != STARTED)
                goto out;

        rc = ocxl_config_terminate_pasid(pdev, acfg->dvsec_afu_control_pos,
                                         ctx->pe);
        if (unlikely(rc)) {
                dev_err(dev, "%s: ocxl_config_terminate_pasid failed rc=%d\n",
                        __func__, rc);
                /* If EBUSY, PE could be referenced in future by the AFU */
                if (rc == -EBUSY)
                        goto out;
        }

        rc = ocxl_link_remove_pe(afu->link_token, ctx->pe);
        if (unlikely(rc)) {
                dev_err(dev, "%s: ocxl_link_remove_pe failed rc=%d\n",
                        __func__, rc);
                goto out;
        }
out:
        return rc;
}

/**
 * ocxlflash_afu_reset() - reset the AFU
 * @ctx_cookie: Adapter context.
 *
 * Return: 0 on success (always, until reset is supported by OCXL transport)
 */
static int ocxlflash_afu_reset(void *ctx_cookie)
{
        struct ocxlflash_context *ctx = ctx_cookie;
        struct device *dev = ctx->hw_afu->dev;

        /* Pending implementation from OCXL transport services */
        dev_err_once(dev, "%s: afu_reset() fop not supported\n", __func__);

        /* Silently return success until it is implemented */
        return 0;
}
/**
 * ocxlflash_set_master() - sets the context as master
 * @ctx_cookie: Adapter context to set as master.
 */
static void ocxlflash_set_master(void *ctx_cookie)
{
        struct ocxlflash_context *ctx = ctx_cookie;

        ctx->master = true;
}

/**
 * ocxlflash_get_context() - obtains the context associated with the host
 * @pdev: PCI device associated with the host.
 * @afu_cookie: Hardware AFU associated with the host.
 *
 * Return: pointer to the host adapter context
 */
static void *ocxlflash_get_context(struct pci_dev *pdev, void *afu_cookie)
{
        struct ocxl_hw_afu *afu = afu_cookie;

        return afu->ocxl_ctx;
}

/**
 * ocxlflash_dev_context_init() - allocate and initialize an adapter context
 * @pdev: PCI device associated with the host.
 * @afu_cookie: Hardware AFU associated with the host.
 *
 * Return: adapter context on success, ERR_PTR on failure
 */
static void *ocxlflash_dev_context_init(struct pci_dev *pdev, void *afu_cookie)
{
        struct ocxl_hw_afu *afu = afu_cookie;
        struct device *dev = afu->dev;
        struct ocxlflash_context *ctx;
        int rc;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (unlikely(!ctx)) {
                dev_err(dev, "%s: Context allocation failed\n", __func__);
                rc = -ENOMEM;
                goto err1;
        }

        idr_preload(GFP_KERNEL);
        rc = idr_alloc(&afu->idr, ctx, 0, afu->max_pasid, GFP_NOWAIT);
        idr_preload_end();
        if (unlikely(rc < 0)) {
                dev_err(dev, "%s: idr_alloc failed rc=%d\n", __func__, rc);
                goto err2;
        }

        spin_lock_init(&ctx->slock);
        init_waitqueue_head(&ctx->wq);
        mutex_init(&ctx->state_mutex);

        ctx->state = OPENED;
        ctx->pe = rc;
        ctx->master = false;
        ctx->mapping = NULL;
        ctx->hw_afu = afu;
        ctx->irq_bitmap = 0;
        ctx->pending_irq = false;
        ctx->pending_fault = false;
out:
        return ctx;
err2:
        kfree(ctx);
err1:
        ctx = ERR_PTR(rc);
        goto out;
}

/**
 * ocxlflash_release_context() - releases an adapter context
 * @ctx_cookie: Adapter context to be released.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_release_context(void *ctx_cookie)
{
        struct ocxlflash_context *ctx = ctx_cookie;
        struct device *dev;
        int rc = 0;

        if (!ctx)
                goto out;

        dev = ctx->hw_afu->dev;
        mutex_lock(&ctx->state_mutex);
        if (ctx->state >= STARTED) {
                dev_err(dev, "%s: Context in use, state=%d\n", __func__,
                        ctx->state);
                mutex_unlock(&ctx->state_mutex);
                rc = -EBUSY;
                goto out;
        }
        mutex_unlock(&ctx->state_mutex);

        idr_remove(&ctx->hw_afu->idr, ctx->pe);
        ocxlflash_release_mapping(ctx);
        kfree(ctx);
out:
        return rc;
}

/**
 * ocxlflash_perst_reloads_same_image() - sets the image reload policy
 * @afu_cookie: Hardware AFU associated with the host.
 * @image: Whether to load the same image on PERST.
 */
static void ocxlflash_perst_reloads_same_image(void *afu_cookie, bool image)
{
        struct ocxl_hw_afu *afu = afu_cookie;

        afu->perst_same_image = image;
}
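/*
 * Lifecycle sketch (illustrative only): an adapter context moves through
 * OPENED -> STARTED -> CLOSED, and may only be released before it is started
 * or after it has been stopped.
 *
 *	void *ctx = ocxlflash_dev_context_init(pdev, afu);
 *
 *	if (!IS_ERR(ctx)) {
 *		ocxlflash_start_context(ctx);	- OPENED -> STARTED
 *		...
 *		ocxlflash_stop_context(ctx);	- STARTED -> CLOSED
 *		ocxlflash_release_context(ctx);
 *	}
 */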
/**
 * ocxlflash_read_adapter_vpd() - reads the adapter VPD
 * @pdev: PCI device associated with the host.
 * @buf: Buffer to get the VPD data.
 * @count: Size of buffer (maximum bytes that can be read).
 *
 * Return: size of VPD on success, -errno on failure
 */
static ssize_t ocxlflash_read_adapter_vpd(struct pci_dev *pdev, void *buf,
                                          size_t count)
{
        return pci_read_vpd(pdev, 0, count, buf);
}

/**
 * free_afu_irqs() - internal service to free interrupts
 * @ctx: Adapter context.
 */
static void free_afu_irqs(struct ocxlflash_context *ctx)
{
        struct ocxl_hw_afu *afu = ctx->hw_afu;
        struct device *dev = afu->dev;
        int i;

        if (!ctx->irqs) {
                dev_err(dev, "%s: Interrupts not allocated\n", __func__);
                return;
        }

        /* Free in reverse order; valid indices are 0..num_irqs-1 */
        for (i = ctx->num_irqs - 1; i >= 0; i--)
                ocxl_link_free_irq(afu->link_token, ctx->irqs[i].hwirq);

        kfree(ctx->irqs);
        ctx->irqs = NULL;
}

/**
 * alloc_afu_irqs() - internal service to allocate interrupts
 * @ctx: Context associated with the request.
 * @num: Number of interrupts requested.
 *
 * Return: 0 on success, -errno on failure
 */
static int alloc_afu_irqs(struct ocxlflash_context *ctx, int num)
{
        struct ocxl_hw_afu *afu = ctx->hw_afu;
        struct device *dev = afu->dev;
        struct ocxlflash_irqs *irqs;
        u64 addr;
        int rc = 0;
        int hwirq;
        int i;

        if (ctx->irqs) {
                dev_err(dev, "%s: Interrupts already allocated\n", __func__);
                rc = -EEXIST;
                goto out;
        }

        if (num > OCXL_MAX_IRQS) {
                dev_err(dev, "%s: Too many interrupts num=%d\n", __func__, num);
                rc = -EINVAL;
                goto out;
        }

        irqs = kcalloc(num, sizeof(*irqs), GFP_KERNEL);
        if (unlikely(!irqs)) {
                dev_err(dev, "%s: Context irqs allocation failed\n", __func__);
                rc = -ENOMEM;
                goto out;
        }

        for (i = 0; i < num; i++) {
                rc = ocxl_link_irq_alloc(afu->link_token, &hwirq, &addr);
                if (unlikely(rc)) {
                        dev_err(dev, "%s: ocxl_link_irq_alloc failed rc=%d\n",
                                __func__, rc);
                        goto err;
                }

                irqs[i].hwirq = hwirq;
                irqs[i].ptrig = addr;
        }

        ctx->irqs = irqs;
        ctx->num_irqs = num;
out:
        return rc;
err:
        for (i = i - 1; i >= 0; i--)
                ocxl_link_free_irq(afu->link_token, irqs[i].hwirq);
        kfree(irqs);
        goto out;
}

/**
 * ocxlflash_allocate_afu_irqs() - allocates the requested number of interrupts
 * @ctx_cookie: Context associated with the request.
 * @num: Number of interrupts requested.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_allocate_afu_irqs(void *ctx_cookie, int num)
{
        return alloc_afu_irqs(ctx_cookie, num);
}

/**
 * ocxlflash_free_afu_irqs() - frees the interrupts of an adapter context
 * @ctx_cookie: Adapter context.
 */
static void ocxlflash_free_afu_irqs(void *ctx_cookie)
{
        free_afu_irqs(ctx_cookie);
}

/**
 * ocxlflash_unconfig_afu() - unconfigure the AFU
 * @afu: AFU associated with the host.
 */
static void ocxlflash_unconfig_afu(struct ocxl_hw_afu *afu)
{
        if (afu->gmmio_virt) {
                iounmap(afu->gmmio_virt);
                afu->gmmio_virt = NULL;
        }
}
/**
 * ocxlflash_destroy_afu() - destroy the AFU structure
 * @afu_cookie: AFU to be freed.
 */
static void ocxlflash_destroy_afu(void *afu_cookie)
{
        struct ocxl_hw_afu *afu = afu_cookie;
        int pos;

        if (!afu)
                return;

        ocxlflash_release_context(afu->ocxl_ctx);
        idr_destroy(&afu->idr);

        /* Disable the AFU */
        pos = afu->acfg.dvsec_afu_control_pos;
        ocxl_config_set_afu_state(afu->pdev, pos, 0);

        ocxlflash_unconfig_afu(afu);
        kfree(afu);
}

/**
 * ocxlflash_config_fn() - configure the host function
 * @pdev: PCI device associated with the host.
 * @afu: AFU associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_config_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
{
        struct ocxl_fn_config *fcfg = &afu->fcfg;
        struct device *dev = &pdev->dev;
        u16 base, enabled, supported;
        int rc = 0;

        /* Read DVSEC config of the function */
        rc = ocxl_config_read_function(pdev, fcfg);
        if (unlikely(rc)) {
                dev_err(dev, "%s: ocxl_config_read_function failed rc=%d\n",
                        __func__, rc);
                goto out;
        }

        /* Check if function has AFUs defined, only 1 per function supported */
        if (fcfg->max_afu_index >= 0) {
                afu->is_present = true;
                if (fcfg->max_afu_index != 0)
                        dev_warn(dev, "%s: Unexpected AFU index value %d\n",
                                 __func__, fcfg->max_afu_index);
        }

        rc = ocxl_config_get_actag_info(pdev, &base, &enabled, &supported);
        if (unlikely(rc)) {
                dev_err(dev, "%s: ocxl_config_get_actag_info failed rc=%d\n",
                        __func__, rc);
                goto out;
        }

        afu->fn_actag_base = base;
        afu->fn_actag_enabled = enabled;

        ocxl_config_set_actag(pdev, fcfg->dvsec_function_pos, base, enabled);
        dev_dbg(dev, "%s: Function acTag range base=%u enabled=%u\n",
                __func__, base, enabled);

        rc = ocxl_link_setup(pdev, 0, &afu->link_token);
        if (unlikely(rc)) {
                dev_err(dev, "%s: ocxl_link_setup failed rc=%d\n",
                        __func__, rc);
                goto out;
        }

        rc = ocxl_config_set_TL(pdev, fcfg->dvsec_tl_pos);
        if (unlikely(rc)) {
                dev_err(dev, "%s: ocxl_config_set_TL failed rc=%d\n",
                        __func__, rc);
                goto err;
        }
out:
        return rc;
err:
        ocxl_link_release(pdev, afu->link_token);
        goto out;
}

/**
 * ocxlflash_unconfig_fn() - unconfigure the host function
 * @pdev: PCI device associated with the host.
 * @afu: AFU associated with the host.
 */
static void ocxlflash_unconfig_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
{
        ocxl_link_release(pdev, afu->link_token);
}
/**
 * ocxlflash_map_mmio() - map the AFU MMIO space
 * @afu: AFU associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_map_mmio(struct ocxl_hw_afu *afu)
{
        struct ocxl_afu_config *acfg = &afu->acfg;
        struct pci_dev *pdev = afu->pdev;
        struct device *dev = afu->dev;
        phys_addr_t gmmio, ppmmio;
        int rc = 0;

        rc = pci_request_region(pdev, acfg->global_mmio_bar, "ocxlflash");
        if (unlikely(rc)) {
                dev_err(dev, "%s: pci_request_region for global failed rc=%d\n",
                        __func__, rc);
                goto out;
        }
        gmmio = pci_resource_start(pdev, acfg->global_mmio_bar);
        gmmio += acfg->global_mmio_offset;

        rc = pci_request_region(pdev, acfg->pp_mmio_bar, "ocxlflash");
        if (unlikely(rc)) {
                dev_err(dev, "%s: pci_request_region for pp bar failed rc=%d\n",
                        __func__, rc);
                goto err1;
        }
        ppmmio = pci_resource_start(pdev, acfg->pp_mmio_bar);
        ppmmio += acfg->pp_mmio_offset;

        afu->gmmio_virt = ioremap(gmmio, acfg->global_mmio_size);
        if (unlikely(!afu->gmmio_virt)) {
                dev_err(dev, "%s: MMIO mapping failed\n", __func__);
                rc = -ENOMEM;
                goto err2;
        }

        afu->gmmio_phys = gmmio;
        afu->ppmmio_phys = ppmmio;
out:
        return rc;
err2:
        pci_release_region(pdev, acfg->pp_mmio_bar);
err1:
        pci_release_region(pdev, acfg->global_mmio_bar);
        goto out;
}

/**
 * ocxlflash_config_afu() - configure the host AFU
 * @pdev: PCI device associated with the host.
 * @afu: AFU associated with the host.
 *
 * Must be called _after_ host function configuration.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_config_afu(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
{
        struct ocxl_afu_config *acfg = &afu->acfg;
        struct ocxl_fn_config *fcfg = &afu->fcfg;
        struct device *dev = &pdev->dev;
        int count;
        int base;
        int pos;
        int rc = 0;

        /* This HW AFU function does not have any AFUs defined */
        if (!afu->is_present)
                goto out;

        /* Read AFU config at index 0 */
        rc = ocxl_config_read_afu(pdev, fcfg, acfg, 0);
        if (unlikely(rc)) {
                dev_err(dev, "%s: ocxl_config_read_afu failed rc=%d\n",
                        __func__, rc);
                goto out;
        }

        /* Only one AFU per function is supported, so actag_base is same */
        base = afu->fn_actag_base;
        count = min_t(int, acfg->actag_supported, afu->fn_actag_enabled);
        pos = acfg->dvsec_afu_control_pos;

        ocxl_config_set_afu_actag(pdev, pos, base, count);
        dev_dbg(dev, "%s: acTag base=%d enabled=%d\n", __func__, base, count);
        afu->afu_actag_base = base;
        afu->afu_actag_enabled = count;
        afu->max_pasid = 1 << acfg->pasid_supported_log;

        ocxl_config_set_afu_pasid(pdev, pos, 0, acfg->pasid_supported_log);

        rc = ocxlflash_map_mmio(afu);
        if (unlikely(rc)) {
                dev_err(dev, "%s: ocxlflash_map_mmio failed rc=%d\n",
                        __func__, rc);
                goto out;
        }

        /* Enable the AFU */
        ocxl_config_set_afu_state(pdev, acfg->dvsec_afu_control_pos, 1);
out:
        return rc;
}
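/*
 * MMIO layout sketch (illustrative only): with the AFU configured above, a
 * context's process specific MMIO window is carved out of the per-process
 * BAR by its process element index, matching start_context():
 *
 *	psn_size = acfg->pp_mmio_stride;
 *	psn_phys = afu->ppmmio_phys + (pe * psn_size);
 *
 * The master context instead spans the whole global MMIO space starting at
 * afu->gmmio_phys.
 */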
/**
 * ocxlflash_create_afu() - create the AFU for OCXL
 * @pdev: PCI device associated with the host.
 *
 * Return: AFU on success, NULL on failure
 */
static void *ocxlflash_create_afu(struct pci_dev *pdev)
{
        struct device *dev = &pdev->dev;
        struct ocxlflash_context *ctx;
        struct ocxl_hw_afu *afu;
        int rc;

        afu = kzalloc(sizeof(*afu), GFP_KERNEL);
        if (unlikely(!afu)) {
                dev_err(dev, "%s: HW AFU allocation failed\n", __func__);
                goto out;
        }

        afu->pdev = pdev;
        afu->dev = dev;
        idr_init(&afu->idr);

        rc = ocxlflash_config_fn(pdev, afu);
        if (unlikely(rc)) {
                dev_err(dev, "%s: Function configuration failed rc=%d\n",
                        __func__, rc);
                goto err1;
        }

        rc = ocxlflash_config_afu(pdev, afu);
        if (unlikely(rc)) {
                dev_err(dev, "%s: AFU configuration failed rc=%d\n",
                        __func__, rc);
                goto err2;
        }

        ctx = ocxlflash_dev_context_init(pdev, afu);
        if (IS_ERR(ctx)) {
                rc = PTR_ERR(ctx);
                dev_err(dev, "%s: ocxlflash_dev_context_init failed rc=%d\n",
                        __func__, rc);
                goto err3;
        }

        afu->ocxl_ctx = ctx;
out:
        return afu;
err3:
        ocxlflash_unconfig_afu(afu);
err2:
        ocxlflash_unconfig_fn(pdev, afu);
err1:
        idr_destroy(&afu->idr);
        kfree(afu);
        afu = NULL;
        goto out;
}

/**
 * ctx_event_pending() - check for any event pending on the context
 * @ctx: Context to be checked.
 *
 * Return: true if there is an event pending, false if none pending
 */
static inline bool ctx_event_pending(struct ocxlflash_context *ctx)
{
        if (ctx->pending_irq || ctx->pending_fault)
                return true;

        return false;
}

/**
 * afu_poll() - poll the AFU for events on the context
 * @file: File associated with the adapter context.
 * @poll: Poll structure from the user.
 *
 * Return: poll mask
 */
static unsigned int afu_poll(struct file *file, struct poll_table_struct *poll)
{
        struct ocxlflash_context *ctx = file->private_data;
        struct device *dev = ctx->hw_afu->dev;
        ulong lock_flags;
        int mask = 0;

        poll_wait(file, &ctx->wq, poll);

        spin_lock_irqsave(&ctx->slock, lock_flags);
        if (ctx_event_pending(ctx))
                mask |= POLLIN | POLLRDNORM;
        else if (ctx->state == CLOSED)
                mask |= POLLERR;
        spin_unlock_irqrestore(&ctx->slock, lock_flags);

        dev_dbg(dev, "%s: Poll wait completed for pe %i mask %i\n",
                __func__, ctx->pe, mask);

        return mask;
}
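/*
 * Poll sketch (illustrative only, userspace): a consumer of the context fd
 * can block until an AFU interrupt or translation fault event is queued;
 * "fd" is the descriptor returned through ocxlflash_get_fd() and
 * "drain_events" is a hypothetical helper that calls read() as shown after
 * afu_read() below.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		drain_events(fd);
 */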
/**
 * afu_read() - perform a read on the context for any event
 * @file: File associated with the adapter context.
 * @buf: Buffer to receive the data.
 * @count: Size of buffer (maximum bytes that can be read).
 * @off: Offset.
 *
 * Return: size of the data read on success, -errno on failure
 */
static ssize_t afu_read(struct file *file, char __user *buf, size_t count,
                        loff_t *off)
{
        struct ocxlflash_context *ctx = file->private_data;
        struct device *dev = ctx->hw_afu->dev;
        struct cxl_event event;
        ulong lock_flags;
        ssize_t esize;
        ssize_t rc;
        int bit;
        DEFINE_WAIT(event_wait);

        if (*off != 0) {
                dev_err(dev, "%s: Non-zero offset not supported, off=%lld\n",
                        __func__, *off);
                rc = -EINVAL;
                goto out;
        }

        spin_lock_irqsave(&ctx->slock, lock_flags);

        for (;;) {
                prepare_to_wait(&ctx->wq, &event_wait, TASK_INTERRUPTIBLE);

                if (ctx_event_pending(ctx) || (ctx->state == CLOSED))
                        break;

                if (file->f_flags & O_NONBLOCK) {
                        dev_err(dev, "%s: File cannot be blocked on I/O\n",
                                __func__);
                        rc = -EAGAIN;
                        goto err;
                }

                if (signal_pending(current)) {
                        dev_err(dev, "%s: Signal pending on the process\n",
                                __func__);
                        rc = -ERESTARTSYS;
                        goto err;
                }

                spin_unlock_irqrestore(&ctx->slock, lock_flags);
                schedule();
                spin_lock_irqsave(&ctx->slock, lock_flags);
        }

        finish_wait(&ctx->wq, &event_wait);

        memset(&event, 0, sizeof(event));
        event.header.process_element = ctx->pe;
        event.header.size = sizeof(struct cxl_event_header);
        if (ctx->pending_irq) {
                esize = sizeof(struct cxl_event_afu_interrupt);
                event.header.size += esize;
                event.header.type = CXL_EVENT_AFU_INTERRUPT;

                bit = find_first_bit(&ctx->irq_bitmap, ctx->num_irqs);
                clear_bit(bit, &ctx->irq_bitmap);
                event.irq.irq = bit + 1;
                if (bitmap_empty(&ctx->irq_bitmap, ctx->num_irqs))
                        ctx->pending_irq = false;
        } else if (ctx->pending_fault) {
                event.header.size += sizeof(struct cxl_event_data_storage);
                event.header.type = CXL_EVENT_DATA_STORAGE;
                event.fault.addr = ctx->fault_addr;
                event.fault.dsisr = ctx->fault_dsisr;
                ctx->pending_fault = false;
        }

        spin_unlock_irqrestore(&ctx->slock, lock_flags);

        if (copy_to_user(buf, &event, event.header.size)) {
                dev_err(dev, "%s: copy_to_user failed\n", __func__);
                rc = -EFAULT;
                goto out;
        }

        rc = event.header.size;
out:
        return rc;
err:
        finish_wait(&ctx->wq, &event_wait);
        spin_unlock_irqrestore(&ctx->slock, lock_flags);
        goto out;
}
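/*
 * Read sketch (illustrative only, userspace): events are drained one at a
 * time and are at most sizeof(struct cxl_event) bytes. For an AFU interrupt,
 * the 1-based interrupt number is reported in the irq member:
 *
 *	struct cxl_event ev;
 *	ssize_t len = read(fd, &ev, sizeof(ev));
 *
 *	if (len > 0 && ev.header.type == CXL_EVENT_AFU_INTERRUPT)
 *		printf("AFU interrupt %i\n", ev.irq.irq);
 */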
/**
 * afu_release() - release and free the context
 * @inode: File inode pointer.
 * @file: File associated with the context.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_release(struct inode *inode, struct file *file)
{
        struct ocxlflash_context *ctx = file->private_data;
        int i;

        /* Unmap and free the interrupts associated with the context */
        for (i = ctx->num_irqs - 1; i >= 0; i--)
                afu_unmap_irq(0, ctx, i, ctx);
        free_afu_irqs(ctx);

        return ocxlflash_release_context(ctx);
}

/**
 * ocxlflash_mmap_fault() - mmap fault handler
 * @vmf: VM fault associated with current fault.
 *
 * Return: 0 on success, -errno on failure
 */
static vm_fault_t ocxlflash_mmap_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct ocxlflash_context *ctx = vma->vm_file->private_data;
        struct device *dev = ctx->hw_afu->dev;
        u64 mmio_area, offset;

        offset = vmf->pgoff << PAGE_SHIFT;
        if (offset >= ctx->psn_size)
                return VM_FAULT_SIGBUS;

        mutex_lock(&ctx->state_mutex);
        if (ctx->state != STARTED) {
                dev_err(dev, "%s: Context not started, state=%d\n",
                        __func__, ctx->state);
                mutex_unlock(&ctx->state_mutex);
                return VM_FAULT_SIGBUS;
        }
        mutex_unlock(&ctx->state_mutex);

        mmio_area = ctx->psn_phys;
        mmio_area += offset;

        return vmf_insert_pfn(vma, vmf->address, mmio_area >> PAGE_SHIFT);
}

static const struct vm_operations_struct ocxlflash_vmops = {
        .fault = ocxlflash_mmap_fault,
};

/**
 * afu_mmap() - map the fault handler operations
 * @file: File associated with the context.
 * @vma: VM area associated with mapping.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct ocxlflash_context *ctx = file->private_data;

        if ((vma_pages(vma) + vma->vm_pgoff) >
            (ctx->psn_size >> PAGE_SHIFT))
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_PFNMAP;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        vma->vm_ops = &ocxlflash_vmops;
        return 0;
}

static const struct file_operations ocxl_afu_fops = {
        .owner          = THIS_MODULE,
        .poll           = afu_poll,
        .read           = afu_read,
        .release        = afu_release,
        .mmap           = afu_mmap,
};

#define PATCH_FOPS(NAME)						\
        do { if (!fops->NAME) fops->NAME = ocxl_afu_fops.NAME; } while (0)
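/*
 * For reference, PATCH_FOPS(poll) expands to:
 *
 *	do { if (!fops->poll) fops->poll = ocxl_afu_fops.poll; } while (0)
 *
 * i.e. only file operations left NULL by the caller fall back to the
 * defaults defined above.
 */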
/**
 * ocxlflash_get_fd() - get file descriptor for an adapter context
 * @ctx_cookie: Adapter context.
 * @fops: File operations to be associated.
 * @fd: File descriptor to be returned.
 *
 * Return: pointer to the file on success, ERR_PTR on failure
 */
static struct file *ocxlflash_get_fd(void *ctx_cookie,
                                     struct file_operations *fops, int *fd)
{
        struct ocxlflash_context *ctx = ctx_cookie;
        struct device *dev = ctx->hw_afu->dev;
        struct file *file;
        int flags, fdtmp;
        int rc = 0;
        char *name = NULL;

        /* Only allow one fd per context */
        if (ctx->mapping) {
                dev_err(dev, "%s: Context is already mapped to an fd\n",
                        __func__);
                rc = -EEXIST;
                goto err1;
        }

        flags = O_RDWR | O_CLOEXEC;

        /* This code is similar to anon_inode_getfd() */
        rc = get_unused_fd_flags(flags);
        if (unlikely(rc < 0)) {
                dev_err(dev, "%s: get_unused_fd_flags failed rc=%d\n",
                        __func__, rc);
                goto err1;
        }
        fdtmp = rc;

        /* Patch the file ops that are not defined */
        if (fops) {
                PATCH_FOPS(poll);
                PATCH_FOPS(read);
                PATCH_FOPS(release);
                PATCH_FOPS(mmap);
        } else /* Use default ops */
                fops = (struct file_operations *)&ocxl_afu_fops;

        name = kasprintf(GFP_KERNEL, "ocxlflash:%d", ctx->pe);
        file = ocxlflash_getfile(dev, name, fops, ctx, flags);
        kfree(name);
        if (IS_ERR(file)) {
                rc = PTR_ERR(file);
                dev_err(dev, "%s: ocxlflash_getfile failed rc=%d\n",
                        __func__, rc);
                goto err2;
        }

        ctx->mapping = file->f_mapping;
        *fd = fdtmp;
out:
        return file;
err2:
        put_unused_fd(fdtmp);
err1:
        file = ERR_PTR(rc);
        goto out;
}

/**
 * ocxlflash_fops_get_context() - get the context associated with the file
 * @file: File associated with the adapter context.
 *
 * Return: pointer to the context
 */
static void *ocxlflash_fops_get_context(struct file *file)
{
        return file->private_data;
}

/**
 * ocxlflash_afu_irq() - interrupt handler for user contexts
 * @irq: Interrupt number.
 * @data: Private data provided at interrupt registration, the context.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t ocxlflash_afu_irq(int irq, void *data)
{
        struct ocxlflash_context *ctx = data;
        struct device *dev = ctx->hw_afu->dev;
        int i;

        dev_dbg(dev, "%s: Interrupt raised for pe %i virq %i\n",
                __func__, ctx->pe, irq);

        for (i = 0; i < ctx->num_irqs; i++) {
                if (ctx->irqs[i].virq == irq)
                        break;
        }
        if (unlikely(i >= ctx->num_irqs)) {
                dev_err(dev, "%s: Received AFU IRQ out of range\n", __func__);
                goto out;
        }

        spin_lock(&ctx->slock);
        set_bit(i, &ctx->irq_bitmap);	/* afu_read() reports bit + 1 */
        ctx->pending_irq = true;
        spin_unlock(&ctx->slock);

        wake_up_all(&ctx->wq);
out:
        return IRQ_HANDLED;
}
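/*
 * Numbering sketch (illustrative only): per-context AFU interrupts are
 * 0-based inside the driver (array index, irq_bitmap bit) and 1-based in the
 * events delivered to userspace. For three allocated interrupts:
 *
 *	driver index:  0  1  2    (ctx->irqs[], irq_bitmap bits)
 *	event irq.irq: 1  2  3    (bit + 1, see afu_read())
 */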
/**
 * ocxlflash_start_work() - start a user context
 * @ctx_cookie: Context to be started.
 * @num_irqs: Number of interrupts requested.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_start_work(void *ctx_cookie, u64 num_irqs)
{
        struct ocxlflash_context *ctx = ctx_cookie;
        struct ocxl_hw_afu *afu = ctx->hw_afu;
        struct device *dev = afu->dev;
        char *name;
        int rc = 0;
        int i;

        rc = alloc_afu_irqs(ctx, num_irqs);
        if (unlikely(rc < 0)) {
                dev_err(dev, "%s: alloc_afu_irqs failed rc=%d\n", __func__, rc);
                goto out;
        }

        for (i = 0; i < num_irqs; i++) {
                name = kasprintf(GFP_KERNEL, "ocxlflash-%s-pe%i-%i",
                                 dev_name(dev), ctx->pe, i);
                rc = afu_map_irq(0, ctx, i, ocxlflash_afu_irq, ctx, name);
                kfree(name);
                if (unlikely(rc < 0)) {
                        dev_err(dev, "%s: afu_map_irq failed rc=%d\n",
                                __func__, rc);
                        goto err;
                }
        }

        rc = start_context(ctx);
        if (unlikely(rc)) {
                dev_err(dev, "%s: start_context failed rc=%d\n", __func__, rc);
                goto err;
        }
out:
        return rc;
err:
        for (i = i - 1; i >= 0; i--)
                afu_unmap_irq(0, ctx, i, ctx);
        free_afu_irqs(ctx);
        goto out;
}

/**
 * ocxlflash_fd_mmap() - mmap handler for adapter file descriptor
 * @file: File installed with adapter file descriptor.
 * @vma: VM area associated with mapping.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_fd_mmap(struct file *file, struct vm_area_struct *vma)
{
        return afu_mmap(file, vma);
}

/**
 * ocxlflash_fd_release() - release the context associated with the file
 * @inode: File inode pointer.
 * @file: File associated with the adapter context.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_fd_release(struct inode *inode, struct file *file)
{
        return afu_release(inode, file);
}

/* Backend ops to ocxlflash services */
const struct cxlflash_backend_ops cxlflash_ocxl_ops = {
        .module			= THIS_MODULE,
        .psa_map		= ocxlflash_psa_map,
        .psa_unmap		= ocxlflash_psa_unmap,
        .process_element	= ocxlflash_process_element,
        .map_afu_irq		= ocxlflash_map_afu_irq,
        .unmap_afu_irq		= ocxlflash_unmap_afu_irq,
        .get_irq_objhndl	= ocxlflash_get_irq_objhndl,
        .start_context		= ocxlflash_start_context,
        .stop_context		= ocxlflash_stop_context,
        .afu_reset		= ocxlflash_afu_reset,
        .set_master		= ocxlflash_set_master,
        .get_context		= ocxlflash_get_context,
        .dev_context_init	= ocxlflash_dev_context_init,
        .release_context	= ocxlflash_release_context,
        .perst_reloads_same_image = ocxlflash_perst_reloads_same_image,
        .read_adapter_vpd	= ocxlflash_read_adapter_vpd,
        .allocate_afu_irqs	= ocxlflash_allocate_afu_irqs,
        .free_afu_irqs		= ocxlflash_free_afu_irqs,
        .create_afu		= ocxlflash_create_afu,
        .destroy_afu		= ocxlflash_destroy_afu,
        .get_fd			= ocxlflash_get_fd,
        .fops_get_context	= ocxlflash_fops_get_context,
        .start_work		= ocxlflash_start_work,
        .fd_mmap		= ocxlflash_fd_mmap,
        .fd_release		= ocxlflash_fd_release,
};
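/*
 * Usage sketch (illustrative only): the cxlflash core selects this ops
 * vector when driving an OpenCAPI device and reaches the services above
 * exclusively through it, e.g.:
 *
 *	const struct cxlflash_backend_ops *ops = &cxlflash_ocxl_ops;
 *	void *afu = ops->create_afu(pdev);
 *	void *ctx = ops->get_context(pdev, afu);
 *	...
 *	ops->destroy_afu(afu);
 */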