/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/unaligned.h>

#include <misc/cxl.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "main.h"
#include "sislite.h"
#include "common.h"

MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");

/**
 * process_cmd_err() - command error handler
 * @cmd:	AFU command that experienced the error.
 * @scp:	SCSI command associated with the AFU command in error.
 *
 * Translates error bits from AFU command to SCSI command results.
 */
static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
{
	struct afu *afu;
	struct cxlflash_cfg *cfg;
	struct device *dev;
	struct sisl_ioarcb *ioarcb;
	struct sisl_ioasa *ioasa;
	u32 resid;

	/* Check for NULL before any dereference of cmd */
	if (unlikely(!cmd))
		return;

	afu = cmd->parent;
	cfg = afu->parent;
	dev = &cfg->dev->dev;
	ioarcb = &(cmd->rcb);
	ioasa = &(cmd->sa);

	if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
		resid = ioasa->resid;
		scsi_set_resid(scp, resid);
		dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
			__func__, cmd, scp, resid);
	}

	if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
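		/*
		 * An overrun means more data moved than the command
		 * allowed for; nothing useful can be salvaged, so fail
		 * the command outright.
		 */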
		dev_dbg(dev, "%s: cmd overrun cmd = %p scp = %p\n",
			__func__, cmd, scp);
		scp->result = (DID_ERROR << 16);
	}

	dev_dbg(dev, "%s: cmd failed afu_rc=%02x scsi_rc=%02x fc_rc=%02x "
		"afu_extra=%02x scsi_extra=%02x fc_extra=%02x\n", __func__,
		ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc,
		ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra);

	if (ioasa->rc.scsi_rc) {
		/* We have a SCSI status */
		if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
			memcpy(scp->sense_buffer, ioasa->sense_data,
			       SISL_SENSE_DATA_LEN);
			scp->result = ioasa->rc.scsi_rc;
		} else
			scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
	}

	/*
	 * We encountered an error. Set scp->result based on nature
	 * of error.
	 */
	if (ioasa->rc.fc_rc) {
		/* We have an FC status */
		switch (ioasa->rc.fc_rc) {
		case SISL_FC_RC_LINKDOWN:
			scp->result = (DID_REQUEUE << 16);
			break;
		case SISL_FC_RC_RESID:
			/* This indicates an FCP resid underrun */
			if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
				/* If the SISL_RC_FLAGS_OVERRUN flag was set,
				 * then we will handle this error elsewhere.
				 * If not then we must handle it here.
				 * This is probably an AFU bug.
				 */
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_FC_RC_RESIDERR:
			/* Resid mismatch between adapter and device */
		case SISL_FC_RC_TGTABORT:
		case SISL_FC_RC_ABORTOK:
		case SISL_FC_RC_ABORTFAIL:
		case SISL_FC_RC_NOLOGI:
		case SISL_FC_RC_ABORTPEND:
		case SISL_FC_RC_WRABORTPEND:
		case SISL_FC_RC_NOEXP:
		case SISL_FC_RC_INUSE:
			scp->result = (DID_ERROR << 16);
			break;
		}
	}

	if (ioasa->rc.afu_rc) {
		/* We have an AFU error */
		switch (ioasa->rc.afu_rc) {
		case SISL_AFU_RC_NO_CHANNELS:
			scp->result = (DID_NO_CONNECT << 16);
			break;
		case SISL_AFU_RC_DATA_DMA_ERR:
			switch (ioasa->afu_extra) {
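			/*
			 * afu_extra qualifies the DMA failure: a page-in
			 * fault is transient and retried immediately,
			 * while an invalid effective address (or anything
			 * unrecognized) is a hard error.
			 */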
			case SISL_AFU_DMA_ERR_PAGE_IN:
				/* Retry */
				scp->result = (DID_IMM_RETRY << 16);
				break;
			case SISL_AFU_DMA_ERR_INVALID_EA:
			default:
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_AFU_RC_OUT_OF_DATA_BUFS:
			/* Retry */
			scp->result = (DID_ALLOC_FAILURE << 16);
			break;
		default:
			scp->result = (DID_ERROR << 16);
		}
	}
}

/**
 * cmd_complete() - command completion handler
 * @cmd:	AFU command that has completed.
 *
 * Prepares and submits command that has either completed or timed out to
 * the SCSI stack. Checks AFU command back into command pool for non-internal
 * (cmd->scp populated) commands.
 */
static void cmd_complete(struct afu_cmd *cmd)
{
	struct scsi_cmnd *scp;
	ulong lock_flags;
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	bool cmd_is_tmf;

	if (cmd->scp) {
		scp = cmd->scp;
		if (unlikely(cmd->sa.ioasc))
			process_cmd_err(cmd, scp);
		else
			scp->result = (DID_OK << 16);

		cmd_is_tmf = cmd->cmd_tmf;

		dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
				    __func__, scp, scp->result, cmd->sa.ioasc);

		scsi_dma_unmap(scp);
		scp->scsi_done(scp);

		if (cmd_is_tmf) {
			spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
			cfg->tmf_active = false;
			wake_up_all_locked(&cfg->tmf_waitq);
			spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		}
	} else
		complete(&cmd->cevent);
}

/**
 * context_reset() - reset command owner context via specified register
 * @cmd:	AFU command that timed out.
 * @reset_reg:	MMIO register to perform reset.
 */
static void context_reset(struct afu_cmd *cmd, __be64 __iomem *reset_reg)
{
	int nretry = 0;
	u64 rrin = 0x1;
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: cmd=%p\n", __func__, cmd);

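	/*
	 * Writing 1 requests the reset; the AFU clears the register when
	 * the context reset completes. Poll for that with an exponential
	 * backoff rather than spinning at a fixed rate.
	 */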
	writeq_be(rrin, reset_reg);
	do {
		rrin = readq_be(reset_reg);
		if (rrin != 0x1)
			break;
		/* Double delay each time */
		udelay(1 << nretry);
	} while (nretry++ < MC_ROOM_RETRY_CNT);

	dev_dbg(dev, "%s: returning rrin=%016llx nretry=%d\n",
		__func__, rrin, nretry);
}

/**
 * context_reset_ioarrin() - reset command owner context via IOARRIN register
 * @cmd:	AFU command that timed out.
 */
static void context_reset_ioarrin(struct afu_cmd *cmd)
{
	struct afu *afu = cmd->parent;

	context_reset(cmd, &afu->host_map->ioarrin);
}

/**
 * context_reset_sq() - reset command owner context w/ SQ Context Reset register
 * @cmd:	AFU command that timed out.
 */
static void context_reset_sq(struct afu_cmd *cmd)
{
	struct afu *afu = cmd->parent;

	context_reset(cmd, &afu->host_map->sq_ctx_reset);
}

/**
 * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	s64 room;
	ulong lock_flags;

	/*
	 * To avoid the performance penalty of MMIO, spread the update of
	 * 'room' over multiple commands.
	 */
	spin_lock_irqsave(&afu->rrin_slock, lock_flags);
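	/*
	 * 'room' is a locally cached credit count; the hardware
	 * command-room register is only re-read once the cached count
	 * has been exhausted.
	 */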
	if (--afu->room < 0) {
		room = readq_be(&afu->host_map->cmd_room);
		if (room <= 0) {
			dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
					    "0x%02X, room=0x%016llX\n",
					    __func__, cmd->rcb.cdb[0], room);
			afu->room = 0;
			rc = SCSI_MLQUEUE_HOST_BUSY;
			goto out;
		}
		afu->room = room - 1;
	}

	writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin);
out:
	spin_unlock_irqrestore(&afu->rrin_slock, lock_flags);
	dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n", __func__,
		cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
	return rc;
}

/**
 * send_cmd_sq() - sends an AFU command via SQ ring
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	int newval;
	ulong lock_flags;

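	/*
	 * Each SQ slot is guarded by a credit; credits are handed back
	 * in process_hrrq() as completions are harvested, so a failed
	 * decrement here means the ring is currently full.
	 */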
	newval = atomic_dec_if_positive(&afu->hsq_credits);
	if (newval <= 0) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	cmd->rcb.ioasa = &cmd->sa;

	spin_lock_irqsave(&afu->hsq_slock, lock_flags);

	*afu->hsq_curr = cmd->rcb;
	if (afu->hsq_curr < afu->hsq_end)
		afu->hsq_curr++;
	else
		afu->hsq_curr = afu->hsq_start;
	writeq_be((u64)afu->hsq_curr, &afu->host_map->sq_tail);

	spin_unlock_irqrestore(&afu->hsq_slock, lock_flags);
out:
	dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p "
		"head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len,
		cmd->rcb.data_ea, cmd->rcb.ioasa, rc, afu->hsq_curr,
		readq_be(&afu->host_map->sq_head),
		readq_be(&afu->host_map->sq_tail));
	return rc;
}

/**
 * wait_resp() - polls for a response or timeout to a sent AFU command
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command that was sent.
 *
 * Return:
 *	0 on success, -1 on timeout/error
 */
static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);

	timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
	if (!timeout) {
		afu->context_reset(cmd);
		rc = -1;
	}

	if (unlikely(cmd->sa.ioasc != 0)) {
		dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n",
			__func__, cmd->rcb.cdb[0], cmd->sa.ioasc);
		rc = -1;
	}

	return rc;
}

/**
 * send_tmf() - sends a Task Management Function (TMF)
 * @afu:	AFU to checkout from.
 * @scp:	SCSI command from stack.
 * @tmfcmd:	TMF command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
{
	struct cxlflash_cfg *cfg = shost_priv(scp->device->host);
	struct afu_cmd *cmd = sc_to_afucz(scp);
	struct device *dev = &cfg->dev->dev;
	ulong lock_flags;
	int rc = 0;
	ulong to;

	/* When Task Management Function is active do not send another */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	cfg->tmf_active = true;
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

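	/*
	 * Only one TMF may be outstanding at a time; tmf_active is
	 * cleared and tmf_waitq woken from cmd_complete() once the TMF
	 * finishes.
	 */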
	cmd->scp = scp;
	cmd->parent = afu;
	cmd->cmd_tmf = true;

	cmd->rcb.ctx_id = afu->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
			      SISL_REQ_FLAGS_SUP_UNDERRUN |
			      SISL_REQ_FLAGS_TMF_CMD);
	memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));

	rc = afu->send_cmd(afu, cmd);
	if (unlikely(rc)) {
		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
		cfg->tmf_active = false;
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		goto out;
	}

	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	to = msecs_to_jiffies(5000);
	to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
						       !cfg->tmf_active,
						       cfg->tmf_slock,
						       to);
	if (!to) {
		cfg->tmf_active = false;
		dev_err(dev, "%s: TMF timed out\n", __func__);
		rc = -1;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
out:
	return rc;
}

/**
 * cxlflash_driver_info() - information handler for this host driver
 * @host:	SCSI host associated with device.
 *
 * Return: A string describing the device.
 */
static const char *cxlflash_driver_info(struct Scsi_Host *host)
{
	return CXLFLASH_ADAPTER_NAME;
}

/**
 * cxlflash_queuecommand() - sends a mid-layer request
 * @host:	SCSI host associated with device.
 * @scp:	SCSI command to send.
 *
 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
{
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd = sc_to_afucz(scp);
	struct scatterlist *sg = scsi_sglist(scp);
	u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
	ulong lock_flags;
	int nseg = 0;
	int rc = 0;

	dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
			    "cdb=(%08x-%08x-%08x-%08x)\n",
			    __func__, scp, host->host_no, scp->device->channel,
			    scp->device->id, scp->device->lun,
			    get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	/*
	 * If a Task Management Function is active, do not queue regular
	 * commands; return busy so the mid-layer retries once the TMF
	 * completes.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active) {
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	switch (cfg->state) {
	case STATE_RESET:
		dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	case STATE_FAILTERM:
		dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__);
		scp->result = (DID_NO_CONNECT << 16);
		scp->scsi_done(scp);
		rc = 0;
		goto out;
	default:
		break;
	}

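	/*
	 * The AFU takes a single effective address/length pair per
	 * command, so only the first scatter-gather element is used
	 * (this host limits commands to a single element).
	 */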
	if (likely(sg)) {
		nseg = scsi_dma_map(scp);
		if (unlikely(nseg < 0)) {
			dev_err(dev, "%s: Fail DMA map\n", __func__);
			rc = SCSI_MLQUEUE_HOST_BUSY;
			goto out;
		}

		cmd->rcb.data_len = sg_dma_len(sg);
		cmd->rcb.data_ea = sg_dma_address(sg);
	}

	cmd->scp = scp;
	cmd->parent = afu;

	cmd->rcb.ctx_id = afu->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

	if (scp->sc_data_direction == DMA_TO_DEVICE)
		req_flags |= SISL_REQ_FLAGS_HOST_WRITE;

	cmd->rcb.req_flags = req_flags;
	memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));

	rc = afu->send_cmd(afu, cmd);
	if (unlikely(rc))
		scsi_dma_unmap(scp);
out:
	return rc;
}

/**
 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
 * @cfg:	Internal structure associated with the host.
 */
static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;

	if (pci_channel_offline(pdev))
		wait_event_timeout(cfg->reset_waitq,
				   !pci_channel_offline(pdev),
				   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
}

/**
 * free_mem() - free memory associated with the AFU
 * @cfg:	Internal structure associated with the host.
 */
static void free_mem(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;

	if (cfg->afu) {
		free_pages((ulong)afu, get_order(sizeof(struct afu)));
		cfg->afu = NULL;
	}
}

/**
 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU in a partially allocated/initialized state.
 *
 * Cancels scheduled worker threads, waits for any active internal AFU
 * commands to timeout, disables IRQ polling and then unmaps the MMIO space.
 */
static void stop_afu(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;

	cancel_work_sync(&cfg->work_q);

	if (likely(afu)) {
		while (atomic_read(&afu->cmds_active))
			ssleep(1);
		if (afu_is_irqpoll_enabled(afu))
			irq_poll_disable(&afu->irqpoll);
		if (likely(afu->afu_map)) {
			cxl_psa_unmap((void __iomem *)afu->afu_map);
			afu->afu_map = NULL;
		}
	}
}

/**
 * term_intr() - disables all AFU interrupts
 * @cfg:	Internal structure associated with the host.
 * @level:	Depth of allocation, where to begin waterfall tear down.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;

	if (!afu || !cfg->mcctx) {
		dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
		return;
	}

	switch (level) {
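	/*
	 * Deliberate fall-through: unwinding starts at 'level' and
	 * proceeds through every shallower stage of the setup.
	 */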
	case UNMAP_THREE:
		cxl_unmap_afu_irq(cfg->mcctx, 3, afu);
		/* fall through */
	case UNMAP_TWO:
		cxl_unmap_afu_irq(cfg->mcctx, 2, afu);
		/* fall through */
	case UNMAP_ONE:
		cxl_unmap_afu_irq(cfg->mcctx, 1, afu);
		/* fall through */
	case FREE_IRQ:
		cxl_free_afu_irqs(cfg->mcctx);
		/* fall through */
	case UNDO_NOOP:
		/* No action required */
		break;
	}
}

/**
 * term_mc() - terminates the master context
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_mc(struct cxlflash_cfg *cfg)
{
	int rc = 0;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;

	if (!afu || !cfg->mcctx) {
		dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
		return;
	}

	rc = cxl_stop_context(cfg->mcctx);
	WARN_ON(rc);
	cfg->mcctx = NULL;
}

/**
 * term_afu() - terminates the AFU
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_afu(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;

	/*
	 * Tear down is carefully orchestrated to ensure
	 * no interrupts can come in when the problem state
	 * area is unmapped.
	 *
	 * 1) Disable all AFU interrupts
	 * 2) Unmap the problem state area
	 * 3) Stop the master context
	 */
	term_intr(cfg, UNMAP_THREE);
	if (cfg->afu)
		stop_afu(cfg);

	term_mc(cfg);

	dev_dbg(dev, "%s: returning\n", __func__);
}

/**
 * notify_shutdown() - notifies device of pending shutdown
 * @cfg:	Internal structure associated with the host.
 * @wait:	Whether to wait for shutdown processing to complete.
 *
 * This function will notify the AFU that the adapter is being shutdown
 * and will wait for shutdown processing to complete if wait is true.
 * This notification should flush pending I/Os to the device and halt
 * further I/Os until the next AFU reset is issued and device restarted.
 */
static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct dev_dependent_vals *ddv;
	__be64 __iomem *fc_port_regs;
	u64 reg, status;
	int i, retry_cnt = 0;

	ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
	if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
		return;

	if (!afu || !afu->afu_map) {
		dev_dbg(dev, "%s: Problem state area not mapped\n", __func__);
		return;
	}

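	/*
	 * Shutdown is signalled per FC port by setting the normal
	 * shutdown bit in each port's config register; the AFU reports
	 * completion through the port status register polled below.
	 */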
	/* Notify AFU */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);

		reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
		reg |= SISL_FC_SHUTDOWN_NORMAL;
		writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
	}

	if (!wait)
		return;

	/* Wait up to 1.5 seconds for shutdown processing to complete */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);
		retry_cnt = 0;

		while (true) {
			status = readq_be(&fc_port_regs[FC_STATUS / 8]);
			if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
				break;
			if (++retry_cnt >= MC_RETRY_CNT) {
				dev_dbg(dev, "%s: port %d shutdown processing "
					"not yet completed\n", __func__, i);
				break;
			}
			msleep(100 * retry_cnt);
		}
	}
}

/**
 * cxlflash_remove() - PCI entry point to tear down host
 * @pdev:	PCI device associated with the host.
 *
 * Safe to use as a cleanup in partially allocated/initialized state.
 */
static void cxlflash_remove(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	ulong lock_flags;

	if (!pci_is_enabled(pdev)) {
		dev_dbg(dev, "%s: Device is disabled\n", __func__);
		return;
	}

	/* If a Task Management Function is active, wait for it to complete
	 * before continuing with remove.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	/* Notify AFU and wait for shutdown processing to complete */
	notify_shutdown(cfg, true);

	cfg->state = STATE_FAILTERM;
	cxlflash_stop_term_user_contexts(cfg);

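	/*
	 * Unwind in the reverse of probe order; each init_state case
	 * falls through so that every stage reached during probe is
	 * torn down.
	 */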
	switch (cfg->init_state) {
	case INIT_STATE_SCSI:
		cxlflash_term_local_luns(cfg);
		scsi_remove_host(cfg->host);
		/* fall through */
	case INIT_STATE_AFU:
		term_afu(cfg);
		/* fall through */
	case INIT_STATE_PCI:
		pci_disable_device(pdev);
		/* fall through */
	case INIT_STATE_NONE:
		free_mem(cfg);
		scsi_host_put(cfg->host);
		break;
	}

	dev_dbg(dev, "%s: returning\n", __func__);
}

/**
 * alloc_mem() - allocates the AFU and its command pool
 * @cfg:	Internal structure associated with the host.
 *
 * A partially allocated state remains on failure.
 *
 * Return:
 *	0 on success
 *	-ENOMEM on failure to allocate memory
 */
static int alloc_mem(struct cxlflash_cfg *cfg)
{
	int rc = 0;
	struct device *dev = &cfg->dev->dev;

	/* AFU is ~28k, i.e. only one 64k page or up to seven 4k pages */
	cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					    get_order(sizeof(struct afu)));
	if (unlikely(!cfg->afu)) {
		dev_err(dev, "%s: cannot get %d free pages\n",
			__func__, get_order(sizeof(struct afu)));
		rc = -ENOMEM;
		goto out;
	}
	cfg->afu->parent = cfg;
	cfg->afu->afu_map = NULL;
out:
	return rc;
}

/**
 * init_pci() - initializes the host as a PCI device
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_pci(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

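	/*
	 * Enabling can race with EEH recovery: if the channel is
	 * offline, wait out recovery and retry the enable once before
	 * giving up.
	 */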
	rc = pci_enable_device(pdev);
	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			cxlflash_wait_for_pci_err_recovery(cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc) {
			dev_err(dev, "%s: Cannot enable adapter\n", __func__);
			cxlflash_wait_for_pci_err_recovery(cfg);
			goto out;
		}
	}

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_scsi(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = scsi_add_host(cfg->host, &pdev->dev);
	if (rc) {
		dev_err(dev, "%s: scsi_add_host failed rc=%d\n", __func__, rc);
		goto out;
	}

	scsi_scan_host(cfg->host);

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

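/*
 * FC port management: the routines below drive the MTIP command
 * configuration and status registers to toggle link state and program
 * WWPNs. Each requires the port MMIO region to be mapped beforehand.
 */
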
/**
 * set_port_online() - transitions the specified host FC port to online state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. Online state means
 * that the FC link layer has synced, completed the handshaking process, and
 * is ready for login to start.
 */
static void set_port_online(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE);	/* clear OFF_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);	/* set ON_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * set_port_offline() - transitions the specified host FC port to offline state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call.
 */
static void set_port_offline(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);	/* clear ON_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);	/* set OFF_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * wait_port_online() - waits for the specified host FC port to come online
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @delay_us:	Number of microseconds to delay between reading port status.
 * @nretry:	Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call. This will timeout
 * when the cable is not plugged in.
 *
 * Return:
 *	TRUE (1) when the specified port is online
 *	FALSE (0) when the specified port fails to come online after timeout
 */
static bool wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	WARN_ON(delay_us < 1000);

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
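		/*
		 * An all-ones read suggests the MMIO path itself is
		 * failing (e.g. during EEH recovery), so burn through
		 * the retry budget twice as fast rather than waiting
		 * out the full count.
		 */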
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
}

/**
 * wait_port_offline() - waits for the specified host FC port to go offline
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @delay_us:	Number of microseconds to delay between reading port status.
 * @nretry:	Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call.
 *
 * Return:
 *	TRUE (1) when the specified port is offline
 *	FALSE (0) when the specified port fails to go offline after timeout
 */
static bool wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	WARN_ON(delay_us < 1000);

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
}

/**
 * afu_set_wwpn() - configures the WWPN for the specified host FC port
 * @afu:	AFU associated with the host that owns the specified FC port.
 * @port:	Port number being configured.
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @wwpn:	The world-wide-port-number previously discovered for port.
 *
 * The provided MMIO region must be mapped prior to call. As part of the
 * sequence to configure the WWPN, the port is toggled offline and then back
 * online. This toggling action can cause this routine to delay up to a few
 * seconds. When configured to use the internal LUN feature of the AFU, a
 * failure to come online is overridden.
 */
static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
			 u64 wwpn)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT)) {
		dev_dbg(dev, "%s: wait on port %d to go offline timed out\n",
			__func__, port);
	}

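	/* Program the WWPN while the port is held offline. */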
	writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT)) {
		dev_dbg(dev, "%s: wait on port %d to go online timed out\n",
			__func__, port);
	}
}

/**
 * afu_link_reset() - resets the specified host FC port
 * @afu:	AFU associated with the host that owns the specified FC port.
 * @port:	Port number being reset.
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. The sequence to
 * reset the port involves toggling it offline and then back online. This
 * action can cause this routine to delay up to a few seconds. An effort
 * is made to maintain link with the device by switching the host to use
 * the alternate port exclusively while the reset takes place.
 */
static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 port_sel;

	/* first switch the AFU to the other links, if any */
	port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
	port_sel &= ~(1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT))
		dev_err(dev, "%s: wait on port %d to go offline timed out\n",
			__func__, port);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT))
		dev_err(dev, "%s: wait on port %d to go online timed out\n",
			__func__, port);

	/* switch back to include this port */
	port_sel |= (1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel);
}

/*
 * Asynchronous interrupt information table
 */
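/*
 * Each entry pairs an async status bit with the port it applies to, a
 * message to log, and an action mask (CLR_FC_ERROR, LINK_RESET,
 * SCAN_HOST) that is acted upon when the corresponding status fires.
 */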
static const struct asyc_intr_info ainfo[] = {
	{SISL_ASTATUS_FC0_OTHER, "other error", 0, CLR_FC_ERROR | LINK_RESET},
	{SISL_ASTATUS_FC0_LOGO, "target initiated LOGO", 0, 0},
	{SISL_ASTATUS_FC0_CRC_T, "CRC threshold exceeded", 0, LINK_RESET},
	{SISL_ASTATUS_FC0_LOGI_R, "login timed out, retrying", 0, LINK_RESET},
	{SISL_ASTATUS_FC0_LOGI_F, "login failed", 0, CLR_FC_ERROR},
	{SISL_ASTATUS_FC0_LOGI_S, "login succeeded", 0, SCAN_HOST},
	{SISL_ASTATUS_FC0_LINK_DN, "link down", 0, 0},
	{SISL_ASTATUS_FC0_LINK_UP, "link up", 0, 0},
	{SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET},
	{SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0},
	{SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET},
	{SISL_ASTATUS_FC1_LOGI_R, "login timed out, retrying", 1, LINK_RESET},
	{SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR},
	{SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, SCAN_HOST},
	{SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0},
	{SISL_ASTATUS_FC1_LINK_UP, "link up", 1, 0},
	{0x0, "", 0, 0}		/* terminator */
};

/**
 * find_ainfo() - locates and returns asynchronous interrupt information
 * @status:	Status code set by AFU on error.
 *
 * Return: The located information or NULL when the status code is invalid.
 */
static const struct asyc_intr_info *find_ainfo(u64 status)
{
	const struct asyc_intr_info *info;

	for (info = &ainfo[0]; info->status; info++)
		if (info->status == status)
			return info;

	return NULL;
}

/**
 * afu_err_intr_init() - clears and initializes the AFU for error interrupts
 * @afu:	AFU associated with the host.
 */
static void afu_err_intr_init(struct afu *afu)
{
	struct cxlflash_cfg *cfg = afu->parent;
	__be64 __iomem *fc_port_regs;
	int i;
	u64 reg;

	/*
	 * Global async interrupts: the AFU clears afu_ctrl on context exit
	 * if async interrupts were sent to that context. This prevents the
	 * AFU from sending further async interrupts when there is nobody
	 * to receive them.
	 */

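	/*
	 * The ordering below matters: mask everything, program the
	 * delivery target, clear stale status, unmask, then clear once
	 * more to cover anything that latched before the unmask.
	 */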
Ochs /* unmask bits that are of interest */ 1098c21e0bbfSMatthew R. Ochs /* note: afu can send an interrupt after this step */ 1099c21e0bbfSMatthew R. Ochs writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask); 1100c21e0bbfSMatthew R. Ochs /* clear again in case a bit came on after previous clear but before */ 1101c21e0bbfSMatthew R. Ochs /* unmask */ 1102c21e0bbfSMatthew R. Ochs writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear); 1103c21e0bbfSMatthew R. Ochs 1104c21e0bbfSMatthew R. Ochs /* Clear/Set internal lun bits */ 11050aa14887SMatthew R. Ochs fc_port_regs = get_fc_port_regs(cfg, 0); 11060aa14887SMatthew R. Ochs reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]); 1107c21e0bbfSMatthew R. Ochs reg &= SISL_FC_INTERNAL_MASK; 1108c21e0bbfSMatthew R. Ochs if (afu->internal_lun) 1109c21e0bbfSMatthew R. Ochs reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT); 11100aa14887SMatthew R. Ochs writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]); 1111c21e0bbfSMatthew R. Ochs 1112c21e0bbfSMatthew R. Ochs /* now clear FC errors */ 111378ae028eSMatthew R. Ochs for (i = 0; i < cfg->num_fc_ports; i++) { 11140aa14887SMatthew R. Ochs fc_port_regs = get_fc_port_regs(cfg, i); 11150aa14887SMatthew R. Ochs 11160aa14887SMatthew R. Ochs writeq_be(0xFFFFFFFFU, &fc_port_regs[FC_ERROR / 8]); 11170aa14887SMatthew R. Ochs writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]); 1118c21e0bbfSMatthew R. Ochs } 1119c21e0bbfSMatthew R. Ochs 1120c21e0bbfSMatthew R. Ochs /* sync interrupts for master's IOARRIN write */ 1121c21e0bbfSMatthew R. Ochs /* note that unlike asyncs, there can be no pending sync interrupts */ 1122c21e0bbfSMatthew R. Ochs /* at this time (this is a fresh context and master has not written */ 1123c21e0bbfSMatthew R. Ochs /* IOARRIN yet), so there is nothing to clear. */ 1124c21e0bbfSMatthew R. Ochs 1125c21e0bbfSMatthew R. Ochs /* set LISN#, it is always sent to the context that wrote IOARRIN */ 1126c21e0bbfSMatthew R. Ochs writeq_be(SISL_MSI_SYNC_ERROR, &afu->host_map->ctx_ctrl); 1127c21e0bbfSMatthew R. Ochs writeq_be(SISL_ISTATUS_MASK, &afu->host_map->intr_mask); 1128c21e0bbfSMatthew R. Ochs } 1129c21e0bbfSMatthew R. Ochs 1130c21e0bbfSMatthew R. Ochs /** 1131c21e0bbfSMatthew R. Ochs * cxlflash_sync_err_irq() - interrupt handler for synchronous errors 1132c21e0bbfSMatthew R. Ochs * @irq: Interrupt number. 1133c21e0bbfSMatthew R. Ochs * @data: Private data provided at interrupt registration, the AFU. 1134c21e0bbfSMatthew R. Ochs * 1135c21e0bbfSMatthew R. Ochs * Return: Always return IRQ_HANDLED. 1136c21e0bbfSMatthew R. Ochs */ 1137c21e0bbfSMatthew R. Ochs static irqreturn_t cxlflash_sync_err_irq(int irq, void *data) 1138c21e0bbfSMatthew R. Ochs { 1139c21e0bbfSMatthew R. Ochs struct afu *afu = (struct afu *)data; 1140fb67d44dSMatthew R. Ochs struct cxlflash_cfg *cfg = afu->parent; 1141fb67d44dSMatthew R. Ochs struct device *dev = &cfg->dev->dev; 1142c21e0bbfSMatthew R. Ochs u64 reg; 1143c21e0bbfSMatthew R. Ochs u64 reg_unmasked; 1144c21e0bbfSMatthew R. Ochs 1145c21e0bbfSMatthew R. Ochs reg = readq_be(&afu->host_map->intr_status); 1146c21e0bbfSMatthew R. Ochs reg_unmasked = (reg & SISL_ISTATUS_UNMASK); 1147c21e0bbfSMatthew R. Ochs 1148c21e0bbfSMatthew R. Ochs if (reg_unmasked == 0UL) { 1149fb67d44dSMatthew R. Ochs dev_err(dev, "%s: spurious interrupt, intr_status=%016llx\n", 1150fb67d44dSMatthew R. Ochs __func__, reg); 1151c21e0bbfSMatthew R. Ochs goto cxlflash_sync_err_irq_exit; 1152c21e0bbfSMatthew R. Ochs } 1153c21e0bbfSMatthew R. Ochs 1154fb67d44dSMatthew R. 
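/*
 * Reaching this point means at least one unmasked status bit is set: the
 * interrupt is genuine but has no recovery action on the sync path, so it
 * is logged as unexpected and the latched bits are cleared below to re-arm
 * detection of the next occurrence.
 */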
Ochs dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n", 1155fb67d44dSMatthew R. Ochs __func__, reg); 1156c21e0bbfSMatthew R. Ochs 1157c21e0bbfSMatthew R. Ochs writeq_be(reg_unmasked, &afu->host_map->intr_clear); 1158c21e0bbfSMatthew R. Ochs 1159c21e0bbfSMatthew R. Ochs cxlflash_sync_err_irq_exit: 1160c21e0bbfSMatthew R. Ochs return IRQ_HANDLED; 1161c21e0bbfSMatthew R. Ochs } 1162c21e0bbfSMatthew R. Ochs 1163c21e0bbfSMatthew R. Ochs /** 116476a6ebbeSMatthew R. Ochs * process_hrrq() - process the read-response queue 116576a6ebbeSMatthew R. Ochs * @afu: AFU associated with the host. 1166f918b4a8SMatthew R. Ochs * @doneq: Queue of commands harvested from the RRQ. 1167cba06e6dSMatthew R. Ochs * @budget: Threshold of RRQ entries to process. 1168f918b4a8SMatthew R. Ochs * 1169f918b4a8SMatthew R. Ochs * This routine must be called holding the disabled RRQ spin lock. 1170c21e0bbfSMatthew R. Ochs * 117176a6ebbeSMatthew R. Ochs * Return: The number of entries processed. 1172c21e0bbfSMatthew R. Ochs */ 1173cba06e6dSMatthew R. Ochs static int process_hrrq(struct afu *afu, struct list_head *doneq, int budget) 1174c21e0bbfSMatthew R. Ochs { 1175c21e0bbfSMatthew R. Ochs struct afu_cmd *cmd; 1176696d0b0cSMatthew R. Ochs struct sisl_ioasa *ioasa; 1177696d0b0cSMatthew R. Ochs struct sisl_ioarcb *ioarcb; 1178c21e0bbfSMatthew R. Ochs bool toggle = afu->toggle; 117976a6ebbeSMatthew R. Ochs int num_hrrq = 0; 1180c21e0bbfSMatthew R. Ochs u64 entry, 1181c21e0bbfSMatthew R. Ochs *hrrq_start = afu->hrrq_start, 1182c21e0bbfSMatthew R. Ochs *hrrq_end = afu->hrrq_end, 1183c21e0bbfSMatthew R. Ochs *hrrq_curr = afu->hrrq_curr; 1184c21e0bbfSMatthew R. Ochs 1185cba06e6dSMatthew R. Ochs /* Process ready RRQ entries up to the specified budget (if any) */ 1186c21e0bbfSMatthew R. Ochs while (true) { 1187c21e0bbfSMatthew R. Ochs entry = *hrrq_curr; 1188c21e0bbfSMatthew R. Ochs 1189c21e0bbfSMatthew R. Ochs if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle) 1190c21e0bbfSMatthew R. Ochs break; 1191c21e0bbfSMatthew R. Ochs 1192696d0b0cSMatthew R. Ochs entry &= ~SISL_RESP_HANDLE_T_BIT; 1193696d0b0cSMatthew R. Ochs 1194696d0b0cSMatthew R. Ochs if (afu_is_sq_cmd_mode(afu)) { 1195696d0b0cSMatthew R. Ochs ioasa = (struct sisl_ioasa *)entry; 1196696d0b0cSMatthew R. Ochs cmd = container_of(ioasa, struct afu_cmd, sa); 1197696d0b0cSMatthew R. Ochs } else { 1198696d0b0cSMatthew R. Ochs ioarcb = (struct sisl_ioarcb *)entry; 1199696d0b0cSMatthew R. Ochs cmd = container_of(ioarcb, struct afu_cmd, rcb); 1200696d0b0cSMatthew R. Ochs } 1201696d0b0cSMatthew R. Ochs 1202f918b4a8SMatthew R. Ochs list_add_tail(&cmd->queue, doneq); 1203c21e0bbfSMatthew R. Ochs 1204c21e0bbfSMatthew R. Ochs /* Advance to next entry or wrap and flip the toggle bit */ 1205c21e0bbfSMatthew R. Ochs if (hrrq_curr < hrrq_end) 1206c21e0bbfSMatthew R. Ochs hrrq_curr++; 1207c21e0bbfSMatthew R. Ochs else { 1208c21e0bbfSMatthew R. Ochs hrrq_curr = hrrq_start; 1209c21e0bbfSMatthew R. Ochs toggle ^= SISL_RESP_HANDLE_T_BIT; 1210c21e0bbfSMatthew R. Ochs } 1211696d0b0cSMatthew R. Ochs 1212696d0b0cSMatthew R. Ochs atomic_inc(&afu->hsq_credits); 121376a6ebbeSMatthew R. Ochs num_hrrq++; 1214cba06e6dSMatthew R. Ochs 1215cba06e6dSMatthew R. Ochs if (budget > 0 && num_hrrq >= budget) 1216cba06e6dSMatthew R. Ochs break; 1217c21e0bbfSMatthew R. Ochs } 1218c21e0bbfSMatthew R. Ochs 1219c21e0bbfSMatthew R. Ochs afu->hrrq_curr = hrrq_curr; 1220c21e0bbfSMatthew R. Ochs afu->toggle = toggle; 1221c21e0bbfSMatthew R. Ochs 122276a6ebbeSMatthew R. Ochs return num_hrrq; 122376a6ebbeSMatthew R. 
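/*
 * The loop above is a classic toggle-bit (generation-bit) ring consumer:
 * an entry belongs to the host only while its low-order bit matches the
 * expected toggle, and the expectation flips on every wrap, so leftovers
 * from the previous lap can never match. A minimal self-contained sketch
 * of the same protocol -- all ex_* names and the 4-entry ring are
 * illustrative, not part of the driver:
 *
 *	#define EX_ENTRIES	4
 *	static u64 ex_ring[EX_ENTRIES];	// producer deposits entries here
 *	static int ex_idx;		// consumer cursor
 *	static u64 ex_toggle = 1;	// generation bit expected this lap
 *
 *	static bool ex_consume(u64 *out)
 *	{
 *		u64 entry = ex_ring[ex_idx];
 *
 *		if ((entry & 1) != ex_toggle)	// producer not here yet
 *			return false;
 *		*out = entry & ~1ULL;		// strip the toggle bit
 *		if (++ex_idx == EX_ENTRIES) {	// wrap: expect flipped bit
 *			ex_idx = 0;
 *			ex_toggle ^= 1;
 *		}
 *		return true;
 *	}
 *
 * No head/tail indices are exchanged with the producer; the generation
 * bit alone tells the consumer how far it may read.
 */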
Ochs } 122476a6ebbeSMatthew R. Ochs 122576a6ebbeSMatthew R. Ochs /** 1226f918b4a8SMatthew R. Ochs * process_cmd_doneq() - process a queue of harvested RRQ commands 1227f918b4a8SMatthew R. Ochs * @doneq: Queue of completed commands. 1228f918b4a8SMatthew R. Ochs * 1229f918b4a8SMatthew R. Ochs * Note that upon return the queue can no longer be trusted. 1230f918b4a8SMatthew R. Ochs */ 1231f918b4a8SMatthew R. Ochs static void process_cmd_doneq(struct list_head *doneq) 1232f918b4a8SMatthew R. Ochs { 1233f918b4a8SMatthew R. Ochs struct afu_cmd *cmd, *tmp; 1234f918b4a8SMatthew R. Ochs 1235f918b4a8SMatthew R. Ochs WARN_ON(list_empty(doneq)); 1236f918b4a8SMatthew R. Ochs 1237f918b4a8SMatthew R. Ochs list_for_each_entry_safe(cmd, tmp, doneq, queue) 1238f918b4a8SMatthew R. Ochs cmd_complete(cmd); 1239f918b4a8SMatthew R. Ochs } 1240f918b4a8SMatthew R. Ochs 1241f918b4a8SMatthew R. Ochs /** 1242cba06e6dSMatthew R. Ochs * cxlflash_irqpoll() - process a queue of harvested RRQ commands 1243cba06e6dSMatthew R. Ochs * @irqpoll: IRQ poll structure associated with queue to poll. 1244cba06e6dSMatthew R. Ochs * @budget: Threshold of RRQ entries to process per poll. 1245cba06e6dSMatthew R. Ochs * 1246cba06e6dSMatthew R. Ochs * Return: The number of entries processed. 1247cba06e6dSMatthew R. Ochs */ 1248cba06e6dSMatthew R. Ochs static int cxlflash_irqpoll(struct irq_poll *irqpoll, int budget) 1249cba06e6dSMatthew R. Ochs { 1250cba06e6dSMatthew R. Ochs struct afu *afu = container_of(irqpoll, struct afu, irqpoll); 1251cba06e6dSMatthew R. Ochs unsigned long hrrq_flags; 1252cba06e6dSMatthew R. Ochs LIST_HEAD(doneq); 1253cba06e6dSMatthew R. Ochs int num_entries = 0; 1254cba06e6dSMatthew R. Ochs 1255cba06e6dSMatthew R. Ochs spin_lock_irqsave(&afu->hrrq_slock, hrrq_flags); 1256cba06e6dSMatthew R. Ochs 1257cba06e6dSMatthew R. Ochs num_entries = process_hrrq(afu, &doneq, budget); 1258cba06e6dSMatthew R. Ochs if (num_entries < budget) 1259cba06e6dSMatthew R. Ochs irq_poll_complete(irqpoll); 1260cba06e6dSMatthew R. Ochs 1261cba06e6dSMatthew R. Ochs spin_unlock_irqrestore(&afu->hrrq_slock, hrrq_flags); 1262cba06e6dSMatthew R. Ochs 1263cba06e6dSMatthew R. Ochs process_cmd_doneq(&doneq); 1264cba06e6dSMatthew R. Ochs return num_entries; 1265cba06e6dSMatthew R. Ochs } 1266cba06e6dSMatthew R. Ochs 1267cba06e6dSMatthew R. Ochs /** 126876a6ebbeSMatthew R. Ochs * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path) 126976a6ebbeSMatthew R. Ochs * @irq: Interrupt number. 127076a6ebbeSMatthew R. Ochs * @data: Private data provided at interrupt registration, the AFU. 127176a6ebbeSMatthew R. Ochs * 1272f918b4a8SMatthew R. Ochs * Return: IRQ_HANDLED or IRQ_NONE when no ready entries found. 127376a6ebbeSMatthew R. Ochs */ 127476a6ebbeSMatthew R. Ochs static irqreturn_t cxlflash_rrq_irq(int irq, void *data) 127576a6ebbeSMatthew R. Ochs { 127676a6ebbeSMatthew R. Ochs struct afu *afu = (struct afu *)data; 1277f918b4a8SMatthew R. Ochs unsigned long hrrq_flags; 1278f918b4a8SMatthew R. Ochs LIST_HEAD(doneq); 1279f918b4a8SMatthew R. Ochs int num_entries = 0; 128076a6ebbeSMatthew R. Ochs 1281f918b4a8SMatthew R. Ochs spin_lock_irqsave(&afu->hrrq_slock, hrrq_flags); 1282cba06e6dSMatthew R. Ochs 1283cba06e6dSMatthew R. Ochs if (afu_is_irqpoll_enabled(afu)) { 1284cba06e6dSMatthew R. Ochs irq_poll_sched(&afu->irqpoll); 1285cba06e6dSMatthew R. Ochs spin_unlock_irqrestore(&afu->hrrq_slock, hrrq_flags); 1286cba06e6dSMatthew R. Ochs return IRQ_HANDLED; 1287cba06e6dSMatthew R. Ochs } 1288cba06e6dSMatthew R. Ochs 1289cba06e6dSMatthew R. 
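/*
 * The scheduling branch above is the standard irq_poll handoff: when
 * polling is enabled the hard irq handler merely queues the poller and
 * returns, and harvesting runs from softirq context via cxlflash_irqpoll()
 * with the budget enforced by the core. A condensed sketch of the three
 * touch points of that kernel API (the example_* names are illustrative):
 *
 *	static int example_poll(struct irq_poll *iop, int budget)
 *	{
 *		int done = example_harvest(budget);	// hypothetical
 *
 *		if (done < budget)
 *			irq_poll_complete(iop);	// back to irq-driven mode
 *		return done;
 *	}
 *
 *	irq_poll_init(&iop, weight, example_poll);	// once, at setup
 *	irq_poll_sched(&iop);				// from the hard irq
 *
 * With polling disabled, execution instead falls through to the
 * unbudgeted harvest below (a budget of -1).
 */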
Ochs num_entries = process_hrrq(afu, &doneq, -1); 1290f918b4a8SMatthew R. Ochs spin_unlock_irqrestore(&afu->hrrq_slock, hrrq_flags); 1291f918b4a8SMatthew R. Ochs 1292f918b4a8SMatthew R. Ochs if (num_entries == 0) 1293f918b4a8SMatthew R. Ochs return IRQ_NONE; 1294f918b4a8SMatthew R. Ochs 1295f918b4a8SMatthew R. Ochs process_cmd_doneq(&doneq); 1296c21e0bbfSMatthew R. Ochs return IRQ_HANDLED; 1297c21e0bbfSMatthew R. Ochs } 1298c21e0bbfSMatthew R. Ochs 1299c21e0bbfSMatthew R. Ochs /** 1300c21e0bbfSMatthew R. Ochs * cxlflash_async_err_irq() - interrupt handler for asynchronous errors 1301c21e0bbfSMatthew R. Ochs * @irq: Interrupt number. 1302c21e0bbfSMatthew R. Ochs * @data: Private data provided at interrupt registration, the AFU. 1303c21e0bbfSMatthew R. Ochs * 1304c21e0bbfSMatthew R. Ochs * Return: Always return IRQ_HANDLED. 1305c21e0bbfSMatthew R. Ochs */ 1306c21e0bbfSMatthew R. Ochs static irqreturn_t cxlflash_async_err_irq(int irq, void *data) 1307c21e0bbfSMatthew R. Ochs { 1308c21e0bbfSMatthew R. Ochs struct afu *afu = (struct afu *)data; 13094392ba49SMatthew R. Ochs struct cxlflash_cfg *cfg = afu->parent; 13104392ba49SMatthew R. Ochs struct device *dev = &cfg->dev->dev; 1311c21e0bbfSMatthew R. Ochs u64 reg_unmasked; 1312c21e0bbfSMatthew R. Ochs const struct asyc_intr_info *info; 13131786f4a0SMatthew R. Ochs struct sisl_global_map __iomem *global = &afu->afu_map->global; 13140aa14887SMatthew R. Ochs __be64 __iomem *fc_port_regs; 1315c21e0bbfSMatthew R. Ochs u64 reg; 1316c21e0bbfSMatthew R. Ochs u8 port; 1317c21e0bbfSMatthew R. Ochs int i; 1318c21e0bbfSMatthew R. Ochs 1319c21e0bbfSMatthew R. Ochs reg = readq_be(&global->regs.aintr_status); 1320c21e0bbfSMatthew R. Ochs reg_unmasked = (reg & SISL_ASTATUS_UNMASK); 1321c21e0bbfSMatthew R. Ochs 1322c21e0bbfSMatthew R. Ochs if (reg_unmasked == 0) { 1323fb67d44dSMatthew R. Ochs dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n", 1324c21e0bbfSMatthew R. Ochs __func__, reg); 1325c21e0bbfSMatthew R. Ochs goto out; 1326c21e0bbfSMatthew R. Ochs } 1327c21e0bbfSMatthew R. Ochs 1328f15fbf8dSMatthew R. Ochs /* FYI, it is 'okay' to clear AFU status before FC_ERROR */ 1329c21e0bbfSMatthew R. Ochs writeq_be(reg_unmasked, &global->regs.aintr_clear); 1330c21e0bbfSMatthew R. Ochs 1331f15fbf8dSMatthew R. Ochs /* Check each bit that is on */ 1332c21e0bbfSMatthew R. Ochs for (i = 0; reg_unmasked; i++, reg_unmasked = (reg_unmasked >> 1)) { 1333c21e0bbfSMatthew R. Ochs info = find_ainfo(1ULL << i); 133416798d34SMatthew R. Ochs if (((reg_unmasked & 0x1) == 0) || !info) 1335c21e0bbfSMatthew R. Ochs continue; 1336c21e0bbfSMatthew R. Ochs 1337c21e0bbfSMatthew R. Ochs port = info->port; 13380aa14887SMatthew R. Ochs fc_port_regs = get_fc_port_regs(cfg, port); 1339c21e0bbfSMatthew R. Ochs 1340fb67d44dSMatthew R. Ochs dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n", 1341c21e0bbfSMatthew R. Ochs __func__, port, info->desc, 13420aa14887SMatthew R. Ochs readq_be(&fc_port_regs[FC_STATUS / 8])); 1343c21e0bbfSMatthew R. Ochs 1344c21e0bbfSMatthew R. Ochs /* 1345f15fbf8dSMatthew R. Ochs * Do link reset first, some OTHER errors will set FC_ERROR 1346c21e0bbfSMatthew R. Ochs * again if cleared before or w/o a reset 1347c21e0bbfSMatthew R. Ochs */ 1348c21e0bbfSMatthew R. Ochs if (info->action & LINK_RESET) { 13494392ba49SMatthew R. Ochs dev_err(dev, "%s: FC Port %d: resetting link\n", 1350c21e0bbfSMatthew R. Ochs __func__, port); 1351c21e0bbfSMatthew R. Ochs cfg->lr_state = LINK_RESET_REQUIRED; 1352c21e0bbfSMatthew R. 
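/*
 * The reset itself is deliberately deferred: afu_link_reset() can wait
 * several seconds for the port status transitions and issues a blocking
 * AFU sync, neither of which is permitted in hard-irq context. The
 * handler therefore only records the request (state above, port below)
 * and kicks the work queue to perform it.
 */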
Ochs cfg->lr_port = port; 1353c21e0bbfSMatthew R. Ochs schedule_work(&cfg->work_q); 1354c21e0bbfSMatthew R. Ochs } 1355c21e0bbfSMatthew R. Ochs 1356c21e0bbfSMatthew R. Ochs if (info->action & CLR_FC_ERROR) { 13570aa14887SMatthew R. Ochs reg = readq_be(&fc_port_regs[FC_ERROR / 8]); 1358c21e0bbfSMatthew R. Ochs 1359c21e0bbfSMatthew R. Ochs /* 1360f15fbf8dSMatthew R. Ochs * Since all errors are unmasked, FC_ERROR and FC_ERRCAP 1361c21e0bbfSMatthew R. Ochs * should be the same and tracing one is sufficient. 1362c21e0bbfSMatthew R. Ochs */ 1363c21e0bbfSMatthew R. Ochs 1364fb67d44dSMatthew R. Ochs dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n", 1365c21e0bbfSMatthew R. Ochs __func__, port, reg); 1366c21e0bbfSMatthew R. Ochs 13670aa14887SMatthew R. Ochs writeq_be(reg, &fc_port_regs[FC_ERROR / 8]); 13680aa14887SMatthew R. Ochs writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]); 1369c21e0bbfSMatthew R. Ochs } 1370ef51074aSMatthew R. Ochs 1371ef51074aSMatthew R. Ochs if (info->action & SCAN_HOST) { 1372ef51074aSMatthew R. Ochs atomic_inc(&cfg->scan_host_needed); 1373ef51074aSMatthew R. Ochs schedule_work(&cfg->work_q); 1374ef51074aSMatthew R. Ochs } 1375c21e0bbfSMatthew R. Ochs } 1376c21e0bbfSMatthew R. Ochs 1377c21e0bbfSMatthew R. Ochs out: 1378c21e0bbfSMatthew R. Ochs return IRQ_HANDLED; 1379c21e0bbfSMatthew R. Ochs } 1380c21e0bbfSMatthew R. Ochs 1381c21e0bbfSMatthew R. Ochs /** 1382c21e0bbfSMatthew R. Ochs * start_context() - starts the master context 13831284fb0cSMatthew R. Ochs * @cfg: Internal structure associated with the host. 1384c21e0bbfSMatthew R. Ochs * 1385c21e0bbfSMatthew R. Ochs * Return: A success or failure value from CXL services. 1386c21e0bbfSMatthew R. Ochs */ 1387c21e0bbfSMatthew R. Ochs static int start_context(struct cxlflash_cfg *cfg) 1388c21e0bbfSMatthew R. Ochs { 1389fb67d44dSMatthew R. Ochs struct device *dev = &cfg->dev->dev; 1390c21e0bbfSMatthew R. Ochs int rc = 0; 1391c21e0bbfSMatthew R. Ochs 1392c21e0bbfSMatthew R. Ochs rc = cxl_start_context(cfg->mcctx, 1393c21e0bbfSMatthew R. Ochs cfg->afu->work.work_element_descriptor, 1394c21e0bbfSMatthew R. Ochs NULL); 1395c21e0bbfSMatthew R. Ochs 1396fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 1397c21e0bbfSMatthew R. Ochs return rc; 1398c21e0bbfSMatthew R. Ochs } 1399c21e0bbfSMatthew R. Ochs 1400c21e0bbfSMatthew R. Ochs /** 1401c21e0bbfSMatthew R. Ochs * read_vpd() - obtains the WWPNs from VPD 14021284fb0cSMatthew R. Ochs * @cfg: Internal structure associated with the host. 140378ae028eSMatthew R. Ochs * @wwpn: Array of size MAX_FC_PORTS to pass back WWPNs 1404c21e0bbfSMatthew R. Ochs * 14051284fb0cSMatthew R. Ochs * Return: 0 on success, -errno on failure 1406c21e0bbfSMatthew R. Ochs */ 1407c21e0bbfSMatthew R. Ochs static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[]) 1408c21e0bbfSMatthew R. Ochs { 1409fb67d44dSMatthew R. Ochs struct device *dev = &cfg->dev->dev; 1410fb67d44dSMatthew R. Ochs struct pci_dev *pdev = cfg->dev; 1411c21e0bbfSMatthew R. Ochs int rc = 0; 1412c21e0bbfSMatthew R. Ochs int ro_start, ro_size, i, j, k; 1413c21e0bbfSMatthew R. Ochs ssize_t vpd_size; 1414c21e0bbfSMatthew R. Ochs char vpd_data[CXLFLASH_VPD_LEN]; 1415c21e0bbfSMatthew R. Ochs char tmp_buf[WWPN_BUF_LEN] = { 0 }; 141678ae028eSMatthew R. Ochs char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6" }; 1417c21e0bbfSMatthew R. Ochs 1418c21e0bbfSMatthew R. Ochs /* Get the VPD data from the device */ 1419fb67d44dSMatthew R. Ochs vpd_size = cxl_read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data)); 1420c21e0bbfSMatthew R. 
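/*
 * Orientation for the parsing below -- a simplified picture of the PCI
 * VPD image being walked (the "V5"/"V6" keywords are the cxlflash WWPN
 * fields named in wwpn_vpd_tags[]; header sizes per the PCI spec):
 *
 *	+----------------------------+
 *	| ... (ID string, etc.)      |
 *	+----------------------------+ <- ro_start: PCI_VPD_LRDT_RO_DATA tag,
 *	| LRDT header (3 bytes)      |    located via pci_vpd_find_tag()
 *	+----------------------------+ <- ro_start + PCI_VPD_LRDT_TAG_SIZE
 *	| "V5" | len | 16 hex chars  |    port 0 WWPN keyword field
 *	| "V6" | len | 16 hex chars  |    port 1 WWPN keyword field
 *	| ...                        |
 *	+----------------------------+
 */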
Ochs if (unlikely(vpd_size <= 0)) { 1421fb67d44dSMatthew R. Ochs dev_err(dev, "%s: Unable to read VPD (size = %ld)\n", 1422c21e0bbfSMatthew R. Ochs __func__, vpd_size); 1423c21e0bbfSMatthew R. Ochs rc = -ENODEV; 1424c21e0bbfSMatthew R. Ochs goto out; 1425c21e0bbfSMatthew R. Ochs } 1426c21e0bbfSMatthew R. Ochs 1427c21e0bbfSMatthew R. Ochs /* Get the read only section offset */ 1428c21e0bbfSMatthew R. Ochs ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, 1429c21e0bbfSMatthew R. Ochs PCI_VPD_LRDT_RO_DATA); 1430c21e0bbfSMatthew R. Ochs if (unlikely(ro_start < 0)) { 1431fb67d44dSMatthew R. Ochs dev_err(dev, "%s: VPD Read-only data not found\n", __func__); 1432c21e0bbfSMatthew R. Ochs rc = -ENODEV; 1433c21e0bbfSMatthew R. Ochs goto out; 1434c21e0bbfSMatthew R. Ochs } 1435c21e0bbfSMatthew R. Ochs 1436c21e0bbfSMatthew R. Ochs /* Get the read only section size, cap when extends beyond read VPD */ 1437c21e0bbfSMatthew R. Ochs ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]); 1438c21e0bbfSMatthew R. Ochs j = ro_size; 1439c21e0bbfSMatthew R. Ochs i = ro_start + PCI_VPD_LRDT_TAG_SIZE; 1440c21e0bbfSMatthew R. Ochs if (unlikely((i + j) > vpd_size)) { 1441fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: Might need to read more VPD (%d > %ld)\n", 1442c21e0bbfSMatthew R. Ochs __func__, (i + j), vpd_size); 1443c21e0bbfSMatthew R. Ochs ro_size = vpd_size - i; 1444c21e0bbfSMatthew R. Ochs } 1445c21e0bbfSMatthew R. Ochs 1446c21e0bbfSMatthew R. Ochs /* 1447c21e0bbfSMatthew R. Ochs * Find the offset of the WWPN tag within the read only 1448c21e0bbfSMatthew R. Ochs * VPD data and validate the found field (partials are 1449c21e0bbfSMatthew R. Ochs * no good to us). Convert the ASCII data to an integer 1450c21e0bbfSMatthew R. Ochs * value. Note that we must copy to a temporary buffer 1451c21e0bbfSMatthew R. Ochs * because the conversion service requires that the ASCII 1452c21e0bbfSMatthew R. Ochs * string be terminated. 1453c21e0bbfSMatthew R. Ochs */ 145478ae028eSMatthew R. Ochs for (k = 0; k < cfg->num_fc_ports; k++) { 1455c21e0bbfSMatthew R. Ochs j = ro_size; 1456c21e0bbfSMatthew R. Ochs i = ro_start + PCI_VPD_LRDT_TAG_SIZE; 1457c21e0bbfSMatthew R. Ochs 1458c21e0bbfSMatthew R. Ochs i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]); 1459c21e0bbfSMatthew R. Ochs if (unlikely(i < 0)) { 1460fb67d44dSMatthew R. Ochs dev_err(dev, "%s: Port %d WWPN not found in VPD\n", 1461fb67d44dSMatthew R. Ochs __func__, k); 1462c21e0bbfSMatthew R. Ochs rc = -ENODEV; 1463c21e0bbfSMatthew R. Ochs goto out; 1464c21e0bbfSMatthew R. Ochs } 1465c21e0bbfSMatthew R. Ochs 1466c21e0bbfSMatthew R. Ochs j = pci_vpd_info_field_size(&vpd_data[i]); 1467c21e0bbfSMatthew R. Ochs i += PCI_VPD_INFO_FLD_HDR_SIZE; 1468c21e0bbfSMatthew R. Ochs if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) { 1469fb67d44dSMatthew R. Ochs dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n", 1470c21e0bbfSMatthew R. Ochs __func__, k); 1471c21e0bbfSMatthew R. Ochs rc = -ENODEV; 1472c21e0bbfSMatthew R. Ochs goto out; 1473c21e0bbfSMatthew R. Ochs } 1474c21e0bbfSMatthew R. Ochs 1475c21e0bbfSMatthew R. Ochs memcpy(tmp_buf, &vpd_data[i], WWPN_LEN); 1476c21e0bbfSMatthew R. Ochs rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]); 1477c21e0bbfSMatthew R. Ochs if (unlikely(rc)) { 1478fb67d44dSMatthew R. Ochs dev_err(dev, "%s: WWPN conversion failed for port %d\n", 1479fb67d44dSMatthew R. Ochs __func__, k); 1480c21e0bbfSMatthew R. Ochs rc = -ENODEV; 1481c21e0bbfSMatthew R. Ochs goto out; 1482c21e0bbfSMatthew R. Ochs } 148378ae028eSMatthew R. 
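/*
 * Note on the conversion above: the second argument of kstrtoul() is the
 * numeric base, so WWPN_LEN (16) does double duty -- the 16-character
 * ASCII WWPN is parsed as hexadecimal.
 */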
Ochs 148478ae028eSMatthew R. Ochs dev_dbg(dev, "%s: wwpn%d=%016llx\n", __func__, k, wwpn[k]); 1485c21e0bbfSMatthew R. Ochs } 1486c21e0bbfSMatthew R. Ochs 1487c21e0bbfSMatthew R. Ochs out: 1488fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 1489c21e0bbfSMatthew R. Ochs return rc; 1490c21e0bbfSMatthew R. Ochs } 1491c21e0bbfSMatthew R. Ochs 1492c21e0bbfSMatthew R. Ochs /** 1493c21e0bbfSMatthew R. Ochs * init_pcr() - initialize the provisioning and control registers 14941284fb0cSMatthew R. Ochs * @cfg: Internal structure associated with the host. 1495c21e0bbfSMatthew R. Ochs * 1496c21e0bbfSMatthew R. Ochs * Also sets up fast access to the mapped registers and initializes AFU 1497c21e0bbfSMatthew R. Ochs * command fields that never change. 1498c21e0bbfSMatthew R. Ochs */ 149915305514SMatthew R. Ochs static void init_pcr(struct cxlflash_cfg *cfg) 1500c21e0bbfSMatthew R. Ochs { 1501c21e0bbfSMatthew R. Ochs struct afu *afu = cfg->afu; 15021786f4a0SMatthew R. Ochs struct sisl_ctrl_map __iomem *ctrl_map; 1503c21e0bbfSMatthew R. Ochs int i; 1504c21e0bbfSMatthew R. Ochs 1505c21e0bbfSMatthew R. Ochs for (i = 0; i < MAX_CONTEXT; i++) { 1506c21e0bbfSMatthew R. Ochs ctrl_map = &afu->afu_map->ctrls[i].ctrl; 1507f15fbf8dSMatthew R. Ochs /* Disrupt any clients that could be running */ 1508c21e0bbfSMatthew R. Ochs /* e.g. clients that survived a master restart */ 1509c21e0bbfSMatthew R. Ochs writeq_be(0, &ctrl_map->rht_start); 1510c21e0bbfSMatthew R. Ochs writeq_be(0, &ctrl_map->rht_cnt_id); 1511c21e0bbfSMatthew R. Ochs writeq_be(0, &ctrl_map->ctx_cap); 1512c21e0bbfSMatthew R. Ochs } 1513c21e0bbfSMatthew R. Ochs 1514f15fbf8dSMatthew R. Ochs /* Copy frequently used fields into afu */ 1515c21e0bbfSMatthew R. Ochs afu->ctx_hndl = (u16) cxl_process_element(cfg->mcctx); 1516c21e0bbfSMatthew R. Ochs afu->host_map = &afu->afu_map->hosts[afu->ctx_hndl].host; 1517c21e0bbfSMatthew R. Ochs afu->ctrl_map = &afu->afu_map->ctrls[afu->ctx_hndl].ctrl; 1518c21e0bbfSMatthew R. Ochs 1519c21e0bbfSMatthew R. Ochs /* Program the Endian Control for the master context */ 1520c21e0bbfSMatthew R. Ochs writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl); 1521c21e0bbfSMatthew R. Ochs } 1522c21e0bbfSMatthew R. Ochs 1523c21e0bbfSMatthew R. Ochs /** 1524c21e0bbfSMatthew R. Ochs * init_global() - initialize AFU global registers 15251284fb0cSMatthew R. Ochs * @cfg: Internal structure associated with the host. 1526c21e0bbfSMatthew R. Ochs */ 152715305514SMatthew R. Ochs static int init_global(struct cxlflash_cfg *cfg) 1528c21e0bbfSMatthew R. Ochs { 1529c21e0bbfSMatthew R. Ochs struct afu *afu = cfg->afu; 15304392ba49SMatthew R. Ochs struct device *dev = &cfg->dev->dev; 15310aa14887SMatthew R. Ochs __be64 __iomem *fc_port_regs; 153278ae028eSMatthew R. Ochs u64 wwpn[MAX_FC_PORTS]; /* wwpn of AFU ports */ 1533c21e0bbfSMatthew R. Ochs int i = 0, num_ports = 0; 1534c21e0bbfSMatthew R. Ochs int rc = 0; 1535c21e0bbfSMatthew R. Ochs u64 reg; 1536c21e0bbfSMatthew R. Ochs 1537c21e0bbfSMatthew R. Ochs rc = read_vpd(cfg, &wwpn[0]); 1538c21e0bbfSMatthew R. Ochs if (rc) { 15394392ba49SMatthew R. Ochs dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc); 1540c21e0bbfSMatthew R. Ochs goto out; 1541c21e0bbfSMatthew R. Ochs } 1542c21e0bbfSMatthew R. Ochs 1543696d0b0cSMatthew R. Ochs /* Set up RRQ and SQ in AFU for master issued cmds */ 1544c21e0bbfSMatthew R. Ochs writeq_be((u64) afu->hrrq_start, &afu->host_map->rrq_start); 1545c21e0bbfSMatthew R. 
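/*
 * Only the ring bounds are handed to the AFU (start above, end just
 * below); the consumer cursor (hrrq_curr) and the toggle stay host-side,
 * which is what lets process_hrrq() harvest completions without any
 * further MMIO handshaking.
 */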
Ochs writeq_be((u64) afu->hrrq_end, &afu->host_map->rrq_end); 1546c21e0bbfSMatthew R. Ochs 1547696d0b0cSMatthew R. Ochs if (afu_is_sq_cmd_mode(afu)) { 1548696d0b0cSMatthew R. Ochs writeq_be((u64)afu->hsq_start, &afu->host_map->sq_start); 1549696d0b0cSMatthew R. Ochs writeq_be((u64)afu->hsq_end, &afu->host_map->sq_end); 1550696d0b0cSMatthew R. Ochs } 1551696d0b0cSMatthew R. Ochs 1552c21e0bbfSMatthew R. Ochs /* AFU configuration */ 1553c21e0bbfSMatthew R. Ochs reg = readq_be(&afu->afu_map->global.regs.afu_config); 1554c21e0bbfSMatthew R. Ochs reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN; 1555c21e0bbfSMatthew R. Ochs /* enable all auto retry options and control endianness */ 1556c21e0bbfSMatthew R. Ochs /* leave others at default: */ 1557c21e0bbfSMatthew R. Ochs /* CTX_CAP write protected, mbox_r does not clear on read and */ 1558c21e0bbfSMatthew R. Ochs /* checker on if dual afu */ 1559c21e0bbfSMatthew R. Ochs writeq_be(reg, &afu->afu_map->global.regs.afu_config); 1560c21e0bbfSMatthew R. Ochs 1561f15fbf8dSMatthew R. Ochs /* Global port select: select either port */ 1562c21e0bbfSMatthew R. Ochs if (afu->internal_lun) { 1563f15fbf8dSMatthew R. Ochs /* Only use port 0 */ 1564c21e0bbfSMatthew R. Ochs writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel); 156578ae028eSMatthew R. Ochs num_ports = 0; 1566c21e0bbfSMatthew R. Ochs } else { 15678fa4f177SMatthew R. Ochs writeq_be(PORT_MASK(cfg->num_fc_ports), 15688fa4f177SMatthew R. Ochs &afu->afu_map->global.regs.afu_port_sel); 156978ae028eSMatthew R. Ochs num_ports = cfg->num_fc_ports; 1570c21e0bbfSMatthew R. Ochs } 1571c21e0bbfSMatthew R. Ochs 1572c21e0bbfSMatthew R. Ochs for (i = 0; i < num_ports; i++) { 15730aa14887SMatthew R. Ochs fc_port_regs = get_fc_port_regs(cfg, i); 15740aa14887SMatthew R. Ochs 1575f15fbf8dSMatthew R. Ochs /* Unmask all errors (but they are still masked at AFU) */ 15760aa14887SMatthew R. Ochs writeq_be(0, &fc_port_regs[FC_ERRMSK / 8]); 1577f15fbf8dSMatthew R. Ochs /* Clear CRC error cnt & set a threshold */ 15780aa14887SMatthew R. Ochs (void)readq_be(&fc_port_regs[FC_CNT_CRCERR / 8]); 15790aa14887SMatthew R. Ochs writeq_be(MC_CRC_THRESH, &fc_port_regs[FC_CRC_THRESH / 8]); 1580c21e0bbfSMatthew R. Ochs 1581f15fbf8dSMatthew R. Ochs /* Set WWPNs. If already programmed, wwpn[i] is 0 */ 1582f8013261SMatthew R. Ochs if (wwpn[i] != 0) 15830aa14887SMatthew R. Ochs afu_set_wwpn(afu, i, &fc_port_regs[0], wwpn[i]); 1584c21e0bbfSMatthew R. Ochs /* Programming WWPN back to back causes additional 1585c21e0bbfSMatthew R. Ochs * offline/online transitions and a PLOGI 1586c21e0bbfSMatthew R. Ochs */ 1587c21e0bbfSMatthew R. Ochs msleep(100); 1588c21e0bbfSMatthew R. Ochs } 1589c21e0bbfSMatthew R. Ochs 1590f15fbf8dSMatthew R. Ochs /* Set up master's own CTX_CAP to allow real mode, host translation */ 1591f15fbf8dSMatthew R. Ochs /* tables, afu cmds and read/write GSCSI cmds. */ 1592c21e0bbfSMatthew R. Ochs /* First, unlock ctx_cap write by reading mbox */ 1593c21e0bbfSMatthew R. Ochs (void)readq_be(&afu->ctrl_map->mbox_r); /* unlock ctx_cap */ 1594c21e0bbfSMatthew R. Ochs writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE | 1595c21e0bbfSMatthew R. Ochs SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD | 1596c21e0bbfSMatthew R. Ochs SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD), 1597c21e0bbfSMatthew R. Ochs &afu->ctrl_map->ctx_cap); 1598f15fbf8dSMatthew R. Ochs /* Initialize heartbeat */ 1599c21e0bbfSMatthew R. Ochs afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb); 1600c21e0bbfSMatthew R. Ochs out: 1601c21e0bbfSMatthew R. 
Ochs return rc; 1602c21e0bbfSMatthew R. Ochs } 1603c21e0bbfSMatthew R. Ochs 1604c21e0bbfSMatthew R. Ochs /** 1605c21e0bbfSMatthew R. Ochs * start_afu() - initializes and starts the AFU 16061284fb0cSMatthew R. Ochs * @cfg: Internal structure associated with the host. 1607c21e0bbfSMatthew R. Ochs */ 1608c21e0bbfSMatthew R. Ochs static int start_afu(struct cxlflash_cfg *cfg) 1609c21e0bbfSMatthew R. Ochs { 1610c21e0bbfSMatthew R. Ochs struct afu *afu = cfg->afu; 1611fb67d44dSMatthew R. Ochs struct device *dev = &cfg->dev->dev; 1612c21e0bbfSMatthew R. Ochs int rc = 0; 1613c21e0bbfSMatthew R. Ochs 1614c21e0bbfSMatthew R. Ochs init_pcr(cfg); 1615c21e0bbfSMatthew R. Ochs 1616f918b4a8SMatthew R. Ochs /* Initialize RRQ */ 1617af10483eSMatthew R. Ochs memset(&afu->rrq_entry, 0, sizeof(afu->rrq_entry)); 1618c21e0bbfSMatthew R. Ochs afu->hrrq_start = &afu->rrq_entry[0]; 1619c21e0bbfSMatthew R. Ochs afu->hrrq_end = &afu->rrq_entry[NUM_RRQ_ENTRY - 1]; 1620c21e0bbfSMatthew R. Ochs afu->hrrq_curr = afu->hrrq_start; 1621c21e0bbfSMatthew R. Ochs afu->toggle = 1; 1622f918b4a8SMatthew R. Ochs spin_lock_init(&afu->hrrq_slock); 1623c21e0bbfSMatthew R. Ochs 1624696d0b0cSMatthew R. Ochs /* Initialize SQ */ 1625696d0b0cSMatthew R. Ochs if (afu_is_sq_cmd_mode(afu)) { 1626696d0b0cSMatthew R. Ochs memset(&afu->sq, 0, sizeof(afu->sq)); 1627696d0b0cSMatthew R. Ochs afu->hsq_start = &afu->sq[0]; 1628696d0b0cSMatthew R. Ochs afu->hsq_end = &afu->sq[NUM_SQ_ENTRY - 1]; 1629696d0b0cSMatthew R. Ochs afu->hsq_curr = afu->hsq_start; 1630696d0b0cSMatthew R. Ochs 1631696d0b0cSMatthew R. Ochs spin_lock_init(&afu->hsq_slock); 1632696d0b0cSMatthew R. Ochs atomic_set(&afu->hsq_credits, NUM_SQ_ENTRY - 1); 1633696d0b0cSMatthew R. Ochs } 1634696d0b0cSMatthew R. Ochs 1635cba06e6dSMatthew R. Ochs /* Initialize IRQ poll */ 1636cba06e6dSMatthew R. Ochs if (afu_is_irqpoll_enabled(afu)) 1637cba06e6dSMatthew R. Ochs irq_poll_init(&afu->irqpoll, afu->irqpoll_weight, 1638cba06e6dSMatthew R. Ochs cxlflash_irqpoll); 1639cba06e6dSMatthew R. Ochs 1640c21e0bbfSMatthew R. Ochs rc = init_global(cfg); 1641c21e0bbfSMatthew R. Ochs 1642fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 1643c21e0bbfSMatthew R. Ochs return rc; 1644c21e0bbfSMatthew R. Ochs } 1645c21e0bbfSMatthew R. Ochs 1646c21e0bbfSMatthew R. Ochs /** 16479526f360SManoj N. Kumar * init_intr() - setup interrupt handlers for the master context 16481284fb0cSMatthew R. Ochs * @cfg: Internal structure associated with the host. * @ctx: Previously obtained CXL context to which the interrupts are mapped. 1649c21e0bbfSMatthew R. Ochs * 16501284fb0cSMatthew R. Ochs * Return: UNDO_NOOP on success, the undo level to pass to term_intr() on failure 1651c21e0bbfSMatthew R. Ochs */ 16529526f360SManoj N. Kumar static enum undo_level init_intr(struct cxlflash_cfg *cfg, 16539526f360SManoj N. Kumar struct cxl_context *ctx) 1654c21e0bbfSMatthew R. Ochs { 1655c21e0bbfSMatthew R. Ochs struct afu *afu = cfg->afu; 16569526f360SManoj N. Kumar struct device *dev = &cfg->dev->dev; 1657c21e0bbfSMatthew R. Ochs int rc = 0; 16589526f360SManoj N. Kumar enum undo_level level = UNDO_NOOP; 1659c21e0bbfSMatthew R. Ochs 1660c21e0bbfSMatthew R. Ochs rc = cxl_allocate_afu_irqs(ctx, 3); 1661c21e0bbfSMatthew R. Ochs if (unlikely(rc)) { 1662fb67d44dSMatthew R. Ochs dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n", 1663c21e0bbfSMatthew R. Ochs __func__, rc); 16649526f360SManoj N. Kumar level = UNDO_NOOP; 1665c21e0bbfSMatthew R. Ochs goto out; 1666c21e0bbfSMatthew R. Ochs } 1667c21e0bbfSMatthew R. Ochs 1668c21e0bbfSMatthew R. Ochs rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, afu, 1669c21e0bbfSMatthew R.
Ochs "SISL_MSI_SYNC_ERROR"); 1670c21e0bbfSMatthew R. Ochs if (unlikely(rc <= 0)) { 1671fb67d44dSMatthew R. Ochs dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__); 1672c21e0bbfSMatthew R. Ochs level = FREE_IRQ; 1673c21e0bbfSMatthew R. Ochs goto out; 1674c21e0bbfSMatthew R. Ochs } 1675c21e0bbfSMatthew R. Ochs 1676c21e0bbfSMatthew R. Ochs rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, afu, 1677c21e0bbfSMatthew R. Ochs "SISL_MSI_RRQ_UPDATED"); 1678c21e0bbfSMatthew R. Ochs if (unlikely(rc <= 0)) { 1679fb67d44dSMatthew R. Ochs dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__); 1680c21e0bbfSMatthew R. Ochs level = UNMAP_ONE; 1681c21e0bbfSMatthew R. Ochs goto out; 1682c21e0bbfSMatthew R. Ochs } 1683c21e0bbfSMatthew R. Ochs 1684c21e0bbfSMatthew R. Ochs rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, afu, 1685c21e0bbfSMatthew R. Ochs "SISL_MSI_ASYNC_ERROR"); 1686c21e0bbfSMatthew R. Ochs if (unlikely(rc <= 0)) { 1687fb67d44dSMatthew R. Ochs dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__); 1688c21e0bbfSMatthew R. Ochs level = UNMAP_TWO; 1689c21e0bbfSMatthew R. Ochs goto out; 1690c21e0bbfSMatthew R. Ochs } 16919526f360SManoj N. Kumar out: 16929526f360SManoj N. Kumar return level; 16939526f360SManoj N. Kumar } 1694c21e0bbfSMatthew R. Ochs 16959526f360SManoj N. Kumar /** 16969526f360SManoj N. Kumar * init_mc() - create and register as the master context 16979526f360SManoj N. Kumar * @cfg: Internal structure associated with the host. 16989526f360SManoj N. Kumar * 16999526f360SManoj N. Kumar * Return: 0 on success, -errno on failure 17009526f360SManoj N. Kumar */ 17019526f360SManoj N. Kumar static int init_mc(struct cxlflash_cfg *cfg) 17029526f360SManoj N. Kumar { 17039526f360SManoj N. Kumar struct cxl_context *ctx; 17049526f360SManoj N. Kumar struct device *dev = &cfg->dev->dev; 17059526f360SManoj N. Kumar int rc = 0; 17069526f360SManoj N. Kumar enum undo_level level; 17079526f360SManoj N. Kumar 17089526f360SManoj N. Kumar ctx = cxl_get_context(cfg->dev); 17099526f360SManoj N. Kumar if (unlikely(!ctx)) { 17109526f360SManoj N. Kumar rc = -ENOMEM; 17119526f360SManoj N. Kumar goto ret; 17129526f360SManoj N. Kumar } 17139526f360SManoj N. Kumar cfg->mcctx = ctx; 17149526f360SManoj N. Kumar 17159526f360SManoj N. Kumar /* Set it up as a master with the CXL */ 17169526f360SManoj N. Kumar cxl_set_master(ctx); 17179526f360SManoj N. Kumar 17189526f360SManoj N. Kumar /* During initialization reset the AFU to start from a clean slate */ 17199526f360SManoj N. Kumar rc = cxl_afu_reset(cfg->mcctx); 17209526f360SManoj N. Kumar if (unlikely(rc)) { 1721fb67d44dSMatthew R. Ochs dev_err(dev, "%s: AFU reset failed rc=%d\n", __func__, rc); 17229526f360SManoj N. Kumar goto ret; 17239526f360SManoj N. Kumar } 17249526f360SManoj N. Kumar 17259526f360SManoj N. Kumar level = init_intr(cfg, ctx); 17269526f360SManoj N. Kumar if (unlikely(level)) { 1727fb67d44dSMatthew R. Ochs dev_err(dev, "%s: interrupt init failed rc=%d\n", __func__, rc); 17289526f360SManoj N. Kumar goto out; 17299526f360SManoj N. Kumar } 1730c21e0bbfSMatthew R. Ochs 1731c21e0bbfSMatthew R. Ochs /* This performs the equivalent of the CXL_IOCTL_START_WORK. 1732c21e0bbfSMatthew R. Ochs * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process 1733c21e0bbfSMatthew R. Ochs * element (pe) that is embedded in the context (ctx) 1734c21e0bbfSMatthew R. Ochs */ 1735c21e0bbfSMatthew R. Ochs rc = start_context(cfg); 1736c21e0bbfSMatthew R. Ochs if (unlikely(rc)) { 1737c21e0bbfSMatthew R. 
Ochs dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc); 1738c21e0bbfSMatthew R. Ochs level = UNMAP_THREE; 1739c21e0bbfSMatthew R. Ochs goto out; 1740c21e0bbfSMatthew R. Ochs } 1741c21e0bbfSMatthew R. Ochs ret: 1742fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 1743c21e0bbfSMatthew R. Ochs return rc; 1744c21e0bbfSMatthew R. Ochs out: 17459526f360SManoj N. Kumar term_intr(cfg, level); 1746c21e0bbfSMatthew R. Ochs goto ret; 1747c21e0bbfSMatthew R. Ochs } 1748c21e0bbfSMatthew R. Ochs 1749c21e0bbfSMatthew R. Ochs /** 1750c21e0bbfSMatthew R. Ochs * init_afu() - setup as master context and start AFU 17511284fb0cSMatthew R. Ochs * @cfg: Internal structure associated with the host. 1752c21e0bbfSMatthew R. Ochs * 1753c21e0bbfSMatthew R. Ochs * This routine is a higher level of control for configuring the 1754c21e0bbfSMatthew R. Ochs * AFU on probe and reset paths. 1755c21e0bbfSMatthew R. Ochs * 17561284fb0cSMatthew R. Ochs * Return: 0 on success, -errno on failure 1757c21e0bbfSMatthew R. Ochs */ 1758c21e0bbfSMatthew R. Ochs static int init_afu(struct cxlflash_cfg *cfg) 1759c21e0bbfSMatthew R. Ochs { 1760c21e0bbfSMatthew R. Ochs u64 reg; 1761c21e0bbfSMatthew R. Ochs int rc = 0; 1762c21e0bbfSMatthew R. Ochs struct afu *afu = cfg->afu; 1763c21e0bbfSMatthew R. Ochs struct device *dev = &cfg->dev->dev; 1764c21e0bbfSMatthew R. Ochs 17655cdac81aSMatthew R. Ochs cxl_perst_reloads_same_image(cfg->cxl_afu, true); 17665cdac81aSMatthew R. Ochs 1767c21e0bbfSMatthew R. Ochs rc = init_mc(cfg); 1768c21e0bbfSMatthew R. Ochs if (rc) { 1769fb67d44dSMatthew R. Ochs dev_err(dev, "%s: init_mc failed rc=%d\n", 1770c21e0bbfSMatthew R. Ochs __func__, rc); 1771ee3491baSMatthew R. Ochs goto out; 1772c21e0bbfSMatthew R. Ochs } 1773c21e0bbfSMatthew R. Ochs 1774f15fbf8dSMatthew R. Ochs /* Map the entire MMIO space of the AFU */ 1775c21e0bbfSMatthew R. Ochs afu->afu_map = cxl_psa_map(cfg->mcctx); 1776c21e0bbfSMatthew R. Ochs if (!afu->afu_map) { 1777fb67d44dSMatthew R. Ochs dev_err(dev, "%s: cxl_psa_map failed\n", __func__); 1778ee3491baSMatthew R. Ochs rc = -ENOMEM; 1779c21e0bbfSMatthew R. Ochs goto err1; 1780c21e0bbfSMatthew R. Ochs } 1781c21e0bbfSMatthew R. Ochs 1782e5ce067bSMatthew R. Ochs /* No byte reverse on reading afu_version or string will be backwards */ 1783e5ce067bSMatthew R. Ochs reg = readq(&afu->afu_map->global.regs.afu_version); 1784e5ce067bSMatthew R. Ochs memcpy(afu->version, ®, sizeof(reg)); 1785c21e0bbfSMatthew R. Ochs afu->interface_version = 1786c21e0bbfSMatthew R. Ochs readq_be(&afu->afu_map->global.regs.interface_version); 1787e5ce067bSMatthew R. Ochs if ((afu->interface_version + 1) == 0) { 1788fb67d44dSMatthew R. Ochs dev_err(dev, "Back level AFU, please upgrade. AFU version %s " 1789fb67d44dSMatthew R. Ochs "interface version %016llx\n", afu->version, 1790e5ce067bSMatthew R. Ochs afu->interface_version); 1791e5ce067bSMatthew R. Ochs rc = -EINVAL; 17920df5bef7SUma Krishnan goto err1; 1793ee3491baSMatthew R. Ochs } 1794ee3491baSMatthew R. Ochs 1795696d0b0cSMatthew R. Ochs if (afu_is_sq_cmd_mode(afu)) { 1796696d0b0cSMatthew R. Ochs afu->send_cmd = send_cmd_sq; 1797696d0b0cSMatthew R. Ochs afu->context_reset = context_reset_sq; 1798696d0b0cSMatthew R. Ochs } else { 179948b4be36SMatthew R. Ochs afu->send_cmd = send_cmd_ioarrin; 180048b4be36SMatthew R. Ochs afu->context_reset = context_reset_ioarrin; 1801696d0b0cSMatthew R. Ochs } 180248b4be36SMatthew R. Ochs 1803fb67d44dSMatthew R. 
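/*
 * Resolving the queuing strategy once, here, keeps the per-command paths
 * free of mode tests: everything downstream simply invokes
 * afu->send_cmd(afu, cmd) -- as cxlflash_afu_sync() does below -- and the
 * SQ versus IOARRIN distinction never reappears.
 */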
Ochs dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__, 1804ee3491baSMatthew R. Ochs afu->version, afu->interface_version); 1805c21e0bbfSMatthew R. Ochs 1806c21e0bbfSMatthew R. Ochs rc = start_afu(cfg); 1807c21e0bbfSMatthew R. Ochs if (rc) { 1808fb67d44dSMatthew R. Ochs dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc); 18090df5bef7SUma Krishnan goto err1; 1810c21e0bbfSMatthew R. Ochs } 1811c21e0bbfSMatthew R. Ochs 1812c21e0bbfSMatthew R. Ochs afu_err_intr_init(cfg->afu); 181311f7b184SUma Krishnan spin_lock_init(&afu->rrin_slock); 181411f7b184SUma Krishnan afu->room = readq_be(&afu->host_map->cmd_room); 1815c21e0bbfSMatthew R. Ochs 18162cb79266SMatthew R. Ochs /* Restore the LUN mappings */ 18172cb79266SMatthew R. Ochs cxlflash_restore_luntable(cfg); 1818ee3491baSMatthew R. Ochs out: 1819fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 1820c21e0bbfSMatthew R. Ochs return rc; 1821ee3491baSMatthew R. Ochs 1822ee3491baSMatthew R. Ochs err1: 18239526f360SManoj N. Kumar term_intr(cfg, UNMAP_THREE); 18249526f360SManoj N. Kumar term_mc(cfg); 1825ee3491baSMatthew R. Ochs goto out; 1826c21e0bbfSMatthew R. Ochs } 1827c21e0bbfSMatthew R. Ochs 1828c21e0bbfSMatthew R. Ochs /** 1829c21e0bbfSMatthew R. Ochs * cxlflash_afu_sync() - builds and sends an AFU sync command 1830c21e0bbfSMatthew R. Ochs * @afu: AFU associated with the host. 1831c21e0bbfSMatthew R. Ochs * @ctx_hndl_u: Identifies context requesting sync. 1832c21e0bbfSMatthew R. Ochs * @res_hndl_u: Identifies resource requesting sync. 1833c21e0bbfSMatthew R. Ochs * @mode: Type of sync to issue (lightweight, heavyweight, global). 1834c21e0bbfSMatthew R. Ochs * 1835c21e0bbfSMatthew R. Ochs * The AFU can only take 1 sync command at a time. This routine enforces this 1836f15fbf8dSMatthew R. Ochs * limitation by using a mutex to provide exclusive access to the AFU during 1837c21e0bbfSMatthew R. Ochs * the sync. This design point requires calling threads to not be on interrupt 1838c21e0bbfSMatthew R. Ochs * context due to the possibility of sleeping during concurrent sync operations. 1839c21e0bbfSMatthew R. Ochs * 18405cdac81aSMatthew R. Ochs * AFU sync operations are only necessary and allowed when the device is 18415cdac81aSMatthew R. Ochs * operating normally. When not operating normally, sync requests can occur as 18425cdac81aSMatthew R. Ochs * part of cleaning up resources associated with an adapter prior to removal. 18435cdac81aSMatthew R. Ochs * In this scenario, these requests are simply ignored (safe due to the AFU 18445cdac81aSMatthew R. Ochs * going away). 18455cdac81aSMatthew R. Ochs * 1846c21e0bbfSMatthew R. Ochs * Return: 1847c21e0bbfSMatthew R. Ochs * 0 on success 1848c21e0bbfSMatthew R. Ochs * -1 on failure 1849c21e0bbfSMatthew R. Ochs */ 1850c21e0bbfSMatthew R. Ochs int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u, 1851c21e0bbfSMatthew R. Ochs res_hndl_t res_hndl_u, u8 mode) 1852c21e0bbfSMatthew R. Ochs { 18535cdac81aSMatthew R. Ochs struct cxlflash_cfg *cfg = afu->parent; 18544392ba49SMatthew R. Ochs struct device *dev = &cfg->dev->dev; 1855c21e0bbfSMatthew R. Ochs struct afu_cmd *cmd = NULL; 1856350bb478SMatthew R. Ochs char *buf = NULL; 1857c21e0bbfSMatthew R. Ochs int rc = 0; 1858c21e0bbfSMatthew R. Ochs static DEFINE_MUTEX(sync_active); 1859c21e0bbfSMatthew R. Ochs 18605cdac81aSMatthew R. Ochs if (cfg->state != STATE_NORMAL) { 1861fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: Sync not required state=%u\n", 1862fb67d44dSMatthew R. 
Ochs __func__, cfg->state); 18635cdac81aSMatthew R. Ochs return 0; 18645cdac81aSMatthew R. Ochs } 18655cdac81aSMatthew R. Ochs 1866c21e0bbfSMatthew R. Ochs mutex_lock(&sync_active); 1867de01283bSMatthew R. Ochs atomic_inc(&afu->cmds_active); 1868350bb478SMatthew R. Ochs buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL); 1869350bb478SMatthew R. Ochs if (unlikely(!buf)) { 1870350bb478SMatthew R. Ochs dev_err(dev, "%s: no memory for command\n", __func__); 1871c21e0bbfSMatthew R. Ochs rc = -1; 1872c21e0bbfSMatthew R. Ochs goto out; 1873c21e0bbfSMatthew R. Ochs } 1874c21e0bbfSMatthew R. Ochs 1875350bb478SMatthew R. Ochs cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd)); 1876350bb478SMatthew R. Ochs init_completion(&cmd->cevent); 1877350bb478SMatthew R. Ochs cmd->parent = afu; 1878350bb478SMatthew R. Ochs 1879fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u); 1880c21e0bbfSMatthew R. Ochs 1881c21e0bbfSMatthew R. Ochs cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD; 1882350bb478SMatthew R. Ochs cmd->rcb.ctx_id = afu->ctx_hndl; 1883350bb478SMatthew R. Ochs cmd->rcb.msi = SISL_MSI_RRQ_UPDATED; 1884c21e0bbfSMatthew R. Ochs cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT; 1885c21e0bbfSMatthew R. Ochs 1886c21e0bbfSMatthew R. Ochs cmd->rcb.cdb[0] = 0xC0; /* AFU Sync */ 1887c21e0bbfSMatthew R. Ochs cmd->rcb.cdb[1] = mode; 1888c21e0bbfSMatthew R. Ochs 1889c21e0bbfSMatthew R. Ochs /* The cdb is aligned, no unaligned accessors required */ 18901786f4a0SMatthew R. Ochs *((__be16 *)&cmd->rcb.cdb[2]) = cpu_to_be16(ctx_hndl_u); 18911786f4a0SMatthew R. Ochs *((__be32 *)&cmd->rcb.cdb[4]) = cpu_to_be32(res_hndl_u); 1892c21e0bbfSMatthew R. Ochs 189348b4be36SMatthew R. Ochs rc = afu->send_cmd(afu, cmd); 1894c21e0bbfSMatthew R. Ochs if (unlikely(rc)) 1895c21e0bbfSMatthew R. Ochs goto out; 1896c21e0bbfSMatthew R. Ochs 18979ba848acSMatthew R. Ochs rc = wait_resp(afu, cmd); 18989ba848acSMatthew R. Ochs if (unlikely(rc)) 1899c21e0bbfSMatthew R. Ochs rc = -1; 1900c21e0bbfSMatthew R. Ochs out: 1901de01283bSMatthew R. Ochs atomic_dec(&afu->cmds_active); 1902c21e0bbfSMatthew R. Ochs mutex_unlock(&sync_active); 1903350bb478SMatthew R. Ochs kfree(buf); 1904fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 1905c21e0bbfSMatthew R. Ochs return rc; 1906c21e0bbfSMatthew R. Ochs } 1907c21e0bbfSMatthew R. Ochs 1908c21e0bbfSMatthew R. Ochs /** 190915305514SMatthew R. Ochs * afu_reset() - resets the AFU 191015305514SMatthew R. Ochs * @cfg: Internal structure associated with the host. 1911c21e0bbfSMatthew R. Ochs * 19121284fb0cSMatthew R. Ochs * Return: 0 on success, -errno on failure 1913c21e0bbfSMatthew R. Ochs */ 191415305514SMatthew R. Ochs static int afu_reset(struct cxlflash_cfg *cfg) 1915c21e0bbfSMatthew R. Ochs { 1916fb67d44dSMatthew R. Ochs struct device *dev = &cfg->dev->dev; 1917c21e0bbfSMatthew R. Ochs int rc = 0; 1918fb67d44dSMatthew R. Ochs 1919c21e0bbfSMatthew R. Ochs /* Stop the context before the reset. Since the context is 1920c21e0bbfSMatthew R. Ochs * no longer available restart it after the reset is complete 1921c21e0bbfSMatthew R. Ochs */ 1922c21e0bbfSMatthew R. Ochs term_afu(cfg); 1923c21e0bbfSMatthew R. Ochs 1924c21e0bbfSMatthew R. Ochs rc = init_afu(cfg); 1925c21e0bbfSMatthew R. Ochs 1926fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 1927c21e0bbfSMatthew R. Ochs return rc; 1928c21e0bbfSMatthew R. Ochs } 1929c21e0bbfSMatthew R. Ochs 1930c21e0bbfSMatthew R. Ochs /** 1931f411396dSManoj N. 
Kumar * drain_ioctls() - wait until all currently executing ioctls have completed 1932f411396dSManoj N. Kumar * @cfg: Internal structure associated with the host. 1933f411396dSManoj N. Kumar * 1934f411396dSManoj N. Kumar * Obtain write access to read/write semaphore that wraps ioctl 1935f411396dSManoj N. Kumar * handling to 'drain' ioctls currently executing. 1936f411396dSManoj N. Kumar */ 1937f411396dSManoj N. Kumar static void drain_ioctls(struct cxlflash_cfg *cfg) 1938f411396dSManoj N. Kumar { 1939f411396dSManoj N. Kumar down_write(&cfg->ioctl_rwsem); 1940f411396dSManoj N. Kumar up_write(&cfg->ioctl_rwsem); 1941f411396dSManoj N. Kumar } 1942f411396dSManoj N. Kumar 1943f411396dSManoj N. Kumar /** 194415305514SMatthew R. Ochs * cxlflash_eh_device_reset_handler() - reset a single LUN 194515305514SMatthew R. Ochs * @scp: SCSI command to send. 194615305514SMatthew R. Ochs * 194715305514SMatthew R. Ochs * Return: 194815305514SMatthew R. Ochs * SUCCESS as defined in scsi/scsi.h 194915305514SMatthew R. Ochs * FAILED as defined in scsi/scsi.h 195015305514SMatthew R. Ochs */ 195115305514SMatthew R. Ochs static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp) 195215305514SMatthew R. Ochs { 195315305514SMatthew R. Ochs int rc = SUCCESS; 195415305514SMatthew R. Ochs struct Scsi_Host *host = scp->device->host; 1955fb67d44dSMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(host); 1956fb67d44dSMatthew R. Ochs struct device *dev = &cfg->dev->dev; 195715305514SMatthew R. Ochs struct afu *afu = cfg->afu; 195815305514SMatthew R. Ochs int rcr = 0; 195915305514SMatthew R. Ochs 1960fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu " 1961fb67d44dSMatthew R. Ochs "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no, 1962fb67d44dSMatthew R. Ochs scp->device->channel, scp->device->id, scp->device->lun, 196315305514SMatthew R. Ochs get_unaligned_be32(&((u32 *)scp->cmnd)[0]), 196415305514SMatthew R. Ochs get_unaligned_be32(&((u32 *)scp->cmnd)[1]), 196515305514SMatthew R. Ochs get_unaligned_be32(&((u32 *)scp->cmnd)[2]), 196615305514SMatthew R. Ochs get_unaligned_be32(&((u32 *)scp->cmnd)[3])); 196715305514SMatthew R. Ochs 1968ed486daaSMatthew R. Ochs retry: 196915305514SMatthew R. Ochs switch (cfg->state) { 197015305514SMatthew R. Ochs case STATE_NORMAL: 197115305514SMatthew R. Ochs rcr = send_tmf(afu, scp, TMF_LUN_RESET); 197215305514SMatthew R. Ochs if (unlikely(rcr)) 197315305514SMatthew R. Ochs rc = FAILED; 197415305514SMatthew R. Ochs break; 197515305514SMatthew R. Ochs case STATE_RESET: 197615305514SMatthew R. Ochs wait_event(cfg->reset_waitq, cfg->state != STATE_RESET); 1977ed486daaSMatthew R. Ochs goto retry; 197815305514SMatthew R. Ochs default: 197915305514SMatthew R. Ochs rc = FAILED; 198015305514SMatthew R. Ochs break; 198115305514SMatthew R. Ochs } 198215305514SMatthew R. Ochs 1983fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 198415305514SMatthew R. Ochs return rc; 198515305514SMatthew R. Ochs } 198615305514SMatthew R. Ochs 198715305514SMatthew R. Ochs /** 198815305514SMatthew R. Ochs * cxlflash_eh_host_reset_handler() - reset the host adapter 198915305514SMatthew R. Ochs * @scp: SCSI command from stack identifying host. 199015305514SMatthew R. Ochs * 19911d3324c3SMatthew R. Ochs * Following a reset, the state is evaluated again in case an EEH occurred 19921d3324c3SMatthew R. Ochs * during the reset. In such a scenario, the host reset will either yield 19931d3324c3SMatthew R. 
Ochs * until the EEH recovery is complete or return success or failure based 19941d3324c3SMatthew R. Ochs * upon the current device state. 19951d3324c3SMatthew R. Ochs * 199615305514SMatthew R. Ochs * Return: 199715305514SMatthew R. Ochs * SUCCESS as defined in scsi/scsi.h 199815305514SMatthew R. Ochs * FAILED as defined in scsi/scsi.h 199915305514SMatthew R. Ochs */ 200015305514SMatthew R. Ochs static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp) 200115305514SMatthew R. Ochs { 200215305514SMatthew R. Ochs int rc = SUCCESS; 200315305514SMatthew R. Ochs int rcr = 0; 200415305514SMatthew R. Ochs struct Scsi_Host *host = scp->device->host; 2005fb67d44dSMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(host); 2006fb67d44dSMatthew R. Ochs struct device *dev = &cfg->dev->dev; 200715305514SMatthew R. Ochs 2008fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu " 2009fb67d44dSMatthew R. Ochs "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no, 2010fb67d44dSMatthew R. Ochs scp->device->channel, scp->device->id, scp->device->lun, 201115305514SMatthew R. Ochs get_unaligned_be32(&((u32 *)scp->cmnd)[0]), 201215305514SMatthew R. Ochs get_unaligned_be32(&((u32 *)scp->cmnd)[1]), 201315305514SMatthew R. Ochs get_unaligned_be32(&((u32 *)scp->cmnd)[2]), 201415305514SMatthew R. Ochs get_unaligned_be32(&((u32 *)scp->cmnd)[3])); 201515305514SMatthew R. Ochs 201615305514SMatthew R. Ochs switch (cfg->state) { 201715305514SMatthew R. Ochs case STATE_NORMAL: 201815305514SMatthew R. Ochs cfg->state = STATE_RESET; 2019f411396dSManoj N. Kumar drain_ioctls(cfg); 202015305514SMatthew R. Ochs cxlflash_mark_contexts_error(cfg); 202115305514SMatthew R. Ochs rcr = afu_reset(cfg); 202215305514SMatthew R. Ochs if (rcr) { 202315305514SMatthew R. Ochs rc = FAILED; 202415305514SMatthew R. Ochs cfg->state = STATE_FAILTERM; 202515305514SMatthew R. Ochs } else 202615305514SMatthew R. Ochs cfg->state = STATE_NORMAL; 202715305514SMatthew R. Ochs wake_up_all(&cfg->reset_waitq); 20281d3324c3SMatthew R. Ochs ssleep(1); 20291d3324c3SMatthew R. Ochs /* fall through */ 203015305514SMatthew R. Ochs case STATE_RESET: 203115305514SMatthew R. Ochs wait_event(cfg->reset_waitq, cfg->state != STATE_RESET); 203215305514SMatthew R. Ochs if (cfg->state == STATE_NORMAL) 203315305514SMatthew R. Ochs break; 203415305514SMatthew R. Ochs /* fall through */ 203515305514SMatthew R. Ochs default: 203615305514SMatthew R. Ochs rc = FAILED; 203715305514SMatthew R. Ochs break; 203815305514SMatthew R. Ochs } 203915305514SMatthew R. Ochs 2040fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 204115305514SMatthew R. Ochs return rc; 204215305514SMatthew R. Ochs } 204315305514SMatthew R. Ochs 204415305514SMatthew R. Ochs /** 204515305514SMatthew R. Ochs * cxlflash_change_queue_depth() - change the queue depth for the device 204615305514SMatthew R. Ochs * @sdev: SCSI device destined for queue depth change. 204715305514SMatthew R. Ochs * @qdepth: Requested queue depth value to set. 204815305514SMatthew R. Ochs * 204915305514SMatthew R. Ochs * The requested queue depth is capped to the maximum supported value. 205015305514SMatthew R. Ochs * 205115305514SMatthew R. Ochs * Return: The actual queue depth set. 205215305514SMatthew R. Ochs */ 205315305514SMatthew R. Ochs static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth) 205415305514SMatthew R. Ochs { 205515305514SMatthew R. Ochs 205615305514SMatthew R. Ochs if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN) 205715305514SMatthew R. 
Ochs qdepth = CXLFLASH_MAX_CMDS_PER_LUN; 205815305514SMatthew R. Ochs 205915305514SMatthew R. Ochs scsi_change_queue_depth(sdev, qdepth); 206015305514SMatthew R. Ochs return sdev->queue_depth; 206115305514SMatthew R. Ochs } 206215305514SMatthew R. Ochs 206315305514SMatthew R. Ochs /** 206415305514SMatthew R. Ochs * cxlflash_show_port_status() - queries and presents the current port status 2065e0f01a21SMatthew R. Ochs * @port: Desired port for status reporting. 20663b225cd3SMatthew R. Ochs * @cfg: Internal structure associated with the host. 206715305514SMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. 206815305514SMatthew R. Ochs * 206978ae028eSMatthew R. Ochs * Return: The size of the ASCII string returned in @buf or -EINVAL. 207015305514SMatthew R. Ochs */ 20713b225cd3SMatthew R. Ochs static ssize_t cxlflash_show_port_status(u32 port, 20723b225cd3SMatthew R. Ochs struct cxlflash_cfg *cfg, 20733b225cd3SMatthew R. Ochs char *buf) 207415305514SMatthew R. Ochs { 207578ae028eSMatthew R. Ochs struct device *dev = &cfg->dev->dev; 207615305514SMatthew R. Ochs char *disp_status; 207715305514SMatthew R. Ochs u64 status; 20780aa14887SMatthew R. Ochs __be64 __iomem *fc_port_regs; 207915305514SMatthew R. Ochs 208078ae028eSMatthew R. Ochs WARN_ON(port >= MAX_FC_PORTS); 208178ae028eSMatthew R. Ochs 208278ae028eSMatthew R. Ochs if (port >= cfg->num_fc_ports) { 208378ae028eSMatthew R. Ochs dev_info(dev, "%s: Port %d not supported on this card.\n", 208478ae028eSMatthew R. Ochs __func__, port); 208578ae028eSMatthew R. Ochs return -EINVAL; 208678ae028eSMatthew R. Ochs } 208715305514SMatthew R. Ochs 20880aa14887SMatthew R. Ochs fc_port_regs = get_fc_port_regs(cfg, port); 20890aa14887SMatthew R. Ochs status = readq_be(&fc_port_regs[FC_MTIP_STATUS / 8]); 2090e0f01a21SMatthew R. Ochs status &= FC_MTIP_STATUS_MASK; 209115305514SMatthew R. Ochs 209215305514SMatthew R. Ochs if (status == FC_MTIP_STATUS_ONLINE) 209315305514SMatthew R. Ochs disp_status = "online"; 209415305514SMatthew R. Ochs else if (status == FC_MTIP_STATUS_OFFLINE) 209515305514SMatthew R. Ochs disp_status = "offline"; 209615305514SMatthew R. Ochs else 209715305514SMatthew R. Ochs disp_status = "unknown"; 209815305514SMatthew R. Ochs 2099e0f01a21SMatthew R. Ochs return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status); 210015305514SMatthew R. Ochs } 210115305514SMatthew R. Ochs 210215305514SMatthew R. Ochs /** 2103e0f01a21SMatthew R. Ochs * port0_show() - queries and presents the current status of port 0 2104e0f01a21SMatthew R. Ochs * @dev: Generic device associated with the host owning the port. 2105e0f01a21SMatthew R. Ochs * @attr: Device attribute representing the port. 2106e0f01a21SMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. 2107e0f01a21SMatthew R. Ochs * 2108e0f01a21SMatthew R. Ochs * Return: The size of the ASCII string returned in @buf. 2109e0f01a21SMatthew R. Ochs */ 2110e0f01a21SMatthew R. Ochs static ssize_t port0_show(struct device *dev, 2111e0f01a21SMatthew R. Ochs struct device_attribute *attr, 2112e0f01a21SMatthew R. Ochs char *buf) 2113e0f01a21SMatthew R. Ochs { 2114fb67d44dSMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); 2115e0f01a21SMatthew R. Ochs 21163b225cd3SMatthew R. Ochs return cxlflash_show_port_status(0, cfg, buf); 2117e0f01a21SMatthew R. Ochs } 2118e0f01a21SMatthew R. Ochs 2119e0f01a21SMatthew R. Ochs /** 2120e0f01a21SMatthew R. 
Ochs * port1_show() - queries and presents the current status of port 1 2121e0f01a21SMatthew R. Ochs * @dev: Generic device associated with the host owning the port. 2122e0f01a21SMatthew R. Ochs * @attr: Device attribute representing the port. 2123e0f01a21SMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. 2124e0f01a21SMatthew R. Ochs * 2125e0f01a21SMatthew R. Ochs * Return: The size of the ASCII string returned in @buf. 2126e0f01a21SMatthew R. Ochs */ 2127e0f01a21SMatthew R. Ochs static ssize_t port1_show(struct device *dev, 2128e0f01a21SMatthew R. Ochs struct device_attribute *attr, 2129e0f01a21SMatthew R. Ochs char *buf) 2130e0f01a21SMatthew R. Ochs { 2131fb67d44dSMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); 2132e0f01a21SMatthew R. Ochs 21333b225cd3SMatthew R. Ochs return cxlflash_show_port_status(1, cfg, buf); 2134e0f01a21SMatthew R. Ochs } 2135e0f01a21SMatthew R. Ochs 2136e0f01a21SMatthew R. Ochs /** 2137e0f01a21SMatthew R. Ochs * lun_mode_show() - presents the current LUN mode of the host 213815305514SMatthew R. Ochs * @dev: Generic device associated with the host. 2139e0f01a21SMatthew R. Ochs * @attr: Device attribute representing the LUN mode. 214015305514SMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE to report back the LUN mode in ASCII. 214115305514SMatthew R. Ochs * 214215305514SMatthew R. Ochs * Return: The size of the ASCII string returned in @buf. 214315305514SMatthew R. Ochs */ 2144e0f01a21SMatthew R. Ochs static ssize_t lun_mode_show(struct device *dev, 214515305514SMatthew R. Ochs struct device_attribute *attr, char *buf) 214615305514SMatthew R. Ochs { 2147fb67d44dSMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); 214815305514SMatthew R. Ochs struct afu *afu = cfg->afu; 214915305514SMatthew R. Ochs 2150e0f01a21SMatthew R. Ochs return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun); 215115305514SMatthew R. Ochs } 215215305514SMatthew R. Ochs 215315305514SMatthew R. Ochs /** 2154e0f01a21SMatthew R. Ochs * lun_mode_store() - sets the LUN mode of the host 215515305514SMatthew R. Ochs * @dev: Generic device associated with the host. 2156e0f01a21SMatthew R. Ochs * @attr: Device attribute representing the LUN mode. 215715305514SMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE containing the LUN mode in ASCII. 215815305514SMatthew R. Ochs * @count: Length of data residing in @buf. 215915305514SMatthew R. Ochs * 216015305514SMatthew R. Ochs * The CXL Flash AFU supports a dummy LUN mode where the external 216115305514SMatthew R. Ochs * links and storage are not required. Space on the FPGA is used 216215305514SMatthew R. Ochs * to create 1 or 2 small LUNs which are presented to the system 216315305514SMatthew R. Ochs * as if they were a normal storage device. This feature is useful 216415305514SMatthew R. Ochs * during development and also provides manufacturing with a way 216515305514SMatthew R. Ochs * to test the AFU without an actual device. 216615305514SMatthew R. Ochs * 216715305514SMatthew R. Ochs * 0 = external LUN[s] (default) 216815305514SMatthew R. Ochs * 1 = internal LUN (1 x 64K, 512B blocks, id 0) 216915305514SMatthew R. Ochs * 2 = internal LUN (1 x 64K, 4K blocks, id 0) 217015305514SMatthew R. Ochs * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1) 217115305514SMatthew R. Ochs * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1) 217215305514SMatthew R. Ochs * 217315305514SMatthew R. Ochs * Return: @count, the number of bytes consumed from @buf. 217415305514SMatthew R.
 */
static ssize_t lun_mode_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = shost_priv(shost);
	struct afu *afu = cfg->afu;
	int rc;
	u32 lun_mode;

	rc = kstrtouint(buf, 10, &lun_mode);
	if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
		afu->internal_lun = lun_mode;

		/*
		 * When configured for internal LUN, there is only one channel,
		 * channel number 0; otherwise the highest channel is one less
		 * than the number of FC ports on this card.
		 */
		if (afu->internal_lun)
			shost->max_channel = 0;
		else
			shost->max_channel = PORTNUM2CHAN(cfg->num_fc_ports);

		afu_reset(cfg);
		scsi_scan_host(cfg->host);
	}

	return count;
}
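/*
 * Illustrative usage (hypothetical host number): switching the AFU to a
 * single internal 512B-block LUN and back to external LUNs. Each accepted
 * write triggers an AFU reset and a host rescan, per lun_mode_store()
 * above.
 *
 *   $ echo 1 > /sys/class/scsi_host/host3/lun_mode
 *   $ echo 0 > /sys/class/scsi_host/host3/lun_mode
 */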
/**
 * ioctl_version_show() - presents the current ioctl version of the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the ioctl version.
 * @buf:	Buffer of length PAGE_SIZE to report back the ioctl version.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t ioctl_version_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", DK_CXLFLASH_VERSION_0);
}

/**
 * cxlflash_show_port_lun_table() - queries and presents the port LUN table
 * @port:	Desired port for status reporting.
 * @cfg:	Internal structure associated with the host.
 * @buf:	Buffer of length PAGE_SIZE to report back the LUN table in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf or -EINVAL.
 */
static ssize_t cxlflash_show_port_lun_table(u32 port,
					    struct cxlflash_cfg *cfg,
					    char *buf)
{
	struct device *dev = &cfg->dev->dev;
	__be64 __iomem *fc_port_luns;
	int i;
	ssize_t bytes = 0;

	WARN_ON(port >= MAX_FC_PORTS);

	if (port >= cfg->num_fc_ports) {
		dev_info(dev, "%s: Port %d not supported on this card.\n",
			 __func__, port);
		return -EINVAL;
	}

	fc_port_luns = get_fc_port_luns(cfg, port);

	for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
		bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
				   "%03d: %016llx\n",
				   i, readq_be(&fc_port_luns[i]));
	return bytes;
}

/**
 * port0_lun_table_show() - presents the current LUN table of port 0
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back the LUN table in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port0_lun_table_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_lun_table(0, cfg, buf);
}

/**
 * port1_lun_table_show() - presents the current LUN table of port 1
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back the LUN table in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port1_lun_table_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));

	return cxlflash_show_port_lun_table(1, cfg, buf);
}
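/*
 * Illustrative output (values hypothetical): each line of the LUN table
 * dump pairs an entry index with the 64-bit LUN ID programmed at that
 * slot, matching the "%03d: %016llx" format used above.
 *
 *   $ cat /sys/class/scsi_host/host3/port0_lun_table
 *   000: 0000000000001000
 *   001: 0000000000000000
 *   ...
 */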
/**
 * irqpoll_weight_show() - presents the current IRQ poll weight for the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the IRQ poll weight.
 * @buf:	Buffer of length PAGE_SIZE to report back the current IRQ poll
 *		weight in ASCII.
 *
 * An IRQ poll weight of 0 indicates polling is disabled.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t irqpoll_weight_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct afu *afu = cfg->afu;

	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->irqpoll_weight);
}

/**
 * irqpoll_weight_store() - sets the current IRQ poll weight for the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the IRQ poll weight.
 * @buf:	Buffer of length PAGE_SIZE containing the desired IRQ poll
 *		weight in ASCII.
 * @count:	Length of data residing in @buf.
 *
 * An IRQ poll weight of 0 indicates polling is disabled.
 *
 * Return: @count on success, -EINVAL on failure.
 */
static ssize_t irqpoll_weight_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct device *cfgdev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	u32 weight;
	int rc;

	rc = kstrtouint(buf, 10, &weight);
	if (rc)
		return -EINVAL;

	if (weight > 256) {
		dev_info(cfgdev,
			 "Invalid IRQ poll weight. It must be 256 or less.\n");
		return -EINVAL;
	}

	if (weight == afu->irqpoll_weight) {
		dev_info(cfgdev,
			 "Specified IRQ poll weight is already in effect.\n");
		return -EINVAL;
	}

	if (afu_is_irqpoll_enabled(afu))
		irq_poll_disable(&afu->irqpoll);

	afu->irqpoll_weight = weight;

	if (weight > 0)
		irq_poll_init(&afu->irqpoll, weight, cxlflash_irqpoll);

	return count;
}
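/*
 * Illustrative usage (hypothetical host number): enabling IRQ polling with
 * a weight of 64, then disabling it again. Per irqpoll_weight_store()
 * above, writes above 256 or writes that match the current weight are
 * rejected with -EINVAL.
 *
 *   $ echo 64 > /sys/class/scsi_host/host3/irqpoll_weight
 *   $ echo 0  > /sys/class/scsi_host/host3/irqpoll_weight
 */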
/**
 * mode_show() - presents the current mode of the device
 * @dev:	Generic device associated with the device.
 * @attr:	Device attribute representing the device mode.
 * @buf:	Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t mode_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	return scnprintf(buf, PAGE_SIZE, "%s\n",
			 sdev->hostdata ? "superpipe" : "legacy");
}

/*
 * Host attributes
 */
static DEVICE_ATTR_RO(port0);
static DEVICE_ATTR_RO(port1);
static DEVICE_ATTR_RW(lun_mode);
static DEVICE_ATTR_RO(ioctl_version);
static DEVICE_ATTR_RO(port0_lun_table);
static DEVICE_ATTR_RO(port1_lun_table);
static DEVICE_ATTR_RW(irqpoll_weight);

static struct device_attribute *cxlflash_host_attrs[] = {
	&dev_attr_port0,
	&dev_attr_port1,
	&dev_attr_lun_mode,
	&dev_attr_ioctl_version,
	&dev_attr_port0_lun_table,
	&dev_attr_port1_lun_table,
	&dev_attr_irqpoll_weight,
	NULL
};

/*
 * Device attributes
 */
static DEVICE_ATTR_RO(mode);

static struct device_attribute *cxlflash_dev_attrs[] = {
	&dev_attr_mode,
	NULL
};
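/*
 * Illustrative usage (SCSI address hypothetical): the mode attribute hangs
 * off each SCSI device registered through sdev_attrs and reports
 * "superpipe" when private LUN data is attached to the device, "legacy"
 * otherwise.
 *
 *   $ cat /sys/class/scsi_device/3:0:0:0/device/mode
 *   legacy
 */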
/*
 * Host template
 */
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = CXLFLASH_ADAPTER_NAME,
	.info = cxlflash_driver_info,
	.ioctl = cxlflash_ioctl,
	.proc_name = CXLFLASH_NAME,
	.queuecommand = cxlflash_queuecommand,
	.eh_device_reset_handler = cxlflash_eh_device_reset_handler,
	.eh_host_reset_handler = cxlflash_eh_host_reset_handler,
	.change_queue_depth = cxlflash_change_queue_depth,
	.cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
	.can_queue = CXLFLASH_MAX_CMDS,
	.cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1,
	.this_id = -1,
	.sg_tablesize = 1,	/* No scatter gather support */
	.max_sectors = CXLFLASH_MAX_SECTORS,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = cxlflash_host_attrs,
	.sdev_attrs = cxlflash_dev_attrs,
};

/*
 * Device dependent values
 */
static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
					0ULL };
static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
					CXLFLASH_NOTIFY_SHUTDOWN };
static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS,
					CXLFLASH_NOTIFY_SHUTDOWN };

/*
 * PCI device binding table
 */
static struct pci_device_id cxlflash_pci_table[] = {
	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_BRIARD,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_briard_vals},
	{}
};

MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
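/*
 * Illustrative sketch (device ID hypothetical): supporting another card
 * flavor amounts to defining a dev_dependent_vals instance and appending
 * a binding entry ahead of the table terminator; probe() later recovers
 * the per-device values from dev_id->driver_data.
 *
 *   static struct dev_dependent_vals dev_newcard_vals = {
 *		CXLFLASH_MAX_SECTORS, CXLFLASH_NOTIFY_SHUTDOWN };
 *
 *   {PCI_VENDOR_ID_IBM, 0x1234, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
 *    (kernel_ulong_t)&dev_newcard_vals},
 */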
/**
 * cxlflash_worker_thread() - work thread handler for the AFU
 * @work:	Work structure contained within cxlflash associated with host.
 *
 * Handles the following events:
 * - Link reset which cannot be performed in interrupt context due to
 *   blocking up to a few seconds
 * - Rescan the host
 */
static void cxlflash_worker_thread(struct work_struct *work)
{
	struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
						work_q);
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	__be64 __iomem *fc_port_regs;
	int port;
	ulong lock_flags;

	/* Avoid MMIO if the device has failed */
	if (cfg->state != STATE_NORMAL)
		return;

	spin_lock_irqsave(cfg->host->host_lock, lock_flags);

	if (cfg->lr_state == LINK_RESET_REQUIRED) {
		port = cfg->lr_port;
		if (port < 0)
			dev_err(dev, "%s: invalid port index %d\n",
				__func__, port);
		else {
			spin_unlock_irqrestore(cfg->host->host_lock,
					       lock_flags);

			/* The reset can block... */
			fc_port_regs = get_fc_port_regs(cfg, port);
			afu_link_reset(afu, port, fc_port_regs);
			spin_lock_irqsave(cfg->host->host_lock, lock_flags);
		}

		cfg->lr_state = LINK_RESET_COMPLETE;
	}

	spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);

	if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
		scsi_scan_host(cfg->host);
}
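/*
 * Illustrative sketch (a simplification of the interrupt path elsewhere in
 * this driver, not shown here): a link reset is requested by recording the
 * port and reset state and then scheduling this worker, roughly:
 *
 *   cfg->lr_port = port;
 *   cfg->lr_state = LINK_RESET_REQUIRED;
 *   schedule_work(&cfg->work_q);
 */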
/**
 * cxlflash_probe() - PCI entry point to add host
 * @pdev:	PCI device associated with the host.
 * @dev_id:	PCI device id associated with device.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_probe(struct pci_dev *pdev,
			  const struct pci_device_id *dev_id)
{
	struct Scsi_Host *host;
	struct cxlflash_cfg *cfg = NULL;
	struct device *dev = &pdev->dev;
	struct dev_dependent_vals *ddv;
	int rc = 0;
	int k;

	dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
		__func__, pdev->irq);

	ddv = (struct dev_dependent_vals *)dev_id->driver_data;
	driver_template.max_sectors = ddv->max_sectors;

	host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
	if (!host) {
		dev_err(dev, "%s: scsi_host_alloc failed\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
	host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
	host->max_channel = PORTNUM2CHAN(NUM_FC_PORTS);
	host->unique_id = host->host_no;
	host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;

	cfg = shost_priv(host);
	cfg->host = host;
	rc = alloc_mem(cfg);
	if (rc) {
		dev_err(dev, "%s: alloc_mem failed\n", __func__);
		rc = -ENOMEM;
		scsi_host_put(cfg->host);
		goto out;
	}

	cfg->init_state = INIT_STATE_NONE;
	cfg->dev = pdev;
	cfg->num_fc_ports = NUM_FC_PORTS;
	cfg->cxl_fops = cxlflash_cxl_fops;

	/*
	 * Promoted LUNs move to the top of the LUN table. The rest stay on
	 * the bottom half. The bottom half grows from the end (index = 255),
	 * whereas the top half grows from the beginning (index = 0).
	 *
	 * Initialize the last LUN index for all possible ports.
	 */
	cfg->promote_lun_index = 0;

	for (k = 0; k < MAX_FC_PORTS; k++)
		cfg->last_lun_index[k] = CXLFLASH_NUM_VLUNS/2 - 1;

	cfg->dev_id = (struct pci_device_id *)dev_id;

	init_waitqueue_head(&cfg->tmf_waitq);
	init_waitqueue_head(&cfg->reset_waitq);

	INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
	cfg->lr_state = LINK_RESET_INVALID;
	cfg->lr_port = -1;
	spin_lock_init(&cfg->tmf_slock);
	mutex_init(&cfg->ctx_tbl_list_mutex);
	mutex_init(&cfg->ctx_recovery_mutex);
	init_rwsem(&cfg->ioctl_rwsem);
	INIT_LIST_HEAD(&cfg->ctx_err_recovery);
	INIT_LIST_HEAD(&cfg->lluns);

	pci_set_drvdata(pdev, cfg);

	cfg->cxl_afu = cxl_pci_to_afu(pdev);

	rc = init_pci(cfg);
	if (rc) {
		dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_PCI;

	rc = init_afu(cfg);
	if (rc) {
		dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_AFU;

	rc = init_scsi(cfg);
	if (rc) {
		dev_err(dev, "%s: init_scsi failed rc=%d\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_SCSI;

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;

out_remove:
	cxlflash_remove(pdev);
	goto out;
}
/**
 * cxlflash_pci_error_detected() - called when a PCI error is detected
 * @pdev:	PCI device struct.
 * @state:	PCI channel state.
 *
 * When an EEH occurs during an active reset, wait until the reset is
 * complete and then take action based upon the device state.
 *
 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
						    pci_channel_state_t state)
{
	int rc = 0;
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);

	switch (state) {
	case pci_channel_io_frozen:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		if (cfg->state == STATE_FAILTERM)
			return PCI_ERS_RESULT_DISCONNECT;

		cfg->state = STATE_RESET;
		scsi_block_requests(cfg->host);
		drain_ioctls(cfg);
		rc = cxlflash_mark_contexts_error(cfg);
		if (unlikely(rc))
			dev_err(dev, "%s: Failed to mark user contexts rc=%d\n",
				__func__, rc);
		term_afu(cfg);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		cfg->state = STATE_FAILTERM;
		wake_up_all(&cfg->reset_waitq);
		scsi_unblock_requests(cfg->host);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		break;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}
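/*
 * Illustrative EEH sequence for a recoverable error, following the PCI
 * error recovery callback order (handlers registered below):
 *
 *   cxlflash_pci_error_detected(pdev, pci_channel_io_frozen)
 *           -- quiesces I/O, tears down the AFU, PCI_ERS_RESULT_NEED_RESET
 *   cxlflash_pci_slot_reset(pdev)
 *           -- re-initializes the AFU, PCI_ERS_RESULT_RECOVERED
 *   cxlflash_pci_resume(pdev)
 *           -- state back to NORMAL, requests unblocked
 */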
Ochs dev_err(dev, "%s: EEH recovery failed rc=%d\n", __func__, rc); 26825cdac81aSMatthew R. Ochs return PCI_ERS_RESULT_DISCONNECT; 26835cdac81aSMatthew R. Ochs } 26845cdac81aSMatthew R. Ochs 26855cdac81aSMatthew R. Ochs return PCI_ERS_RESULT_RECOVERED; 26865cdac81aSMatthew R. Ochs } 26875cdac81aSMatthew R. Ochs 26885cdac81aSMatthew R. Ochs /** 26895cdac81aSMatthew R. Ochs * cxlflash_pci_resume() - called when normal operation can resume 26905cdac81aSMatthew R. Ochs * @pdev: PCI device struct 26915cdac81aSMatthew R. Ochs */ 26925cdac81aSMatthew R. Ochs static void cxlflash_pci_resume(struct pci_dev *pdev) 26935cdac81aSMatthew R. Ochs { 26945cdac81aSMatthew R. Ochs struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); 26955cdac81aSMatthew R. Ochs struct device *dev = &cfg->dev->dev; 26965cdac81aSMatthew R. Ochs 26975cdac81aSMatthew R. Ochs dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev); 26985cdac81aSMatthew R. Ochs 26995cdac81aSMatthew R. Ochs cfg->state = STATE_NORMAL; 2700439e85c1SMatthew R. Ochs wake_up_all(&cfg->reset_waitq); 27015cdac81aSMatthew R. Ochs scsi_unblock_requests(cfg->host); 27025cdac81aSMatthew R. Ochs } 27035cdac81aSMatthew R. Ochs 27045cdac81aSMatthew R. Ochs static const struct pci_error_handlers cxlflash_err_handler = { 27055cdac81aSMatthew R. Ochs .error_detected = cxlflash_pci_error_detected, 27065cdac81aSMatthew R. Ochs .slot_reset = cxlflash_pci_slot_reset, 27075cdac81aSMatthew R. Ochs .resume = cxlflash_pci_resume, 27085cdac81aSMatthew R. Ochs }; 27095cdac81aSMatthew R. Ochs 2710c21e0bbfSMatthew R. Ochs /* 2711c21e0bbfSMatthew R. Ochs * PCI device structure 2712c21e0bbfSMatthew R. Ochs */ 2713c21e0bbfSMatthew R. Ochs static struct pci_driver cxlflash_driver = { 2714c21e0bbfSMatthew R. Ochs .name = CXLFLASH_NAME, 2715c21e0bbfSMatthew R. Ochs .id_table = cxlflash_pci_table, 2716c21e0bbfSMatthew R. Ochs .probe = cxlflash_probe, 2717c21e0bbfSMatthew R. Ochs .remove = cxlflash_remove, 2718babf985dSUma Krishnan .shutdown = cxlflash_remove, 27195cdac81aSMatthew R. Ochs .err_handler = &cxlflash_err_handler, 2720c21e0bbfSMatthew R. Ochs }; 2721c21e0bbfSMatthew R. Ochs 2722c21e0bbfSMatthew R. Ochs /** 2723c21e0bbfSMatthew R. Ochs * init_cxlflash() - module entry point 2724c21e0bbfSMatthew R. Ochs * 27251284fb0cSMatthew R. Ochs * Return: 0 on success, -errno on failure 2726c21e0bbfSMatthew R. Ochs */ 2727c21e0bbfSMatthew R. Ochs static int __init init_cxlflash(void) 2728c21e0bbfSMatthew R. Ochs { 272965be2c79SMatthew R. Ochs cxlflash_list_init(); 273065be2c79SMatthew R. Ochs 2731c21e0bbfSMatthew R. Ochs return pci_register_driver(&cxlflash_driver); 2732c21e0bbfSMatthew R. Ochs } 2733c21e0bbfSMatthew R. Ochs 2734c21e0bbfSMatthew R. Ochs /** 2735c21e0bbfSMatthew R. Ochs * exit_cxlflash() - module exit point 2736c21e0bbfSMatthew R. Ochs */ 2737c21e0bbfSMatthew R. Ochs static void __exit exit_cxlflash(void) 2738c21e0bbfSMatthew R. Ochs { 273965be2c79SMatthew R. Ochs cxlflash_term_global_luns(); 274065be2c79SMatthew R. Ochs cxlflash_free_errpage(); 274165be2c79SMatthew R. Ochs 2742c21e0bbfSMatthew R. Ochs pci_unregister_driver(&cxlflash_driver); 2743c21e0bbfSMatthew R. Ochs } 2744c21e0bbfSMatthew R. Ochs 2745c21e0bbfSMatthew R. Ochs module_init(init_cxlflash); 2746c21e0bbfSMatthew R. Ochs module_exit(exit_cxlflash); 2747