/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/unaligned.h>

#include <misc/cxl.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "main.h"
#include "sislite.h"
#include "common.h"

MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");

static struct class *cxlflash_class;
static u32 cxlflash_major;
static DECLARE_BITMAP(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);

/**
 * process_cmd_err() - command error handler
 * @cmd:	AFU command that experienced the error.
 * @scp:	SCSI command associated with the AFU command in error.
 *
 * Translates error bits from AFU command to SCSI command results.
 */
static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
{
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct sisl_ioarcb *ioarcb;
	struct sisl_ioasa *ioasa;
	u32 resid;

	if (unlikely(!cmd))
		return;

	ioarcb = &(cmd->rcb);
	ioasa = &(cmd->sa);

	if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
		resid = ioasa->resid;
		scsi_set_resid(scp, resid);
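		/* Underrun is conveyed to the midlayer via the residual count */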
		dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
			__func__, cmd, scp, resid);
	}

	if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
		dev_dbg(dev, "%s: cmd overrun cmd = %p scp = %p\n",
			__func__, cmd, scp);
		scp->result = (DID_ERROR << 16);
	}

	dev_dbg(dev, "%s: cmd failed afu_rc=%02x scsi_rc=%02x fc_rc=%02x "
		"afu_extra=%02x scsi_extra=%02x fc_extra=%02x\n", __func__,
		ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc,
		ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra);

	if (ioasa->rc.scsi_rc) {
		/* We have a SCSI status */
		if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
			memcpy(scp->sense_buffer, ioasa->sense_data,
			       SISL_SENSE_DATA_LEN);
			scp->result = ioasa->rc.scsi_rc;
		} else
			scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
	}

	/*
	 * We encountered an error. Set scp->result based on nature
	 * of error.
	 */
	if (ioasa->rc.fc_rc) {
		/* We have an FC status */
		switch (ioasa->rc.fc_rc) {
		case SISL_FC_RC_LINKDOWN:
			scp->result = (DID_REQUEUE << 16);
			break;
		case SISL_FC_RC_RESID:
			/* This indicates an FCP resid underrun */
			if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
				/* If the SISL_RC_FLAGS_OVERRUN flag was set,
				 * then we will handle this error elsewhere.
				 * If not, then we must handle it here.
				 * This is probably an AFU bug.
				 */
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_FC_RC_RESIDERR:
			/* Resid mismatch between adapter and device */
		case SISL_FC_RC_TGTABORT:
		case SISL_FC_RC_ABORTOK:
		case SISL_FC_RC_ABORTFAIL:
		case SISL_FC_RC_NOLOGI:
		case SISL_FC_RC_ABORTPEND:
		case SISL_FC_RC_WRABORTPEND:
		case SISL_FC_RC_NOEXP:
		case SISL_FC_RC_INUSE:
			scp->result = (DID_ERROR << 16);
			break;
		}
	}

	if (ioasa->rc.afu_rc) {
		/* We have an AFU error */
		switch (ioasa->rc.afu_rc) {
		case SISL_AFU_RC_NO_CHANNELS:
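			/* No FC channels available - report the device as unreachable */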
			scp->result = (DID_NO_CONNECT << 16);
			break;
		case SISL_AFU_RC_DATA_DMA_ERR:
			switch (ioasa->afu_extra) {
			case SISL_AFU_DMA_ERR_PAGE_IN:
				/* Retry */
				scp->result = (DID_IMM_RETRY << 16);
				break;
			case SISL_AFU_DMA_ERR_INVALID_EA:
			default:
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_AFU_RC_OUT_OF_DATA_BUFS:
			/* Retry */
			scp->result = (DID_ALLOC_FAILURE << 16);
			break;
		default:
			scp->result = (DID_ERROR << 16);
		}
	}
}

/**
 * cmd_complete() - command completion handler
 * @cmd:	AFU command that has completed.
 *
 * For SCSI commands this routine prepares and submits commands that have
 * either completed or timed out to the SCSI stack. For internal commands
 * (TMF or AFU), this routine simply notifies the originator that the
 * command has completed.
 */
static void cmd_complete(struct afu_cmd *cmd)
{
	struct scsi_cmnd *scp;
	ulong lock_flags;
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
	list_del(&cmd->list);
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);

	if (cmd->scp) {
		scp = cmd->scp;
		if (unlikely(cmd->sa.ioasc))
			process_cmd_err(cmd, scp);
		else
			scp->result = (DID_OK << 16);

		dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
				    __func__, scp, scp->result, cmd->sa.ioasc);
		scp->scsi_done(scp);
	} else if (cmd->cmd_tmf) {
		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
		cfg->tmf_active = false;
		wake_up_all_locked(&cfg->tmf_waitq);
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
	} else
		complete(&cmd->cevent);
}

/**
 * flush_pending_cmds() - flush all pending commands on this hardware queue
 * @hwq:	Hardware queue to flush.
 *
 * The hardware send queue lock associated with this hardware queue must be
 * held when calling this routine.
 */
static void flush_pending_cmds(struct hwq *hwq)
{
	struct cxlflash_cfg *cfg = hwq->afu->parent;
	struct afu_cmd *cmd, *tmp;
	struct scsi_cmnd *scp;
	ulong lock_flags;

	list_for_each_entry_safe(cmd, tmp, &hwq->pending_cmds, list) {
		/* Bypass command when on a doneq, cmd_complete() will handle */
		if (!list_empty(&cmd->queue))
			continue;

		list_del(&cmd->list);

		if (cmd->scp) {
			scp = cmd->scp;
			scp->result = (DID_IMM_RETRY << 16);
			scp->scsi_done(scp);
		} else {
			cmd->cmd_aborted = true;

			if (cmd->cmd_tmf) {
				spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
				cfg->tmf_active = false;
				wake_up_all_locked(&cfg->tmf_waitq);
				spin_unlock_irqrestore(&cfg->tmf_slock,
						       lock_flags);
			} else
				complete(&cmd->cevent);
		}
	}
}

/**
 * context_reset() - reset context via specified register
 * @hwq:	Hardware queue owning the context to be reset.
 * @reset_reg:	MMIO register to perform reset.
 *
 * When the reset is successful, the SISLite specification guarantees that
 * the AFU has aborted all currently pending I/O. Accordingly, these commands
 * must be flushed.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset(struct hwq *hwq, __be64 __iomem *reset_reg)
{
	struct cxlflash_cfg *cfg = hwq->afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = -ETIMEDOUT;
	int nretry = 0;
	u64 val = 0x1;
	ulong lock_flags;

	dev_dbg(dev, "%s: hwq=%p\n", __func__, hwq);

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);

	writeq_be(val, reset_reg);
	do {
		val = readq_be(reset_reg);
		if ((val & 0x1) == 0x0) {
			rc = 0;
			break;
		}

		/* Double delay each time */
		udelay(1 << nretry);
	} while (nretry++ < MC_ROOM_RETRY_CNT);

	if (!rc)
		flush_pending_cmds(hwq);

	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);

	dev_dbg(dev, "%s: returning rc=%d, val=%016llx nretry=%d\n",
		__func__, rc, val, nretry);
	return rc;
}

/**
 * context_reset_ioarrin() - reset context via IOARRIN register
 * @hwq:	Hardware queue owning the context to be reset.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset_ioarrin(struct hwq *hwq)
{
	return context_reset(hwq, &hwq->host_map->ioarrin);
}

/**
 * context_reset_sq() - reset context via SQ_CONTEXT_RESET register
 * @hwq:	Hardware queue owning the context to be reset.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset_sq(struct hwq *hwq)
{
	return context_reset(hwq, &hwq->host_map->sq_ctx_reset);
}

/**
 * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
	int rc = 0;
	s64 room;
	ulong lock_flags;

	/*
	 * To avoid the performance penalty of MMIO, spread the update of
	 * 'room' over multiple commands.
	 */
	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
	if (--hwq->room < 0) {
		room = readq_be(&hwq->host_map->cmd_room);
		if (room <= 0) {
			dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
					    "0x%02X, room=0x%016llX\n",
					    __func__, cmd->rcb.cdb[0], room);
			hwq->room = 0;
			rc = SCSI_MLQUEUE_HOST_BUSY;
			goto out;
		}
		hwq->room = room - 1;
	}
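
	/* Queue on pending_cmds so a context reset can flush this command */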
	list_add(&cmd->list, &hwq->pending_cmds);
	writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin);
out:
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
	dev_dbg_ratelimited(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n",
			    __func__, cmd, cmd->rcb.data_len,
			    cmd->rcb.data_ea, rc);
	return rc;
}

/**
 * send_cmd_sq() - sends an AFU command via SQ ring
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
	int rc = 0;
	int newval;
	ulong lock_flags;

	newval = atomic_dec_if_positive(&hwq->hsq_credits);
	if (newval <= 0) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	cmd->rcb.ioasa = &cmd->sa;

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);

	*hwq->hsq_curr = cmd->rcb;
	if (hwq->hsq_curr < hwq->hsq_end)
		hwq->hsq_curr++;
	else
		hwq->hsq_curr = hwq->hsq_start;

	list_add(&cmd->list, &hwq->pending_cmds);
	writeq_be((u64)hwq->hsq_curr, &hwq->host_map->sq_tail);

	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
out:
	dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p "
		"head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len,
		cmd->rcb.data_ea, cmd->rcb.ioasa, rc, hwq->hsq_curr,
		readq_be(&hwq->host_map->sq_head),
		readq_be(&hwq->host_map->sq_tail));
	return rc;
}

/**
 * wait_resp() - polls for a response or timeout to a sent AFU command
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command that was sent.
 *
 * Return: 0 on success, -errno on failure
 */
static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);

	timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
	if (!timeout)
		rc = -ETIMEDOUT;

	if (cmd->cmd_aborted)
		rc = -EAGAIN;

	if (unlikely(cmd->sa.ioasc != 0)) {
		dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n",
			__func__, cmd->rcb.cdb[0], cmd->sa.ioasc);
		rc = -EIO;
	}

	return rc;
}

/**
 * cmd_to_target_hwq() - selects a target hardware queue for a SCSI command
 * @host:	SCSI host associated with device.
 * @scp:	SCSI command to send.
 * @afu:	AFU associated with the host.
 *
 * Hashes a command based upon the hardware queue mode.
 *
 * Return: Trusted index of target hardware queue
 */
static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp,
			     struct afu *afu)
{
	u32 tag;
	u32 hwq = 0;

	if (afu->num_hwqs == 1)
		return 0;

	switch (afu->hwq_mode) {
	case HWQ_MODE_RR:
		hwq = afu->hwq_rr_count++ % afu->num_hwqs;
		break;
	case HWQ_MODE_TAG:
		tag = blk_mq_unique_tag(scp->request);
		hwq = blk_mq_unique_tag_to_hwq(tag);
		break;
	case HWQ_MODE_CPU:
		hwq = smp_processor_id() % afu->num_hwqs;
		break;
	default:
		WARN_ON_ONCE(1);
	}

	return hwq;
}

/**
 * send_tmf() - sends a Task Management Function (TMF)
 * @cfg:	Internal structure associated with the host.
 * @sdev:	SCSI device destined for TMF.
 * @tmfcmd:	TMF command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY or -errno on failure
 */
static int send_tmf(struct cxlflash_cfg *cfg, struct scsi_device *sdev,
		    u64 tmfcmd)
{
	struct afu *afu = cfg->afu;
	struct afu_cmd *cmd = NULL;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
	bool needs_deletion = false;
	char *buf = NULL;
	ulong lock_flags;
	int rc = 0;
	ulong to;

	buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
	if (unlikely(!buf)) {
		dev_err(dev, "%s: no memory for command\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
	INIT_LIST_HEAD(&cmd->queue);

	/* When Task Management Function is active do not send another */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	cfg->tmf_active = true;
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	cmd->parent = afu;
	cmd->cmd_tmf = true;
	cmd->hwq_index = hwq->index;

	cmd->rcb.ctx_id = hwq->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = CHAN2PORTMASK(sdev->channel);
	cmd->rcb.lun_id = lun_to_lunid(sdev->lun);
	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
			      SISL_REQ_FLAGS_SUP_UNDERRUN |
			      SISL_REQ_FLAGS_TMF_CMD);
	memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));

	rc = afu->send_cmd(afu, cmd);
	if (unlikely(rc)) {
		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
		cfg->tmf_active = false;
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		goto out;
	}

	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	to = msecs_to_jiffies(5000);
	to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
						       !cfg->tmf_active,
						       cfg->tmf_slock,
						       to);
	if (!to) {
		dev_err(dev, "%s: TMF timed out\n", __func__);
		rc = -ETIMEDOUT;
		needs_deletion = true;
	} else if (cmd->cmd_aborted) {
		dev_err(dev, "%s: TMF aborted\n", __func__);
		rc = -EAGAIN;
	} else if (cmd->sa.ioasc) {
		dev_err(dev, "%s: TMF failed ioasc=%08x\n",
			__func__, cmd->sa.ioasc);
		rc = -EIO;
	}
	cfg->tmf_active = false;
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	if (needs_deletion) {
		spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
		list_del(&cmd->list);
		spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
	}
out:
	kfree(buf);
	return rc;
}

/**
 * cxlflash_driver_info() - information handler for this host driver
 * @host:	SCSI host associated with device.
 *
 * Return: A string describing the device.
 */
static const char *cxlflash_driver_info(struct Scsi_Host *host)
{
	return CXLFLASH_ADAPTER_NAME;
}

/**
 * cxlflash_queuecommand() - sends a mid-layer request
 * @host:	SCSI host associated with device.
 * @scp:	SCSI command to send.
 *
 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
{
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd = sc_to_afuci(scp);
	struct scatterlist *sg = scsi_sglist(scp);
	int hwq_index = cmd_to_target_hwq(host, scp, afu);
	struct hwq *hwq = get_hwq(afu, hwq_index);
	u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
	ulong lock_flags;
	int rc = 0;

	dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
			    "cdb=(%08x-%08x-%08x-%08x)\n",
			    __func__, scp, host->host_no, scp->device->channel,
			    scp->device->id, scp->device->lun,
			    get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	/*
	 * If a Task Management Function is active, wait for it to complete
	 * before continuing with regular commands.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active) {
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	switch (cfg->state) {
	case STATE_PROBING:
	case STATE_PROBED:
	case STATE_RESET:
		dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	case STATE_FAILTERM:
		dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__);
		scp->result = (DID_NO_CONNECT << 16);
		scp->scsi_done(scp);
		rc = 0;
		goto out;
	default:
		atomic_inc(&afu->cmds_active);
		break;
	}

	if (likely(sg)) {
		cmd->rcb.data_len = sg->length;
		cmd->rcb.data_ea = (uintptr_t)sg_virt(sg);
	}

	cmd->scp = scp;
	cmd->parent = afu;
	cmd->hwq_index = hwq_index;

	cmd->sa.ioasc = 0;
	cmd->rcb.ctx_id = hwq->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

	if (scp->sc_data_direction == DMA_TO_DEVICE)
		req_flags |= SISL_REQ_FLAGS_HOST_WRITE;

	cmd->rcb.req_flags = req_flags;
	memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));

	rc = afu->send_cmd(afu, cmd);
	atomic_dec(&afu->cmds_active);
out:
	return rc;
}

/**
 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
 * @cfg:	Internal structure associated with the host.
 */
static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;

	if (pci_channel_offline(pdev))
		wait_event_timeout(cfg->reset_waitq,
				   !pci_channel_offline(pdev),
				   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
}

/**
 * free_mem() - free memory associated with the AFU
 * @cfg:	Internal structure associated with the host.
 */
static void free_mem(struct cxlflash_cfg *cfg)
{
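	/* The AFU was allocated as whole pages in alloc_mem() */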
	struct afu *afu = cfg->afu;

	if (cfg->afu) {
		free_pages((ulong)afu, get_order(sizeof(struct afu)));
		cfg->afu = NULL;
	}
}

/**
 * cxlflash_reset_sync() - synchronizing point for asynchronous resets
 * @cfg:	Internal structure associated with the host.
 */
static void cxlflash_reset_sync(struct cxlflash_cfg *cfg)
{
	if (cfg->async_reset_cookie == 0)
		return;

	/* Wait until all async calls prior to this cookie have completed */
	async_synchronize_cookie(cfg->async_reset_cookie + 1);
	cfg->async_reset_cookie = 0;
}

/**
 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU in a partially allocated/initialized state.
 *
 * Cancels scheduled worker threads, waits for any active internal AFU
 * commands to time out, disables IRQ polling and then unmaps the MMIO space.
 */
static void stop_afu(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct hwq *hwq;
	int i;

	cancel_work_sync(&cfg->work_q);
	if (!current_is_async())
		cxlflash_reset_sync(cfg);

	if (likely(afu)) {
		while (atomic_read(&afu->cmds_active))
			ssleep(1);

		if (afu_is_irqpoll_enabled(afu)) {
			for (i = 0; i < afu->num_hwqs; i++) {
				hwq = get_hwq(afu, i);

				irq_poll_disable(&hwq->irqpoll);
			}
		}

		if (likely(afu->afu_map)) {
			cfg->ops->psa_unmap(afu->afu_map);
			afu->afu_map = NULL;
		}
	}
}

/**
 * term_intr() - disables all AFU interrupts
 * @cfg:	Internal structure associated with the host.
 * @level:	Depth of allocation, where to begin waterfall tear down.
 * @index:	Index of the hardware queue.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
		      u32 index)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;

	if (!afu) {
		dev_err(dev, "%s: returning with NULL afu\n", __func__);
		return;
	}

	hwq = get_hwq(afu, index);

	if (!hwq->ctx_cookie) {
		dev_err(dev, "%s: returning with NULL MC\n", __func__);
		return;
	}

	switch (level) {
	case UNMAP_THREE:
		/* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
		if (index == PRIMARY_HWQ)
			cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 3, hwq);
	case UNMAP_TWO:
		cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 2, hwq);
	case UNMAP_ONE:
		cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 1, hwq);
	case FREE_IRQ:
		cfg->ops->free_afu_irqs(hwq->ctx_cookie);
		/* fall through */
	case UNDO_NOOP:
		/* No action required */
		break;
	}
}

/**
 * term_mc() - terminates the master context
 * @cfg:	Internal structure associated with the host.
 * @index:	Index of the hardware queue.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_mc(struct cxlflash_cfg *cfg, u32 index)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;
	ulong lock_flags;

	if (!afu) {
		dev_err(dev, "%s: returning with NULL afu\n", __func__);
		return;
	}

	hwq = get_hwq(afu, index);

	if (!hwq->ctx_cookie) {
		dev_err(dev, "%s: returning with NULL MC\n", __func__);
		return;
	}

	WARN_ON(cfg->ops->stop_context(hwq->ctx_cookie));
	if (index != PRIMARY_HWQ)
		WARN_ON(cfg->ops->release_context(hwq->ctx_cookie));
	hwq->ctx_cookie = NULL;

	spin_lock_irqsave(&hwq->hrrq_slock, lock_flags);
	hwq->hrrq_online = false;
	spin_unlock_irqrestore(&hwq->hrrq_slock, lock_flags);

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
	flush_pending_cmds(hwq);
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
}

/**
 * term_afu() - terminates the AFU
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_afu(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int k;

	/*
	 * Tear down is carefully orchestrated to ensure
	 * no interrupts can come in when the problem state
	 * area is unmapped.
	 *
	 * 1) Disable all AFU interrupts for each master
	 * 2) Unmap the problem state area
	 * 3) Stop each master context
	 */
	for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
		term_intr(cfg, UNMAP_THREE, k);

	stop_afu(cfg);

	for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
		term_mc(cfg, k);

	dev_dbg(dev, "%s: returning\n", __func__);
}

/**
 * notify_shutdown() - notifies device of pending shutdown
 * @cfg:	Internal structure associated with the host.
 * @wait:	Whether to wait for shutdown processing to complete.
 *
 * This function will notify the AFU that the adapter is being shut down
 * and will wait for shutdown processing to complete if wait is true.
 * This notification should flush pending I/Os to the device and halt
 * further I/Os until the next AFU reset is issued and device restarted.
 */
static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct dev_dependent_vals *ddv;
	__be64 __iomem *fc_port_regs;
	u64 reg, status;
	int i, retry_cnt = 0;

	ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
	if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
		return;

	if (!afu || !afu->afu_map) {
		dev_dbg(dev, "%s: Problem state area not mapped\n", __func__);
		return;
	}

	/* Notify AFU */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);

		reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
		reg |= SISL_FC_SHUTDOWN_NORMAL;
		writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
	}

	if (!wait)
		return;

	/* Wait up to 1.5 seconds for shutdown processing to complete */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);
		retry_cnt = 0;

		while (true) {
			status = readq_be(&fc_port_regs[FC_STATUS / 8]);
			if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
				break;
			if (++retry_cnt >= MC_RETRY_CNT) {
				dev_dbg(dev, "%s: port %d shutdown processing "
					"not yet completed\n", __func__, i);
				break;
			}
			msleep(100 * retry_cnt);
		}
	}
}

/**
 * cxlflash_get_minor() - gets the first available minor number
 *
 * Return: Unique minor number that can be used to create the character device.
 */
static int cxlflash_get_minor(void)
{
	int minor;
	long bit;

	bit = find_first_zero_bit(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
	if (bit >= CXLFLASH_MAX_ADAPTERS)
		return -1;

	minor = bit & MINORMASK;
	set_bit(minor, cxlflash_minor);
	return minor;
}

/**
 * cxlflash_put_minor() - releases the minor number
 * @minor:	Minor number that is no longer needed.
 */
static void cxlflash_put_minor(int minor)
{
	clear_bit(minor, cxlflash_minor);
}

/**
 * cxlflash_release_chrdev() - release the character device for the host
 * @cfg:	Internal structure associated with the host.
 */
static void cxlflash_release_chrdev(struct cxlflash_cfg *cfg)
{
	device_unregister(cfg->chardev);
	cfg->chardev = NULL;
	cdev_del(&cfg->cdev);
	cxlflash_put_minor(MINOR(cfg->cdev.dev));
}

/**
 * cxlflash_remove() - PCI entry point to tear down host
 * @pdev:	PCI device associated with the host.
 *
 * Safe to use as a cleanup in partially allocated/initialized state. Note that
 * the reset_waitq is flushed as part of the stop/termination of user contexts.
 */
static void cxlflash_remove(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	ulong lock_flags;

	if (!pci_is_enabled(pdev)) {
		dev_dbg(dev, "%s: Device is disabled\n", __func__);
		return;
	}

	/* Yield to running recovery threads before continuing with remove */
	wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
				     cfg->state != STATE_PROBING);
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	/* Notify AFU and wait for shutdown processing to complete */
	notify_shutdown(cfg, true);

	cfg->state = STATE_FAILTERM;
	cxlflash_stop_term_user_contexts(cfg);

	switch (cfg->init_state) {
	case INIT_STATE_CDEV:
		cxlflash_release_chrdev(cfg);
	case INIT_STATE_SCSI:
		cxlflash_term_local_luns(cfg);
		scsi_remove_host(cfg->host);
	case INIT_STATE_AFU:
		term_afu(cfg);
	case INIT_STATE_PCI:
		cfg->ops->destroy_afu(cfg->afu_cookie);
		pci_disable_device(pdev);
	case INIT_STATE_NONE:
		free_mem(cfg);
		scsi_host_put(cfg->host);
		break;
	}

	dev_dbg(dev, "%s: returning\n", __func__);
}

/**
 * alloc_mem() - allocates the AFU and its command pool
 * @cfg:	Internal structure associated with the host.
 *
 * A partially allocated state remains on failure.
 *
 * Return:
 *	0 on success
 *	-ENOMEM on failure to allocate memory
 */
static int alloc_mem(struct cxlflash_cfg *cfg)
{
	int rc = 0;
	struct device *dev = &cfg->dev->dev;

	/* AFU is ~28k, i.e. only one 64k page or up to seven 4k pages */
	cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					    get_order(sizeof(struct afu)));
	if (unlikely(!cfg->afu)) {
		dev_err(dev, "%s: cannot get %d free pages\n",
			__func__, get_order(sizeof(struct afu)));
		rc = -ENOMEM;
		goto out;
	}
	cfg->afu->parent = cfg;
	cfg->afu->desired_hwqs = CXLFLASH_DEF_HWQS;
	cfg->afu->afu_map = NULL;
out:
	return rc;
}

/**
 * init_pci() - initializes the host as a PCI device
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_pci(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = pci_enable_device(pdev);
	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			cxlflash_wait_for_pci_err_recovery(cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc) {
			dev_err(dev, "%s: Cannot enable adapter\n", __func__);
			cxlflash_wait_for_pci_err_recovery(cfg);
			goto out;
		}
	}

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_scsi(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = scsi_add_host(cfg->host, &pdev->dev);
	if (rc) {
		dev_err(dev, "%s: scsi_add_host failed rc=%d\n", __func__, rc);
		goto out;
	}

	scsi_scan_host(cfg->host);

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * set_port_online() - transitions the specified host FC port to online state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. Online state means
 * that the FC link layer has synced, completed the handshaking process, and
 * is ready for login to start.
 */
static void set_port_online(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE);	/* clear OFF_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);	/* set ON_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * set_port_offline() - transitions the specified host FC port to offline state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call.
 */
static void set_port_offline(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);	/* clear ON_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);	/* set OFF_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * wait_port_online() - waits for the specified host FC port to come online
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @delay_us:	Number of microseconds to delay between reading port status.
Ochs * @nretry: Number of cycles to retry reading port status. 1125c21e0bbfSMatthew R. Ochs * 1126c21e0bbfSMatthew R. Ochs * The provided MMIO region must be mapped prior to call. This will timeout 1127c21e0bbfSMatthew R. Ochs * when the cable is not plugged in. 1128c21e0bbfSMatthew R. Ochs * 1129c21e0bbfSMatthew R. Ochs * Return: 1130c21e0bbfSMatthew R. Ochs * TRUE (1) when the specified port is online 1131c21e0bbfSMatthew R. Ochs * FALSE (0) when the specified port fails to come online after timeout 1132c21e0bbfSMatthew R. Ochs */ 1133fb67d44dSMatthew R. Ochs static bool wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry) 1134c21e0bbfSMatthew R. Ochs { 1135c21e0bbfSMatthew R. Ochs u64 status; 1136c21e0bbfSMatthew R. Ochs 1137fb67d44dSMatthew R. Ochs WARN_ON(delay_us < 1000); 1138c21e0bbfSMatthew R. Ochs 1139c21e0bbfSMatthew R. Ochs do { 1140c21e0bbfSMatthew R. Ochs msleep(delay_us / 1000); 1141c21e0bbfSMatthew R. Ochs status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]); 114205dab432SMatthew R. Ochs if (status == U64_MAX) 114305dab432SMatthew R. Ochs nretry /= 2; 1144c21e0bbfSMatthew R. Ochs } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE && 1145c21e0bbfSMatthew R. Ochs nretry--); 1146c21e0bbfSMatthew R. Ochs 1147c21e0bbfSMatthew R. Ochs return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE); 1148c21e0bbfSMatthew R. Ochs } 1149c21e0bbfSMatthew R. Ochs 1150c21e0bbfSMatthew R. Ochs /** 1151c21e0bbfSMatthew R. Ochs * wait_port_offline() - waits for the specified host FC port go offline 1152c21e0bbfSMatthew R. Ochs * @fc_regs: Top of MMIO region defined for specified port. 1153c21e0bbfSMatthew R. Ochs * @delay_us: Number of microseconds to delay between reading port status. 1154c21e0bbfSMatthew R. Ochs * @nretry: Number of cycles to retry reading port status. 1155c21e0bbfSMatthew R. Ochs * 1156c21e0bbfSMatthew R. Ochs * The provided MMIO region must be mapped prior to call. 1157c21e0bbfSMatthew R. Ochs * 1158c21e0bbfSMatthew R. Ochs * Return: 1159c21e0bbfSMatthew R. Ochs * TRUE (1) when the specified port is offline 1160c21e0bbfSMatthew R. Ochs * FALSE (0) when the specified port fails to go offline after timeout 1161c21e0bbfSMatthew R. Ochs */ 1162fb67d44dSMatthew R. Ochs static bool wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry) 1163c21e0bbfSMatthew R. Ochs { 1164c21e0bbfSMatthew R. Ochs u64 status; 1165c21e0bbfSMatthew R. Ochs 1166fb67d44dSMatthew R. Ochs WARN_ON(delay_us < 1000); 1167c21e0bbfSMatthew R. Ochs 1168c21e0bbfSMatthew R. Ochs do { 1169c21e0bbfSMatthew R. Ochs msleep(delay_us / 1000); 1170c21e0bbfSMatthew R. Ochs status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]); 117105dab432SMatthew R. Ochs if (status == U64_MAX) 117205dab432SMatthew R. Ochs nretry /= 2; 1173c21e0bbfSMatthew R. Ochs } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE && 1174c21e0bbfSMatthew R. Ochs nretry--); 1175c21e0bbfSMatthew R. Ochs 1176c21e0bbfSMatthew R. Ochs return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE); 1177c21e0bbfSMatthew R. Ochs } 1178c21e0bbfSMatthew R. Ochs 1179c21e0bbfSMatthew R. Ochs /** 1180c21e0bbfSMatthew R. Ochs * afu_set_wwpn() - configures the WWPN for the specified host FC port 1181c21e0bbfSMatthew R. Ochs * @afu: AFU associated with the host that owns the specified FC port. 1182c21e0bbfSMatthew R. Ochs * @port: Port number being configured. 1183c21e0bbfSMatthew R. Ochs * @fc_regs: Top of MMIO region defined for specified port. 1184c21e0bbfSMatthew R. 
Ochs * @wwpn: The world-wide-port-number previously discovered for port. 1185c21e0bbfSMatthew R. Ochs * 1186c21e0bbfSMatthew R. Ochs * The provided MMIO region must be mapped prior to call. As part of the 1187c21e0bbfSMatthew R. Ochs * sequence to configure the WWPN, the port is toggled offline and then back 1188c21e0bbfSMatthew R. Ochs * online. This toggling action can cause this routine to delay up to a few 1189c21e0bbfSMatthew R. Ochs * seconds. When configured to use the internal LUN feature of the AFU, a 1190c21e0bbfSMatthew R. Ochs * failure to come online is overridden. 1191c21e0bbfSMatthew R. Ochs */ 1192f8013261SMatthew R. Ochs static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs, 11931786f4a0SMatthew R. Ochs u64 wwpn) 1194c21e0bbfSMatthew R. Ochs { 1195fb67d44dSMatthew R. Ochs struct cxlflash_cfg *cfg = afu->parent; 1196fb67d44dSMatthew R. Ochs struct device *dev = &cfg->dev->dev; 1197fb67d44dSMatthew R. Ochs 1198c21e0bbfSMatthew R. Ochs set_port_offline(fc_regs); 1199c21e0bbfSMatthew R. Ochs if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US, 1200c21e0bbfSMatthew R. Ochs FC_PORT_STATUS_RETRY_CNT)) { 1201fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: wait on port %d to go offline timed out\n", 1202c21e0bbfSMatthew R. Ochs __func__, port); 1203c21e0bbfSMatthew R. Ochs } 1204c21e0bbfSMatthew R. Ochs 1205c21e0bbfSMatthew R. Ochs writeq_be(wwpn, &fc_regs[FC_PNAME / 8]); 1206c21e0bbfSMatthew R. Ochs 1207c21e0bbfSMatthew R. Ochs set_port_online(fc_regs); 1208c21e0bbfSMatthew R. Ochs if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US, 1209c21e0bbfSMatthew R. Ochs FC_PORT_STATUS_RETRY_CNT)) { 1210fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: wait on port %d to go online timed out\n", 1211c21e0bbfSMatthew R. Ochs __func__, port); 1212c21e0bbfSMatthew R. Ochs } 1213c21e0bbfSMatthew R. Ochs } 1214c21e0bbfSMatthew R. Ochs 1215c21e0bbfSMatthew R. Ochs /** 1216c21e0bbfSMatthew R. Ochs * afu_link_reset() - resets the specified host FC port 1217c21e0bbfSMatthew R. Ochs * @afu: AFU associated with the host that owns the specified FC port. 1218c21e0bbfSMatthew R. Ochs * @port: Port number being configured. 1219c21e0bbfSMatthew R. Ochs * @fc_regs: Top of MMIO region defined for specified port. 1220c21e0bbfSMatthew R. Ochs * 1221c21e0bbfSMatthew R. Ochs * The provided MMIO region must be mapped prior to call. The sequence to 1222c21e0bbfSMatthew R. Ochs * reset the port involves toggling it offline and then back online. This 1223c21e0bbfSMatthew R. Ochs * action can cause this routine to delay up to a few seconds. An effort 1224c21e0bbfSMatthew R. Ochs * is made to maintain link with the device by switching the host to use 1225c21e0bbfSMatthew R. Ochs * the alternate port exclusively while the reset takes place. A failure 1226c21e0bbfSMatthew R. Ochs * of the port to come back online is logged but does not abort the sequence. 1227c21e0bbfSMatthew R. Ochs */ 12281786f4a0SMatthew R. Ochs static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs) 1229c21e0bbfSMatthew R. Ochs { 1230fb67d44dSMatthew R. Ochs struct cxlflash_cfg *cfg = afu->parent; 1231fb67d44dSMatthew R. Ochs struct device *dev = &cfg->dev->dev; 1232c21e0bbfSMatthew R. Ochs u64 port_sel; 1233c21e0bbfSMatthew R. Ochs 1234c21e0bbfSMatthew R. Ochs /* first switch the AFU to the other links, if any */ 1235c21e0bbfSMatthew R. Ochs port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel); 12364da74db0SDan Carpenter port_sel &= ~(1ULL << port); 1237c21e0bbfSMatthew R.
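	/*
	 * afu_port_sel is a bitmask of the FC ports the AFU is allowed to
	 * use for I/O. Clearing this port's bit steers traffic to the
	 * remaining port(s) for the duration of the reset; the global AFU
	 * sync below is issued so the new selection is observed by the AFU
	 * before the link is bounced.
	 */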
Ochs writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel); 1238c21e0bbfSMatthew R. Ochs cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC); 1239c21e0bbfSMatthew R. Ochs 1240c21e0bbfSMatthew R. Ochs set_port_offline(fc_regs); 1241c21e0bbfSMatthew R. Ochs if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US, 1242c21e0bbfSMatthew R. Ochs FC_PORT_STATUS_RETRY_CNT)) 1243fb67d44dSMatthew R. Ochs dev_err(dev, "%s: wait on port %d to go offline timed out\n", 1244c21e0bbfSMatthew R. Ochs __func__, port); 1245c21e0bbfSMatthew R. Ochs 1246c21e0bbfSMatthew R. Ochs set_port_online(fc_regs); 1247c21e0bbfSMatthew R. Ochs if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US, 1248c21e0bbfSMatthew R. Ochs FC_PORT_STATUS_RETRY_CNT)) 1249fb67d44dSMatthew R. Ochs dev_err(dev, "%s: wait on port %d to go online timed out\n", 1250c21e0bbfSMatthew R. Ochs __func__, port); 1251c21e0bbfSMatthew R. Ochs 1252c21e0bbfSMatthew R. Ochs /* switch back to include this port */ 12534da74db0SDan Carpenter port_sel |= (1ULL << port); 1254c21e0bbfSMatthew R. Ochs writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel); 1255c21e0bbfSMatthew R. Ochs cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC); 1256c21e0bbfSMatthew R. Ochs 1257fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel); 1258c21e0bbfSMatthew R. Ochs } 1259c21e0bbfSMatthew R. Ochs 1260c21e0bbfSMatthew R. Ochs /** 1261c21e0bbfSMatthew R. Ochs * afu_err_intr_init() - clears and initializes the AFU for error interrupts 1262c21e0bbfSMatthew R. Ochs * @afu: AFU associated with the host. 1263c21e0bbfSMatthew R. Ochs */ 1264c21e0bbfSMatthew R. Ochs static void afu_err_intr_init(struct afu *afu) 1265c21e0bbfSMatthew R. Ochs { 126678ae028eSMatthew R. Ochs struct cxlflash_cfg *cfg = afu->parent; 12670aa14887SMatthew R. Ochs __be64 __iomem *fc_port_regs; 1268c21e0bbfSMatthew R. Ochs int i; 1269bfc0bab1SUma Krishnan struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ); 1270c21e0bbfSMatthew R. Ochs u64 reg; 1271c21e0bbfSMatthew R. Ochs 1272c21e0bbfSMatthew R. Ochs /* global async interrupts: AFU clears afu_ctrl on context exit 1273c21e0bbfSMatthew R. Ochs * if async interrupts were sent to that context. This prevents 1274c21e0bbfSMatthew R. Ochs * the AFU from sending further async interrupts when 1275c21e0bbfSMatthew R. Ochs * there is 1276c21e0bbfSMatthew R. Ochs * nobody to receive them. 1277c21e0bbfSMatthew R. Ochs */ 1278c21e0bbfSMatthew R. Ochs 1279c21e0bbfSMatthew R. Ochs /* mask all */ 1280c21e0bbfSMatthew R. Ochs writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask); 1281bfc0bab1SUma Krishnan /* set LISN# to send and point to primary master context */ 1282bfc0bab1SUma Krishnan reg = ((u64) (((hwq->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40); 1283c21e0bbfSMatthew R. Ochs 1284c21e0bbfSMatthew R. Ochs if (afu->internal_lun) 1285c21e0bbfSMatthew R. Ochs reg |= 1; /* Bit 63 indicates local lun */ 1286c21e0bbfSMatthew R. Ochs writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl); 1287c21e0bbfSMatthew R. Ochs /* clear all */ 1288c21e0bbfSMatthew R. Ochs writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear); 1289c21e0bbfSMatthew R. Ochs /* unmask bits that are of interest */ 1290c21e0bbfSMatthew R. Ochs /* note: afu can send an interrupt after this step */ 1291c21e0bbfSMatthew R. Ochs writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask); 1292c21e0bbfSMatthew R. Ochs /* clear again in case a bit came on after previous clear but before */ 1293c21e0bbfSMatthew R. Ochs /* unmask */ 1294c21e0bbfSMatthew R.
Ochs writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear); 1295c21e0bbfSMatthew R. Ochs 1296c21e0bbfSMatthew R. Ochs /* Clear/Set internal lun bits */ 12970aa14887SMatthew R. Ochs fc_port_regs = get_fc_port_regs(cfg, 0); 12980aa14887SMatthew R. Ochs reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]); 1299c21e0bbfSMatthew R. Ochs reg &= SISL_FC_INTERNAL_MASK; 1300c21e0bbfSMatthew R. Ochs if (afu->internal_lun) 1301c21e0bbfSMatthew R. Ochs reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT); 13020aa14887SMatthew R. Ochs writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]); 1303c21e0bbfSMatthew R. Ochs 1304c21e0bbfSMatthew R. Ochs /* now clear FC errors */ 130578ae028eSMatthew R. Ochs for (i = 0; i < cfg->num_fc_ports; i++) { 13060aa14887SMatthew R. Ochs fc_port_regs = get_fc_port_regs(cfg, i); 13070aa14887SMatthew R. Ochs 13080aa14887SMatthew R. Ochs writeq_be(0xFFFFFFFFU, &fc_port_regs[FC_ERROR / 8]); 13090aa14887SMatthew R. Ochs writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]); 1310c21e0bbfSMatthew R. Ochs } 1311c21e0bbfSMatthew R. Ochs 1312c21e0bbfSMatthew R. Ochs /* sync interrupts for master's IOARRIN write */ 1313c21e0bbfSMatthew R. Ochs /* note that unlike asyncs, there can be no pending sync interrupts */ 1314c21e0bbfSMatthew R. Ochs /* at this time (this is a fresh context and master has not written */ 1315c21e0bbfSMatthew R. Ochs /* IOARRIN yet), so there is nothing to clear. */ 1316c21e0bbfSMatthew R. Ochs 1317c21e0bbfSMatthew R. Ochs /* set LISN#, it is always sent to the context that wrote IOARRIN */ 13183065267aSMatthew R. Ochs for (i = 0; i < afu->num_hwqs; i++) { 1319bfc0bab1SUma Krishnan hwq = get_hwq(afu, i); 1320bfc0bab1SUma Krishnan 1321465891feSMatthew R. Ochs reg = readq_be(&hwq->host_map->ctx_ctrl); 1322465891feSMatthew R. Ochs WARN_ON((reg & SISL_CTX_CTRL_LISN_MASK) != 0); 1323465891feSMatthew R. Ochs reg |= SISL_MSI_SYNC_ERROR; 1324465891feSMatthew R. Ochs writeq_be(reg, &hwq->host_map->ctx_ctrl); 1325bfc0bab1SUma Krishnan writeq_be(SISL_ISTATUS_MASK, &hwq->host_map->intr_mask); 1326bfc0bab1SUma Krishnan } 1327c21e0bbfSMatthew R. Ochs } 1328c21e0bbfSMatthew R. Ochs 1329c21e0bbfSMatthew R. Ochs /** 1330c21e0bbfSMatthew R. Ochs * cxlflash_sync_err_irq() - interrupt handler for synchronous errors 1331c21e0bbfSMatthew R. Ochs * @irq: Interrupt number. 1332c21e0bbfSMatthew R. Ochs * @data: Private data provided at interrupt registration, the AFU. 1333c21e0bbfSMatthew R. Ochs * 1334c21e0bbfSMatthew R. Ochs * Return: Always return IRQ_HANDLED. 1335c21e0bbfSMatthew R. Ochs */ 1336c21e0bbfSMatthew R. Ochs static irqreturn_t cxlflash_sync_err_irq(int irq, void *data) 1337c21e0bbfSMatthew R. Ochs { 1338bfc0bab1SUma Krishnan struct hwq *hwq = (struct hwq *)data; 1339bfc0bab1SUma Krishnan struct cxlflash_cfg *cfg = hwq->afu->parent; 1340fb67d44dSMatthew R. Ochs struct device *dev = &cfg->dev->dev; 1341c21e0bbfSMatthew R. Ochs u64 reg; 1342c21e0bbfSMatthew R. Ochs u64 reg_unmasked; 1343c21e0bbfSMatthew R. Ochs 1344bfc0bab1SUma Krishnan reg = readq_be(&hwq->host_map->intr_status); 1345c21e0bbfSMatthew R. Ochs reg_unmasked = (reg & SISL_ISTATUS_UNMASK); 1346c21e0bbfSMatthew R. Ochs 1347c21e0bbfSMatthew R. Ochs if (reg_unmasked == 0UL) { 1348fb67d44dSMatthew R. Ochs dev_err(dev, "%s: spurious interrupt, intr_status=%016llx\n", 1349fb67d44dSMatthew R. Ochs __func__, reg); 1350c21e0bbfSMatthew R. Ochs goto cxlflash_sync_err_irq_exit; 1351c21e0bbfSMatthew R. Ochs } 1352c21e0bbfSMatthew R. Ochs 1353fb67d44dSMatthew R. 
Ochs dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n", 1354fb67d44dSMatthew R. Ochs __func__, reg); 1355c21e0bbfSMatthew R. Ochs 1356bfc0bab1SUma Krishnan writeq_be(reg_unmasked, &hwq->host_map->intr_clear); 1357c21e0bbfSMatthew R. Ochs 1358c21e0bbfSMatthew R. Ochs cxlflash_sync_err_irq_exit: 1359c21e0bbfSMatthew R. Ochs return IRQ_HANDLED; 1360c21e0bbfSMatthew R. Ochs } 1361c21e0bbfSMatthew R. Ochs 1362c21e0bbfSMatthew R. Ochs /** 136376a6ebbeSMatthew R. Ochs * process_hrrq() - process the read-response queue 136476a6ebbeSMatthew R. Ochs * @afu: AFU associated with the host. 1365f918b4a8SMatthew R. Ochs * @doneq: Queue of commands harvested from the RRQ. 1366cba06e6dSMatthew R. Ochs * @budget: Threshold of RRQ entries to process. 1367f918b4a8SMatthew R. Ochs * 1368f918b4a8SMatthew R. Ochs * This routine must be called holding the disabled RRQ spin lock. 1369c21e0bbfSMatthew R. Ochs * 137076a6ebbeSMatthew R. Ochs * Return: The number of entries processed. 1371c21e0bbfSMatthew R. Ochs */ 1372bfc0bab1SUma Krishnan static int process_hrrq(struct hwq *hwq, struct list_head *doneq, int budget) 1373c21e0bbfSMatthew R. Ochs { 1374bfc0bab1SUma Krishnan struct afu *afu = hwq->afu; 1375c21e0bbfSMatthew R. Ochs struct afu_cmd *cmd; 1376696d0b0cSMatthew R. Ochs struct sisl_ioasa *ioasa; 1377696d0b0cSMatthew R. Ochs struct sisl_ioarcb *ioarcb; 1378bfc0bab1SUma Krishnan bool toggle = hwq->toggle; 137976a6ebbeSMatthew R. Ochs int num_hrrq = 0; 1380c21e0bbfSMatthew R. Ochs u64 entry, 1381bfc0bab1SUma Krishnan *hrrq_start = hwq->hrrq_start, 1382bfc0bab1SUma Krishnan *hrrq_end = hwq->hrrq_end, 1383bfc0bab1SUma Krishnan *hrrq_curr = hwq->hrrq_curr; 1384c21e0bbfSMatthew R. Ochs 1385cba06e6dSMatthew R. Ochs /* Process ready RRQ entries up to the specified budget (if any) */ 1386c21e0bbfSMatthew R. Ochs while (true) { 1387c21e0bbfSMatthew R. Ochs entry = *hrrq_curr; 1388c21e0bbfSMatthew R. Ochs 1389c21e0bbfSMatthew R. Ochs if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle) 1390c21e0bbfSMatthew R. Ochs break; 1391c21e0bbfSMatthew R. Ochs 1392696d0b0cSMatthew R. Ochs entry &= ~SISL_RESP_HANDLE_T_BIT; 1393696d0b0cSMatthew R. Ochs 1394696d0b0cSMatthew R. Ochs if (afu_is_sq_cmd_mode(afu)) { 1395696d0b0cSMatthew R. Ochs ioasa = (struct sisl_ioasa *)entry; 1396696d0b0cSMatthew R. Ochs cmd = container_of(ioasa, struct afu_cmd, sa); 1397696d0b0cSMatthew R. Ochs } else { 1398696d0b0cSMatthew R. Ochs ioarcb = (struct sisl_ioarcb *)entry; 1399696d0b0cSMatthew R. Ochs cmd = container_of(ioarcb, struct afu_cmd, rcb); 1400696d0b0cSMatthew R. Ochs } 1401696d0b0cSMatthew R. Ochs 1402f918b4a8SMatthew R. Ochs list_add_tail(&cmd->queue, doneq); 1403c21e0bbfSMatthew R. Ochs 1404c21e0bbfSMatthew R. Ochs /* Advance to next entry or wrap and flip the toggle bit */ 1405c21e0bbfSMatthew R. Ochs if (hrrq_curr < hrrq_end) 1406c21e0bbfSMatthew R. Ochs hrrq_curr++; 1407c21e0bbfSMatthew R. Ochs else { 1408c21e0bbfSMatthew R. Ochs hrrq_curr = hrrq_start; 1409c21e0bbfSMatthew R. Ochs toggle ^= SISL_RESP_HANDLE_T_BIT; 1410c21e0bbfSMatthew R. Ochs } 1411696d0b0cSMatthew R. Ochs 1412bfc0bab1SUma Krishnan atomic_inc(&hwq->hsq_credits); 141376a6ebbeSMatthew R. Ochs num_hrrq++; 1414cba06e6dSMatthew R. Ochs 1415cba06e6dSMatthew R. Ochs if (budget > 0 && num_hrrq >= budget) 1416cba06e6dSMatthew R. Ochs break; 1417c21e0bbfSMatthew R. Ochs } 1418c21e0bbfSMatthew R. Ochs 1419bfc0bab1SUma Krishnan hwq->hrrq_curr = hrrq_curr; 1420bfc0bab1SUma Krishnan hwq->toggle = toggle; 1421c21e0bbfSMatthew R. Ochs 142276a6ebbeSMatthew R. 
Ochs return num_hrrq; 142376a6ebbeSMatthew R. Ochs } 142476a6ebbeSMatthew R. Ochs 142576a6ebbeSMatthew R. Ochs /** 1426f918b4a8SMatthew R. Ochs * process_cmd_doneq() - process a queue of harvested RRQ commands 1427f918b4a8SMatthew R. Ochs * @doneq: Queue of completed commands. 1428f918b4a8SMatthew R. Ochs * 1429f918b4a8SMatthew R. Ochs * Note that upon return the queue can no longer be trusted. 1430f918b4a8SMatthew R. Ochs */ 1431f918b4a8SMatthew R. Ochs static void process_cmd_doneq(struct list_head *doneq) 1432f918b4a8SMatthew R. Ochs { 1433f918b4a8SMatthew R. Ochs struct afu_cmd *cmd, *tmp; 1434f918b4a8SMatthew R. Ochs 1435f918b4a8SMatthew R. Ochs WARN_ON(list_empty(doneq)); 1436f918b4a8SMatthew R. Ochs 1437f918b4a8SMatthew R. Ochs list_for_each_entry_safe(cmd, tmp, doneq, queue) 1438f918b4a8SMatthew R. Ochs cmd_complete(cmd); 1439f918b4a8SMatthew R. Ochs } 1440f918b4a8SMatthew R. Ochs 1441f918b4a8SMatthew R. Ochs /** 1442cba06e6dSMatthew R. Ochs * cxlflash_irqpoll() - process a queue of harvested RRQ commands 1443cba06e6dSMatthew R. Ochs * @irqpoll: IRQ poll structure associated with queue to poll. 1444cba06e6dSMatthew R. Ochs * @budget: Threshold of RRQ entries to process per poll. 1445cba06e6dSMatthew R. Ochs * 1446cba06e6dSMatthew R. Ochs * Return: The number of entries processed. 1447cba06e6dSMatthew R. Ochs */ 1448cba06e6dSMatthew R. Ochs static int cxlflash_irqpoll(struct irq_poll *irqpoll, int budget) 1449cba06e6dSMatthew R. Ochs { 1450bfc0bab1SUma Krishnan struct hwq *hwq = container_of(irqpoll, struct hwq, irqpoll); 1451cba06e6dSMatthew R. Ochs unsigned long hrrq_flags; 1452cba06e6dSMatthew R. Ochs LIST_HEAD(doneq); 1453cba06e6dSMatthew R. Ochs int num_entries = 0; 1454cba06e6dSMatthew R. Ochs 1455bfc0bab1SUma Krishnan spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags); 1456cba06e6dSMatthew R. Ochs 1457bfc0bab1SUma Krishnan num_entries = process_hrrq(hwq, &doneq, budget); 1458cba06e6dSMatthew R. Ochs if (num_entries < budget) 1459cba06e6dSMatthew R. Ochs irq_poll_complete(irqpoll); 1460cba06e6dSMatthew R. Ochs 1461bfc0bab1SUma Krishnan spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags); 1462cba06e6dSMatthew R. Ochs 1463cba06e6dSMatthew R. Ochs process_cmd_doneq(&doneq); 1464cba06e6dSMatthew R. Ochs return num_entries; 1465cba06e6dSMatthew R. Ochs } 1466cba06e6dSMatthew R. Ochs 1467cba06e6dSMatthew R. Ochs /** 146876a6ebbeSMatthew R. Ochs * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path) 146976a6ebbeSMatthew R. Ochs * @irq: Interrupt number. 147076a6ebbeSMatthew R. Ochs * @data: Private data provided at interrupt registration, the AFU. 147176a6ebbeSMatthew R. Ochs * 1472f918b4a8SMatthew R. Ochs * Return: IRQ_HANDLED or IRQ_NONE when no ready entries found. 147376a6ebbeSMatthew R. Ochs */ 147476a6ebbeSMatthew R. Ochs static irqreturn_t cxlflash_rrq_irq(int irq, void *data) 147576a6ebbeSMatthew R. Ochs { 1476bfc0bab1SUma Krishnan struct hwq *hwq = (struct hwq *)data; 1477bfc0bab1SUma Krishnan struct afu *afu = hwq->afu; 1478f918b4a8SMatthew R. Ochs unsigned long hrrq_flags; 1479f918b4a8SMatthew R. Ochs LIST_HEAD(doneq); 1480f918b4a8SMatthew R. Ochs int num_entries = 0; 148176a6ebbeSMatthew R. Ochs 1482bfc0bab1SUma Krishnan spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags); 1483cba06e6dSMatthew R. 
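	/*
	 * hrrq_slock serializes RRQ consumption between this hard IRQ
	 * handler and the irq_poll callback (cxlflash_irqpoll); whichever
	 * path runs, process_hrrq() only walks the ring while this lock is
	 * held with interrupts disabled.
	 */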
Ochs 1484d2d354a6SUma Krishnan /* Silently drop spurious interrupts when queue is not online */ 1485d2d354a6SUma Krishnan if (!hwq->hrrq_online) { 1486d2d354a6SUma Krishnan spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags); 1487d2d354a6SUma Krishnan return IRQ_HANDLED; 1488d2d354a6SUma Krishnan } 1489d2d354a6SUma Krishnan 1490cba06e6dSMatthew R. Ochs if (afu_is_irqpoll_enabled(afu)) { 1491bfc0bab1SUma Krishnan irq_poll_sched(&hwq->irqpoll); 1492bfc0bab1SUma Krishnan spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags); 1493cba06e6dSMatthew R. Ochs return IRQ_HANDLED; 1494cba06e6dSMatthew R. Ochs } 1495cba06e6dSMatthew R. Ochs 1496bfc0bab1SUma Krishnan num_entries = process_hrrq(hwq, &doneq, -1); 1497bfc0bab1SUma Krishnan spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags); 1498f918b4a8SMatthew R. Ochs 1499f918b4a8SMatthew R. Ochs if (num_entries == 0) 1500f918b4a8SMatthew R. Ochs return IRQ_NONE; 1501f918b4a8SMatthew R. Ochs 1502f918b4a8SMatthew R. Ochs process_cmd_doneq(&doneq); 1503c21e0bbfSMatthew R. Ochs return IRQ_HANDLED; 1504c21e0bbfSMatthew R. Ochs } 1505c21e0bbfSMatthew R. Ochs 1506e2ef33faSMatthew R. Ochs /* 1507e2ef33faSMatthew R. Ochs * Asynchronous interrupt information table 1508e2ef33faSMatthew R. Ochs * 1509e2ef33faSMatthew R. Ochs * NOTE: 1510e2ef33faSMatthew R. Ochs * - Order matters here as this array is indexed by bit position. 1511e2ef33faSMatthew R. Ochs * 1512e2ef33faSMatthew R. Ochs * - The checkpatch script considers the BUILD_SISL_ASTATUS_FC_PORT macro 1513e2ef33faSMatthew R. Ochs * as complex and complains due to a lack of parentheses/braces. 1514e2ef33faSMatthew R. Ochs */ 1515e2ef33faSMatthew R. Ochs #define ASTATUS_FC(_a, _b, _c, _d) \ 1516e2ef33faSMatthew R. Ochs { SISL_ASTATUS_FC##_a##_##_b, _c, _a, (_d) } 1517e2ef33faSMatthew R. Ochs 1518e2ef33faSMatthew R. Ochs #define BUILD_SISL_ASTATUS_FC_PORT(_a) \ 1519e2ef33faSMatthew R. Ochs ASTATUS_FC(_a, LINK_UP, "link up", 0), \ 1520e2ef33faSMatthew R. Ochs ASTATUS_FC(_a, LINK_DN, "link down", 0), \ 1521e2ef33faSMatthew R. Ochs ASTATUS_FC(_a, LOGI_S, "login succeeded", SCAN_HOST), \ 1522e2ef33faSMatthew R. Ochs ASTATUS_FC(_a, LOGI_F, "login failed", CLR_FC_ERROR), \ 1523e2ef33faSMatthew R. Ochs ASTATUS_FC(_a, LOGI_R, "login timed out, retrying", LINK_RESET), \ 1524e2ef33faSMatthew R. Ochs ASTATUS_FC(_a, CRC_T, "CRC threshold exceeded", LINK_RESET), \ 1525e2ef33faSMatthew R. Ochs ASTATUS_FC(_a, LOGO, "target initiated LOGO", 0), \ 1526e2ef33faSMatthew R. Ochs ASTATUS_FC(_a, OTHER, "other error", CLR_FC_ERROR | LINK_RESET) 1527e2ef33faSMatthew R. Ochs 1528e2ef33faSMatthew R. Ochs static const struct asyc_intr_info ainfo[] = { 1529e2ef33faSMatthew R. Ochs BUILD_SISL_ASTATUS_FC_PORT(1), 1530e2ef33faSMatthew R. Ochs BUILD_SISL_ASTATUS_FC_PORT(0), 1531e2ef33faSMatthew R. Ochs BUILD_SISL_ASTATUS_FC_PORT(3), 1532e2ef33faSMatthew R. Ochs BUILD_SISL_ASTATUS_FC_PORT(2) 1533e2ef33faSMatthew R. Ochs }; 1534e2ef33faSMatthew R. Ochs 1535c21e0bbfSMatthew R. Ochs /** 1536c21e0bbfSMatthew R. Ochs * cxlflash_async_err_irq() - interrupt handler for asynchronous errors 1537c21e0bbfSMatthew R. Ochs * @irq: Interrupt number. 1538c21e0bbfSMatthew R. Ochs * @data: Private data provided at interrupt registration, the AFU. 1539c21e0bbfSMatthew R. Ochs * 1540c21e0bbfSMatthew R. Ochs * Return: Always return IRQ_HANDLED. 1541c21e0bbfSMatthew R. Ochs */ 1542c21e0bbfSMatthew R. Ochs static irqreturn_t cxlflash_async_err_irq(int irq, void *data) 1543c21e0bbfSMatthew R. 
Ochs { 1544bfc0bab1SUma Krishnan struct hwq *hwq = (struct hwq *)data; 1545bfc0bab1SUma Krishnan struct afu *afu = hwq->afu; 15464392ba49SMatthew R. Ochs struct cxlflash_cfg *cfg = afu->parent; 15474392ba49SMatthew R. Ochs struct device *dev = &cfg->dev->dev; 1548c21e0bbfSMatthew R. Ochs const struct asyc_intr_info *info; 15491786f4a0SMatthew R. Ochs struct sisl_global_map __iomem *global = &afu->afu_map->global; 15500aa14887SMatthew R. Ochs __be64 __iomem *fc_port_regs; 1551e2ef33faSMatthew R. Ochs u64 reg_unmasked; 1552c21e0bbfSMatthew R. Ochs u64 reg; 1553e2ef33faSMatthew R. Ochs u64 bit; 1554c21e0bbfSMatthew R. Ochs u8 port; 1555c21e0bbfSMatthew R. Ochs 1556c21e0bbfSMatthew R. Ochs reg = readq_be(&global->regs.aintr_status); 1557c21e0bbfSMatthew R. Ochs reg_unmasked = (reg & SISL_ASTATUS_UNMASK); 1558c21e0bbfSMatthew R. Ochs 1559e2ef33faSMatthew R. Ochs if (unlikely(reg_unmasked == 0)) { 1560fb67d44dSMatthew R. Ochs dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n", 1561c21e0bbfSMatthew R. Ochs __func__, reg); 1562c21e0bbfSMatthew R. Ochs goto out; 1563c21e0bbfSMatthew R. Ochs } 1564c21e0bbfSMatthew R. Ochs 1565f15fbf8dSMatthew R. Ochs /* FYI, it is 'okay' to clear AFU status before FC_ERROR */ 1566c21e0bbfSMatthew R. Ochs writeq_be(reg_unmasked, &global->regs.aintr_clear); 1567c21e0bbfSMatthew R. Ochs 1568f15fbf8dSMatthew R. Ochs /* Check each bit that is on */ 1569e2ef33faSMatthew R. Ochs for_each_set_bit(bit, (ulong *)®_unmasked, BITS_PER_LONG) { 1570e2ef33faSMatthew R. Ochs if (unlikely(bit >= ARRAY_SIZE(ainfo))) { 1571e2ef33faSMatthew R. Ochs WARN_ON_ONCE(1); 1572c21e0bbfSMatthew R. Ochs continue; 1573e2ef33faSMatthew R. Ochs } 1574e2ef33faSMatthew R. Ochs 1575e2ef33faSMatthew R. Ochs info = &ainfo[bit]; 1576e2ef33faSMatthew R. Ochs if (unlikely(info->status != 1ULL << bit)) { 1577e2ef33faSMatthew R. Ochs WARN_ON_ONCE(1); 1578e2ef33faSMatthew R. Ochs continue; 1579e2ef33faSMatthew R. Ochs } 1580c21e0bbfSMatthew R. Ochs 1581c21e0bbfSMatthew R. Ochs port = info->port; 15820aa14887SMatthew R. Ochs fc_port_regs = get_fc_port_regs(cfg, port); 1583c21e0bbfSMatthew R. Ochs 1584fb67d44dSMatthew R. Ochs dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n", 1585c21e0bbfSMatthew R. Ochs __func__, port, info->desc, 15860aa14887SMatthew R. Ochs readq_be(&fc_port_regs[FC_STATUS / 8])); 1587c21e0bbfSMatthew R. Ochs 1588c21e0bbfSMatthew R. Ochs /* 1589f15fbf8dSMatthew R. Ochs * Do link reset first, some OTHER errors will set FC_ERROR 1590c21e0bbfSMatthew R. Ochs * again if cleared before or w/o a reset 1591c21e0bbfSMatthew R. Ochs */ 1592c21e0bbfSMatthew R. Ochs if (info->action & LINK_RESET) { 15934392ba49SMatthew R. Ochs dev_err(dev, "%s: FC Port %d: resetting link\n", 1594c21e0bbfSMatthew R. Ochs __func__, port); 1595c21e0bbfSMatthew R. Ochs cfg->lr_state = LINK_RESET_REQUIRED; 1596c21e0bbfSMatthew R. Ochs cfg->lr_port = port; 1597c21e0bbfSMatthew R. Ochs schedule_work(&cfg->work_q); 1598c21e0bbfSMatthew R. Ochs } 1599c21e0bbfSMatthew R. Ochs 1600c21e0bbfSMatthew R. Ochs if (info->action & CLR_FC_ERROR) { 16010aa14887SMatthew R. Ochs reg = readq_be(&fc_port_regs[FC_ERROR / 8]); 1602c21e0bbfSMatthew R. Ochs 1603c21e0bbfSMatthew R. Ochs /* 1604f15fbf8dSMatthew R. Ochs * Since all errors are unmasked, FC_ERROR and FC_ERRCAP 1605c21e0bbfSMatthew R. Ochs * should be the same and tracing one is sufficient. 1606c21e0bbfSMatthew R. Ochs */ 1607c21e0bbfSMatthew R. Ochs 1608fb67d44dSMatthew R. Ochs dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n", 1609c21e0bbfSMatthew R. 
Ochs __func__, port, reg); 1610c21e0bbfSMatthew R. Ochs 16110aa14887SMatthew R. Ochs writeq_be(reg, &fc_port_regs[FC_ERROR / 8]); 16120aa14887SMatthew R. Ochs writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]); 1613c21e0bbfSMatthew R. Ochs } 1614ef51074aSMatthew R. Ochs 1615ef51074aSMatthew R. Ochs if (info->action & SCAN_HOST) { 1616ef51074aSMatthew R. Ochs atomic_inc(&cfg->scan_host_needed); 1617ef51074aSMatthew R. Ochs schedule_work(&cfg->work_q); 1618ef51074aSMatthew R. Ochs } 1619c21e0bbfSMatthew R. Ochs } 1620c21e0bbfSMatthew R. Ochs 1621c21e0bbfSMatthew R. Ochs out: 1622c21e0bbfSMatthew R. Ochs return IRQ_HANDLED; 1623c21e0bbfSMatthew R. Ochs } 1624c21e0bbfSMatthew R. Ochs 1625c21e0bbfSMatthew R. Ochs /** 1626c21e0bbfSMatthew R. Ochs * read_vpd() - obtains the WWPNs from VPD 16271284fb0cSMatthew R. Ochs * @cfg: Internal structure associated with the host. 162878ae028eSMatthew R. Ochs * @wwpn: Array of size MAX_FC_PORTS to pass back WWPNs 1629c21e0bbfSMatthew R. Ochs * 16301284fb0cSMatthew R. Ochs * Return: 0 on success, -errno on failure 1631c21e0bbfSMatthew R. Ochs */ 1632c21e0bbfSMatthew R. Ochs static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[]) 1633c21e0bbfSMatthew R. Ochs { 1634fb67d44dSMatthew R. Ochs struct device *dev = &cfg->dev->dev; 1635fb67d44dSMatthew R. Ochs struct pci_dev *pdev = cfg->dev; 1636c21e0bbfSMatthew R. Ochs int rc = 0; 1637c21e0bbfSMatthew R. Ochs int ro_start, ro_size, i, j, k; 1638c21e0bbfSMatthew R. Ochs ssize_t vpd_size; 1639c21e0bbfSMatthew R. Ochs char vpd_data[CXLFLASH_VPD_LEN]; 1640c21e0bbfSMatthew R. Ochs char tmp_buf[WWPN_BUF_LEN] = { 0 }; 16410d419130SMatthew R. Ochs const struct dev_dependent_vals *ddv = (struct dev_dependent_vals *) 16420d419130SMatthew R. Ochs cfg->dev_id->driver_data; 16430d419130SMatthew R. Ochs const bool wwpn_vpd_required = ddv->flags & CXLFLASH_WWPN_VPD_REQUIRED; 16440d419130SMatthew R. Ochs const char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" }; 1645c21e0bbfSMatthew R. Ochs 1646c21e0bbfSMatthew R. Ochs /* Get the VPD data from the device */ 164725b8e08eSMatthew R. Ochs vpd_size = cfg->ops->read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data)); 1648c21e0bbfSMatthew R. Ochs if (unlikely(vpd_size <= 0)) { 1649fb67d44dSMatthew R. Ochs dev_err(dev, "%s: Unable to read VPD (size = %ld)\n", 1650c21e0bbfSMatthew R. Ochs __func__, vpd_size); 1651c21e0bbfSMatthew R. Ochs rc = -ENODEV; 1652c21e0bbfSMatthew R. Ochs goto out; 1653c21e0bbfSMatthew R. Ochs } 1654c21e0bbfSMatthew R. Ochs 1655c21e0bbfSMatthew R. Ochs /* Get the read only section offset */ 1656c21e0bbfSMatthew R. Ochs ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, 1657c21e0bbfSMatthew R. Ochs PCI_VPD_LRDT_RO_DATA); 1658c21e0bbfSMatthew R. Ochs if (unlikely(ro_start < 0)) { 1659fb67d44dSMatthew R. Ochs dev_err(dev, "%s: VPD Read-only data not found\n", __func__); 1660c21e0bbfSMatthew R. Ochs rc = -ENODEV; 1661c21e0bbfSMatthew R. Ochs goto out; 1662c21e0bbfSMatthew R. Ochs } 1663c21e0bbfSMatthew R. Ochs 1664c21e0bbfSMatthew R. Ochs /* Get the read only section size, cap when extends beyond read VPD */ 1665c21e0bbfSMatthew R. Ochs ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]); 1666c21e0bbfSMatthew R. Ochs j = ro_size; 1667c21e0bbfSMatthew R. Ochs i = ro_start + PCI_VPD_LRDT_TAG_SIZE; 1668c21e0bbfSMatthew R. Ochs if (unlikely((i + j) > vpd_size)) { 1669fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: Might need to read more VPD (%d > %ld)\n", 1670c21e0bbfSMatthew R. Ochs __func__, (i + j), vpd_size); 1671c21e0bbfSMatthew R. 
Ochs ro_size = vpd_size - i; 1672c21e0bbfSMatthew R. Ochs } 1673c21e0bbfSMatthew R. Ochs 1674c21e0bbfSMatthew R. Ochs /* 1675c21e0bbfSMatthew R. Ochs * Find the offset of the WWPN tag within the read only 1676c21e0bbfSMatthew R. Ochs * VPD data and validate the found field (partials are 1677c21e0bbfSMatthew R. Ochs * no good to us). Convert the ASCII data to an integer 1678c21e0bbfSMatthew R. Ochs * value. Note that we must copy to a temporary buffer 1679c21e0bbfSMatthew R. Ochs * because the conversion service requires that the ASCII 1680c21e0bbfSMatthew R. Ochs * string be terminated. 16810d419130SMatthew R. Ochs * 16820d419130SMatthew R. Ochs * Allow for WWPN not being found for all devices, setting 16830d419130SMatthew R. Ochs * the returned WWPN to zero when not found. Notify with a 16840d419130SMatthew R. Ochs * log error for cards that should have had WWPN keywords 16850d419130SMatthew R. Ochs * in the VPD - cards requiring WWPN will not have their 16860d419130SMatthew R. Ochs * ports programmed and operate in an undefined state. 1687c21e0bbfSMatthew R. Ochs */ 168878ae028eSMatthew R. Ochs for (k = 0; k < cfg->num_fc_ports; k++) { 1689c21e0bbfSMatthew R. Ochs j = ro_size; 1690c21e0bbfSMatthew R. Ochs i = ro_start + PCI_VPD_LRDT_TAG_SIZE; 1691c21e0bbfSMatthew R. Ochs 1692c21e0bbfSMatthew R. Ochs i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]); 16930d419130SMatthew R. Ochs if (i < 0) { 16940d419130SMatthew R. Ochs if (wwpn_vpd_required) 16950d419130SMatthew R. Ochs dev_err(dev, "%s: Port %d WWPN not found\n", 1696fb67d44dSMatthew R. Ochs __func__, k); 16970d419130SMatthew R. Ochs wwpn[k] = 0ULL; 16980d419130SMatthew R. Ochs continue; 1699c21e0bbfSMatthew R. Ochs } 1700c21e0bbfSMatthew R. Ochs 1701c21e0bbfSMatthew R. Ochs j = pci_vpd_info_field_size(&vpd_data[i]); 1702c21e0bbfSMatthew R. Ochs i += PCI_VPD_INFO_FLD_HDR_SIZE; 1703c21e0bbfSMatthew R. Ochs if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) { 1704fb67d44dSMatthew R. Ochs dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n", 1705c21e0bbfSMatthew R. Ochs __func__, k); 1706c21e0bbfSMatthew R. Ochs rc = -ENODEV; 1707c21e0bbfSMatthew R. Ochs goto out; 1708c21e0bbfSMatthew R. Ochs } 1709c21e0bbfSMatthew R. Ochs 1710c21e0bbfSMatthew R. Ochs memcpy(tmp_buf, &vpd_data[i], WWPN_LEN); 1711c21e0bbfSMatthew R. Ochs rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]); 1712c21e0bbfSMatthew R. Ochs if (unlikely(rc)) { 1713fb67d44dSMatthew R. Ochs dev_err(dev, "%s: WWPN conversion failed for port %d\n", 1714fb67d44dSMatthew R. Ochs __func__, k); 1715c21e0bbfSMatthew R. Ochs rc = -ENODEV; 1716c21e0bbfSMatthew R. Ochs goto out; 1717c21e0bbfSMatthew R. Ochs } 171878ae028eSMatthew R. Ochs 171978ae028eSMatthew R. Ochs dev_dbg(dev, "%s: wwpn%d=%016llx\n", __func__, k, wwpn[k]); 1720c21e0bbfSMatthew R. Ochs } 1721c21e0bbfSMatthew R. Ochs 1722c21e0bbfSMatthew R. Ochs out: 1723fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 1724c21e0bbfSMatthew R. Ochs return rc; 1725c21e0bbfSMatthew R. Ochs } 1726c21e0bbfSMatthew R. Ochs 1727c21e0bbfSMatthew R. Ochs /** 1728c21e0bbfSMatthew R. Ochs * init_pcr() - initialize the provisioning and control registers 17291284fb0cSMatthew R. Ochs * @cfg: Internal structure associated with the host. 1730c21e0bbfSMatthew R. Ochs * 1731c21e0bbfSMatthew R. Ochs * Also sets up fast access to the mapped registers and initializes AFU 1732c21e0bbfSMatthew R. Ochs * command fields that never change. 1733c21e0bbfSMatthew R. Ochs */ 173415305514SMatthew R. 
Ochs static void init_pcr(struct cxlflash_cfg *cfg) 1735c21e0bbfSMatthew R. Ochs { 1736c21e0bbfSMatthew R. Ochs struct afu *afu = cfg->afu; 17371786f4a0SMatthew R. Ochs struct sisl_ctrl_map __iomem *ctrl_map; 1738bfc0bab1SUma Krishnan struct hwq *hwq; 173925b8e08eSMatthew R. Ochs void *cookie; 1740c21e0bbfSMatthew R. Ochs int i; 1741c21e0bbfSMatthew R. Ochs 1742c21e0bbfSMatthew R. Ochs for (i = 0; i < MAX_CONTEXT; i++) { 1743c21e0bbfSMatthew R. Ochs ctrl_map = &afu->afu_map->ctrls[i].ctrl; 1744f15fbf8dSMatthew R. Ochs /* Disrupt any clients that could be running */ 1745c21e0bbfSMatthew R. Ochs /* e.g. clients that survived a master restart */ 1746c21e0bbfSMatthew R. Ochs writeq_be(0, &ctrl_map->rht_start); 1747c21e0bbfSMatthew R. Ochs writeq_be(0, &ctrl_map->rht_cnt_id); 1748c21e0bbfSMatthew R. Ochs writeq_be(0, &ctrl_map->ctx_cap); 1749c21e0bbfSMatthew R. Ochs } 1750c21e0bbfSMatthew R. Ochs 1751bfc0bab1SUma Krishnan /* Copy frequently used fields into hwq */ 17523065267aSMatthew R. Ochs for (i = 0; i < afu->num_hwqs; i++) { 1753bfc0bab1SUma Krishnan hwq = get_hwq(afu, i); 175425b8e08eSMatthew R. Ochs cookie = hwq->ctx_cookie; 1755bfc0bab1SUma Krishnan 175625b8e08eSMatthew R. Ochs hwq->ctx_hndl = (u16) cfg->ops->process_element(cookie); 1757bfc0bab1SUma Krishnan hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host; 1758bfc0bab1SUma Krishnan hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl; 1759c21e0bbfSMatthew R. Ochs 1760c21e0bbfSMatthew R. Ochs /* Program the Endian Control for the master context */ 1761bfc0bab1SUma Krishnan writeq_be(SISL_ENDIAN_CTRL, &hwq->host_map->endian_ctrl); 1762bfc0bab1SUma Krishnan } 1763c21e0bbfSMatthew R. Ochs } 1764c21e0bbfSMatthew R. Ochs 1765c21e0bbfSMatthew R. Ochs /** 1766c21e0bbfSMatthew R. Ochs * init_global() - initialize AFU global registers 17671284fb0cSMatthew R. Ochs * @cfg: Internal structure associated with the host. 1768c21e0bbfSMatthew R. Ochs */ 176915305514SMatthew R. Ochs static int init_global(struct cxlflash_cfg *cfg) 1770c21e0bbfSMatthew R. Ochs { 1771c21e0bbfSMatthew R. Ochs struct afu *afu = cfg->afu; 17724392ba49SMatthew R. Ochs struct device *dev = &cfg->dev->dev; 1773bfc0bab1SUma Krishnan struct hwq *hwq; 1774bfc0bab1SUma Krishnan struct sisl_host_map __iomem *hmap; 17750aa14887SMatthew R. Ochs __be64 __iomem *fc_port_regs; 177678ae028eSMatthew R. Ochs u64 wwpn[MAX_FC_PORTS]; /* wwpn of AFU ports */ 1777c21e0bbfSMatthew R. Ochs int i = 0, num_ports = 0; 1778c21e0bbfSMatthew R. Ochs int rc = 0; 1779d44af4b0SUma Krishnan int j; 1780d44af4b0SUma Krishnan void *ctx; 1781c21e0bbfSMatthew R. Ochs u64 reg; 1782c21e0bbfSMatthew R. Ochs 1783c21e0bbfSMatthew R. Ochs rc = read_vpd(cfg, &wwpn[0]); 1784c21e0bbfSMatthew R. Ochs if (rc) { 17854392ba49SMatthew R. Ochs dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc); 1786c21e0bbfSMatthew R. Ochs goto out; 1787c21e0bbfSMatthew R. Ochs } 1788c21e0bbfSMatthew R. Ochs 1789bfc0bab1SUma Krishnan /* Set up RRQ and SQ in HWQ for master issued cmds */ 17903065267aSMatthew R. Ochs for (i = 0; i < afu->num_hwqs; i++) { 1791bfc0bab1SUma Krishnan hwq = get_hwq(afu, i); 1792bfc0bab1SUma Krishnan hmap = hwq->host_map; 1793bfc0bab1SUma Krishnan 1794bfc0bab1SUma Krishnan writeq_be((u64) hwq->hrrq_start, &hmap->rrq_start); 1795bfc0bab1SUma Krishnan writeq_be((u64) hwq->hrrq_end, &hmap->rrq_end); 1796d2d354a6SUma Krishnan hwq->hrrq_online = true; 1797c21e0bbfSMatthew R. Ochs 1798696d0b0cSMatthew R. 
Ochs if (afu_is_sq_cmd_mode(afu)) { 1799bfc0bab1SUma Krishnan writeq_be((u64)hwq->hsq_start, &hmap->sq_start); 1800bfc0bab1SUma Krishnan writeq_be((u64)hwq->hsq_end, &hmap->sq_end); 1801bfc0bab1SUma Krishnan } 1802696d0b0cSMatthew R. Ochs } 1803696d0b0cSMatthew R. Ochs 1804c21e0bbfSMatthew R. Ochs /* AFU configuration */ 1805c21e0bbfSMatthew R. Ochs reg = readq_be(&afu->afu_map->global.regs.afu_config); 1806c21e0bbfSMatthew R. Ochs reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN; 1807c21e0bbfSMatthew R. Ochs /* enable all auto retry options and control endianness */ 1808c21e0bbfSMatthew R. Ochs /* leave others at default: */ 1809c21e0bbfSMatthew R. Ochs /* CTX_CAP write protected, mbox_r does not clear on read and */ 1810c21e0bbfSMatthew R. Ochs /* checker on if dual afu */ 1811c21e0bbfSMatthew R. Ochs writeq_be(reg, &afu->afu_map->global.regs.afu_config); 1812c21e0bbfSMatthew R. Ochs 1813f15fbf8dSMatthew R. Ochs /* Global port select: select either port */ 1814c21e0bbfSMatthew R. Ochs if (afu->internal_lun) { 1815f15fbf8dSMatthew R. Ochs /* Only use port 0 */ 1816c21e0bbfSMatthew R. Ochs writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel); 181778ae028eSMatthew R. Ochs num_ports = 0; 1818c21e0bbfSMatthew R. Ochs } else { 18198fa4f177SMatthew R. Ochs writeq_be(PORT_MASK(cfg->num_fc_ports), 18208fa4f177SMatthew R. Ochs &afu->afu_map->global.regs.afu_port_sel); 182178ae028eSMatthew R. Ochs num_ports = cfg->num_fc_ports; 1822c21e0bbfSMatthew R. Ochs } 1823c21e0bbfSMatthew R. Ochs 1824c21e0bbfSMatthew R. Ochs for (i = 0; i < num_ports; i++) { 18250aa14887SMatthew R. Ochs fc_port_regs = get_fc_port_regs(cfg, i); 18260aa14887SMatthew R. Ochs 1827f15fbf8dSMatthew R. Ochs /* Unmask all errors (but they are still masked at AFU) */ 18280aa14887SMatthew R. Ochs writeq_be(0, &fc_port_regs[FC_ERRMSK / 8]); 1829f15fbf8dSMatthew R. Ochs /* Clear CRC error cnt & set a threshold */ 18300aa14887SMatthew R. Ochs (void)readq_be(&fc_port_regs[FC_CNT_CRCERR / 8]); 18310aa14887SMatthew R. Ochs writeq_be(MC_CRC_THRESH, &fc_port_regs[FC_CRC_THRESH / 8]); 1832c21e0bbfSMatthew R. Ochs 1833f15fbf8dSMatthew R. Ochs /* Set WWPNs. If already programmed, wwpn[i] is 0 */ 1834f8013261SMatthew R. Ochs if (wwpn[i] != 0) 18350aa14887SMatthew R. Ochs afu_set_wwpn(afu, i, &fc_port_regs[0], wwpn[i]); 1836c21e0bbfSMatthew R. Ochs /* Programming WWPN back to back causes additional 1837c21e0bbfSMatthew R. Ochs * offline/online transitions and a PLOGI 1838c21e0bbfSMatthew R. Ochs */ 1839c21e0bbfSMatthew R. Ochs msleep(100); 1840c21e0bbfSMatthew R. Ochs } 1841c21e0bbfSMatthew R. 
Ochs 1842d44af4b0SUma Krishnan if (afu_is_ocxl_lisn(afu)) { 1843d44af4b0SUma Krishnan /* Set up the LISN effective address for each master */ 1844d44af4b0SUma Krishnan for (i = 0; i < afu->num_hwqs; i++) { 1845d44af4b0SUma Krishnan hwq = get_hwq(afu, i); 1846d44af4b0SUma Krishnan ctx = hwq->ctx_cookie; 1847d44af4b0SUma Krishnan 1848d44af4b0SUma Krishnan for (j = 0; j < hwq->num_irqs; j++) { 1849d44af4b0SUma Krishnan reg = cfg->ops->get_irq_objhndl(ctx, j); 1850d44af4b0SUma Krishnan writeq_be(reg, &hwq->ctrl_map->lisn_ea[j]); 1851d44af4b0SUma Krishnan } 1852d44af4b0SUma Krishnan 1853d44af4b0SUma Krishnan reg = hwq->ctx_hndl; 1854d44af4b0SUma Krishnan writeq_be(SISL_LISN_PASID(reg, reg), 1855d44af4b0SUma Krishnan &hwq->ctrl_map->lisn_pasid[0]); 1856d44af4b0SUma Krishnan writeq_be(SISL_LISN_PASID(0UL, reg), 1857d44af4b0SUma Krishnan &hwq->ctrl_map->lisn_pasid[1]); 1858d44af4b0SUma Krishnan } 1859d44af4b0SUma Krishnan } 1860d44af4b0SUma Krishnan 1861f15fbf8dSMatthew R. Ochs /* Set up master's own CTX_CAP to allow real mode, host translation */ 1862f15fbf8dSMatthew R. Ochs /* tables, afu cmds and read/write GSCSI cmds. */ 1863c21e0bbfSMatthew R. Ochs /* First, unlock ctx_cap write by reading mbox */ 18643065267aSMatthew R. Ochs for (i = 0; i < afu->num_hwqs; i++) { 1865bfc0bab1SUma Krishnan hwq = get_hwq(afu, i); 1866bfc0bab1SUma Krishnan 1867bfc0bab1SUma Krishnan (void)readq_be(&hwq->ctrl_map->mbox_r); /* unlock ctx_cap */ 1868c21e0bbfSMatthew R. Ochs writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE | 1869c21e0bbfSMatthew R. Ochs SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD | 1870c21e0bbfSMatthew R. Ochs SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD), 1871bfc0bab1SUma Krishnan &hwq->ctrl_map->ctx_cap); 1872bfc0bab1SUma Krishnan } 18733223c01aSMatthew R. Ochs 18743223c01aSMatthew R. Ochs /* 18753223c01aSMatthew R. Ochs * Determine write-same unmap support for host by evaluating the unmap 18763223c01aSMatthew R. Ochs * sector support bit of the context control register associated with 18773223c01aSMatthew R. Ochs * the primary hardware queue. Note that while this status is reflected 18783223c01aSMatthew R. Ochs * in a context register, the outcome can be assumed to be host-wide. 18793223c01aSMatthew R. Ochs */ 18803223c01aSMatthew R. Ochs hwq = get_hwq(afu, PRIMARY_HWQ); 18813223c01aSMatthew R. Ochs reg = readq_be(&hwq->host_map->ctx_ctrl); 18823223c01aSMatthew R. Ochs if (reg & SISL_CTX_CTRL_UNMAP_SECTOR) 18833223c01aSMatthew R. Ochs cfg->ws_unmap = true; 18843223c01aSMatthew R. Ochs 1885f15fbf8dSMatthew R. Ochs /* Initialize heartbeat */ 1886c21e0bbfSMatthew R. Ochs afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb); 1887c21e0bbfSMatthew R. Ochs out: 1888c21e0bbfSMatthew R. Ochs return rc; 1889c21e0bbfSMatthew R. Ochs } 1890c21e0bbfSMatthew R. Ochs 1891c21e0bbfSMatthew R. Ochs /** 1892c21e0bbfSMatthew R. Ochs * start_afu() - initializes and starts the AFU 18931284fb0cSMatthew R. Ochs * @cfg: Internal structure associated with the host. 1894c21e0bbfSMatthew R. Ochs */ 1895c21e0bbfSMatthew R. Ochs static int start_afu(struct cxlflash_cfg *cfg) 1896c21e0bbfSMatthew R. Ochs { 1897c21e0bbfSMatthew R. Ochs struct afu *afu = cfg->afu; 1898fb67d44dSMatthew R. Ochs struct device *dev = &cfg->dev->dev; 1899bfc0bab1SUma Krishnan struct hwq *hwq; 1900c21e0bbfSMatthew R. Ochs int rc = 0; 1901bfc0bab1SUma Krishnan int i; 1902c21e0bbfSMatthew R. Ochs 1903c21e0bbfSMatthew R. Ochs init_pcr(cfg); 1904c21e0bbfSMatthew R. 
Ochs 1905bfc0bab1SUma Krishnan /* Initialize each HWQ */ 19063065267aSMatthew R. Ochs for (i = 0; i < afu->num_hwqs; i++) { 1907bfc0bab1SUma Krishnan hwq = get_hwq(afu, i); 1908bfc0bab1SUma Krishnan 1909bfc0bab1SUma Krishnan /* After an AFU reset, RRQ entries are stale, clear them */ 1910bfc0bab1SUma Krishnan memset(&hwq->rrq_entry, 0, sizeof(hwq->rrq_entry)); 1911bfc0bab1SUma Krishnan 1912bfc0bab1SUma Krishnan /* Initialize RRQ pointers */ 1913bfc0bab1SUma Krishnan hwq->hrrq_start = &hwq->rrq_entry[0]; 1914bfc0bab1SUma Krishnan hwq->hrrq_end = &hwq->rrq_entry[NUM_RRQ_ENTRY - 1]; 1915bfc0bab1SUma Krishnan hwq->hrrq_curr = hwq->hrrq_start; 1916bfc0bab1SUma Krishnan hwq->toggle = 1; 191766ea9bccSUma Krishnan 191866ea9bccSUma Krishnan /* Initialize spin locks */ 1919bfc0bab1SUma Krishnan spin_lock_init(&hwq->hrrq_slock); 192066ea9bccSUma Krishnan spin_lock_init(&hwq->hsq_slock); 1921c21e0bbfSMatthew R. Ochs 1922696d0b0cSMatthew R. Ochs /* Initialize SQ */ 1923696d0b0cSMatthew R. Ochs if (afu_is_sq_cmd_mode(afu)) { 1924bfc0bab1SUma Krishnan memset(&hwq->sq, 0, sizeof(hwq->sq)); 1925bfc0bab1SUma Krishnan hwq->hsq_start = &hwq->sq[0]; 1926bfc0bab1SUma Krishnan hwq->hsq_end = &hwq->sq[NUM_SQ_ENTRY - 1]; 1927bfc0bab1SUma Krishnan hwq->hsq_curr = hwq->hsq_start; 1928696d0b0cSMatthew R. Ochs 1929bfc0bab1SUma Krishnan atomic_set(&hwq->hsq_credits, NUM_SQ_ENTRY - 1); 1930696d0b0cSMatthew R. Ochs } 1931696d0b0cSMatthew R. Ochs 1932cba06e6dSMatthew R. Ochs /* Initialize IRQ poll */ 1933cba06e6dSMatthew R. Ochs if (afu_is_irqpoll_enabled(afu)) 1934bfc0bab1SUma Krishnan irq_poll_init(&hwq->irqpoll, afu->irqpoll_weight, 1935cba06e6dSMatthew R. Ochs cxlflash_irqpoll); 1936cba06e6dSMatthew R. Ochs 1937bfc0bab1SUma Krishnan } 1938bfc0bab1SUma Krishnan 1939c21e0bbfSMatthew R. Ochs rc = init_global(cfg); 1940c21e0bbfSMatthew R. Ochs 1941fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 1942c21e0bbfSMatthew R. Ochs return rc; 1943c21e0bbfSMatthew R. Ochs } 1944c21e0bbfSMatthew R. Ochs 1945c21e0bbfSMatthew R. Ochs /** 19469526f360SManoj N. Kumar * init_intr() - setup interrupt handlers for the master context 19471284fb0cSMatthew R. Ochs * @cfg: Internal structure associated with the host. 1948bfc0bab1SUma Krishnan * @hwq: Hardware queue to initialize. 1949c21e0bbfSMatthew R. Ochs * 19501284fb0cSMatthew R. Ochs * Return: 0 on success, -errno on failure 1951c21e0bbfSMatthew R. Ochs */ 19529526f360SManoj N. Kumar static enum undo_level init_intr(struct cxlflash_cfg *cfg, 1953bfc0bab1SUma Krishnan struct hwq *hwq) 1954c21e0bbfSMatthew R. Ochs { 19559526f360SManoj N. Kumar struct device *dev = &cfg->dev->dev; 1956b070545dSUma Krishnan void *ctx = hwq->ctx_cookie; 1957c21e0bbfSMatthew R. Ochs int rc = 0; 19589526f360SManoj N. Kumar enum undo_level level = UNDO_NOOP; 1959bfc0bab1SUma Krishnan bool is_primary_hwq = (hwq->index == PRIMARY_HWQ); 1960e11e0ff8SUma Krishnan int num_irqs = hwq->num_irqs; 1961c21e0bbfSMatthew R. Ochs 196225b8e08eSMatthew R. Ochs rc = cfg->ops->allocate_afu_irqs(ctx, num_irqs); 1963c21e0bbfSMatthew R. Ochs if (unlikely(rc)) { 1964fb67d44dSMatthew R. Ochs dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n", 1965c21e0bbfSMatthew R. Ochs __func__, rc); 19669526f360SManoj N. Kumar level = UNDO_NOOP; 1967c21e0bbfSMatthew R. Ochs goto out; 1968c21e0bbfSMatthew R. Ochs } 1969c21e0bbfSMatthew R. Ochs 197025b8e08eSMatthew R. Ochs rc = cfg->ops->map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq, 1971c21e0bbfSMatthew R. Ochs "SISL_MSI_SYNC_ERROR"); 1972c21e0bbfSMatthew R. 
Ochs if (unlikely(rc <= 0)) { 1973fb67d44dSMatthew R. Ochs dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__); 1974c21e0bbfSMatthew R. Ochs level = FREE_IRQ; 1975c21e0bbfSMatthew R. Ochs goto out; 1976c21e0bbfSMatthew R. Ochs } 1977c21e0bbfSMatthew R. Ochs 197825b8e08eSMatthew R. Ochs rc = cfg->ops->map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq, 1979c21e0bbfSMatthew R. Ochs "SISL_MSI_RRQ_UPDATED"); 1980c21e0bbfSMatthew R. Ochs if (unlikely(rc <= 0)) { 1981fb67d44dSMatthew R. Ochs dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__); 1982c21e0bbfSMatthew R. Ochs level = UNMAP_ONE; 1983c21e0bbfSMatthew R. Ochs goto out; 1984c21e0bbfSMatthew R. Ochs } 1985c21e0bbfSMatthew R. Ochs 1986bfc0bab1SUma Krishnan /* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */ 1987bfc0bab1SUma Krishnan if (!is_primary_hwq) 1988bfc0bab1SUma Krishnan goto out; 1989bfc0bab1SUma Krishnan 199025b8e08eSMatthew R. Ochs rc = cfg->ops->map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq, 1991c21e0bbfSMatthew R. Ochs "SISL_MSI_ASYNC_ERROR"); 1992c21e0bbfSMatthew R. Ochs if (unlikely(rc <= 0)) { 1993fb67d44dSMatthew R. Ochs dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__); 1994c21e0bbfSMatthew R. Ochs level = UNMAP_TWO; 1995c21e0bbfSMatthew R. Ochs goto out; 1996c21e0bbfSMatthew R. Ochs } 19979526f360SManoj N. Kumar out: 19989526f360SManoj N. Kumar return level; 19999526f360SManoj N. Kumar } 2000c21e0bbfSMatthew R. Ochs 20019526f360SManoj N. Kumar /** 20029526f360SManoj N. Kumar * init_mc() - create and register as the master context 20039526f360SManoj N. Kumar * @cfg: Internal structure associated with the host. 2004bfc0bab1SUma Krishnan * index: HWQ Index of the master context. 20059526f360SManoj N. Kumar * 20069526f360SManoj N. Kumar * Return: 0 on success, -errno on failure 20079526f360SManoj N. Kumar */ 2008bfc0bab1SUma Krishnan static int init_mc(struct cxlflash_cfg *cfg, u32 index) 20099526f360SManoj N. Kumar { 2010b070545dSUma Krishnan void *ctx; 20119526f360SManoj N. Kumar struct device *dev = &cfg->dev->dev; 2012bfc0bab1SUma Krishnan struct hwq *hwq = get_hwq(cfg->afu, index); 20139526f360SManoj N. Kumar int rc = 0; 2014e11e0ff8SUma Krishnan int num_irqs; 20159526f360SManoj N. Kumar enum undo_level level; 20169526f360SManoj N. Kumar 2017bfc0bab1SUma Krishnan hwq->afu = cfg->afu; 2018bfc0bab1SUma Krishnan hwq->index = index; 2019a002bf83SUma Krishnan INIT_LIST_HEAD(&hwq->pending_cmds); 2020bfc0bab1SUma Krishnan 2021e11e0ff8SUma Krishnan if (index == PRIMARY_HWQ) { 202225b8e08eSMatthew R. Ochs ctx = cfg->ops->get_context(cfg->dev, cfg->afu_cookie); 2023e11e0ff8SUma Krishnan num_irqs = 3; 2024e11e0ff8SUma Krishnan } else { 202525b8e08eSMatthew R. Ochs ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie); 2026e11e0ff8SUma Krishnan num_irqs = 2; 2027e11e0ff8SUma Krishnan } 20280df69c60SUma Krishnan if (IS_ERR_OR_NULL(ctx)) { 20299526f360SManoj N. Kumar rc = -ENOMEM; 2030bfc0bab1SUma Krishnan goto err1; 20319526f360SManoj N. Kumar } 2032bfc0bab1SUma Krishnan 2033b070545dSUma Krishnan WARN_ON(hwq->ctx_cookie); 2034b070545dSUma Krishnan hwq->ctx_cookie = ctx; 2035e11e0ff8SUma Krishnan hwq->num_irqs = num_irqs; 20369526f360SManoj N. Kumar 20379526f360SManoj N. Kumar /* Set it up as a master with the CXL */ 203825b8e08eSMatthew R. Ochs cfg->ops->set_master(ctx); 20399526f360SManoj N. Kumar 2040bfc0bab1SUma Krishnan /* Reset AFU when initializing primary context */ 2041bfc0bab1SUma Krishnan if (index == PRIMARY_HWQ) { 204225b8e08eSMatthew R. 
Ochs rc = cfg->ops->afu_reset(ctx); 20439526f360SManoj N. Kumar if (unlikely(rc)) { 2044bfc0bab1SUma Krishnan dev_err(dev, "%s: AFU reset failed rc=%d\n", 2045bfc0bab1SUma Krishnan __func__, rc); 2046bfc0bab1SUma Krishnan goto err1; 2047bfc0bab1SUma Krishnan } 20489526f360SManoj N. Kumar } 20499526f360SManoj N. Kumar 2050bfc0bab1SUma Krishnan level = init_intr(cfg, hwq); 20519526f360SManoj N. Kumar if (unlikely(level)) { 2052fb67d44dSMatthew R. Ochs dev_err(dev, "%s: interrupt init failed rc=%d\n", __func__, rc); 2053bfc0bab1SUma Krishnan goto err2; 20549526f360SManoj N. Kumar } 2055c21e0bbfSMatthew R. Ochs 205625b8e08eSMatthew R. Ochs /* Finally, activate the context by starting it */ 205725b8e08eSMatthew R. Ochs rc = cfg->ops->start_context(hwq->ctx_cookie); 2058c21e0bbfSMatthew R. Ochs if (unlikely(rc)) { 2059c21e0bbfSMatthew R. Ochs dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc); 2060c21e0bbfSMatthew R. Ochs level = UNMAP_THREE; 2061bfc0bab1SUma Krishnan goto err2; 2062c21e0bbfSMatthew R. Ochs } 2063bfc0bab1SUma Krishnan 2064bfc0bab1SUma Krishnan out: 2065fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 2066c21e0bbfSMatthew R. Ochs return rc; 2067bfc0bab1SUma Krishnan err2: 2068bfc0bab1SUma Krishnan term_intr(cfg, level, index); 2069bfc0bab1SUma Krishnan if (index != PRIMARY_HWQ) 207025b8e08eSMatthew R. Ochs cfg->ops->release_context(ctx); 2071bfc0bab1SUma Krishnan err1: 2072b070545dSUma Krishnan hwq->ctx_cookie = NULL; 2073bfc0bab1SUma Krishnan goto out; 2074c21e0bbfSMatthew R. Ochs } 2075c21e0bbfSMatthew R. Ochs 2076c21e0bbfSMatthew R. Ochs /** 207756518072SMatthew R. Ochs * get_num_afu_ports() - determines and configures the number of AFU ports 207856518072SMatthew R. Ochs * @cfg: Internal structure associated with the host. 207956518072SMatthew R. Ochs * 208056518072SMatthew R. Ochs * This routine determines the number of AFU ports by converting the global 208156518072SMatthew R. Ochs * port selection mask. The converted value is only valid following an AFU 208256518072SMatthew R. Ochs * reset (explicit or power-on). This routine must be invoked shortly after 208356518072SMatthew R. Ochs * mapping as other routines are dependent on the number of ports during the 208456518072SMatthew R. Ochs * initialization sequence. 208556518072SMatthew R. Ochs * 208656518072SMatthew R. Ochs * To support legacy AFUs that might not have reflected an initial global 208756518072SMatthew R. Ochs * port mask (value read is 0), default to the number of ports originally 208856518072SMatthew R. Ochs * supported by the cxlflash driver (2) before hardware with other port 208956518072SMatthew R. Ochs * offerings was introduced. 209056518072SMatthew R. Ochs */ 209156518072SMatthew R. Ochs static void get_num_afu_ports(struct cxlflash_cfg *cfg) 209256518072SMatthew R. Ochs { 209356518072SMatthew R. Ochs struct afu *afu = cfg->afu; 209456518072SMatthew R. Ochs struct device *dev = &cfg->dev->dev; 209556518072SMatthew R. Ochs u64 port_mask; 209656518072SMatthew R. Ochs int num_fc_ports = LEGACY_FC_PORTS; 209756518072SMatthew R. Ochs 209856518072SMatthew R. Ochs port_mask = readq_be(&afu->afu_map->global.regs.afu_port_sel); 209956518072SMatthew R. Ochs if (port_mask != 0ULL) 210056518072SMatthew R. Ochs num_fc_ports = min(ilog2(port_mask) + 1, MAX_FC_PORTS); 210156518072SMatthew R. Ochs 210256518072SMatthew R. Ochs dev_dbg(dev, "%s: port_mask=%016llx num_fc_ports=%d\n", 210356518072SMatthew R. Ochs __func__, port_mask, num_fc_ports); 210456518072SMatthew R. 
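	/*
	 * The port count is derived from the highest bit set in the global
	 * port selection mask, e.g. a mask of 0x3 gives ilog2(0x3) + 1 = 2
	 * ports and 0xf gives 4, capped at MAX_FC_PORTS. A mask of zero
	 * (legacy AFU) falls back to LEGACY_FC_PORTS above.
	 */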
Ochs 210556518072SMatthew R. Ochs cfg->num_fc_ports = num_fc_ports; 210656518072SMatthew R. Ochs cfg->host->max_channel = PORTNUM2CHAN(num_fc_ports); 210756518072SMatthew R. Ochs } 210856518072SMatthew R. Ochs 210956518072SMatthew R. Ochs /** 2110c21e0bbfSMatthew R. Ochs * init_afu() - setup as master context and start AFU 21111284fb0cSMatthew R. Ochs * @cfg: Internal structure associated with the host. 2112c21e0bbfSMatthew R. Ochs * 2113c21e0bbfSMatthew R. Ochs * This routine is a higher level of control for configuring the 2114c21e0bbfSMatthew R. Ochs * AFU on probe and reset paths. 2115c21e0bbfSMatthew R. Ochs * 21161284fb0cSMatthew R. Ochs * Return: 0 on success, -errno on failure 2117c21e0bbfSMatthew R. Ochs */ 2118c21e0bbfSMatthew R. Ochs static int init_afu(struct cxlflash_cfg *cfg) 2119c21e0bbfSMatthew R. Ochs { 2120c21e0bbfSMatthew R. Ochs u64 reg; 2121c21e0bbfSMatthew R. Ochs int rc = 0; 2122c21e0bbfSMatthew R. Ochs struct afu *afu = cfg->afu; 2123c21e0bbfSMatthew R. Ochs struct device *dev = &cfg->dev->dev; 2124bfc0bab1SUma Krishnan struct hwq *hwq; 2125bfc0bab1SUma Krishnan int i; 2126c21e0bbfSMatthew R. Ochs 212725b8e08eSMatthew R. Ochs cfg->ops->perst_reloads_same_image(cfg->afu_cookie, true); 21285cdac81aSMatthew R. Ochs 21293065267aSMatthew R. Ochs afu->num_hwqs = afu->desired_hwqs; 21303065267aSMatthew R. Ochs for (i = 0; i < afu->num_hwqs; i++) { 2131bfc0bab1SUma Krishnan rc = init_mc(cfg, i); 2132c21e0bbfSMatthew R. Ochs if (rc) { 2133bfc0bab1SUma Krishnan dev_err(dev, "%s: init_mc failed rc=%d index=%d\n", 2134bfc0bab1SUma Krishnan __func__, rc, i); 2135bfc0bab1SUma Krishnan goto err1; 2136bfc0bab1SUma Krishnan } 2137c21e0bbfSMatthew R. Ochs } 2138c21e0bbfSMatthew R. Ochs 2139bfc0bab1SUma Krishnan /* Map the entire MMIO space of the AFU using the first context */ 2140bfc0bab1SUma Krishnan hwq = get_hwq(afu, PRIMARY_HWQ); 214125b8e08eSMatthew R. Ochs afu->afu_map = cfg->ops->psa_map(hwq->ctx_cookie); 2142c21e0bbfSMatthew R. Ochs if (!afu->afu_map) { 214325b8e08eSMatthew R. Ochs dev_err(dev, "%s: psa_map failed\n", __func__); 2144ee3491baSMatthew R. Ochs rc = -ENOMEM; 2145c21e0bbfSMatthew R. Ochs goto err1; 2146c21e0bbfSMatthew R. Ochs } 2147c21e0bbfSMatthew R. Ochs 2148e5ce067bSMatthew R. Ochs /* No byte reverse on reading afu_version or string will be backwards */ 2149e5ce067bSMatthew R. Ochs reg = readq(&afu->afu_map->global.regs.afu_version); 2150e5ce067bSMatthew R. Ochs memcpy(afu->version, ®, sizeof(reg)); 2151c21e0bbfSMatthew R. Ochs afu->interface_version = 2152c21e0bbfSMatthew R. Ochs readq_be(&afu->afu_map->global.regs.interface_version); 2153e5ce067bSMatthew R. Ochs if ((afu->interface_version + 1) == 0) { 2154fb67d44dSMatthew R. Ochs dev_err(dev, "Back level AFU, please upgrade. AFU version %s " 2155fb67d44dSMatthew R. Ochs "interface version %016llx\n", afu->version, 2156e5ce067bSMatthew R. Ochs afu->interface_version); 2157e5ce067bSMatthew R. Ochs rc = -EINVAL; 21580df5bef7SUma Krishnan goto err1; 2159ee3491baSMatthew R. Ochs } 2160ee3491baSMatthew R. Ochs 2161696d0b0cSMatthew R. Ochs if (afu_is_sq_cmd_mode(afu)) { 2162696d0b0cSMatthew R. Ochs afu->send_cmd = send_cmd_sq; 2163696d0b0cSMatthew R. Ochs afu->context_reset = context_reset_sq; 2164696d0b0cSMatthew R. Ochs } else { 216548b4be36SMatthew R. Ochs afu->send_cmd = send_cmd_ioarrin; 216648b4be36SMatthew R. Ochs afu->context_reset = context_reset_ioarrin; 2167696d0b0cSMatthew R. Ochs } 216848b4be36SMatthew R. Ochs 2169fb67d44dSMatthew R. 
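	/*
	 * In SQ command mode the host posts IOARCBs onto a host-resident
	 * submission queue; otherwise each command is signalled by writing
	 * its IOARCB address to the per-context IOARRIN register. Selecting
	 * the function pointers once here keeps that choice off the I/O
	 * fast path.
	 */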
Ochs dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__, 2170ee3491baSMatthew R. Ochs afu->version, afu->interface_version); 2171c21e0bbfSMatthew R. Ochs 217256518072SMatthew R. Ochs get_num_afu_ports(cfg); 217356518072SMatthew R. Ochs 2174c21e0bbfSMatthew R. Ochs rc = start_afu(cfg); 2175c21e0bbfSMatthew R. Ochs if (rc) { 2176fb67d44dSMatthew R. Ochs dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc); 21770df5bef7SUma Krishnan goto err1; 2178c21e0bbfSMatthew R. Ochs } 2179c21e0bbfSMatthew R. Ochs 2180c21e0bbfSMatthew R. Ochs afu_err_intr_init(cfg->afu); 21813065267aSMatthew R. Ochs for (i = 0; i < afu->num_hwqs; i++) { 2182bfc0bab1SUma Krishnan hwq = get_hwq(afu, i); 2183bfc0bab1SUma Krishnan 2184bfc0bab1SUma Krishnan hwq->room = readq_be(&hwq->host_map->cmd_room); 2185bfc0bab1SUma Krishnan } 2186c21e0bbfSMatthew R. Ochs 21872cb79266SMatthew R. Ochs /* Restore the LUN mappings */ 21882cb79266SMatthew R. Ochs cxlflash_restore_luntable(cfg); 2189ee3491baSMatthew R. Ochs out: 2190fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 2191c21e0bbfSMatthew R. Ochs return rc; 2192ee3491baSMatthew R. Ochs 2193ee3491baSMatthew R. Ochs err1: 21943065267aSMatthew R. Ochs for (i = afu->num_hwqs - 1; i >= 0; i--) { 2195bfc0bab1SUma Krishnan term_intr(cfg, UNMAP_THREE, i); 2196bfc0bab1SUma Krishnan term_mc(cfg, i); 2197bfc0bab1SUma Krishnan } 2198ee3491baSMatthew R. Ochs goto out; 2199c21e0bbfSMatthew R. Ochs } 2200c21e0bbfSMatthew R. Ochs 2201c21e0bbfSMatthew R. Ochs /** 22020b09e711SUma Krishnan * afu_reset() - resets the AFU 22030b09e711SUma Krishnan * @cfg: Internal structure associated with the host. 22040b09e711SUma Krishnan * 22050b09e711SUma Krishnan * Return: 0 on success, -errno on failure 22060b09e711SUma Krishnan */ 22070b09e711SUma Krishnan static int afu_reset(struct cxlflash_cfg *cfg) 22080b09e711SUma Krishnan { 22090b09e711SUma Krishnan struct device *dev = &cfg->dev->dev; 22100b09e711SUma Krishnan int rc = 0; 22110b09e711SUma Krishnan 22120b09e711SUma Krishnan /* Stop the context before the reset. Since the context is 22130b09e711SUma Krishnan * no longer available restart it after the reset is complete 22140b09e711SUma Krishnan */ 22150b09e711SUma Krishnan term_afu(cfg); 22160b09e711SUma Krishnan 22170b09e711SUma Krishnan rc = init_afu(cfg); 22180b09e711SUma Krishnan 22190b09e711SUma Krishnan dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 22200b09e711SUma Krishnan return rc; 22210b09e711SUma Krishnan } 22220b09e711SUma Krishnan 22230b09e711SUma Krishnan /** 22240b09e711SUma Krishnan * drain_ioctls() - wait until all currently executing ioctls have completed 22250b09e711SUma Krishnan * @cfg: Internal structure associated with the host. 22260b09e711SUma Krishnan * 22270b09e711SUma Krishnan * Obtain write access to read/write semaphore that wraps ioctl 22280b09e711SUma Krishnan * handling to 'drain' ioctls currently executing. 22290b09e711SUma Krishnan */ 22300b09e711SUma Krishnan static void drain_ioctls(struct cxlflash_cfg *cfg) 22310b09e711SUma Krishnan { 22320b09e711SUma Krishnan down_write(&cfg->ioctl_rwsem); 22330b09e711SUma Krishnan up_write(&cfg->ioctl_rwsem); 22340b09e711SUma Krishnan } 22350b09e711SUma Krishnan 22360b09e711SUma Krishnan /** 22370b09e711SUma Krishnan * cxlflash_async_reset_host() - asynchronous host reset handler 22380b09e711SUma Krishnan * @data: Private data provided while scheduling reset. 22390b09e711SUma Krishnan * @cookie: Cookie that can be used for checkpointing. 
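 *
 * Drains any executing ioctls, marks user contexts in error, and resets the
 * AFU. The SCSI host was blocked when this reset was scheduled; requests are
 * unblocked once the reset attempt completes.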
22400b09e711SUma Krishnan */ 22410b09e711SUma Krishnan static void cxlflash_async_reset_host(void *data, async_cookie_t cookie) 22420b09e711SUma Krishnan { 22430b09e711SUma Krishnan struct cxlflash_cfg *cfg = data; 22440b09e711SUma Krishnan struct device *dev = &cfg->dev->dev; 22450b09e711SUma Krishnan int rc = 0; 22460b09e711SUma Krishnan 22470b09e711SUma Krishnan if (cfg->state != STATE_RESET) { 22480b09e711SUma Krishnan dev_dbg(dev, "%s: Not performing a reset, state=%d\n", 22490b09e711SUma Krishnan __func__, cfg->state); 22500b09e711SUma Krishnan goto out; 22510b09e711SUma Krishnan } 22520b09e711SUma Krishnan 22530b09e711SUma Krishnan drain_ioctls(cfg); 22540b09e711SUma Krishnan cxlflash_mark_contexts_error(cfg); 22550b09e711SUma Krishnan rc = afu_reset(cfg); 22560b09e711SUma Krishnan if (rc) 22570b09e711SUma Krishnan cfg->state = STATE_FAILTERM; 22580b09e711SUma Krishnan else 22590b09e711SUma Krishnan cfg->state = STATE_NORMAL; 22600b09e711SUma Krishnan wake_up_all(&cfg->reset_waitq); 22610b09e711SUma Krishnan 22620b09e711SUma Krishnan out: 22630b09e711SUma Krishnan scsi_unblock_requests(cfg->host); 22640b09e711SUma Krishnan } 22650b09e711SUma Krishnan 22660b09e711SUma Krishnan /** 22670b09e711SUma Krishnan * cxlflash_schedule_async_reset() - schedule an asynchronous host reset 22680b09e711SUma Krishnan * @cfg: Internal structure associated with the host. 22690b09e711SUma Krishnan */ 22700b09e711SUma Krishnan static void cxlflash_schedule_async_reset(struct cxlflash_cfg *cfg) 22710b09e711SUma Krishnan { 22720b09e711SUma Krishnan struct device *dev = &cfg->dev->dev; 22730b09e711SUma Krishnan 22740b09e711SUma Krishnan if (cfg->state != STATE_NORMAL) { 22750b09e711SUma Krishnan dev_dbg(dev, "%s: Not performing reset state=%d\n", 22760b09e711SUma Krishnan __func__, cfg->state); 22770b09e711SUma Krishnan return; 22780b09e711SUma Krishnan } 22790b09e711SUma Krishnan 22800b09e711SUma Krishnan cfg->state = STATE_RESET; 22810b09e711SUma Krishnan scsi_block_requests(cfg->host); 22820b09e711SUma Krishnan cfg->async_reset_cookie = async_schedule(cxlflash_async_reset_host, 22830b09e711SUma Krishnan cfg); 22840b09e711SUma Krishnan } 22850b09e711SUma Krishnan 22860b09e711SUma Krishnan /** 2287cf243027SMatthew R. Ochs * send_afu_cmd() - builds and sends an internal AFU command 2288c21e0bbfSMatthew R. Ochs * @afu: AFU associated with the host. 2289cf243027SMatthew R. Ochs * @rcb: Pre-populated IOARCB describing command to send. 2290c21e0bbfSMatthew R. Ochs * 2291cf243027SMatthew R. Ochs * The AFU can only take one internal AFU command at a time. This limitation is 2292cf243027SMatthew R. Ochs * enforced by using a mutex to provide exclusive access to the AFU during the 2293cf243027SMatthew R. Ochs * operation. This design point requires calling threads to not be on interrupt 2294cf243027SMatthew R. Ochs * context due to the possibility of sleeping during concurrent AFU operations. 2295c21e0bbfSMatthew R. Ochs * 2296cf243027SMatthew R. Ochs * The command status is optionally passed back to the caller when the caller 2297cf243027SMatthew R. Ochs * populates the IOASA field of the IOARCB with a pointer to an IOASA structure. 22985cdac81aSMatthew R. Ochs * 2299c21e0bbfSMatthew R. Ochs * Return: 2300539d890cSUma Krishnan * 0 on success, -errno on failure 2301c21e0bbfSMatthew R. Ochs */ 2302cf243027SMatthew R. Ochs static int send_afu_cmd(struct afu *afu, struct sisl_ioarcb *rcb) 2303c21e0bbfSMatthew R. Ochs { 23045cdac81aSMatthew R. Ochs struct cxlflash_cfg *cfg = afu->parent; 23054392ba49SMatthew R. 
Ochs struct device *dev = &cfg->dev->dev; 2306c21e0bbfSMatthew R. Ochs struct afu_cmd *cmd = NULL; 2307bfc0bab1SUma Krishnan struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ); 23089a597cd4SUma Krishnan ulong lock_flags; 2309350bb478SMatthew R. Ochs char *buf = NULL; 2310c21e0bbfSMatthew R. Ochs int rc = 0; 2311a96851d3SUma Krishnan int nretry = 0; 2312c21e0bbfSMatthew R. Ochs static DEFINE_MUTEX(sync_active); 2313c21e0bbfSMatthew R. Ochs 23145cdac81aSMatthew R. Ochs if (cfg->state != STATE_NORMAL) { 2315fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: Sync not required state=%u\n", 2316fb67d44dSMatthew R. Ochs __func__, cfg->state); 23175cdac81aSMatthew R. Ochs return 0; 23185cdac81aSMatthew R. Ochs } 23195cdac81aSMatthew R. Ochs 2320c21e0bbfSMatthew R. Ochs mutex_lock(&sync_active); 2321de01283bSMatthew R. Ochs atomic_inc(&afu->cmds_active); 2322a1ea04b3SUma Krishnan buf = kmalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL); 2323350bb478SMatthew R. Ochs if (unlikely(!buf)) { 2324350bb478SMatthew R. Ochs dev_err(dev, "%s: no memory for command\n", __func__); 2325539d890cSUma Krishnan rc = -ENOMEM; 2326c21e0bbfSMatthew R. Ochs goto out; 2327c21e0bbfSMatthew R. Ochs } 2328c21e0bbfSMatthew R. Ochs 2329350bb478SMatthew R. Ochs cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd)); 2330a96851d3SUma Krishnan 2331a96851d3SUma Krishnan retry: 2332a1ea04b3SUma Krishnan memset(cmd, 0, sizeof(*cmd)); 2333cf243027SMatthew R. Ochs memcpy(&cmd->rcb, rcb, sizeof(*rcb)); 2334a1ea04b3SUma Krishnan INIT_LIST_HEAD(&cmd->queue); 2335350bb478SMatthew R. Ochs init_completion(&cmd->cevent); 2336350bb478SMatthew R. Ochs cmd->parent = afu; 2337bfc0bab1SUma Krishnan cmd->hwq_index = hwq->index; 2338bfc0bab1SUma Krishnan cmd->rcb.ctx_id = hwq->ctx_hndl; 2339c21e0bbfSMatthew R. Ochs 2340cf243027SMatthew R. Ochs dev_dbg(dev, "%s: afu=%p cmd=%p type=%02x nretry=%d\n", 2341cf243027SMatthew R. Ochs __func__, afu, cmd, cmd->rcb.cdb[0], nretry); 2342c21e0bbfSMatthew R. Ochs 234348b4be36SMatthew R. Ochs rc = afu->send_cmd(afu, cmd); 2344539d890cSUma Krishnan if (unlikely(rc)) { 2345539d890cSUma Krishnan rc = -ENOBUFS; 2346c21e0bbfSMatthew R. Ochs goto out; 2347539d890cSUma Krishnan } 2348c21e0bbfSMatthew R. Ochs 23499ba848acSMatthew R. Ochs rc = wait_resp(afu, cmd); 2350a1ea04b3SUma Krishnan switch (rc) { 2351a1ea04b3SUma Krishnan case -ETIMEDOUT: 2352a96851d3SUma Krishnan rc = afu->context_reset(hwq); 2353a1ea04b3SUma Krishnan if (rc) { 23549a597cd4SUma Krishnan /* Delete the command from pending_cmds list */ 23559a597cd4SUma Krishnan spin_lock_irqsave(&hwq->hsq_slock, lock_flags); 23569a597cd4SUma Krishnan list_del(&cmd->list); 23579a597cd4SUma Krishnan spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags); 23589a597cd4SUma Krishnan 23590b09e711SUma Krishnan cxlflash_schedule_async_reset(cfg); 2360a1ea04b3SUma Krishnan break; 2361a1ea04b3SUma Krishnan } 2362a1ea04b3SUma Krishnan /* fall through to retry */ 2363a1ea04b3SUma Krishnan case -EAGAIN: 2364a1ea04b3SUma Krishnan if (++nretry < 2) 2365a1ea04b3SUma Krishnan goto retry; 2366a1ea04b3SUma Krishnan /* fall through to exit */ 2367a1ea04b3SUma Krishnan default: 2368a1ea04b3SUma Krishnan break; 2369a96851d3SUma Krishnan } 2370a96851d3SUma Krishnan 2371cf243027SMatthew R. Ochs if (rcb->ioasa) 2372cf243027SMatthew R. Ochs *rcb->ioasa = cmd->sa; 2373c21e0bbfSMatthew R. Ochs out: 2374de01283bSMatthew R. Ochs atomic_dec(&afu->cmds_active); 2375c21e0bbfSMatthew R. Ochs mutex_unlock(&sync_active); 2376350bb478SMatthew R. Ochs kfree(buf); 2377fb67d44dSMatthew R. 
Ochs dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 2378c21e0bbfSMatthew R. Ochs return rc; 2379c21e0bbfSMatthew R. Ochs } 2380c21e0bbfSMatthew R. Ochs 2381c21e0bbfSMatthew R. Ochs /** 2382cf243027SMatthew R. Ochs * cxlflash_afu_sync() - builds and sends an AFU sync command 2383cf243027SMatthew R. Ochs * @afu: AFU associated with the host. 2384cf243027SMatthew R. Ochs * @ctx: Identifies context requesting sync. 2385cf243027SMatthew R. Ochs * @res: Identifies resource requesting sync. 2386cf243027SMatthew R. Ochs * @mode: Type of sync to issue (lightweight, heavyweight, global). 2387cf243027SMatthew R. Ochs * 2388cf243027SMatthew R. Ochs * AFU sync operations are only necessary and allowed when the device is 2389cf243027SMatthew R. Ochs * operating normally. When not operating normally, sync requests can occur as 2390cf243027SMatthew R. Ochs * part of cleaning up resources associated with an adapter prior to removal. 2391cf243027SMatthew R. Ochs * In this scenario, these requests are simply ignored (safe due to the AFU 2392cf243027SMatthew R. Ochs * going away). 2393cf243027SMatthew R. Ochs * 2394cf243027SMatthew R. Ochs * Return: 2395cf243027SMatthew R. Ochs * 0 on success, -errno on failure 2396cf243027SMatthew R. Ochs */ 2397cf243027SMatthew R. Ochs int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx, res_hndl_t res, u8 mode) 2398cf243027SMatthew R. Ochs { 2399cf243027SMatthew R. Ochs struct cxlflash_cfg *cfg = afu->parent; 2400cf243027SMatthew R. Ochs struct device *dev = &cfg->dev->dev; 2401cf243027SMatthew R. Ochs struct sisl_ioarcb rcb = { 0 }; 2402cf243027SMatthew R. Ochs 2403cf243027SMatthew R. Ochs dev_dbg(dev, "%s: afu=%p ctx=%u res=%u mode=%u\n", 2404cf243027SMatthew R. Ochs __func__, afu, ctx, res, mode); 2405cf243027SMatthew R. Ochs 2406cf243027SMatthew R. Ochs rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD; 2407cf243027SMatthew R. Ochs rcb.msi = SISL_MSI_RRQ_UPDATED; 2408cf243027SMatthew R. Ochs rcb.timeout = MC_AFU_SYNC_TIMEOUT; 2409cf243027SMatthew R. Ochs 2410cf243027SMatthew R. Ochs rcb.cdb[0] = SISL_AFU_CMD_SYNC; 2411cf243027SMatthew R. Ochs rcb.cdb[1] = mode; 2412cf243027SMatthew R. Ochs put_unaligned_be16(ctx, &rcb.cdb[2]); 2413cf243027SMatthew R. Ochs put_unaligned_be32(res, &rcb.cdb[4]); 2414cf243027SMatthew R. Ochs 2415cf243027SMatthew R. Ochs return send_afu_cmd(afu, &rcb); 2416cf243027SMatthew R. Ochs } 2417cf243027SMatthew R. Ochs 2418cf243027SMatthew R. Ochs /** 24197c4c41f1SUma Krishnan * cxlflash_eh_abort_handler() - abort a SCSI command 24207c4c41f1SUma Krishnan * @scp: SCSI command to abort. 24217c4c41f1SUma Krishnan * 24227c4c41f1SUma Krishnan * CXL Flash devices do not support a single command abort. Reset the context 24237c4c41f1SUma Krishnan * as per SISLite specification. Flush any pending commands in the hardware 24247c4c41f1SUma Krishnan * queue before the reset. 
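 * When the adapter is not in a normal state, the abort is not attempted and
 * FAILED is returned so that the SCSI midlayer can escalate recovery.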
24257c4c41f1SUma Krishnan * 24267c4c41f1SUma Krishnan * Return: SUCCESS/FAILED as defined in scsi/scsi.h 24277c4c41f1SUma Krishnan */ 24287c4c41f1SUma Krishnan static int cxlflash_eh_abort_handler(struct scsi_cmnd *scp) 24297c4c41f1SUma Krishnan { 24307c4c41f1SUma Krishnan int rc = FAILED; 24317c4c41f1SUma Krishnan struct Scsi_Host *host = scp->device->host; 24327c4c41f1SUma Krishnan struct cxlflash_cfg *cfg = shost_priv(host); 24337c4c41f1SUma Krishnan struct afu_cmd *cmd = sc_to_afuc(scp); 24347c4c41f1SUma Krishnan struct device *dev = &cfg->dev->dev; 24357c4c41f1SUma Krishnan struct afu *afu = cfg->afu; 24367c4c41f1SUma Krishnan struct hwq *hwq = get_hwq(afu, cmd->hwq_index); 24377c4c41f1SUma Krishnan 24387c4c41f1SUma Krishnan dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu " 24397c4c41f1SUma Krishnan "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no, 24407c4c41f1SUma Krishnan scp->device->channel, scp->device->id, scp->device->lun, 24417c4c41f1SUma Krishnan get_unaligned_be32(&((u32 *)scp->cmnd)[0]), 24427c4c41f1SUma Krishnan get_unaligned_be32(&((u32 *)scp->cmnd)[1]), 24437c4c41f1SUma Krishnan get_unaligned_be32(&((u32 *)scp->cmnd)[2]), 24447c4c41f1SUma Krishnan get_unaligned_be32(&((u32 *)scp->cmnd)[3])); 24457c4c41f1SUma Krishnan 24467c4c41f1SUma Krishnan /* When the state is not normal, another reset/reload is in progress. 24477c4c41f1SUma Krishnan * Return failed and the mid-layer will invoke host reset handler. 24487c4c41f1SUma Krishnan */ 24497c4c41f1SUma Krishnan if (cfg->state != STATE_NORMAL) { 24507c4c41f1SUma Krishnan dev_dbg(dev, "%s: Invalid state for abort, state=%d\n", 24517c4c41f1SUma Krishnan __func__, cfg->state); 24527c4c41f1SUma Krishnan goto out; 24537c4c41f1SUma Krishnan } 24547c4c41f1SUma Krishnan 24557c4c41f1SUma Krishnan rc = afu->context_reset(hwq); 24567c4c41f1SUma Krishnan if (unlikely(rc)) 24577c4c41f1SUma Krishnan goto out; 24587c4c41f1SUma Krishnan 24597c4c41f1SUma Krishnan rc = SUCCESS; 24607c4c41f1SUma Krishnan 24617c4c41f1SUma Krishnan out: 24627c4c41f1SUma Krishnan dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 24637c4c41f1SUma Krishnan return rc; 24647c4c41f1SUma Krishnan } 24657c4c41f1SUma Krishnan 24667c4c41f1SUma Krishnan /** 246715305514SMatthew R. Ochs * cxlflash_eh_device_reset_handler() - reset a single LUN 246815305514SMatthew R. Ochs * @scp: SCSI command to send. 246915305514SMatthew R. Ochs * 247015305514SMatthew R. Ochs * Return: 247115305514SMatthew R. Ochs * SUCCESS as defined in scsi/scsi.h 247215305514SMatthew R. Ochs * FAILED as defined in scsi/scsi.h 247315305514SMatthew R. Ochs */ 247415305514SMatthew R. Ochs static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp) 247515305514SMatthew R. Ochs { 247615305514SMatthew R. Ochs int rc = SUCCESS; 247732abbedaSMatthew R. Ochs struct scsi_device *sdev = scp->device; 247832abbedaSMatthew R. Ochs struct Scsi_Host *host = sdev->host; 2479fb67d44dSMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(host); 2480fb67d44dSMatthew R. Ochs struct device *dev = &cfg->dev->dev; 248115305514SMatthew R. Ochs int rcr = 0; 248215305514SMatthew R. Ochs 24835a4d9d77SMatthew R. Ochs dev_dbg(dev, "%s: %d/%d/%d/%llu\n", __func__, 24845a4d9d77SMatthew R. Ochs host->host_no, sdev->channel, sdev->id, sdev->lun); 2485ed486daaSMatthew R. Ochs retry: 248615305514SMatthew R. Ochs switch (cfg->state) { 248715305514SMatthew R. Ochs case STATE_NORMAL: 248832abbedaSMatthew R. Ochs rcr = send_tmf(cfg, sdev, TMF_LUN_RESET); 248915305514SMatthew R. Ochs if (unlikely(rcr)) 249015305514SMatthew R. 
Ochs rc = FAILED; 249115305514SMatthew R. Ochs break; 249215305514SMatthew R. Ochs case STATE_RESET: 249315305514SMatthew R. Ochs wait_event(cfg->reset_waitq, cfg->state != STATE_RESET); 2494ed486daaSMatthew R. Ochs goto retry; 249515305514SMatthew R. Ochs default: 249615305514SMatthew R. Ochs rc = FAILED; 249715305514SMatthew R. Ochs break; 249815305514SMatthew R. Ochs } 249915305514SMatthew R. Ochs 2500fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 250115305514SMatthew R. Ochs return rc; 250215305514SMatthew R. Ochs } 250315305514SMatthew R. Ochs 250415305514SMatthew R. Ochs /** 250515305514SMatthew R. Ochs * cxlflash_eh_host_reset_handler() - reset the host adapter 250615305514SMatthew R. Ochs * @scp: SCSI command from stack identifying host. 250715305514SMatthew R. Ochs * 25081d3324c3SMatthew R. Ochs * Following a reset, the state is evaluated again in case an EEH occurred 25091d3324c3SMatthew R. Ochs * during the reset. In such a scenario, the host reset will either yield 25101d3324c3SMatthew R. Ochs * until the EEH recovery is complete or return success or failure based 25111d3324c3SMatthew R. Ochs * upon the current device state. 25121d3324c3SMatthew R. Ochs * 251315305514SMatthew R. Ochs * Return: 251415305514SMatthew R. Ochs * SUCCESS as defined in scsi/scsi.h 251515305514SMatthew R. Ochs * FAILED as defined in scsi/scsi.h 251615305514SMatthew R. Ochs */ 251715305514SMatthew R. Ochs static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp) 251815305514SMatthew R. Ochs { 251915305514SMatthew R. Ochs int rc = SUCCESS; 252015305514SMatthew R. Ochs int rcr = 0; 252115305514SMatthew R. Ochs struct Scsi_Host *host = scp->device->host; 2522fb67d44dSMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(host); 2523fb67d44dSMatthew R. Ochs struct device *dev = &cfg->dev->dev; 252415305514SMatthew R. Ochs 25255a4d9d77SMatthew R. Ochs dev_dbg(dev, "%s: %d\n", __func__, host->host_no); 252615305514SMatthew R. Ochs 252715305514SMatthew R. Ochs switch (cfg->state) { 252815305514SMatthew R. Ochs case STATE_NORMAL: 252915305514SMatthew R. Ochs cfg->state = STATE_RESET; 2530f411396dSManoj N. Kumar drain_ioctls(cfg); 253115305514SMatthew R. Ochs cxlflash_mark_contexts_error(cfg); 253215305514SMatthew R. Ochs rcr = afu_reset(cfg); 253315305514SMatthew R. Ochs if (rcr) { 253415305514SMatthew R. Ochs rc = FAILED; 253515305514SMatthew R. Ochs cfg->state = STATE_FAILTERM; 253615305514SMatthew R. Ochs } else 253715305514SMatthew R. Ochs cfg->state = STATE_NORMAL; 253815305514SMatthew R. Ochs wake_up_all(&cfg->reset_waitq); 25391d3324c3SMatthew R. Ochs ssleep(1); 25401d3324c3SMatthew R. Ochs /* fall through */ 254115305514SMatthew R. Ochs case STATE_RESET: 254215305514SMatthew R. Ochs wait_event(cfg->reset_waitq, cfg->state != STATE_RESET); 254315305514SMatthew R. Ochs if (cfg->state == STATE_NORMAL) 254415305514SMatthew R. Ochs break; 254515305514SMatthew R. Ochs /* fall through */ 254615305514SMatthew R. Ochs default: 254715305514SMatthew R. Ochs rc = FAILED; 254815305514SMatthew R. Ochs break; 254915305514SMatthew R. Ochs } 255015305514SMatthew R. Ochs 2551fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 255215305514SMatthew R. Ochs return rc; 255315305514SMatthew R. Ochs } 255415305514SMatthew R. Ochs 255515305514SMatthew R. Ochs /** 255615305514SMatthew R. Ochs * cxlflash_change_queue_depth() - change the queue depth for the device 255715305514SMatthew R. Ochs * @sdev: SCSI device destined for queue depth change. 
255815305514SMatthew R. Ochs * @qdepth: Requested queue depth value to set. 255915305514SMatthew R. Ochs * 256015305514SMatthew R. Ochs * The requested queue depth is capped to the maximum supported value. 256115305514SMatthew R. Ochs * 256215305514SMatthew R. Ochs * Return: The actual queue depth set. 256315305514SMatthew R. Ochs */ 256415305514SMatthew R. Ochs static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth) 256515305514SMatthew R. Ochs { 256615305514SMatthew R. Ochs 256715305514SMatthew R. Ochs if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN) 256815305514SMatthew R. Ochs qdepth = CXLFLASH_MAX_CMDS_PER_LUN; 256915305514SMatthew R. Ochs 257015305514SMatthew R. Ochs scsi_change_queue_depth(sdev, qdepth); 257115305514SMatthew R. Ochs return sdev->queue_depth; 257215305514SMatthew R. Ochs } 257315305514SMatthew R. Ochs 257415305514SMatthew R. Ochs /** 257515305514SMatthew R. Ochs * cxlflash_show_port_status() - queries and presents the current port status 2576e0f01a21SMatthew R. Ochs * @port: Desired port for status reporting. 25773b225cd3SMatthew R. Ochs * @cfg: Internal structure associated with the host. 257815305514SMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. 257915305514SMatthew R. Ochs * 258078ae028eSMatthew R. Ochs * Return: The size of the ASCII string returned in @buf or -EINVAL. 258115305514SMatthew R. Ochs */ 25823b225cd3SMatthew R. Ochs static ssize_t cxlflash_show_port_status(u32 port, 25833b225cd3SMatthew R. Ochs struct cxlflash_cfg *cfg, 25843b225cd3SMatthew R. Ochs char *buf) 258515305514SMatthew R. Ochs { 258678ae028eSMatthew R. Ochs struct device *dev = &cfg->dev->dev; 258715305514SMatthew R. Ochs char *disp_status; 258815305514SMatthew R. Ochs u64 status; 25890aa14887SMatthew R. Ochs __be64 __iomem *fc_port_regs; 259015305514SMatthew R. Ochs 259178ae028eSMatthew R. Ochs WARN_ON(port >= MAX_FC_PORTS); 259278ae028eSMatthew R. Ochs 259378ae028eSMatthew R. Ochs if (port >= cfg->num_fc_ports) { 259478ae028eSMatthew R. Ochs dev_info(dev, "%s: Port %d not supported on this card.\n", 259578ae028eSMatthew R. Ochs __func__, port); 259678ae028eSMatthew R. Ochs return -EINVAL; 259778ae028eSMatthew R. Ochs } 259815305514SMatthew R. Ochs 25990aa14887SMatthew R. Ochs fc_port_regs = get_fc_port_regs(cfg, port); 26000aa14887SMatthew R. Ochs status = readq_be(&fc_port_regs[FC_MTIP_STATUS / 8]); 2601e0f01a21SMatthew R. Ochs status &= FC_MTIP_STATUS_MASK; 260215305514SMatthew R. Ochs 260315305514SMatthew R. Ochs if (status == FC_MTIP_STATUS_ONLINE) 260415305514SMatthew R. Ochs disp_status = "online"; 260515305514SMatthew R. Ochs else if (status == FC_MTIP_STATUS_OFFLINE) 260615305514SMatthew R. Ochs disp_status = "offline"; 260715305514SMatthew R. Ochs else 260815305514SMatthew R. Ochs disp_status = "unknown"; 260915305514SMatthew R. Ochs 2610e0f01a21SMatthew R. Ochs return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status); 261115305514SMatthew R. Ochs } 261215305514SMatthew R. Ochs 261315305514SMatthew R. Ochs /** 2614e0f01a21SMatthew R. Ochs * port0_show() - queries and presents the current status of port 0 2615e0f01a21SMatthew R. Ochs * @dev: Generic device associated with the host owning the port. 2616e0f01a21SMatthew R. Ochs * @attr: Device attribute representing the port. 2617e0f01a21SMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. 2618e0f01a21SMatthew R. Ochs * 2619e0f01a21SMatthew R. Ochs * Return: The size of the ASCII string returned in @buf. 2620e0f01a21SMatthew R. 
Ochs */ 2621e0f01a21SMatthew R. Ochs static ssize_t port0_show(struct device *dev, 2622e0f01a21SMatthew R. Ochs struct device_attribute *attr, 2623e0f01a21SMatthew R. Ochs char *buf) 2624e0f01a21SMatthew R. Ochs { 2625fb67d44dSMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); 2626e0f01a21SMatthew R. Ochs 26273b225cd3SMatthew R. Ochs return cxlflash_show_port_status(0, cfg, buf); 2628e0f01a21SMatthew R. Ochs } 2629e0f01a21SMatthew R. Ochs 2630e0f01a21SMatthew R. Ochs /** 2631e0f01a21SMatthew R. Ochs * port1_show() - queries and presents the current status of port 1 2632e0f01a21SMatthew R. Ochs * @dev: Generic device associated with the host owning the port. 2633e0f01a21SMatthew R. Ochs * @attr: Device attribute representing the port. 2634e0f01a21SMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. 2635e0f01a21SMatthew R. Ochs * 2636e0f01a21SMatthew R. Ochs * Return: The size of the ASCII string returned in @buf. 2637e0f01a21SMatthew R. Ochs */ 2638e0f01a21SMatthew R. Ochs static ssize_t port1_show(struct device *dev, 2639e0f01a21SMatthew R. Ochs struct device_attribute *attr, 2640e0f01a21SMatthew R. Ochs char *buf) 2641e0f01a21SMatthew R. Ochs { 2642fb67d44dSMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); 2643e0f01a21SMatthew R. Ochs 26443b225cd3SMatthew R. Ochs return cxlflash_show_port_status(1, cfg, buf); 2645e0f01a21SMatthew R. Ochs } 2646e0f01a21SMatthew R. Ochs 2647e0f01a21SMatthew R. Ochs /** 26481cd7fabcSMatthew R. Ochs * port2_show() - queries and presents the current status of port 2 26491cd7fabcSMatthew R. Ochs * @dev: Generic device associated with the host owning the port. 26501cd7fabcSMatthew R. Ochs * @attr: Device attribute representing the port. 26511cd7fabcSMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. 26521cd7fabcSMatthew R. Ochs * 26531cd7fabcSMatthew R. Ochs * Return: The size of the ASCII string returned in @buf. 26541cd7fabcSMatthew R. Ochs */ 26551cd7fabcSMatthew R. Ochs static ssize_t port2_show(struct device *dev, 26561cd7fabcSMatthew R. Ochs struct device_attribute *attr, 26571cd7fabcSMatthew R. Ochs char *buf) 26581cd7fabcSMatthew R. Ochs { 26591cd7fabcSMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); 26601cd7fabcSMatthew R. Ochs 26611cd7fabcSMatthew R. Ochs return cxlflash_show_port_status(2, cfg, buf); 26621cd7fabcSMatthew R. Ochs } 26631cd7fabcSMatthew R. Ochs 26641cd7fabcSMatthew R. Ochs /** 26651cd7fabcSMatthew R. Ochs * port3_show() - queries and presents the current status of port 3 26661cd7fabcSMatthew R. Ochs * @dev: Generic device associated with the host owning the port. 26671cd7fabcSMatthew R. Ochs * @attr: Device attribute representing the port. 26681cd7fabcSMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. 26691cd7fabcSMatthew R. Ochs * 26701cd7fabcSMatthew R. Ochs * Return: The size of the ASCII string returned in @buf. 26711cd7fabcSMatthew R. Ochs */ 26721cd7fabcSMatthew R. Ochs static ssize_t port3_show(struct device *dev, 26731cd7fabcSMatthew R. Ochs struct device_attribute *attr, 26741cd7fabcSMatthew R. Ochs char *buf) 26751cd7fabcSMatthew R. Ochs { 26761cd7fabcSMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); 26771cd7fabcSMatthew R. Ochs 26781cd7fabcSMatthew R. Ochs return cxlflash_show_port_status(3, cfg, buf); 26791cd7fabcSMatthew R. Ochs } 26801cd7fabcSMatthew R. Ochs 26811cd7fabcSMatthew R. 
Ochs /**
2682e0f01a21SMatthew R. Ochs * lun_mode_show() - presents the current LUN mode of the host
268315305514SMatthew R. Ochs * @dev: Generic device associated with the host.
2684e0f01a21SMatthew R. Ochs * @attr: Device attribute representing the LUN mode.
268515305514SMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
268615305514SMatthew R. Ochs *
268715305514SMatthew R. Ochs * Return: The size of the ASCII string returned in @buf.
268815305514SMatthew R. Ochs */
2689e0f01a21SMatthew R. Ochs static ssize_t lun_mode_show(struct device *dev,
269015305514SMatthew R. Ochs struct device_attribute *attr, char *buf)
269115305514SMatthew R. Ochs {
2692fb67d44dSMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
269315305514SMatthew R. Ochs struct afu *afu = cfg->afu;
269415305514SMatthew R. Ochs
2695e0f01a21SMatthew R. Ochs return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
269615305514SMatthew R. Ochs }
269715305514SMatthew R. Ochs
269815305514SMatthew R. Ochs /**
2699e0f01a21SMatthew R. Ochs * lun_mode_store() - sets the LUN mode of the host
270015305514SMatthew R. Ochs * @dev: Generic device associated with the host.
2701e0f01a21SMatthew R. Ochs * @attr: Device attribute representing the LUN mode.
270215305514SMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
270315305514SMatthew R. Ochs * @count: Length of data residing in @buf.
270415305514SMatthew R. Ochs *
270515305514SMatthew R. Ochs * The CXL Flash AFU supports a dummy LUN mode where the external
270615305514SMatthew R. Ochs * links and storage are not required. Space on the FPGA is used
270715305514SMatthew R. Ochs * to create 1 or 2 small LUNs which are presented to the system
270815305514SMatthew R. Ochs * as if they were a normal storage device. This feature is useful
270915305514SMatthew R. Ochs * during development and also provides manufacturing with a way
271015305514SMatthew R. Ochs * to test the AFU without an actual device.
271115305514SMatthew R. Ochs *
271215305514SMatthew R. Ochs * 0 = external LUN[s] (default)
271315305514SMatthew R. Ochs * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
271415305514SMatthew R. Ochs * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
271515305514SMatthew R. Ochs * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
271615305514SMatthew R. Ochs * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
271715305514SMatthew R. Ochs *
271815305514SMatthew R. Ochs * Return: The number of bytes consumed from @buf (@count).
271915305514SMatthew R. Ochs */
2720e0f01a21SMatthew R. Ochs static ssize_t lun_mode_store(struct device *dev,
272115305514SMatthew R. Ochs struct device_attribute *attr,
272215305514SMatthew R. Ochs const char *buf, size_t count)
272315305514SMatthew R. Ochs {
272415305514SMatthew R. Ochs struct Scsi_Host *shost = class_to_shost(dev);
2725fb67d44dSMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(shost);
272615305514SMatthew R. Ochs struct afu *afu = cfg->afu;
272715305514SMatthew R. Ochs int rc;
272815305514SMatthew R. Ochs u32 lun_mode;
272915305514SMatthew R. Ochs
273015305514SMatthew R. Ochs rc = kstrtouint(buf, 10, &lun_mode);
273115305514SMatthew R. Ochs if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
273215305514SMatthew R. Ochs afu->internal_lun = lun_mode;
2733603ecce9SManoj N. Kumar
2734603ecce9SManoj N. Kumar /*
2735603ecce9SManoj N. Kumar * When configured for internal LUN, there is only one channel,
273678ae028eSMatthew R.
Ochs * channel number 0, else there will be one less than the number 273778ae028eSMatthew R. Ochs * of fc ports for this card. 2738603ecce9SManoj N. Kumar */ 2739603ecce9SManoj N. Kumar if (afu->internal_lun) 2740603ecce9SManoj N. Kumar shost->max_channel = 0; 2741603ecce9SManoj N. Kumar else 27428fa4f177SMatthew R. Ochs shost->max_channel = PORTNUM2CHAN(cfg->num_fc_ports); 2743603ecce9SManoj N. Kumar 274415305514SMatthew R. Ochs afu_reset(cfg); 274515305514SMatthew R. Ochs scsi_scan_host(cfg->host); 274615305514SMatthew R. Ochs } 274715305514SMatthew R. Ochs 274815305514SMatthew R. Ochs return count; 274915305514SMatthew R. Ochs } 275015305514SMatthew R. Ochs 275115305514SMatthew R. Ochs /** 2752e0f01a21SMatthew R. Ochs * ioctl_version_show() - presents the current ioctl version of the host 275315305514SMatthew R. Ochs * @dev: Generic device associated with the host. 275415305514SMatthew R. Ochs * @attr: Device attribute representing the ioctl version. 275515305514SMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE to report back the ioctl version. 275615305514SMatthew R. Ochs * 275715305514SMatthew R. Ochs * Return: The size of the ASCII string returned in @buf. 275815305514SMatthew R. Ochs */ 2759e0f01a21SMatthew R. Ochs static ssize_t ioctl_version_show(struct device *dev, 2760e0f01a21SMatthew R. Ochs struct device_attribute *attr, char *buf) 276115305514SMatthew R. Ochs { 2762d6e32f53SMatthew R. Ochs ssize_t bytes = 0; 2763d6e32f53SMatthew R. Ochs 2764d6e32f53SMatthew R. Ochs bytes = scnprintf(buf, PAGE_SIZE, 2765d6e32f53SMatthew R. Ochs "disk: %u\n", DK_CXLFLASH_VERSION_0); 2766d6e32f53SMatthew R. Ochs bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes, 2767d6e32f53SMatthew R. Ochs "host: %u\n", HT_CXLFLASH_VERSION_0); 2768d6e32f53SMatthew R. Ochs 2769d6e32f53SMatthew R. Ochs return bytes; 277015305514SMatthew R. Ochs } 277115305514SMatthew R. Ochs 277215305514SMatthew R. Ochs /** 2773e0f01a21SMatthew R. Ochs * cxlflash_show_port_lun_table() - queries and presents the port LUN table 2774e0f01a21SMatthew R. Ochs * @port: Desired port for status reporting. 27753b225cd3SMatthew R. Ochs * @cfg: Internal structure associated with the host. 2776e0f01a21SMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. 2777e0f01a21SMatthew R. Ochs * 277878ae028eSMatthew R. Ochs * Return: The size of the ASCII string returned in @buf or -EINVAL. 2779e0f01a21SMatthew R. Ochs */ 2780e0f01a21SMatthew R. Ochs static ssize_t cxlflash_show_port_lun_table(u32 port, 27813b225cd3SMatthew R. Ochs struct cxlflash_cfg *cfg, 2782e0f01a21SMatthew R. Ochs char *buf) 2783e0f01a21SMatthew R. Ochs { 278478ae028eSMatthew R. Ochs struct device *dev = &cfg->dev->dev; 27850aa14887SMatthew R. Ochs __be64 __iomem *fc_port_luns; 2786e0f01a21SMatthew R. Ochs int i; 2787e0f01a21SMatthew R. Ochs ssize_t bytes = 0; 2788e0f01a21SMatthew R. Ochs 278978ae028eSMatthew R. Ochs WARN_ON(port >= MAX_FC_PORTS); 279078ae028eSMatthew R. Ochs 279178ae028eSMatthew R. Ochs if (port >= cfg->num_fc_ports) { 279278ae028eSMatthew R. Ochs dev_info(dev, "%s: Port %d not supported on this card.\n", 279378ae028eSMatthew R. Ochs __func__, port); 279478ae028eSMatthew R. Ochs return -EINVAL; 279578ae028eSMatthew R. Ochs } 2796e0f01a21SMatthew R. Ochs 27970aa14887SMatthew R. Ochs fc_port_luns = get_fc_port_luns(cfg, port); 2798e0f01a21SMatthew R. Ochs 2799e0f01a21SMatthew R. Ochs for (i = 0; i < CXLFLASH_NUM_VLUNS; i++) 2800e0f01a21SMatthew R. Ochs bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes, 28010aa14887SMatthew R. 
Ochs "%03d: %016llx\n", 28020aa14887SMatthew R. Ochs i, readq_be(&fc_port_luns[i])); 2803e0f01a21SMatthew R. Ochs return bytes; 2804e0f01a21SMatthew R. Ochs } 2805e0f01a21SMatthew R. Ochs 2806e0f01a21SMatthew R. Ochs /** 2807e0f01a21SMatthew R. Ochs * port0_lun_table_show() - presents the current LUN table of port 0 2808e0f01a21SMatthew R. Ochs * @dev: Generic device associated with the host owning the port. 2809e0f01a21SMatthew R. Ochs * @attr: Device attribute representing the port. 2810e0f01a21SMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. 2811e0f01a21SMatthew R. Ochs * 2812e0f01a21SMatthew R. Ochs * Return: The size of the ASCII string returned in @buf. 2813e0f01a21SMatthew R. Ochs */ 2814e0f01a21SMatthew R. Ochs static ssize_t port0_lun_table_show(struct device *dev, 2815e0f01a21SMatthew R. Ochs struct device_attribute *attr, 2816e0f01a21SMatthew R. Ochs char *buf) 2817e0f01a21SMatthew R. Ochs { 2818fb67d44dSMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); 2819e0f01a21SMatthew R. Ochs 28203b225cd3SMatthew R. Ochs return cxlflash_show_port_lun_table(0, cfg, buf); 2821e0f01a21SMatthew R. Ochs } 2822e0f01a21SMatthew R. Ochs 2823e0f01a21SMatthew R. Ochs /** 2824e0f01a21SMatthew R. Ochs * port1_lun_table_show() - presents the current LUN table of port 1 2825e0f01a21SMatthew R. Ochs * @dev: Generic device associated with the host owning the port. 2826e0f01a21SMatthew R. Ochs * @attr: Device attribute representing the port. 2827e0f01a21SMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. 2828e0f01a21SMatthew R. Ochs * 2829e0f01a21SMatthew R. Ochs * Return: The size of the ASCII string returned in @buf. 2830e0f01a21SMatthew R. Ochs */ 2831e0f01a21SMatthew R. Ochs static ssize_t port1_lun_table_show(struct device *dev, 2832e0f01a21SMatthew R. Ochs struct device_attribute *attr, 2833e0f01a21SMatthew R. Ochs char *buf) 2834e0f01a21SMatthew R. Ochs { 2835fb67d44dSMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); 2836e0f01a21SMatthew R. Ochs 28373b225cd3SMatthew R. Ochs return cxlflash_show_port_lun_table(1, cfg, buf); 2838e0f01a21SMatthew R. Ochs } 2839e0f01a21SMatthew R. Ochs 2840e0f01a21SMatthew R. Ochs /** 28411cd7fabcSMatthew R. Ochs * port2_lun_table_show() - presents the current LUN table of port 2 28421cd7fabcSMatthew R. Ochs * @dev: Generic device associated with the host owning the port. 28431cd7fabcSMatthew R. Ochs * @attr: Device attribute representing the port. 28441cd7fabcSMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. 28451cd7fabcSMatthew R. Ochs * 28461cd7fabcSMatthew R. Ochs * Return: The size of the ASCII string returned in @buf. 28471cd7fabcSMatthew R. Ochs */ 28481cd7fabcSMatthew R. Ochs static ssize_t port2_lun_table_show(struct device *dev, 28491cd7fabcSMatthew R. Ochs struct device_attribute *attr, 28501cd7fabcSMatthew R. Ochs char *buf) 28511cd7fabcSMatthew R. Ochs { 28521cd7fabcSMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); 28531cd7fabcSMatthew R. Ochs 28541cd7fabcSMatthew R. Ochs return cxlflash_show_port_lun_table(2, cfg, buf); 28551cd7fabcSMatthew R. Ochs } 28561cd7fabcSMatthew R. Ochs 28571cd7fabcSMatthew R. Ochs /** 28581cd7fabcSMatthew R. Ochs * port3_lun_table_show() - presents the current LUN table of port 3 28591cd7fabcSMatthew R. Ochs * @dev: Generic device associated with the host owning the port. 28601cd7fabcSMatthew R. 
Ochs * @attr: Device attribute representing the port.
28611cd7fabcSMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
28621cd7fabcSMatthew R. Ochs *
28631cd7fabcSMatthew R. Ochs * Return: The size of the ASCII string returned in @buf.
28641cd7fabcSMatthew R. Ochs */
28651cd7fabcSMatthew R. Ochs static ssize_t port3_lun_table_show(struct device *dev,
28661cd7fabcSMatthew R. Ochs struct device_attribute *attr,
28671cd7fabcSMatthew R. Ochs char *buf)
28681cd7fabcSMatthew R. Ochs {
28691cd7fabcSMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
28701cd7fabcSMatthew R. Ochs
28711cd7fabcSMatthew R. Ochs return cxlflash_show_port_lun_table(3, cfg, buf);
28721cd7fabcSMatthew R. Ochs }
28731cd7fabcSMatthew R. Ochs
28741cd7fabcSMatthew R. Ochs /**
2875cba06e6dSMatthew R. Ochs * irqpoll_weight_show() - presents the current IRQ poll weight for the host
2876cba06e6dSMatthew R. Ochs * @dev: Generic device associated with the host.
2877cba06e6dSMatthew R. Ochs * @attr: Device attribute representing the IRQ poll weight.
2878cba06e6dSMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE to report back the current IRQ poll
2879cba06e6dSMatthew R. Ochs * weight in ASCII.
2880cba06e6dSMatthew R. Ochs *
2881cba06e6dSMatthew R. Ochs * An IRQ poll weight of 0 indicates polling is disabled.
2882cba06e6dSMatthew R. Ochs *
2883cba06e6dSMatthew R. Ochs * Return: The size of the ASCII string returned in @buf.
2884cba06e6dSMatthew R. Ochs */
2885cba06e6dSMatthew R. Ochs static ssize_t irqpoll_weight_show(struct device *dev,
2886cba06e6dSMatthew R. Ochs struct device_attribute *attr, char *buf)
2887cba06e6dSMatthew R. Ochs {
2888cba06e6dSMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2889cba06e6dSMatthew R. Ochs struct afu *afu = cfg->afu;
2890cba06e6dSMatthew R. Ochs
2891cba06e6dSMatthew R. Ochs return scnprintf(buf, PAGE_SIZE, "%u\n", afu->irqpoll_weight);
2892cba06e6dSMatthew R. Ochs }
2893cba06e6dSMatthew R. Ochs
2894cba06e6dSMatthew R. Ochs /**
2895cba06e6dSMatthew R. Ochs * irqpoll_weight_store() - sets the current IRQ poll weight for the host
2896cba06e6dSMatthew R. Ochs * @dev: Generic device associated with the host.
2897cba06e6dSMatthew R. Ochs * @attr: Device attribute representing the IRQ poll weight.
2898cba06e6dSMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE containing the desired IRQ poll
2899cba06e6dSMatthew R. Ochs * weight in ASCII.
2900cba06e6dSMatthew R. Ochs * @count: Length of data residing in @buf.
2901cba06e6dSMatthew R. Ochs *
2902cba06e6dSMatthew R. Ochs * An IRQ poll weight of 0 indicates polling is disabled.
2903cba06e6dSMatthew R. Ochs *
2904cba06e6dSMatthew R. Ochs * Return: @count on success, -EINVAL on failure.
2905cba06e6dSMatthew R. Ochs */
2906cba06e6dSMatthew R. Ochs static ssize_t irqpoll_weight_store(struct device *dev,
2907cba06e6dSMatthew R. Ochs struct device_attribute *attr,
2908cba06e6dSMatthew R. Ochs const char *buf, size_t count)
2909cba06e6dSMatthew R. Ochs {
2910cba06e6dSMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2911cba06e6dSMatthew R. Ochs struct device *cfgdev = &cfg->dev->dev;
2912cba06e6dSMatthew R. Ochs struct afu *afu = cfg->afu;
2913bfc0bab1SUma Krishnan struct hwq *hwq;
2914cba06e6dSMatthew R. Ochs u32 weight;
2915bfc0bab1SUma Krishnan int rc, i;
2916cba06e6dSMatthew R. Ochs
2917cba06e6dSMatthew R. Ochs rc = kstrtouint(buf, 10, &weight);
2918cba06e6dSMatthew R. Ochs if (rc)
2919cba06e6dSMatthew R.
Ochs return -EINVAL;
2920cba06e6dSMatthew R. Ochs
2921cba06e6dSMatthew R. Ochs if (weight > 256) {
2922cba06e6dSMatthew R. Ochs dev_info(cfgdev,
2923cba06e6dSMatthew R. Ochs "Invalid IRQ poll weight. It must be 256 or less.\n");
2924cba06e6dSMatthew R. Ochs return -EINVAL;
2925cba06e6dSMatthew R. Ochs }
2926cba06e6dSMatthew R. Ochs
2927cba06e6dSMatthew R. Ochs if (weight == afu->irqpoll_weight) {
2928cba06e6dSMatthew R. Ochs dev_info(cfgdev,
2929cba06e6dSMatthew R. Ochs "Requested IRQ poll weight is already set.\n");
2930cba06e6dSMatthew R. Ochs return -EINVAL;
2931cba06e6dSMatthew R. Ochs }
2932cba06e6dSMatthew R. Ochs
2933bfc0bab1SUma Krishnan if (afu_is_irqpoll_enabled(afu)) {
29343065267aSMatthew R. Ochs for (i = 0; i < afu->num_hwqs; i++) {
2935bfc0bab1SUma Krishnan hwq = get_hwq(afu, i);
2936bfc0bab1SUma Krishnan
2937bfc0bab1SUma Krishnan irq_poll_disable(&hwq->irqpoll);
2938bfc0bab1SUma Krishnan }
2939bfc0bab1SUma Krishnan }
2940cba06e6dSMatthew R. Ochs
2941cba06e6dSMatthew R. Ochs afu->irqpoll_weight = weight;
2942cba06e6dSMatthew R. Ochs
2943bfc0bab1SUma Krishnan if (weight > 0) {
29443065267aSMatthew R. Ochs for (i = 0; i < afu->num_hwqs; i++) {
2945bfc0bab1SUma Krishnan hwq = get_hwq(afu, i);
2946bfc0bab1SUma Krishnan
2947bfc0bab1SUma Krishnan irq_poll_init(&hwq->irqpoll, weight, cxlflash_irqpoll);
2948bfc0bab1SUma Krishnan }
2949bfc0bab1SUma Krishnan }
2950cba06e6dSMatthew R. Ochs
2951cba06e6dSMatthew R. Ochs return count;
2952cba06e6dSMatthew R. Ochs }
2953cba06e6dSMatthew R. Ochs
2954cba06e6dSMatthew R. Ochs /**
29553065267aSMatthew R. Ochs * num_hwqs_show() - presents the number of hardware queues for the host
29563065267aSMatthew R. Ochs * @dev: Generic device associated with the host.
29573065267aSMatthew R. Ochs * @attr: Device attribute representing the number of hardware queues.
29583065267aSMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE to report back the number of hardware
29593065267aSMatthew R. Ochs * queues in ASCII.
29603065267aSMatthew R. Ochs *
29613065267aSMatthew R. Ochs * Return: The size of the ASCII string returned in @buf.
29623065267aSMatthew R. Ochs */
29633065267aSMatthew R. Ochs static ssize_t num_hwqs_show(struct device *dev,
29643065267aSMatthew R. Ochs struct device_attribute *attr, char *buf)
29653065267aSMatthew R. Ochs {
29663065267aSMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
29673065267aSMatthew R. Ochs struct afu *afu = cfg->afu;
29683065267aSMatthew R. Ochs
29693065267aSMatthew R. Ochs return scnprintf(buf, PAGE_SIZE, "%u\n", afu->num_hwqs);
29703065267aSMatthew R. Ochs }
29713065267aSMatthew R. Ochs
29723065267aSMatthew R. Ochs /**
29733065267aSMatthew R. Ochs * num_hwqs_store() - sets the number of hardware queues for the host
29743065267aSMatthew R. Ochs * @dev: Generic device associated with the host.
29753065267aSMatthew R. Ochs * @attr: Device attribute representing the number of hardware queues.
29763065267aSMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE containing the number of hardware
29773065267aSMatthew R. Ochs * queues in ASCII.
29783065267aSMatthew R. Ochs * @count: Length of data residing in @buf.
29793065267aSMatthew R. Ochs *
29803065267aSMatthew R. Ochs * n > 0: num_hwqs = n
29813065267aSMatthew R. Ochs * n = 0: num_hwqs = num_online_cpus()
29823065267aSMatthew R. Ochs * n < 0: num_hwqs = num_online_cpus() / abs(n)
29833065267aSMatthew R. Ochs *
29843065267aSMatthew R. Ochs * Return: @count on success, -EINVAL on failure.
29853065267aSMatthew R. Ochs */
29863065267aSMatthew R.
Ochs static ssize_t num_hwqs_store(struct device *dev, 29873065267aSMatthew R. Ochs struct device_attribute *attr, 29883065267aSMatthew R. Ochs const char *buf, size_t count) 29893065267aSMatthew R. Ochs { 29903065267aSMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); 29913065267aSMatthew R. Ochs struct afu *afu = cfg->afu; 29923065267aSMatthew R. Ochs int rc; 29933065267aSMatthew R. Ochs int nhwqs, num_hwqs; 29943065267aSMatthew R. Ochs 29953065267aSMatthew R. Ochs rc = kstrtoint(buf, 10, &nhwqs); 29963065267aSMatthew R. Ochs if (rc) 29973065267aSMatthew R. Ochs return -EINVAL; 29983065267aSMatthew R. Ochs 29993065267aSMatthew R. Ochs if (nhwqs >= 1) 30003065267aSMatthew R. Ochs num_hwqs = nhwqs; 30013065267aSMatthew R. Ochs else if (nhwqs == 0) 30023065267aSMatthew R. Ochs num_hwqs = num_online_cpus(); 30033065267aSMatthew R. Ochs else 30043065267aSMatthew R. Ochs num_hwqs = num_online_cpus() / abs(nhwqs); 30053065267aSMatthew R. Ochs 30063065267aSMatthew R. Ochs afu->desired_hwqs = min(num_hwqs, CXLFLASH_MAX_HWQS); 30073065267aSMatthew R. Ochs WARN_ON_ONCE(afu->desired_hwqs == 0); 30083065267aSMatthew R. Ochs 30093065267aSMatthew R. Ochs retry: 30103065267aSMatthew R. Ochs switch (cfg->state) { 30113065267aSMatthew R. Ochs case STATE_NORMAL: 30123065267aSMatthew R. Ochs cfg->state = STATE_RESET; 30133065267aSMatthew R. Ochs drain_ioctls(cfg); 30143065267aSMatthew R. Ochs cxlflash_mark_contexts_error(cfg); 30153065267aSMatthew R. Ochs rc = afu_reset(cfg); 30163065267aSMatthew R. Ochs if (rc) 30173065267aSMatthew R. Ochs cfg->state = STATE_FAILTERM; 30183065267aSMatthew R. Ochs else 30193065267aSMatthew R. Ochs cfg->state = STATE_NORMAL; 30203065267aSMatthew R. Ochs wake_up_all(&cfg->reset_waitq); 30213065267aSMatthew R. Ochs break; 30223065267aSMatthew R. Ochs case STATE_RESET: 30233065267aSMatthew R. Ochs wait_event(cfg->reset_waitq, cfg->state != STATE_RESET); 30243065267aSMatthew R. Ochs if (cfg->state == STATE_NORMAL) 30253065267aSMatthew R. Ochs goto retry; 30263065267aSMatthew R. Ochs default: 30273065267aSMatthew R. Ochs /* Ideally should not happen */ 30283065267aSMatthew R. Ochs dev_err(dev, "%s: Device is not ready, state=%d\n", 30293065267aSMatthew R. Ochs __func__, cfg->state); 30303065267aSMatthew R. Ochs break; 30313065267aSMatthew R. Ochs } 30323065267aSMatthew R. Ochs 30333065267aSMatthew R. Ochs return count; 30343065267aSMatthew R. Ochs } 30353065267aSMatthew R. Ochs 30361dd0c0e4SMatthew R. Ochs static const char *hwq_mode_name[MAX_HWQ_MODE] = { "rr", "tag", "cpu" }; 30371dd0c0e4SMatthew R. Ochs 30381dd0c0e4SMatthew R. Ochs /** 30391dd0c0e4SMatthew R. Ochs * hwq_mode_show() - presents the HWQ steering mode for the host 30401dd0c0e4SMatthew R. Ochs * @dev: Generic device associated with the host. 30411dd0c0e4SMatthew R. Ochs * @attr: Device attribute representing the HWQ steering mode. 30421dd0c0e4SMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE to report back the HWQ steering mode 30431dd0c0e4SMatthew R. Ochs * as a character string. 30441dd0c0e4SMatthew R. Ochs * 30451dd0c0e4SMatthew R. Ochs * Return: The size of the ASCII string returned in @buf. 30461dd0c0e4SMatthew R. Ochs */ 30471dd0c0e4SMatthew R. Ochs static ssize_t hwq_mode_show(struct device *dev, 30481dd0c0e4SMatthew R. Ochs struct device_attribute *attr, char *buf) 30491dd0c0e4SMatthew R. Ochs { 30501dd0c0e4SMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); 30511dd0c0e4SMatthew R. Ochs struct afu *afu = cfg->afu; 30521dd0c0e4SMatthew R. 
Ochs
30531dd0c0e4SMatthew R. Ochs return scnprintf(buf, PAGE_SIZE, "%s\n", hwq_mode_name[afu->hwq_mode]);
30541dd0c0e4SMatthew R. Ochs }
30551dd0c0e4SMatthew R. Ochs
30561dd0c0e4SMatthew R. Ochs /**
30571dd0c0e4SMatthew R. Ochs * hwq_mode_store() - sets the HWQ steering mode for the host
30581dd0c0e4SMatthew R. Ochs * @dev: Generic device associated with the host.
30591dd0c0e4SMatthew R. Ochs * @attr: Device attribute representing the HWQ steering mode.
30601dd0c0e4SMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE containing the HWQ steering mode
30611dd0c0e4SMatthew R. Ochs * as a character string.
30621dd0c0e4SMatthew R. Ochs * @count: Length of data residing in @buf.
30631dd0c0e4SMatthew R. Ochs *
30641dd0c0e4SMatthew R. Ochs * rr = Round-Robin
30651dd0c0e4SMatthew R. Ochs * tag = Block MQ Tagging
30661dd0c0e4SMatthew R. Ochs * cpu = CPU Affinity
30671dd0c0e4SMatthew R. Ochs *
30681dd0c0e4SMatthew R. Ochs * Return: @count on success, -EINVAL on failure.
30691dd0c0e4SMatthew R. Ochs */
30701dd0c0e4SMatthew R. Ochs static ssize_t hwq_mode_store(struct device *dev,
30711dd0c0e4SMatthew R. Ochs struct device_attribute *attr,
30721dd0c0e4SMatthew R. Ochs const char *buf, size_t count)
30731dd0c0e4SMatthew R. Ochs {
30741dd0c0e4SMatthew R. Ochs struct Scsi_Host *shost = class_to_shost(dev);
30751dd0c0e4SMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(shost);
30761dd0c0e4SMatthew R. Ochs struct device *cfgdev = &cfg->dev->dev;
30771dd0c0e4SMatthew R. Ochs struct afu *afu = cfg->afu;
30781dd0c0e4SMatthew R. Ochs int i;
30791dd0c0e4SMatthew R. Ochs u32 mode = MAX_HWQ_MODE;
30801dd0c0e4SMatthew R. Ochs
30811dd0c0e4SMatthew R. Ochs for (i = 0; i < MAX_HWQ_MODE; i++) {
30821dd0c0e4SMatthew R. Ochs if (!strncmp(hwq_mode_name[i], buf, strlen(hwq_mode_name[i]))) {
30831dd0c0e4SMatthew R. Ochs mode = i;
30841dd0c0e4SMatthew R. Ochs break;
30851dd0c0e4SMatthew R. Ochs }
30861dd0c0e4SMatthew R. Ochs }
30871dd0c0e4SMatthew R. Ochs
30881dd0c0e4SMatthew R. Ochs if (mode >= MAX_HWQ_MODE) {
30891dd0c0e4SMatthew R. Ochs dev_info(cfgdev, "Invalid HWQ steering mode.\n");
30901dd0c0e4SMatthew R. Ochs return -EINVAL;
30911dd0c0e4SMatthew R. Ochs }
30921dd0c0e4SMatthew R. Ochs
30931dd0c0e4SMatthew R. Ochs if ((mode == HWQ_MODE_TAG) && !shost_use_blk_mq(shost)) {
30941dd0c0e4SMatthew R. Ochs dev_info(cfgdev, "SCSI-MQ is not enabled, use a different "
30951dd0c0e4SMatthew R. Ochs "HWQ steering mode.\n");
30961dd0c0e4SMatthew R. Ochs return -EINVAL;
30971dd0c0e4SMatthew R. Ochs }
30981dd0c0e4SMatthew R. Ochs
30991dd0c0e4SMatthew R. Ochs afu->hwq_mode = mode;
31001dd0c0e4SMatthew R. Ochs
31011dd0c0e4SMatthew R. Ochs return count;
31021dd0c0e4SMatthew R. Ochs }
31031dd0c0e4SMatthew R. Ochs
31043065267aSMatthew R. Ochs /**
3105e0f01a21SMatthew R. Ochs * mode_show() - presents the current mode of the device
310615305514SMatthew R. Ochs * @dev: Generic device associated with the device.
310715305514SMatthew R. Ochs * @attr: Device attribute representing the device mode.
310815305514SMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
310915305514SMatthew R. Ochs *
311015305514SMatthew R. Ochs * Return: The size of the ASCII string returned in @buf.
311115305514SMatthew R. Ochs */
3112e0f01a21SMatthew R. Ochs static ssize_t mode_show(struct device *dev,
311315305514SMatthew R. Ochs struct device_attribute *attr, char *buf)
311415305514SMatthew R. Ochs {
311515305514SMatthew R. Ochs struct scsi_device *sdev = to_scsi_device(dev);
311615305514SMatthew R.
Ochs 3117e0f01a21SMatthew R. Ochs return scnprintf(buf, PAGE_SIZE, "%s\n", 311815305514SMatthew R. Ochs sdev->hostdata ? "superpipe" : "legacy"); 311915305514SMatthew R. Ochs } 312015305514SMatthew R. Ochs 312115305514SMatthew R. Ochs /* 312215305514SMatthew R. Ochs * Host attributes 312315305514SMatthew R. Ochs */ 3124e0f01a21SMatthew R. Ochs static DEVICE_ATTR_RO(port0); 3125e0f01a21SMatthew R. Ochs static DEVICE_ATTR_RO(port1); 31261cd7fabcSMatthew R. Ochs static DEVICE_ATTR_RO(port2); 31271cd7fabcSMatthew R. Ochs static DEVICE_ATTR_RO(port3); 3128e0f01a21SMatthew R. Ochs static DEVICE_ATTR_RW(lun_mode); 3129e0f01a21SMatthew R. Ochs static DEVICE_ATTR_RO(ioctl_version); 3130e0f01a21SMatthew R. Ochs static DEVICE_ATTR_RO(port0_lun_table); 3131e0f01a21SMatthew R. Ochs static DEVICE_ATTR_RO(port1_lun_table); 31321cd7fabcSMatthew R. Ochs static DEVICE_ATTR_RO(port2_lun_table); 31331cd7fabcSMatthew R. Ochs static DEVICE_ATTR_RO(port3_lun_table); 3134cba06e6dSMatthew R. Ochs static DEVICE_ATTR_RW(irqpoll_weight); 31353065267aSMatthew R. Ochs static DEVICE_ATTR_RW(num_hwqs); 31361dd0c0e4SMatthew R. Ochs static DEVICE_ATTR_RW(hwq_mode); 313715305514SMatthew R. Ochs 313815305514SMatthew R. Ochs static struct device_attribute *cxlflash_host_attrs[] = { 313915305514SMatthew R. Ochs &dev_attr_port0, 314015305514SMatthew R. Ochs &dev_attr_port1, 31411cd7fabcSMatthew R. Ochs &dev_attr_port2, 31421cd7fabcSMatthew R. Ochs &dev_attr_port3, 314315305514SMatthew R. Ochs &dev_attr_lun_mode, 314415305514SMatthew R. Ochs &dev_attr_ioctl_version, 3145e0f01a21SMatthew R. Ochs &dev_attr_port0_lun_table, 3146e0f01a21SMatthew R. Ochs &dev_attr_port1_lun_table, 31471cd7fabcSMatthew R. Ochs &dev_attr_port2_lun_table, 31481cd7fabcSMatthew R. Ochs &dev_attr_port3_lun_table, 3149cba06e6dSMatthew R. Ochs &dev_attr_irqpoll_weight, 31503065267aSMatthew R. Ochs &dev_attr_num_hwqs, 31511dd0c0e4SMatthew R. Ochs &dev_attr_hwq_mode, 315215305514SMatthew R. Ochs NULL 315315305514SMatthew R. Ochs }; 315415305514SMatthew R. Ochs 315515305514SMatthew R. Ochs /* 315615305514SMatthew R. Ochs * Device attributes 315715305514SMatthew R. Ochs */ 3158e0f01a21SMatthew R. Ochs static DEVICE_ATTR_RO(mode); 315915305514SMatthew R. Ochs 316015305514SMatthew R. Ochs static struct device_attribute *cxlflash_dev_attrs[] = { 316115305514SMatthew R. Ochs &dev_attr_mode, 316215305514SMatthew R. Ochs NULL 316315305514SMatthew R. Ochs }; 316415305514SMatthew R. Ochs 316515305514SMatthew R. Ochs /* 316615305514SMatthew R. Ochs * Host template 316715305514SMatthew R. Ochs */ 316815305514SMatthew R. Ochs static struct scsi_host_template driver_template = { 316915305514SMatthew R. Ochs .module = THIS_MODULE, 317015305514SMatthew R. Ochs .name = CXLFLASH_ADAPTER_NAME, 317115305514SMatthew R. Ochs .info = cxlflash_driver_info, 317215305514SMatthew R. Ochs .ioctl = cxlflash_ioctl, 317315305514SMatthew R. Ochs .proc_name = CXLFLASH_NAME, 317415305514SMatthew R. Ochs .queuecommand = cxlflash_queuecommand, 31757c4c41f1SUma Krishnan .eh_abort_handler = cxlflash_eh_abort_handler, 317615305514SMatthew R. Ochs .eh_device_reset_handler = cxlflash_eh_device_reset_handler, 317715305514SMatthew R. Ochs .eh_host_reset_handler = cxlflash_eh_host_reset_handler, 317815305514SMatthew R. Ochs .change_queue_depth = cxlflash_change_queue_depth, 317983430833SManoj N. Kumar .cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN, 318015305514SMatthew R. Ochs .can_queue = CXLFLASH_MAX_CMDS, 31815fbb96c8SMatthew R. 
Ochs .cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1, 318215305514SMatthew R. Ochs .this_id = -1, 318368ab2d76SUma Krishnan .sg_tablesize = 1, /* No scatter gather support */ 318415305514SMatthew R. Ochs .max_sectors = CXLFLASH_MAX_SECTORS, 318515305514SMatthew R. Ochs .use_clustering = ENABLE_CLUSTERING, 318615305514SMatthew R. Ochs .shost_attrs = cxlflash_host_attrs, 318715305514SMatthew R. Ochs .sdev_attrs = cxlflash_dev_attrs, 318815305514SMatthew R. Ochs }; 318915305514SMatthew R. Ochs 319015305514SMatthew R. Ochs /* 319115305514SMatthew R. Ochs * Device dependent values 319215305514SMatthew R. Ochs */ 319396e1b660SUma Krishnan static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS, 31940d419130SMatthew R. Ochs CXLFLASH_WWPN_VPD_REQUIRED }; 319596e1b660SUma Krishnan static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS, 3196704c4b0dSUma Krishnan CXLFLASH_NOTIFY_SHUTDOWN }; 319794344520SMatthew R. Ochs static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS, 319807d0c52fSUma Krishnan (CXLFLASH_NOTIFY_SHUTDOWN | 319907d0c52fSUma Krishnan CXLFLASH_OCXL_DEV) }; 320015305514SMatthew R. Ochs 320115305514SMatthew R. Ochs /* 320215305514SMatthew R. Ochs * PCI device binding table 320315305514SMatthew R. Ochs */ 320415305514SMatthew R. Ochs static struct pci_device_id cxlflash_pci_table[] = { 320515305514SMatthew R. Ochs {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA, 320615305514SMatthew R. Ochs PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals}, 3207a2746fb1SManoj Kumar {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT, 3208a2746fb1SManoj Kumar PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals}, 320994344520SMatthew R. Ochs {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_BRIARD, 321094344520SMatthew R. Ochs PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_briard_vals}, 321115305514SMatthew R. Ochs {} 321215305514SMatthew R. Ochs }; 321315305514SMatthew R. Ochs 321415305514SMatthew R. Ochs MODULE_DEVICE_TABLE(pci, cxlflash_pci_table); 321515305514SMatthew R. Ochs 321615305514SMatthew R. Ochs /** 3217c21e0bbfSMatthew R. Ochs * cxlflash_worker_thread() - work thread handler for the AFU 3218c21e0bbfSMatthew R. Ochs * @work: Work structure contained within cxlflash associated with host. 3219c21e0bbfSMatthew R. Ochs * 3220c21e0bbfSMatthew R. Ochs * Handles the following events: 3221c21e0bbfSMatthew R. Ochs * - Link reset which cannot be performed on interrupt context due to 3222c21e0bbfSMatthew R. Ochs * blocking up to a few seconds 3223ef51074aSMatthew R. Ochs * - Rescan the host 3224c21e0bbfSMatthew R. Ochs */ 3225c21e0bbfSMatthew R. Ochs static void cxlflash_worker_thread(struct work_struct *work) 3226c21e0bbfSMatthew R. Ochs { 32275cdac81aSMatthew R. Ochs struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg, 32285cdac81aSMatthew R. Ochs work_q); 3229c21e0bbfSMatthew R. Ochs struct afu *afu = cfg->afu; 32304392ba49SMatthew R. Ochs struct device *dev = &cfg->dev->dev; 32310aa14887SMatthew R. Ochs __be64 __iomem *fc_port_regs; 3232c21e0bbfSMatthew R. Ochs int port; 3233c21e0bbfSMatthew R. Ochs ulong lock_flags; 3234c21e0bbfSMatthew R. Ochs 32355cdac81aSMatthew R. Ochs /* Avoid MMIO if the device has failed */ 32365cdac81aSMatthew R. Ochs 32375cdac81aSMatthew R. Ochs if (cfg->state != STATE_NORMAL) 32385cdac81aSMatthew R. Ochs return; 32395cdac81aSMatthew R. Ochs 3240c21e0bbfSMatthew R. Ochs spin_lock_irqsave(cfg->host->host_lock, lock_flags); 3241c21e0bbfSMatthew R. 
Ochs 3242c21e0bbfSMatthew R. Ochs if (cfg->lr_state == LINK_RESET_REQUIRED) { 3243c21e0bbfSMatthew R. Ochs port = cfg->lr_port; 3244c21e0bbfSMatthew R. Ochs if (port < 0) 32454392ba49SMatthew R. Ochs dev_err(dev, "%s: invalid port index %d\n", 32464392ba49SMatthew R. Ochs __func__, port); 3247c21e0bbfSMatthew R. Ochs else { 3248c21e0bbfSMatthew R. Ochs spin_unlock_irqrestore(cfg->host->host_lock, 3249c21e0bbfSMatthew R. Ochs lock_flags); 3250c21e0bbfSMatthew R. Ochs 3251c21e0bbfSMatthew R. Ochs /* The reset can block... */ 32520aa14887SMatthew R. Ochs fc_port_regs = get_fc_port_regs(cfg, port); 32530aa14887SMatthew R. Ochs afu_link_reset(afu, port, fc_port_regs); 3254c21e0bbfSMatthew R. Ochs spin_lock_irqsave(cfg->host->host_lock, lock_flags); 3255c21e0bbfSMatthew R. Ochs } 3256c21e0bbfSMatthew R. Ochs 3257c21e0bbfSMatthew R. Ochs cfg->lr_state = LINK_RESET_COMPLETE; 3258c21e0bbfSMatthew R. Ochs } 3259c21e0bbfSMatthew R. Ochs 3260c21e0bbfSMatthew R. Ochs spin_unlock_irqrestore(cfg->host->host_lock, lock_flags); 3261ef51074aSMatthew R. Ochs 3262ef51074aSMatthew R. Ochs if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0) 3263ef51074aSMatthew R. Ochs scsi_scan_host(cfg->host); 3264c21e0bbfSMatthew R. Ochs } 3265c21e0bbfSMatthew R. Ochs 3266c21e0bbfSMatthew R. Ochs /** 3267a834a36bSUma Krishnan * cxlflash_chr_open() - character device open handler 3268a834a36bSUma Krishnan * @inode: Device inode associated with this character device. 3269a834a36bSUma Krishnan * @file: File pointer for this device. 3270a834a36bSUma Krishnan * 3271a834a36bSUma Krishnan * Only users with admin privileges are allowed to open the character device. 3272a834a36bSUma Krishnan * 3273a834a36bSUma Krishnan * Return: 0 on success, -errno on failure 3274a834a36bSUma Krishnan */ 3275a834a36bSUma Krishnan static int cxlflash_chr_open(struct inode *inode, struct file *file) 3276a834a36bSUma Krishnan { 3277a834a36bSUma Krishnan struct cxlflash_cfg *cfg; 3278a834a36bSUma Krishnan 3279a834a36bSUma Krishnan if (!capable(CAP_SYS_ADMIN)) 3280a834a36bSUma Krishnan return -EACCES; 3281a834a36bSUma Krishnan 3282a834a36bSUma Krishnan cfg = container_of(inode->i_cdev, struct cxlflash_cfg, cdev); 3283a834a36bSUma Krishnan file->private_data = cfg; 3284a834a36bSUma Krishnan 3285a834a36bSUma Krishnan return 0; 3286a834a36bSUma Krishnan } 3287a834a36bSUma Krishnan 3288d6e32f53SMatthew R. Ochs /** 3289d6e32f53SMatthew R. Ochs * decode_hioctl() - translates encoded host ioctl to easily identifiable string 3290d6e32f53SMatthew R. Ochs * @cmd: The host ioctl command to decode. 3291d6e32f53SMatthew R. Ochs * 3292d6e32f53SMatthew R. Ochs * Return: A string identifying the decoded host ioctl. 3293d6e32f53SMatthew R. Ochs */ 3294d6e32f53SMatthew R. Ochs static char *decode_hioctl(int cmd) 3295d6e32f53SMatthew R. Ochs { 3296d6e32f53SMatthew R. Ochs switch (cmd) { 32979cf43a36SMatthew R. Ochs case HT_CXLFLASH_LUN_PROVISION: 32989cf43a36SMatthew R. Ochs return __stringify_1(HT_CXLFLASH_LUN_PROVISION); 3299d6e32f53SMatthew R. Ochs } 3300d6e32f53SMatthew R. Ochs 3301d6e32f53SMatthew R. Ochs return "UNKNOWN"; 3302d6e32f53SMatthew R. Ochs } 3303d6e32f53SMatthew R. Ochs 3304d6e32f53SMatthew R. Ochs /** 33059cf43a36SMatthew R. Ochs * cxlflash_lun_provision() - host LUN provisioning handler 33069cf43a36SMatthew R. Ochs * @cfg: Internal structure associated with the host. 33079cf43a36SMatthew R. Ochs * @arg: Kernel copy of userspace ioctl data structure. 33089cf43a36SMatthew R. Ochs * 33099cf43a36SMatthew R. 
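 * Handles the create and delete subcommands by issuing a LUN provision
 * command to the AFU; the query-port subcommand is serviced directly from
 * the FC port capacity registers.
 *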
Ochs * Return: 0 on success, -errno on failure 33109cf43a36SMatthew R. Ochs */ 33119cf43a36SMatthew R. Ochs static int cxlflash_lun_provision(struct cxlflash_cfg *cfg, 33129cf43a36SMatthew R. Ochs struct ht_cxlflash_lun_provision *lunprov) 33139cf43a36SMatthew R. Ochs { 33149cf43a36SMatthew R. Ochs struct afu *afu = cfg->afu; 33159cf43a36SMatthew R. Ochs struct device *dev = &cfg->dev->dev; 33169cf43a36SMatthew R. Ochs struct sisl_ioarcb rcb; 33179cf43a36SMatthew R. Ochs struct sisl_ioasa asa; 33189cf43a36SMatthew R. Ochs __be64 __iomem *fc_port_regs; 33199cf43a36SMatthew R. Ochs u16 port = lunprov->port; 33209cf43a36SMatthew R. Ochs u16 scmd = lunprov->hdr.subcmd; 33219cf43a36SMatthew R. Ochs u16 type; 33229cf43a36SMatthew R. Ochs u64 reg; 33239cf43a36SMatthew R. Ochs u64 size; 33249cf43a36SMatthew R. Ochs u64 lun_id; 33259cf43a36SMatthew R. Ochs int rc = 0; 33269cf43a36SMatthew R. Ochs 33279cf43a36SMatthew R. Ochs if (!afu_is_lun_provision(afu)) { 33289cf43a36SMatthew R. Ochs rc = -ENOTSUPP; 33299cf43a36SMatthew R. Ochs goto out; 33309cf43a36SMatthew R. Ochs } 33319cf43a36SMatthew R. Ochs 33329cf43a36SMatthew R. Ochs if (port >= cfg->num_fc_ports) { 33339cf43a36SMatthew R. Ochs rc = -EINVAL; 33349cf43a36SMatthew R. Ochs goto out; 33359cf43a36SMatthew R. Ochs } 33369cf43a36SMatthew R. Ochs 33379cf43a36SMatthew R. Ochs switch (scmd) { 33389cf43a36SMatthew R. Ochs case HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN: 33399cf43a36SMatthew R. Ochs type = SISL_AFU_LUN_PROVISION_CREATE; 33409cf43a36SMatthew R. Ochs size = lunprov->size; 33419cf43a36SMatthew R. Ochs lun_id = 0; 33429cf43a36SMatthew R. Ochs break; 33439cf43a36SMatthew R. Ochs case HT_CXLFLASH_LUN_PROVISION_SUBCMD_DELETE_LUN: 33449cf43a36SMatthew R. Ochs type = SISL_AFU_LUN_PROVISION_DELETE; 33459cf43a36SMatthew R. Ochs size = 0; 33469cf43a36SMatthew R. Ochs lun_id = lunprov->lun_id; 33479cf43a36SMatthew R. Ochs break; 33489cf43a36SMatthew R. Ochs case HT_CXLFLASH_LUN_PROVISION_SUBCMD_QUERY_PORT: 33499cf43a36SMatthew R. Ochs fc_port_regs = get_fc_port_regs(cfg, port); 33509cf43a36SMatthew R. Ochs 33519cf43a36SMatthew R. Ochs reg = readq_be(&fc_port_regs[FC_MAX_NUM_LUNS / 8]); 33529cf43a36SMatthew R. Ochs lunprov->max_num_luns = reg; 33539cf43a36SMatthew R. Ochs reg = readq_be(&fc_port_regs[FC_CUR_NUM_LUNS / 8]); 33549cf43a36SMatthew R. Ochs lunprov->cur_num_luns = reg; 33559cf43a36SMatthew R. Ochs reg = readq_be(&fc_port_regs[FC_MAX_CAP_PORT / 8]); 33569cf43a36SMatthew R. Ochs lunprov->max_cap_port = reg; 33579cf43a36SMatthew R. Ochs reg = readq_be(&fc_port_regs[FC_CUR_CAP_PORT / 8]); 33589cf43a36SMatthew R. Ochs lunprov->cur_cap_port = reg; 33599cf43a36SMatthew R. Ochs 33609cf43a36SMatthew R. Ochs goto out; 33619cf43a36SMatthew R. Ochs default: 33629cf43a36SMatthew R. Ochs rc = -EINVAL; 33639cf43a36SMatthew R. Ochs goto out; 33649cf43a36SMatthew R. Ochs } 33659cf43a36SMatthew R. Ochs 33669cf43a36SMatthew R. Ochs memset(&rcb, 0, sizeof(rcb)); 33679cf43a36SMatthew R. Ochs memset(&asa, 0, sizeof(asa)); 33689cf43a36SMatthew R. Ochs rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD; 33699cf43a36SMatthew R. Ochs rcb.lun_id = lun_id; 33709cf43a36SMatthew R. Ochs rcb.msi = SISL_MSI_RRQ_UPDATED; 33719cf43a36SMatthew R. Ochs rcb.timeout = MC_LUN_PROV_TIMEOUT; 33729cf43a36SMatthew R. Ochs rcb.ioasa = &asa; 33739cf43a36SMatthew R. Ochs 33749cf43a36SMatthew R. Ochs rcb.cdb[0] = SISL_AFU_CMD_LUN_PROVISION; 33759cf43a36SMatthew R. Ochs rcb.cdb[1] = type; 33769cf43a36SMatthew R. Ochs rcb.cdb[2] = port; 33779cf43a36SMatthew R. 
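/* CDB layout: byte 0 = provision opcode, byte 1 = subcommand type, byte 2 = port; the 64-bit size is stored big-endian at byte 8 below */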
Ochs put_unaligned_be64(size, &rcb.cdb[8]); 33789cf43a36SMatthew R. Ochs 33799cf43a36SMatthew R. Ochs rc = send_afu_cmd(afu, &rcb); 33809cf43a36SMatthew R. Ochs if (rc) { 33819cf43a36SMatthew R. Ochs dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n", 33829cf43a36SMatthew R. Ochs __func__, rc, asa.ioasc, asa.afu_extra); 33839cf43a36SMatthew R. Ochs goto out; 33849cf43a36SMatthew R. Ochs } 33859cf43a36SMatthew R. Ochs 33869cf43a36SMatthew R. Ochs if (scmd == HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN) { 33879cf43a36SMatthew R. Ochs lunprov->lun_id = (u64)asa.lunid_hi << 32 | asa.lunid_lo; 33889cf43a36SMatthew R. Ochs memcpy(lunprov->wwid, asa.wwid, sizeof(lunprov->wwid)); 33899cf43a36SMatthew R. Ochs } 33909cf43a36SMatthew R. Ochs out: 33919cf43a36SMatthew R. Ochs dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 33929cf43a36SMatthew R. Ochs return rc; 33939cf43a36SMatthew R. Ochs } 33949cf43a36SMatthew R. Ochs 33959cf43a36SMatthew R. Ochs /** 3396bc88ac47SMatthew R. Ochs * cxlflash_afu_debug() - host AFU debug handler 3397bc88ac47SMatthew R. Ochs * @cfg: Internal structure associated with the host. 3398bc88ac47SMatthew R. Ochs * @arg: Kernel copy of userspace ioctl data structure. 3399bc88ac47SMatthew R. Ochs * 3400bc88ac47SMatthew R. Ochs * For debug requests requiring a data buffer, always provide an aligned 3401bc88ac47SMatthew R. Ochs * (cache line) buffer to the AFU to appease any alignment requirements. 3402bc88ac47SMatthew R. Ochs * 3403bc88ac47SMatthew R. Ochs * Return: 0 on success, -errno on failure 3404bc88ac47SMatthew R. Ochs */ 3405bc88ac47SMatthew R. Ochs static int cxlflash_afu_debug(struct cxlflash_cfg *cfg, 3406bc88ac47SMatthew R. Ochs struct ht_cxlflash_afu_debug *afu_dbg) 3407bc88ac47SMatthew R. Ochs { 3408bc88ac47SMatthew R. Ochs struct afu *afu = cfg->afu; 3409bc88ac47SMatthew R. Ochs struct device *dev = &cfg->dev->dev; 3410bc88ac47SMatthew R. Ochs struct sisl_ioarcb rcb; 3411bc88ac47SMatthew R. Ochs struct sisl_ioasa asa; 3412bc88ac47SMatthew R. Ochs char *buf = NULL; 3413bc88ac47SMatthew R. Ochs char *kbuf = NULL; 3414bc88ac47SMatthew R. Ochs void __user *ubuf = (__force void __user *)afu_dbg->data_ea; 3415bc88ac47SMatthew R. Ochs u16 req_flags = SISL_REQ_FLAGS_AFU_CMD; 3416bc88ac47SMatthew R. Ochs u32 ulen = afu_dbg->data_len; 3417bc88ac47SMatthew R. Ochs bool is_write = afu_dbg->hdr.flags & HT_CXLFLASH_HOST_WRITE; 3418bc88ac47SMatthew R. Ochs int rc = 0; 3419bc88ac47SMatthew R. Ochs 3420bc88ac47SMatthew R. Ochs if (!afu_is_afu_debug(afu)) { 3421bc88ac47SMatthew R. Ochs rc = -ENOTSUPP; 3422bc88ac47SMatthew R. Ochs goto out; 3423bc88ac47SMatthew R. Ochs } 3424bc88ac47SMatthew R. Ochs 3425bc88ac47SMatthew R. Ochs if (ulen) { 3426bc88ac47SMatthew R. Ochs req_flags |= SISL_REQ_FLAGS_SUP_UNDERRUN; 3427bc88ac47SMatthew R. Ochs 3428bc88ac47SMatthew R. Ochs if (ulen > HT_CXLFLASH_AFU_DEBUG_MAX_DATA_LEN) { 3429bc88ac47SMatthew R. Ochs rc = -EINVAL; 3430bc88ac47SMatthew R. Ochs goto out; 3431bc88ac47SMatthew R. Ochs } 3432bc88ac47SMatthew R. Ochs 3433bc88ac47SMatthew R. Ochs buf = kmalloc(ulen + cache_line_size() - 1, GFP_KERNEL); 3434bc88ac47SMatthew R. Ochs if (unlikely(!buf)) { 3435bc88ac47SMatthew R. Ochs rc = -ENOMEM; 3436bc88ac47SMatthew R. Ochs goto out; 3437bc88ac47SMatthew R. Ochs } 3438bc88ac47SMatthew R. Ochs 3439bc88ac47SMatthew R. Ochs kbuf = PTR_ALIGN(buf, cache_line_size()); 3440bc88ac47SMatthew R. Ochs 3441bc88ac47SMatthew R. Ochs if (is_write) { 3442bc88ac47SMatthew R. Ochs req_flags |= SISL_REQ_FLAGS_HOST_WRITE; 3443bc88ac47SMatthew R. 
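/* Writes stage the user data through the cache-line aligned kernel bounce buffer before the command is issued */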
Ochs 3444eeac8cdaSDan Carpenter if (copy_from_user(kbuf, ubuf, ulen)) { 3445eeac8cdaSDan Carpenter rc = -EFAULT; 3446bc88ac47SMatthew R. Ochs goto out; 3447bc88ac47SMatthew R. Ochs } 3448bc88ac47SMatthew R. Ochs } 3449eeac8cdaSDan Carpenter } 3450bc88ac47SMatthew R. Ochs 3451bc88ac47SMatthew R. Ochs memset(&rcb, 0, sizeof(rcb)); 3452bc88ac47SMatthew R. Ochs memset(&asa, 0, sizeof(asa)); 3453bc88ac47SMatthew R. Ochs 3454bc88ac47SMatthew R. Ochs rcb.req_flags = req_flags; 3455bc88ac47SMatthew R. Ochs rcb.msi = SISL_MSI_RRQ_UPDATED; 3456bc88ac47SMatthew R. Ochs rcb.timeout = MC_AFU_DEBUG_TIMEOUT; 3457bc88ac47SMatthew R. Ochs rcb.ioasa = &asa; 3458bc88ac47SMatthew R. Ochs 3459bc88ac47SMatthew R. Ochs if (ulen) { 3460bc88ac47SMatthew R. Ochs rcb.data_len = ulen; 3461bc88ac47SMatthew R. Ochs rcb.data_ea = (uintptr_t)kbuf; 3462bc88ac47SMatthew R. Ochs } 3463bc88ac47SMatthew R. Ochs 3464bc88ac47SMatthew R. Ochs rcb.cdb[0] = SISL_AFU_CMD_DEBUG; 3465bc88ac47SMatthew R. Ochs memcpy(&rcb.cdb[4], afu_dbg->afu_subcmd, 3466bc88ac47SMatthew R. Ochs HT_CXLFLASH_AFU_DEBUG_SUBCMD_LEN); 3467bc88ac47SMatthew R. Ochs 3468bc88ac47SMatthew R. Ochs rc = send_afu_cmd(afu, &rcb); 3469bc88ac47SMatthew R. Ochs if (rc) { 3470bc88ac47SMatthew R. Ochs dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n", 3471bc88ac47SMatthew R. Ochs __func__, rc, asa.ioasc, asa.afu_extra); 3472bc88ac47SMatthew R. Ochs goto out; 3473bc88ac47SMatthew R. Ochs } 3474bc88ac47SMatthew R. Ochs 3475eeac8cdaSDan Carpenter if (ulen && !is_write) { 3476eeac8cdaSDan Carpenter if (copy_to_user(ubuf, kbuf, ulen)) 3477eeac8cdaSDan Carpenter rc = -EFAULT; 3478eeac8cdaSDan Carpenter } 3479bc88ac47SMatthew R. Ochs out: 3480bc88ac47SMatthew R. Ochs kfree(buf); 3481bc88ac47SMatthew R. Ochs dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 3482bc88ac47SMatthew R. Ochs return rc; 3483bc88ac47SMatthew R. Ochs } 3484bc88ac47SMatthew R. Ochs 3485bc88ac47SMatthew R. Ochs /** 3486d6e32f53SMatthew R. Ochs * cxlflash_chr_ioctl() - character device IOCTL handler 3487d6e32f53SMatthew R. Ochs * @file: File pointer for this device. 3488d6e32f53SMatthew R. Ochs * @cmd: IOCTL command. 3489d6e32f53SMatthew R. Ochs * @arg: Userspace ioctl data structure. 3490d6e32f53SMatthew R. Ochs * 3491d6e32f53SMatthew R. Ochs * A read/write semaphore is used to implement a 'drain' of currently 3492d6e32f53SMatthew R. Ochs * running ioctls. The read semaphore is taken at the beginning of each 3493d6e32f53SMatthew R. Ochs * ioctl thread and released upon concluding execution. Additionally the 3494d6e32f53SMatthew R. Ochs * semaphore should be released and then reacquired in any ioctl execution 3495d6e32f53SMatthew R. Ochs * path which will wait for an event to occur that is outside the scope of 3496d6e32f53SMatthew R. Ochs * the ioctl (i.e. an adapter reset). To drain the ioctls currently running, 3497d6e32f53SMatthew R. Ochs * a thread simply needs to acquire the write semaphore. 3498d6e32f53SMatthew R. Ochs * 3499d6e32f53SMatthew R. Ochs * Return: 0 on success, -errno on failure 3500d6e32f53SMatthew R. Ochs */ 3501d6e32f53SMatthew R. Ochs static long cxlflash_chr_ioctl(struct file *file, unsigned int cmd, 3502d6e32f53SMatthew R. Ochs unsigned long arg) 3503d6e32f53SMatthew R. Ochs { 3504d6e32f53SMatthew R. Ochs typedef int (*hioctl) (struct cxlflash_cfg *, void *); 3505d6e32f53SMatthew R. Ochs 3506d6e32f53SMatthew R. Ochs struct cxlflash_cfg *cfg = file->private_data; 3507d6e32f53SMatthew R. Ochs struct device *dev = &cfg->dev->dev; 3508d6e32f53SMatthew R. 
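/* Stack copy large enough for the biggest host ioctl payload */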
Ochs char buf[sizeof(union cxlflash_ht_ioctls)]; 3509d6e32f53SMatthew R. Ochs void __user *uarg = (void __user *)arg; 3510d6e32f53SMatthew R. Ochs struct ht_cxlflash_hdr *hdr; 3511d6e32f53SMatthew R. Ochs size_t size = 0; 3512d6e32f53SMatthew R. Ochs bool known_ioctl = false; 3513d6e32f53SMatthew R. Ochs int idx = 0; 3514d6e32f53SMatthew R. Ochs int rc = 0; 3515d6e32f53SMatthew R. Ochs hioctl do_ioctl = NULL; 3516d6e32f53SMatthew R. Ochs 3517d6e32f53SMatthew R. Ochs static const struct { 3518d6e32f53SMatthew R. Ochs size_t size; 3519d6e32f53SMatthew R. Ochs hioctl ioctl; 3520d6e32f53SMatthew R. Ochs } ioctl_tbl[] = { /* NOTE: order matters here */ 35219cf43a36SMatthew R. Ochs { sizeof(struct ht_cxlflash_lun_provision), 35229cf43a36SMatthew R. Ochs (hioctl)cxlflash_lun_provision }, 3523bc88ac47SMatthew R. Ochs { sizeof(struct ht_cxlflash_afu_debug), 3524bc88ac47SMatthew R. Ochs (hioctl)cxlflash_afu_debug }, 3525d6e32f53SMatthew R. Ochs }; 3526d6e32f53SMatthew R. Ochs 3527d6e32f53SMatthew R. Ochs /* Hold read semaphore so we can drain if needed */ 3528d6e32f53SMatthew R. Ochs down_read(&cfg->ioctl_rwsem); 3529d6e32f53SMatthew R. Ochs 3530d6e32f53SMatthew R. Ochs dev_dbg(dev, "%s: cmd=%u idx=%d tbl_size=%lu\n", 3531d6e32f53SMatthew R. Ochs __func__, cmd, idx, sizeof(ioctl_tbl)); 3532d6e32f53SMatthew R. Ochs 3533d6e32f53SMatthew R. Ochs switch (cmd) { 35349cf43a36SMatthew R. Ochs case HT_CXLFLASH_LUN_PROVISION: 3535bc88ac47SMatthew R. Ochs case HT_CXLFLASH_AFU_DEBUG: 35369cf43a36SMatthew R. Ochs known_ioctl = true; 35379cf43a36SMatthew R. Ochs idx = _IOC_NR(cmd) - _IOC_NR(HT_CXLFLASH_LUN_PROVISION); 35389cf43a36SMatthew R. Ochs size = ioctl_tbl[idx].size; 35399cf43a36SMatthew R. Ochs do_ioctl = ioctl_tbl[idx].ioctl; 35409cf43a36SMatthew R. Ochs 35419cf43a36SMatthew R. Ochs if (likely(do_ioctl)) 35429cf43a36SMatthew R. Ochs break; 35439cf43a36SMatthew R. Ochs 35449cf43a36SMatthew R. Ochs /* fall through */ 3545d6e32f53SMatthew R. Ochs default: 3546d6e32f53SMatthew R. Ochs rc = -EINVAL; 3547d6e32f53SMatthew R. Ochs goto out; 3548d6e32f53SMatthew R. Ochs } 3549d6e32f53SMatthew R. Ochs 3550d6e32f53SMatthew R. Ochs if (unlikely(copy_from_user(&buf, uarg, size))) { 3551d6e32f53SMatthew R. Ochs dev_err(dev, "%s: copy_from_user() fail " 3552d6e32f53SMatthew R. Ochs "size=%lu cmd=%d (%s) uarg=%p\n", 3553d6e32f53SMatthew R. Ochs __func__, size, cmd, decode_hioctl(cmd), uarg); 3554d6e32f53SMatthew R. Ochs rc = -EFAULT; 3555d6e32f53SMatthew R. Ochs goto out; 3556d6e32f53SMatthew R. Ochs } 3557d6e32f53SMatthew R. Ochs 3558d6e32f53SMatthew R. Ochs hdr = (struct ht_cxlflash_hdr *)&buf; 3559d6e32f53SMatthew R. Ochs if (hdr->version != HT_CXLFLASH_VERSION_0) { 3560d6e32f53SMatthew R. Ochs dev_dbg(dev, "%s: Version %u not supported for %s\n", 3561d6e32f53SMatthew R. Ochs __func__, hdr->version, decode_hioctl(cmd)); 3562d6e32f53SMatthew R. Ochs rc = -EINVAL; 3563d6e32f53SMatthew R. Ochs goto out; 3564d6e32f53SMatthew R. Ochs } 3565d6e32f53SMatthew R. Ochs 3566d6e32f53SMatthew R. Ochs if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->return_flags) { 3567d6e32f53SMatthew R. Ochs dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__); 3568d6e32f53SMatthew R. Ochs rc = -EINVAL; 3569d6e32f53SMatthew R. Ochs goto out; 3570d6e32f53SMatthew R. Ochs } 3571d6e32f53SMatthew R. Ochs 3572d6e32f53SMatthew R. Ochs rc = do_ioctl(cfg, (void *)&buf); 3573d6e32f53SMatthew R. Ochs if (likely(!rc)) 3574d6e32f53SMatthew R. Ochs if (unlikely(copy_to_user(uarg, &buf, size))) { 3575d6e32f53SMatthew R.
Ochs dev_err(dev, "%s: copy_to_user() fail " 3576d6e32f53SMatthew R. Ochs "size=%lu cmd=%d (%s) uarg=%p\n", 3577d6e32f53SMatthew R. Ochs __func__, size, cmd, decode_hioctl(cmd), uarg); 3578d6e32f53SMatthew R. Ochs rc = -EFAULT; 3579d6e32f53SMatthew R. Ochs } 3580d6e32f53SMatthew R. Ochs 3581d6e32f53SMatthew R. Ochs /* fall through to exit */ 3582d6e32f53SMatthew R. Ochs 3583d6e32f53SMatthew R. Ochs out: 3584d6e32f53SMatthew R. Ochs up_read(&cfg->ioctl_rwsem); 3585d6e32f53SMatthew R. Ochs if (unlikely(rc && known_ioctl)) 3586d6e32f53SMatthew R. Ochs dev_err(dev, "%s: ioctl %s (%08X) returned rc=%d\n", 3587d6e32f53SMatthew R. Ochs __func__, decode_hioctl(cmd), cmd, rc); 3588d6e32f53SMatthew R. Ochs else 3589d6e32f53SMatthew R. Ochs dev_dbg(dev, "%s: ioctl %s (%08X) returned rc=%d\n", 3590d6e32f53SMatthew R. Ochs __func__, decode_hioctl(cmd), cmd, rc); 3591d6e32f53SMatthew R. Ochs return rc; 3592d6e32f53SMatthew R. Ochs } 3593d6e32f53SMatthew R. Ochs 3594a834a36bSUma Krishnan /* 3595a834a36bSUma Krishnan * Character device file operations 3596a834a36bSUma Krishnan */ 3597a834a36bSUma Krishnan static const struct file_operations cxlflash_chr_fops = { 3598a834a36bSUma Krishnan .owner = THIS_MODULE, 3599a834a36bSUma Krishnan .open = cxlflash_chr_open, 3600d6e32f53SMatthew R. Ochs .unlocked_ioctl = cxlflash_chr_ioctl, 3601d6e32f53SMatthew R. Ochs .compat_ioctl = cxlflash_chr_ioctl, 3602a834a36bSUma Krishnan }; 3603a834a36bSUma Krishnan 3604a834a36bSUma Krishnan /** 3605a834a36bSUma Krishnan * init_chrdev() - initialize the character device for the host 3606a834a36bSUma Krishnan * @cfg: Internal structure associated with the host. 3607a834a36bSUma Krishnan * 3608a834a36bSUma Krishnan * Return: 0 on success, -errno on failure 3609a834a36bSUma Krishnan */ 3610a834a36bSUma Krishnan static int init_chrdev(struct cxlflash_cfg *cfg) 3611a834a36bSUma Krishnan { 3612a834a36bSUma Krishnan struct device *dev = &cfg->dev->dev; 3613a834a36bSUma Krishnan struct device *char_dev; 3614a834a36bSUma Krishnan dev_t devno; 3615a834a36bSUma Krishnan int minor; 3616a834a36bSUma Krishnan int rc = 0; 3617a834a36bSUma Krishnan 3618a834a36bSUma Krishnan minor = cxlflash_get_minor(); 3619a834a36bSUma Krishnan if (unlikely(minor < 0)) { 3620a834a36bSUma Krishnan dev_err(dev, "%s: Exhausted allowed adapters\n", __func__); 3621a834a36bSUma Krishnan rc = -ENOSPC; 3622a834a36bSUma Krishnan goto out; 3623a834a36bSUma Krishnan } 3624a834a36bSUma Krishnan 3625a834a36bSUma Krishnan devno = MKDEV(cxlflash_major, minor); 3626a834a36bSUma Krishnan cdev_init(&cfg->cdev, &cxlflash_chr_fops); 3627a834a36bSUma Krishnan 3628a834a36bSUma Krishnan rc = cdev_add(&cfg->cdev, devno, 1); 3629a834a36bSUma Krishnan if (rc) { 3630a834a36bSUma Krishnan dev_err(dev, "%s: cdev_add failed rc=%d\n", __func__, rc); 3631a834a36bSUma Krishnan goto err1; 3632a834a36bSUma Krishnan } 3633a834a36bSUma Krishnan 3634a834a36bSUma Krishnan char_dev = device_create(cxlflash_class, NULL, devno, 3635a834a36bSUma Krishnan NULL, "cxlflash%d", minor); 3636a834a36bSUma Krishnan if (IS_ERR(char_dev)) { 3637a834a36bSUma Krishnan rc = PTR_ERR(char_dev); 3638a834a36bSUma Krishnan dev_err(dev, "%s: device_create failed rc=%d\n", 3639a834a36bSUma Krishnan __func__, rc); 3640a834a36bSUma Krishnan goto err2; 3641a834a36bSUma Krishnan } 3642a834a36bSUma Krishnan 3643a834a36bSUma Krishnan cfg->chardev = char_dev; 3644a834a36bSUma Krishnan out: 3645a834a36bSUma Krishnan dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 3646a834a36bSUma Krishnan return rc; 3647a834a36bSUma 
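/* Unwind in reverse order: remove the cdev first, then release the minor */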
Krishnan err2: 3648a834a36bSUma Krishnan cdev_del(&cfg->cdev); 3649a834a36bSUma Krishnan err1: 3650a834a36bSUma Krishnan cxlflash_put_minor(minor); 3651a834a36bSUma Krishnan goto out; 3652a834a36bSUma Krishnan } 3653a834a36bSUma Krishnan 3654a834a36bSUma Krishnan /** 3655c21e0bbfSMatthew R. Ochs * cxlflash_probe() - PCI entry point to add host 3656c21e0bbfSMatthew R. Ochs * @pdev: PCI device associated with the host. 3657c21e0bbfSMatthew R. Ochs * @dev_id: PCI device id associated with device. 3658c21e0bbfSMatthew R. Ochs * 3659323e3342SMatthew R. Ochs * The device will initially start out in a 'probing' state and 3660323e3342SMatthew R. Ochs * transition to the 'normal' state at the end of a successful 3661323e3342SMatthew R. Ochs * probe. Should an EEH event occur during probe, the notification 3662323e3342SMatthew R. Ochs * thread (error_detected()) will wait until the probe handler 3663323e3342SMatthew R. Ochs * is nearly complete. At that time, the device will be moved to 3664323e3342SMatthew R. Ochs * a 'probed' state and the EEH thread woken up to drive the slot 3665323e3342SMatthew R. Ochs * reset and recovery (device moves to 'normal' state). Meanwhile, 3666323e3342SMatthew R. Ochs * the probe will be allowed to exit successfully. 3667323e3342SMatthew R. Ochs * 36681284fb0cSMatthew R. Ochs * Return: 0 on success, -errno on failure 3669c21e0bbfSMatthew R. Ochs */ 3670c21e0bbfSMatthew R. Ochs static int cxlflash_probe(struct pci_dev *pdev, 3671c21e0bbfSMatthew R. Ochs const struct pci_device_id *dev_id) 3672c21e0bbfSMatthew R. Ochs { 3673c21e0bbfSMatthew R. Ochs struct Scsi_Host *host; 3674c21e0bbfSMatthew R. Ochs struct cxlflash_cfg *cfg = NULL; 3675fb67d44dSMatthew R. Ochs struct device *dev = &pdev->dev; 3676c21e0bbfSMatthew R. Ochs struct dev_dependent_vals *ddv; 3677c21e0bbfSMatthew R. Ochs int rc = 0; 367878ae028eSMatthew R. Ochs int k; 3679c21e0bbfSMatthew R. Ochs 3680c21e0bbfSMatthew R. Ochs dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n", 3681c21e0bbfSMatthew R. Ochs __func__, pdev->irq); 3682c21e0bbfSMatthew R. Ochs 3683c21e0bbfSMatthew R. Ochs ddv = (struct dev_dependent_vals *)dev_id->driver_data; 3684c21e0bbfSMatthew R. Ochs driver_template.max_sectors = ddv->max_sectors; 3685c21e0bbfSMatthew R. Ochs 3686c21e0bbfSMatthew R. Ochs host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg)); 3687c21e0bbfSMatthew R. Ochs if (!host) { 3688fb67d44dSMatthew R. Ochs dev_err(dev, "%s: scsi_host_alloc failed\n", __func__); 3689c21e0bbfSMatthew R. Ochs rc = -ENOMEM; 3690c21e0bbfSMatthew R. Ochs goto out; 3691c21e0bbfSMatthew R. Ochs } 3692c21e0bbfSMatthew R. Ochs 3693c21e0bbfSMatthew R. Ochs host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS; 3694c21e0bbfSMatthew R. Ochs host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET; 3695c21e0bbfSMatthew R. Ochs host->unique_id = host->host_no; 3696c21e0bbfSMatthew R. Ochs host->max_cmd_len = CXLFLASH_MAX_CDB_LEN; 3697c21e0bbfSMatthew R. Ochs 3698fb67d44dSMatthew R. Ochs cfg = shost_priv(host); 3699c21e0bbfSMatthew R. Ochs cfg->host = host; 3700c21e0bbfSMatthew R. Ochs rc = alloc_mem(cfg); 3701c21e0bbfSMatthew R. Ochs if (rc) { 3702fb67d44dSMatthew R. Ochs dev_err(dev, "%s: alloc_mem failed\n", __func__); 3703c21e0bbfSMatthew R. Ochs rc = -ENOMEM; 37048b5b1e87SMatthew R. Ochs scsi_host_put(cfg->host); 3705c21e0bbfSMatthew R. Ochs goto out; 3706c21e0bbfSMatthew R. Ochs } 3707c21e0bbfSMatthew R. Ochs 3708c21e0bbfSMatthew R. Ochs cfg->init_state = INIT_STATE_NONE; 3709c21e0bbfSMatthew R. Ochs cfg->dev = pdev; 371017ead26fSMatthew R. 
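/* Legacy CXL file ops are wired up front; the transport ops below are selected from the device-dependent flags (OCXL vs CXL) */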
Ochs cfg->cxl_fops = cxlflash_cxl_fops; 37112cb79266SMatthew R. Ochs 371207d0c52fSUma Krishnan if (ddv->flags & CXLFLASH_OCXL_DEV) 371307d0c52fSUma Krishnan cfg->ops = &cxlflash_ocxl_ops; 371407d0c52fSUma Krishnan else 371507d0c52fSUma Krishnan cfg->ops = &cxlflash_cxl_ops; 371607d0c52fSUma Krishnan 37172cb79266SMatthew R. Ochs /* 371878ae028eSMatthew R. Ochs * Promoted LUNs move to the top of the LUN table. The rest stay on 371978ae028eSMatthew R. Ochs * the bottom half. The bottom half grows from the end (index = 255), 372078ae028eSMatthew R. Ochs * whereas the top half grows from the beginning (index = 0). 372178ae028eSMatthew R. Ochs * 372278ae028eSMatthew R. Ochs * Initialize the last LUN index for all possible ports. 37232cb79266SMatthew R. Ochs */ 37242cb79266SMatthew R. Ochs cfg->promote_lun_index = 0; 372578ae028eSMatthew R. Ochs 372678ae028eSMatthew R. Ochs for (k = 0; k < MAX_FC_PORTS; k++) 372778ae028eSMatthew R. Ochs cfg->last_lun_index[k] = CXLFLASH_NUM_VLUNS/2 - 1; 37282cb79266SMatthew R. Ochs 3729c21e0bbfSMatthew R. Ochs cfg->dev_id = (struct pci_device_id *)dev_id; 3730c21e0bbfSMatthew R. Ochs 3731c21e0bbfSMatthew R. Ochs init_waitqueue_head(&cfg->tmf_waitq); 3732439e85c1SMatthew R. Ochs init_waitqueue_head(&cfg->reset_waitq); 3733c21e0bbfSMatthew R. Ochs 3734c21e0bbfSMatthew R. Ochs INIT_WORK(&cfg->work_q, cxlflash_worker_thread); 3735c21e0bbfSMatthew R. Ochs cfg->lr_state = LINK_RESET_INVALID; 3736c21e0bbfSMatthew R. Ochs cfg->lr_port = -1; 37370d73122cSMatthew R. Ochs spin_lock_init(&cfg->tmf_slock); 373865be2c79SMatthew R. Ochs mutex_init(&cfg->ctx_tbl_list_mutex); 373965be2c79SMatthew R. Ochs mutex_init(&cfg->ctx_recovery_mutex); 37400a27ae51SMatthew R. Ochs init_rwsem(&cfg->ioctl_rwsem); 374165be2c79SMatthew R. Ochs INIT_LIST_HEAD(&cfg->ctx_err_recovery); 374265be2c79SMatthew R. Ochs INIT_LIST_HEAD(&cfg->lluns); 3743c21e0bbfSMatthew R. Ochs 3744c21e0bbfSMatthew R. Ochs pci_set_drvdata(pdev, cfg); 3745c21e0bbfSMatthew R. Ochs 3746c21e0bbfSMatthew R. Ochs rc = init_pci(cfg); 3747c21e0bbfSMatthew R. Ochs if (rc) { 3748fb67d44dSMatthew R. Ochs dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc); 3749c21e0bbfSMatthew R. Ochs goto out_remove; 3750c21e0bbfSMatthew R. Ochs } 3751c21e0bbfSMatthew R. Ochs cfg->init_state = INIT_STATE_PCI; 3752c21e0bbfSMatthew R. Ochs 375348e077dbSUma Krishnan cfg->afu_cookie = cfg->ops->create_afu(pdev); 375448e077dbSUma Krishnan if (unlikely(!cfg->afu_cookie)) { 375548e077dbSUma Krishnan dev_err(dev, "%s: create_afu failed\n", __func__); rc = -ENOMEM; 375648e077dbSUma Krishnan goto out_remove; 375748e077dbSUma Krishnan } 375848e077dbSUma Krishnan 3759c21e0bbfSMatthew R. Ochs rc = init_afu(cfg); 3760323e3342SMatthew R. Ochs if (rc && !wq_has_sleeper(&cfg->reset_waitq)) { 3761fb67d44dSMatthew R. Ochs dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc); 3762c21e0bbfSMatthew R. Ochs goto out_remove; 3763c21e0bbfSMatthew R. Ochs } 3764c21e0bbfSMatthew R. Ochs cfg->init_state = INIT_STATE_AFU; 3765c21e0bbfSMatthew R. Ochs 3766c21e0bbfSMatthew R. Ochs rc = init_scsi(cfg); 3767c21e0bbfSMatthew R. Ochs if (rc) { 3768fb67d44dSMatthew R. Ochs dev_err(dev, "%s: init_scsi failed rc=%d\n", __func__, rc); 3769c21e0bbfSMatthew R. Ochs goto out_remove; 3770c21e0bbfSMatthew R. Ochs } 3771c21e0bbfSMatthew R. Ochs cfg->init_state = INIT_STATE_SCSI; 3772c21e0bbfSMatthew R.
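/* init_state advances after each successful stage so that the teardown path only unwinds what was actually set up */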
Ochs 3773a834a36bSUma Krishnan rc = init_chrdev(cfg); 3774a834a36bSUma Krishnan if (rc) { 3775a834a36bSUma Krishnan dev_err(dev, "%s: init_chrdev failed rc=%d\n", __func__, rc); 3776a834a36bSUma Krishnan goto out_remove; 3777a834a36bSUma Krishnan } 3778a834a36bSUma Krishnan cfg->init_state = INIT_STATE_CDEV; 3779a834a36bSUma Krishnan 3780323e3342SMatthew R. Ochs if (wq_has_sleeper(&cfg->reset_waitq)) { 3781323e3342SMatthew R. Ochs cfg->state = STATE_PROBED; 3782323e3342SMatthew R. Ochs wake_up_all(&cfg->reset_waitq); 3783323e3342SMatthew R. Ochs } else 3784323e3342SMatthew R. Ochs cfg->state = STATE_NORMAL; 3785c21e0bbfSMatthew R. Ochs out: 3786fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 3787c21e0bbfSMatthew R. Ochs return rc; 3788c21e0bbfSMatthew R. Ochs 3789c21e0bbfSMatthew R. Ochs out_remove: 3790c21e0bbfSMatthew R. Ochs cxlflash_remove(pdev); 3791c21e0bbfSMatthew R. Ochs goto out; 3792c21e0bbfSMatthew R. Ochs } 3793c21e0bbfSMatthew R. Ochs 37945cdac81aSMatthew R. Ochs /** 37955cdac81aSMatthew R. Ochs * cxlflash_pci_error_detected() - called when a PCI error is detected 37965cdac81aSMatthew R. Ochs * @pdev: PCI device struct. 37975cdac81aSMatthew R. Ochs * @state: PCI channel state. 37985cdac81aSMatthew R. Ochs * 37991d3324c3SMatthew R. Ochs * When an EEH occurs during an active reset, wait until the reset is 38001d3324c3SMatthew R. Ochs * complete and then take action based upon the device state. 38011d3324c3SMatthew R. Ochs * 38025cdac81aSMatthew R. Ochs * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT 38035cdac81aSMatthew R. Ochs */ 38045cdac81aSMatthew R. Ochs static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev, 38055cdac81aSMatthew R. Ochs pci_channel_state_t state) 38065cdac81aSMatthew R. Ochs { 380765be2c79SMatthew R. Ochs int rc = 0; 38085cdac81aSMatthew R. Ochs struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); 38095cdac81aSMatthew R. Ochs struct device *dev = &cfg->dev->dev; 38105cdac81aSMatthew R. Ochs 38115cdac81aSMatthew R. Ochs dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state); 38125cdac81aSMatthew R. Ochs 38135cdac81aSMatthew R. Ochs switch (state) { 38145cdac81aSMatthew R. Ochs case pci_channel_io_frozen: 3815323e3342SMatthew R. Ochs wait_event(cfg->reset_waitq, cfg->state != STATE_RESET && 3816323e3342SMatthew R. Ochs cfg->state != STATE_PROBING); 38171d3324c3SMatthew R. Ochs if (cfg->state == STATE_FAILTERM) 38181d3324c3SMatthew R. Ochs return PCI_ERS_RESULT_DISCONNECT; 38191d3324c3SMatthew R. Ochs 3820439e85c1SMatthew R. Ochs cfg->state = STATE_RESET; 38215cdac81aSMatthew R. Ochs scsi_block_requests(cfg->host); 38220a27ae51SMatthew R. Ochs drain_ioctls(cfg); 382365be2c79SMatthew R. Ochs rc = cxlflash_mark_contexts_error(cfg); 382465be2c79SMatthew R. Ochs if (unlikely(rc)) 3825fb67d44dSMatthew R. Ochs dev_err(dev, "%s: Failed to mark user contexts rc=%d\n", 382665be2c79SMatthew R. Ochs __func__, rc); 38279526f360SManoj N. Kumar term_afu(cfg); 38285cdac81aSMatthew R. Ochs return PCI_ERS_RESULT_NEED_RESET; 38295cdac81aSMatthew R. Ochs case pci_channel_io_perm_failure: 38305cdac81aSMatthew R. Ochs cfg->state = STATE_FAILTERM; 3831439e85c1SMatthew R. Ochs wake_up_all(&cfg->reset_waitq); 38325cdac81aSMatthew R. Ochs scsi_unblock_requests(cfg->host); 38335cdac81aSMatthew R. Ochs return PCI_ERS_RESULT_DISCONNECT; 38345cdac81aSMatthew R. Ochs default: 38355cdac81aSMatthew R. Ochs break; 38365cdac81aSMatthew R. Ochs } 38375cdac81aSMatthew R. 
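/* Any other channel state simply requests a slot reset */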
Ochs return PCI_ERS_RESULT_NEED_RESET; 38385cdac81aSMatthew R. Ochs } 38395cdac81aSMatthew R. Ochs 38405cdac81aSMatthew R. Ochs /** 38415cdac81aSMatthew R. Ochs * cxlflash_pci_slot_reset() - called when PCI slot has been reset 38425cdac81aSMatthew R. Ochs * @pdev: PCI device struct. 38435cdac81aSMatthew R. Ochs * 38445cdac81aSMatthew R. Ochs * This routine is called by the pci error recovery code after the PCI 38455cdac81aSMatthew R. Ochs * slot has been reset, just before we should resume normal operations. 38465cdac81aSMatthew R. Ochs * 38475cdac81aSMatthew R. Ochs * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT 38485cdac81aSMatthew R. Ochs */ 38495cdac81aSMatthew R. Ochs static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev) 38505cdac81aSMatthew R. Ochs { 38515cdac81aSMatthew R. Ochs int rc = 0; 38525cdac81aSMatthew R. Ochs struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); 38535cdac81aSMatthew R. Ochs struct device *dev = &cfg->dev->dev; 38545cdac81aSMatthew R. Ochs 38555cdac81aSMatthew R. Ochs dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev); 38565cdac81aSMatthew R. Ochs 38575cdac81aSMatthew R. Ochs rc = init_afu(cfg); 38585cdac81aSMatthew R. Ochs if (unlikely(rc)) { 3859fb67d44dSMatthew R. Ochs dev_err(dev, "%s: EEH recovery failed rc=%d\n", __func__, rc); 38605cdac81aSMatthew R. Ochs return PCI_ERS_RESULT_DISCONNECT; 38615cdac81aSMatthew R. Ochs } 38625cdac81aSMatthew R. Ochs 38635cdac81aSMatthew R. Ochs return PCI_ERS_RESULT_RECOVERED; 38645cdac81aSMatthew R. Ochs } 38655cdac81aSMatthew R. Ochs 38665cdac81aSMatthew R. Ochs /** 38675cdac81aSMatthew R. Ochs * cxlflash_pci_resume() - called when normal operation can resume 38685cdac81aSMatthew R. Ochs * @pdev: PCI device struct 38695cdac81aSMatthew R. Ochs */ 38705cdac81aSMatthew R. Ochs static void cxlflash_pci_resume(struct pci_dev *pdev) 38715cdac81aSMatthew R. Ochs { 38725cdac81aSMatthew R. Ochs struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); 38735cdac81aSMatthew R. Ochs struct device *dev = &cfg->dev->dev; 38745cdac81aSMatthew R. Ochs 38755cdac81aSMatthew R. Ochs dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev); 38765cdac81aSMatthew R. Ochs 38775cdac81aSMatthew R. Ochs cfg->state = STATE_NORMAL; 3878439e85c1SMatthew R. Ochs wake_up_all(&cfg->reset_waitq); 38795cdac81aSMatthew R. Ochs scsi_unblock_requests(cfg->host); 38805cdac81aSMatthew R. Ochs } 38815cdac81aSMatthew R. Ochs 3882a834a36bSUma Krishnan /** 3883a834a36bSUma Krishnan * cxlflash_devnode() - provides devtmpfs for devices in the cxlflash class 3884a834a36bSUma Krishnan * @dev: Character device. 3885a834a36bSUma Krishnan * @mode: Mode that can be used to verify access. 3886a834a36bSUma Krishnan * 3887a834a36bSUma Krishnan * Return: Allocated string describing the devtmpfs structure. 
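 * Nodes surface under devtmpfs as /dev/cxlflash/<name>, e.g.
 * /dev/cxlflash/cxlflash0 for the first adapter.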
3888a834a36bSUma Krishnan */ 3889a834a36bSUma Krishnan static char *cxlflash_devnode(struct device *dev, umode_t *mode) 3890a834a36bSUma Krishnan { 3891a834a36bSUma Krishnan return kasprintf(GFP_KERNEL, "cxlflash/%s", dev_name(dev)); 3892a834a36bSUma Krishnan } 3893a834a36bSUma Krishnan 3894a834a36bSUma Krishnan /** 3895a834a36bSUma Krishnan * cxlflash_class_init() - create character device class 3896a834a36bSUma Krishnan * 3897a834a36bSUma Krishnan * Return: 0 on success, -errno on failure 3898a834a36bSUma Krishnan */ 3899a834a36bSUma Krishnan static int cxlflash_class_init(void) 3900a834a36bSUma Krishnan { 3901a834a36bSUma Krishnan dev_t devno; 3902a834a36bSUma Krishnan int rc = 0; 3903a834a36bSUma Krishnan 3904a834a36bSUma Krishnan rc = alloc_chrdev_region(&devno, 0, CXLFLASH_MAX_ADAPTERS, "cxlflash"); 3905a834a36bSUma Krishnan if (unlikely(rc)) { 3906a834a36bSUma Krishnan pr_err("%s: alloc_chrdev_region failed rc=%d\n", __func__, rc); 3907a834a36bSUma Krishnan goto out; 3908a834a36bSUma Krishnan } 3909a834a36bSUma Krishnan 3910a834a36bSUma Krishnan cxlflash_major = MAJOR(devno); 3911a834a36bSUma Krishnan 3912a834a36bSUma Krishnan cxlflash_class = class_create(THIS_MODULE, "cxlflash"); 3913a834a36bSUma Krishnan if (IS_ERR(cxlflash_class)) { 3914a834a36bSUma Krishnan rc = PTR_ERR(cxlflash_class); 3915a834a36bSUma Krishnan pr_err("%s: class_create failed rc=%d\n", __func__, rc); 3916a834a36bSUma Krishnan goto err; 3917a834a36bSUma Krishnan } 3918a834a36bSUma Krishnan 3919a834a36bSUma Krishnan cxlflash_class->devnode = cxlflash_devnode; 3920a834a36bSUma Krishnan out: 3921a834a36bSUma Krishnan pr_debug("%s: returning rc=%d\n", __func__, rc); 3922a834a36bSUma Krishnan return rc; 3923a834a36bSUma Krishnan err: 3924a834a36bSUma Krishnan unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS); 3925a834a36bSUma Krishnan goto out; 3926a834a36bSUma Krishnan } 3927a834a36bSUma Krishnan 3928a834a36bSUma Krishnan /** 3929a834a36bSUma Krishnan * cxlflash_class_exit() - destroy character device class 3930a834a36bSUma Krishnan */ 3931a834a36bSUma Krishnan static void cxlflash_class_exit(void) 3932a834a36bSUma Krishnan { 3933a834a36bSUma Krishnan dev_t devno = MKDEV(cxlflash_major, 0); 3934a834a36bSUma Krishnan 3935a834a36bSUma Krishnan class_destroy(cxlflash_class); 3936a834a36bSUma Krishnan unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS); 3937a834a36bSUma Krishnan } 3938a834a36bSUma Krishnan 39395cdac81aSMatthew R. Ochs static const struct pci_error_handlers cxlflash_err_handler = { 39405cdac81aSMatthew R. Ochs .error_detected = cxlflash_pci_error_detected, 39415cdac81aSMatthew R. Ochs .slot_reset = cxlflash_pci_slot_reset, 39425cdac81aSMatthew R. Ochs .resume = cxlflash_pci_resume, 39435cdac81aSMatthew R. Ochs }; 39445cdac81aSMatthew R. Ochs 3945c21e0bbfSMatthew R. Ochs /* 3946c21e0bbfSMatthew R. Ochs * PCI device structure 3947c21e0bbfSMatthew R. Ochs */ 3948c21e0bbfSMatthew R. Ochs static struct pci_driver cxlflash_driver = { 3949c21e0bbfSMatthew R. Ochs .name = CXLFLASH_NAME, 3950c21e0bbfSMatthew R. Ochs .id_table = cxlflash_pci_table, 3951c21e0bbfSMatthew R. Ochs .probe = cxlflash_probe, 3952c21e0bbfSMatthew R. Ochs .remove = cxlflash_remove, 3953babf985dSUma Krishnan .shutdown = cxlflash_remove, 39545cdac81aSMatthew R. Ochs .err_handler = &cxlflash_err_handler, 3955c21e0bbfSMatthew R. Ochs }; 3956c21e0bbfSMatthew R. Ochs 3957c21e0bbfSMatthew R. Ochs /** 3958c21e0bbfSMatthew R. Ochs * init_cxlflash() - module entry point 3959c21e0bbfSMatthew R. Ochs * 39601284fb0cSMatthew R. 
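 * The character device class is created before the PCI driver is
 * registered so that probe() can populate a per-adapter character device.
 *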
Ochs * Return: 0 on success, -errno on failure 3961c21e0bbfSMatthew R. Ochs */ 3962c21e0bbfSMatthew R. Ochs static int __init init_cxlflash(void) 3963c21e0bbfSMatthew R. Ochs { 3964a834a36bSUma Krishnan int rc; 3965a834a36bSUma Krishnan 3966cd41e18dSMatthew R. Ochs check_sizes(); 396765be2c79SMatthew R. Ochs cxlflash_list_init(); 3968a834a36bSUma Krishnan rc = cxlflash_class_init(); 3969a834a36bSUma Krishnan if (unlikely(rc)) 3970a834a36bSUma Krishnan goto out; 397165be2c79SMatthew R. Ochs 3972a834a36bSUma Krishnan rc = pci_register_driver(&cxlflash_driver); 3973a834a36bSUma Krishnan if (unlikely(rc)) 3974a834a36bSUma Krishnan goto err; 3975a834a36bSUma Krishnan out: 3976a834a36bSUma Krishnan pr_debug("%s: returning rc=%d\n", __func__, rc); 3977a834a36bSUma Krishnan return rc; 3978a834a36bSUma Krishnan err: 3979a834a36bSUma Krishnan cxlflash_class_exit(); 3980a834a36bSUma Krishnan goto out; 3981c21e0bbfSMatthew R. Ochs } 3982c21e0bbfSMatthew R. Ochs 3983c21e0bbfSMatthew R. Ochs /** 3984c21e0bbfSMatthew R. Ochs * exit_cxlflash() - module exit point 3985c21e0bbfSMatthew R. Ochs */ 3986c21e0bbfSMatthew R. Ochs static void __exit exit_cxlflash(void) 3987c21e0bbfSMatthew R. Ochs { 398865be2c79SMatthew R. Ochs cxlflash_term_global_luns(); 398965be2c79SMatthew R. Ochs cxlflash_free_errpage(); 399065be2c79SMatthew R. Ochs 3991c21e0bbfSMatthew R. Ochs pci_unregister_driver(&cxlflash_driver); 3992a834a36bSUma Krishnan cxlflash_class_exit(); 3993c21e0bbfSMatthew R. Ochs } 3994c21e0bbfSMatthew R. Ochs 3995c21e0bbfSMatthew R. Ochs module_init(init_cxlflash); 3996c21e0bbfSMatthew R. Ochs module_exit(exit_cxlflash); 3997
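/*
 * Illustrative usage sketch (not part of the driver): a minimal userspace
 * caller of the LUN provision host ioctl handled by cxlflash_chr_ioctl()
 * above. It assumes the uapi header installs as <scsi/cxlflash_ioctl.h>,
 * that the adapter's node is /dev/cxlflash/cxlflash0, and uses only fields
 * referenced by cxlflash_lun_provision(); the caller needs CAP_SYS_ADMIN
 * and the size units are as defined by the uapi documentation.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <scsi/cxlflash_ioctl.h>
 *
 *	// Create a LUN of 'size' units on FC port 'port' and report its LUN ID
 *	static int example_create_lun(unsigned short port, unsigned long long size)
 *	{
 *		struct ht_cxlflash_lun_provision lunprov;
 *		int fd = open("/dev/cxlflash/cxlflash0", O_RDWR);
 *		int rc;
 *
 *		if (fd < 0)
 *			return -1;
 *
 *		memset(&lunprov, 0, sizeof(lunprov));
 *		lunprov.hdr.version = HT_CXLFLASH_VERSION_0;
 *		lunprov.hdr.subcmd = HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN;
 *		lunprov.port = port;
 *		lunprov.size = size;
 *
 *		rc = ioctl(fd, HT_CXLFLASH_LUN_PROVISION, &lunprov);
 *		if (!rc)
 *			printf("created LUN id 0x%llx\n",
 *			       (unsigned long long)lunprov.lun_id);
 *
 *		close(fd);
 *		return rc;
 *	}
 */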