/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/unaligned.h>

#include <misc/cxl.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "main.h"
#include "sislite.h"
#include "common.h"

MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");

static struct class *cxlflash_class;
static u32 cxlflash_major;
static DECLARE_BITMAP(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);

/**
 * process_cmd_err() - command error handler
 * @cmd:	AFU command that experienced the error.
 * @scp:	SCSI command associated with the AFU command in error.
 *
 * Translates error bits from AFU command to SCSI command results.
 */
static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
{
	struct afu *afu;
	struct cxlflash_cfg *cfg;
	struct device *dev;
	struct sisl_ioasa *ioasa;
	u32 resid;

	if (unlikely(!cmd))
		return;

	afu = cmd->parent;
	cfg = afu->parent;
	dev = &cfg->dev->dev;
	ioasa = &(cmd->sa);

	if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
		resid = ioasa->resid;
		scsi_set_resid(scp, resid);
		dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
			__func__, cmd, scp, resid);
	}

	if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
		dev_dbg(dev, "%s: cmd overrun cmd = %p scp = %p\n",
			__func__, cmd, scp);
		scp->result = (DID_ERROR << 16);
	}

	dev_dbg(dev, "%s: cmd failed afu_rc=%02x scsi_rc=%02x fc_rc=%02x "
		"afu_extra=%02x scsi_extra=%02x fc_extra=%02x\n", __func__,
		ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc,
		ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra);

	if (ioasa->rc.scsi_rc) {
		/* We have a SCSI status */
		if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
			memcpy(scp->sense_buffer, ioasa->sense_data,
			       SISL_SENSE_DATA_LEN);
			scp->result = ioasa->rc.scsi_rc;
		} else
			scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
	}

	/*
	 * We encountered an error. Set scp->result based on nature
	 * of error.
	 */
	if (ioasa->rc.fc_rc) {
		/* We have an FC status */
		switch (ioasa->rc.fc_rc) {
		case SISL_FC_RC_LINKDOWN:
			scp->result = (DID_REQUEUE << 16);
			break;
		case SISL_FC_RC_RESID:
			/* This indicates an FCP resid underrun */
			if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
				/*
				 * If the SISL_RC_FLAGS_OVERRUN flag was set,
				 * then we will handle this error elsewhere.
				 * If not then we must handle it here.
				 * This is probably an AFU bug.
				 */
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_FC_RC_RESIDERR:
			/* Resid mismatch between adapter and device */
		case SISL_FC_RC_TGTABORT:
		case SISL_FC_RC_ABORTOK:
		case SISL_FC_RC_ABORTFAIL:
		case SISL_FC_RC_NOLOGI:
		case SISL_FC_RC_ABORTPEND:
		case SISL_FC_RC_WRABORTPEND:
		case SISL_FC_RC_NOEXP:
		case SISL_FC_RC_INUSE:
			scp->result = (DID_ERROR << 16);
			break;
		}
	}

	if (ioasa->rc.afu_rc) {
		/* We have an AFU error */
		switch (ioasa->rc.afu_rc) {
		case SISL_AFU_RC_NO_CHANNELS:
			scp->result = (DID_NO_CONNECT << 16);
			break;
		case SISL_AFU_RC_DATA_DMA_ERR:
			switch (ioasa->afu_extra) {
			case SISL_AFU_DMA_ERR_PAGE_IN:
				/* Retry */
				scp->result = (DID_IMM_RETRY << 16);
				break;
			case SISL_AFU_DMA_ERR_INVALID_EA:
			default:
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_AFU_RC_OUT_OF_DATA_BUFS:
			/* Retry */
			scp->result = (DID_ALLOC_FAILURE << 16);
			break;
		default:
			scp->result = (DID_ERROR << 16);
		}
	}
}

/**
 * cmd_complete() - command completion handler
 * @cmd:	AFU command that has completed.
 *
 * For SCSI commands this routine prepares and submits commands that have
 * either completed or timed out to the SCSI stack. For internal commands
 * (TMF or AFU), this routine simply notifies the originator that the
 * command has completed.
 */
static void cmd_complete(struct afu_cmd *cmd)
{
	struct scsi_cmnd *scp;
	ulong lock_flags;
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
	list_del(&cmd->list);
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);

	if (cmd->scp) {
		scp = cmd->scp;
		if (unlikely(cmd->sa.ioasc))
			process_cmd_err(cmd, scp);
		else
			scp->result = (DID_OK << 16);

		dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
				    __func__, scp, scp->result, cmd->sa.ioasc);
		scp->scsi_done(scp);
	} else if (cmd->cmd_tmf) {
		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
		cfg->tmf_active = false;
		wake_up_all_locked(&cfg->tmf_waitq);
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
	} else
		complete(&cmd->cevent);
}

/**
 * flush_pending_cmds() - flush all pending commands on this hardware queue
 * @hwq:	Hardware queue to flush.
 *
 * The hardware send queue lock associated with this hardware queue must be
 * held when calling this routine.
 */
static void flush_pending_cmds(struct hwq *hwq)
{
	struct cxlflash_cfg *cfg = hwq->afu->parent;
	struct afu_cmd *cmd, *tmp;
	struct scsi_cmnd *scp;
	ulong lock_flags;

	list_for_each_entry_safe(cmd, tmp, &hwq->pending_cmds, list) {
		/* Bypass command when on a doneq, cmd_complete() will handle */
		if (!list_empty(&cmd->queue))
			continue;

		list_del(&cmd->list);

		if (cmd->scp) {
			scp = cmd->scp;
			scp->result = (DID_IMM_RETRY << 16);
			scp->scsi_done(scp);
		} else {
			cmd->cmd_aborted = true;

			if (cmd->cmd_tmf) {
				spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
				cfg->tmf_active = false;
				wake_up_all_locked(&cfg->tmf_waitq);
				spin_unlock_irqrestore(&cfg->tmf_slock,
						       lock_flags);
			} else
				complete(&cmd->cevent);
		}
	}
}

/**
 * context_reset() - reset context via specified register
 * @hwq:	Hardware queue owning the context to be reset.
 * @reset_reg:	MMIO register to perform reset.
 *
 * When the reset is successful, the SISLite specification guarantees that
 * the AFU has aborted all currently pending I/O. Accordingly, these commands
 * must be flushed.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset(struct hwq *hwq, __be64 __iomem *reset_reg)
{
	struct cxlflash_cfg *cfg = hwq->afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = -ETIMEDOUT;
	int nretry = 0;
	u64 val = 0x1;
	ulong lock_flags;

	dev_dbg(dev, "%s: hwq=%p\n", __func__, hwq);

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);

	writeq_be(val, reset_reg);
	do {
		val = readq_be(reset_reg);
		if ((val & 0x1) == 0x0) {
			rc = 0;
			break;
		}

		/* Double delay each time */
		udelay(1 << nretry);
	} while (nretry++ < MC_ROOM_RETRY_CNT);

	if (!rc)
		flush_pending_cmds(hwq);

	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);

	dev_dbg(dev, "%s: returning rc=%d, val=%016llx nretry=%d\n",
		__func__, rc, val, nretry);
	return rc;
}
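
/*
 * Note: the reset poll above doubles its delay on every pass (1, 2, 4, ...
 * microseconds), so the total busy-wait is bounded by roughly
 * 2^(MC_ROOM_RETRY_CNT + 1) microseconds. A quick sanity sketch, assuming
 * a retry count of 10 (the authoritative value is the MC_ROOM_RETRY_CNT
 * definition in the driver headers):
 *
 *	total_us = 0;
 *	for (nretry = 0; nretry <= 10; nretry++)
 *		total_us += 1 << nretry;	// 1 + 2 + ... + 1024 = 2047us
 *
 * which keeps the reset path, even in the worst case, at around 2ms of
 * delay while holding the send queue lock.
 */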

/**
 * context_reset_ioarrin() - reset context via IOARRIN register
 * @hwq:	Hardware queue owning the context to be reset.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset_ioarrin(struct hwq *hwq)
{
	return context_reset(hwq, &hwq->host_map->ioarrin);
}

/**
 * context_reset_sq() - reset context via SQ_CONTEXT_RESET register
 * @hwq:	Hardware queue owning the context to be reset.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset_sq(struct hwq *hwq)
{
	return context_reset(hwq, &hwq->host_map->sq_ctx_reset);
}

/**
 * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
	int rc = 0;
	s64 room;
	ulong lock_flags;

	/*
	 * To avoid the performance penalty of MMIO, spread the update of
	 * 'room' over multiple commands.
	 */
	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
	if (--hwq->room < 0) {
		room = readq_be(&hwq->host_map->cmd_room);
		if (room <= 0) {
			dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
					    "0x%02X, room=0x%016llX\n",
					    __func__, cmd->rcb.cdb[0], room);
			hwq->room = 0;
			rc = SCSI_MLQUEUE_HOST_BUSY;
			goto out;
		}
		hwq->room = room - 1;
	}

	list_add(&cmd->list, &hwq->pending_cmds);
	writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin);
out:
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
	dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n", __func__,
		cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
	return rc;
}
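
/*
 * A note on the 'room' bookkeeping in send_cmd_ioarrin() above: rather than
 * paying for an MMIO read of cmd_room on every submission, the driver banks
 * the value from its last read and burns one banked credit per command,
 * only touching the hardware again when the local count runs out. A minimal
 * sketch of the idea (hypothetical names, not driver API):
 *
 *	if (--cached_credits < 0) {
 *		hw = read_hw_credits();		// the one MMIO read
 *		if (hw <= 0)
 *			return -EBUSY;		// queue genuinely full
 *		cached_credits = hw - 1;	// bank the rest
 *	}
 *	submit();				// covered by a credit
 *
 * Underestimating credits is harmless (worst case, an extra MMIO read), and
 * credits are only ever banked from a real hardware read, so the driver can
 * never oversubscribe the queue.
 */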

/**
 * send_cmd_sq() - sends an AFU command via SQ ring
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
	int rc = 0;
	int newval;
	ulong lock_flags;

	newval = atomic_dec_if_positive(&hwq->hsq_credits);
	if (newval <= 0) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	cmd->rcb.ioasa = &cmd->sa;

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);

	*hwq->hsq_curr = cmd->rcb;
	if (hwq->hsq_curr < hwq->hsq_end)
		hwq->hsq_curr++;
	else
		hwq->hsq_curr = hwq->hsq_start;

	list_add(&cmd->list, &hwq->pending_cmds);
	writeq_be((u64)hwq->hsq_curr, &hwq->host_map->sq_tail);

	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
out:
	dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p "
		"head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len,
		cmd->rcb.data_ea, cmd->rcb.ioasa, rc, hwq->hsq_curr,
		readq_be(&hwq->host_map->sq_head),
		readq_be(&hwq->host_map->sq_tail));
	return rc;
}
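
/*
 * The SQ submit path above is a classic single-producer ring: copy the
 * IOARCB into the slot at hsq_curr, advance hsq_curr (wrapping from hsq_end
 * back to hsq_start once the last slot is consumed), then publish the new
 * position to the AFU with a single MMIO write of the tail pointer:
 *
 *	*hwq->hsq_curr = cmd->rcb;			// fill current slot
 *	hsq_curr = (hsq_curr < hsq_end) ? hsq_curr + 1
 *					: hsq_start;	// advance with wrap
 *	writeq_be((u64)hsq_curr, &host_map->sq_tail);	// hand off to AFU
 *
 * The hsq_credits counter is decremented before the ring is touched, so
 * slot exhaustion is detected without ever reading hardware state.
 */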

/**
 * wait_resp() - polls for a response or timeout to a sent AFU command
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command that was sent.
 *
 * Return: 0 on success, -errno on failure
 */
static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);

	timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
	if (!timeout)
		rc = -ETIMEDOUT;

	if (cmd->cmd_aborted)
		rc = -EAGAIN;

	if (unlikely(cmd->sa.ioasc != 0)) {
		dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n",
			__func__, cmd->rcb.cdb[0], cmd->sa.ioasc);
		rc = -EIO;
	}

	return rc;
}

/**
 * cmd_to_target_hwq() - selects a target hardware queue for a SCSI command
 * @host:	SCSI host associated with device.
 * @scp:	SCSI command to send.
 * @afu:	AFU associated with the host.
 *
 * Hashes a command based upon the hardware queue mode.
 *
 * Return: Trusted index of target hardware queue
 */
static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp,
			     struct afu *afu)
{
	u32 tag;
	u32 hwq = 0;

	if (afu->num_hwqs == 1)
		return 0;

	switch (afu->hwq_mode) {
	case HWQ_MODE_RR:
		hwq = afu->hwq_rr_count++ % afu->num_hwqs;
		break;
	case HWQ_MODE_TAG:
		tag = blk_mq_unique_tag(scp->request);
		hwq = blk_mq_unique_tag_to_hwq(tag);
		break;
	case HWQ_MODE_CPU:
		hwq = smp_processor_id() % afu->num_hwqs;
		break;
	default:
		WARN_ON_ONCE(1);
	}

	return hwq;
}
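
/*
 * Example of the three steering policies above, assuming a 4-queue AFU:
 *
 *	HWQ_MODE_RR:	commands 0,1,2,3,4,... land on hwqs 0,1,2,3,0,...
 *			(simple round-robin, no locality)
 *	HWQ_MODE_TAG:	hwq = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(rq)),
 *			i.e. follow whatever queue the block layer encoded
 *			into the request tag
 *	HWQ_MODE_CPU:	hwq = smp_processor_id() % 4, pinning submissions
 *			from a given CPU to the same hardware queue
 */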

/**
 * send_tmf() - sends a Task Management Function (TMF)
 * @cfg:	Internal structure associated with the host.
 * @sdev:	SCSI device destined for TMF.
 * @tmfcmd:	TMF command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY or -errno on failure
 */
static int send_tmf(struct cxlflash_cfg *cfg, struct scsi_device *sdev,
		    u64 tmfcmd)
{
	struct afu *afu = cfg->afu;
	struct afu_cmd *cmd = NULL;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
	bool needs_deletion = false;
	char *buf = NULL;
	ulong lock_flags;
	int rc = 0;
	ulong to;

	buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
	if (unlikely(!buf)) {
		dev_err(dev, "%s: no memory for command\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
	INIT_LIST_HEAD(&cmd->queue);

	/* When Task Management Function is active do not send another */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	cfg->tmf_active = true;
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	cmd->parent = afu;
	cmd->cmd_tmf = true;
	cmd->hwq_index = hwq->index;

	cmd->rcb.ctx_id = hwq->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = CHAN2PORTMASK(sdev->channel);
	cmd->rcb.lun_id = lun_to_lunid(sdev->lun);
	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
			      SISL_REQ_FLAGS_SUP_UNDERRUN |
			      SISL_REQ_FLAGS_TMF_CMD);
	memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));

	rc = afu->send_cmd(afu, cmd);
	if (unlikely(rc)) {
		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
		cfg->tmf_active = false;
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		goto out;
	}

	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	to = msecs_to_jiffies(5000);
	to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
						       !cfg->tmf_active,
						       cfg->tmf_slock,
						       to);
	if (!to) {
		dev_err(dev, "%s: TMF timed out\n", __func__);
		rc = -ETIMEDOUT;
		needs_deletion = true;
	} else if (cmd->cmd_aborted) {
		dev_err(dev, "%s: TMF aborted\n", __func__);
		rc = -EAGAIN;
	} else if (cmd->sa.ioasc) {
		dev_err(dev, "%s: TMF failed ioasc=%08x\n",
			__func__, cmd->sa.ioasc);
		rc = -EIO;
	}
	cfg->tmf_active = false;
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	if (needs_deletion) {
		spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
		list_del(&cmd->list);
		spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
	}
out:
	kfree(buf);
	return rc;
}
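
/*
 * send_tmf() is the workhorse behind the SCSI error-handler entry points;
 * a LUN reset handler, for instance, would use it along these lines (the
 * TMF opcode name below is illustrative of the pattern, not a definition
 * made here):
 *
 *	rc = send_tmf(cfg, sdev, TMF_LUN_RESET);
 *	return rc ? FAILED : SUCCESS;
 *
 * Because the routine serializes on tmf_active, at most one TMF can be
 * outstanding per adapter at any point in time.
 */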

/**
 * cxlflash_driver_info() - information handler for this host driver
 * @host:	SCSI host associated with device.
 *
 * Return: A string describing the device.
 */
static const char *cxlflash_driver_info(struct Scsi_Host *host)
{
	return CXLFLASH_ADAPTER_NAME;
}

/**
 * cxlflash_queuecommand() - sends a mid-layer request
 * @host:	SCSI host associated with device.
 * @scp:	SCSI command to send.
 *
 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
{
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd = sc_to_afuci(scp);
	struct scatterlist *sg = scsi_sglist(scp);
	int hwq_index = cmd_to_target_hwq(host, scp, afu);
	struct hwq *hwq = get_hwq(afu, hwq_index);
	u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
	ulong lock_flags;
	int rc = 0;

	dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
			    "cdb=(%08x-%08x-%08x-%08x)\n",
			    __func__, scp, host->host_no, scp->device->channel,
			    scp->device->id, scp->device->lun,
			    get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	/*
	 * If a Task Management Function is active, do not send a new command;
	 * bounce it back to the mid-layer to be retried once the TMF has
	 * completed.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active) {
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	switch (cfg->state) {
	case STATE_PROBING:
	case STATE_PROBED:
	case STATE_RESET:
		dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	case STATE_FAILTERM:
		dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__);
		scp->result = (DID_NO_CONNECT << 16);
		scp->scsi_done(scp);
		rc = 0;
		goto out;
	default:
		break;
	}

	if (likely(sg)) {
		cmd->rcb.data_len = sg->length;
		cmd->rcb.data_ea = (uintptr_t)sg_virt(sg);
	}

	cmd->scp = scp;
	cmd->parent = afu;
	cmd->hwq_index = hwq_index;

	cmd->sa.ioasc = 0;
	cmd->rcb.ctx_id = hwq->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

	if (scp->sc_data_direction == DMA_TO_DEVICE)
		req_flags |= SISL_REQ_FLAGS_HOST_WRITE;

	cmd->rcb.req_flags = req_flags;
	memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));

	rc = afu->send_cmd(afu, cmd);
out:
	return rc;
}

/**
 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
 * @cfg:	Internal structure associated with the host.
 */
static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;

	if (pci_channel_offline(pdev))
		wait_event_timeout(cfg->reset_waitq,
				   !pci_channel_offline(pdev),
				   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
}

/**
 * free_mem() - free memory associated with the AFU
 * @cfg:	Internal structure associated with the host.
 */
static void free_mem(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;

	if (cfg->afu) {
		free_pages((ulong)afu, get_order(sizeof(struct afu)));
		cfg->afu = NULL;
	}
}

/**
 * cxlflash_reset_sync() - synchronizing point for asynchronous resets
 * @cfg:	Internal structure associated with the host.
 */
static void cxlflash_reset_sync(struct cxlflash_cfg *cfg)
{
	if (cfg->async_reset_cookie == 0)
		return;

	/* Wait until all async calls prior to this cookie have completed */
	async_synchronize_cookie(cfg->async_reset_cookie + 1);
	cfg->async_reset_cookie = 0;
}

/**
 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU in a partially allocated/initialized state.
 *
 * Cancels scheduled worker threads, waits for any active internal AFU
 * commands to timeout, disables IRQ polling and then unmaps the MMIO space.
 */
static void stop_afu(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct hwq *hwq;
	int i;

	cancel_work_sync(&cfg->work_q);
	if (!current_is_async())
		cxlflash_reset_sync(cfg);

	if (likely(afu)) {
		while (atomic_read(&afu->cmds_active))
			ssleep(1);

		if (afu_is_irqpoll_enabled(afu)) {
			for (i = 0; i < afu->num_hwqs; i++) {
				hwq = get_hwq(afu, i);

				irq_poll_disable(&hwq->irqpoll);
			}
		}

		if (likely(afu->afu_map)) {
			cfg->ops->psa_unmap(afu->afu_map);
			afu->afu_map = NULL;
		}
	}
}

/**
 * term_intr() - disables all AFU interrupts
 * @cfg:	Internal structure associated with the host.
 * @level:	Depth of allocation, where to begin waterfall tear down.
 * @index:	Index of the hardware queue.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
		      u32 index)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;

	if (!afu) {
		dev_err(dev, "%s: returning with NULL afu\n", __func__);
		return;
	}

	hwq = get_hwq(afu, index);

	if (!hwq->ctx_cookie) {
		dev_err(dev, "%s: returning with NULL MC\n", __func__);
		return;
	}

	switch (level) {
	case UNMAP_THREE:
		/* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
		if (index == PRIMARY_HWQ)
			cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 3, hwq);
		/* fall through */
	case UNMAP_TWO:
		cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 2, hwq);
		/* fall through */
	case UNMAP_ONE:
		cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 1, hwq);
		/* fall through */
	case FREE_IRQ:
		cfg->ops->free_afu_irqs(hwq->ctx_cookie);
		/* fall through */
	case UNDO_NOOP:
		/* No action required */
		break;
	}
}

/**
 * term_mc() - terminates the master context
 * @cfg:	Internal structure associated with the host.
 * @index:	Index of the hardware queue.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_mc(struct cxlflash_cfg *cfg, u32 index)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;
	ulong lock_flags;

	if (!afu) {
		dev_err(dev, "%s: returning with NULL afu\n", __func__);
		return;
	}

	hwq = get_hwq(afu, index);

	if (!hwq->ctx_cookie) {
		dev_err(dev, "%s: returning with NULL MC\n", __func__);
		return;
	}

	WARN_ON(cfg->ops->stop_context(hwq->ctx_cookie));
	if (index != PRIMARY_HWQ)
		WARN_ON(cfg->ops->release_context(hwq->ctx_cookie));
	hwq->ctx_cookie = NULL;

	spin_lock_irqsave(&hwq->hrrq_slock, lock_flags);
	hwq->hrrq_online = false;
	spin_unlock_irqrestore(&hwq->hrrq_slock, lock_flags);

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
	flush_pending_cmds(hwq);
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
}

/**
 * term_afu() - terminates the AFU
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_afu(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int k;

	/*
	 * Tear down is carefully orchestrated to ensure
	 * no interrupts can come in when the problem state
	 * area is unmapped.
	 *
	 * 1) Disable all AFU interrupts for each master
	 * 2) Unmap the problem state area
	 * 3) Stop each master context
	 */
	for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
		term_intr(cfg, UNMAP_THREE, k);

	stop_afu(cfg);

	for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
		term_mc(cfg, k);

	dev_dbg(dev, "%s: returning\n", __func__);
}

/**
 * notify_shutdown() - notifies device of pending shutdown
 * @cfg:	Internal structure associated with the host.
 * @wait:	Whether to wait for shutdown processing to complete.
 *
 * This function will notify the AFU that the adapter is being shutdown
 * and will wait for shutdown processing to complete if wait is true.
 * This notification should flush pending I/Os to the device and halt
 * further I/Os until the next AFU reset is issued and device restarted.
 */
static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct dev_dependent_vals *ddv;
	__be64 __iomem *fc_port_regs;
	u64 reg, status;
	int i, retry_cnt = 0;

	ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
	if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
		return;

	if (!afu || !afu->afu_map) {
		dev_dbg(dev, "%s: Problem state area not mapped\n", __func__);
		return;
	}

	/* Notify AFU */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);

		reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
		reg |= SISL_FC_SHUTDOWN_NORMAL;
		writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
	}

	if (!wait)
		return;

	/* Wait up to 1.5 seconds for shutdown processing to complete */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);
		retry_cnt = 0;

		while (true) {
			status = readq_be(&fc_port_regs[FC_STATUS / 8]);
			if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
				break;
			if (++retry_cnt >= MC_RETRY_CNT) {
				dev_dbg(dev, "%s: port %d shutdown processing "
					"not yet completed\n", __func__, i);
				break;
			}
			msleep(100 * retry_cnt);
		}
	}
}
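
/*
 * The shutdown wait above backs off linearly: the sleeps are msleep(100),
 * msleep(200), ..., msleep(100 * (MC_RETRY_CNT - 1)), i.e. a total of
 * 50 * MC_RETRY_CNT * (MC_RETRY_CNT - 1) milliseconds per port plus the
 * register polls. The "up to 1.5 seconds" figure above implies a retry
 * count of 6 (100 + 200 + 300 + 400 + 500 = 1500ms); the authoritative
 * value is the MC_RETRY_CNT definition in the driver headers.
 */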

/**
 * cxlflash_get_minor() - gets the first available minor number
 *
 * Return: Unique minor number that can be used to create the character device.
 */
static int cxlflash_get_minor(void)
{
	int minor;
	long bit;

	bit = find_first_zero_bit(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
	if (bit >= CXLFLASH_MAX_ADAPTERS)
		return -1;

	minor = bit & MINORMASK;
	set_bit(minor, cxlflash_minor);
	return minor;
}

/**
 * cxlflash_put_minor() - releases the minor number
 * @minor:	Minor number that is no longer needed.
 */
static void cxlflash_put_minor(int minor)
{
	clear_bit(minor, cxlflash_minor);
}
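
/*
 * Minor numbers are handed out from a simple bitmap: find the first clear
 * bit, set it, and return its index; freeing is just clearing the bit
 * again. For example, with adapters holding minors 0 and 1, the next
 * cxlflash_get_minor() returns 2, and after cxlflash_put_minor(1) a
 * subsequent call returns 1, so freed minors are recycled lowest-first.
 */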

/**
 * cxlflash_release_chrdev() - release the character device for the host
 * @cfg:	Internal structure associated with the host.
 */
static void cxlflash_release_chrdev(struct cxlflash_cfg *cfg)
{
	device_unregister(cfg->chardev);
	cfg->chardev = NULL;
	cdev_del(&cfg->cdev);
	cxlflash_put_minor(MINOR(cfg->cdev.dev));
}

/**
 * cxlflash_remove() - PCI entry point to tear down host
 * @pdev:	PCI device associated with the host.
 *
 * Safe to use as a cleanup in partially allocated/initialized state. Note that
 * the reset_waitq is flushed as part of the stop/termination of user contexts.
 */
static void cxlflash_remove(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	ulong lock_flags;

	if (!pci_is_enabled(pdev)) {
		dev_dbg(dev, "%s: Device is disabled\n", __func__);
		return;
	}

	/* Yield to running recovery threads before continuing with remove */
	wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
				     cfg->state != STATE_PROBING);
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	/* Notify AFU and wait for shutdown processing to complete */
	notify_shutdown(cfg, true);

	cfg->state = STATE_FAILTERM;
	cxlflash_stop_term_user_contexts(cfg);

	switch (cfg->init_state) {
	case INIT_STATE_CDEV:
		cxlflash_release_chrdev(cfg);
		/* fall through */
	case INIT_STATE_SCSI:
		cxlflash_term_local_luns(cfg);
		scsi_remove_host(cfg->host);
		/* fall through */
	case INIT_STATE_AFU:
		term_afu(cfg);
		/* fall through */
	case INIT_STATE_PCI:
		cfg->ops->destroy_afu(cfg->afu_cookie);
		pci_disable_device(pdev);
		/* fall through */
	case INIT_STATE_NONE:
		free_mem(cfg);
		scsi_host_put(cfg->host);
		break;
	}

	dev_dbg(dev, "%s: returning\n", __func__);
}

/**
 * alloc_mem() - allocates the AFU and its command pool
 * @cfg:	Internal structure associated with the host.
 *
 * A partially allocated state remains on failure.
 *
 * Return:
 *	0 on success
 *	-ENOMEM on failure to allocate memory
 */
static int alloc_mem(struct cxlflash_cfg *cfg)
{
	int rc = 0;
	struct device *dev = &cfg->dev->dev;

	/* AFU is ~28k, i.e. only one 64k page or up to seven 4k pages */
	cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					    get_order(sizeof(struct afu)));
	if (unlikely(!cfg->afu)) {
		dev_err(dev, "%s: cannot get %d free pages\n",
			__func__, get_order(sizeof(struct afu)));
		rc = -ENOMEM;
		goto out;
	}
	cfg->afu->parent = cfg;
	cfg->afu->desired_hwqs = CXLFLASH_DEF_HWQS;
	cfg->afu->afu_map = NULL;
out:
	return rc;
}

/**
 * init_pci() - initializes the host as a PCI device
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_pci(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = pci_enable_device(pdev);
	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			cxlflash_wait_for_pci_err_recovery(cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc) {
			dev_err(dev, "%s: Cannot enable adapter\n", __func__);
			cxlflash_wait_for_pci_err_recovery(cfg);
			goto out;
		}
	}

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_scsi(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = scsi_add_host(cfg->host, &pdev->dev);
	if (rc) {
		dev_err(dev, "%s: scsi_add_host failed rc=%d\n", __func__, rc);
		goto out;
	}

	scsi_scan_host(cfg->host);

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * set_port_online() - transitions the specified host FC port to online state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. Online state means
 * that the FC link layer has synced, completed the handshaking process, and
 * is ready for login to start.
 */
static void set_port_online(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE);	/* clear OFF_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);	/* set ON_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * set_port_offline() - transitions the specified host FC port to offline state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call.
 */
static void set_port_offline(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);	/* clear ON_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);	/* set OFF_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * wait_port_online() - waits for the specified host FC port to come online
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @delay_us:	Number of microseconds to delay between reading port status.
 * @nretry:	Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call. This will timeout
 * when the cable is not plugged in.
 *
 * Return:
 *	TRUE (1) when the specified port is online
 *	FALSE (0) when the specified port fails to come online after timeout
 */
static bool wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	WARN_ON(delay_us < 1000);

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
}
1145c21e0bbfSMatthew R. Ochs return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE); 1146c21e0bbfSMatthew R. Ochs } 1147c21e0bbfSMatthew R. Ochs 1148c21e0bbfSMatthew R. Ochs /** 1149c21e0bbfSMatthew R. Ochs * wait_port_offline() - waits for the specified host FC port to go offline 1150c21e0bbfSMatthew R. Ochs * @fc_regs: Top of MMIO region defined for specified port. 1151c21e0bbfSMatthew R. Ochs * @delay_us: Number of microseconds to delay between reading port status. 1152c21e0bbfSMatthew R. Ochs * @nretry: Number of cycles to retry reading port status. 1153c21e0bbfSMatthew R. Ochs * 1154c21e0bbfSMatthew R. Ochs * The provided MMIO region must be mapped prior to call. 1155c21e0bbfSMatthew R. Ochs * 1156c21e0bbfSMatthew R. Ochs * Return: 1157c21e0bbfSMatthew R. Ochs * TRUE (1) when the specified port is offline 1158c21e0bbfSMatthew R. Ochs * FALSE (0) when the specified port fails to go offline after timeout 1159c21e0bbfSMatthew R. Ochs */ 1160fb67d44dSMatthew R. Ochs static bool wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry) 1161c21e0bbfSMatthew R. Ochs { 1162c21e0bbfSMatthew R. Ochs u64 status; 1163c21e0bbfSMatthew R. Ochs 1164fb67d44dSMatthew R. Ochs WARN_ON(delay_us < 1000); 1165c21e0bbfSMatthew R. Ochs 1166c21e0bbfSMatthew R. Ochs do { 1167c21e0bbfSMatthew R. Ochs msleep(delay_us / 1000); 1168c21e0bbfSMatthew R. Ochs status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]); 116905dab432SMatthew R. Ochs if (status == U64_MAX) 117005dab432SMatthew R. Ochs nretry /= 2; 1171c21e0bbfSMatthew R. Ochs } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE && 1172c21e0bbfSMatthew R. Ochs nretry--); 1173c21e0bbfSMatthew R. Ochs 1174c21e0bbfSMatthew R. Ochs return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE); 1175c21e0bbfSMatthew R. Ochs } 1176c21e0bbfSMatthew R. Ochs 1177c21e0bbfSMatthew R. Ochs /** 1178c21e0bbfSMatthew R. Ochs * afu_set_wwpn() - configures the WWPN for the specified host FC port 1179c21e0bbfSMatthew R. Ochs * @afu: AFU associated with the host that owns the specified FC port. 1180c21e0bbfSMatthew R. Ochs * @port: Port number being configured. 1181c21e0bbfSMatthew R. Ochs * @fc_regs: Top of MMIO region defined for specified port. 1182c21e0bbfSMatthew R. Ochs * @wwpn: The world-wide-port-number previously discovered for port. 1183c21e0bbfSMatthew R. Ochs * 1184c21e0bbfSMatthew R. Ochs * The provided MMIO region must be mapped prior to call. As part of the 1185c21e0bbfSMatthew R. Ochs * sequence to configure the WWPN, the port is toggled offline and then back 1186c21e0bbfSMatthew R. Ochs * online. This toggling action can cause this routine to delay up to a few 1187c21e0bbfSMatthew R. Ochs * seconds. When configured to use the internal LUN feature of the AFU, a 1188c21e0bbfSMatthew R. Ochs * failure to come online is overridden. 1189c21e0bbfSMatthew R. Ochs */ 1190f8013261SMatthew R. Ochs static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs, 11911786f4a0SMatthew R. Ochs u64 wwpn) 1192c21e0bbfSMatthew R. Ochs { 1193fb67d44dSMatthew R. Ochs struct cxlflash_cfg *cfg = afu->parent; 1194fb67d44dSMatthew R. Ochs struct device *dev = &cfg->dev->dev; 1195fb67d44dSMatthew R. Ochs 1196c21e0bbfSMatthew R. Ochs set_port_offline(fc_regs); 1197c21e0bbfSMatthew R. Ochs if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US, 1198c21e0bbfSMatthew R. Ochs FC_PORT_STATUS_RETRY_CNT)) { 1199fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: wait on port %d to go offline timed out\n", 1200c21e0bbfSMatthew R. Ochs
__func__, port); 1201c21e0bbfSMatthew R. Ochs } 1202c21e0bbfSMatthew R. Ochs 1203c21e0bbfSMatthew R. Ochs writeq_be(wwpn, &fc_regs[FC_PNAME / 8]); 1204c21e0bbfSMatthew R. Ochs 1205c21e0bbfSMatthew R. Ochs set_port_online(fc_regs); 1206c21e0bbfSMatthew R. Ochs if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US, 1207c21e0bbfSMatthew R. Ochs FC_PORT_STATUS_RETRY_CNT)) { 1208fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: wait on port %d to go online timed out\n", 1209c21e0bbfSMatthew R. Ochs __func__, port); 1210c21e0bbfSMatthew R. Ochs } 1211c21e0bbfSMatthew R. Ochs } 1212c21e0bbfSMatthew R. Ochs 1213c21e0bbfSMatthew R. Ochs /** 1214c21e0bbfSMatthew R. Ochs * afu_link_reset() - resets the specified host FC port 1215c21e0bbfSMatthew R. Ochs * @afu: AFU associated with the host that owns the specified FC port. 1216c21e0bbfSMatthew R. Ochs * @port: Port number being configured. 1217c21e0bbfSMatthew R. Ochs * @fc_regs: Top of MMIO region defined for specified port. 1218c21e0bbfSMatthew R. Ochs * 1219c21e0bbfSMatthew R. Ochs * The provided MMIO region must be mapped prior to call. The sequence to 1220c21e0bbfSMatthew R. Ochs * reset the port involves toggling it offline and then back online. This 1221c21e0bbfSMatthew R. Ochs * action can cause this routine to delay up to a few seconds. An effort 1222c21e0bbfSMatthew R. Ochs * is made to maintain the link with the device by switching the host to 1223c21e0bbfSMatthew R. Ochs * use the alternate port exclusively while the reset takes place. A 1224c21e0bbfSMatthew R. Ochs * failure to come online is overridden. 1225c21e0bbfSMatthew R. Ochs */ 12261786f4a0SMatthew R. Ochs static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs) 1227c21e0bbfSMatthew R. Ochs { 1228fb67d44dSMatthew R. Ochs struct cxlflash_cfg *cfg = afu->parent; 1229fb67d44dSMatthew R. Ochs struct device *dev = &cfg->dev->dev; 1230c21e0bbfSMatthew R. Ochs u64 port_sel; 1231c21e0bbfSMatthew R. Ochs 1232c21e0bbfSMatthew R. Ochs /* first switch the AFU to the other links, if any */ 1233c21e0bbfSMatthew R. Ochs port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel); 12344da74db0SDan Carpenter port_sel &= ~(1ULL << port); 1235c21e0bbfSMatthew R. Ochs writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel); 1236c21e0bbfSMatthew R. Ochs cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC); 1237c21e0bbfSMatthew R. Ochs 1238c21e0bbfSMatthew R. Ochs set_port_offline(fc_regs); 1239c21e0bbfSMatthew R. Ochs if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US, 1240c21e0bbfSMatthew R. Ochs FC_PORT_STATUS_RETRY_CNT)) 1241fb67d44dSMatthew R. Ochs dev_err(dev, "%s: wait on port %d to go offline timed out\n", 1242c21e0bbfSMatthew R. Ochs __func__, port); 1243c21e0bbfSMatthew R. Ochs 1244c21e0bbfSMatthew R. Ochs set_port_online(fc_regs); 1245c21e0bbfSMatthew R. Ochs if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US, 1246c21e0bbfSMatthew R. Ochs FC_PORT_STATUS_RETRY_CNT)) 1247fb67d44dSMatthew R. Ochs dev_err(dev, "%s: wait on port %d to go online timed out\n", 1248c21e0bbfSMatthew R. Ochs __func__, port); 1249c21e0bbfSMatthew R. Ochs 1250c21e0bbfSMatthew R. Ochs /* switch back to include this port */ 12514da74db0SDan Carpenter port_sel |= (1ULL << port); 1252c21e0bbfSMatthew R. Ochs writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel); 1253c21e0bbfSMatthew R. Ochs cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC); 1254c21e0bbfSMatthew R. Ochs 1255fb67d44dSMatthew R. Ochs
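/*
 * Illustrative walk-through, assuming a dual-port AFU with both ports
 * selected (afu_port_sel == 0x3): resetting port 0 drops the selection
 * to 0x2 so traffic flows over port 1 exclusively, the link is bounced
 * offline and back online, and the selection is then restored to 0x3.
 * The cxlflash_afu_sync() calls appear intended to ensure the AFU
 * observes each port-selection change before the link is toggled.
 */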
dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel); 1256c21e0bbfSMatthew R. Ochs } 1257c21e0bbfSMatthew R. Ochs 1258c21e0bbfSMatthew R. Ochs /** 1259c21e0bbfSMatthew R. Ochs * afu_err_intr_init() - clears and initializes the AFU for error interrupts 1260c21e0bbfSMatthew R. Ochs * @afu: AFU associated with the host. 1261c21e0bbfSMatthew R. Ochs */ 1262c21e0bbfSMatthew R. Ochs static void afu_err_intr_init(struct afu *afu) 1263c21e0bbfSMatthew R. Ochs { 126478ae028eSMatthew R. Ochs struct cxlflash_cfg *cfg = afu->parent; 12650aa14887SMatthew R. Ochs __be64 __iomem *fc_port_regs; 1266c21e0bbfSMatthew R. Ochs int i; 1267bfc0bab1SUma Krishnan struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ); 1268c21e0bbfSMatthew R. Ochs u64 reg; 1269c21e0bbfSMatthew R. Ochs 1270c21e0bbfSMatthew R. Ochs /* global async interrupts: AFU clears afu_ctrl on context exit 1271c21e0bbfSMatthew R. Ochs * if async interrupts were sent to that context. This prevents 1272c21e0bbfSMatthew R. Ochs * the AFU from sending further async interrupts when there is 1273c21e0bbfSMatthew R. Ochs * nobody to receive them. 1274c21e0bbfSMatthew R. Ochs * 1275c21e0bbfSMatthew R. Ochs */ 1276c21e0bbfSMatthew R. Ochs 1277c21e0bbfSMatthew R. Ochs /* mask all */ 1278c21e0bbfSMatthew R. Ochs writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask); 1279bfc0bab1SUma Krishnan /* set LISN# to send and point to primary master context */ 1280bfc0bab1SUma Krishnan reg = ((u64) (((hwq->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40); 1281c21e0bbfSMatthew R. Ochs 1282c21e0bbfSMatthew R. Ochs if (afu->internal_lun) 1283c21e0bbfSMatthew R. Ochs reg |= 1; /* Bit 63 indicates local lun */ 1284c21e0bbfSMatthew R. Ochs writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl); 1285c21e0bbfSMatthew R. Ochs /* clear all */ 1286c21e0bbfSMatthew R. Ochs writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear); 1287c21e0bbfSMatthew R. Ochs /* unmask bits that are of interest */ 1288c21e0bbfSMatthew R. Ochs /* note: afu can send an interrupt after this step */ 1289c21e0bbfSMatthew R. Ochs writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask); 1290c21e0bbfSMatthew R. Ochs /* clear again in case a bit came on after previous clear but before */ 1291c21e0bbfSMatthew R. Ochs /* unmask */ 1292c21e0bbfSMatthew R. Ochs writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear); 1293c21e0bbfSMatthew R. Ochs 1294c21e0bbfSMatthew R. Ochs /* Clear/Set internal lun bits */ 12950aa14887SMatthew R. Ochs fc_port_regs = get_fc_port_regs(cfg, 0); 12960aa14887SMatthew R. Ochs reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]); 1297c21e0bbfSMatthew R. Ochs reg &= SISL_FC_INTERNAL_MASK; 1298c21e0bbfSMatthew R. Ochs if (afu->internal_lun) 1299c21e0bbfSMatthew R. Ochs reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT); 13000aa14887SMatthew R. Ochs writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]); 1301c21e0bbfSMatthew R. Ochs 1302c21e0bbfSMatthew R. Ochs /* now clear FC errors */ 130378ae028eSMatthew R. Ochs for (i = 0; i < cfg->num_fc_ports; i++) { 13040aa14887SMatthew R. Ochs fc_port_regs = get_fc_port_regs(cfg, i); 13050aa14887SMatthew R. Ochs 13060aa14887SMatthew R. Ochs writeq_be(0xFFFFFFFFU, &fc_port_regs[FC_ERROR / 8]); 13070aa14887SMatthew R. Ochs writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]); 1308c21e0bbfSMatthew R. Ochs } 1309c21e0bbfSMatthew R. Ochs 1310c21e0bbfSMatthew R. Ochs /* sync interrupts for master's IOARRIN write */ 1311c21e0bbfSMatthew R. Ochs /* note that unlike asyncs, there can be no pending sync interrupts */ 1312c21e0bbfSMatthew R. Ochs
/* at this time (this is a fresh context and master has not written */ 1313c21e0bbfSMatthew R. Ochs /* IOARRIN yet), so there is nothing to clear. */ 1314c21e0bbfSMatthew R. Ochs 1315c21e0bbfSMatthew R. Ochs /* set LISN#, it is always sent to the context that wrote IOARRIN */ 13163065267aSMatthew R. Ochs for (i = 0; i < afu->num_hwqs; i++) { 1317bfc0bab1SUma Krishnan hwq = get_hwq(afu, i); 1318bfc0bab1SUma Krishnan 1319465891feSMatthew R. Ochs reg = readq_be(&hwq->host_map->ctx_ctrl); 1320465891feSMatthew R. Ochs WARN_ON((reg & SISL_CTX_CTRL_LISN_MASK) != 0); 1321465891feSMatthew R. Ochs reg |= SISL_MSI_SYNC_ERROR; 1322465891feSMatthew R. Ochs writeq_be(reg, &hwq->host_map->ctx_ctrl); 1323bfc0bab1SUma Krishnan writeq_be(SISL_ISTATUS_MASK, &hwq->host_map->intr_mask); 1324bfc0bab1SUma Krishnan } 1325c21e0bbfSMatthew R. Ochs } 1326c21e0bbfSMatthew R. Ochs 1327c21e0bbfSMatthew R. Ochs /** 1328c21e0bbfSMatthew R. Ochs * cxlflash_sync_err_irq() - interrupt handler for synchronous errors 1329c21e0bbfSMatthew R. Ochs * @irq: Interrupt number. 1330c21e0bbfSMatthew R. Ochs * @data: Private data provided at interrupt registration, the HWQ. 1331c21e0bbfSMatthew R. Ochs * 1332c21e0bbfSMatthew R. Ochs * Return: Always return IRQ_HANDLED. 1333c21e0bbfSMatthew R. Ochs */ 1334c21e0bbfSMatthew R. Ochs static irqreturn_t cxlflash_sync_err_irq(int irq, void *data) 1335c21e0bbfSMatthew R. Ochs { 1336bfc0bab1SUma Krishnan struct hwq *hwq = (struct hwq *)data; 1337bfc0bab1SUma Krishnan struct cxlflash_cfg *cfg = hwq->afu->parent; 1338fb67d44dSMatthew R. Ochs struct device *dev = &cfg->dev->dev; 1339c21e0bbfSMatthew R. Ochs u64 reg; 1340c21e0bbfSMatthew R. Ochs u64 reg_unmasked; 1341c21e0bbfSMatthew R. Ochs 1342bfc0bab1SUma Krishnan reg = readq_be(&hwq->host_map->intr_status); 1343c21e0bbfSMatthew R. Ochs reg_unmasked = (reg & SISL_ISTATUS_UNMASK); 1344c21e0bbfSMatthew R. Ochs 1345c21e0bbfSMatthew R. Ochs if (reg_unmasked == 0UL) { 1346fb67d44dSMatthew R. Ochs dev_err(dev, "%s: spurious interrupt, intr_status=%016llx\n", 1347fb67d44dSMatthew R. Ochs __func__, reg); 1348c21e0bbfSMatthew R. Ochs goto cxlflash_sync_err_irq_exit; 1349c21e0bbfSMatthew R. Ochs } 1350c21e0bbfSMatthew R. Ochs 1351fb67d44dSMatthew R. Ochs dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n", 1352fb67d44dSMatthew R. Ochs __func__, reg); 1353c21e0bbfSMatthew R. Ochs 1354bfc0bab1SUma Krishnan writeq_be(reg_unmasked, &hwq->host_map->intr_clear); 1355c21e0bbfSMatthew R. Ochs 1356c21e0bbfSMatthew R. Ochs cxlflash_sync_err_irq_exit: 1357c21e0bbfSMatthew R. Ochs return IRQ_HANDLED; 1358c21e0bbfSMatthew R. Ochs } 1359c21e0bbfSMatthew R. Ochs 1360c21e0bbfSMatthew R. Ochs /** 136176a6ebbeSMatthew R. Ochs * process_hrrq() - process the read-response queue 136276a6ebbeSMatthew R. Ochs * @hwq: Hardware queue associated with the host. 1363f918b4a8SMatthew R. Ochs * @doneq: Queue of commands harvested from the RRQ. 1364cba06e6dSMatthew R. Ochs * @budget: Threshold of RRQ entries to process. 1365f918b4a8SMatthew R. Ochs * 1366f918b4a8SMatthew R. Ochs * This routine must be called holding the disabled RRQ spin lock. 1367c21e0bbfSMatthew R. Ochs * 136876a6ebbeSMatthew R. Ochs * Return: The number of entries processed. 1369c21e0bbfSMatthew R. Ochs */ 1370bfc0bab1SUma Krishnan static int process_hrrq(struct hwq *hwq, struct list_head *doneq, int budget) 1371c21e0bbfSMatthew R. Ochs { 1372bfc0bab1SUma Krishnan struct afu *afu = hwq->afu; 1373c21e0bbfSMatthew R. Ochs struct afu_cmd *cmd; 1374696d0b0cSMatthew R.
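/*
 * RRQ consumption protocol, as implied by the loop below: the AFU posts
 * completions into the circular RRQ with a toggle bit that flips on
 * every wrap of the queue. An entry is ready only when its
 * SISL_RESP_HANDLE_T_BIT matches the software-side toggle; upon
 * wrapping, software flips its own toggle to chase the next pass, so
 * stale entries from the previous lap never match and are never
 * consumed twice.
 */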
Ochs struct sisl_ioasa *ioasa; 1375696d0b0cSMatthew R. Ochs struct sisl_ioarcb *ioarcb; 1376bfc0bab1SUma Krishnan bool toggle = hwq->toggle; 137776a6ebbeSMatthew R. Ochs int num_hrrq = 0; 1378c21e0bbfSMatthew R. Ochs u64 entry, 1379bfc0bab1SUma Krishnan *hrrq_start = hwq->hrrq_start, 1380bfc0bab1SUma Krishnan *hrrq_end = hwq->hrrq_end, 1381bfc0bab1SUma Krishnan *hrrq_curr = hwq->hrrq_curr; 1382c21e0bbfSMatthew R. Ochs 1383cba06e6dSMatthew R. Ochs /* Process ready RRQ entries up to the specified budget (if any) */ 1384c21e0bbfSMatthew R. Ochs while (true) { 1385c21e0bbfSMatthew R. Ochs entry = *hrrq_curr; 1386c21e0bbfSMatthew R. Ochs 1387c21e0bbfSMatthew R. Ochs if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle) 1388c21e0bbfSMatthew R. Ochs break; 1389c21e0bbfSMatthew R. Ochs 1390696d0b0cSMatthew R. Ochs entry &= ~SISL_RESP_HANDLE_T_BIT; 1391696d0b0cSMatthew R. Ochs 1392696d0b0cSMatthew R. Ochs if (afu_is_sq_cmd_mode(afu)) { 1393696d0b0cSMatthew R. Ochs ioasa = (struct sisl_ioasa *)entry; 1394696d0b0cSMatthew R. Ochs cmd = container_of(ioasa, struct afu_cmd, sa); 1395696d0b0cSMatthew R. Ochs } else { 1396696d0b0cSMatthew R. Ochs ioarcb = (struct sisl_ioarcb *)entry; 1397696d0b0cSMatthew R. Ochs cmd = container_of(ioarcb, struct afu_cmd, rcb); 1398696d0b0cSMatthew R. Ochs } 1399696d0b0cSMatthew R. Ochs 1400f918b4a8SMatthew R. Ochs list_add_tail(&cmd->queue, doneq); 1401c21e0bbfSMatthew R. Ochs 1402c21e0bbfSMatthew R. Ochs /* Advance to next entry or wrap and flip the toggle bit */ 1403c21e0bbfSMatthew R. Ochs if (hrrq_curr < hrrq_end) 1404c21e0bbfSMatthew R. Ochs hrrq_curr++; 1405c21e0bbfSMatthew R. Ochs else { 1406c21e0bbfSMatthew R. Ochs hrrq_curr = hrrq_start; 1407c21e0bbfSMatthew R. Ochs toggle ^= SISL_RESP_HANDLE_T_BIT; 1408c21e0bbfSMatthew R. Ochs } 1409696d0b0cSMatthew R. Ochs 1410bfc0bab1SUma Krishnan atomic_inc(&hwq->hsq_credits); 141176a6ebbeSMatthew R. Ochs num_hrrq++; 1412cba06e6dSMatthew R. Ochs 1413cba06e6dSMatthew R. Ochs if (budget > 0 && num_hrrq >= budget) 1414cba06e6dSMatthew R. Ochs break; 1415c21e0bbfSMatthew R. Ochs } 1416c21e0bbfSMatthew R. Ochs 1417bfc0bab1SUma Krishnan hwq->hrrq_curr = hrrq_curr; 1418bfc0bab1SUma Krishnan hwq->toggle = toggle; 1419c21e0bbfSMatthew R. Ochs 142076a6ebbeSMatthew R. Ochs return num_hrrq; 142176a6ebbeSMatthew R. Ochs } 142276a6ebbeSMatthew R. Ochs 142376a6ebbeSMatthew R. Ochs /** 1424f918b4a8SMatthew R. Ochs * process_cmd_doneq() - process a queue of harvested RRQ commands 1425f918b4a8SMatthew R. Ochs * @doneq: Queue of completed commands. 1426f918b4a8SMatthew R. Ochs * 1427f918b4a8SMatthew R. Ochs * Note that upon return the queue can no longer be trusted. 1428f918b4a8SMatthew R. Ochs */ 1429f918b4a8SMatthew R. Ochs static void process_cmd_doneq(struct list_head *doneq) 1430f918b4a8SMatthew R. Ochs { 1431f918b4a8SMatthew R. Ochs struct afu_cmd *cmd, *tmp; 1432f918b4a8SMatthew R. Ochs 1433f918b4a8SMatthew R. Ochs WARN_ON(list_empty(doneq)); 1434f918b4a8SMatthew R. Ochs 1435f918b4a8SMatthew R. Ochs list_for_each_entry_safe(cmd, tmp, doneq, queue) 1436f918b4a8SMatthew R. Ochs cmd_complete(cmd); 1437f918b4a8SMatthew R. Ochs } 1438f918b4a8SMatthew R. Ochs 1439f918b4a8SMatthew R. Ochs /** 1440cba06e6dSMatthew R. Ochs * cxlflash_irqpoll() - process a queue of harvested RRQ commands 1441cba06e6dSMatthew R. Ochs * @irqpoll: IRQ poll structure associated with queue to poll. 1442cba06e6dSMatthew R. Ochs * @budget: Threshold of RRQ entries to process per poll. 1443cba06e6dSMatthew R. Ochs * 1444cba06e6dSMatthew R. 
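* Entries are harvested under the disabled RRQ spin lock in chunks of
* up to @budget; when fewer than @budget entries are available the poll
* is completed via irq_poll_complete() so a later hardware interrupt
* can schedule a fresh poll. Completed commands are processed outside
* the lock.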
Ochs * Return: The number of entries processed. 1445cba06e6dSMatthew R. Ochs */ 1446cba06e6dSMatthew R. Ochs static int cxlflash_irqpoll(struct irq_poll *irqpoll, int budget) 1447cba06e6dSMatthew R. Ochs { 1448bfc0bab1SUma Krishnan struct hwq *hwq = container_of(irqpoll, struct hwq, irqpoll); 1449cba06e6dSMatthew R. Ochs unsigned long hrrq_flags; 1450cba06e6dSMatthew R. Ochs LIST_HEAD(doneq); 1451cba06e6dSMatthew R. Ochs int num_entries = 0; 1452cba06e6dSMatthew R. Ochs 1453bfc0bab1SUma Krishnan spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags); 1454cba06e6dSMatthew R. Ochs 1455bfc0bab1SUma Krishnan num_entries = process_hrrq(hwq, &doneq, budget); 1456cba06e6dSMatthew R. Ochs if (num_entries < budget) 1457cba06e6dSMatthew R. Ochs irq_poll_complete(irqpoll); 1458cba06e6dSMatthew R. Ochs 1459bfc0bab1SUma Krishnan spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags); 1460cba06e6dSMatthew R. Ochs 1461cba06e6dSMatthew R. Ochs process_cmd_doneq(&doneq); 1462cba06e6dSMatthew R. Ochs return num_entries; 1463cba06e6dSMatthew R. Ochs } 1464cba06e6dSMatthew R. Ochs 1465cba06e6dSMatthew R. Ochs /** 146676a6ebbeSMatthew R. Ochs * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path) 146776a6ebbeSMatthew R. Ochs * @irq: Interrupt number. 146876a6ebbeSMatthew R. Ochs * @data: Private data provided at interrupt registration, the HWQ. 146976a6ebbeSMatthew R. Ochs * 1470f918b4a8SMatthew R. Ochs * Return: IRQ_HANDLED or IRQ_NONE when no ready entries found. 147176a6ebbeSMatthew R. Ochs */ 147276a6ebbeSMatthew R. Ochs static irqreturn_t cxlflash_rrq_irq(int irq, void *data) 147376a6ebbeSMatthew R. Ochs { 1474bfc0bab1SUma Krishnan struct hwq *hwq = (struct hwq *)data; 1475bfc0bab1SUma Krishnan struct afu *afu = hwq->afu; 1476f918b4a8SMatthew R. Ochs unsigned long hrrq_flags; 1477f918b4a8SMatthew R. Ochs LIST_HEAD(doneq); 1478f918b4a8SMatthew R. Ochs int num_entries = 0; 147976a6ebbeSMatthew R. Ochs 1480bfc0bab1SUma Krishnan spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags); 1481cba06e6dSMatthew R. Ochs 1482d2d354a6SUma Krishnan /* Silently drop spurious interrupts when queue is not online */ 1483d2d354a6SUma Krishnan if (!hwq->hrrq_online) { 1484d2d354a6SUma Krishnan spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags); 1485d2d354a6SUma Krishnan return IRQ_HANDLED; 1486d2d354a6SUma Krishnan } 1487d2d354a6SUma Krishnan 1488cba06e6dSMatthew R. Ochs if (afu_is_irqpoll_enabled(afu)) { 1489bfc0bab1SUma Krishnan irq_poll_sched(&hwq->irqpoll); 1490bfc0bab1SUma Krishnan spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags); 1491cba06e6dSMatthew R. Ochs return IRQ_HANDLED; 1492cba06e6dSMatthew R. Ochs } 1493cba06e6dSMatthew R. Ochs 1494bfc0bab1SUma Krishnan num_entries = process_hrrq(hwq, &doneq, -1); 1495bfc0bab1SUma Krishnan spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags); 1496f918b4a8SMatthew R. Ochs 1497f918b4a8SMatthew R. Ochs if (num_entries == 0) 1498f918b4a8SMatthew R. Ochs return IRQ_NONE; 1499f918b4a8SMatthew R. Ochs 1500f918b4a8SMatthew R. Ochs process_cmd_doneq(&doneq); 1501c21e0bbfSMatthew R. Ochs return IRQ_HANDLED; 1502c21e0bbfSMatthew R. Ochs } 1503c21e0bbfSMatthew R. Ochs 1504e2ef33faSMatthew R. Ochs /* 1505e2ef33faSMatthew R. Ochs * Asynchronous interrupt information table 1506e2ef33faSMatthew R. Ochs * 1507e2ef33faSMatthew R. Ochs * NOTE: 1508e2ef33faSMatthew R. Ochs * - Order matters here as this array is indexed by bit position. 1509e2ef33faSMatthew R. Ochs * 1510e2ef33faSMatthew R. Ochs
Ochs * - The checkpatch script considers the BUILD_SISL_ASTATUS_FC_PORT macro 1511e2ef33faSMatthew R. Ochs * as complex and complains due to a lack of parentheses/braces. 1512e2ef33faSMatthew R. Ochs */ 1513e2ef33faSMatthew R. Ochs #define ASTATUS_FC(_a, _b, _c, _d) \ 1514e2ef33faSMatthew R. Ochs { SISL_ASTATUS_FC##_a##_##_b, _c, _a, (_d) } 1515e2ef33faSMatthew R. Ochs 1516e2ef33faSMatthew R. Ochs #define BUILD_SISL_ASTATUS_FC_PORT(_a) \ 1517e2ef33faSMatthew R. Ochs ASTATUS_FC(_a, LINK_UP, "link up", 0), \ 1518e2ef33faSMatthew R. Ochs ASTATUS_FC(_a, LINK_DN, "link down", 0), \ 1519e2ef33faSMatthew R. Ochs ASTATUS_FC(_a, LOGI_S, "login succeeded", SCAN_HOST), \ 1520e2ef33faSMatthew R. Ochs ASTATUS_FC(_a, LOGI_F, "login failed", CLR_FC_ERROR), \ 1521e2ef33faSMatthew R. Ochs ASTATUS_FC(_a, LOGI_R, "login timed out, retrying", LINK_RESET), \ 1522e2ef33faSMatthew R. Ochs ASTATUS_FC(_a, CRC_T, "CRC threshold exceeded", LINK_RESET), \ 1523e2ef33faSMatthew R. Ochs ASTATUS_FC(_a, LOGO, "target initiated LOGO", 0), \ 1524e2ef33faSMatthew R. Ochs ASTATUS_FC(_a, OTHER, "other error", CLR_FC_ERROR | LINK_RESET) 1525e2ef33faSMatthew R. Ochs 1526e2ef33faSMatthew R. Ochs static const struct asyc_intr_info ainfo[] = { 1527e2ef33faSMatthew R. Ochs BUILD_SISL_ASTATUS_FC_PORT(1), 1528e2ef33faSMatthew R. Ochs BUILD_SISL_ASTATUS_FC_PORT(0), 1529e2ef33faSMatthew R. Ochs BUILD_SISL_ASTATUS_FC_PORT(3), 1530e2ef33faSMatthew R. Ochs BUILD_SISL_ASTATUS_FC_PORT(2) 1531e2ef33faSMatthew R. Ochs }; 1532e2ef33faSMatthew R. Ochs 1533c21e0bbfSMatthew R. Ochs /** 1534c21e0bbfSMatthew R. Ochs * cxlflash_async_err_irq() - interrupt handler for asynchronous errors 1535c21e0bbfSMatthew R. Ochs * @irq: Interrupt number. 1536c21e0bbfSMatthew R. Ochs * @data: Private data provided at interrupt registration, the AFU. 1537c21e0bbfSMatthew R. Ochs * 1538c21e0bbfSMatthew R. Ochs * Return: Always return IRQ_HANDLED. 1539c21e0bbfSMatthew R. Ochs */ 1540c21e0bbfSMatthew R. Ochs static irqreturn_t cxlflash_async_err_irq(int irq, void *data) 1541c21e0bbfSMatthew R. Ochs { 1542bfc0bab1SUma Krishnan struct hwq *hwq = (struct hwq *)data; 1543bfc0bab1SUma Krishnan struct afu *afu = hwq->afu; 15444392ba49SMatthew R. Ochs struct cxlflash_cfg *cfg = afu->parent; 15454392ba49SMatthew R. Ochs struct device *dev = &cfg->dev->dev; 1546c21e0bbfSMatthew R. Ochs const struct asyc_intr_info *info; 15471786f4a0SMatthew R. Ochs struct sisl_global_map __iomem *global = &afu->afu_map->global; 15480aa14887SMatthew R. Ochs __be64 __iomem *fc_port_regs; 1549e2ef33faSMatthew R. Ochs u64 reg_unmasked; 1550c21e0bbfSMatthew R. Ochs u64 reg; 1551e2ef33faSMatthew R. Ochs u64 bit; 1552c21e0bbfSMatthew R. Ochs u8 port; 1553c21e0bbfSMatthew R. Ochs 1554c21e0bbfSMatthew R. Ochs reg = readq_be(&global->regs.aintr_status); 1555c21e0bbfSMatthew R. Ochs reg_unmasked = (reg & SISL_ASTATUS_UNMASK); 1556c21e0bbfSMatthew R. Ochs 1557e2ef33faSMatthew R. Ochs if (unlikely(reg_unmasked == 0)) { 1558fb67d44dSMatthew R. Ochs dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n", 1559c21e0bbfSMatthew R. Ochs __func__, reg); 1560c21e0bbfSMatthew R. Ochs goto out; 1561c21e0bbfSMatthew R. Ochs } 1562c21e0bbfSMatthew R. Ochs 1563f15fbf8dSMatthew R. Ochs /* FYI, it is 'okay' to clear AFU status before FC_ERROR */ 1564c21e0bbfSMatthew R. Ochs writeq_be(reg_unmasked, &global->regs.aintr_clear); 1565c21e0bbfSMatthew R. Ochs 1566f15fbf8dSMatthew R. Ochs /* Check each bit that is on */ 1567e2ef33faSMatthew R. 
for_each_set_bit(bit, (ulong *)&reg_unmasked, BITS_PER_LONG) { 1568e2ef33faSMatthew R. Ochs if (unlikely(bit >= ARRAY_SIZE(ainfo))) { 1569e2ef33faSMatthew R. Ochs WARN_ON_ONCE(1); 1570c21e0bbfSMatthew R. Ochs continue; 1571e2ef33faSMatthew R. Ochs } 1572e2ef33faSMatthew R. Ochs 1573e2ef33faSMatthew R. Ochs info = &ainfo[bit]; 1574e2ef33faSMatthew R. Ochs if (unlikely(info->status != 1ULL << bit)) { 1575e2ef33faSMatthew R. Ochs WARN_ON_ONCE(1); 1576e2ef33faSMatthew R. Ochs continue; 1577e2ef33faSMatthew R. Ochs } 1578c21e0bbfSMatthew R. Ochs 1579c21e0bbfSMatthew R. Ochs port = info->port; 15800aa14887SMatthew R. Ochs fc_port_regs = get_fc_port_regs(cfg, port); 1581c21e0bbfSMatthew R. Ochs 1582fb67d44dSMatthew R. Ochs dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n", 1583c21e0bbfSMatthew R. Ochs __func__, port, info->desc, 15840aa14887SMatthew R. Ochs readq_be(&fc_port_regs[FC_STATUS / 8])); 1585c21e0bbfSMatthew R. Ochs 1586c21e0bbfSMatthew R. Ochs /* 1587f15fbf8dSMatthew R. Ochs * Do link reset first, some OTHER errors will set FC_ERROR 1588c21e0bbfSMatthew R. Ochs * again if cleared before or w/o a reset 1589c21e0bbfSMatthew R. Ochs */ 1590c21e0bbfSMatthew R. Ochs if (info->action & LINK_RESET) { 15914392ba49SMatthew R. Ochs dev_err(dev, "%s: FC Port %d: resetting link\n", 1592c21e0bbfSMatthew R. Ochs __func__, port); 1593c21e0bbfSMatthew R. Ochs cfg->lr_state = LINK_RESET_REQUIRED; 1594c21e0bbfSMatthew R. Ochs cfg->lr_port = port; 1595c21e0bbfSMatthew R. Ochs schedule_work(&cfg->work_q); 1596c21e0bbfSMatthew R. Ochs } 1597c21e0bbfSMatthew R. Ochs 1598c21e0bbfSMatthew R. Ochs if (info->action & CLR_FC_ERROR) { 15990aa14887SMatthew R. Ochs reg = readq_be(&fc_port_regs[FC_ERROR / 8]); 1600c21e0bbfSMatthew R. Ochs 1601c21e0bbfSMatthew R. Ochs /* 1602f15fbf8dSMatthew R. Ochs * Since all errors are unmasked, FC_ERROR and FC_ERRCAP 1603c21e0bbfSMatthew R. Ochs * should be the same and tracing one is sufficient. 1604c21e0bbfSMatthew R. Ochs */ 1605c21e0bbfSMatthew R. Ochs 1606fb67d44dSMatthew R. Ochs dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n", 1607c21e0bbfSMatthew R. Ochs __func__, port, reg); 1608c21e0bbfSMatthew R. Ochs 16090aa14887SMatthew R. Ochs writeq_be(reg, &fc_port_regs[FC_ERROR / 8]); 16100aa14887SMatthew R. Ochs writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]); 1611c21e0bbfSMatthew R. Ochs } 1612ef51074aSMatthew R. Ochs 1613ef51074aSMatthew R. Ochs if (info->action & SCAN_HOST) { 1614ef51074aSMatthew R. Ochs atomic_inc(&cfg->scan_host_needed); 1615ef51074aSMatthew R. Ochs schedule_work(&cfg->work_q); 1616ef51074aSMatthew R. Ochs } 1617c21e0bbfSMatthew R. Ochs } 1618c21e0bbfSMatthew R. Ochs 1619c21e0bbfSMatthew R. Ochs out: 1620c21e0bbfSMatthew R. Ochs return IRQ_HANDLED; 1621c21e0bbfSMatthew R. Ochs } 1622c21e0bbfSMatthew R. Ochs 1623c21e0bbfSMatthew R. Ochs /** 1624c21e0bbfSMatthew R. Ochs * read_vpd() - obtains the WWPNs from VPD 16251284fb0cSMatthew R. Ochs * @cfg: Internal structure associated with the host. 162678ae028eSMatthew R. Ochs * @wwpn: Array of size MAX_FC_PORTS to pass back WWPNs. 1627c21e0bbfSMatthew R. Ochs * 16281284fb0cSMatthew R. Ochs * Return: 0 on success, -errno on failure 1629c21e0bbfSMatthew R. Ochs */ 1630c21e0bbfSMatthew R. Ochs static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[]) 1631c21e0bbfSMatthew R. Ochs { 1632fb67d44dSMatthew R. Ochs struct device *dev = &cfg->dev->dev; 1633fb67d44dSMatthew R. Ochs struct pci_dev *pdev = cfg->dev; 1634c21e0bbfSMatthew R. Ochs int rc = 0; 1635c21e0bbfSMatthew R.
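/*
 * Rough shape of the parse that follows: read the adapter VPD image,
 * locate the read-only (RO) large-resource data section, then look up
 * one keyword per FC port (V5..V8) and convert each ASCII WWPN into
 * its 64-bit binary form.
 */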
Ochs int ro_start, ro_size, i, j, k; 1636c21e0bbfSMatthew R. Ochs ssize_t vpd_size; 1637c21e0bbfSMatthew R. Ochs char vpd_data[CXLFLASH_VPD_LEN]; 1638c21e0bbfSMatthew R. Ochs char tmp_buf[WWPN_BUF_LEN] = { 0 }; 16390d419130SMatthew R. Ochs const struct dev_dependent_vals *ddv = (struct dev_dependent_vals *) 16400d419130SMatthew R. Ochs cfg->dev_id->driver_data; 16410d419130SMatthew R. Ochs const bool wwpn_vpd_required = ddv->flags & CXLFLASH_WWPN_VPD_REQUIRED; 16420d419130SMatthew R. Ochs const char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" }; 1643c21e0bbfSMatthew R. Ochs 1644c21e0bbfSMatthew R. Ochs /* Get the VPD data from the device */ 164525b8e08eSMatthew R. Ochs vpd_size = cfg->ops->read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data)); 1646c21e0bbfSMatthew R. Ochs if (unlikely(vpd_size <= 0)) { 1647fb67d44dSMatthew R. Ochs dev_err(dev, "%s: Unable to read VPD (size = %ld)\n", 1648c21e0bbfSMatthew R. Ochs __func__, vpd_size); 1649c21e0bbfSMatthew R. Ochs rc = -ENODEV; 1650c21e0bbfSMatthew R. Ochs goto out; 1651c21e0bbfSMatthew R. Ochs } 1652c21e0bbfSMatthew R. Ochs 1653c21e0bbfSMatthew R. Ochs /* Get the read only section offset */ 1654c21e0bbfSMatthew R. Ochs ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, 1655c21e0bbfSMatthew R. Ochs PCI_VPD_LRDT_RO_DATA); 1656c21e0bbfSMatthew R. Ochs if (unlikely(ro_start < 0)) { 1657fb67d44dSMatthew R. Ochs dev_err(dev, "%s: VPD Read-only data not found\n", __func__); 1658c21e0bbfSMatthew R. Ochs rc = -ENODEV; 1659c21e0bbfSMatthew R. Ochs goto out; 1660c21e0bbfSMatthew R. Ochs } 1661c21e0bbfSMatthew R. Ochs 1662c21e0bbfSMatthew R. Ochs /* Get the read only section size, cap when extends beyond read VPD */ 1663c21e0bbfSMatthew R. Ochs ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]); 1664c21e0bbfSMatthew R. Ochs j = ro_size; 1665c21e0bbfSMatthew R. Ochs i = ro_start + PCI_VPD_LRDT_TAG_SIZE; 1666c21e0bbfSMatthew R. Ochs if (unlikely((i + j) > vpd_size)) { 1667fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: Might need to read more VPD (%d > %ld)\n", 1668c21e0bbfSMatthew R. Ochs __func__, (i + j), vpd_size); 1669c21e0bbfSMatthew R. Ochs ro_size = vpd_size - i; 1670c21e0bbfSMatthew R. Ochs } 1671c21e0bbfSMatthew R. Ochs 1672c21e0bbfSMatthew R. Ochs /* 1673c21e0bbfSMatthew R. Ochs * Find the offset of the WWPN tag within the read only 1674c21e0bbfSMatthew R. Ochs * VPD data and validate the found field (partials are 1675c21e0bbfSMatthew R. Ochs * no good to us). Convert the ASCII data to an integer 1676c21e0bbfSMatthew R. Ochs * value. Note that we must copy to a temporary buffer 1677c21e0bbfSMatthew R. Ochs * because the conversion service requires that the ASCII 1678c21e0bbfSMatthew R. Ochs * string be terminated. 16790d419130SMatthew R. Ochs * 16800d419130SMatthew R. Ochs * Allow for WWPN not being found for all devices, setting 16810d419130SMatthew R. Ochs * the returned WWPN to zero when not found. Notify with a 16820d419130SMatthew R. Ochs * log error for cards that should have had WWPN keywords 16830d419130SMatthew R. Ochs * in the VPD - cards requiring WWPN will not have their 16840d419130SMatthew R. Ochs * ports programmed and operate in an undefined state. 1685c21e0bbfSMatthew R. Ochs */ 168678ae028eSMatthew R. Ochs for (k = 0; k < cfg->num_fc_ports; k++) { 1687c21e0bbfSMatthew R. Ochs j = ro_size; 1688c21e0bbfSMatthew R. Ochs i = ro_start + PCI_VPD_LRDT_TAG_SIZE; 1689c21e0bbfSMatthew R. Ochs 1690c21e0bbfSMatthew R. Ochs i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]); 16910d419130SMatthew R. 
Ochs if (i < 0) { 16920d419130SMatthew R. Ochs if (wwpn_vpd_required) 16930d419130SMatthew R. Ochs dev_err(dev, "%s: Port %d WWPN not found\n", 1694fb67d44dSMatthew R. Ochs __func__, k); 16950d419130SMatthew R. Ochs wwpn[k] = 0ULL; 16960d419130SMatthew R. Ochs continue; 1697c21e0bbfSMatthew R. Ochs } 1698c21e0bbfSMatthew R. Ochs 1699c21e0bbfSMatthew R. Ochs j = pci_vpd_info_field_size(&vpd_data[i]); 1700c21e0bbfSMatthew R. Ochs i += PCI_VPD_INFO_FLD_HDR_SIZE; 1701c21e0bbfSMatthew R. Ochs if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) { 1702fb67d44dSMatthew R. Ochs dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n", 1703c21e0bbfSMatthew R. Ochs __func__, k); 1704c21e0bbfSMatthew R. Ochs rc = -ENODEV; 1705c21e0bbfSMatthew R. Ochs goto out; 1706c21e0bbfSMatthew R. Ochs } 1707c21e0bbfSMatthew R. Ochs 1708c21e0bbfSMatthew R. Ochs memcpy(tmp_buf, &vpd_data[i], WWPN_LEN); 1709c21e0bbfSMatthew R. Ochs rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]); 1710c21e0bbfSMatthew R. Ochs if (unlikely(rc)) { 1711fb67d44dSMatthew R. Ochs dev_err(dev, "%s: WWPN conversion failed for port %d\n", 1712fb67d44dSMatthew R. Ochs __func__, k); 1713c21e0bbfSMatthew R. Ochs rc = -ENODEV; 1714c21e0bbfSMatthew R. Ochs goto out; 1715c21e0bbfSMatthew R. Ochs } 171678ae028eSMatthew R. Ochs 171778ae028eSMatthew R. Ochs dev_dbg(dev, "%s: wwpn%d=%016llx\n", __func__, k, wwpn[k]); 1718c21e0bbfSMatthew R. Ochs } 1719c21e0bbfSMatthew R. Ochs 1720c21e0bbfSMatthew R. Ochs out: 1721fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 1722c21e0bbfSMatthew R. Ochs return rc; 1723c21e0bbfSMatthew R. Ochs } 1724c21e0bbfSMatthew R. Ochs 1725c21e0bbfSMatthew R. Ochs /** 1726c21e0bbfSMatthew R. Ochs * init_pcr() - initialize the provisioning and control registers 17271284fb0cSMatthew R. Ochs * @cfg: Internal structure associated with the host. 1728c21e0bbfSMatthew R. Ochs * 1729c21e0bbfSMatthew R. Ochs * Also sets up fast access to the mapped registers and initializes AFU 1730c21e0bbfSMatthew R. Ochs * command fields that never change. 1731c21e0bbfSMatthew R. Ochs */ 173215305514SMatthew R. Ochs static void init_pcr(struct cxlflash_cfg *cfg) 1733c21e0bbfSMatthew R. Ochs { 1734c21e0bbfSMatthew R. Ochs struct afu *afu = cfg->afu; 17351786f4a0SMatthew R. Ochs struct sisl_ctrl_map __iomem *ctrl_map; 1736bfc0bab1SUma Krishnan struct hwq *hwq; 173725b8e08eSMatthew R. Ochs void *cookie; 1738c21e0bbfSMatthew R. Ochs int i; 1739c21e0bbfSMatthew R. Ochs 1740c21e0bbfSMatthew R. Ochs for (i = 0; i < MAX_CONTEXT; i++) { 1741c21e0bbfSMatthew R. Ochs ctrl_map = &afu->afu_map->ctrls[i].ctrl; 1742f15fbf8dSMatthew R. Ochs /* Disrupt any clients that could be running */ 1743c21e0bbfSMatthew R. Ochs /* e.g. clients that survived a master restart */ 1744c21e0bbfSMatthew R. Ochs writeq_be(0, &ctrl_map->rht_start); 1745c21e0bbfSMatthew R. Ochs writeq_be(0, &ctrl_map->rht_cnt_id); 1746c21e0bbfSMatthew R. Ochs writeq_be(0, &ctrl_map->ctx_cap); 1747c21e0bbfSMatthew R. Ochs } 1748c21e0bbfSMatthew R. Ochs 1749bfc0bab1SUma Krishnan /* Copy frequently used fields into hwq */ 17503065267aSMatthew R. Ochs for (i = 0; i < afu->num_hwqs; i++) { 1751bfc0bab1SUma Krishnan hwq = get_hwq(afu, i); 175225b8e08eSMatthew R. Ochs cookie = hwq->ctx_cookie; 1753bfc0bab1SUma Krishnan 175425b8e08eSMatthew R. 
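/*
 * The process element obtained for the context cookie serves as the
 * context handle and doubles as the index into the AFU's per-context
 * host and control register arrays mapped just below.
 */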
Ochs hwq->ctx_hndl = (u16) cfg->ops->process_element(cookie); 1755bfc0bab1SUma Krishnan hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host; 1756bfc0bab1SUma Krishnan hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl; 1757c21e0bbfSMatthew R. Ochs 1758c21e0bbfSMatthew R. Ochs /* Program the Endian Control for the master context */ 1759bfc0bab1SUma Krishnan writeq_be(SISL_ENDIAN_CTRL, &hwq->host_map->endian_ctrl); 1760bfc0bab1SUma Krishnan } 1761c21e0bbfSMatthew R. Ochs } 1762c21e0bbfSMatthew R. Ochs 1763c21e0bbfSMatthew R. Ochs /** 1764c21e0bbfSMatthew R. Ochs * init_global() - initialize AFU global registers 17651284fb0cSMatthew R. Ochs * @cfg: Internal structure associated with the host. 1766c21e0bbfSMatthew R. Ochs */ 176715305514SMatthew R. Ochs static int init_global(struct cxlflash_cfg *cfg) 1768c21e0bbfSMatthew R. Ochs { 1769c21e0bbfSMatthew R. Ochs struct afu *afu = cfg->afu; 17704392ba49SMatthew R. Ochs struct device *dev = &cfg->dev->dev; 1771bfc0bab1SUma Krishnan struct hwq *hwq; 1772bfc0bab1SUma Krishnan struct sisl_host_map __iomem *hmap; 17730aa14887SMatthew R. Ochs __be64 __iomem *fc_port_regs; 177478ae028eSMatthew R. Ochs u64 wwpn[MAX_FC_PORTS]; /* wwpn of AFU ports */ 1775c21e0bbfSMatthew R. Ochs int i = 0, num_ports = 0; 1776c21e0bbfSMatthew R. Ochs int rc = 0; 1777d44af4b0SUma Krishnan int j; 1778d44af4b0SUma Krishnan void *ctx; 1779c21e0bbfSMatthew R. Ochs u64 reg; 1780c21e0bbfSMatthew R. Ochs 1781c21e0bbfSMatthew R. Ochs rc = read_vpd(cfg, &wwpn[0]); 1782c21e0bbfSMatthew R. Ochs if (rc) { 17834392ba49SMatthew R. Ochs dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc); 1784c21e0bbfSMatthew R. Ochs goto out; 1785c21e0bbfSMatthew R. Ochs } 1786c21e0bbfSMatthew R. Ochs 1787bfc0bab1SUma Krishnan /* Set up RRQ and SQ in HWQ for master issued cmds */ 17883065267aSMatthew R. Ochs for (i = 0; i < afu->num_hwqs; i++) { 1789bfc0bab1SUma Krishnan hwq = get_hwq(afu, i); 1790bfc0bab1SUma Krishnan hmap = hwq->host_map; 1791bfc0bab1SUma Krishnan 1792bfc0bab1SUma Krishnan writeq_be((u64) hwq->hrrq_start, &hmap->rrq_start); 1793bfc0bab1SUma Krishnan writeq_be((u64) hwq->hrrq_end, &hmap->rrq_end); 1794d2d354a6SUma Krishnan hwq->hrrq_online = true; 1795c21e0bbfSMatthew R. Ochs 1796696d0b0cSMatthew R. Ochs if (afu_is_sq_cmd_mode(afu)) { 1797bfc0bab1SUma Krishnan writeq_be((u64)hwq->hsq_start, &hmap->sq_start); 1798bfc0bab1SUma Krishnan writeq_be((u64)hwq->hsq_end, &hmap->sq_end); 1799bfc0bab1SUma Krishnan } 1800696d0b0cSMatthew R. Ochs } 1801696d0b0cSMatthew R. Ochs 1802c21e0bbfSMatthew R. Ochs /* AFU configuration */ 1803c21e0bbfSMatthew R. Ochs reg = readq_be(&afu->afu_map->global.regs.afu_config); 1804c21e0bbfSMatthew R. Ochs reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN; 1805c21e0bbfSMatthew R. Ochs /* enable all auto retry options and control endianness */ 1806c21e0bbfSMatthew R. Ochs /* leave others at default: */ 1807c21e0bbfSMatthew R. Ochs /* CTX_CAP write protected, mbox_r does not clear on read and */ 1808c21e0bbfSMatthew R. Ochs /* checker on if dual afu */ 1809c21e0bbfSMatthew R. Ochs writeq_be(reg, &afu->afu_map->global.regs.afu_config); 1810c21e0bbfSMatthew R. Ochs 1811f15fbf8dSMatthew R. Ochs /* Global port select: select either port */ 1812c21e0bbfSMatthew R. Ochs if (afu->internal_lun) { 1813f15fbf8dSMatthew R. Ochs /* Only use port 0 */ 1814c21e0bbfSMatthew R. Ochs writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel); 181578ae028eSMatthew R. Ochs num_ports = 0; 1816c21e0bbfSMatthew R. Ochs } else { 18178fa4f177SMatthew R. 
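/*
 * Worked example, assuming PORT_MASK(n) yields the low n bits set:
 * with two FC ports, PORT_MASK(2) == 0x3 selects ports 0 and 1 in the
 * global port-selection register.
 */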
Ochs writeq_be(PORT_MASK(cfg->num_fc_ports), 18188fa4f177SMatthew R. Ochs &afu->afu_map->global.regs.afu_port_sel); 181978ae028eSMatthew R. Ochs num_ports = cfg->num_fc_ports; 1820c21e0bbfSMatthew R. Ochs } 1821c21e0bbfSMatthew R. Ochs 1822c21e0bbfSMatthew R. Ochs for (i = 0; i < num_ports; i++) { 18230aa14887SMatthew R. Ochs fc_port_regs = get_fc_port_regs(cfg, i); 18240aa14887SMatthew R. Ochs 1825f15fbf8dSMatthew R. Ochs /* Unmask all errors (but they are still masked at AFU) */ 18260aa14887SMatthew R. Ochs writeq_be(0, &fc_port_regs[FC_ERRMSK / 8]); 1827f15fbf8dSMatthew R. Ochs /* Clear CRC error cnt & set a threshold */ 18280aa14887SMatthew R. Ochs (void)readq_be(&fc_port_regs[FC_CNT_CRCERR / 8]); 18290aa14887SMatthew R. Ochs writeq_be(MC_CRC_THRESH, &fc_port_regs[FC_CRC_THRESH / 8]); 1830c21e0bbfSMatthew R. Ochs 1831f15fbf8dSMatthew R. Ochs /* Set WWPNs. If already programmed, wwpn[i] is 0 */ 1832f8013261SMatthew R. Ochs if (wwpn[i] != 0) 18330aa14887SMatthew R. Ochs afu_set_wwpn(afu, i, &fc_port_regs[0], wwpn[i]); 1834c21e0bbfSMatthew R. Ochs /* Programming WWPN back to back causes additional 1835c21e0bbfSMatthew R. Ochs * offline/online transitions and a PLOGI 1836c21e0bbfSMatthew R. Ochs */ 1837c21e0bbfSMatthew R. Ochs msleep(100); 1838c21e0bbfSMatthew R. Ochs } 1839c21e0bbfSMatthew R. Ochs 1840d44af4b0SUma Krishnan if (afu_is_ocxl_lisn(afu)) { 1841d44af4b0SUma Krishnan /* Set up the LISN effective address for each master */ 1842d44af4b0SUma Krishnan for (i = 0; i < afu->num_hwqs; i++) { 1843d44af4b0SUma Krishnan hwq = get_hwq(afu, i); 1844d44af4b0SUma Krishnan ctx = hwq->ctx_cookie; 1845d44af4b0SUma Krishnan 1846d44af4b0SUma Krishnan for (j = 0; j < hwq->num_irqs; j++) { 1847d44af4b0SUma Krishnan reg = cfg->ops->get_irq_objhndl(ctx, j); 1848d44af4b0SUma Krishnan writeq_be(reg, &hwq->ctrl_map->lisn_ea[j]); 1849d44af4b0SUma Krishnan } 1850d44af4b0SUma Krishnan 1851d44af4b0SUma Krishnan reg = hwq->ctx_hndl; 1852d44af4b0SUma Krishnan writeq_be(SISL_LISN_PASID(reg, reg), 1853d44af4b0SUma Krishnan &hwq->ctrl_map->lisn_pasid[0]); 1854d44af4b0SUma Krishnan writeq_be(SISL_LISN_PASID(0UL, reg), 1855d44af4b0SUma Krishnan &hwq->ctrl_map->lisn_pasid[1]); 1856d44af4b0SUma Krishnan } 1857d44af4b0SUma Krishnan } 1858d44af4b0SUma Krishnan 1859f15fbf8dSMatthew R. Ochs /* Set up master's own CTX_CAP to allow real mode, host translation */ 1860f15fbf8dSMatthew R. Ochs /* tables, afu cmds and read/write GSCSI cmds. */ 1861c21e0bbfSMatthew R. Ochs /* First, unlock ctx_cap write by reading mbox */ 18623065267aSMatthew R. Ochs for (i = 0; i < afu->num_hwqs; i++) { 1863bfc0bab1SUma Krishnan hwq = get_hwq(afu, i); 1864bfc0bab1SUma Krishnan 1865bfc0bab1SUma Krishnan (void)readq_be(&hwq->ctrl_map->mbox_r); /* unlock ctx_cap */ 1866c21e0bbfSMatthew R. Ochs writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE | 1867c21e0bbfSMatthew R. Ochs SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD | 1868c21e0bbfSMatthew R. Ochs SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD), 1869bfc0bab1SUma Krishnan &hwq->ctrl_map->ctx_cap); 1870bfc0bab1SUma Krishnan } 18713223c01aSMatthew R. Ochs 18723223c01aSMatthew R. Ochs /* 18733223c01aSMatthew R. Ochs * Determine write-same unmap support for host by evaluating the unmap 18743223c01aSMatthew R. Ochs * sector support bit of the context control register associated with 18753223c01aSMatthew R. Ochs * the primary hardware queue. Note that while this status is reflected 18763223c01aSMatthew R. Ochs * in a context register, the outcome can be assumed to be host-wide. 
18773223c01aSMatthew R. Ochs */ 18783223c01aSMatthew R. Ochs hwq = get_hwq(afu, PRIMARY_HWQ); 18793223c01aSMatthew R. Ochs reg = readq_be(&hwq->host_map->ctx_ctrl); 18803223c01aSMatthew R. Ochs if (reg & SISL_CTX_CTRL_UNMAP_SECTOR) 18813223c01aSMatthew R. Ochs cfg->ws_unmap = true; 18823223c01aSMatthew R. Ochs 1883f15fbf8dSMatthew R. Ochs /* Initialize heartbeat */ 1884c21e0bbfSMatthew R. Ochs afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb); 1885c21e0bbfSMatthew R. Ochs out: 1886c21e0bbfSMatthew R. Ochs return rc; 1887c21e0bbfSMatthew R. Ochs } 1888c21e0bbfSMatthew R. Ochs 1889c21e0bbfSMatthew R. Ochs /** 1890c21e0bbfSMatthew R. Ochs * start_afu() - initializes and starts the AFU 18911284fb0cSMatthew R. Ochs * @cfg: Internal structure associated with the host. 1892c21e0bbfSMatthew R. Ochs */ 1893c21e0bbfSMatthew R. Ochs static int start_afu(struct cxlflash_cfg *cfg) 1894c21e0bbfSMatthew R. Ochs { 1895c21e0bbfSMatthew R. Ochs struct afu *afu = cfg->afu; 1896fb67d44dSMatthew R. Ochs struct device *dev = &cfg->dev->dev; 1897bfc0bab1SUma Krishnan struct hwq *hwq; 1898c21e0bbfSMatthew R. Ochs int rc = 0; 1899bfc0bab1SUma Krishnan int i; 1900c21e0bbfSMatthew R. Ochs 1901c21e0bbfSMatthew R. Ochs init_pcr(cfg); 1902c21e0bbfSMatthew R. Ochs 1903bfc0bab1SUma Krishnan /* Initialize each HWQ */ 19043065267aSMatthew R. Ochs for (i = 0; i < afu->num_hwqs; i++) { 1905bfc0bab1SUma Krishnan hwq = get_hwq(afu, i); 1906bfc0bab1SUma Krishnan 1907bfc0bab1SUma Krishnan /* After an AFU reset, RRQ entries are stale, clear them */ 1908bfc0bab1SUma Krishnan memset(&hwq->rrq_entry, 0, sizeof(hwq->rrq_entry)); 1909bfc0bab1SUma Krishnan 1910bfc0bab1SUma Krishnan /* Initialize RRQ pointers */ 1911bfc0bab1SUma Krishnan hwq->hrrq_start = &hwq->rrq_entry[0]; 1912bfc0bab1SUma Krishnan hwq->hrrq_end = &hwq->rrq_entry[NUM_RRQ_ENTRY - 1]; 1913bfc0bab1SUma Krishnan hwq->hrrq_curr = hwq->hrrq_start; 1914bfc0bab1SUma Krishnan hwq->toggle = 1; 191566ea9bccSUma Krishnan 191666ea9bccSUma Krishnan /* Initialize spin locks */ 1917bfc0bab1SUma Krishnan spin_lock_init(&hwq->hrrq_slock); 191866ea9bccSUma Krishnan spin_lock_init(&hwq->hsq_slock); 1919c21e0bbfSMatthew R. Ochs 1920696d0b0cSMatthew R. Ochs /* Initialize SQ */ 1921696d0b0cSMatthew R. Ochs if (afu_is_sq_cmd_mode(afu)) { 1922bfc0bab1SUma Krishnan memset(&hwq->sq, 0, sizeof(hwq->sq)); 1923bfc0bab1SUma Krishnan hwq->hsq_start = &hwq->sq[0]; 1924bfc0bab1SUma Krishnan hwq->hsq_end = &hwq->sq[NUM_SQ_ENTRY - 1]; 1925bfc0bab1SUma Krishnan hwq->hsq_curr = hwq->hsq_start; 1926696d0b0cSMatthew R. Ochs 1927bfc0bab1SUma Krishnan atomic_set(&hwq->hsq_credits, NUM_SQ_ENTRY - 1); 1928696d0b0cSMatthew R. Ochs } 1929696d0b0cSMatthew R. Ochs 1930cba06e6dSMatthew R. Ochs /* Initialize IRQ poll */ 1931cba06e6dSMatthew R. Ochs if (afu_is_irqpoll_enabled(afu)) 1932bfc0bab1SUma Krishnan irq_poll_init(&hwq->irqpoll, afu->irqpoll_weight, 1933cba06e6dSMatthew R. Ochs cxlflash_irqpoll); 1934cba06e6dSMatthew R. Ochs 1935bfc0bab1SUma Krishnan } 1936bfc0bab1SUma Krishnan 1937c21e0bbfSMatthew R. Ochs rc = init_global(cfg); 1938c21e0bbfSMatthew R. Ochs 1939fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 1940c21e0bbfSMatthew R. Ochs return rc; 1941c21e0bbfSMatthew R. Ochs } 1942c21e0bbfSMatthew R. Ochs 1943c21e0bbfSMatthew R. Ochs /** 19449526f360SManoj N. Kumar * init_intr() - setup interrupt handlers for the master context 19451284fb0cSMatthew R. Ochs * @cfg: Internal structure associated with the host. 1946bfc0bab1SUma Krishnan * @hwq: Hardware queue to initialize. 
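* Three interrupt sources are mapped for the primary hardware queue
* (sync error, RRQ updated, async error); secondary queues map only the
* first two, matching the num_irqs value established by the caller in
* init_mc().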
1947c21e0bbfSMatthew R. Ochs * 19481284fb0cSMatthew R. Ochs * Return: UNDO_NOOP on success, or the undo level to rewind on failure 1949c21e0bbfSMatthew R. Ochs */ 19509526f360SManoj N. Kumar static enum undo_level init_intr(struct cxlflash_cfg *cfg, 1951bfc0bab1SUma Krishnan struct hwq *hwq) 1952c21e0bbfSMatthew R. Ochs { 19539526f360SManoj N. Kumar struct device *dev = &cfg->dev->dev; 1954b070545dSUma Krishnan void *ctx = hwq->ctx_cookie; 1955c21e0bbfSMatthew R. Ochs int rc = 0; 19569526f360SManoj N. Kumar enum undo_level level = UNDO_NOOP; 1957bfc0bab1SUma Krishnan bool is_primary_hwq = (hwq->index == PRIMARY_HWQ); 1958e11e0ff8SUma Krishnan int num_irqs = hwq->num_irqs; 1959c21e0bbfSMatthew R. Ochs 196025b8e08eSMatthew R. Ochs rc = cfg->ops->allocate_afu_irqs(ctx, num_irqs); 1961c21e0bbfSMatthew R. Ochs if (unlikely(rc)) { 1962fb67d44dSMatthew R. Ochs dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n", 1963c21e0bbfSMatthew R. Ochs __func__, rc); 19649526f360SManoj N. Kumar level = UNDO_NOOP; 1965c21e0bbfSMatthew R. Ochs goto out; 1966c21e0bbfSMatthew R. Ochs } 1967c21e0bbfSMatthew R. Ochs 196825b8e08eSMatthew R. Ochs rc = cfg->ops->map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq, 1969c21e0bbfSMatthew R. Ochs "SISL_MSI_SYNC_ERROR"); 1970c21e0bbfSMatthew R. Ochs if (unlikely(rc <= 0)) { 1971fb67d44dSMatthew R. Ochs dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__); 1972c21e0bbfSMatthew R. Ochs level = FREE_IRQ; 1973c21e0bbfSMatthew R. Ochs goto out; 1974c21e0bbfSMatthew R. Ochs } 1975c21e0bbfSMatthew R. Ochs 197625b8e08eSMatthew R. Ochs rc = cfg->ops->map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq, 1977c21e0bbfSMatthew R. Ochs "SISL_MSI_RRQ_UPDATED"); 1978c21e0bbfSMatthew R. Ochs if (unlikely(rc <= 0)) { 1979fb67d44dSMatthew R. Ochs dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__); 1980c21e0bbfSMatthew R. Ochs level = UNMAP_ONE; 1981c21e0bbfSMatthew R. Ochs goto out; 1982c21e0bbfSMatthew R. Ochs } 1983c21e0bbfSMatthew R. Ochs 1984bfc0bab1SUma Krishnan /* SISL_MSI_ASYNC_ERROR is set up only for the primary HWQ */ 1985bfc0bab1SUma Krishnan if (!is_primary_hwq) 1986bfc0bab1SUma Krishnan goto out; 1987bfc0bab1SUma Krishnan 198825b8e08eSMatthew R. Ochs rc = cfg->ops->map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq, 1989c21e0bbfSMatthew R. Ochs "SISL_MSI_ASYNC_ERROR"); 1990c21e0bbfSMatthew R. Ochs if (unlikely(rc <= 0)) { 1991fb67d44dSMatthew R. Ochs dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__); 1992c21e0bbfSMatthew R. Ochs level = UNMAP_TWO; 1993c21e0bbfSMatthew R. Ochs goto out; 1994c21e0bbfSMatthew R. Ochs } 19959526f360SManoj N. Kumar out: 19969526f360SManoj N. Kumar return level; 19979526f360SManoj N. Kumar } 1998c21e0bbfSMatthew R. Ochs 19999526f360SManoj N. Kumar /** 20009526f360SManoj N. Kumar * init_mc() - create and register as the master context 20019526f360SManoj N. Kumar * @cfg: Internal structure associated with the host. 2002bfc0bab1SUma Krishnan * @index: HWQ Index of the master context. 20039526f360SManoj N. Kumar * 20049526f360SManoj N. Kumar * Return: 0 on success, -errno on failure 20059526f360SManoj N. Kumar */ 2006bfc0bab1SUma Krishnan static int init_mc(struct cxlflash_cfg *cfg, u32 index) 20079526f360SManoj N. Kumar { 2008b070545dSUma Krishnan void *ctx; 20099526f360SManoj N. Kumar struct device *dev = &cfg->dev->dev; 2010bfc0bab1SUma Krishnan struct hwq *hwq = get_hwq(cfg->afu, index); 20119526f360SManoj N. Kumar int rc = 0; 2012e11e0ff8SUma Krishnan int num_irqs; 20139526f360SManoj N. Kumar enum undo_level level; 20149526f360SManoj N.
Kumar 2015bfc0bab1SUma Krishnan hwq->afu = cfg->afu; 2016bfc0bab1SUma Krishnan hwq->index = index; 2017a002bf83SUma Krishnan INIT_LIST_HEAD(&hwq->pending_cmds); 2018bfc0bab1SUma Krishnan 2019e11e0ff8SUma Krishnan if (index == PRIMARY_HWQ) { 202025b8e08eSMatthew R. Ochs ctx = cfg->ops->get_context(cfg->dev, cfg->afu_cookie); 2021e11e0ff8SUma Krishnan num_irqs = 3; 2022e11e0ff8SUma Krishnan } else { 202325b8e08eSMatthew R. Ochs ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie); 2024e11e0ff8SUma Krishnan num_irqs = 2; 2025e11e0ff8SUma Krishnan } 20260df69c60SUma Krishnan if (IS_ERR_OR_NULL(ctx)) { 20279526f360SManoj N. Kumar rc = -ENOMEM; 2028bfc0bab1SUma Krishnan goto err1; 20299526f360SManoj N. Kumar } 2030bfc0bab1SUma Krishnan 2031b070545dSUma Krishnan WARN_ON(hwq->ctx_cookie); 2032b070545dSUma Krishnan hwq->ctx_cookie = ctx; 2033e11e0ff8SUma Krishnan hwq->num_irqs = num_irqs; 20349526f360SManoj N. Kumar 20359526f360SManoj N. Kumar /* Set it up as a master with the CXL */ 203625b8e08eSMatthew R. Ochs cfg->ops->set_master(ctx); 20379526f360SManoj N. Kumar 2038bfc0bab1SUma Krishnan /* Reset AFU when initializing primary context */ 2039bfc0bab1SUma Krishnan if (index == PRIMARY_HWQ) { 204025b8e08eSMatthew R. Ochs rc = cfg->ops->afu_reset(ctx); 20419526f360SManoj N. Kumar if (unlikely(rc)) { 2042bfc0bab1SUma Krishnan dev_err(dev, "%s: AFU reset failed rc=%d\n", 2043bfc0bab1SUma Krishnan __func__, rc); 2044bfc0bab1SUma Krishnan goto err1; 2045bfc0bab1SUma Krishnan } 20469526f360SManoj N. Kumar } 20479526f360SManoj N. Kumar 2048bfc0bab1SUma Krishnan level = init_intr(cfg, hwq); 20499526f360SManoj N. Kumar if (unlikely(level)) { 2050fb67d44dSMatthew R. Ochs dev_err(dev, "%s: interrupt init failed rc=%d\n", __func__, rc); 2051bfc0bab1SUma Krishnan goto err2; 20529526f360SManoj N. Kumar } 2053c21e0bbfSMatthew R. Ochs 205425b8e08eSMatthew R. Ochs /* Finally, activate the context by starting it */ 205525b8e08eSMatthew R. Ochs rc = cfg->ops->start_context(hwq->ctx_cookie); 2056c21e0bbfSMatthew R. Ochs if (unlikely(rc)) { 2057c21e0bbfSMatthew R. Ochs dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc); 2058c21e0bbfSMatthew R. Ochs level = UNMAP_THREE; 2059bfc0bab1SUma Krishnan goto err2; 2060c21e0bbfSMatthew R. Ochs } 2061bfc0bab1SUma Krishnan 2062bfc0bab1SUma Krishnan out: 2063fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 2064c21e0bbfSMatthew R. Ochs return rc; 2065bfc0bab1SUma Krishnan err2: 2066bfc0bab1SUma Krishnan term_intr(cfg, level, index); 2067bfc0bab1SUma Krishnan if (index != PRIMARY_HWQ) 206825b8e08eSMatthew R. Ochs cfg->ops->release_context(ctx); 2069bfc0bab1SUma Krishnan err1: 2070b070545dSUma Krishnan hwq->ctx_cookie = NULL; 2071bfc0bab1SUma Krishnan goto out; 2072c21e0bbfSMatthew R. Ochs } 2073c21e0bbfSMatthew R. Ochs 2074c21e0bbfSMatthew R. Ochs /** 207556518072SMatthew R. Ochs * get_num_afu_ports() - determines and configures the number of AFU ports 207656518072SMatthew R. Ochs * @cfg: Internal structure associated with the host. 207756518072SMatthew R. Ochs * 207856518072SMatthew R. Ochs * This routine determines the number of AFU ports by converting the global 207956518072SMatthew R. Ochs * port selection mask. The converted value is only valid following an AFU 208056518072SMatthew R. Ochs * reset (explicit or power-on). This routine must be invoked shortly after 208156518072SMatthew R. Ochs * mapping as other routines are dependent on the number of ports during the 208256518072SMatthew R. Ochs * initialization sequence. 
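* For example, a global port mask of 0x3 read after reset converts to
* min(ilog2(0x3) + 1, MAX_FC_PORTS) = 2 ports.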
208356518072SMatthew R. Ochs * 208456518072SMatthew R. Ochs * To support legacy AFUs that might not have reflected an initial global 208556518072SMatthew R. Ochs * port mask (value read is 0), default to the number of ports originally 208656518072SMatthew R. Ochs * supported by the cxlflash driver (2) before hardware with other port 208756518072SMatthew R. Ochs * offerings was introduced. 208856518072SMatthew R. Ochs */ 208956518072SMatthew R. Ochs static void get_num_afu_ports(struct cxlflash_cfg *cfg) 209056518072SMatthew R. Ochs { 209156518072SMatthew R. Ochs struct afu *afu = cfg->afu; 209256518072SMatthew R. Ochs struct device *dev = &cfg->dev->dev; 209356518072SMatthew R. Ochs u64 port_mask; 209456518072SMatthew R. Ochs int num_fc_ports = LEGACY_FC_PORTS; 209556518072SMatthew R. Ochs 209656518072SMatthew R. Ochs port_mask = readq_be(&afu->afu_map->global.regs.afu_port_sel); 209756518072SMatthew R. Ochs if (port_mask != 0ULL) 209856518072SMatthew R. Ochs num_fc_ports = min(ilog2(port_mask) + 1, MAX_FC_PORTS); 209956518072SMatthew R. Ochs 210056518072SMatthew R. Ochs dev_dbg(dev, "%s: port_mask=%016llx num_fc_ports=%d\n", 210156518072SMatthew R. Ochs __func__, port_mask, num_fc_ports); 210256518072SMatthew R. Ochs 210356518072SMatthew R. Ochs cfg->num_fc_ports = num_fc_ports; 210456518072SMatthew R. Ochs cfg->host->max_channel = PORTNUM2CHAN(num_fc_ports); 210556518072SMatthew R. Ochs } 210656518072SMatthew R. Ochs 210756518072SMatthew R. Ochs /** 2108c21e0bbfSMatthew R. Ochs * init_afu() - setup as master context and start AFU 21091284fb0cSMatthew R. Ochs * @cfg: Internal structure associated with the host. 2110c21e0bbfSMatthew R. Ochs * 2111c21e0bbfSMatthew R. Ochs * This routine is a higher level of control for configuring the 2112c21e0bbfSMatthew R. Ochs * AFU on probe and reset paths. 2113c21e0bbfSMatthew R. Ochs * 21141284fb0cSMatthew R. Ochs * Return: 0 on success, -errno on failure 2115c21e0bbfSMatthew R. Ochs */ 2116c21e0bbfSMatthew R. Ochs static int init_afu(struct cxlflash_cfg *cfg) 2117c21e0bbfSMatthew R. Ochs { 2118c21e0bbfSMatthew R. Ochs u64 reg; 2119c21e0bbfSMatthew R. Ochs int rc = 0; 2120c21e0bbfSMatthew R. Ochs struct afu *afu = cfg->afu; 2121c21e0bbfSMatthew R. Ochs struct device *dev = &cfg->dev->dev; 2122bfc0bab1SUma Krishnan struct hwq *hwq; 2123bfc0bab1SUma Krishnan int i; 2124c21e0bbfSMatthew R. Ochs 212525b8e08eSMatthew R. Ochs cfg->ops->perst_reloads_same_image(cfg->afu_cookie, true); 21265cdac81aSMatthew R. Ochs 21273065267aSMatthew R. Ochs afu->num_hwqs = afu->desired_hwqs; 21283065267aSMatthew R. Ochs for (i = 0; i < afu->num_hwqs; i++) { 2129bfc0bab1SUma Krishnan rc = init_mc(cfg, i); 2130c21e0bbfSMatthew R. Ochs if (rc) { 2131bfc0bab1SUma Krishnan dev_err(dev, "%s: init_mc failed rc=%d index=%d\n", 2132bfc0bab1SUma Krishnan __func__, rc, i); 2133bfc0bab1SUma Krishnan goto err1; 2134bfc0bab1SUma Krishnan } 2135c21e0bbfSMatthew R. Ochs } 2136c21e0bbfSMatthew R. Ochs 2137bfc0bab1SUma Krishnan /* Map the entire MMIO space of the AFU using the first context */ 2138bfc0bab1SUma Krishnan hwq = get_hwq(afu, PRIMARY_HWQ); 213925b8e08eSMatthew R. Ochs afu->afu_map = cfg->ops->psa_map(hwq->ctx_cookie); 2140c21e0bbfSMatthew R. Ochs if (!afu->afu_map) { 214125b8e08eSMatthew R. Ochs dev_err(dev, "%s: psa_map failed\n", __func__); 2142ee3491baSMatthew R. Ochs rc = -ENOMEM; 2143c21e0bbfSMatthew R. Ochs goto err1; 2144c21e0bbfSMatthew R. Ochs } 2145c21e0bbfSMatthew R. Ochs 2146e5ce067bSMatthew R. 
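/*
 * From here on, MMIO access to the AFU (the global registers as well
 * as the per-context host/ctrl maps used by the helpers above and
 * below) is rooted in this single problem-state area mapping.
 */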
Ochs /* No byte reverse on reading afu_version or string will be backwards */ 2147e5ce067bSMatthew R. Ochs reg = readq(&afu->afu_map->global.regs.afu_version); 2148e5ce067bSMatthew R. Ochs memcpy(afu->version, &reg, sizeof(reg)); 2149c21e0bbfSMatthew R. Ochs afu->interface_version = 2150c21e0bbfSMatthew R. Ochs readq_be(&afu->afu_map->global.regs.interface_version); 2151e5ce067bSMatthew R. Ochs if ((afu->interface_version + 1) == 0) { 2152fb67d44dSMatthew R. Ochs dev_err(dev, "Back level AFU, please upgrade. AFU version %s " 2153fb67d44dSMatthew R. Ochs "interface version %016llx\n", afu->version, 2154e5ce067bSMatthew R. Ochs afu->interface_version); 2155e5ce067bSMatthew R. Ochs rc = -EINVAL; 21560df5bef7SUma Krishnan goto err1; 2157ee3491baSMatthew R. Ochs } 2158ee3491baSMatthew R. Ochs 2159696d0b0cSMatthew R. Ochs if (afu_is_sq_cmd_mode(afu)) { 2160696d0b0cSMatthew R. Ochs afu->send_cmd = send_cmd_sq; 2161696d0b0cSMatthew R. Ochs afu->context_reset = context_reset_sq; 2162696d0b0cSMatthew R. Ochs } else { 216348b4be36SMatthew R. Ochs afu->send_cmd = send_cmd_ioarrin; 216448b4be36SMatthew R. Ochs afu->context_reset = context_reset_ioarrin; 2165696d0b0cSMatthew R. Ochs } 216648b4be36SMatthew R. Ochs 2167fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__, 2168ee3491baSMatthew R. Ochs afu->version, afu->interface_version); 2169c21e0bbfSMatthew R. Ochs 217056518072SMatthew R. Ochs get_num_afu_ports(cfg); 217156518072SMatthew R. Ochs 2172c21e0bbfSMatthew R. Ochs rc = start_afu(cfg); 2173c21e0bbfSMatthew R. Ochs if (rc) { 2174fb67d44dSMatthew R. Ochs dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc); 21750df5bef7SUma Krishnan goto err1; 2176c21e0bbfSMatthew R. Ochs } 2177c21e0bbfSMatthew R. Ochs 2178c21e0bbfSMatthew R. Ochs afu_err_intr_init(cfg->afu); 21793065267aSMatthew R. Ochs for (i = 0; i < afu->num_hwqs; i++) { 2180bfc0bab1SUma Krishnan hwq = get_hwq(afu, i); 2181bfc0bab1SUma Krishnan 2182bfc0bab1SUma Krishnan hwq->room = readq_be(&hwq->host_map->cmd_room); 2183bfc0bab1SUma Krishnan } 2184c21e0bbfSMatthew R. Ochs 21852cb79266SMatthew R. Ochs /* Restore the LUN mappings */ 21862cb79266SMatthew R. Ochs cxlflash_restore_luntable(cfg); 2187ee3491baSMatthew R. Ochs out: 2188fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 2189c21e0bbfSMatthew R. Ochs return rc; 2190ee3491baSMatthew R. Ochs 2191ee3491baSMatthew R. Ochs err1: 21923065267aSMatthew R. Ochs for (i = afu->num_hwqs - 1; i >= 0; i--) { 2193bfc0bab1SUma Krishnan term_intr(cfg, UNMAP_THREE, i); 2194bfc0bab1SUma Krishnan term_mc(cfg, i); 2195bfc0bab1SUma Krishnan } 2196ee3491baSMatthew R. Ochs goto out; 2197c21e0bbfSMatthew R. Ochs } 2198c21e0bbfSMatthew R. Ochs 2199c21e0bbfSMatthew R. Ochs /** 22000b09e711SUma Krishnan * afu_reset() - resets the AFU 22010b09e711SUma Krishnan * @cfg: Internal structure associated with the host. 22020b09e711SUma Krishnan * 22030b09e711SUma Krishnan * Return: 0 on success, -errno on failure 22040b09e711SUma Krishnan */ 22050b09e711SUma Krishnan static int afu_reset(struct cxlflash_cfg *cfg) 22060b09e711SUma Krishnan { 22070b09e711SUma Krishnan struct device *dev = &cfg->dev->dev; 22080b09e711SUma Krishnan int rc = 0; 22090b09e711SUma Krishnan 22100b09e711SUma Krishnan /* Stop the context before the reset.
Since the context is 22110b09e711SUma Krishnan * no longer available restart it after the reset is complete 22120b09e711SUma Krishnan */ 22130b09e711SUma Krishnan term_afu(cfg); 22140b09e711SUma Krishnan 22150b09e711SUma Krishnan rc = init_afu(cfg); 22160b09e711SUma Krishnan 22170b09e711SUma Krishnan dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 22180b09e711SUma Krishnan return rc; 22190b09e711SUma Krishnan } 22200b09e711SUma Krishnan 22210b09e711SUma Krishnan /** 22220b09e711SUma Krishnan * drain_ioctls() - wait until all currently executing ioctls have completed 22230b09e711SUma Krishnan * @cfg: Internal structure associated with the host. 22240b09e711SUma Krishnan * 22250b09e711SUma Krishnan * Obtain write access to read/write semaphore that wraps ioctl 22260b09e711SUma Krishnan * handling to 'drain' ioctls currently executing. 22270b09e711SUma Krishnan */ 22280b09e711SUma Krishnan static void drain_ioctls(struct cxlflash_cfg *cfg) 22290b09e711SUma Krishnan { 22300b09e711SUma Krishnan down_write(&cfg->ioctl_rwsem); 22310b09e711SUma Krishnan up_write(&cfg->ioctl_rwsem); 22320b09e711SUma Krishnan } 22330b09e711SUma Krishnan 22340b09e711SUma Krishnan /** 22350b09e711SUma Krishnan * cxlflash_async_reset_host() - asynchronous host reset handler 22360b09e711SUma Krishnan * @data: Private data provided while scheduling reset. 22370b09e711SUma Krishnan * @cookie: Cookie that can be used for checkpointing. 22380b09e711SUma Krishnan */ 22390b09e711SUma Krishnan static void cxlflash_async_reset_host(void *data, async_cookie_t cookie) 22400b09e711SUma Krishnan { 22410b09e711SUma Krishnan struct cxlflash_cfg *cfg = data; 22420b09e711SUma Krishnan struct device *dev = &cfg->dev->dev; 22430b09e711SUma Krishnan int rc = 0; 22440b09e711SUma Krishnan 22450b09e711SUma Krishnan if (cfg->state != STATE_RESET) { 22460b09e711SUma Krishnan dev_dbg(dev, "%s: Not performing a reset, state=%d\n", 22470b09e711SUma Krishnan __func__, cfg->state); 22480b09e711SUma Krishnan goto out; 22490b09e711SUma Krishnan } 22500b09e711SUma Krishnan 22510b09e711SUma Krishnan drain_ioctls(cfg); 22520b09e711SUma Krishnan cxlflash_mark_contexts_error(cfg); 22530b09e711SUma Krishnan rc = afu_reset(cfg); 22540b09e711SUma Krishnan if (rc) 22550b09e711SUma Krishnan cfg->state = STATE_FAILTERM; 22560b09e711SUma Krishnan else 22570b09e711SUma Krishnan cfg->state = STATE_NORMAL; 22580b09e711SUma Krishnan wake_up_all(&cfg->reset_waitq); 22590b09e711SUma Krishnan 22600b09e711SUma Krishnan out: 22610b09e711SUma Krishnan scsi_unblock_requests(cfg->host); 22620b09e711SUma Krishnan } 22630b09e711SUma Krishnan 22640b09e711SUma Krishnan /** 22650b09e711SUma Krishnan * cxlflash_schedule_async_reset() - schedule an asynchronous host reset 22660b09e711SUma Krishnan * @cfg: Internal structure associated with the host. 
22670b09e711SUma Krishnan */ 22680b09e711SUma Krishnan static void cxlflash_schedule_async_reset(struct cxlflash_cfg *cfg) 22690b09e711SUma Krishnan { 22700b09e711SUma Krishnan struct device *dev = &cfg->dev->dev; 22710b09e711SUma Krishnan 22720b09e711SUma Krishnan if (cfg->state != STATE_NORMAL) { 22730b09e711SUma Krishnan dev_dbg(dev, "%s: Not performing reset state=%d\n", 22740b09e711SUma Krishnan __func__, cfg->state); 22750b09e711SUma Krishnan return; 22760b09e711SUma Krishnan } 22770b09e711SUma Krishnan 22780b09e711SUma Krishnan cfg->state = STATE_RESET; 22790b09e711SUma Krishnan scsi_block_requests(cfg->host); 22800b09e711SUma Krishnan cfg->async_reset_cookie = async_schedule(cxlflash_async_reset_host, 22810b09e711SUma Krishnan cfg); 22820b09e711SUma Krishnan } 22830b09e711SUma Krishnan 22840b09e711SUma Krishnan /** 2285cf243027SMatthew R. Ochs * send_afu_cmd() - builds and sends an internal AFU command 2286c21e0bbfSMatthew R. Ochs * @afu: AFU associated with the host. 2287cf243027SMatthew R. Ochs * @rcb: Pre-populated IOARCB describing command to send. 2288c21e0bbfSMatthew R. Ochs * 2289cf243027SMatthew R. Ochs * The AFU can only take one internal AFU command at a time. This limitation is 2290cf243027SMatthew R. Ochs * enforced by using a mutex to provide exclusive access to the AFU during the 2291cf243027SMatthew R. Ochs * operation. This design point requires calling threads to not be on interrupt 2292cf243027SMatthew R. Ochs * context due to the possibility of sleeping during concurrent AFU operations. 2293c21e0bbfSMatthew R. Ochs * 2294cf243027SMatthew R. Ochs * The command status is optionally passed back to the caller when the caller 2295cf243027SMatthew R. Ochs * populates the IOASA field of the IOARCB with a pointer to an IOASA structure. 22965cdac81aSMatthew R. Ochs * 2297c21e0bbfSMatthew R. Ochs * Return: 2298539d890cSUma Krishnan * 0 on success, -errno on failure 2299c21e0bbfSMatthew R. Ochs */ 2300cf243027SMatthew R. Ochs static int send_afu_cmd(struct afu *afu, struct sisl_ioarcb *rcb) 2301c21e0bbfSMatthew R. Ochs { 23025cdac81aSMatthew R. Ochs struct cxlflash_cfg *cfg = afu->parent; 23034392ba49SMatthew R. Ochs struct device *dev = &cfg->dev->dev; 2304c21e0bbfSMatthew R. Ochs struct afu_cmd *cmd = NULL; 2305bfc0bab1SUma Krishnan struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ); 23069a597cd4SUma Krishnan ulong lock_flags; 2307350bb478SMatthew R. Ochs char *buf = NULL; 2308c21e0bbfSMatthew R. Ochs int rc = 0; 2309a96851d3SUma Krishnan int nretry = 0; 2310c21e0bbfSMatthew R. Ochs static DEFINE_MUTEX(sync_active); 2311c21e0bbfSMatthew R. Ochs 23125cdac81aSMatthew R. Ochs if (cfg->state != STATE_NORMAL) { 2313fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: Sync not required state=%u\n", 2314fb67d44dSMatthew R. Ochs __func__, cfg->state); 23155cdac81aSMatthew R. Ochs return 0; 23165cdac81aSMatthew R. Ochs } 23175cdac81aSMatthew R. Ochs 2318c21e0bbfSMatthew R. Ochs mutex_lock(&sync_active); 2319de01283bSMatthew R. Ochs atomic_inc(&afu->cmds_active); 2320a1ea04b3SUma Krishnan buf = kmalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL); 2321350bb478SMatthew R. Ochs if (unlikely(!buf)) { 2322350bb478SMatthew R. Ochs dev_err(dev, "%s: no memory for command\n", __func__); 2323539d890cSUma Krishnan rc = -ENOMEM; 2324c21e0bbfSMatthew R. Ochs goto out; 2325c21e0bbfSMatthew R. Ochs } 2326c21e0bbfSMatthew R. Ochs 2327350bb478SMatthew R. 
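/*
 * [Editor's aside: illustrative] The odd-looking allocation size above
 * pairs with the PTR_ALIGN() below. kmalloc() makes no guarantee about
 * __alignof__(*cmd), so the buffer is padded by __alignof__(*cmd) - 1
 * bytes and the pointer is rounded up to the next aligned boundary.
 * Assuming, for example, a 64-byte alignment requirement:
 *
 *	buf                = 0x...1004
 *	PTR_ALIGN(buf, 64) = 0x...1040	(still within the padded area)
 */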
Ochs cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd)); 2328a96851d3SUma Krishnan 2329a96851d3SUma Krishnan retry: 2330a1ea04b3SUma Krishnan memset(cmd, 0, sizeof(*cmd)); 2331cf243027SMatthew R. Ochs memcpy(&cmd->rcb, rcb, sizeof(*rcb)); 2332a1ea04b3SUma Krishnan INIT_LIST_HEAD(&cmd->queue); 2333350bb478SMatthew R. Ochs init_completion(&cmd->cevent); 2334350bb478SMatthew R. Ochs cmd->parent = afu; 2335bfc0bab1SUma Krishnan cmd->hwq_index = hwq->index; 2336bfc0bab1SUma Krishnan cmd->rcb.ctx_id = hwq->ctx_hndl; 2337c21e0bbfSMatthew R. Ochs 2338cf243027SMatthew R. Ochs dev_dbg(dev, "%s: afu=%p cmd=%p type=%02x nretry=%d\n", 2339cf243027SMatthew R. Ochs __func__, afu, cmd, cmd->rcb.cdb[0], nretry); 2340c21e0bbfSMatthew R. Ochs 234148b4be36SMatthew R. Ochs rc = afu->send_cmd(afu, cmd); 2342539d890cSUma Krishnan if (unlikely(rc)) { 2343539d890cSUma Krishnan rc = -ENOBUFS; 2344c21e0bbfSMatthew R. Ochs goto out; 2345539d890cSUma Krishnan } 2346c21e0bbfSMatthew R. Ochs 23479ba848acSMatthew R. Ochs rc = wait_resp(afu, cmd); 2348a1ea04b3SUma Krishnan switch (rc) { 2349a1ea04b3SUma Krishnan case -ETIMEDOUT: 2350a96851d3SUma Krishnan rc = afu->context_reset(hwq); 2351a1ea04b3SUma Krishnan if (rc) { 23529a597cd4SUma Krishnan /* Delete the command from pending_cmds list */ 23539a597cd4SUma Krishnan spin_lock_irqsave(&hwq->hsq_slock, lock_flags); 23549a597cd4SUma Krishnan list_del(&cmd->list); 23559a597cd4SUma Krishnan spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags); 23569a597cd4SUma Krishnan 23570b09e711SUma Krishnan cxlflash_schedule_async_reset(cfg); 2358a1ea04b3SUma Krishnan break; 2359a1ea04b3SUma Krishnan } 2360a1ea04b3SUma Krishnan /* fall through to retry */ 2361a1ea04b3SUma Krishnan case -EAGAIN: 2362a1ea04b3SUma Krishnan if (++nretry < 2) 2363a1ea04b3SUma Krishnan goto retry; 2364a1ea04b3SUma Krishnan /* fall through to exit */ 2365a1ea04b3SUma Krishnan default: 2366a1ea04b3SUma Krishnan break; 2367a96851d3SUma Krishnan } 2368a96851d3SUma Krishnan 2369cf243027SMatthew R. Ochs if (rcb->ioasa) 2370cf243027SMatthew R. Ochs *rcb->ioasa = cmd->sa; 2371c21e0bbfSMatthew R. Ochs out: 2372de01283bSMatthew R. Ochs atomic_dec(&afu->cmds_active); 2373c21e0bbfSMatthew R. Ochs mutex_unlock(&sync_active); 2374350bb478SMatthew R. Ochs kfree(buf); 2375fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 2376c21e0bbfSMatthew R. Ochs return rc; 2377c21e0bbfSMatthew R. Ochs } 2378c21e0bbfSMatthew R. Ochs 2379c21e0bbfSMatthew R. Ochs /** 2380cf243027SMatthew R. Ochs * cxlflash_afu_sync() - builds and sends an AFU sync command 2381cf243027SMatthew R. Ochs * @afu: AFU associated with the host. 2382cf243027SMatthew R. Ochs * @ctx: Identifies context requesting sync. 2383cf243027SMatthew R. Ochs * @res: Identifies resource requesting sync. 2384cf243027SMatthew R. Ochs * @mode: Type of sync to issue (lightweight, heavyweight, global). 2385cf243027SMatthew R. Ochs * 2386cf243027SMatthew R. Ochs * AFU sync operations are only necessary and allowed when the device is 2387cf243027SMatthew R. Ochs * operating normally. When not operating normally, sync requests can occur as 2388cf243027SMatthew R. Ochs * part of cleaning up resources associated with an adapter prior to removal. 2389cf243027SMatthew R. Ochs * In this scenario, these requests are simply ignored (safe due to the AFU 2390cf243027SMatthew R. Ochs * going away). 2391cf243027SMatthew R. Ochs * 2392cf243027SMatthew R. Ochs * Return: 2393cf243027SMatthew R. Ochs * 0 on success, -errno on failure 2394cf243027SMatthew R. 
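 *
 * [Editor's note on send_afu_cmd() above: illustrative summary] A
 * command that times out first attempts a context reset; if the reset
 * succeeds the command is resent at most once more (nretry < 2), and
 * if it fails the command is unlinked from the pending list and an
 * asynchronous host reset is scheduled. An -EAGAIN from wait_resp()
 * takes the same single-retry path.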
Ochs */ 2395cf243027SMatthew R. Ochs int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx, res_hndl_t res, u8 mode) 2396cf243027SMatthew R. Ochs { 2397cf243027SMatthew R. Ochs struct cxlflash_cfg *cfg = afu->parent; 2398cf243027SMatthew R. Ochs struct device *dev = &cfg->dev->dev; 2399cf243027SMatthew R. Ochs struct sisl_ioarcb rcb = { 0 }; 2400cf243027SMatthew R. Ochs 2401cf243027SMatthew R. Ochs dev_dbg(dev, "%s: afu=%p ctx=%u res=%u mode=%u\n", 2402cf243027SMatthew R. Ochs __func__, afu, ctx, res, mode); 2403cf243027SMatthew R. Ochs 2404cf243027SMatthew R. Ochs rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD; 2405cf243027SMatthew R. Ochs rcb.msi = SISL_MSI_RRQ_UPDATED; 2406cf243027SMatthew R. Ochs rcb.timeout = MC_AFU_SYNC_TIMEOUT; 2407cf243027SMatthew R. Ochs 2408cf243027SMatthew R. Ochs rcb.cdb[0] = SISL_AFU_CMD_SYNC; 2409cf243027SMatthew R. Ochs rcb.cdb[1] = mode; 2410cf243027SMatthew R. Ochs put_unaligned_be16(ctx, &rcb.cdb[2]); 2411cf243027SMatthew R. Ochs put_unaligned_be32(res, &rcb.cdb[4]); 2412cf243027SMatthew R. Ochs 2413cf243027SMatthew R. Ochs return send_afu_cmd(afu, &rcb); 2414cf243027SMatthew R. Ochs } 2415cf243027SMatthew R. Ochs 2416cf243027SMatthew R. Ochs /** 24177c4c41f1SUma Krishnan * cxlflash_eh_abort_handler() - abort a SCSI command 24187c4c41f1SUma Krishnan * @scp: SCSI command to abort. 24197c4c41f1SUma Krishnan * 24207c4c41f1SUma Krishnan * CXL Flash devices do not support a single command abort. Reset the context 24217c4c41f1SUma Krishnan * as per SISLite specification. Flush any pending commands in the hardware 24227c4c41f1SUma Krishnan * queue before the reset. 24237c4c41f1SUma Krishnan * 24247c4c41f1SUma Krishnan * Return: SUCCESS/FAILED as defined in scsi/scsi.h 24257c4c41f1SUma Krishnan */ 24267c4c41f1SUma Krishnan static int cxlflash_eh_abort_handler(struct scsi_cmnd *scp) 24277c4c41f1SUma Krishnan { 24287c4c41f1SUma Krishnan int rc = FAILED; 24297c4c41f1SUma Krishnan struct Scsi_Host *host = scp->device->host; 24307c4c41f1SUma Krishnan struct cxlflash_cfg *cfg = shost_priv(host); 24317c4c41f1SUma Krishnan struct afu_cmd *cmd = sc_to_afuc(scp); 24327c4c41f1SUma Krishnan struct device *dev = &cfg->dev->dev; 24337c4c41f1SUma Krishnan struct afu *afu = cfg->afu; 24347c4c41f1SUma Krishnan struct hwq *hwq = get_hwq(afu, cmd->hwq_index); 24357c4c41f1SUma Krishnan 24367c4c41f1SUma Krishnan dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu " 24377c4c41f1SUma Krishnan "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no, 24387c4c41f1SUma Krishnan scp->device->channel, scp->device->id, scp->device->lun, 24397c4c41f1SUma Krishnan get_unaligned_be32(&((u32 *)scp->cmnd)[0]), 24407c4c41f1SUma Krishnan get_unaligned_be32(&((u32 *)scp->cmnd)[1]), 24417c4c41f1SUma Krishnan get_unaligned_be32(&((u32 *)scp->cmnd)[2]), 24427c4c41f1SUma Krishnan get_unaligned_be32(&((u32 *)scp->cmnd)[3])); 24437c4c41f1SUma Krishnan 24447c4c41f1SUma Krishnan /* When the state is not normal, another reset/reload is in progress. 24457c4c41f1SUma Krishnan * Return failed and the mid-layer will invoke host reset handler. 
24467c4c41f1SUma Krishnan */ 24477c4c41f1SUma Krishnan if (cfg->state != STATE_NORMAL) { 24487c4c41f1SUma Krishnan dev_dbg(dev, "%s: Invalid state for abort, state=%d\n", 24497c4c41f1SUma Krishnan __func__, cfg->state); 24507c4c41f1SUma Krishnan goto out; 24517c4c41f1SUma Krishnan } 24527c4c41f1SUma Krishnan 24537c4c41f1SUma Krishnan rc = afu->context_reset(hwq); 24547c4c41f1SUma Krishnan if (unlikely(rc)) 24557c4c41f1SUma Krishnan goto out; 24567c4c41f1SUma Krishnan 24577c4c41f1SUma Krishnan rc = SUCCESS; 24587c4c41f1SUma Krishnan 24597c4c41f1SUma Krishnan out: 24607c4c41f1SUma Krishnan dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 24617c4c41f1SUma Krishnan return rc; 24627c4c41f1SUma Krishnan } 24637c4c41f1SUma Krishnan 24647c4c41f1SUma Krishnan /** 246515305514SMatthew R. Ochs * cxlflash_eh_device_reset_handler() - reset a single LUN 246615305514SMatthew R. Ochs * @scp: SCSI command to send. 246715305514SMatthew R. Ochs * 246815305514SMatthew R. Ochs * Return: 246915305514SMatthew R. Ochs * SUCCESS as defined in scsi/scsi.h 247015305514SMatthew R. Ochs * FAILED as defined in scsi/scsi.h 247115305514SMatthew R. Ochs */ 247215305514SMatthew R. Ochs static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp) 247315305514SMatthew R. Ochs { 247415305514SMatthew R. Ochs int rc = SUCCESS; 247532abbedaSMatthew R. Ochs struct scsi_device *sdev = scp->device; 247632abbedaSMatthew R. Ochs struct Scsi_Host *host = sdev->host; 2477fb67d44dSMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(host); 2478fb67d44dSMatthew R. Ochs struct device *dev = &cfg->dev->dev; 247915305514SMatthew R. Ochs int rcr = 0; 248015305514SMatthew R. Ochs 24815a4d9d77SMatthew R. Ochs dev_dbg(dev, "%s: %d/%d/%d/%llu\n", __func__, 24825a4d9d77SMatthew R. Ochs host->host_no, sdev->channel, sdev->id, sdev->lun); 2483ed486daaSMatthew R. Ochs retry: 248415305514SMatthew R. Ochs switch (cfg->state) { 248515305514SMatthew R. Ochs case STATE_NORMAL: 248632abbedaSMatthew R. Ochs rcr = send_tmf(cfg, sdev, TMF_LUN_RESET); 248715305514SMatthew R. Ochs if (unlikely(rcr)) 248815305514SMatthew R. Ochs rc = FAILED; 248915305514SMatthew R. Ochs break; 249015305514SMatthew R. Ochs case STATE_RESET: 249115305514SMatthew R. Ochs wait_event(cfg->reset_waitq, cfg->state != STATE_RESET); 2492ed486daaSMatthew R. Ochs goto retry; 249315305514SMatthew R. Ochs default: 249415305514SMatthew R. Ochs rc = FAILED; 249515305514SMatthew R. Ochs break; 249615305514SMatthew R. Ochs } 249715305514SMatthew R. Ochs 2498fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 249915305514SMatthew R. Ochs return rc; 250015305514SMatthew R. Ochs } 250115305514SMatthew R. Ochs 250215305514SMatthew R. Ochs /** 250315305514SMatthew R. Ochs * cxlflash_eh_host_reset_handler() - reset the host adapter 250415305514SMatthew R. Ochs * @scp: SCSI command from stack identifying host. 250515305514SMatthew R. Ochs * 25061d3324c3SMatthew R. Ochs * Following a reset, the state is evaluated again in case an EEH occurred 25071d3324c3SMatthew R. Ochs * during the reset. In such a scenario, the host reset will either yield 25081d3324c3SMatthew R. Ochs * until the EEH recovery is complete or return success or failure based 25091d3324c3SMatthew R. Ochs * upon the current device state. 25101d3324c3SMatthew R. Ochs * 251115305514SMatthew R. Ochs * Return: 251215305514SMatthew R. Ochs * SUCCESS as defined in scsi/scsi.h 251315305514SMatthew R. Ochs * FAILED as defined in scsi/scsi.h 251415305514SMatthew R. Ochs */ 251515305514SMatthew R. 
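/*
 * [Editor's aside: illustrative] The error handlers in this block
 * share one state machine: STATE_NORMAL performs the work, STATE_RESET
 * parks the caller on reset_waitq until the in-flight reset settles
 * and then retries, and any other state fails the request. Note also
 * how drain_ioctls() slots in: every ioctl holds cfg->ioctl_rwsem for
 * read, so briefly taking the semaphore for write is sufficient to
 * wait out all in-flight ioctls before contexts are marked in error.
 */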
Ochs static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp) 251615305514SMatthew R. Ochs { 251715305514SMatthew R. Ochs int rc = SUCCESS; 251815305514SMatthew R. Ochs int rcr = 0; 251915305514SMatthew R. Ochs struct Scsi_Host *host = scp->device->host; 2520fb67d44dSMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(host); 2521fb67d44dSMatthew R. Ochs struct device *dev = &cfg->dev->dev; 252215305514SMatthew R. Ochs 25235a4d9d77SMatthew R. Ochs dev_dbg(dev, "%s: %d\n", __func__, host->host_no); 252415305514SMatthew R. Ochs 252515305514SMatthew R. Ochs switch (cfg->state) { 252615305514SMatthew R. Ochs case STATE_NORMAL: 252715305514SMatthew R. Ochs cfg->state = STATE_RESET; 2528f411396dSManoj N. Kumar drain_ioctls(cfg); 252915305514SMatthew R. Ochs cxlflash_mark_contexts_error(cfg); 253015305514SMatthew R. Ochs rcr = afu_reset(cfg); 253115305514SMatthew R. Ochs if (rcr) { 253215305514SMatthew R. Ochs rc = FAILED; 253315305514SMatthew R. Ochs cfg->state = STATE_FAILTERM; 253415305514SMatthew R. Ochs } else 253515305514SMatthew R. Ochs cfg->state = STATE_NORMAL; 253615305514SMatthew R. Ochs wake_up_all(&cfg->reset_waitq); 25371d3324c3SMatthew R. Ochs ssleep(1); 25381d3324c3SMatthew R. Ochs /* fall through */ 253915305514SMatthew R. Ochs case STATE_RESET: 254015305514SMatthew R. Ochs wait_event(cfg->reset_waitq, cfg->state != STATE_RESET); 254115305514SMatthew R. Ochs if (cfg->state == STATE_NORMAL) 254215305514SMatthew R. Ochs break; 254315305514SMatthew R. Ochs /* fall through */ 254415305514SMatthew R. Ochs default: 254515305514SMatthew R. Ochs rc = FAILED; 254615305514SMatthew R. Ochs break; 254715305514SMatthew R. Ochs } 254815305514SMatthew R. Ochs 2549fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 255015305514SMatthew R. Ochs return rc; 255115305514SMatthew R. Ochs } 255215305514SMatthew R. Ochs 255315305514SMatthew R. Ochs /** 255415305514SMatthew R. Ochs * cxlflash_change_queue_depth() - change the queue depth for the device 255515305514SMatthew R. Ochs * @sdev: SCSI device destined for queue depth change. 255615305514SMatthew R. Ochs * @qdepth: Requested queue depth value to set. 255715305514SMatthew R. Ochs * 255815305514SMatthew R. Ochs * The requested queue depth is capped to the maximum supported value. 255915305514SMatthew R. Ochs * 256015305514SMatthew R. Ochs * Return: The actual queue depth set. 256115305514SMatthew R. Ochs */ 256215305514SMatthew R. Ochs static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth) 256315305514SMatthew R. Ochs { 256415305514SMatthew R. Ochs 256515305514SMatthew R. Ochs if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN) 256615305514SMatthew R. Ochs qdepth = CXLFLASH_MAX_CMDS_PER_LUN; 256715305514SMatthew R. Ochs 256815305514SMatthew R. Ochs scsi_change_queue_depth(sdev, qdepth); 256915305514SMatthew R. Ochs return sdev->queue_depth; 257015305514SMatthew R. Ochs } 257115305514SMatthew R. Ochs 257215305514SMatthew R. Ochs /** 257315305514SMatthew R. Ochs * cxlflash_show_port_status() - queries and presents the current port status 2574e0f01a21SMatthew R. Ochs * @port: Desired port for status reporting. 25753b225cd3SMatthew R. Ochs * @cfg: Internal structure associated with the host. 257615305514SMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. 257715305514SMatthew R. Ochs * 257878ae028eSMatthew R. Ochs * Return: The size of the ASCII string returned in @buf or -EINVAL. 257915305514SMatthew R. Ochs */ 25803b225cd3SMatthew R. 
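/*
 * [Editor's aside: illustrative; the sysfs path is an assumption based
 * on standard SCSI host attribute placement] The *_show() helpers that
 * follow observe the usual sysfs contract: they fill a PAGE_SIZE
 * buffer and return the byte count written. scnprintf() is used
 * throughout because it returns the bytes actually written, unlike
 * snprintf() which returns the length that would have been written.
 * For example:
 *
 *	cat /sys/class/scsi_host/host<N>/port0	->  "online"
 */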
Ochs static ssize_t cxlflash_show_port_status(u32 port, 25813b225cd3SMatthew R. Ochs struct cxlflash_cfg *cfg, 25823b225cd3SMatthew R. Ochs char *buf) 258315305514SMatthew R. Ochs { 258478ae028eSMatthew R. Ochs struct device *dev = &cfg->dev->dev; 258515305514SMatthew R. Ochs char *disp_status; 258615305514SMatthew R. Ochs u64 status; 25870aa14887SMatthew R. Ochs __be64 __iomem *fc_port_regs; 258815305514SMatthew R. Ochs 258978ae028eSMatthew R. Ochs WARN_ON(port >= MAX_FC_PORTS); 259078ae028eSMatthew R. Ochs 259178ae028eSMatthew R. Ochs if (port >= cfg->num_fc_ports) { 259278ae028eSMatthew R. Ochs dev_info(dev, "%s: Port %d not supported on this card.\n", 259378ae028eSMatthew R. Ochs __func__, port); 259478ae028eSMatthew R. Ochs return -EINVAL; 259578ae028eSMatthew R. Ochs } 259615305514SMatthew R. Ochs 25970aa14887SMatthew R. Ochs fc_port_regs = get_fc_port_regs(cfg, port); 25980aa14887SMatthew R. Ochs status = readq_be(&fc_port_regs[FC_MTIP_STATUS / 8]); 2599e0f01a21SMatthew R. Ochs status &= FC_MTIP_STATUS_MASK; 260015305514SMatthew R. Ochs 260115305514SMatthew R. Ochs if (status == FC_MTIP_STATUS_ONLINE) 260215305514SMatthew R. Ochs disp_status = "online"; 260315305514SMatthew R. Ochs else if (status == FC_MTIP_STATUS_OFFLINE) 260415305514SMatthew R. Ochs disp_status = "offline"; 260515305514SMatthew R. Ochs else 260615305514SMatthew R. Ochs disp_status = "unknown"; 260715305514SMatthew R. Ochs 2608e0f01a21SMatthew R. Ochs return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status); 260915305514SMatthew R. Ochs } 261015305514SMatthew R. Ochs 261115305514SMatthew R. Ochs /** 2612e0f01a21SMatthew R. Ochs * port0_show() - queries and presents the current status of port 0 2613e0f01a21SMatthew R. Ochs * @dev: Generic device associated with the host owning the port. 2614e0f01a21SMatthew R. Ochs * @attr: Device attribute representing the port. 2615e0f01a21SMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. 2616e0f01a21SMatthew R. Ochs * 2617e0f01a21SMatthew R. Ochs * Return: The size of the ASCII string returned in @buf. 2618e0f01a21SMatthew R. Ochs */ 2619e0f01a21SMatthew R. Ochs static ssize_t port0_show(struct device *dev, 2620e0f01a21SMatthew R. Ochs struct device_attribute *attr, 2621e0f01a21SMatthew R. Ochs char *buf) 2622e0f01a21SMatthew R. Ochs { 2623fb67d44dSMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); 2624e0f01a21SMatthew R. Ochs 26253b225cd3SMatthew R. Ochs return cxlflash_show_port_status(0, cfg, buf); 2626e0f01a21SMatthew R. Ochs } 2627e0f01a21SMatthew R. Ochs 2628e0f01a21SMatthew R. Ochs /** 2629e0f01a21SMatthew R. Ochs * port1_show() - queries and presents the current status of port 1 2630e0f01a21SMatthew R. Ochs * @dev: Generic device associated with the host owning the port. 2631e0f01a21SMatthew R. Ochs * @attr: Device attribute representing the port. 2632e0f01a21SMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. 2633e0f01a21SMatthew R. Ochs * 2634e0f01a21SMatthew R. Ochs * Return: The size of the ASCII string returned in @buf. 2635e0f01a21SMatthew R. Ochs */ 2636e0f01a21SMatthew R. Ochs static ssize_t port1_show(struct device *dev, 2637e0f01a21SMatthew R. Ochs struct device_attribute *attr, 2638e0f01a21SMatthew R. Ochs char *buf) 2639e0f01a21SMatthew R. Ochs { 2640fb67d44dSMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); 2641e0f01a21SMatthew R. Ochs 26423b225cd3SMatthew R. Ochs return cxlflash_show_port_status(1, cfg, buf); 2643e0f01a21SMatthew R. 
Ochs } 2644e0f01a21SMatthew R. Ochs 2645e0f01a21SMatthew R. Ochs /** 26461cd7fabcSMatthew R. Ochs * port2_show() - queries and presents the current status of port 2 26471cd7fabcSMatthew R. Ochs * @dev: Generic device associated with the host owning the port. 26481cd7fabcSMatthew R. Ochs * @attr: Device attribute representing the port. 26491cd7fabcSMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. 26501cd7fabcSMatthew R. Ochs * 26511cd7fabcSMatthew R. Ochs * Return: The size of the ASCII string returned in @buf. 26521cd7fabcSMatthew R. Ochs */ 26531cd7fabcSMatthew R. Ochs static ssize_t port2_show(struct device *dev, 26541cd7fabcSMatthew R. Ochs struct device_attribute *attr, 26551cd7fabcSMatthew R. Ochs char *buf) 26561cd7fabcSMatthew R. Ochs { 26571cd7fabcSMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); 26581cd7fabcSMatthew R. Ochs 26591cd7fabcSMatthew R. Ochs return cxlflash_show_port_status(2, cfg, buf); 26601cd7fabcSMatthew R. Ochs } 26611cd7fabcSMatthew R. Ochs 26621cd7fabcSMatthew R. Ochs /** 26631cd7fabcSMatthew R. Ochs * port3_show() - queries and presents the current status of port 3 26641cd7fabcSMatthew R. Ochs * @dev: Generic device associated with the host owning the port. 26651cd7fabcSMatthew R. Ochs * @attr: Device attribute representing the port. 26661cd7fabcSMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. 26671cd7fabcSMatthew R. Ochs * 26681cd7fabcSMatthew R. Ochs * Return: The size of the ASCII string returned in @buf. 26691cd7fabcSMatthew R. Ochs */ 26701cd7fabcSMatthew R. Ochs static ssize_t port3_show(struct device *dev, 26711cd7fabcSMatthew R. Ochs struct device_attribute *attr, 26721cd7fabcSMatthew R. Ochs char *buf) 26731cd7fabcSMatthew R. Ochs { 26741cd7fabcSMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); 26751cd7fabcSMatthew R. Ochs 26761cd7fabcSMatthew R. Ochs return cxlflash_show_port_status(3, cfg, buf); 26771cd7fabcSMatthew R. Ochs } 26781cd7fabcSMatthew R. Ochs 26791cd7fabcSMatthew R. Ochs /** 2680e0f01a21SMatthew R. Ochs * lun_mode_show() - presents the current LUN mode of the host 268115305514SMatthew R. Ochs * @dev: Generic device associated with the host. 2682e0f01a21SMatthew R. Ochs * @attr: Device attribute representing the LUN mode. 268315305514SMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE to report back the LUN mode in ASCII. 268415305514SMatthew R. Ochs * 268515305514SMatthew R. Ochs * Return: The size of the ASCII string returned in @buf. 268615305514SMatthew R. Ochs */ 2687e0f01a21SMatthew R. Ochs static ssize_t lun_mode_show(struct device *dev, 268815305514SMatthew R. Ochs struct device_attribute *attr, char *buf) 268915305514SMatthew R. Ochs { 2690fb67d44dSMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); 269115305514SMatthew R. Ochs struct afu *afu = cfg->afu; 269215305514SMatthew R. Ochs 2693e0f01a21SMatthew R. Ochs return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun); 269415305514SMatthew R. Ochs } 269515305514SMatthew R. Ochs 269615305514SMatthew R. Ochs /** 2697e0f01a21SMatthew R. Ochs * lun_mode_store() - sets the LUN mode of the host 269815305514SMatthew R. Ochs * @dev: Generic device associated with the host. 2699e0f01a21SMatthew R. Ochs * @attr: Device attribute representing the LUN mode. 270015305514SMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE containing the LUN mode in ASCII. 270115305514SMatthew R. Ochs * @count: Length of data residing in @buf.
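 *
 * [Editor's usage sketch: the sysfs path is an assumption based on
 * standard SCSI host attribute placement]
 *
 *	echo 1 > /sys/class/scsi_host/host<N>/lun_mode
 *
 * switches the host to a single 512B-block internal LUN and triggers
 * the afu_reset() and host rescan visible in the body below.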
270215305514SMatthew R. Ochs * 270315305514SMatthew R. Ochs * The CXL Flash AFU supports a dummy LUN mode where the external 270415305514SMatthew R. Ochs * links and storage are not required. Space on the FPGA is used 270515305514SMatthew R. Ochs * to create 1 or 2 small LUNs which are presented to the system 270615305514SMatthew R. Ochs * as if they were a normal storage device. This feature is useful 270715305514SMatthew R. Ochs * during development and also provides manufacturing with a way 270815305514SMatthew R. Ochs * to test the AFU without an actual device. 270915305514SMatthew R. Ochs * 271015305514SMatthew R. Ochs * 0 = external LUN[s] (default) 271115305514SMatthew R. Ochs * 1 = internal LUN (1 x 64K, 512B blocks, id 0) 271215305514SMatthew R. Ochs * 2 = internal LUN (1 x 64K, 4K blocks, id 0) 271315305514SMatthew R. Ochs * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1) 271415305514SMatthew R. Ochs * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1) 271515305514SMatthew R. Ochs * 271615305514SMatthew R. Ochs * Return: The size of the ASCII string returned in @buf. 271715305514SMatthew R. Ochs */ 2718e0f01a21SMatthew R. Ochs static ssize_t lun_mode_store(struct device *dev, 271915305514SMatthew R. Ochs struct device_attribute *attr, 272015305514SMatthew R. Ochs const char *buf, size_t count) 272115305514SMatthew R. Ochs { 272215305514SMatthew R. Ochs struct Scsi_Host *shost = class_to_shost(dev); 2723fb67d44dSMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(shost); 272415305514SMatthew R. Ochs struct afu *afu = cfg->afu; 272515305514SMatthew R. Ochs int rc; 272615305514SMatthew R. Ochs u32 lun_mode; 272715305514SMatthew R. Ochs 272815305514SMatthew R. Ochs rc = kstrtouint(buf, 10, &lun_mode); 272915305514SMatthew R. Ochs if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) { 273015305514SMatthew R. Ochs afu->internal_lun = lun_mode; 2731603ecce9SManoj N. Kumar 2732603ecce9SManoj N. Kumar /* 2733603ecce9SManoj N. Kumar * When configured for internal LUN, there is only one channel, 273478ae028eSMatthew R. Ochs * channel number 0, else there will be one less than the number 273578ae028eSMatthew R. Ochs * of fc ports for this card. 2736603ecce9SManoj N. Kumar */ 2737603ecce9SManoj N. Kumar if (afu->internal_lun) 2738603ecce9SManoj N. Kumar shost->max_channel = 0; 2739603ecce9SManoj N. Kumar else 27408fa4f177SMatthew R. Ochs shost->max_channel = PORTNUM2CHAN(cfg->num_fc_ports); 2741603ecce9SManoj N. Kumar 274215305514SMatthew R. Ochs afu_reset(cfg); 274315305514SMatthew R. Ochs scsi_scan_host(cfg->host); 274415305514SMatthew R. Ochs } 274515305514SMatthew R. Ochs 274615305514SMatthew R. Ochs return count; 274715305514SMatthew R. Ochs } 274815305514SMatthew R. Ochs 274915305514SMatthew R. Ochs /** 2750e0f01a21SMatthew R. Ochs * ioctl_version_show() - presents the current ioctl version of the host 275115305514SMatthew R. Ochs * @dev: Generic device associated with the host. 275215305514SMatthew R. Ochs * @attr: Device attribute representing the ioctl version. 275315305514SMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE to report back the ioctl version. 275415305514SMatthew R. Ochs * 275515305514SMatthew R. Ochs * Return: The size of the ASCII string returned in @buf. 275615305514SMatthew R. Ochs */ 2757e0f01a21SMatthew R. Ochs static ssize_t ioctl_version_show(struct device *dev, 2758e0f01a21SMatthew R. Ochs struct device_attribute *attr, char *buf) 275915305514SMatthew R. Ochs { 2760d6e32f53SMatthew R. Ochs ssize_t bytes = 0; 2761d6e32f53SMatthew R. 
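/*
 * [Editor's aside: illustrative] The accumulation idiom below is the
 * standard way to emit multiple lines from one show() callback: each
 * call writes at most PAGE_SIZE - bytes characters at offset bytes,
 * so the combined output can never overflow the sysfs page even if
 * more version lines are added later.
 */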
Ochs 2762d6e32f53SMatthew R. Ochs bytes = scnprintf(buf, PAGE_SIZE, 2763d6e32f53SMatthew R. Ochs "disk: %u\n", DK_CXLFLASH_VERSION_0); 2764d6e32f53SMatthew R. Ochs bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes, 2765d6e32f53SMatthew R. Ochs "host: %u\n", HT_CXLFLASH_VERSION_0); 2766d6e32f53SMatthew R. Ochs 2767d6e32f53SMatthew R. Ochs return bytes; 276815305514SMatthew R. Ochs } 276915305514SMatthew R. Ochs 277015305514SMatthew R. Ochs /** 2771e0f01a21SMatthew R. Ochs * cxlflash_show_port_lun_table() - queries and presents the port LUN table 2772e0f01a21SMatthew R. Ochs * @port: Desired port for status reporting. 27733b225cd3SMatthew R. Ochs * @cfg: Internal structure associated with the host. 2774e0f01a21SMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. 2775e0f01a21SMatthew R. Ochs * 277678ae028eSMatthew R. Ochs * Return: The size of the ASCII string returned in @buf or -EINVAL. 2777e0f01a21SMatthew R. Ochs */ 2778e0f01a21SMatthew R. Ochs static ssize_t cxlflash_show_port_lun_table(u32 port, 27793b225cd3SMatthew R. Ochs struct cxlflash_cfg *cfg, 2780e0f01a21SMatthew R. Ochs char *buf) 2781e0f01a21SMatthew R. Ochs { 278278ae028eSMatthew R. Ochs struct device *dev = &cfg->dev->dev; 27830aa14887SMatthew R. Ochs __be64 __iomem *fc_port_luns; 2784e0f01a21SMatthew R. Ochs int i; 2785e0f01a21SMatthew R. Ochs ssize_t bytes = 0; 2786e0f01a21SMatthew R. Ochs 278778ae028eSMatthew R. Ochs WARN_ON(port >= MAX_FC_PORTS); 278878ae028eSMatthew R. Ochs 278978ae028eSMatthew R. Ochs if (port >= cfg->num_fc_ports) { 279078ae028eSMatthew R. Ochs dev_info(dev, "%s: Port %d not supported on this card.\n", 279178ae028eSMatthew R. Ochs __func__, port); 279278ae028eSMatthew R. Ochs return -EINVAL; 279378ae028eSMatthew R. Ochs } 2794e0f01a21SMatthew R. Ochs 27950aa14887SMatthew R. Ochs fc_port_luns = get_fc_port_luns(cfg, port); 2796e0f01a21SMatthew R. Ochs 2797e0f01a21SMatthew R. Ochs for (i = 0; i < CXLFLASH_NUM_VLUNS; i++) 2798e0f01a21SMatthew R. Ochs bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes, 27990aa14887SMatthew R. Ochs "%03d: %016llx\n", 28000aa14887SMatthew R. Ochs i, readq_be(&fc_port_luns[i])); 2801e0f01a21SMatthew R. Ochs return bytes; 2802e0f01a21SMatthew R. Ochs } 2803e0f01a21SMatthew R. Ochs 2804e0f01a21SMatthew R. Ochs /** 2805e0f01a21SMatthew R. Ochs * port0_lun_table_show() - presents the current LUN table of port 0 2806e0f01a21SMatthew R. Ochs * @dev: Generic device associated with the host owning the port. 2807e0f01a21SMatthew R. Ochs * @attr: Device attribute representing the port. 2808e0f01a21SMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. 2809e0f01a21SMatthew R. Ochs * 2810e0f01a21SMatthew R. Ochs * Return: The size of the ASCII string returned in @buf. 2811e0f01a21SMatthew R. Ochs */ 2812e0f01a21SMatthew R. Ochs static ssize_t port0_lun_table_show(struct device *dev, 2813e0f01a21SMatthew R. Ochs struct device_attribute *attr, 2814e0f01a21SMatthew R. Ochs char *buf) 2815e0f01a21SMatthew R. Ochs { 2816fb67d44dSMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); 2817e0f01a21SMatthew R. Ochs 28183b225cd3SMatthew R. Ochs return cxlflash_show_port_lun_table(0, cfg, buf); 2819e0f01a21SMatthew R. Ochs } 2820e0f01a21SMatthew R. Ochs 2821e0f01a21SMatthew R. Ochs /** 2822e0f01a21SMatthew R. Ochs * port1_lun_table_show() - presents the current LUN table of port 1 2823e0f01a21SMatthew R. Ochs * @dev: Generic device associated with the host owning the port. 2824e0f01a21SMatthew R. 
Ochs * @attr: Device attribute representing the port. 2825e0f01a21SMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. 2826e0f01a21SMatthew R. Ochs * 2827e0f01a21SMatthew R. Ochs * Return: The size of the ASCII string returned in @buf. 2828e0f01a21SMatthew R. Ochs */ 2829e0f01a21SMatthew R. Ochs static ssize_t port1_lun_table_show(struct device *dev, 2830e0f01a21SMatthew R. Ochs struct device_attribute *attr, 2831e0f01a21SMatthew R. Ochs char *buf) 2832e0f01a21SMatthew R. Ochs { 2833fb67d44dSMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); 2834e0f01a21SMatthew R. Ochs 28353b225cd3SMatthew R. Ochs return cxlflash_show_port_lun_table(1, cfg, buf); 2836e0f01a21SMatthew R. Ochs } 2837e0f01a21SMatthew R. Ochs 2838e0f01a21SMatthew R. Ochs /** 28391cd7fabcSMatthew R. Ochs * port2_lun_table_show() - presents the current LUN table of port 2 28401cd7fabcSMatthew R. Ochs * @dev: Generic device associated with the host owning the port. 28411cd7fabcSMatthew R. Ochs * @attr: Device attribute representing the port. 28421cd7fabcSMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. 28431cd7fabcSMatthew R. Ochs * 28441cd7fabcSMatthew R. Ochs * Return: The size of the ASCII string returned in @buf. 28451cd7fabcSMatthew R. Ochs */ 28461cd7fabcSMatthew R. Ochs static ssize_t port2_lun_table_show(struct device *dev, 28471cd7fabcSMatthew R. Ochs struct device_attribute *attr, 28481cd7fabcSMatthew R. Ochs char *buf) 28491cd7fabcSMatthew R. Ochs { 28501cd7fabcSMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); 28511cd7fabcSMatthew R. Ochs 28521cd7fabcSMatthew R. Ochs return cxlflash_show_port_lun_table(2, cfg, buf); 28531cd7fabcSMatthew R. Ochs } 28541cd7fabcSMatthew R. Ochs 28551cd7fabcSMatthew R. Ochs /** 28561cd7fabcSMatthew R. Ochs * port3_lun_table_show() - presents the current LUN table of port 3 28571cd7fabcSMatthew R. Ochs * @dev: Generic device associated with the host owning the port. 28581cd7fabcSMatthew R. Ochs * @attr: Device attribute representing the port. 28591cd7fabcSMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. 28601cd7fabcSMatthew R. Ochs * 28611cd7fabcSMatthew R. Ochs * Return: The size of the ASCII string returned in @buf. 28621cd7fabcSMatthew R. Ochs */ 28631cd7fabcSMatthew R. Ochs static ssize_t port3_lun_table_show(struct device *dev, 28641cd7fabcSMatthew R. Ochs struct device_attribute *attr, 28651cd7fabcSMatthew R. Ochs char *buf) 28661cd7fabcSMatthew R. Ochs { 28671cd7fabcSMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); 28681cd7fabcSMatthew R. Ochs 28691cd7fabcSMatthew R. Ochs return cxlflash_show_port_lun_table(3, cfg, buf); 28701cd7fabcSMatthew R. Ochs } 28711cd7fabcSMatthew R. Ochs 28721cd7fabcSMatthew R. Ochs /** 2873cba06e6dSMatthew R. Ochs * irqpoll_weight_show() - presents the current IRQ poll weight for the host 2874cba06e6dSMatthew R. Ochs * @dev: Generic device associated with the host. 2875cba06e6dSMatthew R. Ochs * @attr: Device attribute representing the IRQ poll weight. 2876cba06e6dSMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE to report back the current IRQ poll 2877cba06e6dSMatthew R. Ochs * weight in ASCII. 2878cba06e6dSMatthew R. Ochs * 2879cba06e6dSMatthew R. Ochs * An IRQ poll weight of 0 indicates polling is disabled. 2880cba06e6dSMatthew R. Ochs * 2881cba06e6dSMatthew R. Ochs * Return: The size of the ASCII string returned in @buf. 2882cba06e6dSMatthew R. 
Ochs */ 2883cba06e6dSMatthew R. Ochs static ssize_t irqpoll_weight_show(struct device *dev, 2884cba06e6dSMatthew R. Ochs struct device_attribute *attr, char *buf) 2885cba06e6dSMatthew R. Ochs { 2886cba06e6dSMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); 2887cba06e6dSMatthew R. Ochs struct afu *afu = cfg->afu; 2888cba06e6dSMatthew R. Ochs 2889cba06e6dSMatthew R. Ochs return scnprintf(buf, PAGE_SIZE, "%u\n", afu->irqpoll_weight); 2890cba06e6dSMatthew R. Ochs } 2891cba06e6dSMatthew R. Ochs 2892cba06e6dSMatthew R. Ochs /** 2893cba06e6dSMatthew R. Ochs * irqpoll_weight_store() - sets the current IRQ poll weight for the host 2894cba06e6dSMatthew R. Ochs * @dev: Generic device associated with the host. 2895cba06e6dSMatthew R. Ochs * @attr: Device attribute representing the IRQ poll weight. 2896cba06e6dSMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE containing the desired IRQ poll 2897cba06e6dSMatthew R. Ochs * weight in ASCII. 2898cba06e6dSMatthew R. Ochs * @count: Length of data residing in @buf. 2899cba06e6dSMatthew R. Ochs * 2900cba06e6dSMatthew R. Ochs * An IRQ poll weight of 0 indicates polling is disabled. 2901cba06e6dSMatthew R. Ochs * 2902cba06e6dSMatthew R. Ochs * Return: The size of the ASCII string returned in @buf. 2903cba06e6dSMatthew R. Ochs */ 2904cba06e6dSMatthew R. Ochs static ssize_t irqpoll_weight_store(struct device *dev, 2905cba06e6dSMatthew R. Ochs struct device_attribute *attr, 2906cba06e6dSMatthew R. Ochs const char *buf, size_t count) 2907cba06e6dSMatthew R. Ochs { 2908cba06e6dSMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); 2909cba06e6dSMatthew R. Ochs struct device *cfgdev = &cfg->dev->dev; 2910cba06e6dSMatthew R. Ochs struct afu *afu = cfg->afu; 2911bfc0bab1SUma Krishnan struct hwq *hwq; 2912cba06e6dSMatthew R. Ochs u32 weight; 2913bfc0bab1SUma Krishnan int rc, i; 2914cba06e6dSMatthew R. Ochs 2915cba06e6dSMatthew R. Ochs rc = kstrtouint(buf, 10, &weight); 2916cba06e6dSMatthew R. Ochs if (rc) 2917cba06e6dSMatthew R. Ochs return -EINVAL; 2918cba06e6dSMatthew R. Ochs 2919cba06e6dSMatthew R. Ochs if (weight > 256) { 2920cba06e6dSMatthew R. Ochs dev_info(cfgdev, 2921cba06e6dSMatthew R. Ochs "Invalid IRQ poll weight. It must be 256 or less.\n"); 2922cba06e6dSMatthew R. Ochs return -EINVAL; 2923cba06e6dSMatthew R. Ochs } 2924cba06e6dSMatthew R. Ochs 2925cba06e6dSMatthew R. Ochs if (weight == afu->irqpoll_weight) { 2926cba06e6dSMatthew R. Ochs dev_info(cfgdev, 2927cba06e6dSMatthew R. Ochs "Specified IRQ poll weight is the same as the current value.\n"); 2928cba06e6dSMatthew R. Ochs return -EINVAL; 2929cba06e6dSMatthew R. Ochs } 2930cba06e6dSMatthew R. Ochs 2931bfc0bab1SUma Krishnan if (afu_is_irqpoll_enabled(afu)) { 29323065267aSMatthew R. Ochs for (i = 0; i < afu->num_hwqs; i++) { 2933bfc0bab1SUma Krishnan hwq = get_hwq(afu, i); 2934bfc0bab1SUma Krishnan 2935bfc0bab1SUma Krishnan irq_poll_disable(&hwq->irqpoll); 2936bfc0bab1SUma Krishnan } 2937bfc0bab1SUma Krishnan } 2938cba06e6dSMatthew R. Ochs 2939cba06e6dSMatthew R. Ochs afu->irqpoll_weight = weight; 2940cba06e6dSMatthew R. Ochs 2941bfc0bab1SUma Krishnan if (weight > 0) { 29423065267aSMatthew R. Ochs for (i = 0; i < afu->num_hwqs; i++) { 2943bfc0bab1SUma Krishnan hwq = get_hwq(afu, i); 2944bfc0bab1SUma Krishnan 2945bfc0bab1SUma Krishnan irq_poll_init(&hwq->irqpoll, weight, cxlflash_irqpoll); 2946bfc0bab1SUma Krishnan } 2947bfc0bab1SUma Krishnan } 2948cba06e6dSMatthew R. Ochs 2949cba06e6dSMatthew R. Ochs return count; 2950cba06e6dSMatthew R. Ochs } 2951cba06e6dSMatthew R.
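/*
 * [Editor's worked example for the num_hwqs attribute below:
 * illustrative, assuming a host with 16 online CPUs]
 *
 *	echo 8	-> 8 hardware queues
 *	echo 0	-> 16 hardware queues (one per online CPU)
 *	echo -4	-> 16 / 4 = 4 hardware queues
 *
 * The result is always capped at CXLFLASH_MAX_HWQS and takes effect
 * through a full AFU reset.
 */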
Ochs 2952cba06e6dSMatthew R. Ochs /** 29533065267aSMatthew R. Ochs * num_hwqs_show() - presents the number of hardware queues for the host 29543065267aSMatthew R. Ochs * @dev: Generic device associated with the host. 29553065267aSMatthew R. Ochs * @attr: Device attribute representing the number of hardware queues. 29563065267aSMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE to report back the number of hardware 29573065267aSMatthew R. Ochs * queues in ASCII. 29583065267aSMatthew R. Ochs * 29593065267aSMatthew R. Ochs * Return: The size of the ASCII string returned in @buf. 29603065267aSMatthew R. Ochs */ 29613065267aSMatthew R. Ochs static ssize_t num_hwqs_show(struct device *dev, 29623065267aSMatthew R. Ochs struct device_attribute *attr, char *buf) 29633065267aSMatthew R. Ochs { 29643065267aSMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); 29653065267aSMatthew R. Ochs struct afu *afu = cfg->afu; 29663065267aSMatthew R. Ochs 29673065267aSMatthew R. Ochs return scnprintf(buf, PAGE_SIZE, "%u\n", afu->num_hwqs); 29683065267aSMatthew R. Ochs } 29693065267aSMatthew R. Ochs 29703065267aSMatthew R. Ochs /** 29713065267aSMatthew R. Ochs * num_hwqs_store() - sets the number of hardware queues for the host 29723065267aSMatthew R. Ochs * @dev: Generic device associated with the host. 29733065267aSMatthew R. Ochs * @attr: Device attribute representing the number of hardware queues. 29743065267aSMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE containing the number of hardware 29753065267aSMatthew R. Ochs * queues in ASCII. 29763065267aSMatthew R. Ochs * @count: Length of data residing in @buf. 29773065267aSMatthew R. Ochs * 29783065267aSMatthew R. Ochs * n > 0: num_hwqs = n 29793065267aSMatthew R. Ochs * n = 0: num_hwqs = num_online_cpus() 29803065267aSMatthew R. Ochs * n < 0: num_hwqs = num_online_cpus() / abs(n) 29813065267aSMatthew R. Ochs * 29823065267aSMatthew R. Ochs * Return: The size of the ASCII string returned in @buf. 29833065267aSMatthew R. Ochs */ 29843065267aSMatthew R. Ochs static ssize_t num_hwqs_store(struct device *dev, 29853065267aSMatthew R. Ochs struct device_attribute *attr, 29863065267aSMatthew R. Ochs const char *buf, size_t count) 29873065267aSMatthew R. Ochs { 29883065267aSMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); 29893065267aSMatthew R. Ochs struct afu *afu = cfg->afu; 29903065267aSMatthew R. Ochs int rc; 29913065267aSMatthew R. Ochs int nhwqs, num_hwqs; 29923065267aSMatthew R. Ochs 29933065267aSMatthew R. Ochs rc = kstrtoint(buf, 10, &nhwqs); 29943065267aSMatthew R. Ochs if (rc) 29953065267aSMatthew R. Ochs return -EINVAL; 29963065267aSMatthew R. Ochs 29973065267aSMatthew R. Ochs if (nhwqs >= 1) 29983065267aSMatthew R. Ochs num_hwqs = nhwqs; 29993065267aSMatthew R. Ochs else if (nhwqs == 0) 30003065267aSMatthew R. Ochs num_hwqs = num_online_cpus(); 30013065267aSMatthew R. Ochs else 30023065267aSMatthew R. Ochs num_hwqs = num_online_cpus() / abs(nhwqs); 30033065267aSMatthew R. Ochs 30043065267aSMatthew R. Ochs afu->desired_hwqs = min(num_hwqs, CXLFLASH_MAX_HWQS); 30053065267aSMatthew R. Ochs WARN_ON_ONCE(afu->desired_hwqs == 0); 30063065267aSMatthew R. Ochs 30073065267aSMatthew R. Ochs retry: 30083065267aSMatthew R. Ochs switch (cfg->state) { 30093065267aSMatthew R. Ochs case STATE_NORMAL: 30103065267aSMatthew R. Ochs cfg->state = STATE_RESET; 30113065267aSMatthew R. Ochs drain_ioctls(cfg); 30123065267aSMatthew R. Ochs cxlflash_mark_contexts_error(cfg); 30133065267aSMatthew R.
Ochs rc = afu_reset(cfg); 30143065267aSMatthew R. Ochs if (rc) 30153065267aSMatthew R. Ochs cfg->state = STATE_FAILTERM; 30163065267aSMatthew R. Ochs else 30173065267aSMatthew R. Ochs cfg->state = STATE_NORMAL; 30183065267aSMatthew R. Ochs wake_up_all(&cfg->reset_waitq); 30193065267aSMatthew R. Ochs break; 30203065267aSMatthew R. Ochs case STATE_RESET: 30213065267aSMatthew R. Ochs wait_event(cfg->reset_waitq, cfg->state != STATE_RESET); 30223065267aSMatthew R. Ochs if (cfg->state == STATE_NORMAL) 30233065267aSMatthew R. Ochs goto retry; 30243065267aSMatthew R. Ochs default: 30253065267aSMatthew R. Ochs /* Ideally should not happen */ 30263065267aSMatthew R. Ochs dev_err(dev, "%s: Device is not ready, state=%d\n", 30273065267aSMatthew R. Ochs __func__, cfg->state); 30283065267aSMatthew R. Ochs break; 30293065267aSMatthew R. Ochs } 30303065267aSMatthew R. Ochs 30313065267aSMatthew R. Ochs return count; 30323065267aSMatthew R. Ochs } 30333065267aSMatthew R. Ochs 30341dd0c0e4SMatthew R. Ochs static const char *hwq_mode_name[MAX_HWQ_MODE] = { "rr", "tag", "cpu" }; 30351dd0c0e4SMatthew R. Ochs 30361dd0c0e4SMatthew R. Ochs /** 30371dd0c0e4SMatthew R. Ochs * hwq_mode_show() - presents the HWQ steering mode for the host 30381dd0c0e4SMatthew R. Ochs * @dev: Generic device associated with the host. 30391dd0c0e4SMatthew R. Ochs * @attr: Device attribute representing the HWQ steering mode. 30401dd0c0e4SMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE to report back the HWQ steering mode 30411dd0c0e4SMatthew R. Ochs * as a character string. 30421dd0c0e4SMatthew R. Ochs * 30431dd0c0e4SMatthew R. Ochs * Return: The size of the ASCII string returned in @buf. 30441dd0c0e4SMatthew R. Ochs */ 30451dd0c0e4SMatthew R. Ochs static ssize_t hwq_mode_show(struct device *dev, 30461dd0c0e4SMatthew R. Ochs struct device_attribute *attr, char *buf) 30471dd0c0e4SMatthew R. Ochs { 30481dd0c0e4SMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); 30491dd0c0e4SMatthew R. Ochs struct afu *afu = cfg->afu; 30501dd0c0e4SMatthew R. Ochs 30511dd0c0e4SMatthew R. Ochs return scnprintf(buf, PAGE_SIZE, "%s\n", hwq_mode_name[afu->hwq_mode]); 30521dd0c0e4SMatthew R. Ochs } 30531dd0c0e4SMatthew R. Ochs 30541dd0c0e4SMatthew R. Ochs /** 30551dd0c0e4SMatthew R. Ochs * hwq_mode_store() - sets the HWQ steering mode for the host 30561dd0c0e4SMatthew R. Ochs * @dev: Generic device associated with the host. 30571dd0c0e4SMatthew R. Ochs * @attr: Device attribute representing the HWQ steering mode. 30581dd0c0e4SMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE containing the HWQ steering mode 30591dd0c0e4SMatthew R. Ochs * as a character string. 30601dd0c0e4SMatthew R. Ochs * @count: Length of data residing in @buf. 30611dd0c0e4SMatthew R. Ochs * 30621dd0c0e4SMatthew R. Ochs * rr = Round-Robin 30631dd0c0e4SMatthew R. Ochs * tag = Block MQ Tagging 30641dd0c0e4SMatthew R. Ochs * cpu = CPU Affinity 30651dd0c0e4SMatthew R. Ochs * 30661dd0c0e4SMatthew R. Ochs * Return: The size of the ASCII string returned in @buf. 30671dd0c0e4SMatthew R. Ochs */ 30681dd0c0e4SMatthew R. Ochs static ssize_t hwq_mode_store(struct device *dev, 30691dd0c0e4SMatthew R. Ochs struct device_attribute *attr, 30701dd0c0e4SMatthew R. Ochs const char *buf, size_t count) 30711dd0c0e4SMatthew R. Ochs { 30721dd0c0e4SMatthew R. Ochs struct Scsi_Host *shost = class_to_shost(dev); 30731dd0c0e4SMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(shost); 30741dd0c0e4SMatthew R. Ochs struct device *cfgdev = &cfg->dev->dev; 30751dd0c0e4SMatthew R.
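/*
 * [Editor's aside: illustrative] The mode lookup below compares only
 * strlen(hwq_mode_name[i]) bytes of @buf, so the trailing newline from
 * e.g. `echo cpu > .../hwq_mode` does not defeat the match.
 */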
Ochs struct afu *afu = cfg->afu; 30761dd0c0e4SMatthew R. Ochs int i; 30771dd0c0e4SMatthew R. Ochs u32 mode = MAX_HWQ_MODE; 30781dd0c0e4SMatthew R. Ochs 30791dd0c0e4SMatthew R. Ochs for (i = 0; i < MAX_HWQ_MODE; i++) { 30801dd0c0e4SMatthew R. Ochs if (!strncmp(hwq_mode_name[i], buf, strlen(hwq_mode_name[i]))) { 30811dd0c0e4SMatthew R. Ochs mode = i; 30821dd0c0e4SMatthew R. Ochs break; 30831dd0c0e4SMatthew R. Ochs } 30841dd0c0e4SMatthew R. Ochs } 30851dd0c0e4SMatthew R. Ochs 30861dd0c0e4SMatthew R. Ochs if (mode >= MAX_HWQ_MODE) { 30871dd0c0e4SMatthew R. Ochs dev_info(cfgdev, "Invalid HWQ steering mode.\n"); 30881dd0c0e4SMatthew R. Ochs return -EINVAL; 30891dd0c0e4SMatthew R. Ochs } 30901dd0c0e4SMatthew R. Ochs 30911dd0c0e4SMatthew R. Ochs if ((mode == HWQ_MODE_TAG) && !shost_use_blk_mq(shost)) { 30921dd0c0e4SMatthew R. Ochs dev_info(cfgdev, "SCSI-MQ is not enabled, use a different " 30931dd0c0e4SMatthew R. Ochs "HWQ steering mode.\n"); 30941dd0c0e4SMatthew R. Ochs return -EINVAL; 30951dd0c0e4SMatthew R. Ochs } 30961dd0c0e4SMatthew R. Ochs 30971dd0c0e4SMatthew R. Ochs afu->hwq_mode = mode; 30981dd0c0e4SMatthew R. Ochs 30991dd0c0e4SMatthew R. Ochs return count; 31001dd0c0e4SMatthew R. Ochs } 31011dd0c0e4SMatthew R. Ochs 31023065267aSMatthew R. Ochs /** 3103e0f01a21SMatthew R. Ochs * mode_show() - presents the current mode of the device 310415305514SMatthew R. Ochs * @dev: Generic device associated with the device. 310515305514SMatthew R. Ochs * @attr: Device attribute representing the device mode. 310615305514SMatthew R. Ochs * @buf: Buffer of length PAGE_SIZE to report back the dev mode in ASCII. 310715305514SMatthew R. Ochs * 310815305514SMatthew R. Ochs * Return: The size of the ASCII string returned in @buf. 310915305514SMatthew R. Ochs */ 3110e0f01a21SMatthew R. Ochs static ssize_t mode_show(struct device *dev, 311115305514SMatthew R. Ochs struct device_attribute *attr, char *buf) 311215305514SMatthew R. Ochs { 311315305514SMatthew R. Ochs struct scsi_device *sdev = to_scsi_device(dev); 311415305514SMatthew R. Ochs 3115e0f01a21SMatthew R. Ochs return scnprintf(buf, PAGE_SIZE, "%s\n", 311615305514SMatthew R. Ochs sdev->hostdata ? "superpipe" : "legacy"); 311715305514SMatthew R. Ochs } 311815305514SMatthew R. Ochs 311915305514SMatthew R. Ochs /* 312015305514SMatthew R. Ochs * Host attributes 312115305514SMatthew R. Ochs */ 3122e0f01a21SMatthew R. Ochs static DEVICE_ATTR_RO(port0); 3123e0f01a21SMatthew R. Ochs static DEVICE_ATTR_RO(port1); 31241cd7fabcSMatthew R. Ochs static DEVICE_ATTR_RO(port2); 31251cd7fabcSMatthew R. Ochs static DEVICE_ATTR_RO(port3); 3126e0f01a21SMatthew R. Ochs static DEVICE_ATTR_RW(lun_mode); 3127e0f01a21SMatthew R. Ochs static DEVICE_ATTR_RO(ioctl_version); 3128e0f01a21SMatthew R. Ochs static DEVICE_ATTR_RO(port0_lun_table); 3129e0f01a21SMatthew R. Ochs static DEVICE_ATTR_RO(port1_lun_table); 31301cd7fabcSMatthew R. Ochs static DEVICE_ATTR_RO(port2_lun_table); 31311cd7fabcSMatthew R. Ochs static DEVICE_ATTR_RO(port3_lun_table); 3132cba06e6dSMatthew R. Ochs static DEVICE_ATTR_RW(irqpoll_weight); 31333065267aSMatthew R. Ochs static DEVICE_ATTR_RW(num_hwqs); 31341dd0c0e4SMatthew R. Ochs static DEVICE_ATTR_RW(hwq_mode); 313515305514SMatthew R. Ochs 313615305514SMatthew R. Ochs static struct device_attribute *cxlflash_host_attrs[] = { 313715305514SMatthew R. Ochs &dev_attr_port0, 313815305514SMatthew R. Ochs &dev_attr_port1, 31391cd7fabcSMatthew R. Ochs &dev_attr_port2, 31401cd7fabcSMatthew R. Ochs &dev_attr_port3, 314115305514SMatthew R. 
Ochs &dev_attr_lun_mode, 314215305514SMatthew R. Ochs &dev_attr_ioctl_version, 3143e0f01a21SMatthew R. Ochs &dev_attr_port0_lun_table, 3144e0f01a21SMatthew R. Ochs &dev_attr_port1_lun_table, 31451cd7fabcSMatthew R. Ochs &dev_attr_port2_lun_table, 31461cd7fabcSMatthew R. Ochs &dev_attr_port3_lun_table, 3147cba06e6dSMatthew R. Ochs &dev_attr_irqpoll_weight, 31483065267aSMatthew R. Ochs &dev_attr_num_hwqs, 31491dd0c0e4SMatthew R. Ochs &dev_attr_hwq_mode, 315015305514SMatthew R. Ochs NULL 315115305514SMatthew R. Ochs }; 315215305514SMatthew R. Ochs 315315305514SMatthew R. Ochs /* 315415305514SMatthew R. Ochs * Device attributes 315515305514SMatthew R. Ochs */ 3156e0f01a21SMatthew R. Ochs static DEVICE_ATTR_RO(mode); 315715305514SMatthew R. Ochs 315815305514SMatthew R. Ochs static struct device_attribute *cxlflash_dev_attrs[] = { 315915305514SMatthew R. Ochs &dev_attr_mode, 316015305514SMatthew R. Ochs NULL 316115305514SMatthew R. Ochs }; 316215305514SMatthew R. Ochs 316315305514SMatthew R. Ochs /* 316415305514SMatthew R. Ochs * Host template 316515305514SMatthew R. Ochs */ 316615305514SMatthew R. Ochs static struct scsi_host_template driver_template = { 316715305514SMatthew R. Ochs .module = THIS_MODULE, 316815305514SMatthew R. Ochs .name = CXLFLASH_ADAPTER_NAME, 316915305514SMatthew R. Ochs .info = cxlflash_driver_info, 317015305514SMatthew R. Ochs .ioctl = cxlflash_ioctl, 317115305514SMatthew R. Ochs .proc_name = CXLFLASH_NAME, 317215305514SMatthew R. Ochs .queuecommand = cxlflash_queuecommand, 31737c4c41f1SUma Krishnan .eh_abort_handler = cxlflash_eh_abort_handler, 317415305514SMatthew R. Ochs .eh_device_reset_handler = cxlflash_eh_device_reset_handler, 317515305514SMatthew R. Ochs .eh_host_reset_handler = cxlflash_eh_host_reset_handler, 317615305514SMatthew R. Ochs .change_queue_depth = cxlflash_change_queue_depth, 317783430833SManoj N. Kumar .cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN, 317815305514SMatthew R. Ochs .can_queue = CXLFLASH_MAX_CMDS, 31795fbb96c8SMatthew R. Ochs .cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1, 318015305514SMatthew R. Ochs .this_id = -1, 318168ab2d76SUma Krishnan .sg_tablesize = 1, /* No scatter gather support */ 318215305514SMatthew R. Ochs .max_sectors = CXLFLASH_MAX_SECTORS, 318315305514SMatthew R. Ochs .use_clustering = ENABLE_CLUSTERING, 318415305514SMatthew R. Ochs .shost_attrs = cxlflash_host_attrs, 318515305514SMatthew R. Ochs .sdev_attrs = cxlflash_dev_attrs, 318615305514SMatthew R. Ochs }; 318715305514SMatthew R. Ochs 318815305514SMatthew R. Ochs /* 318915305514SMatthew R. Ochs * Device dependent values 319015305514SMatthew R. Ochs */ 319196e1b660SUma Krishnan static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS, 31920d419130SMatthew R. Ochs CXLFLASH_WWPN_VPD_REQUIRED }; 319396e1b660SUma Krishnan static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS, 3194704c4b0dSUma Krishnan CXLFLASH_NOTIFY_SHUTDOWN }; 319594344520SMatthew R. Ochs static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS, 319607d0c52fSUma Krishnan (CXLFLASH_NOTIFY_SHUTDOWN | 319707d0c52fSUma Krishnan CXLFLASH_OCXL_DEV) }; 319815305514SMatthew R. Ochs 319915305514SMatthew R. Ochs /* 320015305514SMatthew R. Ochs * PCI device binding table 320115305514SMatthew R. Ochs */ 320215305514SMatthew R. Ochs static struct pci_device_id cxlflash_pci_table[] = { 320315305514SMatthew R. Ochs {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA, 320415305514SMatthew R. 
Ochs PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals}, 3205a2746fb1SManoj Kumar {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT, 3206a2746fb1SManoj Kumar PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals}, 320794344520SMatthew R. Ochs {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_BRIARD, 320894344520SMatthew R. Ochs PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_briard_vals}, 320915305514SMatthew R. Ochs {} 321015305514SMatthew R. Ochs }; 321115305514SMatthew R. Ochs 321215305514SMatthew R. Ochs MODULE_DEVICE_TABLE(pci, cxlflash_pci_table); 321315305514SMatthew R. Ochs 321415305514SMatthew R. Ochs /** 3215c21e0bbfSMatthew R. Ochs * cxlflash_worker_thread() - work thread handler for the AFU 3216c21e0bbfSMatthew R. Ochs * @work: Work structure contained within cxlflash associated with host. 3217c21e0bbfSMatthew R. Ochs * 3218c21e0bbfSMatthew R. Ochs * Handles the following events: 3219c21e0bbfSMatthew R. Ochs * - Link reset which cannot be performed on interrupt context due to 3220c21e0bbfSMatthew R. Ochs * blocking up to a few seconds 3221ef51074aSMatthew R. Ochs * - Rescan the host 3222c21e0bbfSMatthew R. Ochs */ 3223c21e0bbfSMatthew R. Ochs static void cxlflash_worker_thread(struct work_struct *work) 3224c21e0bbfSMatthew R. Ochs { 32255cdac81aSMatthew R. Ochs struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg, 32265cdac81aSMatthew R. Ochs work_q); 3227c21e0bbfSMatthew R. Ochs struct afu *afu = cfg->afu; 32284392ba49SMatthew R. Ochs struct device *dev = &cfg->dev->dev; 32290aa14887SMatthew R. Ochs __be64 __iomem *fc_port_regs; 3230c21e0bbfSMatthew R. Ochs int port; 3231c21e0bbfSMatthew R. Ochs ulong lock_flags; 3232c21e0bbfSMatthew R. Ochs 32335cdac81aSMatthew R. Ochs /* Avoid MMIO if the device has failed */ 32345cdac81aSMatthew R. Ochs 32355cdac81aSMatthew R. Ochs if (cfg->state != STATE_NORMAL) 32365cdac81aSMatthew R. Ochs return; 32375cdac81aSMatthew R. Ochs 3238c21e0bbfSMatthew R. Ochs spin_lock_irqsave(cfg->host->host_lock, lock_flags); 3239c21e0bbfSMatthew R. Ochs 3240c21e0bbfSMatthew R. Ochs if (cfg->lr_state == LINK_RESET_REQUIRED) { 3241c21e0bbfSMatthew R. Ochs port = cfg->lr_port; 3242c21e0bbfSMatthew R. Ochs if (port < 0) 32434392ba49SMatthew R. Ochs dev_err(dev, "%s: invalid port index %d\n", 32444392ba49SMatthew R. Ochs __func__, port); 3245c21e0bbfSMatthew R. Ochs else { 3246c21e0bbfSMatthew R. Ochs spin_unlock_irqrestore(cfg->host->host_lock, 3247c21e0bbfSMatthew R. Ochs lock_flags); 3248c21e0bbfSMatthew R. Ochs 3249c21e0bbfSMatthew R. Ochs /* The reset can block... */ 32500aa14887SMatthew R. Ochs fc_port_regs = get_fc_port_regs(cfg, port); 32510aa14887SMatthew R. Ochs afu_link_reset(afu, port, fc_port_regs); 3252c21e0bbfSMatthew R. Ochs spin_lock_irqsave(cfg->host->host_lock, lock_flags); 3253c21e0bbfSMatthew R. Ochs } 3254c21e0bbfSMatthew R. Ochs 3255c21e0bbfSMatthew R. Ochs cfg->lr_state = LINK_RESET_COMPLETE; 3256c21e0bbfSMatthew R. Ochs } 3257c21e0bbfSMatthew R. Ochs 3258c21e0bbfSMatthew R. Ochs spin_unlock_irqrestore(cfg->host->host_lock, lock_flags); 3259ef51074aSMatthew R. Ochs 3260ef51074aSMatthew R. Ochs if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0) 3261ef51074aSMatthew R. Ochs scsi_scan_host(cfg->host); 3262c21e0bbfSMatthew R. Ochs } 3263c21e0bbfSMatthew R. Ochs 3264c21e0bbfSMatthew R. Ochs /** 3265a834a36bSUma Krishnan * cxlflash_chr_open() - character device open handler 3266a834a36bSUma Krishnan * @inode: Device inode associated with this character device. 
3267a834a36bSUma Krishnan * @file: File pointer for this device.
3268a834a36bSUma Krishnan *
3269a834a36bSUma Krishnan * Only users with admin privileges are allowed to open the character device.
3270a834a36bSUma Krishnan *
3271a834a36bSUma Krishnan * Return: 0 on success, -errno on failure
3272a834a36bSUma Krishnan */
3273a834a36bSUma Krishnan static int cxlflash_chr_open(struct inode *inode, struct file *file)
3274a834a36bSUma Krishnan {
3275a834a36bSUma Krishnan struct cxlflash_cfg *cfg;
3276a834a36bSUma Krishnan 
3277a834a36bSUma Krishnan if (!capable(CAP_SYS_ADMIN))
3278a834a36bSUma Krishnan return -EACCES;
3279a834a36bSUma Krishnan 
3280a834a36bSUma Krishnan cfg = container_of(inode->i_cdev, struct cxlflash_cfg, cdev);
3281a834a36bSUma Krishnan file->private_data = cfg;
3282a834a36bSUma Krishnan 
3283a834a36bSUma Krishnan return 0;
3284a834a36bSUma Krishnan }
3285a834a36bSUma Krishnan 
3286d6e32f53SMatthew R. Ochs /**
3287d6e32f53SMatthew R. Ochs * decode_hioctl() - translates encoded host ioctl to easily identifiable string
3288d6e32f53SMatthew R. Ochs * @cmd: The host ioctl command to decode.
3289d6e32f53SMatthew R. Ochs *
3290d6e32f53SMatthew R. Ochs * Return: A string identifying the decoded host ioctl.
3291d6e32f53SMatthew R. Ochs */
3292d6e32f53SMatthew R. Ochs static char *decode_hioctl(int cmd)
3293d6e32f53SMatthew R. Ochs {
3294d6e32f53SMatthew R. Ochs switch (cmd) {
32959cf43a36SMatthew R. Ochs case HT_CXLFLASH_LUN_PROVISION:
32969cf43a36SMatthew R. Ochs return __stringify_1(HT_CXLFLASH_LUN_PROVISION);
3297d6e32f53SMatthew R. Ochs }
3298d6e32f53SMatthew R. Ochs 
3299d6e32f53SMatthew R. Ochs return "UNKNOWN";
3300d6e32f53SMatthew R. Ochs }
3301d6e32f53SMatthew R. Ochs 
3302d6e32f53SMatthew R. Ochs /**
33039cf43a36SMatthew R. Ochs * cxlflash_lun_provision() - host LUN provisioning handler
33049cf43a36SMatthew R. Ochs * @cfg: Internal structure associated with the host.
33059cf43a36SMatthew R. Ochs * @lunprov: Kernel copy of userspace ioctl data structure.
33069cf43a36SMatthew R. Ochs *
33079cf43a36SMatthew R. Ochs * Return: 0 on success, -errno on failure
33089cf43a36SMatthew R. Ochs */
33099cf43a36SMatthew R. Ochs static int cxlflash_lun_provision(struct cxlflash_cfg *cfg,
33109cf43a36SMatthew R. Ochs struct ht_cxlflash_lun_provision *lunprov)
33119cf43a36SMatthew R. Ochs {
33129cf43a36SMatthew R. Ochs struct afu *afu = cfg->afu;
33139cf43a36SMatthew R. Ochs struct device *dev = &cfg->dev->dev;
33149cf43a36SMatthew R. Ochs struct sisl_ioarcb rcb;
33159cf43a36SMatthew R. Ochs struct sisl_ioasa asa;
33169cf43a36SMatthew R. Ochs __be64 __iomem *fc_port_regs;
33179cf43a36SMatthew R. Ochs u16 port = lunprov->port;
33189cf43a36SMatthew R. Ochs u16 scmd = lunprov->hdr.subcmd;
33199cf43a36SMatthew R. Ochs u16 type;
33209cf43a36SMatthew R. Ochs u64 reg;
33219cf43a36SMatthew R. Ochs u64 size;
33229cf43a36SMatthew R. Ochs u64 lun_id;
33239cf43a36SMatthew R. Ochs int rc = 0;
33249cf43a36SMatthew R. Ochs 
33259cf43a36SMatthew R. Ochs if (!afu_is_lun_provision(afu)) {
33269cf43a36SMatthew R. Ochs rc = -ENOTSUPP;
33279cf43a36SMatthew R. Ochs goto out;
33289cf43a36SMatthew R. Ochs }
33299cf43a36SMatthew R. Ochs 
33309cf43a36SMatthew R. Ochs if (port >= cfg->num_fc_ports) {
33319cf43a36SMatthew R. Ochs rc = -EINVAL;
33329cf43a36SMatthew R. Ochs goto out;
33339cf43a36SMatthew R. Ochs }
33349cf43a36SMatthew R. Ochs 
33359cf43a36SMatthew R. Ochs switch (scmd) {
33369cf43a36SMatthew R. Ochs case HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN:
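/*
 * A minimal userspace sketch of exercising this handler (illustrative
 * only; the device node path follows cxlflash_devnode() further below
 * for the first adapter, and lun_size is a hypothetical variable):
 *
 *    struct ht_cxlflash_lun_provision lp = { 0 };
 *    int fd, rc;
 *
 *    lp.hdr.version = HT_CXLFLASH_VERSION_0;
 *    lp.hdr.subcmd = HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN;
 *    lp.port = 0;
 *    lp.size = lun_size;
 *    fd = open("/dev/cxlflash/cxlflash0", O_RDWR);
 *    rc = ioctl(fd, HT_CXLFLASH_LUN_PROVISION, &lp);
 *
 * On success, lp.lun_id and lp.wwid identify the newly created LUN.
 * Opening the node requires CAP_SYS_ADMIN per cxlflash_chr_open() above,
 * and reserved header fields must remain zero per cxlflash_chr_ioctl().
 */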
33379cf43a36SMatthew R. Ochs type = SISL_AFU_LUN_PROVISION_CREATE;
33389cf43a36SMatthew R. Ochs size = lunprov->size;
33399cf43a36SMatthew R. Ochs lun_id = 0;
33409cf43a36SMatthew R. Ochs break;
33419cf43a36SMatthew R. Ochs case HT_CXLFLASH_LUN_PROVISION_SUBCMD_DELETE_LUN:
33429cf43a36SMatthew R. Ochs type = SISL_AFU_LUN_PROVISION_DELETE;
33439cf43a36SMatthew R. Ochs size = 0;
33449cf43a36SMatthew R. Ochs lun_id = lunprov->lun_id;
33459cf43a36SMatthew R. Ochs break;
33469cf43a36SMatthew R. Ochs case HT_CXLFLASH_LUN_PROVISION_SUBCMD_QUERY_PORT:
33479cf43a36SMatthew R. Ochs fc_port_regs = get_fc_port_regs(cfg, port);
33489cf43a36SMatthew R. Ochs 
33499cf43a36SMatthew R. Ochs reg = readq_be(&fc_port_regs[FC_MAX_NUM_LUNS / 8]);
33509cf43a36SMatthew R. Ochs lunprov->max_num_luns = reg;
33519cf43a36SMatthew R. Ochs reg = readq_be(&fc_port_regs[FC_CUR_NUM_LUNS / 8]);
33529cf43a36SMatthew R. Ochs lunprov->cur_num_luns = reg;
33539cf43a36SMatthew R. Ochs reg = readq_be(&fc_port_regs[FC_MAX_CAP_PORT / 8]);
33549cf43a36SMatthew R. Ochs lunprov->max_cap_port = reg;
33559cf43a36SMatthew R. Ochs reg = readq_be(&fc_port_regs[FC_CUR_CAP_PORT / 8]);
33569cf43a36SMatthew R. Ochs lunprov->cur_cap_port = reg;
33579cf43a36SMatthew R. Ochs 
33589cf43a36SMatthew R. Ochs goto out;
33599cf43a36SMatthew R. Ochs default:
33609cf43a36SMatthew R. Ochs rc = -EINVAL;
33619cf43a36SMatthew R. Ochs goto out;
33629cf43a36SMatthew R. Ochs }
33639cf43a36SMatthew R. Ochs 
33649cf43a36SMatthew R. Ochs memset(&rcb, 0, sizeof(rcb));
33659cf43a36SMatthew R. Ochs memset(&asa, 0, sizeof(asa));
33669cf43a36SMatthew R. Ochs rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
33679cf43a36SMatthew R. Ochs rcb.lun_id = lun_id;
33689cf43a36SMatthew R. Ochs rcb.msi = SISL_MSI_RRQ_UPDATED;
33699cf43a36SMatthew R. Ochs rcb.timeout = MC_LUN_PROV_TIMEOUT;
33709cf43a36SMatthew R. Ochs rcb.ioasa = &asa;
33719cf43a36SMatthew R. Ochs 
33729cf43a36SMatthew R. Ochs rcb.cdb[0] = SISL_AFU_CMD_LUN_PROVISION;
33739cf43a36SMatthew R. Ochs rcb.cdb[1] = type;
33749cf43a36SMatthew R. Ochs rcb.cdb[2] = port;
33759cf43a36SMatthew R. Ochs put_unaligned_be64(size, &rcb.cdb[8]);
33769cf43a36SMatthew R. Ochs 
33779cf43a36SMatthew R. Ochs rc = send_afu_cmd(afu, &rcb);
33789cf43a36SMatthew R. Ochs if (rc) {
33799cf43a36SMatthew R. Ochs dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
33809cf43a36SMatthew R. Ochs __func__, rc, asa.ioasc, asa.afu_extra);
33819cf43a36SMatthew R. Ochs goto out;
33829cf43a36SMatthew R. Ochs }
33839cf43a36SMatthew R. Ochs 
33849cf43a36SMatthew R. Ochs if (scmd == HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN) {
33859cf43a36SMatthew R. Ochs lunprov->lun_id = (u64)asa.lunid_hi << 32 | asa.lunid_lo;
33869cf43a36SMatthew R. Ochs memcpy(lunprov->wwid, asa.wwid, sizeof(lunprov->wwid));
33879cf43a36SMatthew R. Ochs }
33889cf43a36SMatthew R. Ochs out:
33899cf43a36SMatthew R. Ochs dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
33909cf43a36SMatthew R. Ochs return rc;
33919cf43a36SMatthew R. Ochs }
33929cf43a36SMatthew R. Ochs 
33939cf43a36SMatthew R. Ochs /**
3394bc88ac47SMatthew R. Ochs * cxlflash_afu_debug() - host AFU debug handler
3395bc88ac47SMatthew R. Ochs * @cfg: Internal structure associated with the host.
3396bc88ac47SMatthew R. Ochs * @afu_dbg: Kernel copy of userspace ioctl data structure.
3397bc88ac47SMatthew R. Ochs *
3398bc88ac47SMatthew R. Ochs * For debug requests requiring a data buffer, always provide an aligned
3399bc88ac47SMatthew R. Ochs * (cache line) buffer to the AFU to appease any alignment requirements.
3400bc88ac47SMatthew R.
Ochs * 3401bc88ac47SMatthew R. Ochs * Return: 0 on success, -errno on failure 3402bc88ac47SMatthew R. Ochs */ 3403bc88ac47SMatthew R. Ochs static int cxlflash_afu_debug(struct cxlflash_cfg *cfg, 3404bc88ac47SMatthew R. Ochs struct ht_cxlflash_afu_debug *afu_dbg) 3405bc88ac47SMatthew R. Ochs { 3406bc88ac47SMatthew R. Ochs struct afu *afu = cfg->afu; 3407bc88ac47SMatthew R. Ochs struct device *dev = &cfg->dev->dev; 3408bc88ac47SMatthew R. Ochs struct sisl_ioarcb rcb; 3409bc88ac47SMatthew R. Ochs struct sisl_ioasa asa; 3410bc88ac47SMatthew R. Ochs char *buf = NULL; 3411bc88ac47SMatthew R. Ochs char *kbuf = NULL; 3412bc88ac47SMatthew R. Ochs void __user *ubuf = (__force void __user *)afu_dbg->data_ea; 3413bc88ac47SMatthew R. Ochs u16 req_flags = SISL_REQ_FLAGS_AFU_CMD; 3414bc88ac47SMatthew R. Ochs u32 ulen = afu_dbg->data_len; 3415bc88ac47SMatthew R. Ochs bool is_write = afu_dbg->hdr.flags & HT_CXLFLASH_HOST_WRITE; 3416bc88ac47SMatthew R. Ochs int rc = 0; 3417bc88ac47SMatthew R. Ochs 3418bc88ac47SMatthew R. Ochs if (!afu_is_afu_debug(afu)) { 3419bc88ac47SMatthew R. Ochs rc = -ENOTSUPP; 3420bc88ac47SMatthew R. Ochs goto out; 3421bc88ac47SMatthew R. Ochs } 3422bc88ac47SMatthew R. Ochs 3423bc88ac47SMatthew R. Ochs if (ulen) { 3424bc88ac47SMatthew R. Ochs req_flags |= SISL_REQ_FLAGS_SUP_UNDERRUN; 3425bc88ac47SMatthew R. Ochs 3426bc88ac47SMatthew R. Ochs if (ulen > HT_CXLFLASH_AFU_DEBUG_MAX_DATA_LEN) { 3427bc88ac47SMatthew R. Ochs rc = -EINVAL; 3428bc88ac47SMatthew R. Ochs goto out; 3429bc88ac47SMatthew R. Ochs } 3430bc88ac47SMatthew R. Ochs 3431bc88ac47SMatthew R. Ochs buf = kmalloc(ulen + cache_line_size() - 1, GFP_KERNEL); 3432bc88ac47SMatthew R. Ochs if (unlikely(!buf)) { 3433bc88ac47SMatthew R. Ochs rc = -ENOMEM; 3434bc88ac47SMatthew R. Ochs goto out; 3435bc88ac47SMatthew R. Ochs } 3436bc88ac47SMatthew R. Ochs 3437bc88ac47SMatthew R. Ochs kbuf = PTR_ALIGN(buf, cache_line_size()); 3438bc88ac47SMatthew R. Ochs 3439bc88ac47SMatthew R. Ochs if (is_write) { 3440bc88ac47SMatthew R. Ochs req_flags |= SISL_REQ_FLAGS_HOST_WRITE; 3441bc88ac47SMatthew R. Ochs 3442eeac8cdaSDan Carpenter if (copy_from_user(kbuf, ubuf, ulen)) { 3443eeac8cdaSDan Carpenter rc = -EFAULT; 3444bc88ac47SMatthew R. Ochs goto out; 3445bc88ac47SMatthew R. Ochs } 3446bc88ac47SMatthew R. Ochs } 3447eeac8cdaSDan Carpenter } 3448bc88ac47SMatthew R. Ochs 3449bc88ac47SMatthew R. Ochs memset(&rcb, 0, sizeof(rcb)); 3450bc88ac47SMatthew R. Ochs memset(&asa, 0, sizeof(asa)); 3451bc88ac47SMatthew R. Ochs 3452bc88ac47SMatthew R. Ochs rcb.req_flags = req_flags; 3453bc88ac47SMatthew R. Ochs rcb.msi = SISL_MSI_RRQ_UPDATED; 3454bc88ac47SMatthew R. Ochs rcb.timeout = MC_AFU_DEBUG_TIMEOUT; 3455bc88ac47SMatthew R. Ochs rcb.ioasa = &asa; 3456bc88ac47SMatthew R. Ochs 3457bc88ac47SMatthew R. Ochs if (ulen) { 3458bc88ac47SMatthew R. Ochs rcb.data_len = ulen; 3459bc88ac47SMatthew R. Ochs rcb.data_ea = (uintptr_t)kbuf; 3460bc88ac47SMatthew R. Ochs } 3461bc88ac47SMatthew R. Ochs 3462bc88ac47SMatthew R. Ochs rcb.cdb[0] = SISL_AFU_CMD_DEBUG; 3463bc88ac47SMatthew R. Ochs memcpy(&rcb.cdb[4], afu_dbg->afu_subcmd, 3464bc88ac47SMatthew R. Ochs HT_CXLFLASH_AFU_DEBUG_SUBCMD_LEN); 3465bc88ac47SMatthew R. Ochs 3466bc88ac47SMatthew R. Ochs rc = send_afu_cmd(afu, &rcb); 3467bc88ac47SMatthew R. Ochs if (rc) { 3468bc88ac47SMatthew R. Ochs dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n", 3469bc88ac47SMatthew R. Ochs __func__, rc, asa.ioasc, asa.afu_extra); 3470bc88ac47SMatthew R. Ochs goto out; 3471bc88ac47SMatthew R. Ochs } 3472bc88ac47SMatthew R. 
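/*
 * A sketch of the over-allocate-and-align idiom used for kbuf earlier in
 * this function (not additional driver code):
 *
 *    buf  = kmalloc(ulen + cache_line_size() - 1, GFP_KERNEL);
 *    kbuf = PTR_ALIGN(buf, cache_line_size());
 *
 * Allocating cache_line_size() - 1 spare bytes guarantees that rounding
 * kbuf up to the next cache line boundary still leaves ulen usable bytes
 * within the allocation. Note that buf, not the aligned kbuf, is what is
 * handed to kfree() below.
 */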
Ochs 3473eeac8cdaSDan Carpenter if (ulen && !is_write) { 3474eeac8cdaSDan Carpenter if (copy_to_user(ubuf, kbuf, ulen)) 3475eeac8cdaSDan Carpenter rc = -EFAULT; 3476eeac8cdaSDan Carpenter } 3477bc88ac47SMatthew R. Ochs out: 3478bc88ac47SMatthew R. Ochs kfree(buf); 3479bc88ac47SMatthew R. Ochs dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 3480bc88ac47SMatthew R. Ochs return rc; 3481bc88ac47SMatthew R. Ochs } 3482bc88ac47SMatthew R. Ochs 3483bc88ac47SMatthew R. Ochs /** 3484d6e32f53SMatthew R. Ochs * cxlflash_chr_ioctl() - character device IOCTL handler 3485d6e32f53SMatthew R. Ochs * @file: File pointer for this device. 3486d6e32f53SMatthew R. Ochs * @cmd: IOCTL command. 3487d6e32f53SMatthew R. Ochs * @arg: Userspace ioctl data structure. 3488d6e32f53SMatthew R. Ochs * 3489d6e32f53SMatthew R. Ochs * A read/write semaphore is used to implement a 'drain' of currently 3490d6e32f53SMatthew R. Ochs * running ioctls. The read semaphore is taken at the beginning of each 3491d6e32f53SMatthew R. Ochs * ioctl thread and released upon concluding execution. Additionally the 3492d6e32f53SMatthew R. Ochs * semaphore should be released and then reacquired in any ioctl execution 3493d6e32f53SMatthew R. Ochs * path which will wait for an event to occur that is outside the scope of 3494d6e32f53SMatthew R. Ochs * the ioctl (i.e. an adapter reset). To drain the ioctls currently running, 3495d6e32f53SMatthew R. Ochs * a thread simply needs to acquire the write semaphore. 3496d6e32f53SMatthew R. Ochs * 3497d6e32f53SMatthew R. Ochs * Return: 0 on success, -errno on failure 3498d6e32f53SMatthew R. Ochs */ 3499d6e32f53SMatthew R. Ochs static long cxlflash_chr_ioctl(struct file *file, unsigned int cmd, 3500d6e32f53SMatthew R. Ochs unsigned long arg) 3501d6e32f53SMatthew R. Ochs { 3502d6e32f53SMatthew R. Ochs typedef int (*hioctl) (struct cxlflash_cfg *, void *); 3503d6e32f53SMatthew R. Ochs 3504d6e32f53SMatthew R. Ochs struct cxlflash_cfg *cfg = file->private_data; 3505d6e32f53SMatthew R. Ochs struct device *dev = &cfg->dev->dev; 3506d6e32f53SMatthew R. Ochs char buf[sizeof(union cxlflash_ht_ioctls)]; 3507d6e32f53SMatthew R. Ochs void __user *uarg = (void __user *)arg; 3508d6e32f53SMatthew R. Ochs struct ht_cxlflash_hdr *hdr; 3509d6e32f53SMatthew R. Ochs size_t size = 0; 3510d6e32f53SMatthew R. Ochs bool known_ioctl = false; 3511d6e32f53SMatthew R. Ochs int idx = 0; 3512d6e32f53SMatthew R. Ochs int rc = 0; 3513d6e32f53SMatthew R. Ochs hioctl do_ioctl = NULL; 3514d6e32f53SMatthew R. Ochs 3515d6e32f53SMatthew R. Ochs static const struct { 3516d6e32f53SMatthew R. Ochs size_t size; 3517d6e32f53SMatthew R. Ochs hioctl ioctl; 3518d6e32f53SMatthew R. Ochs } ioctl_tbl[] = { /* NOTE: order matters here */ 35199cf43a36SMatthew R. Ochs { sizeof(struct ht_cxlflash_lun_provision), 35209cf43a36SMatthew R. Ochs (hioctl)cxlflash_lun_provision }, 3521bc88ac47SMatthew R. Ochs { sizeof(struct ht_cxlflash_afu_debug), 3522bc88ac47SMatthew R. Ochs (hioctl)cxlflash_afu_debug }, 3523d6e32f53SMatthew R. Ochs }; 3524d6e32f53SMatthew R. Ochs 3525d6e32f53SMatthew R. Ochs /* Hold read semaphore so we can drain if needed */ 3526d6e32f53SMatthew R. Ochs down_read(&cfg->ioctl_rwsem); 3527d6e32f53SMatthew R. Ochs 3528d6e32f53SMatthew R. Ochs dev_dbg(dev, "%s: cmd=%u idx=%d tbl_size=%lu\n", 3529d6e32f53SMatthew R. Ochs __func__, cmd, idx, sizeof(ioctl_tbl)); 3530d6e32f53SMatthew R. Ochs 3531d6e32f53SMatthew R. Ochs switch (cmd) { 35329cf43a36SMatthew R. Ochs case HT_CXLFLASH_LUN_PROVISION: 3533bc88ac47SMatthew R. 
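/*
 * ioctl_tbl[] above is indexed by the distance of a command's _IOC_NR()
 * from that of HT_CXLFLASH_LUN_PROVISION, which is why the table order
 * must mirror the uapi command numbering ("order matters here").
 * Assuming consecutive command numbers:
 *
 *    HT_CXLFLASH_LUN_PROVISION -> idx 0 -> cxlflash_lun_provision
 *    HT_CXLFLASH_AFU_DEBUG     -> idx 1 -> cxlflash_afu_debug
 */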
Ochs case HT_CXLFLASH_AFU_DEBUG:
35349cf43a36SMatthew R. Ochs known_ioctl = true;
35359cf43a36SMatthew R. Ochs idx = _IOC_NR(cmd) - _IOC_NR(HT_CXLFLASH_LUN_PROVISION);
35369cf43a36SMatthew R. Ochs size = ioctl_tbl[idx].size;
35379cf43a36SMatthew R. Ochs do_ioctl = ioctl_tbl[idx].ioctl;
35389cf43a36SMatthew R. Ochs 
35399cf43a36SMatthew R. Ochs if (likely(do_ioctl))
35409cf43a36SMatthew R. Ochs break;
35419cf43a36SMatthew R. Ochs 
35429cf43a36SMatthew R. Ochs /* fall through */
3543d6e32f53SMatthew R. Ochs default:
3544d6e32f53SMatthew R. Ochs rc = -EINVAL;
3545d6e32f53SMatthew R. Ochs goto out;
3546d6e32f53SMatthew R. Ochs }
3547d6e32f53SMatthew R. Ochs 
3548d6e32f53SMatthew R. Ochs if (unlikely(copy_from_user(&buf, uarg, size))) {
3549d6e32f53SMatthew R. Ochs dev_err(dev, "%s: copy_from_user() fail "
3550d6e32f53SMatthew R. Ochs "size=%lu cmd=%d (%s) uarg=%p\n",
3551d6e32f53SMatthew R. Ochs __func__, size, cmd, decode_hioctl(cmd), uarg);
3552d6e32f53SMatthew R. Ochs rc = -EFAULT;
3553d6e32f53SMatthew R. Ochs goto out;
3554d6e32f53SMatthew R. Ochs }
3555d6e32f53SMatthew R. Ochs 
3556d6e32f53SMatthew R. Ochs hdr = (struct ht_cxlflash_hdr *)&buf;
3557d6e32f53SMatthew R. Ochs if (hdr->version != HT_CXLFLASH_VERSION_0) {
3558d6e32f53SMatthew R. Ochs dev_dbg(dev, "%s: Version %u not supported for %s\n",
3559d6e32f53SMatthew R. Ochs __func__, hdr->version, decode_hioctl(cmd));
3560d6e32f53SMatthew R. Ochs rc = -EINVAL;
3561d6e32f53SMatthew R. Ochs goto out;
3562d6e32f53SMatthew R. Ochs }
3563d6e32f53SMatthew R. Ochs 
3564d6e32f53SMatthew R. Ochs if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->return_flags) {
3565d6e32f53SMatthew R. Ochs dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
3566d6e32f53SMatthew R. Ochs rc = -EINVAL;
3567d6e32f53SMatthew R. Ochs goto out;
3568d6e32f53SMatthew R. Ochs }
3569d6e32f53SMatthew R. Ochs 
3570d6e32f53SMatthew R. Ochs rc = do_ioctl(cfg, (void *)&buf);
3571d6e32f53SMatthew R. Ochs if (likely(!rc))
3572d6e32f53SMatthew R. Ochs if (unlikely(copy_to_user(uarg, &buf, size))) {
3573d6e32f53SMatthew R. Ochs dev_err(dev, "%s: copy_to_user() fail "
3574d6e32f53SMatthew R. Ochs "size=%lu cmd=%d (%s) uarg=%p\n",
3575d6e32f53SMatthew R. Ochs __func__, size, cmd, decode_hioctl(cmd), uarg);
3576d6e32f53SMatthew R. Ochs rc = -EFAULT;
3577d6e32f53SMatthew R. Ochs }
3578d6e32f53SMatthew R. Ochs 
3579d6e32f53SMatthew R. Ochs /* fall through to exit */
3580d6e32f53SMatthew R. Ochs 
3581d6e32f53SMatthew R. Ochs out:
3582d6e32f53SMatthew R. Ochs up_read(&cfg->ioctl_rwsem);
3583d6e32f53SMatthew R. Ochs if (unlikely(rc && known_ioctl))
3584d6e32f53SMatthew R. Ochs dev_err(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
3585d6e32f53SMatthew R. Ochs __func__, decode_hioctl(cmd), cmd, rc);
3586d6e32f53SMatthew R. Ochs else
3587d6e32f53SMatthew R. Ochs dev_dbg(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
3588d6e32f53SMatthew R. Ochs __func__, decode_hioctl(cmd), cmd, rc);
3589d6e32f53SMatthew R. Ochs return rc;
3590d6e32f53SMatthew R. Ochs }
3591d6e32f53SMatthew R. Ochs 
3592a834a36bSUma Krishnan /*
3593a834a36bSUma Krishnan * Character device file operations
3594a834a36bSUma Krishnan */
3595a834a36bSUma Krishnan static const struct file_operations cxlflash_chr_fops = {
3596a834a36bSUma Krishnan .owner = THIS_MODULE,
3597a834a36bSUma Krishnan .open = cxlflash_chr_open,
3598d6e32f53SMatthew R. Ochs .unlocked_ioctl = cxlflash_chr_ioctl,
3599d6e32f53SMatthew R.
Ochs .compat_ioctl = cxlflash_chr_ioctl, 3600a834a36bSUma Krishnan }; 3601a834a36bSUma Krishnan 3602a834a36bSUma Krishnan /** 3603a834a36bSUma Krishnan * init_chrdev() - initialize the character device for the host 3604a834a36bSUma Krishnan * @cfg: Internal structure associated with the host. 3605a834a36bSUma Krishnan * 3606a834a36bSUma Krishnan * Return: 0 on success, -errno on failure 3607a834a36bSUma Krishnan */ 3608a834a36bSUma Krishnan static int init_chrdev(struct cxlflash_cfg *cfg) 3609a834a36bSUma Krishnan { 3610a834a36bSUma Krishnan struct device *dev = &cfg->dev->dev; 3611a834a36bSUma Krishnan struct device *char_dev; 3612a834a36bSUma Krishnan dev_t devno; 3613a834a36bSUma Krishnan int minor; 3614a834a36bSUma Krishnan int rc = 0; 3615a834a36bSUma Krishnan 3616a834a36bSUma Krishnan minor = cxlflash_get_minor(); 3617a834a36bSUma Krishnan if (unlikely(minor < 0)) { 3618a834a36bSUma Krishnan dev_err(dev, "%s: Exhausted allowed adapters\n", __func__); 3619a834a36bSUma Krishnan rc = -ENOSPC; 3620a834a36bSUma Krishnan goto out; 3621a834a36bSUma Krishnan } 3622a834a36bSUma Krishnan 3623a834a36bSUma Krishnan devno = MKDEV(cxlflash_major, minor); 3624a834a36bSUma Krishnan cdev_init(&cfg->cdev, &cxlflash_chr_fops); 3625a834a36bSUma Krishnan 3626a834a36bSUma Krishnan rc = cdev_add(&cfg->cdev, devno, 1); 3627a834a36bSUma Krishnan if (rc) { 3628a834a36bSUma Krishnan dev_err(dev, "%s: cdev_add failed rc=%d\n", __func__, rc); 3629a834a36bSUma Krishnan goto err1; 3630a834a36bSUma Krishnan } 3631a834a36bSUma Krishnan 3632a834a36bSUma Krishnan char_dev = device_create(cxlflash_class, NULL, devno, 3633a834a36bSUma Krishnan NULL, "cxlflash%d", minor); 3634a834a36bSUma Krishnan if (IS_ERR(char_dev)) { 3635a834a36bSUma Krishnan rc = PTR_ERR(char_dev); 3636a834a36bSUma Krishnan dev_err(dev, "%s: device_create failed rc=%d\n", 3637a834a36bSUma Krishnan __func__, rc); 3638a834a36bSUma Krishnan goto err2; 3639a834a36bSUma Krishnan } 3640a834a36bSUma Krishnan 3641a834a36bSUma Krishnan cfg->chardev = char_dev; 3642a834a36bSUma Krishnan out: 3643a834a36bSUma Krishnan dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 3644a834a36bSUma Krishnan return rc; 3645a834a36bSUma Krishnan err2: 3646a834a36bSUma Krishnan cdev_del(&cfg->cdev); 3647a834a36bSUma Krishnan err1: 3648a834a36bSUma Krishnan cxlflash_put_minor(minor); 3649a834a36bSUma Krishnan goto out; 3650a834a36bSUma Krishnan } 3651a834a36bSUma Krishnan 3652a834a36bSUma Krishnan /** 3653c21e0bbfSMatthew R. Ochs * cxlflash_probe() - PCI entry point to add host 3654c21e0bbfSMatthew R. Ochs * @pdev: PCI device associated with the host. 3655c21e0bbfSMatthew R. Ochs * @dev_id: PCI device id associated with device. 3656c21e0bbfSMatthew R. Ochs * 3657323e3342SMatthew R. Ochs * The device will initially start out in a 'probing' state and 3658323e3342SMatthew R. Ochs * transition to the 'normal' state at the end of a successful 3659323e3342SMatthew R. Ochs * probe. Should an EEH event occur during probe, the notification 3660323e3342SMatthew R. Ochs * thread (error_detected()) will wait until the probe handler 3661323e3342SMatthew R. Ochs * is nearly complete. At that time, the device will be moved to 3662323e3342SMatthew R. Ochs * a 'probed' state and the EEH thread woken up to drive the slot 3663323e3342SMatthew R. Ochs * reset and recovery (device moves to 'normal' state). Meanwhile, 3664323e3342SMatthew R. Ochs * the probe will be allowed to exit successfully. 3665323e3342SMatthew R. Ochs * 36661284fb0cSMatthew R. 
Ochs * Return: 0 on success, -errno on failure 3667c21e0bbfSMatthew R. Ochs */ 3668c21e0bbfSMatthew R. Ochs static int cxlflash_probe(struct pci_dev *pdev, 3669c21e0bbfSMatthew R. Ochs const struct pci_device_id *dev_id) 3670c21e0bbfSMatthew R. Ochs { 3671c21e0bbfSMatthew R. Ochs struct Scsi_Host *host; 3672c21e0bbfSMatthew R. Ochs struct cxlflash_cfg *cfg = NULL; 3673fb67d44dSMatthew R. Ochs struct device *dev = &pdev->dev; 3674c21e0bbfSMatthew R. Ochs struct dev_dependent_vals *ddv; 3675c21e0bbfSMatthew R. Ochs int rc = 0; 367678ae028eSMatthew R. Ochs int k; 3677c21e0bbfSMatthew R. Ochs 3678c21e0bbfSMatthew R. Ochs dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n", 3679c21e0bbfSMatthew R. Ochs __func__, pdev->irq); 3680c21e0bbfSMatthew R. Ochs 3681c21e0bbfSMatthew R. Ochs ddv = (struct dev_dependent_vals *)dev_id->driver_data; 3682c21e0bbfSMatthew R. Ochs driver_template.max_sectors = ddv->max_sectors; 3683c21e0bbfSMatthew R. Ochs 3684c21e0bbfSMatthew R. Ochs host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg)); 3685c21e0bbfSMatthew R. Ochs if (!host) { 3686fb67d44dSMatthew R. Ochs dev_err(dev, "%s: scsi_host_alloc failed\n", __func__); 3687c21e0bbfSMatthew R. Ochs rc = -ENOMEM; 3688c21e0bbfSMatthew R. Ochs goto out; 3689c21e0bbfSMatthew R. Ochs } 3690c21e0bbfSMatthew R. Ochs 3691c21e0bbfSMatthew R. Ochs host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS; 3692c21e0bbfSMatthew R. Ochs host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET; 3693c21e0bbfSMatthew R. Ochs host->unique_id = host->host_no; 3694c21e0bbfSMatthew R. Ochs host->max_cmd_len = CXLFLASH_MAX_CDB_LEN; 3695c21e0bbfSMatthew R. Ochs 3696fb67d44dSMatthew R. Ochs cfg = shost_priv(host); 3697c21e0bbfSMatthew R. Ochs cfg->host = host; 3698c21e0bbfSMatthew R. Ochs rc = alloc_mem(cfg); 3699c21e0bbfSMatthew R. Ochs if (rc) { 3700fb67d44dSMatthew R. Ochs dev_err(dev, "%s: alloc_mem failed\n", __func__); 3701c21e0bbfSMatthew R. Ochs rc = -ENOMEM; 37028b5b1e87SMatthew R. Ochs scsi_host_put(cfg->host); 3703c21e0bbfSMatthew R. Ochs goto out; 3704c21e0bbfSMatthew R. Ochs } 3705c21e0bbfSMatthew R. Ochs 3706c21e0bbfSMatthew R. Ochs cfg->init_state = INIT_STATE_NONE; 3707c21e0bbfSMatthew R. Ochs cfg->dev = pdev; 370817ead26fSMatthew R. Ochs cfg->cxl_fops = cxlflash_cxl_fops; 37092cb79266SMatthew R. Ochs 371007d0c52fSUma Krishnan if (ddv->flags & CXLFLASH_OCXL_DEV) 371107d0c52fSUma Krishnan cfg->ops = &cxlflash_ocxl_ops; 371207d0c52fSUma Krishnan else 371307d0c52fSUma Krishnan cfg->ops = &cxlflash_cxl_ops; 371407d0c52fSUma Krishnan 37152cb79266SMatthew R. Ochs /* 371678ae028eSMatthew R. Ochs * Promoted LUNs move to the top of the LUN table. The rest stay on 371778ae028eSMatthew R. Ochs * the bottom half. The bottom half grows from the end (index = 255), 371878ae028eSMatthew R. Ochs * whereas the top half grows from the beginning (index = 0). 371978ae028eSMatthew R. Ochs * 372078ae028eSMatthew R. Ochs * Initialize the last LUN index for all possible ports. 37212cb79266SMatthew R. Ochs */ 37222cb79266SMatthew R. Ochs cfg->promote_lun_index = 0; 372378ae028eSMatthew R. Ochs 372478ae028eSMatthew R. Ochs for (k = 0; k < MAX_FC_PORTS; k++) 372578ae028eSMatthew R. Ochs cfg->last_lun_index[k] = CXLFLASH_NUM_VLUNS/2 - 1; 37262cb79266SMatthew R. Ochs 3727c21e0bbfSMatthew R. Ochs cfg->dev_id = (struct pci_device_id *)dev_id; 3728c21e0bbfSMatthew R. Ochs 3729c21e0bbfSMatthew R. Ochs init_waitqueue_head(&cfg->tmf_waitq); 3730439e85c1SMatthew R. Ochs init_waitqueue_head(&cfg->reset_waitq); 3731c21e0bbfSMatthew R. 
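/*
 * A worked example of the split LUN table initialized above (assuming
 * CXLFLASH_NUM_VLUNS == 512, consistent with the "index = 255" cited in
 * the comment): promoted LUNs are handed out from index 0 upward via
 * promote_lun_index, while per-port LUNs grow downward from
 * last_lun_index[k] == CXLFLASH_NUM_VLUNS/2 - 1 == 255.
 */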
Ochs 
3732c21e0bbfSMatthew R. Ochs INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
3733c21e0bbfSMatthew R. Ochs cfg->lr_state = LINK_RESET_INVALID;
3734c21e0bbfSMatthew R. Ochs cfg->lr_port = -1;
37350d73122cSMatthew R. Ochs spin_lock_init(&cfg->tmf_slock);
373665be2c79SMatthew R. Ochs mutex_init(&cfg->ctx_tbl_list_mutex);
373765be2c79SMatthew R. Ochs mutex_init(&cfg->ctx_recovery_mutex);
37380a27ae51SMatthew R. Ochs init_rwsem(&cfg->ioctl_rwsem);
373965be2c79SMatthew R. Ochs INIT_LIST_HEAD(&cfg->ctx_err_recovery);
374065be2c79SMatthew R. Ochs INIT_LIST_HEAD(&cfg->lluns);
3741c21e0bbfSMatthew R. Ochs 
3742c21e0bbfSMatthew R. Ochs pci_set_drvdata(pdev, cfg);
3743c21e0bbfSMatthew R. Ochs 
3744c21e0bbfSMatthew R. Ochs rc = init_pci(cfg);
3745c21e0bbfSMatthew R. Ochs if (rc) {
3746fb67d44dSMatthew R. Ochs dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc);
3747c21e0bbfSMatthew R. Ochs goto out_remove;
3748c21e0bbfSMatthew R. Ochs }
3749c21e0bbfSMatthew R. Ochs cfg->init_state = INIT_STATE_PCI;
3750c21e0bbfSMatthew R. Ochs 
375148e077dbSUma Krishnan cfg->afu_cookie = cfg->ops->create_afu(pdev);
375248e077dbSUma Krishnan if (unlikely(!cfg->afu_cookie)) {
375348e077dbSUma Krishnan dev_err(dev, "%s: create_afu failed\n", __func__);
375348e077dbSUma Krishnan rc = -ENOMEM;
375448e077dbSUma Krishnan goto out_remove;
375548e077dbSUma Krishnan }
375648e077dbSUma Krishnan 
3757c21e0bbfSMatthew R. Ochs rc = init_afu(cfg);
3758323e3342SMatthew R. Ochs if (rc && !wq_has_sleeper(&cfg->reset_waitq)) {
3759fb67d44dSMatthew R. Ochs dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc);
3760c21e0bbfSMatthew R. Ochs goto out_remove;
3761c21e0bbfSMatthew R. Ochs }
3762c21e0bbfSMatthew R. Ochs cfg->init_state = INIT_STATE_AFU;
3763c21e0bbfSMatthew R. Ochs 
3764c21e0bbfSMatthew R. Ochs rc = init_scsi(cfg);
3765c21e0bbfSMatthew R. Ochs if (rc) {
3766fb67d44dSMatthew R. Ochs dev_err(dev, "%s: init_scsi failed rc=%d\n", __func__, rc);
3767c21e0bbfSMatthew R. Ochs goto out_remove;
3768c21e0bbfSMatthew R. Ochs }
3769c21e0bbfSMatthew R. Ochs cfg->init_state = INIT_STATE_SCSI;
3770c21e0bbfSMatthew R. Ochs 
3771a834a36bSUma Krishnan rc = init_chrdev(cfg);
3772a834a36bSUma Krishnan if (rc) {
3773a834a36bSUma Krishnan dev_err(dev, "%s: init_chrdev failed rc=%d\n", __func__, rc);
3774a834a36bSUma Krishnan goto out_remove;
3775a834a36bSUma Krishnan }
3776a834a36bSUma Krishnan cfg->init_state = INIT_STATE_CDEV;
3777a834a36bSUma Krishnan 
3778323e3342SMatthew R. Ochs if (wq_has_sleeper(&cfg->reset_waitq)) {
3779323e3342SMatthew R. Ochs cfg->state = STATE_PROBED;
3780323e3342SMatthew R. Ochs wake_up_all(&cfg->reset_waitq);
3781323e3342SMatthew R. Ochs } else
3782323e3342SMatthew R. Ochs cfg->state = STATE_NORMAL;
3783c21e0bbfSMatthew R. Ochs out:
3784fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3785c21e0bbfSMatthew R. Ochs return rc;
3786c21e0bbfSMatthew R. Ochs 
3787c21e0bbfSMatthew R. Ochs out_remove:
3788c21e0bbfSMatthew R. Ochs cxlflash_remove(pdev);
3789c21e0bbfSMatthew R. Ochs goto out;
3790c21e0bbfSMatthew R. Ochs }
3791c21e0bbfSMatthew R. Ochs 
37925cdac81aSMatthew R. Ochs /**
37935cdac81aSMatthew R. Ochs * cxlflash_pci_error_detected() - called when a PCI error is detected
37945cdac81aSMatthew R. Ochs * @pdev: PCI device struct.
37955cdac81aSMatthew R. Ochs * @state: PCI channel state.
37965cdac81aSMatthew R. Ochs *
37971d3324c3SMatthew R. Ochs * When an EEH occurs during an active reset, wait until the reset is
37981d3324c3SMatthew R. Ochs * complete and then take action based upon the device state.
37991d3324c3SMatthew R.
Ochs * 38005cdac81aSMatthew R. Ochs * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT 38015cdac81aSMatthew R. Ochs */ 38025cdac81aSMatthew R. Ochs static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev, 38035cdac81aSMatthew R. Ochs pci_channel_state_t state) 38045cdac81aSMatthew R. Ochs { 380565be2c79SMatthew R. Ochs int rc = 0; 38065cdac81aSMatthew R. Ochs struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); 38075cdac81aSMatthew R. Ochs struct device *dev = &cfg->dev->dev; 38085cdac81aSMatthew R. Ochs 38095cdac81aSMatthew R. Ochs dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state); 38105cdac81aSMatthew R. Ochs 38115cdac81aSMatthew R. Ochs switch (state) { 38125cdac81aSMatthew R. Ochs case pci_channel_io_frozen: 3813323e3342SMatthew R. Ochs wait_event(cfg->reset_waitq, cfg->state != STATE_RESET && 3814323e3342SMatthew R. Ochs cfg->state != STATE_PROBING); 38151d3324c3SMatthew R. Ochs if (cfg->state == STATE_FAILTERM) 38161d3324c3SMatthew R. Ochs return PCI_ERS_RESULT_DISCONNECT; 38171d3324c3SMatthew R. Ochs 3818439e85c1SMatthew R. Ochs cfg->state = STATE_RESET; 38195cdac81aSMatthew R. Ochs scsi_block_requests(cfg->host); 38200a27ae51SMatthew R. Ochs drain_ioctls(cfg); 382165be2c79SMatthew R. Ochs rc = cxlflash_mark_contexts_error(cfg); 382265be2c79SMatthew R. Ochs if (unlikely(rc)) 3823fb67d44dSMatthew R. Ochs dev_err(dev, "%s: Failed to mark user contexts rc=%d\n", 382465be2c79SMatthew R. Ochs __func__, rc); 38259526f360SManoj N. Kumar term_afu(cfg); 38265cdac81aSMatthew R. Ochs return PCI_ERS_RESULT_NEED_RESET; 38275cdac81aSMatthew R. Ochs case pci_channel_io_perm_failure: 38285cdac81aSMatthew R. Ochs cfg->state = STATE_FAILTERM; 3829439e85c1SMatthew R. Ochs wake_up_all(&cfg->reset_waitq); 38305cdac81aSMatthew R. Ochs scsi_unblock_requests(cfg->host); 38315cdac81aSMatthew R. Ochs return PCI_ERS_RESULT_DISCONNECT; 38325cdac81aSMatthew R. Ochs default: 38335cdac81aSMatthew R. Ochs break; 38345cdac81aSMatthew R. Ochs } 38355cdac81aSMatthew R. Ochs return PCI_ERS_RESULT_NEED_RESET; 38365cdac81aSMatthew R. Ochs } 38375cdac81aSMatthew R. Ochs 38385cdac81aSMatthew R. Ochs /** 38395cdac81aSMatthew R. Ochs * cxlflash_pci_slot_reset() - called when PCI slot has been reset 38405cdac81aSMatthew R. Ochs * @pdev: PCI device struct. 38415cdac81aSMatthew R. Ochs * 38425cdac81aSMatthew R. Ochs * This routine is called by the pci error recovery code after the PCI 38435cdac81aSMatthew R. Ochs * slot has been reset, just before we should resume normal operations. 38445cdac81aSMatthew R. Ochs * 38455cdac81aSMatthew R. Ochs * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT 38465cdac81aSMatthew R. Ochs */ 38475cdac81aSMatthew R. Ochs static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev) 38485cdac81aSMatthew R. Ochs { 38495cdac81aSMatthew R. Ochs int rc = 0; 38505cdac81aSMatthew R. Ochs struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); 38515cdac81aSMatthew R. Ochs struct device *dev = &cfg->dev->dev; 38525cdac81aSMatthew R. Ochs 38535cdac81aSMatthew R. Ochs dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev); 38545cdac81aSMatthew R. Ochs 38555cdac81aSMatthew R. Ochs rc = init_afu(cfg); 38565cdac81aSMatthew R. Ochs if (unlikely(rc)) { 3857fb67d44dSMatthew R. Ochs dev_err(dev, "%s: EEH recovery failed rc=%d\n", __func__, rc); 38585cdac81aSMatthew R. Ochs return PCI_ERS_RESULT_DISCONNECT; 38595cdac81aSMatthew R. Ochs } 38605cdac81aSMatthew R. Ochs 38615cdac81aSMatthew R. Ochs return PCI_ERS_RESULT_RECOVERED; 38625cdac81aSMatthew R. 
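/*
 * Taken together, the error handlers in this file drive EEH recovery
 * roughly as follows (summarizing the code, states are cfg->state):
 *
 *    error_detected(io_frozen) -> STATE_RESET, drain ioctls, term_afu()
 *    slot_reset()              -> init_afu() rebuilds the AFU
 *    resume()                  -> STATE_NORMAL, wake waiters, unblock I/O
 *
 * error_detected(io_perm_failure) instead moves to STATE_FAILTERM and
 * returns PCI_ERS_RESULT_DISCONNECT.
 */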
Ochs } 38635cdac81aSMatthew R. Ochs 38645cdac81aSMatthew R. Ochs /** 38655cdac81aSMatthew R. Ochs * cxlflash_pci_resume() - called when normal operation can resume 38665cdac81aSMatthew R. Ochs * @pdev: PCI device struct 38675cdac81aSMatthew R. Ochs */ 38685cdac81aSMatthew R. Ochs static void cxlflash_pci_resume(struct pci_dev *pdev) 38695cdac81aSMatthew R. Ochs { 38705cdac81aSMatthew R. Ochs struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); 38715cdac81aSMatthew R. Ochs struct device *dev = &cfg->dev->dev; 38725cdac81aSMatthew R. Ochs 38735cdac81aSMatthew R. Ochs dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev); 38745cdac81aSMatthew R. Ochs 38755cdac81aSMatthew R. Ochs cfg->state = STATE_NORMAL; 3876439e85c1SMatthew R. Ochs wake_up_all(&cfg->reset_waitq); 38775cdac81aSMatthew R. Ochs scsi_unblock_requests(cfg->host); 38785cdac81aSMatthew R. Ochs } 38795cdac81aSMatthew R. Ochs 3880a834a36bSUma Krishnan /** 3881a834a36bSUma Krishnan * cxlflash_devnode() - provides devtmpfs for devices in the cxlflash class 3882a834a36bSUma Krishnan * @dev: Character device. 3883a834a36bSUma Krishnan * @mode: Mode that can be used to verify access. 3884a834a36bSUma Krishnan * 3885a834a36bSUma Krishnan * Return: Allocated string describing the devtmpfs structure. 3886a834a36bSUma Krishnan */ 3887a834a36bSUma Krishnan static char *cxlflash_devnode(struct device *dev, umode_t *mode) 3888a834a36bSUma Krishnan { 3889a834a36bSUma Krishnan return kasprintf(GFP_KERNEL, "cxlflash/%s", dev_name(dev)); 3890a834a36bSUma Krishnan } 3891a834a36bSUma Krishnan 3892a834a36bSUma Krishnan /** 3893a834a36bSUma Krishnan * cxlflash_class_init() - create character device class 3894a834a36bSUma Krishnan * 3895a834a36bSUma Krishnan * Return: 0 on success, -errno on failure 3896a834a36bSUma Krishnan */ 3897a834a36bSUma Krishnan static int cxlflash_class_init(void) 3898a834a36bSUma Krishnan { 3899a834a36bSUma Krishnan dev_t devno; 3900a834a36bSUma Krishnan int rc = 0; 3901a834a36bSUma Krishnan 3902a834a36bSUma Krishnan rc = alloc_chrdev_region(&devno, 0, CXLFLASH_MAX_ADAPTERS, "cxlflash"); 3903a834a36bSUma Krishnan if (unlikely(rc)) { 3904a834a36bSUma Krishnan pr_err("%s: alloc_chrdev_region failed rc=%d\n", __func__, rc); 3905a834a36bSUma Krishnan goto out; 3906a834a36bSUma Krishnan } 3907a834a36bSUma Krishnan 3908a834a36bSUma Krishnan cxlflash_major = MAJOR(devno); 3909a834a36bSUma Krishnan 3910a834a36bSUma Krishnan cxlflash_class = class_create(THIS_MODULE, "cxlflash"); 3911a834a36bSUma Krishnan if (IS_ERR(cxlflash_class)) { 3912a834a36bSUma Krishnan rc = PTR_ERR(cxlflash_class); 3913a834a36bSUma Krishnan pr_err("%s: class_create failed rc=%d\n", __func__, rc); 3914a834a36bSUma Krishnan goto err; 3915a834a36bSUma Krishnan } 3916a834a36bSUma Krishnan 3917a834a36bSUma Krishnan cxlflash_class->devnode = cxlflash_devnode; 3918a834a36bSUma Krishnan out: 3919a834a36bSUma Krishnan pr_debug("%s: returning rc=%d\n", __func__, rc); 3920a834a36bSUma Krishnan return rc; 3921a834a36bSUma Krishnan err: 3922a834a36bSUma Krishnan unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS); 3923a834a36bSUma Krishnan goto out; 3924a834a36bSUma Krishnan } 3925a834a36bSUma Krishnan 3926a834a36bSUma Krishnan /** 3927a834a36bSUma Krishnan * cxlflash_class_exit() - destroy character device class 3928a834a36bSUma Krishnan */ 3929a834a36bSUma Krishnan static void cxlflash_class_exit(void) 3930a834a36bSUma Krishnan { 3931a834a36bSUma Krishnan dev_t devno = MKDEV(cxlflash_major, 0); 3932a834a36bSUma Krishnan 3933a834a36bSUma Krishnan class_destroy(cxlflash_class); 
3934a834a36bSUma Krishnan unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS); 3935a834a36bSUma Krishnan } 3936a834a36bSUma Krishnan 39375cdac81aSMatthew R. Ochs static const struct pci_error_handlers cxlflash_err_handler = { 39385cdac81aSMatthew R. Ochs .error_detected = cxlflash_pci_error_detected, 39395cdac81aSMatthew R. Ochs .slot_reset = cxlflash_pci_slot_reset, 39405cdac81aSMatthew R. Ochs .resume = cxlflash_pci_resume, 39415cdac81aSMatthew R. Ochs }; 39425cdac81aSMatthew R. Ochs 3943c21e0bbfSMatthew R. Ochs /* 3944c21e0bbfSMatthew R. Ochs * PCI device structure 3945c21e0bbfSMatthew R. Ochs */ 3946c21e0bbfSMatthew R. Ochs static struct pci_driver cxlflash_driver = { 3947c21e0bbfSMatthew R. Ochs .name = CXLFLASH_NAME, 3948c21e0bbfSMatthew R. Ochs .id_table = cxlflash_pci_table, 3949c21e0bbfSMatthew R. Ochs .probe = cxlflash_probe, 3950c21e0bbfSMatthew R. Ochs .remove = cxlflash_remove, 3951babf985dSUma Krishnan .shutdown = cxlflash_remove, 39525cdac81aSMatthew R. Ochs .err_handler = &cxlflash_err_handler, 3953c21e0bbfSMatthew R. Ochs }; 3954c21e0bbfSMatthew R. Ochs 3955c21e0bbfSMatthew R. Ochs /** 3956c21e0bbfSMatthew R. Ochs * init_cxlflash() - module entry point 3957c21e0bbfSMatthew R. Ochs * 39581284fb0cSMatthew R. Ochs * Return: 0 on success, -errno on failure 3959c21e0bbfSMatthew R. Ochs */ 3960c21e0bbfSMatthew R. Ochs static int __init init_cxlflash(void) 3961c21e0bbfSMatthew R. Ochs { 3962a834a36bSUma Krishnan int rc; 3963a834a36bSUma Krishnan 3964cd41e18dSMatthew R. Ochs check_sizes(); 396565be2c79SMatthew R. Ochs cxlflash_list_init(); 3966a834a36bSUma Krishnan rc = cxlflash_class_init(); 3967a834a36bSUma Krishnan if (unlikely(rc)) 3968a834a36bSUma Krishnan goto out; 396965be2c79SMatthew R. Ochs 3970a834a36bSUma Krishnan rc = pci_register_driver(&cxlflash_driver); 3971a834a36bSUma Krishnan if (unlikely(rc)) 3972a834a36bSUma Krishnan goto err; 3973a834a36bSUma Krishnan out: 3974a834a36bSUma Krishnan pr_debug("%s: returning rc=%d\n", __func__, rc); 3975a834a36bSUma Krishnan return rc; 3976a834a36bSUma Krishnan err: 3977a834a36bSUma Krishnan cxlflash_class_exit(); 3978a834a36bSUma Krishnan goto out; 3979c21e0bbfSMatthew R. Ochs } 3980c21e0bbfSMatthew R. Ochs 3981c21e0bbfSMatthew R. Ochs /** 3982c21e0bbfSMatthew R. Ochs * exit_cxlflash() - module exit point 3983c21e0bbfSMatthew R. Ochs */ 3984c21e0bbfSMatthew R. Ochs static void __exit exit_cxlflash(void) 3985c21e0bbfSMatthew R. Ochs { 398665be2c79SMatthew R. Ochs cxlflash_term_global_luns(); 398765be2c79SMatthew R. Ochs cxlflash_free_errpage(); 398865be2c79SMatthew R. Ochs 3989c21e0bbfSMatthew R. Ochs pci_unregister_driver(&cxlflash_driver); 3990a834a36bSUma Krishnan cxlflash_class_exit(); 3991c21e0bbfSMatthew R. Ochs } 3992c21e0bbfSMatthew R. Ochs 3993c21e0bbfSMatthew R. Ochs module_init(init_cxlflash); 3994c21e0bbfSMatthew R. Ochs module_exit(exit_cxlflash); 3995
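/*
 * init_cxlflash() and exit_cxlflash() above follow the usual mirrored
 * setup/teardown idiom - each step that can fail unwinds only the steps
 * that preceded it. A generic sketch of the pattern (illustrative, not
 * driver code):
 *
 *    rc = setup_a();
 *    if (rc)
 *        goto out;
 *    rc = setup_b();
 *    if (rc)
 *        goto err_undo_a;
 *    out:
 *        return rc;
 *    err_undo_a:
 *        undo_a();
 *        goto out;
 *
 * exit_cxlflash() correspondingly tears down in reverse: the PCI driver
 * is unregistered before the character device class is destroyed.
 */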