// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *	       Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 */

#include <linux/delay.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/syscalls.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "sislite.h"
#include "common.h"
#include "vlun.h"
#include "superpipe.h"

struct cxlflash_global global;

/**
 * marshal_rele_to_resize() - translate release to resize structure
 * @release:	Source structure from which to translate/copy.
 * @resize:	Destination structure for the translate/copy.
 */
static void marshal_rele_to_resize(struct dk_cxlflash_release *release,
				   struct dk_cxlflash_resize *resize)
{
	resize->hdr = release->hdr;
	resize->context_id = release->context_id;
	resize->rsrc_handle = release->rsrc_handle;
}

/**
 * marshal_det_to_rele() - translate detach to release structure
 * @detach:	Source structure from which to translate/copy.
 * @release:	Destination structure for the translate/copy.
 */
static void marshal_det_to_rele(struct dk_cxlflash_detach *detach,
				struct dk_cxlflash_release *release)
{
	release->hdr = detach->hdr;
	release->context_id = detach->context_id;
}

/**
 * marshal_udir_to_rele() - translate udirect to release structure
 * @udirect:	Source structure from which to translate/copy.
 * @release:	Destination structure for the translate/copy.
 */
static void marshal_udir_to_rele(struct dk_cxlflash_udirect *udirect,
				 struct dk_cxlflash_release *release)
{
	release->hdr = udirect->hdr;
	release->context_id = udirect->context_id;
	release->rsrc_handle = udirect->rsrc_handle;
}

/**
 * cxlflash_free_errpage() - frees resources associated with global error page
 */
void cxlflash_free_errpage(void)
{
	mutex_lock(&global.mutex);
	if (global.err_page) {
		__free_page(global.err_page);
		global.err_page = NULL;
	}
	mutex_unlock(&global.mutex);
}

/**
 * cxlflash_stop_term_user_contexts() - stops/terminates known user contexts
 * @cfg:	Internal structure associated with the host.
 *
 * When the host needs to go down, all users must be quiesced and their
 * memory freed. This is accomplished by putting the contexts in error
 * state which will notify the user and let them 'drive' the tear down.
 * Meanwhile, this routine camps until all user contexts have been removed.
 *
 * Note that the main loop in this routine will always execute at least once
 * to flush the reset_waitq.
 */
void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int i, found = true;

	cxlflash_mark_contexts_error(cfg);

	while (true) {
		for (i = 0; i < MAX_CONTEXT; i++)
			if (cfg->ctx_tbl[i]) {
				found = true;
				break;
			}

		if (!found && list_empty(&cfg->ctx_err_recovery))
			return;

		dev_dbg(dev, "%s: Wait for user contexts to quiesce...\n",
			__func__);
		wake_up_all(&cfg->reset_waitq);
		ssleep(1);
		found = false;
	}
}

/**
 * find_error_context() - locates a context by cookie on the error recovery list
 * @cfg:	Internal structure associated with the host.
 * @rctxid:	Desired context by id.
 * @file:	Desired context by file.
 *
 * Return: Found context on success, NULL on failure
 */
static struct ctx_info *find_error_context(struct cxlflash_cfg *cfg, u64 rctxid,
					   struct file *file)
{
	struct ctx_info *ctxi;

	list_for_each_entry(ctxi, &cfg->ctx_err_recovery, list)
		if ((ctxi->ctxid == rctxid) || (ctxi->file == file))
			return ctxi;

	return NULL;
}

/**
 * get_context() - obtains a validated and locked context reference
 * @cfg:	Internal structure associated with the host.
 * @rctxid:	Desired context (raw, un-decoded format).
 * @arg:	LUN information or file associated with request.
 * @ctx_ctrl:	Control information to 'steer' desired lookup.
 *
 * NOTE: despite the name pid, in Linux, current->pid actually refers
 * to the lightweight process id (tid) and can change if the process is
 * multithreaded. The tgid remains constant for the process and only
 * changes when the process forks. For all intents and purposes, think
 * of tgid as a pid in the traditional sense.
 *
 * Return: Validated context on success, NULL on failure
 */
struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxid,
			     void *arg, enum ctx_ctrl ctx_ctrl)
{
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	struct lun_access *lun_access = NULL;
	struct file *file = NULL;
	struct llun_info *lli = arg;
	u64 ctxid = DECODE_CTXID(rctxid);
	int rc;
	pid_t pid = task_tgid_nr(current), ctxpid = 0;

	if (ctx_ctrl & CTX_CTRL_FILE) {
		lli = NULL;
		file = (struct file *)arg;
	}

	if (ctx_ctrl & CTX_CTRL_CLONE)
		pid = task_ppid_nr(current);

	if (likely(ctxid < MAX_CONTEXT)) {
		while (true) {
			mutex_lock(&cfg->ctx_tbl_list_mutex);
			ctxi = cfg->ctx_tbl[ctxid];
			if (ctxi)
				if ((file && (ctxi->file != file)) ||
				    (!file && (ctxi->ctxid != rctxid)))
					ctxi = NULL;

			if ((ctx_ctrl & CTX_CTRL_ERR) ||
			    (!ctxi && (ctx_ctrl & CTX_CTRL_ERR_FALLBACK)))
				ctxi = find_error_context(cfg, rctxid, file);
			if (!ctxi) {
				mutex_unlock(&cfg->ctx_tbl_list_mutex);
				goto out;
			}

			/*
			 * Need to acquire ownership of the context while still
			 * under the table/list lock to serialize with a remove
			 * thread. Use the 'try' to avoid stalling the
			 * table/list lock for a single context.
			 *
			 * Note that the lock order is:
			 *
			 *	cfg->ctx_tbl_list_mutex -> ctxi->mutex
			 *
			 * Therefore release ctx_tbl_list_mutex before retrying.
			 */
			rc = mutex_trylock(&ctxi->mutex);
			mutex_unlock(&cfg->ctx_tbl_list_mutex);
			if (rc)
				break; /* got the context's lock! */
		}

		if (ctxi->unavail)
			goto denied;

		ctxpid = ctxi->pid;
		if (likely(!(ctx_ctrl & CTX_CTRL_NOPID)))
			if (pid != ctxpid)
				goto denied;

		if (lli) {
			list_for_each_entry(lun_access, &ctxi->luns, list)
				if (lun_access->lli == lli)
					goto out;
			goto denied;
		}
	}

out:
	dev_dbg(dev, "%s: rctxid=%016llx ctxinfo=%p ctxpid=%u pid=%u "
		"ctx_ctrl=%u\n", __func__, rctxid, ctxi, ctxpid, pid,
		ctx_ctrl);

	return ctxi;

denied:
	mutex_unlock(&ctxi->mutex);
	ctxi = NULL;
	goto out;
}

/**
 * put_context() - release a context that was retrieved from get_context()
 * @ctxi:	Context to release.
 *
 * For now, releasing the context equates to unlocking its mutex.
 */
void put_context(struct ctx_info *ctxi)
{
	mutex_unlock(&ctxi->mutex);
}
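
/*
 * A minimal usage sketch of the get_context()/put_context() pairing,
 * modeled on the lookup performed in _cxlflash_disk_release() below.
 * The variable names are illustrative only; this is not an additional
 * code path in the driver:
 *
 *	ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
 *	if (unlikely(!ctxi)) {
 *		rc = -EINVAL;	// context not found or access denied
 *		goto out;
 *	}
 *	// ...operate on the context while holding its mutex...
 *	put_context(ctxi);
 */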

/**
 * afu_attach() - attach a context to the AFU
 * @cfg:	Internal structure associated with the host.
 * @ctxi:	Context to attach.
 *
 * Upon setting the context capabilities, they must be confirmed with
 * a read back operation as the context might have been closed since
 * the mailbox was unlocked. When this occurs, registration is failed.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
{
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct sisl_ctrl_map __iomem *ctrl_map = ctxi->ctrl_map;
	int rc = 0;
	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
	u64 val;
	int i;

	/* Unlock cap and restrict user to read/write cmds in translated mode */
	readq_be(&ctrl_map->mbox_r);
	val = (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD);
	writeq_be(val, &ctrl_map->ctx_cap);
	val = readq_be(&ctrl_map->ctx_cap);
	if (val != (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD)) {
		dev_err(dev, "%s: ctx may be closed val=%016llx\n",
			__func__, val);
		rc = -EAGAIN;
		goto out;
	}

	if (afu_is_ocxl_lisn(afu)) {
		/* Set up the LISN effective address for each interrupt */
		for (i = 0; i < ctxi->irqs; i++) {
			val = cfg->ops->get_irq_objhndl(ctxi->ctx, i);
			writeq_be(val, &ctrl_map->lisn_ea[i]);
		}

		/* Use primary HWQ PASID as identifier for all interrupts */
		val = hwq->ctx_hndl;
		writeq_be(SISL_LISN_PASID(val, val), &ctrl_map->lisn_pasid[0]);
		writeq_be(SISL_LISN_PASID(0UL, val), &ctrl_map->lisn_pasid[1]);
	}

	/* Set up MMIO registers pointing to the RHT */
	writeq_be((u64)ctxi->rht_start, &ctrl_map->rht_start);
	val = SISL_RHT_CNT_ID((u64)MAX_RHT_PER_CONTEXT, (u64)(hwq->ctx_hndl));
	writeq_be(val, &ctrl_map->rht_cnt_id);
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * read_cap16() - issues a SCSI READ_CAP16 command
 * @sdev:	SCSI device associated with LUN.
 * @lli:	LUN destined for capacity request.
 *
 * The READ_CAP16 can take quite a while to complete. Should an EEH occur while
 * in scsi_execute_cmd(), the EEH handler will attempt to recover. As part of
 * the recovery, the handler drains all currently running ioctls, waiting until
 * they have completed before proceeding with a reset. As this routine is used
 * on the ioctl path, this can create a condition where the EEH handler becomes
 * stuck, infinitely waiting for this ioctl thread. To avoid this behavior,
 * temporarily unmark this thread as an ioctl thread by releasing the ioctl
 * read semaphore. This will allow the EEH handler to proceed with a recovery
 * while this thread is still running. Once the scsi_execute_cmd() returns,
 * reacquire the ioctl read semaphore and check the adapter state in case it
 * changed while inside of scsi_execute_cmd(). The state check will wait if the
 * adapter is still being recovered or return a failure if the recovery failed.
 * In the event that the adapter reset failed, simply return the failure as the
 * ioctl would be unable to continue.
 *
 * Note that the above puts a requirement on this routine to only be called on
 * an ioctl thread.
 *
 * Return: 0 on success, -errno on failure
 */
static int read_cap16(struct scsi_device *sdev, struct llun_info *lli)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct glun_info *gli = lli->parent;
	struct scsi_sense_hdr sshdr;
	const struct scsi_exec_args exec_args = {
		.sshdr = &sshdr,
	};
	u8 *cmd_buf = NULL;
	u8 *scsi_cmd = NULL;
	int rc = 0;
	int result = 0;
	int retry_cnt = 0;
	u32 to = CMD_TIMEOUT * HZ;

retry:
	cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
	scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
	if (unlikely(!cmd_buf || !scsi_cmd)) {
		rc = -ENOMEM;
		goto out;
	}

	scsi_cmd[0] = SERVICE_ACTION_IN_16;	/* read cap(16) */
	scsi_cmd[1] = SAI_READ_CAPACITY_16;	/* service action */
	put_unaligned_be32(CMD_BUFSIZE, &scsi_cmd[10]);

	dev_dbg(dev, "%s: %ssending cmd(%02x)\n", __func__,
		retry_cnt ? "re" : "", scsi_cmd[0]);

	/* Drop the ioctl read semaphore across lengthy call */
	up_read(&cfg->ioctl_rwsem);
	result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, cmd_buf,
				  CMD_BUFSIZE, to, CMD_RETRIES, &exec_args);
	down_read(&cfg->ioctl_rwsem);
	rc = check_state(cfg);
	if (rc) {
		dev_err(dev, "%s: Failed state result=%08x\n",
			__func__, result);
		rc = -ENODEV;
		goto out;
	}

	if (result > 0 && scsi_sense_valid(&sshdr)) {
		if (result & SAM_STAT_CHECK_CONDITION) {
			switch (sshdr.sense_key) {
			case NO_SENSE:
			case RECOVERED_ERROR:
			case NOT_READY:
				result &= ~SAM_STAT_CHECK_CONDITION;
				break;
			case UNIT_ATTENTION:
				switch (sshdr.asc) {
				case 0x29: /* Power on Reset or Device Reset */
					fallthrough;
				case 0x2A: /* Device capacity changed */
				case 0x3F: /* Report LUNs changed */
					/* Retry the command once more */
					if (retry_cnt++ < 1) {
						kfree(cmd_buf);
						kfree(scsi_cmd);
						goto retry;
					}
				}
				break;
			default:
				break;
			}
		}
	}

	if (result) {
		dev_err(dev, "%s: command failed, result=%08x\n",
			__func__, result);
		rc = -EIO;
		goto out;
	}

	/*
	 * Read cap was successful, grab values from the buffer;
	 * note that we don't need to worry about unaligned access
	 * as the buffer is allocated on an aligned boundary.
	 */
	mutex_lock(&gli->mutex);
	gli->max_lba = be64_to_cpu(*((__be64 *)&cmd_buf[0]));
	gli->blk_len = be32_to_cpu(*((__be32 *)&cmd_buf[8]));
	mutex_unlock(&gli->mutex);

out:
	kfree(cmd_buf);
	kfree(scsi_cmd);

	dev_dbg(dev, "%s: maxlba=%lld blklen=%d rc=%d\n",
		__func__, gli->max_lba, gli->blk_len, rc);
	return rc;
}

/**
 * get_rhte() - obtains validated resource handle table entry reference
 * @ctxi:	Context owning the resource handle.
 * @rhndl:	Resource handle associated with entry.
 * @lli:	LUN associated with request.
 *
 * Return: Validated RHTE on success, NULL on failure
 */
struct sisl_rht_entry *get_rhte(struct ctx_info *ctxi, res_hndl_t rhndl,
				struct llun_info *lli)
{
	struct cxlflash_cfg *cfg = ctxi->cfg;
	struct device *dev = &cfg->dev->dev;
	struct sisl_rht_entry *rhte = NULL;

	if (unlikely(!ctxi->rht_start)) {
		dev_dbg(dev, "%s: Context does not have allocated RHT\n",
			__func__);
		goto out;
	}

	if (unlikely(rhndl >= MAX_RHT_PER_CONTEXT)) {
		dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
			__func__, rhndl);
		goto out;
	}

	if (unlikely(ctxi->rht_lun[rhndl] != lli)) {
		dev_dbg(dev, "%s: Bad resource handle LUN rhndl=%d\n",
			__func__, rhndl);
		goto out;
	}

	rhte = &ctxi->rht_start[rhndl];
	if (unlikely(rhte->nmask == 0)) {
		dev_dbg(dev, "%s: Unopened resource handle rhndl=%d\n",
			__func__, rhndl);
		rhte = NULL;
		goto out;
	}

out:
	return rhte;
}

/**
 * rhte_checkout() - obtains free/empty resource handle table entry
 * @ctxi:	Context owning the resource handle.
 * @lli:	LUN associated with request.
 *
 * Return: Free RHTE on success, NULL on failure
 */
struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi,
				     struct llun_info *lli)
{
	struct cxlflash_cfg *cfg = ctxi->cfg;
	struct device *dev = &cfg->dev->dev;
	struct sisl_rht_entry *rhte = NULL;
	int i;

	/* Find a free RHT entry */
	for (i = 0; i < MAX_RHT_PER_CONTEXT; i++)
		if (ctxi->rht_start[i].nmask == 0) {
			rhte = &ctxi->rht_start[i];
			ctxi->rht_out++;
			break;
		}

	if (likely(rhte))
		ctxi->rht_lun[i] = lli;

	dev_dbg(dev, "%s: returning rhte=%p index=%d\n", __func__, rhte, i);
	return rhte;
}

/**
 * rhte_checkin() - releases a resource handle table entry
 * @ctxi:	Context owning the resource handle.
 * @rhte:	RHTE to release.
 */
void rhte_checkin(struct ctx_info *ctxi,
		  struct sisl_rht_entry *rhte)
{
	u32 rsrc_handle = rhte - ctxi->rht_start;

	rhte->nmask = 0;
	rhte->fp = 0;
	ctxi->rht_out--;
	ctxi->rht_lun[rsrc_handle] = NULL;
	ctxi->rht_needs_ws[rsrc_handle] = false;
}
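
/*
 * A short sketch of the checkout/checkin pairing (hypothetical caller;
 * the real users are the disk attach/release paths of this driver):
 *
 *	rhte = rhte_checkout(ctxi, lli);
 *	if (unlikely(!rhte))
 *		goto err;	// all MAX_RHT_PER_CONTEXT entries in use
 *	// ...program the entry (e.g. rht_format1() for physical LUNs)...
 *	rhte_checkin(ctxi, rhte);	// on release
 */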

/**
 * rht_format1() - populates a RHTE for format 1
 * @rhte:	RHTE to populate.
 * @lun_id:	LUN ID of LUN associated with RHTE.
 * @perm:	Desired permissions for RHTE.
 * @port_sel:	Port selection mask
 */
static void rht_format1(struct sisl_rht_entry *rhte, u64 lun_id, u32 perm,
			u32 port_sel)
{
	/*
	 * Populate the Format 1 RHT entry for direct access (physical
	 * LUN) using the synchronization sequence defined in the
	 * SISLite specification.
	 */
	struct sisl_rht_entry_f1 dummy = { 0 };
	struct sisl_rht_entry_f1 *rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;

	memset(rhte_f1, 0, sizeof(*rhte_f1));
	rhte_f1->fp = SISL_RHT_FP(1U, 0);
	dma_wmb(); /* Make setting of format bit visible */

	rhte_f1->lun_id = lun_id;
	dma_wmb(); /* Make setting of LUN id visible */

	/*
	 * Use a dummy RHT Format 1 entry to build the second dword
	 * of the entry that must be populated in a single write when
	 * enabled (valid bit set to TRUE).
	 */
	dummy.valid = 0x80;
	dummy.fp = SISL_RHT_FP(1U, perm);
	dummy.port_sel = port_sel;
	rhte_f1->dw = dummy.dw;

	dma_wmb(); /* Make remaining RHT entry fields visible */
}

/**
 * cxlflash_lun_attach() - attaches a user to a LUN and manages the LUN's mode
 * @gli:	LUN to attach.
 * @mode:	Desired mode of the LUN.
 * @locked:	Mutex status on current thread.
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_lun_attach(struct glun_info *gli, enum lun_mode mode, bool locked)
{
	int rc = 0;

	if (!locked)
		mutex_lock(&gli->mutex);

	if (gli->mode == MODE_NONE)
		gli->mode = mode;
	else if (gli->mode != mode) {
		pr_debug("%s: gli_mode=%d requested_mode=%d\n",
			 __func__, gli->mode, mode);
		rc = -EINVAL;
		goto out;
	}

	gli->users++;
	WARN_ON(gli->users <= 0);
out:
	pr_debug("%s: Returning rc=%d gli->mode=%u gli->users=%u\n",
		 __func__, rc, gli->mode, gli->users);
	if (!locked)
		mutex_unlock(&gli->mutex);
	return rc;
}

/**
 * cxlflash_lun_detach() - detaches a user from a LUN and resets the LUN's mode
 * @gli:	LUN to detach.
 *
 * When resetting the mode, terminate block allocation resources as they
 * are no longer required (service is safe to call even when block allocation
 * resources were not present - such as when transitioning from physical mode).
 * These resources will be reallocated when needed (subsequent transition to
 * virtual mode).
 */
void cxlflash_lun_detach(struct glun_info *gli)
{
	mutex_lock(&gli->mutex);
	WARN_ON(gli->mode == MODE_NONE);
	if (--gli->users == 0) {
		gli->mode = MODE_NONE;
		cxlflash_ba_terminate(&gli->blka.ba_lun);
	}
	pr_debug("%s: gli->users=%u\n", __func__, gli->users);
	WARN_ON(gli->users < 0);
	mutex_unlock(&gli->mutex);
}
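
/*
 * Sketch of the attach/detach pairing and mode arbitration (hypothetical
 * caller; the mode value shown is illustrative):
 *
 *	rc = cxlflash_lun_attach(gli, MODE_PHYSICAL, false);
 *	if (unlikely(rc))
 *		goto err;	// LUN already held in a conflicting mode
 *	// ...use the LUN in physical (direct) mode...
 *	cxlflash_lun_detach(gli);	// last user resets mode to MODE_NONE
 */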

/**
 * _cxlflash_disk_release() - releases the specified resource entry
 * @sdev:	SCSI device associated with LUN.
 * @ctxi:	Context owning resources.
 * @release:	Release ioctl data structure.
 *
 * For LUNs in virtual mode, the virtual LUN associated with the specified
 * resource handle is resized to 0 prior to releasing the RHTE. Note that the
 * AFU sync should _not_ be performed when the context is sitting on the error
 * recovery list. A context on the error recovery list is not known to the AFU
 * due to reset. When the context is recovered, it will be reattached and made
 * known again to the AFU.
 *
 * Return: 0 on success, -errno on failure
 */
int _cxlflash_disk_release(struct scsi_device *sdev,
			   struct ctx_info *ctxi,
			   struct dk_cxlflash_release *release)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct afu *afu = cfg->afu;
	bool put_ctx = false;

	struct dk_cxlflash_resize size;
	res_hndl_t rhndl = release->rsrc_handle;

	int rc = 0;
	int rcr = 0;
	u64 ctxid = DECODE_CTXID(release->context_id),
	    rctxid = release->context_id;

	struct sisl_rht_entry *rhte;
	struct sisl_rht_entry_f1 *rhte_f1;

	dev_dbg(dev, "%s: ctxid=%llu rhndl=%llu gli->mode=%u gli->users=%u\n",
		__func__, ctxid, release->rsrc_handle, gli->mode, gli->users);

	if (!ctxi) {
		ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
		if (unlikely(!ctxi)) {
			dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
				__func__, ctxid);
			rc = -EINVAL;
			goto out;
		}

		put_ctx = true;
	}

	rhte = get_rhte(ctxi, rhndl, lli);
	if (unlikely(!rhte)) {
		dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
			__func__, rhndl);
		rc = -EINVAL;
		goto out;
	}

	/*
	 * For virtual LUNs, resize to 0. This clears the LXT_START
	 * and LXT_CNT fields in the RHT entry and properly syncs
	 * with the AFU.
	 *
	 * Afterwards we clear the remaining fields.
	 */
	switch (gli->mode) {
	case MODE_VIRTUAL:
		marshal_rele_to_resize(release, &size);
		size.req_size = 0;
		rc = _cxlflash_vlun_resize(sdev, ctxi, &size);
		if (rc) {
			dev_dbg(dev, "%s: resize failed rc %d\n", __func__, rc);
			goto out;
		}

		break;
	case MODE_PHYSICAL:
		/*
		 * Clear the Format 1 RHT entry for direct access
		 * (physical LUN) using the synchronization sequence
		 * defined in the SISLite specification.
		 */
		rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;

		rhte_f1->valid = 0;
		dma_wmb(); /* Make revocation of RHT entry visible */

		rhte_f1->lun_id = 0;
		dma_wmb(); /* Make clearing of LUN id visible */

		rhte_f1->dw = 0;
		dma_wmb(); /* Make RHT entry bottom-half clearing visible */

		if (!ctxi->err_recovery_active) {
			rcr = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
			if (unlikely(rcr))
				dev_dbg(dev, "%s: AFU sync failed rc=%d\n",
					__func__, rcr);
		}
		break;
	default:
		WARN(1, "Unsupported LUN mode!");
		goto out;
	}

	rhte_checkin(ctxi, rhte);
	cxlflash_lun_detach(gli);

out:
	if (put_ctx)
		put_context(ctxi);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

int cxlflash_disk_release(struct scsi_device *sdev,
			  struct dk_cxlflash_release *release)
{
	return _cxlflash_disk_release(sdev, NULL, release);
}

/**
 * destroy_context() - releases a context
 * @cfg:	Internal structure associated with the host.
 * @ctxi:	Context to release.
 *
 * This routine is safe to be called with a non-initialized context.
 * Also note that the routine conditionally checks for the existence
 * of the context control map before clearing the RHT registers and
 * context capabilities because it is possible to destroy a context
 * while the context is in the error state (previous mapping was
 * removed [so there is no need to worry about clearing] and context
 * is waiting for a new mapping).
 */
static void destroy_context(struct cxlflash_cfg *cfg,
			    struct ctx_info *ctxi)
{
	struct afu *afu = cfg->afu;

	if (ctxi->initialized) {
		WARN_ON(!list_empty(&ctxi->luns));

		/* Clear RHT registers and drop all capabilities for context */
		if (afu->afu_map && ctxi->ctrl_map) {
			writeq_be(0, &ctxi->ctrl_map->rht_start);
			writeq_be(0, &ctxi->ctrl_map->rht_cnt_id);
			writeq_be(0, &ctxi->ctrl_map->ctx_cap);
		}
	}

	/* Free memory associated with context */
	free_page((ulong)ctxi->rht_start);
	kfree(ctxi->rht_needs_ws);
	kfree(ctxi->rht_lun);
	kfree(ctxi);
}

/**
 * create_context() - allocates and initializes a context
 * @cfg:	Internal structure associated with the host.
 *
 * Return: Allocated context on success, NULL on failure
 */
static struct ctx_info *create_context(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	struct llun_info **lli = NULL;
	u8 *ws = NULL;
	struct sisl_rht_entry *rhte;

	ctxi = kzalloc(sizeof(*ctxi), GFP_KERNEL);
	lli = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*lli)), GFP_KERNEL);
	ws = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*ws)), GFP_KERNEL);
	if (unlikely(!ctxi || !lli || !ws)) {
		dev_err(dev, "%s: Unable to allocate context\n", __func__);
		goto err;
	}

	rhte = (struct sisl_rht_entry *)get_zeroed_page(GFP_KERNEL);
	if (unlikely(!rhte)) {
		dev_err(dev, "%s: Unable to allocate RHT\n", __func__);
		goto err;
	}

	ctxi->rht_lun = lli;
	ctxi->rht_needs_ws = ws;
	ctxi->rht_start = rhte;
out:
	return ctxi;

err:
	kfree(ws);
	kfree(lli);
	kfree(ctxi);
	ctxi = NULL;
	goto out;
}

/**
 * init_context() - initializes a previously allocated context
 * @ctxi:	Previously allocated context
 * @cfg:	Internal structure associated with the host.
 * @ctx:	Previously obtained context cookie.
 * @ctxid:	Previously obtained process element associated with CXL context.
 * @file:	Previously obtained file associated with CXL context.
 * @perms:	User-specified permissions.
 * @irqs:	User-specified number of interrupts.
 */
static void init_context(struct ctx_info *ctxi, struct cxlflash_cfg *cfg,
			 void *ctx, int ctxid, struct file *file, u32 perms,
			 u64 irqs)
{
	struct afu *afu = cfg->afu;

	ctxi->rht_perms = perms;
	ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
	ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
	ctxi->irqs = irqs;
	ctxi->pid = task_tgid_nr(current); /* tgid = pid */
	ctxi->ctx = ctx;
	ctxi->cfg = cfg;
	ctxi->file = file;
	ctxi->initialized = true;
	mutex_init(&ctxi->mutex);
	kref_init(&ctxi->kref);
	INIT_LIST_HEAD(&ctxi->luns);
	INIT_LIST_HEAD(&ctxi->list); /* initialize for list_empty() */
}
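
/*
 * A sketch of how a context flows through these helpers during an attach
 * (hypothetical ordering; the ctx/ctxid/file/perms/irqs values come from
 * the CXL layer via the actual attach path elsewhere in this file):
 *
 *	ctxi = create_context(cfg);
 *	if (unlikely(!ctxi))
 *		goto err;
 *	init_context(ctxi, cfg, ctx, ctxid, file, perms, irqs);
 *	// ...on teardown, the final kref_put() invokes remove_context(),
 *	// which uncouples the context and calls destroy_context()...
 */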

/**
 * remove_context() - context kref release handler
 * @kref:	Kernel reference associated with context to be removed.
 *
 * When a context no longer has any references it can safely be removed
 * from global access and destroyed. Note that it is assumed the thread
 * relinquishing access to the context holds its mutex.
 */
static void remove_context(struct kref *kref)
{
	struct ctx_info *ctxi = container_of(kref, struct ctx_info, kref);
	struct cxlflash_cfg *cfg = ctxi->cfg;
	u64 ctxid = DECODE_CTXID(ctxi->ctxid);

	/* Remove context from table/error list */
	WARN_ON(!mutex_is_locked(&ctxi->mutex));
	ctxi->unavail = true;
	mutex_unlock(&ctxi->mutex);
	mutex_lock(&cfg->ctx_tbl_list_mutex);
	mutex_lock(&ctxi->mutex);

	if (!list_empty(&ctxi->list))
		list_del(&ctxi->list);
	cfg->ctx_tbl[ctxid] = NULL;
	mutex_unlock(&cfg->ctx_tbl_list_mutex);
	mutex_unlock(&ctxi->mutex);

	/* Context now completely uncoupled/unreachable */
	destroy_context(cfg, ctxi);
}

/**
 * _cxlflash_disk_detach() - detaches a LUN from a context
 * @sdev:	SCSI device associated with LUN.
 * @ctxi:	Context owning resources.
 * @detach:	Detach ioctl data structure.
 *
 * As part of the detach, all per-context resources associated with the LUN
 * are cleaned up. When detaching the last LUN for a context, the context
 * itself is cleaned up and released.
 *
 * Return: 0 on success, -errno on failure
 */
static int _cxlflash_disk_detach(struct scsi_device *sdev,
				 struct ctx_info *ctxi,
				 struct dk_cxlflash_detach *detach)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct lun_access *lun_access, *t;
	struct dk_cxlflash_release rel;
	bool put_ctx = false;

	int i;
	int rc = 0;
	u64 ctxid = DECODE_CTXID(detach->context_id),
	    rctxid = detach->context_id;

	dev_dbg(dev, "%s: ctxid=%llu\n", __func__, ctxid);

	if (!ctxi) {
		ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
		if (unlikely(!ctxi)) {
			dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
				__func__, ctxid);
			rc = -EINVAL;
			goto out;
		}

		put_ctx = true;
	}

	/* Cleanup outstanding resources tied to this LUN */
	if (ctxi->rht_out) {
		marshal_det_to_rele(detach, &rel);
		for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) {
			if (ctxi->rht_lun[i] == lli) {
				rel.rsrc_handle = i;
				_cxlflash_disk_release(sdev, ctxi, &rel);
			}

			/* No need to loop further if we're done */
			if (ctxi->rht_out == 0)
				break;
		}
	}

	/* Take our LUN out of context, free the node */
	list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
		if (lun_access->lli == lli) {
			list_del(&lun_access->list);
			kfree(lun_access);
			lun_access = NULL;
			break;
		}

	/*
	 * Release the context reference and the sdev reference that
	 * bound this LUN to the context.
	 */
	if (kref_put(&ctxi->kref, remove_context))
		put_ctx = false;
	scsi_device_put(sdev);
out:
	if (put_ctx)
		put_context(ctxi);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

static int cxlflash_disk_detach(struct scsi_device *sdev,
				struct dk_cxlflash_detach *detach)
{
	return _cxlflash_disk_detach(sdev, NULL, detach);
}

/**
 * cxlflash_cxl_release() - release handler for adapter file descriptor
 * @inode:	File-system inode associated with fd.
 * @file:	File installed with adapter file descriptor.
 *
 * This routine is the release handler for the fops registered with
 * the CXL services on an initial attach for a context. It is called
 * when a close (explicitly by the user or as part of a process tear
 * down) is performed on the adapter file descriptor returned to the
 * user. The user should be aware that explicitly performing a close
 * is considered catastrophic and subsequent usage of the superpipe API
 * with previously saved off tokens will fail.
 *
 * This routine derives the context reference and calls detach for
 * each LUN associated with the context. The final detach operation
 * causes the context itself to be freed. Except when the CXL process
 * element (context id) lookup fails (a case that should theoretically
 * never occur), every call into this routine results in a complete
 * freeing of a context.
98365be2c79SMatthew R. Ochs *
98432a9ae41SUma Krishnan * Detaching the LUN is typically an ioctl() operation and the underlying
98532a9ae41SUma Krishnan * code assumes that ioctl_rwsem has been acquired as a reader. To support
98632a9ae41SUma Krishnan * that design point, the semaphore is acquired and released around detach.
98732a9ae41SUma Krishnan *
98865be2c79SMatthew R. Ochs * Return: 0 on success
98965be2c79SMatthew R. Ochs */
cxlflash_cxl_release(struct inode * inode,struct file * file)99065be2c79SMatthew R. Ochs static int cxlflash_cxl_release(struct inode *inode, struct file *file)
99165be2c79SMatthew R. Ochs {
99265be2c79SMatthew R. Ochs struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
99365be2c79SMatthew R. Ochs cxl_fops);
99425b8e08eSMatthew R. Ochs void *ctx = cfg->ops->fops_get_context(file);
99565be2c79SMatthew R. Ochs struct device *dev = &cfg->dev->dev;
99665be2c79SMatthew R. Ochs struct ctx_info *ctxi = NULL;
99765be2c79SMatthew R. Ochs struct dk_cxlflash_detach detach = { { 0 }, 0 };
99865be2c79SMatthew R. Ochs struct lun_access *lun_access, *t;
99965be2c79SMatthew R. Ochs enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
100065be2c79SMatthew R. Ochs int ctxid;
100165be2c79SMatthew R. Ochs
100225b8e08eSMatthew R. Ochs ctxid = cfg->ops->process_element(ctx);
100365be2c79SMatthew R. Ochs if (unlikely(ctxid < 0)) {
1004fb67d44dSMatthew R. Ochs dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
100565be2c79SMatthew R. Ochs __func__, ctx, ctxid);
100665be2c79SMatthew R. Ochs goto out;
100765be2c79SMatthew R. Ochs }
100865be2c79SMatthew R. Ochs
100965be2c79SMatthew R. Ochs ctxi = get_context(cfg, ctxid, file, ctrl);
101065be2c79SMatthew R. Ochs if (unlikely(!ctxi)) {
101165be2c79SMatthew R. Ochs ctxi = get_context(cfg, ctxid, file, ctrl | CTX_CTRL_CLONE);
101265be2c79SMatthew R. Ochs if (!ctxi) {
1013fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: ctxid=%d already free\n",
101465be2c79SMatthew R. Ochs __func__, ctxid);
101565be2c79SMatthew R. Ochs goto out_release;
101665be2c79SMatthew R. Ochs }
101765be2c79SMatthew R. Ochs
1018fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: Another process owns ctxid=%d\n",
101965be2c79SMatthew R. Ochs __func__, ctxid);
102065be2c79SMatthew R. Ochs put_context(ctxi);
102165be2c79SMatthew R. Ochs goto out;
102265be2c79SMatthew R. Ochs }
102365be2c79SMatthew R. Ochs
1024fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: close for ctxid=%d\n", __func__, ctxid);
102565be2c79SMatthew R. Ochs
102632a9ae41SUma Krishnan down_read(&cfg->ioctl_rwsem);
102765be2c79SMatthew R. Ochs detach.context_id = ctxi->ctxid;
102865be2c79SMatthew R. Ochs list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
102965be2c79SMatthew R. Ochs _cxlflash_disk_detach(lun_access->sdev, ctxi, &detach);
103032a9ae41SUma Krishnan up_read(&cfg->ioctl_rwsem);
103165be2c79SMatthew R. Ochs out_release:
103225b8e08eSMatthew R. Ochs cfg->ops->fd_release(inode, file);
103365be2c79SMatthew R. Ochs out:
103465be2c79SMatthew R. Ochs dev_dbg(dev, "%s: returning\n", __func__);
103565be2c79SMatthew R. Ochs return 0;
103665be2c79SMatthew R. Ochs }
103765be2c79SMatthew R. Ochs
103865be2c79SMatthew R. Ochs /**
103965be2c79SMatthew R. Ochs * unmap_context() - clears a previously established mapping
104065be2c79SMatthew R. Ochs * @ctxi: Context owning the mapping.
104165be2c79SMatthew R. Ochs *
104265be2c79SMatthew R. Ochs * This routine is used to switch between the error notification page
104365be2c79SMatthew R. Ochs * (dummy page of all 1's) and the real mapping (established by the CXL
104465be2c79SMatthew R. Ochs * fault handler).
104565be2c79SMatthew R. Ochs */
104665be2c79SMatthew R. Ochs static void unmap_context(struct ctx_info *ctxi)
104765be2c79SMatthew R. Ochs {
104865be2c79SMatthew R. Ochs unmap_mapping_range(ctxi->file->f_mapping, 0, 0, 1);
104965be2c79SMatthew R. Ochs }
105065be2c79SMatthew R. Ochs
105165be2c79SMatthew R. Ochs /**
105265be2c79SMatthew R. Ochs * get_err_page() - obtains and allocates the error notification page
1053fb67d44dSMatthew R. Ochs * @cfg: Internal structure associated with the host.
105465be2c79SMatthew R. Ochs *
105565be2c79SMatthew R. Ochs * Return: error notification page on success, NULL on failure
105665be2c79SMatthew R. Ochs */
1057fb67d44dSMatthew R. Ochs static struct page *get_err_page(struct cxlflash_cfg *cfg)
105865be2c79SMatthew R. Ochs {
105965be2c79SMatthew R. Ochs struct page *err_page = global.err_page;
1060fb67d44dSMatthew R. Ochs struct device *dev = &cfg->dev->dev;
106165be2c79SMatthew R. Ochs
106265be2c79SMatthew R. Ochs if (unlikely(!err_page)) {
106365be2c79SMatthew R. Ochs err_page = alloc_page(GFP_KERNEL);
106465be2c79SMatthew R. Ochs if (unlikely(!err_page)) {
1065fb67d44dSMatthew R. Ochs dev_err(dev, "%s: Unable to allocate err_page\n",
1066fb67d44dSMatthew R. Ochs __func__);
106765be2c79SMatthew R. Ochs goto out;
106865be2c79SMatthew R. Ochs }
106965be2c79SMatthew R. Ochs
107065be2c79SMatthew R. Ochs memset(page_address(err_page), -1, PAGE_SIZE);
107165be2c79SMatthew R. Ochs
107265be2c79SMatthew R. Ochs /* Serialize update w/ other threads to avoid a leak */
107365be2c79SMatthew R. Ochs mutex_lock(&global.mutex);
107465be2c79SMatthew R. Ochs if (likely(!global.err_page))
107565be2c79SMatthew R. Ochs global.err_page = err_page;
107665be2c79SMatthew R. Ochs else {
107765be2c79SMatthew R. Ochs __free_page(err_page);
107865be2c79SMatthew R. Ochs err_page = global.err_page;
107965be2c79SMatthew R. Ochs }
108065be2c79SMatthew R. Ochs mutex_unlock(&global.mutex);
108165be2c79SMatthew R. Ochs }
108265be2c79SMatthew R. Ochs
108365be2c79SMatthew R. Ochs out:
1084fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: returning err_page=%p\n", __func__, err_page);
108565be2c79SMatthew R. Ochs return err_page;
108665be2c79SMatthew R. Ochs }
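/*
 * Hedged user-space sketch (illustrative only, not part of this build):
 * because the error page is memset to -1, an application can cheaply
 * sample its MMIO mapping to see whether error notification is in
 * effect. The 'mmio' pointer is an assumption, taken from an earlier
 * mmap of the adapter file descriptor.
 *
 *	volatile unsigned long long *p = mmio;
 *	if (*p == ~0ULL) {
 *		// Possible error state; confirm by issuing
 *		// DK_CXLFLASH_RECOVER_AFU rather than trusting this read
 *		// alone, since a register may legitimately be all 1's.
 *	}
 */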
108765be2c79SMatthew R. Ochs
108865be2c79SMatthew R. Ochs /**
108965be2c79SMatthew R. Ochs * cxlflash_mmap_fault() - mmap fault handler for adapter file descriptor
109065be2c79SMatthew R. Ochs * @vmf: VM fault associated with current fault.
109165be2c79SMatthew R. Ochs *
109265be2c79SMatthew R. Ochs * To support error notification via MMIO, faults are 'caught' by this routine
109365be2c79SMatthew R. Ochs * that was inserted before passing back the adapter file descriptor on attach.
109465be2c79SMatthew R. Ochs * When a fault occurs, this routine evaluates if error recovery is active and
109565be2c79SMatthew R. Ochs * if so, installs the error page to 'notify' the user about the error state.
109665be2c79SMatthew R. Ochs * During normal operation, the fault is simply handled by the original fault
109765be2c79SMatthew R. Ochs * handler that was installed by CXL services as part of initializing the
109865be2c79SMatthew R. Ochs * adapter file descriptor. The VMA's page protection bits are toggled to
109965be2c79SMatthew R. Ochs * indicate cached/not-cached depending on the memory backing the fault.
110065be2c79SMatthew R. Ochs *
110165be2c79SMatthew R. Ochs * Return: 0 on success, VM_FAULT_SIGBUS on failure
110265be2c79SMatthew R. Ochs */
1103a38b80c5SSouptick Joarder static vm_fault_t cxlflash_mmap_fault(struct vm_fault *vmf)
110465be2c79SMatthew R. Ochs {
110511bac800SDave Jiang struct vm_area_struct *vma = vmf->vma;
110665be2c79SMatthew R. Ochs struct file *file = vma->vm_file;
110765be2c79SMatthew R. Ochs struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
110865be2c79SMatthew R. Ochs cxl_fops);
110925b8e08eSMatthew R. Ochs void *ctx = cfg->ops->fops_get_context(file);
111065be2c79SMatthew R. Ochs struct device *dev = &cfg->dev->dev;
111165be2c79SMatthew R. Ochs struct ctx_info *ctxi = NULL;
111265be2c79SMatthew R. Ochs struct page *err_page = NULL;
111365be2c79SMatthew R. Ochs enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
1114a38b80c5SSouptick Joarder vm_fault_t rc = 0;
111565be2c79SMatthew R. Ochs int ctxid;
111665be2c79SMatthew R. Ochs
111725b8e08eSMatthew R. Ochs ctxid = cfg->ops->process_element(ctx);
111865be2c79SMatthew R. Ochs if (unlikely(ctxid < 0)) {
1119fb67d44dSMatthew R. Ochs dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
112065be2c79SMatthew R. Ochs __func__, ctx, ctxid);
112165be2c79SMatthew R. Ochs goto err;
112265be2c79SMatthew R. Ochs }
112365be2c79SMatthew R. Ochs
112465be2c79SMatthew R. Ochs ctxi = get_context(cfg, ctxid, file, ctrl);
112565be2c79SMatthew R. Ochs if (unlikely(!ctxi)) {
1126fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid);
112765be2c79SMatthew R. Ochs goto err;
112865be2c79SMatthew R. Ochs }
112965be2c79SMatthew R. Ochs
1130de9f0b0cSMatthew R. Ochs dev_dbg(dev, "%s: fault for context %d\n", __func__, ctxid);
113165be2c79SMatthew R. Ochs
113265be2c79SMatthew R. Ochs if (likely(!ctxi->err_recovery_active)) {
113365be2c79SMatthew R. Ochs vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
113411bac800SDave Jiang rc = ctxi->cxl_mmap_vmops->fault(vmf);
113565be2c79SMatthew R. Ochs } else {
1136fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: err recovery active, use err_page\n",
113765be2c79SMatthew R. Ochs __func__);
113865be2c79SMatthew R. Ochs
1139fb67d44dSMatthew R. Ochs err_page = get_err_page(cfg);
114065be2c79SMatthew R. Ochs if (unlikely(!err_page)) {
1141fb67d44dSMatthew R. Ochs dev_err(dev, "%s: Could not get err_page\n", __func__);
114265be2c79SMatthew R. Ochs rc = VM_FAULT_RETRY;
114365be2c79SMatthew R. Ochs goto out;
114465be2c79SMatthew R. Ochs }
114565be2c79SMatthew R. Ochs
114665be2c79SMatthew R. Ochs get_page(err_page);
114765be2c79SMatthew R. Ochs vmf->page = err_page;
114865be2c79SMatthew R. Ochs vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
114965be2c79SMatthew R. Ochs }
115065be2c79SMatthew R. Ochs
115165be2c79SMatthew R. Ochs out:
115265be2c79SMatthew R. Ochs if (likely(ctxi))
115365be2c79SMatthew R. Ochs put_context(ctxi);
1154a38b80c5SSouptick Joarder dev_dbg(dev, "%s: returning rc=%x\n", __func__, rc);
115565be2c79SMatthew R. Ochs return rc;
115665be2c79SMatthew R. Ochs
115765be2c79SMatthew R. Ochs err:
115865be2c79SMatthew R. Ochs rc = VM_FAULT_SIGBUS;
115965be2c79SMatthew R. Ochs goto out;
116065be2c79SMatthew R. Ochs }
116165be2c79SMatthew R. Ochs
116265be2c79SMatthew R. Ochs /*
116365be2c79SMatthew R. Ochs * Local MMAP vmops to 'catch' faults
116465be2c79SMatthew R. Ochs */
116565be2c79SMatthew R. Ochs static const struct vm_operations_struct cxlflash_mmap_vmops = {
116665be2c79SMatthew R. Ochs .fault = cxlflash_mmap_fault,
116765be2c79SMatthew R. Ochs };
116865be2c79SMatthew R. Ochs
116965be2c79SMatthew R. Ochs /**
117065be2c79SMatthew R. Ochs * cxlflash_cxl_mmap() - mmap handler for adapter file descriptor
117165be2c79SMatthew R. Ochs * @file: File installed with adapter file descriptor.
117265be2c79SMatthew R. Ochs * @vma: VM area associated with mapping.
117365be2c79SMatthew R. Ochs *
117465be2c79SMatthew R. Ochs * Installs local mmap vmops to 'catch' faults for error notification support.
117565be2c79SMatthew R. Ochs *
117665be2c79SMatthew R. Ochs * Return: 0 on success, -errno on failure
117765be2c79SMatthew R. Ochs */
117865be2c79SMatthew R. Ochs static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
117965be2c79SMatthew R. Ochs {
118065be2c79SMatthew R. Ochs struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
118165be2c79SMatthew R. Ochs cxl_fops);
118225b8e08eSMatthew R. Ochs void *ctx = cfg->ops->fops_get_context(file);
118365be2c79SMatthew R. Ochs struct device *dev = &cfg->dev->dev;
118465be2c79SMatthew R. Ochs struct ctx_info *ctxi = NULL;
118565be2c79SMatthew R. Ochs enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
118665be2c79SMatthew R. Ochs int ctxid;
118765be2c79SMatthew R. Ochs int rc = 0;
118865be2c79SMatthew R. Ochs
118925b8e08eSMatthew R. Ochs ctxid = cfg->ops->process_element(ctx);
119065be2c79SMatthew R. Ochs if (unlikely(ctxid < 0)) {
1191fb67d44dSMatthew R. Ochs dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
119265be2c79SMatthew R. Ochs __func__, ctx, ctxid);
119365be2c79SMatthew R. Ochs rc = -EIO;
119465be2c79SMatthew R. Ochs goto out;
119565be2c79SMatthew R. Ochs }
119665be2c79SMatthew R. Ochs
119765be2c79SMatthew R. Ochs ctxi = get_context(cfg, ctxid, file, ctrl);
119865be2c79SMatthew R. Ochs if (unlikely(!ctxi)) {
1199fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid);
120065be2c79SMatthew R. Ochs rc = -EIO;
120165be2c79SMatthew R. Ochs goto out;
120265be2c79SMatthew R. Ochs }
120365be2c79SMatthew R. Ochs
1204de9f0b0cSMatthew R. Ochs dev_dbg(dev, "%s: mmap for context %d\n", __func__, ctxid);
120565be2c79SMatthew R. Ochs
120625b8e08eSMatthew R. Ochs rc = cfg->ops->fd_mmap(file, vma);
120765be2c79SMatthew R. Ochs if (likely(!rc)) {
120865be2c79SMatthew R. Ochs /* Insert ourself in the mmap fault handler path */
120965be2c79SMatthew R. Ochs ctxi->cxl_mmap_vmops = vma->vm_ops;
121065be2c79SMatthew R. Ochs vma->vm_ops = &cxlflash_mmap_vmops;
121165be2c79SMatthew R. Ochs }
121265be2c79SMatthew R. Ochs
121365be2c79SMatthew R. Ochs out:
121465be2c79SMatthew R. Ochs if (likely(ctxi))
121565be2c79SMatthew R. Ochs put_context(ctxi);
121665be2c79SMatthew R. Ochs return rc;
121765be2c79SMatthew R. Ochs }
121865be2c79SMatthew R. Ochs
121917ead26fSMatthew R. Ochs const struct file_operations cxlflash_cxl_fops = {
122065be2c79SMatthew R. Ochs .owner = THIS_MODULE,
122165be2c79SMatthew R. Ochs .mmap = cxlflash_cxl_mmap,
122265be2c79SMatthew R. Ochs .release = cxlflash_cxl_release,
122365be2c79SMatthew R. Ochs };
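/*
 * Hedged user-space sketch (illustrative only): the typical consumer of
 * these fops. The disk device path is an assumption; the adapter fd and
 * MMIO size are returned by the DK_CXLFLASH_ATTACH ioctl serviced by
 * cxlflash_disk_attach() below. Faults on the resulting mapping are
 * caught by cxlflash_mmap_fault() above.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <scsi/cxlflash_ioctl.h>	// installed uapi header
 *
 *	struct dk_cxlflash_attach attach = { 0 };
 *	int disk_fd = open("/dev/sgN", O_RDWR);	// hypothetical path
 *
 *	attach.num_interrupts = 4;
 *	ioctl(disk_fd, DK_CXLFLASH_ATTACH, &attach);
 *	void *mmio = mmap(NULL, attach.mmio_size, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, attach.adap_fd, 0);
 */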
122465be2c79SMatthew R. Ochs
122565be2c79SMatthew R. Ochs /**
122665be2c79SMatthew R. Ochs * cxlflash_mark_contexts_error() - move contexts to error state and list
122765be2c79SMatthew R. Ochs * @cfg: Internal structure associated with the host.
122865be2c79SMatthew R. Ochs *
122965be2c79SMatthew R. Ochs * A context is only moved over to the error list when there are no outstanding
123065be2c79SMatthew R. Ochs * references to it. This ensures that a running operation has completed.
123165be2c79SMatthew R. Ochs *
123265be2c79SMatthew R. Ochs * Return: 0 on success, -errno on failure
123365be2c79SMatthew R. Ochs */
123465be2c79SMatthew R. Ochs int cxlflash_mark_contexts_error(struct cxlflash_cfg *cfg)
123565be2c79SMatthew R. Ochs {
123665be2c79SMatthew R. Ochs int i, rc = 0;
123765be2c79SMatthew R. Ochs struct ctx_info *ctxi = NULL;
123865be2c79SMatthew R. Ochs
123965be2c79SMatthew R. Ochs mutex_lock(&cfg->ctx_tbl_list_mutex);
124065be2c79SMatthew R. Ochs
124165be2c79SMatthew R. Ochs for (i = 0; i < MAX_CONTEXT; i++) {
124265be2c79SMatthew R. Ochs ctxi = cfg->ctx_tbl[i];
124365be2c79SMatthew R. Ochs if (ctxi) {
124465be2c79SMatthew R. Ochs mutex_lock(&ctxi->mutex);
124565be2c79SMatthew R. Ochs cfg->ctx_tbl[i] = NULL;
124665be2c79SMatthew R. Ochs list_add(&ctxi->list, &cfg->ctx_err_recovery);
124765be2c79SMatthew R. Ochs ctxi->err_recovery_active = true;
124865be2c79SMatthew R. Ochs ctxi->ctrl_map = NULL;
124965be2c79SMatthew R. Ochs unmap_context(ctxi);
125065be2c79SMatthew R. Ochs mutex_unlock(&ctxi->mutex);
125165be2c79SMatthew R. Ochs }
125265be2c79SMatthew R. Ochs }
125365be2c79SMatthew R. Ochs
125465be2c79SMatthew R. Ochs mutex_unlock(&cfg->ctx_tbl_list_mutex);
125565be2c79SMatthew R. Ochs return rc;
125665be2c79SMatthew R. Ochs }
125765be2c79SMatthew R. Ochs
125865be2c79SMatthew R. Ochs /*
125965be2c79SMatthew R. Ochs * Dummy NULL fops
126065be2c79SMatthew R. Ochs */
126165be2c79SMatthew R. Ochs static const struct file_operations null_fops = {
126265be2c79SMatthew R. Ochs .owner = THIS_MODULE,
126365be2c79SMatthew R. Ochs };
126465be2c79SMatthew R. Ochs
126565be2c79SMatthew R. Ochs /**
12660a27ae51SMatthew R. Ochs * check_state() - checks and responds to the current adapter state
12670a27ae51SMatthew R. Ochs * @cfg: Internal structure associated with the host.
12680a27ae51SMatthew R. Ochs *
12690a27ae51SMatthew R. Ochs  * This routine can block and should only be used in process context.
12700a27ae51SMatthew R. Ochs  * It assumes that the caller is an ioctl thread holding the ioctl
12710a27ae51SMatthew R. Ochs  * read semaphore, which is temporarily released across the wait to
12720a27ae51SMatthew R. Ochs  * allow actively running ioctls to drain. Also note that when waking
12730a27ae51SMatthew R. Ochs  * up from waiting in reset, the state is unknown and must be checked
12740a27ae51SMatthew R. Ochs  * again before proceeding.
12750a27ae51SMatthew R. Ochs *
12760a27ae51SMatthew R. Ochs * Return: 0 on success, -errno on failure
12770a27ae51SMatthew R. Ochs */
1278aacb4ff6SMatthew R. Ochs int check_state(struct cxlflash_cfg *cfg)
12790a27ae51SMatthew R. Ochs {
12800a27ae51SMatthew R. Ochs struct device *dev = &cfg->dev->dev;
12810a27ae51SMatthew R. Ochs int rc = 0;
12820a27ae51SMatthew R. Ochs
12830a27ae51SMatthew R. Ochs retry:
12840a27ae51SMatthew R. Ochs switch (cfg->state) {
1285439e85c1SMatthew R. Ochs case STATE_RESET:
1286439e85c1SMatthew R. Ochs dev_dbg(dev, "%s: Reset state, going to wait...\n", __func__);
12870a27ae51SMatthew R. Ochs up_read(&cfg->ioctl_rwsem);
1288439e85c1SMatthew R. Ochs rc = wait_event_interruptible(cfg->reset_waitq,
1289439e85c1SMatthew R. Ochs cfg->state != STATE_RESET);
12900a27ae51SMatthew R. Ochs down_read(&cfg->ioctl_rwsem);
12910a27ae51SMatthew R. Ochs if (unlikely(rc))
12920a27ae51SMatthew R. Ochs break;
12930a27ae51SMatthew R. Ochs goto retry;
12940a27ae51SMatthew R. Ochs case STATE_FAILTERM:
1295fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: Failed/Terminating\n", __func__);
12960a27ae51SMatthew R. Ochs rc = -ENODEV;
12970a27ae51SMatthew R. Ochs break;
12980a27ae51SMatthew R. Ochs default:
12990a27ae51SMatthew R. Ochs break;
13000a27ae51SMatthew R. Ochs }
13010a27ae51SMatthew R. Ochs
13020a27ae51SMatthew R. Ochs return rc;
13030a27ae51SMatthew R. Ochs }
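/*
 * Typical caller pattern for check_state() (sketch): ioctl threads hold
 * ioctl_rwsem as readers and must treat the state as unknown after any
 * wait, since the semaphore is dropped and reacquired internally.
 *
 *	down_read(&cfg->ioctl_rwsem);
 *	rc = check_state(cfg);
 *	if (rc)
 *		goto out;
 *	// ... perform the ioctl work ...
 * out:
 *	up_read(&cfg->ioctl_rwsem);
 */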
13040a27ae51SMatthew R. Ochs
13050a27ae51SMatthew R. Ochs /**
130665be2c79SMatthew R. Ochs * cxlflash_disk_attach() - attach a LUN to a context
130765be2c79SMatthew R. Ochs * @sdev: SCSI device associated with LUN.
130865be2c79SMatthew R. Ochs * @attach: Attach ioctl data structure.
130965be2c79SMatthew R. Ochs *
131065be2c79SMatthew R. Ochs * Creates a context and attaches LUN to it. A LUN can only be attached
131165be2c79SMatthew R. Ochs * one time to a context (subsequent attaches for the same context/LUN pair
131265be2c79SMatthew R. Ochs * are not supported). Additional LUNs can be attached to a context by
131365be2c79SMatthew R. Ochs * specifying the 'reuse' flag defined in the cxlflash_ioctl.h header.
131465be2c79SMatthew R. Ochs *
131565be2c79SMatthew R. Ochs * Return: 0 on success, -errno on failure
131665be2c79SMatthew R. Ochs */
131765be2c79SMatthew R. Ochs static int cxlflash_disk_attach(struct scsi_device *sdev,
131865be2c79SMatthew R. Ochs struct dk_cxlflash_attach *attach)
131965be2c79SMatthew R. Ochs {
1320fb67d44dSMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(sdev->host);
132165be2c79SMatthew R. Ochs struct device *dev = &cfg->dev->dev;
132265be2c79SMatthew R. Ochs struct afu *afu = cfg->afu;
132365be2c79SMatthew R. Ochs struct llun_info *lli = sdev->hostdata;
132465be2c79SMatthew R. Ochs struct glun_info *gli = lli->parent;
132565be2c79SMatthew R. Ochs struct ctx_info *ctxi = NULL;
132665be2c79SMatthew R. Ochs struct lun_access *lun_access = NULL;
132765be2c79SMatthew R. Ochs int rc = 0;
132865be2c79SMatthew R. Ochs u32 perms;
132965be2c79SMatthew R. Ochs int ctxid = -1;
1330af2047ecSMatthew R. Ochs u64 irqs = attach->num_interrupts;
1331696d0b0cSMatthew R. Ochs u64 flags = 0UL;
133265be2c79SMatthew R. Ochs u64 rctxid = 0UL;
13338a96b52aSMatthew R. Ochs struct file *file = NULL;
133465be2c79SMatthew R. Ochs
1335b070545dSUma Krishnan void *ctx = NULL;
133665be2c79SMatthew R. Ochs
133765be2c79SMatthew R. Ochs int fd = -1;
133865be2c79SMatthew R. Ochs
1339af2047ecSMatthew R. Ochs if (irqs > 4) {
134065be2c79SMatthew R. Ochs dev_dbg(dev, "%s: Cannot support this many interrupts %llu\n",
1341af2047ecSMatthew R. Ochs __func__, irqs);
134265be2c79SMatthew R. Ochs rc = -EINVAL;
134365be2c79SMatthew R. Ochs goto out;
134465be2c79SMatthew R. Ochs }
134565be2c79SMatthew R. Ochs
134665be2c79SMatthew R. Ochs if (gli->max_lba == 0) {
1347fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: No capacity info for LUN=%016llx\n",
134865be2c79SMatthew R. Ochs __func__, lli->lun_id[sdev->channel]);
134965be2c79SMatthew R. Ochs rc = read_cap16(sdev, lli);
135065be2c79SMatthew R. Ochs if (rc) {
1351fb67d44dSMatthew R. Ochs dev_err(dev, "%s: Invalid device rc=%d\n",
135265be2c79SMatthew R. Ochs __func__, rc);
135365be2c79SMatthew R. Ochs rc = -ENODEV;
135465be2c79SMatthew R. Ochs goto out;
135565be2c79SMatthew R. Ochs }
1356fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: LBA = %016llx\n", __func__, gli->max_lba);
1357fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: BLK_LEN = %08x\n", __func__, gli->blk_len);
135865be2c79SMatthew R. Ochs }
135965be2c79SMatthew R. Ochs
136065be2c79SMatthew R. Ochs if (attach->hdr.flags & DK_CXLFLASH_ATTACH_REUSE_CONTEXT) {
136165be2c79SMatthew R. Ochs rctxid = attach->context_id;
136265be2c79SMatthew R. Ochs ctxi = get_context(cfg, rctxid, NULL, 0);
136365be2c79SMatthew R. Ochs if (!ctxi) {
1364fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: Bad context rctxid=%016llx\n",
136565be2c79SMatthew R. Ochs __func__, rctxid);
136665be2c79SMatthew R. Ochs rc = -EINVAL;
136765be2c79SMatthew R. Ochs goto out;
136865be2c79SMatthew R. Ochs }
136965be2c79SMatthew R. Ochs
137065be2c79SMatthew R. Ochs list_for_each_entry(lun_access, &ctxi->luns, list)
137165be2c79SMatthew R. Ochs if (lun_access->lli == lli) {
1372fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: Already attached\n",
137365be2c79SMatthew R. Ochs __func__);
137465be2c79SMatthew R. Ochs rc = -EINVAL;
137565be2c79SMatthew R. Ochs goto out;
137665be2c79SMatthew R. Ochs }
137765be2c79SMatthew R. Ochs }
137865be2c79SMatthew R. Ochs
137922fe1ae8SMatthew R. Ochs rc = scsi_device_get(sdev);
138022fe1ae8SMatthew R. Ochs if (unlikely(rc)) {
1381fb67d44dSMatthew R. Ochs dev_err(dev, "%s: Unable to get sdev reference\n", __func__);
138222fe1ae8SMatthew R. Ochs goto out;
138322fe1ae8SMatthew R. Ochs }
138422fe1ae8SMatthew R. Ochs
138565be2c79SMatthew R. Ochs lun_access = kzalloc(sizeof(*lun_access), GFP_KERNEL);
138665be2c79SMatthew R. Ochs if (unlikely(!lun_access)) {
1387fb67d44dSMatthew R. Ochs dev_err(dev, "%s: Unable to allocate lun_access\n", __func__);
138865be2c79SMatthew R. Ochs rc = -ENOMEM;
13898a96b52aSMatthew R. Ochs goto err;
139065be2c79SMatthew R. Ochs }
139165be2c79SMatthew R. Ochs
139265be2c79SMatthew R. Ochs lun_access->lli = lli;
139365be2c79SMatthew R. Ochs lun_access->sdev = sdev;
139465be2c79SMatthew R. Ochs
1395888baf06SMatthew R. Ochs /* Non-NULL context indicates reuse (another context reference) */
139665be2c79SMatthew R. Ochs if (ctxi) {
1397fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: Reusing context for LUN rctxid=%016llx\n",
139865be2c79SMatthew R. Ochs __func__, rctxid);
1399888baf06SMatthew R. Ochs kref_get(&ctxi->kref);
140065be2c79SMatthew R. Ochs list_add(&lun_access->list, &ctxi->luns);
140165be2c79SMatthew R. Ochs goto out_attach;
140265be2c79SMatthew R. Ochs }
140365be2c79SMatthew R. Ochs
14045d1952acSUma Krishnan ctxi = create_context(cfg);
14055d1952acSUma Krishnan if (unlikely(!ctxi)) {
1406fb67d44dSMatthew R. Ochs dev_err(dev, "%s: Failed to create context ctxid=%d\n",
14075d1952acSUma Krishnan __func__, ctxid);
14089ff87041SChristophe JAILLET rc = -ENOMEM;
14095d1952acSUma Krishnan goto err;
14105d1952acSUma Krishnan }
14115d1952acSUma Krishnan
141225b8e08eSMatthew R. Ochs ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
141321891a45SGeliang Tang if (IS_ERR_OR_NULL(ctx)) {
141465be2c79SMatthew R. Ochs dev_err(dev, "%s: Could not initialize context %p\n",
141565be2c79SMatthew R. Ochs __func__, ctx);
141665be2c79SMatthew R. Ochs rc = -ENODEV;
14178a96b52aSMatthew R. Ochs goto err;
141865be2c79SMatthew R. Ochs }
141965be2c79SMatthew R. Ochs
142025b8e08eSMatthew R. Ochs rc = cfg->ops->start_work(ctx, irqs);
14215d1952acSUma Krishnan if (unlikely(rc)) {
14225d1952acSUma Krishnan dev_dbg(dev, "%s: Could not start context rc=%d\n",
14235d1952acSUma Krishnan __func__, rc);
14245d1952acSUma Krishnan goto err;
14255d1952acSUma Krishnan }
14265d1952acSUma Krishnan
142725b8e08eSMatthew R. Ochs ctxid = cfg->ops->process_element(ctx);
1428e37390beSDan Carpenter if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
1429fb67d44dSMatthew R. Ochs dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
143065be2c79SMatthew R. Ochs rc = -EPERM;
14318a96b52aSMatthew R. Ochs goto err;
143265be2c79SMatthew R. Ochs }
143365be2c79SMatthew R. Ochs
143425b8e08eSMatthew R. Ochs file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd);
143565be2c79SMatthew R. Ochs if (unlikely(fd < 0)) {
143665be2c79SMatthew R. Ochs rc = -ENODEV;
143765be2c79SMatthew R. Ochs dev_err(dev, "%s: Could not get file descriptor\n", __func__);
14388a96b52aSMatthew R. Ochs goto err;
143965be2c79SMatthew R. Ochs }
144065be2c79SMatthew R. Ochs
144165be2c79SMatthew R. Ochs /* Translate read/write O_* flags from fcntl.h to AFU permission bits */
144265be2c79SMatthew R. Ochs perms = SISL_RHT_PERM(attach->hdr.flags + 1);
144365be2c79SMatthew R. Ochs
14445e6632d1SMatthew R. Ochs /* Context mutex is locked upon return */
1445af2047ecSMatthew R. Ochs init_context(ctxi, cfg, ctx, ctxid, file, perms, irqs);
14465e6632d1SMatthew R. Ochs
144765be2c79SMatthew R. Ochs rc = afu_attach(cfg, ctxi);
144865be2c79SMatthew R. Ochs if (unlikely(rc)) {
144965be2c79SMatthew R. Ochs dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
14508a96b52aSMatthew R. Ochs goto err;
145165be2c79SMatthew R. Ochs }
145265be2c79SMatthew R. Ochs
145365be2c79SMatthew R. Ochs /*
145465be2c79SMatthew R. Ochs * No error paths after this point. Once the fd is installed it's
145565be2c79SMatthew R. Ochs * visible to user space and can't be undone safely on this thread.
145665be2c79SMatthew R. Ochs * There is no need to worry about a deadlock here because no one
145765be2c79SMatthew R. Ochs * knows about us yet; we can be the only one holding our mutex.
145865be2c79SMatthew R. Ochs */
145965be2c79SMatthew R. Ochs list_add(&lun_access->list, &ctxi->luns);
146065be2c79SMatthew R. Ochs mutex_lock(&cfg->ctx_tbl_list_mutex);
146165be2c79SMatthew R. Ochs mutex_lock(&ctxi->mutex);
146265be2c79SMatthew R. Ochs cfg->ctx_tbl[ctxid] = ctxi;
146365be2c79SMatthew R. Ochs mutex_unlock(&cfg->ctx_tbl_list_mutex);
146465be2c79SMatthew R. Ochs fd_install(fd, file);
146565be2c79SMatthew R. Ochs
146665be2c79SMatthew R. Ochs out_attach:
1467cd34af40SMatthew R. Ochs if (fd != -1)
1468696d0b0cSMatthew R. Ochs flags |= DK_CXLFLASH_APP_CLOSE_ADAP_FD;
1469696d0b0cSMatthew R. Ochs if (afu_is_sq_cmd_mode(afu))
1470696d0b0cSMatthew R. Ochs flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE;
1471cd34af40SMatthew R. Ochs
1472696d0b0cSMatthew R. Ochs attach->hdr.return_flags = flags;
147365be2c79SMatthew R. Ochs attach->context_id = ctxi->ctxid;
147465be2c79SMatthew R. Ochs attach->block_size = gli->blk_len;
147565be2c79SMatthew R. Ochs attach->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
147665be2c79SMatthew R. Ochs attach->last_lba = gli->max_lba;
1477471a5a60SManoj Kumar attach->max_xfer = sdev->host->max_sectors * MAX_SECTOR_UNIT;
1478471a5a60SManoj Kumar attach->max_xfer /= gli->blk_len;
147965be2c79SMatthew R. Ochs
148065be2c79SMatthew R. Ochs out:
148165be2c79SMatthew R. Ochs attach->adap_fd = fd;
148265be2c79SMatthew R. Ochs
148365be2c79SMatthew R. Ochs if (ctxi)
148465be2c79SMatthew R. Ochs put_context(ctxi);
148565be2c79SMatthew R. Ochs
148665be2c79SMatthew R. Ochs dev_dbg(dev, "%s: returning ctxid=%d fd=%d bs=%lld rc=%d llba=%lld\n",
148765be2c79SMatthew R. Ochs __func__, ctxid, fd, attach->block_size, rc, attach->last_lba);
148865be2c79SMatthew R. Ochs return rc;
148965be2c79SMatthew R. Ochs
14908a96b52aSMatthew R. Ochs err:
14918a96b52aSMatthew R. Ochs /* Cleanup CXL context; okay to 'stop' even if it was not started */
14928a96b52aSMatthew R. Ochs if (!IS_ERR_OR_NULL(ctx)) {
149325b8e08eSMatthew R. Ochs cfg->ops->stop_context(ctx);
149425b8e08eSMatthew R. Ochs cfg->ops->release_context(ctx);
14958a96b52aSMatthew R. Ochs ctx = NULL;
14968a96b52aSMatthew R. Ochs }
14978a96b52aSMatthew R. Ochs
149865be2c79SMatthew R. Ochs /*
149965be2c79SMatthew R. Ochs * Here, we're overriding the fops with a dummy all-NULL fops because
150065be2c79SMatthew R. Ochs * fput() calls the release fop, which will cause us to mistakenly
150165be2c79SMatthew R. Ochs * call into the CXL code. Rather than try to add yet more complexity
150265be2c79SMatthew R. Ochs * to that routine (cxlflash_cxl_release) we should try to fix the
150365be2c79SMatthew R. Ochs * issue here.
150465be2c79SMatthew R. Ochs */
15058a96b52aSMatthew R. Ochs if (fd > 0) {
150665be2c79SMatthew R. Ochs file->f_op = &null_fops;
150765be2c79SMatthew R. Ochs fput(file);
150865be2c79SMatthew R. Ochs put_unused_fd(fd);
150965be2c79SMatthew R. Ochs fd = -1;
15108a96b52aSMatthew R. Ochs file = NULL;
15118a96b52aSMatthew R. Ochs }
15128a96b52aSMatthew R. Ochs
151341b99e1aSMatthew R. Ochs /* Cleanup our context */
15148a96b52aSMatthew R. Ochs if (ctxi) {
15158a96b52aSMatthew R. Ochs destroy_context(cfg, ctxi);
15168a96b52aSMatthew R. Ochs ctxi = NULL;
15178a96b52aSMatthew R. Ochs }
15188a96b52aSMatthew R. Ochs
151965be2c79SMatthew R. Ochs kfree(lun_access);
152022fe1ae8SMatthew R. Ochs scsi_device_put(sdev);
152165be2c79SMatthew R. Ochs goto out;
152265be2c79SMatthew R. Ochs }
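/*
 * Hedged user-space sketch (illustrative only): attaching a second LUN
 * to an existing context via the reuse flag. 'saved_ctx_token' is the
 * context_id returned by a prior attach; 'second_lun_fd' is assumed to
 * be an open fd on another cxlflash disk. Note that the read/write
 * permission bits also live in hdr.flags (see the SISL_RHT_PERM use
 * above), so OR the reuse flag in rather than overwriting them.
 *
 *	struct dk_cxlflash_attach attach = { 0 };
 *
 *	attach.hdr.flags |= DK_CXLFLASH_ATTACH_REUSE_CONTEXT;
 *	attach.context_id = saved_ctx_token;
 *	attach.num_interrupts = 4;		// must not exceed 4
 *	if (ioctl(second_lun_fd, DK_CXLFLASH_ATTACH, &attach) == 0)
 *		// the same context now references both LUNs
 */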
152365be2c79SMatthew R. Ochs
152465be2c79SMatthew R. Ochs /**
152565be2c79SMatthew R. Ochs * recover_context() - recovers a context in error
152665be2c79SMatthew R. Ochs * @cfg: Internal structure associated with the host.
152765be2c79SMatthew R. Ochs * @ctxi: Context to release.
1528de9f0b0cSMatthew R. Ochs * @adap_fd: Adapter file descriptor associated with new/recovered context.
152965be2c79SMatthew R. Ochs *
153065be2c79SMatthew R. Ochs  * Reestablishes the state for a context-in-error.
153165be2c79SMatthew R. Ochs *
153265be2c79SMatthew R. Ochs * Return: 0 on success, -errno on failure
153365be2c79SMatthew R. Ochs */
1534de9f0b0cSMatthew R. Ochs static int recover_context(struct cxlflash_cfg *cfg,
1535de9f0b0cSMatthew R. Ochs struct ctx_info *ctxi,
1536de9f0b0cSMatthew R. Ochs int *adap_fd)
153765be2c79SMatthew R. Ochs {
153865be2c79SMatthew R. Ochs struct device *dev = &cfg->dev->dev;
153965be2c79SMatthew R. Ochs int rc = 0;
1540cd34af40SMatthew R. Ochs int fd = -1;
154165be2c79SMatthew R. Ochs int ctxid = -1;
154265be2c79SMatthew R. Ochs struct file *file;
1543b070545dSUma Krishnan void *ctx;
154465be2c79SMatthew R. Ochs struct afu *afu = cfg->afu;
154565be2c79SMatthew R. Ochs
154625b8e08eSMatthew R. Ochs ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
154721891a45SGeliang Tang if (IS_ERR_OR_NULL(ctx)) {
154865be2c79SMatthew R. Ochs dev_err(dev, "%s: Could not initialize context %p\n",
154965be2c79SMatthew R. Ochs __func__, ctx);
155065be2c79SMatthew R. Ochs rc = -ENODEV;
155165be2c79SMatthew R. Ochs goto out;
155265be2c79SMatthew R. Ochs }
155365be2c79SMatthew R. Ochs
155425b8e08eSMatthew R. Ochs rc = cfg->ops->start_work(ctx, ctxi->irqs);
15555d1952acSUma Krishnan if (unlikely(rc)) {
15565d1952acSUma Krishnan dev_dbg(dev, "%s: Could not start context rc=%d\n",
15575d1952acSUma Krishnan __func__, rc);
15585d1952acSUma Krishnan goto err1;
15595d1952acSUma Krishnan }
15605d1952acSUma Krishnan
156125b8e08eSMatthew R. Ochs ctxid = cfg->ops->process_element(ctx);
1562e37390beSDan Carpenter if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
1563fb67d44dSMatthew R. Ochs dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
156465be2c79SMatthew R. Ochs rc = -EPERM;
15655d1952acSUma Krishnan goto err2;
156665be2c79SMatthew R. Ochs }
156765be2c79SMatthew R. Ochs
156825b8e08eSMatthew R. Ochs file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd);
156965be2c79SMatthew R. Ochs if (unlikely(fd < 0)) {
157065be2c79SMatthew R. Ochs rc = -ENODEV;
157165be2c79SMatthew R. Ochs dev_err(dev, "%s: Could not get file descriptor\n", __func__);
157265be2c79SMatthew R. Ochs goto err2;
157365be2c79SMatthew R. Ochs }
157465be2c79SMatthew R. Ochs
157565be2c79SMatthew R. Ochs /* Update with new MMIO area based on updated context id */
157665be2c79SMatthew R. Ochs ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
157765be2c79SMatthew R. Ochs
157865be2c79SMatthew R. Ochs rc = afu_attach(cfg, ctxi);
157965be2c79SMatthew R. Ochs if (rc) {
158065be2c79SMatthew R. Ochs dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
158165be2c79SMatthew R. Ochs goto err3;
158265be2c79SMatthew R. Ochs }
158365be2c79SMatthew R. Ochs
158465be2c79SMatthew R. Ochs /*
158565be2c79SMatthew R. Ochs * No error paths after this point. Once the fd is installed it's
158665be2c79SMatthew R. Ochs * visible to user space and can't be undone safely on this thread.
158765be2c79SMatthew R. Ochs */
158865be2c79SMatthew R. Ochs ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
158965be2c79SMatthew R. Ochs ctxi->ctx = ctx;
159065be2c79SMatthew R. Ochs ctxi->file = file;
159165be2c79SMatthew R. Ochs
159265be2c79SMatthew R. Ochs /*
159365be2c79SMatthew R. Ochs * Put context back in table (note the reinit of the context list);
159465be2c79SMatthew R. Ochs * we must first drop the context's mutex and then acquire it in
159565be2c79SMatthew R. Ochs * order with the table/list mutex to avoid a deadlock - safe to do
159665be2c79SMatthew R. Ochs * here because no one can find us at this moment in time.
159765be2c79SMatthew R. Ochs */
159865be2c79SMatthew R. Ochs mutex_unlock(&ctxi->mutex);
159965be2c79SMatthew R. Ochs mutex_lock(&cfg->ctx_tbl_list_mutex);
160065be2c79SMatthew R. Ochs mutex_lock(&ctxi->mutex);
160165be2c79SMatthew R. Ochs list_del_init(&ctxi->list);
160265be2c79SMatthew R. Ochs cfg->ctx_tbl[ctxid] = ctxi;
160365be2c79SMatthew R. Ochs mutex_unlock(&cfg->ctx_tbl_list_mutex);
160465be2c79SMatthew R. Ochs fd_install(fd, file);
1605de9f0b0cSMatthew R. Ochs *adap_fd = fd;
160665be2c79SMatthew R. Ochs out:
160765be2c79SMatthew R. Ochs dev_dbg(dev, "%s: returning ctxid=%d fd=%d rc=%d\n",
160865be2c79SMatthew R. Ochs __func__, ctxid, fd, rc);
160965be2c79SMatthew R. Ochs return rc;
161065be2c79SMatthew R. Ochs
161165be2c79SMatthew R. Ochs err3:
161265be2c79SMatthew R. Ochs fput(file);
161365be2c79SMatthew R. Ochs put_unused_fd(fd);
16145d1952acSUma Krishnan err2:
161525b8e08eSMatthew R. Ochs cfg->ops->stop_context(ctx);
161665be2c79SMatthew R. Ochs err1:
161725b8e08eSMatthew R. Ochs cfg->ops->release_context(ctx);
161865be2c79SMatthew R. Ochs goto out;
161965be2c79SMatthew R. Ochs }
162065be2c79SMatthew R. Ochs
162165be2c79SMatthew R. Ochs /**
162265be2c79SMatthew R. Ochs * cxlflash_afu_recover() - initiates AFU recovery
162365be2c79SMatthew R. Ochs * @sdev: SCSI device associated with LUN.
162465be2c79SMatthew R. Ochs * @recover: Recover ioctl data structure.
162565be2c79SMatthew R. Ochs *
162665be2c79SMatthew R. Ochs * Only a single recovery is allowed at a time to avoid exhausting CXL
162765be2c79SMatthew R. Ochs * resources (leading to recovery failure) in the event that we're up
162865be2c79SMatthew R. Ochs * against the maximum number of contexts limit. For similar reasons,
162965be2c79SMatthew R. Ochs * a context recovery is retried if there are multiple recoveries taking
163065be2c79SMatthew R. Ochs * place at the same time and the failure was due to CXL services being
163165be2c79SMatthew R. Ochs * unable to keep up.
163265be2c79SMatthew R. Ochs *
1633635f6b08SManoj N. Kumar * As this routine is called on ioctl context, it holds the ioctl r/w
1634635f6b08SManoj N. Kumar * semaphore that is used to drain ioctls in recovery scenarios. The
1635635f6b08SManoj N. Kumar * implementation to achieve the pacing described above (a local mutex)
1636635f6b08SManoj N. Kumar * requires that the ioctl r/w semaphore be dropped and reacquired to
1637635f6b08SManoj N. Kumar * avoid a 3-way deadlock when multiple process recoveries operate in
1638635f6b08SManoj N. Kumar * parallel.
1639635f6b08SManoj N. Kumar *
164065be2c79SMatthew R. Ochs * Because a user can detect an error condition before the kernel, it is
164165be2c79SMatthew R. Ochs * quite possible for this routine to act as the kernel's EEH detection
164265be2c79SMatthew R. Ochs * source (MMIO read of mbox_r). Because of this, there is a window of
164365be2c79SMatthew R. Ochs * time where an EEH might have been detected but not yet 'serviced'
1644439e85c1SMatthew R. Ochs * (callback invoked, causing the device to enter reset state). To avoid
164565be2c79SMatthew R. Ochs * looping in this routine during that window, a 1 second sleep is in place
164665be2c79SMatthew R. Ochs * between the time the MMIO failure is detected and the time a wait on the
1647439e85c1SMatthew R. Ochs * reset wait queue is attempted via check_state().
164865be2c79SMatthew R. Ochs *
164965be2c79SMatthew R. Ochs * Return: 0 on success, -errno on failure
165065be2c79SMatthew R. Ochs */
165165be2c79SMatthew R. Ochs static int cxlflash_afu_recover(struct scsi_device *sdev,
165265be2c79SMatthew R. Ochs struct dk_cxlflash_recover_afu *recover)
165365be2c79SMatthew R. Ochs {
1654fb67d44dSMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(sdev->host);
165565be2c79SMatthew R. Ochs struct device *dev = &cfg->dev->dev;
165665be2c79SMatthew R. Ochs struct llun_info *lli = sdev->hostdata;
165765be2c79SMatthew R. Ochs struct afu *afu = cfg->afu;
165865be2c79SMatthew R. Ochs struct ctx_info *ctxi = NULL;
165965be2c79SMatthew R. Ochs struct mutex *mutex = &cfg->ctx_recovery_mutex;
1660bfc0bab1SUma Krishnan struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
1661696d0b0cSMatthew R. Ochs u64 flags;
166265be2c79SMatthew R. Ochs u64 ctxid = DECODE_CTXID(recover->context_id),
166365be2c79SMatthew R. Ochs rctxid = recover->context_id;
166465be2c79SMatthew R. Ochs long reg;
16651a9e3941SMatthew R. Ochs bool locked = true;
166665be2c79SMatthew R. Ochs int lretry = 20; /* up to 2 seconds */
1667de9f0b0cSMatthew R. Ochs int new_adap_fd = -1;
166865be2c79SMatthew R. Ochs int rc = 0;
166965be2c79SMatthew R. Ochs
167065be2c79SMatthew R. Ochs atomic_inc(&cfg->recovery_threads);
1671635f6b08SManoj N. Kumar up_read(&cfg->ioctl_rwsem);
167265be2c79SMatthew R. Ochs rc = mutex_lock_interruptible(mutex);
1673635f6b08SManoj N. Kumar down_read(&cfg->ioctl_rwsem);
16741a9e3941SMatthew R. Ochs if (rc) {
16751a9e3941SMatthew R. Ochs locked = false;
167665be2c79SMatthew R. Ochs goto out;
16771a9e3941SMatthew R. Ochs }
16781a9e3941SMatthew R. Ochs
1679635f6b08SManoj N. Kumar rc = check_state(cfg);
1680635f6b08SManoj N. Kumar if (rc) {
1681fb67d44dSMatthew R. Ochs dev_err(dev, "%s: Failed state rc=%d\n", __func__, rc);
1682635f6b08SManoj N. Kumar rc = -ENODEV;
1683635f6b08SManoj N. Kumar goto out;
1684635f6b08SManoj N. Kumar }
168565be2c79SMatthew R. Ochs
1686fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: reason=%016llx rctxid=%016llx\n",
168765be2c79SMatthew R. Ochs __func__, recover->reason, rctxid);
168865be2c79SMatthew R. Ochs
168965be2c79SMatthew R. Ochs retry:
169065be2c79SMatthew R. Ochs /* Ensure that this process is attached to the context */
169165be2c79SMatthew R. Ochs ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
169265be2c79SMatthew R. Ochs if (unlikely(!ctxi)) {
1693fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
169465be2c79SMatthew R. Ochs rc = -EINVAL;
169565be2c79SMatthew R. Ochs goto out;
169665be2c79SMatthew R. Ochs }
169765be2c79SMatthew R. Ochs
169865be2c79SMatthew R. Ochs if (ctxi->err_recovery_active) {
169965be2c79SMatthew R. Ochs retry_recover:
1700de9f0b0cSMatthew R. Ochs rc = recover_context(cfg, ctxi, &new_adap_fd);
170165be2c79SMatthew R. Ochs if (unlikely(rc)) {
1702fb67d44dSMatthew R. Ochs dev_err(dev, "%s: Recovery failed ctxid=%llu rc=%d\n",
170365be2c79SMatthew R. Ochs __func__, ctxid, rc);
170465be2c79SMatthew R. Ochs if ((rc == -ENODEV) &&
170565be2c79SMatthew R. Ochs ((atomic_read(&cfg->recovery_threads) > 1) ||
170665be2c79SMatthew R. Ochs (lretry--))) {
1707fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: Going to try again\n",
170865be2c79SMatthew R. Ochs __func__);
170965be2c79SMatthew R. Ochs mutex_unlock(mutex);
171065be2c79SMatthew R. Ochs msleep(100);
171165be2c79SMatthew R. Ochs rc = mutex_lock_interruptible(mutex);
17121a9e3941SMatthew R. Ochs if (rc) {
17131a9e3941SMatthew R. Ochs locked = false;
171465be2c79SMatthew R. Ochs goto out;
17151a9e3941SMatthew R. Ochs }
171665be2c79SMatthew R. Ochs goto retry_recover;
171765be2c79SMatthew R. Ochs }
171865be2c79SMatthew R. Ochs
171965be2c79SMatthew R. Ochs goto out;
172065be2c79SMatthew R. Ochs }
172165be2c79SMatthew R. Ochs
172265be2c79SMatthew R. Ochs ctxi->err_recovery_active = false;
1723696d0b0cSMatthew R. Ochs
1724696d0b0cSMatthew R. Ochs flags = DK_CXLFLASH_APP_CLOSE_ADAP_FD |
1725696d0b0cSMatthew R. Ochs DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET;
1726696d0b0cSMatthew R. Ochs if (afu_is_sq_cmd_mode(afu))
1727696d0b0cSMatthew R. Ochs flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE;
1728696d0b0cSMatthew R. Ochs
1729696d0b0cSMatthew R. Ochs recover->hdr.return_flags = flags;
173065be2c79SMatthew R. Ochs recover->context_id = ctxi->ctxid;
1731de9f0b0cSMatthew R. Ochs recover->adap_fd = new_adap_fd;
173265be2c79SMatthew R. Ochs recover->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
173365be2c79SMatthew R. Ochs goto out;
173465be2c79SMatthew R. Ochs }
173565be2c79SMatthew R. Ochs
173665be2c79SMatthew R. Ochs /* Test if in error state */
1737bfc0bab1SUma Krishnan reg = readq_be(&hwq->ctrl_map->mbox_r);
173865be2c79SMatthew R. Ochs if (reg == -1) {
17390a27ae51SMatthew R. Ochs dev_dbg(dev, "%s: MMIO fail, wait for recovery.\n", __func__);
17400a27ae51SMatthew R. Ochs
17410a27ae51SMatthew R. Ochs /*
17420a27ae51SMatthew R. Ochs * Before checking the state, put back the context obtained with
17430a27ae51SMatthew R. Ochs * get_context() as it is no longer needed and sleep for a short
17440a27ae51SMatthew R. Ochs * period of time (see prolog notes).
17450a27ae51SMatthew R. Ochs */
17460a27ae51SMatthew R. Ochs put_context(ctxi);
174765be2c79SMatthew R. Ochs ctxi = NULL;
174865be2c79SMatthew R. Ochs ssleep(1);
174965be2c79SMatthew R. Ochs rc = check_state(cfg);
175065be2c79SMatthew R. Ochs if (unlikely(rc))
175165be2c79SMatthew R. Ochs goto out;
175265be2c79SMatthew R. Ochs goto retry;
175365be2c79SMatthew R. Ochs }
175465be2c79SMatthew R. Ochs
1755fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: MMIO working, no recovery required\n", __func__);
175665be2c79SMatthew R. Ochs out:
175765be2c79SMatthew R. Ochs if (likely(ctxi))
175865be2c79SMatthew R. Ochs put_context(ctxi);
17591a9e3941SMatthew R. Ochs if (locked)
176065be2c79SMatthew R. Ochs mutex_unlock(mutex);
176165be2c79SMatthew R. Ochs atomic_dec_if_positive(&cfg->recovery_threads);
176265be2c79SMatthew R. Ochs return rc;
176365be2c79SMatthew R. Ochs }
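/*
 * Hedged user-space sketch (illustrative only): a recovery flow as
 * described in the prolog. 'disk_fd' and 'saved_ctx_token' are
 * assumptions carried over from an earlier attach.
 *
 *	struct dk_cxlflash_recover_afu recover = { 0 };
 *
 *	recover.context_id = saved_ctx_token;
 *	if (ioctl(disk_fd, DK_CXLFLASH_RECOVER_AFU, &recover) == 0 &&
 *	    (recover.hdr.return_flags &
 *	     DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET)) {
 *		// context was re-created: remap MMIO using the fresh
 *		// recover.adap_fd and recover.mmio_size before resuming
 *	}
 */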
176465be2c79SMatthew R. Ochs
176565be2c79SMatthew R. Ochs /**
176665be2c79SMatthew R. Ochs * process_sense() - evaluates and processes sense data
176765be2c79SMatthew R. Ochs * @sdev: SCSI device associated with LUN.
176865be2c79SMatthew R. Ochs * @verify: Verify ioctl data structure.
176965be2c79SMatthew R. Ochs *
177065be2c79SMatthew R. Ochs * Return: 0 on success, -errno on failure
177165be2c79SMatthew R. Ochs */
177265be2c79SMatthew R. Ochs static int process_sense(struct scsi_device *sdev,
177365be2c79SMatthew R. Ochs struct dk_cxlflash_verify *verify)
177465be2c79SMatthew R. Ochs {
1775fb67d44dSMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(sdev->host);
177665be2c79SMatthew R. Ochs struct device *dev = &cfg->dev->dev;
177765be2c79SMatthew R. Ochs struct llun_info *lli = sdev->hostdata;
177865be2c79SMatthew R. Ochs struct glun_info *gli = lli->parent;
177965be2c79SMatthew R. Ochs u64 prev_lba = gli->max_lba;
178065be2c79SMatthew R. Ochs struct scsi_sense_hdr sshdr = { 0 };
178165be2c79SMatthew R. Ochs int rc = 0;
178265be2c79SMatthew R. Ochs
178365be2c79SMatthew R. Ochs rc = scsi_normalize_sense((const u8 *)&verify->sense_data,
178465be2c79SMatthew R. Ochs DK_CXLFLASH_VERIFY_SENSE_LEN, &sshdr);
178565be2c79SMatthew R. Ochs if (!rc) {
1786fb67d44dSMatthew R. Ochs dev_err(dev, "%s: Failed to normalize sense data\n", __func__);
178765be2c79SMatthew R. Ochs rc = -EINVAL;
178865be2c79SMatthew R. Ochs goto out;
178965be2c79SMatthew R. Ochs }
179065be2c79SMatthew R. Ochs
179165be2c79SMatthew R. Ochs switch (sshdr.sense_key) {
179265be2c79SMatthew R. Ochs case NO_SENSE:
179365be2c79SMatthew R. Ochs case RECOVERED_ERROR:
179465be2c79SMatthew R. Ochs case NOT_READY:
179565be2c79SMatthew R. Ochs break;
179665be2c79SMatthew R. Ochs case UNIT_ATTENTION:
179765be2c79SMatthew R. Ochs switch (sshdr.asc) {
179865be2c79SMatthew R. Ochs case 0x29: /* Power on Reset or Device Reset */
1799df561f66SGustavo A. R. Silva fallthrough;
180065be2c79SMatthew R. Ochs case 0x2A: /* Device settings/capacity changed */
180165be2c79SMatthew R. Ochs rc = read_cap16(sdev, lli);
180265be2c79SMatthew R. Ochs if (rc) {
180365be2c79SMatthew R. Ochs rc = -ENODEV;
180465be2c79SMatthew R. Ochs break;
180565be2c79SMatthew R. Ochs }
180665be2c79SMatthew R. Ochs if (prev_lba != gli->max_lba)
180765be2c79SMatthew R. Ochs dev_dbg(dev, "%s: Capacity changed old=%lld "
180865be2c79SMatthew R. Ochs "new=%lld\n", __func__, prev_lba,
180965be2c79SMatthew R. Ochs gli->max_lba);
181065be2c79SMatthew R. Ochs break;
181165be2c79SMatthew R. Ochs case 0x3F: /* Report LUNs changed, Rescan. */
181265be2c79SMatthew R. Ochs scsi_scan_host(cfg->host);
181365be2c79SMatthew R. Ochs break;
181465be2c79SMatthew R. Ochs default:
181565be2c79SMatthew R. Ochs rc = -EIO;
181665be2c79SMatthew R. Ochs break;
181765be2c79SMatthew R. Ochs }
181865be2c79SMatthew R. Ochs break;
181965be2c79SMatthew R. Ochs default:
182065be2c79SMatthew R. Ochs rc = -EIO;
182165be2c79SMatthew R. Ochs break;
182265be2c79SMatthew R. Ochs }
182365be2c79SMatthew R. Ochs out:
182465be2c79SMatthew R. Ochs dev_dbg(dev, "%s: sense_key %x asc %x ascq %x rc %d\n", __func__,
182565be2c79SMatthew R. Ochs sshdr.sense_key, sshdr.asc, sshdr.ascq, rc);
182665be2c79SMatthew R. Ochs return rc;
182765be2c79SMatthew R. Ochs }
182865be2c79SMatthew R. Ochs
182965be2c79SMatthew R. Ochs /**
183065be2c79SMatthew R. Ochs * cxlflash_disk_verify() - verifies a LUN is the same and handle size changes
183165be2c79SMatthew R. Ochs * @sdev: SCSI device associated with LUN.
183265be2c79SMatthew R. Ochs * @verify: Verify ioctl data structure.
183365be2c79SMatthew R. Ochs *
183465be2c79SMatthew R. Ochs * Return: 0 on success, -errno on failure
183565be2c79SMatthew R. Ochs */
183665be2c79SMatthew R. Ochs static int cxlflash_disk_verify(struct scsi_device *sdev,
183765be2c79SMatthew R. Ochs struct dk_cxlflash_verify *verify)
183865be2c79SMatthew R. Ochs {
183965be2c79SMatthew R. Ochs int rc = 0;
184065be2c79SMatthew R. Ochs struct ctx_info *ctxi = NULL;
1841fb67d44dSMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(sdev->host);
184265be2c79SMatthew R. Ochs struct device *dev = &cfg->dev->dev;
184365be2c79SMatthew R. Ochs struct llun_info *lli = sdev->hostdata;
184465be2c79SMatthew R. Ochs struct glun_info *gli = lli->parent;
184565be2c79SMatthew R. Ochs struct sisl_rht_entry *rhte = NULL;
184665be2c79SMatthew R. Ochs res_hndl_t rhndl = verify->rsrc_handle;
184765be2c79SMatthew R. Ochs u64 ctxid = DECODE_CTXID(verify->context_id),
184865be2c79SMatthew R. Ochs rctxid = verify->context_id;
184965be2c79SMatthew R. Ochs u64 last_lba = 0;
185065be2c79SMatthew R. Ochs
1851fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: ctxid=%llu rhndl=%016llx, hint=%016llx, "
1852fb67d44dSMatthew R. Ochs "flags=%016llx\n", __func__, ctxid, verify->rsrc_handle,
185365be2c79SMatthew R. Ochs verify->hint, verify->hdr.flags);
185465be2c79SMatthew R. Ochs
185565be2c79SMatthew R. Ochs ctxi = get_context(cfg, rctxid, lli, 0);
185665be2c79SMatthew R. Ochs if (unlikely(!ctxi)) {
1857fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
185865be2c79SMatthew R. Ochs rc = -EINVAL;
185965be2c79SMatthew R. Ochs goto out;
186065be2c79SMatthew R. Ochs }
186165be2c79SMatthew R. Ochs
186265be2c79SMatthew R. Ochs rhte = get_rhte(ctxi, rhndl, lli);
186365be2c79SMatthew R. Ochs if (unlikely(!rhte)) {
1864fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
186565be2c79SMatthew R. Ochs __func__, rhndl);
186665be2c79SMatthew R. Ochs rc = -EINVAL;
186765be2c79SMatthew R. Ochs goto out;
186865be2c79SMatthew R. Ochs }
186965be2c79SMatthew R. Ochs
187065be2c79SMatthew R. Ochs /*
187165be2c79SMatthew R. Ochs * Look at the hint/sense to see if it requires us to redrive
187265be2c79SMatthew R. Ochs * inquiry (i.e. the Unit attention is due to the WWN changing).
187365be2c79SMatthew R. Ochs */
187465be2c79SMatthew R. Ochs if (verify->hint & DK_CXLFLASH_VERIFY_HINT_SENSE) {
18758e782623SMatthew R. Ochs /* Can't hold mutex across process_sense/read_cap16,
18768e782623SMatthew R. Ochs * since we could have an intervening EEH event.
18778e782623SMatthew R. Ochs */
18788e782623SMatthew R. Ochs ctxi->unavail = true;
18798e782623SMatthew R. Ochs mutex_unlock(&ctxi->mutex);
188065be2c79SMatthew R. Ochs rc = process_sense(sdev, verify);
188165be2c79SMatthew R. Ochs if (unlikely(rc)) {
188265be2c79SMatthew R. Ochs dev_err(dev, "%s: Failed to validate sense data (%d)\n",
188365be2c79SMatthew R. Ochs __func__, rc);
18848e782623SMatthew R. Ochs mutex_lock(&ctxi->mutex);
18858e782623SMatthew R. Ochs ctxi->unavail = false;
188665be2c79SMatthew R. Ochs goto out;
188765be2c79SMatthew R. Ochs }
18888e782623SMatthew R. Ochs mutex_lock(&ctxi->mutex);
18898e782623SMatthew R. Ochs ctxi->unavail = false;
189065be2c79SMatthew R. Ochs }
189165be2c79SMatthew R. Ochs
189265be2c79SMatthew R. Ochs switch (gli->mode) {
189365be2c79SMatthew R. Ochs case MODE_PHYSICAL:
189465be2c79SMatthew R. Ochs last_lba = gli->max_lba;
189565be2c79SMatthew R. Ochs break;
18962cb79266SMatthew R. Ochs case MODE_VIRTUAL:
18972cb79266SMatthew R. Ochs /* Cast lxt_cnt to u64 for multiply to be treated as 64bit op */
18982cb79266SMatthew R. Ochs last_lba = ((u64)rhte->lxt_cnt * MC_CHUNK_SIZE * gli->blk_len);
18992cb79266SMatthew R. Ochs last_lba /= CXLFLASH_BLOCK_SIZE;
19002cb79266SMatthew R. Ochs last_lba--;
19012cb79266SMatthew R. Ochs break;
190265be2c79SMatthew R. Ochs default:
190365be2c79SMatthew R. Ochs WARN(1, "Unsupported LUN mode!");
190465be2c79SMatthew R. Ochs }
190565be2c79SMatthew R. Ochs
190665be2c79SMatthew R. Ochs verify->last_lba = last_lba;
190765be2c79SMatthew R. Ochs
190865be2c79SMatthew R. Ochs out:
190965be2c79SMatthew R. Ochs if (likely(ctxi))
191065be2c79SMatthew R. Ochs put_context(ctxi);
1911fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: returning rc=%d llba=%llx\n",
191265be2c79SMatthew R. Ochs __func__, rc, verify->last_lba);
191365be2c79SMatthew R. Ochs return rc;
191465be2c79SMatthew R. Ochs }
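/*
 * Worked example for the MODE_VIRTUAL computation above, with values
 * assumed purely for illustration (MC_CHUNK_SIZE = 256, gli->blk_len =
 * CXLFLASH_BLOCK_SIZE = 4096) and rhte->lxt_cnt = 2:
 *
 *	last_lba = (2 * 256 * 4096) / 4096 - 1 = 511
 *
 * i.e. two translation-table chunks expose 512 virtual blocks,
 * addressed 0 through 511.
 */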
191565be2c79SMatthew R. Ochs
191665be2c79SMatthew R. Ochs /**
191765be2c79SMatthew R. Ochs * decode_ioctl() - translates an encoded ioctl to an easily identifiable string
191865be2c79SMatthew R. Ochs * @cmd: The ioctl command to decode.
191965be2c79SMatthew R. Ochs *
192065be2c79SMatthew R. Ochs * Return: A string identifying the decoded ioctl.
192165be2c79SMatthew R. Ochs */
19226f4e626fSNathan Chancellor static char *decode_ioctl(unsigned int cmd)
192365be2c79SMatthew R. Ochs {
192465be2c79SMatthew R. Ochs switch (cmd) {
192565be2c79SMatthew R. Ochs case DK_CXLFLASH_ATTACH:
192665be2c79SMatthew R. Ochs return __stringify_1(DK_CXLFLASH_ATTACH);
192765be2c79SMatthew R. Ochs case DK_CXLFLASH_USER_DIRECT:
192865be2c79SMatthew R. Ochs return __stringify_1(DK_CXLFLASH_USER_DIRECT);
19292cb79266SMatthew R. Ochs case DK_CXLFLASH_USER_VIRTUAL:
19302cb79266SMatthew R. Ochs return __stringify_1(DK_CXLFLASH_USER_VIRTUAL);
19312cb79266SMatthew R. Ochs case DK_CXLFLASH_VLUN_RESIZE:
19322cb79266SMatthew R. Ochs return __stringify_1(DK_CXLFLASH_VLUN_RESIZE);
193365be2c79SMatthew R. Ochs case DK_CXLFLASH_RELEASE:
193465be2c79SMatthew R. Ochs return __stringify_1(DK_CXLFLASH_RELEASE);
193565be2c79SMatthew R. Ochs case DK_CXLFLASH_DETACH:
193665be2c79SMatthew R. Ochs return __stringify_1(DK_CXLFLASH_DETACH);
193765be2c79SMatthew R. Ochs case DK_CXLFLASH_VERIFY:
193865be2c79SMatthew R. Ochs return __stringify_1(DK_CXLFLASH_VERIFY);
19392cb79266SMatthew R. Ochs case DK_CXLFLASH_VLUN_CLONE:
19402cb79266SMatthew R. Ochs return __stringify_1(DK_CXLFLASH_VLUN_CLONE);
194165be2c79SMatthew R. Ochs case DK_CXLFLASH_RECOVER_AFU:
194265be2c79SMatthew R. Ochs return __stringify_1(DK_CXLFLASH_RECOVER_AFU);
194365be2c79SMatthew R. Ochs case DK_CXLFLASH_MANAGE_LUN:
194465be2c79SMatthew R. Ochs return __stringify_1(DK_CXLFLASH_MANAGE_LUN);
194565be2c79SMatthew R. Ochs }
194665be2c79SMatthew R. Ochs
194765be2c79SMatthew R. Ochs return "UNKNOWN";
194865be2c79SMatthew R. Ochs }
194965be2c79SMatthew R. Ochs
195065be2c79SMatthew R. Ochs /**
195165be2c79SMatthew R. Ochs * cxlflash_disk_direct_open() - opens a direct (physical) disk
195265be2c79SMatthew R. Ochs * @sdev: SCSI device associated with LUN.
195365be2c79SMatthew R. Ochs * @arg: UDirect ioctl data structure.
195465be2c79SMatthew R. Ochs *
195565be2c79SMatthew R. Ochs * On successful return, the user is informed of the resource handle
195665be2c79SMatthew R. Ochs  * to be used to identify the direct LUN and the size (in blocks) of
195765be2c79SMatthew R. Ochs  * the direct LUN in last LBA format.
195865be2c79SMatthew R. Ochs *
195965be2c79SMatthew R. Ochs * Return: 0 on success, -errno on failure
196065be2c79SMatthew R. Ochs */
196165be2c79SMatthew R. Ochs static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg)
196265be2c79SMatthew R. Ochs {
1963fb67d44dSMatthew R. Ochs struct cxlflash_cfg *cfg = shost_priv(sdev->host);
196465be2c79SMatthew R. Ochs struct device *dev = &cfg->dev->dev;
196565be2c79SMatthew R. Ochs struct afu *afu = cfg->afu;
196665be2c79SMatthew R. Ochs struct llun_info *lli = sdev->hostdata;
196765be2c79SMatthew R. Ochs struct glun_info *gli = lli->parent;
1968c2c292f4SUma Krishnan struct dk_cxlflash_release rel = { { 0 }, 0 };
196965be2c79SMatthew R. Ochs
197065be2c79SMatthew R. Ochs struct dk_cxlflash_udirect *pphys = (struct dk_cxlflash_udirect *)arg;
197165be2c79SMatthew R. Ochs
197265be2c79SMatthew R. Ochs u64 ctxid = DECODE_CTXID(pphys->context_id),
197365be2c79SMatthew R. Ochs rctxid = pphys->context_id;
197465be2c79SMatthew R. Ochs u64 lun_size = 0;
197565be2c79SMatthew R. Ochs u64 last_lba = 0;
197665be2c79SMatthew R. Ochs u64 rsrc_handle = -1;
19778fa4f177SMatthew R. Ochs u32 port = CHAN2PORTMASK(sdev->channel);
197865be2c79SMatthew R. Ochs
197965be2c79SMatthew R. Ochs int rc = 0;
198065be2c79SMatthew R. Ochs
198165be2c79SMatthew R. Ochs struct ctx_info *ctxi = NULL;
198265be2c79SMatthew R. Ochs struct sisl_rht_entry *rhte = NULL;
198365be2c79SMatthew R. Ochs
1984fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: ctxid=%llu ls=%llu\n", __func__, ctxid, lun_size);
198565be2c79SMatthew R. Ochs
198665be2c79SMatthew R. Ochs rc = cxlflash_lun_attach(gli, MODE_PHYSICAL, false);
198765be2c79SMatthew R. Ochs if (unlikely(rc)) {
1988fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: Failed attach to LUN (PHYSICAL)\n", __func__);
198965be2c79SMatthew R. Ochs goto out;
199065be2c79SMatthew R. Ochs }
199165be2c79SMatthew R. Ochs
199265be2c79SMatthew R. Ochs ctxi = get_context(cfg, rctxid, lli, 0);
199365be2c79SMatthew R. Ochs if (unlikely(!ctxi)) {
1994fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
199565be2c79SMatthew R. Ochs rc = -EINVAL;
199665be2c79SMatthew R. Ochs goto err1;
199765be2c79SMatthew R. Ochs }
199865be2c79SMatthew R. Ochs
199965be2c79SMatthew R. Ochs rhte = rhte_checkout(ctxi, lli);
200065be2c79SMatthew R. Ochs if (unlikely(!rhte)) {
2001fb67d44dSMatthew R. Ochs dev_dbg(dev, "%s: Too many opens ctxid=%lld\n",
2002fb67d44dSMatthew R. Ochs __func__, ctxid);
200365be2c79SMatthew R. Ochs rc = -EMFILE; /* too many opens */
200465be2c79SMatthew R. Ochs goto err1;
200565be2c79SMatthew R. Ochs }
200665be2c79SMatthew R. Ochs
200765be2c79SMatthew R. Ochs rsrc_handle = (rhte - ctxi->rht_start);
200865be2c79SMatthew R. Ochs
200965be2c79SMatthew R. Ochs rht_format1(rhte, lli->lun_id[sdev->channel], ctxi->rht_perms, port);
201065be2c79SMatthew R. Ochs
201165be2c79SMatthew R. Ochs last_lba = gli->max_lba;
201265be2c79SMatthew R. Ochs pphys->hdr.return_flags = 0;
201365be2c79SMatthew R. Ochs pphys->last_lba = last_lba;
201465be2c79SMatthew R. Ochs pphys->rsrc_handle = rsrc_handle;
201565be2c79SMatthew R. Ochs
	rc = cxlflash_afu_sync(afu, ctxid, rsrc_handle, AFU_LW_SYNC);
	if (unlikely(rc)) {
		dev_dbg(dev, "%s: AFU sync failed rc=%d\n", __func__, rc);
		goto err2;
	}

out:
	if (likely(ctxi))
		put_context(ctxi);
	dev_dbg(dev, "%s: returning handle=%llu rc=%d llba=%llu\n",
		__func__, rsrc_handle, rc, last_lba);
	return rc;

err2:
	marshal_udir_to_rele(pphys, &rel);
	_cxlflash_disk_release(sdev, ctxi, &rel);
	goto out;
err1:
	cxlflash_lun_detach(gli);
	goto out;
}
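
/*
 * Illustrative userspace sketch (not part of this driver): one way a
 * caller might exercise the physical path above, assuming a cxlflash
 * disk at a hypothetical /dev/sdX and eliding all error handling.
 * Attach first to obtain a context, then open the LUN directly:
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <scsi/cxlflash_ioctl.h>
 *
 *	struct dk_cxlflash_attach attach = { 0 };
 *	struct dk_cxlflash_udirect udirect = { 0 };
 *	int fd = open("/dev/sdX", O_RDWR);	// hypothetical device node
 *
 *	attach.hdr.version = DK_CXLFLASH_VERSION_0;
 *	attach.num_interrupts = 4;		// assumed interrupt count
 *	ioctl(fd, DK_CXLFLASH_ATTACH, &attach);
 *
 *	udirect.hdr.version = DK_CXLFLASH_VERSION_0;
 *	udirect.context_id = attach.context_id;
 *	ioctl(fd, DK_CXLFLASH_USER_DIRECT, &udirect);
 *	// on success, udirect.rsrc_handle and udirect.last_lba are set
 */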

/**
 * ioctl_common() - common IOCTL handler for driver
 * @sdev: SCSI device associated with LUN.
 * @cmd: IOCTL command.
 *
 * Handles common fencing operations that are valid for multiple ioctls. Always
 * allow through ioctls that are cleanup-oriented in nature, even when operating
 * in a failed/terminating state.
 *
 * Return: 0 on success, -errno on failure
 */
static int ioctl_common(struct scsi_device *sdev, unsigned int cmd)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	int rc = 0;

	if (unlikely(!lli)) {
		dev_dbg(dev, "%s: Unknown LUN\n", __func__);
		rc = -EINVAL;
		goto out;
	}

	rc = check_state(cfg);
	if (unlikely(rc) && (cfg->state == STATE_FAILTERM)) {
		switch (cmd) {
		case DK_CXLFLASH_VLUN_RESIZE:
		case DK_CXLFLASH_RELEASE:
		case DK_CXLFLASH_DETACH:
			dev_dbg(dev, "%s: Command override rc=%d\n",
				__func__, rc);
			rc = 0;
			break;
		}
	}
out:
	return rc;
}

/**
 * cxlflash_ioctl() - IOCTL handler for driver
 * @sdev: SCSI device associated with LUN.
 * @cmd: IOCTL command.
 * @arg: Userspace ioctl data structure.
 *
 * A read/write semaphore is used to implement a 'drain' of currently
 * running ioctls. The read semaphore is taken at the beginning of each
 * ioctl thread and released upon concluding execution. Additionally the
 * semaphore should be released and then reacquired in any ioctl execution
 * path which will wait for an event to occur that is outside the scope of
 * the ioctl (e.g. an adapter reset). To drain the ioctls currently running,
 * a thread simply needs to acquire the write semaphore (an illustrative
 * sketch of such a drain follows this function).
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg)
{
	typedef int (*sioctl) (struct scsi_device *, void *);

	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct dk_cxlflash_hdr *hdr;
	char buf[sizeof(union cxlflash_ioctls)];
	size_t size = 0;
	bool known_ioctl = false;
	int idx;
	int rc = 0;
	struct Scsi_Host *shost = sdev->host;
	sioctl do_ioctl = NULL;

	static const struct {
		size_t size;
		sioctl ioctl;
	} ioctl_tbl[] = {	/* NOTE: order matters here */
	{sizeof(struct dk_cxlflash_attach), (sioctl)cxlflash_disk_attach},
	{sizeof(struct dk_cxlflash_udirect), cxlflash_disk_direct_open},
	{sizeof(struct dk_cxlflash_release), (sioctl)cxlflash_disk_release},
	{sizeof(struct dk_cxlflash_detach), (sioctl)cxlflash_disk_detach},
	{sizeof(struct dk_cxlflash_verify), (sioctl)cxlflash_disk_verify},
	{sizeof(struct dk_cxlflash_recover_afu), (sioctl)cxlflash_afu_recover},
	{sizeof(struct dk_cxlflash_manage_lun), (sioctl)cxlflash_manage_lun},
	{sizeof(struct dk_cxlflash_uvirtual), cxlflash_disk_virtual_open},
	{sizeof(struct dk_cxlflash_resize), (sioctl)cxlflash_vlun_resize},
	{sizeof(struct dk_cxlflash_clone), (sioctl)cxlflash_disk_clone},
	};
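
	/*
	 * The table is indexed by _IOC_NR(cmd) - _IOC_NR(DK_CXLFLASH_ATTACH)
	 * below, so entries must remain in ioctl command-number order and
	 * the command numbers must be contiguous.
	 */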

	/* Hold read semaphore so we can drain if needed */
	down_read(&cfg->ioctl_rwsem);

	/* Restrict command set to physical support only for internal LUN */
	if (afu->internal_lun)
		switch (cmd) {
		case DK_CXLFLASH_RELEASE:
		case DK_CXLFLASH_USER_VIRTUAL:
		case DK_CXLFLASH_VLUN_RESIZE:
		case DK_CXLFLASH_VLUN_CLONE:
			dev_dbg(dev, "%s: %s not supported for lun_mode=%d\n",
				__func__, decode_ioctl(cmd), afu->internal_lun);
			rc = -EINVAL;
			goto cxlflash_ioctl_exit;
		}

	switch (cmd) {
	case DK_CXLFLASH_ATTACH:
	case DK_CXLFLASH_USER_DIRECT:
	case DK_CXLFLASH_RELEASE:
	case DK_CXLFLASH_DETACH:
	case DK_CXLFLASH_VERIFY:
	case DK_CXLFLASH_RECOVER_AFU:
	case DK_CXLFLASH_USER_VIRTUAL:
	case DK_CXLFLASH_VLUN_RESIZE:
	case DK_CXLFLASH_VLUN_CLONE:
		dev_dbg(dev, "%s: %s (%08X) on dev(%d/%d/%d/%llu)\n",
			__func__, decode_ioctl(cmd), cmd, shost->host_no,
			sdev->channel, sdev->id, sdev->lun);
		rc = ioctl_common(sdev, cmd);
		if (unlikely(rc))
			goto cxlflash_ioctl_exit;

		fallthrough;

	case DK_CXLFLASH_MANAGE_LUN:
		known_ioctl = true;
		idx = _IOC_NR(cmd) - _IOC_NR(DK_CXLFLASH_ATTACH);
		size = ioctl_tbl[idx].size;
		do_ioctl = ioctl_tbl[idx].ioctl;

		if (likely(do_ioctl))
			break;

		fallthrough;
	default:
		rc = -EINVAL;
		goto cxlflash_ioctl_exit;
	}

	if (unlikely(copy_from_user(&buf, arg, size))) {
		dev_err(dev, "%s: copy_from_user() fail size=%lu cmd=%u (%s) arg=%p\n",
			__func__, size, cmd, decode_ioctl(cmd), arg);
		rc = -EFAULT;
		goto cxlflash_ioctl_exit;
	}

	hdr = (struct dk_cxlflash_hdr *)&buf;
	if (hdr->version != DK_CXLFLASH_VERSION_0) {
		dev_dbg(dev, "%s: Version %u not supported for %s\n",
			__func__, hdr->version, decode_ioctl(cmd));
		rc = -EINVAL;
		goto cxlflash_ioctl_exit;
	}

	if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->rsvd[2] || hdr->return_flags) {
		dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
		rc = -EINVAL;
		goto cxlflash_ioctl_exit;
	}

	rc = do_ioctl(sdev, (void *)&buf);
	if (likely(!rc))
		if (unlikely(copy_to_user(arg, &buf, size))) {
			dev_err(dev, "%s: copy_to_user() fail size=%lu cmd=%u (%s) arg=%p\n",
				__func__, size, cmd, decode_ioctl(cmd), arg);
			rc = -EFAULT;
		}

	/* fall through to exit */

cxlflash_ioctl_exit:
	up_read(&cfg->ioctl_rwsem);
	if (unlikely(rc && known_ioctl))
		dev_err(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
			"returned rc %d\n", __func__,
			decode_ioctl(cmd), cmd, shost->host_no,
			sdev->channel, sdev->id, sdev->lun, rc);
	else
		dev_dbg(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
			"returned rc %d\n", __func__, decode_ioctl(cmd),
			cmd, shost->host_no, sdev->channel, sdev->id,
			sdev->lun, rc);
	return rc;
}
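
/*
 * Illustrative sketch (an assumption, not necessarily how the driver does
 * it): how a path such as an adapter reset could drain in-flight ioctls
 * using the scheme described above cxlflash_ioctl(). Taking the write side
 * of ioctl_rwsem blocks until every reader (each in-flight ioctl) has
 * released it; the helper name is illustrative.
 *
 *	static void drain_ioctls(struct cxlflash_cfg *cfg)
 *	{
 *		down_write(&cfg->ioctl_rwsem);	// waits for all readers
 *		up_write(&cfg->ioctl_rwsem);	// new ioctls may proceed
 *	}
 */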