// SPDX-License-Identifier: GPL-2.0
/*
 * zfcp device driver
 *
 * Setup and helper functions to access QDIO.
 *
 * Copyright IBM Corp. 2002, 2020
 */

#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/lockdep.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "zfcp_ext.h"
#include "zfcp_qdio.h"

static bool enable_multibuffer = true;
module_param_named(datarouter, enable_multibuffer, bool, 0400);
MODULE_PARM_DESC(datarouter, "Enable hardware data router support (default on)");

#define ZFCP_QDIO_REQUEST_RESCAN_MSECS	(MSEC_PER_SEC * 10)
#define ZFCP_QDIO_REQUEST_SCAN_MSECS	MSEC_PER_SEC

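/*
 * React to an unrecoverable QDIO error: for an SLSB state error trigger
 * hardware logging and shut the adapter down, for all other errors
 * reopen the adapter through error recovery.
 */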
static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *dbftag,
				    unsigned int qdio_err)
{
	struct zfcp_adapter *adapter = qdio->adapter;

	dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");

	if (qdio_err & QDIO_ERROR_SLSB_STATE) {
		zfcp_qdio_siosl(adapter);
		zfcp_erp_adapter_shutdown(adapter, 0, dbftag);
		return;
	}
	zfcp_erp_adapter_reopen(adapter,
				ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
				ZFCP_STATUS_COMMON_ERP_FAILED, dbftag);
}

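/*
 * Clear cnt SBALs of the given queue, starting at index first and
 * wrapping around at the end of the circular queue.
 */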
static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
{
	int i, sbal_idx;

	for (i = first; i < first + cnt; i++) {
		sbal_idx = i % QDIO_MAX_BUFFERS_PER_Q;
		memset(sbal[sbal_idx], 0, sizeof(struct qdio_buffer));
	}
}

/* this needs to be called prior to updating the queue fill level */
static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
{
	unsigned long long now, span;
	int used;

	now = get_tod_clock_monotonic();
	span = (now - qdio->req_q_time) >> 12;
	used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
	qdio->req_q_util += used * span;
	qdio->req_q_time = now;
}

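/*
 * QDIO request (output) queue handler. Completions are normally reaped
 * by zfcp_qdio_request_tasklet(), so an invocation of this handler is
 * treated as a queue error.
 */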
static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
			      int queue_no, int idx, int count,
			      unsigned long parm)
{
	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;

	zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
}

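/*
 * Scan the request queue for completed SBALs, clear them, update the
 * utilization statistics and the free-SBAL counter, and re-arm the
 * rescan timer while SBALs are still outstanding.
 */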
static void zfcp_qdio_request_tasklet(struct tasklet_struct *tasklet)
{
	struct zfcp_qdio *qdio = from_tasklet(qdio, tasklet, request_tasklet);
	struct ccw_device *cdev = qdio->adapter->ccw_device;
	unsigned int start, error;
	int completed;

	completed = qdio_inspect_output_queue(cdev, 0, &start, &error);
	if (completed > 0) {
		if (error) {
			zfcp_qdio_handler_error(qdio, "qdreqt1", error);
		} else {
			/* cleanup all SBALs being program-owned now */
			zfcp_qdio_zero_sbals(qdio->req_q, start, completed);

			spin_lock_irq(&qdio->stat_lock);
			zfcp_qdio_account(qdio);
			spin_unlock_irq(&qdio->stat_lock);
			atomic_add(completed, &qdio->req_q_free);
			wake_up(&qdio->req_q_wq);
		}
	}

	if (atomic_read(&qdio->req_q_free) < QDIO_MAX_BUFFERS_PER_Q)
		timer_reduce(&qdio->request_timer,
			     jiffies + msecs_to_jiffies(ZFCP_QDIO_REQUEST_RESCAN_MSECS));
}

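/*
 * Fallback timer: guarantee that the request queue gets scanned even
 * when no further I/O triggers the tasklet.
 */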
static void zfcp_qdio_request_timer(struct timer_list *timer)
{
	struct zfcp_qdio *qdio = from_timer(qdio, timer, request_timer);

	tasklet_schedule(&qdio->request_tasklet);
}

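/*
 * QDIO response (input) queue handler: on a queue error, collect the
 * affected SBALs for debug tracing and start error handling; otherwise
 * hand each returned SBAL to the FSF layer and give the buffers back
 * to QDIO.
 */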
static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
			       int queue_no, int idx, int count,
			       unsigned long parm)
{
	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
	struct zfcp_adapter *adapter = qdio->adapter;
	int sbal_no, sbal_idx;

	if (unlikely(qdio_err)) {
		if (zfcp_adapter_multi_buffer_active(adapter)) {
			void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
			struct qdio_buffer_element *sbale;
			u64 req_id;
			u8 scount;

			memset(pl, 0,
			       ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
			sbale = qdio->res_q[idx]->element;
			req_id = sbale->addr;
			scount = min(sbale->scount + 1,
				     ZFCP_QDIO_MAX_SBALS_PER_REQ + 1);
				     /* incl. signaling SBAL */

			for (sbal_no = 0; sbal_no < scount; sbal_no++) {
				sbal_idx = (idx + sbal_no) %
					QDIO_MAX_BUFFERS_PER_Q;
				pl[sbal_no] = qdio->res_q[sbal_idx];
			}
			zfcp_dbf_hba_def_err(adapter, req_id, scount, pl);
		}
		zfcp_qdio_handler_error(qdio, "qdires1", qdio_err);
		return;
	}

	/*
	 * go through all SBALs from input queue currently
	 * returned by QDIO layer
	 */
	for (sbal_no = 0; sbal_no < count; sbal_no++) {
		sbal_idx = (idx + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
		/* go through all SBALEs of SBAL */
		zfcp_fsf_reqid_check(qdio, sbal_idx);
	}

	/*
	 * put SBALs back to response queue
	 */
	if (qdio_add_bufs_to_input_queue(cdev, 0, idx, count))
		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2");
}

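/*
 * Tasklet run after a QDIO interrupt: kick the request tasklet if
 * SBALs are still outstanding, process the response queue, and
 * re-enable the QDIO interrupt, rescheduling itself if more work is
 * already pending.
 */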
static void zfcp_qdio_irq_tasklet(struct tasklet_struct *tasklet)
{
	struct zfcp_qdio *qdio = from_tasklet(qdio, tasklet, irq_tasklet);
	struct ccw_device *cdev = qdio->adapter->ccw_device;
	unsigned int start, error;
	int completed;

	if (atomic_read(&qdio->req_q_free) < QDIO_MAX_BUFFERS_PER_Q)
		tasklet_schedule(&qdio->request_tasklet);

	/* Check the Response Queue: */
	completed = qdio_inspect_input_queue(cdev, 0, &start, &error);
	if (completed < 0)
		return;
	if (completed > 0)
		zfcp_qdio_int_resp(cdev, error, 0, start, completed,
				   (unsigned long) qdio);

	if (qdio_start_irq(cdev))
		/* More work pending: */
		tasklet_schedule(&qdio->irq_tasklet);
}

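/* irq_poll callback registered with QDIO; defers all work to the irq tasklet. */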
static void zfcp_qdio_poll(struct ccw_device *cdev, unsigned long data)
{
	struct zfcp_qdio *qdio = (struct zfcp_qdio *) data;

	tasklet_schedule(&qdio->irq_tasklet);
}

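/*
 * Close the current SBAL of a request and chain an additional SBAL to
 * it, or return NULL if the request has already reached its last
 * allowed SBAL.
 */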
static struct qdio_buffer_element *
zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	struct qdio_buffer_element *sbale;

	/* set last entry flag in current SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;

	/* don't exceed last allowed SBAL */
	if (q_req->sbal_last == q_req->sbal_limit)
		return NULL;

	/* set chaining flag in first SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->sflags |= SBAL_SFLAGS0_MORE_SBALS;

	/* calculate index of next SBAL */
	q_req->sbal_last++;
	q_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;

	/* keep this request's number of SBALs up-to-date */
	q_req->sbal_number++;
	BUG_ON(q_req->sbal_number > ZFCP_QDIO_MAX_SBALS_PER_REQ);

	/* start at first SBALE of new SBAL */
	q_req->sbale_curr = 0;

	/* set storage-block type for new SBAL */
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->sflags |= q_req->sbtype;

	return sbale;
}

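/* Return the next free SBALE of a request, chaining a new SBAL when
 * the current one is full. */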
static struct qdio_buffer_element *
zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	if (q_req->sbale_curr == qdio->max_sbale_per_sbal - 1)
		return zfcp_qdio_sbal_chain(qdio, q_req);
	q_req->sbale_curr++;
	return zfcp_qdio_sbale_curr(qdio, q_req);
}

/**
 * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * @sg: scatter-gather list
 * Returns: zero or -EINVAL on error
 */
int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
			    struct scatterlist *sg)
{
	struct qdio_buffer_element *sbale;

	/* set storage-block type for this request */
	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->sflags |= q_req->sbtype;

	for (; sg; sg = sg_next(sg)) {
		sbale = zfcp_qdio_sbale_next(qdio, q_req);
		if (!sbale) {
			atomic_inc(&qdio->req_q_full);
			zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
					     q_req->sbal_number);
			return -EINVAL;
		}
		sbale->addr = sg_phys(sg);
		sbale->length = sg->length;
	}
	return 0;
}

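/* Returns 1 if a free SBAL is available or QDIO is down (so a waiter
 * may stop waiting), 0 otherwise. */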
static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
{
	if (atomic_read(&qdio->req_q_free) ||
	    !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return 1;
	return 0;
}

/**
 * zfcp_qdio_sbal_get - get free sbal in request queue, wait if necessary
 * @qdio: pointer to struct zfcp_qdio
 *
 * The req_q_lock must be held by the caller of this function, and
 * this function may only be called from process context; it will
 * sleep when waiting for a free sbal.
 *
 * Returns: 0 on success, -EIO if there is no free sbal after waiting.
 */
int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
{
	long ret;

	ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
		       zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);

	if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return -EIO;

	if (ret > 0)
		return 0;

	if (!ret) {
		atomic_inc(&qdio->req_q_full);
		/* assume hanging outbound queue, try queue recovery */
		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
	}

	return -EIO;
}

/**
 * zfcp_qdio_send - send req to QDIO
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * Returns: 0 on success, error otherwise
 */
int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	int retval;
	u8 sbal_number = q_req->sbal_number;

	/*
	 * This should actually be a spin_lock_bh(stat_lock), to protect against
	 * Request Queue completion processing in tasklet context.
	 * But we can't do so (and are safe), as we always get called with IRQs
	 * disabled by spin_lock_irq[save](req_q_lock).
	 */
	lockdep_assert_irqs_disabled();
	spin_lock(&qdio->stat_lock);
	zfcp_qdio_account(qdio);
	spin_unlock(&qdio->stat_lock);

	atomic_sub(sbal_number, &qdio->req_q_free);

	retval = qdio_add_bufs_to_output_queue(qdio->adapter->ccw_device, 0,
					       q_req->sbal_first, sbal_number,
					       NULL);

	if (unlikely(retval)) {
		/* Failed to submit the IO, roll back our modifications. */
		atomic_add(sbal_number, &qdio->req_q_free);
		zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
				     sbal_number);
		return retval;
	}

	if (atomic_read(&qdio->req_q_free) <= 2 * ZFCP_QDIO_MAX_SBALS_PER_REQ)
		tasklet_schedule(&qdio->request_tasklet);
	else
		timer_reduce(&qdio->request_timer,
			     jiffies + msecs_to_jiffies(ZFCP_QDIO_REQUEST_SCAN_MSECS));

	/* account for transferred buffers */
	qdio->req_q_idx += sbal_number;
	qdio->req_q_idx %= QDIO_MAX_BUFFERS_PER_Q;

	return 0;
}

/**
 * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
 * @qdio: pointer to struct zfcp_qdio
 * Returns: -ENOMEM on memory allocation error or return value from
 *          qdio_allocate
 */
static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
{
	int ret;

	ret = qdio_alloc_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
	if (ret)
		return -ENOMEM;

	ret = qdio_alloc_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
	if (ret)
		goto free_req_q;

	init_waitqueue_head(&qdio->req_q_wq);

	ret = qdio_allocate(qdio->adapter->ccw_device, 1, 1);
	if (ret)
		goto free_res_q;

	return 0;

free_res_q:
	qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
free_req_q:
	qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
	return ret;
}

/**
 * zfcp_qdio_close - close qdio queues for an adapter
 * @qdio: pointer to structure zfcp_qdio
 */
void zfcp_qdio_close(struct zfcp_qdio *qdio)
{
	struct zfcp_adapter *adapter = qdio->adapter;
	int idx, count;

	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return;

	/*
	 * Clear QDIOUP flag, thus qdio_add_bufs_to_output_queue() is not called
	 * during qdio_shutdown().
	 */
	spin_lock_irq(&qdio->req_q_lock);
	atomic_andnot(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
	spin_unlock_irq(&qdio->req_q_lock);

	wake_up(&qdio->req_q_wq);

	tasklet_disable(&qdio->irq_tasklet);
	tasklet_disable(&qdio->request_tasklet);
	del_timer_sync(&qdio->request_timer);
	qdio_stop_irq(adapter->ccw_device);
	qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);

	/* cleanup used outbound sbals */
	count = atomic_read(&qdio->req_q_free);
	if (count < QDIO_MAX_BUFFERS_PER_Q) {
		idx = (qdio->req_q_idx + count) % QDIO_MAX_BUFFERS_PER_Q;
		count = QDIO_MAX_BUFFERS_PER_Q - count;
		zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
	}
	qdio->req_q_idx = 0;
	atomic_set(&qdio->req_q_free, 0);
}

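/**
 * zfcp_qdio_shost_update - propagate qdio limits to the SCSI host
 * @adapter: adapter whose Scsi_Host is updated (skipped if not registered yet)
 * @qdio: qdio instance providing the per-request limits
 */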
void zfcp_qdio_shost_update(struct zfcp_adapter *const adapter,
			    const struct zfcp_qdio *const qdio)
{
	struct Scsi_Host *const shost = adapter->scsi_host;

	if (shost == NULL)
		return;

	shost->sg_tablesize = qdio->max_sbale_per_req;
	shost->max_sectors = qdio->max_sbale_per_req * 8;
}

/**
 * zfcp_qdio_open - prepare and initialize response queue
 * @qdio: pointer to struct zfcp_qdio
 * Returns: 0 on success, otherwise -EIO
 */
int zfcp_qdio_open(struct zfcp_qdio *qdio)
{
	struct qdio_buffer **input_sbals[1] = {qdio->res_q};
	struct qdio_buffer **output_sbals[1] = {qdio->req_q};
	struct qdio_buffer_element *sbale;
	struct qdio_initialize init_data = {0};
	struct zfcp_adapter *adapter = qdio->adapter;
	struct ccw_device *cdev = adapter->ccw_device;
	struct qdio_ssqd_desc ssqd;
	int cc;

	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
		return -EIO;

	atomic_andnot(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
		      &qdio->adapter->status);

	init_data.q_format = QDIO_ZFCP_QFMT;
	init_data.qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
	if (enable_multibuffer)
		init_data.qdr_ac |= QDR_AC_MULTI_BUFFER_ENABLE;
	init_data.no_input_qs = 1;
	init_data.no_output_qs = 1;
	init_data.input_handler = zfcp_qdio_int_resp;
	init_data.output_handler = zfcp_qdio_int_req;
	init_data.irq_poll = zfcp_qdio_poll;
	init_data.int_parm = (unsigned long) qdio;
	init_data.input_sbal_addr_array = input_sbals;
	init_data.output_sbal_addr_array = output_sbals;

	if (qdio_establish(cdev, &init_data))
		goto failed_establish;

	if (qdio_get_ssqd_desc(cdev, &ssqd))
		goto failed_qdio;

	if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED)
		atomic_or(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
			  &qdio->adapter->status);

	if (ssqd.qdioac2 & CHSC_AC2_MULTI_BUFFER_ENABLED) {
		atomic_or(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER;
	} else {
		atomic_andnot(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER - 1;
	}

	qdio->max_sbale_per_req =
		ZFCP_QDIO_MAX_SBALS_PER_REQ * qdio->max_sbale_per_sbal
		- 2;
	if (qdio_activate(cdev))
		goto failed_qdio;

	for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
		sbale = &(qdio->res_q[cc]->element[0]);
		sbale->length = 0;
		sbale->eflags = SBAL_EFLAGS_LAST_ENTRY;
		sbale->sflags = 0;
		sbale->addr = 0;
	}

	if (qdio_add_bufs_to_input_queue(cdev, 0, 0, QDIO_MAX_BUFFERS_PER_Q))
		goto failed_qdio;

	/* set index of first available SBAL / number of available SBALs */
	qdio->req_q_idx = 0;
	atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
	atomic_or(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);

	/* Enable processing for Request Queue completions: */
	tasklet_enable(&qdio->request_tasklet);
	/* Enable processing for QDIO interrupts: */
	tasklet_enable(&qdio->irq_tasklet);
	/* This results in a qdio_start_irq(): */
	tasklet_schedule(&qdio->irq_tasklet);

	zfcp_qdio_shost_update(adapter, qdio);

	return 0;

failed_qdio:
	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
failed_establish:
	dev_err(&cdev->dev,
		"Setting up the QDIO connection to the FCP adapter failed\n");
	return -EIO;
}

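/**
 * zfcp_qdio_destroy - free the qdio structure and its queue memory
 * @qdio: pointer to struct zfcp_qdio (may be NULL)
 */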
void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
{
	if (!qdio)
		return;

	tasklet_kill(&qdio->irq_tasklet);
	tasklet_kill(&qdio->request_tasklet);

	if (qdio->adapter->ccw_device)
		qdio_free(qdio->adapter->ccw_device);

	qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
	qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
	kfree(qdio);
}

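/**
 * zfcp_qdio_setup - allocate and initialize the qdio structure for an adapter
 * @adapter: pointer to struct zfcp_adapter
 * Returns: 0 on success, -ENOMEM on allocation failure
 */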
int zfcp_qdio_setup(struct zfcp_adapter *adapter)
{
	struct zfcp_qdio *qdio;

	qdio = kzalloc(sizeof(struct zfcp_qdio), GFP_KERNEL);
	if (!qdio)
		return -ENOMEM;

	qdio->adapter = adapter;

	if (zfcp_qdio_allocate(qdio)) {
		kfree(qdio);
		return -ENOMEM;
	}

	spin_lock_init(&qdio->req_q_lock);
	spin_lock_init(&qdio->stat_lock);
	timer_setup(&qdio->request_timer, zfcp_qdio_request_timer, 0);
	tasklet_setup(&qdio->irq_tasklet, zfcp_qdio_irq_tasklet);
	tasklet_setup(&qdio->request_tasklet, zfcp_qdio_request_tasklet);
	tasklet_disable(&qdio->irq_tasklet);
	tasklet_disable(&qdio->request_tasklet);

	adapter->qdio = qdio;
	return 0;
}

/**
 * zfcp_qdio_siosl - Trigger logging in FCP channel
 * @adapter: The zfcp_adapter where to trigger logging
 *
 * Call the cio siosl function to trigger hardware logging. This
 * wrapper function sets a flag to ensure hardware logging is only
 * triggered once before going through qdio shutdown.
 *
 * The triggers are always run from qdio tasklet context, so no
 * additional synchronization is necessary.
 */
void zfcp_qdio_siosl(struct zfcp_adapter *adapter)
{
	int rc;

	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_SIOSL_ISSUED)
		return;

	rc = ccw_device_siosl(adapter->ccw_device);
	if (!rc)
		atomic_or(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
			  &adapter->status);
}