// SPDX-License-Identifier: GPL-2.0
/*
 * zfcp device driver
 *
 * Setup and helper functions to access QDIO.
 *
 * Copyright IBM Corp. 2002, 2020
 */

#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include "zfcp_ext.h"
#include "zfcp_qdio.h"

static bool enable_multibuffer = true;
module_param_named(datarouter, enable_multibuffer, bool, 0400);
MODULE_PARM_DESC(datarouter, "Enable hardware data router support (default on)");

static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *dbftag,
				    unsigned int qdio_err)
{
	struct zfcp_adapter *adapter = qdio->adapter;

	dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");

	if (qdio_err & QDIO_ERROR_SLSB_STATE) {
		zfcp_qdio_siosl(adapter);
		zfcp_erp_adapter_shutdown(adapter, 0, dbftag);
		return;
	}
	zfcp_erp_adapter_reopen(adapter,
				ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
				ZFCP_STATUS_COMMON_ERP_FAILED, dbftag);
}

static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
{
	int i, sbal_idx;

	for (i = first; i < first + cnt; i++) {
		sbal_idx = i % QDIO_MAX_BUFFERS_PER_Q;
		memset(sbal[sbal_idx], 0, sizeof(struct qdio_buffer));
	}
}

/* this needs to be called prior to updating the queue fill level */
static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
{
	unsigned long long now, span;
	int used;

	now = get_tod_clock_monotonic();
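	/*
	 * Note: bit 51 of the s390 TOD clock ticks once per microsecond,
	 * so the ">> 12" below converts the elapsed TOD delta to
	 * microseconds; req_q_util thus accumulates used SBALs weighted
	 * by microseconds of elapsed time.
	 */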
	span = (now - qdio->req_q_time) >> 12;
	used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
	qdio->req_q_util += used * span;
	qdio->req_q_time = now;
}

static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
			      int queue_no, int idx, int count,
			      unsigned long parm)
{
	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;

	if (unlikely(qdio_err)) {
		zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
		return;
	}

	/* cleanup all SBALs being program-owned now */
	zfcp_qdio_zero_sbals(qdio->req_q, idx, count);

	spin_lock_irq(&qdio->stat_lock);
	zfcp_qdio_account(qdio);
	spin_unlock_irq(&qdio->stat_lock);
	atomic_add(count, &qdio->req_q_free);
	wake_up(&qdio->req_q_wq);
}

static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
			       int queue_no, int idx, int count,
			       unsigned long parm)
{
	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
	struct zfcp_adapter *adapter = qdio->adapter;
	int sbal_no, sbal_idx;

	if (unlikely(qdio_err)) {
		if (zfcp_adapter_multi_buffer_active(adapter)) {
			void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
			struct qdio_buffer_element *sbale;
			u64 req_id;
			u8 scount;

			memset(pl, 0,
			       ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
			sbale = qdio->res_q[idx]->element;
			req_id = sbale->addr;
			scount = min(sbale->scount + 1,
				     ZFCP_QDIO_MAX_SBALS_PER_REQ + 1);
				     /* incl. signaling SBAL */

			for (sbal_no = 0; sbal_no < scount; sbal_no++) {
				sbal_idx = (idx + sbal_no) %
					QDIO_MAX_BUFFERS_PER_Q;
				pl[sbal_no] = qdio->res_q[sbal_idx];
			}
			zfcp_dbf_hba_def_err(adapter, req_id, scount, pl);
		}
		zfcp_qdio_handler_error(qdio, "qdires1", qdio_err);
		return;
	}

	/*
	 * go through all SBALs from input queue currently
	 * returned by QDIO layer
	 */
	for (sbal_no = 0; sbal_no < count; sbal_no++) {
		sbal_idx = (idx + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
		/* go through all SBALEs of SBAL */
		zfcp_fsf_reqid_check(qdio, sbal_idx);
	}

	/*
	 * put SBALs back to response queue
	 */
	if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count))
		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2");
}

static struct qdio_buffer_element *
zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	struct qdio_buffer_element *sbale;

	/* set last entry flag in current SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;

	/* don't exceed last allowed SBAL */
	if (q_req->sbal_last == q_req->sbal_limit)
		return NULL;

	/* set chaining flag in first SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->sflags |= SBAL_SFLAGS0_MORE_SBALS;

	/* calculate index of next SBAL */
	q_req->sbal_last++;
	q_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;

	/* keep this request's number of SBALs up-to-date */
	q_req->sbal_number++;
	BUG_ON(q_req->sbal_number > ZFCP_QDIO_MAX_SBALS_PER_REQ);

	/* start at first SBALE of new SBAL */
	q_req->sbale_curr = 0;

	/* set storage-block type for new SBAL */
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->sflags |= q_req->sbtype;

	return sbale;
}

static struct qdio_buffer_element *
zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	if (q_req->sbale_curr == qdio->max_sbale_per_sbal - 1)
		return zfcp_qdio_sbal_chain(qdio, q_req);
	q_req->sbale_curr++;
	return zfcp_qdio_sbale_curr(qdio, q_req);
}

/**
 * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * @sg: scatter-gather list
 * Returns: zero on success, or -EINVAL if the request does not fit
 */
int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
			    struct scatterlist *sg)
{
	struct qdio_buffer_element *sbale;

	/* set storage-block type for this request */
	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->sflags |= q_req->sbtype;

	for (; sg; sg = sg_next(sg)) {
		sbale = zfcp_qdio_sbale_next(qdio, q_req);
		if (!sbale) {
			atomic_inc(&qdio->req_q_full);
			zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
					     q_req->sbal_number);
			return -EINVAL;
		}
		sbale->addr = sg_phys(sg);
		sbale->length = sg->length;
	}
	return 0;
}
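/*
 * Example (sketch): a caller that has already set up q_req via
 * zfcp_qdio_req_init() maps a two-element scatter-gather list into the
 * request like this (buf0/buf1 and their lengths are placeholders):
 *
 *	struct scatterlist sg[2];
 *
 *	sg_init_table(sg, 2);
 *	sg_set_buf(&sg[0], buf0, len0);
 *	sg_set_buf(&sg[1], buf1, len1);
 *	if (zfcp_qdio_sbals_from_sg(qdio, q_req, sg))
 *		return -EIO;	(too large; the SBALs were zeroed again)
 */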
static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
{
	if (atomic_read(&qdio->req_q_free) ||
	    !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return 1;
	return 0;
}

/**
 * zfcp_qdio_sbal_get - get free sbal in request queue, wait if necessary
 * @qdio: pointer to struct zfcp_qdio
 *
 * The req_q_lock must be held by the caller of this function, and
 * this function may only be called from process context; it will
 * sleep when waiting for a free sbal.
 *
 * Returns: 0 on success, -EIO if there is no free sbal after waiting.
 */
int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
{
	long ret;

	ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
		       zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);

	if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return -EIO;

	if (ret > 0)
		return 0;

	if (!ret) {
		atomic_inc(&qdio->req_q_full);
		/* assume hanging outbound queue, try queue recovery */
		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
	}

	return -EIO;
}
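/*
 * Typical caller pattern (sketch; the FSF layer wraps this in its own
 * request-setup helper):
 *
 *	spin_lock_irq(&qdio->req_q_lock);
 *	if (zfcp_qdio_sbal_get(qdio))
 *		goto out;	(adapter down, or still no free SBAL)
 *	(build the request, then zfcp_qdio_send(qdio, q_req))
 * out:
 *	spin_unlock_irq(&qdio->req_q_lock);
 */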
/**
 * zfcp_qdio_send - send req to QDIO
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * Returns: 0 on success, error otherwise
 */
int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	int retval;
	u8 sbal_number = q_req->sbal_number;

	spin_lock(&qdio->stat_lock);
	zfcp_qdio_account(qdio);
	spin_unlock(&qdio->stat_lock);

	retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0,
			 q_req->sbal_first, sbal_number);

	if (unlikely(retval)) {
		zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
				     sbal_number);
		return retval;
	}

	/* account for transferred buffers */
	atomic_sub(sbal_number, &qdio->req_q_free);
	qdio->req_q_idx += sbal_number;
	qdio->req_q_idx %= QDIO_MAX_BUFFERS_PER_Q;

	return 0;
}

/**
 * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
 * @qdio: pointer to struct zfcp_qdio
 * Returns: -ENOMEM on memory allocation error or return value from
 *          qdio_allocate
 */
static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
{
	int ret;

	ret = qdio_alloc_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
	if (ret)
		return -ENOMEM;

	ret = qdio_alloc_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
	if (ret)
		goto free_req_q;

	init_waitqueue_head(&qdio->req_q_wq);

	ret = qdio_allocate(qdio->adapter->ccw_device, 1, 1);
	if (ret)
		goto free_res_q;

	return 0;

free_res_q:
	qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
free_req_q:
	qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
	return ret;
}

/**
 * zfcp_qdio_close - close qdio queues for an adapter
 * @qdio: pointer to structure zfcp_qdio
 */
void zfcp_qdio_close(struct zfcp_qdio *qdio)
{
	struct zfcp_adapter *adapter = qdio->adapter;
	int idx, count;

	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return;

	/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
	spin_lock_irq(&qdio->req_q_lock);
	atomic_andnot(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
	spin_unlock_irq(&qdio->req_q_lock);

	wake_up(&qdio->req_q_wq);

	qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);

	/* cleanup used outbound sbals */
	count = atomic_read(&qdio->req_q_free);
	if (count < QDIO_MAX_BUFFERS_PER_Q) {
		idx = (qdio->req_q_idx + count) % QDIO_MAX_BUFFERS_PER_Q;
		count = QDIO_MAX_BUFFERS_PER_Q - count;
		zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
	}
	qdio->req_q_idx = 0;
	atomic_set(&qdio->req_q_free, 0);
}

void zfcp_qdio_shost_update(struct zfcp_adapter *const adapter,
			    const struct zfcp_qdio *const qdio)
{
	struct Scsi_Host *const shost = adapter->scsi_host;

	if (shost == NULL)
		return;

	shost->sg_tablesize = qdio->max_sbale_per_req;
	shost->max_sectors = qdio->max_sbale_per_req * 8;
}

/**
 * zfcp_qdio_open - prepare and initialize request and response queues
 * @qdio: pointer to struct zfcp_qdio
 * Returns: 0 on success, otherwise -EIO
 */
int zfcp_qdio_open(struct zfcp_qdio *qdio)
{
	struct qdio_buffer **input_sbals[1] = {qdio->res_q};
	struct qdio_buffer **output_sbals[1] = {qdio->req_q};
	struct qdio_buffer_element *sbale;
	struct qdio_initialize init_data = {0};
	struct zfcp_adapter *adapter = qdio->adapter;
	struct ccw_device *cdev = adapter->ccw_device;
	struct qdio_ssqd_desc ssqd;
	int cc;

	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
		return -EIO;

	atomic_andnot(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
		      &qdio->adapter->status);

	init_data.q_format = QDIO_ZFCP_QFMT;
	memcpy(init_data.adapter_name, dev_name(&cdev->dev), 8);
	ASCEBC(init_data.adapter_name, 8);
	init_data.qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
	if (enable_multibuffer)
		init_data.qdr_ac |= QDR_AC_MULTI_BUFFER_ENABLE;
	init_data.no_input_qs = 1;
	init_data.no_output_qs = 1;
	init_data.input_handler = zfcp_qdio_int_resp;
	init_data.output_handler = zfcp_qdio_int_req;
	init_data.int_parm = (unsigned long) qdio;
	init_data.input_sbal_addr_array = input_sbals;
	init_data.output_sbal_addr_array = output_sbals;
	init_data.scan_threshold =
		QDIO_MAX_BUFFERS_PER_Q - ZFCP_QDIO_MAX_SBALS_PER_REQ * 2;

	if (qdio_establish(cdev, &init_data))
		goto failed_establish;

	if (qdio_get_ssqd_desc(cdev, &ssqd))
		goto failed_qdio;

	if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED)
		atomic_or(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
			  &qdio->adapter->status);

	if (ssqd.qdioac2 & CHSC_AC2_MULTI_BUFFER_ENABLED) {
		atomic_or(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER;
	} else {
		atomic_andnot(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER - 1;
	}
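	/*
	 * The first two SBALEs of a request carry the request ID and the
	 * command itself (as set up by zfcp_qdio_req_init() in
	 * zfcp_qdio.h), hence the "- 2" in the computation below of how
	 * many data SBALEs fit into one request.
	 */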
	qdio->max_sbale_per_req =
		ZFCP_QDIO_MAX_SBALS_PER_REQ * qdio->max_sbale_per_sbal
		- 2;
	if (qdio_activate(cdev))
		goto failed_qdio;

	for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
		sbale = &(qdio->res_q[cc]->element[0]);
		sbale->length = 0;
		sbale->eflags = SBAL_EFLAGS_LAST_ENTRY;
		sbale->sflags = 0;
		sbale->addr = 0;
	}

	if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q))
		goto failed_qdio;

	/* set index of first available SBAL / number of available SBALs */
	qdio->req_q_idx = 0;
	atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
	atomic_or(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);

	zfcp_qdio_shost_update(adapter, qdio);

	return 0;

failed_qdio:
	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
failed_establish:
	dev_err(&cdev->dev,
		"Setting up the QDIO connection to the FCP adapter failed\n");
	return -EIO;
}

void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
{
	if (!qdio)
		return;

	if (qdio->adapter->ccw_device)
		qdio_free(qdio->adapter->ccw_device);

	qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
	qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
	kfree(qdio);
}

int zfcp_qdio_setup(struct zfcp_adapter *adapter)
{
	struct zfcp_qdio *qdio;

	qdio = kzalloc(sizeof(struct zfcp_qdio), GFP_KERNEL);
	if (!qdio)
		return -ENOMEM;

	qdio->adapter = adapter;

	if (zfcp_qdio_allocate(qdio)) {
		kfree(qdio);
		return -ENOMEM;
	}

	spin_lock_init(&qdio->req_q_lock);
	spin_lock_init(&qdio->stat_lock);

	adapter->qdio = qdio;
	return 0;
}

/**
 * zfcp_qdio_siosl - Trigger logging in FCP channel
 * @adapter: The zfcp_adapter where to trigger logging
 *
 * Call the cio siosl function to trigger hardware logging. This
 * wrapper function sets a flag to ensure hardware logging is only
 * triggered once before going through qdio shutdown.
 *
 * The triggers are always run from qdio tasklet context, so no
 * additional synchronization is necessary.
 */
void zfcp_qdio_siosl(struct zfcp_adapter *adapter)
{
	int rc;

	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_SIOSL_ISSUED)
		return;

	rc = ccw_device_siosl(adapter->ccw_device);
	if (!rc)
		atomic_or(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
			  &adapter->status);
}
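/*
 * Expected lifecycle (sketch): the adapter code calls zfcp_qdio_setup()
 * once to allocate the queues, brackets each period of QDIO operation
 * with zfcp_qdio_open()/zfcp_qdio_close(), and calls zfcp_qdio_destroy()
 * on final teardown.
 */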