// SPDX-License-Identifier: GPL-2.0
/*
 * zfcp device driver
 *
 * Setup and helper functions to access QDIO.
 *
 * Copyright IBM Corp. 2002, 2020
 */

#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/lockdep.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "zfcp_ext.h"
#include "zfcp_qdio.h"

static bool enable_multibuffer = true;
module_param_named(datarouter, enable_multibuffer, bool, 0400);
MODULE_PARM_DESC(datarouter, "Enable hardware data router support (default on)");

#define ZFCP_QDIO_REQUEST_RESCAN_MSECS	(MSEC_PER_SEC * 10)
#define ZFCP_QDIO_REQUEST_SCAN_MSECS	MSEC_PER_SEC

static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *dbftag,
				    unsigned int qdio_err)
{
	struct zfcp_adapter *adapter = qdio->adapter;

	dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");

	if (qdio_err & QDIO_ERROR_SLSB_STATE) {
		zfcp_qdio_siosl(adapter);
		zfcp_erp_adapter_shutdown(adapter, 0, dbftag);
		return;
	}
	zfcp_erp_adapter_reopen(adapter,
				ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
				ZFCP_STATUS_COMMON_ERP_FAILED, dbftag);
}

static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
{
	int i, sbal_idx;

	for (i = first; i < first + cnt; i++) {
		sbal_idx = i % QDIO_MAX_BUFFERS_PER_Q;
		memset(sbal[sbal_idx], 0, sizeof(struct qdio_buffer));
	}
}

/* this needs to be called prior to updating the queue fill level */
static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
{
	unsigned long long now, span;
	int used;

	now = get_tod_clock_monotonic();
	span = (now - qdio->req_q_time) >> 12;
	used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
	qdio->req_q_util += used * span;
	qdio->req_q_time = now;
}

static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
			      int queue_no, int idx, int count,
			      unsigned long parm)
{
	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;

	zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
}

static void zfcp_qdio_request_tasklet(struct tasklet_struct *tasklet)
{
	struct zfcp_qdio *qdio = from_tasklet(qdio, tasklet, request_tasklet);
	struct ccw_device *cdev = qdio->adapter->ccw_device;
	unsigned int start, error;
	int completed;

	completed = qdio_inspect_queue(cdev, 0, false, &start, &error);
	if (completed > 0) {
		if (error) {
			zfcp_qdio_handler_error(qdio, "qdreqt1", error);
		} else {
			/* cleanup all SBALs being program-owned now */
			zfcp_qdio_zero_sbals(qdio->req_q, start, completed);

			spin_lock_irq(&qdio->stat_lock);
			zfcp_qdio_account(qdio);
			spin_unlock_irq(&qdio->stat_lock);
			atomic_add(completed, &qdio->req_q_free);
			wake_up(&qdio->req_q_wq);
		}
	}

	if (atomic_read(&qdio->req_q_free) < QDIO_MAX_BUFFERS_PER_Q)
		timer_reduce(&qdio->request_timer,
			     jiffies + msecs_to_jiffies(ZFCP_QDIO_REQUEST_RESCAN_MSECS));
}

static void zfcp_qdio_request_timer(struct timer_list *timer)
{
	struct zfcp_qdio *qdio = from_timer(qdio, timer, request_timer);

	tasklet_schedule(&qdio->request_tasklet);
}
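/*
 * Illustrative sketch (not part of the driver): zfcp_qdio_account()
 * integrates "request queue buffers in use" over time; the TOD delta
 * shifted right by 12 bits is roughly microseconds, so req_q_util
 * accumulates buffer-microseconds. A reader holding stat_lock could
 * derive the average fill level over an interval, assuming hypothetical
 * snapshot variables last_util/last_time:
 *
 *	spin_lock_bh(&qdio->stat_lock);
 *	zfcp_qdio_account(qdio);
 *	util = qdio->req_q_util - last_util;	// buffer-microseconds
 *	span = (qdio->req_q_time - last_time) >> 12;	// microseconds
 *	spin_unlock_bh(&qdio->stat_lock);
 *	if (span)
 *		avg_used = div_u64(util, span);	// avg buffers in flight
 */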
static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
			       int queue_no, int idx, int count,
			       unsigned long parm)
{
	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
	struct zfcp_adapter *adapter = qdio->adapter;
	int sbal_no, sbal_idx;

	if (unlikely(qdio_err)) {
		if (zfcp_adapter_multi_buffer_active(adapter)) {
			void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
			struct qdio_buffer_element *sbale;
			u64 req_id;
			u8 scount;

			memset(pl, 0, sizeof(pl));
			sbale = qdio->res_q[idx]->element;
			req_id = sbale->addr;
			scount = min(sbale->scount + 1,
				     ZFCP_QDIO_MAX_SBALS_PER_REQ + 1);
				     /* incl. signaling SBAL */

			for (sbal_no = 0; sbal_no < scount; sbal_no++) {
				sbal_idx = (idx + sbal_no) %
					QDIO_MAX_BUFFERS_PER_Q;
				pl[sbal_no] = qdio->res_q[sbal_idx];
			}
			zfcp_dbf_hba_def_err(adapter, req_id, scount, pl);
		}
		zfcp_qdio_handler_error(qdio, "qdires1", qdio_err);
		return;
	}

	/*
	 * go through all SBALs from input queue currently
	 * returned by QDIO layer
	 */
	for (sbal_no = 0; sbal_no < count; sbal_no++) {
		sbal_idx = (idx + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
		/* go through all SBALEs of SBAL */
		zfcp_fsf_reqid_check(qdio, sbal_idx);
	}

	/*
	 * put SBALs back to response queue
	 */
	if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count, NULL))
		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2");
}

static void zfcp_qdio_irq_tasklet(struct tasklet_struct *tasklet)
{
	struct zfcp_qdio *qdio = from_tasklet(qdio, tasklet, irq_tasklet);
	struct ccw_device *cdev = qdio->adapter->ccw_device;
	unsigned int start, error;
	int completed;

	if (atomic_read(&qdio->req_q_free) < QDIO_MAX_BUFFERS_PER_Q)
		tasklet_schedule(&qdio->request_tasklet);

	/* Check the Response Queue: */
	completed = qdio_inspect_queue(cdev, 0, true, &start, &error);
	if (completed < 0)
		return;
	if (completed > 0)
		zfcp_qdio_int_resp(cdev, error, 0, start, completed,
				   (unsigned long) qdio);

	if (qdio_start_irq(cdev))
		/* More work pending: */
		tasklet_schedule(&qdio->irq_tasklet);
}

static void zfcp_qdio_poll(struct ccw_device *cdev, unsigned long data)
{
	struct zfcp_qdio *qdio = (struct zfcp_qdio *) data;

	tasklet_schedule(&qdio->irq_tasklet);
}

static struct qdio_buffer_element *
zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	struct qdio_buffer_element *sbale;

	/* set last entry flag in current SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;

	/* don't exceed last allowed SBAL */
	if (q_req->sbal_last == q_req->sbal_limit)
		return NULL;

	/* set chaining flag in first SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->sflags |= SBAL_SFLAGS0_MORE_SBALS;

	/* calculate index of next SBAL */
	q_req->sbal_last++;
	q_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;

	/* keep this request's number of SBALs up-to-date */
	q_req->sbal_number++;
	BUG_ON(q_req->sbal_number > ZFCP_QDIO_MAX_SBALS_PER_REQ);

	/* start at first SBALE of new SBAL */
	q_req->sbale_curr = 0;

	/* set storage-block type for new SBAL */
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->sflags |= q_req->sbtype;

	return sbale;
}

static struct qdio_buffer_element *
zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	if (q_req->sbale_curr == qdio->max_sbale_per_sbal - 1)
		return zfcp_qdio_sbal_chain(qdio, q_req);
	q_req->sbale_curr++;
	return zfcp_qdio_sbale_curr(qdio, q_req);
}
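/*
 * Illustrative layout sketch (an assumption for clarity, not from the
 * original source): a request that spans several SBALs looks roughly
 * like this, following the chaining logic above:
 *
 *	SBAL n:	  SBALE 0: request header (sbtype flags)
 *		  SBALE 1: protocol data (e.g. the QTCB)
 *		  SBALE 2..max_sbale_per_sbal-1: payload,
 *		  first SBALE gets SBAL_SFLAGS0_MORE_SBALS
 *	SBAL n+1: SBALE 0..: payload continues, sbale_curr reset to 0
 *	last SBALE of the request gets SBAL_EFLAGS_LAST_ENTRY
 *
 * which is also why max_sbale_per_req (set in zfcp_qdio_open()) subtracts
 * 2 from the raw SBALE count.
 */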
/**
 * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * @sg: scatter-gather list
 * Returns: zero or -EINVAL on error
 */
int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
			    struct scatterlist *sg)
{
	struct qdio_buffer_element *sbale;

	/* set storage-block type for this request */
	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->sflags |= q_req->sbtype;

	for (; sg; sg = sg_next(sg)) {
		sbale = zfcp_qdio_sbale_next(qdio, q_req);
		if (!sbale) {
			atomic_inc(&qdio->req_q_full);
			zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
					     q_req->sbal_number);
			return -EINVAL;
		}
		sbale->addr = sg_phys(sg);
		sbale->length = sg->length;
	}
	return 0;
}

static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
{
	if (atomic_read(&qdio->req_q_free) ||
	    !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return 1;
	return 0;
}

/**
 * zfcp_qdio_sbal_get - get free sbal in request queue, wait if necessary
 * @qdio: pointer to struct zfcp_qdio
 *
 * The req_q_lock must be held by the caller of this function, and
 * this function may only be called from process context; it will
 * sleep when waiting for a free sbal.
 *
 * Returns: 0 on success, -EIO if there is no free sbal after waiting.
 */
int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
{
	long ret;

	ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
		       zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);

	if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return -EIO;

	if (ret > 0)
		return 0;

	if (!ret) {
		atomic_inc(&qdio->req_q_full);
		/* assume hanging outbound queue, try queue recovery */
		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
	}

	return -EIO;
}
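/*
 * Illustrative sketch (not part of the driver): the calling pattern
 * implied by the locking comments above and below. zfcp_qdio_sbal_get()
 * drops req_q_lock while sleeping and returns with it re-acquired:
 *
 *	spin_lock_irq(&qdio->req_q_lock);
 *	if (zfcp_qdio_sbal_get(qdio))
 *		goto out;	// -EIO: no free SBAL, or QDIO is down
 *	// build the request, e.g. zfcp_qdio_req_init() and
 *	// zfcp_qdio_sbals_from_sg(), then submit it:
 *	ret = zfcp_qdio_send(qdio, &q_req);
 * out:
 *	spin_unlock_irq(&qdio->req_q_lock);
 */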
/**
 * zfcp_qdio_send - send req to QDIO
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * Returns: 0 on success, error otherwise
 */
int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	int retval;
	u8 sbal_number = q_req->sbal_number;

	/*
	 * This should actually be a spin_lock_bh(stat_lock), to protect against
	 * Request Queue completion processing in tasklet context.
	 * But we can't do so (and are safe), as we always get called with IRQs
	 * disabled by spin_lock_irq[save](req_q_lock).
	 */
	lockdep_assert_irqs_disabled();
	spin_lock(&qdio->stat_lock);
	zfcp_qdio_account(qdio);
	spin_unlock(&qdio->stat_lock);

	atomic_sub(sbal_number, &qdio->req_q_free);

	retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0,
			 q_req->sbal_first, sbal_number, NULL);

	if (unlikely(retval)) {
		/* Failed to submit the IO, roll back our modifications. */
		atomic_add(sbal_number, &qdio->req_q_free);
		zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
				     sbal_number);
		return retval;
	}

	if (atomic_read(&qdio->req_q_free) <= 2 * ZFCP_QDIO_MAX_SBALS_PER_REQ)
		tasklet_schedule(&qdio->request_tasklet);
	else
		timer_reduce(&qdio->request_timer,
			     jiffies + msecs_to_jiffies(ZFCP_QDIO_REQUEST_SCAN_MSECS));

	/* account for transferred buffers */
	qdio->req_q_idx += sbal_number;
	qdio->req_q_idx %= QDIO_MAX_BUFFERS_PER_Q;

	return 0;
}

/**
 * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
 * @qdio: pointer to struct zfcp_qdio
 * Returns: -ENOMEM on memory allocation error or return value from
 *          qdio_allocate
 */
static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
{
	int ret;

	ret = qdio_alloc_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
	if (ret)
		return -ENOMEM;

	ret = qdio_alloc_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
	if (ret)
		goto free_req_q;

	init_waitqueue_head(&qdio->req_q_wq);

	ret = qdio_allocate(qdio->adapter->ccw_device, 1, 1);
	if (ret)
		goto free_res_q;

	return 0;

free_res_q:
	qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
free_req_q:
	qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
	return ret;
}

/**
 * zfcp_qdio_close - close qdio queues for an adapter
 * @qdio: pointer to structure zfcp_qdio
 */
void zfcp_qdio_close(struct zfcp_qdio *qdio)
{
	struct zfcp_adapter *adapter = qdio->adapter;
	int idx, count;

	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return;

	/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
	spin_lock_irq(&qdio->req_q_lock);
	atomic_andnot(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
	spin_unlock_irq(&qdio->req_q_lock);

	wake_up(&qdio->req_q_wq);

	tasklet_disable(&qdio->irq_tasklet);
	tasklet_disable(&qdio->request_tasklet);
	del_timer_sync(&qdio->request_timer);
	qdio_stop_irq(adapter->ccw_device);
	qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);

	/* cleanup used outbound sbals */
	count = atomic_read(&qdio->req_q_free);
	if (count < QDIO_MAX_BUFFERS_PER_Q) {
		idx = (qdio->req_q_idx + count) % QDIO_MAX_BUFFERS_PER_Q;
		count = QDIO_MAX_BUFFERS_PER_Q - count;
		zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
	}
	qdio->req_q_idx = 0;
	atomic_set(&qdio->req_q_free, 0);
}

void zfcp_qdio_shost_update(struct zfcp_adapter *const adapter,
			    const struct zfcp_qdio *const qdio)
{
	struct Scsi_Host *const shost = adapter->scsi_host;

	if (shost == NULL)
		return;

	shost->sg_tablesize = qdio->max_sbale_per_req;
	shost->max_sectors = qdio->max_sbale_per_req * 8;
}
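/*
 * Note (an assumption, not from the original source): each data SBALE
 * addresses at most one 4 KiB page, so the max_sectors limit above works
 * out as max_sbale_per_req pages * (4096 / 512) = max_sbale_per_req * 8
 * sectors of 512 bytes each.
 */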
/**
 * zfcp_qdio_open - prepare and initialize response queue
 * @qdio: pointer to struct zfcp_qdio
 * Returns: 0 on success, otherwise -EIO
 */
int zfcp_qdio_open(struct zfcp_qdio *qdio)
{
	struct qdio_buffer **input_sbals[1] = {qdio->res_q};
	struct qdio_buffer **output_sbals[1] = {qdio->req_q};
	struct qdio_buffer_element *sbale;
	struct qdio_initialize init_data = {0};
	struct zfcp_adapter *adapter = qdio->adapter;
	struct ccw_device *cdev = adapter->ccw_device;
	struct qdio_ssqd_desc ssqd;
	int cc;

	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
		return -EIO;

	atomic_andnot(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
		      &qdio->adapter->status);

	init_data.q_format = QDIO_ZFCP_QFMT;
	init_data.qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
	if (enable_multibuffer)
		init_data.qdr_ac |= QDR_AC_MULTI_BUFFER_ENABLE;
	init_data.no_input_qs = 1;
	init_data.no_output_qs = 1;
	init_data.input_handler = zfcp_qdio_int_resp;
	init_data.output_handler = zfcp_qdio_int_req;
	init_data.irq_poll = zfcp_qdio_poll;
	init_data.int_parm = (unsigned long) qdio;
	init_data.input_sbal_addr_array = input_sbals;
	init_data.output_sbal_addr_array = output_sbals;

	if (qdio_establish(cdev, &init_data))
		goto failed_establish;

	if (qdio_get_ssqd_desc(cdev, &ssqd))
		goto failed_qdio;

	if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED)
		atomic_or(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
			  &qdio->adapter->status);

	if (ssqd.qdioac2 & CHSC_AC2_MULTI_BUFFER_ENABLED) {
		atomic_or(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER;
	} else {
		atomic_andnot(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER - 1;
	}

	qdio->max_sbale_per_req =
		ZFCP_QDIO_MAX_SBALS_PER_REQ * qdio->max_sbale_per_sbal
		- 2;
	if (qdio_activate(cdev))
		goto failed_qdio;

	for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
		sbale = &(qdio->res_q[cc]->element[0]);
		sbale->length = 0;
		sbale->eflags = SBAL_EFLAGS_LAST_ENTRY;
		sbale->sflags = 0;
		sbale->addr = 0;
	}

	if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q,
		    NULL))
		goto failed_qdio;

	/* set index of first available SBALs / number of available SBALs */
	qdio->req_q_idx = 0;
	atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
	atomic_or(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);

	/* Enable processing for Request Queue completions: */
	tasklet_enable(&qdio->request_tasklet);
	/* Enable processing for QDIO interrupts: */
	tasklet_enable(&qdio->irq_tasklet);
	/* This results in a qdio_start_irq(): */
	tasklet_schedule(&qdio->irq_tasklet);

	zfcp_qdio_shost_update(adapter, qdio);

	return 0;

failed_qdio:
	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
failed_establish:
	dev_err(&cdev->dev,
		"Setting up the QDIO connection to the FCP adapter failed\n");
	return -EIO;
}

void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
{
	if (!qdio)
		return;

	tasklet_kill(&qdio->irq_tasklet);
	tasklet_kill(&qdio->request_tasklet);

	if (qdio->adapter->ccw_device)
		qdio_free(qdio->adapter->ccw_device);

	qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
	qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
	kfree(qdio);
}
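/*
 * Illustrative sketch (not part of the driver): the expected lifecycle
 * of a zfcp_qdio instance, derived from the functions in this file:
 *
 *	zfcp_qdio_setup(adapter);	  // allocate + init, tasklets disabled
 *	zfcp_qdio_open(adapter->qdio);	  // establish/activate, enable tasklets
 *	// ... I/O via zfcp_qdio_sbal_get()/zfcp_qdio_send() ...
 *	zfcp_qdio_close(adapter->qdio);	  // quiesce + shut down the queues
 *	zfcp_qdio_destroy(adapter->qdio); // kill tasklets, free memory
 */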
int zfcp_qdio_setup(struct zfcp_adapter *adapter)
{
	struct zfcp_qdio *qdio;

	qdio = kzalloc(sizeof(struct zfcp_qdio), GFP_KERNEL);
	if (!qdio)
		return -ENOMEM;

	qdio->adapter = adapter;

	if (zfcp_qdio_allocate(qdio)) {
		kfree(qdio);
		return -ENOMEM;
	}

	spin_lock_init(&qdio->req_q_lock);
	spin_lock_init(&qdio->stat_lock);
	timer_setup(&qdio->request_timer, zfcp_qdio_request_timer, 0);
	tasklet_setup(&qdio->irq_tasklet, zfcp_qdio_irq_tasklet);
	tasklet_setup(&qdio->request_tasklet, zfcp_qdio_request_tasklet);
	tasklet_disable(&qdio->irq_tasklet);
	tasklet_disable(&qdio->request_tasklet);

	adapter->qdio = qdio;
	return 0;
}

/**
 * zfcp_qdio_siosl - Trigger logging in FCP channel
 * @adapter: The zfcp_adapter where to trigger logging
 *
 * Call the cio siosl function to trigger hardware logging. This
 * wrapper function sets a flag to ensure hardware logging is only
 * triggered once before going through qdio shutdown.
 *
 * The triggers are always run from qdio tasklet context, so no
 * additional synchronization is necessary.
 */
void zfcp_qdio_siosl(struct zfcp_adapter *adapter)
{
	int rc;

	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_SIOSL_ISSUED)
		return;

	rc = ccw_device_siosl(adapter->ccw_device);
	if (!rc)
		atomic_or(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
			  &adapter->status);
}