// SPDX-License-Identifier: GPL-2.0
/*
 * zfcp device driver
 *
 * Setup and helper functions to access QDIO.
 *
 * Copyright IBM Corp. 2002, 2010
 */

#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include "zfcp_ext.h"
#include "zfcp_qdio.h"

static bool enable_multibuffer = true;
module_param_named(datarouter, enable_multibuffer, bool, 0400);
MODULE_PARM_DESC(datarouter, "Enable hardware data router support (default on)");

static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id,
				    unsigned int qdio_err)
{
	struct zfcp_adapter *adapter = qdio->adapter;

	dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");

	if (qdio_err & QDIO_ERROR_SLSB_STATE) {
		zfcp_qdio_siosl(adapter);
		zfcp_erp_adapter_shutdown(adapter, 0, id);
		return;
	}
	zfcp_erp_adapter_reopen(adapter,
				ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
				ZFCP_STATUS_COMMON_ERP_FAILED, id);
}

static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
{
	int i, sbal_idx;

	for (i = first; i < first + cnt; i++) {
		sbal_idx = i % QDIO_MAX_BUFFERS_PER_Q;
		memset(sbal[sbal_idx], 0, sizeof(struct qdio_buffer));
	}
}

/* this needs to be called prior to updating the queue fill level */
static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
{
	unsigned long long now, span;
	int used;

	now = get_tod_clock_monotonic();
	span = (now - qdio->req_q_time) >> 12;
	used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
	qdio->req_q_util += used * span;
	qdio->req_q_time = now;
}

static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
			      int queue_no, int idx, int count,
			      unsigned long parm)
{
	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;

	if (unlikely(qdio_err)) {
		zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
		return;
	}

	/* cleanup all SBALs being program-owned now */
	zfcp_qdio_zero_sbals(qdio->req_q, idx, count);

	spin_lock_irq(&qdio->stat_lock);
	zfcp_qdio_account(qdio);
	spin_unlock_irq(&qdio->stat_lock);
	atomic_add(count, &qdio->req_q_free);
	wake_up(&qdio->req_q_wq);
}

static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
			       int queue_no, int idx, int count,
			       unsigned long parm)
{
	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
	struct zfcp_adapter *adapter = qdio->adapter;
	int sbal_no, sbal_idx;

	if (unlikely(qdio_err)) {
		if (zfcp_adapter_multi_buffer_active(adapter)) {
			void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
			struct qdio_buffer_element *sbale;
			u64 req_id;
			u8 scount;

			memset(pl, 0,
			       ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
			sbale = qdio->res_q[idx]->element;
			req_id = (u64) sbale->addr;
			scount = min(sbale->scount + 1,
				     ZFCP_QDIO_MAX_SBALS_PER_REQ + 1);
				     /* incl. signaling SBAL */

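			/* collect the SBALs belonging to the failed
			 * response so they can be logged in the HBA
			 * debug trace below */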
			for (sbal_no = 0; sbal_no < scount; sbal_no++) {
				sbal_idx = (idx + sbal_no) %
					QDIO_MAX_BUFFERS_PER_Q;
				pl[sbal_no] = qdio->res_q[sbal_idx];
			}
			zfcp_dbf_hba_def_err(adapter, req_id, scount, pl);
		}
		zfcp_qdio_handler_error(qdio, "qdires1", qdio_err);
		return;
	}

	/*
	 * go through all SBALs from input queue currently
	 * returned by QDIO layer
	 */
	for (sbal_no = 0; sbal_no < count; sbal_no++) {
		sbal_idx = (idx + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
		/* go through all SBALEs of SBAL */
		zfcp_fsf_reqid_check(qdio, sbal_idx);
	}

	/*
	 * put SBALs back to response queue
	 */
	if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count))
		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2");
}

static struct qdio_buffer_element *
zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	struct qdio_buffer_element *sbale;

	/* set last entry flag in current SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;

	/* don't exceed last allowed SBAL */
	if (q_req->sbal_last == q_req->sbal_limit)
		return NULL;

	/* set chaining flag in first SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->sflags |= SBAL_SFLAGS0_MORE_SBALS;

	/* calculate index of next SBAL */
	q_req->sbal_last++;
	q_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;

	/* keep this request's number of SBALs up-to-date */
	q_req->sbal_number++;
	BUG_ON(q_req->sbal_number > ZFCP_QDIO_MAX_SBALS_PER_REQ);

	/* start at first SBALE of new SBAL */
	q_req->sbale_curr = 0;

	/* set storage-block type for new SBAL */
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->sflags |= q_req->sbtype;

	return sbale;
}

static struct qdio_buffer_element *
zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	if (q_req->sbale_curr == qdio->max_sbale_per_sbal - 1)
		return zfcp_qdio_sbal_chain(qdio, q_req);
	q_req->sbale_curr++;
	return zfcp_qdio_sbale_curr(qdio, q_req);
}

/**
 * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * @sg: scatter-gather list
 * Returns: zero or -EINVAL on error
 */
int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
			    struct scatterlist *sg)
{
	struct qdio_buffer_element *sbale;

	/* set storage-block type for this request */
	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->sflags |= q_req->sbtype;

	for (; sg; sg = sg_next(sg)) {
		sbale = zfcp_qdio_sbale_next(qdio, q_req);
		if (!sbale) {
			atomic_inc(&qdio->req_q_full);
			zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
					     q_req->sbal_number);
			return -EINVAL;
		}
		sbale->addr = sg_virt(sg);
		sbale->length = sg->length;
	}
	return 0;
}

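/*
 * Wait condition for zfcp_qdio_sbal_get(): returns nonzero once a free
 * SBAL is available in the request queue again, or once the adapter's
 * QDIO association is no longer up, so that the caller stops waiting.
 */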
static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
{
	if (atomic_read(&qdio->req_q_free) ||
	    !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return 1;
	return 0;
}

/**
 * zfcp_qdio_sbal_get - get free sbal in request queue, wait if necessary
 * @qdio: pointer to struct zfcp_qdio
 *
 * The req_q_lock must be held by the caller of this function, and
 * this function may only be called from process context; it will
 * sleep when waiting for a free sbal.
 *
 * Returns: 0 on success, -EIO if there is no free sbal after waiting.
 */
int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
{
	long ret;

	ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
		       zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);

	if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return -EIO;

	if (ret > 0)
		return 0;

	if (!ret) {
		atomic_inc(&qdio->req_q_full);
		/* assume hanging outbound queue, try queue recovery */
		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
	}

	return -EIO;
}

/**
 * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * Returns: 0 on success, error otherwise
 */
int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	int retval;
	u8 sbal_number = q_req->sbal_number;

	spin_lock(&qdio->stat_lock);
	zfcp_qdio_account(qdio);
	spin_unlock(&qdio->stat_lock);

	retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0,
			 q_req->sbal_first, sbal_number);

	if (unlikely(retval)) {
		zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
				     sbal_number);
		return retval;
	}

	/* account for transferred buffers */
	atomic_sub(sbal_number, &qdio->req_q_free);
	qdio->req_q_idx += sbal_number;
	qdio->req_q_idx %= QDIO_MAX_BUFFERS_PER_Q;

	return 0;
}

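/*
 * Fill the qdio_initialize block that is passed to both qdio_allocate()
 * and qdio_establish(): one input (response) and one output (request)
 * queue, the zfcp interrupt handlers, and the qdio pointer as interrupt
 * parameter.
 */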
static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
				      struct zfcp_qdio *qdio)
{
	memset(id, 0, sizeof(*id));
	id->cdev = qdio->adapter->ccw_device;
	id->q_format = QDIO_ZFCP_QFMT;
	memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8);
	ASCEBC(id->adapter_name, 8);
	id->qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
	if (enable_multibuffer)
		id->qdr_ac |= QDR_AC_MULTI_BUFFER_ENABLE;
	id->no_input_qs = 1;
	id->no_output_qs = 1;
	id->input_handler = zfcp_qdio_int_resp;
	id->output_handler = zfcp_qdio_int_req;
	id->int_parm = (unsigned long) qdio;
	id->input_sbal_addr_array = (void **) (qdio->res_q);
	id->output_sbal_addr_array = (void **) (qdio->req_q);
	id->scan_threshold =
		QDIO_MAX_BUFFERS_PER_Q - ZFCP_QDIO_MAX_SBALS_PER_REQ * 2;
}

/**
 * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
 * @qdio: pointer to struct zfcp_qdio
 * Returns: -ENOMEM on memory allocation error or return value from
 *          qdio_allocate
 */
static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
{
	struct qdio_initialize init_data;
	int ret;

	ret = qdio_alloc_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
	if (ret)
		return -ENOMEM;

	ret = qdio_alloc_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
	if (ret)
		goto free_req_q;

	zfcp_qdio_setup_init_data(&init_data, qdio);
	init_waitqueue_head(&qdio->req_q_wq);

	ret = qdio_allocate(&init_data);
	if (ret)
		goto free_res_q;

	return 0;

free_res_q:
	qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
free_req_q:
	qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
	return ret;
}

/**
 * zfcp_qdio_close - close qdio queues for an adapter
 * @qdio: pointer to struct zfcp_qdio
 */
void zfcp_qdio_close(struct zfcp_qdio *qdio)
{
	struct zfcp_adapter *adapter = qdio->adapter;
	int idx, count;

	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return;

	/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
	spin_lock_irq(&qdio->req_q_lock);
	atomic_andnot(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
	spin_unlock_irq(&qdio->req_q_lock);

	wake_up(&qdio->req_q_wq);

	qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);

	/* cleanup used outbound sbals */
	count = atomic_read(&qdio->req_q_free);
	if (count < QDIO_MAX_BUFFERS_PER_Q) {
		idx = (qdio->req_q_idx + count) % QDIO_MAX_BUFFERS_PER_Q;
		count = QDIO_MAX_BUFFERS_PER_Q - count;
		zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
	}
	qdio->req_q_idx = 0;
	atomic_set(&qdio->req_q_free, 0);
}

/**
 * zfcp_qdio_open - prepare and establish the request and response queues
 * @qdio: pointer to struct zfcp_qdio
 * Returns: 0 on success, otherwise -EIO
 */
int zfcp_qdio_open(struct zfcp_qdio *qdio)
{
	struct qdio_buffer_element *sbale;
	struct qdio_initialize init_data;
	struct zfcp_adapter *adapter = qdio->adapter;
	struct ccw_device *cdev = adapter->ccw_device;
	struct qdio_ssqd_desc ssqd;
	int cc;

	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
		return -EIO;

	atomic_andnot(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
		      &qdio->adapter->status);

	zfcp_qdio_setup_init_data(&init_data, qdio);

	if (qdio_establish(&init_data))
		goto failed_establish;

	if (qdio_get_ssqd_desc(init_data.cdev, &ssqd))
		goto failed_qdio;

	if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED)
		atomic_or(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
			  &qdio->adapter->status);

	if (ssqd.qdioac2 & CHSC_AC2_MULTI_BUFFER_ENABLED) {
		atomic_or(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER;
	} else {
		atomic_andnot(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER - 1;
	}

	qdio->max_sbale_per_req =
		ZFCP_QDIO_MAX_SBALS_PER_REQ * qdio->max_sbale_per_sbal
		- 2;
	if (qdio_activate(cdev))
		goto failed_qdio;

	for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
		sbale = &(qdio->res_q[cc]->element[0]);
		sbale->length = 0;
		sbale->eflags = SBAL_EFLAGS_LAST_ENTRY;
		sbale->sflags = 0;
		sbale->addr = NULL;
	}

	if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q))
		goto failed_qdio;

	/* set index of first available SBALS / number of available SBALS */
	qdio->req_q_idx = 0;
	atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
	atomic_or(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);

	if (adapter->scsi_host) {
		adapter->scsi_host->sg_tablesize = qdio->max_sbale_per_req;
		adapter->scsi_host->max_sectors = qdio->max_sbale_per_req * 8;
	}

	return 0;

failed_qdio:
	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
failed_establish:
	dev_err(&cdev->dev,
		"Setting up the QDIO connection to the FCP adapter failed\n");
	return -EIO;
}

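/**
 * zfcp_qdio_destroy - tear down the QDIO data structures of an adapter
 * @qdio: pointer to struct zfcp_qdio
 *
 * Frees the qdio buffers of both queues, the QDIO data allocated for the
 * ccw device, and the zfcp_qdio structure itself.
 */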
void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
{
	if (!qdio)
		return;

	if (qdio->adapter->ccw_device)
		qdio_free(qdio->adapter->ccw_device);

	qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
	qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
	kfree(qdio);
}

int zfcp_qdio_setup(struct zfcp_adapter *adapter)
{
	struct zfcp_qdio *qdio;

	qdio = kzalloc(sizeof(struct zfcp_qdio), GFP_KERNEL);
	if (!qdio)
		return -ENOMEM;

	qdio->adapter = adapter;

	if (zfcp_qdio_allocate(qdio)) {
		kfree(qdio);
		return -ENOMEM;
	}

	spin_lock_init(&qdio->req_q_lock);
	spin_lock_init(&qdio->stat_lock);

	adapter->qdio = qdio;
	return 0;
}

/**
 * zfcp_qdio_siosl - Trigger logging in FCP channel
 * @adapter: The zfcp_adapter where to trigger logging
 *
 * Call the cio siosl function to trigger hardware logging. This
 * wrapper function sets a flag to ensure hardware logging is only
 * triggered once before going through qdio shutdown.
 *
 * The triggers are always run from qdio tasklet context, so no
 * additional synchronization is necessary.
 */
void zfcp_qdio_siosl(struct zfcp_adapter *adapter)
{
	int rc;

	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_SIOSL_ISSUED)
		return;

	rc = ccw_device_siosl(adapter->ccw_device);
	if (!rc)
		atomic_or(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
			  &adapter->status);
}