/*
 * Copyright 2014 Cisco Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>

#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <scsi/scsi_tcq.h>

#include "snic_io.h"
#include "snic.h"
#include "cq_enet_desc.h"
#include "snic_fwint.h"

static void
snic_wq_cmpl_frame_send(struct vnic_wq *wq,
			struct cq_desc *cq_desc,
			struct vnic_wq_buf *buf,
			void *opaque)
{
	struct snic *snic = svnic_dev_priv(wq->vdev);

	SNIC_BUG_ON(buf->os_buf == NULL);

	if (snic_log_level & SNIC_DESC_LOGGING)
		SNIC_HOST_INFO(snic->shost,
			       "Ack received for snic_host_req %p.\n",
			       buf->os_buf);

	SNIC_TRC(snic->shost->host_no, 0, 0,
		 ((ulong)(buf->os_buf) - sizeof(struct snic_req_info)), 0, 0,
		 0);

	buf->os_buf = NULL;
}

static int
snic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
			  struct cq_desc *cq_desc,
			  u8 type,
			  u16 q_num,
			  u16 cmpl_idx,
			  void *opaque)
{
	struct snic *snic = svnic_dev_priv(vdev);
	unsigned long flags;

	SNIC_BUG_ON(q_num != 0);

	spin_lock_irqsave(&snic->wq_lock[q_num], flags);
	svnic_wq_service(&snic->wq[q_num],
			 cq_desc,
			 cmpl_idx,
			 snic_wq_cmpl_frame_send,
			 NULL);
	spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);

	return 0;
} /* end of snic_cmpl_handler_cont */

int
snic_wq_cmpl_handler(struct snic *snic, int work_to_do)
{
	unsigned int work_done = 0;
	unsigned int i;

	snic->s_stats.misc.last_ack_time = jiffies;
	for (i = 0; i < snic->wq_count; i++) {
		work_done += svnic_cq_service(&snic->cq[i],
					      work_to_do,
					      snic_wq_cmpl_handler_cont,
					      NULL);
	}

	return work_done;
} /* end of snic_wq_cmpl_handler */

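/*
 * snic_free_wq_buf : Releases one WQ buffer during queue cleanup.
 * Unmaps the request for DMA, unlinks it from spl_cmd_list if it is still
 * queued there, and frees the response buffer and the request itself.
 */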
void
snic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct snic_host_req *req = buf->os_buf;
	struct snic *snic = svnic_dev_priv(wq->vdev);
	struct snic_req_info *rqi = NULL;
	unsigned long flags;

	dma_unmap_single(&snic->pdev->dev, buf->dma_addr, buf->len,
			 DMA_TO_DEVICE);

	rqi = req_to_rqi(req);
	spin_lock_irqsave(&snic->spl_cmd_lock, flags);
	if (list_empty(&rqi->list)) {
		spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
		goto end;
	}

	SNIC_BUG_ON(rqi->list.next == NULL); /* if not added to spl_cmd_list */
	list_del_init(&rqi->list);
	spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);

	if (rqi->sge_va) {
		snic_pci_unmap_rsp_buf(snic, rqi);
		kfree((void *)rqi->sge_va);
		rqi->sge_va = 0;
	}
	snic_req_free(snic, rqi);
	SNIC_HOST_INFO(snic->shost, "snic_free_wq_buf .. freed.\n");

end:
	return;
}

/* Criteria to select work queue in multi queue mode */
static int
snic_select_wq(struct snic *snic)
{
	/* No multi queue support for now */
	BUILD_BUG_ON(SNIC_WQ_MAX > 1);

	return 0;
}

static int
snic_wqdesc_avail(struct snic *snic, int q_num, int req_type)
{
	int nr_wqdesc = snic->config.wq_enet_desc_count;

	if (q_num > 0) {
		/*
		 * Multi Queue case, additional care is required.
		 * Per WQ active requests need to be maintained.
		 */
		SNIC_HOST_INFO(snic->shost, "desc_avail: Multi Queue case.\n");
		SNIC_BUG_ON(q_num > 0);

		return -1;
	}

	nr_wqdesc -= atomic64_read(&snic->s_stats.fw.actv_reqs);

	return ((req_type == SNIC_REQ_HBA_RESET) ? nr_wqdesc : nr_wqdesc - 1);
}

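/*
 * snic_queue_wq_desc : Posts a host request on the firmware work queue.
 * Maps the request buffer for DMA, checks descriptor availability under
 * the WQ lock, queues the descriptor and updates active request stats.
 * Returns 0 on success, -ENOMEM if the mapping fails or the WQ is full.
 */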
int
snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len)
{
	dma_addr_t pa = 0;
	unsigned long flags;
	struct snic_fw_stats *fwstats = &snic->s_stats.fw;
	struct snic_host_req *req = (struct snic_host_req *) os_buf;
	long act_reqs;
	long desc_avail = 0;
	int q_num = 0;

	snic_print_desc(__func__, os_buf, len);

	/* Map request buffer */
	pa = dma_map_single(&snic->pdev->dev, os_buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&snic->pdev->dev, pa)) {
		SNIC_HOST_ERR(snic->shost, "qdesc: PCI DMA Mapping Fail.\n");

		return -ENOMEM;
	}

	req->req_pa = (ulong)pa;

	q_num = snic_select_wq(snic);

	spin_lock_irqsave(&snic->wq_lock[q_num], flags);
	desc_avail = snic_wqdesc_avail(snic, q_num, req->hdr.type);
	if (desc_avail <= 0) {
		dma_unmap_single(&snic->pdev->dev, pa, len, DMA_TO_DEVICE);
		req->req_pa = 0;
		spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);
		atomic64_inc(&snic->s_stats.misc.wq_alloc_fail);
		SNIC_DBG("host = %d, WQ is Full\n", snic->shost->host_no);

		return -ENOMEM;
	}

	snic_queue_wq_eth_desc(&snic->wq[q_num], os_buf, pa, len, 0, 0, 1);
	/*
	 * Update stats
	 * note: when multi queue enabled, fw actv_reqs should be per queue.
	 */
	act_reqs = atomic64_inc_return(&fwstats->actv_reqs);
	spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);

	if (act_reqs > atomic64_read(&fwstats->max_actv_reqs))
		atomic64_set(&fwstats->max_actv_reqs, act_reqs);

	return 0;
} /* end of snic_queue_wq_desc() */

/*
 * snic_handle_untagged_req: Adds snic specific requests to spl_cmd_list.
 * Purpose : Used during driver unload to clean up the requests.
 */
void
snic_handle_untagged_req(struct snic *snic, struct snic_req_info *rqi)
{
	unsigned long flags;

	INIT_LIST_HEAD(&rqi->list);

	spin_lock_irqsave(&snic->spl_cmd_lock, flags);
	list_add_tail(&rqi->list, &snic->spl_cmd_list);
	spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
}

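/*
 * Request memory layout (illustrative sketch, inferred from snic_req_init
 * and req_to_sgl below):
 *
 *   rqi                 rqi->req (= rqi + 1)       req_to_sgl(rqi->req)
 *   +-------------------+--------------------------+----------------------+
 *   | snic_req_info     | snic_host_req            | snic_sg_desc[sg_cnt] |
 *   +-------------------+--------------------------+----------------------+
 *
 * req->hdr.init_ctx stores the rqi pointer so that completion paths can
 * recover the request info via req_to_rqi().
 */
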
/*
 * snic_req_init:
 * Allocates snic_req_info + snic_host_req + sgl data, and initializes.
 */
struct snic_req_info *
snic_req_init(struct snic *snic, int sg_cnt)
{
	u8 typ;
	struct snic_req_info *rqi = NULL;

	typ = (sg_cnt <= SNIC_REQ_CACHE_DFLT_SGL) ?
		SNIC_REQ_CACHE_DFLT_SGL : SNIC_REQ_CACHE_MAX_SGL;

	rqi = mempool_alloc(snic->req_pool[typ], GFP_ATOMIC);
	if (!rqi) {
		atomic64_inc(&snic->s_stats.io.alloc_fail);
		SNIC_HOST_ERR(snic->shost,
			      "Failed to allocate memory from snic req pool id = %d\n",
			      typ);
		return rqi;
	}

	memset(rqi, 0, sizeof(*rqi));
	rqi->rq_pool_type = typ;
	rqi->start_time = jiffies;
	rqi->req = (struct snic_host_req *) (rqi + 1);
	rqi->req_len = sizeof(struct snic_host_req);
	rqi->snic = snic;

	if (sg_cnt == 0)
		goto end;

	rqi->req_len += (sg_cnt * sizeof(struct snic_sg_desc));

	if (sg_cnt > atomic64_read(&snic->s_stats.io.max_sgl))
		atomic64_set(&snic->s_stats.io.max_sgl, sg_cnt);

	SNIC_BUG_ON(sg_cnt > SNIC_MAX_SG_DESC_CNT);
	atomic64_inc(&snic->s_stats.io.sgl_cnt[sg_cnt - 1]);

end:
	memset(rqi->req, 0, rqi->req_len);

	/* pre initialization of init_ctx to support req_to_rqi */
	rqi->req->hdr.init_ctx = (ulong) rqi;

	SNIC_SCSI_DBG(snic->shost, "Req_alloc:rqi = %p allocated.\n", rqi);

	return rqi;
} /* end of snic_req_init */

/*
 * snic_abort_req_init : Inits abort request.
 */
struct snic_host_req *
snic_abort_req_init(struct snic *snic, struct snic_req_info *rqi)
{
	struct snic_host_req *req = NULL;

	SNIC_BUG_ON(!rqi);

	/* If abort to be issued second time, then reuse */
	if (rqi->abort_req)
		return rqi->abort_req;

	req = mempool_alloc(snic->req_pool[SNIC_REQ_TM_CACHE], GFP_ATOMIC);
	if (!req) {
		SNIC_HOST_ERR(snic->shost, "abts:Failed to alloc tm req.\n");
		WARN_ON_ONCE(1);

		return NULL;
	}

	rqi->abort_req = req;
	memset(req, 0, sizeof(struct snic_host_req));
	/* pre initialization of init_ctx to support req_to_rqi */
	req->hdr.init_ctx = (ulong) rqi;

	return req;
} /* end of snic_abort_req_init */

/*
 * snic_dr_req_init : Inits device reset req
 */
struct snic_host_req *
snic_dr_req_init(struct snic *snic, struct snic_req_info *rqi)
{
	struct snic_host_req *req = NULL;

	SNIC_BUG_ON(!rqi);

	req = mempool_alloc(snic->req_pool[SNIC_REQ_TM_CACHE], GFP_ATOMIC);
	if (!req) {
		SNIC_HOST_ERR(snic->shost, "dr:Failed to alloc tm req.\n");
		WARN_ON_ONCE(1);

		return NULL;
	}

	SNIC_BUG_ON(rqi->dr_req != NULL);
	rqi->dr_req = req;
	memset(req, 0, sizeof(struct snic_host_req));
	/* pre initialization of init_ctx to support req_to_rqi */
	req->hdr.init_ctx = (ulong) rqi;

	return req;
} /* end of snic_dr_req_init */

/* frees snic_req_info and snic_host_req */
void
snic_req_free(struct snic *snic, struct snic_req_info *rqi)
{
	SNIC_BUG_ON(rqi->req == rqi->abort_req);
	SNIC_BUG_ON(rqi->req == rqi->dr_req);
	SNIC_BUG_ON(rqi->sge_va != 0);

	SNIC_SCSI_DBG(snic->shost,
		      "Req_free:rqi %p:ioreq %p:abt %p:dr %p\n",
		      rqi, rqi->req, rqi->abort_req, rqi->dr_req);

	if (rqi->abort_req) {
		if (rqi->abort_req->req_pa)
			dma_unmap_single(&snic->pdev->dev,
					 rqi->abort_req->req_pa,
					 sizeof(struct snic_host_req),
					 DMA_TO_DEVICE);

		mempool_free(rqi->abort_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
	}

	if (rqi->dr_req) {
		if (rqi->dr_req->req_pa)
			dma_unmap_single(&snic->pdev->dev,
					 rqi->dr_req->req_pa,
					 sizeof(struct snic_host_req),
					 DMA_TO_DEVICE);

		mempool_free(rqi->dr_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
	}

	if (rqi->req->req_pa)
		dma_unmap_single(&snic->pdev->dev,
				 rqi->req->req_pa,
				 rqi->req_len,
				 DMA_TO_DEVICE);

	mempool_free(rqi, snic->req_pool[rqi->rq_pool_type]);
}

void
snic_pci_unmap_rsp_buf(struct snic *snic, struct snic_req_info *rqi)
{
	struct snic_sg_desc *sgd;

	sgd = req_to_sgl(rqi_to_req(rqi));
	SNIC_BUG_ON(sgd[0].addr == 0);
	dma_unmap_single(&snic->pdev->dev,
			 le64_to_cpu(sgd[0].addr),
			 le32_to_cpu(sgd[0].len),
			 DMA_FROM_DEVICE);
}

/*
 * snic_free_all_untagged_reqs: Walks through untagged reqs and frees them.
 */
void
snic_free_all_untagged_reqs(struct snic *snic)
{
	struct snic_req_info *rqi;
	struct list_head *cur, *nxt;
	unsigned long flags;

	spin_lock_irqsave(&snic->spl_cmd_lock, flags);
	list_for_each_safe(cur, nxt, &snic->spl_cmd_list) {
		rqi = list_entry(cur, struct snic_req_info, list);
		list_del_init(&rqi->list);
		if (rqi->sge_va) {
			snic_pci_unmap_rsp_buf(snic, rqi);
			kfree((void *)rqi->sge_va);
			rqi->sge_va = 0;
		}

		snic_req_free(snic, rqi);
	}
	spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
}

/*
 * snic_release_untagged_req : Unlinks the untagged req and frees it.
 */
void
snic_release_untagged_req(struct snic *snic, struct snic_req_info *rqi)
{
	unsigned long flags;

	spin_lock_irqsave(&snic->snic_lock, flags);
	if (snic->in_remove) {
		spin_unlock_irqrestore(&snic->snic_lock, flags);
		goto end;
	}
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	spin_lock_irqsave(&snic->spl_cmd_lock, flags);
	if (list_empty(&rqi->list)) {
		spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
		goto end;
	}
	list_del_init(&rqi->list);
	spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
	snic_req_free(snic, rqi);

end:
	return;
}

/* dump buf in hex fmt */
void
snic_hex_dump(char *pfx, char *data, int len)
{
	SNIC_INFO("%s Dumping Data of Len = %d\n", pfx, len);
	print_hex_dump_bytes(pfx, DUMP_PREFIX_NONE, data, len);
}

#define LINE_BUFSZ	128	/* for snic_print_desc fn */
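/*
 * snic_dump_desc : Logs a one-line summary of a host request or firmware
 * completion descriptor; the full byte stream is hex dumped only when the
 * corresponding bit is set in snic_log_level.
 */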
static void
snic_dump_desc(const char *fn, char *os_buf, int len)
{
	struct snic_host_req *req = (struct snic_host_req *) os_buf;
	struct snic_fw_req *fwreq = (struct snic_fw_req *) os_buf;
	struct snic_req_info *rqi = NULL;
	char line[LINE_BUFSZ] = { '\0' };
	char *cmd_str = NULL;

	if (req->hdr.type >= SNIC_RSP_REPORT_TGTS_CMPL)
		rqi = (struct snic_req_info *) fwreq->hdr.init_ctx;
	else
		rqi = (struct snic_req_info *) req->hdr.init_ctx;

	SNIC_BUG_ON(rqi == NULL || rqi->req == NULL);
	switch (req->hdr.type) {
	case SNIC_REQ_REPORT_TGTS:
		cmd_str = "report-tgt : ";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_REPORT_TGTS :");
		break;

	case SNIC_REQ_ICMND:
		cmd_str = "icmnd : ";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_ICMND : 0x%x :",
			 req->u.icmnd.cdb[0]);
		break;

	case SNIC_REQ_ITMF:
		cmd_str = "itmf : ";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_ITMF :");
		break;

	case SNIC_REQ_HBA_RESET:
		cmd_str = "hba reset :";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_HBA_RESET :");
		break;

	case SNIC_REQ_EXCH_VER:
		cmd_str = "exch ver : ";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_EXCH_VER :");
		break;

	case SNIC_REQ_TGT_INFO:
		cmd_str = "tgt info : ";
		break;

	case SNIC_RSP_REPORT_TGTS_CMPL:
		cmd_str = "report tgt cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_REPORT_TGTS_CMPL :");
		break;

	case SNIC_RSP_ICMND_CMPL:
		cmd_str = "icmnd_cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_ICMND_CMPL : 0x%x :",
			 rqi->req->u.icmnd.cdb[0]);
		break;

	case SNIC_RSP_ITMF_CMPL:
		cmd_str = "itmf_cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_ITMF_CMPL :");
		break;

	case SNIC_RSP_HBA_RESET_CMPL:
		cmd_str = "hba_reset_cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_HBA_RESET_CMPL :");
		break;

	case SNIC_RSP_EXCH_VER_CMPL:
		cmd_str = "exch_ver_cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_EXCH_VER_CMPL :");
		break;

	case SNIC_MSG_ACK:
		cmd_str = "msg ack : ";
		snprintf(line, LINE_BUFSZ, "SNIC_MSG_ACK :");
		break;

	case SNIC_MSG_ASYNC_EVNOTIFY:
		cmd_str = "async notify : ";
		snprintf(line, LINE_BUFSZ, "SNIC_MSG_ASYNC_EVNOTIFY :");
		break;

	default:
		cmd_str = "unknown : ";
		SNIC_BUG_ON(1);
		break;
	}

	SNIC_INFO("%s:%s >>cmndid=%x:sg_cnt = %x:status = %x:ctx = %lx.\n",
		  fn, line, req->hdr.cmnd_id, req->hdr.sg_cnt, req->hdr.status,
		  req->hdr.init_ctx);

	/* Enable it, to dump byte stream */
	if (snic_log_level & 0x20)
		snic_hex_dump(cmd_str, os_buf, len);
} /* end of snic_dump_desc */

void
snic_print_desc(const char *fn, char *os_buf, int len)
{
	if (snic_log_level & SNIC_DESC_LOGGING)
		snic_dump_desc(fn, os_buf, len);
}

void
snic_calc_io_process_time(struct snic *snic, struct snic_req_info *rqi)
{
	u64 duration;

	duration = jiffies - rqi->start_time;

	if (duration > atomic64_read(&snic->s_stats.io.max_time))
		atomic64_set(&snic->s_stats.io.max_time, duration);
}