// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#include <linux/kernel.h>
#include <linux/nospec.h>
#include "cc_driver.h"
#include "cc_buffer_mgr.h"
#include "cc_request_mgr.h"
#include "cc_pm.h"

#define CC_MAX_POLL_ITER	10
/* The highest descriptor count in use */
#define CC_MAX_DESC_SEQ_LEN	23

struct cc_req_mgr_handle {
	/* Request manager resources */
	unsigned int hw_queue_size; /* HW capability */
	unsigned int min_free_hw_slots;
	unsigned int max_used_sw_slots;
	struct cc_crypto_req req_queue[MAX_REQUEST_QUEUE_SIZE];
	u32 req_queue_head;
	u32 req_queue_tail;
	u32 axi_completed;
	u32 q_free_slots;
	/* This lock protects access to HW registers
	 * that must be accessed by a single request at a time
	 */
	spinlock_t hw_lock;
	struct cc_hw_desc compl_desc;
	u8 *dummy_comp_buff;
	dma_addr_t dummy_comp_buff_dma;

	/* backlog queue */
	struct list_head backlog;
	unsigned int bl_len;
	spinlock_t bl_lock; /* protect backlog queue */

#ifdef COMP_IN_WQ
	struct workqueue_struct *workq;
	struct delayed_work compwork;
#else
	struct tasklet_struct comptask;
#endif
	bool is_runtime_suspended;
};

struct cc_bl_item {
	struct cc_crypto_req creq;
	struct cc_hw_desc desc[CC_MAX_DESC_SEQ_LEN];
	unsigned int len;
	struct list_head list;
	bool notif;
};

static const u32 cc_cpp_int_masks[CC_CPP_NUM_ALGS][CC_CPP_NUM_SLOTS] = {
	{ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_0_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_1_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_2_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_3_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_4_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_5_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_6_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_7_INT_BIT_SHIFT) },
	{ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_0_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_1_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_2_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_3_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_4_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_5_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_6_INT_BIT_SHIFT),
	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_7_INT_BIT_SHIFT) }
};

static void comp_handler(unsigned long devarg);
#ifdef COMP_IN_WQ
static void comp_work_handler(struct work_struct *work);
#endif
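
/*
 * Return the "operation aborted" interrupt mask bit for a given CPP
 * algorithm/slot pair. array_index_nospec() clamps both indices under
 * speculative execution so an out-of-range value cannot be used to read
 * past the bounds of cc_cpp_int_masks[].
 */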
static inline u32 cc_cpp_int_mask(enum cc_cpp_alg alg, int slot)
{
	alg = array_index_nospec(alg, CC_CPP_NUM_ALGS);
	slot = array_index_nospec(slot, CC_CPP_NUM_SLOTS);

	return cc_cpp_int_masks[alg][slot];
}

void cc_req_mgr_fini(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
	struct device *dev = drvdata_to_dev(drvdata);

	if (!req_mgr_h)
		return; /* Not allocated */

	if (req_mgr_h->dummy_comp_buff_dma) {
		dma_free_coherent(dev, sizeof(u32), req_mgr_h->dummy_comp_buff,
				  req_mgr_h->dummy_comp_buff_dma);
	}

	dev_dbg(dev, "max_used_hw_slots=%d\n", (req_mgr_h->hw_queue_size -
						req_mgr_h->min_free_hw_slots));
	dev_dbg(dev, "max_used_sw_slots=%d\n", req_mgr_h->max_used_sw_slots);

#ifdef COMP_IN_WQ
	flush_workqueue(req_mgr_h->workq);
	destroy_workqueue(req_mgr_h->workq);
#else
	/* Kill tasklet */
	tasklet_kill(&req_mgr_h->comptask);
#endif
	kzfree(req_mgr_h);
	drvdata->request_mgr_handle = NULL;
}

int cc_req_mgr_init(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *req_mgr_h;
	struct device *dev = drvdata_to_dev(drvdata);
	int rc = 0;

	req_mgr_h = kzalloc(sizeof(*req_mgr_h), GFP_KERNEL);
	if (!req_mgr_h) {
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}

	drvdata->request_mgr_handle = req_mgr_h;

	spin_lock_init(&req_mgr_h->hw_lock);
	spin_lock_init(&req_mgr_h->bl_lock);
	INIT_LIST_HEAD(&req_mgr_h->backlog);

#ifdef COMP_IN_WQ
	dev_dbg(dev, "Initializing completion workqueue\n");
	req_mgr_h->workq = create_singlethread_workqueue("ccree");
	if (!req_mgr_h->workq) {
		dev_err(dev, "Failed creating work queue\n");
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}
	INIT_DELAYED_WORK(&req_mgr_h->compwork, comp_work_handler);
#else
	dev_dbg(dev, "Initializing completion tasklet\n");
	tasklet_init(&req_mgr_h->comptask, comp_handler,
		     (unsigned long)drvdata);
#endif
	req_mgr_h->hw_queue_size = cc_ioread(drvdata,
					     CC_REG(DSCRPTR_QUEUE_SRAM_SIZE));
	dev_dbg(dev, "hw_queue_size=0x%08X\n", req_mgr_h->hw_queue_size);
	if (req_mgr_h->hw_queue_size < MIN_HW_QUEUE_SIZE) {
		dev_err(dev, "Invalid HW queue size = %u (Min. required is %u)\n",
			req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE);
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}
	req_mgr_h->min_free_hw_slots = req_mgr_h->hw_queue_size;
	req_mgr_h->max_used_sw_slots = 0;

	/* Allocate DMA word for "dummy" completion descriptor use */
	req_mgr_h->dummy_comp_buff =
		dma_alloc_coherent(dev, sizeof(u32),
				   &req_mgr_h->dummy_comp_buff_dma,
				   GFP_KERNEL);
	if (!req_mgr_h->dummy_comp_buff) {
		dev_err(dev, "Not enough memory to allocate DMA (%zu) dropped buffer\n",
			sizeof(u32));
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}

	/* Init. "dummy" completion descriptor */
	hw_desc_init(&req_mgr_h->compl_desc);
	set_din_const(&req_mgr_h->compl_desc, 0, sizeof(u32));
	set_dout_dlli(&req_mgr_h->compl_desc, req_mgr_h->dummy_comp_buff_dma,
		      sizeof(u32), NS_BIT, 1);
	set_flow_mode(&req_mgr_h->compl_desc, BYPASS);
	set_queue_last_ind(drvdata, &req_mgr_h->compl_desc);

	return 0;

req_mgr_init_err:
	cc_req_mgr_fini(drvdata);
	return rc;
}
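
/*
 * Write a sequence of HW descriptors into the descriptor queue push
 * register, one 6-word descriptor at a time. On all runtime paths this
 * is reached with req_mgr_h->hw_lock held (via cc_do_send_request());
 * the only other caller is send_request_init(), which per its own
 * documentation runs during the init process, before concurrent
 * submission is possible.
 */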
static void enqueue_seq(struct cc_drvdata *drvdata, struct cc_hw_desc seq[],
			unsigned int seq_len)
{
	int i, w;
	void __iomem *reg = drvdata->cc_base + CC_REG(DSCRPTR_QUEUE_WORD0);
	struct device *dev = drvdata_to_dev(drvdata);

	/*
	 * We do indeed write all 6 command words to the same
	 * register. The HW supports this.
	 */

	for (i = 0; i < seq_len; i++) {
		for (w = 0; w <= 5; w++)
			writel_relaxed(seq[i].word[w], reg);

		if (cc_dump_desc)
			dev_dbg(dev, "desc[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
				i, seq[i].word[0], seq[i].word[1],
				seq[i].word[2], seq[i].word[3],
				seq[i].word[4], seq[i].word[5]);
	}
}

/*!
 * Completion will take place if and only if user requested completion
 * by cc_send_sync_request().
 *
 * \param dev The device object
 * \param dx_compl_h The completion event to signal
 * \param dummy Error status from the completion path (ignored here)
 */
static void request_mgr_complete(struct device *dev, void *dx_compl_h,
				 int dummy)
{
	struct completion *this_compl = dx_compl_h;

	complete(this_compl);
}
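
/*
 * Check that there is room for the request in both the SW request ring
 * and the HW descriptor queue. The SW ring is full when advancing the
 * head would make it equal to the tail (the masking arithmetic assumes
 * MAX_REQUEST_QUEUE_SIZE is a power of two, so one slot is always left
 * unused). The HW free-slot count is re-read up to CC_MAX_POLL_ITER
 * times before giving up with -ENOSPC.
 */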
static int cc_queues_status(struct cc_drvdata *drvdata,
			    struct cc_req_mgr_handle *req_mgr_h,
			    unsigned int total_seq_len)
{
	unsigned long poll_queue;
	struct device *dev = drvdata_to_dev(drvdata);

	/* SW queue is checked only once as it will not
	 * be changed during the poll because the spinlock_bh
	 * is held by the thread
	 */
	if (((req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1)) ==
	    req_mgr_h->req_queue_tail) {
		dev_err(dev, "SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
			req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
		return -ENOSPC;
	}

	if (req_mgr_h->q_free_slots >= total_seq_len)
		return 0;

	/* Wait for space in HW queue. Poll constant num of iterations. */
	for (poll_queue = 0; poll_queue < CC_MAX_POLL_ITER; poll_queue++) {
		req_mgr_h->q_free_slots =
			cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
		if (req_mgr_h->q_free_slots < req_mgr_h->min_free_hw_slots)
			req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots;

		if (req_mgr_h->q_free_slots >= total_seq_len) {
			/* There is enough room; return */
			return 0;
		}

		dev_dbg(dev, "HW FIFO is full. q_free_slots=%d total_seq_len=%d\n",
			req_mgr_h->q_free_slots, total_seq_len);
	}
	/* No room in the HW queue; try again later */
	dev_dbg(dev, "HW FIFO full, timeout. req_queue_head=%d sw_fifo_len=%d q_free_slots=%d total_seq_len=%d\n",
		req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE,
		req_mgr_h->q_free_slots, total_seq_len);
	return -ENOSPC;
}

/*!
 * Enqueue caller request to crypto hardware.
 * Must be called with the HW lock held and PM running.
 *
 * \param drvdata
 * \param cc_req The request to enqueue
 * \param desc The crypto sequence
 * \param len The crypto sequence length
 * \param add_comp If "true": add an artificial dout DMA to mark completion
 *
 * \return int Returns -EINPROGRESS or error code
 */
static int cc_do_send_request(struct cc_drvdata *drvdata,
			      struct cc_crypto_req *cc_req,
			      struct cc_hw_desc *desc, unsigned int len,
			      bool add_comp)
{
	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
	unsigned int used_sw_slots;
	unsigned int total_seq_len = len; /* initial sequence length */
	struct device *dev = drvdata_to_dev(drvdata);

	used_sw_slots = ((req_mgr_h->req_queue_head -
			  req_mgr_h->req_queue_tail) &
			 (MAX_REQUEST_QUEUE_SIZE - 1));
	if (used_sw_slots > req_mgr_h->max_used_sw_slots)
		req_mgr_h->max_used_sw_slots = used_sw_slots;

	/* Enqueue request - must be locked with HW lock */
	req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *cc_req;
	req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) &
				    (MAX_REQUEST_QUEUE_SIZE - 1);
	/* TODO: Use circ_buf.h ? */

	dev_dbg(dev, "Enqueue request head=%u\n", req_mgr_h->req_queue_head);

	/*
	 * We are about to push a command to the HW via the command registers
	 * that may reference host memory. We need to issue a memory barrier
	 * to make sure there are no outstanding memory writes.
	 */
	wmb();

	/* STAT_PHASE_4: Push sequence */

	enqueue_seq(drvdata, desc, len);

	if (add_comp) {
		enqueue_seq(drvdata, &req_mgr_h->compl_desc, 1);
		total_seq_len++;
	}

	if (req_mgr_h->q_free_slots < total_seq_len) {
		/* This situation should never occur. Maybe indicating problem
		 * with resuming power. Set the free slot count to 0 and hope
		 * for the best.
		 */
		dev_err(dev, "HW free slot count mismatch.");
		req_mgr_h->q_free_slots = 0;
	} else {
		/* Update the free slots in HW queue */
		req_mgr_h->q_free_slots -= total_seq_len;
	}

	/* Operation still in process */
	return -EINPROGRESS;
}
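
/*
 * Backlog handling: when the queues are full and the caller set
 * CRYPTO_TFM_REQ_MAY_BACKLOG, cc_send_request() copies the request and
 * its descriptor sequence into a cc_bl_item and queues it here instead
 * of failing. The completion tasklet later calls cc_proc_backlog() to
 * retry pushing backlogged items into the HW queue.
 */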
static void cc_enqueue_backlog(struct cc_drvdata *drvdata,
			       struct cc_bl_item *bli)
{
	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
	struct device *dev = drvdata_to_dev(drvdata);

	spin_lock_bh(&mgr->bl_lock);
	list_add_tail(&bli->list, &mgr->backlog);
	++mgr->bl_len;
	dev_dbg(dev, "+++bl len: %d\n", mgr->bl_len);
	spin_unlock_bh(&mgr->bl_lock);
	tasklet_schedule(&mgr->comptask);
}

static void cc_proc_backlog(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
	struct cc_bl_item *bli;
	struct cc_crypto_req *creq;
	void *req;
	struct device *dev = drvdata_to_dev(drvdata);
	int rc;

	spin_lock(&mgr->bl_lock);

	while (mgr->bl_len) {
		bli = list_first_entry(&mgr->backlog, struct cc_bl_item, list);
		dev_dbg(dev, "---bl len: %d\n", mgr->bl_len);

		spin_unlock(&mgr->bl_lock);

		creq = &bli->creq;
		req = creq->user_arg;

		/*
		 * Notify the request we're moving out of the backlog
		 * but only if we haven't done so already.
		 */
		if (!bli->notif) {
			creq->user_cb(dev, req, -EINPROGRESS);
			bli->notif = true;
		}

		spin_lock(&mgr->hw_lock);

		rc = cc_queues_status(drvdata, mgr, bli->len);
		if (rc) {
			/*
			 * There is still no room in the FIFO for
			 * this request. Bail out. We'll return here
			 * on the next completion irq.
			 */
			spin_unlock(&mgr->hw_lock);
			return;
		}

		rc = cc_do_send_request(drvdata, &bli->creq, bli->desc,
					bli->len, false);

		spin_unlock(&mgr->hw_lock);

		if (rc != -EINPROGRESS) {
			cc_pm_put_suspend(dev);
			creq->user_cb(dev, req, rc);
		}

		/* Remove ourselves from the backlog list */
		spin_lock(&mgr->bl_lock);
		list_del(&bli->list);
		--mgr->bl_len;
		kfree(bli);
	}

	spin_unlock(&mgr->bl_lock);
}
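
/*
 * Typical asynchronous submission, as seen from the algorithm glue code
 * (a sketch only; the exact call sites live elsewhere in the driver):
 *
 *	cc_req.user_cb = my_complete;	// hypothetical completion callback
 *	cc_req.user_arg = req;
 *	rc = cc_send_request(drvdata, &cc_req, desc, seq_len, &req->base);
 *
 * -EINPROGRESS means the request was pushed to the HW queue and the
 * callback will run from the completion handler; -EBUSY means it was
 * placed on the backlog (only possible with MAY_BACKLOG set); any other
 * value is an error and no callback will follow.
 */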
int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
		    struct cc_hw_desc *desc, unsigned int len,
		    struct crypto_async_request *req)
{
	int rc;
	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
	struct device *dev = drvdata_to_dev(drvdata);
	bool backlog_ok = req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;
	gfp_t flags = cc_gfp_flags(req);
	struct cc_bl_item *bli;

	rc = cc_pm_get(dev);
	if (rc) {
		dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
		return rc;
	}

	spin_lock_bh(&mgr->hw_lock);
	rc = cc_queues_status(drvdata, mgr, len);

#ifdef CC_DEBUG_FORCE_BACKLOG
	if (backlog_ok)
		rc = -ENOSPC;
#endif /* CC_DEBUG_FORCE_BACKLOG */

	if (rc == -ENOSPC && backlog_ok) {
		spin_unlock_bh(&mgr->hw_lock);

		bli = kmalloc(sizeof(*bli), flags);
		if (!bli) {
			cc_pm_put_suspend(dev);
			return -ENOMEM;
		}

		memcpy(&bli->creq, cc_req, sizeof(*cc_req));
		memcpy(&bli->desc, desc, len * sizeof(*desc));
		bli->len = len;
		bli->notif = false;
		cc_enqueue_backlog(drvdata, bli);
		return -EBUSY;
	}

	if (!rc)
		rc = cc_do_send_request(drvdata, cc_req, desc, len, false);

	spin_unlock_bh(&mgr->hw_lock);
	return rc;
}

int cc_send_sync_request(struct cc_drvdata *drvdata,
			 struct cc_crypto_req *cc_req, struct cc_hw_desc *desc,
			 unsigned int len)
{
	int rc;
	struct device *dev = drvdata_to_dev(drvdata);
	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;

	init_completion(&cc_req->seq_compl);
	cc_req->user_cb = request_mgr_complete;
	cc_req->user_arg = &cc_req->seq_compl;

	rc = cc_pm_get(dev);
	if (rc) {
		dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
		return rc;
	}

	while (true) {
		spin_lock_bh(&mgr->hw_lock);
		rc = cc_queues_status(drvdata, mgr, len + 1);

		if (!rc)
			break;

		spin_unlock_bh(&mgr->hw_lock);
		if (rc != -EAGAIN) {
			cc_pm_put_suspend(dev);
			return rc;
		}
		wait_for_completion_interruptible(&drvdata->hw_queue_avail);
		reinit_completion(&drvdata->hw_queue_avail);
	}

	rc = cc_do_send_request(drvdata, cc_req, desc, len, true);
	spin_unlock_bh(&mgr->hw_lock);

	if (rc != -EINPROGRESS) {
		cc_pm_put_suspend(dev);
		return rc;
	}

	wait_for_completion(&cc_req->seq_compl);
	return 0;
}

/*!
 * Enqueue caller request to crypto hardware during the init process.
 * Assumes this function is not called in the middle of a flow,
 * since we set the QUEUE_LAST_IND flag in the last descriptor.
 *
 * \param drvdata
 * \param desc The crypto sequence
 * \param len The crypto sequence length
 *
 * \return int Returns "0" upon success
 */
int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
		      unsigned int len)
{
	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
	unsigned int total_seq_len = len; /* initial sequence length */
	int rc = 0;

	/*
	 * Wait for space in the HW and SW queues; cc_queues_status() polls
	 * the HW queue for a bounded number of iterations.
	 */
	rc = cc_queues_status(drvdata, req_mgr_h, total_seq_len);
	if (rc)
		return rc;

	set_queue_last_ind(drvdata, &desc[(len - 1)]);

	/*
	 * We are about to push a command to the HW via the command registers
	 * that may reference host memory. We need to issue a memory barrier
	 * to make sure there are no outstanding memory writes.
	 */
	wmb();
	enqueue_seq(drvdata, desc, len);

	/* Update the free slots in HW queue */
	req_mgr_h->q_free_slots =
		cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));

	return 0;
}

void complete_request(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;

	complete(&drvdata->hw_queue_avail);
#ifdef COMP_IN_WQ
	queue_delayed_work(request_mgr_handle->workq,
			   &request_mgr_handle->compwork, 0);
#else
	tasklet_schedule(&request_mgr_handle->comptask);
#endif
}

#ifdef COMP_IN_WQ
static void comp_work_handler(struct work_struct *work)
{
	struct cc_drvdata *drvdata =
		container_of(work, struct cc_drvdata, compwork.work);

	comp_handler((unsigned long)drvdata);
}
#endif
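
/*
 * Drain the SW request ring: for each AXI completion counted in
 * axi_completed, dequeue the request at the tail, report 0 on success
 * or -EPERM if the matching CPP "operation aborted" interrupt bit is
 * set, invoke the user callback, and drop the PM reference taken at
 * submit time.
 */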
static void proc_completions(struct cc_drvdata *drvdata)
{
	struct cc_crypto_req *cc_req;
	struct device *dev = drvdata_to_dev(drvdata);
	struct cc_req_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;
	unsigned int *tail = &request_mgr_handle->req_queue_tail;
	unsigned int *head = &request_mgr_handle->req_queue_head;
	int rc;
	u32 mask;

	while (request_mgr_handle->axi_completed) {
		request_mgr_handle->axi_completed--;

		/* Dequeue request */
		if (*head == *tail) {
			/* We are supposed to handle a completion but our
			 * queue is empty. This is not normal. Return and
			 * hope for the best.
			 */
			dev_err(dev, "Request queue is empty head == tail %u\n",
				*head);
			break;
		}

		cc_req = &request_mgr_handle->req_queue[*tail];

		if (cc_req->cpp.is_cpp) {
			dev_dbg(dev, "CPP request completion slot: %d alg:%d\n",
				cc_req->cpp.slot, cc_req->cpp.alg);
			mask = cc_cpp_int_mask(cc_req->cpp.alg,
					       cc_req->cpp.slot);
			rc = (drvdata->irq & mask ? -EPERM : 0);
			dev_dbg(dev, "Got mask: %x irq: %x rc: %d\n", mask,
				drvdata->irq, rc);
		} else {
			dev_dbg(dev, "Non-CPP request completion\n");
			rc = 0;
		}

		if (cc_req->user_cb)
			cc_req->user_cb(dev, cc_req->user_arg, rc);
		*tail = (*tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
		dev_dbg(dev, "Dequeue request tail=%u\n", *tail);
		dev_dbg(dev, "Request completed. axi_completed=%d\n",
			request_mgr_handle->axi_completed);
		cc_pm_put_suspend(dev);
	}
}

static inline u32 cc_axi_comp_count(struct cc_drvdata *drvdata)
{
	return FIELD_GET(AXIM_MON_COMP_VALUE,
			 cc_ioread(drvdata, drvdata->axim_mon_offset));
}

/* Deferred service handler, run as interrupt-fired tasklet */
static void comp_handler(unsigned long devarg)
{
	struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
	struct cc_req_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;
	struct device *dev = drvdata_to_dev(drvdata);
	u32 irq;

	dev_dbg(dev, "Completion handler called!\n");
	irq = (drvdata->irq & drvdata->comp_mask);

	/* To avoid the interrupt firing again as we unmask it,
	 * we clear it now
	 */
	cc_iowrite(drvdata, CC_REG(HOST_ICR), irq);

	/* Avoid race with above clear: Test completion counter once more */

	request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);

	dev_dbg(dev, "AXI completion after updated: %d\n",
		request_mgr_handle->axi_completed);

	while (request_mgr_handle->axi_completed) {
		do {
			drvdata->irq |= cc_ioread(drvdata, CC_REG(HOST_IRR));
			irq = (drvdata->irq & drvdata->comp_mask);
			proc_completions(drvdata);

			/* At this point (after proc_completions()),
			 * request_mgr_handle->axi_completed is 0.
			 */
			request_mgr_handle->axi_completed +=
						cc_axi_comp_count(drvdata);
		} while (request_mgr_handle->axi_completed > 0);

		cc_iowrite(drvdata, CC_REG(HOST_ICR), irq);

		request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);
	}

	/* After verifying that there is nothing to do,
	 * unmask AXI completion interrupt
	 */
	cc_iowrite(drvdata, CC_REG(HOST_IMR),
		   cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~drvdata->comp_mask);

	cc_proc_backlog(drvdata);
	dev_dbg(dev, "Comp. handler done.\n");
}

/*
 * Resume the queue configuration - no need to take the lock as this happens
 * inside the spin lock protection
 */
#if defined(CONFIG_PM)
int cc_resume_req_queue(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;

	spin_lock_bh(&request_mgr_handle->hw_lock);
	request_mgr_handle->is_runtime_suspended = false;
	spin_unlock_bh(&request_mgr_handle->hw_lock);

	return 0;
}

/*
 * Suspend the queue configuration. Since it is used for runtime suspend
 * only, verify that the queue can be suspended.
 */
int cc_suspend_req_queue(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;

	/* lock the send_request */
	spin_lock_bh(&request_mgr_handle->hw_lock);
	if (request_mgr_handle->req_queue_head !=
	    request_mgr_handle->req_queue_tail) {
		spin_unlock_bh(&request_mgr_handle->hw_lock);
		return -EBUSY;
	}
	request_mgr_handle->is_runtime_suspended = true;
	spin_unlock_bh(&request_mgr_handle->hw_lock);

	return 0;
}

bool cc_req_queue_suspended(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;

	return request_mgr_handle->is_runtime_suspended;
}

#endif