irq.c: diff between d26a3a6ce7e02f9c056ad992bcd9624735022337 (old) and e1ef053e08c9b56c0de0635beea75466e97a7383 (new).
Lines prefixed with "-" appear only in the old version, lines prefixed with "+" only in the new one; unprefixed lines are common to both.

// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2022 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"

--- 266 unchanged lines hidden ---

}

static void handle_user_interrupt(struct hl_device *hdev, struct hl_user_interrupt *intr)
{
	struct hl_user_pending_interrupt *pend, *temp_pend;
	struct list_head *ts_reg_free_list_head = NULL;
	struct timestamp_reg_work_obj *job;
	bool reg_node_handle_fail = false;
-	ktime_t now = ktime_get();
	int rc;

	/* For registration nodes:
	 * As part of handling the registration nodes, we should put refcount to
	 * some objects. the problem is that we cannot do that under spinlock
	 * or in irq handler context at all (since release functions are long and
	 * might sleep), so we will need to handle that part in workqueue context.
	 * To avoid handling kmalloc failure which compels us rolling back actions

--- 6 unchanged lines hidden ---

	spin_lock(&intr->wait_list_lock);
	list_for_each_entry_safe(pend, temp_pend, &intr->wait_list_head, wait_list_node) {
		if ((pend->cq_kernel_addr && *(pend->cq_kernel_addr) >= pend->cq_target_value) ||
				!pend->cq_kernel_addr) {
			if (pend->ts_reg_info.buf) {
				if (!reg_node_handle_fail) {
					rc = handle_registration_node(hdev, pend,
-							&ts_reg_free_list_head, now);
+							&ts_reg_free_list_head, intr->timestamp);
					if (rc)
						reg_node_handle_fail = true;
				}
			} else {
				/* Handle wait target value node */
-				pend->fence.timestamp = now;
+				pend->fence.timestamp = intr->timestamp;
				complete_all(&pend->fence.completion);
			}
		}
	}
	spin_unlock(&intr->wait_list_lock);

	if (ts_reg_free_list_head) {
		INIT_WORK(&job->free_obj, hl_ts_free_objects);
		job->free_obj_head = ts_reg_free_list_head;
		job->hdev = hdev;
		queue_work(hdev->ts_free_obj_wq, &job->free_obj);
	} else {
		kfree(job);
	}
}

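The comment at the top of handle_user_interrupt() explains why the final put of each timestamp registration object is deferred: the release may sleep, so it cannot run under the wait-list spinlock or in interrupt context, and the work object is allocated up front so an allocation failure never forces a rollback. A minimal sketch of the workqueue side of that pattern is shown below; only struct timestamp_reg_work_obj and its free_obj/free_obj_head members come from the code above, while the node layout and put_buffer_sketch() are assumptions for illustration.

	#include "habanalabs.h"
	#include <linux/slab.h>

	/* Illustrative sketch, not part of the diff: a worker in the style of
	 * hl_ts_free_objects(), running in process context where sleeping is allowed. */
	struct ts_free_node_sketch {
		struct list_head node;
		void *buf;				/* hypothetical object to release */
	};

	static void put_buffer_sketch(void *buf)	/* hypothetical; may sleep */
	{
	}

	static void free_objects_worker_sketch(struct work_struct *work)
	{
		struct timestamp_reg_work_obj *job =
			container_of(work, struct timestamp_reg_work_obj, free_obj);
		struct ts_free_node_sketch *n, *tmp;

		list_for_each_entry_safe(n, tmp, job->free_obj_head, node) {
			list_del(&n->node);
			put_buffer_sketch(n->buf);	/* sleeping release is safe here */
			kfree(n);
		}
		kfree(job);
	}
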
+static void handle_tpc_interrupt(struct hl_device *hdev)
+{
+	u64 event_mask;
+	u32 flags;
+
+	event_mask = HL_NOTIFIER_EVENT_TPC_ASSERT |
+		HL_NOTIFIER_EVENT_USER_ENGINE_ERR |
+		HL_NOTIFIER_EVENT_DEVICE_RESET;
+
+	flags = HL_DRV_RESET_DELAY;
+
+	dev_err_ratelimited(hdev->dev, "Received TPC assert\n");
+	hl_device_cond_reset(hdev, flags, event_mask);
+}
+
+static void handle_unexpected_user_interrupt(struct hl_device *hdev)
+{
+	dev_err_ratelimited(hdev->dev, "Received unexpected user error interrupt\n");
+}
+
/**
 * hl_irq_handler_user_interrupt - irq handler for user interrupts
 *
 * @irq: irq number
 * @arg: pointer to user interrupt structure
 *
 */
irqreturn_t hl_irq_handler_user_interrupt(int irq, void *arg)
{
	struct hl_user_interrupt *user_int = arg;
+
+	user_int->timestamp = ktime_get();
+
+	return IRQ_WAKE_THREAD;
+}
+
+/**
+ * hl_irq_user_interrupt_thread_handler - irq thread handler for user interrupts.
+ * This function is invoked by threaded irq mechanism
+ *
+ * @irq: irq number
+ * @arg: pointer to user interrupt structure
+ *
+ */
+irqreturn_t hl_irq_user_interrupt_thread_handler(int irq, void *arg)
+{
+	struct hl_user_interrupt *user_int = arg;
	struct hl_device *hdev = user_int->hdev;

	switch (user_int->type) {
	case HL_USR_INTERRUPT_CQ:
		handle_user_interrupt(hdev, &hdev->common_user_cq_interrupt);

		/* Handle user cq interrupt registered on this specific irq */
		handle_user_interrupt(hdev, user_int);
		break;
	case HL_USR_INTERRUPT_DECODER:
		handle_user_interrupt(hdev, &hdev->common_decoder_interrupt);

		/* Handle decoder interrupt registered on this specific irq */
		handle_user_interrupt(hdev, user_int);
		break;
+	case HL_USR_INTERRUPT_TPC:
+		handle_tpc_interrupt(hdev);
+		break;
+	case HL_USR_INTERRUPT_UNEXPECTED:
+		handle_unexpected_user_interrupt(hdev);
+		break;
	default:
		break;
	}

	return IRQ_HANDLED;
}

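With the change above, hl_irq_handler_user_interrupt() only records the interrupt time and returns IRQ_WAKE_THREAD, while hl_irq_user_interrupt_thread_handler() does the wait-list processing from a kernel thread. Below is a minimal sketch of how such a hard/threaded handler pair is typically registered; the two handler names are the ones from this file, while the helper function, the "hl_user_interrupt" name string and the way the vector is obtained are assumptions for illustration (the driver's real registration lives in its ASIC-specific setup code).

	#include "habanalabs.h"
	#include <linux/interrupt.h>

	/* Illustrative sketch, not part of the diff. */
	static int hl_request_user_irq_sketch(struct hl_user_interrupt *user_int,
					      unsigned int irq)
	{
		/*
		 * The hard handler runs in interrupt context, stamps
		 * user_int->timestamp and returns IRQ_WAKE_THREAD; the threaded
		 * handler is then run by the irq core in process context.
		 */
		return request_threaded_irq(irq, hl_irq_handler_user_interrupt,
					    hl_irq_user_interrupt_thread_handler,
					    0, "hl_user_interrupt", user_int);
	}
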
-/**
- * hl_irq_handler_default - default irq handler
- *
- * @irq: irq number
- * @arg: pointer to user interrupt structure
- *
- */
-irqreturn_t hl_irq_handler_default(int irq, void *arg)
-{
-	struct hl_user_interrupt *user_interrupt = arg;
-	struct hl_device *hdev = user_interrupt->hdev;
-	u32 interrupt_id = user_interrupt->interrupt_id;
-
-	dev_err(hdev->dev, "got invalid user interrupt %u", interrupt_id);
-
-	return IRQ_HANDLED;
-}
-
/**
 * hl_irq_handler_eq - irq handler for event queue
 *
 * @irq: irq number
 * @arg: pointer to event queue structure
 *
 */
irqreturn_t hl_irq_handler_eq(int irq, void *arg)
{

--- 12 unchanged lines hidden ---

		cur_eqe = le32_to_cpu(eq_base[eq->ci].hdr.ctl);
		entry_ready = !!FIELD_GET(EQ_CTL_READY_MASK, cur_eqe);

		if (!entry_ready)
			break;

		cur_eqe_index = FIELD_GET(EQ_CTL_INDEX_MASK, cur_eqe);
		if ((hdev->event_queue.check_eqe_index) &&
-				(((eq->prev_eqe_index + 1) & EQ_CTL_INDEX_MASK)
-				!= cur_eqe_index)) {
+				(((eq->prev_eqe_index + 1) & EQ_CTL_INDEX_MASK) != cur_eqe_index)) {
			dev_dbg(hdev->dev,
-				"EQE 0x%x in queue is ready but index does not match %d!=%d",
-				eq_base[eq->ci].hdr.ctl,
+				"EQE %#x in queue is ready but index does not match %d!=%d",
+				cur_eqe,
				((eq->prev_eqe_index + 1) & EQ_CTL_INDEX_MASK),
				cur_eqe_index);
			break;
		}

		eq->prev_eqe_index++;

		eq_entry = &eq_base[eq->ci];

--- 165 unchanged lines hidden ---
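The last hunk only reformats the stale-entry check and its debug message, but the check itself is worth a worked example: a ready EQE is accepted only if its index field equals the previously consumed index plus one, reduced by the index mask, which also handles wrap-around. The sketch below uses a hypothetical 9-bit mask in the low bits of the control word (the real EQ_CTL_INDEX_MASK value lives in the ASIC headers); with it, a previous index of 511 correctly expects 0 next, while e.g. previous 5 followed by current 7 is rejected as a lost or reordered entry.

	#include <linux/bits.h>
	#include <linux/bitfield.h>
	#include <linux/types.h>

	/* Illustrative sketch, not part of the diff: hypothetical 9-bit index field. */
	#define EXAMPLE_EQ_INDEX_MASK	GENMASK(8, 0)

	static bool eqe_index_matches_sketch(u32 prev_index, u32 cur_eqe)
	{
		u32 expected = (prev_index + 1) & EXAMPLE_EQ_INDEX_MASK;

		/* A mismatch means an event queue entry was dropped or seen out of order. */
		return FIELD_GET(EXAMPLE_EQ_INDEX_MASK, cur_eqe) == expected;
	}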