Lines Matching refs:iq

51 struct octeon_instr_queue *iq; in octeon_init_instr_queue() local
73 iq = oct->instr_queue[iq_no]; in octeon_init_instr_queue()
75 iq->oct_dev = oct; in octeon_init_instr_queue()
77 iq->base_addr = lio_dma_alloc(oct, q_size, &iq->base_addr_dma); in octeon_init_instr_queue()
78 if (!iq->base_addr) { in octeon_init_instr_queue()
84 iq->max_count = num_descs; in octeon_init_instr_queue()
89 iq->request_list = vzalloc_node(array_size(num_descs, sizeof(*iq->request_list)), in octeon_init_instr_queue()
91 if (!iq->request_list) in octeon_init_instr_queue()
92 iq->request_list = vzalloc(array_size(num_descs, sizeof(*iq->request_list))); in octeon_init_instr_queue()
93 if (!iq->request_list) { in octeon_init_instr_queue()
94 lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma); in octeon_init_instr_queue()
101 iq_no, iq->base_addr, &iq->base_addr_dma, iq->max_count); in octeon_init_instr_queue()
103 iq->txpciq.u64 = txpciq.u64; in octeon_init_instr_queue()
104 iq->fill_threshold = (u32)conf->db_min; in octeon_init_instr_queue()
105 iq->fill_cnt = 0; in octeon_init_instr_queue()
106 iq->host_write_index = 0; in octeon_init_instr_queue()
107 iq->octeon_read_index = 0; in octeon_init_instr_queue()
108 iq->flush_index = 0; in octeon_init_instr_queue()
109 iq->last_db_time = 0; in octeon_init_instr_queue()
110 iq->do_auto_flush = 1; in octeon_init_instr_queue()
111 iq->db_timeout = (u32)conf->db_timeout; in octeon_init_instr_queue()
112 atomic_set(&iq->instr_pending, 0); in octeon_init_instr_queue()
113 iq->pkts_processed = 0; in octeon_init_instr_queue()
116 spin_lock_init(&iq->lock); in octeon_init_instr_queue()
118 iq->allow_soft_cmds = true; in octeon_init_instr_queue()
119 spin_lock_init(&iq->post_lock); in octeon_init_instr_queue()
121 iq->allow_soft_cmds = false; in octeon_init_instr_queue()
124 spin_lock_init(&iq->iq_flush_running_lock); in octeon_init_instr_queue()
126 oct->io_qmask.iq |= BIT_ULL(iq_no); in octeon_init_instr_queue()
130 iq->iqcmd_64B = (conf->instr_type == 64); in octeon_init_instr_queue()
138 vfree(iq->request_list); in octeon_init_instr_queue()
139 iq->request_list = NULL; in octeon_init_instr_queue()
140 lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma); in octeon_init_instr_queue()
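
The init path above allocates the DMA-visible descriptor ring first, then the host-side request_list, preferring NUMA-node-local memory (vzalloc_node) and falling back to plain vzalloc before unwinding the DMA allocation on failure. A minimal user-space model of that fallback-and-unwind shape, assuming calloc/malloc as stand-ins for lio_dma_alloc()/vzalloc*; alloc_node_local() and iq_model are hypothetical names, not the driver's API:

    #include <stdlib.h>
    #include <string.h>

    struct iq_model {
            void *base_addr;      /* models the DMA-visible descriptor ring */
            void *request_list;   /* models the per-slot host bookkeeping array */
    };

    /* Hypothetical node-local allocator; returns NULL on failure. */
    static void *alloc_node_local(size_t size)
    {
            return malloc(size);
    }

    static int iq_model_init(struct iq_model *iq, size_t q_size, size_t rl_size)
    {
            iq->base_addr = calloc(1, q_size);             /* lio_dma_alloc() stand-in */
            if (!iq->base_addr)
                    return -1;

            iq->request_list = alloc_node_local(rl_size);  /* preferred: node-local */
            if (!iq->request_list)
                    iq->request_list = malloc(rl_size);    /* fallback: any node */
            if (!iq->request_list) {
                    free(iq->base_addr);                   /* unwind the first alloc */
                    return -1;
            }
            memset(iq->request_list, 0, rl_size);          /* vzalloc* zeroes; match it */
            return 0;
    }
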
159 struct octeon_instr_queue *iq = oct->instr_queue[iq_no]; in octeon_delete_instr_queue() local
174 vfree(iq->request_list); in octeon_delete_instr_queue()
176 if (iq->base_addr) { in octeon_delete_instr_queue()
177 q_size = iq->max_count * desc_size; in octeon_delete_instr_queue()
178 lio_dma_free(oct, (u32)q_size, iq->base_addr, in octeon_delete_instr_queue()
179 iq->base_addr_dma); in octeon_delete_instr_queue()
180 oct->io_qmask.iq &= ~(1ULL << iq_no); in octeon_delete_instr_queue()
244 if (!(oct->io_qmask.iq & BIT_ULL(i))) in lio_wait_for_instr_fetch()
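
Across init, delete, and the fetch-wait loop, a 64-bit mask (oct->io_qmask.iq) tracks which instruction queues are live: BIT_ULL(iq_no) is set when a queue comes up (line 126 above), cleared on teardown (line 180), and tested so pollers skip dead queues (line 244). A compact model of that lifecycle; the function names here are illustrative only:

    #include <stdint.h>
    #include <stdbool.h>

    #define BIT_ULL(n) (1ULL << (n))

    static uint64_t iq_mask;   /* models oct->io_qmask.iq */

    static void iq_mark_live(unsigned int iq_no) { iq_mask |=  BIT_ULL(iq_no); }
    static void iq_mark_dead(unsigned int iq_no) { iq_mask &= ~BIT_ULL(iq_no); }
    static bool iq_is_live(unsigned int iq_no)   { return iq_mask & BIT_ULL(iq_no); }

    /* Pollers iterate every possible queue index and continue past dead
     * ones, mirroring the !(oct->io_qmask.iq & BIT_ULL(i)) test above. */
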
265 ring_doorbell(struct octeon_device *oct, struct octeon_instr_queue *iq) in ring_doorbell() argument
268 writel(iq->fill_cnt, iq->doorbell_reg); in ring_doorbell()
270 iq->fill_cnt = 0; in ring_doorbell()
271 iq->last_db_time = jiffies; in ring_doorbell()
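
ring_doorbell() batches notifications: the host accumulates posted commands in fill_cnt and tells the device about all of them with a single register write, then resets the counter and timestamps the write so the timeout logic further down knows when the doorbell last rang. A sketch of that batching idea; mmio_write32() is a hypothetical stand-in for writel(), and time() stands in for jiffies:

    #include <stdint.h>
    #include <time.h>

    struct iq_db {
            uint32_t fill_cnt;            /* commands posted since last doorbell */
            volatile uint32_t *doorbell;  /* device doorbell register */
            time_t last_db_time;          /* models iq->last_db_time */
    };

    static void mmio_write32(volatile uint32_t *reg, uint32_t val)
    {
            *reg = val;
    }

    static void ring_doorbell_model(struct iq_db *iq)
    {
            mmio_write32(iq->doorbell, iq->fill_cnt); /* one write covers N commands */
            iq->fill_cnt = 0;
            iq->last_db_time = time(NULL);
    }
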
279 struct octeon_instr_queue *iq; in octeon_ring_doorbell_locked() local
281 iq = oct->instr_queue[iq_no]; in octeon_ring_doorbell_locked()
282 spin_lock(&iq->post_lock); in octeon_ring_doorbell_locked()
283 if (iq->fill_cnt) in octeon_ring_doorbell_locked()
284 ring_doorbell(oct, iq); in octeon_ring_doorbell_locked()
285 spin_unlock(&iq->post_lock); in octeon_ring_doorbell_locked()
289 static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq, in __copy_cmd_into_iq() argument
294 cmdsize = ((iq->iqcmd_64B) ? 64 : 32); in __copy_cmd_into_iq()
295 iqptr = iq->base_addr + (cmdsize * iq->host_write_index); in __copy_cmd_into_iq()
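
Slot addressing is plain pointer arithmetic: each descriptor is either 32 or 64 bytes (iqcmd_64B, chosen at init from conf->instr_type at line 130), so a command lands at base_addr + cmdsize * host_write_index. A self-contained model of the copy, assuming the same two command sizes:

    #include <stdint.h>
    #include <string.h>
    #include <stdbool.h>

    struct iq_ring {
            uint8_t *base_addr;       /* descriptor ring memory */
            uint32_t host_write_index;
            bool iqcmd_64B;           /* 64-byte vs 32-byte command format */
    };

    static void copy_cmd_into_iq(struct iq_ring *iq, const uint8_t *cmd)
    {
            size_t cmdsize = iq->iqcmd_64B ? 64 : 32;
            uint8_t *slot = iq->base_addr + cmdsize * iq->host_write_index;

            memcpy(slot, cmd, cmdsize);
    }
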
301 __post_command2(struct octeon_instr_queue *iq, u8 *cmd) in __post_command2() argument
310 if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1)) { in __post_command2()
316 if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 2)) in __post_command2()
319 __copy_cmd_into_iq(iq, cmd); in __post_command2()
322 st.index = iq->host_write_index; in __post_command2()
323 iq->host_write_index = incr_index(iq->host_write_index, 1, in __post_command2()
324 iq->max_count); in __post_command2()
325 iq->fill_cnt++; in __post_command2()
332 atomic_inc(&iq->instr_pending); in __post_command2()
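
__post_command2() is the admission check: with instr_pending at max_count - 1 the ring is treated as full and the post fails, and at max_count - 2 the post succeeds but the caller is told to force a doorbell so the queue drains. On success the command is copied in, the write index advances with wraparound (incr_index), and the fill and pending counters grow. A sketch of that logic, using a plain counter where the kernel uses atomic_t:

    #include <stdint.h>

    struct iq_post {
            uint32_t max_count;         /* ring capacity */
            uint32_t host_write_index;
            uint32_t fill_cnt;
            int32_t  instr_pending;     /* atomic_t in the driver */
    };

    struct post_status { int ok; int force_db; uint32_t index; };

    static uint32_t incr_index(uint32_t idx, uint32_t n, uint32_t max)
    {
            return (idx + n) % max;     /* wrap around the ring */
    }

    static struct post_status post_command(struct iq_post *iq)
    {
            struct post_status st = { 0, 0, 0 };

            if (iq->instr_pending >= (int32_t)(iq->max_count - 1))
                    return st;          /* ring full: reject the post */
            if (iq->instr_pending >= (int32_t)(iq->max_count - 2))
                    st.force_db = 1;    /* nearly full: ring the doorbell now */

            /* command copy elided; see copy_cmd_into_iq() above */
            st.index = iq->host_write_index;
            iq->host_write_index = incr_index(iq->host_write_index, 1,
                                              iq->max_count);
            iq->fill_cnt++;
            iq->instr_pending++;
            st.ok = 1;
            return st;
    }
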
354 __add_to_request_list(struct octeon_instr_queue *iq, in __add_to_request_list() argument
357 iq->request_list[idx].buf = buf; in __add_to_request_list()
358 iq->request_list[idx].reqtype = reqtype; in __add_to_request_list()
364 struct octeon_instr_queue *iq, u32 napi_budget) in lio_process_iq_request_list() argument
369 u32 old = iq->flush_index; in lio_process_iq_request_list()
375 while (old != iq->octeon_read_index) { in lio_process_iq_request_list()
376 reqtype = iq->request_list[old].reqtype; in lio_process_iq_request_list()
377 buf = iq->request_list[old].buf; in lio_process_iq_request_list()
416 iq->request_list[old].buf = NULL; in lio_process_iq_request_list()
417 iq->request_list[old].reqtype = 0; in lio_process_iq_request_list()
421 old = incr_index(old, 1, iq->max_count); in lio_process_iq_request_list()
427 octeon_report_tx_completion_to_bql(iq->app_ctx, pkts_compl, in lio_process_iq_request_list()
429 iq->flush_index = old; in lio_process_iq_request_list()
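
Completion processing walks the bookkeeping ring from flush_index up to the index the hardware has consumed (octeon_read_index): each visited slot's buffer is released according to its reqtype, the slot is cleared for reuse, and the totals feed BQL before flush_index catches up. A minimal model of the walk; release_buf() is a hypothetical stand-in for the driver's per-reqtype free paths:

    #include <stdint.h>
    #include <stddef.h>

    struct request_entry { void *buf; int reqtype; };

    struct iq_complete {
            struct request_entry *request_list;
            uint32_t flush_index;        /* host: next slot to reclaim */
            uint32_t octeon_read_index;  /* device: last consumed slot */
            uint32_t max_count;
    };

    static void release_buf(void *buf, int reqtype)
    {
            (void)buf; (void)reqtype;    /* dispatch on reqtype in the driver */
    }

    static uint32_t process_request_list(struct iq_complete *iq)
    {
            uint32_t old = iq->flush_index, done = 0;

            while (old != iq->octeon_read_index) {
                    release_buf(iq->request_list[old].buf,
                                iq->request_list[old].reqtype);
                    iq->request_list[old].buf = NULL;  /* clear slot for reuse */
                    iq->request_list[old].reqtype = 0;
                    old = (old + 1) % iq->max_count;   /* incr_index() */
                    done++;
            }
            iq->flush_index = old;
            return done;                 /* fed back into the pending counters */
    }
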
441 octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq, in octeon_flush_iq() argument
448 if (!spin_trylock(&iq->iq_flush_running_lock)) in octeon_flush_iq()
451 spin_lock_bh(&iq->lock); in octeon_flush_iq()
453 iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq); in octeon_flush_iq()
457 if (iq->flush_index == iq->octeon_read_index) in octeon_flush_iq()
462 lio_process_iq_request_list(oct, iq, in octeon_flush_iq()
467 lio_process_iq_request_list(oct, iq, 0); in octeon_flush_iq()
470 iq->pkts_processed += inst_processed; in octeon_flush_iq()
471 atomic_sub(inst_processed, &iq->instr_pending); in octeon_flush_iq()
472 iq->stats.instr_processed += inst_processed; in octeon_flush_iq()
481 iq->last_db_time = jiffies; in octeon_flush_iq()
483 spin_unlock_bh(&iq->lock); in octeon_flush_iq()
485 spin_unlock(&iq->iq_flush_running_lock); in octeon_flush_iq()
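
octeon_flush_iq() layers two locks: a trylock on iq_flush_running_lock makes concurrent flushers bail out instead of queueing up behind each other, while iq->lock (taken with bottom halves disabled) protects the indices as the hardware read index is refreshed and completions are reaped, either up to a NAPI budget or exhaustively when the budget is zero. The shape of that locking, sketched with pthread mutexes standing in for the kernel spinlocks:

    #include <pthread.h>
    #include <stdint.h>

    struct iq_flush {
            pthread_mutex_t flush_running_lock; /* models iq_flush_running_lock */
            pthread_mutex_t lock;               /* models iq->lock (spin_lock_bh) */
            uint32_t flush_index, octeon_read_index;
    };

    static uint32_t hw_read_index(struct iq_flush *iq)
    {
            return iq->octeon_read_index;       /* device register read in the driver */
    }

    static void reap(struct iq_flush *iq, uint32_t budget)
    {
            (void)iq; (void)budget;             /* lio_process_iq_request_list() */
    }

    static void flush_iq(struct iq_flush *iq, uint32_t napi_budget)
    {
            /* Someone else is already flushing: don't pile up behind them. */
            if (pthread_mutex_trylock(&iq->flush_running_lock))
                    return;

            pthread_mutex_lock(&iq->lock);
            iq->octeon_read_index = hw_read_index(iq);  /* refresh from device */
            if (iq->flush_index != iq->octeon_read_index)
                    reap(iq, napi_budget);              /* budgeted, or 0 = all */
            pthread_mutex_unlock(&iq->lock);

            pthread_mutex_unlock(&iq->flush_running_lock);
    }
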
495 struct octeon_instr_queue *iq; in __check_db_timeout() local
501 iq = oct->instr_queue[iq_no]; in __check_db_timeout()
502 if (!iq) in __check_db_timeout()
506 if (!atomic_read(&iq->instr_pending)) in __check_db_timeout()
509 next_time = iq->last_db_time + iq->db_timeout; in __check_db_timeout()
512 iq->last_db_time = jiffies; in __check_db_timeout()
515 octeon_flush_iq(oct, iq, 0); in __check_db_timeout()
517 lio_enable_irq(NULL, iq); in __check_db_timeout()
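
The timeout check is the safety net for the batched doorbell: if commands are pending but the doorbell has been quiet for longer than db_timeout since last_db_time, the queue is flushed anyway, so a short burst that never reached the fill threshold still makes progress. A sketch of the time comparison; the kernel's time_after() handles jiffies wraparound, which the plain monotonic count below does not:

    #include <stdint.h>
    #include <stdbool.h>

    struct iq_timer {
            int32_t  instr_pending;
            uint64_t last_db_time;   /* models the jiffies timestamp */
            uint64_t db_timeout;
    };

    /* Returns true when a flush is due: work is pending and the
     * doorbell has been quiet for longer than db_timeout. */
    static bool db_timeout_expired(const struct iq_timer *iq, uint64_t now)
    {
            if (!iq->instr_pending)
                    return false;
            return now >= iq->last_db_time + iq->db_timeout;
    }
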
542 struct octeon_instr_queue *iq = oct->instr_queue[iq_no]; in octeon_send_command() local
547 if (iq->allow_soft_cmds) in octeon_send_command()
548 spin_lock_bh(&iq->post_lock); in octeon_send_command()
550 st = __post_command2(iq, cmd); in octeon_send_command()
554 __add_to_request_list(iq, st.index, buf, reqtype); in octeon_send_command()
558 if (iq->fill_cnt >= MAX_OCTEON_FILL_COUNT || force_db || in octeon_send_command()
560 ring_doorbell(oct, iq); in octeon_send_command()
565 if (iq->allow_soft_cmds) in octeon_send_command()
566 spin_unlock_bh(&iq->post_lock); in octeon_send_command()
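
octeon_send_command() strings the pieces together: take post_lock only when soft commands are allowed (otherwise the data path is the sole poster and can skip the lock), admit the command via __post_command2(), record the buffer for later completion, and ring the doorbell when the batch reaches MAX_OCTEON_FILL_COUNT or the caller (or a nearly full ring) forces it. A condensed model of that sequence, reusing the earlier sketches; all names and the threshold value here are illustrative:

    #include <stdbool.h>
    #include <stdint.h>

    #define FILL_THRESHOLD 2   /* stand-in for MAX_OCTEON_FILL_COUNT */

    struct send_iq {
            bool allow_soft_cmds;
            uint32_t fill_cnt;
    };

    static void post_lock(struct send_iq *iq)    { (void)iq; /* spin_lock_bh */ }
    static void post_unlock(struct send_iq *iq)  { (void)iq; /* spin_unlock_bh */ }
    static int  post_command(struct send_iq *iq) { iq->fill_cnt++; return 1; }
    static void ring_db(struct send_iq *iq)      { iq->fill_cnt = 0; }

    static int send_command(struct send_iq *iq, bool force_db)
    {
            int ok;

            if (iq->allow_soft_cmds)
                    post_lock(iq);      /* soft-command posters may race */

            ok = post_command(iq);      /* admission check + copy + counters */
            if (ok && (iq->fill_cnt >= FILL_THRESHOLD || force_db))
                    ring_db(iq);        /* batched doorbell */

            if (iq->allow_soft_cmds)
                    post_unlock(iq);
            return ok;
    }
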
687 struct octeon_instr_queue *iq; in octeon_send_soft_command() local
693 iq = oct->instr_queue[sc->iq_no]; in octeon_send_soft_command()
694 if (!iq->allow_soft_cmds) { in octeon_send_soft_command()
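
Finally, octeon_send_soft_command() checks the per-queue allow_soft_cmds flag before building anything: queues configured for the fast data path (allow_soft_cmds set to false at init, line 121 above) reject soft commands outright rather than taking post_lock. A sketch of that gate, with hypothetical names:

    #include <stdbool.h>
    #include <stdio.h>

    struct soft_iq { bool allow_soft_cmds; };

    static int send_soft_command(struct soft_iq *iq)
    {
            if (!iq->allow_soft_cmds) {
                    fprintf(stderr, "soft commands not allowed on this queue\n");
                    return -1;          /* reject before touching the ring */
            }
            /* ... build and post the soft command ... */
            return 0;
    }
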