Lines matching refs: scrq (drivers/net/ethernet/ibm/ibmvnic.c, sub-CRQ handling)
3943 struct ibmvnic_sub_crq_queue *scrq) in reset_one_sub_crq_queue() argument
3947 if (!scrq) { in reset_one_sub_crq_queue()
3952 if (scrq->irq) { in reset_one_sub_crq_queue()
3953 free_irq(scrq->irq, scrq); in reset_one_sub_crq_queue()
3954 irq_dispose_mapping(scrq->irq); in reset_one_sub_crq_queue()
3955 scrq->irq = 0; in reset_one_sub_crq_queue()
3958 if (scrq->msgs) { in reset_one_sub_crq_queue()
3959 memset(scrq->msgs, 0, 4 * PAGE_SIZE); in reset_one_sub_crq_queue()
3960 atomic_set(&scrq->used, 0); in reset_one_sub_crq_queue()
3961 scrq->cur = 0; in reset_one_sub_crq_queue()
3962 scrq->ind_buf.index = 0; in reset_one_sub_crq_queue()
3968 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, in reset_one_sub_crq_queue()
3969 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq); in reset_one_sub_crq_queue()
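Taken together, the matched lines cover nearly all of reset_one_sub_crq_queue(): release the Linux IRQ, wipe the message page, and re-register the queue with the hypervisor. A minimal sketch of that flow; the error returns and the else branch are assumptions filled in from context:

static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
                                   struct ibmvnic_sub_crq_queue *scrq)
{
        int rc;

        if (!scrq)
                return -EINVAL;        /* assumed error path */

        if (scrq->irq) {
                /* drop the kernel IRQ and its mapping before the reset */
                free_irq(scrq->irq, scrq);
                irq_dispose_mapping(scrq->irq);
                scrq->irq = 0;
        }

        if (scrq->msgs) {
                /* wipe the 4-page message area and rewind every cursor */
                memset(scrq->msgs, 0, 4 * PAGE_SIZE);
                atomic_set(&scrq->used, 0);
                scrq->cur = 0;
                scrq->ind_buf.index = 0;
        } else {
                return -EINVAL;        /* assumed: queue never allocated */
        }

        /* re-register the same DMA-mapped pages with the hypervisor */
        rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
                           4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
        return rc;
}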
4000 struct ibmvnic_sub_crq_queue *scrq, in release_sub_crq_queue() argument
4013 scrq->crq_num); in release_sub_crq_queue()
4019 scrq->crq_num, rc); in release_sub_crq_queue()
4025 scrq->ind_buf.indir_arr, in release_sub_crq_queue()
4026 scrq->ind_buf.indir_dma); in release_sub_crq_queue()
4028 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, in release_sub_crq_queue()
4030 free_pages((unsigned long)scrq->msgs, 2); in release_sub_crq_queue()
4031 free_cpumask_var(scrq->affinity_mask); in release_sub_crq_queue()
4032 kfree(scrq); in release_sub_crq_queue()
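release_sub_crq_queue() is the teardown counterpart: free the queue number with the hypervisor, then unwind the DMA and memory allocations in the reverse order of init_sub_crq_queue() below. A sketch; the do_h_free parameter appears in the matched signature, while the H_BUSY retry loop and the IBMVNIC_IND_ARR_SZ size constant are assumptions from how the surrounding driver code is structured:

static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
                                  struct ibmvnic_sub_crq_queue *scrq,
                                  bool do_h_free)
{
        struct device *dev = &adapter->vdev->dev;
        long rc;

        if (do_h_free) {
                /* retry while the hypervisor reports the queue busy */
                do {
                        rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
                                                adapter->vdev->unit_address,
                                                scrq->crq_num);
                } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

                if (rc)
                        netdev_err(adapter->netdev,
                                   "Failed to release sub-CRQ %16lx, rc = %ld\n",
                                   scrq->crq_num, rc);
        }

        /* indirect-descriptor buffer first, then the message pages */
        dma_free_coherent(dev, IBMVNIC_IND_ARR_SZ,
                          scrq->ind_buf.indir_arr,
                          scrq->ind_buf.indir_dma);
        dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
                         DMA_BIDIRECTIONAL);
        free_pages((unsigned long)scrq->msgs, 2);   /* order 2 = 4 pages */
        free_cpumask_var(scrq->affinity_mask);
        kfree(scrq);
}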
4039 struct ibmvnic_sub_crq_queue *scrq; in init_sub_crq_queue() local
4042 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL); in init_sub_crq_queue()
4043 if (!scrq) in init_sub_crq_queue()
4046 scrq->msgs = in init_sub_crq_queue()
4048 if (!scrq->msgs) { in init_sub_crq_queue()
4052 if (!zalloc_cpumask_var(&scrq->affinity_mask, GFP_KERNEL)) in init_sub_crq_queue()
4055 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE, in init_sub_crq_queue()
4057 if (dma_mapping_error(dev, scrq->msg_token)) { in init_sub_crq_queue()
4062 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, in init_sub_crq_queue()
4063 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq); in init_sub_crq_queue()
4075 scrq->adapter = adapter; in init_sub_crq_queue()
4076 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs); in init_sub_crq_queue()
4077 scrq->ind_buf.index = 0; in init_sub_crq_queue()
4079 scrq->ind_buf.indir_arr = in init_sub_crq_queue()
4082 &scrq->ind_buf.indir_dma, in init_sub_crq_queue()
4085 if (!scrq->ind_buf.indir_arr) in init_sub_crq_queue()
4088 spin_lock_init(&scrq->lock); in init_sub_crq_queue()
4092 scrq->crq_num, scrq->hw_irq, scrq->irq); in init_sub_crq_queue()
4094 return scrq; in init_sub_crq_queue()
4100 scrq->crq_num); in init_sub_crq_queue()
4103 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, in init_sub_crq_queue()
4106 free_cpumask_var(scrq->affinity_mask); in init_sub_crq_queue()
4108 free_pages((unsigned long)scrq->msgs, 2); in init_sub_crq_queue()
4110 kfree(scrq); in init_sub_crq_queue()
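init_sub_crq_queue() explains the recurring constants: the queue lives in an order-2 page block (hence 4 * PAGE_SIZE everywhere and free_pages(..., 2)), is DMA-mapped as one region, and is registered via h_reg_sub_crq(), which hands back crq_num and hw_irq. The trailing matched lines are the error-unwind path. A condensed sketch; the label names and the simplified rc handling are assumptions:

static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter *adapter)
{
        struct device *dev = &adapter->vdev->dev;
        struct ibmvnic_sub_crq_queue *scrq;
        int rc;

        scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
        if (!scrq)
                return NULL;

        /* order-2 allocation: four zeroed pages of sub-CRQ descriptors */
        scrq->msgs = (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
        if (!scrq->msgs)
                goto zero_page_failed;

        if (!zalloc_cpumask_var(&scrq->affinity_mask, GFP_KERNEL))
                goto cpumask_failed;

        scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
                                         DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, scrq->msg_token))
                goto map_failed;

        /* the hypervisor returns the queue number and interrupt source */
        rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
                           4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
        if (rc)
                goto reg_failed;

        scrq->adapter = adapter;
        scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
        scrq->ind_buf.index = 0;
        scrq->ind_buf.indir_arr =
                dma_alloc_coherent(dev, IBMVNIC_IND_ARR_SZ,
                                   &scrq->ind_buf.indir_dma, GFP_KERNEL);
        if (!scrq->ind_buf.indir_arr)
                goto indir_failed;

        spin_lock_init(&scrq->lock);
        return scrq;

indir_failed:
        plpar_hcall_norets(H_FREE_SUB_CRQ, adapter->vdev->unit_address,
                           scrq->crq_num);
reg_failed:
        dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
                         DMA_BIDIRECTIONAL);
map_failed:
        free_cpumask_var(scrq->affinity_mask);
cpumask_failed:
        free_pages((unsigned long)scrq->msgs, 2);
zero_page_failed:
        kfree(scrq);
        return NULL;
}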
4175 struct ibmvnic_sub_crq_queue *scrq) in disable_scrq_irq() argument
4181 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); in disable_scrq_irq()
4184 scrq->hw_irq, rc); in disable_scrq_irq()
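disable_scrq_irq() does not touch the Linux IRQ at all; it asks the hypervisor to quiesce the interrupt source through an H_VIOCTL hypercall. A sketch built directly from the matched lines:

static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
                            struct ibmvnic_sub_crq_queue *scrq)
{
        struct device *dev = &adapter->vdev->dev;
        unsigned long rc;

        rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
                                H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
        if (rc)
                dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
                        scrq->hw_irq, rc);
        return rc;
}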
4191 static void ibmvnic_xics_eoi(struct device *dev, struct ibmvnic_sub_crq_queue *scrq) in ibmvnic_xics_eoi() argument
4193 u64 val = 0xff000000 | scrq->hw_irq; in ibmvnic_xics_eoi()
4206 struct ibmvnic_sub_crq_queue *scrq) in ibmvnic_clear_pending_interrupt() argument
4209 ibmvnic_xics_eoi(dev, scrq); in ibmvnic_clear_pending_interrupt()
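The 0xff000000 | scrq->hw_irq value is an XICS XIRR word: the 0xff priority byte placed above the interrupt source number, handed to the H_EOI hypercall. ibmvnic_clear_pending_interrupt() only needs this manual EOI on XICS controllers. A sketch of both helpers, with the xive_enabled() guard and the error report assumed:

static void ibmvnic_xics_eoi(struct device *dev,
                             struct ibmvnic_sub_crq_queue *scrq)
{
        /* XIRR: priority byte 0xff on top of the interrupt source */
        u64 val = 0xff000000 | scrq->hw_irq;
        unsigned long rc = plpar_hcall_norets(H_EOI, val);

        if (rc)
                dev_err(dev, "H_EOI failed, irq 0x%llx rc=%ld\n", val, rc);
}

static void ibmvnic_clear_pending_interrupt(struct device *dev,
                                            struct ibmvnic_sub_crq_queue *scrq)
{
        /* assumed: XIVE systems need no manual EOI, XICS systems do */
        if (!xive_enabled())
                ibmvnic_xics_eoi(dev, scrq);
}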
4213 struct ibmvnic_sub_crq_queue *scrq) in enable_scrq_irq() argument
4218 if (scrq->hw_irq > 0x100000000ULL) { in enable_scrq_irq()
4219 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq); in enable_scrq_irq()
4225 ibmvnic_clear_pending_interrupt(dev, scrq); in enable_scrq_irq()
4229 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); in enable_scrq_irq()
4232 scrq->hw_irq, rc); in enable_scrq_irq()
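enable_scrq_irq() mirrors the disable path: sanity-check that hw_irq fits the hypervisor's range, optionally clear a stale pending interrupt, then re-enable the source with H_VIOCTL. A sketch; the reset/mobility condition guarding the clear is an assumption from context (a stale interrupt can be left pending across a partition migration):

static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
                           struct ibmvnic_sub_crq_queue *scrq)
{
        struct device *dev = &adapter->vdev->dev;
        unsigned long rc;

        if (scrq->hw_irq > 0x100000000ULL) {
                dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
                return 1;
        }

        /* assumed: clear only when resetting after a migration */
        if (test_bit(0, &adapter->resetting) &&
            adapter->reset_reason == VNIC_RESET_MOBILITY)
                ibmvnic_clear_pending_interrupt(dev, scrq);

        rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
                                H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
        if (rc)
                dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
                        scrq->hw_irq, rc);
        return rc;
}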
4237 struct ibmvnic_sub_crq_queue *scrq) in ibmvnic_complete_tx() argument
4248 while (pending_scrq(adapter, scrq)) { in ibmvnic_complete_tx()
4249 unsigned int pool = scrq->pool_index; in ibmvnic_complete_tx()
4254 next = ibmvnic_next_scrq(adapter, scrq); in ibmvnic_complete_tx()
4289 txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index); in ibmvnic_complete_tx()
4292 if (atomic_sub_return(num_entries, &scrq->used) <= in ibmvnic_complete_tx()
4295 scrq->pool_index)) { in ibmvnic_complete_tx()
4299 scrq->pool_index); in ibmvnic_complete_tx()
4302 scrq->pool_index); in ibmvnic_complete_tx()
4308 enable_scrq_irq(adapter, scrq); in ibmvnic_complete_tx()
4310 if (pending_scrq(adapter, scrq)) { in ibmvnic_complete_tx()
4311 disable_scrq_irq(adapter, scrq); in ibmvnic_complete_tx()
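ibmvnic_complete_tx() is the TX reaping loop visible in the matched lines: drain valid completion descriptors, wake the stack's queue once the scrq has drained past the halfway threshold, then re-enable the interrupt and re-check for a completion that raced with the re-enable. A condensed sketch; the per-skb bookkeeping inside the loop is elided, and the BQL call and wake helper are assumptions:

static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
                               struct ibmvnic_sub_crq_queue *scrq)
{
        int num_entries = 0, num_packets = 0, total_bytes = 0;
        struct netdev_queue *txq;
        union sub_crq *next;

restart_loop:
        while (pending_scrq(adapter, scrq)) {
                next = ibmvnic_next_scrq(adapter, scrq);
                /* (elided) free the completed skbs of
                 * adapter->tx_pool[scrq->pool_index] named by *next and
                 * accumulate num_entries/num_packets/total_bytes
                 */
                next->tx_comp.first = 0;       /* mark entry consumed */
        }

        /* assumed BQL accounting, then wake the queue on half-drain */
        txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index);
        netdev_tx_completed_queue(txq, num_packets, total_bytes);
        if (atomic_sub_return(num_entries, &scrq->used) <=
            (adapter->req_tx_entries_per_subcrq / 2) &&
            __netif_subqueue_stopped(adapter->netdev, scrq->pool_index))
                netif_wake_subqueue(adapter->netdev, scrq->pool_index);

        enable_scrq_irq(adapter, scrq);

        /* a completion that slipped in before the re-enable would stall
         * until the next interrupt; re-check and loop if one is pending
         */
        if (pending_scrq(adapter, scrq)) {
                disable_scrq_irq(adapter, scrq);
                goto restart_loop;
        }

        return 0;
}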
4320 struct ibmvnic_sub_crq_queue *scrq = instance; in ibmvnic_interrupt_tx() local
4321 struct ibmvnic_adapter *adapter = scrq->adapter; in ibmvnic_interrupt_tx()
4323 disable_scrq_irq(adapter, scrq); in ibmvnic_interrupt_tx()
4324 ibmvnic_complete_tx(adapter, scrq); in ibmvnic_interrupt_tx()
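The TX interrupt handler is the two matched calls and nothing else: mask the source, then reap completions directly in hard-IRQ context. A sketch:

static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
{
        struct ibmvnic_sub_crq_queue *scrq = instance;
        struct ibmvnic_adapter *adapter = scrq->adapter;

        disable_scrq_irq(adapter, scrq);
        ibmvnic_complete_tx(adapter, scrq);

        return IRQ_HANDLED;
}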
4331 struct ibmvnic_sub_crq_queue *scrq = instance; in ibmvnic_interrupt_rx() local
4332 struct ibmvnic_adapter *adapter = scrq->adapter; in ibmvnic_interrupt_rx()
4340 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++; in ibmvnic_interrupt_rx()
4342 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) { in ibmvnic_interrupt_rx()
4343 disable_scrq_irq(adapter, scrq); in ibmvnic_interrupt_rx()
4344 __napi_schedule(&adapter->napi[scrq->scrq_num]); in ibmvnic_interrupt_rx()
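The RX handler follows the standard NAPI pattern: count the interrupt in the per-queue stats, and if a poll is not already scheduled, mask the source and hand the queue to NAPI (the poll routine re-enables the IRQ when done). A sketch; the early-exit guard is an assumption for interrupts arriving before the adapter is fully up:

static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
{
        struct ibmvnic_sub_crq_queue *scrq = instance;
        struct ibmvnic_adapter *adapter = scrq->adapter;

        /* assumed guard: an irq can fire before the device is open */
        if (unlikely(adapter->state != VNIC_OPEN))
                return IRQ_NONE;

        adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;

        if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
                disable_scrq_irq(adapter, scrq);
                __napi_schedule(&adapter->napi[scrq->scrq_num]);
        }

        return IRQ_HANDLED;
}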
4353 struct ibmvnic_sub_crq_queue *scrq; in init_sub_crq_irqs() local
4360 scrq = adapter->tx_scrq[i]; in init_sub_crq_irqs()
4361 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); in init_sub_crq_irqs()
4363 if (!scrq->irq) { in init_sub_crq_irqs()
4369 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d", in init_sub_crq_irqs()
4371 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx, in init_sub_crq_irqs()
4372 0, scrq->name, scrq); in init_sub_crq_irqs()
4376 scrq->irq, rc); in init_sub_crq_irqs()
4377 irq_dispose_mapping(scrq->irq); in init_sub_crq_irqs()
4385 scrq = adapter->rx_scrq[i]; in init_sub_crq_irqs()
4386 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); in init_sub_crq_irqs()
4387 if (!scrq->irq) { in init_sub_crq_irqs()
4392 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d", in init_sub_crq_irqs()
4394 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx, in init_sub_crq_irqs()
4395 0, scrq->name, scrq); in init_sub_crq_irqs()
4398 scrq->irq, rc); in init_sub_crq_irqs()
4399 irq_dispose_mapping(scrq->irq); in init_sub_crq_irqs()
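init_sub_crq_irqs() wires each queue's hypervisor interrupt source into Linux: irq_create_mapping(NULL, scrq->hw_irq) turns the hardware number into a virq, and request_irq() attaches the TX or RX handler under an ibmvnic-<unit>-tx<n> / -rx<n> name (visible in /proc/interrupts). A sketch of the TX loop; the RX loop is identical apart from handler and name, and the unwind path is elided:

static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
{
        struct device *dev = &adapter->vdev->dev;
        struct ibmvnic_sub_crq_queue *scrq;
        int i, rc = 0;

        for (i = 0; i < adapter->req_tx_queues; i++) {
                scrq = adapter->tx_scrq[i];
                scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
                if (!scrq->irq) {
                        rc = -EINVAL;
                        dev_err(dev, "Error mapping irq\n");
                        goto req_tx_irq_failed;
                }

                snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
                         adapter->vdev->unit_address, i);
                rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
                                 0, scrq->name, scrq);
                if (rc) {
                        dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
                                scrq->irq, rc);
                        irq_dispose_mapping(scrq->irq);
                        goto req_tx_irq_failed;
                }
        }

        /* (elided) rx loop: same shape with ibmvnic_interrupt_rx and
         * the "ibmvnic-%x-rx%d" name, unwinding tx irqs on failure
         */
        return 0;

req_tx_irq_failed:
        /* (elided, assumed) free_irq() + irq_dispose_mapping() for the
         * tx queues that were already wired up
         */
        return rc;
}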
4658 struct ibmvnic_sub_crq_queue *scrq) in pending_scrq() argument
4660 union sub_crq *entry = &scrq->msgs[scrq->cur]; in pending_scrq()
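pending_scrq() just peeks at the descriptor under scrq->cur and tests its valid bit, without consuming it. A sketch assuming the driver's IBMVNIC_CRQ_CMD_RSP flag and the read barrier that orders the flag load before any payload loads:

static int pending_scrq(struct ibmvnic_adapter *adapter,
                        struct ibmvnic_sub_crq_queue *scrq)
{
        union sub_crq *entry = &scrq->msgs[scrq->cur];
        int rc;

        rc = !!(entry->generic.first & IBMVNIC_CRQ_CMD_RSP);

        /* make sure the valid flag is read before the entry payload */
        dma_rmb();

        return rc;
}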
4674 struct ibmvnic_sub_crq_queue *scrq) in ibmvnic_next_scrq() argument
4679 spin_lock_irqsave(&scrq->lock, flags); in ibmvnic_next_scrq()
4680 entry = &scrq->msgs[scrq->cur]; in ibmvnic_next_scrq()
4682 if (++scrq->cur == scrq->size) in ibmvnic_next_scrq()
4683 scrq->cur = 0; in ibmvnic_next_scrq()
4687 spin_unlock_irqrestore(&scrq->lock, flags); in ibmvnic_next_scrq()
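ibmvnic_next_scrq() is the consuming counterpart: under the queue lock it returns the current entry if valid and advances scrq->cur with wraparound at scrq->size, otherwise NULL. A sketch along the same assumptions as pending_scrq() above:

static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
                                        struct ibmvnic_sub_crq_queue *scrq)
{
        union sub_crq *entry;
        unsigned long flags;

        spin_lock_irqsave(&scrq->lock, flags);
        entry = &scrq->msgs[scrq->cur];
        if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
                /* consume: advance and wrap the cursor */
                if (++scrq->cur == scrq->size)
                        scrq->cur = 0;
        } else {
                entry = NULL;
        }
        spin_unlock_irqrestore(&scrq->lock, flags);

        /* order the valid-flag load before the caller reads the entry */
        dma_rmb();

        return entry;
}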