Lines matching refs: scrq

3871 				   struct ibmvnic_sub_crq_queue *scrq)  in reset_one_sub_crq_queue()  argument
3875 if (!scrq) { in reset_one_sub_crq_queue()
3880 if (scrq->irq) { in reset_one_sub_crq_queue()
3881 free_irq(scrq->irq, scrq); in reset_one_sub_crq_queue()
3882 irq_dispose_mapping(scrq->irq); in reset_one_sub_crq_queue()
3883 scrq->irq = 0; in reset_one_sub_crq_queue()
3886 if (scrq->msgs) { in reset_one_sub_crq_queue()
3887 memset(scrq->msgs, 0, 4 * PAGE_SIZE); in reset_one_sub_crq_queue()
3888 atomic_set(&scrq->used, 0); in reset_one_sub_crq_queue()
3889 scrq->cur = 0; in reset_one_sub_crq_queue()
3890 scrq->ind_buf.index = 0; in reset_one_sub_crq_queue()
3896 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, in reset_one_sub_crq_queue()
3897 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq); in reset_one_sub_crq_queue()
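The lines above trace the reset path for one sub-CRQ: any mapped IRQ is freed and its mapping disposed, the 4-page message area is zeroed, the bookkeeping fields (`used`, `cur`, `ind_buf.index`) are reset, and the queue is re-registered with the hypervisor via `h_reg_sub_crq`. Below is a minimal user-space sketch of that sequence; the IRQ and hypervisor helpers are hypothetical stand-ins, not the real kernel interfaces.

```c
/* Hypothetical sketch of the reset flow suggested by the lines above;
 * free_irq_stub() and h_reg_sub_crq_stub() are stand-ins, not the real
 * kernel/hypervisor calls. */
#include <stdatomic.h>
#include <string.h>
#include <stddef.h>

#define PAGE_SIZE 4096

struct scrq_sketch {
	unsigned int irq;       /* Linux IRQ number, 0 if unmapped */
	char *msgs;             /* 4 pages of queue entries */
	atomic_int used;        /* entries currently outstanding */
	unsigned int cur;       /* consumer cursor */
	unsigned int ind_index; /* indirect-descriptor buffer index */
};

static void free_irq_stub(unsigned int irq) { (void)irq; }
static int h_reg_sub_crq_stub(void *token, size_t len) { (void)token; (void)len; return 0; }

static int reset_one_sub_crq_sketch(struct scrq_sketch *scrq)
{
	if (!scrq)
		return -1;

	if (scrq->irq) {                    /* drop the old interrupt */
		free_irq_stub(scrq->irq);
		scrq->irq = 0;
	}

	if (scrq->msgs) {                   /* wipe the 4-page ring */
		memset(scrq->msgs, 0, 4 * PAGE_SIZE);
		atomic_store(&scrq->used, 0);
		scrq->cur = 0;
		scrq->ind_index = 0;
	}

	/* re-register the zeroed queue with the "hypervisor" */
	return h_reg_sub_crq_stub(scrq->msgs, 4 * PAGE_SIZE);
}
```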
3928 struct ibmvnic_sub_crq_queue *scrq, in release_sub_crq_queue() argument
3941 scrq->crq_num); in release_sub_crq_queue()
3947 scrq->crq_num, rc); in release_sub_crq_queue()
3953 scrq->ind_buf.indir_arr, in release_sub_crq_queue()
3954 scrq->ind_buf.indir_dma); in release_sub_crq_queue()
3956 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, in release_sub_crq_queue()
3958 free_pages((unsigned long)scrq->msgs, 2); in release_sub_crq_queue()
3959 free_cpumask_var(scrq->affinity_mask); in release_sub_crq_queue()
3960 kfree(scrq); in release_sub_crq_queue()
3967 struct ibmvnic_sub_crq_queue *scrq; in init_sub_crq_queue() local
3970 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL); in init_sub_crq_queue()
3971 if (!scrq) in init_sub_crq_queue()
3974 scrq->msgs = in init_sub_crq_queue()
3976 if (!scrq->msgs) { in init_sub_crq_queue()
3980 if (!zalloc_cpumask_var(&scrq->affinity_mask, GFP_KERNEL)) in init_sub_crq_queue()
3983 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE, in init_sub_crq_queue()
3985 if (dma_mapping_error(dev, scrq->msg_token)) { in init_sub_crq_queue()
3990 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, in init_sub_crq_queue()
3991 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq); in init_sub_crq_queue()
4003 scrq->adapter = adapter; in init_sub_crq_queue()
4004 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs); in init_sub_crq_queue()
4005 scrq->ind_buf.index = 0; in init_sub_crq_queue()
4007 scrq->ind_buf.indir_arr = in init_sub_crq_queue()
4010 &scrq->ind_buf.indir_dma, in init_sub_crq_queue()
4013 if (!scrq->ind_buf.indir_arr) in init_sub_crq_queue()
4016 spin_lock_init(&scrq->lock); in init_sub_crq_queue()
4020 scrq->crq_num, scrq->hw_irq, scrq->irq); in init_sub_crq_queue()
4022 return scrq; in init_sub_crq_queue()
4028 scrq->crq_num); in init_sub_crq_queue()
4031 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, in init_sub_crq_queue()
4034 free_cpumask_var(scrq->affinity_mask); in init_sub_crq_queue()
4036 free_pages((unsigned long)scrq->msgs, 2); in init_sub_crq_queue()
4038 kfree(scrq); in init_sub_crq_queue()
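Taken together, `init_sub_crq_queue()` and `release_sub_crq_queue()` form the allocate/teardown pair: a zeroed queue struct, an order-2 (4-page) message buffer that is DMA-mapped and registered with the hypervisor, a ring size derived as `4 * PAGE_SIZE / sizeof(*scrq->msgs)`, an indirect-descriptor buffer, and a spinlock; release (and the error-unwind labels at the end of init) undoes these in reverse order. A user-space sketch of that lifecycle follows; `malloc`/`free` stand in for page allocation, DMA mapping and the registration hcalls, and the indirect-buffer depth is an assumed value.

```c
/* Hypothetical sketch of the allocate/release pairing; plain heap
 * allocations stand in for get_free_pages(), dma_map_single() and the
 * sub-CRQ registration hcalls. */
#include <stdlib.h>

#define PAGE_SIZE   4096
#define IND_ENTRIES 16                        /* assumed indirect-buffer depth */

union sub_crq_sketch { unsigned char raw[32]; };  /* assumed entry size */

struct scrq_sketch {
	union sub_crq_sketch *msgs;           /* 4 contiguous pages of entries */
	unsigned int size;                    /* number of entries in the ring */
	union sub_crq_sketch *indir_arr;      /* indirect-descriptor buffer */
	unsigned int ind_index;
};

static struct scrq_sketch *init_sub_crq_sketch(void)
{
	struct scrq_sketch *scrq = calloc(1, sizeof(*scrq));
	if (!scrq)
		return NULL;

	/* order-2 allocation in the driver: 4 pages of messages */
	scrq->msgs = calloc(1, 4 * PAGE_SIZE);
	if (!scrq->msgs)
		goto free_scrq;

	/* ring capacity follows directly from the buffer size */
	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);

	scrq->indir_arr = calloc(IND_ENTRIES, sizeof(*scrq->indir_arr));
	if (!scrq->indir_arr)
		goto free_msgs;
	scrq->ind_index = 0;

	return scrq;

free_msgs:
	free(scrq->msgs);
free_scrq:
	free(scrq);
	return NULL;
}

static void release_sub_crq_sketch(struct scrq_sketch *scrq)
{
	/* teardown mirrors allocation in reverse order */
	free(scrq->indir_arr);
	free(scrq->msgs);
	free(scrq);
}
```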
4103 struct ibmvnic_sub_crq_queue *scrq) in disable_scrq_irq() argument
4109 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); in disable_scrq_irq()
4112 scrq->hw_irq, rc); in disable_scrq_irq()
4119 static void ibmvnic_xics_eoi(struct device *dev, struct ibmvnic_sub_crq_queue *scrq) in ibmvnic_xics_eoi() argument
4121 u64 val = 0xff000000 | scrq->hw_irq; in ibmvnic_xics_eoi()
4134 struct ibmvnic_sub_crq_queue *scrq) in ibmvnic_clear_pending_interrupt() argument
4137 ibmvnic_xics_eoi(dev, scrq); in ibmvnic_clear_pending_interrupt()
4141 struct ibmvnic_sub_crq_queue *scrq) in enable_scrq_irq() argument
4146 if (scrq->hw_irq > 0x100000000ULL) { in enable_scrq_irq()
4147 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq); in enable_scrq_irq()
4153 ibmvnic_clear_pending_interrupt(dev, scrq); in enable_scrq_irq()
4157 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); in enable_scrq_irq()
4160 scrq->hw_irq, rc); in enable_scrq_irq()
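`disable_scrq_irq()` and `enable_scrq_irq()` funnel into the same hypervisor call, passing `H_DISABLE_VIO_INTERRUPT` or `H_ENABLE_VIO_INTERRUPT` together with the queue's `hw_irq`; the enable path additionally rejects out-of-range sources (`hw_irq > 0x100000000ULL`) and clears any interrupt that became pending while the source was masked, which on XICS is done by writing an EOI of `0xff000000 | hw_irq`. A tiny sketch of that shared pattern, with the hcall replaced by a hypothetical stub:

```c
/* Hypothetical sketch of the enable/disable pairing around a single
 * "hypervisor" call; h_vioctl_stub() is a stand-in, not the real hcall. */
#include <stdint.h>
#include <stdio.h>

enum vio_irq_op { DISABLE_VIO_INTERRUPT, ENABLE_VIO_INTERRUPT };

static int h_vioctl_stub(enum vio_irq_op op, uint64_t hw_irq)
{
	printf("%s irq %#llx\n",
	       op == ENABLE_VIO_INTERRUPT ? "enable" : "disable",
	       (unsigned long long)hw_irq);
	return 0;
}

static int scrq_irq_ctl(uint64_t hw_irq, enum vio_irq_op op)
{
	/* the driver rejects out-of-range sources before enabling */
	if (op == ENABLE_VIO_INTERRUPT && hw_irq > 0x100000000ULL)
		return -1;

	/* on XICS the driver first EOIs a possibly-pending interrupt by
	 * writing 0xff000000 | hw_irq to the interrupt controller */

	return h_vioctl_stub(op, hw_irq);
}
```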
4165 struct ibmvnic_sub_crq_queue *scrq) in ibmvnic_complete_tx() argument
4176 while (pending_scrq(adapter, scrq)) { in ibmvnic_complete_tx()
4177 unsigned int pool = scrq->pool_index; in ibmvnic_complete_tx()
4182 next = ibmvnic_next_scrq(adapter, scrq); in ibmvnic_complete_tx()
4217 txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index); in ibmvnic_complete_tx()
4220 if (atomic_sub_return(num_entries, &scrq->used) <= in ibmvnic_complete_tx()
4223 scrq->pool_index)) { in ibmvnic_complete_tx()
4227 scrq->pool_index); in ibmvnic_complete_tx()
4230 scrq->pool_index); in ibmvnic_complete_tx()
4236 enable_scrq_irq(adapter, scrq); in ibmvnic_complete_tx()
4238 if (pending_scrq(adapter, scrq)) { in ibmvnic_complete_tx()
4239 disable_scrq_irq(adapter, scrq); in ibmvnic_complete_tx()
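`ibmvnic_complete_tx()` drains completions while `pending_scrq()` reports work, subtracts the completed descriptor count from `scrq->used` with `atomic_sub_return()`, and wakes the matching netdev TX queue once enough ring space is free; before returning it re-enables the interrupt and re-checks `pending_scrq()`, masking again if a completion slipped in between the last poll and the enable. A compact sketch of that accounting and re-check; the wake threshold, netif state and the pending/next helpers are hypothetical stand-ins.

```c
/* Hypothetical sketch of the TX-completion accounting; the wake
 * threshold and the pending()/next() helpers are stand-ins. */
#include <stdatomic.h>
#include <stdbool.h>

#define WAKE_THRESHOLD 16            /* assumed ring-space watermark */

struct txq_sketch {
	atomic_int used;             /* descriptors outstanding */
	bool stopped;                /* netif subqueue state */
	bool irq_enabled;
};

static bool pending_stub(struct txq_sketch *q) { return atomic_load(&q->used) > 0; }
static int  next_completion_stub(struct txq_sketch *q) { (void)q; return 1; }

static void complete_tx_sketch(struct txq_sketch *q)
{
	while (pending_stub(q)) {
		int num_entries = next_completion_stub(q);

		/* release ring space; wake the queue once enough is free
		 * (atomic_fetch_sub returns the old value, so subtract
		 * again to get the new count, like atomic_sub_return) */
		if (atomic_fetch_sub(&q->used, num_entries) - num_entries
		    <= WAKE_THRESHOLD && q->stopped)
			q->stopped = false;   /* netif_wake_subqueue() in the driver */
	}

	/* re-enable the interrupt, then re-check to close the race where a
	 * completion landed after the loop but before the enable */
	q->irq_enabled = true;
	if (pending_stub(q))
		q->irq_enabled = false;       /* disable and poll once more */
}
```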
4248 struct ibmvnic_sub_crq_queue *scrq = instance; in ibmvnic_interrupt_tx() local
4249 struct ibmvnic_adapter *adapter = scrq->adapter; in ibmvnic_interrupt_tx()
4251 disable_scrq_irq(adapter, scrq); in ibmvnic_interrupt_tx()
4252 ibmvnic_complete_tx(adapter, scrq); in ibmvnic_interrupt_tx()
4259 struct ibmvnic_sub_crq_queue *scrq = instance; in ibmvnic_interrupt_rx() local
4260 struct ibmvnic_adapter *adapter = scrq->adapter; in ibmvnic_interrupt_rx()
4268 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++; in ibmvnic_interrupt_rx()
4270 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) { in ibmvnic_interrupt_rx()
4271 disable_scrq_irq(adapter, scrq); in ibmvnic_interrupt_rx()
4272 __napi_schedule(&adapter->napi[scrq->scrq_num]); in ibmvnic_interrupt_rx()
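The two interrupt handlers share one shape: the TX handler masks its own queue's interrupt and drains completions inline via `ibmvnic_complete_tx()`, while the RX handler bumps the per-queue interrupt counter and, only if it wins `napi_schedule_prep()`, masks the interrupt and hands the queue to NAPI polling. A minimal sketch of that split, with NAPI reduced to a hypothetical "schedule once" flag:

```c
/* Hypothetical sketch of the TX/RX interrupt split; napi_try_schedule()
 * is a stand-in for napi_schedule_prep()/__napi_schedule(). */
#include <stdatomic.h>
#include <stdbool.h>

struct irq_ctx_sketch {
	bool irq_enabled;
	atomic_long interrupts;          /* per-queue interrupt counter */
	atomic_bool napi_scheduled;
};

static void complete_tx_stub(struct irq_ctx_sketch *q) { (void)q; }

static bool napi_try_schedule(struct irq_ctx_sketch *q)
{
	bool expected = false;
	return atomic_compare_exchange_strong(&q->napi_scheduled, &expected, true);
}

static void interrupt_tx_sketch(struct irq_ctx_sketch *q)
{
	q->irq_enabled = false;          /* disable_scrq_irq() */
	complete_tx_stub(q);             /* drain completions inline */
}

static void interrupt_rx_sketch(struct irq_ctx_sketch *q)
{
	atomic_fetch_add(&q->interrupts, 1);

	/* only mask the source if we actually won the right to poll */
	if (napi_try_schedule(q))
		q->irq_enabled = false;  /* __napi_schedule() runs the poll loop */
}
```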
4281 struct ibmvnic_sub_crq_queue *scrq; in init_sub_crq_irqs() local
4288 scrq = adapter->tx_scrq[i]; in init_sub_crq_irqs()
4289 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); in init_sub_crq_irqs()
4291 if (!scrq->irq) { in init_sub_crq_irqs()
4297 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d", in init_sub_crq_irqs()
4299 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx, in init_sub_crq_irqs()
4300 0, scrq->name, scrq); in init_sub_crq_irqs()
4304 scrq->irq, rc); in init_sub_crq_irqs()
4305 irq_dispose_mapping(scrq->irq); in init_sub_crq_irqs()
4313 scrq = adapter->rx_scrq[i]; in init_sub_crq_irqs()
4314 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); in init_sub_crq_irqs()
4315 if (!scrq->irq) { in init_sub_crq_irqs()
4320 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d", in init_sub_crq_irqs()
4322 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx, in init_sub_crq_irqs()
4323 0, scrq->name, scrq); in init_sub_crq_irqs()
4326 scrq->irq, rc); in init_sub_crq_irqs()
4327 irq_dispose_mapping(scrq->irq); in init_sub_crq_irqs()
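`init_sub_crq_irqs()` walks the TX queues and then the RX queues, mapping each queue's hardware interrupt source to a Linux IRQ with `irq_create_mapping()`, formatting a per-queue name ("ibmvnic-%x-tx%d" / "-rx%d") and requesting the IRQ with the matching handler; if `request_irq()` fails, the fresh mapping is disposed again. A small sketch of that naming-and-request loop, with the IRQ calls and queue count replaced by hypothetical stand-ins:

```c
/* Hypothetical sketch of the per-queue IRQ setup loop; map_stub(),
 * request_stub() and dispose_stub() stand in for irq_create_mapping(),
 * request_irq() and irq_dispose_mapping(). */
#include <stdio.h>

#define NUM_TX       4                 /* assumed queue count */
#define IRQ_NAME_LEN 32

struct scrq_irq_sketch {
	unsigned long hw_irq;
	unsigned int irq;
	char name[IRQ_NAME_LEN];
};

static unsigned int map_stub(unsigned long hw_irq) { return (unsigned int)hw_irq; }
static int request_stub(unsigned int irq, const char *name) { (void)irq; (void)name; return 0; }
static void dispose_stub(unsigned int irq) { (void)irq; }

static int init_irqs_sketch(struct scrq_irq_sketch *tx, unsigned int unit)
{
	for (int i = 0; i < NUM_TX; i++) {
		tx[i].irq = map_stub(tx[i].hw_irq);
		if (!tx[i].irq)
			return -1;

		/* one name per queue so /proc/interrupts stays readable */
		snprintf(tx[i].name, sizeof(tx[i].name), "ibmvnic-%x-tx%d", unit, i);

		if (request_stub(tx[i].irq, tx[i].name)) {
			dispose_stub(tx[i].irq);   /* unwind the mapping on failure */
			return -1;
		}
	}
	/* the RX loop in the driver is identical apart from the handler
	 * and the "-rx%d" suffix */
	return 0;
}
```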
4586 struct ibmvnic_sub_crq_queue *scrq) in pending_scrq() argument
4588 union sub_crq *entry = &scrq->msgs[scrq->cur]; in pending_scrq()
4602 struct ibmvnic_sub_crq_queue *scrq) in ibmvnic_next_scrq() argument
4607 spin_lock_irqsave(&scrq->lock, flags); in ibmvnic_next_scrq()
4608 entry = &scrq->msgs[scrq->cur]; in ibmvnic_next_scrq()
4610 if (++scrq->cur == scrq->size) in ibmvnic_next_scrq()
4611 scrq->cur = 0; in ibmvnic_next_scrq()
4615 spin_unlock_irqrestore(&scrq->lock, flags); in ibmvnic_next_scrq()
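Finally, `pending_scrq()` peeks at the entry under `scrq->cur` to see whether a valid completion is waiting, while `ibmvnic_next_scrq()` consumes that entry under `scrq->lock` and advances the cursor, wrapping to 0 when it reaches `scrq->size`. A self-contained sketch of that circular-cursor discipline; the entry's valid flag is reduced to a plain byte and a pthread mutex stands in for the spinlock.

```c
/* Hypothetical sketch of the peek/consume ring cursor; the "valid"
 * byte stands in for the sub-CRQ entry's owner bit. */
#include <pthread.h>
#include <stdbool.h>

struct entry_sketch { unsigned char valid; unsigned char data[31]; };

struct ring_sketch {
	struct entry_sketch *msgs;
	unsigned int size;               /* number of entries */
	unsigned int cur;                /* consumer cursor */
	pthread_mutex_t lock;
};

/* peek: is there a valid entry waiting at the cursor? */
static bool ring_pending(struct ring_sketch *r)
{
	return r->msgs[r->cur].valid;
}

/* consume: take the entry under the lock and advance, wrapping at size */
static struct entry_sketch *ring_next(struct ring_sketch *r)
{
	struct entry_sketch *e;

	pthread_mutex_lock(&r->lock);
	e = &r->msgs[r->cur];
	if (++r->cur == r->size)
		r->cur = 0;
	pthread_mutex_unlock(&r->lock);

	return e;
}
```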