Lines Matching +full:rcu +full:- +full:big +full:- +full:endian +full:- +full:mask
1 // SPDX-License-Identifier: GPL-2.0-or-later
26 /* In lieu of a more conventional "on-the-fly" DMA mapping strategy in */
41 #include <linux/dma-mapping.h>
182 if (!(queue && queue->irq)) in ibmvnic_clean_queue_affinity()
185 cpumask_clear(queue->affinity_mask); in ibmvnic_clean_queue_affinity()
187 if (irq_set_affinity_and_hint(queue->irq, NULL)) in ibmvnic_clean_queue_affinity()
188 netdev_warn(adapter->netdev, in ibmvnic_clean_queue_affinity()
190 __func__, queue, queue->irq); in ibmvnic_clean_queue_affinity()
200 rxqs = adapter->rx_scrq; in ibmvnic_clean_affinity()
201 txqs = adapter->tx_scrq; in ibmvnic_clean_affinity()
202 num_txqs = adapter->num_active_tx_scrqs; in ibmvnic_clean_affinity()
203 num_rxqs = adapter->num_active_rx_scrqs; in ibmvnic_clean_affinity()
205 netdev_dbg(adapter->netdev, "%s: Cleaning irq affinity hints", __func__); in ibmvnic_clean_affinity()
220 cpumask_var_t mask; in ibmvnic_set_queue_affinity() local
224 if (!(queue && queue->irq)) in ibmvnic_set_queue_affinity()
228 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) in ibmvnic_set_queue_affinity()
229 return -ENOMEM; in ibmvnic_set_queue_affinity()
234 (*stragglers)--; in ibmvnic_set_queue_affinity()
238 cpumask_set_cpu(*cpu, mask); in ibmvnic_set_queue_affinity()
242 /* set queue affinity mask */ in ibmvnic_set_queue_affinity()
243 cpumask_copy(queue->affinity_mask, mask); in ibmvnic_set_queue_affinity()
244 rc = irq_set_affinity_and_hint(queue->irq, queue->affinity_mask); in ibmvnic_set_queue_affinity()
245 free_cpumask_var(mask); in ibmvnic_set_queue_affinity()
253 struct ibmvnic_sub_crq_queue **rxqs = adapter->rx_scrq; in ibmvnic_set_affinity()
254 struct ibmvnic_sub_crq_queue **txqs = adapter->tx_scrq; in ibmvnic_set_affinity()
256 int num_rxqs = adapter->num_active_rx_scrqs, i_rxqs = 0; in ibmvnic_set_affinity()
257 int num_txqs = adapter->num_active_tx_scrqs, i_txqs = 0; in ibmvnic_set_affinity()
263 netdev_dbg(adapter->netdev, "%s: Setting irq affinity hints", __func__); in ibmvnic_set_affinity()
264 if (!(adapter->rx_scrq && adapter->tx_scrq)) { in ibmvnic_set_affinity()
265 netdev_warn(adapter->netdev, in ibmvnic_set_affinity()
278 cpu = cpumask_next(-1, cpu_online_mask); in ibmvnic_set_affinity()
283 * ex: TX0 -> RX0 -> TX1 -> RX1 etc. in ibmvnic_set_affinity()
300 rc = __netif_set_xps_queue(adapter->netdev, in ibmvnic_set_affinity()
301 cpumask_bits(queue->affinity_mask), in ibmvnic_set_affinity()
302 i_txqs - 1, XPS_CPUS); in ibmvnic_set_affinity()
304 netdev_warn(adapter->netdev, "%s: Set XPS on queue %d failed, rc = %d.\n", in ibmvnic_set_affinity()
305 __func__, i_txqs - 1, rc); in ibmvnic_set_affinity()
310 netdev_warn(adapter->netdev, in ibmvnic_set_affinity()
312 __func__, queue, queue->irq, rc); in ibmvnic_set_affinity()
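Note: the matched lines above come from the IRQ-affinity helpers (ibmvnic_set_queue_affinity()/ibmvnic_set_affinity()), which spread the online CPUs across the TX/RX sub-CRQ interrupts, with the first few "straggler" queues taking one extra CPU when the counts do not divide evenly. The following is a hedged, userspace-only sketch of that distribution idea, not driver code; the queue and CPU counts are made up and plain integers stand in for cpumasks.

#include <stdio.h>

int main(void)
{
	int num_cpus = 10, num_queues = 4;		/* hypothetical counts */
	int cpus_per_queue = num_cpus / num_queues;
	int stragglers = num_cpus % num_queues;
	int cpu = 0;

	for (int q = 0; q < num_queues; q++) {
		/* first "stragglers" queues get one extra CPU */
		int take = cpus_per_queue + (stragglers > 0 ? 1 : 0);

		if (stragglers > 0)
			stragglers--;
		printf("queue %d ->", q);
		for (int i = 0; i < take; i++)
			printf(" cpu%d", cpu++);
		printf("\n");
	}
	return 0;
}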
350 ret = cpuhp_state_add_instance_nocalls(ibmvnic_online, &adapter->node); in ibmvnic_cpu_notif_add()
354 &adapter->node_dead); in ibmvnic_cpu_notif_add()
357 cpuhp_state_remove_instance_nocalls(ibmvnic_online, &adapter->node); in ibmvnic_cpu_notif_add()
363 cpuhp_state_remove_instance_nocalls(ibmvnic_online, &adapter->node); in ibmvnic_cpu_notif_remove()
365 &adapter->node_dead); in ibmvnic_cpu_notif_remove()
383 * ibmvnic_wait_for_completion - Check device state and wait for completion
399 netdev = adapter->netdev; in ibmvnic_wait_for_completion()
403 if (!adapter->crq.active) { in ibmvnic_wait_for_completion()
405 return -ENODEV; in ibmvnic_wait_for_completion()
407 if (!retry--) in ibmvnic_wait_for_completion()
413 return -ETIMEDOUT; in ibmvnic_wait_for_completion()
417 * reuse_ltb() - Check if a long term buffer can be reused
427 return (ltb->buff && ltb->size == size); in reuse_ltb()
431 * alloc_long_term_buff() - Allocate a long term buffer (LTB)
440 * it's non-NULL, free it. Then allocate a new one of the correct size. in alloc_long_term_buff()
454 struct device *dev = &adapter->vdev->dev; in alloc_long_term_buff()
461 ltb->size, size); in alloc_long_term_buff()
462 prev = ltb->size; in alloc_long_term_buff()
466 if (ltb->buff) { in alloc_long_term_buff()
468 ltb->map_id, ltb->size); in alloc_long_term_buff()
470 ltb->buff = dma_alloc_coherent(dev, size, &ltb->addr, in alloc_long_term_buff()
472 if (!ltb->buff) { in alloc_long_term_buff()
474 return -ENOMEM; in alloc_long_term_buff()
476 ltb->size = size; in alloc_long_term_buff()
478 ltb->map_id = find_first_zero_bit(adapter->map_ids, in alloc_long_term_buff()
480 bitmap_set(adapter->map_ids, ltb->map_id, 1); in alloc_long_term_buff()
484 ltb->map_id, ltb->size, prev); in alloc_long_term_buff()
487 /* Ensure ltb is zeroed - especially when reusing it. */ in alloc_long_term_buff()
488 memset(ltb->buff, 0, ltb->size); in alloc_long_term_buff()
490 mutex_lock(&adapter->fw_lock); in alloc_long_term_buff()
491 adapter->fw_done_rc = 0; in alloc_long_term_buff()
492 reinit_completion(&adapter->fw_done); in alloc_long_term_buff()
494 rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id); in alloc_long_term_buff()
500 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); in alloc_long_term_buff()
507 if (adapter->fw_done_rc) { in alloc_long_term_buff()
509 adapter->fw_done_rc); in alloc_long_term_buff()
510 rc = -EIO; in alloc_long_term_buff()
515 /* don't free LTB on communication error - see function header */ in alloc_long_term_buff()
516 mutex_unlock(&adapter->fw_lock); in alloc_long_term_buff()
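Note: reuse_ltb() and alloc_long_term_buff() in the matched lines above implement a "reuse or reallocate" pattern for long term buffers: keep the existing buffer if it already has the requested size, otherwise free it and allocate a fresh one, and always zero it before use. Below is a hedged userspace sketch of that pattern only, not the driver code; malloc()/free() stand in for dma_alloc_coherent()/dma_free_coherent(), and the map-id bookkeeping and VIOS map request are omitted.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct model_ltb {
	void *buff;
	size_t size;
};

/* Reuse only when a buffer exists and already has the requested size. */
static int model_reuse_ltb(struct model_ltb *ltb, size_t size)
{
	return ltb->buff && ltb->size == size;
}

static int model_alloc_ltb(struct model_ltb *ltb, size_t size)
{
	if (!model_reuse_ltb(ltb, size)) {
		free(ltb->buff);		/* drop a wrong-sized buffer, if any */
		ltb->buff = malloc(size);
		if (!ltb->buff)
			return -1;
		ltb->size = size;
	}
	memset(ltb->buff, 0, ltb->size);	/* zero even when reusing */
	return 0;
}

int main(void)
{
	struct model_ltb ltb = { 0 };

	model_alloc_ltb(&ltb, 4096);		/* first allocation */
	model_alloc_ltb(&ltb, 4096);		/* same size: reused, just re-zeroed */
	model_alloc_ltb(&ltb, 8192);		/* new size: freed and reallocated */
	printf("final size: %zu\n", ltb.size);
	free(ltb.buff);
	return 0;
}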
523 struct device *dev = &adapter->vdev->dev; in free_long_term_buff()
525 if (!ltb->buff) in free_long_term_buff()
532 if (adapter->reset_reason != VNIC_RESET_FAILOVER && in free_long_term_buff()
533 adapter->reset_reason != VNIC_RESET_MOBILITY && in free_long_term_buff()
534 adapter->reset_reason != VNIC_RESET_TIMEOUT) in free_long_term_buff()
535 send_request_unmap(adapter, ltb->map_id); in free_long_term_buff()
537 dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr); in free_long_term_buff()
539 ltb->buff = NULL; in free_long_term_buff()
541 bitmap_clear(adapter->map_ids, ltb->map_id, 1); in free_long_term_buff()
542 ltb->map_id = 0; in free_long_term_buff()
546 * free_ltb_set - free the given set of long term buffers (LTBS)
558 for (i = 0; i < ltb_set->num_ltbs; i++) in free_ltb_set()
559 free_long_term_buff(adapter, &ltb_set->ltbs[i]); in free_ltb_set()
561 kfree(ltb_set->ltbs); in free_ltb_set()
562 ltb_set->ltbs = NULL; in free_ltb_set()
563 ltb_set->num_ltbs = 0; in free_ltb_set()
567 * alloc_ltb_set() - Allocate a set of long term buffers (LTBs)
587 struct device *dev = &adapter->vdev->dev; in alloc_ltb_set()
620 return -ENOMEM; in alloc_ltb_set()
661 rem_size -= ltb_size; in alloc_ltb_set()
680 * map_rxpool_buf_to_ltb - Map given rxpool buffer to offset in an LTB.
700 WARN_ON(bufidx >= rxpool->size); in map_rxpool_buf_to_ltb()
702 for (i = 0; i < rxpool->ltb_set.num_ltbs; i++) { in map_rxpool_buf_to_ltb()
703 ltb = &rxpool->ltb_set.ltbs[i]; in map_rxpool_buf_to_ltb()
704 nbufs = ltb->size / rxpool->buff_size; in map_rxpool_buf_to_ltb()
707 bufidx -= nbufs; in map_rxpool_buf_to_ltb()
711 *offset = bufidx * rxpool->buff_size; in map_rxpool_buf_to_ltb()
715 * map_txpool_buf_to_ltb - Map given txpool buffer to offset in an LTB.
733 WARN_ON_ONCE(bufidx >= txpool->num_buffers); in map_txpool_buf_to_ltb()
735 for (i = 0; i < txpool->ltb_set.num_ltbs; i++) { in map_txpool_buf_to_ltb()
736 ltb = &txpool->ltb_set.ltbs[i]; in map_txpool_buf_to_ltb()
737 nbufs = ltb->size / txpool->buf_size; in map_txpool_buf_to_ltb()
740 bufidx -= nbufs; in map_txpool_buf_to_ltb()
744 *offset = bufidx * txpool->buf_size; in map_txpool_buf_to_ltb()
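Note: map_rxpool_buf_to_ltb() and map_txpool_buf_to_ltb() above both walk the pool's LTB set, subtracting each LTB's buffer capacity from the pool-wide buffer index until the index fits in one LTB, then turn the remainder into a byte offset. A hedged userspace sketch of just that index walk follows; the struct and sizes are hypothetical, not the driver's types.

#include <stdio.h>

struct model_ltb {
	unsigned int size;			/* bytes in this LTB */
};

/* Map a pool-wide buffer index to (LTB index, byte offset within that LTB). */
static int model_map_buf_to_ltb(const struct model_ltb *ltbs, int num_ltbs,
				unsigned int buff_size, unsigned int bufidx,
				int *ltb_idx, unsigned int *offset)
{
	for (int i = 0; i < num_ltbs; i++) {
		unsigned int nbufs = ltbs[i].size / buff_size;

		if (bufidx < nbufs) {
			*ltb_idx = i;
			*offset = bufidx * buff_size;
			return 0;
		}
		bufidx -= nbufs;		/* skip past this LTB's buffers */
	}
	return -1;				/* index beyond the pool size */
}

int main(void)
{
	struct model_ltb ltbs[] = { { 1 << 20 }, { 1 << 20 } };	/* two 1 MiB LTBs */
	int ltb_idx;
	unsigned int offset;

	if (!model_map_buf_to_ltb(ltbs, 2, 4096, 300, &ltb_idx, &offset))
		printf("buffer 300 -> LTB %d, offset %u\n", ltb_idx, offset);
	return 0;
}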
751 for (i = 0; i < adapter->num_active_rx_pools; i++) in deactivate_rx_pools()
752 adapter->rx_pool[i].active = 0; in deactivate_rx_pools()
758 int count = pool->size - atomic_read(&pool->available); in replenish_rx_pool()
759 u64 handle = adapter->rx_scrq[pool->index]->handle; in replenish_rx_pool()
760 struct device *dev = &adapter->vdev->dev; in replenish_rx_pool()
775 if (!pool->active) in replenish_rx_pool()
778 rx_scrq = adapter->rx_scrq[pool->index]; in replenish_rx_pool()
779 ind_bufp = &rx_scrq->ind_buf; in replenish_rx_pool()
783 * To account for them, start the loop at ind_bufp->index rather in replenish_rx_pool()
784 * than 0. If we pushed all the skbs to VIOS, ind_bufp->index will in replenish_rx_pool()
787 for (i = ind_bufp->index; i < count; ++i) { in replenish_rx_pool()
788 bufidx = pool->free_map[pool->next_free]; in replenish_rx_pool()
795 skb = pool->rx_buff[bufidx].skb; in replenish_rx_pool()
797 skb = netdev_alloc_skb(adapter->netdev, in replenish_rx_pool()
798 pool->buff_size); in replenish_rx_pool()
801 adapter->replenish_no_mem++; in replenish_rx_pool()
806 pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP; in replenish_rx_pool()
807 pool->next_free = (pool->next_free + 1) % pool->size; in replenish_rx_pool()
811 dst = ltb->buff + offset; in replenish_rx_pool()
812 memset(dst, 0, pool->buff_size); in replenish_rx_pool()
813 dma_addr = ltb->addr + offset; in replenish_rx_pool()
816 pool->rx_buff[bufidx].data = dst; in replenish_rx_pool()
817 pool->rx_buff[bufidx].dma = dma_addr; in replenish_rx_pool()
818 pool->rx_buff[bufidx].skb = skb; in replenish_rx_pool()
819 pool->rx_buff[bufidx].pool_index = pool->index; in replenish_rx_pool()
820 pool->rx_buff[bufidx].size = pool->buff_size; in replenish_rx_pool()
823 sub_crq = &ind_bufp->indir_arr[ind_bufp->index++]; in replenish_rx_pool()
825 sub_crq->rx_add.first = IBMVNIC_CRQ_CMD; in replenish_rx_pool()
826 sub_crq->rx_add.correlator = in replenish_rx_pool()
827 cpu_to_be64((u64)&pool->rx_buff[bufidx]); in replenish_rx_pool()
828 sub_crq->rx_add.ioba = cpu_to_be32(dma_addr); in replenish_rx_pool()
829 sub_crq->rx_add.map_id = ltb->map_id; in replenish_rx_pool()
833 * converted to big endian to prevent the last byte from being in replenish_rx_pool()
839 sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift); in replenish_rx_pool()
842 if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS || in replenish_rx_pool()
843 i == count - 1) { in replenish_rx_pool()
846 (u64)ind_bufp->indir_dma, in replenish_rx_pool()
847 (u64)ind_bufp->index); in replenish_rx_pool()
850 buffers_added += ind_bufp->index; in replenish_rx_pool()
851 adapter->replenish_add_buff_success += ind_bufp->index; in replenish_rx_pool()
852 ind_bufp->index = 0; in replenish_rx_pool()
855 atomic_add(buffers_added, &pool->available); in replenish_rx_pool()
861 for (i = ind_bufp->index - 1; i >= 0; --i) { in replenish_rx_pool()
864 pool->next_free = pool->next_free == 0 ? in replenish_rx_pool()
865 pool->size - 1 : pool->next_free - 1; in replenish_rx_pool()
866 sub_crq = &ind_bufp->indir_arr[i]; in replenish_rx_pool()
868 be64_to_cpu(sub_crq->rx_add.correlator); in replenish_rx_pool()
869 bufidx = (int)(rx_buff - pool->rx_buff); in replenish_rx_pool()
870 pool->free_map[pool->next_free] = bufidx; in replenish_rx_pool()
871 dev_kfree_skb_any(pool->rx_buff[bufidx].skb); in replenish_rx_pool()
872 pool->rx_buff[bufidx].skb = NULL; in replenish_rx_pool()
874 adapter->replenish_add_buff_failure += ind_bufp->index; in replenish_rx_pool()
875 atomic_add(buffers_added, &pool->available); in replenish_rx_pool()
876 ind_bufp->index = 0; in replenish_rx_pool()
877 if (lpar_rc == H_CLOSED || adapter->failover_pending) { in replenish_rx_pool()
884 netif_carrier_off(adapter->netdev); in replenish_rx_pool()
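Note: the "big endian" hit in replenish_rx_pool() above is the rx_add descriptor's length field; per the matched comment, the buffer size is shifted left by a byte on little-endian hosts before the big-endian conversion so its last byte is not truncated. Below is a hedged userspace illustration of that encoding only; htonl() stands in for the kernel's cpu_to_be32() and the buffer size is made up.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

static uint32_t model_encode_rx_len(uint32_t buff_size)
{
	int shift = 0;

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	shift = 8;		/* keep the significant bytes out of the truncated position */
#endif
	return htonl(buff_size << shift);
}

int main(void)
{
	uint32_t enc = model_encode_rx_len(0x2000);	/* hypothetical 8 KiB buffer */
	unsigned char *p = (unsigned char *)&enc;

	/* On a little-endian host the significant bytes land first: 00 20 00 00. */
	printf("bytes as stored: %02x %02x %02x %02x\n", p[0], p[1], p[2], p[3]);
	return 0;
}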
892 adapter->replenish_task_cycles++; in replenish_pools()
893 for (i = 0; i < adapter->num_active_rx_pools; i++) { in replenish_pools()
894 if (adapter->rx_pool[i].active) in replenish_pools()
895 replenish_rx_pool(adapter, &adapter->rx_pool[i]); in replenish_pools()
898 netdev_dbg(adapter->netdev, "Replenished %d pools\n", i); in replenish_pools()
903 kfree(adapter->tx_stats_buffers); in release_stats_buffers()
904 kfree(adapter->rx_stats_buffers); in release_stats_buffers()
905 adapter->tx_stats_buffers = NULL; in release_stats_buffers()
906 adapter->rx_stats_buffers = NULL; in release_stats_buffers()
911 adapter->tx_stats_buffers = in init_stats_buffers()
915 if (!adapter->tx_stats_buffers) in init_stats_buffers()
916 return -ENOMEM; in init_stats_buffers()
918 adapter->rx_stats_buffers = in init_stats_buffers()
922 if (!adapter->rx_stats_buffers) in init_stats_buffers()
923 return -ENOMEM; in init_stats_buffers()
930 struct device *dev = &adapter->vdev->dev; in release_stats_token()
932 if (!adapter->stats_token) in release_stats_token()
935 dma_unmap_single(dev, adapter->stats_token, in release_stats_token()
938 adapter->stats_token = 0; in release_stats_token()
943 struct device *dev = &adapter->vdev->dev; in init_stats_token()
947 stok = dma_map_single(dev, &adapter->stats, in init_stats_token()
956 adapter->stats_token = stok; in init_stats_token()
957 netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok); in init_stats_token()
962 * release_rx_pools() - Release any rx pools attached to @adapter.
965 * Safe to call this multiple times - even if no pools are attached.
972 if (!adapter->rx_pool) in release_rx_pools()
975 for (i = 0; i < adapter->num_active_rx_pools; i++) { in release_rx_pools()
976 rx_pool = &adapter->rx_pool[i]; in release_rx_pools()
978 netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i); in release_rx_pools()
980 kfree(rx_pool->free_map); in release_rx_pools()
982 free_ltb_set(adapter, &rx_pool->ltb_set); in release_rx_pools()
984 if (!rx_pool->rx_buff) in release_rx_pools()
987 for (j = 0; j < rx_pool->size; j++) { in release_rx_pools()
988 if (rx_pool->rx_buff[j].skb) { in release_rx_pools()
989 dev_kfree_skb_any(rx_pool->rx_buff[j].skb); in release_rx_pools()
990 rx_pool->rx_buff[j].skb = NULL; in release_rx_pools()
994 kfree(rx_pool->rx_buff); in release_rx_pools()
997 kfree(adapter->rx_pool); in release_rx_pools()
998 adapter->rx_pool = NULL; in release_rx_pools()
999 adapter->num_active_rx_pools = 0; in release_rx_pools()
1000 adapter->prev_rx_pool_size = 0; in release_rx_pools()
1004 * reuse_rx_pools() - Check if the existing rx pools can be reused.
1023 if (!adapter->rx_pool) in reuse_rx_pools()
1026 old_num_pools = adapter->num_active_rx_pools; in reuse_rx_pools()
1027 new_num_pools = adapter->req_rx_queues; in reuse_rx_pools()
1029 old_pool_size = adapter->prev_rx_pool_size; in reuse_rx_pools()
1030 new_pool_size = adapter->req_rx_add_entries_per_subcrq; in reuse_rx_pools()
1032 old_buff_size = adapter->prev_rx_buf_sz; in reuse_rx_pools()
1033 new_buff_size = adapter->cur_rx_buf_sz; in reuse_rx_pools()
1057 struct device *dev = &adapter->vdev->dev; in init_rx_pools()
1064 pool_size = adapter->req_rx_add_entries_per_subcrq; in init_rx_pools()
1065 num_pools = adapter->req_rx_queues; in init_rx_pools()
1066 buff_size = adapter->cur_rx_buf_sz; in init_rx_pools()
1076 adapter->rx_pool = kcalloc(num_pools, in init_rx_pools()
1079 if (!adapter->rx_pool) { in init_rx_pools()
1081 return -ENOMEM; in init_rx_pools()
1087 adapter->num_active_rx_pools = num_pools; in init_rx_pools()
1090 rx_pool = &adapter->rx_pool[i]; in init_rx_pools()
1092 netdev_dbg(adapter->netdev, in init_rx_pools()
1096 rx_pool->size = pool_size; in init_rx_pools()
1097 rx_pool->index = i; in init_rx_pools()
1098 rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES); in init_rx_pools()
1100 rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int), in init_rx_pools()
1102 if (!rx_pool->free_map) { in init_rx_pools()
1104 rc = -ENOMEM; in init_rx_pools()
1108 rx_pool->rx_buff = kcalloc(rx_pool->size, in init_rx_pools()
1111 if (!rx_pool->rx_buff) { in init_rx_pools()
1113 rc = -ENOMEM; in init_rx_pools()
1118 adapter->prev_rx_pool_size = pool_size; in init_rx_pools()
1119 adapter->prev_rx_buf_sz = adapter->cur_rx_buf_sz; in init_rx_pools()
1123 rx_pool = &adapter->rx_pool[i]; in init_rx_pools()
1125 i, rx_pool->size, rx_pool->buff_size); in init_rx_pools()
1127 rc = alloc_ltb_set(adapter, &rx_pool->ltb_set, in init_rx_pools()
1128 rx_pool->size, rx_pool->buff_size); in init_rx_pools()
1132 for (j = 0; j < rx_pool->size; ++j) { in init_rx_pools()
1135 rx_pool->free_map[j] = j; in init_rx_pools()
1137 /* NOTE: Don't clear rx_buff->skb here - will leak in init_rx_pools()
1141 rx_buff = &rx_pool->rx_buff[j]; in init_rx_pools()
1142 rx_buff->dma = 0; in init_rx_pools()
1143 rx_buff->data = 0; in init_rx_pools()
1144 rx_buff->size = 0; in init_rx_pools()
1145 rx_buff->pool_index = 0; in init_rx_pools()
1151 atomic_set(&rx_pool->available, 0); in init_rx_pools()
1152 rx_pool->next_alloc = 0; in init_rx_pools()
1153 rx_pool->next_free = 0; in init_rx_pools()
1157 rx_pool->active = 1; in init_rx_pools()
1171 if (!adapter->vpd) in release_vpd_data()
1174 kfree(adapter->vpd->buff); in release_vpd_data()
1175 kfree(adapter->vpd); in release_vpd_data()
1177 adapter->vpd = NULL; in release_vpd_data()
1183 kfree(tx_pool->tx_buff); in release_one_tx_pool()
1184 kfree(tx_pool->free_map); in release_one_tx_pool()
1185 free_ltb_set(adapter, &tx_pool->ltb_set); in release_one_tx_pool()
1189 * release_tx_pools() - Release any tx pools attached to @adapter.
1192 * Safe to call this multiple times - even if no pools are attached.
1198 /* init_tx_pools() ensures that ->tx_pool and ->tso_pool are in release_tx_pools()
1199 * both NULL or both non-NULL. So we only need to check one. in release_tx_pools()
1201 if (!adapter->tx_pool) in release_tx_pools()
1204 for (i = 0; i < adapter->num_active_tx_pools; i++) { in release_tx_pools()
1205 release_one_tx_pool(adapter, &adapter->tx_pool[i]); in release_tx_pools()
1206 release_one_tx_pool(adapter, &adapter->tso_pool[i]); in release_tx_pools()
1209 kfree(adapter->tx_pool); in release_tx_pools()
1210 adapter->tx_pool = NULL; in release_tx_pools()
1211 kfree(adapter->tso_pool); in release_tx_pools()
1212 adapter->tso_pool = NULL; in release_tx_pools()
1213 adapter->num_active_tx_pools = 0; in release_tx_pools()
1214 adapter->prev_tx_pool_size = 0; in release_tx_pools()
1223 tx_pool->tx_buff = kcalloc(pool_size, in init_one_tx_pool()
1226 if (!tx_pool->tx_buff) in init_one_tx_pool()
1227 return -ENOMEM; in init_one_tx_pool()
1229 tx_pool->free_map = kcalloc(pool_size, sizeof(int), GFP_KERNEL); in init_one_tx_pool()
1230 if (!tx_pool->free_map) { in init_one_tx_pool()
1231 kfree(tx_pool->tx_buff); in init_one_tx_pool()
1232 tx_pool->tx_buff = NULL; in init_one_tx_pool()
1233 return -ENOMEM; in init_one_tx_pool()
1237 tx_pool->free_map[i] = i; in init_one_tx_pool()
1239 tx_pool->consumer_index = 0; in init_one_tx_pool()
1240 tx_pool->producer_index = 0; in init_one_tx_pool()
1241 tx_pool->num_buffers = pool_size; in init_one_tx_pool()
1242 tx_pool->buf_size = buf_size; in init_one_tx_pool()
1248 * reuse_tx_pools() - Check if the existing tx pools can be reused.
1266 if (!adapter->tx_pool) in reuse_tx_pools()
1269 old_num_pools = adapter->num_active_tx_pools; in reuse_tx_pools()
1270 new_num_pools = adapter->num_active_tx_scrqs; in reuse_tx_pools()
1271 old_pool_size = adapter->prev_tx_pool_size; in reuse_tx_pools()
1272 new_pool_size = adapter->req_tx_entries_per_subcrq; in reuse_tx_pools()
1273 old_mtu = adapter->prev_mtu; in reuse_tx_pools()
1274 new_mtu = adapter->req_mtu; in reuse_tx_pools()
1298 struct device *dev = &adapter->vdev->dev; in init_tx_pools()
1304 num_pools = adapter->req_tx_queues; in init_tx_pools()
1306 /* We must notify the VIOS about the LTB on all resets - but we only in init_tx_pools()
1318 pool_size = adapter->req_tx_entries_per_subcrq; in init_tx_pools()
1319 num_pools = adapter->num_active_tx_scrqs; in init_tx_pools()
1321 adapter->tx_pool = kcalloc(num_pools, in init_tx_pools()
1323 if (!adapter->tx_pool) in init_tx_pools()
1324 return -ENOMEM; in init_tx_pools()
1326 adapter->tso_pool = kcalloc(num_pools, in init_tx_pools()
1328 /* To simplify release_tx_pools() ensure that ->tx_pool and in init_tx_pools()
1329 * ->tso_pool are either both NULL or both non-NULL. in init_tx_pools()
1331 if (!adapter->tso_pool) { in init_tx_pools()
1332 kfree(adapter->tx_pool); in init_tx_pools()
1333 adapter->tx_pool = NULL; in init_tx_pools()
1334 return -ENOMEM; in init_tx_pools()
1340 adapter->num_active_tx_pools = num_pools; in init_tx_pools()
1342 buff_size = adapter->req_mtu + VLAN_HLEN; in init_tx_pools()
1347 i, adapter->req_tx_entries_per_subcrq, buff_size); in init_tx_pools()
1349 rc = init_one_tx_pool(netdev, &adapter->tx_pool[i], in init_tx_pools()
1354 rc = init_one_tx_pool(netdev, &adapter->tso_pool[i], in init_tx_pools()
1361 adapter->prev_tx_pool_size = pool_size; in init_tx_pools()
1362 adapter->prev_mtu = adapter->req_mtu; in init_tx_pools()
1368 * For consistency, we use tx_pool->num_buffers and in init_tx_pools()
1369 * tso_pool->num_buffers below. in init_tx_pools()
1371 rc = -1; in init_tx_pools()
1376 tx_pool = &adapter->tx_pool[i]; in init_tx_pools()
1379 i, tx_pool->num_buffers, tx_pool->buf_size); in init_tx_pools()
1381 rc = alloc_ltb_set(adapter, &tx_pool->ltb_set, in init_tx_pools()
1382 tx_pool->num_buffers, tx_pool->buf_size); in init_tx_pools()
1386 tx_pool->consumer_index = 0; in init_tx_pools()
1387 tx_pool->producer_index = 0; in init_tx_pools()
1389 for (j = 0; j < tx_pool->num_buffers; j++) in init_tx_pools()
1390 tx_pool->free_map[j] = j; in init_tx_pools()
1392 tso_pool = &adapter->tso_pool[i]; in init_tx_pools()
1395 i, tso_pool->num_buffers, tso_pool->buf_size); in init_tx_pools()
1397 rc = alloc_ltb_set(adapter, &tso_pool->ltb_set, in init_tx_pools()
1398 tso_pool->num_buffers, tso_pool->buf_size); in init_tx_pools()
1402 tso_pool->consumer_index = 0; in init_tx_pools()
1403 tso_pool->producer_index = 0; in init_tx_pools()
1405 for (j = 0; j < tso_pool->num_buffers; j++) in init_tx_pools()
1406 tso_pool->free_map[j] = j; in init_tx_pools()
1423 if (adapter->napi_enabled) in ibmvnic_napi_enable()
1426 for (i = 0; i < adapter->req_rx_queues; i++) in ibmvnic_napi_enable()
1427 napi_enable(&adapter->napi[i]); in ibmvnic_napi_enable()
1429 adapter->napi_enabled = true; in ibmvnic_napi_enable()
1436 if (!adapter->napi_enabled) in ibmvnic_napi_disable()
1439 for (i = 0; i < adapter->req_rx_queues; i++) { in ibmvnic_napi_disable()
1440 netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i); in ibmvnic_napi_disable()
1441 napi_disable(&adapter->napi[i]); in ibmvnic_napi_disable()
1444 adapter->napi_enabled = false; in ibmvnic_napi_disable()
1451 adapter->napi = kcalloc(adapter->req_rx_queues, in init_napi()
1453 if (!adapter->napi) in init_napi()
1454 return -ENOMEM; in init_napi()
1456 for (i = 0; i < adapter->req_rx_queues; i++) { in init_napi()
1457 netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i); in init_napi()
1458 netif_napi_add(adapter->netdev, &adapter->napi[i], in init_napi()
1462 adapter->num_active_rx_napi = adapter->req_rx_queues; in init_napi()
1470 if (!adapter->napi) in release_napi()
1473 for (i = 0; i < adapter->num_active_rx_napi; i++) { in release_napi()
1474 netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i); in release_napi()
1475 netif_napi_del(&adapter->napi[i]); in release_napi()
1478 kfree(adapter->napi); in release_napi()
1479 adapter->napi = NULL; in release_napi()
1480 adapter->num_active_rx_napi = 0; in release_napi()
1481 adapter->napi_enabled = false; in release_napi()
1522 return -EACCES; in ibmvnic_login()
1525 adapter->init_done_rc = 0; in ibmvnic_login()
1526 reinit_completion(&adapter->init_done); in ibmvnic_login()
1531 if (!wait_for_completion_timeout(&adapter->init_done, in ibmvnic_login()
1534 adapter->login_pending = false; in ibmvnic_login()
1538 if (adapter->init_done_rc == ABORTED) { in ibmvnic_login()
1541 adapter->init_done_rc = 0; in ibmvnic_login()
1547 } else if (adapter->init_done_rc == PARTIALSUCCESS) { in ibmvnic_login()
1554 adapter->init_done_rc = 0; in ibmvnic_login()
1555 reinit_completion(&adapter->init_done); in ibmvnic_login()
1557 if (!wait_for_completion_timeout(&adapter->init_done, in ibmvnic_login()
1561 return -ETIMEDOUT; in ibmvnic_login()
1578 } else if (adapter->init_done_rc) { in ibmvnic_login()
1580 adapter->init_done_rc); in ibmvnic_login()
1583 /* adapter login failed, so free any CRQs or sub-CRQs in ibmvnic_login()
1590 "Freeing and re-registering CRQs before attempting to login again\n"); in ibmvnic_login()
1592 adapter->init_done_rc = 0; in ibmvnic_login()
1595 * we are essentially re-initializing communication in ibmvnic_login()
1603 * pass since we are re-initializing the CRQ in ibmvnic_login()
1605 adapter->failover_pending = false; in ibmvnic_login()
1617 spin_lock_irqsave(&adapter->rwi_lock, flags); in ibmvnic_login()
1619 spin_unlock_irqrestore(&adapter->rwi_lock, in ibmvnic_login()
1626 return -EIO; in ibmvnic_login()
1639 } while (rc == -EAGAIN && retry_count++ < retries); in ibmvnic_login()
1643 __ibmvnic_set_mac(netdev, adapter->mac_addr); in ibmvnic_login()
1645 netdev_dbg(netdev, "[S:%s] Login succeeded\n", adapter_state_to_string(adapter->state)); in ibmvnic_login()
1651 if (!adapter->login_buf) in release_login_buffer()
1654 dma_unmap_single(&adapter->vdev->dev, adapter->login_buf_token, in release_login_buffer()
1655 adapter->login_buf_sz, DMA_TO_DEVICE); in release_login_buffer()
1656 kfree(adapter->login_buf); in release_login_buffer()
1657 adapter->login_buf = NULL; in release_login_buffer()
1662 if (!adapter->login_rsp_buf) in release_login_rsp_buffer()
1665 dma_unmap_single(&adapter->vdev->dev, adapter->login_rsp_buf_token, in release_login_rsp_buffer()
1666 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE); in release_login_rsp_buffer()
1667 kfree(adapter->login_rsp_buf); in release_login_rsp_buffer()
1668 adapter->login_rsp_buf = NULL; in release_login_rsp_buffer()
1682 struct net_device *netdev = adapter->netdev; in set_link_state()
1698 reinit_completion(&adapter->init_done); in set_link_state()
1705 if (!wait_for_completion_timeout(&adapter->init_done, in set_link_state()
1708 return -ETIMEDOUT; in set_link_state()
1711 if (adapter->init_done_rc == PARTIALSUCCESS) { in set_link_state()
1712 /* Partial success, delay and re-send */ in set_link_state()
1715 } else if (adapter->init_done_rc) { in set_link_state()
1717 adapter->init_done_rc); in set_link_state()
1718 return adapter->init_done_rc; in set_link_state()
1731 adapter->req_tx_queues, adapter->req_rx_queues); in set_real_num_queues()
1733 rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues); in set_real_num_queues()
1739 rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues); in set_real_num_queues()
1748 struct device *dev = &adapter->vdev->dev; in ibmvnic_get_vpd()
1753 if (adapter->vpd->buff) in ibmvnic_get_vpd()
1754 len = adapter->vpd->len; in ibmvnic_get_vpd()
1756 mutex_lock(&adapter->fw_lock); in ibmvnic_get_vpd()
1757 adapter->fw_done_rc = 0; in ibmvnic_get_vpd()
1758 reinit_completion(&adapter->fw_done); in ibmvnic_get_vpd()
1764 mutex_unlock(&adapter->fw_lock); in ibmvnic_get_vpd()
1768 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); in ibmvnic_get_vpd()
1771 mutex_unlock(&adapter->fw_lock); in ibmvnic_get_vpd()
1774 mutex_unlock(&adapter->fw_lock); in ibmvnic_get_vpd()
1776 if (!adapter->vpd->len) in ibmvnic_get_vpd()
1777 return -ENODATA; in ibmvnic_get_vpd()
1779 if (!adapter->vpd->buff) in ibmvnic_get_vpd()
1780 adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL); in ibmvnic_get_vpd()
1781 else if (adapter->vpd->len != len) in ibmvnic_get_vpd()
1782 adapter->vpd->buff = in ibmvnic_get_vpd()
1783 krealloc(adapter->vpd->buff, in ibmvnic_get_vpd()
1784 adapter->vpd->len, GFP_KERNEL); in ibmvnic_get_vpd()
1786 if (!adapter->vpd->buff) { in ibmvnic_get_vpd()
1788 return -ENOMEM; in ibmvnic_get_vpd()
1791 adapter->vpd->dma_addr = in ibmvnic_get_vpd()
1792 dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len, in ibmvnic_get_vpd()
1794 if (dma_mapping_error(dev, adapter->vpd->dma_addr)) { in ibmvnic_get_vpd()
1796 kfree(adapter->vpd->buff); in ibmvnic_get_vpd()
1797 adapter->vpd->buff = NULL; in ibmvnic_get_vpd()
1798 return -ENOMEM; in ibmvnic_get_vpd()
1801 mutex_lock(&adapter->fw_lock); in ibmvnic_get_vpd()
1802 adapter->fw_done_rc = 0; in ibmvnic_get_vpd()
1803 reinit_completion(&adapter->fw_done); in ibmvnic_get_vpd()
1807 crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr); in ibmvnic_get_vpd()
1808 crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len); in ibmvnic_get_vpd()
1811 kfree(adapter->vpd->buff); in ibmvnic_get_vpd()
1812 adapter->vpd->buff = NULL; in ibmvnic_get_vpd()
1813 mutex_unlock(&adapter->fw_lock); in ibmvnic_get_vpd()
1817 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); in ibmvnic_get_vpd()
1820 kfree(adapter->vpd->buff); in ibmvnic_get_vpd()
1821 adapter->vpd->buff = NULL; in ibmvnic_get_vpd()
1822 mutex_unlock(&adapter->fw_lock); in ibmvnic_get_vpd()
1826 mutex_unlock(&adapter->fw_lock); in ibmvnic_get_vpd()
1832 struct net_device *netdev = adapter->netdev; in init_resources()
1839 adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL); in init_resources()
1840 if (!adapter->vpd) in init_resources()
1841 return -ENOMEM; in init_resources()
1867 enum vnic_state prev_state = adapter->state; in __ibmvnic_open()
1870 adapter->state = VNIC_OPENING; in __ibmvnic_open()
1874 /* We're ready to receive frames, enable the sub-crq interrupts and in __ibmvnic_open()
1877 for (i = 0; i < adapter->req_rx_queues; i++) { in __ibmvnic_open()
1880 enable_irq(adapter->rx_scrq[i]->irq); in __ibmvnic_open()
1881 enable_scrq_irq(adapter, adapter->rx_scrq[i]); in __ibmvnic_open()
1884 for (i = 0; i < adapter->req_tx_queues; i++) { in __ibmvnic_open()
1887 enable_irq(adapter->tx_scrq[i]->irq); in __ibmvnic_open()
1888 enable_scrq_irq(adapter, adapter->tx_scrq[i]); in __ibmvnic_open()
1895 if (adapter->reset_reason != VNIC_RESET_NON_FATAL) in __ibmvnic_open()
1906 adapter->tx_queues_active = true; in __ibmvnic_open()
1911 * with setting ->tx_queues_active = false. in __ibmvnic_open()
1918 for (i = 0; i < adapter->req_rx_queues; i++) in __ibmvnic_open()
1919 napi_schedule(&adapter->napi[i]); in __ibmvnic_open()
1922 adapter->state = VNIC_OPEN; in __ibmvnic_open()
1937 * It should be safe to overwrite the adapter->state here. Since in ibmvnic_open()
1944 if (adapter->failover_pending || (test_bit(0, &adapter->resetting))) { in ibmvnic_open()
1946 adapter_state_to_string(adapter->state), in ibmvnic_open()
1947 adapter->failover_pending); in ibmvnic_open()
1948 adapter->state = VNIC_OPEN; in ibmvnic_open()
1953 if (adapter->state != VNIC_CLOSED) { in ibmvnic_open()
1968 /* If open failed and there is a pending failover or in-progress reset, in ibmvnic_open()
1973 (adapter->failover_pending || (test_bit(0, &adapter->resetting)))) { in ibmvnic_open()
1974 adapter->state = VNIC_OPEN; in ibmvnic_open()
1995 if (!adapter->rx_pool) in clean_rx_pools()
1998 rx_scrqs = adapter->num_active_rx_pools; in clean_rx_pools()
1999 rx_entries = adapter->req_rx_add_entries_per_subcrq; in clean_rx_pools()
2003 rx_pool = &adapter->rx_pool[i]; in clean_rx_pools()
2004 if (!rx_pool || !rx_pool->rx_buff) in clean_rx_pools()
2007 netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i); in clean_rx_pools()
2009 rx_buff = &rx_pool->rx_buff[j]; in clean_rx_pools()
2010 if (rx_buff && rx_buff->skb) { in clean_rx_pools()
2011 dev_kfree_skb_any(rx_buff->skb); in clean_rx_pools()
2012 rx_buff->skb = NULL; in clean_rx_pools()
2025 if (!tx_pool || !tx_pool->tx_buff) in clean_one_tx_pool()
2028 tx_entries = tx_pool->num_buffers; in clean_one_tx_pool()
2031 tx_buff = &tx_pool->tx_buff[i]; in clean_one_tx_pool()
2032 if (tx_buff && tx_buff->skb) { in clean_one_tx_pool()
2033 dev_kfree_skb_any(tx_buff->skb); in clean_one_tx_pool()
2034 tx_buff->skb = NULL; in clean_one_tx_pool()
2044 if (!adapter->tx_pool || !adapter->tso_pool) in clean_tx_pools()
2047 tx_scrqs = adapter->num_active_tx_pools; in clean_tx_pools()
2051 netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i); in clean_tx_pools()
2052 clean_one_tx_pool(adapter, &adapter->tx_pool[i]); in clean_tx_pools()
2053 clean_one_tx_pool(adapter, &adapter->tso_pool[i]); in clean_tx_pools()
2059 struct net_device *netdev = adapter->netdev; in ibmvnic_disable_irqs()
2062 if (adapter->tx_scrq) { in ibmvnic_disable_irqs()
2063 for (i = 0; i < adapter->req_tx_queues; i++) in ibmvnic_disable_irqs()
2064 if (adapter->tx_scrq[i]->irq) { in ibmvnic_disable_irqs()
2067 disable_scrq_irq(adapter, adapter->tx_scrq[i]); in ibmvnic_disable_irqs()
2068 disable_irq(adapter->tx_scrq[i]->irq); in ibmvnic_disable_irqs()
2072 if (adapter->rx_scrq) { in ibmvnic_disable_irqs()
2073 for (i = 0; i < adapter->req_rx_queues; i++) { in ibmvnic_disable_irqs()
2074 if (adapter->rx_scrq[i]->irq) { in ibmvnic_disable_irqs()
2077 disable_scrq_irq(adapter, adapter->rx_scrq[i]); in ibmvnic_disable_irqs()
2078 disable_irq(adapter->rx_scrq[i]->irq); in ibmvnic_disable_irqs()
2090 adapter->tx_queues_active = false; in ibmvnic_cleanup()
2092 /* Ensure complete_tx() and ibmvnic_xmit() see ->tx_queues_active in ibmvnic_cleanup()
2097 if (test_bit(0, &adapter->resetting)) in ibmvnic_cleanup()
2111 adapter->state = VNIC_CLOSING; in __ibmvnic_close()
2113 adapter->state = VNIC_CLOSED; in __ibmvnic_close()
2123 adapter_state_to_string(adapter->state), in ibmvnic_close()
2124 adapter->failover_pending, in ibmvnic_close()
2125 adapter->force_reset_recovery); in ibmvnic_close()
2130 if (adapter->failover_pending) { in ibmvnic_close()
2131 adapter->state = VNIC_CLOSED; in ibmvnic_close()
2144 * build_hdr_data - creates L2/L3/L4 header data buffer
2165 if (skb->protocol == htons(ETH_P_IP)) { in build_hdr_data()
2166 hdr_len[1] = ip_hdr(skb)->ihl * 4; in build_hdr_data()
2167 if (ip_hdr(skb)->protocol == IPPROTO_TCP) in build_hdr_data()
2169 else if (ip_hdr(skb)->protocol == IPPROTO_UDP) in build_hdr_data()
2171 } else if (skb->protocol == htons(ETH_P_IPV6)) { in build_hdr_data()
2173 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) in build_hdr_data()
2175 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP) in build_hdr_data()
2177 } else if (skb->protocol == htons(ETH_P_ARP)) { in build_hdr_data()
2178 hdr_len[1] = arp_hdr_len(skb->dev); in build_hdr_data()
2204 * create_hdr_descs - create header and header extension descriptors
2225 cur = hdr_data + len - tmp_len; in create_hdr_descs()
2246 tmp_len -= tmp; in create_hdr_descs()
2256 * build_hdr_descs_arr - build a header descriptor array
2289 if (skb->len < netdev->min_mtu) in ibmvnic_xmit_workarounds()
2290 return skb_put_padto(skb, netdev->min_mtu); in ibmvnic_xmit_workarounds()
2307 ind_bufp = &tx_scrq->ind_buf; in ibmvnic_tx_scrq_clean_buffer()
2308 entries = (u64)ind_bufp->index; in ibmvnic_tx_scrq_clean_buffer()
2309 queue_num = tx_scrq->pool_index; in ibmvnic_tx_scrq_clean_buffer()
2311 for (i = entries - 1; i >= 0; --i) { in ibmvnic_tx_scrq_clean_buffer()
2312 tx_scrq_entry = ind_bufp->indir_arr[i]; in ibmvnic_tx_scrq_clean_buffer()
2317 tx_pool = &adapter->tso_pool[queue_num]; in ibmvnic_tx_scrq_clean_buffer()
2320 tx_pool = &adapter->tx_pool[queue_num]; in ibmvnic_tx_scrq_clean_buffer()
2322 tx_pool->free_map[tx_pool->consumer_index] = index; in ibmvnic_tx_scrq_clean_buffer()
2323 tx_pool->consumer_index = tx_pool->consumer_index == 0 ? in ibmvnic_tx_scrq_clean_buffer()
2324 tx_pool->num_buffers - 1 : in ibmvnic_tx_scrq_clean_buffer()
2325 tx_pool->consumer_index - 1; in ibmvnic_tx_scrq_clean_buffer()
2326 tx_buff = &tx_pool->tx_buff[index]; in ibmvnic_tx_scrq_clean_buffer()
2327 adapter->netdev->stats.tx_packets--; in ibmvnic_tx_scrq_clean_buffer()
2328 adapter->netdev->stats.tx_bytes -= tx_buff->skb->len; in ibmvnic_tx_scrq_clean_buffer()
2329 adapter->tx_stats_buffers[queue_num].batched_packets--; in ibmvnic_tx_scrq_clean_buffer()
2330 adapter->tx_stats_buffers[queue_num].bytes -= in ibmvnic_tx_scrq_clean_buffer()
2331 tx_buff->skb->len; in ibmvnic_tx_scrq_clean_buffer()
2332 dev_kfree_skb_any(tx_buff->skb); in ibmvnic_tx_scrq_clean_buffer()
2333 tx_buff->skb = NULL; in ibmvnic_tx_scrq_clean_buffer()
2334 adapter->netdev->stats.tx_dropped++; in ibmvnic_tx_scrq_clean_buffer()
2337 ind_bufp->index = 0; in ibmvnic_tx_scrq_clean_buffer()
2339 if (atomic_sub_return(entries, &tx_scrq->used) <= in ibmvnic_tx_scrq_clean_buffer()
2340 (adapter->req_tx_entries_per_subcrq / 2) && in ibmvnic_tx_scrq_clean_buffer()
2341 __netif_subqueue_stopped(adapter->netdev, queue_num)) { in ibmvnic_tx_scrq_clean_buffer()
2344 if (adapter->tx_queues_active) { in ibmvnic_tx_scrq_clean_buffer()
2345 netif_wake_subqueue(adapter->netdev, queue_num); in ibmvnic_tx_scrq_clean_buffer()
2346 netdev_dbg(adapter->netdev, "Started queue %d\n", in ibmvnic_tx_scrq_clean_buffer()
2357 unsigned int ua = adapter->vdev->unit_address; in send_subcrq_direct()
2358 struct device *dev = &adapter->vdev->dev; in send_subcrq_direct()
2384 ind_bufp = &tx_scrq->ind_buf; in ibmvnic_tx_scrq_flush()
2385 dma_addr = (u64)ind_bufp->indir_dma; in ibmvnic_tx_scrq_flush()
2386 entries = (u64)ind_bufp->index; in ibmvnic_tx_scrq_flush()
2387 handle = tx_scrq->handle; in ibmvnic_tx_scrq_flush()
2396 (u64 *)ind_bufp->indir_arr); in ibmvnic_tx_scrq_flush()
2401 ind_bufp->index = 0; in ibmvnic_tx_scrq_flush()
2409 u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req; in ibmvnic_xmit()
2410 struct device *dev = &adapter->vdev->dev; in ibmvnic_xmit()
2438 * rcu to ensure reset waits for us to complete. in ibmvnic_xmit()
2441 if (!adapter->tx_queues_active) { in ibmvnic_xmit()
2450 tx_scrq = adapter->tx_scrq[queue_num]; in ibmvnic_xmit()
2452 ind_bufp = &tx_scrq->ind_buf; in ibmvnic_xmit()
2465 tx_pool = &adapter->tso_pool[queue_num]; in ibmvnic_xmit()
2467 tx_pool = &adapter->tx_pool[queue_num]; in ibmvnic_xmit()
2469 bufidx = tx_pool->free_map[tx_pool->consumer_index]; in ibmvnic_xmit()
2482 tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP; in ibmvnic_xmit()
2486 dst = ltb->buff + offset; in ibmvnic_xmit()
2487 memset(dst, 0, tx_pool->buf_size); in ibmvnic_xmit()
2488 data_dma_addr = ltb->addr + offset; in ibmvnic_xmit()
2497 !ind_bufp->index && !netdev_xmit_more()) { in ibmvnic_xmit()
2499 if (skb->ip_summed == CHECKSUM_PARTIAL && in ibmvnic_xmit()
2504 if (skb_shinfo(skb)->nr_frags) { in ibmvnic_xmit()
2512 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in ibmvnic_xmit()
2513 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in ibmvnic_xmit()
2520 skb_copy_from_linear_data(skb, dst, skb->len); in ibmvnic_xmit()
2526 tx_pool->consumer_index = in ibmvnic_xmit()
2527 (tx_pool->consumer_index + 1) % tx_pool->num_buffers; in ibmvnic_xmit()
2529 tx_buff = &tx_pool->tx_buff[bufidx]; in ibmvnic_xmit()
2535 if (unlikely(tx_buff->skb)) { in ibmvnic_xmit()
2539 dev_kfree_skb_any(tx_buff->skb); in ibmvnic_xmit()
2542 tx_buff->skb = skb; in ibmvnic_xmit()
2543 tx_buff->index = bufidx; in ibmvnic_xmit()
2544 tx_buff->pool_index = queue_num; in ibmvnic_xmit()
2545 skblen = skb->len; in ibmvnic_xmit()
2559 tx_crq.v1.dma_reg = cpu_to_be16(ltb->map_id); in ibmvnic_xmit()
2560 tx_crq.v1.sge_len = cpu_to_be32(skb->len); in ibmvnic_xmit()
2563 if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) { in ibmvnic_xmit()
2565 tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci); in ibmvnic_xmit()
2568 if (skb->protocol == htons(ETH_P_IP)) { in ibmvnic_xmit()
2570 proto = ip_hdr(skb)->protocol; in ibmvnic_xmit()
2571 } else if (skb->protocol == htons(ETH_P_IPV6)) { in ibmvnic_xmit()
2573 proto = ipv6_hdr(skb)->nexthdr; in ibmvnic_xmit()
2581 if (skb->ip_summed == CHECKSUM_PARTIAL) { in ibmvnic_xmit()
2587 tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size); in ibmvnic_xmit()
2592 ind_bufp->index = 1; in ibmvnic_xmit()
2593 tx_buff->num_entries = 1; in ibmvnic_xmit()
2594 netdev_tx_sent_queue(txq, skb->len); in ibmvnic_xmit()
2595 ind_bufp->indir_arr[0] = tx_crq; in ibmvnic_xmit()
2608 tx_buff->num_entries = num_entries; in ibmvnic_xmit()
2610 if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) { in ibmvnic_xmit()
2617 memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0], in ibmvnic_xmit()
2620 ind_bufp->index += num_entries; in ibmvnic_xmit()
2621 if (__netdev_tx_sent_queue(txq, skb->len, in ibmvnic_xmit()
2623 ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) { in ibmvnic_xmit()
2632 if (atomic_add_return(num_entries, &tx_scrq->used) in ibmvnic_xmit()
2633 >= adapter->req_tx_entries_per_subcrq) { in ibmvnic_xmit()
2645 tx_buff->skb = NULL; in ibmvnic_xmit()
2646 tx_pool->consumer_index = tx_pool->consumer_index == 0 ? in ibmvnic_xmit()
2647 tx_pool->num_buffers - 1 : in ibmvnic_xmit()
2648 tx_pool->consumer_index - 1; in ibmvnic_xmit()
2654 if (lpar_rc == H_CLOSED || adapter->failover_pending) { in ibmvnic_xmit()
2665 netdev->stats.tx_dropped += tx_dropped; in ibmvnic_xmit()
2666 netdev->stats.tx_bytes += tx_bytes; in ibmvnic_xmit()
2667 netdev->stats.tx_packets += tx_bpackets + tx_dpackets; in ibmvnic_xmit()
2668 adapter->tx_send_failed += tx_send_failed; in ibmvnic_xmit()
2669 adapter->tx_map_failed += tx_map_failed; in ibmvnic_xmit()
2670 adapter->tx_stats_buffers[queue_num].batched_packets += tx_bpackets; in ibmvnic_xmit()
2671 adapter->tx_stats_buffers[queue_num].direct_packets += tx_dpackets; in ibmvnic_xmit()
2672 adapter->tx_stats_buffers[queue_num].bytes += tx_bytes; in ibmvnic_xmit()
2673 adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped; in ibmvnic_xmit()
2688 if (netdev->flags & IFF_PROMISC) { in ibmvnic_set_multi()
2689 if (!adapter->promisc_supported) in ibmvnic_set_multi()
2692 if (netdev->flags & IFF_ALLMULTI) { in ibmvnic_set_multi()
2714 ha->addr); in ibmvnic_set_multi()
2728 rc = -EADDRNOTAVAIL; in __ibmvnic_set_mac()
2737 mutex_lock(&adapter->fw_lock); in __ibmvnic_set_mac()
2738 adapter->fw_done_rc = 0; in __ibmvnic_set_mac()
2739 reinit_completion(&adapter->fw_done); in __ibmvnic_set_mac()
2743 rc = -EIO; in __ibmvnic_set_mac()
2744 mutex_unlock(&adapter->fw_lock); in __ibmvnic_set_mac()
2748 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); in __ibmvnic_set_mac()
2749 /* netdev->dev_addr is changed in handle_change_mac_rsp function */ in __ibmvnic_set_mac()
2750 if (rc || adapter->fw_done_rc) { in __ibmvnic_set_mac()
2751 rc = -EIO; in __ibmvnic_set_mac()
2752 mutex_unlock(&adapter->fw_lock); in __ibmvnic_set_mac()
2755 mutex_unlock(&adapter->fw_lock); in __ibmvnic_set_mac()
2758 ether_addr_copy(adapter->mac_addr, netdev->dev_addr); in __ibmvnic_set_mac()
2769 if (!is_valid_ether_addr(addr->sa_data)) in ibmvnic_set_mac()
2770 return -EADDRNOTAVAIL; in ibmvnic_set_mac()
2772 ether_addr_copy(adapter->mac_addr, addr->sa_data); in ibmvnic_set_mac()
2773 if (adapter->state != VNIC_PROBED) in ibmvnic_set_mac()
2774 rc = __ibmvnic_set_mac(netdev, addr->sa_data); in ibmvnic_set_mac()
2809 reinit_completion(&adapter->init_done); in reinit_init_done()
2810 adapter->init_done_rc = 0; in reinit_init_done()
2815 * non-zero if we hit a fatal error and must halt.
2820 struct net_device *netdev = adapter->netdev; in do_reset()
2825 netdev_dbg(adapter->netdev, in do_reset()
2827 adapter_state_to_string(adapter->state), in do_reset()
2828 adapter->failover_pending, in do_reset()
2829 reset_reason_to_string(rwi->reset_reason), in do_reset()
2832 adapter->reset_reason = rwi->reset_reason; in do_reset()
2834 if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM)) in do_reset()
2841 if (rwi->reset_reason == VNIC_RESET_FAILOVER) in do_reset()
2842 adapter->failover_pending = false; in do_reset()
2845 reset_state = adapter->state; in do_reset()
2848 rc = -EBUSY; in do_reset()
2854 old_num_rx_queues = adapter->req_rx_queues; in do_reset()
2855 old_num_tx_queues = adapter->req_tx_queues; in do_reset()
2856 old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq; in do_reset()
2857 old_num_tx_slots = adapter->req_tx_entries_per_subcrq; in do_reset()
2862 adapter->reset_reason != VNIC_RESET_MOBILITY && in do_reset()
2863 adapter->reset_reason != VNIC_RESET_FAILOVER) { in do_reset()
2864 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { in do_reset()
2869 adapter->state = VNIC_CLOSING; in do_reset()
2872 * re-acquire after the link state change to allow in do_reset()
2882 if (adapter->state == VNIC_OPEN) { in do_reset()
2893 adapter->state = VNIC_CLOSING; in do_reset()
2896 if (adapter->state != VNIC_CLOSING) { in do_reset()
2900 rc = -EAGAIN; in do_reset()
2903 adapter->state = VNIC_CLOSED; in do_reset()
2907 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { in do_reset()
2913 if (adapter->reset_reason != VNIC_RESET_NON_FATAL) { in do_reset()
2917 adapter->state = VNIC_PROBED; in do_reset()
2921 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { in do_reset()
2923 } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) { in do_reset()
2929 rc = vio_enable_interrupts(adapter->vdev); in do_reset()
2931 netdev_err(adapter->netdev, in do_reset()
2938 netdev_err(adapter->netdev, in do_reset()
2959 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) { in do_reset()
2963 } else if (adapter->req_rx_queues != old_num_rx_queues || in do_reset()
2964 adapter->req_tx_queues != old_num_tx_queues || in do_reset()
2965 adapter->req_rx_add_entries_per_subcrq != in do_reset()
2967 adapter->req_tx_entries_per_subcrq != in do_reset()
2969 !adapter->rx_pool || in do_reset()
2970 !adapter->tso_pool || in do_reset()
2971 !adapter->tx_pool) { in do_reset()
2998 adapter->state = VNIC_CLOSED; in do_reset()
3014 if (adapter->reset_reason == VNIC_RESET_FAILOVER || in do_reset()
3015 adapter->reset_reason == VNIC_RESET_MOBILITY) in do_reset()
3023 adapter->state = reset_state; in do_reset()
3025 if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM)) in do_reset()
3028 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Reset done, rc %d\n", in do_reset()
3029 adapter_state_to_string(adapter->state), in do_reset()
3030 adapter->failover_pending, rc); in do_reset()
3037 struct net_device *netdev = adapter->netdev; in do_hard_reset()
3040 netdev_dbg(adapter->netdev, "Hard resetting driver (%s)\n", in do_hard_reset()
3041 reset_reason_to_string(rwi->reset_reason)); in do_hard_reset()
3044 reset_state = adapter->state; in do_hard_reset()
3047 rc = -EBUSY; in do_hard_reset()
3052 adapter->reset_reason = rwi->reset_reason; in do_hard_reset()
3062 adapter->state = VNIC_PROBED; in do_hard_reset()
3068 netdev_err(adapter->netdev, in do_hard_reset()
3092 adapter->state = VNIC_CLOSED; in do_hard_reset()
3107 adapter->state = reset_state; in do_hard_reset()
3108 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Hard reset done, rc %d\n", in do_hard_reset()
3109 adapter_state_to_string(adapter->state), in do_hard_reset()
3110 adapter->failover_pending, rc); in do_hard_reset()
3119 spin_lock_irqsave(&adapter->rwi_lock, flags); in get_next_rwi()
3121 if (!list_empty(&adapter->rwi_list)) { in get_next_rwi()
3122 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi, in get_next_rwi()
3124 list_del(&rwi->list); in get_next_rwi()
3129 spin_unlock_irqrestore(&adapter->rwi_lock, flags); in get_next_rwi()
3134 * do_passive_init - complete probing when partner device is detected.
3142 * Returns non-zero if sub-CRQs are not initialized properly leaving the device
3150 struct net_device *netdev = adapter->netdev; in do_passive_init()
3151 struct device *dev = &adapter->vdev->dev; in do_passive_init()
3156 adapter->state = VNIC_PROBING; in do_passive_init()
3157 reinit_completion(&adapter->init_done); in do_passive_init()
3158 adapter->init_done_rc = 0; in do_passive_init()
3159 adapter->crq.active = true; in do_passive_init()
3167 netdev_dbg(adapter->netdev, "send_version_xchg failed, rc=%d\n", rc); in do_passive_init()
3169 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { in do_passive_init()
3171 rc = -ETIMEDOUT; in do_passive_init()
3187 netdev->mtu = adapter->req_mtu - ETH_HLEN; in do_passive_init()
3188 netdev->min_mtu = adapter->min_mtu - ETH_HLEN; in do_passive_init()
3189 netdev->max_mtu = adapter->max_mtu - ETH_HLEN; in do_passive_init()
3191 adapter->state = VNIC_PROBED; in do_passive_init()
3199 adapter->state = VNIC_DOWN; in do_passive_init()
3218 dev = &adapter->vdev->dev; in __ibmvnic_reset()
3225 * 1. Adapter being removed - just return in __ibmvnic_reset()
3226 * 2. Timed out on probe or another reset in progress - delay the work in __ibmvnic_reset()
3227 * 3. Completed probe - perform any resets in queue in __ibmvnic_reset()
3229 if (adapter->state == VNIC_PROBING && in __ibmvnic_reset()
3230 !wait_for_completion_timeout(&adapter->probe_done, timeout)) { in __ibmvnic_reset()
3233 &adapter->ibmvnic_delayed_reset, in __ibmvnic_reset()
3239 if (adapter->state == VNIC_REMOVING) in __ibmvnic_reset()
3242 /* ->rwi_list is stable now (no one else is removing entries) */ in __ibmvnic_reset()
3246 * Before setting the ->resetting bit though, we have to make sure in __ibmvnic_reset()
3251 * ----------------- -------------- in __ibmvnic_reset()
3253 * set ->resetting bit in __ibmvnic_reset()
3254 * find ->resetting bit is set in __ibmvnic_reset()
3255 * set ->state to IBMVNIC_OPEN (i.e in __ibmvnic_reset()
3267 spin_lock(&adapter->rwi_lock); in __ibmvnic_reset()
3268 if (!list_empty(&adapter->rwi_list)) { in __ibmvnic_reset()
3269 if (test_and_set_bit_lock(0, &adapter->resetting)) { in __ibmvnic_reset()
3271 &adapter->ibmvnic_delayed_reset, in __ibmvnic_reset()
3277 spin_unlock(&adapter->rwi_lock); in __ibmvnic_reset()
3284 spin_lock_irqsave(&adapter->state_lock, flags); in __ibmvnic_reset()
3286 if (adapter->state == VNIC_REMOVING || in __ibmvnic_reset()
3287 adapter->state == VNIC_REMOVED) { in __ibmvnic_reset()
3288 spin_unlock_irqrestore(&adapter->state_lock, flags); in __ibmvnic_reset()
3295 reset_state = adapter->state; in __ibmvnic_reset()
3298 spin_unlock_irqrestore(&adapter->state_lock, flags); in __ibmvnic_reset()
3300 if (rwi->reset_reason == VNIC_RESET_PASSIVE_INIT) { in __ibmvnic_reset()
3305 netif_carrier_on(adapter->netdev); in __ibmvnic_reset()
3306 } else if (adapter->force_reset_recovery) { in __ibmvnic_reset()
3311 adapter->failover_pending = false; in __ibmvnic_reset()
3314 if (adapter->wait_for_reset) { in __ibmvnic_reset()
3316 adapter->force_reset_recovery = false; in __ibmvnic_reset()
3320 adapter->force_reset_recovery = false; in __ibmvnic_reset()
3329 /* If auto-priority-failover is enabled we can get in __ibmvnic_reset()
3331 * in at least two failed resets (from high-priority in __ibmvnic_reset()
3332 * backing device to low-priority one and then back) in __ibmvnic_reset()
3337 netdev_dbg(adapter->netdev, in __ibmvnic_reset()
3339 adapter_state_to_string(adapter->state), in __ibmvnic_reset()
3348 adapter->last_reset_time = jiffies; in __ibmvnic_reset()
3351 netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc); in __ibmvnic_reset()
3370 if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER || in __ibmvnic_reset()
3371 rwi->reset_reason == VNIC_RESET_MOBILITY || rc)) in __ibmvnic_reset()
3372 adapter->force_reset_recovery = true; in __ibmvnic_reset()
3375 if (adapter->wait_for_reset) { in __ibmvnic_reset()
3376 adapter->reset_done_rc = rc; in __ibmvnic_reset()
3377 complete(&adapter->reset_done); in __ibmvnic_reset()
3380 clear_bit_unlock(0, &adapter->resetting); in __ibmvnic_reset()
3382 netdev_dbg(adapter->netdev, in __ibmvnic_reset()
3384 adapter_state_to_string(adapter->state), in __ibmvnic_reset()
3385 adapter->force_reset_recovery, in __ibmvnic_reset()
3386 adapter->wait_for_reset); in __ibmvnic_reset()
3395 __ibmvnic_reset(&adapter->ibmvnic_reset); in __ibmvnic_delayed_reset()
3402 if (!list_empty(&adapter->rwi_list)) { in flush_reset_queue()
3403 list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) { in flush_reset_queue()
3413 struct net_device *netdev = adapter->netdev; in ibmvnic_reset()
3418 spin_lock_irqsave(&adapter->rwi_lock, flags); in ibmvnic_reset()
3423 * duplicate reset when walking the ->rwi_list below. in ibmvnic_reset()
3425 if (adapter->state == VNIC_REMOVING || in ibmvnic_reset()
3426 adapter->state == VNIC_REMOVED || in ibmvnic_reset()
3427 (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) { in ibmvnic_reset()
3433 list_for_each_entry(tmp, &adapter->rwi_list, list) { in ibmvnic_reset()
3434 if (tmp->reset_reason == reason) { in ibmvnic_reset()
3450 if (adapter->force_reset_recovery) in ibmvnic_reset()
3453 rwi->reset_reason = reason; in ibmvnic_reset()
3454 list_add_tail(&rwi->list, &adapter->rwi_list); in ibmvnic_reset()
3455 netdev_dbg(adapter->netdev, "Scheduling reset (reason %s)\n", in ibmvnic_reset()
3457 queue_work(system_long_wq, &adapter->ibmvnic_reset); in ibmvnic_reset()
3462 spin_unlock_irqrestore(&adapter->rwi_lock, flags); in ibmvnic_reset()
3467 return -ret; in ibmvnic_reset()
3474 if (test_bit(0, &adapter->resetting)) { in ibmvnic_tx_timeout()
3475 netdev_err(adapter->netdev, in ibmvnic_tx_timeout()
3482 if (time_before(jiffies, (adapter->last_reset_time + dev->watchdog_timeo))) { in ibmvnic_tx_timeout()
3492 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index]; in remove_buff_from_pool()
3494 rx_buff->skb = NULL; in remove_buff_from_pool()
3496 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff); in remove_buff_from_pool()
3497 pool->next_alloc = (pool->next_alloc + 1) % pool->size; in remove_buff_from_pool()
3499 atomic_dec(&pool->available); in remove_buff_from_pool()
3510 netdev = napi->dev; in ibmvnic_poll()
3512 scrq_num = (int)(napi - adapter->napi); in ibmvnic_poll()
3514 rx_scrq = adapter->rx_scrq[scrq_num]; in ibmvnic_poll()
3525 if (unlikely(test_bit(0, &adapter->resetting) && in ibmvnic_poll()
3526 adapter->reset_reason != VNIC_RESET_NON_FATAL)) { in ibmvnic_poll()
3536 be64_to_cpu(next->rx_comp.correlator); in ibmvnic_poll()
3538 if (next->rx_comp.rc) { in ibmvnic_poll()
3540 be16_to_cpu(next->rx_comp.rc)); in ibmvnic_poll()
3542 next->rx_comp.first = 0; in ibmvnic_poll()
3543 dev_kfree_skb_any(rx_buff->skb); in ibmvnic_poll()
3546 } else if (!rx_buff->skb) { in ibmvnic_poll()
3548 next->rx_comp.first = 0; in ibmvnic_poll()
3553 length = be32_to_cpu(next->rx_comp.len); in ibmvnic_poll()
3554 offset = be16_to_cpu(next->rx_comp.off_frame_data); in ibmvnic_poll()
3555 flags = next->rx_comp.flags; in ibmvnic_poll()
3556 skb = rx_buff->skb; in ibmvnic_poll()
3559 skb_copy_to_linear_data(skb, rx_buff->data + offset, in ibmvnic_poll()
3565 if (adapter->rx_vlan_header_insertion && in ibmvnic_poll()
3568 ntohs(next->rx_comp.vlan_tci)); in ibmvnic_poll()
3571 next->rx_comp.first = 0; in ibmvnic_poll()
3575 skb->protocol = eth_type_trans(skb, netdev); in ibmvnic_poll()
3580 skb->ip_summed = CHECKSUM_UNNECESSARY; in ibmvnic_poll()
3583 length = skb->len; in ibmvnic_poll()
3585 netdev->stats.rx_packets++; in ibmvnic_poll()
3586 netdev->stats.rx_bytes += length; in ibmvnic_poll()
3587 adapter->rx_stats_buffers[scrq_num].packets++; in ibmvnic_poll()
3588 adapter->rx_stats_buffers[scrq_num].bytes += length; in ibmvnic_poll()
3592 if (adapter->state != VNIC_CLOSING && in ibmvnic_poll()
3593 ((atomic_read(&adapter->rx_pool[scrq_num].available) < in ibmvnic_poll()
3594 adapter->req_rx_add_entries_per_subcrq / 2) || in ibmvnic_poll()
3596 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]); in ibmvnic_poll()
3615 adapter->fallback.mtu = adapter->req_mtu; in wait_for_reset()
3616 adapter->fallback.rx_queues = adapter->req_rx_queues; in wait_for_reset()
3617 adapter->fallback.tx_queues = adapter->req_tx_queues; in wait_for_reset()
3618 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq; in wait_for_reset()
3619 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq; in wait_for_reset()
3621 reinit_completion(&adapter->reset_done); in wait_for_reset()
3622 adapter->wait_for_reset = true; in wait_for_reset()
3629 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000); in wait_for_reset()
3631 ret = -ENODEV; in wait_for_reset()
3636 if (adapter->reset_done_rc) { in wait_for_reset()
3637 ret = -EIO; in wait_for_reset()
3638 adapter->desired.mtu = adapter->fallback.mtu; in wait_for_reset()
3639 adapter->desired.rx_queues = adapter->fallback.rx_queues; in wait_for_reset()
3640 adapter->desired.tx_queues = adapter->fallback.tx_queues; in wait_for_reset()
3641 adapter->desired.rx_entries = adapter->fallback.rx_entries; in wait_for_reset()
3642 adapter->desired.tx_entries = adapter->fallback.tx_entries; in wait_for_reset()
3644 reinit_completion(&adapter->reset_done); in wait_for_reset()
3645 adapter->wait_for_reset = true; in wait_for_reset()
3651 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, in wait_for_reset()
3654 ret = -ENODEV; in wait_for_reset()
3659 adapter->wait_for_reset = false; in wait_for_reset()
3668 adapter->desired.mtu = new_mtu + ETH_HLEN; in ibmvnic_change_mtu()
3682 if (skb_shinfo(skb)->gso_size < 224 || in ibmvnic_features_check()
3683 skb_shinfo(skb)->gso_segs == 1) in ibmvnic_features_check()
3712 adapter->speed = SPEED_UNKNOWN; in ibmvnic_get_link_ksettings()
3713 adapter->duplex = DUPLEX_UNKNOWN; in ibmvnic_get_link_ksettings()
3715 cmd->base.speed = adapter->speed; in ibmvnic_get_link_ksettings()
3716 cmd->base.duplex = adapter->duplex; in ibmvnic_get_link_ksettings()
3717 cmd->base.port = PORT_FIBRE; in ibmvnic_get_link_ksettings()
3718 cmd->base.phy_address = 0; in ibmvnic_get_link_ksettings()
3719 cmd->base.autoneg = AUTONEG_ENABLE; in ibmvnic_get_link_ksettings()
3729 strscpy(info->driver, ibmvnic_driver_name, sizeof(info->driver)); in ibmvnic_get_drvinfo()
3730 strscpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version)); in ibmvnic_get_drvinfo()
3731 strscpy(info->fw_version, adapter->fw_version, in ibmvnic_get_drvinfo()
3732 sizeof(info->fw_version)); in ibmvnic_get_drvinfo()
3739 return adapter->msg_enable; in ibmvnic_get_msglevel()
3746 adapter->msg_enable = data; in ibmvnic_set_msglevel()
3756 return adapter->logical_link_state; in ibmvnic_get_link()
3766 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq; in ibmvnic_get_ringparam()
3767 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq; in ibmvnic_get_ringparam()
3768 ring->rx_mini_max_pending = 0; in ibmvnic_get_ringparam()
3769 ring->rx_jumbo_max_pending = 0; in ibmvnic_get_ringparam()
3770 ring->rx_pending = adapter->req_rx_add_entries_per_subcrq; in ibmvnic_get_ringparam()
3771 ring->tx_pending = adapter->req_tx_entries_per_subcrq; in ibmvnic_get_ringparam()
3772 ring->rx_mini_pending = 0; in ibmvnic_get_ringparam()
3773 ring->rx_jumbo_pending = 0; in ibmvnic_get_ringparam()
3783 if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq || in ibmvnic_set_ringparam()
3784 ring->tx_pending > adapter->max_tx_entries_per_subcrq) { in ibmvnic_set_ringparam()
3787 adapter->max_rx_add_entries_per_subcrq); in ibmvnic_set_ringparam()
3789 adapter->max_tx_entries_per_subcrq); in ibmvnic_set_ringparam()
3790 return -EINVAL; in ibmvnic_set_ringparam()
3793 adapter->desired.rx_entries = ring->rx_pending; in ibmvnic_set_ringparam()
3794 adapter->desired.tx_entries = ring->tx_pending; in ibmvnic_set_ringparam()
3804 channels->max_rx = adapter->max_rx_queues; in ibmvnic_get_channels()
3805 channels->max_tx = adapter->max_tx_queues; in ibmvnic_get_channels()
3806 channels->max_other = 0; in ibmvnic_get_channels()
3807 channels->max_combined = 0; in ibmvnic_get_channels()
3808 channels->rx_count = adapter->req_rx_queues; in ibmvnic_get_channels()
3809 channels->tx_count = adapter->req_tx_queues; in ibmvnic_get_channels()
3810 channels->other_count = 0; in ibmvnic_get_channels()
3811 channels->combined_count = 0; in ibmvnic_get_channels()
3819 adapter->desired.rx_queues = channels->rx_count; in ibmvnic_set_channels()
3820 adapter->desired.tx_queues = channels->tx_count; in ibmvnic_set_channels()
3836 for (i = 0; i < adapter->req_tx_queues; i++) { in ibmvnic_get_strings()
3850 for (i = 0; i < adapter->req_rx_queues; i++) { in ibmvnic_get_strings()
3869 adapter->req_tx_queues * NUM_TX_STATS + in ibmvnic_get_sset_count()
3870 adapter->req_rx_queues * NUM_RX_STATS; in ibmvnic_get_sset_count()
3872 return -EOPNOTSUPP; in ibmvnic_get_sset_count()
3887 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token); in ibmvnic_get_ethtool_stats()
3892 reinit_completion(&adapter->stats_done); in ibmvnic_get_ethtool_stats()
3896 rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000); in ibmvnic_get_ethtool_stats()
3904 for (j = 0; j < adapter->req_tx_queues; j++) { in ibmvnic_get_ethtool_stats()
3905 data[i] = adapter->tx_stats_buffers[j].batched_packets; in ibmvnic_get_ethtool_stats()
3907 data[i] = adapter->tx_stats_buffers[j].direct_packets; in ibmvnic_get_ethtool_stats()
3909 data[i] = adapter->tx_stats_buffers[j].bytes; in ibmvnic_get_ethtool_stats()
3911 data[i] = adapter->tx_stats_buffers[j].dropped_packets; in ibmvnic_get_ethtool_stats()
3915 for (j = 0; j < adapter->req_rx_queues; j++) { in ibmvnic_get_ethtool_stats()
3916 data[i] = adapter->rx_stats_buffers[j].packets; in ibmvnic_get_ethtool_stats()
3918 data[i] = adapter->rx_stats_buffers[j].bytes; in ibmvnic_get_ethtool_stats()
3920 data[i] = adapter->rx_stats_buffers[j].interrupts; in ibmvnic_get_ethtool_stats()
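
ibmvnic_get_ethtool_stats() flattens the per-queue counters into the single u64 array ethtool expects: one fixed group per TX queue (batched, direct, bytes, dropped) followed by one group per RX queue (packets, bytes, interrupts), matching the NUM_TX_STATS / NUM_RX_STATS accounting in ibmvnic_get_sset_count(). A self-contained model of that layout, with the index increments written out explicitly.

#include <stdint.h>
#include <stdio.h>

#define NUM_TXQ      2
#define NUM_RXQ      2
#define NUM_TX_STATS 4   /* batched, direct, bytes, dropped */
#define NUM_RX_STATS 3   /* packets, bytes, interrupts      */

struct txq_stats { uint64_t batched, direct, bytes, dropped; };
struct rxq_stats { uint64_t packets, bytes, interrupts; };

int main(void)
{
	struct txq_stats tx[NUM_TXQ] = { { 10, 2, 1500, 0 }, { 7, 1, 900, 1 } };
	struct rxq_stats rx[NUM_RXQ] = { { 20, 3000, 5 }, { 11, 1800, 4 } };
	uint64_t data[NUM_TXQ * NUM_TX_STATS + NUM_RXQ * NUM_RX_STATS];
	int i = 0;

	for (int j = 0; j < NUM_TXQ; j++) {
		data[i++] = tx[j].batched;
		data[i++] = tx[j].direct;
		data[i++] = tx[j].bytes;
		data[i++] = tx[j].dropped;
	}
	for (int j = 0; j < NUM_RXQ; j++) {
		data[i++] = rx[j].packets;
		data[i++] = rx[j].bytes;
		data[i++] = rx[j].interrupts;
	}

	printf("filled %d of %zu slots\n", i, sizeof(data) / sizeof(data[0]));
	return 0;
}
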
3948 netdev_dbg(adapter->netdev, "Invalid scrq reset.\n"); in reset_one_sub_crq_queue()
3949 return -EINVAL; in reset_one_sub_crq_queue()
3952 if (scrq->irq) { in reset_one_sub_crq_queue()
3953 free_irq(scrq->irq, scrq); in reset_one_sub_crq_queue()
3954 irq_dispose_mapping(scrq->irq); in reset_one_sub_crq_queue()
3955 scrq->irq = 0; in reset_one_sub_crq_queue()
3958 if (scrq->msgs) { in reset_one_sub_crq_queue()
3959 memset(scrq->msgs, 0, 4 * PAGE_SIZE); in reset_one_sub_crq_queue()
3960 atomic_set(&scrq->used, 0); in reset_one_sub_crq_queue()
3961 scrq->cur = 0; in reset_one_sub_crq_queue()
3962 scrq->ind_buf.index = 0; in reset_one_sub_crq_queue()
3964 netdev_dbg(adapter->netdev, "Invalid scrq reset\n"); in reset_one_sub_crq_queue()
3965 return -EINVAL; in reset_one_sub_crq_queue()
3968 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, in reset_one_sub_crq_queue()
3969 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq); in reset_one_sub_crq_queue()
3977 if (!adapter->tx_scrq || !adapter->rx_scrq) in reset_sub_crq_queues()
3978 return -EINVAL; in reset_sub_crq_queues()
3982 for (i = 0; i < adapter->req_tx_queues; i++) { in reset_sub_crq_queues()
3983 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i); in reset_sub_crq_queues()
3984 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]); in reset_sub_crq_queues()
3989 for (i = 0; i < adapter->req_rx_queues; i++) { in reset_sub_crq_queues()
3990 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i); in reset_sub_crq_queues()
3991 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]); in reset_sub_crq_queues()
4003 struct device *dev = &adapter->vdev->dev; in release_sub_crq_queue()
4006 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n"); in release_sub_crq_queue()
4009 /* Close the sub-crqs */ in release_sub_crq_queue()
4012 adapter->vdev->unit_address, in release_sub_crq_queue()
4013 scrq->crq_num); in release_sub_crq_queue()
4017 netdev_err(adapter->netdev, in release_sub_crq_queue()
4018 "Failed to release sub-CRQ %16lx, rc = %ld\n", in release_sub_crq_queue()
4019 scrq->crq_num, rc); in release_sub_crq_queue()
4025 scrq->ind_buf.indir_arr, in release_sub_crq_queue()
4026 scrq->ind_buf.indir_dma); in release_sub_crq_queue()
4028 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, in release_sub_crq_queue()
4030 free_pages((unsigned long)scrq->msgs, 2); in release_sub_crq_queue()
4031 free_cpumask_var(scrq->affinity_mask); in release_sub_crq_queue()
4038 struct device *dev = &adapter->vdev->dev; in init_sub_crq_queue()
4046 scrq->msgs = in init_sub_crq_queue()
4048 if (!scrq->msgs) { in init_sub_crq_queue()
4052 if (!zalloc_cpumask_var(&scrq->affinity_mask, GFP_KERNEL)) in init_sub_crq_queue()
4055 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE, in init_sub_crq_queue()
4057 if (dma_mapping_error(dev, scrq->msg_token)) { in init_sub_crq_queue()
4062 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, in init_sub_crq_queue()
4063 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq); in init_sub_crq_queue()
4071 dev_warn(dev, "Error %d registering sub-crq\n", rc); in init_sub_crq_queue()
4075 scrq->adapter = adapter; in init_sub_crq_queue()
4076 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs); in init_sub_crq_queue()
4077 scrq->ind_buf.index = 0; in init_sub_crq_queue()
4079 scrq->ind_buf.indir_arr = in init_sub_crq_queue()
4082 &scrq->ind_buf.indir_dma, in init_sub_crq_queue()
4085 if (!scrq->ind_buf.indir_arr) in init_sub_crq_queue()
4088 spin_lock_init(&scrq->lock); in init_sub_crq_queue()
4090 netdev_dbg(adapter->netdev, in init_sub_crq_queue()
4091 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n", in init_sub_crq_queue()
4092 scrq->crq_num, scrq->hw_irq, scrq->irq); in init_sub_crq_queue()
4099 adapter->vdev->unit_address, in init_sub_crq_queue()
4100 scrq->crq_num); in init_sub_crq_queue()
4103 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE, in init_sub_crq_queue()
4106 free_cpumask_var(scrq->affinity_mask); in init_sub_crq_queue()
4108 free_pages((unsigned long)scrq->msgs, 2); in init_sub_crq_queue()
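
init_sub_crq_queue() sizes the ring as 4 * PAGE_SIZE divided by the entry size; the "Sub-CRQ entries are 32 byte long" comment later in the file supplies the divisor. Quick arithmetic below; the page sizes tried are only illustrative.

#include <stdio.h>

#define SUB_CRQ_ENTRY_SIZE 32ul

int main(void)
{
	unsigned long page_sizes[] = { 4096, 65536 };   /* illustrative only */

	for (int i = 0; i < 2; i++) {
		unsigned long depth = 4 * page_sizes[i] / SUB_CRQ_ENTRY_SIZE;

		printf("PAGE_SIZE=%lu -> %lu sub-CRQ entries\n",
		       page_sizes[i], depth);
	}
	return 0;
}
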
4120 if (adapter->tx_scrq) { in release_sub_crqs()
4121 for (i = 0; i < adapter->num_active_tx_scrqs; i++) { in release_sub_crqs()
4122 if (!adapter->tx_scrq[i]) in release_sub_crqs()
4125 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n", in release_sub_crqs()
4127 ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]); in release_sub_crqs()
4128 if (adapter->tx_scrq[i]->irq) { in release_sub_crqs()
4129 free_irq(adapter->tx_scrq[i]->irq, in release_sub_crqs()
4130 adapter->tx_scrq[i]); in release_sub_crqs()
4131 irq_dispose_mapping(adapter->tx_scrq[i]->irq); in release_sub_crqs()
4132 adapter->tx_scrq[i]->irq = 0; in release_sub_crqs()
4135 release_sub_crq_queue(adapter, adapter->tx_scrq[i], in release_sub_crqs()
4139 kfree(adapter->tx_scrq); in release_sub_crqs()
4140 adapter->tx_scrq = NULL; in release_sub_crqs()
4141 adapter->num_active_tx_scrqs = 0; in release_sub_crqs()
4150 if (adapter->rx_scrq) { in release_sub_crqs()
4151 for (i = 0; i < adapter->num_active_rx_scrqs; i++) { in release_sub_crqs()
4152 if (!adapter->rx_scrq[i]) in release_sub_crqs()
4155 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n", in release_sub_crqs()
4157 if (adapter->rx_scrq[i]->irq) { in release_sub_crqs()
4158 free_irq(adapter->rx_scrq[i]->irq, in release_sub_crqs()
4159 adapter->rx_scrq[i]); in release_sub_crqs()
4160 irq_dispose_mapping(adapter->rx_scrq[i]->irq); in release_sub_crqs()
4161 adapter->rx_scrq[i]->irq = 0; in release_sub_crqs()
4164 release_sub_crq_queue(adapter, adapter->rx_scrq[i], in release_sub_crqs()
4168 kfree(adapter->rx_scrq); in release_sub_crqs()
4169 adapter->rx_scrq = NULL; in release_sub_crqs()
4170 adapter->num_active_rx_scrqs = 0; in release_sub_crqs()
4177 struct device *dev = &adapter->vdev->dev; in disable_scrq_irq()
4180 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, in disable_scrq_irq()
4181 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); in disable_scrq_irq()
4184 scrq->hw_irq, rc); in disable_scrq_irq()
4193 u64 val = 0xff000000 | scrq->hw_irq; in ibmvnic_xics_eoi()
4215 struct device *dev = &adapter->vdev->dev; in enable_scrq_irq()
4218 if (scrq->hw_irq > 0x100000000ULL) { in enable_scrq_irq()
4219 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq); in enable_scrq_irq()
4223 if (test_bit(0, &adapter->resetting) && in enable_scrq_irq()
4224 adapter->reset_reason == VNIC_RESET_MOBILITY) { in enable_scrq_irq()
4228 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, in enable_scrq_irq()
4229 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); in enable_scrq_irq()
4232 scrq->hw_irq, rc); in enable_scrq_irq()
4239 struct device *dev = &adapter->vdev->dev; in ibmvnic_complete_tx()
4249 unsigned int pool = scrq->pool_index; in ibmvnic_complete_tx()
4255 for (i = 0; i < next->tx_comp.num_comps; i++) { in ibmvnic_complete_tx()
4256 index = be32_to_cpu(next->tx_comp.correlators[i]); in ibmvnic_complete_tx()
4258 tx_pool = &adapter->tso_pool[pool]; in ibmvnic_complete_tx()
4261 tx_pool = &adapter->tx_pool[pool]; in ibmvnic_complete_tx()
4264 txbuff = &tx_pool->tx_buff[index]; in ibmvnic_complete_tx()
4266 num_entries += txbuff->num_entries; in ibmvnic_complete_tx()
4267 if (txbuff->skb) { in ibmvnic_complete_tx()
4268 total_bytes += txbuff->skb->len; in ibmvnic_complete_tx()
4269 if (next->tx_comp.rcs[i]) { in ibmvnic_complete_tx()
4271 next->tx_comp.rcs[i]); in ibmvnic_complete_tx()
4272 dev_kfree_skb_irq(txbuff->skb); in ibmvnic_complete_tx()
4274 dev_consume_skb_irq(txbuff->skb); in ibmvnic_complete_tx()
4276 txbuff->skb = NULL; in ibmvnic_complete_tx()
4278 netdev_warn(adapter->netdev, in ibmvnic_complete_tx()
4281 tx_pool->free_map[tx_pool->producer_index] = index; in ibmvnic_complete_tx()
4282 tx_pool->producer_index = in ibmvnic_complete_tx()
4283 (tx_pool->producer_index + 1) % in ibmvnic_complete_tx()
4284 tx_pool->num_buffers; in ibmvnic_complete_tx()
4287 next->tx_comp.first = 0; in ibmvnic_complete_tx()
4289 txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index); in ibmvnic_complete_tx()
4292 if (atomic_sub_return(num_entries, &scrq->used) <= in ibmvnic_complete_tx()
4293 (adapter->req_tx_entries_per_subcrq / 2) && in ibmvnic_complete_tx()
4294 __netif_subqueue_stopped(adapter->netdev, in ibmvnic_complete_tx()
4295 scrq->pool_index)) { in ibmvnic_complete_tx()
4297 if (adapter->tx_queues_active) { in ibmvnic_complete_tx()
4298 netif_wake_subqueue(adapter->netdev, in ibmvnic_complete_tx()
4299 scrq->pool_index); in ibmvnic_complete_tx()
4300 netdev_dbg(adapter->netdev, in ibmvnic_complete_tx()
4302 scrq->pool_index); in ibmvnic_complete_tx()
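
ibmvnic_complete_tx() returns each completed buffer index to its pool through a wrapping producer index into free_map, and wakes the TX queue once the in-flight entry count has fallen to half of the negotiated ring size while the subqueue was stopped. A sketch of those two pieces of bookkeeping; the names and numbers are illustrative.

#include <stdbool.h>
#include <stdio.h>

#define NUM_BUFFERS 8

static int free_map[NUM_BUFFERS];
static int producer_index;

/* Return a completed buffer index to the pool, wrapping like the driver's
 * tx_pool->producer_index. */
static void recycle(int index)
{
	free_map[producer_index] = index;
	producer_index = (producer_index + 1) % NUM_BUFFERS;
}

/* Wake only if the subqueue had been stopped and at most half the ring is
 * still in flight. */
static bool should_wake(int used, int ring_size, bool stopped)
{
	return stopped && used <= ring_size / 2;
}

int main(void)
{
	int used = 5;

	recycle(3);      /* completion arrived for buffer index 3 */
	used--;          /* one fewer descriptor in flight        */

	printf("wake queue: %s\n",
	       should_wake(used, NUM_BUFFERS, true) ? "yes" : "no");
	return 0;
}
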
4321 struct ibmvnic_adapter *adapter = scrq->adapter; in ibmvnic_interrupt_tx()
4332 struct ibmvnic_adapter *adapter = scrq->adapter; in ibmvnic_interrupt_rx()
4337 if (unlikely(adapter->state != VNIC_OPEN)) in ibmvnic_interrupt_rx()
4340 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++; in ibmvnic_interrupt_rx()
4342 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) { in ibmvnic_interrupt_rx()
4344 __napi_schedule(&adapter->napi[scrq->scrq_num]); in ibmvnic_interrupt_rx()
4352 struct device *dev = &adapter->vdev->dev; in init_sub_crq_irqs()
4357 for (i = 0; i < adapter->req_tx_queues; i++) { in init_sub_crq_irqs()
4358 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n", in init_sub_crq_irqs()
4360 scrq = adapter->tx_scrq[i]; in init_sub_crq_irqs()
4361 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); in init_sub_crq_irqs()
4363 if (!scrq->irq) { in init_sub_crq_irqs()
4364 rc = -EINVAL; in init_sub_crq_irqs()
4369 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d", in init_sub_crq_irqs()
4370 adapter->vdev->unit_address, i); in init_sub_crq_irqs()
4371 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx, in init_sub_crq_irqs()
4372 0, scrq->name, scrq); in init_sub_crq_irqs()
4376 scrq->irq, rc); in init_sub_crq_irqs()
4377 irq_dispose_mapping(scrq->irq); in init_sub_crq_irqs()
4382 for (i = 0; i < adapter->req_rx_queues; i++) { in init_sub_crq_irqs()
4383 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n", in init_sub_crq_irqs()
4385 scrq = adapter->rx_scrq[i]; in init_sub_crq_irqs()
4386 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); in init_sub_crq_irqs()
4387 if (!scrq->irq) { in init_sub_crq_irqs()
4388 rc = -EINVAL; in init_sub_crq_irqs()
4392 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d", in init_sub_crq_irqs()
4393 adapter->vdev->unit_address, i); in init_sub_crq_irqs()
4394 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx, in init_sub_crq_irqs()
4395 0, scrq->name, scrq); in init_sub_crq_irqs()
4398 scrq->irq, rc); in init_sub_crq_irqs()
4399 irq_dispose_mapping(scrq->irq); in init_sub_crq_irqs()
4412 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]); in init_sub_crq_irqs()
4413 irq_dispose_mapping(adapter->rx_scrq[j]->irq); in init_sub_crq_irqs()
4415 i = adapter->req_tx_queues; in init_sub_crq_irqs()
4418 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]); in init_sub_crq_irqs()
4419 irq_dispose_mapping(adapter->tx_scrq[j]->irq); in init_sub_crq_irqs()
4427 struct device *dev = &adapter->vdev->dev; in init_sub_crqs()
4434 total_queues = adapter->req_tx_queues + adapter->req_rx_queues; in init_sub_crqs()
4438 return -ENOMEM; in init_sub_crqs()
4443 dev_warn(dev, "Couldn't allocate all sub-crqs\n"); in init_sub_crqs()
4451 adapter->min_tx_queues + adapter->min_rx_queues) { in init_sub_crqs()
4452 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n"); in init_sub_crqs()
4457 for (i = 0; i < total_queues - registered_queues + more ; i++) { in init_sub_crqs()
4458 netdev_dbg(adapter->netdev, "Reducing number of queues\n"); in init_sub_crqs()
4461 if (adapter->req_rx_queues > adapter->min_rx_queues) in init_sub_crqs()
4462 adapter->req_rx_queues--; in init_sub_crqs()
4467 if (adapter->req_tx_queues > adapter->min_tx_queues) in init_sub_crqs()
4468 adapter->req_tx_queues--; in init_sub_crqs()
4475 adapter->tx_scrq = kcalloc(adapter->req_tx_queues, in init_sub_crqs()
4476 sizeof(*adapter->tx_scrq), GFP_KERNEL); in init_sub_crqs()
4477 if (!adapter->tx_scrq) in init_sub_crqs()
4480 for (i = 0; i < adapter->req_tx_queues; i++) { in init_sub_crqs()
4481 adapter->tx_scrq[i] = allqueues[i]; in init_sub_crqs()
4482 adapter->tx_scrq[i]->pool_index = i; in init_sub_crqs()
4483 adapter->num_active_tx_scrqs++; in init_sub_crqs()
4486 adapter->rx_scrq = kcalloc(adapter->req_rx_queues, in init_sub_crqs()
4487 sizeof(*adapter->rx_scrq), GFP_KERNEL); in init_sub_crqs()
4488 if (!adapter->rx_scrq) in init_sub_crqs()
4491 for (i = 0; i < adapter->req_rx_queues; i++) { in init_sub_crqs()
4492 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues]; in init_sub_crqs()
4493 adapter->rx_scrq[i]->scrq_num = i; in init_sub_crqs()
4494 adapter->num_active_rx_scrqs++; in init_sub_crqs()
4501 kfree(adapter->tx_scrq); in init_sub_crqs()
4502 adapter->tx_scrq = NULL; in init_sub_crqs()
4507 return -ENOMEM; in init_sub_crqs()
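
When init_sub_crqs() cannot register every requested sub-CRQ, it trims the TX and RX requests back toward their minimums instead of failing, provided the total stays at or above min_tx_queues + min_rx_queues. The sketch below is one plausible reading of that reduction loop (alternating RX and TX to spread the shortfall), not a copy of the driver's exact iteration.

#include <stdio.h>

int main(void)
{
	int req_tx = 8, req_rx = 8;
	int min_tx = 2, min_rx = 2;
	int registered = 12;                   /* sub-CRQs actually granted */
	int shortfall = req_tx + req_rx - registered;

	if (registered < min_tx + min_rx) {
		printf("fatal: below minimum queue count\n");
		return 1;
	}

	for (int i = 0; i < shortfall; i++) {
		if (i % 2 == 0 && req_rx > min_rx)
			req_rx--;
		else if (req_tx > min_tx)
			req_tx--;
	}
	printf("negotiated tx=%d rx=%d\n", req_tx, req_rx);
	return 0;
}
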
4512 struct device *dev = &adapter->vdev->dev; in send_request_cap()
4522 if (!(adapter->netdev->flags & IFF_PROMISC) || in send_request_cap()
4523 adapter->promisc_supported) in send_request_cap()
4529 /* Sub-CRQ entries are 32 byte long */ in send_request_cap()
4532 atomic_set(&adapter->running_cap_crqs, cap_reqs); in send_request_cap()
4534 if (adapter->min_tx_entries_per_subcrq > entries_page || in send_request_cap()
4535 adapter->min_rx_add_entries_per_subcrq > entries_page) { in send_request_cap()
4536 dev_err(dev, "Fatal, invalid entries per sub-crq\n"); in send_request_cap()
4540 if (adapter->desired.mtu) in send_request_cap()
4541 adapter->req_mtu = adapter->desired.mtu; in send_request_cap()
4543 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN; in send_request_cap()
4545 if (!adapter->desired.tx_entries) in send_request_cap()
4546 adapter->desired.tx_entries = in send_request_cap()
4547 adapter->max_tx_entries_per_subcrq; in send_request_cap()
4548 if (!adapter->desired.rx_entries) in send_request_cap()
4549 adapter->desired.rx_entries = in send_request_cap()
4550 adapter->max_rx_add_entries_per_subcrq; in send_request_cap()
4553 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN); in send_request_cap()
4555 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) * in send_request_cap()
4556 adapter->desired.tx_entries > IBMVNIC_LTB_SET_SIZE) { in send_request_cap()
4557 adapter->desired.tx_entries = max_entries; in send_request_cap()
4560 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) * in send_request_cap()
4561 adapter->desired.rx_entries > IBMVNIC_LTB_SET_SIZE) { in send_request_cap()
4562 adapter->desired.rx_entries = max_entries; in send_request_cap()
4565 if (adapter->desired.tx_entries) in send_request_cap()
4566 adapter->req_tx_entries_per_subcrq = in send_request_cap()
4567 adapter->desired.tx_entries; in send_request_cap()
4569 adapter->req_tx_entries_per_subcrq = in send_request_cap()
4570 adapter->max_tx_entries_per_subcrq; in send_request_cap()
4572 if (adapter->desired.rx_entries) in send_request_cap()
4573 adapter->req_rx_add_entries_per_subcrq = in send_request_cap()
4574 adapter->desired.rx_entries; in send_request_cap()
4576 adapter->req_rx_add_entries_per_subcrq = in send_request_cap()
4577 adapter->max_rx_add_entries_per_subcrq; in send_request_cap()
4579 if (adapter->desired.tx_queues) in send_request_cap()
4580 adapter->req_tx_queues = in send_request_cap()
4581 adapter->desired.tx_queues; in send_request_cap()
4583 adapter->req_tx_queues = in send_request_cap()
4584 adapter->opt_tx_comp_sub_queues; in send_request_cap()
4586 if (adapter->desired.rx_queues) in send_request_cap()
4587 adapter->req_rx_queues = in send_request_cap()
4588 adapter->desired.rx_queues; in send_request_cap()
4590 adapter->req_rx_queues = in send_request_cap()
4591 adapter->opt_rx_comp_queues; in send_request_cap()
4593 adapter->req_rx_add_queues = adapter->max_rx_add_queues; in send_request_cap()
4595 atomic_add(cap_reqs, &adapter->running_cap_crqs); in send_request_cap()
4602 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues); in send_request_cap()
4603 cap_reqs--; in send_request_cap()
4607 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues); in send_request_cap()
4608 cap_reqs--; in send_request_cap()
4612 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues); in send_request_cap()
4613 cap_reqs--; in send_request_cap()
4619 cpu_to_be64(adapter->req_tx_entries_per_subcrq); in send_request_cap()
4620 cap_reqs--; in send_request_cap()
4626 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq); in send_request_cap()
4627 cap_reqs--; in send_request_cap()
4631 crq.request_capability.number = cpu_to_be64(adapter->req_mtu); in send_request_cap()
4632 cap_reqs--; in send_request_cap()
4635 if (adapter->netdev->flags & IFF_PROMISC) { in send_request_cap()
4636 if (adapter->promisc_supported) { in send_request_cap()
4640 cap_reqs--; in send_request_cap()
4647 cap_reqs--; in send_request_cap()
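
send_request_cap() caps the requested ring entries so that one ring's worth of buffers still fits in a long-term-buffer set: max_entries = IBMVNIC_LTB_SET_SIZE / (req_mtu + IBMVNIC_BUFFER_HLEN). The same clamp in one line of arithmetic; the constants below are placeholders, not the driver's real values.

#include <stdio.h>

int main(void)
{
	unsigned long ltb_set_size = 32ul << 20;   /* placeholder: 32 MiB  */
	unsigned long buffer_hlen  = 256;          /* placeholder headroom */
	unsigned long req_mtu      = 9000;
	unsigned long desired      = 16384;

	unsigned long max_entries = ltb_set_size / (req_mtu + buffer_hlen);

	if ((req_mtu + buffer_hlen) * desired > ltb_set_size)
		desired = max_entries;

	printf("entries per sub-CRQ capped at %lu\n", desired);
	return 0;
}
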
4660 union sub_crq *entry = &scrq->msgs[scrq->cur]; in pending_scrq()
4663 rc = !!(entry->generic.first & IBMVNIC_CRQ_CMD_RSP); in pending_scrq()
4679 spin_lock_irqsave(&scrq->lock, flags); in ibmvnic_next_scrq()
4680 entry = &scrq->msgs[scrq->cur]; in ibmvnic_next_scrq()
4681 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) { in ibmvnic_next_scrq()
4682 if (++scrq->cur == scrq->size) in ibmvnic_next_scrq()
4683 scrq->cur = 0; in ibmvnic_next_scrq()
4687 spin_unlock_irqrestore(&scrq->lock, flags); in ibmvnic_next_scrq()
4699 struct ibmvnic_crq_queue *queue = &adapter->crq; in ibmvnic_next_crq()
4702 crq = &queue->msgs[queue->cur]; in ibmvnic_next_crq()
4703 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) { in ibmvnic_next_crq()
4704 if (++queue->cur == queue->size) in ibmvnic_next_crq()
4705 queue->cur = 0; in ibmvnic_next_crq()
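
pending_scrq(), ibmvnic_next_scrq() and ibmvnic_next_crq() all walk their rings the same way: a slot is ready when a response-valid bit is set in its first byte, and the cursor wraps to zero at the ring size. A minimal model of that consumer, with an illustrative valid-bit value and with the driver's locking and memory barriers deliberately left out.

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 4
#define CMD_RSP   0x80u   /* illustrative valid bit, not the driver's value */

struct entry { uint8_t first; uint32_t payload; };

static struct entry ring[RING_SIZE];
static unsigned int cur;

static struct entry *next_entry(void)
{
	struct entry *e = &ring[cur];

	if (!(e->first & CMD_RSP))
		return NULL;          /* nothing pending in this slot */
	if (++cur == RING_SIZE)
		cur = 0;              /* wrap, mirroring scrq->cur    */
	return e;
}

int main(void)
{
	struct entry *e;

	ring[0].first = CMD_RSP;      /* fake the producer marking slot 0 */
	ring[0].payload = 42;

	e = next_entry();
	if (e)
		printf("consumed payload %u, cursor now %u\n", e->payload, cur);
	return 0;
}
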
4735 unsigned int ua = adapter->vdev->unit_address; in send_subcrq_indirect()
4736 struct device *dev = &adapter->vdev->dev; in send_subcrq_indirect()
4754 unsigned int ua = adapter->vdev->unit_address; in ibmvnic_send_crq()
4755 struct device *dev = &adapter->vdev->dev; in ibmvnic_send_crq()
4759 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n", in ibmvnic_send_crq()
4763 if (!adapter->crq.active && in ibmvnic_send_crq()
4764 crq->generic.first != IBMVNIC_CRQ_INIT_CMD) { in ibmvnic_send_crq()
4766 return -EINVAL; in ibmvnic_send_crq()
4790 struct device *dev = &adapter->vdev->dev; in ibmvnic_send_crq_init()
4798 netdev_dbg(adapter->netdev, "Sending CRQ init\n"); in ibmvnic_send_crq_init()
4804 retries--; in ibmvnic_send_crq_init()
4833 len += strlen(utsname()->nodename) + 1; in vnic_client_data_len()
4834 len += strlen(adapter->netdev->name) + 1; in vnic_client_data_len()
4845 /* Type 1 - LPAR OS */ in vnic_add_client_data()
4846 vlcd->type = 1; in vnic_add_client_data()
4848 vlcd->len = cpu_to_be16(len); in vnic_add_client_data()
4849 strscpy(vlcd->name, os_name, len); in vnic_add_client_data()
4850 vlcd = (struct vnic_login_client_data *)(vlcd->name + len); in vnic_add_client_data()
4852 /* Type 2 - LPAR name */ in vnic_add_client_data()
4853 vlcd->type = 2; in vnic_add_client_data()
4854 len = strlen(utsname()->nodename) + 1; in vnic_add_client_data()
4855 vlcd->len = cpu_to_be16(len); in vnic_add_client_data()
4856 strscpy(vlcd->name, utsname()->nodename, len); in vnic_add_client_data()
4857 vlcd = (struct vnic_login_client_data *)(vlcd->name + len); in vnic_add_client_data()
4859 /* Type 3 - device name */ in vnic_add_client_data()
4860 vlcd->type = 3; in vnic_add_client_data()
4861 len = strlen(adapter->netdev->name) + 1; in vnic_add_client_data()
4862 vlcd->len = cpu_to_be16(len); in vnic_add_client_data()
4863 strscpy(vlcd->name, adapter->netdev->name, len); in vnic_add_client_data()
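
vnic_add_client_data() packs three type/length/value records back to back (type 1 LPAR OS, type 2 LPAR name, type 3 device name); each length counts the NUL-terminated string and the next record starts immediately after it. A portable sketch of that packing, writing the header bytes by hand and skipping the cpu_to_be16() conversion of the length field.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Append one record: 1-byte type, 2-byte length, NUL-terminated name.
 * Returns the offset of the next record. */
static size_t add_record(char *buf, uint8_t type, const char *value)
{
	uint16_t len = (uint16_t)(strlen(value) + 1);   /* include the NUL */

	buf[0] = (char)type;
	memcpy(buf + 1, &len, sizeof(len));  /* cpu_to_be16() in the driver */
	memcpy(buf + 3, value, len);
	return 3 + len;
}

int main(void)
{
	char buf[256];
	size_t off = 0;

	off += add_record(buf + off, 1, "Linux");       /* LPAR OS     */
	off += add_record(buf + off, 2, "lpar-name");   /* LPAR name   */
	off += add_record(buf + off, 3, "eth0");        /* device name */

	printf("client data occupies %zu bytes\n", off);
	return 0;
}
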
4870 struct device *dev = &adapter->vdev->dev; in send_login()
4883 if (!adapter->tx_scrq || !adapter->rx_scrq) { in send_login()
4884 netdev_err(adapter->netdev, in send_login()
4886 return -ENOMEM; in send_login()
4896 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) + in send_login()
4911 sizeof(u64) * adapter->req_tx_queues + in send_login()
4912 sizeof(u64) * adapter->req_rx_queues + in send_login()
4913 sizeof(u64) * adapter->req_rx_queues + in send_login()
4927 adapter->login_buf = login_buffer; in send_login()
4928 adapter->login_buf_token = buffer_token; in send_login()
4929 adapter->login_buf_sz = buffer_size; in send_login()
4930 adapter->login_rsp_buf = login_rsp_buffer; in send_login()
4931 adapter->login_rsp_buf_token = rsp_buffer_token; in send_login()
4932 adapter->login_rsp_buf_sz = rsp_buffer_size; in send_login()
4934 login_buffer->len = cpu_to_be32(buffer_size); in send_login()
4935 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB); in send_login()
4936 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues); in send_login()
4937 login_buffer->off_txcomp_subcrqs = in send_login()
4939 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues); in send_login()
4940 login_buffer->off_rxcomp_subcrqs = in send_login()
4942 sizeof(u64) * adapter->req_tx_queues); in send_login()
4943 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token); in send_login()
4944 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size); in send_login()
4950 sizeof(u64) * adapter->req_tx_queues); in send_login()
4952 for (i = 0; i < adapter->req_tx_queues; i++) { in send_login()
4953 if (adapter->tx_scrq[i]) { in send_login()
4955 cpu_to_be64(adapter->tx_scrq[i]->crq_num); in send_login()
4959 for (i = 0; i < adapter->req_rx_queues; i++) { in send_login()
4960 if (adapter->rx_scrq[i]) { in send_login()
4962 cpu_to_be64(adapter->rx_scrq[i]->crq_num); in send_login()
4968 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues)); in send_login()
4969 login_buffer->client_data_offset = in send_login()
4970 cpu_to_be32((char *)vlcd - (char *)login_buffer); in send_login()
4971 login_buffer->client_data_len = cpu_to_be32(client_data_len); in send_login()
4975 netdev_dbg(adapter->netdev, "Login Buffer:\n"); in send_login()
4976 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) { in send_login()
4977 netdev_dbg(adapter->netdev, "%016lx\n", in send_login()
4978 ((unsigned long *)(adapter->login_buf))[i]); in send_login()
4987 adapter->login_pending = true; in send_login()
4990 adapter->login_pending = false; in send_login()
4991 netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc); in send_login()
5002 adapter->login_rsp_buf = NULL; in send_login()
5007 adapter->login_buf = NULL; in send_login()
5009 return -ENOMEM; in send_login()
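
send_login() lays the login buffer out as a fixed header, one u64 handle slot per TX queue, one per RX queue, and then the variable-length client data, recording each offset in the header. The arithmetic below mirrors that layout; the header size is a placeholder for sizeof(struct ibmvnic_login_buffer), and the separate response-buffer layout is not modeled.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	size_t header_size     = 64;   /* placeholder for the header struct */
	size_t req_tx_queues   = 4;
	size_t req_rx_queues   = 4;
	size_t client_data_len = 96;

	size_t off_txcomp  = header_size;
	size_t off_rxcomp  = off_txcomp + sizeof(uint64_t) * req_tx_queues;
	size_t off_client  = off_rxcomp + sizeof(uint64_t) * req_rx_queues;
	size_t buffer_size = off_client + client_data_len;

	printf("tx handles @%zu, rx handles @%zu, client data @%zu, total %zu\n",
	       off_txcomp, off_rxcomp, off_client, buffer_size);
	return 0;
}
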
5059 atomic_set(&adapter->running_cap_crqs, cap_reqs); in send_query_cap()
5067 cap_reqs--; in send_query_cap()
5071 cap_reqs--; in send_query_cap()
5075 cap_reqs--; in send_query_cap()
5079 cap_reqs--; in send_query_cap()
5083 cap_reqs--; in send_query_cap()
5087 cap_reqs--; in send_query_cap()
5092 cap_reqs--; in send_query_cap()
5097 cap_reqs--; in send_query_cap()
5102 cap_reqs--; in send_query_cap()
5107 cap_reqs--; in send_query_cap()
5111 cap_reqs--; in send_query_cap()
5115 cap_reqs--; in send_query_cap()
5119 cap_reqs--; in send_query_cap()
5123 cap_reqs--; in send_query_cap()
5127 cap_reqs--; in send_query_cap()
5131 cap_reqs--; in send_query_cap()
5135 cap_reqs--; in send_query_cap()
5139 cap_reqs--; in send_query_cap()
5143 cap_reqs--; in send_query_cap()
5147 cap_reqs--; in send_query_cap()
5151 cap_reqs--; in send_query_cap()
5156 cap_reqs--; in send_query_cap()
5161 cap_reqs--; in send_query_cap()
5166 cap_reqs--; in send_query_cap()
5171 cap_reqs--; in send_query_cap()
5182 struct device *dev = &adapter->vdev->dev; in send_query_ip_offload()
5185 adapter->ip_offload_tok = in send_query_ip_offload()
5187 &adapter->ip_offload_buf, in send_query_ip_offload()
5191 if (dma_mapping_error(dev, adapter->ip_offload_tok)) { in send_query_ip_offload()
5202 cpu_to_be32(adapter->ip_offload_tok); in send_query_ip_offload()
5209 struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl; in send_control_ip_offload()
5210 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf; in send_control_ip_offload()
5211 struct device *dev = &adapter->vdev->dev; in send_control_ip_offload()
5215 adapter->ip_offload_ctrl_tok = in send_control_ip_offload()
5218 sizeof(adapter->ip_offload_ctrl), in send_control_ip_offload()
5221 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) { in send_control_ip_offload()
5226 ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl)); in send_control_ip_offload()
5227 ctrl_buf->version = cpu_to_be32(INITIAL_VERSION_IOB); in send_control_ip_offload()
5228 ctrl_buf->ipv4_chksum = buf->ipv4_chksum; in send_control_ip_offload()
5229 ctrl_buf->ipv6_chksum = buf->ipv6_chksum; in send_control_ip_offload()
5230 ctrl_buf->tcp_ipv4_chksum = buf->tcp_ipv4_chksum; in send_control_ip_offload()
5231 ctrl_buf->udp_ipv4_chksum = buf->udp_ipv4_chksum; in send_control_ip_offload()
5232 ctrl_buf->tcp_ipv6_chksum = buf->tcp_ipv6_chksum; in send_control_ip_offload()
5233 ctrl_buf->udp_ipv6_chksum = buf->udp_ipv6_chksum; in send_control_ip_offload()
5234 ctrl_buf->large_tx_ipv4 = buf->large_tx_ipv4; in send_control_ip_offload()
5235 ctrl_buf->large_tx_ipv6 = buf->large_tx_ipv6; in send_control_ip_offload()
5238 ctrl_buf->large_rx_ipv4 = 0; in send_control_ip_offload()
5239 ctrl_buf->large_rx_ipv6 = 0; in send_control_ip_offload()
5241 if (adapter->state != VNIC_PROBING) { in send_control_ip_offload()
5242 old_hw_features = adapter->netdev->hw_features; in send_control_ip_offload()
5243 adapter->netdev->hw_features = 0; in send_control_ip_offload()
5246 adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO; in send_control_ip_offload()
5248 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum) in send_control_ip_offload()
5249 adapter->netdev->hw_features |= NETIF_F_IP_CSUM; in send_control_ip_offload()
5251 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum) in send_control_ip_offload()
5252 adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM; in send_control_ip_offload()
5254 if ((adapter->netdev->features & in send_control_ip_offload()
5256 adapter->netdev->hw_features |= NETIF_F_RXCSUM; in send_control_ip_offload()
5258 if (buf->large_tx_ipv4) in send_control_ip_offload()
5259 adapter->netdev->hw_features |= NETIF_F_TSO; in send_control_ip_offload()
5260 if (buf->large_tx_ipv6) in send_control_ip_offload()
5261 adapter->netdev->hw_features |= NETIF_F_TSO6; in send_control_ip_offload()
5263 if (adapter->state == VNIC_PROBING) { in send_control_ip_offload()
5264 adapter->netdev->features |= adapter->netdev->hw_features; in send_control_ip_offload()
5265 } else if (old_hw_features != adapter->netdev->hw_features) { in send_control_ip_offload()
5269 adapter->netdev->features &= adapter->netdev->hw_features; in send_control_ip_offload()
5271 tmp = (old_hw_features ^ adapter->netdev->hw_features) & in send_control_ip_offload()
5272 adapter->netdev->hw_features; in send_control_ip_offload()
5273 adapter->netdev->features |= in send_control_ip_offload()
5274 tmp & adapter->netdev->wanted_features; in send_control_ip_offload()
5281 cpu_to_be32(sizeof(adapter->ip_offload_ctrl)); in send_control_ip_offload()
5282 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok); in send_control_ip_offload()
5289 struct device *dev = &adapter->vdev->dev; in handle_vpd_size_rsp()
5291 if (crq->get_vpd_size_rsp.rc.code) { in handle_vpd_size_rsp()
5293 crq->get_vpd_size_rsp.rc.code); in handle_vpd_size_rsp()
5294 complete(&adapter->fw_done); in handle_vpd_size_rsp()
5298 adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len); in handle_vpd_size_rsp()
5299 complete(&adapter->fw_done); in handle_vpd_size_rsp()
5305 struct device *dev = &adapter->vdev->dev; in handle_vpd_rsp()
5309 memset(adapter->fw_version, 0, 32); in handle_vpd_rsp()
5311 dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len, in handle_vpd_rsp()
5314 if (crq->get_vpd_rsp.rc.code) { in handle_vpd_rsp()
5316 crq->get_vpd_rsp.rc.code); in handle_vpd_rsp()
5323 substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len); in handle_vpd_rsp()
5325 dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n"); in handle_vpd_rsp()
5330 if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) { in handle_vpd_rsp()
5339 (adapter->vpd->buff + adapter->vpd->len)) { in handle_vpd_rsp()
5340 strncpy((char *)adapter->fw_version, substr + 3, fw_level_len); in handle_vpd_rsp()
5346 if (adapter->fw_version[0] == '\0') in handle_vpd_rsp()
5347 strscpy((char *)adapter->fw_version, "N/A", sizeof(adapter->fw_version)); in handle_vpd_rsp()
5348 complete(&adapter->fw_done); in handle_vpd_rsp()
5353 struct device *dev = &adapter->vdev->dev; in handle_query_ip_offload_rsp()
5354 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf; in handle_query_ip_offload_rsp()
5357 dma_unmap_single(dev, adapter->ip_offload_tok, in handle_query_ip_offload_rsp()
5358 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE); in handle_query_ip_offload_rsp()
5360 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n"); in handle_query_ip_offload_rsp()
5361 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++) in handle_query_ip_offload_rsp()
5362 netdev_dbg(adapter->netdev, "%016lx\n", in handle_query_ip_offload_rsp()
5365 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum); in handle_query_ip_offload_rsp()
5366 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum); in handle_query_ip_offload_rsp()
5367 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n", in handle_query_ip_offload_rsp()
5368 buf->tcp_ipv4_chksum); in handle_query_ip_offload_rsp()
5369 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n", in handle_query_ip_offload_rsp()
5370 buf->tcp_ipv6_chksum); in handle_query_ip_offload_rsp()
5371 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n", in handle_query_ip_offload_rsp()
5372 buf->udp_ipv4_chksum); in handle_query_ip_offload_rsp()
5373 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n", in handle_query_ip_offload_rsp()
5374 buf->udp_ipv6_chksum); in handle_query_ip_offload_rsp()
5375 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n", in handle_query_ip_offload_rsp()
5376 buf->large_tx_ipv4); in handle_query_ip_offload_rsp()
5377 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n", in handle_query_ip_offload_rsp()
5378 buf->large_tx_ipv6); in handle_query_ip_offload_rsp()
5379 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n", in handle_query_ip_offload_rsp()
5380 buf->large_rx_ipv4); in handle_query_ip_offload_rsp()
5381 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n", in handle_query_ip_offload_rsp()
5382 buf->large_rx_ipv6); in handle_query_ip_offload_rsp()
5383 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n", in handle_query_ip_offload_rsp()
5384 buf->max_ipv4_header_size); in handle_query_ip_offload_rsp()
5385 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n", in handle_query_ip_offload_rsp()
5386 buf->max_ipv6_header_size); in handle_query_ip_offload_rsp()
5387 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n", in handle_query_ip_offload_rsp()
5388 buf->max_tcp_header_size); in handle_query_ip_offload_rsp()
5389 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n", in handle_query_ip_offload_rsp()
5390 buf->max_udp_header_size); in handle_query_ip_offload_rsp()
5391 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n", in handle_query_ip_offload_rsp()
5392 buf->max_large_tx_size); in handle_query_ip_offload_rsp()
5393 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n", in handle_query_ip_offload_rsp()
5394 buf->max_large_rx_size); in handle_query_ip_offload_rsp()
5395 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n", in handle_query_ip_offload_rsp()
5396 buf->ipv6_extension_header); in handle_query_ip_offload_rsp()
5397 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n", in handle_query_ip_offload_rsp()
5398 buf->tcp_pseudosum_req); in handle_query_ip_offload_rsp()
5399 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n", in handle_query_ip_offload_rsp()
5400 buf->num_ipv6_ext_headers); in handle_query_ip_offload_rsp()
5401 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n", in handle_query_ip_offload_rsp()
5402 buf->off_ipv6_ext_headers); in handle_query_ip_offload_rsp()
5432 struct device *dev = &adapter->vdev->dev; in handle_error_indication()
5435 cause = be16_to_cpu(crq->error_indication.error_cause); in handle_error_indication()
5439 crq->error_indication.flags in handle_error_indication()
5443 if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR) in handle_error_indication()
5452 struct net_device *netdev = adapter->netdev; in handle_change_mac_rsp()
5453 struct device *dev = &adapter->vdev->dev; in handle_change_mac_rsp()
5456 rc = crq->change_mac_addr_rsp.rc.code; in handle_change_mac_rsp()
5461 /* crq->change_mac_addr.mac_addr is the requested one in handle_change_mac_rsp()
5462 * crq->change_mac_addr_rsp.mac_addr is the returned valid one. in handle_change_mac_rsp()
5464 eth_hw_addr_set(netdev, &crq->change_mac_addr_rsp.mac_addr[0]); in handle_change_mac_rsp()
5465 ether_addr_copy(adapter->mac_addr, in handle_change_mac_rsp()
5466 &crq->change_mac_addr_rsp.mac_addr[0]); in handle_change_mac_rsp()
5468 complete(&adapter->fw_done); in handle_change_mac_rsp()
5475 struct device *dev = &adapter->vdev->dev; in handle_request_cap_rsp()
5479 atomic_dec(&adapter->running_cap_crqs); in handle_request_cap_rsp()
5480 netdev_dbg(adapter->netdev, "Outstanding request-caps: %d\n", in handle_request_cap_rsp()
5481 atomic_read(&adapter->running_cap_crqs)); in handle_request_cap_rsp()
5482 switch (be16_to_cpu(crq->request_capability_rsp.capability)) { in handle_request_cap_rsp()
5484 req_value = &adapter->req_tx_queues; in handle_request_cap_rsp()
5488 req_value = &adapter->req_rx_queues; in handle_request_cap_rsp()
5492 req_value = &adapter->req_rx_add_queues; in handle_request_cap_rsp()
5496 req_value = &adapter->req_tx_entries_per_subcrq; in handle_request_cap_rsp()
5500 req_value = &adapter->req_rx_add_entries_per_subcrq; in handle_request_cap_rsp()
5504 req_value = &adapter->req_mtu; in handle_request_cap_rsp()
5508 req_value = &adapter->promisc; in handle_request_cap_rsp()
5513 crq->request_capability.capability); in handle_request_cap_rsp()
5517 switch (crq->request_capability_rsp.rc.code) { in handle_request_cap_rsp()
5523 (long)be64_to_cpu(crq->request_capability_rsp.number), in handle_request_cap_rsp()
5526 if (be16_to_cpu(crq->request_capability_rsp.capability) == in handle_request_cap_rsp()
5530 *req_value = adapter->fallback.mtu; in handle_request_cap_rsp()
5533 be64_to_cpu(crq->request_capability_rsp.number); in handle_request_cap_rsp()
5540 crq->request_capability_rsp.rc.code); in handle_request_cap_rsp()
5545 if (atomic_read(&adapter->running_cap_crqs) == 0) in handle_request_cap_rsp()
5552 struct device *dev = &adapter->vdev->dev; in handle_login_rsp()
5553 struct net_device *netdev = adapter->netdev; in handle_login_rsp()
5554 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf; in handle_login_rsp()
5555 struct ibmvnic_login_buffer *login = adapter->login_buf; in handle_login_rsp()
5567 if (!adapter->login_pending) { in handle_login_rsp()
5571 adapter->login_pending = false; in handle_login_rsp()
5577 if (login_rsp_crq->generic.rc.code) { in handle_login_rsp()
5578 adapter->init_done_rc = login_rsp_crq->generic.rc.code; in handle_login_rsp()
5579 complete(&adapter->init_done); in handle_login_rsp()
5583 if (adapter->failover_pending) { in handle_login_rsp()
5584 adapter->init_done_rc = -EAGAIN; in handle_login_rsp()
5586 complete(&adapter->init_done); in handle_login_rsp()
5591 netdev->mtu = adapter->req_mtu - ETH_HLEN; in handle_login_rsp()
5593 netdev_dbg(adapter->netdev, "Login Response Buffer:\n"); in handle_login_rsp()
5594 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) { in handle_login_rsp()
5595 netdev_dbg(adapter->netdev, "%016lx\n", in handle_login_rsp()
5596 ((unsigned long *)(adapter->login_rsp_buf))[i]); in handle_login_rsp()
5600 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs || in handle_login_rsp()
5601 (be32_to_cpu(login->num_rxcomp_subcrqs) * in handle_login_rsp()
5602 adapter->req_rx_add_queues != in handle_login_rsp()
5603 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) { in handle_login_rsp()
5606 return -EIO; in handle_login_rsp()
5609 rsp_len = be32_to_cpu(login_rsp->len); in handle_login_rsp()
5610 if (be32_to_cpu(login->login_rsp_len) < rsp_len || in handle_login_rsp()
5611 rsp_len <= be32_to_cpu(login_rsp->off_txsubm_subcrqs) || in handle_login_rsp()
5612 rsp_len <= be32_to_cpu(login_rsp->off_rxadd_subcrqs) || in handle_login_rsp()
5613 rsp_len <= be32_to_cpu(login_rsp->off_rxadd_buff_size) || in handle_login_rsp()
5614 rsp_len <= be32_to_cpu(login_rsp->off_supp_tx_desc)) { in handle_login_rsp()
5622 return -EIO; in handle_login_rsp()
5625 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + in handle_login_rsp()
5626 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size)); in handle_login_rsp()
5630 adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]); in handle_login_rsp()
5632 num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); in handle_login_rsp()
5633 num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); in handle_login_rsp()
5635 tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + in handle_login_rsp()
5636 be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs)); in handle_login_rsp()
5637 rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + in handle_login_rsp()
5638 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs)); in handle_login_rsp()
5641 adapter->tx_scrq[i]->handle = tx_handle_array[i]; in handle_login_rsp()
5644 adapter->rx_scrq[i]->handle = rx_handle_array[i]; in handle_login_rsp()
5646 adapter->num_active_tx_scrqs = num_tx_pools; in handle_login_rsp()
5647 adapter->num_active_rx_scrqs = num_rx_pools; in handle_login_rsp()
5650 complete(&adapter->init_done); in handle_login_rsp()
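
Before handle_login_rsp() trusts any offset in the response, it checks that the reported length fits in the buffer the driver mapped for the response and that every offset lies strictly inside that length. A compact model of that validation, with shortened field names.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct login_rsp_model {
	uint32_t len;               /* length the server reported         */
	uint32_t off_txsubm;        /* offset of TX handle array          */
	uint32_t off_rxadd;         /* offset of RX handle array          */
	uint32_t off_buff_size;     /* offset of RX buffer-size array     */
	uint32_t off_supp_tx_desc;  /* offset of supported TX descriptors */
};

static bool rsp_sane(const struct login_rsp_model *r, uint32_t mapped_len)
{
	return mapped_len >= r->len &&
	       r->len > r->off_txsubm &&
	       r->len > r->off_rxadd &&
	       r->len > r->off_buff_size &&
	       r->len > r->off_supp_tx_desc;
}

int main(void)
{
	struct login_rsp_model rsp = {
		.len = 200, .off_txsubm = 64, .off_rxadd = 96,
		.off_buff_size = 128, .off_supp_tx_desc = 160,
	};

	printf("login response usable: %s\n",
	       rsp_sane(&rsp, 256) ? "yes" : "no");
	return 0;
}
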
5658 struct device *dev = &adapter->vdev->dev; in handle_request_unmap_rsp()
5661 rc = crq->request_unmap_rsp.rc.code; in handle_request_unmap_rsp()
5669 struct net_device *netdev = adapter->netdev; in handle_query_map_rsp()
5670 struct device *dev = &adapter->vdev->dev; in handle_query_map_rsp()
5673 rc = crq->query_map_rsp.rc.code; in handle_query_map_rsp()
5679 crq->query_map_rsp.page_size, in handle_query_map_rsp()
5680 __be32_to_cpu(crq->query_map_rsp.tot_pages), in handle_query_map_rsp()
5681 __be32_to_cpu(crq->query_map_rsp.free_pages)); in handle_query_map_rsp()
5687 struct net_device *netdev = adapter->netdev; in handle_query_cap_rsp()
5688 struct device *dev = &adapter->vdev->dev; in handle_query_cap_rsp()
5691 atomic_dec(&adapter->running_cap_crqs); in handle_query_cap_rsp()
5693 atomic_read(&adapter->running_cap_crqs)); in handle_query_cap_rsp()
5694 rc = crq->query_capability.rc.code; in handle_query_cap_rsp()
5700 switch (be16_to_cpu(crq->query_capability.capability)) { in handle_query_cap_rsp()
5702 adapter->min_tx_queues = in handle_query_cap_rsp()
5703 be64_to_cpu(crq->query_capability.number); in handle_query_cap_rsp()
5705 adapter->min_tx_queues); in handle_query_cap_rsp()
5708 adapter->min_rx_queues = in handle_query_cap_rsp()
5709 be64_to_cpu(crq->query_capability.number); in handle_query_cap_rsp()
5711 adapter->min_rx_queues); in handle_query_cap_rsp()
5714 adapter->min_rx_add_queues = in handle_query_cap_rsp()
5715 be64_to_cpu(crq->query_capability.number); in handle_query_cap_rsp()
5717 adapter->min_rx_add_queues); in handle_query_cap_rsp()
5720 adapter->max_tx_queues = in handle_query_cap_rsp()
5721 be64_to_cpu(crq->query_capability.number); in handle_query_cap_rsp()
5723 adapter->max_tx_queues); in handle_query_cap_rsp()
5726 adapter->max_rx_queues = in handle_query_cap_rsp()
5727 be64_to_cpu(crq->query_capability.number); in handle_query_cap_rsp()
5729 adapter->max_rx_queues); in handle_query_cap_rsp()
5732 adapter->max_rx_add_queues = in handle_query_cap_rsp()
5733 be64_to_cpu(crq->query_capability.number); in handle_query_cap_rsp()
5735 adapter->max_rx_add_queues); in handle_query_cap_rsp()
5738 adapter->min_tx_entries_per_subcrq = in handle_query_cap_rsp()
5739 be64_to_cpu(crq->query_capability.number); in handle_query_cap_rsp()
5741 adapter->min_tx_entries_per_subcrq); in handle_query_cap_rsp()
5744 adapter->min_rx_add_entries_per_subcrq = in handle_query_cap_rsp()
5745 be64_to_cpu(crq->query_capability.number); in handle_query_cap_rsp()
5747 adapter->min_rx_add_entries_per_subcrq); in handle_query_cap_rsp()
5750 adapter->max_tx_entries_per_subcrq = in handle_query_cap_rsp()
5751 be64_to_cpu(crq->query_capability.number); in handle_query_cap_rsp()
5753 adapter->max_tx_entries_per_subcrq); in handle_query_cap_rsp()
5756 adapter->max_rx_add_entries_per_subcrq = in handle_query_cap_rsp()
5757 be64_to_cpu(crq->query_capability.number); in handle_query_cap_rsp()
5759 adapter->max_rx_add_entries_per_subcrq); in handle_query_cap_rsp()
5762 adapter->tcp_ip_offload = in handle_query_cap_rsp()
5763 be64_to_cpu(crq->query_capability.number); in handle_query_cap_rsp()
5765 adapter->tcp_ip_offload); in handle_query_cap_rsp()
5768 adapter->promisc_supported = in handle_query_cap_rsp()
5769 be64_to_cpu(crq->query_capability.number); in handle_query_cap_rsp()
5771 adapter->promisc_supported); in handle_query_cap_rsp()
5774 adapter->min_mtu = be64_to_cpu(crq->query_capability.number); in handle_query_cap_rsp()
5775 netdev->min_mtu = adapter->min_mtu - ETH_HLEN; in handle_query_cap_rsp()
5776 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu); in handle_query_cap_rsp()
5779 adapter->max_mtu = be64_to_cpu(crq->query_capability.number); in handle_query_cap_rsp()
5780 netdev->max_mtu = adapter->max_mtu - ETH_HLEN; in handle_query_cap_rsp()
5781 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu); in handle_query_cap_rsp()
5784 adapter->max_multicast_filters = in handle_query_cap_rsp()
5785 be64_to_cpu(crq->query_capability.number); in handle_query_cap_rsp()
5787 adapter->max_multicast_filters); in handle_query_cap_rsp()
5790 adapter->vlan_header_insertion = in handle_query_cap_rsp()
5791 be64_to_cpu(crq->query_capability.number); in handle_query_cap_rsp()
5792 if (adapter->vlan_header_insertion) in handle_query_cap_rsp()
5793 netdev->features |= NETIF_F_HW_VLAN_STAG_TX; in handle_query_cap_rsp()
5795 adapter->vlan_header_insertion); in handle_query_cap_rsp()
5798 adapter->rx_vlan_header_insertion = in handle_query_cap_rsp()
5799 be64_to_cpu(crq->query_capability.number); in handle_query_cap_rsp()
5801 adapter->rx_vlan_header_insertion); in handle_query_cap_rsp()
5804 adapter->max_tx_sg_entries = in handle_query_cap_rsp()
5805 be64_to_cpu(crq->query_capability.number); in handle_query_cap_rsp()
5807 adapter->max_tx_sg_entries); in handle_query_cap_rsp()
5810 adapter->rx_sg_supported = in handle_query_cap_rsp()
5811 be64_to_cpu(crq->query_capability.number); in handle_query_cap_rsp()
5813 adapter->rx_sg_supported); in handle_query_cap_rsp()
5816 adapter->opt_tx_comp_sub_queues = in handle_query_cap_rsp()
5817 be64_to_cpu(crq->query_capability.number); in handle_query_cap_rsp()
5819 adapter->opt_tx_comp_sub_queues); in handle_query_cap_rsp()
5822 adapter->opt_rx_comp_queues = in handle_query_cap_rsp()
5823 be64_to_cpu(crq->query_capability.number); in handle_query_cap_rsp()
5825 adapter->opt_rx_comp_queues); in handle_query_cap_rsp()
5828 adapter->opt_rx_bufadd_q_per_rx_comp_q = in handle_query_cap_rsp()
5829 be64_to_cpu(crq->query_capability.number); in handle_query_cap_rsp()
5831 adapter->opt_rx_bufadd_q_per_rx_comp_q); in handle_query_cap_rsp()
5834 adapter->opt_tx_entries_per_subcrq = in handle_query_cap_rsp()
5835 be64_to_cpu(crq->query_capability.number); in handle_query_cap_rsp()
5837 adapter->opt_tx_entries_per_subcrq); in handle_query_cap_rsp()
5840 adapter->opt_rxba_entries_per_subcrq = in handle_query_cap_rsp()
5841 be64_to_cpu(crq->query_capability.number); in handle_query_cap_rsp()
5843 adapter->opt_rxba_entries_per_subcrq); in handle_query_cap_rsp()
5846 adapter->tx_rx_desc_req = crq->query_capability.number; in handle_query_cap_rsp()
5848 adapter->tx_rx_desc_req); in handle_query_cap_rsp()
5853 crq->query_capability.capability); in handle_query_cap_rsp()
5857 if (atomic_read(&adapter->running_cap_crqs) == 0) in handle_query_cap_rsp()
5870 mutex_lock(&adapter->fw_lock); in send_query_phys_parms()
5871 adapter->fw_done_rc = 0; in send_query_phys_parms()
5872 reinit_completion(&adapter->fw_done); in send_query_phys_parms()
5876 mutex_unlock(&adapter->fw_lock); in send_query_phys_parms()
5880 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); in send_query_phys_parms()
5882 mutex_unlock(&adapter->fw_lock); in send_query_phys_parms()
5886 mutex_unlock(&adapter->fw_lock); in send_query_phys_parms()
5887 return adapter->fw_done_rc ? -EIO : 0; in send_query_phys_parms()
5893 struct net_device *netdev = adapter->netdev; in handle_query_phys_parms_rsp()
5895 __be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed); in handle_query_phys_parms_rsp()
5897 rc = crq->query_phys_parms_rsp.rc.code; in handle_query_phys_parms_rsp()
5904 adapter->speed = SPEED_10; in handle_query_phys_parms_rsp()
5907 adapter->speed = SPEED_100; in handle_query_phys_parms_rsp()
5910 adapter->speed = SPEED_1000; in handle_query_phys_parms_rsp()
5913 adapter->speed = SPEED_10000; in handle_query_phys_parms_rsp()
5916 adapter->speed = SPEED_25000; in handle_query_phys_parms_rsp()
5919 adapter->speed = SPEED_40000; in handle_query_phys_parms_rsp()
5922 adapter->speed = SPEED_50000; in handle_query_phys_parms_rsp()
5925 adapter->speed = SPEED_100000; in handle_query_phys_parms_rsp()
5928 adapter->speed = SPEED_200000; in handle_query_phys_parms_rsp()
5933 adapter->speed = SPEED_UNKNOWN; in handle_query_phys_parms_rsp()
5935 if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX) in handle_query_phys_parms_rsp()
5936 adapter->duplex = DUPLEX_FULL; in handle_query_phys_parms_rsp()
5937 else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX) in handle_query_phys_parms_rsp()
5938 adapter->duplex = DUPLEX_HALF; in handle_query_phys_parms_rsp()
5940 adapter->duplex = DUPLEX_UNKNOWN; in handle_query_phys_parms_rsp()
5948 struct ibmvnic_generic_crq *gen_crq = &crq->generic; in ibmvnic_handle_crq()
5949 struct net_device *netdev = adapter->netdev; in ibmvnic_handle_crq()
5950 struct device *dev = &adapter->vdev->dev; in ibmvnic_handle_crq()
5957 switch (gen_crq->first) { in ibmvnic_handle_crq()
5959 switch (gen_crq->cmd) { in ibmvnic_handle_crq()
5962 adapter->from_passive_init = true; in ibmvnic_handle_crq()
5966 adapter->login_pending = false; in ibmvnic_handle_crq()
5968 if (adapter->state == VNIC_DOWN) in ibmvnic_handle_crq()
5973 if (rc && rc != -EBUSY) { in ibmvnic_handle_crq()
5986 adapter->failover_pending = false; in ibmvnic_handle_crq()
5989 if (!completion_done(&adapter->init_done)) { in ibmvnic_handle_crq()
5990 if (!adapter->init_done_rc) in ibmvnic_handle_crq()
5991 adapter->init_done_rc = -EAGAIN; in ibmvnic_handle_crq()
5992 complete(&adapter->init_done); in ibmvnic_handle_crq()
5998 adapter->crq.active = true; in ibmvnic_handle_crq()
6002 dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd); in ibmvnic_handle_crq()
6007 adapter->crq.active = false; in ibmvnic_handle_crq()
6011 if (!completion_done(&adapter->fw_done)) { in ibmvnic_handle_crq()
6012 adapter->fw_done_rc = -EIO; in ibmvnic_handle_crq()
6013 complete(&adapter->fw_done); in ibmvnic_handle_crq()
6016 /* if we got here during crq-init, retry crq-init */ in ibmvnic_handle_crq()
6017 if (!completion_done(&adapter->init_done)) { in ibmvnic_handle_crq()
6018 adapter->init_done_rc = -EAGAIN; in ibmvnic_handle_crq()
6019 complete(&adapter->init_done); in ibmvnic_handle_crq()
6022 if (!completion_done(&adapter->stats_done)) in ibmvnic_handle_crq()
6023 complete(&adapter->stats_done); in ibmvnic_handle_crq()
6024 if (test_bit(0, &adapter->resetting)) in ibmvnic_handle_crq()
6025 adapter->force_reset_recovery = true; in ibmvnic_handle_crq()
6026 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) { in ibmvnic_handle_crq()
6027 dev_info(dev, "Migrated, re-enabling adapter\n"); in ibmvnic_handle_crq()
6029 } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) { in ibmvnic_handle_crq()
6031 adapter->failover_pending = true; in ibmvnic_handle_crq()
6035 gen_crq->cmd); in ibmvnic_handle_crq()
6043 gen_crq->first); in ibmvnic_handle_crq()
6047 switch (gen_crq->cmd) { in ibmvnic_handle_crq()
6049 rc = crq->version_exchange_rsp.rc.code; in ibmvnic_handle_crq()
6055 be16_to_cpu(crq->version_exchange_rsp.version); in ibmvnic_handle_crq()
6067 adapter->fw_done_rc = crq->request_map_rsp.rc.code; in ibmvnic_handle_crq()
6068 complete(&adapter->fw_done); in ibmvnic_handle_crq()
6083 crq->logical_link_state_rsp.link_state, in ibmvnic_handle_crq()
6084 crq->logical_link_state_rsp.rc.code); in ibmvnic_handle_crq()
6085 adapter->logical_link_state = in ibmvnic_handle_crq()
6086 crq->logical_link_state_rsp.link_state; in ibmvnic_handle_crq()
6087 adapter->init_done_rc = crq->logical_link_state_rsp.rc.code; in ibmvnic_handle_crq()
6088 complete(&adapter->init_done); in ibmvnic_handle_crq()
6092 adapter->phys_link_state = in ibmvnic_handle_crq()
6093 crq->link_state_indication.phys_link_state; in ibmvnic_handle_crq()
6094 adapter->logical_link_state = in ibmvnic_handle_crq()
6095 crq->link_state_indication.logical_link_state; in ibmvnic_handle_crq()
6096 if (adapter->phys_link_state && adapter->logical_link_state) in ibmvnic_handle_crq()
6103 adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter); in ibmvnic_handle_crq()
6111 complete(&adapter->stats_done); in ibmvnic_handle_crq()
6122 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok, in ibmvnic_handle_crq()
6123 sizeof(adapter->ip_offload_ctrl), in ibmvnic_handle_crq()
6125 complete(&adapter->init_done); in ibmvnic_handle_crq()
6129 complete(&adapter->fw_done); in ibmvnic_handle_crq()
6138 adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter); in ibmvnic_handle_crq()
6139 complete(&adapter->fw_done); in ibmvnic_handle_crq()
6143 gen_crq->cmd); in ibmvnic_handle_crq()
6151 tasklet_schedule(&adapter->tasklet); in ibmvnic_interrupt()
6158 struct ibmvnic_crq_queue *queue = &adapter->crq; in ibmvnic_tasklet()
6162 spin_lock_irqsave(&queue->lock, flags); in ibmvnic_tasklet()
6167 * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded in ibmvnic_tasklet()
6169 * switch(gen_crq->first) and switch(gen_crq->cmd). in ibmvnic_tasklet()
6173 crq->generic.first = 0; in ibmvnic_tasklet()
6176 spin_unlock_irqrestore(&queue->lock, flags); in ibmvnic_tasklet()
6181 struct vio_dev *vdev = adapter->vdev; in ibmvnic_reenable_crq_queue()
6185 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address); in ibmvnic_reenable_crq_queue()
6189 dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc); in ibmvnic_reenable_crq_queue()
6196 struct ibmvnic_crq_queue *crq = &adapter->crq; in ibmvnic_reset_crq()
6197 struct device *dev = &adapter->vdev->dev; in ibmvnic_reset_crq()
6198 struct vio_dev *vdev = adapter->vdev; in ibmvnic_reset_crq()
6203 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); in ibmvnic_reset_crq()
6207 if (!crq->msgs) in ibmvnic_reset_crq()
6208 return -EINVAL; in ibmvnic_reset_crq()
6210 memset(crq->msgs, 0, PAGE_SIZE); in ibmvnic_reset_crq()
6211 crq->cur = 0; in ibmvnic_reset_crq()
6212 crq->active = false; in ibmvnic_reset_crq()
6214 /* And re-open it again */ in ibmvnic_reset_crq()
6215 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, in ibmvnic_reset_crq()
6216 crq->msg_token, PAGE_SIZE); in ibmvnic_reset_crq()
6229 struct ibmvnic_crq_queue *crq = &adapter->crq; in release_crq_queue()
6230 struct vio_dev *vdev = adapter->vdev; in release_crq_queue()
6233 if (!crq->msgs) in release_crq_queue()
6236 netdev_dbg(adapter->netdev, "Releasing CRQ\n"); in release_crq_queue()
6237 free_irq(vdev->irq, adapter); in release_crq_queue()
6238 tasklet_kill(&adapter->tasklet); in release_crq_queue()
6240 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); in release_crq_queue()
6243 dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE, in release_crq_queue()
6245 free_page((unsigned long)crq->msgs); in release_crq_queue()
6246 crq->msgs = NULL; in release_crq_queue()
6247 crq->active = false; in release_crq_queue()
6252 struct ibmvnic_crq_queue *crq = &adapter->crq; in init_crq_queue()
6253 struct device *dev = &adapter->vdev->dev; in init_crq_queue()
6254 struct vio_dev *vdev = adapter->vdev; in init_crq_queue()
6255 int rc, retrc = -ENOMEM; in init_crq_queue()
6257 if (crq->msgs) in init_crq_queue()
6260 crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL); in init_crq_queue()
6263 if (!crq->msgs) in init_crq_queue()
6264 return -ENOMEM; in init_crq_queue()
6266 crq->size = PAGE_SIZE / sizeof(*crq->msgs); in init_crq_queue()
6267 crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE, in init_crq_queue()
6269 if (dma_mapping_error(dev, crq->msg_token)) in init_crq_queue()
6272 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, in init_crq_queue()
6273 crq->msg_token, PAGE_SIZE); in init_crq_queue()
6289 tasklet_setup(&adapter->tasklet, (void *)ibmvnic_tasklet); in init_crq_queue()
6291 netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq); in init_crq_queue()
6292 snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x", in init_crq_queue()
6293 adapter->vdev->unit_address); in init_crq_queue()
6294 rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter); in init_crq_queue()
6297 vdev->irq, rc); in init_crq_queue()
6307 crq->cur = 0; in init_crq_queue()
6308 spin_lock_init(&crq->lock); in init_crq_queue()
6311 tasklet_schedule(&adapter->tasklet); in init_crq_queue()
6316 tasklet_kill(&adapter->tasklet); in init_crq_queue()
6318 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address); in init_crq_queue()
6321 dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL); in init_crq_queue()
6323 free_page((unsigned long)crq->msgs); in init_crq_queue()
6324 crq->msgs = NULL; in init_crq_queue()
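init_crq_queue() builds the queue strictly in dependency order and unwinds in reverse on failure, which is what the labelled error path at the end of the listing above implements. A condensed sketch of that shape, continuing demo_crq_queue; the H_REG_CRQ busy/closed handling, the real IRQ naming and the initial tasklet kick are trimmed.

/*
 * Setup sketch with the matching reverse-order unwind. The rc values and
 * the "demo-crq" IRQ name are illustrative.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <asm/hvcall.h>

static int demo_init_crq(struct demo_crq_queue *q, struct device *dev,
			 unsigned long unit, unsigned int irq,
			 irq_handler_t handler, void *cookie)
{
	long hrc;
	int rc;

	q->msgs = (void *)get_zeroed_page(GFP_KERNEL);
	if (!q->msgs)
		return -ENOMEM;

	q->msg_token = dma_map_single(dev, q->msgs, PAGE_SIZE,
				      DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, q->msg_token)) {
		rc = -ENOMEM;
		goto map_failed;
	}

	hrc = plpar_hcall_norets(H_REG_CRQ, unit, q->msg_token, PAGE_SIZE);
	if (hrc != H_SUCCESS) {
		rc = -EIO;	/* the real code also retries H_BUSY etc. */
		goto reg_failed;
	}

	rc = request_irq(irq, handler, 0, "demo-crq", cookie);
	if (rc)
		goto req_irq_failed;

	q->cur = 0;
	q->active = true;
	return 0;

req_irq_failed:
	plpar_hcall_norets(H_FREE_CRQ, unit);	/* undo the registration */
reg_failed:
	dma_unmap_single(dev, q->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)q->msgs);
	q->msgs = NULL;
	return rc;
}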
6330 struct device *dev = &adapter->vdev->dev; in ibmvnic_reset_init()
6332 u64 old_num_rx_queues = adapter->req_rx_queues; in ibmvnic_reset_init()
6333 u64 old_num_tx_queues = adapter->req_tx_queues; in ibmvnic_reset_init()
6336 adapter->from_passive_init = false; in ibmvnic_reset_init()
6344 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { in ibmvnic_reset_init()
6346 return -ETIMEDOUT; in ibmvnic_reset_init()
6349 if (adapter->init_done_rc) { in ibmvnic_reset_init()
6351 dev_err(dev, "CRQ-init failed, %d\n", adapter->init_done_rc); in ibmvnic_reset_init()
6352 return adapter->init_done_rc; in ibmvnic_reset_init()
6355 if (adapter->from_passive_init) { in ibmvnic_reset_init()
6356 adapter->state = VNIC_OPEN; in ibmvnic_reset_init()
6357 adapter->from_passive_init = false; in ibmvnic_reset_init()
6358 dev_err(dev, "CRQ-init failed, passive-init\n"); in ibmvnic_reset_init()
6359 return -EINVAL; in ibmvnic_reset_init()
6363 test_bit(0, &adapter->resetting) && !adapter->wait_for_reset && in ibmvnic_reset_init()
6364 adapter->reset_reason != VNIC_RESET_MOBILITY) { in ibmvnic_reset_init()
6365 if (adapter->req_rx_queues != old_num_rx_queues || in ibmvnic_reset_init()
6366 adapter->req_tx_queues != old_num_tx_queues) { in ibmvnic_reset_init()
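ibmvnic_reset_init() above re-arms init_done, starts CRQ initialization, waits with a timeout, and then compares the newly negotiated queue counts against the old ones to decide whether the sub-CRQ resources must be rebuilt. A rough sketch of that flow; demo_start_init() and demo_release_subcrqs() are assumed stand-ins for the driver's helpers, and the 30s timeout is illustrative.

/*
 * Sketch of the reset-time (re)initialization flow; the helper prototypes
 * are assumptions.
 */
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

struct demo_vnic {
	struct completion init_done;	/* completed by the CRQ handler */
	int init_done_rc;
	u64 req_rx_queues, req_tx_queues;
};

int demo_start_init(struct demo_vnic *a);	/* sends the version exchange */
void demo_release_subcrqs(struct demo_vnic *a);	/* frees per-queue resources */

static int demo_reset_init(struct demo_vnic *a, bool during_reset)
{
	unsigned long timeout = msecs_to_jiffies(30000);
	u64 old_rx = a->req_rx_queues, old_tx = a->req_tx_queues;
	int rc;

	reinit_completion(&a->init_done);
	a->init_done_rc = 0;

	rc = demo_start_init(a);
	if (rc)
		return rc;

	if (!wait_for_completion_timeout(&a->init_done, timeout))
		return -ETIMEDOUT;
	if (a->init_done_rc)
		return a->init_done_rc;

	/* Rebuild sub-CRQ resources only if the negotiated counts changed */
	if (during_reset &&
	    (a->req_rx_queues != old_rx || a->req_tx_queues != old_tx))
		demo_release_subcrqs(a);

	return 0;
}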
6411 dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n", in ibmvnic_probe()
6412 dev->unit_address); in ibmvnic_probe()
6417 dev_err(&dev->dev, in ibmvnic_probe()
6426 return -ENOMEM; in ibmvnic_probe()
6429 adapter->state = VNIC_PROBING; in ibmvnic_probe()
6430 dev_set_drvdata(&dev->dev, netdev); in ibmvnic_probe()
6431 adapter->vdev = dev; in ibmvnic_probe()
6432 adapter->netdev = netdev; in ibmvnic_probe()
6433 adapter->login_pending = false; in ibmvnic_probe()
6434 memset(&adapter->map_ids, 0, sizeof(adapter->map_ids)); in ibmvnic_probe()
6435 /* map_ids start at 1, so ensure map_id 0 is always "in-use" */ in ibmvnic_probe()
6436 bitmap_set(adapter->map_ids, 0, 1); in ibmvnic_probe()
6438 ether_addr_copy(adapter->mac_addr, mac_addr_p); in ibmvnic_probe()
6439 eth_hw_addr_set(netdev, adapter->mac_addr); in ibmvnic_probe()
6440 netdev->irq = dev->irq; in ibmvnic_probe()
6441 netdev->netdev_ops = &ibmvnic_netdev_ops; in ibmvnic_probe()
6442 netdev->ethtool_ops = &ibmvnic_ethtool_ops; in ibmvnic_probe()
6443 SET_NETDEV_DEV(netdev, &dev->dev); in ibmvnic_probe()
6445 INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset); in ibmvnic_probe()
6446 INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset, in ibmvnic_probe()
6448 INIT_LIST_HEAD(&adapter->rwi_list); in ibmvnic_probe()
6449 spin_lock_init(&adapter->rwi_lock); in ibmvnic_probe()
6450 spin_lock_init(&adapter->state_lock); in ibmvnic_probe()
6451 mutex_init(&adapter->fw_lock); in ibmvnic_probe()
6452 init_completion(&adapter->probe_done); in ibmvnic_probe()
6453 init_completion(&adapter->init_done); in ibmvnic_probe()
6454 init_completion(&adapter->fw_done); in ibmvnic_probe()
6455 init_completion(&adapter->reset_done); in ibmvnic_probe()
6456 init_completion(&adapter->stats_done); in ibmvnic_probe()
6457 clear_bit(0, &adapter->resetting); in ibmvnic_probe()
6458 adapter->prev_rx_buf_sz = 0; in ibmvnic_probe()
6459 adapter->prev_mtu = 0; in ibmvnic_probe()
6468 adapter->failover_pending = false; in ibmvnic_probe()
6477 * will not access the ->rwi_list and since we released CRQ, in ibmvnic_probe()
6481 	 * a reset after we purged but that's ok - we just may end in ibmvnic_probe()
6486 spin_lock_irqsave(&adapter->rwi_lock, flags); in ibmvnic_probe()
6488 spin_unlock_irqrestore(&adapter->rwi_lock, flags); in ibmvnic_probe()
6492 dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n", in ibmvnic_probe()
6498 } while (rc == -EAGAIN); in ibmvnic_probe()
6516 rc = device_create_file(&dev->dev, &dev_attr_failover); in ibmvnic_probe()
6523 adapter->state = VNIC_PROBED; in ibmvnic_probe()
6524 netdev->mtu = adapter->req_mtu - ETH_HLEN; in ibmvnic_probe()
6525 netdev->min_mtu = adapter->min_mtu - ETH_HLEN; in ibmvnic_probe()
6526 netdev->max_mtu = adapter->max_mtu - ETH_HLEN; in ibmvnic_probe()
6528 adapter->state = VNIC_DOWN; in ibmvnic_probe()
6531 adapter->wait_for_reset = false; in ibmvnic_probe()
6532 adapter->last_reset_time = jiffies; in ibmvnic_probe()
6536 dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc); in ibmvnic_probe()
6539 dev_info(&dev->dev, "ibmvnic registered\n"); in ibmvnic_probe()
6547 complete(&adapter->probe_done); in ibmvnic_probe()
6555 device_remove_file(&dev->dev, &dev_attr_failover); in ibmvnic_probe()
6570 adapter->state = VNIC_REMOVING; in ibmvnic_probe()
6571 complete(&adapter->probe_done); in ibmvnic_probe()
6572 flush_work(&adapter->ibmvnic_reset); in ibmvnic_probe()
6573 flush_delayed_work(&adapter->ibmvnic_delayed_reset); in ibmvnic_probe()
6577 mutex_destroy(&adapter->fw_lock); in ibmvnic_probe()
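The probe fragment above follows the usual vio/netdev shape: allocate the netdev plus private adapter, wire up the work items, completions and locks before anything can fire, bring the CRQ up (retrying while bring-up asks for another attempt), and register the netdev last. A trimmed sketch of that ordering with hypothetical names; demo_init_crq_and_login(), demo_release_crq_full() and demo_reset_worker() are assumed stand-ins, and the error handling is compressed into goto labels.

/*
 * Probe-ordering sketch: everything the IRQ/tasklet/reset paths can touch
 * exists before the CRQ comes up, and the netdev is registered last.
 */
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_priv {
	struct net_device *netdev;
	struct work_struct reset_work;
	struct completion init_done;
	spinlock_t rwi_lock;
	spinlock_t state_lock;
	bool removing;
};

int demo_init_crq_and_login(struct demo_priv *p);
void demo_release_crq_full(struct demo_priv *p);
void demo_reset_worker(struct work_struct *w);

static int demo_probe(struct device *dev)
{
	struct net_device *netdev;
	struct demo_priv *p;
	int rc;

	netdev = alloc_etherdev(sizeof(*p));
	if (!netdev)
		return -ENOMEM;

	p = netdev_priv(netdev);
	p->netdev = netdev;
	dev_set_drvdata(dev, netdev);
	SET_NETDEV_DEV(netdev, dev);

	/* Infrastructure first: async paths may fire as soon as CRQ is up */
	INIT_WORK(&p->reset_work, demo_reset_worker);
	init_completion(&p->init_done);
	spin_lock_init(&p->rwi_lock);
	spin_lock_init(&p->state_lock);

	/* Bring-up may ask to be retried (e.g. a transport event landed
	 * mid-negotiation), hence the -EAGAIN loop in the listing above.
	 */
	do {
		rc = demo_init_crq_and_login(p);
	} while (rc == -EAGAIN);
	if (rc)
		goto init_failed;

	rc = register_netdev(netdev);	/* only once the device can talk */
	if (rc)
		goto reg_failed;

	return 0;

reg_failed:
	demo_release_crq_full(p);
init_failed:
	flush_work(&p->reset_work);	/* nothing queued may outlive the netdev */
	dev_set_drvdata(dev, NULL);
	free_netdev(netdev);
	return rc;
}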
6585 struct net_device *netdev = dev_get_drvdata(&dev->dev); in ibmvnic_remove()
6589 spin_lock_irqsave(&adapter->state_lock, flags); in ibmvnic_remove()
6598 spin_lock(&adapter->rwi_lock); in ibmvnic_remove()
6599 adapter->state = VNIC_REMOVING; in ibmvnic_remove()
6600 spin_unlock(&adapter->rwi_lock); in ibmvnic_remove()
6602 spin_unlock_irqrestore(&adapter->state_lock, flags); in ibmvnic_remove()
6606 flush_work(&adapter->ibmvnic_reset); in ibmvnic_remove()
6607 flush_delayed_work(&adapter->ibmvnic_delayed_reset); in ibmvnic_remove()
6621 adapter->state = VNIC_REMOVED; in ibmvnic_remove()
6624 mutex_destroy(&adapter->fw_lock); in ibmvnic_remove()
6625 device_remove_file(&dev->dev, &dev_attr_failover); in ibmvnic_remove()
6627 dev_set_drvdata(&dev->dev, NULL); in ibmvnic_remove()
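ibmvnic_remove() publishes the removing state under both the state lock and the reset-work (rwi) lock before flushing, so a racing reset either sees the new state and backs off or finishes before the flush returns. A short sketch continuing demo_priv from the probe sketch above:

static void demo_remove(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct demo_priv *p = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&p->state_lock, flags);
	spin_lock(&p->rwi_lock);
	p->removing = true;		/* visible to any reset in flight */
	spin_unlock(&p->rwi_lock);
	spin_unlock_irqrestore(&p->state_lock, flags);

	flush_work(&p->reset_work);	/* a reset that raced is finished now */
	unregister_netdev(netdev);
	demo_release_crq_full(p);
	dev_set_drvdata(dev, NULL);
	free_netdev(netdev);
}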
6640 return -EINVAL; in failover_store()
6642 rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address, in failover_store()
6653 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, in failover_store()
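The two hcalls above are the failover sysfs hook: plpar_hcall() returns its outputs in retbuf[] (here the session token), and a second call reports a session error against that token to trigger failover. A sketch of that sequence; the error handling is trimmed and the exact call shape should be treated as an assumption based on this listing.

/*
 * Two-step H_VIOCTL sketch: fetch the session token, then flag a session
 * error on it.
 */
#include <linux/errno.h>
#include <asm/hvcall.h>

static int demo_trigger_failover(unsigned long unit_address)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	unsigned long session_token;
	long rc;

	rc = plpar_hcall(H_VIOCTL, retbuf, unit_address,
			 H_GET_SESSION_TOKEN, 0, 0, 0);
	if (rc != H_SUCCESS)
		return -EINVAL;

	session_token = retbuf[0];	/* first output register */

	rc = plpar_hcall_norets(H_VIOCTL, unit_address,
				H_SESSION_ERR_DETECTED, session_token, 0, 0);
	return rc == H_SUCCESS ? 0 : -EINVAL;
}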
6674 struct net_device *netdev = dev_get_drvdata(&vdev->dev); in ibmvnic_get_desired_dma()
6680 tbl = get_iommu_table_base(&vdev->dev); in ibmvnic_get_desired_dma()
6691 for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++) in ibmvnic_get_desired_dma()
6694 for (i = 0; i < adapter->num_active_rx_pools; i++) in ibmvnic_get_desired_dma()
6695 ret += adapter->rx_pool[i].size * in ibmvnic_get_desired_dma()
6696 IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl); in ibmvnic_get_desired_dma()
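ibmvnic_get_desired_dma() above estimates the IO address space the device will want: a page for the CRQ, an allowance per sub-CRQ, plus every RX-pool buffer rounded up to the IOMMU page size and multiplied by the pool depth. A small sketch of the arithmetic; the per-sub-CRQ allowance is an assumption not visible in this listing, while the RX-pool term matches the lines shown.

/*
 * DMA-window estimate sketch (powerpc IOMMU helpers assumed).
 */
#include <linux/mm.h>
#include <linux/types.h>
#include <asm/iommu.h>

struct demo_rx_pool {
	int size;		/* number of buffers in the pool */
	u64 buff_size;		/* bytes per buffer, before alignment */
};

static unsigned long demo_desired_dma(struct iommu_table *tbl,
				      u64 num_queues,
				      struct demo_rx_pool *pools,
				      int num_pools)
{
	unsigned long ret = PAGE_SIZE;	/* the CRQ page itself */
	u64 i;
	int p;

	for (i = 0; i < num_queues; i++)
		ret += 4 * PAGE_SIZE;	/* assumed per-sub-CRQ allowance */

	for (p = 0; p < num_pools; p++)
		ret += pools[p].size *
		       IOMMU_PAGE_ALIGN(pools[p].buff_size, tbl);

	return ret;
}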
6706 if (adapter->state != VNIC_OPEN) in ibmvnic_resume()
6709 tasklet_schedule(&adapter->tasklet); in ibmvnic_resume()