/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#ifndef _IONIC_LIF_H_
#define _IONIC_LIF_H_

#include <linux/pci.h>
#include "ionic_rx_filter.h"

#define IONIC_ADMINQ_LENGTH	16	/* must be a power of two */
#define IONIC_NOTIFYQ_LENGTH	64	/* must be a power of two */

#define IONIC_MAX_NUM_NAPI_CNTR		(NAPI_POLL_WEIGHT + 1)
#define IONIC_MAX_NUM_SG_CNTR		(IONIC_TX_MAX_SG_ELEMS + 1)
#define IONIC_RX_COPYBREAK_DEFAULT	256

struct ionic_tx_stats {
	u64 dma_map_err;
	u64 pkts;
	u64 bytes;
	u64 clean;
	u64 linearize;
	u64 no_csum;
	u64 csum;
	u64 crc32_csum;
	u64 tso;
	u64 frags;
	u64 sg_cntr[IONIC_MAX_NUM_SG_CNTR];
};

struct ionic_rx_stats {
	u64 dma_map_err;
	u64 alloc_err;
	u64 pkts;
	u64 bytes;
	u64 csum_none;
	u64 csum_complete;
	u64 csum_error;
	u64 buffers_posted;
	u64 dropped;
};

#define IONIC_QCQ_F_INITED		BIT(0)
#define IONIC_QCQ_F_SG			BIT(1)
#define IONIC_QCQ_F_INTR		BIT(2)
#define IONIC_QCQ_F_TX_STATS		BIT(3)
#define IONIC_QCQ_F_RX_STATS		BIT(4)
#define IONIC_QCQ_F_NOTIFYQ		BIT(5)

struct ionic_napi_stats {
	u64 poll_count;
	u64 work_done_cntr[IONIC_MAX_NUM_NAPI_CNTR];
};

struct ionic_q_stats {
	union {
		struct ionic_tx_stats tx;
		struct ionic_rx_stats rx;
	};
};

struct ionic_qcq {
	void *base;
	dma_addr_t base_pa;
	unsigned int total_size;
	struct ionic_queue q;
	struct ionic_cq cq;
	struct ionic_intr_info intr;
	struct napi_struct napi;
	struct ionic_napi_stats napi_stats;
	struct ionic_q_stats *stats;
	unsigned int flags;
	struct dentry *dentry;
};

struct ionic_qcqst {
	struct ionic_qcq *qcq;
	struct ionic_q_stats *stats;
};

#define q_to_qcq(q)		container_of(q, struct ionic_qcq, q)
#define q_to_tx_stats(q)	(&q_to_qcq(q)->stats->tx)
#define q_to_rx_stats(q)	(&q_to_qcq(q)->stats->rx)
#define napi_to_qcq(napi)	container_of(napi, struct ionic_qcq, napi)
#define napi_to_cq(napi)	(&napi_to_qcq(napi)->cq)

enum ionic_deferred_work_type {
	IONIC_DW_TYPE_RX_MODE,
	IONIC_DW_TYPE_RX_ADDR_ADD,
	IONIC_DW_TYPE_RX_ADDR_DEL,
	IONIC_DW_TYPE_LINK_STATUS,
	IONIC_DW_TYPE_LIF_RESET,
};

struct ionic_deferred_work {
	struct list_head list;
	enum ionic_deferred_work_type type;
	union {
		unsigned int rx_mode;
		u8 addr[ETH_ALEN];
		u8 fw_status;
	};
};

struct ionic_deferred {
	spinlock_t lock;		/* lock for deferred work list */
	struct list_head list;
	struct work_struct work;
};

struct ionic_lif_sw_stats {
	u64 tx_packets;
	u64 tx_bytes;
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_tso;
	u64 tx_no_csum;
	u64 tx_csum;
	u64 rx_csum_none;
	u64 rx_csum_complete;
	u64 rx_csum_error;
};

enum ionic_lif_state_flags {
	IONIC_LIF_F_INITED,
	IONIC_LIF_F_SW_DEBUG_STATS,
	IONIC_LIF_F_UP,
	IONIC_LIF_F_LINK_CHECK_REQUESTED,
	IONIC_LIF_F_QUEUE_RESET,
	IONIC_LIF_F_FW_RESET,

	/* leave this as last */
	IONIC_LIF_F_STATE_SIZE
};

#define IONIC_LIF_NAME_MAX_SZ		32
struct ionic_lif {
	char name[IONIC_LIF_NAME_MAX_SZ];
	struct list_head list;
	struct net_device *netdev;
	DECLARE_BITMAP(state, IONIC_LIF_F_STATE_SIZE);
	struct ionic *ionic;
	bool registered;
	unsigned int index;
	unsigned int hw_index;
	unsigned int kern_pid;
	u64 __iomem *kern_dbpage;
	spinlock_t adminq_lock;		/* lock for AdminQ operations */
	struct ionic_qcq *adminqcq;
	struct ionic_qcq *notifyqcq;
	struct ionic_qcqst *txqcqs;
	struct ionic_qcqst *rxqcqs;
	u64 last_eid;
	unsigned int neqs;
	unsigned int nxqs;
	unsigned int ntxq_descs;
	unsigned int nrxq_descs;
	u32 rx_copybreak;
	unsigned int rx_mode;
	u64 hw_features;
	bool mc_overflow;
	unsigned int nmcast;
	bool uc_overflow;
	unsigned int nucast;

	struct ionic_lif_info *info;
	dma_addr_t info_pa;
	u32 info_sz;

	u16 rss_types;
	u8 rss_hash_key[IONIC_RSS_HASH_KEY_SIZE];
	u8 *rss_ind_tbl;
	dma_addr_t rss_ind_tbl_pa;
	u32 rss_ind_tbl_sz;

	struct ionic_rx_filters rx_filters;
	struct ionic_deferred deferred;
	unsigned long *dbid_inuse;
	unsigned int dbid_count;
	struct dentry *dentry;
	u32 rx_coalesce_usecs;		/* what the user asked for */
	u32 rx_coalesce_hw;		/* what the hw is using */

	struct work_struct tx_timeout_work;
};

#define lif_to_txqcq(lif, i)	((lif)->txqcqs[i].qcq)
#define lif_to_rxqcq(lif, i)	((lif)->rxqcqs[i].qcq)
#define lif_to_txstats(lif, i)	((lif)->txqcqs[i].stats->tx)
#define lif_to_rxstats(lif, i)	((lif)->rxqcqs[i].stats->rx)
#define lif_to_txq(lif, i)	(&lif_to_txqcq((lif), i)->q)
#define lif_to_rxq(lif, i)	(&lif_to_rxqcq((lif), i)->q)

/* return 0 if successfully set the bit, else non-zero */
static inline int ionic_wait_for_bit(struct ionic_lif *lif, int bitname)
{
	return wait_on_bit_lock(lif->state, bitname, TASK_INTERRUPTIBLE);
}

static inline u32 ionic_coal_usec_to_hw(struct ionic *ionic, u32 usecs)
{
	u32 mult = le32_to_cpu(ionic->ident.dev.intr_coal_mult);
	u32 div = le32_to_cpu(ionic->ident.dev.intr_coal_div);

	/* Div-by-zero should never be an issue, but check anyway */
	if (!div || !mult)
		return 0;

	/* Round up in case usecs is close to the next hw unit */
	usecs += (div / mult) >> 1;

	/* Convert from usecs to device units */
	return (usecs * mult) / div;
}

static inline u32 ionic_coal_hw_to_usec(struct ionic *ionic, u32 units)
{
	u32 mult = le32_to_cpu(ionic->ident.dev.intr_coal_mult);
	u32 div = le32_to_cpu(ionic->ident.dev.intr_coal_div);

	/* Div-by-zero should never be an issue, but check anyway */
	if (!div || !mult)
		return 0;

	/* Convert from device units to usecs */
	return (units * div) / mult;
}

void ionic_link_status_check_request(struct ionic_lif *lif);
void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
				struct ionic_deferred_work *work);
int ionic_lifs_alloc(struct ionic *ionic);
void ionic_lifs_free(struct ionic *ionic);
void ionic_lifs_deinit(struct ionic *ionic);
int ionic_lifs_init(struct ionic *ionic);
int ionic_lifs_register(struct ionic *ionic);
void ionic_lifs_unregister(struct ionic *ionic);
int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
		       union ionic_lif_identity *lif_ident);
int ionic_lifs_size(struct ionic *ionic);
int ionic_lif_rss_config(struct ionic_lif *lif, u16 types,
			 const u8 *key, const u32 *indir);

int ionic_open(struct net_device *netdev);
int ionic_stop(struct net_device *netdev);
int ionic_reset_queues(struct ionic_lif *lif);

static inline void debug_stats_txq_post(struct ionic_qcq *qcq,
					struct ionic_txq_desc *desc, bool dbell)
{
	u8 num_sg_elems = ((le64_to_cpu(desc->cmd) >> IONIC_TXQ_DESC_NSGE_SHIFT)
			   & IONIC_TXQ_DESC_NSGE_MASK);

	qcq->q.dbell_count += dbell;

	if (num_sg_elems > (IONIC_MAX_NUM_SG_CNTR - 1))
		num_sg_elems = IONIC_MAX_NUM_SG_CNTR - 1;

	qcq->stats->tx.sg_cntr[num_sg_elems]++;
}

static inline void debug_stats_napi_poll(struct ionic_qcq *qcq,
					 unsigned int work_done)
{
	qcq->napi_stats.poll_count++;

	if (work_done > (IONIC_MAX_NUM_NAPI_CNTR - 1))
		work_done = IONIC_MAX_NUM_NAPI_CNTR - 1;

	qcq->napi_stats.work_done_cntr[work_done]++;
}

#define DEBUG_STATS_CQE_CNT(cq)		((cq)->compl_count++)
#define DEBUG_STATS_RX_BUFF_CNT(qcq)	((qcq)->stats->rx.buffers_posted++)
#define DEBUG_STATS_INTR_REARM(intr)	((intr)->rearm_count++)
#define DEBUG_STATS_TXQ_POST(qcq, txdesc, dbell) \
	debug_stats_txq_post(qcq, txdesc, dbell)
#define DEBUG_STATS_NAPI_POLL(qcq, work_done) \
	debug_stats_napi_poll(qcq, work_done)

#endif /* _IONIC_LIF_H_ */
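
/*
 * Worked example of the interrupt coalescing conversion above, with
 * mult/div values assumed purely for illustration (the real values come
 * from the device identity data): with intr_coal_mult = 3 and
 * intr_coal_div = 10, one device unit covers 10/3 microseconds, so
 *
 *	ionic_coal_usec_to_hw(ionic, 64) = ((64 + (10/3)/2) * 3) / 10 = 19
 *	ionic_coal_hw_to_usec(ionic, 19) = (19 * 10) / 3              = 63
 *
 * The "+ (div / mult) >> 1" term rounds to the nearest hw unit instead of
 * always truncating; the round trip is still not exact because both
 * directions use integer division.
 */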
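
/*
 * Illustrative sketch (not part of the driver): how the q_to_*_stats
 * container macros and DEBUG_STATS_TXQ_POST() would typically be used
 * when posting a Tx descriptor.  example_tx_post() and its arguments are
 * assumptions for the example; q_to_qcq(), q_to_tx_stats() and
 * DEBUG_STATS_TXQ_POST() are the pieces defined in this header.
 *
 *	static void example_tx_post(struct ionic_queue *q,
 *				    struct ionic_txq_desc *desc,
 *				    struct sk_buff *skb, bool ring_dbell)
 *	{
 *		struct ionic_tx_stats *stats = q_to_tx_stats(q);
 *
 *		stats->pkts++;
 *		stats->bytes += skb->len;
 *		DEBUG_STATS_TXQ_POST(q_to_qcq(q), desc, ring_dbell);
 *	}
 */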
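
/*
 * Illustrative sketch (not part of the driver): how the napi container
 * macros and DEBUG_STATS_NAPI_POLL() would typically be wired into a napi
 * poll routine.  example_napi_poll() and example_service_cq() are assumed
 * names for the example, and interrupt re-arming is omitted;
 * napi_to_qcq(), napi_to_cq() and DEBUG_STATS_NAPI_POLL() come from this
 * header.
 *
 *	static int example_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct ionic_qcq *qcq = napi_to_qcq(napi);
 *		struct ionic_cq *cq = napi_to_cq(napi);
 *		u32 work_done;
 *
 *		work_done = example_service_cq(cq, budget);
 *		DEBUG_STATS_NAPI_POLL(qcq, work_done);
 *
 *		if (work_done < budget)
 *			napi_complete_done(napi, work_done);
 *
 *		return work_done;
 *	}
 */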