// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_type.h"
#include "ice_xsk.h"
#include "ice_txrx.h"
#include "ice_txrx_lib.h"
#include "ice_lib.h"

static struct xdp_buff **ice_xdp_buf(struct ice_rx_ring *rx_ring, u32 idx)
{
	return &rx_ring->xdp_buf[idx];
}

/**
 * ice_qp_reset_stats - Resets all stats for rings of given index
 * @vsi: VSI that contains rings of interest
 * @q_idx: ring index in array
 */
static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
{
	struct ice_vsi_stats *vsi_stat;
	struct ice_pf *pf;

	pf = vsi->back;
	if (!pf->vsi_stats)
		return;

	vsi_stat = pf->vsi_stats[vsi->idx];
	if (!vsi_stat)
		return;

	memset(&vsi_stat->rx_ring_stats[q_idx]->rx_stats, 0,
	       sizeof(vsi_stat->rx_ring_stats[q_idx]->rx_stats));
	memset(&vsi_stat->tx_ring_stats[q_idx]->stats, 0,
	       sizeof(vsi_stat->tx_ring_stats[q_idx]->stats));
	if (ice_is_xdp_ena_vsi(vsi))
		memset(&vsi->xdp_rings[q_idx]->ring_stats->stats, 0,
		       sizeof(vsi->xdp_rings[q_idx]->ring_stats->stats));
}

/**
 * ice_qp_clean_rings - Cleans all the rings of a given index
 * @vsi: VSI that contains rings of interest
 * @q_idx: ring index in array
 */
static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
{
	ice_clean_tx_ring(vsi->tx_rings[q_idx]);
	if (ice_is_xdp_ena_vsi(vsi)) {
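		/* Let any in-flight XDP Tx activity that still references
		 * this XDP ring finish before the ring is cleaned.
		 */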
		synchronize_rcu();
		ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
	}
	ice_clean_rx_ring(vsi->rx_rings[q_idx]);
}

/**
 * ice_qvec_toggle_napi - Enables/disables NAPI for a given q_vector
 * @vsi: VSI that has netdev
 * @q_vector: q_vector that has NAPI context
 * @enable: true for enable, false for disable
 */
static void
ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
		     bool enable)
{
	if (!vsi->netdev || !q_vector)
		return;

	if (enable)
		napi_enable(&q_vector->napi);
	else
		napi_disable(&q_vector->napi);
}

/**
 * ice_qvec_dis_irq - Mask off queue interrupt generation on given ring
 * @vsi: the VSI that contains queue vector being unconfigured
 * @rx_ring: Rx ring that will have its IRQ disabled
 * @q_vector: queue vector
 */
static void
ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
		 struct ice_q_vector *q_vector)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u16 reg;
	u32 val;

	/* QINT_TQCTL is cleared in ice_vsi_stop_tx_ring(), so only
	 * QINT_RQCTL needs to be handled here.
	 */
	reg = rx_ring->reg_idx;
	val = rd32(hw, QINT_RQCTL(reg));
	val &= ~QINT_RQCTL_CAUSE_ENA_M;
	wr32(hw, QINT_RQCTL(reg), val);

	if (q_vector) {
		wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), 0);
		ice_flush(hw);
		synchronize_irq(q_vector->irq.virq);
	}
}

/**
 * ice_qvec_cfg_msix - Enable IRQ for given queue vector
 * @vsi: the VSI that contains queue vector
 * @q_vector: queue vector
 */
static void
ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
	u16 reg_idx = q_vector->reg_idx;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;

	ice_cfg_itr(hw, q_vector);

	ice_for_each_tx_ring(tx_ring, q_vector->tx)
		ice_cfg_txq_interrupt(vsi, tx_ring->reg_idx, reg_idx,
				      q_vector->tx.itr_idx);

	ice_for_each_rx_ring(rx_ring, q_vector->rx)
		ice_cfg_rxq_interrupt(vsi, rx_ring->reg_idx, reg_idx,
				      q_vector->rx.itr_idx);

	ice_flush(hw);
}

/**
 * ice_qvec_ena_irq - Enable IRQ for given queue vector
 * @vsi: the VSI that contains queue vector
 * @q_vector: queue vector
 */
static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;

	ice_irq_dynamic_ena(hw, vsi, q_vector);

	ice_flush(hw);
}

/**
 * ice_qp_dis - Disables a queue pair
 * @vsi: VSI of interest
 * @q_idx: ring index in array
 *
 * Returns 0 on success, negative on failure.
 */
static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
{
	struct ice_txq_meta txq_meta = { };
	struct ice_q_vector *q_vector;
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;
	int timeout = 50;
	int err;

	if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
		return -EINVAL;

	tx_ring = vsi->tx_rings[q_idx];
	rx_ring = vsi->rx_rings[q_idx];
	q_vector = rx_ring->q_vector;

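	/* Serialize with any other queue-pair reconfiguration; give up after
	 * roughly 50-100 ms (50 iterations of 1-2 ms) instead of spinning
	 * forever on a stuck ICE_CFG_BUSY bit.
	 */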
	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) {
		timeout--;
		if (!timeout)
			return -EBUSY;
		usleep_range(1000, 2000);
	}

	ice_qvec_dis_irq(vsi, rx_ring, q_vector);
	ice_qvec_toggle_napi(vsi, q_vector, false);

	netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));

	ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
	err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
	if (err)
		return err;
	if (ice_is_xdp_ena_vsi(vsi)) {
		struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];

		memset(&txq_meta, 0, sizeof(txq_meta));
		ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
		err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,
					   &txq_meta);
		if (err)
			return err;
	}
	err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
	if (err)
		return err;

	ice_qp_clean_rings(vsi, q_idx);
	ice_qp_reset_stats(vsi, q_idx);

	return 0;
}

/**
 * ice_qp_ena - Enables a queue pair
 * @vsi: VSI of interest
 * @q_idx: ring index in array
 *
 * Returns 0 on success, negative on failure.
 */
static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
{
	struct ice_aqc_add_tx_qgrp *qg_buf;
	struct ice_q_vector *q_vector;
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;
	u16 size;
	int err;

	if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
		return -EINVAL;

	size = struct_size(qg_buf, txqs, 1);
	qg_buf = kzalloc(size, GFP_KERNEL);
	if (!qg_buf)
		return -ENOMEM;

	qg_buf->num_txqs = 1;

	tx_ring = vsi->tx_rings[q_idx];
	rx_ring = vsi->rx_rings[q_idx];
	q_vector = rx_ring->q_vector;

	err = ice_vsi_cfg_txq(vsi, tx_ring, qg_buf);
	if (err)
		goto free_buf;

	if (ice_is_xdp_ena_vsi(vsi)) {
		struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];

		memset(qg_buf, 0, size);
		qg_buf->num_txqs = 1;
		err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf);
		if (err)
			goto free_buf;
		ice_set_ring_xdp(xdp_ring);
		ice_tx_xsk_pool(vsi, q_idx);
	}

	err = ice_vsi_cfg_rxq(rx_ring);
	if (err)
		goto free_buf;

	ice_qvec_cfg_msix(vsi, q_vector);

	err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
	if (err)
		goto free_buf;

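	/* Re-enable NAPI before unmasking the interrupt so the first IRQ that
	 * fires finds a poller that can actually be scheduled.
	 */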
	ice_qvec_toggle_napi(vsi, q_vector, true);
	ice_qvec_ena_irq(vsi, q_vector);

	netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
	clear_bit(ICE_CFG_BUSY, vsi->state);
free_buf:
	kfree(qg_buf);
	return err;
}

/**
 * ice_xsk_pool_disable - disable a buffer pool region
 * @vsi: Current VSI
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
{
	struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid);

	if (!pool)
		return -EINVAL;

	xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);

	return 0;
}

/**
 * ice_xsk_pool_enable - enable a buffer pool region
 * @vsi: Current VSI
 * @pool: pointer to a requested buffer pool region
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
static int
ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
	int err;

	if (vsi->type != ICE_VSI_PF)
		return -EINVAL;

	if (qid >= vsi->netdev->real_num_rx_queues ||
	    qid >= vsi->netdev->real_num_tx_queues)
		return -EINVAL;

	err = xsk_pool_dma_map(pool, ice_pf_to_dev(vsi->back),
			       ICE_RX_DMA_ATTR);
	if (err)
		return err;

	return 0;
}

/**
 * ice_realloc_rx_xdp_bufs - reallocate the SW ring for either XSK or normal buffers
 * @rx_ring: Rx ring
 * @pool_present: is pool for XSK present
 *
 * Try to allocate the new software ring; return -ENOMEM if the allocation
 * fails. On success, free the old ring and substitute it with the newly
 * allocated one.
 *
 * Returns 0 on success, negative on failure
 */
static int
ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present)
{
	size_t elem_size = pool_present ? sizeof(*rx_ring->xdp_buf) :
					  sizeof(*rx_ring->rx_buf);
	void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL);

	if (!sw_ring)
		return -ENOMEM;

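	/* Only one of rx_buf / xdp_buf is in use at any time; release the old
	 * representation and install the freshly allocated one.
	 */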
	if (pool_present) {
		kfree(rx_ring->rx_buf);
		rx_ring->rx_buf = NULL;
		rx_ring->xdp_buf = sw_ring;
	} else {
		kfree(rx_ring->xdp_buf);
		rx_ring->xdp_buf = NULL;
		rx_ring->rx_buf = sw_ring;
	}

	return 0;
}

/**
 * ice_realloc_zc_buf - reallocate XDP ZC queue pairs
 * @vsi: Current VSI
 * @zc: is zero copy set
 *
 * Reallocate buffer for rx_rings that might be used by XSK.
 * XDP requires more memory than rx_buf provides.
 * Returns 0 on success, negative on failure
 */
int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc)
{
	struct ice_rx_ring *rx_ring;
	uint i;

	ice_for_each_rxq(vsi, i) {
		rx_ring = vsi->rx_rings[i];
		if (!rx_ring->xsk_pool)
			continue;

		if (ice_realloc_rx_xdp_bufs(rx_ring, zc))
			return -ENOMEM;
	}

	return 0;
}

/**
 * ice_xsk_pool_setup - enable/disable a buffer pool region depending on its state
 * @vsi: Current VSI
 * @pool: buffer pool to enable/associate to a ring, NULL to disable
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
	bool if_running, pool_present = !!pool;
	int ret = 0, pool_failure = 0;

	if (qid >= vsi->num_rxq || qid >= vsi->num_txq) {
		netdev_err(vsi->netdev, "Please use queue id in scope of combined queues count\n");
		pool_failure = -EINVAL;
		goto failure;
	}

	if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);

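	/* If the interface is up, quiesce the queue pair and swap the
	 * software ring representation before the pool is (un)mapped below.
	 */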
	if (if_running) {
		struct ice_rx_ring *rx_ring = vsi->rx_rings[qid];

		ret = ice_qp_dis(vsi, qid);
		if (ret) {
			netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
			goto xsk_pool_if_up;
		}

		ret = ice_realloc_rx_xdp_bufs(rx_ring, pool_present);
		if (ret)
			goto xsk_pool_if_up;
	}

	pool_failure = pool_present ? ice_xsk_pool_enable(vsi, pool, qid) :
				      ice_xsk_pool_disable(vsi, qid);

xsk_pool_if_up:
	if (if_running) {
		ret = ice_qp_ena(vsi, qid);
		if (!ret && pool_present)
			napi_schedule(&vsi->rx_rings[qid]->xdp_ring->q_vector->napi);
		else if (ret)
			netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
	}

failure:
	if (pool_failure) {
		netdev_err(vsi->netdev, "Could not %sable buffer pool, error = %d\n",
			   pool_present ? "en" : "dis", pool_failure);
		return pool_failure;
	}

	return ret;
}

/**
 * ice_fill_rx_descs - pick buffers from XSK buffer pool and use them
 * @pool: XSK Buffer pool to pull the buffers from
 * @xdp: SW ring of xdp_buff that will hold the buffers
 * @rx_desc: Pointer to Rx descriptors that will be filled
 * @count: The number of buffers to allocate
 *
 * This function allocates a number of Rx buffers from the fill ring
 * or the internal recycle mechanism and places them on the Rx ring.
 *
 * Note that ring wrap should be handled by caller of this function.
 *
 * Returns the number of allocated Rx descriptors
 */
static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
			     union ice_32b_rx_flex_desc *rx_desc, u16 count)
{
	dma_addr_t dma;
	u16 buffs;
	int i;

	buffs = xsk_buff_alloc_batch(pool, xdp, count);
	for (i = 0; i < buffs; i++) {
		dma = xsk_buff_xdp_get_dma(*xdp);
		rx_desc->read.pkt_addr = cpu_to_le64(dma);
		rx_desc->wb.status_error0 = 0;

		rx_desc++;
		xdp++;
	}

	return buffs;
}

/**
 * __ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
 * @rx_ring: Rx ring
 * @count: The number of buffers to allocate
 *
 * Place the @count of descriptors onto Rx ring. Handle the ring wrap
 * for case where space from next_to_use up to the end of ring is less
 * than @count. Finally do a tail bump.
 *
 * Returns true if all allocations were successful, false if any fail.
 */
static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
{
	u32 nb_buffs_extra = 0, nb_buffs = 0;
	union ice_32b_rx_flex_desc *rx_desc;
	u16 ntu = rx_ring->next_to_use;
	u16 total_count = count;
	struct xdp_buff **xdp;

	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	xdp = ice_xdp_buf(rx_ring, ntu);

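	/* Fill up to the end of the ring first; if the pool cannot provide
	 * enough buffers for that stretch, bump the tail with what was
	 * obtained and bail out early.
	 */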
	if (ntu + count >= rx_ring->count) {
		nb_buffs_extra = ice_fill_rx_descs(rx_ring->xsk_pool, xdp,
						   rx_desc,
						   rx_ring->count - ntu);
		if (nb_buffs_extra != rx_ring->count - ntu) {
			ntu += nb_buffs_extra;
			goto exit;
		}
		rx_desc = ICE_RX_DESC(rx_ring, 0);
		xdp = ice_xdp_buf(rx_ring, 0);
		ntu = 0;
		count -= nb_buffs_extra;
		ice_release_rx_desc(rx_ring, 0);
	}

	nb_buffs = ice_fill_rx_descs(rx_ring->xsk_pool, xdp, rx_desc, count);

	ntu += nb_buffs;
	if (ntu == rx_ring->count)
		ntu = 0;

exit:
	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	return total_count == (nb_buffs_extra + nb_buffs);
}

/**
 * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
 * @rx_ring: Rx ring
 * @count: The number of buffers to allocate
 *
 * Wrapper for internal allocation routine; figure out how many tail
 * bumps should take place based on the given threshold
 *
 * Returns true if all calls to internal alloc routine succeeded
 */
bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
{
	u16 rx_thresh = ICE_RING_QUARTER(rx_ring);
	u16 leftover, i, tail_bumps;

	tail_bumps = count / rx_thresh;
	leftover = count - (tail_bumps * rx_thresh);

	for (i = 0; i < tail_bumps; i++)
		if (!__ice_alloc_rx_bufs_zc(rx_ring, rx_thresh))
			return false;
	return __ice_alloc_rx_bufs_zc(rx_ring, leftover);
}

/**
 * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
 * @rx_ring: Rx ring
 * @xdp: Pointer to XDP buffer
 *
 * This function allocates a new skb from a zero-copy Rx buffer.
 *
 * Returns the skb on success, NULL on failure.
 */
static struct sk_buff *
ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
{
	unsigned int totalsize = xdp->data_end - xdp->data_meta;
	unsigned int metasize = xdp->data - xdp->data_meta;
	struct skb_shared_info *sinfo = NULL;
	struct sk_buff *skb;
	u32 nr_frags = 0;

	if (unlikely(xdp_buff_has_frags(xdp))) {
		sinfo = xdp_get_shared_info_from_buff(xdp);
		nr_frags = sinfo->nr_frags;
	}
	net_prefetch(xdp->data_meta);

	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	memcpy(__skb_put(skb, totalsize), xdp->data_meta,
	       ALIGN(totalsize, sizeof(long)));

	if (metasize) {
		skb_metadata_set(skb, metasize);
		__skb_pull(skb, metasize);
	}

	if (likely(!xdp_buff_has_frags(xdp)))
		goto out;

	for (int i = 0; i < nr_frags; i++) {
		struct skb_shared_info *skinfo = skb_shinfo(skb);
		skb_frag_t *frag = &sinfo->frags[i];
		struct page *page;
		void *addr;

		page = dev_alloc_page();
		if (!page) {
			dev_kfree_skb(skb);
			return NULL;
		}
		addr = page_to_virt(page);

		memcpy(addr, skb_frag_page(frag), skb_frag_size(frag));

		__skb_fill_page_desc_noacc(skinfo, skinfo->nr_frags++,
					   addr, 0, skb_frag_size(frag));
	}

out:
	xsk_buff_free(xdp);
	return skb;
}

/**
 * ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ
 * @xdp_ring: XDP Tx ring
 */
static u32 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
{
	u16 ntc = xdp_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	u16 cnt = xdp_ring->count;
	struct ice_tx_buf *tx_buf;
	u16 completed_frames = 0;
	u16 xsk_frames = 0;
	u16 last_rs;
	int i;

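	/* The descriptor just before next_to_use is the last one that had the
	 * RS bit set; its DD bit tells how far the HW has progressed since
	 * the previous cleaning round.
	 */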
	last_rs = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : cnt - 1;
	tx_desc = ICE_TX_DESC(xdp_ring, last_rs);
	if (tx_desc->cmd_type_offset_bsz &
	    cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)) {
		if (last_rs >= ntc)
			completed_frames = last_rs - ntc + 1;
		else
			completed_frames = last_rs + cnt - ntc + 1;
	}

	if (!completed_frames)
		return 0;

	if (likely(!xdp_ring->xdp_tx_active)) {
		xsk_frames = completed_frames;
		goto skip;
	}

	ntc = xdp_ring->next_to_clean;
	for (i = 0; i < completed_frames; i++) {
		tx_buf = &xdp_ring->tx_buf[ntc];

		if (tx_buf->type == ICE_TX_BUF_XSK_TX) {
			tx_buf->type = ICE_TX_BUF_EMPTY;
			xsk_buff_free(tx_buf->xdp);
			xdp_ring->xdp_tx_active--;
		} else {
			xsk_frames++;
		}

		ntc++;
		if (ntc >= xdp_ring->count)
			ntc = 0;
	}
skip:
	tx_desc->cmd_type_offset_bsz = 0;
	xdp_ring->next_to_clean += completed_frames;
	if (xdp_ring->next_to_clean >= cnt)
		xdp_ring->next_to_clean -= cnt;
	if (xsk_frames)
		xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);

	return completed_frames;
}

/**
 * ice_xmit_xdp_tx_zc - AF_XDP ZC handler for XDP_TX
 * @xdp: XDP buffer to xmit
 * @xdp_ring: XDP ring to produce descriptor onto
 *
 * note that this function works directly on xdp_buff, no need to convert
 * it to xdp_frame. xdp_buff pointer is stored to ice_tx_buf so that cleaning
 * side will be able to xsk_buff_free() it.
 *
 * Returns ICE_XDP_TX for successfully produced desc, ICE_XDP_CONSUMED if there
 * was not enough space on XDP ring
 */
static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp,
			      struct ice_tx_ring *xdp_ring)
{
	struct skb_shared_info *sinfo = NULL;
	u32 size = xdp->data_end - xdp->data;
	u32 ntu = xdp_ring->next_to_use;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	struct xdp_buff *head;
	u32 nr_frags = 0;
	u32 free_space;
	u32 frag = 0;

	free_space = ICE_DESC_UNUSED(xdp_ring);
	if (free_space < ICE_RING_QUARTER(xdp_ring))
		free_space += ice_clean_xdp_irq_zc(xdp_ring);

	if (unlikely(!free_space))
		goto busy;

	if (unlikely(xdp_buff_has_frags(xdp))) {
		sinfo = xdp_get_shared_info_from_buff(xdp);
		nr_frags = sinfo->nr_frags;
		if (free_space < nr_frags + 1)
			goto busy;
	}

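	/* Walk the head buffer and all of its frags, producing one Tx
	 * descriptor per buffer; EOP is set on the last descriptor once the
	 * loop below terminates.
	 */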
	tx_desc = ICE_TX_DESC(xdp_ring, ntu);
	tx_buf = &xdp_ring->tx_buf[ntu];
	head = xdp;

	for (;;) {
		dma_addr_t dma;

		dma = xsk_buff_xdp_get_dma(xdp);
		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, size);

		tx_buf->xdp = xdp;
		tx_buf->type = ICE_TX_BUF_XSK_TX;
		tx_desc->buf_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz = ice_build_ctob(0, 0, size, 0);
		/* account for each xdp_buff from xsk_buff_pool */
		xdp_ring->xdp_tx_active++;

		if (++ntu == xdp_ring->count)
			ntu = 0;

		if (frag == nr_frags)
			break;

		tx_desc = ICE_TX_DESC(xdp_ring, ntu);
		tx_buf = &xdp_ring->tx_buf[ntu];

		xdp = xsk_buff_get_frag(head);
		size = skb_frag_size(&sinfo->frags[frag]);
		frag++;
	}

	xdp_ring->next_to_use = ntu;
	/* update last descriptor from a frame with EOP */
	tx_desc->cmd_type_offset_bsz |=
		cpu_to_le64(ICE_TX_DESC_CMD_EOP << ICE_TXD_QW1_CMD_S);

	return ICE_XDP_TX;

busy:
	xdp_ring->ring_stats->tx_stats.tx_busy++;

	return ICE_XDP_CONSUMED;
}

/**
 * ice_run_xdp_zc - Executes an XDP program in zero-copy path
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 * @xdp_prog: XDP program to run
 * @xdp_ring: ring to be used for XDP_TX action
 *
 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
 */
static int
ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
	       struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
{
	int err, result = ICE_XDP_PASS;
	u32 act;

	act = bpf_prog_run_xdp(xdp_prog, xdp);

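	/* XDP_REDIRECT is the expected hot path for AF_XDP; an -ENOBUFS
	 * failure with need_wakeup in use means the AF_XDP Rx queue is full,
	 * so tell the caller to stop processing via ICE_XDP_EXIT.
	 */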
	if (likely(act == XDP_REDIRECT)) {
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		if (!err)
			return ICE_XDP_REDIR;
		if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
			result = ICE_XDP_EXIT;
		else
			result = ICE_XDP_CONSUMED;
		goto out_failure;
	}

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		result = ice_xmit_xdp_tx_zc(xdp, xdp_ring);
		if (result == ICE_XDP_CONSUMED)
			goto out_failure;
		break;
	case XDP_DROP:
		result = ICE_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		result = ICE_XDP_CONSUMED;
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		break;
	}

	return result;
}

static int
ice_add_xsk_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *first,
		 struct xdp_buff *xdp, const unsigned int size)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first);

	if (!size)
		return 0;

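	/* Lazily initialize the shared info on the first fragment so that
	 * single-buffer frames never pay for it.
	 */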
	if (!xdp_buff_has_frags(first)) {
		sinfo->nr_frags = 0;
		sinfo->xdp_frags_size = 0;
		xdp_buff_set_frags_flag(first);
	}

	if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
		xsk_buff_free(first);
		return -ENOMEM;
	}

	__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
				   virt_to_page(xdp->data_hard_start),
				   XDP_PACKET_HEADROOM, size);
	sinfo->xdp_frags_size += size;
	xsk_buff_add_frag(xdp);

	return 0;
}

/**
 * ice_clean_rx_irq_zc - consumes packets from the hardware ring
 * @rx_ring: AF_XDP Rx ring
 * @budget: NAPI budget
 *
 * Returns number of processed packets on success, remaining budget on failure.
 */
int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	struct xsk_buff_pool *xsk_pool = rx_ring->xsk_pool;
	u32 ntc = rx_ring->next_to_clean;
	u32 ntu = rx_ring->next_to_use;
	struct xdp_buff *first = NULL;
	struct ice_tx_ring *xdp_ring;
	unsigned int xdp_xmit = 0;
	struct bpf_prog *xdp_prog;
	u32 cnt = rx_ring->count;
	bool failure = false;
	int entries_to_alloc;

	/* ZC path is enabled only when an XDP program is set,
	 * so here it cannot be NULL
	 */
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	xdp_ring = rx_ring->xdp_ring;

	if (ntc != rx_ring->first_desc)
		first = *ice_xdp_buf(rx_ring, rx_ring->first_desc);

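	/* Consume descriptors until the budget is exhausted or a descriptor
	 * without the DD bit is hit; a frame may span several buffers when
	 * XDP multi-buffer is in use.
	 */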
	while (likely(total_rx_packets < (unsigned int)budget)) {
		union ice_32b_rx_flex_desc *rx_desc;
		unsigned int size, xdp_res = 0;
		struct xdp_buff *xdp;
		struct sk_buff *skb;
		u16 stat_err_bits;
		u16 vlan_tag = 0;
		u16 rx_ptype;

		rx_desc = ICE_RX_DESC(rx_ring, ntc);

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
		if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we have
		 * verified the descriptor has been written back.
		 */
		dma_rmb();

		if (unlikely(ntc == ntu))
			break;

		xdp = *ice_xdp_buf(rx_ring, ntc);

		size = le16_to_cpu(rx_desc->wb.pkt_len) &
		       ICE_RX_FLX_DESC_PKT_LEN_M;

		xsk_buff_set_size(xdp, size);
		xsk_buff_dma_sync_for_cpu(xdp, xsk_pool);

		if (!first) {
			first = xdp;
		} else if (ice_add_xsk_frag(rx_ring, first, xdp, size)) {
			break;
		}

		if (++ntc == cnt)
			ntc = 0;

		if (ice_is_non_eop(rx_ring, rx_desc))
			continue;

		xdp_res = ice_run_xdp_zc(rx_ring, first, xdp_prog, xdp_ring);
		if (likely(xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))) {
			xdp_xmit |= xdp_res;
		} else if (xdp_res == ICE_XDP_EXIT) {
			failure = true;
			first = NULL;
			rx_ring->first_desc = ntc;
			break;
		} else if (xdp_res == ICE_XDP_CONSUMED) {
			xsk_buff_free(first);
		} else if (xdp_res == ICE_XDP_PASS) {
			goto construct_skb;
		}

		total_rx_bytes += xdp_get_buff_len(first);
		total_rx_packets++;

		first = NULL;
		rx_ring->first_desc = ntc;
		continue;

construct_skb:
		/* XDP_PASS path */
		skb = ice_construct_skb_zc(rx_ring, first);
		if (!skb) {
			rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
			break;
		}

		first = NULL;
		rx_ring->first_desc = ntc;

		if (eth_skb_pad(skb)) {
			skb = NULL;
			continue;
		}

		total_rx_bytes += skb->len;
		total_rx_packets++;

		vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc);

		rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
			   ICE_RX_FLEX_DESC_PTYPE_M;

		ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
		ice_receive_skb(rx_ring, skb, vlan_tag);
	}

	rx_ring->next_to_clean = ntc;
	entries_to_alloc = ICE_RX_DESC_UNUSED(rx_ring);
	if (entries_to_alloc > ICE_RING_QUARTER(rx_ring))
		failure |= !ice_alloc_rx_bufs_zc(rx_ring, entries_to_alloc);

	ice_finalize_xdp_rx(xdp_ring, xdp_xmit, 0);
	ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);

	if (xsk_uses_need_wakeup(xsk_pool)) {
		/* ntu could have changed when allocating entries above, so
		 * use rx_ring value instead of stack based one
		 */
		if (failure || ntc == rx_ring->next_to_use)
			xsk_set_rx_need_wakeup(xsk_pool);
		else
			xsk_clear_rx_need_wakeup(xsk_pool);

		return (int)total_rx_packets;
	}

	return failure ? budget : (int)total_rx_packets;
}

/**
 * ice_xmit_pkt - produce a single HW Tx descriptor out of AF_XDP descriptor
 * @xdp_ring: XDP ring to produce the HW Tx descriptor on
 * @desc: AF_XDP descriptor to pull the DMA address and length from
 * @total_bytes: bytes accumulator that will be used for stats update
 */
static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring, struct xdp_desc *desc,
			 unsigned int *total_bytes)
{
	struct ice_tx_desc *tx_desc;
	dma_addr_t dma;

	dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);
	xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);

	tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
	tx_desc->buf_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz = ice_build_ctob(xsk_is_eop_desc(desc),
						      0, desc->len, 0);

	*total_bytes += desc->len;
}

/**
 * ice_xmit_pkt_batch - produce a batch of HW Tx descriptors out of AF_XDP descriptors
 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 * @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
 * @total_bytes: bytes accumulator that will be used for stats update
 */
static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
			       unsigned int *total_bytes)
{
	u16 ntu = xdp_ring->next_to_use;
	struct ice_tx_desc *tx_desc;
	u32 i;

	loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
		dma_addr_t dma;

		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, descs[i].addr);
		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, descs[i].len);

		tx_desc = ICE_TX_DESC(xdp_ring, ntu++);
		tx_desc->buf_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz = ice_build_ctob(xsk_is_eop_desc(&descs[i]),
							      0, descs[i].len, 0);

		*total_bytes += descs[i].len;
	}

	xdp_ring->next_to_use = ntu;
}

/**
 * ice_fill_tx_hw_ring - produce the number of Tx descriptors onto ring
 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 * @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
 * @nb_pkts: count of packets to be sent
 * @total_bytes: bytes accumulator that will be used for stats update
 */
static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
				u32 nb_pkts, unsigned int *total_bytes)
{
	u32 batched, leftover, i;

	batched = ALIGN_DOWN(nb_pkts, PKTS_PER_BATCH);
	leftover = nb_pkts & (PKTS_PER_BATCH - 1);
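	/* PKTS_PER_BATCH is a power of two, so the mask above extracts the
	 * remainder that cannot fill a whole unrolled batch; those packets
	 * are produced one by one below.
	 */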
	for (i = 0; i < batched; i += PKTS_PER_BATCH)
		ice_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
	for (; i < batched + leftover; i++)
		ice_xmit_pkt(xdp_ring, &descs[i], total_bytes);
}

/**
 * ice_xmit_zc - take entries from XSK Tx ring and place them onto HW Tx ring
 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 *
 * Returns true if there is no more work that needs to be done, false otherwise
 */
bool ice_xmit_zc(struct ice_tx_ring *xdp_ring)
{
	struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
	u32 nb_pkts, nb_processed = 0;
	unsigned int total_bytes = 0;
	int budget;

	ice_clean_xdp_irq_zc(xdp_ring);

	budget = ICE_DESC_UNUSED(xdp_ring);
	budget = min_t(u16, budget, ICE_RING_QUARTER(xdp_ring));

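	/* Pull at most a quarter of the ring worth of descriptors from the
	 * XSK Tx ring so that a single poll cannot monopolize the HW ring.
	 */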
	nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
	if (!nb_pkts)
		return true;

	if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
		nb_processed = xdp_ring->count - xdp_ring->next_to_use;
		ice_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes);
		xdp_ring->next_to_use = 0;
	}

	ice_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,
			    &total_bytes);

	ice_set_rs_bit(xdp_ring);
	ice_xdp_ring_update_tail(xdp_ring);
	ice_update_tx_ring_stats(xdp_ring, nb_pkts, total_bytes);

	if (xsk_uses_need_wakeup(xdp_ring->xsk_pool))
		xsk_set_tx_need_wakeup(xdp_ring->xsk_pool);

	return nb_pkts < budget;
}

/**
 * ice_xsk_wakeup - Implements ndo_xsk_wakeup
 * @netdev: net_device
 * @queue_id: queue to wake up
 * @flags: ignored in our case, since we have Rx and Tx in the same NAPI
 *
 * Returns negative on error, zero otherwise.
 */
int
ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
	       u32 __always_unused flags)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_q_vector *q_vector;
	struct ice_vsi *vsi = np->vsi;
	struct ice_tx_ring *ring;

	if (test_bit(ICE_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!ice_is_xdp_ena_vsi(vsi))
		return -EINVAL;

	if (queue_id >= vsi->num_txq || queue_id >= vsi->num_rxq)
		return -EINVAL;

	ring = vsi->rx_rings[queue_id]->xdp_ring;

	if (!ring->xsk_pool)
		return -EINVAL;

	/* The idea here is that if NAPI is running, mark a miss, so
	 * it will run again. If not, trigger an interrupt and
	 * schedule the NAPI from interrupt context. If NAPI would be
	 * scheduled here, the interrupt affinity would not be
	 * honored.
	 */
	q_vector = ring->q_vector;
	if (!napi_if_scheduled_mark_missed(&q_vector->napi))
		ice_trigger_sw_intr(&vsi->back->hw, q_vector);

	return 0;
}

/**
 * ice_xsk_any_rx_ring_ena - Checks if Rx rings have AF_XDP buff pool attached
 * @vsi: VSI to be checked
 *
 * Returns true if any of the Rx rings has an AF_XDP buff pool attached
 */
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
{
	int i;

	ice_for_each_rxq(vsi, i) {
		if (xsk_get_pool_from_qid(vsi->netdev, i))
			return true;
	}

	return false;
}

/**
 * ice_xsk_clean_rx_ring - clean buffer pool queues connected to a given Rx ring
 * @rx_ring: ring to be cleaned
 */
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
	u16 ntc = rx_ring->next_to_clean;
	u16 ntu = rx_ring->next_to_use;

	while (ntc != ntu) {
		struct xdp_buff *xdp = *ice_xdp_buf(rx_ring, ntc);

		xsk_buff_free(xdp);
		ntc++;
		if (ntc >= rx_ring->count)
			ntc = 0;
	}
}

/**
 * ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its buffer pool queues
 * @xdp_ring: XDP_Tx ring
 */
void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring)
{
	u16 ntc = xdp_ring->next_to_clean, ntu = xdp_ring->next_to_use;
	u32 xsk_frames = 0;

	while (ntc != ntu) {
		struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc];

		if (tx_buf->type == ICE_TX_BUF_XSK_TX) {
			tx_buf->type = ICE_TX_BUF_EMPTY;
			xsk_buff_free(tx_buf->xdp);
		} else {
			xsk_frames++;
		}

		ntc++;
		if (ntc >= xdp_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
}