Excerpts from WireGuard's packet-queueing helpers (drivers/net/wireguard/queueing.h).
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */
#define PACKET_CB(skb) ((struct packet_cb *)((skb)->cb))
#define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer)
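The control-block pattern behind PACKET_CB can be illustrated outside the kernel. The sketch below is not kernel code: struct fake_skb and struct my_cb are hypothetical stand-ins for sk_buff and packet_cb, showing how a module-private struct is overlaid on a packet's opaque scratch area so no per-packet allocation is needed. The runtime assert stands in for the compile-time size check a kernel module would use.

#include <assert.h>
#include <stdio.h>
#include <string.h>

struct fake_skb {
        /* like sk_buff.cb: opaque scratch space owned by the current layer */
        _Alignas(unsigned long) char cb[48];
};

struct my_cb {
        int state;
        unsigned long nonce;
};

#define MY_CB(skb) ((struct my_cb *)((skb)->cb))

int main(void)
{
        struct fake_skb skb;

        /* The overlay must fit inside the scratch area. */
        assert(sizeof(struct my_cb) <= sizeof(skb.cb));
        memset(&skb, 0, sizeof(skb));
        MY_CB(&skb)->state = 1;
        MY_CB(&skb)->nonce = 42;
        printf("state=%d nonce=%lu\n", MY_CB(&skb)->state, MY_CB(&skb)->nonce);
        return 0;
}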
static inline bool wg_check_packet_protocol(struct sk_buff *skb)
{
        __be16 real_protocol = ip_tunnel_parse_protocol(skb);
        return real_protocol && skb->protocol == real_protocol;
}
static inline void wg_reset_packet(struct sk_buff *skb, bool encapsulating)
{
        u8 l4_hash = skb->l4_hash;
        u8 sw_hash = skb->sw_hash;
        u32 hash = skb->hash;
        skb_scrub_packet(skb, true);
        memset(&skb->headers, 0, sizeof(skb->headers));
        /* Preserve the flow hash across encapsulation so steering of the
         * inner flow stays stable.
         */
        if (encapsulating) {
                skb->l4_hash = l4_hash;
                skb->sw_hash = sw_hash;
                skb->hash = hash;
        }
        skb->queue_mapping = 0;
        skb->nohdr = 0;
        skb->peeked = 0;
        skb->mac_len = 0;
        skb->dev = NULL;
#ifdef CONFIG_NET_SCHED
        skb->tc_index = 0;
#endif
        skb_reset_redirect(skb);
        skb->hdr_len = skb_headroom(skb);
        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
        skb_reset_transport_header(skb);
        skb_probe_transport_header(skb);
        skb_reset_inner_headers(skb);
}
static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
{
        unsigned int cpu = *stored_cpu, cpu_index, i;

        /* Recompute only when the cached choice is invalid or offline. */
        if (unlikely(cpu >= nr_cpu_ids ||
                     !cpumask_test_cpu(cpu, cpu_online_mask))) {
                cpu_index = id % cpumask_weight(cpu_online_mask);
                cpu = cpumask_first(cpu_online_mask);
                for (i = 0; i < cpu_index; ++i)
                        cpu = cpumask_next(cpu, cpu_online_mask);
                *stored_cpu = cpu;
        }
        return cpu;
}
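A rough user-space analog of that selection logic, assuming a plain bool array stands in for cpu_online_mask and -1 marks an empty cache; all names here are hypothetical. The point is that a given id maps deterministically onto the set of online entries, and the answer is cached until it goes stale.

#include <stdbool.h>
#include <stdio.h>

static bool cpu_online[8] = { true, false, true, true };

static int choose_online(int *stored, unsigned int id)
{
        int n = 0, i;

        if (*stored >= 0 && cpu_online[*stored])
                return *stored; /* cached choice still valid */
        for (i = 0; i < 8; i++)
                n += cpu_online[i];
        id %= n;
        for (i = 0; i < 8; i++)
                if (cpu_online[i] && id-- == 0)
                        break;
        *stored = i;
        return i;
}

int main(void)
{
        int cached = -1;

        printf("peer 5 -> cpu %d\n", choose_online(&cached, 5)); /* 5 % 3 == 2 -> third online cpu */
        printf("again  -> cpu %d\n", choose_online(&cached, 5)); /* served from the cache */
        return 0;
}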
/* This function is racy, in the sense that it's called while last_cpu is
 * unlocked, so it could return the same CPU twice. Adding locking or using
 * atomic sequence numbers is slower though, and the consequences of racing
 * are harmless, so live with it.
 */
static inline int wg_cpumask_next_online(int *last_cpu)
{
        int cpu = cpumask_next(READ_ONCE(*last_cpu), cpu_online_mask);

        if (cpu >= nr_cpu_ids)
                cpu = cpumask_first(cpu_online_mask);
        WRITE_ONCE(*last_cpu, cpu);
        return cpu;
}
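The round-robin spreading can be sketched in user space as follows, with hypothetical names: a plain int stands in for the unlocked last_cpu, and indices 0..n-1 are all assumed online. Two producers racing through the unlocked read-modify-write may pick the same index, which only skews the load balance slightly, exactly the trade-off the comment above describes.

#include <stdio.h>

static int next_online(int *last, int n)
{
        int cpu = (*last + 1) % n; /* deliberately unlocked, like last_cpu */

        *last = cpu;
        return cpu;
}

int main(void)
{
        int last = -1, i;

        for (i = 0; i < 6; i++)
                printf("packet %d -> cpu %d\n", i, next_online(&last, 4));
        return 0;
}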
/* Multi producer */
bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb);

/* Single consumer */
struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue);

/* Single consumer */
static inline struct sk_buff *wg_prev_queue_peek(struct prev_queue *queue)
{
        if (queue->peeked)
                return queue->peeked;
        queue->peeked = wg_prev_queue_dequeue(queue);
        return queue->peeked;
}

/* Single consumer */
static inline void wg_prev_queue_drop_peeked(struct prev_queue *queue)
{
        queue->peeked = NULL;
}
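The peek/drop pair amounts to a one-slot cache in front of a dequeue-only queue: the single consumer may look at the head repeatedly, then commit to removing it. A minimal user-space sketch of the same shape, with a hypothetical fixed-size int queue in place of the sk_buff ring:

#include <stdio.h>

struct queue {
        int items[4];
        int head, tail;
        int *peeked;
};

static int *dequeue(struct queue *q)
{
        return q->head == q->tail ? NULL : &q->items[q->head++];
}

static int *peek(struct queue *q)
{
        if (!q->peeked)
                q->peeked = dequeue(q); /* at most one real dequeue per element */
        return q->peeked;
}

static void drop_peeked(struct queue *q)
{
        q->peeked = NULL; /* element was already removed by the dequeue in peek() */
}

int main(void)
{
        struct queue q = { .items = { 10, 20 }, .tail = 2 };

        printf("%d %d\n", *peek(&q), *peek(&q)); /* same element twice */
        drop_peeked(&q);
        printf("%d\n", *peek(&q)); /* advances to the next element */
        return 0;
}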
static inline int wg_queue_enqueue_per_device_and_peer(
        struct crypt_queue *device_queue, struct prev_queue *peer_queue,
        struct sk_buff *skb, struct workqueue_struct *wq)
{
        int cpu;

        atomic_set_release(&PACKET_CB(skb)->state, PACKET_STATE_UNCRYPTED);
        /* Queue for the peer first; its consumer waits for the state to
         * change away from UNCRYPTED before touching the packet.
         */
        if (unlikely(!wg_prev_queue_enqueue(peer_queue, skb)))
                return -ENOSPC;
        /* Then hand it to the device queue, on whichever CPU is next. */
        cpu = wg_cpumask_next_online(&device_queue->last_cpu);
        if (unlikely(ptr_ring_produce_bh(&device_queue->ring, skb)))
                return -EPIPE;
        queue_work_on(cpu, wq, &per_cpu_ptr(device_queue->worker, cpu)->work);
        return 0;
}
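The interplay of the two queues can be shown with a single-threaded sketch using hypothetical names: the per-peer queue fixes the final order at enqueue time, "workers" may finish the crypto in any order, and the ordered consumer stalls at the first packet whose state is still pending. This is how ordering survives parallel processing.

#include <stdio.h>

enum state { UNCRYPTED, CRYPTED };

struct pkt {
        int id;
        enum state state;
};

int main(void)
{
        struct pkt pkts[3] = { { 0, UNCRYPTED }, { 1, UNCRYPTED }, { 2, UNCRYPTED } };
        struct pkt *peer_queue[3] = { &pkts[0], &pkts[1], &pkts[2] }; /* order fixed at enqueue */
        int head = 0;

        /* Workers finish out of order. */
        pkts[2].state = CRYPTED;
        pkts[0].state = CRYPTED;

        /* The ordered consumer drains until it hits unfinished work. */
        while (head < 3 && peer_queue[head]->state == CRYPTED)
                printf("sent packet %d\n", peer_queue[head++]->id);
        if (head < 3)
                printf("blocked on packet %d\n", peer_queue[head]->id);
        return 0;
}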
static inline void wg_queue_enqueue_per_peer_tx(struct sk_buff *skb, enum packet_state state)
{
        /* Take a reference first: once the state is published, the peer
         * may otherwise be freed from under us.
         */
        struct wg_peer *peer = wg_peer_get_maybe_zero(PACKET_PEER(skb));

        atomic_set_release(&PACKET_CB(skb)->state, state);
        queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu, peer->internal_id),
                      peer->device->packet_crypt_wq, &peer->transmit_packet_work);
        wg_peer_put(peer);
}
static inline void wg_queue_enqueue_per_peer_rx(struct sk_buff *skb, enum packet_state state)
{
        struct wg_peer *peer = wg_peer_get_maybe_zero(PACKET_PEER(skb));

        atomic_set_release(&PACKET_CB(skb)->state, state);
        napi_schedule(&peer->napi);
        wg_peer_put(peer);
}
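Both completion paths pin the peer before publishing the packet state, because the moment the state becomes visible a consumer may run and drop the reference that was keeping the peer alive. A toy refcount sketch of that ordering, single-threaded and with hypothetical names:

#include <stdio.h>

struct peer {
        int refcount;
};

static struct peer *peer_get(struct peer *p) { p->refcount++; return p; }

static void peer_put(struct peer *p)
{
        if (--p->refcount == 0)
                printf("peer freed\n");
}

int main(void)
{
        struct peer peer = { .refcount = 1 };
        struct peer *p = peer_get(&peer); /* pin before publishing the state */

        /* ... state published; the consumer may now drop the other reference ... */
        peer_put(&peer); /* consumer's reference goes away */
        printf("still safe: refcount=%d\n", p->refcount);
        peer_put(p); /* our pin, taken before the publish, is released last */
        return 0;
}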