Lines Matching +full:out +full:- +full:masks

// SPDX-License-Identifier: GPL-2.0-only
/* Matched lines below are excerpts from drivers/virtio/virtio_vdpa.c,
 * grouped by the function they appear in. */
/* in vd_get_vdpa() */
        return to_virtio_vdpa_device(vdev)->vdpa;
/* in virtio_vdpa_generation() */
        const struct vdpa_config_ops *ops = vdpa->config;

        if (ops->get_generation)
                return ops->get_generation(vdpa);
        /* (optional op: the function returns 0 when it is absent) */
/* in virtio_vdpa_get_status() */
        const struct vdpa_config_ops *ops = vdpa->config;

        return ops->get_status(vdpa);
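Both wrappers forward straight to the parent vDPA driver's callbacks. For context, a minimal sketch of how a parent driver might supply these two ops, assuming a hypothetical my_vdpa parent device (all my_* names are invented for illustration):

#include <linux/vdpa.h>

struct my_vdpa {
        struct vdpa_device vdpa;        /* embedded vDPA device */
        u8 status;
        u32 generation;
};

static u8 my_get_status(struct vdpa_device *vdev)
{
        return container_of(vdev, struct my_vdpa, vdpa)->status;
}

static u32 my_get_generation(struct vdpa_device *vdev)
{
        /* bumped by the parent whenever the device config changes */
        return container_of(vdev, struct my_vdpa, vdpa)->generation;
}

static const struct vdpa_config_ops my_vdpa_ops = {
        .get_status     = my_get_status,
        .get_generation = my_get_generation,    /* optional op */
        /* ... remaining mandatory ops omitted ... */
};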
/* in virtio_vdpa_notify() */
        struct vdpa_device *vdpa = vd_get_vdpa(vq->vdev);
        const struct vdpa_config_ops *ops = vdpa->config;

        ops->kick_vq(vdpa, vq->index);
/* in virtio_vdpa_notify_with_data() */
        struct vdpa_device *vdpa = vd_get_vdpa(vq->vdev);
        const struct vdpa_config_ops *ops = vdpa->config;

        ops->kick_vq_with_data(vdpa, data);
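The data-carrying kick is only wired up when the parent implements kick_vq_with_data (see the setup path below); the u32 it receives is prepared by the core (vring_notification_data()) following the VIRTIO_F_NOTIFICATION_DATA layout. A sketch of the split-ring encoding, with an invented helper name:

/* Split ring: bits 15:0 carry the vq index, bits 31:16 the next
 * available index the driver is going to use (illustrative helper,
 * not a kernel API). */
static inline u32 example_split_notification_data(u16 vq_index,
                                                  u16 next_avail_idx)
{
        return (u32)next_avail_idx << 16 | vq_index;
}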
/* in virtio_vdpa_config_cb() */
        virtio_config_changed(&vd_dev->vdev);
/* in virtio_vdpa_virtqueue_cb() */
        return vring_interrupt(0, info->vq);
/* in virtio_vdpa_setup_vq() */
        const struct vdpa_config_ops *ops = vdpa->config;

        if (index >= vdpa->nvqs)
                return ERR_PTR(-ENOENT);

        /* Use the data-carrying kick only when the parent offers it. */
        if (ops->kick_vq_with_data)
                notify = virtio_vdpa_notify_with_data;
        else
                notify = virtio_vdpa_notify;

        /* Queue shouldn't already be set up. */
        if (ops->get_vq_ready(vdpa, index))
                return ERR_PTR(-ENOENT);

        /* Allocate and fill out our active queue description */
        info = kmalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
                return ERR_PTR(-ENOMEM);
        max_num = ops->get_vq_num_max(vdpa);
        if (max_num == 0) {
                err = -ENOENT;
                goto error_new_virtqueue;
        }
        if (ops->get_vq_num_min)
                min_num = ops->get_vq_num_min(vdpa);
        align = ops->get_vq_align(vdpa);
        if (ops->get_vq_dma_dev)
                dma_dev = ops->get_vq_dma_dev(vdpa, index);
        /* ... vq = vring_create_virtqueue_dma(...) against dma_dev ... */
        if (!vq) {
                err = -ENOMEM;
                goto error_new_virtqueue;
        }
        vq->num_max = max_num;
        ops->set_vq_cb(vdpa, index, &cb);
        ops->set_vq_num(vdpa, index, virtqueue_get_vring_size(vq));

        if (ops->set_vq_address(vdpa, index,
                                virtqueue_get_desc_addr(vq),
                                virtqueue_get_avail_addr(vq),
                                virtqueue_get_used_addr(vq))) {
                err = -EINVAL;
                goto err_vq;
        }

        /* Packed ring: wrap counters start at 1, indices at 0. */
        s->last_avail_counter = 1;
        s->last_avail_idx = 0;
        s->last_used_counter = 1;
        s->last_used_idx = 0;

        err = ops->set_vq_state(vdpa, index, &state);
        ops->set_vq_ready(vdpa, index, 1);

        vq->priv = info;
        info->vq = vq;

        spin_lock_irqsave(&vd_dev->lock, flags);
        list_add(&info->node, &vd_dev->virtqueues);
        spin_unlock_irqrestore(&vd_dev->lock, flags);

        return vq;

err_vq:
        vring_del_virtqueue(vq);
error_new_virtqueue:
        ops->set_vq_ready(vdpa, index, 0);
        WARN_ON(ops->get_vq_ready(vdpa, index));
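The four last_* stores above initialise the packed-ring arm of the state handed to set_vq_state(); a fresh packed queue starts with both wrap counters at 1 and both indices at 0. For reference, the union as declared in include/linux/vdpa.h (field comments added here):

struct vdpa_vq_state_split {
        u16     avail_index;            /* next avail index the driver writes */
};

struct vdpa_vq_state_packed {
        u16     last_avail_counter:1;   /* driver ring wrap counter, starts at 1 */
        u16     last_avail_idx:15;      /* next descriptor to offer */
        u16     last_used_counter:1;    /* device ring wrap counter, starts at 1 */
        u16     last_used_idx:15;       /* next used entry to consume */
};

struct vdpa_vq_state {
        union {
                struct vdpa_vq_state_split split;
                struct vdpa_vq_state_packed packed;
        };
};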
/* in virtio_vdpa_del_vq() */
        struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vq->vdev);
        struct vdpa_device *vdpa = vd_dev->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
        struct virtio_vdpa_vq_info *info = vq->priv;
        unsigned int index = vq->index;

        spin_lock_irqsave(&vd_dev->lock, flags);
        list_del(&info->node);
        spin_unlock_irqrestore(&vd_dev->lock, flags);

        /* Select and deactivate the queue */
        ops->set_vq_ready(vdpa, index, 0);
/* in virtio_vdpa_del_vqs() */
        list_for_each_entry_safe(vq, n, &vdev->vqs, list)
                virtio_vdpa_del_vq(vq);
/* in default_calc_sets() */
        affd->nr_sets = 1;
        affd->set_size[0] = affvecs;
/* in create_affinity_masks() */
        struct cpumask *masks = NULL;

        if (nvecs > affd->pre_vectors + affd->post_vectors)
                affvecs = nvecs - affd->pre_vectors - affd->post_vectors;
        if (!affd->calc_sets)
                affd->calc_sets = default_calc_sets;
        affd->calc_sets(affd, affvecs);

        masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
        if (!masks)
                return NULL;

        /* Fill out vectors at the beginning that don't need affinity */
        for (curvec = 0; curvec < affd->pre_vectors; curvec++)
                cpumask_setall(&masks[curvec]);

        for (i = 0, usedvecs = 0; i < affd->nr_sets; i++) {
                unsigned int this_vecs = affd->set_size[i];
                struct cpumask *result = group_cpus_evenly(this_vecs);

                if (!result) {
                        kfree(masks);
                        return NULL;
                }
                for (j = 0; j < this_vecs; j++)
                        cpumask_copy(&masks[curvec + j], &result[j]);
                kfree(result);
                curvec += this_vecs;
                usedvecs += this_vecs;
        }

        /* Fill out vectors at the end that don't need affinity */
        if (usedvecs >= affvecs)
                curvec = affd->pre_vectors + affvecs;
        else
                curvec = affd->pre_vectors + usedvecs;
        for (; curvec < nvecs; curvec++)
                cpumask_setall(&masks[curvec]);
        return masks;
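A worked example of the resulting layout, as a standalone userspace sketch of the same index arithmetic (the inputs nvecs = 10, one pre and one post vector are arbitrary assumptions):

#include <stdio.h>

int main(void)
{
        /* assumed inputs: 10 vectors, 1 reserved at each end */
        unsigned int nvecs = 10, pre = 1, post = 1;
        unsigned int affvecs = nvecs > pre + post ? nvecs - pre - post : 0;

        printf("masks[0..%u)  -> cpumask_setall() (pre vectors)\n", pre);
        printf("masks[%u..%u) -> spread by group_cpus_evenly(%u)\n",
               pre, pre + affvecs, affvecs);
        printf("masks[%u..%u) -> cpumask_setall() (post vectors)\n",
               pre + affvecs, nvecs);
        return 0;
}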
/* in virtio_vdpa_find_vqs() */
        const struct vdpa_config_ops *ops = vdpa->config;
        struct cpumask *masks;
        bool has_affinity = desc && ops->set_vq_affinity;

        if (has_affinity) {
                masks = create_affinity_masks(nvqs, desc ? desc : &default_affd);
                if (!masks)
                        return -ENOMEM;
        }

        /* per queue, once virtio_vdpa_setup_vq() has succeeded: */
        if (has_affinity)
                ops->set_vq_affinity(vdpa, i, &masks[i]);

        /* register the config-change callback (cb wraps virtio_vdpa_config_cb()) */
        ops->set_config_cb(vdpa, &cb);
        /* the masks are only needed during setup; both the success and
         * the error path free them */
        if (has_affinity)
                kfree(masks);
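Seen from a virtio driver on top of this transport, all of the above hides behind the generic queue discovery helpers. A hypothetical two-queue driver (invented names, using the long-standing virtio_find_vqs() signature) would end up in virtio_vdpa_find_vqs() like this:

#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>

static void my_rx_done(struct virtqueue *vq) { /* ... */ }
static void my_tx_done(struct virtqueue *vq) { /* ... */ }

static int my_setup_queues(struct virtio_device *vdev,
                           struct virtqueue *vqs[2])
{
        vq_callback_t *callbacks[] = { my_rx_done, my_tx_done };
        static const char * const names[] = { "rx", "tx" };
        struct irq_affinity desc = { .pre_vectors = 0 };

        /* On a vDPA transport this lands in virtio_vdpa_find_vqs(),
         * which spreads the queues over CPUs via the masks above. */
        return virtio_find_vqs(vdev, 2, vqs, callbacks, names, &desc);
}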
/* in virtio_vdpa_get_features() */
        const struct vdpa_config_ops *ops = vdpa->config;

        return ops->get_device_features(vdpa);
/* in virtio_vdpa_finalize_features() */
        return vdpa_set_features(vdpa, vdev->features);
/* in virtio_vdpa_bus_name() */
        struct vdpa_device *vdpa = vd_dev->vdpa;

        return dev_name(&vdpa->dev);
/* in virtio_vdpa_set_vq_affinity() */
        struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vq->vdev);
        struct vdpa_device *vdpa = vd_dev->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
        unsigned int index = vq->index;

        if (ops->set_vq_affinity)
                return ops->set_vq_affinity(vdpa, index, cpu_mask);

        return 0;
/* in virtio_vdpa_get_vq_affinity() */
        const struct vdpa_config_ops *ops = vdpa->config;

        if (ops->get_vq_affinity)
                return ops->get_vq_affinity(vdpa, index);

        return NULL;
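Both ops are optional for the parent; on the virtio side they sit behind the generic virtqueue_set_affinity() helper that drivers such as virtio_net call. A small illustrative spread, assuming densely numbered online CPUs (my_spread_queues is invented):

#include <linux/cpumask.h>
#include <linux/virtio_config.h>

/* Illustration only: pin n queues to CPUs round-robin. */
static void my_spread_queues(struct virtqueue *vqs[], unsigned int n)
{
        unsigned int i;

        for (i = 0; i < n; i++)
                virtqueue_set_affinity(vqs[i],
                                       cpumask_of(i % num_online_cpus()));
}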
/* in virtio_vdpa_probe() */
        const struct vdpa_config_ops *ops = vdpa->config;
        int ret = -EINVAL;

        vd_dev = kzalloc(sizeof(*vd_dev), GFP_KERNEL);
        if (!vd_dev)
                return -ENOMEM;

        vd_dev->vdev.dev.parent = vdpa_get_dma_dev(vdpa);
        vd_dev->vdev.dev.release = virtio_vdpa_release_dev;
        vd_dev->vdev.config = &virtio_vdpa_config_ops;
        vd_dev->vdpa = vdpa;
        INIT_LIST_HEAD(&vd_dev->virtqueues);
        spin_lock_init(&vd_dev->lock);

        /* a device ID of zero means "no device"; reject it */
        vd_dev->vdev.id.device = ops->get_device_id(vdpa);
        if (vd_dev->vdev.id.device == 0)
                goto err;

        vd_dev->vdev.id.vendor = ops->get_vendor_id(vdpa);
        ret = register_virtio_device(&vd_dev->vdev);

        /* on failure after registration, drop the device reference */
        put_device(&vd_dev->vdev.dev);
/* in virtio_vdpa_remove() */
        unregister_virtio_device(&vd_dev->vdev);
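For completeness, these two entry points are registered on the vDPA bus by the block that closes the upstream file (quoted from the same source, past the matched lines):

static struct vdpa_driver virtio_vdpa_driver = {
        .driver = {
                .name   = "virtio_vdpa",
        },
        .probe  = virtio_vdpa_probe,
        .remove = virtio_vdpa_remove,
};

module_vdpa_driver(virtio_vdpa_driver);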