/openbmc/linux/mm/mmu_gather.c
    20  struct mmu_gather_batch *batch;  [local, in tlb_next_batch()]
    23  if (tlb->delayed_rmap && tlb->active != &tlb->local)  [in tlb_next_batch()]
    26  batch = tlb->active;  [in tlb_next_batch()]
    27  if (batch->next) {  [in tlb_next_batch()]
    28  tlb->active = batch->next;  [in tlb_next_batch()]
    32  if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)  [in tlb_next_batch()]
    35  batch = (void *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);  [in tlb_next_batch()]
    36  if (!batch)  [in tlb_next_batch()]
    39  tlb->batch_count++;  [in tlb_next_batch()]
    40  batch->next = NULL;  [in tlb_next_batch()]
    [all …]

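The tlb_next_batch() hits above show a common growth pattern for gather-style batching: reuse the next batch in the chain if one already exists, otherwise allocate a fresh batch, subject to a hard cap that forces the caller to flush. A rough, self-contained sketch of that pattern in plain C (not the kernel's mmu_gather code; the struct names, capacity, and cap value are invented for illustration):

    #include <stdlib.h>

    #define BATCH_CAPACITY 64        /* entries per batch (assumed) */
    #define MAX_BATCH_COUNT 8        /* hard cap on extra batches (assumed) */

    struct gather_batch {
        struct gather_batch *next;   /* singly linked chain of batches */
        unsigned int nr;             /* entries used in this batch */
        void *entries[BATCH_CAPACITY];
    };

    struct gather {
        struct gather_batch *active; /* batch currently being filled */
        unsigned int batch_count;    /* extra batches allocated so far */
    };

    /* Return 1 if a writable batch is available, 0 if the caller must flush. */
    static int gather_next_batch(struct gather *g)
    {
        struct gather_batch *batch = g->active;

        if (batch->next) {           /* reuse a batch allocated earlier */
            g->active = batch->next;
            return 1;
        }

        if (g->batch_count == MAX_BATCH_COUNT)
            return 0;                /* cap reached: force a flush instead */

        batch = calloc(1, sizeof(*batch));
        if (!batch)
            return 0;                /* allocation failure also forces a flush */

        g->batch_count++;
        batch->next = NULL;
        g->active->next = batch;     /* append and switch to the new batch */
        g->active = batch;
        return 1;
    }
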
/openbmc/qemu/util/qemu-coroutine.c
    11  * See the COPYING.LIB file in the top-level directory.
    20  #include "qemu/coroutine-tls.h"
    40  * .-----------------------------------.
    41  * | Batch 1 | Batch 2 | Batch 3 | ... | global_pool
    42  * `-----------------------------------'
    44  * .-------------------.
    45  * | Batch 1 | Batch 2 | per-thread local_pool (maximum 2 batches)
    46  * `-------------------'
    52  /* This batch holds up to @COROUTINE_POOL_BATCH_MAX_SIZE coroutines */
    72  CoroutinePoolBatch *batch = g_new(CoroutinePoolBatch, 1);  [local, in coroutine_pool_batch_new()]
    [all …]

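The comment block quoted from qemu-coroutine.c describes a two-level pool: freed coroutines are recycled in fixed-size batches, with a global list of batches plus a per-thread local pool that holds at most two batches. A minimal sketch of that shape (generic C, not QEMU's implementation; the names, batch capacity, and the omission of locking are all assumptions):

    #include <stddef.h>

    #define POOL_BATCH_MAX 128            /* objects per batch (assumed value) */
    #define LOCAL_POOL_MAX_BATCHES 2      /* per-thread cap described in the comment */

    struct pool_batch {
        void *items[POOL_BATCH_MAX];      /* recycled objects, e.g. coroutines */
        size_t nr;                        /* slots in use */
        struct pool_batch *next;          /* link in a list of batches */
    };

    struct local_pool {
        struct pool_batch *batches;       /* at most LOCAL_POOL_MAX_BATCHES entries */
        int nr_batches;
    };

    static struct pool_batch *global_pool;    /* guarded by a lock in real code */

    /* Keep up to two batches thread-locally; overflow moves whole batches globally. */
    static void local_pool_add_batch(struct local_pool *lp, struct pool_batch *b)
    {
        if (lp->nr_batches < LOCAL_POOL_MAX_BATCHES) {
            b->next = lp->batches;
            lp->batches = b;
            lp->nr_batches++;
        } else {
            b->next = global_pool;
            global_pool = b;
        }
    }
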
/openbmc/linux/arch/powerpc/mm/book3s64/hash_tlb.c
     1  // SPDX-License-Identifier: GPL-2.0-or-later
     7  * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
    27  #include <asm/pte-walk.h>
    37  * immediately or will batch it up if the current CPU has an active
    38  * batch on it.
    44  struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);  [local, in hpte_need_flush()]
    51  i = batch->index;  [in hpte_need_flush()]
    65  addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);  [in hpte_need_flush()]
    90  vsid = get_user_vsid(&mm->context, addr, ssize);  [in hpte_need_flush()]
   100  * Check if we have an active batch on this CPU. If not, just  [in hpte_need_flush()]
    [all …]

/openbmc/linux/drivers/gpu/drm/i915/selftests/igt_spinner.c
     2  * SPDX-License-Identifier: MIT
    19  spin->gt = gt;  [in igt_spinner_init()]
    21  spin->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);  [in igt_spinner_init()]
    22  if (IS_ERR(spin->hws)) {  [in igt_spinner_init()]
    23  err = PTR_ERR(spin->hws);  [in igt_spinner_init()]
    26  i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC);  [in igt_spinner_init()]
    28  spin->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);  [in igt_spinner_init()]
    29  if (IS_ERR(spin->obj)) {  [in igt_spinner_init()]
    30  err = PTR_ERR(spin->obj);  [in igt_spinner_init()]
    37  i915_gem_object_put(spin->hws);  [in igt_spinner_init()]
    [all …]

/openbmc/linux/drivers/gpu/drm/i915/selftests/i915_request.c
    75  request = mock_request(rcs0(i915)->kernel_context, HZ / 10);  [in igt_add_request()]
    77  return -ENOMEM;  [in igt_add_request()]
    89  int err = -EINVAL;  [in igt_wait_request()]
    93  request = mock_request(rcs0(i915)->kernel_context, T);  [in igt_wait_request()]
    95  return -ENOMEM;  [in igt_wait_request()]
    99  if (i915_request_wait(request, 0, 0) != -ETIME) {  [in igt_wait_request()]
   104  if (i915_request_wait(request, 0, T) != -ETIME) {  [in igt_wait_request()]
   116  if (i915_request_wait(request, 0, 0) != -ETIME) {  [in igt_wait_request()]
   126  if (i915_request_wait(request, 0, T / 2) != -ETIME) {  [in igt_wait_request()]
   131  if (i915_request_wait(request, 0, T) == -ETIME) {  [in igt_wait_request()]
    [all …]

/openbmc/linux/drivers/iommu/iommufd/pages.c
     1  // SPDX-License-Identifier: GPL-2.0
     2  /* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES.
    21  * If access_itree includes the PFN's index then an in-kernel access has
    31  * means removing an iommu_domain or releasing an in-kernel access will not fail
   108  for (i = 0; i != ARRAY_SIZE(iter->spans); i++) {  [in interval_tree_double_span_iter_update()]
   109  if (interval_tree_span_iter_done(&iter->spans[i])) {  [in interval_tree_double_span_iter_update()]
   110  iter->is_used = -1;  [in interval_tree_double_span_iter_update()]
   114  if (iter->spans[i].is_hole) {  [in interval_tree_double_span_iter_update()]
   115  last_hole = min(last_hole, iter->spans[i].last_hole);  [in interval_tree_double_span_iter_update()]
   119  iter->is_used = i + 1;  [in interval_tree_double_span_iter_update()]
    [all …]

/openbmc/linux/drivers/gpu/drm/i915/gt/gen7_renderclear.c
     1  // SPDX-License-Identifier: MIT
    12  #define batch_advance(Y, CS) GEM_BUG_ON((Y)->end != (CS))
    48  * a shader on every HW thread, and clear the thread-local registers.  [in num_primitives()]
    52  return bv->max_threads;  [in num_primitives()]
    59  switch (INTEL_INFO(i915)->gt) {  [in batch_get_defaults()]
    62  bv->max_threads = 70;  [in batch_get_defaults()]
    65  bv->max_threads = 140;  [in batch_get_defaults()]
    68  bv->max_threads = 280;  [in batch_get_defaults()]
    71  bv->surface_height = 16 * 16;  [in batch_get_defaults()]
    72  bv->surface_width = 32 * 2 * 16;  [in batch_get_defaults()]
    [all …]

/openbmc/linux/drivers/gpu/drm/i915/gt/intel_lrc.c
     1  // SPDX-License-Identifier: MIT
    24  * The per-platform tables are u8-encoded in @data. Decode @data and set the
    29  * [7]: create NOPs - number of NOPs are set in lower bits
    60  const u32 base = engine->mmio_base;  [in set_offsets()]
    78  if (GRAPHICS_VER(engine->i915) >= 11)  [in set_offsets()]
    95  } while (--count);  [in set_offsets()]
    99  /* Close the batch; used mainly by live_lrc_layout() */  [in set_offsets()]
   101  if (GRAPHICS_VER(engine->i915) >= 11)  [in set_offsets()]
   690  GEM_BUG_ON(GRAPHICS_VER(engine->i915) >= 12 &&  [in reg_offsets()]
   693  if (engine->flags & I915_ENGINE_HAS_RCS_REG_STATE) {  [in reg_offsets()]
    [all …]

/openbmc/linux/drivers/gpu/drm/i915/gt/gen8_engine_cs.h
     1  /* SPDX-License-Identifier: MIT */
    53  __gen8_emit_pipe_control(u32 *batch, u32 bit_group_0,  [argument, in __gen8_emit_pipe_control()]
    56  memset(batch, 0, 6 * sizeof(u32));  [in __gen8_emit_pipe_control()]
    58  batch[0] = GFX_OP_PIPE_CONTROL(6) | bit_group_0;  [in __gen8_emit_pipe_control()]
    59  batch[1] = bit_group_1;  [in __gen8_emit_pipe_control()]
    60  batch[2] = offset;  [in __gen8_emit_pipe_control()]
    62  return batch + 6;  [in __gen8_emit_pipe_control()]
    65  static inline u32 *gen8_emit_pipe_control(u32 *batch,  [argument, in gen8_emit_pipe_control()]
    68  return __gen8_emit_pipe_control(batch, 0, bit_group_1, offset);  [in gen8_emit_pipe_control()]
    71  static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 bit_group_0,  [argument, in gen12_emit_pipe_control()]
    [all …]

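The helpers visible here follow a command-stream emission idiom: zero a fixed run of dwords at the current batch cursor, fill in a header and payload, and return the advanced cursor so successive emits can be chained. A generic sketch of that idiom (not the i915 definitions; the function name and header value are invented):

    #include <stdint.h>
    #include <string.h>

    /* Emit a fictional 6-dword command at cs and return the advanced cursor. */
    static inline uint32_t *emit_six_dwords(uint32_t *cs, uint32_t header,
                                            uint32_t flags, uint32_t offset)
    {
        memset(cs, 0, 6 * sizeof(uint32_t));  /* unused dwords stay zero */
        cs[0] = header;                       /* opcode and length field */
        cs[1] = flags;
        cs[2] = offset;
        return cs + 6;                        /* callers chain: cs = emit_six_dwords(cs, ...); */
    }
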
/openbmc/linux/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
     1  // SPDX-License-Identifier: MIT
    38  u32 *batch;  [member]
    47  h->gt = gt;  [in hang_init()]
    49  h->ctx = kernel_context(gt->i915, NULL);  [in hang_init()]
    50  if (IS_ERR(h->ctx))  [in hang_init()]
    51  return PTR_ERR(h->ctx);  [in hang_init()]
    53  GEM_BUG_ON(i915_gem_context_is_bannable(h->ctx));  [in hang_init()]
    55  h->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);  [in hang_init()]
    56  if (IS_ERR(h->hws)) {  [in hang_init()]
    57  err = PTR_ERR(h->hws);  [in hang_init()]
    [all …]

/openbmc/linux/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
     1  /* SPDX-License-Identifier: GPL-2.0 */
     6  * TLB flushing for 64-bit hash-MMU CPUs
    25  extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
    31  struct ppc64_tlb_batch *batch;  [local, in arch_enter_lazy_mmu_mode()]
    40  batch = this_cpu_ptr(&ppc64_tlb_batch);  [in arch_enter_lazy_mmu_mode()]
    41  batch->active = 1;  [in arch_enter_lazy_mmu_mode()]
    46  struct ppc64_tlb_batch *batch;  [local, in arch_leave_lazy_mmu_mode()]
    50  batch = this_cpu_ptr(&ppc64_tlb_batch);  [in arch_leave_lazy_mmu_mode()]
    52  if (batch->index)  [in arch_leave_lazy_mmu_mode()]
    53  __flush_tlb_pending(batch);  [in arch_leave_lazy_mmu_mode()]
    [all …]

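Together with the hash_tlb.c entry above, this header shows the lazy-MMU batching pattern: entering lazy mode marks a per-CPU batch active, individual flush requests are queued into it, and leaving the mode (or filling the batch) triggers one combined flush. A generic, single-CPU sketch of that control flow (not the powerpc implementation; the names and capacity are assumptions):

    #include <stddef.h>

    #define TLB_BATCH_MAX 192             /* assumed capacity before an eager flush */

    struct tlb_batch {
        int active;                       /* inside a lazy-MMU section? */
        size_t index;                     /* number of queued addresses */
        unsigned long addrs[TLB_BATCH_MAX];
    };

    static void flush_pending(struct tlb_batch *b)
    {
        /* a real implementation would issue one flush covering addrs[0..index) */
        b->index = 0;
    }

    static void enter_lazy(struct tlb_batch *b)
    {
        b->active = 1;                    /* flush requests are queued from here on */
    }

    static void leave_lazy(struct tlb_batch *b)
    {
        if (b->index)
            flush_pending(b);             /* drain whatever accumulated */
        b->active = 0;
    }

    static void need_flush(struct tlb_batch *b, unsigned long addr)
    {
        if (!b->active)
            return;                       /* not batching: flush addr immediately instead */
        b->addrs[b->index++] = addr;
        if (b->index == TLB_BATCH_MAX)
            flush_pending(b);             /* batch full: flush early */
    }
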
/openbmc/linux/include/linux/pagevec.h
     1  /* SPDX-License-Identifier: GPL-2.0 */
     5  * In many places it is efficient to batch an operation up against multiple
    20  * struct folio_batch - A collection of folios.
    23  * operating on a set of folios. The order of folios in the batch may be
    35  * folio_batch_init() - Initialise a batch of folios
    36  * @fbatch: The folio batch.
    42  fbatch->nr = 0;  [in folio_batch_init()]
    43  fbatch->percpu_pvec_drained = false;  [in folio_batch_init()]
    48  fbatch->nr = 0;  [in folio_batch_reinit()]
    53  return fbatch->nr;  [in folio_batch_count()]
    [all …]

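The kernel-doc lines above describe the folio_batch pattern: collect a small, fixed number of folios, hand the whole set to one operation, then reset the batch and keep going; folio_batch_init()/folio_batch_reinit() reset it and folio_batch_count() reports how full it is. A self-contained user-space analogue of that usage pattern (the item_batch names, the processing function, and the capacity of 31 are assumptions, not kernel API):

    #include <stdio.h>

    #define BATCH_SIZE 31                  /* assumed fixed capacity, as in a folio batch */

    struct item_batch {
        unsigned char nr;                  /* mirrors the batch's fill counter */
        void *items[BATCH_SIZE];
    };

    static void item_batch_init(struct item_batch *b)      { b->nr = 0; }
    static unsigned item_batch_count(struct item_batch *b) { return b->nr; }

    /* Returns the space left after adding, so 0 means "process and reinit now". */
    static unsigned item_batch_add(struct item_batch *b, void *item)
    {
        b->items[b->nr++] = item;
        return BATCH_SIZE - b->nr;
    }

    static void process_batch(struct item_batch *b)
    {
        printf("processing %u items in one pass\n", item_batch_count(b));
        item_batch_init(b);                /* reset for the next round */
    }

    int main(void)
    {
        struct item_batch batch;
        int data[100];

        item_batch_init(&batch);
        for (int i = 0; i < 100; i++) {
            if (!item_batch_add(&batch, &data[i]))
                process_batch(&batch);     /* batch full: operate on all of it at once */
        }
        if (item_batch_count(&batch))
            process_batch(&batch);         /* flush the final partial batch */
        return 0;
    }
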
/openbmc/linux/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
     1  // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
    27  * struct nfp_flower_cmsg_lag_config - control message payload for LAG config
    30  * @ttl: Time to live of packet - host always sets to 0xff
    31  * @pkt_number: Config message packet number - increment for each message
    32  * @batch_ver: Batch version of messages - increment for each batch of messages
    34  * @group_inst: Group instance number - increment when group is reused
    35  * @members: Array of 32-bit words listing all active group members
    49  * struct nfp_fl_lag_group - list entry for each LAG group
    76  /* Use this ID with zero members to ack a batch config */
    88  lag->pkt_num++;  [in nfp_fl_get_next_pkt_number()]
    [all …]

/openbmc/linux/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
     2  * SPDX-License-Identifier: MIT
    31  ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);  [in igt_request_alloc()]
    48  const int ver = GRAPHICS_VER(vma->vm->i915);  [in igt_emit_store_dw()]
    55  obj = i915_gem_object_create_internal(vma->vm->i915, size);  [in igt_emit_store_dw()]
    65  GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > i915_vma_size(vma));  [in igt_emit_store_dw()]
    92  intel_gt_chipset_flush(vma->vm->gt);  [in igt_emit_store_dw()]
    94  vma = i915_vma_instance(obj, vma->vm, NULL);  [in igt_emit_store_dw()]
   116  struct i915_vma *batch;  [local, in igt_gpu_fill_dw()]
   120  GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));  [in igt_gpu_fill_dw()]
   123  batch = igt_emit_store_dw(vma, offset, count, val);  [in igt_gpu_fill_dw()]
    [all …]

/openbmc/linux/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
     1  // SPDX-License-Identifier: MIT
    56  * F so we can use the Y-tile algorithm to get to that point.  [in linear_x_y_to_ftiled_pos()]
   104  struct i915_vma *batch;  [member]
   115  /* XY_FAST_COPY_BLT does not exist on pre-gen9 platforms */  [in fastblit_supports_x_tiling()]
   116  drm_WARN_ON(&i915->drm, gen < 9);  [in fastblit_supports_x_tiling()]
   129  /* XY_FAST_COPY_BLT does not exist on pre-gen9 platforms */  [in fast_blit_ok()]
   130  if (GRAPHICS_VER(buf->vma->vm->i915) < 9)  [in fast_blit_ok()]
   133  /* filter out platforms with unsupported X-tile support in fastblit */  [in fast_blit_ok()]
   134  if (buf->tiling == CLIENT_TILING_X && !fastblit_supports_x_tiling(buf->vma->vm->i915))  [in fast_blit_ok()]
   143  struct drm_i915_gem_object *batch)  [argument, in prepare_blit()]
    [all …]

/openbmc/linux/drivers/xen/gntdev.c
     4  * Device for accessing (in user-space) pages that have been granted by other
     7  * Copyright (c) 2006-2007, D G Murray.
    18  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
    25  #include <linux/dma-mapping.h>
    49  #include "gntdev-common.h"
    51  #include "gntdev-dmabuf.h"
    57  MODULE_DESCRIPTION("User-space granted page access driver");
    72  /* ------------------------------------------------------------------ */
    86  list_for_each_entry(map, &priv->maps, next)  [in gntdev_print_maps()]
    88  map->index, map->count,  [in gntdev_print_maps()]
    [all …]

/openbmc/linux/tools/testing/selftests/tc-testing/tdc_multibatch.py
     2  # SPDX-License-Identifier: GPL-2.0
     4  tdc_multibatch.py - a thin wrapper over tdc_batch.py to generate multiple batch
    14  description='TC multiple batch file generator')
    16  parser.add_argument("dir", help="where to put batch files")
    18  "num_filters", type=int, help="how many lines per batch file")
    19  parser.add_argument("num_files", type=int, help="how many batch files")
    25  "-x",
    26  "--file_prefix",
    28  help="prefix for generated batch file names")
    30  "-d",
    [all …]

/openbmc/linux/tools/testing/selftests/bpf/progs/test_bpf_ma.c
     1  // SPDX-License-Identifier: GPL-2.0
    28  char data[_size - sizeof(void *)]; \
    42  static __always_inline void batch_alloc_free(struct bpf_map *map, unsigned int batch,  [argument, in batch_alloc_free()]
    49  for (i = 0; i < batch; i++) {  [in batch_alloc_free()]
    61  old = bpf_kptr_xchg(&value->data, new);  [in batch_alloc_free()]
    68  for (i = 0; i < batch; i++) {  [in batch_alloc_free()]
    75  old = bpf_kptr_xchg(&value->data, NULL);  [in batch_alloc_free()]
    84  #define CALL_BATCH_ALLOC_FREE(size, batch, idx) \  [argument]
    85  batch_alloc_free((struct bpf_map *)(&array_##size), batch, idx)
   106  /* Alloc 128 8-bytes objects in batch to trigger refilling,  [in test_bpf_mem_alloc_free()]
    [all …]

/openbmc/linux/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
     1  // SPDX-License-Identifier: GPL-2.0 OR MIT
     4  * Copyright 2012-2023 VMware, Inc., Palo Alto, CA., USA
    20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
    46  * struct vmw_mob - Structure containing page table and metadata for a
    50  * @pt_level The indirection level of the page table. 0-2.
    62  * struct vmw_otable - Guest Memory OBject table metadata
    64  * @size: Size of the table (page-aligned).
   103  * vmw_setup_otable_base - Issue an object table base setup command to
   111  * This function returns -ENOMEM if it fails to reserve fifo space,
   129  BUG_ON(otable->page_table != NULL);  [in vmw_setup_otable_base()]
    [all …]

/openbmc/linux/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
     1  // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
    14  * and private mlx5e_xdp_buff fields fall into xdp_buff_xsk->cb  [in xsk_buff_to_mxbuf()]
    22  struct mlx5e_icosq *icosq = rq->icosq;  [in mlx5e_xsk_alloc_rx_mpwqe()]
    23  struct mlx5_wq_cyc *wq = &icosq->wq;  [in mlx5e_xsk_alloc_rx_mpwqe()]
    26  int batch, i;  [local, in mlx5e_xsk_alloc_rx_mpwqe()]
    27  u32 offset; /* 17-bit value with MTT. */  [in mlx5e_xsk_alloc_rx_mpwqe()]
    30  if (unlikely(!xsk_buff_can_alloc(rq->xsk_pool, rq->mpwqe.pages_per_wqe)))  [in mlx5e_xsk_alloc_rx_mpwqe()]
    34  xsk_buffs = (struct xdp_buff **)wi->alloc_units.xsk_buffs;  [in mlx5e_xsk_alloc_rx_mpwqe()]
    35  batch = xsk_buff_alloc_batch(rq->xsk_pool, xsk_buffs,  [in mlx5e_xsk_alloc_rx_mpwqe()]
    36  rq->mpwqe.pages_per_wqe);  [in mlx5e_xsk_alloc_rx_mpwqe()]
    [all …]

/openbmc/linux/tools/testing/selftests/rcutorture/bin/kvm.sh
     2  # SPDX-License-Identifier: GPL-2.0+
     5  # by the relevant CFLIST file, but can be overridden by the --configs
     6  # command-line argument.
    17  T="`mktemp -d ${TMPDIR-/tmp}/kvm.sh.XXXXXX`"
    18  trap 'rm -rf $T' 0
    23  LANG=en_US.UTF-8; export LANG
    55  ds=`date +%Y.%m.%d-%H.%M.%S`
    56  jitter="-1"
    63  echo " --allcpus"
    64  echo " --bootargs kernel-boot-arguments"
    [all …]

/openbmc/linux/include/linux/platform_data/cros_ec_sensorhub.h
     1  /* SPDX-License-Identifier: GPL-2.0 */
    19  * struct cros_ec_sensor_platform - ChromeOS EC sensor platform information.
    27  * typedef cros_ec_sensorhub_push_data_cb_t - Callback function to send datum
    67  * struct cros_ec_sensors_ts_filter_state - Timestamp filter state.
    94  /* struct cros_ec_sensors_ts_batch_state - State of batch of a single sensor.
    96  * Used to store the information needed to batch data using the median filter.
    98  * @penul_ts: last but one batch timestamp (penultimate timestamp).
   100  * when a batch shows up.
   101  * @penul_len: last but one batch length.
   102  * @last_ts: Last batch timestamp.
    [all …]

/openbmc/linux/tools/testing/selftests/bpf/benchs/run_bench_ringbufs.sh
     5  set -eufo pipefail
     7  RUN_RB_BENCH="$RUN_BENCH -c1"
     9  header "Single-producer, parallel producer"
    10  for b in rb-libbpf rb-custom pb-libbpf pb-custom; do
    14  header "Single-producer, parallel producer, sampled notification"
    15  for b in rb-libbpf rb-custom pb-libbpf pb-custom; do
    16  summarize $b "$($RUN_RB_BENCH --rb-sampled $b)"
    19  header "Single-producer, back-to-back mode"
    20  for b in rb-libbpf rb-custom pb-libbpf pb-custom; do
    21  summarize $b "$($RUN_RB_BENCH --rb-b2b $b)"
    [all …]

/openbmc/linux/tools/virtio/virtio_test.c
     1  // SPDX-License-Identifier: GPL-2.0
    22  #define RANDOM_BATCH -1
    49  static const struct vhost_vring_file no_backend = { .fd = -1 },
    55  struct vq_info *info = vq->priv;  [in vq_notify()]
    58  r = write(info->kick, &v, sizeof v);  [in vq_notify()]
    70  struct vhost_vring_state state = { .index = info->idx };  [in vhost_vq_setup()]
    71  struct vhost_vring_file file = { .index = info->idx };  [in vhost_vq_setup()]
    72  unsigned long long features = dev->vdev.features;  [in vhost_vq_setup()]
    74  .index = info->idx,  [in vhost_vq_setup()]
    75  .desc_user_addr = (uint64_t)(unsigned long)info->vring.desc,  [in vhost_vq_setup()]
    [all …]

/openbmc/linux/Documentation/bpf/bpf_prog_run.rst
     1  .. SPDX-License-Identifier: GPL-2.0
    16  --------
    20  can be used to unit test BPF programs against user-supplied context objects, and
    28  - ``BPF_PROG_TYPE_SOCKET_FILTER``
    29  - ``BPF_PROG_TYPE_SCHED_CLS``
    30  - ``BPF_PROG_TYPE_SCHED_ACT``
    31  - ``BPF_PROG_TYPE_XDP``
    32  - ``BPF_PROG_TYPE_SK_LOOKUP``
    33  - ``BPF_PROG_TYPE_CGROUP_SKB``
    34  - ``BPF_PROG_TYPE_LWT_IN``
    [all …]

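The documentation excerpt covers the BPF_PROG_RUN (BPF_PROG_TEST_RUN) command, which runs a loaded program of one of the listed types against caller-supplied input. A minimal user-space sketch using libbpf's wrapper, assuming prog_fd already refers to a loaded BPF_PROG_TYPE_XDP program (the zeroed packet is only a placeholder; a real test would build proper headers):

    #include <stdio.h>
    #include <string.h>
    #include <bpf/bpf.h>            /* bpf_prog_test_run_opts(), LIBBPF_OPTS */

    /* Run an already-loaded XDP program once over a dummy 64-byte packet. */
    static int run_once(int prog_fd)
    {
        unsigned char pkt_in[64];
        unsigned char pkt_out[64];
        memset(pkt_in, 0, sizeof(pkt_in));

        LIBBPF_OPTS(bpf_test_run_opts, opts,
            .data_in = pkt_in,
            .data_size_in = sizeof(pkt_in),
            .data_out = pkt_out,
            .data_size_out = sizeof(pkt_out),
            .repeat = 1,
        );

        int err = bpf_prog_test_run_opts(prog_fd, &opts);
        if (err)
            return err;             /* negative errno-style error from libbpf */

        printf("retval=%u duration=%uns data_size_out=%u\n",
               opts.retval, opts.duration, opts.data_size_out);
        return 0;
    }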