
Searched full:ring (Results 1 – 25 of 2357) sorted by relevance


/openbmc/linux/drivers/gpu/drm/amd/amdgpu/
amdgpu_ring.c
40 * Most engines on the GPU are fed via ring buffers. Ring
46 * pointers are equal, the ring is idle. When the host
47 * writes commands to the ring buffer, it increments the
55 * @type: ring type for which to return the limit.
73 * amdgpu_ring_alloc - allocate space on the ring buffer
75 * @ring: amdgpu_ring structure holding ring information
76 * @ndw: number of dwords to allocate in the ring buffer
78 * Allocate @ndw dwords in the ring buffer (all asics).
81 int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned int ndw) in amdgpu_ring_alloc() argument
85 ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask; in amdgpu_ring_alloc()
[all …]
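
The amdgpu_ring.c excerpt above describes the basic producer/consumer contract of a GPU ring buffer: the host advances a write pointer as it adds commands, the GPU advances a read pointer as it consumes them, equal pointers mean the ring is idle, and allocations are rounded up to the ring's alignment mask. A minimal, self-contained C sketch of that idea follows; the names and field layout are invented for illustration, not the amdgpu structures.

#include <stdbool.h>
#include <stdint.h>

struct toy_ring {
    uint32_t *buf;        /* command buffer, size_dw dwords long */
    uint32_t size_dw;     /* ring size in dwords, a power of two */
    uint32_t wptr;        /* host write pointer (producer) */
    uint32_t rptr;        /* device read pointer (consumer) */
    uint32_t align_mask;  /* e.g. 0x3f to pad submissions to 64 dwords */
};

/* Equal read/write pointers mean there is nothing left to fetch. */
static bool toy_ring_idle(const struct toy_ring *r)
{
    return r->rptr == r->wptr;
}

/* Round a request of ndw dwords up to the ring's alignment. */
static uint32_t toy_ring_align(const struct toy_ring *r, uint32_t ndw)
{
    return (ndw + r->align_mask) & ~r->align_mask;
}
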
jpeg_v1_0.c
37 static void jpeg_v1_0_ring_begin_use(struct amdgpu_ring *ring);
42 static void jpeg_v1_0_decode_ring_patch_wreg(struct amdgpu_ring *ring, uint32_t *ptr, uint32_t reg_… in jpeg_v1_0_decode_ring_patch_wreg() argument
44 struct amdgpu_device *adev = ring->adev; in jpeg_v1_0_decode_ring_patch_wreg()
45 ring->ring[(*ptr)++] = PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JRBC_EXTERNAL_REG_BASE), 0, 0, PACK… in jpeg_v1_0_decode_ring_patch_wreg()
48 ring->ring[(*ptr)++] = 0; in jpeg_v1_0_decode_ring_patch_wreg()
49 ring->ring[(*ptr)++] = PACKETJ((reg_offset >> 2), 0, 0, PACKETJ_TYPE0); in jpeg_v1_0_decode_ring_patch_wreg()
51 ring->ring[(*ptr)++] = reg_offset; in jpeg_v1_0_decode_ring_patch_wreg()
52 ring->ring[(*ptr)++] = PACKETJ(0, 0, 0, PACKETJ_TYPE0); in jpeg_v1_0_decode_ring_patch_wreg()
54 ring->ring[(*ptr)++] = val; in jpeg_v1_0_decode_ring_patch_wreg()
57 static void jpeg_v1_0_decode_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr) in jpeg_v1_0_decode_ring_set_patch_ring() argument
[all …]
amdgpu_fence.c
48 * are no longer in use by the associated ring on the GPU and
56 struct amdgpu_ring *ring; member
96 * @ring: ring the fence is associated with
101 static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq) in amdgpu_fence_write() argument
103 struct amdgpu_fence_driver *drv = &ring->fence_drv; in amdgpu_fence_write()
112 * @ring: ring the fence is associated with
117 static u32 amdgpu_fence_read(struct amdgpu_ring *ring) in amdgpu_fence_read() argument
119 struct amdgpu_fence_driver *drv = &ring->fence_drv; in amdgpu_fence_read()
131 * amdgpu_fence_emit - emit a fence on the requested ring
133 * @ring: ring the fence is associated with
[all …]
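
The amdgpu_fence.c lines above show the shape of per-ring fencing: each ring has a fence driver, the device writes a completed sequence number to a known memory location, and the host reads it back to decide which submissions have retired. Here is a hedged sketch of that pattern with invented names; it is not the amdgpu API, just the underlying bookkeeping.

#include <stdbool.h>
#include <stdint.h>

struct toy_fence_drv {
    uint32_t sync_seq;            /* last sequence number emitted to the ring */
    volatile uint32_t *cpu_addr;  /* location the device writes completions to */
};

static void toy_fence_write(struct toy_fence_drv *d, uint32_t seq)
{
    *d->cpu_addr = seq;           /* what the device side does on completion */
}

static uint32_t toy_fence_read(const struct toy_fence_drv *d)
{
    return *d->cpu_addr;
}

/* Wrap-safe "has sequence number seq signaled yet?" check. */
static bool toy_fence_signaled(const struct toy_fence_drv *d, uint32_t seq)
{
    return (int32_t)(toy_fence_read(d) - seq) >= 0;
}
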
amdgpu_ring_mux.c
44 struct amdgpu_ring *ring) in amdgpu_ring_mux_sw_entry() argument
46 return ring->entry_index < mux->ring_entry_size ? in amdgpu_ring_mux_sw_entry()
47 &mux->ring_entry[ring->entry_index] : NULL; in amdgpu_ring_mux_sw_entry()
50 /* copy packages on sw ring range[begin, end) */
52 struct amdgpu_ring *ring, in amdgpu_ring_mux_copy_pkt_from_sw_ring() argument
58 start = s_start & ring->buf_mask; in amdgpu_ring_mux_copy_pkt_from_sw_ring()
59 end = s_end & ring->buf_mask; in amdgpu_ring_mux_copy_pkt_from_sw_ring()
62 DRM_ERROR("no more data copied from sw ring\n"); in amdgpu_ring_mux_copy_pkt_from_sw_ring()
66 amdgpu_ring_alloc(real_ring, (ring->ring_size >> 2) + end - start); in amdgpu_ring_mux_copy_pkt_from_sw_ring()
67 amdgpu_ring_write_multiple(real_ring, (void *)&ring->ring[start], in amdgpu_ring_mux_copy_pkt_from_sw_ring()
[all …]
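
The ring-mux excerpt copies a range [start, end) of dwords from a software ring into the real hardware ring, wrapping indices with the ring's buffer mask. A generic sketch of copying a possibly wrapped range out of a power-of-two ring is shown below; it is illustrative only and does not reuse the amdgpu helpers.

#include <stdint.h>
#include <string.h>

/* Copy [s_start, s_end) dwords out of a power-of-two ring into dst.
 * buf_mask is ring_size_in_dwords - 1. Returns the number of dwords copied. */
static uint32_t toy_copy_range(uint32_t *dst, const uint32_t *ring,
                               uint32_t buf_mask, uint32_t s_start, uint32_t s_end)
{
    uint32_t size  = buf_mask + 1;
    uint32_t start = s_start & buf_mask;
    uint32_t end   = s_end & buf_mask;

    if (start == end)
        return 0;                      /* nothing to copy */

    if (start < end) {
        memcpy(dst, &ring[start], (end - start) * sizeof(uint32_t));
        return end - start;
    }

    /* The range wraps: copy the tail of the buffer, then the head. */
    memcpy(dst, &ring[start], (size - start) * sizeof(uint32_t));
    memcpy(dst + (size - start), &ring[0], end * sizeof(uint32_t));
    return (size - start) + end;
}
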
amdgpu_ring.h
89 /* Direct submission to the ring buffer during init and reset. */
114 /* sync_seq is protected by ring emission lock */
128 void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
129 void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error);
130 void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
132 int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
133 int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
140 int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence, struct amdgpu_job *job,
142 int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
144 bool amdgpu_fence_process(struct amdgpu_ring *ring);
[all …]
/openbmc/linux/include/linux/soc/ti/
k3-ringacc.h
3 * K3 Ring Accelerator (RA) subsystem interface
18 * RA ring operational modes
20 * @K3_RINGACC_RING_MODE_RING: Exposed Ring mode for SW direct access
43 * RA ring element's sizes in bytes.
60 * enum k3_ring_cfg - RA ring configuration structure
62 * @size: Ring size, number of elements
63 * @elm_size: Ring element size
64 * @mode: Ring operational mode
65 * @flags: Ring configuration flags. Possible values:
66 * @K3_RINGACC_RING_SHARED: when set allows to request the same ring
[all …]
/openbmc/linux/drivers/gpu/drm/radeon/
radeon_ring.c
37 * Most engines on the GPU are fed via ring buffers. Ring
43 * pointers are equal, the ring is idle. When the host
44 * writes commands to the ring buffer, it increments the
48 static void radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
51 * radeon_ring_supports_scratch_reg - check if the ring supports
55 * @ring: radeon_ring structure holding ring information
57 * Check if a specific ring supports writing to scratch registers (all asics).
58 * Returns true if the ring supports writing to scratch regs, false if not.
61 struct radeon_ring *ring) in radeon_ring_supports_scratch_reg() argument
63 switch (ring->idx) { in radeon_ring_supports_scratch_reg()
[all …]
/openbmc/qemu/hw/net/rocker/
rocker_desc.c
2 * QEMU rocker switch emulation - Descriptor ring support
40 DescRing *ring; member
58 PCIDevice *dev = PCI_DEVICE(info->ring->r); in desc_get_buf()
74 PCIDevice *dev = PCI_DEVICE(info->ring->r); in desc_set_buf()
91 return info->ring; in desc_get_ring()
94 int desc_ring_index(DescRing *ring) in desc_ring_index() argument
96 return ring->index; in desc_ring_index()
99 static bool desc_ring_empty(DescRing *ring) in desc_ring_empty() argument
101 return ring->head == ring->tail; in desc_ring_empty()
104 bool desc_ring_set_base_addr(DescRing *ring, uint64_t base_addr) in desc_ring_set_base_addr() argument
[all …]
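
The rocker descriptor-ring snippet shows the classic head/tail bookkeeping used by emulated NIC descriptor rings: the ring is empty exactly when head equals tail. A small illustrative sketch follows; the names are made up rather than QEMU's, and the "keep one slot free" full test is a common convention, not necessarily rocker's.

#include <stdbool.h>
#include <stdint.h>

struct toy_desc_ring {
    uint32_t size;   /* number of descriptors */
    uint32_t head;   /* producer index */
    uint32_t tail;   /* consumer index */
};

static bool toy_desc_ring_empty(const struct toy_desc_ring *r)
{
    return r->head == r->tail;
}

/* With one slot kept free, "full" is head + 1 == tail (mod size). */
static bool toy_desc_ring_full(const struct toy_desc_ring *r)
{
    return ((r->head + 1) % r->size) == r->tail;
}
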
/openbmc/linux/drivers/soc/ti/
k3-ringacc.c
3 * TI K3 NAVSS Ring Accelerator subsystem driver
31 * @db: Ring Doorbell Register
33 * @occ: Ring Occupancy Register
34 * @indx: Ring Current Index Register
35 * @hwocc: Ring Hardware Occupancy Register
36 * @hwindx: Ring Hardware Current Index Register
57 * struct k3_ring_fifo_regs - The Ring Accelerator Queues Registers region
59 * @head_data: Ring Head Entry Data Registers
60 * @tail_data: Ring Tail Entry Data Registers
61 * @peek_head_data: Ring Peek Head Entry Data Regs
[all …]
/openbmc/linux/drivers/bus/mhi/ep/
ring.c
10 size_t mhi_ep_ring_addr2offset(struct mhi_ep_ring *ring, u64 ptr) in mhi_ep_ring_addr2offset() argument
12 return (ptr - ring->rbase) / sizeof(struct mhi_ring_element); in mhi_ep_ring_addr2offset()
15 static u32 mhi_ep_ring_num_elems(struct mhi_ep_ring *ring) in mhi_ep_ring_num_elems() argument
19 memcpy_fromio(&rlen, (void __iomem *) &ring->ring_ctx->generic.rlen, sizeof(u64)); in mhi_ep_ring_num_elems()
24 void mhi_ep_ring_inc_index(struct mhi_ep_ring *ring) in mhi_ep_ring_inc_index() argument
26 ring->rd_offset = (ring->rd_offset + 1) % ring->ring_size; in mhi_ep_ring_inc_index()
29 static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end) in __mhi_ep_cache_ring() argument
31 struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; in __mhi_ep_cache_ring()
37 /* Don't proceed in the case of event ring. This happens during mhi_ep_ring_start(). */ in __mhi_ep_cache_ring()
38 if (ring->type == RING_TYPE_ER) in __mhi_ep_cache_ring()
[all …]
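
The MHI endpoint ring code above converts a bus address inside the ring to an element offset by subtracting the ring base and dividing by the element size, and advances the read offset modulo the ring size. A minimal sketch of those two helpers under the same assumptions (fixed-size elements, contiguous ring) with toy names:

#include <stddef.h>
#include <stdint.h>

struct toy_ring_element { uint8_t data[16]; };   /* fixed element size */

struct toy_ep_ring {
    uint64_t rbase;      /* bus address of element 0 */
    size_t ring_size;    /* number of elements in the ring */
    size_t rd_offset;    /* index of the next element to read */
};

static size_t toy_addr2offset(const struct toy_ep_ring *r, uint64_t ptr)
{
    return (size_t)((ptr - r->rbase) / sizeof(struct toy_ring_element));
}

static void toy_inc_rd_offset(struct toy_ep_ring *r)
{
    r->rd_offset = (r->rd_offset + 1) % r->ring_size;
}
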
/openbmc/linux/drivers/net/wireless/broadcom/b43legacy/
dma.c
32 struct b43legacy_dmadesc32 *op32_idx2desc(struct b43legacy_dmaring *ring, in op32_idx2desc() argument
38 *meta = &(ring->meta[slot]); in op32_idx2desc()
39 desc = ring->descbase; in op32_idx2desc()
45 static void op32_fill_descriptor(struct b43legacy_dmaring *ring, in op32_fill_descriptor() argument
50 struct b43legacy_dmadesc32 *descbase = ring->descbase; in op32_fill_descriptor()
57 B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots)); in op32_fill_descriptor()
62 addr |= ring->dev->dma.translation; in op32_fill_descriptor()
63 ctl = (bufsize - ring->frameoffset) in op32_fill_descriptor()
65 if (slot == ring->nr_slots - 1) in op32_fill_descriptor()
80 static void op32_poke_tx(struct b43legacy_dmaring *ring, int slot) in op32_poke_tx() argument
[all …]
/openbmc/linux/drivers/net/wireless/broadcom/b43/
dma.c
72 struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring, in op32_idx2desc() argument
78 *meta = &(ring->meta[slot]); in op32_idx2desc()
79 desc = ring->descbase; in op32_idx2desc()
85 static void op32_fill_descriptor(struct b43_dmaring *ring, in op32_fill_descriptor() argument
90 struct b43_dmadesc32 *descbase = ring->descbase; in op32_fill_descriptor()
97 B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots)); in op32_fill_descriptor()
99 addr = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_LOW); in op32_fill_descriptor()
100 addrext = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_EXT); in op32_fill_descriptor()
103 if (slot == ring->nr_slots - 1) in op32_fill_descriptor()
118 static void op32_poke_tx(struct b43_dmaring *ring, int slot) in op32_poke_tx() argument
[all …]
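
Both the b43legacy and b43 dma.c excerpts fill hardware DMA descriptors and tag the last slot ("slot == ring->nr_slots - 1") so the DMA engine knows where the descriptor table ends and wraps back to slot 0. A very rough sketch of that idea follows; the flag name, bit position, and field widths are invented for illustration and do not match the b43 register layout.

#include <stdint.h>

#define TOY_DESC_CTL_TABLE_END  (1u << 28)  /* illustrative "last descriptor" flag */

struct toy_dma_desc {
    uint32_t control;
    uint32_t address;
};

/* Fill one descriptor; the final slot is tagged so the DMA engine wraps. */
static void toy_fill_descriptor(struct toy_dma_desc *descbase, int nr_slots,
                                int slot, uint32_t addr, uint32_t bufsize)
{
    uint32_t ctl = bufsize & 0x1fffu;       /* illustrative byte-count field */

    if (slot == nr_slots - 1)
        ctl |= TOY_DESC_CTL_TABLE_END;

    descbase[slot].control = ctl;
    descbase[slot].address = addr;
}
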
/openbmc/linux/drivers/net/ethernet/apm/xgene/
xgene_enet_ring2.c
12 static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring) in xgene_enet_ring_init() argument
14 u32 *ring_cfg = ring->state; in xgene_enet_ring_init()
15 u64 addr = ring->dma; in xgene_enet_ring_init()
17 if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU) { in xgene_enet_ring_init()
18 ring_cfg[0] |= SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK); in xgene_enet_ring_init()
27 ring_cfg[3] |= SET_VAL(RINGSIZE, ring->cfgsize) in xgene_enet_ring_init()
34 static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring) in xgene_enet_ring_set_type() argument
36 u32 *ring_cfg = ring->state; in xgene_enet_ring_set_type()
40 is_bufpool = xgene_enet_is_bufpool(ring->id); in xgene_enet_ring_set_type()
47 static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring) in xgene_enet_ring_set_recombbuf() argument
[all …]
/openbmc/linux/drivers/thunderbolt/
nhi.c
28 #define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring") argument
53 static int ring_interrupt_index(const struct tb_ring *ring) in ring_interrupt_index() argument
55 int bit = ring->hop; in ring_interrupt_index()
56 if (!ring->is_tx) in ring_interrupt_index()
57 bit += ring->nhi->hop_count; in ring_interrupt_index()
61 static void nhi_mask_interrupt(struct tb_nhi *nhi, int mask, int ring) in nhi_mask_interrupt() argument
66 val = ioread32(nhi->iobase + REG_RING_INTERRUPT_BASE + ring); in nhi_mask_interrupt()
67 iowrite32(val & ~mask, nhi->iobase + REG_RING_INTERRUPT_BASE + ring); in nhi_mask_interrupt()
69 iowrite32(mask, nhi->iobase + REG_RING_INTERRUPT_MASK_CLEAR_BASE + ring); in nhi_mask_interrupt()
73 static void nhi_clear_interrupt(struct tb_nhi *nhi, int ring) in nhi_clear_interrupt() argument
[all …]
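
The NHI excerpt derives each ring's interrupt bit from its hop ID: TX rings occupy the first hop_count bits and RX rings the next hop_count, so an RX ring's bit is hop + hop_count. A short sketch of that index calculation and of masking a bit in a cached interrupt-register value; the types and register handling here are purely illustrative, not the Thunderbolt driver's.

#include <stdbool.h>
#include <stdint.h>

struct toy_nhi  { int hop_count; };
struct toy_ring { struct toy_nhi *nhi; int hop; bool is_tx; };

/* TX rings use bits [0, hop_count), RX rings use [hop_count, 2*hop_count). */
static int toy_ring_interrupt_index(const struct toy_ring *r)
{
    int bit = r->hop;

    if (!r->is_tx)
        bit += r->nhi->hop_count;
    return bit;
}

/* Clear the ring's bit in a cached copy of an interrupt-enable register. */
static uint32_t toy_mask_interrupt(uint32_t reg_val, int bit)
{
    return reg_val & ~(UINT32_C(1) << bit);
}
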
/openbmc/linux/drivers/crypto/inside-secure/
safexcel_ring.c
21 /* Actual command descriptor ring */ in safexcel_init_ring_descriptors()
32 /* Command descriptor shadow ring for storing additional token data */ in safexcel_init_ring_descriptors()
78 struct safexcel_desc_ring *ring, in safexcel_ring_next_cwptr() argument
82 void *ptr = ring->write; in safexcel_ring_next_cwptr()
85 *atoken = ring->shwrite; in safexcel_ring_next_cwptr()
87 if ((ring->write == ring->read - ring->offset) || in safexcel_ring_next_cwptr()
88 (ring->read == ring->base && ring->write == ring->base_end)) in safexcel_ring_next_cwptr()
91 if (ring->write == ring->base_end) { in safexcel_ring_next_cwptr()
92 ring->write = ring->base; in safexcel_ring_next_cwptr()
93 ring->shwrite = ring->shbase; in safexcel_ring_next_cwptr()
[all …]
/openbmc/linux/drivers/crypto/intel/qat/qat_common/
adf_transport.c
40 static int adf_reserve_ring(struct adf_etr_bank_data *bank, u32 ring) in adf_reserve_ring() argument
43 if (bank->ring_mask & (1 << ring)) { in adf_reserve_ring()
47 bank->ring_mask |= (1 << ring); in adf_reserve_ring()
52 static void adf_unreserve_ring(struct adf_etr_bank_data *bank, u32 ring) in adf_unreserve_ring() argument
55 bank->ring_mask &= ~(1 << ring); in adf_unreserve_ring()
59 static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, u32 ring) in adf_enable_ring_irq() argument
64 bank->irq_mask |= (1 << ring); in adf_enable_ring_irq()
72 static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, u32 ring) in adf_disable_ring_irq() argument
77 bank->irq_mask &= ~(1 << ring); in adf_disable_ring_irq()
83 bool adf_ring_nearly_full(struct adf_etr_ring_data *ring) in adf_ring_nearly_full() argument
[all …]
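
The QAT transport snippet tracks which rings in a bank are in use with a simple bitmask: reserving a ring sets its bit (failing if already set), releasing clears it, and the per-bank IRQ mask is maintained the same way. A self-contained sketch of that bitmask bookkeeping with toy types; locking is omitted here, and the error code is only a plausible choice, not a claim about the QAT driver API.

#include <errno.h>
#include <stdint.h>

struct toy_bank {
    uint32_t ring_mask;   /* bit N set => ring N is reserved */
    uint32_t irq_mask;    /* bit N set => ring N's IRQ is enabled */
};

static int toy_reserve_ring(struct toy_bank *bank, uint32_t ring)
{
    if (bank->ring_mask & (UINT32_C(1) << ring))
        return -EBUSY;                     /* already taken */
    bank->ring_mask |= UINT32_C(1) << ring;
    return 0;
}

static void toy_unreserve_ring(struct toy_bank *bank, uint32_t ring)
{
    bank->ring_mask &= ~(UINT32_C(1) << ring);
}

static void toy_enable_ring_irq(struct toy_bank *bank, uint32_t ring)
{
    bank->irq_mask |= UINT32_C(1) << ring;
}
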
/openbmc/linux/Documentation/devicetree/bindings/net/wireless/
qcom,ath11k.yaml
105 - description: interrupt event for ring CE0
106 - description: interrupt event for ring CE1
107 - description: interrupt event for ring CE2
108 - description: interrupt event for ring CE3
109 - description: interrupt event for ring CE4
110 - description: interrupt event for ring CE5
111 - description: interrupt event for ring CE6
112 - description: interrupt event for ring CE7
113 - description: interrupt event for ring CE8
114 - description: interrupt event for ring CE9
[all …]
/openbmc/linux/drivers/gpu/drm/i915/gt/
intel_ring.c
19 unsigned int intel_ring_update_space(struct intel_ring *ring) in intel_ring_update_space() argument
23 space = __intel_ring_space(ring->head, ring->emit, ring->size); in intel_ring_update_space()
25 ring->space = space; in intel_ring_update_space()
29 void __intel_ring_pin(struct intel_ring *ring) in __intel_ring_pin() argument
31 GEM_BUG_ON(!atomic_read(&ring->pin_count)); in __intel_ring_pin()
32 atomic_inc(&ring->pin_count); in __intel_ring_pin()
35 int intel_ring_pin(struct intel_ring *ring, struct i915_gem_ww_ctx *ww) in intel_ring_pin() argument
37 struct i915_vma *vma = ring->vma; in intel_ring_pin()
42 if (atomic_fetch_inc(&ring->pin_count)) in intel_ring_pin()
45 /* Ring wraparound at offset 0 sometimes hangs. No idea why. */ in intel_ring_pin()
[all …]
selftest_ring.c
8 struct intel_ring *ring; in mock_ring() local
10 ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL); in mock_ring()
11 if (!ring) in mock_ring()
14 kref_init(&ring->ref); in mock_ring()
15 ring->size = sz; in mock_ring()
16 ring->wrap = BITS_PER_TYPE(ring->size) - ilog2(sz); in mock_ring()
17 ring->effective_size = sz; in mock_ring()
18 ring->vaddr = (void *)(ring + 1); in mock_ring()
19 atomic_set(&ring->pin_count, 1); in mock_ring()
21 intel_ring_update_space(ring); in mock_ring()
[all …]
/openbmc/linux/tools/perf/pmu-events/arch/x86/ivytown/
uncore-io.json
15 …s into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO …
24 …s into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO …
33 …s into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO …
51 …s into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO …
60 …s into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO …
69 …s into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO …
74 "BriefDescription": "R2 AD Ring in Use; Counterclockwise",
78 …ring is being used at this ring stop. This includes when packets are passing by and when packets …
83 "BriefDescription": "R2 AD Ring in Use; Counterclockwise and Even on VRing 0",
87 …ring is being used at this ring stop. This includes when packets are passing by and when packets …
[all …]
/openbmc/linux/drivers/net/ethernet/mellanox/mlx4/
en_tx.c
55 struct mlx4_en_tx_ring *ring; in mlx4_en_create_tx_ring() local
59 ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node); in mlx4_en_create_tx_ring()
60 if (!ring) { in mlx4_en_create_tx_ring()
61 en_err(priv, "Failed allocating TX ring\n"); in mlx4_en_create_tx_ring()
65 ring->size = size; in mlx4_en_create_tx_ring()
66 ring->size_mask = size - 1; in mlx4_en_create_tx_ring()
67 ring->sp_stride = stride; in mlx4_en_create_tx_ring()
68 ring->full_size = ring->size - HEADROOM - MLX4_MAX_DESC_TXBBS; in mlx4_en_create_tx_ring()
71 ring->tx_info = kvmalloc_node(tmp, GFP_KERNEL, node); in mlx4_en_create_tx_ring()
72 if (!ring->tx_info) { in mlx4_en_create_tx_ring()
[all …]
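
The mlx4 TX-ring setup above relies on the ring size being a power of two: size_mask = size - 1 lets producer/consumer counters grow freely while the mask picks the actual slot, and full_size keeps some headroom free. A brief sketch of that masking scheme with illustrative field names, not the mlx4_en structures:

#include <stdint.h>

struct toy_tx_ring {
    uint32_t size;        /* number of entries, must be a power of two */
    uint32_t size_mask;   /* size - 1, used to wrap free-running counters */
    uint32_t prod;        /* free-running producer counter */
    uint32_t cons;        /* free-running consumer counter */
};

static void toy_tx_ring_init(struct toy_tx_ring *r, uint32_t size)
{
    r->size = size;
    r->size_mask = size - 1;
    r->prod = 0;
    r->cons = 0;
}

/* Slot index for the next descriptor to fill. */
static uint32_t toy_tx_ring_slot(const struct toy_tx_ring *r)
{
    return r->prod & r->size_mask;
}

/* Entries currently in flight; correct across counter wraparound. */
static uint32_t toy_tx_ring_inflight(const struct toy_tx_ring *r)
{
    return r->prod - r->cons;
}
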
/openbmc/linux/drivers/net/wireless/ath/ath12k/
dbring.c
11 struct ath12k_dbring *ring, in ath12k_dbring_bufs_replenish() argument
23 srng = &ab->hal.srng_list[ring->refill_srng.ring_id]; in ath12k_dbring_bufs_replenish()
30 ptr_aligned = PTR_ALIGN(ptr_unaligned, ring->buf_align); in ath12k_dbring_bufs_replenish()
31 paddr = dma_map_single(ab->dev, ptr_aligned, ring->buf_sz, in ath12k_dbring_bufs_replenish()
38 spin_lock_bh(&ring->idr_lock); in ath12k_dbring_bufs_replenish()
39 buf_id = idr_alloc(&ring->bufs_idr, buff, 0, ring->bufs_max, gfp); in ath12k_dbring_bufs_replenish()
40 spin_unlock_bh(&ring->idr_lock); in ath12k_dbring_bufs_replenish()
64 spin_lock_bh(&ring->idr_lock); in ath12k_dbring_bufs_replenish()
65 idr_remove(&ring->bufs_idr, buf_id); in ath12k_dbring_bufs_replenish()
66 spin_unlock_bh(&ring->idr_lock); in ath12k_dbring_bufs_replenish()
[all …]
/openbmc/linux/drivers/net/wireless/ath/ath11k/
dbring.c
40 struct ath11k_dbring *ring, in ath11k_dbring_bufs_replenish() argument
52 srng = &ab->hal.srng_list[ring->refill_srng.ring_id]; in ath11k_dbring_bufs_replenish()
59 ptr_aligned = PTR_ALIGN(ptr_unaligned, ring->buf_align); in ath11k_dbring_bufs_replenish()
60 ath11k_dbring_fill_magic_value(ar, ptr_aligned, ring->buf_sz); in ath11k_dbring_bufs_replenish()
61 paddr = dma_map_single(ab->dev, ptr_aligned, ring->buf_sz, in ath11k_dbring_bufs_replenish()
68 spin_lock_bh(&ring->idr_lock); in ath11k_dbring_bufs_replenish()
69 buf_id = idr_alloc(&ring->bufs_idr, buff, 0, ring->bufs_max, GFP_ATOMIC); in ath11k_dbring_bufs_replenish()
70 spin_unlock_bh(&ring->idr_lock); in ath11k_dbring_bufs_replenish()
95 spin_lock_bh(&ring->idr_lock); in ath11k_dbring_bufs_replenish()
96 idr_remove(&ring->bufs_idr, buf_id); in ath11k_dbring_bufs_replenish()
[all …]
/openbmc/linux/drivers/gpu/drm/msm/
msm_ringbuffer.c
17 struct msm_fence_context *fctx = submit->ring->fctx; in msm_job_run()
35 /* TODO move submit path over to using a per-ring lock.. */ in msm_job_run()
61 struct msm_ringbuffer *ring; in msm_ringbuffer_new() local
69 ring = kzalloc(sizeof(*ring), GFP_KERNEL); in msm_ringbuffer_new()
70 if (!ring) { in msm_ringbuffer_new()
75 ring->gpu = gpu; in msm_ringbuffer_new()
76 ring->id = id; in msm_ringbuffer_new()
78 ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ, in msm_ringbuffer_new()
80 gpu->aspace, &ring->bo, &ring->iova); in msm_ringbuffer_new()
82 if (IS_ERR(ring->start)) { in msm_ringbuffer_new()
[all …]
/openbmc/linux/net/rds/
ib_ring.c
66 void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr) in rds_ib_ring_init() argument
68 memset(ring, 0, sizeof(*ring)); in rds_ib_ring_init()
69 ring->w_nr = nr; in rds_ib_ring_init()
70 rdsdebug("ring %p nr %u\n", ring, ring->w_nr); in rds_ib_ring_init()
73 static inline u32 __rds_ib_ring_used(struct rds_ib_work_ring *ring) in __rds_ib_ring_used() argument
78 diff = ring->w_alloc_ctr - (u32) atomic_read(&ring->w_free_ctr); in __rds_ib_ring_used()
79 BUG_ON(diff > ring->w_nr); in __rds_ib_ring_used()
84 void rds_ib_ring_resize(struct rds_ib_work_ring *ring, u32 nr) in rds_ib_ring_resize() argument
88 BUG_ON(__rds_ib_ring_used(ring)); in rds_ib_ring_resize()
89 ring->w_nr = nr; in rds_ib_ring_resize()
[all …]
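
The RDS work-ring excerpt measures occupancy as the difference between a free-running allocation counter and a free-running free counter; unsigned subtraction keeps the result correct across wraparound as long as the difference never exceeds the ring size. A compact sketch of that counter pair with made-up names:

#include <assert.h>
#include <stdint.h>

struct toy_work_ring {
    uint32_t w_nr;         /* number of entries in the ring */
    uint32_t w_alloc_ctr;  /* total entries ever allocated */
    uint32_t w_free_ctr;   /* total entries ever freed */
};

static uint32_t toy_ring_used(const struct toy_work_ring *r)
{
    uint32_t diff = r->w_alloc_ctr - r->w_free_ctr;  /* wrap-safe */

    assert(diff <= r->w_nr);   /* mirrors the BUG_ON() in the excerpt */
    return diff;
}

static uint32_t toy_ring_avail(const struct toy_work_ring *r)
{
    return r->w_nr - toy_ring_used(r);
}
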
