/openbmc/qemu/pc-bios/ |
HD | openbios-ppc | (binary firmware image; matches are raw bytes and embedded device strings such as "heathrow", "paddington", "Keylargo", no source code)
/openbmc/linux/drivers/media/v4l2-core/ |
H A D | videobuf-core.c |
    50  #define CALL(q, f, arg...) \
    51      ((q->int_ops->f) ? q->int_ops->f(arg) : 0)
    52  #define CALLPTR(q, f, arg...) \
    53      ((q->int_ops->f) ? q->int_ops->f(arg) : NULL)
    55  struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q)
    59      BUG_ON(q->msize < sizeof(*vb));
    61      if (!q->int_ops || !q->int_ops->alloc_vb) {
    66      vb = q->int_ops->alloc_vb(q->msize);
    76  static int state_neither_active_nor_queued(struct videobuf_queue *q,
    82      spin_lock_irqsave(q->irqlock, flags);
    [all …]
|
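The CALL()/CALLPTR() macros above dispatch through an ops table whose hooks may be NULL, substituting 0 or NULL when a hook is absent. A minimal userspace sketch of the same pattern, with a hypothetical buf_ops type and C99 variadic macros in place of the kernel's GNU "arg..." form:

    #include <stdio.h>
    #include <stddef.h>

    struct buf_ops {
        int   (*init)(int size);        /* optional hook, may be NULL */
        void *(*alloc)(size_t size);    /* optional hook, may be NULL */
    };

    #define CALL(ops, f, ...)    ((ops)->f ? (ops)->f(__VA_ARGS__) : 0)
    #define CALLPTR(ops, f, ...) ((ops)->f ? (ops)->f(__VA_ARGS__) : NULL)

    int main(void)
    {
        struct buf_ops ops = { NULL, NULL };

        /* Both hooks are unset, so the macros fall back to 0 / NULL
         * instead of calling through a NULL function pointer. */
        printf("init -> %d, alloc -> %p\n",
               CALL(&ops, init, 64), CALLPTR(&ops, alloc, (size_t)64));
        return 0;
    }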
/openbmc/linux/lib/crypto/ |
H A D | gf128mul.c |
    56  #define gf128mul_dat(q) { \
    57      q(0x00), q(0x01), q(0x02), q(0x03), q(0x04), q(0x05), q(0x06), q(0x07),\
    58      q(0x08), q(0x09), q(0x0a), q(0x0b), q(0x0c), q(0x0d), q(0x0e), q(0x0f),\
    59      q(0x10), q(0x11), q(0x12), q(0x13), q(0x14), q(0x15), q(0x16), q(0x17),\
    60      q(0x18), q(0x19), q(0x1a), q(0x1b), q(0x1c), q(0x1d), q(0x1e), q(0x1f),\
    61      q(0x20), q(0x21), q(0x22), q(0x23), q(0x24), q(0x25), q(0x26), q(0x27),\
    62      q(0x28), q(0x29), q(0x2a), q(0x2b), q(0x2c), q(0x2d), q(0x2e), q(0x2f),\
    63      q(0x30), q(0x31), q(0x32), q(0x33), q(0x34), q(0x35), q(0x36), q(0x37),\
    64      q(0x38), q(0x39), q(0x3a), q(0x3b), q(0x3c), q(0x3d), q(0x3e), q(0x3f),\
    65      q(0x40), q(0x41), q(0x42), q(0x43), q(0x44), q(0x45), q(0x46), q(0x47),\
    [all …]
|
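gf128mul_dat() is the generate-a-table-with-a-macro idiom: the 256-entry initializer list is written once, with a per-entry macro q as its parameter, and each lookup table instantiates it with a different q. A small sketch of the idiom with an 8-entry table and hypothetical per-entry macros:

    #include <stdio.h>

    /* One initializer list, parameterized by the per-entry macro q. */
    #define TABLE_DAT(q) { q(0), q(1), q(2), q(3), q(4), q(5), q(6), q(7) }

    #define AS_SQUARE(x)  ((x) * (x))
    #define AS_DOUBLE(x)  ((x) << 1)

    static const unsigned int squares[8] = TABLE_DAT(AS_SQUARE);
    static const unsigned int doubles[8] = TABLE_DAT(AS_DOUBLE);

    int main(void)
    {
        for (int i = 0; i < 8; i++)
            printf("%d: square=%u double=%u\n", i, squares[i], doubles[i]);
        return 0;
    }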
/openbmc/linux/drivers/net/ethernet/fungible/funeth/ |
H A D | funeth_rx.c |
    50  static void cache_offer(struct funeth_rxq *q, const struct funeth_rxbuf *buf)
    52      struct funeth_rx_cache *c = &q->cache;
    58      dma_unmap_page_attrs(q->dma_dev, buf->dma_addr, PAGE_SIZE,
    67  static bool cache_get(struct funeth_rxq *q, struct funeth_rxbuf *rb)
    69      struct funeth_rx_cache *c = &q->cache;
    77      dma_sync_single_for_device(q->dma_dev, buf->dma_addr,
    88      dma_unmap_page_attrs(q->dma_dev, buf->dma_addr, PAGE_SIZE,
    98  static int funeth_alloc_page(struct funeth_rxq *q, struct funeth_rxbuf *rb,
   103      if (cache_get(q, rb))
   110      rb->dma_addr = dma_map_page(q->dma_dev, p, 0, PAGE_SIZE,
    [all …]
|
H A D | funeth_tx.c |
    56  static void *txq_end(const struct funeth_txq *q)
    58      return (void *)q->hw_wb;
    64  static unsigned int txq_to_end(const struct funeth_txq *q, void *p)
    66      return txq_end(q) - p;
    78  static struct fun_dataop_gl *fun_write_gl(const struct funeth_txq *q,
    90      i < ngle && txq_to_end(q, gle); i++, gle++)
    93      if (txq_to_end(q, gle) == 0) {
    94          gle = (struct fun_dataop_gl *)q->desc;
   107  static struct sk_buff *fun_tls_tx(struct sk_buff *skb, struct funeth_txq *q,
   132      FUN_QSTAT_INC(q, tx_tls_fallback);
    [all …]
|
/openbmc/linux/net/sched/ |
H A D | sch_choke.c |
    75  static unsigned int choke_len(const struct choke_sched_data *q)
    77      return (q->tail - q->head) & q->tab_mask;
    81  static int use_ecn(const struct choke_sched_data *q)
    83      return q->flags & TC_RED_ECN;
    87  static int use_harddrop(const struct choke_sched_data *q)
    89      return q->flags & TC_RED_HARDDROP;
    93  static void choke_zap_head_holes(struct choke_sched_data *q)
    96      q->head = (q->head + 1) & q->tab_mask;
    97      if (q->head == q->tail)
    99      } while (q->tab[q->head] == NULL);
    [all …]
|
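choke_len() relies on the table size being a power of two: (tail - head) & tab_mask yields the occupancy even when tail has wrapped around past head. A standalone sketch of that arithmetic:

    #include <stdio.h>

    #define TAB_SIZE 8u                 /* must be a power of two */
    #define TAB_MASK (TAB_SIZE - 1u)

    /* Occupancy of a ring whose head/tail indices wrap at TAB_SIZE. */
    static unsigned int ring_len(unsigned int head, unsigned int tail)
    {
        return (tail - head) & TAB_MASK;
    }

    int main(void)
    {
        printf("%u\n", ring_len(5u, 7u));   /* 2: slots 5 and 6 in use */
        printf("%u\n", ring_len(6u, 1u));   /* 3: tail wrapped past the end */
        return 0;
    }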
H A D | sch_netem.c |
   209  static bool loss_4state(struct netem_sched_data *q)
   211      struct clgstate *clg = &q->clg;
   212      u32 rnd = prandom_u32_state(&q->prng.prng_state);
   274  static bool loss_gilb_ell(struct netem_sched_data *q)
   276      struct clgstate *clg = &q->clg;
   277      struct rnd_state *s = &q->prng.prng_state;
   296  static bool loss_event(struct netem_sched_data *q)
   298      switch (q->loss_model) {
   301      return q->loss && q->loss >= get_crandom(&q->loss_cor, &q->prng);
   309      return loss_4state(q);
    [all …]
|
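loss_gilb_ell() implements the two-state Gilbert-Elliott channel: a Good and a Bad state with their own loss probabilities, plus fixed transition probabilities between them. A userspace sketch of one common formulation (the parameter values are illustrative, not netem's defaults, and rand() stands in for the kernel's prandom state):

    #include <stdio.h>
    #include <stdlib.h>

    enum state { GOOD, BAD };

    struct gilb_ell {
        enum state st;
        double p;           /* P(Good -> Bad) */
        double r;           /* P(Bad -> Good) */
        double loss_good;   /* loss probability while Good */
        double loss_bad;    /* loss probability while Bad  */
    };

    static int ge_loss_event(struct gilb_ell *g)
    {
        double u = rand() / (double)RAND_MAX;

        if (g->st == GOOD) {
            if (u < g->p)
                g->st = BAD;
            return (rand() / (double)RAND_MAX) < g->loss_good;
        }
        if (u < g->r)
            g->st = GOOD;
        return (rand() / (double)RAND_MAX) < g->loss_bad;
    }

    int main(void)
    {
        struct gilb_ell g = { GOOD, 0.05, 0.3, 0.001, 0.5 };
        int lost = 0, n = 100000;

        for (int i = 0; i < n; i++)
            lost += ge_loss_event(&g);
        printf("observed loss rate: %.3f\n", lost / (double)n);
        return 0;
    }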
H A D | sch_sfq.c |
   150  static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)
   153      return &q->slots[val].dep;
   154      return &q->dep[val - SFQ_MAX_FLOWS];
   157  static unsigned int sfq_hash(const struct sfq_sched_data *q,
   160      return skb_get_hash_perturb(skb, &q->perturbation) & (q->divisor - 1);
   166      struct sfq_sched_data *q = qdisc_priv(sch);
   173      TC_H_MIN(skb->priority) <= q->divisor)
   176      fl = rcu_dereference_bh(q->filter_list);
   178      return sfq_hash(q, skb) + 1;
   194      if (TC_H_MIN(res.classid) <= q->divisor)
    [all …]
|
H A D | sch_sfb.c |
   123  static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
   126      struct sfb_bucket *b = &q->bins[slot].bins[0][0];
   138  static void increment_qlen(const struct sfb_skb_cb *cb, struct sfb_sched_data *q)
   144      increment_one_qlen(sfbhash, 0, q);
   148      increment_one_qlen(sfbhash, 1, q);
   152      struct sfb_sched_data *q)
   155      struct sfb_bucket *b = &q->bins[slot].bins[0][0];
   167  static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
   173      decrement_one_qlen(sfbhash, 0, q);
   177      decrement_one_qlen(sfbhash, 1, q);
    [all …]
|
H A D | sch_red.c |
    55  static inline int red_use_ecn(struct red_sched_data *q)
    57      return q->flags & TC_RED_ECN;
    60  static inline int red_use_harddrop(struct red_sched_data *q)
    62      return q->flags & TC_RED_HARDDROP;
    65  static int red_use_nodrop(struct red_sched_data *q)
    67      return q->flags & TC_RED_NODROP;
    73      struct red_sched_data *q = qdisc_priv(sch);
    74      struct Qdisc *child = q->qdisc;
    78      q->vars.qavg = red_calc_qavg(&q->parms,
    79                                   &q->vars,
    [all …]
|
H A D | sch_fq_pie.c |
    74  static unsigned int fq_pie_hash(const struct fq_pie_sched_data *q,
    77      return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
    83      struct fq_pie_sched_data *q = qdisc_priv(sch);
    90      TC_H_MIN(skb->priority) <= q->flows_cnt)
    93      filter = rcu_dereference_bh(q->filter_list);
    95      return fq_pie_hash(q, skb) + 1;
   111      if (TC_H_MIN(res.classid) <= q->flows_cnt)
   132      struct fq_pie_sched_data *q = qdisc_priv(sch);
   150      sel_flow = &q->flows[idx];
   153      memory_limited = q->memory_usage > q->memory_limit + skb->truesize;
    [all …]
|
H A D | sch_fq_codel.c |
    70  static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
    73      return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
    79      struct fq_codel_sched_data *q = qdisc_priv(sch);
    86      TC_H_MIN(skb->priority) <= q->flows_cnt)
    89      filter = rcu_dereference_bh(q->filter_list);
    91      return fq_codel_hash(q, skb) + 1;
   107      if (TC_H_MIN(res.classid) <= q->flows_cnt)
   140      struct fq_codel_sched_data *q = qdisc_priv(sch);
   154      for (i = 0; i < q->flows_cnt; i++) {
   155          if (q->backlogs[i] > maxbacklog) {
    [all …]
|
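fq_codel_hash() above and fq_pie_hash() earlier both map the 32-bit skb hash onto flows_cnt buckets with reciprocal_scale(), which replaces an expensive modulo with one multiply and a shift. A standalone sketch (the one-liner matches the kernel's definition; the hash constant is just a stand-in for skb_get_hash()):

    #include <stdint.h>
    #include <stdio.h>

    /* Maps val uniformly into [0, ep_ro) without a division. */
    static inline uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
    {
        return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
    }

    int main(void)
    {
        uint32_t flows_cnt = 1024;
        uint32_t hash = 0x9e3779b9u;    /* stand-in for skb_get_hash() */

        printf("bucket %u of %u\n",
               reciprocal_scale(hash, flows_cnt), flows_cnt);
        return 0;
    }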
/openbmc/linux/sound/core/seq/oss/ |
H A D | seq_oss_readq.c |
    35      struct seq_oss_readq *q;
    37      q = kzalloc(sizeof(*q), GFP_KERNEL);
    38      if (!q)
    41      q->q = kcalloc(maxlen, sizeof(union evrec), GFP_KERNEL);
    42      if (!q->q) {
    43          kfree(q);
    47      q->maxlen = maxlen;
    48      q->qlen = 0;
    49      q->head = q->tail = 0;
    50      init_waitqueue_head(&q->midi_sleep);
    [all …]
|
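snd_seq_oss_readq_new() shows the classic two-step allocation: a control block plus a separately allocated event array, with the control block freed again if the array allocation fails. A userspace sketch of that shape together with the head/tail/qlen bookkeeping the snippet initializes (plain ints stand in for union evrec):

    #include <stdio.h>
    #include <stdlib.h>

    struct readq {
        int *q;                  /* event array, stand-in for union evrec */
        int maxlen, qlen;
        int head, tail;
    };

    static struct readq *readq_new(int maxlen)
    {
        struct readq *q = calloc(1, sizeof(*q));

        if (!q)
            return NULL;
        q->q = calloc(maxlen, sizeof(*q->q));
        if (!q->q) {
            free(q);             /* undo the first allocation on failure */
            return NULL;
        }
        q->maxlen = maxlen;
        return q;
    }

    static int readq_put(struct readq *q, int ev)
    {
        if (q->qlen >= q->maxlen)
            return -1;           /* queue full */
        q->q[q->tail] = ev;
        q->tail = (q->tail + 1) % q->maxlen;
        q->qlen++;
        return 0;
    }

    int main(void)
    {
        struct readq *q = readq_new(4);

        if (!q)
            return 1;
        readq_put(q, 42);
        printf("qlen=%d head=%d tail=%d\n", q->qlen, q->head, q->tail);
        free(q->q);
        free(q);
        return 0;
    }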
/openbmc/linux/Documentation/networking/ |
H A D | tls-offload-layers.svg | (SVG image; matches are "q" quadratic-Bézier commands inside glyph path data, not source code)
|
/openbmc/qemu/hw/block/ |
H A D | cdrom.c |
    43      uint8_t *q;
    48      q = buf + 2;
    49      *q++ = 1;    /* first session */
    50      *q++ = 1;    /* last session */
    52      *q++ = 0;    /* reserved */
    53      *q++ = 0x14; /* ADR, control */
    54      *q++ = 1;    /* track number */
    55      *q++ = 0;    /* reserved */
    57      *q++ = 0;    /* reserved */
    58      lba_to_msf(q, 0);
    [all …]
|
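cdrom_read_toc() fills the TOC buffer through a moving cursor (*q++ = …) and converts logical block addresses with lba_to_msf(). A sketch of that conversion, assuming the standard CD layout of 75 frames per second and the 150-sector (two-second) pregap offset:

    #include <stdint.h>
    #include <stdio.h>

    static void lba_to_msf(uint8_t *buf, int lba)
    {
        lba += 150;                    /* two-second pregap */
        buf[0] = (lba / 75) / 60;      /* minutes */
        buf[1] = (lba / 75) % 60;      /* seconds */
        buf[2] = lba % 75;             /* frames  */
    }

    int main(void)
    {
        uint8_t msf[3];

        lba_to_msf(msf, 16);           /* sector 16: start of the ISO PVD */
        printf("%02u:%02u:%02u\n", msf[0], msf[1], msf[2]);
        return 0;
    }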
/openbmc/linux/sound/core/seq/ |
H A D | seq_queue.c |
    50  static int queue_list_add(struct snd_seq_queue *q)
    58      queue_list[i] = q;
    59      q->queue = i;
    71      struct snd_seq_queue *q;
    75      q = queue_list[id];
    76      if (q) {
    77          spin_lock(&q->owner_lock);
    78          if (q->owner == client) {
    80              q->klocked = 1;
    81              spin_unlock(&q->owner_lock);
    [all …]
|
/openbmc/linux/block/ |
H A D | blk-sysfs.c |
    50  static ssize_t queue_requests_show(struct request_queue *q, char *page)
    52      return queue_var_show(q->nr_requests, page);
    56  queue_requests_store(struct request_queue *q, const char *page, size_t count)
    61      if (!queue_is_mq(q))
    71      err = blk_mq_update_nr_requests(q, nr);
    78  static ssize_t queue_ra_show(struct request_queue *q, char *page)
    82      if (!q->disk)
    84      ra_kb = q->disk->bdi->ra_pages << (PAGE_SHIFT - 10);
    89  queue_ra_store(struct request_queue *q, const char *page, size_t count)
    94      if (!q
    (remaining matches are the file's symbol index: queue_max_sectors_show (103) through blk_unregister_queue (874), all taking struct request_queue *q)
    [all …] |
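Every handler in blk-sysfs.c follows the same sysfs contract: show() formats one value into the page buffer and returns the byte count, store() parses the user's string and returns how much it consumed or a negative error. A userspace-flavored sketch of that shape, with snprintf standing in for kernel-side formatting and a plain long in place of errno codes:

    #include <stdio.h>
    #include <stdlib.h>

    struct request_queue_stub { unsigned long nr_requests; };

    static long show_nr_requests(struct request_queue_stub *q,
                                 char *page, size_t len)
    {
        return snprintf(page, len, "%lu\n", q->nr_requests);
    }

    static long store_nr_requests(struct request_queue_stub *q,
                                  const char *page)
    {
        char *end;
        unsigned long nr = strtoul(page, &end, 10);

        if (end == page)
            return -1;          /* kernel code would return -EINVAL */
        q->nr_requests = nr;
        return (long)(end - page);
    }

    int main(void)
    {
        struct request_queue_stub q = { 128 };
        char page[64];

        store_nr_requests(&q, "256\n");
        show_nr_requests(&q, page, sizeof(page));
        fputs(page, stdout);
        return 0;
    }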
H A D | elevator.c |
    62      struct request_queue *q = rq->q;
    63      struct elevator_queue *e = q->elevator;
    66      return e->type->ops.allow_merge(q, rq, bio);
    86  static inline bool elv_support_features(struct request_queue *q,
    89      return (q->required_elevator_features & e->elevator_features) ==
    90             q->required_elevator_features;
   116  static struct elevator_type *elevator_find_get(struct request_queue *q,
   123      if (e && (!elv_support_features(q, e) || !elevator_tryget(e)))
   131  struct elevator_queue *elevator_alloc(struct request_queue *q,
   136      eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
    [all …]
|
/openbmc/linux/net/xdp/ |
H A D | xsk_queue.h |
   120  static inline void __xskq_cons_read_addr_unchecked(struct xsk_queue *q, u32 cached_cons, u64 *addr)
   122      struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
   123      u32 idx = cached_cons & q->ring_mask;
   128  static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
   130      if (q->cached_cons != q->cached_prod) {
   131          __xskq_cons_read_addr_unchecked(q, q->cached_cons, addr);
   189  static inline bool xskq_has_descs(struct xsk_queue *q)
   191      return q->cached_cons != q->cached_prod;
   194  static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q,
   199      q->invalid_descs++;
    [all …]
|
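The xsk_queue helpers keep privately cached copies of the producer and consumer counters (cached_prod/cached_cons) and only mask the free-running index when actually touching the ring, so the shared counters are re-read as rarely as possible. A single-threaded toy sketch of that idiom, without the memory barriers a real single-producer/single-consumer ring needs:

    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE 8u                /* power of two */

    struct cons_queue {
        uint64_t ring[RING_SIZE];
        uint32_t ring_mask;             /* RING_SIZE - 1 */
        uint32_t cached_prod;           /* local snapshot of the producer */
        uint32_t cached_cons;           /* local consumer position */
    };

    /* Returns 1 and fills *addr if a descriptor was buffered. */
    static int cons_read(struct cons_queue *q, uint64_t *addr)
    {
        if (q->cached_cons == q->cached_prod)
            return 0;                   /* caller would refresh cached_prod */
        *addr = q->ring[q->cached_cons & q->ring_mask];
        q->cached_cons++;
        return 1;
    }

    int main(void)
    {
        struct cons_queue q = { .ring_mask = RING_SIZE - 1 };
        uint64_t addr;

        q.ring[0] = 0xdeadbeef;
        q.cached_prod = 1;              /* pretend the producer posted one */
        while (cons_read(&q, &addr))
            printf("addr=0x%llx\n", (unsigned long long)addr);
        return 0;
    }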
/openbmc/linux/drivers/media/common/videobuf2/ |
H A D | videobuf2-core.c |
    37  #define dprintk(q, level, fmt, arg...) \
    40      pr_info("[%s] %s: " fmt, (q)->name, __func__, \
    93  #define log_qop(q, op) \
    94      dprintk(q, 2, "call_qop(%s)%s\n", #op, \
    95          (q)->ops->op ? "" : " (nop)")
    97  #define call_qop(q, op, args...) \
   101      log_qop(q, op); \
   102      err = (q)->ops->op ? (q)->ops->op(args) : 0; \
   104      (q)->cnt_ ## op++; \
   108  #define call_void_qop(q, op, args...) \
    [all …]
|
/openbmc/linux/drivers/net/wireless/mediatek/mt76/ |
H A D | dma.c |
   184  mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
   186      Q_WRITE(dev, q, desc_base, q->desc_dma);
   187      Q_WRITE(dev, q, ring_size, q->ndesc);
   188      q->head = Q_READ(dev, q, dma_idx);
   189      q->tail = q->head;
   193  mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
   197      if (!q || !q->ndesc)
   201      for (i = 0; i < q->ndesc; i++)
   202          q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
   204      Q_WRITE(dev, q, cpu_idx, 0);
    [all …]
|
/openbmc/linux/drivers/spi/ |
H A D | spi-fsl-qspi.c |
   277  static inline int needs_swap_endian(struct fsl_qspi *q)
   279      return q->devtype_data->quirks & QUADSPI_QUIRK_SWAP_ENDIAN;
   282  static inline int needs_4x_clock(struct fsl_qspi *q)
   284      return q->devtype_data->quirks & QUADSPI_QUIRK_4X_INT_CLK;
   287  static inline int needs_fill_txfifo(struct fsl_qspi *q)
   289      return q->devtype_data->quirks & QUADSPI_QUIRK_TKT253890;
   292  static inline int needs_wakeup_wait_mode(struct fsl_qspi *q)
   294      return q->devtype_data->quirks & QUADSPI_QUIRK_TKT245618;
   297  static inline int needs_amba_base_offset(struct fsl_qspi *q)
   299      return !(q->devtype_data->quirks & QUADSPI_QUIRK_BASE_INTERNAL);
    [all …]
|
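The needs_*() helpers above are the per-variant quirk-bitmask pattern: each SoC revision supplies a devtype_data carrying the workaround bits it needs, and the driver tests one bit per quirk. A compact sketch of the pattern (flag and type names here are hypothetical):

    #include <stdio.h>

    #define QUIRK_SWAP_ENDIAN  (1u << 0)
    #define QUIRK_4X_INT_CLK   (1u << 1)

    struct devtype_data { unsigned int quirks; };
    struct qspi { const struct devtype_data *devtype_data; };

    static inline int needs_swap_endian(const struct qspi *q)
    {
        return q->devtype_data->quirks & QUIRK_SWAP_ENDIAN;
    }

    int main(void)
    {
        /* One static table entry per supported SoC variant. */
        static const struct devtype_data imx_like = { QUIRK_SWAP_ENDIAN };
        struct qspi q = { &imx_like };

        printf("swap endian: %d\n", !!needs_swap_endian(&q));
        return 0;
    }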
/openbmc/linux/drivers/net/wireless/broadcom/b43/ |
H A D | pio.c |
    24  static u16 generate_cookie(struct b43_pio_txqueue *q,
    37      cookie = (((u16)q->index + 1) << 12);
    49      struct b43_pio_txqueue *q = NULL;
    54      q = pio->tx_queue_AC_BK;
    57      q = pio->tx_queue_AC_BE;
    60      q = pio->tx_queue_AC_VI;
    63      q = pio->tx_queue_AC_VO;
    66      q = pio->tx_queue_mcast;
    69      if (B43_WARN_ON(!q))
    72      if (B43_WARN_ON(pack_index >= ARRAY_SIZE(q->packets)))
    [all …]
|
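generate_cookie()/parse_cookie() round-trip a queue and a packet slot through one u16: the queue index, offset by 1 so that 0 stays an invalid cookie, lives in the top four bits, and the packet index in the low twelve. A sketch of that packing:

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t make_cookie(unsigned int queue_idx, unsigned int pack_idx)
    {
        /* queue index + 1 in bits 15..12, packet index in bits 11..0 */
        return (uint16_t)(((queue_idx + 1) << 12) | (pack_idx & 0x0FFF));
    }

    static void split_cookie(uint16_t cookie,
                             unsigned int *queue_idx, unsigned int *pack_idx)
    {
        *queue_idx = (cookie >> 12) - 1;
        *pack_idx  = cookie & 0x0FFF;
    }

    int main(void)
    {
        unsigned int qi, pi;

        split_cookie(make_cookie(3, 42), &qi, &pi);
        printf("queue %u, packet %u\n", qi, pi);    /* queue 3, packet 42 */
        return 0;
    }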
/openbmc/openbmc/meta-security/dynamic-layers/meta-perl/recipes-security/bastille/files/ |
H A D | config |
     1  # Q: Would you like to enforce password aging? [Y]
     3  # Q: Should Bastille disable clear-text r-protocols that use IP-based authentication? [Y]
     5  # Q: Should we disallow root login on tty's 1-6? [N]
     7  # Q: What umask would you like to set for users on the system? [077]
     9  # Q: Do you want to set the default umask? [Y]
    11  # Q: Would you like to deactivate the Apache web server? [Y]
    13  # Q: Would you like to password protect single-user mode? [Y]
    15  # Q: Should we restrict console access to a small group of user accounts? [N]
    17  # Q: Which accounts should be able to login at console? [root]
    19  # Q: Would you like to put limits on system resource usage? [N]
    [all …]
|
/openbmc/linux/drivers/infiniband/sw/rxe/ |
H A D | rxe_queue.c |
    46  inline void rxe_queue_reset(struct rxe_queue *q)
    52      memset(q->buf->data, 0, q->buf_size - sizeof(struct rxe_queue_buf));
    58      struct rxe_queue *q;
    66      q = kzalloc(sizeof(*q), GFP_KERNEL);
    67      if (!q)
    70      q->rxe = rxe;
    71      q->type = type;
    74      q->elem_size = elem_size;
    81      q->log2_elem_size = order_base_2(elem_size);
    85      q->index_mask = num_slots - 1;
    [all …]
|
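rxe_queue_init() sizes everything in powers of two so that index arithmetic reduces to shifts and masks: order_base_2() for the element size and index_mask = num_slots - 1 for wrapping. A sketch of the rounding helpers, with portable loops in place of the kernel's bit-twiddling intrinsics:

    #include <stdio.h>

    static unsigned int roundup_pow_of_two(unsigned int n)
    {
        unsigned int p = 1;

        while (p < n)
            p <<= 1;
        return p;
    }

    static unsigned int order_base_2(unsigned int n)
    {
        unsigned int order = 0;

        while ((1u << order) < n)
            order++;
        return order;                /* log2, rounded up */
    }

    int main(void)
    {
        unsigned int elem_size = 48, num_elem = 100;
        unsigned int num_slots = roundup_pow_of_two(num_elem);

        printf("log2_elem_size=%u num_slots=%u index_mask=0x%x\n",
               order_base_2(elem_size), num_slots, num_slots - 1);
        return 0;
    }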