
Searched refs:segs (Results 1 – 25 of 153) sorted by relevance


/openbmc/linux/drivers/net/ethernet/intel/ice/
ice_flow.c
627 static int ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt) in ice_flow_val_hdrs() argument
633 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK && in ice_flow_val_hdrs()
634 !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK)) in ice_flow_val_hdrs()
638 if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK && in ice_flow_val_hdrs()
639 !is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) in ice_flow_val_hdrs()
667 sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ? in ice_flow_calc_seg_sz()
671 if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4) in ice_flow_calc_seg_sz()
673 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6) in ice_flow_calc_seg_sz()
675 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP) in ice_flow_calc_seg_sz()
677 else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK) in ice_flow_calc_seg_sz()
[all …]
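
Note on the ice_flow.c hits: ice_flow_val_hdrs() rejects a segment whose header bitmap names more than one L3 or more than one L4 protocol, and is_power_of_2() on the masked bits is the entire check. A minimal standalone sketch of that validation follows; the mask values are illustrative stand-ins, not the real ICE_FLOW_SEG_HDR_* definitions.

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative header bits; the driver's real values live in ice_flow.h. */
    #define SEG_HDR_IPV4      0x1u
    #define SEG_HDR_IPV6      0x2u
    #define SEG_HDRS_L3_MASK  (SEG_HDR_IPV4 | SEG_HDR_IPV6)

    static bool is_power_of_2(uint32_t n)
    {
        return n != 0 && (n & (n - 1)) == 0;
    }

    /* Mirrors the L3 test in ice_flow_val_hdrs(): if any L3 bit is set,
     * exactly one must be set; the L4 test has the same shape. */
    static bool l3_hdrs_valid(uint32_t hdrs)
    {
        uint32_t l3 = hdrs & SEG_HDRS_L3_MASK;

        return !l3 || is_power_of_2(l3);
    }

    int main(void)
    {
        return (l3_hdrs_valid(SEG_HDR_IPV4) && !l3_hdrs_valid(SEG_HDRS_L3_MASK)) ? 0 : 1;
    }
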
/openbmc/linux/net/core/
gso.c
16 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); in skb_eth_gso_segment() local
22 segs = ptype->callbacks.gso_segment(skb, features); in skb_eth_gso_segment()
28 return segs; in skb_eth_gso_segment()
40 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); in skb_mac_gso_segment() local
53 segs = ptype->callbacks.gso_segment(skb, features); in skb_mac_gso_segment()
61 return segs; in skb_mac_gso_segment()
91 struct sk_buff *segs; in __skb_gso_segment() local
124 segs = skb_mac_gso_segment(skb, features); in __skb_gso_segment()
126 if (segs != skb && unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs))) in __skb_gso_segment()
129 return segs; in __skb_gso_segment()
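
Note on the gso.c hits: segs starts out as ERR_PTR(-EPROTONOSUPPORT) and is only replaced if a protocol type registers a gso_segment callback, so callers must distinguish error pointers from a real segment list. A userspace sketch of that ERR_PTR/IS_ERR convention, assuming the usual kernel encoding of errno values in the top 4095 bytes of the address space:

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    /* Errors travel as pointers; IS_ERR() recognises them by range. */
    static inline void *ERR_PTR(long error)    { return (void *)error; }
    static inline long  PTR_ERR(const void *p) { return (long)p; }
    static inline int   IS_ERR(const void *p)
    {
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    int main(void)
    {
        void *segs = ERR_PTR(-EPROTONOSUPPORT);  /* default before any callback runs */

        if (IS_ERR(segs))
            printf("segmentation failed: %ld\n", PTR_ERR(segs));
        return 0;
    }
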
/openbmc/qemu/linux-user/
vm86.c
69 target_v86->regs.cs = tswap16(env->segs[R_CS].selector); in save_v86_state()
70 target_v86->regs.ss = tswap16(env->segs[R_SS].selector); in save_v86_state()
71 target_v86->regs.ds = tswap16(env->segs[R_DS].selector); in save_v86_state()
72 target_v86->regs.es = tswap16(env->segs[R_ES].selector); in save_v86_state()
73 target_v86->regs.fs = tswap16(env->segs[R_FS].selector); in save_v86_state()
74 target_v86->regs.gs = tswap16(env->segs[R_GS].selector); in save_v86_state()
79 env->eflags, env->segs[R_CS].selector, env->eip); in save_v86_state()
152 if (env->segs[R_CS].selector == TARGET_BIOSSEG) in do_int()
166 ssp = env->segs[R_SS].selector << 4; in do_int()
169 vm_putw(env, ssp, sp - 4, env->segs[R_CS].selector); in do_int()
[all …]
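
Note on the vm86.c hits: do_int() forms a linear stack base from the saved SS selector with `selector << 4`, which is plain real-mode segment:offset arithmetic (linear = segment * 16 + offset). A worked example:

    #include <stdint.h>
    #include <stdio.h>

    /* Real-mode address formation as in do_int(): ssp = SS << 4, then the
     * 16-bit SP is added to reach the actual stack slot. */
    int main(void)
    {
        uint16_t ss = 0x9000, sp = 0xFFFE;
        unsigned linear = ((unsigned)ss << 4) + sp;

        printf("SS:SP %04X:%04X -> linear 0x%05X\n", ss, sp, linear); /* 0x9FFFE */
        return 0;
    }
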
/openbmc/linux/net/sctp/
offload.c
42 struct sk_buff *segs = ERR_PTR(-EINVAL); in sctp_gso_segment() local
68 segs = NULL; in sctp_gso_segment()
72 segs = skb_segment(skb, (features | NETIF_F_HW_CSUM) & ~NETIF_F_SG); in sctp_gso_segment()
73 if (IS_ERR(segs)) in sctp_gso_segment()
78 for (skb = segs; skb; skb = skb->next) { in sctp_gso_segment()
87 return segs; in sctp_gso_segment()
/openbmc/qemu/bsd-user/i386/
target_arch_reg.h
57 regs->r_fs = env->segs[R_FS].selector & 0xffff; in target_copy_regs()
58 regs->r_es = env->segs[R_ES].selector & 0xffff; in target_copy_regs()
59 regs->r_ds = env->segs[R_DS].selector & 0xffff; in target_copy_regs()
73 regs->r_cs = env->segs[R_CS].selector & 0xffff; in target_copy_regs()
78 regs->r_ss = env->segs[R_SS].selector & 0xffff; in target_copy_regs()
79 regs->r_gs = env->segs[R_GS].selector & 0xffff; in target_copy_regs()
/openbmc/qemu/target/i386/
arch_dump.c
74 regs.cs = env->segs[R_CS].selector; in x86_64_write_elf64_note()
75 regs.ss = env->segs[R_SS].selector; in x86_64_write_elf64_note()
76 regs.fs_base = env->segs[R_FS].base; in x86_64_write_elf64_note()
77 regs.gs_base = env->segs[R_GS].base; in x86_64_write_elf64_note()
78 regs.ds = env->segs[R_DS].selector; in x86_64_write_elf64_note()
79 regs.es = env->segs[R_ES].selector; in x86_64_write_elf64_note()
80 regs.fs = env->segs[R_FS].selector; in x86_64_write_elf64_note()
81 regs.gs = env->segs[R_GS].selector; in x86_64_write_elf64_note()
140 prstatus->regs.cs = env->segs[R_CS].selector; in x86_fill_elf_prstatus()
141 prstatus->regs.ss = env->segs[R_SS].selector; in x86_fill_elf_prstatus()
[all …]
gdbstub.c
163 return gdb_get_reg32(mem_buf, env->segs[R_CS].selector); in x86_cpu_gdb_read_register()
165 return gdb_get_reg32(mem_buf, env->segs[R_SS].selector); in x86_cpu_gdb_read_register()
167 return gdb_get_reg32(mem_buf, env->segs[R_DS].selector); in x86_cpu_gdb_read_register()
169 return gdb_get_reg32(mem_buf, env->segs[R_ES].selector); in x86_cpu_gdb_read_register()
171 return gdb_get_reg32(mem_buf, env->segs[R_FS].selector); in x86_cpu_gdb_read_register()
173 return gdb_get_reg32(mem_buf, env->segs[R_GS].selector); in x86_cpu_gdb_read_register()
175 return gdb_read_reg_cs64(env->hflags, mem_buf, env->segs[R_FS].base); in x86_cpu_gdb_read_register()
177 return gdb_read_reg_cs64(env->hflags, mem_buf, env->segs[R_GS].base); in x86_cpu_gdb_read_register()
236 if (selector != env->segs[sreg].selector) { in x86_cpu_gdb_load_seg()
334 return gdb_write_reg_cs64(env->hflags, mem_buf, &env->segs[R_FS].base); in x86_cpu_gdb_write_register()
[all …]
machine.c
242 (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) { in cpu_pre_save()
243 env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK); in cpu_pre_save()
244 env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK); in cpu_pre_save()
245 env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK); in cpu_pre_save()
246 env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK); in cpu_pre_save()
247 env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK); in cpu_pre_save()
248 env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK); in cpu_pre_save()
340 (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) { in cpu_post_load()
341 env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK); in cpu_post_load()
342 env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK); in cpu_post_load()
[all …]
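
Note on the machine.c hits: cpu_pre_save()/cpu_post_load() read the descriptor privilege level out of the cached segment flags with `flags >> DESC_DPL_SHIFT & 3` and then clear those bits; `flags &= ~(flags & DESC_DPL_MASK)` has the same effect as the simpler `flags &= ~DESC_DPL_MASK`. A small sketch, assuming the DPL occupies bits 13-14 of the flags word as in QEMU's descriptor layout:

    #include <stdint.h>
    #include <stdio.h>

    #define DPL_SHIFT 13                    /* assumed stand-in for DESC_DPL_SHIFT */
    #define DPL_MASK  (3u << DPL_SHIFT)

    int main(void)
    {
        uint32_t flags = 0x00cff300;        /* illustrative flags word with DPL = 3 */

        printf("DPL before: %u\n", (flags >> DPL_SHIFT) & 3);
        flags &= ~DPL_MASK;                 /* same effect as ~(flags & DPL_MASK) */
        printf("DPL after:  %u\n", (flags >> DPL_SHIFT) & 3);
        return 0;
    }
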
/openbmc/linux/net/ipv4/
udp_offload.c
24 struct sk_buff *segs = ERR_PTR(-EINVAL); in __skb_udp_tunnel_segment() local
87 segs = gso_inner_segment(skb, features); in __skb_udp_tunnel_segment()
88 if (IS_ERR_OR_NULL(segs)) { in __skb_udp_tunnel_segment()
94 gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL); in __skb_udp_tunnel_segment()
98 skb = segs; in __skb_udp_tunnel_segment()
150 return segs; in __skb_udp_tunnel_segment()
160 struct sk_buff *segs = ERR_PTR(-EINVAL); in skb_udp_tunnel_segment() local
182 segs = __skb_udp_tunnel_segment(skb, features, gso_inner_segment, in skb_udp_tunnel_segment()
188 return segs; in skb_udp_tunnel_segment()
219 static struct sk_buff *__udpv4_gso_segment_list_csum(struct sk_buff *segs) in __udpv4_gso_segment_list_csum() argument
[all …]
tcp_offload.c
59 struct sk_buff *segs = ERR_PTR(-EINVAL); in tcp_gso_segment() local
94 segs = NULL; in tcp_gso_segment()
103 segs = skb_segment(skb, features); in tcp_gso_segment()
104 if (IS_ERR(segs)) in tcp_gso_segment()
108 segs->ooo_okay = ooo_okay; in tcp_gso_segment()
114 if (skb_is_gso(segs)) in tcp_gso_segment()
115 mss *= skb_shinfo(segs)->gso_segs; in tcp_gso_segment()
119 skb = segs; in tcp_gso_segment()
124 tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss); in tcp_gso_segment()
181 return segs; in tcp_gso_segment()
gre_offload.c
21 struct sk_buff *segs = ERR_PTR(-EINVAL); in gre_gso_segment() local
58 segs = skb_mac_gso_segment(skb, features); in gre_gso_segment()
59 if (IS_ERR_OR_NULL(segs)) { in gre_gso_segment()
65 gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL); in gre_gso_segment()
69 skb = segs; in gre_gso_segment()
119 return segs; in gre_gso_segment()
/openbmc/qemu/bsd-user/x86_64/
target_arch_reg.h
80 regs->r_fs = env->segs[R_FS].selector & 0xffff; in target_copy_regs()
81 regs->r_gs = env->segs[R_GS].selector & 0xffff; in target_copy_regs()
83 regs->r_es = env->segs[R_ES].selector & 0xffff; in target_copy_regs()
84 regs->r_ds = env->segs[R_DS].selector & 0xffff; in target_copy_regs()
86 regs->r_cs = env->segs[R_CS].selector & 0xffff; in target_copy_regs()
89 regs->r_ss = env->segs[R_SS].selector & 0xffff; in target_copy_regs()
/openbmc/qemu/target/i386/nvmm/
nvmm-all.c
118 nvmm_set_segment(&state->segs[NVMM_X64_SEG_CS], &env->segs[R_CS]); in nvmm_set_registers()
119 nvmm_set_segment(&state->segs[NVMM_X64_SEG_DS], &env->segs[R_DS]); in nvmm_set_registers()
120 nvmm_set_segment(&state->segs[NVMM_X64_SEG_ES], &env->segs[R_ES]); in nvmm_set_registers()
121 nvmm_set_segment(&state->segs[NVMM_X64_SEG_FS], &env->segs[R_FS]); in nvmm_set_registers()
122 nvmm_set_segment(&state->segs[NVMM_X64_SEG_GS], &env->segs[R_GS]); in nvmm_set_registers()
123 nvmm_set_segment(&state->segs[NVMM_X64_SEG_SS], &env->segs[R_SS]); in nvmm_set_registers()
126 nvmm_set_segment(&state->segs[NVMM_X64_SEG_GDT], &env->gdt); in nvmm_set_registers()
127 nvmm_set_segment(&state->segs[NVMM_X64_SEG_LDT], &env->ldt); in nvmm_set_registers()
128 nvmm_set_segment(&state->segs[NVMM_X64_SEG_TR], &env->tr); in nvmm_set_registers()
129 nvmm_set_segment(&state->segs[NVMM_X64_SEG_IDT], &env->idt); in nvmm_set_registers()
[all …]
/openbmc/linux/net/mpls/
mpls_gso.c
23 struct sk_buff *segs = ERR_PTR(-EINVAL); in mpls_gso_segment() local
51 segs = skb_mac_gso_segment(skb, mpls_features); in mpls_gso_segment()
52 if (IS_ERR_OR_NULL(segs)) { in mpls_gso_segment()
57 skb = segs; in mpls_gso_segment()
73 return segs; in mpls_gso_segment()
/openbmc/qemu/target/i386/tcg/
seg_helper.c
409 env->segs[i].selector); in switch_tss_ra()
425 env->segs[i].selector); in switch_tss_ra()
574 if (new_eip > env->segs[R_CS].limit) { in switch_tss_ra()
710 if (env->segs[R_SS].flags & DESC_B_MASK) { in do_interrupt_protected()
716 sa.ss_base = env->segs[R_SS].base; in do_interrupt_protected()
791 sa.sp_mask = get_sp_mask(env->segs[R_SS].flags); in do_interrupt_protected()
792 sa.ss_base = env->segs[R_SS].base; in do_interrupt_protected()
817 pushl(&sa, env->segs[R_GS].selector); in do_interrupt_protected()
818 pushl(&sa, env->segs[R_FS].selector); in do_interrupt_protected()
819 pushl(&sa, env->segs[R_DS].selector); in do_interrupt_protected()
[all …]
/openbmc/qemu/dump/
win_dump.c
310 .SegEs = env->segs[0].selector, in patch_and_save_context()
311 .SegCs = env->segs[1].selector, in patch_and_save_context()
312 .SegSs = env->segs[2].selector, in patch_and_save_context()
313 .SegDs = env->segs[3].selector, in patch_and_save_context()
314 .SegFs = env->segs[4].selector, in patch_and_save_context()
315 .SegGs = env->segs[5].selector, in patch_and_save_context()
351 .SegEs = env->segs[0].selector, in patch_and_save_context()
352 .SegCs = env->segs[1].selector, in patch_and_save_context()
353 .SegSs = env->segs[2].selector, in patch_and_save_context()
354 .SegDs = env->segs[3].selector, in patch_and_save_context()
[all …]
/openbmc/linux/net/nsh/
nsh.c
81 struct sk_buff *segs = ERR_PTR(-EINVAL); in nsh_gso_segment() local
110 segs = skb_mac_gso_segment(skb, features); in nsh_gso_segment()
111 if (IS_ERR_OR_NULL(segs)) { in nsh_gso_segment()
117 for (skb = segs; skb; skb = skb->next) { in nsh_gso_segment()
126 return segs; in nsh_gso_segment()
/openbmc/linux/tools/lib/bpf/
usdt.c
400 static int parse_elf_segs(Elf *elf, const char *path, struct elf_seg **segs, size_t *seg_cnt) in parse_elf_segs() argument
427 tmp = libbpf_reallocarray(*segs, *seg_cnt + 1, sizeof(**segs)); in parse_elf_segs()
431 *segs = tmp; in parse_elf_segs()
432 seg = *segs + *seg_cnt; in parse_elf_segs()
446 qsort(*segs, *seg_cnt, sizeof(**segs), cmp_elf_segs); in parse_elf_segs()
450 static int parse_vma_segs(int pid, const char *lib_path, struct elf_seg **segs, size_t *seg_cnt) in parse_vma_segs() argument
510 tmp = libbpf_reallocarray(*segs, *seg_cnt + 1, sizeof(**segs)); in parse_vma_segs()
516 *segs = tmp; in parse_vma_segs()
517 seg = *segs + *seg_cnt; in parse_vma_segs()
533 qsort(*segs, *seg_cnt, sizeof(**segs), cmp_elf_segs); in parse_vma_segs()
[all …]
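
Note on the usdt.c hits: parse_elf_segs()/parse_vma_segs() grow the segs array one element at a time with libbpf_reallocarray() and sort it once at the end with qsort(). The sketch below reproduces that grow-then-sort shape with plain reallocarray(); the segment struct is illustrative, not the real struct elf_seg.

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative segment record; the real struct elf_seg carries more fields. */
    struct seg { unsigned long start, end; };

    static int cmp_segs(const void *a, const void *b)
    {
        const struct seg *x = a, *y = b;

        return (x->start > y->start) - (x->start < y->start);
    }

    /* Grow the array by one entry, as the reallocarray() calls above do. */
    static int add_seg(struct seg **segs, size_t *cnt, unsigned long start, unsigned long end)
    {
        struct seg *tmp = reallocarray(*segs, *cnt + 1, sizeof(**segs));

        if (!tmp)
            return -1;
        *segs = tmp;
        tmp[*cnt].start = start;
        tmp[*cnt].end = end;
        (*cnt)++;
        return 0;
    }

    int main(void)
    {
        struct seg *segs = NULL;
        size_t cnt = 0;

        add_seg(&segs, &cnt, 0x7f0000, 0x7f1000);
        add_seg(&segs, &cnt, 0x400000, 0x401000);
        qsort(segs, cnt, sizeof(*segs), cmp_segs);
        printf("lowest segment starts at 0x%lx\n", segs[0].start);
        free(segs);
        return 0;
    }
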
/openbmc/qemu/target/i386/hvf/
x86hvf.c
104 hvf_set_segment(cs, &seg, &env->segs[R_CS], false); in hvf_put_segments()
107 hvf_set_segment(cs, &seg, &env->segs[R_DS], false); in hvf_put_segments()
110 hvf_set_segment(cs, &seg, &env->segs[R_ES], false); in hvf_put_segments()
113 hvf_set_segment(cs, &seg, &env->segs[R_SS], false); in hvf_put_segments()
116 hvf_set_segment(cs, &seg, &env->segs[R_FS], false); in hvf_put_segments()
119 hvf_set_segment(cs, &seg, &env->segs[R_GS], false); in hvf_put_segments()
149 hv_vcpu_write_msr(cs->accel->fd, MSR_GSBASE, env->segs[R_GS].base); in hvf_put_msrs()
150 hv_vcpu_write_msr(cs->accel->fd, MSR_FSBASE, env->segs[R_FS].base); in hvf_put_msrs()
175 hvf_get_segment(&env->segs[R_CS], &seg); in hvf_get_segments()
178 hvf_get_segment(&env->segs[R_DS], &seg); in hvf_get_segments()
[all …]
/openbmc/linux/net/ipv6/
ip6_offload.c
112 struct sk_buff *segs = ERR_PTR(-EINVAL); in ipv6_gso_segment() local
139 segs = ERR_PTR(-EPROTONOSUPPORT); in ipv6_gso_segment()
154 segs = ops->callbacks.gso_segment(skb, features); in ipv6_gso_segment()
155 if (!segs) in ipv6_gso_segment()
159 if (IS_ERR_OR_NULL(segs)) in ipv6_gso_segment()
162 gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL); in ipv6_gso_segment()
164 for (skb = segs; skb; skb = skb->next) { in ipv6_gso_segment()
179 kfree_skb_list(segs); in ipv6_gso_segment()
194 return segs; in ipv6_gso_segment()
/openbmc/linux/drivers/infiniband/sw/rdmavt/
mr.c
379 mr->mr.map[m]->segs[n].vaddr = vaddr; in rvt_reg_user_mr()
380 mr->mr.map[m]->segs[n].length = PAGE_SIZE; in rvt_reg_user_mr()
569 mr->mr.map[m]->segs[n].vaddr = (void *)addr; in rvt_set_page()
570 mr->mr.map[m]->segs[n].length = ps; in rvt_set_page()
599 mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr; in rvt_map_mr_sg()
787 while (off >= mr->map[m]->segs[n].length) { in rvt_lkey_ok()
788 off -= mr->map[m]->segs[n].length; in rvt_lkey_ok()
797 isge->vaddr = mr->map[m]->segs[n].vaddr + off; in rvt_lkey_ok()
798 isge->length = mr->map[m]->segs[n].length - off; in rvt_lkey_ok()
894 while (off >= mr->map[m]->segs[n].length) { in rvt_rkey_ok()
[all …]
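
Note on the rdmavt mr.c hits: rvt_lkey_ok()/rvt_rkey_ok() resolve a byte offset inside a registered region by walking a two-level structure, where each map holds a fixed number of segments; whole segments are skipped until the offset falls inside one, wrapping to the next map when the segment index overflows. A compact sketch of that walk, with an illustrative segments-per-map count standing in for RVT_SEGSZ:

    #include <stdio.h>
    #include <stddef.h>

    #define SEGSZ 4                               /* illustrative; the driver uses RVT_SEGSZ */

    struct seg { void *vaddr; size_t length; };
    struct map { struct seg segs[SEGSZ]; };

    /* Same shape as the loop in rvt_lkey_ok(): consume whole segments until
     * the remaining offset lands inside segs[n], then add it to that vaddr. */
    static void *resolve(struct map **maps, size_t m, size_t n, size_t off)
    {
        while (off >= maps[m]->segs[n].length) {
            off -= maps[m]->segs[n].length;
            if (++n >= SEGSZ) {
                m++;
                n = 0;
            }
        }
        return (char *)maps[m]->segs[n].vaddr + off;
    }

    int main(void)
    {
        static char buf[64];
        struct map map0 = { .segs = {
            { buf,      16 }, { buf + 16, 16 },
            { buf + 32, 16 }, { buf + 48, 16 } } };
        struct map *maps[] = { &map0 };

        /* offset 40 resolves to segment 2, byte 8 */
        printf("match: %d\n", resolve(maps, 0, 0, 40) == (void *)(buf + 40));
        return 0;
    }
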
/openbmc/linux/net/sched/
sch_tbf.c
209 struct sk_buff *segs, *nskb; in tbf_segment() local
214 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); in tbf_segment()
216 if (IS_ERR_OR_NULL(segs)) in tbf_segment()
220 skb_list_walk_safe(segs, segs, nskb) { in tbf_segment()
221 skb_mark_not_on_list(segs); in tbf_segment()
222 seg_len = segs->len; in tbf_segment()
223 qdisc_skb_cb(segs)->pkt_len = seg_len; in tbf_segment()
224 ret = qdisc_enqueue(segs, q->qdisc, to_free); in tbf_segment()
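
Note on the sch_tbf.c hits: tbf_segment() iterates the GSO result with skb_list_walk_safe(), which saves the next pointer before the current segment is unlinked and enqueued, so handing the segment off cannot break the walk. A plain-list sketch of the same walk-safe idiom; the node type and macro name are illustrative, not the skbuff API.

    #include <stdio.h>

    struct node { int len; struct node *next; };

    /* Walk-safe iteration: capture next before the body may unlink cur. */
    #define walk_safe(head, cur, nxt) \
        for ((cur) = (head); (cur) && ((nxt) = (cur)->next, 1); (cur) = (nxt))

    int main(void)
    {
        struct node c = { 300, NULL }, b = { 200, &c }, a = { 100, &b };
        struct node *seg, *next;

        walk_safe(&a, seg, next) {
            seg->next = NULL;                 /* like skb_mark_not_on_list() */
            printf("enqueue segment, len %d\n", seg->len);
        }
        return 0;
    }
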
/openbmc/qemu/contrib/elf2dmp/
pdb.c
41 for (ptr = r->segs; (ptr < r->segs + r->segs_size); ) { in get_seg_by_num()
109 g_free(r->segs); in pdb_exit_segments()
166 r->segs = pdb_ds_read_file(r, stream_idx); in pdb_init_segments()
167 if (!r->segs) { in pdb_init_segments()
/openbmc/linux/include/uapi/linux/
elf-fdpic.h
30 struct elf32_fdpic_loadseg segs[]; member
45 struct elf64_fdpic_loadseg segs[]; member
/openbmc/linux/include/linux/
blk-integrity.h
69 unsigned int segs) in blk_queue_max_integrity_segments() argument
71 q->limits.max_integrity_segments = segs; in blk_queue_max_integrity_segments()
153 unsigned int segs) in blk_queue_max_integrity_segments() argument
