/openbmc/linux/arch/x86/include/asm/
xor_avx.h
     29  static void xor_avx_2(unsigned long bytes, unsigned long * __restrict p0,  [in xor_avx_2(), argument]
     42  "m" (p0[i / sizeof(*p0)])); \  [in xor_avx_2()]
     44  "=m" (p0[i / sizeof(*p0)])); \  [in xor_avx_2()]
     49  p0 = (unsigned long *)((uintptr_t)p0 + 512);  [in xor_avx_2()]
     56  static void xor_avx_3(unsigned long bytes, unsigned long * __restrict p0,  [in xor_avx_3(), argument]
     72  "m" (p0[i / sizeof(*p0)])); \  [in xor_avx_3()]
     74  "=m" (p0[i / sizeof(*p0)])); \  [in xor_avx_3()]
     79  p0 = (unsigned long *)((uintptr_t)p0 + 512);  [in xor_avx_3()]
     87  static void xor_avx_4(unsigned long bytes, unsigned long * __restrict p0,  [in xor_avx_4(), argument]
    106  "m" (p0[i / sizeof(*p0)])); \  [in xor_avx_4()]
    [all …]
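The matches above are the kernel's AVX-accelerated XOR helpers for RAID parity: p0 is the destination buffer, and the "(uintptr_t)p0 + 512" lines step it forward one 512-byte tile between unrolled AVX blocks. As orientation only, a minimal plain-C sketch of the same data flow (no AVX, names are ours, and it assumes bytes is a multiple of 512 as the kernel callers arrange):

    #include <stddef.h>

    /* Sketch of xor_avx_2()'s data flow: XOR source tile p1 into
     * destination tile p0, 512 bytes at a time. Illustrative, not the
     * kernel implementation. */
    static void xor_2_sketch(unsigned long bytes, unsigned long *p0,
                             const unsigned long *p1)
    {
        while (bytes >= 512) {
            for (size_t i = 0; i < 512 / sizeof(*p0); i++)
                p0[i] ^= p1[i];          /* the AVX version does 32B per op */
            p0 += 512 / sizeof(*p0);     /* same step as the "+ 512" above */
            p1 += 512 / sizeof(*p1);
            bytes -= 512;
        }
    }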
/openbmc/linux/arch/hexagon/mm/
copy_user_template.S
     19  p0 = cmp.gtu(bytes,#0)  [define]
     20  if (!p0.new) jump:nt .Ldone
     26  p0 = bitsclr(r3,#7)  [define]
     27  if (!p0.new) jump:nt .Loop_not_aligned_8
     52  p0 = bitsclr(r4,#7)  [define]
     53  if (p0.new) jump:nt .Lalign
     56  p0 = bitsclr(r3,#3)  [define]
     57  if (!p0.new) jump:nt .Loop_not_aligned_4
     82  p0 = bitsclr(r3,#1)  [define]
     83  if (!p0.new) jump:nt .Loop_not_aligned
    [all …]
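The bitsclr() predicates implement a widest-first dispatch: if the low three bits of the combined operands are clear, an 8-byte copy loop is safe; otherwise control falls through to 4-, 2-, and finally 1-byte loops. A hedged C equivalent of just that dispatch (register roles are inferred from the labels, not verified against the full template):

    #include <stdint.h>

    /* Pick the widest copy unit both addresses allow, mirroring the
     * bitsclr(..., #7/#3/#1) chain above. Hypothetical helper. */
    static unsigned copy_width(uintptr_t dst, uintptr_t src)
    {
        uintptr_t bits = dst | src;
        if ((bits & 7) == 0) return 8;   /* 8-byte loop is safe */
        if ((bits & 3) == 0) return 4;   /* else .Loop_not_aligned_8 tries 4 */
        if ((bits & 1) == 0) return 2;
        return 1;                        /* .Loop_not_aligned byte loop */
    }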
/openbmc/linux/arch/hexagon/lib/
memset.S
     29  p0 = cmp.eq(r2, #0)  [define]
     36  if p0 jumpr r31  /* count == 0, so return */
     41  p0 = tstbit(r9, #0)  [define]
     58  p0 = tstbit(r9, #1)  [define]
     60  if !p0 jump 3f  /* skip initial byte store */
     71  p0 = tstbit(r9, #2)  [define]
     73  if !p0 jump 4f  /* skip initial half store */
     84  p0 = cmp.gtu(r2, #7)  [define]
     86  if !p0 jump 5f  /* skip initial word store */
     91  p0 = cmp.gtu(r2, #11)  [define]
    [all …]
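The tstbit()/cmp.gtu() predicates steer a head-alignment sequence: store a byte, a halfword, and a word as needed until the pointer is 8-byte aligned, then fill with doublewords. A hedged C rendering (tail handling simplified; this is a sketch, not the kernel routine):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    static void *memset_sketch(void *dst, int c, size_t n)
    {
        unsigned char *p = dst;
        /* every byte of pat equals the fill byte, so partial copies of
         * it are endianness-independent */
        uint64_t pat = 0x0101010101010101ull * (unsigned char)c;

        if (n == 0)
            return dst;                      /* "count == 0, so return" */
        if (((uintptr_t)p & 1) && n >= 1) { *p = (unsigned char)c; p += 1; n -= 1; }
        if (((uintptr_t)p & 2) && n >= 2) { memcpy(p, &pat, 2); p += 2; n -= 2; }
        if (((uintptr_t)p & 4) && n >= 4) { memcpy(p, &pat, 4); p += 4; n -= 4; }
        while (n >= 8) { memcpy(p, &pat, 8); p += 8; n -= 8; }   /* wide fill */
        while (n--) *p++ = (unsigned char)c;                     /* tail */
        return dst;
    }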
memcpy_likely_aligned.S
     10  p0 = bitsclr(r1,#7)  [define]
     11  p0 = bitsclr(r0,#7)  [define]
     12  if (p0.new) r5:4 = memd(r1)
     13  if (p0.new) r7:6 = memd(r1+#8)
     16  if (!p0) jump:nt .Lmemcpy_call
     17  if (p0) r9:8 = memd(r1+#16)
     18  if (p0) r11:10 = memd(r1+#24)
     19  p0 = cmp.gtu(r2,#64)  [define]
     22  if (p0) jump:nt .Lmemcpy_call
     23  if (!p0) memd(r0) = r5:4
    [all …]
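The predicated memd() loads run under the freshly computed p0: if either pointer turns out misaligned, or the copy is large (cmp.gtu(r2,#64)), control punts to the generic memcpy at .Lmemcpy_call. The dispatch in C, as a sketch with the cut-offs taken from the excerpt:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    static void *memcpy_likely_aligned_sketch(void *dst, const void *src, size_t n)
    {
        if ((((uintptr_t)dst | (uintptr_t)src) & 7) || n > 64)
            return memcpy(dst, src, n);      /* the .Lmemcpy_call path */

        uint64_t *d = dst;                   /* both 8-byte aligned here */
        const uint64_t *s = src;
        for (size_t i = 0; i < n / 8; i++)
            d[i] = s[i];                     /* memd()-sized copies */
        memcpy((unsigned char *)dst + (n & ~(size_t)7),
               (const unsigned char *)src + (n & ~(size_t)7),
               n & 7);                       /* tail bytes, if any */
        return dst;
    }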
divsi3.S
     10  p0 = cmp.gt(r0,#-1)  [define]
     15  p3 = xor(p0,p1)
     18  p0 = cmp.gtu(r3,r2)  [define]
     26  r0 = mux(p0,#0,r0)
     27  p0 = or(p0,p1)  [define]
     28  if (p0.new) jumpr:nt r31
     35  p0 = cmp.gtu(r6,#4)  [define]
     39  if (!p0) r6 = #3
     50  if (!p0.new) r0 = add(r0,r5)
     51  if (!p0.new) r2 = sub(r2,r4)
    [all …]
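divsi3 computes a signed quotient on top of an unsigned core: p0 and p1 capture the operand signs, p3 = xor(p0,p1) records whether they differ, and the sign is patched onto the unsigned result at the end. The structure in C (the core here is deliberately naive; the asm uses a shift-subtract loop like the one sketched after udivsi3.S below):

    /* Hedged sketch of divsi3's sign handling, not the Hexagon code. */
    static int divsi3_sketch(int a, int b)
    {
        unsigned ua = a < 0 ? -(unsigned)a : (unsigned)a;
        unsigned ub = b < 0 ? -(unsigned)b : (unsigned)b;
        int neg = (a < 0) != (b < 0);   /* p3 = xor(p0,p1) */
        unsigned q = 0;

        if (ub == 0)
            return 0;                   /* div-by-zero guard is ours; the
                                         * asm's behavior is not shown */
        while (ua >= ub) {              /* naive core, O(quotient) */
            ua -= ub;
            q++;
        }
        return neg ? -(int)q : (int)q;
    }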
udivsi3.S
     13  p0 = cmp.gtu(r1,r0)  [define]
     19  if (p0) jumpr r31
     28  p0 = cmp.gtu(r2,r1)  [define]
     29  if (!p0.new) r1 = sub(r1,r2)
     30  if (!p0.new) r0 = add(r0,r3)
     34  p0 = cmp.gtu(r2,r1)  [define]
     35  if (!p0.new) r0 = add(r0,r3)
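The predicated sub/add pairs above are one step of a shift-subtract division: whenever the shifted divisor still fits, subtract it from the remainder and set the corresponding quotient bit. A C sketch of that core (uses the GCC/Clang __builtin_clz; the zero-divisor guard is ours, not the asm's):

    static unsigned udivsi3_sketch(unsigned n, unsigned d)
    {
        unsigned q = 0;
        int shift;

        if (d > n || d == 0)       /* cmp.gtu(r1,r0): early return, quotient 0 */
            return 0;
        shift = __builtin_clz(d) - __builtin_clz(n);
        for (d <<= shift; shift >= 0; shift--, d >>= 1) {
            q <<= 1;
            if (n >= d) {          /* the predicated sub/add pair */
                n -= d;
                q |= 1;
            }
        }
        return q;
    }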
umodsi3.S
     12  p0 = cmp.gtu(r1,r0)  [define]
     16  if (p0) jumpr r31
     26  p0 = cmp.gtu(r2,r0)  [define]
     27  if (!p0.new) r0 = sub(r0,r2)
     32  p0 = cmp.gtu(r2,r0)  [define]
     33  if (!p0.new) r0 = sub(r0,r1)
modsi3.S
     17  p0 = cmp.gtu(r1,r2)  [define]
     21  if (p0) jumpr r31
     32  p0 = cmp.gtu(r2,r0)  [define]
     33  if (!p0.new) r0 = sub(r0,r2)
     38  p0 = cmp.gtu(r2,r0)  [define]
     39  if (!p0.new) r0 = sub(r0,r1)
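umodsi3.S and modsi3.S share the same core as the division above but keep the remainder instead of the quotient; the two trailing conditional subtracts in each excerpt finish the last reduction steps. A C sketch of the unsigned remainder (modsi3 additionally fixes up the result's sign, which the excerpt does not show):

    /* Same shift-subtract core as the division sketch, remainder kept. */
    static unsigned umod_sketch(unsigned n, unsigned d)
    {
        int shift;

        if (d > n || d == 0)       /* early "jumpr r31": nothing to reduce */
            return n;
        shift = __builtin_clz(d) - __builtin_clz(n);
        for (d <<= shift; shift >= 0; shift--, d >>= 1)
            if (n >= d)
                n -= d;            /* "if (!p0.new) r0 = sub(r0,r2)" */
        return n;
    }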
/openbmc/qemu/tests/tcg/hexagon/
hvx_misc.c
     30  void *p0 = buffer0;  [in test_load_tmp(), local]
     49  : : "r"(p0), "r"(p1), "r"(pout)  [in test_load_tmp()]
     51  p0 += sizeof(MMVector);  [in test_load_tmp()]
     95  void *p0 = buffer0;  [in test_load_cur(), local]
    103  : : "r"(p0), "r"(pout) : "v2", "memory");  [in test_load_cur()]
    104  p0 += sizeof(MMVector);  [in test_load_cur()]
    118  void *p0 = buffer0;  [in test_load_aligned(), local]
    122  p0 += offset; /* Create an unaligned address */  [in test_load_aligned()]
    125  : : "r"(p0), "r"(pout) : "v2", "memory");  [in test_load_aligned()]
    134  void *p0 = buffer0;  [in test_load_unaligned(), local]
    [all …]
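These QEMU tests all follow one shape: fill input buffers, run the HVX instruction under test through inline asm (p0, p1, and pout are the walking buffer pointers), and compare the output against a scalar reference. The skeleton with the vector asm replaced by a placeholder scalar loop (sizes and names are illustrative, not the QEMU definitions):

    #include <assert.h>
    #include <string.h>

    #define VLEN 128                     /* stand-in for sizeof(MMVector) */

    static unsigned char in0[VLEN], in1[VLEN], out[VLEN], expect[VLEN];

    static void run_one_test(void)
    {
        for (int i = 0; i < VLEN; i++) {
            in0[i] = (unsigned char)i;
            in1[i] = (unsigned char)(255 - i);
            expect[i] = in0[i] ^ in1[i];     /* scalar reference */
        }
        for (int i = 0; i < VLEN; i++)       /* placeholder for the HVX asm */
            out[i] = in0[i] ^ in1[i];
        assert(memcmp(out, expect, VLEN) == 0);
    }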
v69_hvx.c
     43  void *p0 = buffer0;  [in test_vasrvuhubrndsat(), local]
     56  : : "r"(p0), "r"(p1), "r"(pout)  [in test_vasrvuhubrndsat()]
     58  p0 += sizeof(MMVector) * 2;  [in test_vasrvuhubrndsat()]
     80  void *p0 = buffer0;  [in test_vasrvuhubsat(), local]
     93  : : "r"(p0), "r"(p1), "r"(pout)  [in test_vasrvuhubsat()]
     95  p0 += sizeof(MMVector) * 2;  [in test_vasrvuhubsat()]
    117  void *p0 = buffer0;  [in test_vasrvwuhrndsat(), local]
    130  : : "r"(p0), "r"(p1), "r"(pout)  [in test_vasrvwuhrndsat()]
    132  p0 += sizeof(MMVector) * 2;  [in test_vasrvwuhrndsat()]
    154  void *p0 = buffer0;  [in test_vasrvwuhsat(), local]
    [all …]
test_fibonacci.S
     11  p0 = cmp.gt(r2, #0); if (!p0.new) jump:nt .LBB0_3  [define]
     22  p0 = cmp.gt(r2, r5); if (p0.new) jump:nt .LBB0_2  [define]
     28  p0 = cmp.eq(r3, #144); if (p0.new) jump:t pass  [define]
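Read together, the three predicates sketch the test's control flow: bail out if the bound in r2 is not positive, iterate while the bound exceeds the running value, then pass only if the surviving Fibonacci number is 144. The same logic in C, as our reading of the excerpt rather than a line-for-line translation:

    static int fib_below(int bound)
    {
        int a = 0, b = 1;
        while (b <= bound) {    /* loop guarded by cmp.gt(r2, r5) */
            int t = a + b;
            a = b;
            b = t;
        }
        return a;               /* largest Fibonacci number <= bound */
    }
    /* e.g. fib_below(150) == 144, matching the cmp.eq(r3, #144) check */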
test_bitcnt.S
     20  p0 = cmp.eq(r2, #23); if (p0.new) jump:t test2  [define]
     29  p0 = cmp.eq(r2, #55); if (p0.new) jump:t test3  [define]
     38  p0 = cmp.eq(r2, #1); if (p0.new) jump:t pass  [define]
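The excerpt shows only the expected counts (23, 55, then 1) being compared, not which Hexagon bit-count instructions produced them. For reference, a scalar population count of the kind such results are checked against:

    /* Kernighan-style popcount: each iteration clears the lowest set bit. */
    static int popcount32(unsigned int x)
    {
        int n = 0;
        while (x) {
            x &= x - 1;
            n++;
        }
        return n;
    }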
/openbmc/linux/drivers/gpu/drm/omapdrm/
tcm.h
     52  struct tcm_pt p0;  [member]
    228  slice->p0.y != slice->p1.y &&  [in tcm_slice()]
    229  (slice->p0.x || (slice->p1.x != slice->tcm->width - 1))) {  [in tcm_slice()]
    232  slice->p1.y = (slice->p0.x) ? slice->p0.y : slice->p1.y - 1;  [in tcm_slice()]
    234  parent->p0.x = 0;  [in tcm_slice()]
    235  parent->p0.y = slice->p1.y + 1;  [in tcm_slice()]
    249  area->p0.y <= area->p1.y &&  [in tcm_area_is_valid()]
    252  area->p0.x < area->tcm->width &&  [in tcm_area_is_valid()]
    253  area->p0.x + area->p0.y * area->tcm->width <=  [in tcm_area_is_valid()]
    257  area->p0.x <= area->p1.x));  [in tcm_area_is_valid()]
    [all …]
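In the omapdrm tiler container manager, p0 and p1 are the two corner points of a (possibly one-row) 2D area, and tcm_area_is_valid() checks the ordering and bounds conditions visible above. A hedged sketch of those checks with stand-in types (the real code reaches the container dimensions through area->tcm):

    struct pt { unsigned int x, y; };
    struct area_sketch {
        struct pt p0, p1;            /* top-left, bottom-right */
        unsigned int width, height;  /* container dimensions */
    };

    static int area_is_valid(const struct area_sketch *a)
    {
        return a->p0.y <= a->p1.y &&        /* corners ordered by row */
               a->p0.x < a->width &&
               a->p1.x < a->width &&
               a->p1.y < a->height &&
               (a->p0.y < a->p1.y ||        /* multi-row span, or ... */
                a->p0.x <= a->p1.x);        /* ordered within one row */
    }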
/openbmc/qemu/tests/tcg/aarch64/
mte-1.c
     12  int *p0, *p1, *p2;  [in main(), local]
     16  p0 = alloc_mte_mem(sizeof(*p0));  [in main()]
     18  asm("irg %0,%1,%2" : "=r"(p1) : "r"(p0), "r"(1l));  [in main()]
     19  assert(p1 != p0);  [in main()]
     20  asm("subp %0,%1,%2" : "=r"(c) : "r"(p0), "r"(p1));  [in main()]
     24  asm("ldg %0, [%1]" : "=r"(p2) : "r"(p0), "0"(p0));  [in main()]
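Context for the asm: an AArch64 MTE pointer carries a logical tag in bits 59:56. irg inserts a random tag (so p1 != p0 even though both address the same memory), subp subtracts the untagged addresses, and ldg reads the allocation tag back from memory. The address arithmetic the assertions rely on, as illustrative helpers (not a QEMU or kernel API):

    #include <stdint.h>

    static inline uint64_t mte_strip(uint64_t p)        /* drop the tag field */
    {
        return p & ~((uint64_t)0xf << 56);
    }

    static inline unsigned mte_tag(uint64_t p)          /* extract bits 59:56 */
    {
        return (unsigned)(p >> 56) & 0xf;
    }

    static inline uint64_t mte_retag(uint64_t p, unsigned tag)
    {
        return mte_strip(p) | ((uint64_t)(tag & 0xf) << 56);
    }
    /* irg changes only the tag field, so mte_strip(p1) == mte_strip(p0)
     * while p1 != p0; that is why subp yields 0 in the test. */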
mte-5.c
     19  void *p0, *p1, *p2;  [in main(), local]
     23  p0 = alloc_mte_mem(sizeof(*p0));  [in main()]
     26  asm("irg %0,%1,%2" : "=r"(p1) : "r"(p0), "r"(excl));  [in main()]
     29  asm("irg %0,%1,%2" : "=r"(p2) : "r"(p0), "r"(excl));  [in main()]
     42  asm volatile("ldr %0, [%1]" : "=r"(p0) : "r"(p1 + 12));  [in main()]
mte-3.c
     19  long *p0, *p1, *p2;  [in main(), local]
     23  p0 = alloc_mte_mem(sizeof(*p0));  [in main()]
     26  asm("irg %0,%1,%2" : "=r"(p1) : "r"(p0), "r"(excl));  [in main()]
     29  asm("irg %0,%1,%2" : "=r"(p2) : "r"(p0), "r"(excl));  [in main()]
mte-2.c
     19  int *p0, *p1, *p2;  [in main(), local]
     23  p0 = alloc_mte_mem(sizeof(*p0));  [in main()]
     26  asm("irg %0,%1,%2" : "=r"(p1) : "r"(p0), "r"(excl));  [in main()]
     29  asm("irg %0,%1,%2" : "=r"(p2) : "r"(p0), "r"(excl));  [in main()]
/openbmc/linux/scripts/coccinelle/misc/
minmax.cocci
    172  for p0 in p:
    173      coccilib.report.print_report(p0, "WARNING opportunity for max()")
    179  for p0 in p:
    180      coccilib.org.print_todo(p0, "WARNING opportunity for max()")
    186  for p0 in p:
    187      coccilib.report.print_report(p0, "WARNING opportunity for max()")
    193  for p0 in p:
    194      coccilib.org.print_todo(p0, "WARNING opportunity for max()")
    200  for p0 in p:
    201      coccilib.report.print_report(p0, "WARNING opportunity for min()")
    [all …]
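What the semantic patch flags, shown as C (hypothetical code, not from the kernel tree): an open-coded comparison that the min()/max() macros from <linux/minmax.h> state directly.

    #define min(a, b) ((a) < (b) ? (a) : (b))   /* userspace stand-in for the kernel macro */

    static int pick_len_before(int want, int cap)
    {
        return want < cap ? want : cap;   /* flagged: "WARNING opportunity for min()" */
    }

    static int pick_len_after(int want, int cap)
    {
        return min(want, cap);            /* the suggested form */
    }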
doubleinit.cocci
     19  position p0,p;
     23  struct I s =@p0 { ..., .fld@p = E, ...};
     27  position r.p0,p;
     31  struct I s =@p0 { ..., .fld@p = E, ...};
     34  p0 << r.p0;
     41  cocci.print_main(fld,p0)
     46  p0 << r.p0;
     54  coccilib.report.print_report(p0[0],msg)
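The defect class this script reports, as a C example (hypothetical): a second designated initializer for the same field silently overrides the first, which is almost always a copy-paste mistake.

    struct opts {
        int fld;
        int other;
    };

    static struct opts o = {
        .fld   = 1,
        .other = 2,
        .fld   = 3,   /* reported: .fld was already initialized above */
    };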
/openbmc/linux/arch/ia64/lib/
memset.S
     69  cmp.eq p_scr, p0 = cnt, r0
     81  cmp.ne p_unalgn, p0 = tmp, r0 //
     84  cmp.gt p_scr, p0 = 16, cnt // is it a minimalistic task?
    118  cmp.gt p_scr, p0 = tmp, cnt // is it a minimalistic task?
    137  cmp.gt p_scr, p0 = PREF_AHEAD, linecnt // check against actual value
    186  cmp.lt p_scr, p0 = ptr9, ptr1 // do we need more prefetching?
    194  cmp.le p_scr, p0 = 8, cnt // just a few bytes left ?
    207  cmp.gt p_scr, p0 = PREF_AHEAD, linecnt // check against actual value
    240  cmp.lt p_scr, p0 = ptr9, ptr1 // do we need more prefetching?
    248  cmp.gt p_scr, p0 = 8, cnt // just a few bytes left ?
    [all …]
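Why p0 appears as the second target of every cmp above (and throughout ivt.S and fsys.S below): an IA-64 compare writes two complementary predicate registers, and p0 is hardwired to read as true with writes discarded, so naming it as one output keeps only the other half. One line rendered as scalar C:

    /* cmp.gt p_scr, p0 = 16, cnt  ->  p_scr = (16 > cnt); the p0 half
     * of the result is thrown away. */
    static int is_minimalistic_task(unsigned long cnt)
    {
        return 16 > cnt;
    }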
/openbmc/linux/drivers/scsi/qla4xxx/
ql4_dbg.c
    106  offsetof(struct isp_reg, u2.isp4022.p0.ext_hw_conf),  [in qla4xxx_dump_registers()]
    107  readw(&ha->reg->u2.isp4022.p0.ext_hw_conf));  [in qla4xxx_dump_registers()]
    109  offsetof(struct isp_reg, u2.isp4022.p0.port_ctrl),  [in qla4xxx_dump_registers()]
    110  readw(&ha->reg->u2.isp4022.p0.port_ctrl));  [in qla4xxx_dump_registers()]
    112  offsetof(struct isp_reg, u2.isp4022.p0.port_status),  [in qla4xxx_dump_registers()]
    113  readw(&ha->reg->u2.isp4022.p0.port_status));  [in qla4xxx_dump_registers()]
    115  (uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.gp_out),  [in qla4xxx_dump_registers()]
    116  readw(&ha->reg->u2.isp4022.p0.gp_out));  [in qla4xxx_dump_registers()]
    118  (uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.gp_in),  [in qla4xxx_dump_registers()]
    119  readw(&ha->reg->u2.isp4022.p0.gp_in));  [in qla4xxx_dump_registers()]
    [all …]
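The dump pairs each register's byte offset inside the mapped isp_reg layout (via offsetof) with its live value read through the kernel's 16-bit MMIO accessor readw(). The shape of one such line with hypothetical stand-in types (a plain argument stands in for the MMIO read):

    #include <stddef.h>
    #include <stdio.h>

    struct port_regs {                /* stand-in for u2.isp4022.p0 */
        unsigned short ext_hw_conf;
        unsigned short port_ctrl;
        unsigned short port_status;
    };

    static void dump_one(const char *name, size_t off, unsigned short val)
    {
        printf("0x%02zx %-12s = 0x%04x\n", off, name, val);
    }

    /* usage, given a mapped struct port_regs *regs:
     *   dump_one("ext_hw_conf",
     *            offsetof(struct port_regs, ext_hw_conf), regs->ext_hw_conf);
     */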
/openbmc/u-boot/arch/nds32/cpu/n1213/
start.S
    321  andi $p0, $t0, ICAC_MEM_KBF_ISZ
    323  ! if $p0=0, then no I CAC existed
    324  beqz $p0, end_flush_icache
    326  ! get $p0 the index of I$ block
    327  srli $p0, $p0, 6
    330  addi $t1, $p0, 2
    356  andi $p0, $t0, DCAC_MEM_KBF_DSZ
    358  ! if $p0=0, then no D CAC existed
    359  beqz $p0, end_flush_dcache
    361  ! get $p0 the index of D$ block
    [all …]
/openbmc/linux/arch/arm/boot/dts/nuvoton/
nuvoton-npcm750-runbmc-olympus.dts
    430  g1a-p0-0-hog {
    436  g1a-p0-1-hog {
    442  g1a-p0-2-hog {
    448  g1a-p0-3-hog {
    454  g1a-p0-4-hog {
    460  g1a-p0-5-hog {
    466  g1a-p0-6-hog {
    527  g1b-p0-0-hog {
    533  g1b-p0-1-hog {
    539  g1b-p0-2-hog {
    [all …]
/openbmc/linux/arch/ia64/kernel/
ivt.S
    129  cmp.ne p8,p0=r18,r26
    269  tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared?
    272  ITC_I(p0, r18, r19)
    284  cmp.ne p7,p0=r18,r19
    313  tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared?
    316  ITC_D(p0, r18, r19)
    328  cmp.ne p7,p0=r18,r19
    343  MOV_FROM_IPSR(p0, r21)
    350  cmp.gt p8,p0=6,r22 // user mode
    363  cmp.ne p8,p0=r0,r23 // psr.cpl != 0?
    [all …]
fsys.S
     89  cmp.ne p8,p0=0,r9
    118  cmp.ne p8,p0=0,r9
    146  tnat.nz p6,p0 = r33 // guard against NaT argument
    195  tnat.nz p6,p0 = r31 // guard against Nat argument
    210  cmp.ne p6, p0 = 0, r2 // Fallback if work is scheduled
    230  (p8) cmp.ne p13,p0 = r2,r0 // need itc_jitter compensation, set p13
    233  (p9) cmp.eq p13,p0 = 0,r30 // if mmio_ptr, clear p13 jitter control
    252  (p7) cmp.ne p7,p0 = r25,r3 // if cmpxchg not successful
    272  cmp4.ne p7,p0 = r28,r10
    283  cmp.ge p6,p0 = r8,r2
    [all …]