
Searched refs:L1 (Results 1 – 25 of 186) sorted by relevance


/openbmc/qemu/tests/unit/
test-hbitmap.c
19 #define L1 BITS_PER_LONG macro
20 #define L2 (BITS_PER_LONG * L1)
233 hbitmap_test_init(data, L1, 0); in test_hbitmap_iter_empty()
242 hbitmap_test_check(data, L1 - 1); in test_hbitmap_iter_partial()
243 hbitmap_test_check(data, L1); in test_hbitmap_iter_partial()
244 hbitmap_test_check(data, L1 * 2 - 1); in test_hbitmap_iter_partial()
248 hbitmap_test_check(data, L2 + L1); in test_hbitmap_iter_partial()
249 hbitmap_test_check(data, L2 + L1 * 2 - 1); in test_hbitmap_iter_partial()
253 hbitmap_test_check(data, L2 * 2 + L1); in test_hbitmap_iter_partial()
254 hbitmap_test_check(data, L2 * 2 + L1 * 2 - 1); in test_hbitmap_iter_partial()
[all …]
/openbmc/qemu/tests/qemu-iotests/
080.out
40 == Invalid L1 table ==
42 qemu-io: can't open device TEST_DIR/t.qcow2: Active L1 table too large
43 qemu-io: can't open device TEST_DIR/t.qcow2: Active L1 table too large
44 qemu-io: can't open device TEST_DIR/t.qcow2: Active L1 table offset invalid
45 qemu-io: can't open device TEST_DIR/t.qcow2: Active L1 table offset invalid
47 == Invalid L1 table (with internal snapshot in the image) ==
49 qemu-img: Could not open 'TEST_DIR/t.IMGFMT': L1 table is too small
62 == Invalid snapshot L1 table offset ==
66 qemu-img: Failed to load snapshot: Snapshot L1 table offset invalid
67 qemu-img: Snapshot L1 table offset invalid
[all …]
029.out
3 Test loading internal snapshots where the L1 table of the snapshot
4 is smaller than the current L1 table.
24 qcow2_snapshot_load_tmp() should take the L1 size from the snapshot
029
53 echo Test loading internal snapshots where the L1 table of the snapshot
54 echo is smaller than the current L1 table.
/openbmc/linux/Documentation/virt/kvm/x86/
running-nested-guests.rst
19 | L1 (Guest Hypervisor) |
33 - L1 – level-1 guest; a VM running on L0; also called the "guest
36 - L2 – level-2 guest; a VM running on L1, this is the "nested guest"
45 metal, running the LPAR hypervisor), L1 (host hypervisor), L2
49 L1, and L2) for all architectures; and will largely focus on
148 able to start an L1 guest with::
175 2. The guest hypervisor (L1) must be provided with the ``sie`` CPU
179 3. Now the KVM module can be loaded in the L1 (guest hypervisor)::
187 Migrating an L1 guest, with a *live* nested guest in it, to another
191 On AMD systems, once an L1 guest has started an L2 guest, the L1 guest
[all …]
/openbmc/linux/arch/arc/kernel/
entry-compact.S
152 ; if L2 IRQ interrupted a L1 ISR, disable preemption
154 ; This is to avoid a potential L1-L2-L1 scenario
155 ; -L1 IRQ taken
156 ; -L2 interrupts L1 (before L1 ISR could run)
160 ; But both L1 and L2 re-enabled, so another L1 can be taken
161 ; while prev L1 is still unserviced
165 ; L2 interrupting L1 implies both L2 and L1 active
167 ; need to check STATUS32_L2 to determine if L1 was active
170 bbit0 r9, STATUS_A1_BIT, 1f ; L1 not active when L2 IRQ, so normal
320 ; use the same priority as rtie: EXCPN, L2 IRQ, L1 IRQ, None
[all …]
/openbmc/linux/arch/arm/mm/
proc-xsc3.S
68 1: mcr p15, 0, \rd, c7, c14, 2 @ clean/invalidate L1 D line
113 mcr p15, 0, ip, c7, c7, 0 @ invalidate L1 caches and BTB
173 mcrne p15, 0, ip, c7, c5, 0 @ invalidate L1 I cache and BTB
196 mcrne p15, 0, r0, c7, c5, 1 @ invalidate L1 I line
197 mcr p15, 0, r0, c7, c14, 1 @ clean/invalidate L1 D line
224 1: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line
229 mcr p15, 0, r0, c7, c5, 0 @ invalidate L1 I cache and BTB
245 1: mcr p15, 0, r0, c7, c14, 1 @ clean/invalidate L1 D line
250 mcr p15, 0, r0, c7, c5, 0 @ invalidate L1 I cache and BTB
269 mcrne p15, 0, r0, c7, c10, 1 @ clean L1 D line
[all …]
/openbmc/linux/security/apparmor/include/
label.h
163 #define next_comb(I, L1, L2) \ argument
174 #define label_for_each_comb(I, L1, L2, P1, P2) \ argument
176 ((P1) = (L1)->vec[(I).i]) && ((P2) = (L2)->vec[(I).j]); \
177 (I) = next_comb(I, L1, L2))
179 #define fn_for_each_comb(L1, L2, P1, P2, FN) \ argument
183 label_for_each_comb(i, (L1), (L2), (P1), (P2)) { \
243 #define fn_for_each2_XXX(L1, L2, P, FN, ...) \ argument
247 label_for_each ## __VA_ARGS__(i, (L1), (L2), (P)) { \
253 #define fn_for_each_in_merge(L1, L2, P, FN) \ argument
254 fn_for_each2_XXX((L1), (L2), P, FN, _in_merge)
[all …]
perms.h
183 #define xcheck_ns_labels(L1, L2, FN, args...) \ argument
186 fn_for_each((L1), __p1, FN(__p1, (L2), args)); \
190 #define xcheck_labels_profiles(L1, L2, FN, args...) \ argument
191 xcheck_ns_labels((L1), (L2), xcheck_ns_profile_label, (FN), args)
193 #define xcheck_labels(L1, L2, P, FN1, FN2) \ argument
194 xcheck(fn_for_each((L1), (P), (FN1)), fn_for_each((L2), (P), (FN2)))
/openbmc/linux/arch/powerpc/perf/
power8-pmu.c
133 CACHE_EVENT_ATTR(L1-dcache-load-misses, PM_LD_MISS_L1);
134 CACHE_EVENT_ATTR(L1-dcache-loads, PM_LD_REF_L1);
136 CACHE_EVENT_ATTR(L1-dcache-prefetches, PM_L1_PREF);
137 CACHE_EVENT_ATTR(L1-dcache-store-misses, PM_ST_MISS_L1);
138 CACHE_EVENT_ATTR(L1-icache-load-misses, PM_L1_ICACHE_MISS);
139 CACHE_EVENT_ATTR(L1-icache-loads, PM_INST_FROM_L1);
140 CACHE_EVENT_ATTR(L1-icache-prefetches, PM_IC_PREF_WRITE);
power9-pmu.c
177 CACHE_EVENT_ATTR(L1-dcache-load-misses, PM_LD_MISS_L1_FIN);
178 CACHE_EVENT_ATTR(L1-dcache-loads, PM_LD_REF_L1);
179 CACHE_EVENT_ATTR(L1-dcache-prefetches, PM_L1_PREF);
180 CACHE_EVENT_ATTR(L1-dcache-store-misses, PM_ST_MISS_L1);
181 CACHE_EVENT_ATTR(L1-icache-load-misses, PM_L1_ICACHE_MISS);
182 CACHE_EVENT_ATTR(L1-icache-loads, PM_INST_FROM_L1);
183 CACHE_EVENT_ATTR(L1-icache-prefetches, PM_IC_PREF_WRITE);
power10-pmu.c
133 CACHE_EVENT_ATTR(L1-dcache-load-misses, PM_LD_MISS_L1);
134 CACHE_EVENT_ATTR(L1-dcache-loads, PM_LD_REF_L1);
135 CACHE_EVENT_ATTR(L1-dcache-prefetches, PM_LD_PREFETCH_CACHE_LINE_MISS);
136 CACHE_EVENT_ATTR(L1-dcache-store-misses, PM_ST_MISS_L1);
137 CACHE_EVENT_ATTR(L1-icache-load-misses, PM_L1_ICACHE_MISS);
138 CACHE_EVENT_ATTR(L1-icache-loads, PM_INST_FROM_L1);
139 CACHE_EVENT_ATTR(L1-icache-prefetches, PM_IC_PREF_REQ);
/openbmc/phosphor-inventory-manager/
utils.hpp
150 template <typename L1, typename L2, typename R1, typename R2>
151 bool operator()(const std::pair<L1, L2>& l, in operator ()()
168 template <typename L1, typename L2, typename R>
169 bool operator()(const std::pair<L1, L2>& l, const R& r) const in operator ()()
/openbmc/linux/arch/hexagon/lib/
memset.S
159 if (r2==#0) jump:nt .L1
186 if (p1) jump .L1
197 if (p0.new) jump:nt .L1
208 if (p0.new) jump:nt .L1
284 .L1: label
/openbmc/qemu/docs/interop/
qed_spec.txt
9 … regular cluster may be a '''data cluster''', an '''L2''', or an '''L1 table'''. L1 and L2 tables…
20 uint32_t table_size; /* for L1 and L2 tables, in clusters */
43 * ''l1_table_offset'' is the offset of the first byte of the L1 table in the image file and must be…
69 | L1 table |
82 …ge size must be less than or equal to the maximum possible size of clusters rooted by the L1 table:
85 L1, L2, and data cluster offsets must be aligned to header.cluster_size. The following offsets hav…
114 | L1 index | L2 index | byte offset |
134 …ce. It is an inconsistency to have a cluster referenced more than once by L1 or L2 tables. A clu…
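The qed_spec.txt lines above describe how a guest offset decomposes into an L1 index, an L2 index and a byte offset inside a data cluster. A minimal C sketch of that split, illustrative only (not QEMU code) and assuming 8-byte table entries plus the cluster_size/table_size header fields quoted above:

    /* Minimal sketch (not QEMU code): split a guest offset into the
     * L1 index / L2 index / byte offset triple that qed_spec.txt
     * describes.  Assumes 8-byte table entries; names are illustrative. */
    #include <stdint.h>

    struct qed_geometry {
        uint32_t cluster_size;  /* bytes per cluster */
        uint32_t table_size;    /* clusters per L1/L2 table */
    };

    static void qed_split_offset(const struct qed_geometry *g, uint64_t pos,
                                 uint64_t *l1_index, uint64_t *l2_index,
                                 uint64_t *byte_offset)
    {
        /* Each table spans table_size clusters of 8-byte entries. */
        uint64_t entries = (uint64_t)g->table_size * g->cluster_size / 8;
        uint64_t cluster_index = pos / g->cluster_size;

        *byte_offset = pos % g->cluster_size;  /* offset inside the data cluster */
        *l2_index = cluster_index % entries;   /* entry within the L2 table */
        *l1_index = cluster_index / entries;   /* entry within the L1 table */
    }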
parallels.rst
217 The number of entries in the L1 table of the bitmap.
219 variable: L1 offset table (l1_table), size: 8 * l1_size bytes
223 saved in the L1 offset table specified by the feature extension. Each L1 table
226 Given an offset in bytes into the bitmap data, corresponding L1 entry is::
230 If an L1 table entry is 0, all bits in the corresponding cluster of the bitmap
233 If an L1 table entry is 1, all bits in the corresponding cluster of the bitmap
236 If an L1 table entry is not 0 or 1, it contains the corresponding cluster
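The parallels.rst lines above give three cases for a dirty-bitmap L1 table entry: 0 means every bit in the corresponding cluster is 0, 1 means every bit is 1, anything else refers to a stored cluster. A tiny illustrative C sketch of just that classification, with made-up names and with the mapping from a non-special entry to a file offset deliberately left to the spec:

    /* Illustrative sketch (not QEMU code): classify a dirty-bitmap L1
     * entry using the three cases parallels.rst lists above. */
    #include <stdint.h>

    enum l1_entry_kind {
        L1_ALL_ZEROES,  /* entry == 0: every bit in that cluster is 0 */
        L1_ALL_ONES,    /* entry == 1: every bit in that cluster is 1 */
        L1_STORED,      /* otherwise: entry refers to a stored cluster */
    };

    static enum l1_entry_kind classify_l1_entry(uint64_t entry)
    {
        if (entry == 0) {
            return L1_ALL_ZEROES;
        }
        if (entry == 1) {
            return L1_ALL_ONES;
        }
        return L1_STORED;
    }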
/openbmc/linux/Documentation/locking/
lockdep-design.rst
22 dependency can be understood as lock order, where L1 -> L2 suggests that
23 a task is attempting to acquire L2 while holding L1. From lockdep's
24 perspective, the two locks (L1 and L2) are not necessarily related; that
145 <L1> -> <L2>
146 <L2> -> <L1>
521 L1 -> L2
523 , which means lockdep has seen L1 held before L2 held in the same context at runtime.
524 And in deadlock detection, we care whether we could get blocked on L2 with L1 held,
525 IOW, whether there is a locker L3 that L1 blocks L3 and L2 gets blocked by L3. So
526 we only care about 1) what L1 blocks and 2) what blocks L2. As a result, we can combine
[all …]
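The lockdep-design.rst lines above define an L1 -> L2 dependency as acquiring L2 while L1 is held, and the quoted <L1> -> <L2> / <L2> -> <L1> pair is the circular ordering lockdep reports as a potential deadlock. A toy userspace illustration of how the two orders arise, using pthread mutexes rather than kernel locks and made-up function names:

    /* Toy illustration (not kernel code): "L1 -> L2" simply means
     * "L2 is acquired while L1 is held"; running both paths below
     * creates the circular dependency quoted above. */
    #include <pthread.h>

    static pthread_mutex_t L1 = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t L2 = PTHREAD_MUTEX_INITIALIZER;

    static void path_a(void)      /* records L1 -> L2 */
    {
        pthread_mutex_lock(&L1);
        pthread_mutex_lock(&L2);  /* taken while L1 is held */
        pthread_mutex_unlock(&L2);
        pthread_mutex_unlock(&L1);
    }

    static void path_b(void)      /* records L2 -> L1: closes the cycle */
    {
        pthread_mutex_lock(&L2);
        pthread_mutex_lock(&L1);  /* taken while L2 is held */
        pthread_mutex_unlock(&L1);
        pthread_mutex_unlock(&L2);
    }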
/openbmc/linux/arch/m68k/fpsp040/
setox.S
104 | 3.1 R := X + N*L1, where L1 := single-precision(-log2/64).
105 | 3.2 R := R + N*L2, L2 := extended-precision(-log2/64 - L1).
106 | Notes: a) The way L1 and L2 are chosen ensures L1+L2 approximate
108 | b) N*L1 is exact because N is no longer than 22 bits and
109 | L1 is no longer than 24 bits.
110 | c) The calculation X+N*L1 is also exact due to cancellation.
111 | Thus, R is practically X+N(L1+L2) to full 64 bits.
505 fmuls #0xBC317218,%fp0 | ...N * L1, L1 = lead(-log2/64)
506 fmulx L2,%fp2 | ...N * L2, L1+L2 = -log2/64
507 faddx %fp1,%fp0 | ...X + N*L1
[all …]
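The setox.S comments above describe a two-constant (Cody-Waite style) argument reduction: L1 is -log2/64 rounded to a short precision so that N*L1 is exact, and L2 carries the remainder so that R = X + N*L1 + N*L2 approximates X - N*log2/64 to extra precision. A rough double-precision C sketch of the same idea, not a transcription of the 68040 FPSP code:

    /* Rough sketch of the reduction scheme described above; the real
     * routine works in extended precision on the 68040 FPU. */
    #include <math.h>

    #define LOG2 0.693147180559945309417  /* log(2) */

    static double reduce_for_exp(double x, int *n_out)
    {
        const float  L1 = (float)(-LOG2 / 64.0);      /* short, so N*L1 is exact */
        const double L2 = -LOG2 / 64.0 - (double)L1;  /* tail of -log2/64 */
        int n = (int)nearbyint(x * (64.0 / LOG2));    /* N = round(X * 64/log2) */
        double r = (x + n * (double)L1) + n * L2;     /* R = X + N*L1 + N*L2 */

        *n_out = n;
        return r;  /* exp(x) ~= 2^(n/64) * exp(r), |r| <= log2/128 */
    }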
/openbmc/linux/drivers/pci/pcie/
Kconfig
73 state L0/L0s/L1.
99 Enable PCI Express ASPM L0s and L1 where possible, even if the
106 Same as PCIEASPM_POWERSAVE, except it also enables L1 substates where
107 possible. This would result in higher power savings while staying in L1
114 Disable PCI Express ASPM L0s and L1, even if the BIOS enabled them.
/openbmc/linux/arch/m68k/lib/
divsi3.S
95 jpl L1
102 L1: movel sp@(8), d0 /* d0 = dividend */ label
/openbmc/linux/Documentation/devicetree/bindings/media/
st-rc.txt
10 - rx-mode: can be "infrared" or "uhf". This property specifies the L1
13 - tx-mode: should be "infrared". This property specifies the L1
/openbmc/linux/Documentation/translations/zh_CN/arch/arm64/
memory.txt
90 | | +---------------------> [38:30] L1 index
105 | +-------------------------------> [47:42] L1 index
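The two memory.txt lines above (translated from the Chinese arm64 document) place the L1 index at bits [38:30] of the virtual address in one granule layout and at [47:42] in another. A trivial C sketch that extracts those fields, with the bit ranges taken directly from the quoted lines:

    /* Tiny sketch: pull the "L1 index" fields quoted above out of a
     * virtual address.  Bit ranges come from the two translated lines. */
    #include <stdint.h>

    static inline unsigned l1_index_38_30(uint64_t va)
    {
        return (unsigned)((va >> 30) & 0x1ff);  /* bits [38:30]: 9 bits */
    }

    static inline unsigned l1_index_47_42(uint64_t va)
    {
        return (unsigned)((va >> 42) & 0x3f);   /* bits [47:42]: 6 bits */
    }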
/openbmc/linux/arch/alpha/boot/
bootp.c
65 #define L1 ((unsigned long *) 0x200802000) macro
77 pcb_va->ptbr = L1[1] >> 32; in pal_init()
main.c
59 #define L1 ((unsigned long *) 0x200802000) macro
71 pcb_va->ptbr = L1[1] >> 32; in pal_init()
/openbmc/linux/Documentation/driver-api/
edac.rst
155 - CPU caches (L1 and L2)
165 For example, a cache could be composed of L1, L2 and L3 levels of cache.
166 Each CPU core would have its own L1 cache, while sharing L2 and maybe L3
174 cpu/cpu0/.. <L1 and L2 block directory>
175 /L1-cache/ce_count
179 cpu/cpu1/.. <L1 and L2 block directory>
180 /L1-cache/ce_count
186 the L1 and L2 directories would be "edac_device_block's"
