/openbmc/linux/fs/minix/

itree_common.c
  in get_block():
    158  Indirect *partial;
    166  partial = get_branch(inode, depth, offsets, chain, &err);
    169  if (!partial) {
    173  partial = chain+depth-1; /* the whole chain */
    180  while (partial > chain) {
    181  brelse(partial->bh);
    182  partial--;
    196  left = (chain + depth) - partial;
    197  err = alloc_branch(inode, left, offsets+(partial-chain), partial);
    201  if (splice_branch(inode, chain, partial, left) < 0)
    …

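The same partial-chain idiom recurs in the sysv, ext2 and ext4 mappers further down: chain[] holds one triple per indirection level, get_branch() returns NULL when every level resolved, otherwise a pointer to the deepest valid triple. From there, (chain + depth) - partial is the number of levels still to allocate, and error paths walk partial back toward chain, releasing the buffers that were taken. A standalone sketch of that bookkeeping, with simplified stand-in types (struct indirect and release_buffer() are illustrative, not the kernel's Indirect/brelse()):

    /* Sketch only: simplified stand-ins for the kernel types. */
    struct indirect {
        unsigned key;   /* block number at this level, 0 = missing */
        void *bh;       /* buffer backing this level */
    };

    extern void release_buffer(void *bh);   /* stand-in for brelse() */

    static void handle_partial(struct indirect chain[], int depth,
                               struct indirect *partial)
    {
        if (!partial)
            return;     /* whole chain present: the block already exists */

        /* levels that still need to be allocated, cf. alloc_branch() */
        int left = (int)((chain + depth) - partial);
        (void)left;

        /* cleanup path: drop buffers held for the levels we did read */
        while (partial > chain) {
            release_buffer(partial->bh);
            partial--;
        }
    }
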
/openbmc/linux/include/crypto/

sha1_base.h
  in sha1_base_do_update():
    41  unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
    45  if (unlikely((partial + len) >= SHA1_BLOCK_SIZE)) {
    48  if (partial) {
    49  int p = SHA1_BLOCK_SIZE - partial;
    51  memcpy(sctx->buffer + partial, data, p);
    65  partial = 0;
    68  memcpy(sctx->buffer + partial, data, len);
  in sha1_base_do_finalize():
    79  unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
    81  sctx->buffer[partial++] = 0x80;
    82  if (partial > bit_offset) {
    …

sm3_base.h
  in sm3_base_do_update():
    44  unsigned int partial = sctx->count % SM3_BLOCK_SIZE;
    48  if (unlikely((partial + len) >= SM3_BLOCK_SIZE)) {
    51  if (partial) {
    52  int p = SM3_BLOCK_SIZE - partial;
    54  memcpy(sctx->buffer + partial, data, p);
    68  partial = 0;
    71  memcpy(sctx->buffer + partial, data, len);
  in sm3_base_do_finalize():
    82  unsigned int partial = sctx->count % SM3_BLOCK_SIZE;
    84  sctx->buffer[partial++] = 0x80;
    85  if (partial > bit_offset) {
    …

sha256_base.h
  in lib_sha256_base_do_update():
    42  unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
    46  if (unlikely((partial + len) >= SHA256_BLOCK_SIZE)) {
    49  if (partial) {
    50  int p = SHA256_BLOCK_SIZE - partial;
    52  memcpy(sctx->buf + partial, data, p);
    66  partial = 0;
    69  memcpy(sctx->buf + partial, data, len);
  in lib_sha256_base_do_finalize():
    89  unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
    91  sctx->buf[partial++] = 0x80;
    92  if (partial > bit_offset) {
    …

sha512_base.h
  in sha512_base_do_update():
    62   unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;
    68   if (unlikely((partial + len) >= SHA512_BLOCK_SIZE)) {
    71   if (partial) {
    72   int p = SHA512_BLOCK_SIZE - partial;
    74   memcpy(sctx->buf + partial, data, p);
    88   partial = 0;
    91   memcpy(sctx->buf + partial, data, len);
  in sha512_base_do_finalize():
    102  unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;
    104  sctx->buf[partial++] = 0x80;
    105  if (partial > bit_offset) {
    …

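All four headers above instantiate one buffering idiom: bytes that do not fill a whole block are staged in the context buffer, an update first tops up that staging area and flushes it, then hashes whole blocks straight from the caller's data, and whatever remains is staged for the next call (the finalize excerpts then pad the staged block, starting with the 0x80 byte). A minimal standalone sketch of that shape; BLOCK_SIZE, struct ctx, block_fn_t and do_update are placeholder names, since the kernel versions are templated per algorithm:

    #include <string.h>

    #define BLOCK_SIZE 64                   /* e.g. SHA-1/SHA-256/SM3; SHA-512 uses 128 */

    struct ctx {
        unsigned long long count;           /* total bytes hashed so far */
        unsigned char buffer[BLOCK_SIZE];   /* holds a partial trailing block */
    };

    typedef void (*block_fn_t)(struct ctx *sctx, const unsigned char *src, int blocks);

    static void do_update(struct ctx *sctx, const unsigned char *data,
                          unsigned int len, block_fn_t block_fn)
    {
        unsigned int partial = sctx->count % BLOCK_SIZE;

        sctx->count += len;

        if ((partial + len) >= BLOCK_SIZE) {
            int blocks;

            if (partial) {
                int p = BLOCK_SIZE - partial;

                /* top up the staged partial block and flush it */
                memcpy(sctx->buffer + partial, data, p);
                data += p;
                len -= p;
                block_fn(sctx, sctx->buffer, 1);
            }

            /* hash all remaining whole blocks straight from the input */
            blocks = len / BLOCK_SIZE;
            len %= BLOCK_SIZE;
            if (blocks) {
                block_fn(sctx, data, blocks);
                data += blocks * BLOCK_SIZE;
            }
            partial = 0;
        }

        /* stage whatever is left for the next update or for finalize */
        if (len)
            memcpy(sctx->buffer + partial, data, len);
    }

The sparc, powerpc, octeon and padlock glue code excerpted below is the same pattern written out per driver, differing only in buffer names and in whether the modulo is spelled % or & 0x3f.
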
/openbmc/linux/fs/sysv/

itree.c
  in get_block():
    213  Indirect *partial;
    221  partial = get_branch(inode, depth, offsets, chain, &err);
    224  if (!partial) {
    229  partial = chain+depth-1; /* the whole chain */
    236  while (partial > chain) {
    237  brelse(partial->bh);
    238  partial--;
    252  left = (chain + depth) - partial;
    253  err = alloc_branch(inode, left, offsets+(partial-chain), partial);
    257  if (splice_branch(inode, chain, partial, left) < 0)
    …

/openbmc/linux/fs/ext4/

indirect.c
  in ext4_find_goal():
    244  * @partial: pointer to the last triple within a chain
    252  Indirect *partial)
    260  goal = ext4_find_near(inode, partial);

    316  * we had read the existing part of chain and partial points to the last

  in ext4_ind_map_blocks():
    538  Indirect *partial;
    554  partial = ext4_get_branch(inode, depth, offsets, chain, &err);
    557  if (!partial) {
    580  * Count number blocks in a subtree under 'partial'. At each
    586  for (i = partial - chain + 1; i < depth; i++)
    620  ar.goal = ext4_find_goal(inode, map->m_lblk, partial);
    …

/openbmc/linux/include/linux/

slub_def.h
    21   FREE_ADD_PARTIAL,      /* Freeing moves slab to partial list */
    23   ALLOC_FROM_PARTIAL,    /* Cpu slab acquired from node partial list */
    38   CPU_PARTIAL_ALLOC,     /* Used cpu partial on alloc */
    39   CPU_PARTIAL_FREE,      /* Refill cpu partial on free */
    40   CPU_PARTIAL_NODE,      /* Refill cpu partial from node partial */
    41   CPU_PARTIAL_DRAIN,     /* Drain cpu partial to node partial */
    60   struct slab *partial;  /* Partially allocated frozen slabs */
    70   #define slub_percpu_partial(c)  ((c)->partial)
    102  /* Used for retrieving partial slabs, etc. */
    110  /* Number of per cpu partial objects to keep around */
    …

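The CPU_PARTIAL_* statistics count traffic on the per-CPU partial list declared at line 60: frees park partially-used slabs per CPU, allocations consume them first, and overflow drains them back to the shared per-node list. A toy model of that flow; all names here (toy_slab, toy_cpu_cache, CPU_PARTIAL_MAX and the helpers) are illustrative stand-ins, not SLUB's real structures or limits:

    struct toy_slab { struct toy_slab *next; int free_objects; };

    struct toy_cpu_cache {
        struct toy_slab *partial;   /* cf. kmem_cache_cpu::partial above */
        int nr_partial;
    };

    enum { CPU_PARTIAL_MAX = 4 };   /* cf. the per-cache cpu_partial limit */

    /* On free: a slab that just gained a free object is parked per CPU
     * (CPU_PARTIAL_FREE); past the limit the whole private list drains
     * to the node list (CPU_PARTIAL_DRAIN). */
    static void toy_put_partial(struct toy_cpu_cache *c, struct toy_slab *s,
                                void (*drain_to_node)(struct toy_slab *))
    {
        if (c->nr_partial >= CPU_PARTIAL_MAX) {
            struct toy_slab *list = c->partial;

            c->partial = 0;
            c->nr_partial = 0;
            while (list) {
                struct toy_slab *next = list->next;
                drain_to_node(list);
                list = next;
            }
        }
        s->next = c->partial;
        c->partial = s;
        c->nr_partial++;
    }

    /* On alloc: take from the private list first (CPU_PARTIAL_ALLOC),
     * falling back to the node list (ALLOC_FROM_PARTIAL) when empty. */
    static struct toy_slab *toy_get_partial(struct toy_cpu_cache *c)
    {
        struct toy_slab *s = c->partial;

        if (s) {
            c->partial = s->next;
            c->nr_partial--;
        }
        return s;
    }
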
/openbmc/linux/drivers/crypto/

padlock-sha.c
  in padlock_sha1_update_nano():
    284  unsigned int partial, done;
    291  partial = sctx->count & 0x3f;
    297  if ((partial + len) >= SHA1_BLOCK_SIZE) {
    300  if (partial) {
    301  done = -partial;
    302  memcpy(sctx->buffer + partial, data,
    321  partial = 0;
    324  memcpy(sctx->buffer + partial, src, len - done);
  in padlock_sha1_final_nano():
    332  unsigned int partial, padlen;
    339  partial = state->count & 0x3f;
    …

/openbmc/linux/arch/arm64/crypto/

sha3-ce-glue.c
  in sha3_update():
    43  if ((sctx->partial + len) >= sctx->rsiz) {
    46  if (sctx->partial) {
    47  int p = sctx->rsiz - sctx->partial;
    49  memcpy(sctx->buf + sctx->partial, data, p);
    56  sctx->partial = 0;
    75  memcpy(sctx->buf + sctx->partial, data, len);
    76  sctx->partial += len;
  in sha3_final():
    91  sctx->buf[sctx->partial++] = 0x06;
    92  memset(sctx->buf + sctx->partial, 0, sctx->rsiz - sctx->partial);

/openbmc/linux/arch/powerpc/crypto/

sha1.c
  in powerpc_sha1_update():
    30  unsigned int partial, done;
    33  partial = sctx->count & 0x3f;
    38  if ((partial + len) > 63) {
    40  if (partial) {
    41  done = -partial;
    42  memcpy(sctx->buffer + partial, data, done + 64);
    52  partial = 0;
    54  memcpy(sctx->buffer + partial, src, len - done);

/openbmc/linux/arch/sparc/crypto/

sha1_glue.c
  in __sha1_sparc64_update():
    31  unsigned int len, unsigned int partial)
    36  if (partial) {
    37  done = SHA1_BLOCK_SIZE - partial;
    38  memcpy(sctx->buffer + partial, data, done);
  in sha1_sparc64_update():
    55  unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
    58  if (partial + len < SHA1_BLOCK_SIZE) {
    60  memcpy(sctx->buffer + partial, data, len);
    62  __sha1_sparc64_update(sctx, data, len, partial);

md5_glue.c
  in __md5_sparc64_update():
    47  unsigned int len, unsigned int partial)
    52  if (partial) {
    53  done = MD5_HMAC_BLOCK_SIZE - partial;
    54  memcpy((u8 *)sctx->block + partial, data, done);
  in md5_sparc64_update():
    71  unsigned int partial = sctx->byte_count % MD5_HMAC_BLOCK_SIZE;
    74  if (partial + len < MD5_HMAC_BLOCK_SIZE) {
    76  memcpy((u8 *)sctx->block + partial, data, len);
    78  __md5_sparc64_update(sctx, data, len, partial);

sha512_glue.c
  in __sha512_sparc64_update():
    30  unsigned int len, unsigned int partial)
    36  if (partial) {
    37  done = SHA512_BLOCK_SIZE - partial;
    38  memcpy(sctx->buf + partial, data, done);
  in sha512_sparc64_update():
    55  unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;
    58  if (partial + len < SHA512_BLOCK_SIZE) {
    61  memcpy(sctx->buf + partial, data, len);
    63  __sha512_sparc64_update(sctx, data, len, partial);

sha256_glue.c
  in __sha256_sparc64_update():
    31  unsigned int len, unsigned int partial)
    36  if (partial) {
    37  done = SHA256_BLOCK_SIZE - partial;
    38  memcpy(sctx->buf + partial, data, done);
  in sha256_sparc64_update():
    55  unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
    58  if (partial + len < SHA256_BLOCK_SIZE) {
    60  memcpy(sctx->buf + partial, data, len);
    62  __sha256_sparc64_update(sctx, data, len, partial);

/openbmc/linux/fs/ext2/

inode.c
  in ext2_find_goal():
    325  * @partial: pointer to the last triple within a chain
    331  Indirect *partial)
    346  return ext2_find_near(inode, partial);

    466  * we had read the existing part of chain and partial points to the last

  in ext2_get_blocks():
    632  Indirect *partial;
    648  partial = ext2_get_branch(inode, depth, offsets, chain, &err);
    650  if (!partial) {
    666  partial = chain + depth - 1;
    696  if (err == -EAGAIN || !verify_chain(chain, partial)) {
    697  while (partial > chain) {
    …

/openbmc/linux/Documentation/ABI/testing/

sysfs-kernel-slab
    95   allocation from a partial or new slab. It can be written to
    178  The deactivate_to_head file shows how many times a partial cpu
    179  slab was deactivated and added to the head of its node's partial
    189  The deactivate_to_tail file shows how many times a partial cpu
    190  slab was deactivated and added to the tail of its node's partial
    211  partial list. It can be written to clear the current count.
    254  its node's partial list. It can be written to clear the current
    276  using the slow path (i.e. to a full or partial slab). It can
    296  remain on a node's partial list to avoid the overhead of
    325  objects are on partial slabs and from which nodes they are
    …

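These counters live under /sys/kernel/slab/<cache>/ and, as the excerpts note, a statistics file can be written to clear its count (the stat files exist only with CONFIG_SLUB_STATS, and clearing needs root). A small sketch of reading and resetting one such counter; the cache name "kmalloc-64" is only an example:

    #include <stdio.h>

    int main(void)
    {
        const char *path = "/sys/kernel/slab/kmalloc-64/deactivate_to_head";
        char buf[256];
        FILE *f = fopen(path, "r");

        if (!f)
            return 1;
        if (fgets(buf, sizeof(buf), f))
            printf("%s: %s", path, buf);  /* count, optionally per-CPU detail */
        fclose(f);

        /* writing clears the current count */
        f = fopen(path, "w");
        if (f) {
            fputs("0", f);
            fclose(f);
        }
        return 0;
    }
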
/openbmc/linux/Documentation/devicetree/bindings/fpga/

fpga-region.txt
    18   FPGA Regions represent FPGA's and partial reconfiguration regions of FPGA's in
    34   Partial Reconfiguration (PR)
    39   Partial Reconfiguration Region (PRR)
    51   * Also called a "partial bit stream"
    64   * During Partial Reconfiguration of a specific region, that region's bridge
    79   * A base image may set up a set of partial reconfiguration regions that may
    150  For partial reconfiguration (PR), each PR region will have an FPGA Region.
    185  - partial-fpga-config : boolean, set if partial reconfiguration is to be done,
    297  * Partial reconfiguration with bridges in the FPGA
    301  region while the buses are enabled for other sections. Before any partial
    …

/openbmc/u-boot/lib/

sha512.c
  in sha512_base_do_update():
    206  unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;
    212  if (unlikely((partial + len) >= SHA512_BLOCK_SIZE)) {
    215  if (partial) {
    216  int p = SHA512_BLOCK_SIZE - partial;
    218  memcpy(sctx->buf + partial, data, p);
    232  partial = 0;
    235  memcpy(sctx->buf + partial, data, len);
  in sha512_base_do_finalize():
    242  unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;
    244  sctx->buf[partial++] = 0x80;
    245  if (partial > bit_offset) {
    …

/openbmc/linux/drivers/crypto/stm32/

stm32-crc32.c
    69   u32 partial; /* crc32c: partial in first 4 bytes of that struct */
  in stm32_crc_init():
    136  /* Store partial result */
    137  ctx->partial = readl_relaxed(crc->regs + CRC_DR);
  in burst_update():
    163  ctx->partial = crc32_le(ctx->partial, d8, length);
    165  ctx->partial = __crc32c_le(ctx->partial, d8, length);
    176  writel_relaxed(bitrev32(ctx->partial), crc->regs + CRC_INIT);
    205  /* Store partial result */
    206  ctx->partial = readl_relaxed(crc->regs + CRC_DR);
  in stm32_crc_final():
    249  ~ctx->partial : ctx->partial, out);

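Here ctx->partial is simply the running CRC: it is read back from the hardware after each burst and can later be re-seeded into CRC_INIT or continued in software via crc32_le(). The resumability this relies on, where a CRC finished in two chunks equals the one-shot CRC, can be demonstrated with a plain bitwise CRC-32 standing in for the kernel's crc32_le():

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Reflected CRC-32 (polynomial 0xEDB88320), bit-at-a-time.
     * Passing a previous result as 'crc' continues that computation. */
    static uint32_t crc32_update(uint32_t crc, const uint8_t *p, size_t len)
    {
        crc = ~crc;
        while (len--) {
            crc ^= *p++;
            for (int i = 0; i < 8; i++)
                crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1));
        }
        return ~crc;
    }

    int main(void)
    {
        const char msg[] = "partial result";
        size_t n = strlen(msg);

        /* one shot ... */
        uint32_t whole = crc32_update(0, (const uint8_t *)msg, n);

        /* ... equals two chunks resumed from the stored partial value */
        uint32_t part = crc32_update(0, (const uint8_t *)msg, 7);
        part = crc32_update(part, (const uint8_t *)msg + 7, n - 7);

        printf("%08x %08x\n", whole, part);   /* identical */
        return 0;
    }
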
/openbmc/linux/drivers/iommu/

io-pgfault.c
    33   * @partial: faults that are part of a Page Request Group for which the last
    40   struct list_head partial;
  in iommu_queue_iopf():
    175  list_add(&iopf->list, &iopf_param->partial);
    184  * need to clean up before leaving, otherwise partial faults
    197  /* See if we have partial faults for this group */
    198  list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
    208  list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {

    252  * iopf_queue_discard_partial - Remove all pending partial fault
    253  * @queue: the queue whose partial faults need to be discarded
    256  * lost and the IOMMU driver calls this to discard all partial faults. The
    …

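The comments above describe a grouping protocol: page faults arrive one at a time, faults that are not the last of their Page Request Group are parked on the partial list, and the fault carrying the group's "last" flag sweeps its parked groupmates into a single unit of work. A simplified standalone sketch of that logic, using plain pointers instead of list_head and illustrative names (FAULT_LAST, queue_fault):

    #include <stddef.h>

    #define FAULT_LAST 0x1

    struct fault {
        int grpid;              /* Page Request Group ID */
        int flags;
        struct fault *next;
    };

    static struct fault *partial_list;  /* cf. iopf_param->partial */

    /* Returns a ->next-linked group ready to be handled, or NULL if the
     * group is still incomplete and the fault was parked. */
    static struct fault *queue_fault(struct fault *f)
    {
        struct fault *group, **pp;

        if (!(f->flags & FAULT_LAST)) {
            f->next = partial_list;     /* park: more of this group follows */
            partial_list = f;
            return NULL;
        }

        /* last fault of the group: sweep parked faults with the same ID */
        f->next = NULL;
        group = f;
        for (pp = &partial_list; *pp; ) {
            struct fault *it = *pp;

            if (it->grpid == f->grpid) {
                *pp = it->next;
                it->next = group;
                group = it;
            } else {
                pp = &it->next;
            }
        }
        return group;
    }

The iopf_queue_discard_partial() kernel-doc at lines 252-256 covers the other exit: when pending requests may have been lost, the driver drops everything still parked rather than waiting for a last fault that will never arrive.
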
/openbmc/linux/arch/mips/cavium-octeon/crypto/

octeon-sha256.c
  in __octeon_sha256_update():
    70  unsigned int partial;
    74  partial = sctx->count % SHA256_BLOCK_SIZE;
    79  if ((partial + len) >= SHA256_BLOCK_SIZE) {
    80  if (partial) {
    81  done = -partial;
    82  memcpy(sctx->buf + partial, data,
    93  partial = 0;
    95  memcpy(sctx->buf + partial, src, len - done);

octeon-sha1.c
  in __octeon_sha1_update():
    78   unsigned int partial;
    82   partial = sctx->count % SHA1_BLOCK_SIZE;
    87   if ((partial + len) >= SHA1_BLOCK_SIZE) {
    88   if (partial) {
    89   done = -partial;
    90   memcpy(sctx->buffer + partial, data,
    101  partial = 0;
    103  memcpy(sctx->buffer + partial, src, len - done);

/openbmc/linux/arch/x86/include/asm/

unwind.h
  in unwind_get_entry_regs():
    69  * If 'partial' returns true, only the iret frame registers are valid.
    72  bool *partial)
    77  if (partial) {
    79  *partial = !state->full_regs;
    81  *partial = false;
    89  bool *partial)

/openbmc/linux/Documentation/driver-api/md/

raid5-ppl.rst
    2   Partial Parity Log
    5   Partial Parity Log (PPL) is a feature available for RAID5 arrays. The issue
    15  Partial parity for a write operation is the XOR of stripe data chunks not
    17  write hole. XORing partial parity with the modified chunks produces parity for
    26  When handling a write request PPL writes partial parity before new data and

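Lines 15-17 state the core identity: partial parity is the XOR of the stripe's unmodified data chunks, and XORing it with the modified chunks yields the full parity. A worked toy example for a 3+1 stripe of one-byte chunks, pure illustration rather than md/raid5 code:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned char d[3] = { 0x11, 0x22, 0x33 };   /* data chunks */
        unsigned char parity = d[0] ^ d[1] ^ d[2];

        /* a write is about to modify chunk 1: partial parity is the
         * XOR of the chunks NOT being modified */
        unsigned char partial = d[0] ^ d[2];

        d[1] = 0xAA;                                 /* the new data */

        /* XORing partial parity with the modified chunk produces the
         * new full parity, without re-reading d[0] and d[2] */
        unsigned char new_parity = partial ^ d[1];

        assert(new_parity == (d[0] ^ d[1] ^ d[2]));
        printf("old parity %02x, new parity %02x\n", parity, new_parity);
        return 0;
    }

Logging this partial value before the new data and parity are written is what lets PPL close the RAID5 write hole: after an unclean shutdown the stored partial parity plus the surviving chunks are enough to recompute a consistent parity.
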