/openbmc/linux/drivers/md/persistent-data/
dm-space-map.h
    81   sm->destroy(sm);  in dm_sm_destroy()
    86   return sm->extend(sm, extra_blocks);  in dm_sm_extend()
    91   return sm->get_nr_blocks(sm, count);  in dm_sm_get_nr_blocks()
    96   return sm->get_nr_free(sm, count);  in dm_sm_get_nr_free()
    102  return sm->get_count(sm, b, result);  in dm_sm_get_count()
    114  return sm->set_count(sm, b, count);  in dm_sm_set_count()
    119  return sm->commit(sm);  in dm_sm_commit()
    124  return sm->inc_blocks(sm, b, e);  in dm_sm_inc_blocks()
    134  return sm->dec_blocks(sm, b, e);  in dm_sm_dec_blocks()
    144  return sm->new_block(sm, b);  in dm_sm_new_block()
    [all …]
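These wrappers show dm_space_map's interface-as-a-struct-of-function-pointers design: every dm_sm_*() helper simply dispatches through the object's ops table. A minimal compilable sketch of that pattern follows; the struct layout and wrapper names are simplified stand-ins, not the kernel's full definition.

    /* Sketch: an interface expressed as a struct of function pointers,
     * with thin inline wrappers hiding the indirection from callers. */
    #include <stdint.h>

    typedef uint64_t dm_block_t;

    struct space_map {
        void (*destroy)(struct space_map *sm);
        int  (*extend)(struct space_map *sm, dm_block_t extra_blocks);
        int  (*get_nr_blocks)(struct space_map *sm, dm_block_t *count);
        int  (*get_count)(struct space_map *sm, dm_block_t b, uint32_t *result);
        int  (*commit)(struct space_map *sm);
    };

    /* Wrappers keep call sites uniform regardless of the backing implementation. */
    static inline void sm_destroy(struct space_map *sm)
    {
        sm->destroy(sm);
    }

    static inline int sm_extend(struct space_map *sm, dm_block_t extra_blocks)
    {
        return sm->extend(sm, extra_blocks);
    }

    static inline int sm_get_nr_blocks(struct space_map *sm, dm_block_t *count)
    {
        return sm->get_nr_blocks(sm, count);
    }

Concrete backends (disk-based and metadata-based, below) fill in the same table with their own functions, so callers never branch on the space-map type.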
dm-space-map-disk.c
    37   struct sm_disk *smd = container_of(sm, struct sm_disk, sm);  in sm_disk_destroy()
    44   struct sm_disk *smd = container_of(sm, struct sm_disk, sm);  in sm_disk_extend()
    51   struct sm_disk *smd = container_of(sm, struct sm_disk, sm);  in sm_disk_get_nr_blocks()
    60   struct sm_disk *smd = container_of(sm, struct sm_disk, sm);  in sm_disk_get_nr_free()
    70   struct sm_disk *smd = container_of(sm, struct sm_disk, sm);  in sm_disk_get_count()
    95   struct sm_disk *smd = container_of(sm, struct sm_disk, sm);  in sm_disk_set_count()
    108  struct sm_disk *smd = container_of(sm, struct sm_disk, sm);  in sm_disk_inc_blocks()
    121  struct sm_disk *smd = container_of(sm, struct sm_disk, sm);  in sm_disk_dec_blocks()
    134  struct sm_disk *smd = container_of(sm, struct sm_disk, sm);  in sm_disk_new_block()
    229  memcpy(&smd->sm, &ops, sizeof(smd->sm));  in dm_sm_disk_create()
    [all …]
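Each disk-backed method first recovers its private state from the embedded generic object via container_of(). A self-contained userspace sketch of the same idiom; sm_disk's extra fields here are illustrative.

    #include <stddef.h>
    #include <stdint.h>

    struct space_map {
        void (*destroy)(struct space_map *sm);
    };

    struct sm_disk {
        struct space_map sm;   /* embedded generic part handed out to callers */
        uint64_t nr_blocks;    /* private, disk-specific state */
    };

    /* Step back from a pointer to a member to the enclosing structure. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static void sm_disk_destroy(struct space_map *sm)
    {
        struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
        /* ... tear down smd->nr_blocks bookkeeping, free smd ... */
        (void)smd;
    }

The memcpy(&smd->sm, &ops, ...) hit at line 229 is the other half of the pattern: the constructor copies the shared ops table into the embedded generic object before returning it.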
dm-space-map-metadata.c
    278  struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);  in sm_metadata_destroy()
    285  struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);  in sm_metadata_get_nr_blocks()
    294  struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);  in sm_metadata_get_nr_free()
    307  struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);  in sm_metadata_get_count()
    347  struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);  in sm_metadata_count_is_more_than_one()
    729  memcpy(sm, &bootstrap_ops, sizeof(*sm));  in sm_metadata_extend()
    765  memcpy(sm, &ops, sizeof(*sm));  in sm_metadata_extend()
    779  memcpy(&smm->sm, &ops, sizeof(smm->sm));  in dm_sm_metadata_init()
    781  return &smm->sm;  in dm_sm_metadata_init()
    798  memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));  in dm_sm_metadata_create()
    [all …]
dm-transaction-manager.c
    95   struct dm_space_map *sm;  member
    175  tm->sm = sm;  in dm_tm_create()
    219  r = dm_sm_commit(tm->sm);  in dm_tm_pre_commit()
    363  dm_sm_inc_block(tm->sm, b);  in dm_tm_inc()
    385  dm_sm_dec_block(tm->sm, b);  in dm_tm_dec()
    470  *sm = dm_sm_metadata_init();  in dm_tm_create_internal()
    471  if (IS_ERR(*sm))  in dm_tm_create_internal()
    472  return PTR_ERR(*sm);  in dm_tm_create_internal()
    474  *tm = dm_tm_create(bm, *sm);  in dm_tm_create_internal()
    476  dm_sm_destroy(*sm);  in dm_tm_create_internal()
    [all …]
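dm_tm_create_internal() follows the usual construct-then-unwind ordering: build the space map, hand it to the transaction manager, and destroy the space map if the second step fails. A hedged sketch of that ordering, with placeholder constructors standing in for dm_sm_metadata_init() and dm_tm_create():

    #include <stdlib.h>

    struct space_map { int dummy; };
    struct transaction_manager { struct space_map *sm; };

    /* Placeholders for the real constructors/destructor. */
    static struct space_map *sm_create(void) { return calloc(1, sizeof(struct space_map)); }
    static void sm_destroy(struct space_map *sm) { free(sm); }
    static struct transaction_manager *tm_create(struct space_map *sm)
    {
        struct transaction_manager *tm = calloc(1, sizeof(*tm));
        if (tm)
            tm->sm = sm;   /* tm keeps a reference and delegates inc/dec/commit */
        return tm;
    }

    static int tm_create_internal(struct transaction_manager **tm, struct space_map **sm)
    {
        *sm = sm_create();
        if (!*sm)
            return -1;

        *tm = tm_create(*sm);
        if (!*tm) {
            sm_destroy(*sm);   /* unwind the earlier allocation on failure */
            *sm = NULL;
            return -1;
        }
        return 0;
    }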
/openbmc/linux/drivers/mfd/
sm501.c
    1378  sm = kzalloc(sizeof(*sm), GFP_KERNEL);  in sm501_plat_probe()
    1379  if (!sm) {  in sm501_plat_probe()
    1395  if (!sm->io_res || !sm->mem_res) {  in sm501_plat_probe()
    1401  sm->regs_claim = request_mem_region(sm->io_res->start,  in sm501_plat_probe()
    1411  sm->regs = ioremap(sm->io_res->start, resource_size(sm->io_res));  in sm501_plat_probe()
    1429  kfree(sm);  in sm501_plat_probe()
    1496  if (sm->platdata && sm->platdata->init) {  in sm501_plat_resume()
    1497  sm501_init_regs(sm, sm->platdata->init);  in sm501_plat_resume()
    1561  sm = kzalloc(sizeof(*sm), GFP_KERNEL);  in sm501_pci_probe()
    1562  if (!sm) {  in sm501_pci_probe()
    [all …]
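The probe paths follow the staged allocate → claim region → map registers sequence and unwind with goto-style cleanup on failure. A rough userspace sketch of that shape; claim_region(), release_region_() and map_regs() are placeholders for request_mem_region(), release_mem_region() and ioremap(), not the real kernel API.

    #include <stdlib.h>

    struct device_state {
        void *regs;            /* stands in for the ioremap()ed register window */
        int   region_claimed;  /* stands in for the request_mem_region() result */
    };

    static int  claim_region(void)    { return 1; }          /* placeholder */
    static void release_region_(void) { }                    /* placeholder */
    static void *map_regs(void)       { return malloc(16); } /* placeholder */

    static int device_probe(struct device_state **out)
    {
        struct device_state *sm = calloc(1, sizeof(*sm));    /* kzalloc() analogue */
        if (!sm)
            return -1;

        sm->region_claimed = claim_region();
        if (!sm->region_claimed)
            goto err_free;

        sm->regs = map_regs();
        if (!sm->regs)
            goto err_release;

        *out = sm;
        return 0;

    err_release:
        release_region_();
    err_free:
        free(sm);              /* mirrors the kfree(sm) error path in the hits above */
        return -1;
    }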
/openbmc/linux/fs/xfs/scrub/ |
scrub.c
    368  struct xfs_scrub_metadata *sm)  in xchk_validate_inputs()  argument
    375  sm->sm_flags &= ~XFS_SCRUB_FLAGS_OUT;  in xchk_validate_inputs()
    379  if (memchr_inv(sm->sm_reserved, 0, sizeof(sm->sm_reserved)))  in xchk_validate_inputs()
    384  if (sm->sm_type >= XFS_SCRUB_TYPE_NR)  in xchk_validate_inputs()
    386  ops = &meta_scrub_ops[sm->sm_type];  in xchk_validate_inputs()
    398  if (sm->sm_ino || sm->sm_gen || sm->sm_agno)  in xchk_validate_inputs()
    402  if (sm->sm_ino || sm->sm_gen ||  in xchk_validate_inputs()
    407  if (sm->sm_agno || (sm->sm_gen && !sm->sm_ino))  in xchk_validate_inputs()
    471  struct xfs_scrub_metadata *sm)  in xfs_scrub_metadata()  argument
    507  sc->sm = sm;  in xfs_scrub_metadata()
    [all …]
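xchk_validate_inputs() is a "reject anything we do not understand" gate: clear caller-supplied output flags, require reserved bytes to be zero, and bounds-check sm_type before it ever indexes the ops table. A small self-contained sketch of those checks; the struct layout, flag mask and type count are illustrative, not the XFS UAPI values.

    #include <errno.h>
    #include <stddef.h>
    #include <stdint.h>

    #define SCRUB_TYPE_NR   25u           /* illustrative count of scrubber types */
    #define SCRUB_FLAGS_OUT 0xf0000000u   /* illustrative mask of output-only flags */

    struct scrub_metadata {
        uint32_t sm_type;
        uint32_t sm_flags;
        uint64_t sm_ino;
        uint32_t sm_gen;
        uint32_t sm_agno;
        uint8_t  sm_reserved[32];
    };

    static int validate_inputs(struct scrub_metadata *sm)
    {
        /* Output flags belong to the kernel; drop whatever userspace passed in. */
        sm->sm_flags &= ~SCRUB_FLAGS_OUT;

        /* Reserved space must be zero so it can be assigned meaning later. */
        for (size_t i = 0; i < sizeof(sm->sm_reserved); i++)
            if (sm->sm_reserved[i])
                return -EINVAL;

        /* Bounds-check before sm_type is ever used as a table index. */
        if (sm->sm_type >= SCRUB_TYPE_NR)
            return -ENOENT;

        return 0;
    }

The per-scope checks on sm_ino/sm_gen/sm_agno in the real function follow the same idea: fields that make no sense for the requested scrub scope cause the request to be rejected outright.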
common.c
    82    sc->sm, *error);  in __xchk_process_error()
    87    sc->sm->sm_flags |= errflag;  in __xchk_process_error()
    141   sc->sm->sm_flags |= errflag;  in __xchk_fblock_process_error()
    192   sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;  in xchk_block_set_preen()
    206   sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;  in xchk_ino_set_preen()
    395   if (sc->sm->sm_type == type)  in want_ag_read_header_failure()
    878   if (sc->sm->sm_ino == 0 || sc->sm->sm_ino == ip_in->i_ino)  in xchk_iget_for_scrubbing()
    1073  if (xchk_skip_xref(sc->sm))  in xchk_should_check_xref()
    1128  __u32 smtype = sc->sm->sm_type;  in xchk_metadata_inode_subtype()
    1131  sc->sm->sm_type = scrub_type;  in xchk_metadata_inode_subtype()
    [all …]
health.c
    140  bad = (sc->sm->sm_flags & (XFS_SCRUB_OFLAG_CORRUPT |  in xchk_update_health()
    142  switch (type_to_health_flag[sc->sm->sm_type].group) {  in xchk_update_health()
    144  pag = xfs_perag_get(sc->mp, sc->sm->sm_agno);  in xchk_update_health()
    195  if (sc->sm->sm_type == XFS_SCRUB_TYPE_BNOBT)  in xchk_ag_btree_healthy_enough()
    200  if (sc->sm->sm_type == XFS_SCRUB_TYPE_CNTBT)  in xchk_ag_btree_healthy_enough()
    205  if (sc->sm->sm_type == XFS_SCRUB_TYPE_INOBT)  in xchk_ag_btree_healthy_enough()
    210  if (sc->sm->sm_type == XFS_SCRUB_TYPE_FINOBT)  in xchk_ag_btree_healthy_enough()
    215  if (sc->sm->sm_type == XFS_SCRUB_TYPE_RMAPBT)  in xchk_ag_btree_healthy_enough()
    220  if (sc->sm->sm_type == XFS_SCRUB_TYPE_REFCNTBT)  in xchk_ag_btree_healthy_enough()
    236  type_to_health_flag[sc->sm->sm_type].group == XHG_AG)  in xchk_ag_btree_healthy_enough()
    [all …]
stats.c
    183  const struct xfs_scrub_metadata *sm,  in xchk_stats_merge_one()  argument
    188  if (sm->sm_type >= XFS_SCRUB_TYPE_NR) {  in xchk_stats_merge_one()
    189  ASSERT(sm->sm_type < XFS_SCRUB_TYPE_NR);  in xchk_stats_merge_one()
    193  css = &cs->cs_stats[sm->sm_type];  in xchk_stats_merge_one()
    198  if (sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)  in xchk_stats_merge_one()
    200  if (sm->sm_flags & XFS_SCRUB_OFLAG_PREEN)  in xchk_stats_merge_one()
    202  if (sm->sm_flags & XFS_SCRUB_OFLAG_XFAIL)  in xchk_stats_merge_one()
    204  if (sm->sm_flags & XFS_SCRUB_OFLAG_XCORRUPT)  in xchk_stats_merge_one()
    208  if (sm->sm_flags & XFS_SCRUB_OFLAG_WARNING)  in xchk_stats_merge_one()
    225  const struct xfs_scrub_metadata *sm,  in xchk_stats_merge()  argument
    [all …]
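xchk_stats_merge_one() buckets each completed scrub by sm_type and bumps one counter per outcome flag. A compact sketch of that merge; the flag values and counter names are illustrative rather than the real XFS_SCRUB_OFLAG_* definitions.

    #include <stdint.h>

    /* Illustrative outcome flags; the real values live in the XFS UAPI header. */
    #define OFLAG_CORRUPT (1u << 0)
    #define OFLAG_PREEN   (1u << 1)
    #define OFLAG_WARNING (1u << 2)

    #define SCRUB_TYPE_NR 25u

    struct per_type_stats {
        uint64_t invocations;
        uint64_t corrupt;
        uint64_t preen;
        uint64_t warning;
    };

    struct scrub_stats {
        struct per_type_stats cs_stats[SCRUB_TYPE_NR];
    };

    /* Merge one completed scrub run into the per-type counters, keyed by sm_type. */
    static void stats_merge_one(struct scrub_stats *cs, uint32_t sm_type, uint32_t sm_flags)
    {
        struct per_type_stats *css;

        if (sm_type >= SCRUB_TYPE_NR)   /* ignore runs we cannot attribute */
            return;

        css = &cs->cs_stats[sm_type];
        css->invocations++;
        if (sm_flags & OFLAG_CORRUPT)
            css->corrupt++;
        if (sm_flags & OFLAG_PREEN)
            css->preen++;
        if (sm_flags & OFLAG_WARNING)
            css->warning++;
    }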
/openbmc/linux/drivers/net/fddi/skfp/ |
smt.c
    510  smc->sba.sm = sm ;  in smt_received_pack()
    552  sm->smt_version, &sm->smt_source);  in smt_received_pack()
    558  ((sm->smt_len & 3) && (sm->smt_class != SMT_ECF))) {  in smt_received_pack()
    633  if (sm->smt_tid == smc->sm.pend[SMT_TID_NIF]) {  in smt_received_pack()
    670  smc->sm.pend[SMT_TID_NIF], sm->smt_tid);  in smt_received_pack()
    702  if (sm->smt_tid == smc->sm.pend[SMT_TID_ECF]) {  in smt_received_pack()
    705  else if (sm->smt_tid == smc->sm.pend[SMT_TID_ECF_UNA]) {  in smt_received_pack()
    708  else if (sm->smt_tid == smc->sm.pend[SMT_TID_ECF_DNA]) {  in smt_received_pack()
    720  if (sm->smt_len && !sm_to_para(smc,sm,SMT_P_ECHODATA)) {  in smt_received_pack()
    730  sm->smt_dest = sm->smt_source ;  in smt_received_pack()
    [all …]
ess.c
    148  DB_ESSN(2, "fc %x ft %x", sm->smt_class, sm->smt_type);  in ess_raf_received_pack()
    149  DB_ESSN(2, "ver %x tran %x", sm->smt_version, sm->smt_tid);  in ess_raf_received_pack()
    167  if (sm->smt_type == SMT_REQUEST) {  in ess_raf_received_pack()
    186  smc->ess.alloc_trans_id = sm->smt_tid ;  in ess_raf_received_pack()
    194  sm->smt_dest = smt_sba_da ;  in ess_raf_received_pack()
    277  if (sm->smt_type != SMT_REQUEST) {  in ess_raf_received_pack()
    311  &sm->smt_source);  in ess_raf_received_pack()
    336  if (sm->smt_type != SMT_REQUEST) {  in ess_raf_received_pack()
    342  &sm->smt_source);  in ess_raf_received_pack()
    493  chg->smt.smt_tid = sm->smt_tid ;  in ess_send_response()
    [all …]
/openbmc/linux/drivers/scsi/isci/ |
phy.c
    1063  struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);  in sci_phy_starting_initial_substate_enter()
    1071  struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);  in sci_phy_starting_await_sas_power_substate_enter()
    1079  struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);  in sci_phy_starting_await_sas_power_substate_exit()
    1087  struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);  in sci_phy_starting_await_sata_power_substate_enter()
    1095  struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);  in sci_phy_starting_await_sata_power_substate_exit()
    1103  struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);  in sci_phy_starting_await_sata_phy_substate_enter()
    1110  struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);  in sci_phy_starting_await_sata_phy_substate_exit()
    1117  struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);  in sci_phy_starting_await_sata_speed_substate_enter()
    1124  struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);  in sci_phy_starting_await_sata_speed_substate_exit()
    1131  struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);  in sci_phy_starting_await_sig_fis_uf_substate_enter()
    [all …]
remote_device.c
    341   struct sci_base_state_machine *sm = &idev->sm;  in sci_remote_device_stop()  local
    397   struct sci_base_state_machine *sm = &idev->sm;  in sci_remote_device_reset()  local
    427   struct sci_base_state_machine *sm = &idev->sm;  in sci_remote_device_reset_complete()  local
    443   struct sci_base_state_machine *sm = &idev->sm;  in sci_remote_device_frame_handler()  local
    538   struct sci_base_state_machine *sm = &idev->sm;  in is_remote_device_ready()  local
    572   struct sci_base_state_machine *sm = &idev->sm;  in sci_remote_device_event_handler()  local
    658   struct sci_base_state_machine *sm = &idev->sm;  in sci_remote_device_start_io()  local
    794   struct sci_base_state_machine *sm = &idev->sm;  in sci_remote_device_complete_io()  local
    876   struct sci_base_state_machine *sm = &idev->sm;  in sci_remote_device_start_task()  local
    1017  struct sci_base_state_machine *sm = &idev->sm;  in sci_remote_device_destruct()  local
    [all …]
remote_node_context.c
    270  struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);  in sci_remote_node_context_initial_state_enter()
    288  struct sci_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), sm);  in sci_remote_node_context_posting_state_enter()
    295  struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);  in sci_remote_node_context_invalidating_state_enter()
    304  struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);  in sci_remote_node_context_resuming_state_enter()
    325  struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);  in sci_remote_node_context_ready_state_enter()
    347  struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);  in sci_remote_node_context_tx_suspended_state_enter()
    354  struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);  in sci_remote_node_context_tx_rx_suspended_state_enter()
    373  struct sci_base_state_machine *sm)  in sci_remote_node_context_await_suspend_state_exit()  argument
    376  = container_of(sm, typeof(*rnc), sm);  in sci_remote_node_context_await_suspend_state_exit()
    569  = sci_rnc->sm.current_state_id;  in sci_remote_node_context_suspend()
    [all …]
host.c
    159   handler(sm);  in sci_init_sm()
    167   handler = sm->state_table[sm->current_state_id].exit_state;  in sci_change_state()
    169   handler(sm);  in sci_change_state()
    171   sm->previous_state_id = sm->current_state_id;  in sci_change_state()
    174   handler = sm->state_table[sm->current_state_id].enter_state;  in sci_change_state()
    176   handler(sm);  in sci_change_state()
    1310  struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);  in sci_controller_initial_state_enter()
    1317  struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);  in sci_controller_starting_state_exit()
    1444  struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);  in sci_controller_ready_state_enter()
    1597  struct sci_base_state_machine *sm = &ihost->sm;  in controller_timeout()  local
    [all …]
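sci_change_state() is a table-driven state machine transition: run the old state's optional exit handler, record the previous state, switch, then run the new state's enter handler, and the handlers recover their owning object (host, phy, port, remote device) with container_of(). A runnable sketch of that engine; the state names and printf are illustrative.

    #include <stdio.h>

    enum state { STATE_INITIAL, STATE_READY, STATE_STOPPED, STATE_NR };

    struct state_machine;
    typedef void (*state_handler)(struct state_machine *sm);

    struct state_desc {
        state_handler enter_state;   /* optional: run when the state is entered */
        state_handler exit_state;    /* optional: run when the state is left */
    };

    struct state_machine {
        const struct state_desc *state_table;
        enum state current_state_id;
        enum state previous_state_id;
    };

    static void change_state(struct state_machine *sm, enum state next)
    {
        state_handler handler;

        handler = sm->state_table[sm->current_state_id].exit_state;
        if (handler)
            handler(sm);                         /* leave the old state */

        sm->previous_state_id = sm->current_state_id;
        sm->current_state_id = next;

        handler = sm->state_table[sm->current_state_id].enter_state;
        if (handler)
            handler(sm);                         /* enter the new state */
    }

    static void ready_enter(struct state_machine *sm)
    {
        printf("entered READY from state %d\n", sm->previous_state_id);
    }

    static const struct state_desc table[STATE_NR] = {
        [STATE_READY] = { .enter_state = ready_enter },
    };

    int main(void)
    {
        struct state_machine sm = { .state_table = table };
        change_state(&sm, STATE_READY);
        return 0;
    }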
port.c
    298   struct sci_base_state_machine *sm = &iport->sm;  in port_state_machine_change()  local
    708   struct sci_base_state_machine *sm = &iport->sm;  in sci_port_general_link_up_handler()  local
    945   struct isci_port *iport = container_of(sm, typeof(*iport), sm);  in sci_port_ready_substate_waiting_enter()
    961   struct isci_port *iport = container_of(sm, typeof(*iport), sm);  in scic_sds_port_ready_substate_waiting_exit()
    968   struct isci_port *iport = container_of(sm, typeof(*iport), sm);  in sci_port_ready_substate_operational_enter()
    1028  struct isci_port *iport = container_of(sm, typeof(*iport), sm);  in sci_port_ready_substate_operational_exit()
    1047  struct isci_port *iport = container_of(sm, typeof(*iport), sm);  in sci_port_ready_substate_configuring_enter()
    1491  struct isci_port *iport = container_of(sm, typeof(*iport), sm);  in sci_port_stopped_state_enter()
    1504  struct isci_port *iport = container_of(sm, typeof(*iport), sm);  in sci_port_stopped_state_exit()
    1512  struct isci_port *iport = container_of(sm, typeof(*iport), sm);  in sci_port_ready_state_enter()
    [all …]
/openbmc/linux/drivers/gpu/drm/nouveau/nvkm/engine/gr/ |
ctxgm200.c
    52  u8 sm, i;  in gm200_grctx_generate_smid_config()  local
    54  for (sm = 0; sm < gr->sm_nr; sm++) {  in gm200_grctx_generate_smid_config()
    55  const u8 gpc = gr->sm[sm].gpc;  in gm200_grctx_generate_smid_config()
    56  const u8 tpc = gr->sm[sm].tpc;  in gm200_grctx_generate_smid_config()
    57  dist[sm / 4] |= ((gpc << 4) | tpc) << ((sm % 4) * 8);  in gm200_grctx_generate_smid_config()
    58  gpcs[gpc] |= sm << (tpc * 8);  in gm200_grctx_generate_smid_config()
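The loop packs one byte per SM — GPC in the high nibble, TPC in the low nibble — four SMs per 32-bit distribution word, while also building the per-GPC reverse map. A standalone sketch of exactly that bit-packing, using a made-up SM layout:

    #include <stdint.h>
    #include <stdio.h>

    struct sm_loc { uint8_t gpc, tpc; };

    int main(void)
    {
        /* Illustrative SM -> (GPC, TPC) layout; the driver reads this from hardware. */
        const struct sm_loc sm_map[] = { {0, 0}, {0, 1}, {1, 0}, {1, 1}, {2, 0} };
        const unsigned sm_nr = sizeof(sm_map) / sizeof(sm_map[0]);

        uint32_t dist[8] = { 0 };   /* 4 SM descriptors per 32-bit word            */
        uint32_t gpcs[8] = { 0 };   /* per-GPC word: SM id packed per TPC, 8 bits  */

        for (unsigned sm = 0; sm < sm_nr; sm++) {
            const uint8_t gpc = sm_map[sm].gpc;
            const uint8_t tpc = sm_map[sm].tpc;

            /* byte layout: [gpc:4 | tpc:4], byte index within the word = sm % 4 */
            dist[sm / 4] |= (uint32_t)((gpc << 4) | tpc) << ((sm % 4) * 8);
            gpcs[gpc]    |= (uint32_t)sm << (tpc * 8);
        }

        printf("dist[0] = 0x%08x\n", dist[0]);   /* 0x11100100 for the layout above */
        return 0;
    }

The gp100 variant below differs only in how it spreads TPCs above index 3 across additional per-GPC banks; the per-SM byte packing is the same.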
ctxgp100.c
    111  u8 sm, i;  in gp100_grctx_generate_smid_config()  local
    113  for (sm = 0; sm < gr->sm_nr; sm++) {  in gp100_grctx_generate_smid_config()
    114  const u8 gpc = gr->sm[sm].gpc;  in gp100_grctx_generate_smid_config()
    115  const u8 tpc = gr->sm[sm].tpc;  in gp100_grctx_generate_smid_config()
    116  dist[sm / 4] |= ((gpc << 4) | tpc) << ((sm % 4) * 8);  in gp100_grctx_generate_smid_config()
    117  gpcs[gpc + (gr->func->gpc_nr * (tpc / 4))] |= sm << ((tpc % 4) * 8);  in gp100_grctx_generate_smid_config()
tu102.c
    37  int sm;  in tu102_gr_init_fs()  local
    42  for (sm = 0; sm < gr->sm_nr; sm++) {  in tu102_gr_init_fs()
    43  int tpc = gv100_gr_nonpes_aware_tpc(gr, gr->sm[sm].gpc, gr->sm[sm].tpc);  in tu102_gr_init_fs()
    45  nvkm_wr32(device, GPC_UNIT(gr->sm[sm].gpc, 0x0c10 + tpc * 4), sm);  in tu102_gr_init_fs()
/openbmc/linux/drivers/scsi/csiostor/ |
csio_scsi.h
    264  csio_post_event(&ioreq->sm, CSIO_SCSIE_COMPLETED);  in csio_scsi_completed()
    265  if (csio_list_deleted(&ioreq->sm.sm_list))  in csio_scsi_completed()
    266  list_add_tail(&ioreq->sm.sm_list, cbfn_q);  in csio_scsi_completed()
    272  csio_post_event(&ioreq->sm, CSIO_SCSIE_ABORTED);  in csio_scsi_aborted()
    273  list_add_tail(&ioreq->sm.sm_list, cbfn_q);  in csio_scsi_aborted()
    279  csio_post_event(&ioreq->sm, CSIO_SCSIE_CLOSED);  in csio_scsi_closed()
    280  list_add_tail(&ioreq->sm.sm_list, cbfn_q);  in csio_scsi_closed()
    298  csio_post_event(&ioreq->sm, CSIO_SCSIE_START_IO);  in csio_scsi_start_io()
    311  csio_post_event(&ioreq->sm, CSIO_SCSIE_START_TM);  in csio_scsi_start_tm()
    324  csio_post_event(&ioreq->sm, CSIO_SCSIE_ABORT);  in csio_scsi_abort()
    [all …]
/openbmc/linux/sound/soc/sof/ |
control.c
    21  struct soc_mixer_control *sm = (struct soc_mixer_control *)kcontrol->private_value;  in snd_sof_volume_get()  local
    22  struct snd_sof_control *scontrol = sm->dobj.private;  in snd_sof_volume_get()
    36  struct soc_mixer_control *sm = (struct soc_mixer_control *)kcontrol->private_value;  in snd_sof_volume_put()  local
    37  struct snd_sof_control *scontrol = sm->dobj.private;  in snd_sof_volume_put()
    51  struct snd_sof_control *scontrol = sm->dobj.private;  in snd_sof_volume_info()
    55  if (!sm->platform_max)  in snd_sof_volume_info()
    56  sm->platform_max = sm->max;  in snd_sof_volume_info()
    57  platform_max = sm->platform_max;  in snd_sof_volume_info()
    66  uinfo->value.integer.max = platform_max - sm->min;  in snd_sof_volume_info()
    74  struct snd_sof_control *scontrol = sm->dobj.private;  in snd_sof_switch_get()
    [all …]
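snd_sof_volume_info() defaults platform_max to the control's own max and reports the user-visible range rebased to zero (platform_max - min). A tiny sketch of that range computation; the struct below is a trimmed stand-in for struct soc_mixer_control, not the ASoC definition.

    #include <stdio.h>

    /* Trimmed-down stand-in for struct soc_mixer_control. */
    struct mixer_control {
        int min;
        int max;            /* register range of the control          */
        int platform_max;   /* optional board-level cap, 0 = not set  */
    };

    /* Report the user-visible range: 0 .. (platform_max - min). */
    static int volume_info_max(struct mixer_control *sm)
    {
        if (!sm->platform_max)
            sm->platform_max = sm->max;   /* default the cap to the full range */
        return sm->platform_max - sm->min;
    }

    int main(void)
    {
        struct mixer_control ctl = { .min = -50, .max = 50, .platform_max = 0 };
        printf("userspace max = %d\n", volume_info_max(&ctl));   /* prints 100 */
        return 0;
    }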
/openbmc/linux/fs/ceph/ |
snap.c
    1204  sm = kmalloc(sizeof(*sm), GFP_NOFS);  in ceph_get_snapid_map()
    1205  if (!sm)  in ceph_get_snapid_map()
    1210  kfree(sm);  in ceph_get_snapid_map()
    1243  kfree(sm);  in ceph_get_snapid_map()
    1250  sm->snap, sm->dev);  in ceph_get_snapid_map()
    1251  return sm;  in ceph_get_snapid_map()
    1257  if (!sm)  in ceph_put_snapid_map()
    1268  kfree(sm);  in ceph_put_snapid_map()
    1298  kfree(sm);  in ceph_trim_snapid_map()
    1323  sm->snap, sm->dev);  in ceph_cleanup_snapid_map()
    [all …]
/openbmc/linux/drivers/char/tpm/ |
tpm_crb.c
    93   u32 sm;  member
    176  if ((priv->sm == ACPI_TPM2_START_METHOD) ||  in __crb_go_idle()
    225  if ((priv->sm == ACPI_TPM2_START_METHOD) ||  in __crb_cmd_ready()
    427  (priv->sm == ACPI_TPM2_MEMORY_MAPPED) ||  in crb_send()
    431  if ((priv->sm == ACPI_TPM2_START_METHOD) ||  in crb_send()
    452  if (((priv->sm == ACPI_TPM2_START_METHOD) ||  in crb_cancel()
    613  (priv->sm == ACPI_TPM2_MEMORY_MAPPED)) {  in crb_map_io()
    736  u32 sm;  in crb_acpi_add()  local
    747  sm = buf->start_method;  in crb_acpi_add()
    748  if (sm == ACPI_TPM2_MEMORY_MAPPED) {  in crb_acpi_add()
    [all …]
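The driver caches the ACPI TPM2 start method once at probe (sm = buf->start_method) and branches on it in every doorbell-style operation. A hedged sketch of that dispatch; the enum values below are placeholders, not the ACPI-defined ACPI_TPM2_* constants.

    #include <stdio.h>

    /* Placeholder start methods; the real values come from the ACPI TPM2 table. */
    enum start_method {
        SM_MEMORY_MAPPED,
        SM_ACPI_START,
        SM_COMMAND_WITH_START,
    };

    struct crb_priv {
        enum start_method sm;   /* cached once from the TPM2 table at probe time */
    };

    static int crb_do_send(struct crb_priv *priv)
    {
        /* Some start methods need an explicit ACPI start call,
         * others only a memory-mapped doorbell write. */
        if (priv->sm == SM_ACPI_START || priv->sm == SM_COMMAND_WITH_START)
            printf("invoke ACPI start method\n");
        else
            printf("ring memory-mapped doorbell\n");
        return 0;
    }

    int main(void)
    {
        struct crb_priv priv = { .sm = SM_ACPI_START };
        return crb_do_send(&priv);
    }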
/openbmc/linux/tools/perf/util/ |
sharded_mutex.h
    22  void sharded_mutex__delete(struct sharded_mutex *sm);
    24  static inline struct mutex *sharded_mutex__get_mutex(struct sharded_mutex *sm, size_t hash)  in sharded_mutex__get_mutex()  argument
    26  return &sm->mutexes[hash_bits(hash, sm->cap_bits)];  in sharded_mutex__get_mutex()
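A sharded mutex spreads contention over a power-of-two array of locks and selects one by folding a hash down to cap_bits bits. A pthread-based sketch of the same idea; the simple mask here stands in for perf's hash_bits() fold.

    #include <pthread.h>
    #include <stdlib.h>

    struct sharded_mutex {
        unsigned int cap_bits;        /* log2 of the number of shards */
        pthread_mutex_t mutexes[];    /* 1 << cap_bits locks          */
    };

    static struct sharded_mutex *sharded_mutex__new(unsigned int cap_bits)
    {
        size_t n = (size_t)1 << cap_bits;
        struct sharded_mutex *sm = malloc(sizeof(*sm) + n * sizeof(pthread_mutex_t));

        if (!sm)
            return NULL;
        sm->cap_bits = cap_bits;
        for (size_t i = 0; i < n; i++)
            pthread_mutex_init(&sm->mutexes[i], NULL);
        return sm;
    }

    /* Map an arbitrary hash onto one shard; a mask works because the shard
     * count is a power of two (perf folds in the high bits via hash_bits()). */
    static pthread_mutex_t *sharded_mutex__get_mutex(struct sharded_mutex *sm, size_t hash)
    {
        return &sm->mutexes[hash & (((size_t)1 << sm->cap_bits) - 1)];
    }

Callers hash whatever object they want to protect (perf uses the object's address) and lock only that shard, so unrelated objects rarely contend on the same mutex.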
/openbmc/openbmc/meta-arm/meta-arm/recipes-security/optee/optee-os/ |
0003-optee-enable-clang-support.patch
    26  -libgcc$(sm) := $(shell $(CC$(sm)) $(CFLAGS$(arch-bits-$(sm))) \
    27  +libgcc$(sm) := $(shell $(CC$(sm)) $(LIBGCC_LOCATE_CFLAGS) $(CFLAGS$(arch-bits-$(sm))) \