Lines Matching refs:tgt

120 struct sbp2_target *tgt; member
170 static struct fw_device *target_parent_device(struct sbp2_target *tgt) in target_parent_device() argument
172 return fw_parent_device(tgt->unit); in target_parent_device()
175 static const struct device *tgt_dev(const struct sbp2_target *tgt) in tgt_dev() argument
177 return &tgt->unit->device; in tgt_dev()
182 return &lu->tgt->unit->device; in lu_dev()
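
Taken together, the references collected here name most of what struct sbp2_target carries. The stub below gathers only the members this listing actually touches (field types are inferred from usage, so treat them as assumptions rather than the driver's definition), followed by a reconstruction of the three accessors shown at lines 170-182:

#include <linux/firewire.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Stub: only the members visible in this cross-reference; types are
 * inferred from usage and are assumptions. */
struct sbp2_target {
	struct fw_unit *unit;		/* set in sbp2_probe(), line 1142 */
	spinlock_t lock;		/* guards the counters below, line 1144 */
	struct list_head lu_list;	/* logical units, lines 1143/994 */
	u64 guid;			/* EUI-64, lines 1002/1145 */
	u64 management_agent_address;	/* line 1030 */
	u32 directory_id;		/* lines 1035/1157 */
	int node_id;			/* lines 834/945 */
	int address_high;		/* lines 835/946 */
	unsigned int mgt_orb_timeout;	/* milliseconds, lines 1048/1077 */
	unsigned int max_payload;	/* line 1176 */
	unsigned int workarounds;	/* line 1118 */
	int dont_block;			/* veto count, lines 683/752/989 */
	int blocked;			/* blocked-LU count, lines 709/732 */
};

struct sbp2_logical_unit {
	struct sbp2_target *tgt;	/* back-pointer, lines 120/983 */
	struct list_head link;		/* on tgt->lu_list, line 994 */
	u16 lun;			/* line 1581 */
	bool blocked;			/* lines 706/732 */
	int generation;			/* lines 653/675 */
	/* ... further members not visible in this listing ... */
};

/* Accessors reconstructed from lines 170-182. */
static struct fw_device *target_parent_device(struct sbp2_target *tgt)
{
	return fw_parent_device(tgt->unit);	/* FireWire node hosting the unit */
}

static const struct device *tgt_dev(const struct sbp2_target *tgt)
{
	return &tgt->unit->device;		/* device for dev_err()/dev_notice() */
}

static const struct device *lu_dev(const struct sbp2_logical_unit *lu)
{
	return &lu->tgt->unit->device;
}
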
436 spin_lock_irqsave(&lu->tgt->lock, flags); in sbp2_status_write()
446 spin_unlock_irqrestore(&lu->tgt->lock, flags); in sbp2_status_write()
473 spin_lock_irqsave(&orb->lu->tgt->lock, flags); in complete_transaction()
479 spin_unlock_irqrestore(&orb->lu->tgt->lock, flags); in complete_transaction()
484 spin_unlock_irqrestore(&orb->lu->tgt->lock, flags); in complete_transaction()
493 struct fw_device *device = target_parent_device(lu->tgt); in sbp2_send_orb()
501 spin_lock_irqsave(&lu->tgt->lock, flags); in sbp2_send_orb()
503 spin_unlock_irqrestore(&lu->tgt->lock, flags); in sbp2_send_orb()
515 struct fw_device *device = target_parent_device(lu->tgt); in sbp2_cancel_orbs()
521 spin_lock_irq(&lu->tgt->lock); in sbp2_cancel_orbs()
523 spin_unlock_irq(&lu->tgt->lock); in sbp2_cancel_orbs()
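
Lines 436-523 show that the same tgt->lock also serializes all ORB bookkeeping: status writes, transaction completion, submission and cancellation each take lu->tgt->lock. A minimal sketch of the submission side, building on the stubs above; the per-LU orb_list, the sbp2_orb fields and the exact transaction arguments are not visible in this listing and are assumptions:

/* Locking pattern around ORB submission (lines 493-503); struct sbp2_orb and
 * its link/t/pointer fields, plus lu->orb_list, are assumed. */
static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
			  int node_id, int generation, u64 offset)
{
	struct fw_device *device = target_parent_device(lu->tgt);
	unsigned long flags;

	/* queue the ORB under the target lock so completion/cancel can see it */
	spin_lock_irqsave(&lu->tgt->lock, flags);
	list_add_tail(&orb->link, &lu->orb_list);
	spin_unlock_irqrestore(&lu->tgt->lock, flags);

	/* hand the ORB pointer to the target; complete_transaction()
	 * (lines 473-484) removes it again under the same lock */
	fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST,
			node_id, generation, device->max_speed, offset,
			&orb->pointer, 8, complete_transaction, orb);
}
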
553 struct fw_device *device = target_parent_device(lu->tgt); in sbp2_send_management_orb()
592 timeout = lu->tgt->mgt_orb_timeout; in sbp2_send_management_orb()
607 lu->tgt->management_agent_address); in sbp2_send_management_orb()
649 struct fw_device *device = target_parent_device(lu->tgt); in sbp2_agent_reset()
653 lu->tgt->node_id, lu->generation, device->max_speed, in sbp2_agent_reset()
666 struct fw_device *device = target_parent_device(lu->tgt); in sbp2_agent_reset_no_wait()
675 lu->tgt->node_id, lu->generation, device->max_speed, in sbp2_agent_reset_no_wait()
680 static inline void sbp2_allow_block(struct sbp2_target *tgt) in sbp2_allow_block() argument
682 spin_lock_irq(&tgt->lock); in sbp2_allow_block()
683 --tgt->dont_block; in sbp2_allow_block()
684 spin_unlock_irq(&tgt->lock); in sbp2_allow_block()
699 struct sbp2_target *tgt = lu->tgt; in sbp2_conditionally_block() local
700 struct fw_card *card = target_parent_device(tgt)->card; in sbp2_conditionally_block()
702 container_of((void *)tgt, struct Scsi_Host, hostdata[0]); in sbp2_conditionally_block()
705 spin_lock_irqsave(&tgt->lock, flags); in sbp2_conditionally_block()
706 if (!tgt->dont_block && !lu->blocked && in sbp2_conditionally_block()
709 if (++tgt->blocked == 1) in sbp2_conditionally_block()
712 spin_unlock_irqrestore(&tgt->lock, flags); in sbp2_conditionally_block()
723 struct sbp2_target *tgt = lu->tgt; in sbp2_conditionally_unblock() local
724 struct fw_card *card = target_parent_device(tgt)->card; in sbp2_conditionally_unblock()
726 container_of((void *)tgt, struct Scsi_Host, hostdata[0]); in sbp2_conditionally_unblock()
729 spin_lock_irq(&tgt->lock); in sbp2_conditionally_unblock()
732 unblock = --tgt->blocked == 0; in sbp2_conditionally_unblock()
734 spin_unlock_irq(&tgt->lock); in sbp2_conditionally_unblock()
746 static void sbp2_unblock(struct sbp2_target *tgt) in sbp2_unblock() argument
749 container_of((void *)tgt, struct Scsi_Host, hostdata[0]); in sbp2_unblock()
751 spin_lock_irq(&tgt->lock); in sbp2_unblock()
752 ++tgt->dont_block; in sbp2_unblock()
753 spin_unlock_irq(&tgt->lock); in sbp2_unblock()
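
The cluster at lines 680-753 is the target's block/unblock machinery: two counters guarded by tgt->lock. dont_block is a veto count (raised per LU at line 989 and by sbp2_unblock(), dropped by sbp2_allow_block() once login has finished), while blocked counts logical units that currently want the Scsi_Host blocked. A sketch of the counting pattern; the generation test and the scsi_block_requests()/scsi_unblock_requests() calls are assumptions about the elided code:

#include <scsi/scsi_host.h>

static void sbp2_conditionally_block(struct sbp2_logical_unit *lu)
{
	struct sbp2_target *tgt = lu->tgt;
	struct fw_card *card = target_parent_device(tgt)->card;
	struct Scsi_Host *shost =
		container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
	unsigned long flags;

	spin_lock_irqsave(&tgt->lock, flags);
	if (!tgt->dont_block && !lu->blocked &&
	    lu->generation != card->generation /* assumed condition */) {
		lu->blocked = true;
		if (++tgt->blocked == 1)	/* first blocked LU blocks the host */
			scsi_block_requests(shost);
	}
	spin_unlock_irqrestore(&tgt->lock, flags);
}

static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu)
{
	struct sbp2_target *tgt = lu->tgt;
	struct fw_card *card = target_parent_device(tgt)->card;
	struct Scsi_Host *shost =
		container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
	bool unblock = false;

	spin_lock_irq(&tgt->lock);
	if (lu->blocked &&
	    lu->generation == card->generation /* assumed condition */) {
		lu->blocked = false;
		unblock = --tgt->blocked == 0;	/* last one releases the host */
	}
	spin_unlock_irq(&tgt->lock);

	if (unblock)
		scsi_unblock_requests(shost);
}
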
787 struct fw_device *device = target_parent_device(lu->tgt); in sbp2_set_busy_timeout()
791 lu->tgt->node_id, lu->generation, device->max_speed, in sbp2_set_busy_timeout()
801 struct sbp2_target *tgt = lu->tgt; in sbp2_login() local
802 struct fw_device *device = target_parent_device(tgt); in sbp2_login()
826 dev_err(tgt_dev(tgt), "failed to login to LUN %04x\n", in sbp2_login()
829 sbp2_unblock(lu->tgt); in sbp2_login()
834 tgt->node_id = node_id; in sbp2_login()
835 tgt->address_high = local_node_id << 16; in sbp2_login()
844 dev_notice(tgt_dev(tgt), "logged in to LUN %04x (%d retries)\n", in sbp2_login()
861 if (lu->tgt->workarounds & SBP2_WORKAROUND_DELAY_INQUIRY) in sbp2_login()
864 shost = container_of((void *)tgt, struct Scsi_Host, hostdata[0]); in sbp2_login()
889 sbp2_allow_block(tgt); in sbp2_login()
911 struct sbp2_target *tgt = lu->tgt; in sbp2_reconnect() local
912 struct fw_device *device = target_parent_device(tgt); in sbp2_reconnect()
936 dev_err(tgt_dev(tgt), "failed to reconnect\n"); in sbp2_reconnect()
945 tgt->node_id = node_id; in sbp2_reconnect()
946 tgt->address_high = local_node_id << 16; in sbp2_reconnect()
950 dev_notice(tgt_dev(tgt), "reconnected to LUN %04x (%d retries)\n", in sbp2_reconnect()
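
Both sbp2_login() (lines 834-835) and sbp2_reconnect() (lines 945-946) end by recording where the target now lives on the bus: its node ID, plus the local node ID shifted into the upper 16 bits, which later forms the high quadlet of ORB data descriptors (lines 1402/1428). A hypothetical helper capturing just that shared step; the driver itself does it inline:

/* Hypothetical helper, not in the driver; see lines 834-835 and 945-946. */
static void sbp2_target_set_node_ids(struct sbp2_target *tgt,
				     int node_id, int local_node_id)
{
	tgt->node_id = node_id;			 /* target's current node ID */
	tgt->address_high = local_node_id << 16; /* our node ID in the high half,
						  * reused at lines 1402/1428 */
}
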
965 static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry) in sbp2_add_logical_unit() argument
983 lu->tgt = tgt; in sbp2_add_logical_unit()
989 ++tgt->dont_block; in sbp2_add_logical_unit()
994 list_add_tail(&lu->link, &tgt->lu_list); in sbp2_add_logical_unit()
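
The sbp2_add_logical_unit() fragments above (lines 965-994) show the bookkeeping each new LU performs against its target: set the back-pointer, bump dont_block so the host stays unblocked until login completes, and queue itself on tgt->lu_list. A minimal sketch of just that bookkeeping; the allocation, LUN decoding and whatever else the real function sets up are assumed or elided:

#include <linux/slab.h>

static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
{
	struct sbp2_logical_unit *lu;

	lu = kzalloc(sizeof(*lu), GFP_KERNEL);
	if (!lu)
		return -ENOMEM;

	lu->tgt = tgt;			/* back-pointer used throughout the listing */
	lu->lun = lun_entry & 0xffff;	/* assumed: LUN in the low 16 bits */

	++tgt->dont_block;		/* keep the host unblocked until login (line 989) */
	list_add_tail(&lu->link, &tgt->lu_list);
	return 0;
}
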
998 static void sbp2_get_unit_unique_id(struct sbp2_target *tgt, in sbp2_get_unit_unique_id() argument
1002 tgt->guid = (u64)leaf[1] << 32 | leaf[2]; in sbp2_get_unit_unique_id()
1005 static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt, in sbp2_scan_logical_unit_dir() argument
1014 sbp2_add_logical_unit(tgt, value) < 0) in sbp2_scan_logical_unit_dir()
1019 static int sbp2_scan_unit_dir(struct sbp2_target *tgt, const u32 *directory, in sbp2_scan_unit_dir() argument
1030 tgt->management_agent_address = in sbp2_scan_unit_dir()
1035 tgt->directory_id = value; in sbp2_scan_unit_dir()
1048 tgt->mgt_orb_timeout = (value >> 8 & 0xff) * 500; in sbp2_scan_unit_dir()
1052 if (sbp2_add_logical_unit(tgt, value) < 0) in sbp2_scan_unit_dir()
1057 sbp2_get_unit_unique_id(tgt, ci.p - 1 + value); in sbp2_scan_unit_dir()
1062 if (sbp2_scan_logical_unit_dir(tgt, ci.p - 1 + value) < 0) in sbp2_scan_unit_dir()
1075 static void sbp2_clamp_management_orb_timeout(struct sbp2_target *tgt) in sbp2_clamp_management_orb_timeout() argument
1077 unsigned int timeout = tgt->mgt_orb_timeout; in sbp2_clamp_management_orb_timeout()
1080 dev_notice(tgt_dev(tgt), "%ds mgt_ORB_timeout limited to 40s\n", in sbp2_clamp_management_orb_timeout()
1083 tgt->mgt_orb_timeout = clamp_val(timeout, 5000, 40000); in sbp2_clamp_management_orb_timeout()
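
Lines 1048 and 1075-1083 show how the management ORB timeout is derived and bounded: bits 8-15 of the unit-directory entry carry the timeout in 500 ms units, and the result is clamped to 5-40 seconds (stored in milliseconds). A sketch of that arithmetic; the condition guarding the notice is an assumption:

static void sbp2_clamp_management_orb_timeout(struct sbp2_target *tgt)
{
	/* set in sbp2_scan_unit_dir(), line 1048:
	 *   tgt->mgt_orb_timeout = (value >> 8 & 0xff) * 500;
	 * i.e. (value >> 8) & 0xff half-seconds, converted to milliseconds */
	unsigned int timeout = tgt->mgt_orb_timeout;

	if (timeout > 40000)		/* assumed: warn only when capping */
		dev_notice(tgt_dev(tgt), "%ds mgt_ORB_timeout limited to 40s\n",
			   timeout / 1000);

	tgt->mgt_orb_timeout = clamp_val(timeout, 5000, 40000);
}
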
1086 static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model, in sbp2_init_workarounds() argument
1093 dev_notice(tgt_dev(tgt), in sbp2_init_workarounds()
1115 dev_notice(tgt_dev(tgt), "workarounds 0x%x " in sbp2_init_workarounds()
1118 tgt->workarounds = w; in sbp2_init_workarounds()
1127 struct sbp2_target *tgt; in sbp2_probe() local
1136 shost = scsi_host_alloc(&scsi_driver_template, sizeof(*tgt)); in sbp2_probe()
1140 tgt = (struct sbp2_target *)shost->hostdata; in sbp2_probe()
1141 dev_set_drvdata(&unit->device, tgt); in sbp2_probe()
1142 tgt->unit = unit; in sbp2_probe()
1143 INIT_LIST_HEAD(&tgt->lu_list); in sbp2_probe()
1144 spin_lock_init(&tgt->lock); in sbp2_probe()
1145 tgt->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4]; in sbp2_probe()
1157 tgt->directory_id = ((unit->directory - device->config_rom) * 4 in sbp2_probe()
1163 if (sbp2_scan_unit_dir(tgt, unit->directory, &model, in sbp2_probe()
1167 sbp2_clamp_management_orb_timeout(tgt); in sbp2_probe()
1168 sbp2_init_workarounds(tgt, model, firmware_revision); in sbp2_probe()
1176 tgt->max_payload = min3(device->max_speed + 7, 10U, in sbp2_probe()
1180 list_for_each_entry(lu, &tgt->lu_list, link) in sbp2_probe()
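
sbp2_probe() (lines 1127-1180) embeds the target directly in the Scsi_Host's hostdata, which is why other functions in this listing recover the host with container_of((void *)tgt, struct Scsi_Host, hostdata[0]). A sketch of the allocation and the reverse lookup; the probe signature, error handling and the later probe steps (unit-directory scan, workarounds, LU login) are assumed or elided:

/* hostdata embedding as at lines 1136-1145. */
static int sbp2_probe(struct fw_unit *unit, const struct ieee1394_device_id *id)
{
	struct fw_device *device = fw_parent_device(unit);
	struct Scsi_Host *shost;
	struct sbp2_target *tgt;

	/* allocate the Scsi_Host with the target as its private data */
	shost = scsi_host_alloc(&scsi_driver_template, sizeof(*tgt));
	if (!shost)
		return -ENOMEM;

	tgt = (struct sbp2_target *)shost->hostdata;
	dev_set_drvdata(&unit->device, tgt);	/* looked up again in update/remove */
	tgt->unit = unit;
	INIT_LIST_HEAD(&tgt->lu_list);
	spin_lock_init(&tgt->lock);
	/* EUI-64 assembled from two config-ROM quadlets (line 1145) */
	tgt->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];

	/* ... unit-directory scan, workarounds, login of each LU (elided) ... */
	return 0;
}

/* Reverse direction, as used at e.g. lines 702, 749 and 1218: hostdata[0] is
 * the first byte of the embedded target, so container_of() walks back to the
 * enclosing Scsi_Host. Hypothetical helper; the driver open-codes this. */
static struct Scsi_Host *tgt_to_shost(struct sbp2_target *tgt)
{
	return container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
}
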
1196 struct sbp2_target *tgt = dev_get_drvdata(&unit->device); in sbp2_update() local
1205 list_for_each_entry(lu, &tgt->lu_list, link) { in sbp2_update()
1215 struct sbp2_target *tgt = dev_get_drvdata(&unit->device); in sbp2_remove() local
1218 container_of((void *)tgt, struct Scsi_Host, hostdata[0]); in sbp2_remove()
1222 sbp2_unblock(tgt); in sbp2_remove()
1224 list_for_each_entry_safe(lu, next, &tgt->lu_list, link) { in sbp2_remove()
1341 struct fw_device *device = target_parent_device(base_orb->lu->tgt); in complete_command_orb()
1402 cpu_to_be32(lu->tgt->address_high); in sbp2_map_scatterlist()
1428 orb->request.data_descriptor.high = cpu_to_be32(lu->tgt->address_high); in sbp2_map_scatterlist()
1447 struct fw_device *device = target_parent_device(lu->tgt); in sbp2_scsi_queuecommand()
1461 COMMAND_ORB_MAX_PAYLOAD(lu->tgt->max_payload) | in sbp2_scsi_queuecommand()
1485 sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, generation, in sbp2_scsi_queuecommand()
1509 if (lu->tgt->workarounds & SBP2_WORKAROUND_INQUIRY_36) in sbp2_scsi_slave_alloc()
1531 lu->tgt->workarounds & SBP2_WORKAROUND_MODE_SENSE_8) in sbp2_scsi_slave_configure()
1534 if (lu->tgt->workarounds & SBP2_WORKAROUND_FIX_CAPACITY) in sbp2_scsi_slave_configure()
1537 if (lu->tgt->workarounds & SBP2_WORKAROUND_POWER_CONDITION) in sbp2_scsi_slave_configure()
1540 if (lu->tgt->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS) in sbp2_scsi_slave_configure()
1580 (unsigned long long)lu->tgt->guid, in sbp2_sysfs_ieee1394_id_show()
1581 lu->tgt->directory_id, lu->lun); in sbp2_sysfs_ieee1394_id_show()